Columns (one record per repository; list-valued columns hold one entry per file):
    repo_name  : string (length 6 to 130)
    hexsha     : list
    file_path  : list
    code       : list
    apis       : list
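The list columns are parallel: index i of hexsha, file_path, code, and apis all describe the same file of the repository. As a rough illustration of the schema, here is a minimal sketch assuming a record is materialized as a plain Python dict; the repository name, path, file contents, and API names in this snippet are invented placeholders, not values taken from the rows below.

```python
# Hypothetical record following the column schema above (placeholder values).
record = {
    "repo_name": "example-user/example-repo",                    # string
    "hexsha": ["0123456789abcdef0123456789abcdef01234567"],       # one commit sha per file
    "file_path": ["src/example.py"],                              # one path per file
    "code": ["import numpy as np\n"],                             # full file contents
    "apis": [["numpy.array"]],                                    # API calls extracted per file
}

# The four list columns are parallel: entry i of each list refers to the same file.
for sha, path, source, api_calls in zip(
    record["hexsha"], record["file_path"], record["code"], record["apis"]
):
    print(record["repo_name"], path, sha[:8], api_calls)
```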
luiarthur/CytofRepFAM.jl
[ "1f997d1620d74861c5bde5559ebdd1e6c449b9e7" ]
[ "runs/patients/runs/cluster-viz/plot_fam_tsne.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport os\n\nos.makedirs('img', exist_ok=True)\n\nfor phi in (0, 1, 100, 10000): # (0, 1, 25, 50, 100, 1000)\n path_to_fam = 'img/fam-phi{}-clusterings.csv'.format(phi)\n fam_orig = np.loadtxt(path_to_fam).astype(int)\n fam = fam_orig[:, 0]\n sample_idx = fam_orig[:, 1]\n\n # TSNE for sampels combined\n path_to_tsne_combined = 'img/tsne-combined.txt'\n tsne = np.loadtxt(path_to_tsne_combined, delimiter=',')\n\n # number of samples\n num_sampels = np.unique(sample_idx).size\n\n for i in range(num_sampels):\n print('Making figure {}'.format(i + 1))\n #\n mask_i = (sample_idx == i + 1)\n df = pd.DataFrame(tsne[mask_i], columns=[\"comp1\", \"comp2\"])\n clustername = \"fam-phi{}\".format(phi)\n df[clustername] = fam[mask_i]\n # \n markersize = 20 if df.shape[0] < 3000 else 10\n sns.pairplot(x_vars=\"comp1\", y_vars=\"comp2\", data=df,\n hue=clustername,\n plot_kws=dict(linewidth=0, s=markersize),\n aspect=1, height=5)\n plt.savefig(\"img/fam-phi{}-tsne-combined-{}.pdf\".format(phi, i + 1),\n bbox_inches=\"tight\")\n plt.close();\n" ]
[ [ "pandas.DataFrame", "numpy.loadtxt", "matplotlib.pyplot.close", "numpy.unique" ] ]
lonestar686/diluvian
[ "b206c65fff457d4014c8ca76aeb954569bf28916" ]
[ "diluvian/diluvian.py" ]
[ "# -*- coding: utf-8 -*-\n\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import deque\nimport itertools\nimport logging\nfrom multiprocessing import (\n Manager,\n Process,\n )\nimport os\nimport random\n\nimport numpy as np\nimport pytoml as toml\nimport six\nfrom six.moves import input as raw_input\nfrom tqdm import tqdm\n\nfrom .config import CONFIG\nfrom . import preprocessing\nfrom .training import augment_subvolume_generator\nfrom .util import (\n get_color_shader,\n Roundrobin,\n WrappedViewer,\n )\nfrom .volumes import (\n HDF5Volume,\n partition_volumes,\n SubvolumeBounds,\n )\nfrom .regions import Region\n\n\ndef generate_subvolume_bounds(filename, volumes, num_bounds, sparse=False, moves=None):\n if '{volume}' not in filename:\n raise ValueError('CSV filename must contain \"{volume}\" for volume name replacement.')\n\n if moves is None:\n moves = 5\n else:\n moves = np.asarray(moves)\n subv_shape = CONFIG.model.input_fov_shape + CONFIG.model.move_step * 2 * moves\n\n if sparse:\n gen_kwargs = {'sparse_margin': subv_shape}\n else:\n gen_kwargs = {'shape': subv_shape}\n for k, v in six.iteritems(volumes):\n bounds = v.downsample(CONFIG.volume.resolution)\\\n .subvolume_bounds_generator(**gen_kwargs)\n bounds = itertools.islice(bounds, num_bounds)\n SubvolumeBounds.iterable_to_csv(bounds, filename.format(volume=k))\n\n\ndef fill_volume_with_model(\n model_file,\n volume,\n resume_prediction=None,\n checkpoint_filename=None,\n checkpoint_label_interval=20,\n seed_generator='sobel',\n background_label_id=0,\n bias=True,\n move_batch_size=1,\n max_moves=None,\n max_bodies=None,\n num_workers=CONFIG.training.num_gpus,\n worker_prequeue=1,\n filter_seeds_by_mask=True,\n reject_non_seed_components=True,\n reject_early_termination=False,\n remask_interval=None,\n shuffle_seeds=True):\n subvolume = volume.get_subvolume(SubvolumeBounds(start=np.zeros(3, dtype=np.int64), stop=volume.shape))\n # Create an output label volume.\n if resume_prediction is None:\n prediction = np.full_like(subvolume.image, background_label_id, dtype=np.uint64)\n label_id = 0\n else:\n if resume_prediction.shape != subvolume.image.shape:\n raise ValueError('Resume volume prediction is wrong shape.')\n prediction = resume_prediction\n prediction.flags.writeable = True\n label_id = prediction.max()\n # Create a conflict count volume that tracks locations where segmented\n # bodies overlap. 
For now the first body takes precedence in the\n # predicted labels.\n conflict_count = np.full_like(prediction, 0, dtype=np.uint32)\n\n def worker(worker_id, set_devices, model_file, image, seeds, results, lock, revoked):\n lock.acquire()\n import tensorflow as tf\n\n if set_devices:\n # Only make one GPU visible to Tensorflow so that it does not allocate\n # all available memory on all devices.\n # See: https://stackoverflow.com/questions/37893755\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = str(worker_id)\n\n with tf.device('/gpu:0'):\n # Late import to avoid Keras import until TF bindings are set.\n from .network import load_model\n\n logging.debug('Worker %s: loading model', worker_id)\n model = load_model(model_file, CONFIG.network)\n lock.release()\n\n def is_revoked(test_seed):\n ret = False\n lock.acquire()\n if tuple(test_seed) in revoked:\n ret = True\n revoked.remove(tuple(test_seed))\n lock.release()\n return ret\n\n while True:\n seed = seeds.get(True)\n\n if not isinstance(seed, np.ndarray):\n logging.debug('Worker %s: got DONE', worker_id)\n break\n\n if is_revoked(seed):\n results.put((seed, None))\n continue\n\n def stopping_callback(region):\n stop = is_revoked(seed)\n if reject_non_seed_components and \\\n region.bias_against_merge and \\\n region.mask[tuple(region.seed_vox)] < 0.5:\n stop = True\n return stop\n\n logging.debug('Worker %s: got seed %s', worker_id, np.array_str(seed))\n\n # Flood-fill and get resulting mask.\n # Allow reading outside the image volume bounds to allow segmentation\n # to fill all the way to the boundary.\n region = Region(image, seed_vox=seed, sparse_mask=True, block_padding='reflect')\n region.bias_against_merge = bias\n early_termination = False\n try:\n six.next(region.fill(\n model,\n move_batch_size=move_batch_size,\n max_moves=max_moves,\n progress=2 + worker_id,\n stopping_callback=stopping_callback,\n remask_interval=remask_interval))\n except Region.EarlyFillTermination:\n early_termination = True\n except StopIteration:\n pass\n if reject_early_termination and early_termination:\n body = None\n else:\n body = region.to_body()\n logging.debug('Worker %s: seed %s filled', worker_id, np.array_str(seed))\n\n results.put((seed, body))\n\n # Generate seeds from volume.\n generator = preprocessing.SEED_GENERATORS[seed_generator]\n seeds = generator(subvolume.image, CONFIG.volume.resolution)\n\n if filter_seeds_by_mask and volume.mask_data is not None:\n seeds = [s for s in seeds if volume.mask_data[tuple(volume.world_coord_to_local(s))]]\n\n pbar = tqdm(desc='Seed queue', total=len(seeds), miniters=1, smoothing=0.0)\n label_pbar = tqdm(desc='Labeled vox', total=prediction.size, miniters=1, smoothing=0.0, position=1)\n num_seeds = len(seeds)\n if shuffle_seeds:\n random.shuffle(seeds)\n seeds = iter(seeds)\n\n manager = Manager()\n # Queue of seeds to be picked up by workers.\n seed_queue = manager.Queue()\n # Queue of results from workers.\n results_queue = manager.Queue()\n # Dequeue of seeds that were put in seed_queue but have not yet been\n # combined by the main process.\n dispatched_seeds = deque()\n # Seeds that were placed in seed_queue but subsequently covered by other\n # results before their results have been processed. 
This allows workers to\n # abort working on these seeds by checking this list.\n revoked_seeds = manager.list()\n # Results that have been received by the main process but have not yet\n # been combined because they were not received in the dispatch order.\n unordered_results = {}\n\n def queue_next_seed():\n total = 0\n for seed in seeds:\n if prediction[seed[0], seed[1], seed[2]] != background_label_id:\n # This seed has already been filled.\n total += 1\n continue\n dispatched_seeds.append(seed)\n seed_queue.put(seed)\n\n break\n\n return total\n\n for _ in range(min(num_seeds, num_workers * worker_prequeue)):\n processed_seeds = queue_next_seed()\n pbar.update(processed_seeds)\n\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n set_devices = False\n num_workers = 1\n logging.warn('Environment variable CUDA_VISIBLE_DEVICES is set, so only one worker can be used.\\n'\n 'See https://github.com/aschampion/diluvian/issues/11')\n else:\n set_devices = True\n\n workers = []\n loading_lock = manager.Lock()\n for worker_id in range(num_workers):\n w = Process(target=worker, args=(worker_id, set_devices, model_file, subvolume.image,\n seed_queue, results_queue, loading_lock, revoked_seeds))\n w.start()\n workers.append(w)\n\n last_checkpoint_label = label_id\n\n # For each seed, create region, fill, threshold, and merge to output volume.\n while dispatched_seeds:\n processed_seeds = 1\n expected_seed = dispatched_seeds.popleft()\n logging.debug('Expecting seed %s', np.array_str(expected_seed))\n\n if tuple(expected_seed) in unordered_results:\n logging.debug('Expected seed %s is in old results', np.array_str(expected_seed))\n seed = expected_seed\n body = unordered_results[tuple(seed)]\n del unordered_results[tuple(seed)]\n\n else:\n seed, body = results_queue.get(True)\n processed_seeds += queue_next_seed()\n\n while not np.array_equal(seed, expected_seed):\n logging.debug('Seed %s is early, stashing', np.array_str(seed))\n unordered_results[tuple(seed)] = body\n seed, body = results_queue.get(True)\n processed_seeds += queue_next_seed()\n\n logging.debug('Processing seed at %s', np.array_str(seed))\n pbar.set_description('Seed ' + np.array_str(seed))\n pbar.update(processed_seeds)\n\n if prediction[seed[0], seed[1], seed[2]] != background_label_id:\n # This seed has already been filled.\n logging.debug('Seed (%s) was filled but has been covered in the meantime.',\n np.array_str(seed))\n loading_lock.acquire()\n if tuple(seed) in revoked_seeds:\n revoked_seeds.remove(tuple(seed))\n loading_lock.release()\n continue\n\n if body is None:\n logging.debug('Body was None.')\n continue\n\n if reject_non_seed_components and not body.is_seed_in_mask():\n logging.debug('Seed (%s) is not in its body.', np.array_str(seed))\n continue\n\n if reject_non_seed_components:\n mask, bounds = body.get_seeded_component(CONFIG.postprocessing.closing_shape)\n else:\n mask, bounds = body._get_bounded_mask()\n\n body_size = np.count_nonzero(mask)\n\n if body_size == 0:\n logging.debug('Body was empty.')\n continue\n\n # Generate a label ID for this region.\n label_id += 1\n if label_id == background_label_id:\n label_id += 1\n\n logging.debug('Adding body to prediction label volume.')\n bounds_shape = list(map(slice, bounds[0], bounds[1]))\n prediction_mask = prediction[bounds_shape] == background_label_id\n for seed in dispatched_seeds:\n if np.all(bounds[0] <= seed) and np.all(bounds[1] > seed) and mask[tuple(seed - bounds[0])]:\n loading_lock.acquire()\n if tuple(seed) not in revoked_seeds:\n 
revoked_seeds.append(tuple(seed))\n loading_lock.release()\n conflict_count[bounds_shape][np.logical_and(np.logical_not(prediction_mask), mask)] += 1\n label_shape = np.logical_and(prediction_mask, mask)\n prediction[bounds_shape][np.logical_and(prediction_mask, mask)] = label_id\n\n label_pbar.set_description('Label {}'.format(label_id))\n label_pbar.update(np.count_nonzero(label_shape))\n logging.info('Filled seed (%s) with %s voxels labeled %s.',\n np.array_str(seed), body_size, label_id)\n\n if max_bodies and label_id >= max_bodies:\n # Drain the queues.\n while not seed_queue.empty():\n seed_queue.get_nowait()\n break\n\n if checkpoint_filename is not None and label_id - last_checkpoint_label > checkpoint_label_interval:\n config = HDF5Volume.write_file(\n checkpoint_filename + '.hdf5',\n CONFIG.volume.resolution,\n label_data=prediction)\n config['name'] = 'segmentation checkpoint'\n with open(checkpoint_filename + '.toml', 'wb') as tomlfile:\n tomlfile.write('# Filling model: {}\\n'.format(model_file))\n tomlfile.write(str(toml.dumps({'dataset': [config]})))\n\n for _ in range(num_workers):\n seed_queue.put('DONE')\n for wid, worker in enumerate(workers):\n worker.join()\n manager.shutdown()\n\n label_pbar.close()\n pbar.close()\n\n return prediction, conflict_count\n\n\ndef fill_volumes_with_model(\n model_file,\n volumes,\n filename,\n resume_filename=None,\n partition=False,\n viewer=False,\n **kwargs):\n if '{volume}' not in filename:\n raise ValueError('HDF5 filename must contain \"{volume}\" for volume name replacement.')\n if resume_filename is not None and '{volume}' not in resume_filename:\n raise ValueError('TOML resume filename must contain \"{volume}\" for volume name replacement.')\n\n if partition:\n _, volumes = partition_volumes(volumes)\n\n for volume_name, volume in six.iteritems(volumes):\n logging.info('Filling volume %s...', volume_name)\n volume = volume.downsample(CONFIG.volume.resolution)\n if resume_filename is not None:\n resume_volume_filename = resume_filename.format(volume=volume_name)\n resume_volume = six.next(six.itervalues(HDF5Volume.from_toml(resume_volume_filename)))\n resume_prediction = resume_volume.to_memory_volume().label_data\n else:\n resume_prediction = None\n\n volume_filename = filename.format(volume=volume_name)\n checkpoint_filename = volume_filename + '_checkpoint'\n prediction, conflict_count = fill_volume_with_model(\n model_file,\n volume,\n resume_prediction=resume_prediction,\n checkpoint_filename=checkpoint_filename,\n **kwargs)\n\n config = HDF5Volume.write_file(\n volume_filename + '.hdf5',\n CONFIG.volume.resolution,\n label_data=prediction)\n config['name'] = volume_name + ' segmentation'\n with open(volume_filename + '.toml', 'wb') as tomlfile:\n tomlfile.write('# Filling model: {}\\n'.format(model_file))\n tomlfile.write('# Filling kwargs: {}\\n'.format(str(kwargs)))\n tomlfile.write(str(toml.dumps({'dataset': [config]})))\n\n if viewer:\n viewer = WrappedViewer(voxel_size=list(np.flipud(CONFIG.volume.resolution)))\n subvolume = volume.get_subvolume(SubvolumeBounds(start=np.zeros(3, dtype=np.int64), stop=volume.shape))\n viewer.add(subvolume.image, name='Image')\n viewer.add(prediction, name='Labels')\n viewer.add(conflict_count, name='Conflicts')\n\n viewer.print_view_prompt()\n\n\ndef fill_region_with_model(\n model_file,\n volumes=None,\n partition=False,\n augment=False,\n bounds_input_file=None,\n bias=True,\n move_batch_size=1,\n max_moves=None,\n remask_interval=None,\n sparse=False,\n moves=None):\n # Late import to 
avoid Keras import until TF bindings are set.\n from .network import load_model\n\n if volumes is None:\n raise ValueError('Volumes must be provided.')\n\n if partition:\n _, volumes = partition_volumes(volumes)\n\n if bounds_input_file is not None:\n gen_kwargs = {\n k: {'bounds_generator': iter(SubvolumeBounds.iterable_from_csv(bounds_input_file.format(volume=k)))}\n for k in volumes.iterkeys()}\n else:\n if moves is None:\n moves = 5\n else:\n moves = np.asarray(moves)\n subv_shape = CONFIG.model.input_fov_shape + CONFIG.model.move_step * 2 * moves\n\n if sparse:\n gen_kwargs = {\n k: {'sparse_margin': subv_shape}\n for k in volumes.iterkeys()}\n else:\n gen_kwargs = {\n k: {'shape': subv_shape}\n for k in volumes.iterkeys()}\n subvolumes = [\n v.downsample(CONFIG.volume.resolution)\n .subvolume_generator(**gen_kwargs[k])\n for k, v in six.iteritems(volumes)]\n if augment:\n subvolumes = map(augment_subvolume_generator, subvolumes)\n regions = Roundrobin(*[Region.from_subvolume_generator(v, block_padding='reflect') for v in subvolumes])\n\n model = load_model(model_file, CONFIG.network)\n\n for region in regions:\n region.bias_against_merge = bias\n try:\n six.next(region.fill(\n model,\n progress=True,\n move_batch_size=move_batch_size,\n max_moves=max_moves,\n remask_interval=remask_interval))\n except (StopIteration, Region.EarlyFillTermination):\n pass\n body = region.to_body()\n viewer = region.get_viewer()\n try:\n mask, bounds = body.get_seeded_component(CONFIG.postprocessing.closing_shape)\n viewer.add(mask.astype(np.float32),\n name='Body Mask',\n offset=bounds[0],\n shader=get_color_shader(2))\n except ValueError:\n logging.info('Seed not in body.')\n print(viewer)\n while True:\n s = raw_input('Press Enter to continue, '\n 'v to open in browser, '\n 'a to export animation, '\n 'r to 3D render body, '\n 'q to quit...')\n if s == 'q':\n return\n elif s == 'a':\n region_copy = region.unfilled_copy()\n # Must assign the animation to a variable so that it is not GCed.\n ani = region_copy.fill_animation( # noqa\n 'export.mp4',\n model,\n progress=True,\n move_batch_size=move_batch_size,\n max_moves=max_moves,\n remask_interval=remask_interval)\n s = raw_input(\"Press Enter when animation is complete...\")\n elif s == 'r':\n region.render_body()\n elif s == 'ra':\n region_copy = region.unfilled_copy()\n region_copy.fill_render(\n model,\n progress=True,\n move_batch_size=move_batch_size,\n max_moves=max_moves,\n remask_interval=remask_interval)\n elif s == 's':\n body.to_swc('{}.swc'.format('_'.join(map(str, tuple(body.seed)))))\n elif s == 'v':\n viewer.open_in_browser()\n else:\n break\n\n\ndef evaluate_volume(\n volumes,\n gt_name,\n pred_name,\n partition=False,\n border_threshold=None,\n use_gt_mask=True,\n relabel=False):\n # TODO: This is very intrusive into Volumes and should be refactored to\n # handle much of the partioned access and resampling there.\n\n import cremi\n\n if partition:\n _, volumes = partition_volumes(volumes, downsample=False)\n\n def labels_to_cremi(v):\n label_data = v.label_data.copy()\n if hasattr(v, 'bounds'):\n label_data = label_data[list(map(slice, list(v.bounds[0]), list(v.bounds[1])))]\n volume = cremi.Volume(label_data, resolution=v.resolution)\n\n return volume\n\n gt_vol = volumes[gt_name]\n pred_vol = volumes[pred_name]\n logging.info('GT shape: %s\\t Prediction shape:%s', gt_vol.shape, pred_vol.shape)\n\n pred_upsample = gt_vol._get_downsample_from_resolution(pred_vol.resolution)\n if np.any(pred_upsample > 0):\n scale = 
np.exp2(pred_upsample).astype(np.int64)\n logging.warn('Segmentation is different resolution than groundtruth. Upsampling by %s.', scale)\n\n pred_data = pred_vol.label_data\n if hasattr(pred_vol, 'bounds'):\n pred_data = pred_data[list(map(slice, list(pred_vol.bounds[0]), list(pred_vol.bounds[1])))]\n orig_shape = pred_data.shape\n pred_data = np.lib.stride_tricks.as_strided(pred_data,\n [b for a in zip(orig_shape, scale) for b in a],\n [b for a in zip(pred_data.strides, [0, 0, 0]) for b in a])\n new_shape = np.array(orig_shape) * scale\n pred_data = pred_data.reshape(list(new_shape))\n\n padding = np.array(gt_vol.shape) - new_shape\n if np.any(padding > 0):\n logging.warn('Padding segmentation (%s) to be groundtruth size (%s)', new_shape, gt_vol.shape)\n pred_data = np.pad(pred_data, zip([0, 0, 0], list(padding)), 'edge')\n\n pred = cremi.Volume(pred_data, resolution=gt_vol.resolution)\n else:\n pred = labels_to_cremi(pred_vol)\n\n gt = labels_to_cremi(gt_vol)\n\n # Some augmented CREMI volumes have not just a uint64 -1 as background, but\n # several large values. Set these all to background to avoid breaking\n # coo_matrix.\n gt.data[gt.data > np.uint64(-10)] = np.uint64(-1)\n background_label_id = 0\n pred.data[pred.data > np.uint64(-10)] = background_label_id\n\n if use_gt_mask and gt_vol.mask_data is not None:\n logging.warn('Groundtruth has a mask channel that will be applied to segmentation.')\n mask_data = gt_vol.mask_data\n if hasattr(gt_vol, 'bounds'):\n mask_data = mask_data[list(map(slice, list(gt_vol.bounds[0]), list(gt_vol.bounds[1])))]\n\n if relabel:\n mask_exiting_bodies = np.unique(pred.data[np.logical_not(mask_data)])\n\n pred.data[np.logical_not(mask_data)] = background_label_id\n\n if relabel:\n from skimage import morphology\n\n pred_copy = np.zeros_like(pred.data)\n exiting_bodies_mask = np.isin(pred.data, mask_exiting_bodies)\n pred_copy[exiting_bodies_mask] = pred.data[exiting_bodies_mask]\n\n new_pred = morphology.label(pred_copy, background=background_label_id, connectivity=2)\n\n pred.data[exiting_bodies_mask] = new_pred[exiting_bodies_mask]\n\n gt_neuron_ids = cremi.evaluation.NeuronIds(gt, border_threshold=border_threshold)\n\n (voi_split, voi_merge) = gt_neuron_ids.voi(pred)\n adapted_rand = gt_neuron_ids.adapted_rand(pred)\n\n print('VOI split :', voi_split)\n print('VOI merge :', voi_merge)\n print('Adapted Rand-index:', adapted_rand)\n print('CREMI :', np.sqrt((voi_split + voi_merge) * adapted_rand))\n\n\ndef view_volumes(volumes, partition=False):\n \"\"\"Display a set of volumes together in a neuroglancer viewer.\n\n Parameters\n ----------\n volumes : dict\n Dictionary mapping volume name to diluvian.volumes.Volume.\n partition : bool\n If true, partition the volumes and put the view origin at the validaiton\n partition origin.\n \"\"\"\n\n if partition:\n _, volumes = partition_volumes(volumes, downsample=False)\n\n viewer = WrappedViewer()\n\n for volume_name, volume in six.iteritems(volumes):\n resolution = list(np.flipud(volume.resolution))\n offset = getattr(volume, 'bounds', [np.zeros(3, dtype=np.int32)])[0]\n offset = np.flipud(-offset)\n #\n logging.debug(' volume name: %s, resolution: %s, offset: %s', volume_name, resolution, offset)\n \n viewer.add(volume.image_data,\n name='{} (Image)'.format(volume_name),\n voxel_size=resolution,\n voxel_offset=offset)\n if volume.label_data is not None:\n viewer.add(volume.label_data,\n name='{} (Labels)'.format(volume_name),\n voxel_size=resolution,\n voxel_offset=offset)\n if volume.mask_data is not 
None:\n viewer.add(volume.mask_data,\n name='{} (Mask)'.format(volume_name),\n voxel_size=resolution,\n voxel_offset=offset)\n\n viewer.print_view_prompt()\n" ]
[ [ "numpy.logical_not", "numpy.count_nonzero", "numpy.array", "numpy.zeros_like", "numpy.asarray", "numpy.isin", "numpy.zeros", "numpy.array_equal", "numpy.exp2", "numpy.array_str", "numpy.logical_and", "numpy.flipud", "numpy.any", "numpy.all", "numpy.sqrt", "tensorflow.device", "numpy.uint64", "numpy.full_like" ] ]
AnhTran01/PPOPT
[ "4f62ee5363100766a7524ca6bbe03ddd64b32b8d" ]
[ "tests/other_tests/test_general_utils.py" ]
[ "import numpy\nfrom src.ppopt.utils.general_utils import make_column, make_row, select_not_in_list, remove_size_zero_matrices\n\n\ndef test_make_column_1():\n test_case = make_column([1, 1, 1, 1])\n correct_result = numpy.array([[1], [1], [1], [1]])\n\n assert numpy.allclose(correct_result, test_case)\n assert correct_result.shape == test_case.shape\n\n\ndef test_make_column_2():\n k = numpy.ones((2, 2))\n assert make_column(k).shape == (4, 1)\n\n\ndef test_make_column_3():\n k = numpy.ones((2,))\n assert make_column(k).shape == (2, 1)\n\n\ndef test_make_row_1():\n test_case = make_row([1, 1, 1, 1])\n correct_result = numpy.array([[1, 1, 1, 1]])\n\n assert numpy.allclose(correct_result, test_case)\n assert correct_result.shape == test_case.shape\n\n\ndef test_make_row_2():\n k = numpy.ones((2, 2))\n assert make_row(k).shape == (1, 4)\n\n\ndef test_make_row_3():\n k = numpy.ones((2,))\n assert make_row(k).shape == (1, 2)\n\n\ndef test_select_not_in_list_1():\n A = numpy.eye(5)\n B = select_not_in_list(A, [0])\n assert numpy.allclose(A[[1, 2, 3, 4]], B)\n\n\ndef test_select_not_in_list_2():\n A = numpy.eye(5)\n B = select_not_in_list(A, [0, 1, 2, 3, 4])\n assert B.size == 0\n\n\ndef test_remove_size_zero_matrices():\n A = [numpy.eye(0), numpy.eye(1), numpy.zeros((2, 0))]\n assert remove_size_zero_matrices(A) == [numpy.eye(1)]\n\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.ones", "numpy.eye", "numpy.allclose" ] ]
xiaocn/ImageClassifer
[ "3075150aa7ef547333729dcff5876147682c6694" ]
[ "src/nets/nasnet/pnasnet.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport tensorflow as tf\n\nfrom nets.nasnet import nasnet\nfrom nets.nasnet import nasnet_utils\n\narg_scope = tf.contrib.framework.arg_scope\nslim = tf.contrib.slim\n\n\ndef large_imagenet_config():\n \"\"\"Large ImageNet configuration based on PNASNet-5.\"\"\"\n return tf.contrib.training.HParams(\n stem_multiplier=3.0,\n dense_dropout_keep_prob=0.5,\n num_cells=12,\n filter_scaling_rate=2.0,\n num_conv_filters=216,\n drop_path_keep_prob=0.6,\n use_aux_head=1,\n num_reduction_layers=2,\n data_format='NHWC',\n skip_reduction_layer_input=1,\n total_training_steps=250000,\n )\n\n\ndef mobile_imagenet_config():\n \"\"\"Mobile ImageNet configuration based on PNASNet-5.\"\"\"\n return tf.contrib.training.HParams(\n stem_multiplier=1.0,\n dense_dropout_keep_prob=0.5,\n num_cells=9,\n filter_scaling_rate=2.0,\n num_conv_filters=54,\n drop_path_keep_prob=1.0,\n use_aux_head=1,\n num_reduction_layers=2,\n data_format='NHWC',\n skip_reduction_layer_input=1,\n total_training_steps=250000,\n )\n\n\ndef pnasnet_large_arg_scope(weight_decay=4e-5, batch_norm_decay=0.9997,\n batch_norm_epsilon=0.001):\n \"\"\"Default arg scope for the PNASNet Large ImageNet model.\"\"\"\n return nasnet.nasnet_large_arg_scope(\n weight_decay, batch_norm_decay, batch_norm_epsilon)\n\n\ndef pnasnet_mobile_arg_scope(weight_decay=4e-5,\n batch_norm_decay=0.9997,\n batch_norm_epsilon=0.001):\n \"\"\"Default arg scope for the PNASNet Mobile ImageNet model.\"\"\"\n return nasnet.nasnet_mobile_arg_scope(weight_decay, batch_norm_decay,\n batch_norm_epsilon)\n\n\ndef _build_pnasnet_base(images,\n normal_cell,\n num_classes,\n hparams,\n is_training,\n final_endpoint=None):\n \"\"\"Constructs a PNASNet image model.\"\"\"\n\n end_points = {}\n\n def add_and_check_endpoint(endpoint_name, net):\n end_points[endpoint_name] = net\n return final_endpoint and (endpoint_name == final_endpoint)\n\n # Find where to place the reduction cells or stride normal cells\n reduction_indices = nasnet_utils.calc_reduction_layers(\n hparams.num_cells, hparams.num_reduction_layers)\n\n # pylint: disable=protected-access\n stem = lambda: nasnet._imagenet_stem(images, hparams, normal_cell)\n # pylint: enable=protected-access\n net, cell_outputs = stem()\n if add_and_check_endpoint('Stem', net):\n return net, end_points\n\n # Setup for building in the auxiliary head.\n aux_head_cell_idxes = []\n if len(reduction_indices) >= 2:\n aux_head_cell_idxes.append(reduction_indices[1] - 1)\n\n # Run the cells\n filter_scaling = 1.0\n # true_cell_num accounts for the stem cells\n true_cell_num = 2\n for cell_num in range(hparams.num_cells):\n is_reduction = cell_num in reduction_indices\n stride = 2 if is_reduction else 1\n if is_reduction: filter_scaling *= hparams.filter_scaling_rate\n if hparams.skip_reduction_layer_input or not is_reduction:\n prev_layer = cell_outputs[-2]\n net = normal_cell(\n net,\n scope='cell_{}'.format(cell_num),\n filter_scaling=filter_scaling,\n stride=stride,\n prev_layer=prev_layer,\n cell_num=true_cell_num)\n if add_and_check_endpoint('Cell_{}'.format(cell_num), net):\n return net, end_points\n true_cell_num += 1\n cell_outputs.append(net)\n\n if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and\n num_classes and is_training):\n aux_net = tf.nn.relu(net)\n # pylint: disable=protected-access\n nasnet._build_aux_head(aux_net, end_points, num_classes, hparams,\n scope='aux_{}'.format(cell_num))\n # 
pylint: enable=protected-access\n\n # Final softmax layer\n with tf.variable_scope('final_layer'):\n net = tf.nn.relu(net)\n net = nasnet_utils.global_avg_pool(net)\n if add_and_check_endpoint('global_pool', net) or not num_classes:\n return net, end_points\n net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout')\n logits = slim.fully_connected(net, num_classes)\n\n if add_and_check_endpoint('Logits', logits):\n return net, end_points\n\n predictions = tf.nn.softmax(logits, name='predictions')\n if add_and_check_endpoint('Predictions', predictions):\n return net, end_points\n return logits, end_points\n\n\ndef build_pnasnet_large(images,\n num_classes,\n is_training=True,\n final_endpoint=None,\n config=None):\n \"\"\"Build PNASNet Large model for the ImageNet Dataset.\"\"\"\n hparams = copy.deepcopy(config) if config else large_imagenet_config()\n # pylint: disable=protected-access\n nasnet._update_hparams(hparams, is_training)\n # pylint: enable=protected-access\n\n if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':\n tf.logging.info('A GPU is available on the machine, consider using NCHW '\n 'data format for increased speed on GPU.')\n\n if hparams.data_format == 'NCHW':\n images = tf.transpose(images, [0, 3, 1, 2])\n\n # Calculate the total number of cells in the network.\n # There is no distinction between reduction and normal cells in PNAS so the\n # total number of cells is equal to the number normal cells plus the number\n # of stem cells (two by default).\n total_num_cells = hparams.num_cells + 2\n\n normal_cell = PNasNetNormalCell(hparams.num_conv_filters,\n hparams.drop_path_keep_prob, total_num_cells,\n hparams.total_training_steps)\n with arg_scope(\n [slim.dropout, nasnet_utils.drop_path, slim.batch_norm],\n is_training=is_training):\n with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d,\n slim.batch_norm, slim.separable_conv2d,\n nasnet_utils.factorized_reduction,\n nasnet_utils.global_avg_pool,\n nasnet_utils.get_channel_index,\n nasnet_utils.get_channel_dim],\n data_format=hparams.data_format):\n return _build_pnasnet_base(\n images,\n normal_cell=normal_cell,\n num_classes=num_classes,\n hparams=hparams,\n is_training=is_training,\n final_endpoint=final_endpoint)\nbuild_pnasnet_large.default_image_size = 331\n\n\ndef build_pnasnet_mobile(images,\n num_classes,\n is_training=True,\n final_endpoint=None,\n config=None):\n \"\"\"Build PNASNet Mobile model for the ImageNet Dataset.\"\"\"\n hparams = copy.deepcopy(config) if config else mobile_imagenet_config()\n # pylint: disable=protected-access\n nasnet._update_hparams(hparams, is_training)\n # pylint: enable=protected-access\n\n if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':\n tf.logging.info('A GPU is available on the machine, consider using NCHW '\n 'data format for increased speed on GPU.')\n\n if hparams.data_format == 'NCHW':\n images = tf.transpose(images, [0, 3, 1, 2])\n\n # Calculate the total number of cells in the network.\n # There is no distinction between reduction and normal cells in PNAS so the\n # total number of cells is equal to the number normal cells plus the number\n # of stem cells (two by default).\n total_num_cells = hparams.num_cells + 2\n\n normal_cell = PNasNetNormalCell(hparams.num_conv_filters,\n hparams.drop_path_keep_prob, total_num_cells,\n hparams.total_training_steps)\n with arg_scope(\n [slim.dropout, nasnet_utils.drop_path, slim.batch_norm],\n is_training=is_training):\n with arg_scope(\n [\n slim.avg_pool2d, slim.max_pool2d, 
slim.conv2d, slim.batch_norm,\n slim.separable_conv2d, nasnet_utils.factorized_reduction,\n nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index,\n nasnet_utils.get_channel_dim\n ],\n data_format=hparams.data_format):\n return _build_pnasnet_base(\n images,\n normal_cell=normal_cell,\n num_classes=num_classes,\n hparams=hparams,\n is_training=is_training,\n final_endpoint=final_endpoint)\n\n\nbuild_pnasnet_mobile.default_image_size = 224\n\n\nclass PNasNetNormalCell(nasnet_utils.NasNetABaseCell):\n \"\"\"PNASNet Normal Cell.\"\"\"\n\n def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,\n total_training_steps):\n # Configuration for the PNASNet-5 model.\n operations = [\n 'separable_5x5_2', 'max_pool_3x3', 'separable_7x7_2', 'max_pool_3x3',\n 'separable_5x5_2', 'separable_3x3_2', 'separable_3x3_2', 'max_pool_3x3',\n 'separable_3x3_2', 'none'\n ]\n used_hiddenstates = [1, 1, 0, 0, 0, 0, 0]\n hiddenstate_indices = [1, 1, 0, 0, 0, 0, 4, 0, 1, 0]\n\n super(PNasNetNormalCell, self).__init__(\n num_conv_filters, operations, used_hiddenstates, hiddenstate_indices,\n drop_path_keep_prob, total_num_cells, total_training_steps)" ]
[ [ "tensorflow.nn.relu", "tensorflow.test.is_gpu_available", "tensorflow.logging.info", "tensorflow.transpose", "tensorflow.variable_scope", "tensorflow.nn.softmax", "tensorflow.contrib.training.HParams" ] ]
etijskens/ET-rng
[ "2e987bd088dd8bc1680b91412ea431b400a3e7d6" ]
[ "perf/rng.py" ]
[ "import et_rng\nf90 = et_rng.frng.f90\nfrom et_stopwatch import Stopwatch\nimport numpy as np\nimport pprint\n\"\"\"\nhere is the output on my mac:\nNote that the Numpy version is returning an array of random numbers, whereas the Python and fortran\nversions just create random numbers and store them in always the same variable. The creation and \nfilling of the array consumes time too. However, for larger n the numpy version is significantly\nfaster.\n \n(.venv) > python perf/rng.py\n10\n/Users/etijskens/software/dev/workspace/ET-rng/et_rng/__init__.py:84: RuntimeWarning: overflow encountered in ulong_scalars\n self.x = (self.a * self.x + self.b) % self.m\nGenerating 10 random numbers in Python : : 0.001592 s\nGenerating 10 random numbers in Fortran: : 7e-06 s\nGenerating 10 random numbers in Numpy: : 0.000194 s\n100\nGenerating 100 random numbers in Python : : 8.7e-05 s\nGenerating 100 random numbers in Fortran: : 6.6e-05 s\nGenerating 100 random numbers in Numpy: : 5.2e-05 s\n1000\nGenerating 1000 random numbers in Python : : 0.000778 s\nGenerating 1000 random numbers in Fortran: : 0.000361 s\nGenerating 1000 random numbers in Numpy: : 4.6e-05 s\n10000\nGenerating 10000 random numbers in Python : : 0.006986 s\nGenerating 10000 random numbers in Fortran: : 0.002862 s\nGenerating 10000 random numbers in Numpy: : 0.000112 s\n{ 'Fortran-n=10': 7e-06,\n 'Fortran-n=100': 6.6e-05,\n 'Fortran-n=1000': 0.000361,\n 'Fortran-n=10000': 0.002862,\n 'Python-n=10': 0.001592,\n 'Python-n=100': 8.7e-05,\n 'Python-n=1000': 0.000778,\n 'Python-n=10000': 0.006986,\n 'Python/Fortran-n=10': 227.42857142857144,\n 'Python/Fortran-n=100': 1.3181818181818181,\n 'Python/Fortran-n=1000': 2.1551246537396125,\n 'Python/Fortran-n=10000': 2.440950384346611,\n 'numpy-n=10': 0.000194,\n 'numpy-n=100': 5.2e-05,\n 'numpy-n=1000': 4.6e-05,\n 'numpy-n=10000': 0.000112}\n(.venv) etijskens@MacOSX@local [94] ~/software/dev/workspace/ET-rng\n>\n\"\"\"\nif __name__ == \"__main__\":\n results = {}\n rng = et_rng.LCG1()\n pp = pprint.PrettyPrinter(indent=4)\n for n in [10,100,1000,10000]:\n print(n)\n with Stopwatch(message=f\"Generating {n} random numbers in Python : \") as sw:\n for i in range(n):\n r = rng()\n results[f\"Python-n={n}\"] = sw.stop()\n\n with Stopwatch(message=f\"Generating {n} random numbers in Fortran: \") as sw:\n for i in range(n):\n r = f90.lcg1()\n results[f\"Fortran-n={n}\"] = sw.stop()\n results[f\"Python/Fortran-n={n}\"] = results[f\"Python-n={n}\"] / results[f\"Fortran-n={n}\"]\n\n with Stopwatch(message=f\"Generating {n} random numbers in Numpy: \") as sw:\n a = np.random.randint(0, rng.m, n, dtype=et_rng.LCG1.UINT)\n results[f\"numpy-n={n}\"] = sw.stop()\n\n pp.pprint(results)" ]
[ [ "numpy.random.randint" ] ]
KatharineShapcott/syncopy
[ "7b24eda65cf752e395538db5260cd3075029081f" ]
[ "syncopy/specest/mtmfft.py" ]
[ "# -*- coding: utf-8 -*-\n# \n# Spectral estimation with (multi-)tapered FFT\n# \n\n# Builtin/3rd party package imports\nimport numpy as np\nimport scipy.signal.windows as spwin\n\n# Local imports\nfrom syncopy.shared.computational_routine import ComputationalRoutine\nfrom syncopy.datatype import padding\nimport syncopy.specest.freqanalysis as freq\nfrom syncopy.shared.kwarg_decorators import unwrap_io\nfrom syncopy.shared.tools import best_match\n\n\n# Local workhorse that performs the computational heavy lifting\n@unwrap_io\ndef mtmfft(trl_dat, samplerate=None, foi=None, nTaper=1, timeAxis=0,\n taper=spwin.hann, taperopt={}, \n pad=\"nextpow2\", padtype=\"zero\", padlength=None,\n keeptapers=True, polyremoval=None, output_fmt=\"pow\",\n noCompute=False, chunkShape=None):\n \"\"\"\n Compute (multi-)tapered Fourier transform of multi-channel time series data\n \n Parameters\n ----------\n trl_dat : 2D :class:`numpy.ndarray`\n Uniformly sampled multi-channel time-series \n samplerate : float\n Samplerate of `trl_dat` in Hz\n foi : 1D :class:`numpy.ndarray`\n Frequencies of interest (Hz) for output. If desired frequencies\n cannot be matched exactly the closest possible frequencies (respecting \n data length and padding) are used.\n nTaper : int\n Number of filter windows to use\n timeAxis : int\n Index of running time axis in `trl_dat` (0 or 1)\n taper : callable \n Taper function to use, one of :data:`~syncopy.specest.freqanalysis.availableTapers`\n taperopt : dict\n Additional keyword arguments passed to the `taper` function. For further \n details, please refer to the \n `SciPy docs <https://docs.scipy.org/doc/scipy/reference/signal.windows.html>`_\n pad : str\n Padding mode; one of `'absolute'`, `'relative'`, `'maxlen'`, or `'nextpow2'`.\n See :func:`syncopy.padding` for more information.\n padtype : str\n Values to be used for padding. Can be 'zero', 'nan', 'mean', \n 'localmean', 'edge' or 'mirror'. See :func:`syncopy.padding` for \n more information.\n padlength : None, bool or positive scalar\n Number of samples to pad to data (if `pad` is 'absolute' or 'relative'). \n See :func:`syncopy.padding` for more information.\n keeptapers : bool\n If `True`, results of Fourier transform are preserved for each taper, \n otherwise spectrum is averaged across tapers. \n polyremoval : int or None\n **FIXME: Not implemented yet**\n Order of polynomial used for de-trending data in the time domain prior \n to spectral analysis. A value of 0 corresponds to subtracting the mean \n (\"de-meaning\"), ``polyremoval = 1`` removes linear trends (subtracting the \n least squares fit of a linear polynomial), ``polyremoval = N`` for `N > 1` \n subtracts a polynomial of order `N` (``N = 2`` quadratic, ``N = 3`` cubic \n etc.). If `polyremoval` is `None`, no de-trending is performed. \n output_fmt : str\n Output of spectral estimation; one of :data:`~syncopy.specest.freqanalysis.availableOutputs`\n noCompute : bool\n Preprocessing flag. If `True`, do not perform actual calculation but\n instead return expected shape and :class:`numpy.dtype` of output\n array.\n chunkShape : None or tuple\n If not `None`, represents shape of output `spec` (respecting provided \n values of `nTaper`, `keeptapers` etc.)\n \n Returns\n -------\n spec : :class:`numpy.ndarray`\n Complex or real spectrum of (padded) input data. 
\n\n Notes\n -----\n This method is intended to be used as \n :meth:`~syncopy.shared.computational_routine.ComputationalRoutine.computeFunction`\n inside a :class:`~syncopy.shared.computational_routine.ComputationalRoutine`. \n Thus, input parameters are presumed to be forwarded from a parent metafunction. \n Consequently, this function does **not** perform any error checking and operates \n under the assumption that all inputs have been externally validated and cross-checked. \n \n The computational heavy lifting in this code is performed by NumPy's reference\n implementation of the Fast Fourier Transform :func:`numpy.fft.fft`. \n \n See also\n --------\n syncopy.freqanalysis : parent metafunction\n MultiTaperFFT : :class:`~syncopy.shared.computational_routine.ComputationalRoutine`\n instance that calls this method as \n :meth:`~syncopy.shared.computational_routine.ComputationalRoutine.computeFunction`\n numpy.fft.fft : NumPy's FFT implementation\n \"\"\"\n \n # Re-arrange array if necessary and get dimensional information\n if timeAxis != 0:\n dat = trl_dat.T # does not copy but creates view of `trl_dat`\n else:\n dat = trl_dat\n\n # Padding (updates no. of samples)\n if pad:\n dat = padding(dat, padtype, pad=pad, padlength=padlength, prepadlength=True)\n nSamples = dat.shape[0]\n nChannels = dat.shape[1]\n \n # Determine frequency band and shape of output (time=1 x taper x freq x channel)\n nFreq = int(np.floor(nSamples / 2) + 1)\n freqs = np.linspace(0, samplerate / 2, nFreq)\n _, fidx = best_match(freqs, foi, squash_duplicates=True)\n nFreq = fidx.size\n outShape = (1, max(1, nTaper * keeptapers), nFreq, nChannels)\n \n # For initialization of computational routine, just return output shape and dtype\n if noCompute:\n return outShape, freq.spectralDTypes[output_fmt]\n\n # In case tapers aren't preserved allocate `spec` \"too big\" and average afterwards\n spec = np.full((1, nTaper, nFreq, nChannels), np.nan, dtype=freq.spectralDTypes[output_fmt])\n fill_idx = tuple([slice(None, dim) for dim in outShape[2:]])\n\n # Actual computation\n win = np.atleast_2d(taper(nSamples, **taperopt))\n for taperIdx, taper in enumerate(win):\n if dat.ndim > 1:\n taper = np.tile(taper, (nChannels, 1)).T\n spec[(0, taperIdx,) + fill_idx] = freq.spectralConversions[output_fmt](np.fft.rfft(dat * taper, axis=0)[fidx, :])\n\n # Average across tapers if wanted\n if not keeptapers:\n return spec.mean(axis=1, keepdims=True)\n return spec\n\n\nclass MultiTaperFFT(ComputationalRoutine):\n \"\"\"\n Compute class that calculates (multi-)tapered Fourier transfrom of :class:`~syncopy.AnalogData` objects\n \n Sub-class of :class:`~syncopy.shared.computational_routine.ComputationalRoutine`, \n see :doc:`/developer/compute_kernels` for technical details on Syncopy's compute \n classes and metafunctions. 
\n \n See also\n --------\n syncopy.freqanalysis : parent metafunction\n \"\"\"\n\n computeFunction = staticmethod(mtmfft)\n\n def process_metadata(self, data, out):\n \n # Some index gymnastics to get trial begin/end \"samples\"\n if data._selection is not None:\n chanSec = data._selection.channel\n trl = data._selection.trialdefinition\n for row in range(trl.shape[0]):\n trl[row, :2] = [row, row + 1]\n else:\n chanSec = slice(None)\n time = np.arange(len(data.trials))\n time = time.reshape((time.size, 1))\n trl = np.hstack((time, time + 1, \n np.zeros((len(data.trials), 1)), \n np.array(data.trialinfo)))\n\n # Attach constructed trialdef-array (if even necessary)\n if self.keeptrials:\n out.trialdefinition = trl\n else:\n out.trialdefinition = np.array([[0, 1, 0]])\n\n # Attach remaining meta-data\n out.samplerate = data.samplerate\n out.channel = np.array(data.channel[chanSec])\n out.taper = np.array([self.cfg[\"taper\"].__name__] * self.outputShape[out.dimord.index(\"taper\")])\n out.freq = self.cfg[\"foi\"]\n" ]
[ [ "numpy.full", "numpy.array", "numpy.fft.rfft", "numpy.tile", "numpy.linspace", "numpy.floor" ] ]
jbohnslav/BigGAN-PyTorch
[ "489a058ed8930966e180fe9379bc9b8e6b083785" ]
[ "datasets.py" ]
[ "''' Datasets\r\n This file contains definitions for our CIFAR, ImageFolder, and HDF5 datasets\r\n'''\r\nimport os\r\nimport os.path\r\nimport sys\r\nfrom PIL import Image\r\nimport numpy as np\r\nfrom tqdm import tqdm, trange\r\n\r\nimport torchvision.datasets as dset\r\nimport torchvision.transforms as transforms\r\nfrom torchvision.datasets.utils import download_url, check_integrity\r\nimport torch.utils.data as data\r\nfrom torch.utils.data import DataLoader\r\n \r\nIMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']\r\n\r\n\r\ndef is_image_file(filename):\r\n \"\"\"Checks if a file is an image.\r\n\r\n Args:\r\n filename (string): path to a file\r\n\r\n Returns:\r\n bool: True if the filename ends with a known image extension\r\n \"\"\"\r\n filename_lower = filename.lower()\r\n return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)\r\n\r\n\r\ndef find_classes(dir):\r\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\r\n classes.sort()\r\n class_to_idx = {classes[i]: i for i in range(len(classes))}\r\n return classes, class_to_idx\r\n\r\n\r\ndef make_dataset(dir, class_to_idx):\r\n images = []\r\n dir = os.path.expanduser(dir)\r\n for target in tqdm(sorted(os.listdir(dir))):\r\n d = os.path.join(dir, target)\r\n if not os.path.isdir(d):\r\n continue\r\n\r\n for root, _, fnames in sorted(os.walk(d)):\r\n for fname in sorted(fnames):\r\n if is_image_file(fname):\r\n path = os.path.join(root, fname)\r\n item = (path, class_to_idx[target])\r\n images.append(item)\r\n\r\n return images\r\n\r\n\r\ndef pil_loader(path):\r\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\r\n with open(path, 'rb') as f:\r\n img = Image.open(f)\r\n return img.convert('RGB')\r\n\r\n\r\ndef accimage_loader(path):\r\n import accimage\r\n try:\r\n return accimage.Image(path)\r\n except IOError:\r\n # Potentially a decoding problem, fall back to PIL.Image\r\n return pil_loader(path)\r\n\r\n\r\ndef default_loader(path):\r\n from torchvision import get_image_backend\r\n if get_image_backend() == 'accimage':\r\n return accimage_loader(path)\r\n else:\r\n return pil_loader(path)\r\n\r\n\r\nclass ImageFolder(data.Dataset):\r\n \"\"\"A generic data loader where the images are arranged in this way: ::\r\n\r\n root/dogball/xxx.png\r\n root/dogball/xxy.png\r\n root/dogball/xxz.png\r\n\r\n root/cat/123.png\r\n root/cat/nsdf3.png\r\n root/cat/asd932_.png\r\n\r\n Args:\r\n root (string): Root directory path.\r\n transform (callable, optional): A function/transform that takes in an PIL image\r\n and returns a transformed version. E.g, ``transforms.RandomCrop``\r\n target_transform (callable, optional): A function/transform that takes in the\r\n target and transforms it.\r\n loader (callable, optional): A function to load an image given its path.\r\n\r\n Attributes:\r\n classes (list): List of the class names.\r\n class_to_idx (dict): Dict with items (class_name, class_index).\r\n imgs (list): List of (image path, class_index) tuples\r\n \"\"\"\r\n\r\n def __init__(self, root, transform=None, target_transform=None,\r\n loader=default_loader, load_in_mem=False, \r\n index_filename='imagenet_imgs.npz', **kwargs):\r\n classes, class_to_idx = find_classes(root)\r\n # Load pre-computed image directory walk\r\n if os.path.exists(index_filename):\r\n print('Loading pre-saved Index file %s...' 
% index_filename)\r\n imgs = np.load(index_filename)['imgs']\r\n # If first time, walk the folder directory and save the \r\n # results to a pre-computed file.\r\n else:\r\n print('Generating Index file %s...' % index_filename)\r\n imgs = make_dataset(root, class_to_idx)\r\n np.savez_compressed(index_filename, **{'imgs' : imgs})\r\n if len(imgs) == 0:\r\n raise(RuntimeError(\"Found 0 images in subfolders of: \" + root + \"\\n\"\r\n \"Supported image extensions are: \" + \",\".join(IMG_EXTENSIONS)))\r\n\r\n self.root = root\r\n self.imgs = imgs\r\n self.classes = classes\r\n self.class_to_idx = class_to_idx\r\n self.transform = transform\r\n self.target_transform = target_transform\r\n self.loader = loader\r\n self.load_in_mem = load_in_mem\r\n \r\n if self.load_in_mem:\r\n print('Loading all images into memory...')\r\n self.data, self.labels = [], []\r\n for index in tqdm(range(len(self.imgs))):\r\n path, target = imgs[index][0], imgs[index][1]\r\n self.data.append(self.transform(self.loader(path)))\r\n self.labels.append(target)\r\n \r\n\r\n def __getitem__(self, index):\r\n \"\"\"\r\n Args:\r\n index (int): Index\r\n\r\n Returns:\r\n tuple: (image, target) where target is class_index of the target class.\r\n \"\"\"\r\n if self.load_in_mem:\r\n img = self.data[index]\r\n target = self.labels[index]\r\n else:\r\n path, target = self.imgs[index]\r\n img = self.loader(str(path))\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n \r\n if self.target_transform is not None:\r\n target = self.target_transform(target)\r\n \r\n # print(img.size(), target)\r\n return img, int(target)\r\n\r\n def __len__(self):\r\n return len(self.imgs)\r\n\r\n def __repr__(self):\r\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\r\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\r\n fmt_str += ' Root Location: {}\\n'.format(self.root)\r\n tmp = ' Transforms (if any): '\r\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\r\n tmp = ' Target Transforms (if any): '\r\n fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\r\n return fmt_str\r\n\r\n\r\nclass PumganDataset(data.Dataset):\r\n \"\"\"A generic data loader where the images are arranged in this way: ::\r\n\r\n root/dogball/xxx.png\r\n root/dogball/xxy.png\r\n root/dogball/xxz.png\r\n\r\n root/cat/123.png\r\n root/cat/nsdf3.png\r\n root/cat/asd932_.png\r\n\r\n Args:\r\n root (string): Root directory path.\r\n transform (callable, optional): A function/transform that takes in an PIL image\r\n and returns a transformed version. E.g, ``transforms.RandomCrop``\r\n target_transform (callable, optional): A function/transform that takes in the\r\n target and transforms it.\r\n loader (callable, optional): A function to load an image given its path.\r\n\r\n Attributes:\r\n classes (list): List of the class names.\r\n class_to_idx (dict): Dict with items (class_name, class_index).\r\n imgs (list): List of (image path, class_index) tuples\r\n \"\"\"\r\n\r\n def __init__(self, root, transform=None, target_transform=None,\r\n loader=default_loader, load_in_mem=False, \r\n index_filename='pumgan_imgs.npz', **kwargs):\r\n # classes, class_to_idx = find_classes(root)\r\n # Load pre-computed image directory walk\r\n if os.path.exists(index_filename):\r\n print('Loading pre-saved Index file %s...' 
% index_filename)\r\n imgs = np.load(index_filename)['imgs']\r\n # If first time, walk the folder directory and save the \r\n # results to a pre-computed file.\r\n else:\r\n print('Generating Index file %s...' % index_filename)\r\n imgs = make_dataset(root, class_to_idx)\r\n np.savez_compressed(index_filename, **{'imgs' : imgs})\r\n if len(imgs) == 0:\r\n raise(RuntimeError(\"Found 0 images in subfolders of: \" + root + \"\\n\"\r\n \"Supported image extensions are: \" + \",\".join(IMG_EXTENSIONS)))\r\n\r\n self.root = root\r\n self.imgs = imgs\r\n self.classes = classes\r\n self.class_to_idx = class_to_idx\r\n self.transform = transform\r\n self.target_transform = target_transform\r\n self.loader = loader\r\n self.load_in_mem = load_in_mem\r\n \r\n if self.load_in_mem:\r\n print('Loading all images into memory...')\r\n self.data, self.labels = [], []\r\n for index in tqdm(range(len(self.imgs))):\r\n path, target = imgs[index][0], imgs[index][1]\r\n self.data.append(self.transform(self.loader(path)))\r\n self.labels.append(target)\r\n \r\n\r\n def __getitem__(self, index):\r\n \"\"\"\r\n Args:\r\n index (int): Index\r\n\r\n Returns:\r\n tuple: (image, target) where target is class_index of the target class.\r\n \"\"\"\r\n if self.load_in_mem:\r\n img = self.data[index]\r\n target = self.labels[index]\r\n else:\r\n path, target = self.imgs[index]\r\n img = self.loader(str(path))\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n \r\n if self.target_transform is not None:\r\n target = self.target_transform(target)\r\n \r\n # print(img.size(), target)\r\n return img, int(target)\r\n\r\n def __len__(self):\r\n return len(self.imgs)\r\n\r\n def __repr__(self):\r\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\r\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\r\n fmt_str += ' Root Location: {}\\n'.format(self.root)\r\n tmp = ' Transforms (if any): '\r\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\r\n tmp = ' Target Transforms (if any): '\r\n fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\r\n return fmt_str\r\n \r\n\r\n''' ILSVRC_HDF5: A dataset to support I/O from an HDF5 to avoid\r\n having to load individual images all the time. '''\r\nimport h5py as h5\r\nimport torch\r\nclass ILSVRC_HDF5(data.Dataset):\r\n def __init__(self, root, transform=None, target_transform=None,\r\n load_in_mem=False, train=True,download=False, validate_seed=0,\r\n val_split=0, **kwargs): # last four are dummies\r\n \r\n self.root = root\r\n self.num_imgs = len(h5.File(root, 'r')['labels'])\r\n \r\n # self.transform = transform\r\n self.target_transform = target_transform \r\n \r\n # Set the transform here\r\n self.transform = transform\r\n \r\n # load the entire dataset into memory? \r\n self.load_in_mem = load_in_mem\r\n \r\n # If loading into memory, do so now\r\n if self.load_in_mem:\r\n print('Loading %s into memory...' 
% root)\r\n with h5.File(root,'r') as f:\r\n self.data = f['imgs'][:]\r\n self.labels = f['labels'][:]\r\n\r\n def __getitem__(self, index):\r\n \"\"\"\r\n Args:\r\n index (int): Index\r\n\r\n Returns:\r\n tuple: (image, target) where target is class_index of the target class.\r\n \"\"\"\r\n # If loaded the entire dataset in RAM, get image from memory\r\n if self.load_in_mem:\r\n img = self.data[index]\r\n target = self.labels[index]\r\n \r\n # Else load it from disk\r\n else:\r\n with h5.File(self.root,'r') as f:\r\n img = f['imgs'][index]\r\n target = f['labels'][index]\r\n \r\n \r\n # if self.transform is not None:\r\n # img = self.transform(img)\r\n # Apply my own transform\r\n img = ((torch.from_numpy(img).float() / 255) - 0.5) * 2\r\n \r\n if self.target_transform is not None:\r\n target = self.target_transform(target)\r\n \r\n return img, int(target)\r\n\r\n def __len__(self):\r\n return self.num_imgs\r\n # return len(self.f['imgs'])\r\n\r\nimport pickle\r\nclass CIFAR10(dset.CIFAR10):\r\n\r\n def __init__(self, root, train=True,\r\n transform=None, target_transform=None,\r\n download=True, validate_seed=0,\r\n val_split=0, load_in_mem=True, **kwargs):\r\n self.root = os.path.expanduser(root)\r\n self.transform = transform\r\n self.target_transform = target_transform\r\n self.train = train # training set or test set\r\n self.val_split = val_split\r\n\r\n if download:\r\n self.download()\r\n\r\n if not self._check_integrity():\r\n raise RuntimeError('Dataset not found or corrupted.' +\r\n ' You can use download=True to download it')\r\n\r\n # now load the picked numpy arrays \r\n self.data = []\r\n self.labels= []\r\n for fentry in self.train_list:\r\n f = fentry[0]\r\n file = os.path.join(self.root, self.base_folder, f)\r\n fo = open(file, 'rb')\r\n if sys.version_info[0] == 2:\r\n entry = pickle.load(fo)\r\n else:\r\n entry = pickle.load(fo, encoding='latin1')\r\n self.data.append(entry['data'])\r\n if 'labels' in entry:\r\n self.labels += entry['labels']\r\n else:\r\n self.labels += entry['fine_labels']\r\n fo.close()\r\n \r\n self.data = np.concatenate(self.data)\r\n # Randomly select indices for validation\r\n if self.val_split > 0:\r\n label_indices = [[] for _ in range(max(self.labels)+1)]\r\n for i,l in enumerate(self.labels):\r\n label_indices[l] += [i] \r\n label_indices = np.asarray(label_indices)\r\n \r\n # randomly grab 500 elements of each class\r\n np.random.seed(validate_seed)\r\n self.val_indices = [] \r\n for l_i in label_indices:\r\n self.val_indices += list(l_i[np.random.choice(len(l_i), int(len(self.data) * val_split) // (max(self.labels) + 1) ,replace=False)])\r\n \r\n if self.train=='validate': \r\n self.data = self.data[self.val_indices]\r\n self.labels = list(np.asarray(self.labels)[self.val_indices])\r\n \r\n self.data = self.data.reshape((int(50e3 * self.val_split), 3, 32, 32))\r\n self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC\r\n \r\n elif self.train:\r\n print(np.shape(self.data))\r\n if self.val_split > 0:\r\n self.data = np.delete(self.data,self.val_indices,axis=0)\r\n self.labels = list(np.delete(np.asarray(self.labels),self.val_indices,axis=0))\r\n \r\n self.data = self.data.reshape((int(50e3 * (1.-self.val_split)), 3, 32, 32))\r\n self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC\r\n else:\r\n f = self.test_list[0][0]\r\n file = os.path.join(self.root, self.base_folder, f)\r\n fo = open(file, 'rb')\r\n if sys.version_info[0] == 2:\r\n entry = pickle.load(fo)\r\n else:\r\n entry = pickle.load(fo, encoding='latin1')\r\n 
self.data = entry['data']\r\n if 'labels' in entry:\r\n self.labels = entry['labels']\r\n else:\r\n self.labels = entry['fine_labels']\r\n fo.close()\r\n self.data = self.data.reshape((10000, 3, 32, 32))\r\n self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC\r\n \r\n def __getitem__(self, index):\r\n \"\"\"\r\n Args:\r\n index (int): Index\r\n Returns:\r\n tuple: (image, target) where target is index of the target class.\r\n \"\"\"\r\n img, target = self.data[index], self.labels[index]\r\n\r\n # doing this so that it is consistent with all other datasets\r\n # to return a PIL Image\r\n img = Image.fromarray(img)\r\n\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n\r\n if self.target_transform is not None:\r\n target = self.target_transform(target)\r\n\r\n return img, target\r\n \r\n def __len__(self):\r\n return len(self.data)\r\n\r\n\r\nclass CIFAR100(CIFAR10):\r\n base_folder = 'cifar-100-python'\r\n url = \"http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz\"\r\n filename = \"cifar-100-python.tar.gz\"\r\n tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'\r\n train_list = [\r\n ['train', '16019d7e3df5f24257cddd939b257f8d'],\r\n ]\r\n\r\n test_list = [\r\n ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],\r\n ]\r\n" ]
[ [ "numpy.concatenate", "numpy.delete", "numpy.asarray", "numpy.random.seed", "numpy.load", "numpy.shape", "torch.from_numpy", "numpy.savez_compressed" ] ]
dgorelik/differential-privacy-library
[ "5a7a267c591320036615a52dfad1918dc3718e62", "5a7a267c591320036615a52dfad1918dc3718e62" ]
[ "diffprivlib/mechanisms/binary.py", "tests/tools/test_mean.py" ]
[ "# MIT License\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThe binary mechanism for differential privacy.\n\n\"\"\"\nimport numpy as np\nfrom numpy.random import random\n\nfrom diffprivlib.mechanisms.base import DPMechanism\n\n\nclass Binary(DPMechanism):\n \"\"\"The classic binary mechanism in differential privacy.\n\n Given a binary input value, the mechanism randomly decides to flip to the other binary value or not, in order to\n satisfy differential privacy.\n\n Paper link: https://arxiv.org/pdf/1612.05568.pdf\n\n \"\"\"\n def __init__(self):\n super().__init__()\n self._value0 = None\n self._value1 = None\n\n def __repr__(self):\n output = super().__repr__()\n output += \".set_labels(\" + str(self._value0) + \", \" + str(self._value1) + \")\" \\\n if self._value0 is not None else \"\"\n\n return output\n\n def set_labels(self, value0, value1):\n \"\"\"Sets the binary labels of the mechanism.\n\n Labels must be unique, non-empty strings. If non-string labels are required, consider using a\n :class:`.DPTransformer`.\n\n Parameters\n ----------\n value0 : str\n 0th binary label.\n\n value1 : str\n 1st binary label.\n\n Returns\n -------\n self : class\n\n \"\"\"\n if not isinstance(value0, str) or not isinstance(value1, str):\n raise TypeError(\"Binary labels must be strings. Use a DPTransformer (e.g. 
transformers.IntToString) for \"\n \"non-string labels\")\n\n if len(value0) * len(value1) == 0:\n raise ValueError(\"Binary labels must be non-empty strings\")\n\n if value0 == value1:\n raise ValueError(\"Binary labels must not match\")\n\n self._value0 = value0\n self._value1 = value1\n return self\n\n def check_inputs(self, value):\n \"\"\"Checks that all parameters of the mechanism have been initialised correctly, and that the mechanism is ready\n to be used.\n\n Parameters\n ----------\n value : str\n The value to be checked.\n\n Returns\n -------\n True if the mechanism is ready to be used.\n\n Raises\n ------\n Exception\n If parameters have not been set correctly, or if `value` falls outside the domain of the mechanism.\n\n \"\"\"\n super().check_inputs(value)\n\n if (self._value0 is None) or (self._value1 is None):\n raise ValueError(\"Binary labels must be set\")\n\n if not isinstance(value, str):\n raise TypeError(\"Value to be randomised must be a string\")\n\n if value not in [self._value0, self._value1]:\n raise ValueError(\"Value to be randomised is not in the domain {\\\"\" + self._value0 + \"\\\", \\\"\" + self._value1\n + \"\\\"}\")\n\n return True\n\n def randomise(self, value):\n \"\"\"Randomise `value` with the mechanism.\n\n Parameters\n ----------\n value : str\n The value to be randomised.\n\n Returns\n -------\n str\n The randomised value.\n\n \"\"\"\n self.check_inputs(value)\n\n indicator = 0 if value == self._value0 else 1\n\n unif_rv = random() * (np.exp(self._epsilon) + 1)\n\n if unif_rv > np.exp(self._epsilon) + self._delta:\n indicator = 1 - indicator\n\n return self._value0 if indicator == 0 else self._value1\n", "from unittest import TestCase\n\nimport numpy as np\n\nfrom diffprivlib.tools.utils import mean\nfrom diffprivlib.utils import PrivacyLeakWarning\n\n\nclass TestMean(TestCase):\n def test_not_none(self):\n mech = mean\n self.assertIsNotNone(mech)\n\n def test_no_params(self):\n a = np.array([1, 2, 3])\n with self.assertWarns(PrivacyLeakWarning):\n res = mean(a)\n self.assertIsNotNone(res)\n\n def test_no_epsilon(self):\n a = np.array([1, 2, 3])\n self.assertIsNotNone(mean(a, range=1))\n\n def test_no_range(self):\n a = np.array([1, 2, 3])\n with self.assertWarns(PrivacyLeakWarning):\n res = mean(a, epsilon=1)\n self.assertIsNotNone(res)\n\n def test_negative_range(self):\n a = np.array([1, 2, 3])\n with self.assertRaises(ValueError):\n mean(a, epsilon=1, range=-1)\n\n def test_missing_range(self):\n a = np.array([1, 2, 3])\n with self.assertWarns(PrivacyLeakWarning):\n res = mean(a, epsilon=1, range=None)\n self.assertIsNotNone(res)\n\n def test_large_epsilon(self):\n a = np.random.random(1000)\n res = float(np.mean(a))\n res_dp = mean(a, epsilon=1, range=1)\n\n self.assertAlmostEqual(res, res_dp, delta=0.01)\n\n def test_large_epsilon_axis(self):\n a = np.random.random((1000, 5))\n res = np.mean(a, axis=0)\n res_dp = mean(a, epsilon=1, range=1, axis=0)\n\n for i in range(res.shape[0]):\n self.assertAlmostEqual(res[i], res_dp[i], delta=0.01)\n" ]
[ [ "numpy.random.random", "numpy.exp" ], [ "numpy.random.random", "numpy.array", "numpy.mean" ] ]
cmelani/timecop-1
[ "108802c685b2b5d2df0cb576fa1b124386b8658d" ]
[ "engines/helpers.py" ]
[ "from keras.models import Sequential\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers.core import Dense, Activation, Dropout\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error,mean_absolute_error\nimport statsmodels.api as sm\n\ndef trendline(data, order=1):\n\n coeffs = np.polyfit(np.arange(0,len(data)), list(data), order)\n slope = coeffs[-2]\n return float(slope)\n\n\n\n\n\ndef seasonal_options (a):\n print(\" Starting seasonal finding\")\n print(a)\n x =sm.tsa.stattools.pacf(a)\n\n possible =[]\n for i in range(4, len(x)-6):\n before2 = x[i-2]\n before= x[i-1]\n period = x[i]\n last = x[i+1]\n last2 = x[i+2]\n if (before2 < before < period > last ):\n possible.append(i-1)\n print (\"Finishing seasonal finding\")\n return possible\n\ndef windows(seq, num):\n avg = len(seq) / float(num)\n out = []\n last = 0.0\n\n while last < len(seq):\n out.append(seq[int(last):int(last + avg)])\n last += avg\n\n return out\n\n\ndef create_dataset(dataset, window_size = 1):\n data_X, data_Y = [], []\n for i in range(len(dataset) - window_size - 1):\n a = dataset[i:(i + window_size), 0]\n data_X.append(a)\n data_Y.append(dataset[i + window_size, 0])\n return(np.array(data_X), np.array(data_Y))\n\n\n# Define the model.\ndef fit_model_new(train_X, train_Y, window_size = 1):\n model2 = Sequential()\n model2.add(LSTM(input_shape = (window_size, 1),\n units = window_size,\n return_sequences = True))\n model2.add(Dropout(0.5))\n model2.add(LSTM(256))\n model2.add(Dropout(0.5))\n model2.add(Dense(1))\n model2.add(Activation(\"linear\"))\n model2.compile(loss = \"mse\",\n optimizer = \"adam\")\n model2.summary()\n\n # Fit the first model.\n model2.fit(train_X, train_Y, epochs = 80,\n batch_size = 1,\n verbose = 2)\n return(model2)\n\n\ndef predict_and_score(model, X, Y,scaler):\n # Make predictions on the original scale of the data.\n pred_scaled = model.predict(X)\n pred = scaler.inverse_transform(pred_scaled)\n # Prepare Y data to also be on the original scale for interpretability.\n orig_data = scaler.inverse_transform([Y])\n # Calculate RMSE.\n score = mean_squared_error(orig_data[0], pred[:, 0])\n mae = mean_absolute_error(orig_data[0], pred[:, 0])\n return(score, pred, pred_scaled,mae)\n\n\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n\n\ndef merge_two_dicts(x, y):\n z = x.copy() # start with x's keys and values\n z.update(y) # modifies z with y's keys and values & returns None\n return z\n\ndef create_train_test(lista_puntos, lista_datos):\n df = pd.DataFrame()\n df['puntos'] = lista_puntos\n df['valores'] = lista_datos\n\n df.set_index('puntos',inplace=True,drop=False)\n tam_train = int(len(df)*0.7)\n print (\" train length\" + str(tam_train))\n\n df_train = df[:tam_train]\n df_test = df[tam_train:]\n return df, df_train, df_test\n" ]
[ [ "numpy.array", "sklearn.metrics.mean_squared_error", "pandas.DataFrame", "sklearn.metrics.mean_absolute_error", "numpy.abs" ] ]
JacobHanouna/agents
[ "94647342e48abe8915bade94ab6251152631ac29", "e906d512d86ca2358d13bc8fc2b2440141b8794c" ]
[ "tf_agents/agents/behavioral_cloning/behavioral_cloning_agent_test.py", "tf_agents/environments/gym_wrapper_test.py" ]
[ "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for agents.behavioral_cloning.behavioral_cloning_agent.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\n\nfrom tf_agents.agents.behavioral_cloning import behavioral_cloning_agent\nfrom tf_agents.drivers import test_utils as driver_test_utils\nfrom tf_agents.environments import trajectory_replay\nfrom tf_agents.networks import network\nfrom tf_agents.networks import q_network\nfrom tf_agents.networks import q_rnn_network\nfrom tf_agents.policies import actor_policy\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.trajectories import trajectory\nfrom tf_agents.utils import common\nfrom tf_agents.utils import test_utils\n\n# Number of times to train in test loops.\nTRAIN_ITERATIONS = 10\n\n\nclass DummyNet(network.Network):\n\n def __init__(self, unused_observation_spec, action_spec, name=None):\n super(DummyNet, self).__init__(\n unused_observation_spec, state_spec=(), name=name)\n action_spec = tf.nest.flatten(action_spec)[0]\n num_actions = action_spec.maximum - action_spec.minimum + 1\n\n # Store custom layers that can be serialized through the Checkpointable API.\n self._dummy_layers = [\n tf.keras.layers.Dense(\n num_actions,\n kernel_initializer=tf.compat.v1.initializers.constant([[2, 1],\n [1, 1]]),\n bias_initializer=tf.compat.v1.initializers.constant([[1], [1]]),\n dtype=tf.float32)\n ]\n\n def call(self, inputs, step_type=None, network_state=()):\n del step_type\n inputs = tf.cast(inputs[0], tf.float32)\n for layer in self._dummy_layers:\n inputs = layer(inputs)\n return inputs, network_state\n\n\nclass ActorBCAgent(behavioral_cloning_agent.BehavioralCloningAgent):\n \"\"\"BehavioralCloningAgent for Actor policies/networks.\"\"\"\n\n def _get_policies(self, time_step_spec, action_spec, cloning_network):\n policy = actor_policy.ActorPolicy(\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n actor_network=cloning_network,\n clip=True)\n\n return policy, policy\n\n\nclass BehavioralCloningAgentTest(test_utils.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(BehavioralCloningAgentTest, self).setUp()\n self._obs_spec = [tensor_spec.TensorSpec([2], tf.float32)]\n self._time_step_spec = ts.time_step_spec(self._obs_spec)\n self._action_spec = tensor_spec.BoundedTensorSpec([], tf.int32, 0, 1)\n self._observation_spec = self._time_step_spec.observation\n\n def testCreateAgent(self):\n cloning_net = DummyNet(self._observation_spec, self._action_spec)\n agent = behavioral_cloning_agent.BehavioralCloningAgent(\n self._time_step_spec,\n self._action_spec,\n cloning_network=cloning_net,\n optimizer=None)\n self.assertIsNotNone(agent.policy)\n\n def 
testCreateAgentNestSizeChecks(self):\n action_spec = [\n tensor_spec.BoundedTensorSpec([], tf.int32, 0, 1),\n tensor_spec.BoundedTensorSpec([], tf.int32, 0, 1)\n ]\n\n cloning_net = DummyNet(self._observation_spec, action_spec)\n with self.assertRaisesRegexp(ValueError, '.*nested actions.*'):\n behavioral_cloning_agent.BehavioralCloningAgent(\n self._time_step_spec,\n action_spec,\n cloning_network=cloning_net,\n optimizer=None)\n\n def testCreateAgentWithMultipleActionsAndCustomLossFn(self):\n action_spec = [\n tensor_spec.BoundedTensorSpec([], tf.int32, 0, 1),\n tensor_spec.BoundedTensorSpec([], tf.int32, 0, 1)\n ]\n\n cloning_net = DummyNet(self._observation_spec, action_spec)\n\n # We create an ActorBCAgent here instead of a BehavioralCloningAgent since\n # QPolicy currently doesn't accept action_specs with multiple actions.\n ActorBCAgent(\n self._time_step_spec,\n action_spec,\n cloning_network=cloning_net,\n optimizer=None,\n loss_fn=lambda logits, actions: 0)\n\n def testCreateAgentWithListActionSpec(self):\n action_spec = [tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)]\n cloning_net = DummyNet(self._observation_spec, action_spec)\n with self.assertRaisesRegexp(ValueError, '.*nested actions.*'):\n behavioral_cloning_agent.BehavioralCloningAgent(\n self._time_step_spec, action_spec,\n cloning_network=cloning_net,\n optimizer=None)\n\n def testCreateAgentDimChecks(self):\n action_spec = tensor_spec.BoundedTensorSpec([1, 2], tf.int32, 0, 1)\n cloning_net = DummyNet(self._observation_spec, action_spec)\n with self.assertRaisesRegexp(NotImplementedError, '.*scalar, unnested.*'):\n behavioral_cloning_agent.BehavioralCloningAgent(\n self._time_step_spec, action_spec,\n cloning_network=cloning_net,\n optimizer=None)\n\n # TODO(kbanoop): Add a test where the target network has different values.\n def testLoss(self):\n cloning_net = DummyNet(self._observation_spec, self._action_spec)\n agent = behavioral_cloning_agent.BehavioralCloningAgent(\n self._time_step_spec,\n self._action_spec,\n cloning_network=cloning_net,\n optimizer=None)\n\n observations = [tf.constant([[1, 2], [3, 4]], dtype=tf.float32)]\n actions = tf.constant([0, 1], dtype=tf.int32)\n rewards = tf.constant([10, 20], dtype=tf.float32)\n discounts = tf.constant([0.9, 0.9], dtype=tf.float32)\n\n experience = trajectory.first(\n observation=observations,\n action=actions,\n policy_info=(),\n reward=rewards,\n discount=discounts)\n loss_info = agent._loss(experience)\n\n self.evaluate(tf.compat.v1.global_variables_initializer())\n total_loss, _ = self.evaluate(loss_info)\n\n expected_loss = tf.reduce_mean(\n input_tensor=tf.compat.v1.nn.sparse_softmax_cross_entropy_with_logits(\n logits=cloning_net(observations)[0], labels=actions))\n\n self.assertAllClose(total_loss, expected_loss)\n\n @parameterized.named_parameters(('TrainOnMultipleSteps', False),\n ('TrainOnSingleStep', True))\n def testTrainWithNN(self, is_convert):\n # Emits trajectories shaped (batch=1, time=6, ...)\n traj, time_step_spec, action_spec = (\n driver_test_utils.make_random_trajectory())\n if is_convert:\n # Convert to single step trajectory of shapes (batch=6, 1, ...).\n traj = tf.nest.map_structure(common.transpose_batch_time, traj)\n cloning_net = q_network.QNetwork(\n time_step_spec.observation, action_spec)\n agent = behavioral_cloning_agent.BehavioralCloningAgent(\n time_step_spec,\n action_spec,\n cloning_network=cloning_net,\n optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001),\n num_outer_dims=2)\n # Disable clipping to make sure we 
can see the difference in behavior\n agent.policy._clip = False\n # Remove policy_info, as BehavioralCloningAgent expects none.\n traj = traj.replace(policy_info=())\n # TODO(b/123883319)\n if tf.executing_eagerly():\n train_and_loss = lambda: agent.train(traj)\n else:\n train_and_loss = agent.train(traj)\n replay = trajectory_replay.TrajectoryReplay(agent.policy)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n initial_actions = self.evaluate(replay.run(traj)[0])\n for _ in range(TRAIN_ITERATIONS):\n self.evaluate(train_and_loss)\n post_training_actions = self.evaluate(replay.run(traj)[0])\n post_training_actions = self.evaluate(replay.run(traj)[0])\n # We don't necessarily converge to the same actions as in trajectory after\n # 10 steps of an untuned optimizer, but the policy does change.\n self.assertFalse(np.all(initial_actions == post_training_actions))\n\n def testTrainWithSingleOuterDimension(self):\n # Emits trajectories shaped (batch=1, time=6, ...)\n traj, time_step_spec, action_spec = (\n driver_test_utils.make_random_trajectory())\n # Convert to shapes (batch=6, 1, ...) so this works with a non-RNN model.\n traj = tf.nest.map_structure(common.transpose_batch_time, traj)\n # Remove the time dimension so there is only one outer dimension.\n traj = tf.nest.map_structure(lambda x: tf.squeeze(x, axis=1), traj)\n cloning_net = q_network.QNetwork(\n time_step_spec.observation, action_spec)\n agent = behavioral_cloning_agent.BehavioralCloningAgent(\n time_step_spec,\n action_spec,\n cloning_network=cloning_net,\n optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.01))\n # Disable clipping to make sure we can see the difference in behavior\n agent.policy._clip = False\n # Remove policy_info, as BehavioralCloningAgent expects none.\n traj = traj.replace(policy_info=())\n # TODO(b/123883319)\n if tf.executing_eagerly():\n train_and_loss = lambda: agent.train(traj)\n else:\n train_and_loss = agent.train(traj)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n for _ in range(TRAIN_ITERATIONS):\n self.evaluate(train_and_loss)\n # Note that we skip the TrajectoryReplay since it requires a time dimension.\n\n def testTrainWithRNN(self):\n # Emits trajectories shaped (batch=1, time=6, ...)\n traj, time_step_spec, action_spec = (\n driver_test_utils.make_random_trajectory())\n cloning_net = q_rnn_network.QRnnNetwork(\n time_step_spec.observation, action_spec)\n agent = behavioral_cloning_agent.BehavioralCloningAgent(\n time_step_spec,\n action_spec,\n cloning_network=cloning_net,\n optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.01),\n num_outer_dims=2)\n # Disable clipping to make sure we can see the difference in behavior\n agent.policy._clip = False\n # Remove policy_info, as BehavioralCloningAgent expects none.\n traj = traj.replace(policy_info=())\n # TODO(b/123883319)\n if tf.executing_eagerly():\n train_and_loss = lambda: agent.train(traj)\n else:\n train_and_loss = agent.train(traj)\n replay = trajectory_replay.TrajectoryReplay(agent.policy)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n initial_actions = self.evaluate(replay.run(traj)[0])\n\n for _ in range(TRAIN_ITERATIONS):\n self.evaluate(train_and_loss)\n post_training_actions = self.evaluate(replay.run(traj)[0])\n # We don't necessarily converge to the same actions as in trajectory after\n # 10 steps of an untuned optimizer, but the policy does change.\n self.assertFalse(np.all(initial_actions == post_training_actions))\n\n def testPolicy(self):\n cloning_net = 
DummyNet(self._observation_spec, self._action_spec)\n agent = behavioral_cloning_agent.BehavioralCloningAgent(\n self._time_step_spec,\n self._action_spec,\n cloning_network=cloning_net,\n optimizer=None)\n observations = [tf.constant([[1, 2], [3, 4]], dtype=tf.float32)]\n time_steps = ts.restart(observations, batch_size=2)\n policy = agent.policy\n action_step = policy.action(time_steps)\n # Batch size 2.\n self.assertAllEqual(\n [2] + self._action_spec.shape.as_list(),\n action_step.action.shape,\n )\n self.evaluate(tf.compat.v1.global_variables_initializer())\n actions_ = self.evaluate(action_step.action)\n self.assertTrue(all(actions_ <= self._action_spec.maximum))\n self.assertTrue(all(actions_ >= self._action_spec.minimum))\n\n def testInitializeRestoreAgent(self):\n cloning_net = DummyNet(self._observation_spec, self._action_spec)\n agent = behavioral_cloning_agent.BehavioralCloningAgent(\n self._time_step_spec,\n self._action_spec,\n cloning_network=cloning_net,\n optimizer=None)\n observations = [tf.constant([[1, 2], [3, 4]], dtype=tf.float32)]\n time_steps = ts.restart(observations, batch_size=2)\n policy = agent.policy\n action_step = policy.action(time_steps)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n\n checkpoint = tf.train.Checkpoint(agent=agent)\n\n latest_checkpoint = tf.train.latest_checkpoint(self.get_temp_dir())\n checkpoint_load_status = checkpoint.restore(latest_checkpoint)\n\n with self.cached_session() as sess:\n checkpoint_load_status.initialize_or_restore(sess)\n self.assertAllEqual(sess.run(action_step.action), [0, 0])\n\n\nif __name__ == '__main__':\n test_utils.main()\n", "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for environments.gym_wrapper.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nfrom absl.testing.absltest import mock\nimport gym\nimport gym.spaces\nimport numpy as np\n\nfrom tf_agents.environments import gym_wrapper\nfrom tf_agents.utils import test_utils\n\n\nclass GymWrapperSpecTest(test_utils.TestCase):\n\n def test_spec_from_gym_space_discrete(self):\n discrete_space = gym.spaces.Discrete(3)\n spec = gym_wrapper.spec_from_gym_space(discrete_space)\n\n self.assertEqual((), spec.shape)\n self.assertEqual(np.int64, spec.dtype)\n self.assertEqual(0, spec.minimum)\n self.assertEqual(2, spec.maximum)\n\n def test_spec_from_gym_space_multi_discrete(self):\n multi_discrete_space = gym.spaces.MultiDiscrete([1, 2, 3, 4])\n spec = gym_wrapper.spec_from_gym_space(multi_discrete_space)\n\n self.assertEqual((4,), spec.shape)\n self.assertEqual(np.int32, spec.dtype)\n np.testing.assert_array_equal(np.array([0], dtype=np.int), spec.minimum)\n np.testing.assert_array_equal(\n np.array([0, 1, 2, 3], dtype=np.int), spec.maximum)\n\n def test_spec_from_gym_space_multi_binary(self):\n multi_binary_space = gym.spaces.MultiBinary(4)\n spec = 
gym_wrapper.spec_from_gym_space(multi_binary_space)\n\n self.assertEqual((4,), spec.shape)\n self.assertEqual(np.int8, spec.dtype)\n np.testing.assert_array_equal(np.array([0], dtype=np.int), spec.minimum)\n np.testing.assert_array_equal(np.array([1], dtype=np.int), spec.maximum)\n\n def test_spec_from_gym_space_box_scalars(self):\n for dtype in (np.float32, np.float64):\n box_space = gym.spaces.Box(-1.0, 1.0, (3, 4), dtype=dtype)\n spec = gym_wrapper.spec_from_gym_space(box_space)\n\n self.assertEqual((3, 4), spec.shape)\n self.assertEqual(dtype, spec.dtype)\n np.testing.assert_array_equal(-np.ones((3, 4)), spec.minimum)\n np.testing.assert_array_equal(np.ones((3, 4)), spec.maximum)\n\n def test_spec_from_gym_space_box_scalars_simplify_bounds(self):\n box_space = gym.spaces.Box(-1.0, 1.0, (3, 4))\n spec = gym_wrapper.spec_from_gym_space(box_space, simplify_box_bounds=True)\n\n self.assertEqual((3, 4), spec.shape)\n self.assertEqual(np.float32, spec.dtype)\n np.testing.assert_array_equal(np.array([-1], dtype=np.int), spec.minimum)\n np.testing.assert_array_equal(np.array([1], dtype=np.int), spec.maximum)\n\n def test_spec_from_gym_space_when_simplify_box_bounds_false(self):\n # testing on gym.spaces.Dict which makes recursive calls to\n # _spec_from_gym_space\n box_space = gym.spaces.Box(-1.0, 1.0, (2,))\n dict_space = gym.spaces.Dict({'box1': box_space, 'box2': box_space})\n spec = gym_wrapper.spec_from_gym_space(\n dict_space, simplify_box_bounds=False)\n\n self.assertEqual((2,), spec['box1'].shape)\n self.assertEqual((2,), spec['box2'].shape)\n self.assertEqual(np.float32, spec['box1'].dtype)\n self.assertEqual(np.float32, spec['box2'].dtype)\n self.assertEqual('box1', spec['box1'].name)\n self.assertEqual('box2', spec['box2'].name)\n np.testing.assert_array_equal(np.array([-1, -1], dtype=np.int),\n spec['box1'].minimum)\n np.testing.assert_array_equal(np.array([1, 1], dtype=np.int),\n spec['box1'].maximum)\n np.testing.assert_array_equal(np.array([-1, -1], dtype=np.int),\n spec['box2'].minimum)\n np.testing.assert_array_equal(np.array([1, 1], dtype=np.int),\n spec['box2'].maximum)\n\n def test_spec_from_gym_space_box_array(self):\n for dtype in (np.float32, np.float64):\n box_space = gym.spaces.Box(np.array([-1.0, -2.0]), np.array([2.0, 4.0]),\n dtype=dtype)\n spec = gym_wrapper.spec_from_gym_space(box_space)\n\n self.assertEqual((2,), spec.shape)\n self.assertEqual(dtype, spec.dtype)\n np.testing.assert_array_equal(np.array([-1.0, -2.0]), spec.minimum)\n np.testing.assert_array_equal(np.array([2.0, 4.0]), spec.maximum)\n\n def test_spec_from_gym_space_tuple(self):\n tuple_space = gym.spaces.Tuple((gym.spaces.Discrete(2),\n gym.spaces.Discrete(3)))\n spec = gym_wrapper.spec_from_gym_space(tuple_space)\n\n self.assertEqual(2, len(spec))\n self.assertEqual((), spec[0].shape)\n self.assertEqual(np.int64, spec[0].dtype)\n self.assertEqual(0, spec[0].minimum)\n self.assertEqual(1, spec[0].maximum)\n\n self.assertEqual((), spec[1].shape)\n self.assertEqual(np.int64, spec[1].dtype)\n self.assertEqual(0, spec[1].minimum)\n self.assertEqual(2, spec[1].maximum)\n\n def test_spec_from_gym_space_tuple_mixed(self):\n tuple_space = gym.spaces.Tuple((\n gym.spaces.Discrete(2),\n gym.spaces.Box(-1.0, 1.0, (3, 4)),\n gym.spaces.Tuple((gym.spaces.Discrete(2), gym.spaces.Discrete(3))),\n gym.spaces.Dict({\n 'spec_1':\n gym.spaces.Discrete(2),\n 'spec_2':\n gym.spaces.Tuple((gym.spaces.Discrete(2),\n gym.spaces.Discrete(3))),\n }),\n ))\n spec = gym_wrapper.spec_from_gym_space(tuple_space)\n\n 
self.assertEqual(4, len(spec))\n # Test Discrete\n self.assertEqual((), spec[0].shape)\n self.assertEqual(np.int64, spec[0].dtype)\n self.assertEqual(0, spec[0].minimum)\n self.assertEqual(1, spec[0].maximum)\n\n # Test Box\n self.assertEqual((3, 4), spec[1].shape)\n self.assertEqual(np.float32, spec[1].dtype)\n np.testing.assert_array_almost_equal(-np.ones((3, 4)), spec[1].minimum)\n np.testing.assert_array_almost_equal(np.ones((3, 4)), spec[1].maximum)\n\n # Test Tuple\n self.assertEqual(2, len(spec[2]))\n self.assertEqual((), spec[2][0].shape)\n self.assertEqual(np.int64, spec[2][0].dtype)\n self.assertEqual(0, spec[2][0].minimum)\n self.assertEqual(1, spec[2][0].maximum)\n self.assertEqual((), spec[2][1].shape)\n self.assertEqual(np.int64, spec[2][1].dtype)\n self.assertEqual(0, spec[2][1].minimum)\n self.assertEqual(2, spec[2][1].maximum)\n\n # Test Dict\n # Test Discrete in Dict\n discrete_in_dict = spec[3]['spec_1']\n self.assertEqual((), discrete_in_dict.shape)\n self.assertEqual(np.int64, discrete_in_dict.dtype)\n self.assertEqual(0, discrete_in_dict.minimum)\n self.assertEqual(1, discrete_in_dict.maximum)\n\n # Test Tuple in Dict\n tuple_in_dict = spec[3]['spec_2']\n self.assertEqual(2, len(tuple_in_dict))\n self.assertEqual((), tuple_in_dict[0].shape)\n self.assertEqual(np.int64, tuple_in_dict[0].dtype)\n self.assertEqual(0, tuple_in_dict[0].minimum)\n self.assertEqual(1, tuple_in_dict[0].maximum)\n self.assertEqual((), tuple_in_dict[1].shape)\n self.assertEqual(np.int64, tuple_in_dict[1].dtype)\n self.assertEqual(0, tuple_in_dict[1].minimum)\n self.assertEqual(2, tuple_in_dict[1].maximum)\n\n def test_spec_from_gym_space_dict(self):\n dict_space = gym.spaces.Dict([\n ('spec_2', gym.spaces.Box(-1.0, 1.0, (3, 4))),\n ('spec_1', gym.spaces.Discrete(2)),\n ])\n\n spec = gym_wrapper.spec_from_gym_space(dict_space)\n\n keys = list(spec.keys())\n self.assertEqual('spec_1', keys[1])\n self.assertEqual(2, len(spec))\n self.assertEqual((), spec['spec_1'].shape)\n self.assertEqual(np.int64, spec['spec_1'].dtype)\n self.assertEqual(0, spec['spec_1'].minimum)\n self.assertEqual(1, spec['spec_1'].maximum)\n\n self.assertEqual('spec_2', keys[0])\n self.assertEqual((3, 4), spec['spec_2'].shape)\n self.assertEqual(np.float32, spec['spec_2'].dtype)\n np.testing.assert_array_almost_equal(\n -np.ones((3, 4)),\n spec['spec_2'].minimum,\n )\n np.testing.assert_array_almost_equal(\n np.ones((3, 4)),\n spec['spec_2'].maximum,\n )\n\n def test_spec_from_gym_space_dtype_map(self):\n class Box(gym.spaces.Box):\n \"\"\"Box space without the dtype property.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Box, self).__init__(*args, **kwargs)\n del self.dtype\n\n tuple_space = gym.spaces.Tuple((\n gym.spaces.Discrete(2),\n Box(0, 1, (3, 4)),\n gym.spaces.Tuple((gym.spaces.Discrete(2), gym.spaces.Discrete(3))),\n gym.spaces.Dict({\n 'spec_1':\n gym.spaces.Discrete(2),\n 'spec_2':\n gym.spaces.Tuple((\n gym.spaces.Discrete(2),\n Box(0, 1, (3, 4)),\n )),\n }),\n ))\n\n dtype_map = {gym.spaces.Discrete: np.uint8, gym.spaces.Box: np.uint16}\n spec = gym_wrapper.spec_from_gym_space(tuple_space, dtype_map=dtype_map)\n self.assertEqual(np.uint8, spec[0].dtype)\n self.assertEqual(np.uint16, spec[1].dtype)\n self.assertEqual(np.uint8, spec[2][0].dtype)\n self.assertEqual(np.uint8, spec[2][1].dtype)\n self.assertEqual(np.uint8, spec[3]['spec_1'].dtype)\n self.assertEqual(np.uint8, spec[3]['spec_2'][0].dtype)\n self.assertEqual(np.uint16, spec[3]['spec_2'][1].dtype)\n\n def test_spec_name(self):\n box_space = 
gym.spaces.Box(\n np.array([-1.0, -2.0]), np.array([2.0, 4.0]), dtype=np.float32)\n spec = gym_wrapper.spec_from_gym_space(box_space, name='observation')\n self.assertEqual('observation', spec.name)\n\n def test_spec_name_nested(self):\n dict_space = gym.spaces.Tuple((gym.spaces.Dict({\n 'spec_0':\n gym.spaces.Dict({\n 'spec_1': gym.spaces.Discrete(2),\n 'spec_2': gym.spaces.Discrete(2),\n }),\n }), gym.spaces.Discrete(2)))\n spec = gym_wrapper.spec_from_gym_space(dict_space, name='observation')\n self.assertEqual('observation/tuple_0/spec_0/spec_1',\n spec[0]['spec_0']['spec_1'].name)\n self.assertEqual('observation/tuple_0/spec_0/spec_2',\n spec[0]['spec_0']['spec_2'].name)\n self.assertEqual('observation/tuple_1', spec[1].name)\n\n\nclass GymWrapperOnCartpoleTest(test_utils.TestCase):\n\n def test_wrapped_cartpole_specs(self):\n # Note we use spec.make on gym envs to avoid getting a TimeLimit wrapper on\n # the environment.\n cartpole_env = gym.spec('CartPole-v1').make()\n env = gym_wrapper.GymWrapper(cartpole_env)\n\n action_spec = env.action_spec()\n self.assertEqual((), action_spec.shape)\n self.assertEqual(0, action_spec.minimum)\n self.assertEqual(1, action_spec.maximum)\n\n observation_spec = env.observation_spec()\n self.assertEqual((4,), observation_spec.shape)\n self.assertEqual(np.float32, observation_spec.dtype)\n high = np.array([\n 4.8,\n np.finfo(np.float32).max, 2 / 15.0 * math.pi,\n np.finfo(np.float32).max\n ])\n np.testing.assert_array_almost_equal(-high, observation_spec.minimum)\n np.testing.assert_array_almost_equal(high, observation_spec.maximum)\n\n def test_wrapped_cartpole_reset(self):\n cartpole_env = gym.spec('CartPole-v1').make()\n env = gym_wrapper.GymWrapper(cartpole_env)\n\n first_time_step = env.reset()\n self.assertTrue(first_time_step.is_first())\n self.assertEqual(0.0, first_time_step.reward)\n self.assertEqual(1.0, first_time_step.discount)\n self.assertEqual((4,), first_time_step.observation.shape)\n self.assertEqual(np.float32, first_time_step.observation.dtype)\n\n def test_wrapped_cartpole_transition(self):\n cartpole_env = gym.spec('CartPole-v1').make()\n env = gym_wrapper.GymWrapper(cartpole_env)\n env.reset()\n transition_time_step = env.step(np.array(0, dtype=np.int32))\n\n self.assertTrue(transition_time_step.is_mid())\n self.assertNotEqual(None, transition_time_step.reward)\n self.assertEqual(1.0, transition_time_step.discount)\n self.assertEqual((4,), transition_time_step.observation.shape)\n\n def test_wrapped_cartpole_final(self):\n cartpole_env = gym.spec('CartPole-v1').make()\n env = gym_wrapper.GymWrapper(cartpole_env)\n time_step = env.reset()\n\n while not time_step.is_last():\n time_step = env.step(np.array(1, dtype=np.int32))\n\n self.assertTrue(time_step.is_last())\n self.assertNotEqual(None, time_step.reward)\n self.assertEqual(0.0, time_step.discount)\n self.assertEqual((4,), time_step.observation.shape)\n\n def test_get_info(self):\n cartpole_env = gym.spec('CartPole-v1').make()\n env = gym_wrapper.GymWrapper(cartpole_env)\n self.assertEqual(None, env.get_info())\n env.reset()\n self.assertEqual(None, env.get_info())\n env.step(np.array(0, dtype=np.int32))\n self.assertEqual({}, env.get_info())\n\n def test_automatic_reset_after_create(self):\n cartpole_env = gym.spec('CartPole-v1').make()\n env = gym_wrapper.GymWrapper(cartpole_env)\n\n first_time_step = env.step(0)\n self.assertTrue(first_time_step.is_first())\n\n def test_automatic_reset_after_done(self):\n cartpole_env = gym.spec('CartPole-v1').make()\n env = 
gym_wrapper.GymWrapper(cartpole_env)\n time_step = env.reset()\n\n while not time_step.is_last():\n time_step = env.step(np.array(1, dtype=np.int32))\n\n self.assertTrue(time_step.is_last())\n first_time_step = env.step(0)\n self.assertTrue(first_time_step.is_first())\n\n def test_automatic_reset_after_done_not_using_reset_directly(self):\n cartpole_env = gym.spec('CartPole-v1').make()\n env = gym_wrapper.GymWrapper(cartpole_env)\n time_step = env.step(1)\n\n while not time_step.is_last():\n time_step = env.step(np.array(1, dtype=np.int32))\n\n self.assertTrue(time_step.is_last())\n first_time_step = env.step(0)\n self.assertTrue(first_time_step.is_first())\n\n def test_method_propagation(self):\n cartpole_env = gym.spec('CartPole-v1').make()\n for method_name in ('render', 'seed', 'close'):\n setattr(cartpole_env, method_name, mock.MagicMock())\n env = gym_wrapper.GymWrapper(cartpole_env)\n env.render()\n self.assertEqual(1, cartpole_env.render.call_count)\n env.seed(0)\n self.assertEqual(1, cartpole_env.seed.call_count)\n cartpole_env.seed.assert_called_with(0)\n env.close()\n self.assertEqual(1, cartpole_env.close.call_count)\n\n def test_obs_dtype(self):\n cartpole_env = gym.spec('CartPole-v1').make()\n env = gym_wrapper.GymWrapper(cartpole_env)\n time_step = env.reset()\n self.assertEqual(env.observation_spec().dtype, time_step.observation.dtype)\n\n\nif __name__ == '__main__':\n test_utils.main()\n" ]
[ [ "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.cast", "tensorflow.nest.map_structure", "tensorflow.constant", "tensorflow.executing_eagerly", "tensorflow.nest.flatten", "tensorflow.squeeze", "tensorflow.compat.v1.initializers.constant", "numpy.all", "tensorflow.train.Checkpoint" ], [ "numpy.testing.assert_array_almost_equal", "numpy.finfo", "numpy.array", "numpy.ones" ] ]
rahulbordoloi/Rare-One-Hot-Enocder
[ "53c64a4e39ab7e179f7967037298fc251083a320" ]
[ "Test/Package_Test.py" ]
[ "# Import Package\nimport pandas as pd\nimport RLOHE as r # Rare Label One-Hot Encoder\nimport warnings\n\n# Other Settings\nwarnings.filterwarnings('ignore')\npd.options.display.float_format = '{:.6f}'.format\npd.set_option('display.max_columns', 256)\n\n\n# Main Method\nif __name__ == '__main__':\n\n # Loading in Dataset\n train = pd.read_csv('https://raw.githubusercontent.com/rahulbordoloi/Rare-Label-One-Hot-Enocder/main/Data/Train_Data.csv')\n test = pd.read_csv('https://raw.githubusercontent.com/rahulbordoloi/Rare-Label-One-Hot-Enocder/main/Data/Test_Data.csv')\n\n # Top Labeled Entries in Both of the Sets for Analysis\n r.TopLabeledEntries(train, test, feature_name = 'department_info', threshold = 10, secondary_feature = 'cost_to_pay')\n\n '''\n # Rare Label One-Hot Encoder [Level Wise]\n encodedTrain, encodedTest = r.RareLabelOneHotEncoder(train, test, feature_name = 'department_info', threshold = 10,\n criterion = 'level', prefix_name = 'dept', verbose = True)\n '''\n\n # Rare Label One-Hot Encoder [Volume Wise]\n encodedTrain, encodedTest = r.RareLabelOneHotEncoder(train, test, feature_name = 'department_info', threshold = 12500,\n criterion = 'volume', prefix_name = 'dept')\n\n # Printing out DataFrame [Train]\n print(encodedTrain.head(1))\n\n # Printing out DataFrame [Test]\n print(encodedTest.head(1))\n\n\n\n\n\n" ]
[ [ "pandas.read_csv", "pandas.set_option" ] ]
ejlouw/veroku
[ "27de540846d33ab21c931cfd3f87ef6fbb88620e" ]
[ "veroku/factors/experimental/nonlinear_gaussian.py" ]
[ "\"\"\"\nA module for instantiating and performing operations on Non-linear Gaussian functions.\n\"\"\"\n\n# Standard imports\nimport operator\nimport copy\n\n# Third-party imports\nimport numpy as np\n\n# Local imports\nfrom veroku.factors import _factor_utils\nfrom veroku.factors._sigma_points import get_sigma_points_array, sigma_points_array_to_joint_params\nfrom veroku.factors._factor import Factor\nfrom veroku.factors.gaussian import Gaussian\nfrom veroku.factors.experimental.gaussian_mixture import GaussianMixture\nfrom veroku.factors._factor_utils import (\n make_square_matrix,\n indexed_square_matrix_operation,\n format_list_elements,\n)\nfrom veroku.factors._factor_template import FactorTemplate\nfrom veroku._constants import DEFAULT_FACTOR_RTOL, DEFAULT_FACTOR_ATOL\n\n\n# TODO: see that lazy evaluation (recompute joint) is done sensibly and consistently\n\n# pylint: disable=protected-access\n\n\nclass NonLinearGaussian(Factor):\n \"\"\"\n A Class for instantiating and performing operations on multivariate Gaussian mixture functions.\n \"\"\"\n\n # pylint: disable=too-many-instance-attributes\n def __init__(self,\n conditioning_vars,\n conditional_vars,\n transform,\n noise_cov,\n log_weight=0.0,\n joint_distribution=None,\n conditional_update_factor=None,\n conditioning_factor=None,\n observed_evidence=None):\n \"\"\"\n The initializer.\n\n :param list conditioning_vars: The conditioning vars.\n :param list conditional_vars: The conditioning vars.\n :param transform: The transformation that links the conditioning to the conditional distribution\n :param noise_cov: The noise covariance matrix of the zero mean Gaussian noise.\n :param Gaussian conditional_update_factor: A factor, with same scope, subset of, the conditional vars, to be\n multiplied with the transformed (joint) distribution to form the final joint distribution.\n :param Gaussian conditioning_factor: The factor with scope less than or equals to the conditional variables to be\n transformed by the transform. 
If the scope is less than the conditional variables set, observed evidence\n (if available) can be used to still enable the successful transformation and calculation of the joint.\n :param dict observed_evidence: A dictionary mapping variable names ot observed values.\n \"\"\"\n if len(set(conditioning_vars).intersection(conditional_vars)) != 0:\n raise ValueError(\"variable overlap between conditioning_vars and conditional_vars.\")\n var_names = conditioning_vars + conditional_vars\n\n super().__init__(var_names=var_names)\n self.transform = transform\n if noise_cov.shape[0] != len(conditional_vars):\n raise ValueError(\n \"noise_cov rows and columns should correspond to the number of conditional variables\"\n )\n self.noise_cov = make_square_matrix(noise_cov)\n\n self.log_weight = log_weight\n self.conditioning_vars = conditioning_vars.copy()\n self.conditional_vars = conditional_vars.copy()\n\n self.conditioning_factor = None\n if conditioning_factor is not None:\n self.conditioning_factor = conditioning_factor.copy()\n\n self.conditional_update_factor = None\n if conditional_update_factor is not None:\n self.conditional_update_factor = conditional_update_factor.copy()\n\n if joint_distribution is not None:\n self._joint_distribution = joint_distribution.copy()\n else:\n self._joint_distribution = Gaussian.make_vacuous(var_names)\n if observed_evidence is None:\n self.observed_evidence = {}\n else:\n self.observed_evidence = copy.deepcopy(observed_evidence)\n\n def equals(self, factor, rtol=DEFAULT_FACTOR_RTOL, atol=DEFAULT_FACTOR_ATOL):\n \"\"\"\n Check if this factor is the same as another factor.\n\n :param factor: The other factor to compare to.\n :type factor: Gaussian\n :return: The result of the comparison.\n :rtype: bool\n \"\"\"\n return self._joint_distribution.equals(factor, rtol=rtol, atol=atol)\n\n def distance_from_vacuous(self):\n \"\"\"\n NOTE: Not Implemented yet.\n Get the Kullback-Leibler (KL) divergence between the message factor and a vacuous (uniform) version of it.\n\n :return: The KL-divergence\n \"\"\"\n raise NotImplementedError('This function has not been implemented yet.')\n\n def kl_divergence(self, factor):\n \"\"\"\n NOTE: Not Implemented yet.\n Get the KL-divergence D_KL(self || factor) between a normalized version of this factor and another factor.\n\n :param factor: The other factor\n :type factor: GaussianMixture\n :return: The Kullback-Leibler divergence\n :rtype: float\n \"\"\"\n raise NotImplementedError('This function has not been implemented yet.')\n\n def copy(self):\n \"\"\"\n Make a copy of the factor.\n\n :return: the copy\n :rtype: NonLinearGaussian\n \"\"\"\n nlg_copy = NonLinearGaussian(\n conditioning_vars=copy.deepcopy(self.conditioning_vars),\n conditional_vars=copy.deepcopy(self.conditional_vars),\n transform=copy.deepcopy(self.transform),\n noise_cov=copy.deepcopy(self.noise_cov),\n log_weight=self.log_weight,\n joint_distribution=self._joint_distribution,\n conditional_update_factor=self.conditional_update_factor,\n conditioning_factor=self.conditioning_factor,\n observed_evidence=self.observed_evidence,\n )\n return nlg_copy\n\n @property\n def joint_distribution(self):\n \"\"\"\n The joint distribution.\n\n :return: the joint distribution\n \"\"\"\n self_copy = self.copy()\n self_copy._recompute_joint()\n return self_copy._joint_distribution\n\n def get_prec(self):\n \"\"\"\n Get the K matrix (after ensuring that the canonical form is updated.)\n\n :return: The K parameter.\n \"\"\"\n self._recompute_joint()\n return 
self._joint_distribution.get_prec()\n\n def get_h(self):\n \"\"\"\n Get the log h vector (after ensuring that the canonical form is updated.)\n\n :return: The h parameter.\n \"\"\"\n self._recompute_joint()\n return self._joint_distribution.get_h()\n\n def get_g(self):\n \"\"\"\n Get the g scalar (after ensuring that the canonical form is updated.)\n\n :return: The g parameter.\n \"\"\"\n self._recompute_joint()\n return self._joint_distribution.get_g()\n\n def get_cov(self):\n \"\"\"\n Get the covariance matrix (after ensuring that the covariance form is updated.)\n\n :return: The cov parameter.\n \"\"\"\n self._recompute_joint()\n return self._joint_distribution.get_cov()\n\n def get_mean(self):\n \"\"\"\n Get the mean vector (after ensuring that the covariance form is updated.)\n\n :return: The mean parameter.\n \"\"\"\n self._recompute_joint()\n return self._joint_distribution.get_mean()\n\n def get_log_weight(self):\n \"\"\"\n Get the log weight (after ensuring that the covariance form is updated.)\n\n :return: The log_weight parameter.\n \"\"\"\n self._recompute_joint()\n return self._joint_distribution.get_log_weight()\n\n def add_log_weight(self, log_weight_to_add):\n \"\"\"\n Add a log weight to the factor.\n\n :param log_weight_to_add: The log weight to add.\n \"\"\"\n self.log_weight += log_weight_to_add\n\n def _recompute_joint(self):\n \"\"\"\n Recompute the joint distribution.\n \"\"\"\n # TODO: improve this function (after code coverage is completed for this class) and remove disable below.\n # pylint: disable=too-many-locals\n observed_vars = list(self.observed_evidence.keys())\n\n conditioning_observed_vars = [var for var in observed_vars if var in self.conditioning_vars]\n conditioning_observed_values = [\n self.observed_evidence[var] for var in observed_vars if var in self.conditioning_vars\n ]\n\n conditional_observed_vars = [var for var in observed_vars if var in self.conditional_vars]\n conditional_observed_values = [\n self.observed_evidence[var] for var in observed_vars if var in self.conditional_vars\n ]\n\n # under 3 conditions we can successfully perform the transformation:\n # 1. We have a well defined conditional distribution with the full scope of the conditional variables.\n # 2. All the conditional variables are observed.\n # 3. 
We have a conditional distribution over a subset of the conditional variables and the rest of the\n # conditional variables are observed.\n\n # Initial feasibility check\n conditioning_factor_var_names = []\n if self.conditioning_factor is not None:\n conditioning_factor_var_names = self.conditioning_factor.var_names\n vars_obs_vars_union = set(conditioning_factor_var_names).union(set(conditioning_observed_vars))\n if vars_obs_vars_union != set(self.conditioning_vars):\n # The conditioning_factor variables and observed conditional variables combined do not make up a\n # complete set - so we cannot calculate the transform.\n self._joint_distribution = Gaussian.make_vacuous(self.var_names)\n return\n\n # If we are still here, there we can calculate the transformation.\n conditioning_factor_sigma_points_array = np.expand_dims(np.array([]), axis=0).T # empty dummy for generalisation\n if self.conditioning_factor is not None:\n conditioning_factor = self.conditioning_factor.copy()\n cond_factor_observed_vars = list(set(observed_vars).intersection(conditioning_factor.var_names))\n cond_factor_observed_values = [self.observed_evidence[var] for var in cond_factor_observed_vars]\n\n if len(cond_factor_observed_vars) > 0:\n conditioning_factor = conditioning_factor.reduce(\n vrs=cond_factor_observed_vars, values=cond_factor_observed_values\n )\n\n conditioning_factor_sigma_points_array = get_sigma_points_array(conditioning_factor)\n\n # extend sigma points with observed variable values\n if len(conditioning_observed_vars) > 0:\n observed_vec = _factor_utils.make_column_vector(conditioning_observed_values)\n tiled_observed_vec = np.tile(observed_vec, [1, conditioning_factor_sigma_points_array.shape[1]])\n extended_sigma_points_array = np.concatenate([tiled_observed_vec, conditioning_factor_sigma_points_array])\n extended_sigma_point_vars = conditioning_observed_vars + conditioning_factor.var_names\n else:\n extended_sigma_points_array = conditioning_factor_sigma_points_array\n extended_sigma_point_vars = conditioning_factor.var_names\n\n # TODO: Add conditional var_names here somehow. We need to ensure that the correct variables are used\n # in the correct place in the transformation. 
In the past, we have simply done this by standardising\n # on the variable indices in different place, but this is probably not very safe.\n joint_cov, joint_mean = sigma_points_array_to_joint_params(sigma_points_array=extended_sigma_points_array,\n transform=self.transform,\n var_names=extended_sigma_point_vars)\n if (joint_mean.shape[0] != len(self.var_names)) or (joint_cov.shape[0] != len(self.var_names)):\n raise AssertionError(\"Transform resulted in incorrect number of variables.\")\n\n # remove the observed vars again after transformation\n nov = len(conditioning_observed_vars) # number of observed vars\n joint_cov = joint_cov[nov:, nov:]\n joint_mean = joint_mean[nov:, :]\n joint_var_names = conditioning_factor.var_names + self.conditional_vars\n\n joint_cov_plus_noise, joint_vars = indexed_square_matrix_operation(\n joint_cov, self.noise_cov, joint_var_names, self.conditional_vars, operator.add\n )\n joint_log_weight = self.log_weight + conditioning_factor.log_weight\n self._joint_distribution = Gaussian(cov=joint_cov_plus_noise,\n mean=joint_mean,\n log_weight=joint_log_weight,\n var_names=joint_vars,\n )\n if self.conditional_update_factor is not None:\n self._joint_distribution = self._joint_distribution.multiply(self.conditional_update_factor)\n if len(conditional_observed_vars) > 0:\n self._joint_distribution = self._joint_distribution.reduce(\n conditional_observed_vars, conditional_observed_values\n )\n\n def normalize(self):\n \"\"\"\n Normalize the factor.\n\n :return: The normalized factor.\n :rtype: Gaussian\n \"\"\"\n # TODO: make this more efficient\n return self.joint_distribution.normalize()\n\n def multiply(self, factor):\n \"\"\"\n Multiply this factor with another factor.\n\n :param factor: the factor to multiply with\n :return: the resulting factor\n :rtype: NonLinearGaussian\n \"\"\"\n\n if isinstance(factor, GaussianMixture):\n nlgs = []\n for gaussian_factor in factor.components:\n nlgs.append(self.multiply(gaussian_factor))\n return NonLinearGaussianMixture(nlgs)\n\n self_copy = self.copy()\n if isinstance(factor, NonLinearGaussian):\n raise NotImplementedError(\n \"Multiplication of two nonlinear-Gaussian factors has not been implemented.\"\n )\n if factor._is_vacuous:\n # TODO: check this (esp to see what to do with)\n return self_copy\n # TODO: change to 'update conditional model and re-transform model' (if correct)\n if not isinstance(factor, Gaussian):\n raise ValueError(f\"factor must be Gaussian, got {type(factor)}\")\n\n if set(factor.var_names) <= set(self_copy.conditional_vars):\n if self_copy.conditional_update_factor is None:\n self_copy.conditional_update_factor = factor.copy()\n else:\n self_copy.conditional_update_factor = self_copy.conditional_update_factor.multiply(factor)\n elif set(factor.var_names) <= set(self_copy.conditioning_vars):\n if self_copy.conditioning_factor is None:\n self_copy.conditioning_factor = factor.copy()\n else:\n self_copy.conditioning_factor = self_copy.conditioning_factor.multiply(factor)\n else:\n raise ValueError(\n f\"cannot absorb factor with scope ({factor.var_names}) \\n \"\n f\" which has neither conditional ({self_copy.conditioning_vars}) \\n \"\n f\" nor conditioning ({self_copy.conditional_vars}) scope.\"\n )\n return self_copy\n\n def divide(self, factor):\n \"\"\"\n Divide this factor by another factor.\n\n :param factor: the factor divide by\n :return: the resulting factor\n :rtype: NonLinearGaussian\n \"\"\"\n self_copy = self.copy()\n if factor._is_vacuous:\n # TODO: check this (esp to see what to do 
with)\n return self_copy\n # TODO: change to 'update conditional model and re-transform model' (if correct)\n if not isinstance(factor, Gaussian):\n raise ValueError(\"factor must be Gaussian.\")\n\n if set(factor.var_names) <= set(self_copy.conditional_vars):\n self_copy.conditional_update_factor = self_copy.conditional_update_factor.divide(factor)\n elif set(factor.var_names) <= set(self_copy.conditioning_vars):\n if self_copy.conditioning_factor is None:\n raise NotImplementedError()\n self_copy.conditioning_factor = self_copy.conditioning_factor.divide(factor)\n else:\n raise ValueError(\n f\"cannot cancel factor with scope ({factor.var_names}) \\n \"\n f\" which has neither conditional ({self_copy.conditioning_vars}) \\n \"\n f\" nor conditioning ({self_copy.conditional_vars}) scope.\"\n )\n self_copy._recompute_joint()\n return self_copy\n\n def reduce(self, vrs, values):\n \"\"\"\n Observe a subset of the variables in the scope of this factor and return the resulting factor.\n\n :param vrs: the names of the observed variable (list)\n :param values: the values of the observed variables (list or vector-like object)\n :return: the resulting factor\n :rtype: NonLinearGaussian\n \"\"\"\n\n self_copy = self.copy()\n evidence_dict = dict(zip(vrs, values))\n self_copy.observed_evidence.update(evidence_dict)\n return self_copy\n\n def marginalize(self, vrs, keep=True):\n \"\"\"\n Integrate out variables from this factor.\n\n :param vrs: (list) a subset of variables in the factor's scope\n :param keep: Whether to keep or sum out vrs\n :return: the resulting marginal\n :rtype: Gaussian\n \"\"\"\n\n vrs_to_keep = super().get_marginal_vars(vrs, keep=keep)\n assert set(vrs_to_keep) <= set(self.var_names), \"Error: asked to keep vars not in factor.\"\n self_copy = self.copy()\n self_copy._recompute_joint()\n if self_copy._joint_distribution._is_vacuous:\n if set(vrs_to_keep) <= set(self_copy.conditioning_vars):\n if self_copy.conditioning_factor is not None:\n if set(vrs_to_keep) <= set(self_copy.conditioning_factor.var_names):\n marginal = self_copy.conditioning_factor.marginalize(vrs_to_keep, keep=True)\n return marginal\n if set(vrs_to_keep) <= set(self_copy.conditional_vars):\n if self_copy.conditional_update_factor is not None:\n if set(vrs_to_keep) <= set(self_copy.conditional_update_factor.var_names):\n marginal = self_copy.conditional_update_factor.marginalize(vrs_to_keep, keep=True)\n marginal._add_log_weight(self.log_weight)\n return marginal\n else:\n return Gaussian.make_vacuous(vrs_to_keep)\n return self_copy._joint_distribution.marginalize(vrs_to_keep, keep=True)\n\n def sample(self, num_samples):\n \"\"\"\n Draw samples from the Gaussian joint distribution defined by the non-linear Gaussian and the factors that have\n (potentially) been multiplied with it.\n\n :param num_samples: The number of samples to draw.\n :type num_samples:\n :return: The samples.\n :rtype: int\n \"\"\"\n self._recompute_joint()\n return self._joint_distribution.sample(num_samples=num_samples)\n\n def show(self):\n \"\"\"\n Print the parameters of the nonlinear Gaussian distribution.\n \"\"\"\n # TODO: add __repr__ function and use here.\n print(\"Non-linear Gaussian\")\n print(\"conditioning variables:\", self.conditioning_vars)\n print(\"conditional variables:\", self.conditional_vars)\n print(\"transform: \", self.transform)\n print(\"noise covariance = \\n\", self.noise_cov)\n print(\"joint_distribution = \\n\")\n if self._joint_distribution is not None:\n self._recompute_joint()\n 
self._joint_distribution.show()\n\n print(\"conditional update distribution = \\n\")\n if self.conditional_update_factor is not None:\n self.conditional_update_factor.show()\n else:\n print(\"vacuous\")\n\n print(\"conditioning distribution = \\n\")\n if self.conditioning_factor is not None:\n self.conditioning_factor.show()\n else:\n print(\"vacuous\")\n\n def plot(self):\n \"\"\"\n Plot the joint distribution.\n \"\"\"\n self_copy = self.copy()\n self_copy._recompute_joint()\n self._joint_distribution.plot()\n\n\nclass NonLinearGaussianMixture(Factor):\n \"\"\"\n A Class for instantiating and performing operations on multivariate Gaussian mixture functions.\n \"\"\"\n\n def __init__(self, factors, split_singles_before_absorb=True):\n \"\"\"\n The initializer.\n\n :param factors: The list of factors.\n :type factors: NonLinearGaussian factor list\n :param bool split_singles_before_absorb: Whether or not to split a Gaussian (or single component\n GaussianMixture) before it is multiplied in. This can result in a better approximation of non-linear\n Gaussian transformations.\n \"\"\"\n self.nlgs = []\n if len(factors) == 0:\n raise ValueError(\"received empty list of factors.\")\n super().__init__(var_names=factors[0].var_names)\n for factor in factors:\n if not isinstance(factor, NonLinearGaussian):\n raise TypeError(f\"expected NonLinearGaussian type, received {type(factor)}.\")\n if not set(self.var_names) == set(factor.var_names):\n raise ValueError(\n f\"Inconsistent variable names. First factor has var_names = {self.var_names},\"\n f\" another has var_names = {factor.var_names}\"\n )\n self.nlgs.append(factor.copy())\n self.split_singles_before_absorb = split_singles_before_absorb\n\n def normalize(self):\n \"\"\"\n Normalise the factor (not yet implemented).\n \"\"\"\n raise NotImplementedError()\n\n def copy(self):\n \"\"\"\n Make a copy of this factor.\n\n :return: the factor\n :rtype: NonLinearGaussianMixture\n \"\"\"\n return NonLinearGaussianMixture(self.nlgs)\n\n @property\n def joint_distribution(self):\n gaussian_joints = []\n for nlg in self.nlgs:\n gaussian_joints.append(nlg.joint_distribution)\n return GaussianMixture(gaussian_joints)\n\n def multiply(self, factor):\n \"\"\"\n Multiply this factor with another factor.\n\n :param factor: the factor to multiply with\n :return: the resulting factor\n :rtype: NonLinearGaussianMixture\n \"\"\"\n if isinstance(factor, GaussianMixture):\n gm_factor = factor\n elif isinstance(factor, Gaussian):\n gm_factor = GaussianMixture([Gaussian])\n else:\n raise NotImplementedError()\n\n # TODO: Generalise the Gaussian.split_gaussian function to more than one dimensional cases and remove the\n # limitation here\n if self.split_singles_before_absorb and len(gm_factor.var_names) == 1:\n if len(gm_factor.components) == 1:\n gm_factor = gm_factor.components[0]._split_gaussian()\n\n new_nlgs = []\n for gauss in gm_factor.components:\n for nlg in self.nlgs:\n new_nlgs.append(nlg.multiply(gauss))\n return NonLinearGaussianMixture(new_nlgs)\n\n def divide(self, factor):\n \"\"\"\n Divide this factor by another factor.\n\n :param factor: the factor divide by\n :return: the resulting factor\n :rtype: NonLinearGaussianMixture\n \"\"\"\n if isinstance(factor, Gaussian):\n gaussian_factor = factor\n elif isinstance(factor, GaussianMixture):\n # TODO: Add better Gaussian mixture division approximation\n gaussian_factor = factor.moment_match_to_single_gaussian()\n else:\n raise NotImplementedError(\"cannot divide NonLinearGaussianMixture by 
{type(factor)}.\")\n new_nlgs = []\n for nlg in self.nlgs:\n new_nlgs.append(nlg.divide(gaussian_factor))\n return NonLinearGaussianMixture(new_nlgs)\n\n def reduce(self, vrs, values):\n \"\"\"\n Observe a subset of the variables in the scope of this factor and return the resulting factor.\n\n :param vrs: the names of the observed variable (list)\n :param values: the values of the observed variables (list or vector-like object)\n :return: the resulting factor\n :rtype: NonLinearGaussianMixture\n \"\"\"\n new_nlgs = []\n for nlg in self.nlgs:\n new_nlgs.append(nlg.reduce(vrs, values))\n return NonLinearGaussianMixture(new_nlgs)\n\n def marginalize(self, vrs, keep=True):\n \"\"\"\n Integrate out variables from this factor.\n\n :param vrs: (list) a subset of variables in the factor's scope\n :param keep: Whether to keep or sum out vrs\n :return: the resulting marginal\n :rtype: NonLinearGaussianMixture\n \"\"\"\n new_nlgs = []\n for nlg in self.nlgs:\n new_nlgs.append(nlg.marginalize(vrs, keep))\n return NonLinearGaussianMixture(new_nlgs)\n\n def sample(self, num_samples):\n \"\"\"\n Draw samples from the Gaussian mixture joint distribution defined by the non-linear Gaussians and the factors\n that have (potentially) been multiplied with them.\n\n :param num_samples: The number of samples to draw.\n :type num_samples:\n :return: The samples.\n :rtype: int\n \"\"\"\n\n # TODO: fix incorrect num samples issue\n sample_sets = []\n for nlg in self.nlgs:\n samples = nlg.sample(num_samples)\n sample_sets.append(samples)\n return np.concatenate(sample_sets)\n\n def plot(self):\n \"\"\"\n Plot the joint Gaussian Mixture distribution.\n \"\"\"\n gaussian_components = [nlg.joint_distribution for nlg in self.nlgs]\n gaussian_mixture = GaussianMixture(gaussian_components)\n gaussian_mixture.plot()\n\n def show(self):\n \"\"\"\n Print the parameters of the nonlinear Gaussian distribution.\n \"\"\"\n for i, nlg in enumerate(self.nlgs):\n print(f\"######### NLG {i} #############################\")\n nlg.show()\n\n\nclass NonLinearGaussianTemplate(FactorTemplate):\n \"\"\"\n A template class for NonLinearGaussian factors.\n \"\"\"\n\n def __init__(self, conditioning_var_templates, conditional_var_templates, transition_function, noise_cov):\n \"\"\"\n The initializer.\n\n :param conditioning_var_templates: The list of formattable strings for the conditioning variables (i.e: ['var_a{i}_{t}', 'var_b{i}_{t}'])\n :param conditional_var_templates: The list of formattable strings for the conditional variables (i.e: ['var_c{i}_{t}', 'var_d{i}_{t}'])\n :param conditioning_var_templates: The formattable strings for the conditioning variables.\n :type conditioning_var_templates: str list\n :param conditional_var_templates: The formattable strings for the conditioning variables.\n :type conditional_var_templates: str list\n :param callable transition_function: The function that specifies the non-linear transform. 
This function takes\n 2 parameters, a vector-like value to be transformed and the variable names list specifying the names of the\n variable elements of the value vector (i.e transition_function = lamda x, var_names: np.square(x)).\n The variable names do not need to be used, but can be useful in certain cases where functions need to be\n applied to specific variables.\n :param noise_cov: The noise covariance matrix of the additive noise random variable.\n \"\"\"\n\n super().__init__(\n conditioning_var_templates=conditioning_var_templates,\n conditional_var_templates=conditional_var_templates,\n )\n self.transition_function = transition_function\n self.noise_cov = noise_cov\n\n def make_factor(self, format_dict=None, conditioning_vars=None, conditional_vars=None):\n \"\"\"\n Make a factor with var_templates formatted by format_dict to create specific var names.\n\n :param format_dict: The dictionary to be used to format the var_templates strings.\n :type format_dict: str dict\n :param conditioning_vars: The conditioning variables strings.\n :type conditioning_vars: str list\n :param conditional_vars: The conditioning variables strings.\n :type conditional_vars: str list\n :return: The instantiated factor.\n :rtype: NonLinearGaussianMixture\n \"\"\"\n if format_dict is not None:\n assert conditioning_vars is None\n assert conditioning_vars is None\n\n conditioning_vars = format_list_elements(self._conditioning_var_templates, format_dict)\n conditional_vars = format_list_elements(self._conditional_var_templates, format_dict)\n\n return NonLinearGaussian(\n conditioning_vars,\n conditional_vars,\n self.transition_function,\n self.noise_cov,\n log_weight=0.0,\n joint_distribution=None,\n )\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.tile" ] ]
OmarJabri7/RoboCupChallenge
[ "f115d4b376887726e1dac5852c818da72b6acf54" ]
[ "robocup_ws/src/Planner/src/striker_controller.py" ]
[ "from game_interfaces.msg import PlayerCommand\nimport numpy as np\nfrom BasicCommonActions.go_to_action import simple_go_to_action, go_to_fast\nfrom game_interfaces.msg import Position\nfrom BasicCommonActions.plan_supporting_functions import TeamMasterSupporting\n\n\nclass Team1Striker:\n def __init__(self, init_y_pos, my_idx, team_id):\n self.my_team_id = team_id\n self.conversion = 1 if self.my_team_id == 0 else -1\n self.my_idx = my_idx\n self.init_y_pos = init_y_pos\n self.kick_distance = 0.17\n\n def get_action(self, my_pos_efcs, ball_pos_efcs, team_positions_wcs=None, opponents_positions_wcs=None):\n d_player2ball = np.hypot(my_pos_efcs.x - ball_pos_efcs.x, my_pos_efcs.y - ball_pos_efcs.y)\n\n opponents_x_pos_wcs = [pos.x for pos in opponents_positions_wcs]\n opponents_y_pos_wcs = [pos.y for pos in opponents_positions_wcs]\n\n opponents_sorted_ids_by_x = np.argsort(opponents_x_pos_wcs)\n # the last defender is second closest to net player\n last_defender_id = opponents_sorted_ids_by_x[1] if self.my_team_id == 1 else opponents_sorted_ids_by_x[-2]\n best_x_efcs = opponents_x_pos_wcs[last_defender_id] * self.conversion\n best_y_efcs = 1 * np.sign(self.init_y_pos)\n l_v, r_v = go_to_fast(my_pos_efcs, Position(best_x_efcs, best_y_efcs, 0))\n\n #if d_player2ball < self.kick_distance:\n # direct_kick_feasible, kick_x, kick_y = TeamMasterSupporting.check_if_direct_goal_feasible(None, ball_pos_efcs, opponents_positions_wcs, self.my_team_id)\n\n return PlayerCommand(l_v, r_v, 0)\n\n\n\nclass DummyStrikerWithPredefinedPointsToMove: #(Robot)\n def __init__(self):\n self.read = True\n self.points_to_visit = [Position(1,1, 0),\n Position(-1, -1, 0),\n Position(2, 0, 0),\n Position(0, -2, 0),\n Position(-5, 2, 0),\n Position(5, -3, 0)]\n self.current_goal = 0\n self.goal_threshold = 0.1\n\n def get_action(self, my_pos_efcs:Position, ball_pos_efcs: Position, team_positions_wcs=None, opponents_positions_wcs=None):\n goal_pos = self.points_to_visit[self.current_goal]\n\n if np.hypot(goal_pos.x - my_pos_efcs.x, goal_pos.y - my_pos_efcs.y) < self.goal_threshold:\n self.current_goal += 1\n self.current_goal %= 6\n l_rpm, r_rpm, = simple_go_to_action(my_pos_efcs, goal_pos)\n\n return PlayerCommand(l_rpm, r_rpm, 0)\n\n\n\nclass RotateController: #(Robot)\n def __init__(self):\n pass\n\n def get_action(self, my_pos_efcs:Position, ball_pos_efcs: Position):\n # l = 1, r = 2: d = 0.15, r = 0.075\n # l = 1, r = 3, d = 0.1, r = 0.05\n\n l_rpm = 1\n r_rpm = 3\n print(\"[\", my_pos_efcs.x, \",\", my_pos_efcs.y, \"],\")\n return PlayerCommand(l_rpm, r_rpm, 0)" ]
[ [ "numpy.sign", "numpy.hypot", "numpy.argsort" ] ]
KyraKerz/phyre
[ "cec6c9be22f96fef6793fba079935b2071735ac5" ]
[ "agents/random_agents.py" ]
[ "import random\n\nimport numpy as np\nimport phyre\nfrom tqdm import tqdm_notebook\n\nimport animations\n\nrandom.seed(0)\n\n# Evaluation Setup\neval_setup = 'ball_cross_template'\nfold_id = 0 # For simplicity, we will just use one fold for evaluation.\ntrain_tasks, dev_tasks, test_tasks = phyre.get_fold(eval_setup,0)\naction_tier = phyre.eval_setup_to_action_tier(eval_setup)\ntasks = dev_tasks[0:1]\nprint((tasks))\nsimulator = phyre.initialize_simulator(tasks, action_tier)\nactions = simulator.build_discrete_action_space(max_actions=1000)\n\n\ndef evaluate_random_agent(tasks, tier):\n # Create a simulator for the task and tier.\n simulator = phyre.initialize_simulator(tasks, tier)\n evaluator = phyre.Evaluator(tasks)\n assert tuple(tasks) == simulator.task_ids\n images = []\n actions = []\n for task_index in tqdm_notebook(range(len(tasks)), desc='Evaluate tasks'):\n while evaluator.get_attempts_for_task(\n task_index) < phyre.MAX_TEST_ATTEMPTS:\n # Sample a random valid action from the simulator for the given action space.\n action = simulator.sample()\n # Simulate the given action and add the status from taking the action to the evaluator.\n status = simulator.simulate_action(task_index,\n action,\n need_images=True)\n\n stati = status.status\n actions.append(action)\n images.append(status.images)\n evaluator.maybe_log_attempt(task_index, stati)\n return evaluator, images, actions\nprint(\"hello\")\n\nfinish, images, actionlog = evaluate_random_agent(tasks, action_tier)\nfinishlog = finish._log\n\nindices1 = [i for i, x in enumerate(images) if x is None]\n\n\nfor i in reversed(indices1):\n del images[i]\n del actionlog[i]\n #del finishlog[i]\n#print(images)\nindices2 = [i for i, x in enumerate(images) if images[i].shape[0] != 17]\nfor i in reversed(indices2):\n del images[i]\n del actionlog[i]\n del finishlog[i]\n\n\nnew_list = [ seq[1] for seq in finishlog ]\n\nprint(len(images))\nprint(\"actionlog:\",len(actionlog))\nprint(len(new_list))\nimagearray = np.asarray(images)\nprint(imagearray.shape)\nnp.save('ImagesLog1.npy', images)\nnp.save('EvaluationsLog1.npy', new_list)\nnp.save('ActionLog1.npy1', actionlog)\n\n\n\nanimations.animateSimulatedTask(images)\n" ]
[ [ "numpy.asarray", "numpy.save" ] ]
mariafabiano/childrens_asr_transfer_learning
[ "901e0a9e9cf4412655452b490c47f32d60484a84" ]
[ "wav2vec2_data.py" ]
[ "from collections import defaultdict\nimport glob\nimport os\nimport re\nimport numpy as np\n\nimport librosa\nfrom torch.utils.data import Dataset\n\nclass MySTDataset(Dataset):\n \"\"\"MyST dataset.\"\"\"\n\n def __init__(self, data_path, sample_rate=16000,\n\t\t chars_to_ignore_regex='[\\,\\?\\.\\!\\-\\;\\:\\\"]'):\n \"\"\"\n Args:\n data_path (str): path to MyST dataset.\n \"\"\"\n self.data_path = data_path\n self.audio_files = glob.glob(os.path.join(self.data_path,\n \t\t\t\t\t\t\t\t\t\t '*/*/*.flac'))\n self.sample_rate = sample_rate\n self.chars_to_ignore = chars_to_ignore_regex\n self.remove_short_audio()\n print(f'# of audio files after removing short audio: {len(self.audio_files)}')\n self.processor = None\n\n def init_processor(self, processor):\n self.processor = processor\n\n def read_text_file(self, text_file):\n with open(text_file, 'r') as f:\n text = f.read().lower().strip()\n text = re.sub(self.chars_to_ignore, '', text)\n text = re.sub('<[a-zA-Z|_]*>', '', text)\n text = text.replace('(())', '') # Ignore noise.\n return text\n\n def extract_all_chars(self):\n vocab = set()\n for audio_file in self.audio_files:\n text_file = audio_file[:-4] + 'trn'\n text = self.read_text_file(text_file)\n vocab.update(text)\n return {\"vocab\": [vocab]}\n\n def remove_short_audio(self):\n min_input_length_in_sec = 1.0\n min_char_count = 2\n files_to_keep = []\n for i in range(len(self.audio_files)):\n audio_input, sample_rate = librosa.load(self.audio_files[i], sr=self.sample_rate)\n text_file = self.audio_files[i][:-4] + 'trn'\n text = self.read_text_file(text_file)\n if len(audio_input) >= sample_rate*min_input_length_in_sec and len(text) > min_char_count:\n files_to_keep.append(self.audio_files[i])\n self.audio_files = files_to_keep\n \n def prepare_dataset(self, audio_array, text):\n batch = {}\n # batched output is \"un-batched\" to ensure mapping is correct\n batch[\"input_values\"] = self.processor(np.array(audio_array),\n sampling_rate=self.sample_rate).input_values[0]\n batch[\"input_length\"] = len(batch[\"input_values\"])\n \n with self.processor.as_target_processor():\n batch[\"labels\"] = self.processor(text).input_ids\n return batch\n\n def __len__(self):\n return len(self.audio_files)\n\t\t\n def __getitem__(self, idx):\n if isinstance(idx, int):\n audio_file = self.audio_files[idx]\n audio_input, sample_rate = librosa.load(audio_file, sr=self.sample_rate)\n text_file = audio_file[:-4] + 'trn'\n text = self.read_text_file(text_file)\n if self.processor is not None:\n prepared_audio_dict = self.prepare_dataset(audio_input, text)\n return prepared_audio_dict\n return {'audio': {'array': audio_input,\n 'path': audio_file,\n 'sampling_rate': self.sample_rate},\n 'file': audio_file,\n 'text': text} \n else:\n audio_files = None\n if isinstance(idx, slice):\n audio_files = self.audio_files[idx]\n elif isinstance(idx, list):\n audio_files = [self.audio_files[i] for i in idx]\n audio_input = [librosa.load(audio_file, sr=self.sample_rate)[0] for audio_file in audio_files]\n text_files = [x[:-4] + 'trn' for x in audio_files]\n texts = []\n for text_file in text_files:\n text = self.read_text_file(text_file)\n texts.append(text)\n return {'audio': [{'array': audio,\n 'path': path,\n 'sampling_rate': self.sample_rate} for audio, path in zip(audio_input, audio_files)],\n 'file': audio_files,\n 'text': texts}\n\n\nclass ZenodoDataset(Dataset):\n\t\"\"\"Zenodo dataset.\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t data_path,\n\t\t\t\t sample_rate=16000,\n\t\t\t\t 
chars_to_ignore_regex='[\\,\\?\\.\\!\\-\\;\\:\\\"]',\n\t\t\t\t words_sentences='english_words_sentences/*/studio_mic/*/*.wav',\n\t\t\t\t free_speech='english_free_speech/*/studio_mic/*/*.wav'):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tdata_path (str): path to Zenodo dataset.\n\t\t\"\"\"\n\t\tself.data_path = data_path\n\t\tself.audio_files = glob.glob(os.path.join(self.data_path, words_sentences)) + glob.glob(os.path.join(self.data_path, free_speech))\n\t\tself.sample_rate = sample_rate\n\t\tself.chars_to_ignore = chars_to_ignore_regex\n\t\tself.remove_short_audio()\n\t\tprint(f'# of audio files after removing short audio: {len(self.audio_files)}')\n\t\tself.processor = None\n\n\tdef init_processor(self, processor):\n\t\tself.processor = processor\n\n\tdef read_text(self, audio_file):\n\t\t# Get the file name and ignore the .wav extension.\n\t\ttext = audio_file.split('/')[-1][:-4]\n\t\t# Split by underscore, join together by space, convert to lowercase.\n\t\ttext = ' '.join(text.split('_')).lower().strip()\n\t\ttext = text.lower().strip()\n\t\ttext = re.sub(self.chars_to_ignore, '', text)\n\t\ttext = re.sub('<[a-zA-Z|_]*>', '', text)\n\t\ttext = text.replace('(())', '') # Ignore noise.\n\t\treturn text\n\n\tdef remove_short_audio(self):\n\t\tmin_input_length_in_sec = 1.0\n\t\tmin_char_count = 2\n\t\tfiles_to_keep = []\n\t\tfor i in range(len(self.audio_files)):\n\t\t\taudio_input, sample_rate = librosa.load(self.audio_files[i], sr=self.sample_rate)\n\t\t\ttext = self.read_text(self.audio_files[i])\n\t\t\tif len(audio_input) >= sample_rate*min_input_length_in_sec and len(text) > min_char_count:\n\t\t\t\tfiles_to_keep.append(self.audio_files[i])\n\t\tself.audio_files = files_to_keep\n\n\tdef prepare_dataset(self, audio_array, text):\n\t\tbatch = {}\n\t\t# batched output is \"un-batched\" to ensure mapping is correct\n\t\tbatch[\"input_values\"] = self.processor(np.array(audio_array),\n\t\t sampling_rate=self.sample_rate).input_values[0]\n\t\tbatch[\"input_length\"] = len(batch[\"input_values\"])\n\n\t\twith self.processor.as_target_processor():\n\t\t\tbatch[\"labels\"] = self.processor(text).input_ids\n\t\treturn batch\n\n\tdef __len__(self):\n\t\treturn len(self.audio_files)\n\n\tdef __getitem__(self, idx):\n\t\tif isinstance(idx, int):\n\t\t\taudio_file = self.audio_files[idx]\n\t\t\taudio_input, sample_rate = librosa.load(audio_file, sr=16000)\n\t\t\ttext = self.read_text(audio_file)\n\t\t\tif self.processor is not None:\n\t\t\t\tprepared_audio_dict = self.prepare_dataset(audio_input, text)\n\t\t\t\treturn prepared_audio_dict\n\t\t\treturn {'audio': {'array': audio_input,\n\t\t\t 'path': audio_file,\n\t\t\t 'sampling_rate': self.sample_rate},\n\t\t\t 'file': audio_file,\n\t\t\t 'text': text} \n\t\telse:\n\t\t\taudio_files = None\n\t\t\tif isinstance(idx, slice):\n\t\t\t\taudio_files = self.audio_files[idx]\n\t\t\telif isinstance(idx, list):\n\t\t\t\taudio_files = [self.audio_files[i] for i in idx]\n\t\t\taudio_input = [librosa.load(audio_file, sr=self.sample_rate)[0] for audio_file in audio_files]\n\t\t\ttexts = []\n\t\t\tfor file in audio_files:\n\t\t\t\ttext = self.read_text(file)\n\t\t\t\ttexts.append(text)\n\t\t\treturn {'audio': [{'array': audio,\n\t\t\t 'path': path,\n\t\t\t 'sampling_rate': self.sample_rate} for audio, path in zip(audio_input, audio_files)],\n\t\t\t 'file': audio_files,\n\t\t\t 'text': texts} \t\n" ]
[ [ "numpy.array" ] ]
chrisvalx/scipy
[ "41f812e6aafd9b5698c56ef17d175da223bd6b76" ]
[ "scipy/spatial/tests/test_distance.py" ]
[ "#\n# Author: Damian Eads\n# Date: April 17, 2008\n#\n# Copyright (C) 2008 Damian Eads\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# 3. The name of the author may not be used to endorse or promote\n# products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS\n# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE\n# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport os.path\n\nfrom functools import wraps, partial\n\nimport numpy as np\nimport warnings\nfrom numpy.linalg import norm\nfrom numpy.testing import (verbose, assert_,\n assert_array_equal, assert_equal,\n assert_almost_equal, assert_allclose,\n suppress_warnings)\nimport pytest\nfrom pytest import raises as assert_raises\n\nfrom scipy.spatial.distance import (squareform, pdist, cdist, num_obs_y,\n num_obs_dm, is_valid_dm, is_valid_y,\n _validate_vector, _METRICS_NAMES)\n\n# these were missing: chebyshev cityblock kulsinski\n# jensenshannon, matching and seuclidean are referenced by string name.\nfrom scipy.spatial.distance import (braycurtis, canberra, chebyshev, cityblock,\n correlation, cosine, dice, euclidean,\n hamming, jaccard, jensenshannon,\n kulsinski, mahalanobis, matching,\n minkowski, rogerstanimoto, russellrao,\n seuclidean, sokalmichener, sokalsneath,\n sqeuclidean, yule)\nfrom scipy.spatial.distance import wminkowski as old_wminkowski\n\n_filenames = [\n \"cdist-X1.txt\",\n \"cdist-X2.txt\",\n \"iris.txt\",\n \"pdist-boolean-inp.txt\",\n \"pdist-chebyshev-ml-iris.txt\",\n \"pdist-chebyshev-ml.txt\",\n \"pdist-cityblock-ml-iris.txt\",\n \"pdist-cityblock-ml.txt\",\n \"pdist-correlation-ml-iris.txt\",\n \"pdist-correlation-ml.txt\",\n \"pdist-cosine-ml-iris.txt\",\n \"pdist-cosine-ml.txt\",\n \"pdist-double-inp.txt\",\n \"pdist-euclidean-ml-iris.txt\",\n \"pdist-euclidean-ml.txt\",\n \"pdist-hamming-ml.txt\",\n \"pdist-jaccard-ml.txt\",\n \"pdist-jensenshannon-ml-iris.txt\",\n \"pdist-jensenshannon-ml.txt\",\n \"pdist-minkowski-3.2-ml-iris.txt\",\n \"pdist-minkowski-3.2-ml.txt\",\n \"pdist-minkowski-5.8-ml-iris.txt\",\n \"pdist-seuclidean-ml-iris.txt\",\n \"pdist-seuclidean-ml.txt\",\n \"pdist-spearman-ml.txt\",\n \"random-bool-data.txt\",\n \"random-double-data.txt\",\n \"random-int-data.txt\",\n \"random-uint-data.txt\",\n ]\n\n_tdist = np.array([[0, 662, 877, 255, 412, 996],\n [662, 0, 295, 468, 268, 400],\n [877, 295, 0, 754, 564, 138],\n [255, 468, 754, 0, 219, 869],\n [412, 268, 564, 219, 0, 
669],\n [996, 400, 138, 869, 669, 0]], dtype='double')\n\n_ytdist = squareform(_tdist)\n\n# A hashmap of expected output arrays for the tests. These arrays\n# come from a list of text files, which are read prior to testing.\n# Each test loads inputs and outputs from this dictionary.\neo = {}\n\n\ndef load_testing_files():\n for fn in _filenames:\n name = fn.replace(\".txt\", \"\").replace(\"-ml\", \"\")\n fqfn = os.path.join(os.path.dirname(__file__), 'data', fn)\n fp = open(fqfn)\n eo[name] = np.loadtxt(fp)\n fp.close()\n eo['pdist-boolean-inp'] = np.bool_(eo['pdist-boolean-inp'])\n eo['random-bool-data'] = np.bool_(eo['random-bool-data'])\n eo['random-float32-data'] = np.float32(eo['random-double-data'])\n eo['random-int-data'] = np.int_(eo['random-int-data'])\n eo['random-uint-data'] = np.uint(eo['random-uint-data'])\n\n\nload_testing_files()\n\n\ndef _is_32bit():\n return np.intp(0).itemsize < 8\n\n\ndef _chk_asarrays(arrays, axis=None):\n arrays = [np.asanyarray(a) for a in arrays]\n if axis is None:\n # np < 1.10 ravel removes subclass from arrays\n arrays = [np.ravel(a) if a.ndim != 1 else a\n for a in arrays]\n axis = 0\n arrays = tuple(np.atleast_1d(a) for a in arrays)\n if axis < 0:\n if not all(a.ndim == arrays[0].ndim for a in arrays):\n raise ValueError(\"array ndim must be the same for neg axis\")\n axis = range(arrays[0].ndim)[axis]\n return arrays + (axis,)\n\n\ndef _chk_weights(arrays, weights=None, axis=None,\n force_weights=False, simplify_weights=True,\n pos_only=False, neg_check=False,\n nan_screen=False, mask_screen=False,\n ddof=None):\n chked = _chk_asarrays(arrays, axis=axis)\n arrays, axis = chked[:-1], chked[-1]\n\n simplify_weights = simplify_weights and not force_weights\n if not force_weights and mask_screen:\n force_weights = any(np.ma.getmask(a) is not np.ma.nomask for a in arrays)\n\n if nan_screen:\n has_nans = [np.isnan(np.sum(a)) for a in arrays]\n if any(has_nans):\n mask_screen = True\n force_weights = True\n arrays = tuple(np.ma.masked_invalid(a) if has_nan else a\n for a, has_nan in zip(arrays, has_nans))\n\n if weights is not None:\n weights = np.asanyarray(weights)\n elif force_weights:\n weights = np.ones(arrays[0].shape[axis])\n else:\n return arrays + (weights, axis)\n\n if ddof:\n weights = _freq_weights(weights)\n\n if mask_screen:\n weights = _weight_masked(arrays, weights, axis)\n\n if not all(weights.shape == (a.shape[axis],) for a in arrays):\n raise ValueError(\"weights shape must match arrays along axis\")\n if neg_check and (weights < 0).any():\n raise ValueError(\"weights cannot be negative\")\n\n if pos_only:\n pos_weights = np.nonzero(weights > 0)[0]\n if pos_weights.size < weights.size:\n arrays = tuple(np.take(a, pos_weights, axis=axis) for a in arrays)\n weights = weights[pos_weights]\n if simplify_weights and (weights == 1).all():\n weights = None\n return arrays + (weights, axis)\n\n\ndef _freq_weights(weights):\n if weights is None:\n return weights\n int_weights = weights.astype(int)\n if (weights != int_weights).any():\n raise ValueError(\"frequency (integer count-type) weights required %s\" % weights)\n return int_weights\n\n\ndef _weight_masked(arrays, weights, axis):\n if axis is None:\n axis = 0\n weights = np.asanyarray(weights)\n for a in arrays:\n axis_mask = np.ma.getmask(a)\n if axis_mask is np.ma.nomask:\n continue\n if a.ndim > 1:\n not_axes = tuple(i for i in range(a.ndim) if i != axis)\n axis_mask = axis_mask.any(axis=not_axes)\n weights *= 1 - axis_mask.astype(int)\n return weights\n\n\ndef within_tol(a, b, 
tol):\n return np.abs(a - b).max() < tol\n\n\ndef _assert_within_tol(a, b, atol=0, rtol=0, verbose_=False):\n if verbose_:\n print(np.abs(a - b).max())\n assert_allclose(a, b, rtol=rtol, atol=atol)\n\n\ndef _rand_split(arrays, weights, axis, split_per, seed=None):\n # inverse operation for stats.collapse_weights\n weights = np.array(weights, dtype=np.float64) # modified inplace; need a copy\n seeded_rand = np.random.RandomState(seed)\n\n def mytake(a, ix, axis):\n record = np.asanyarray(np.take(a, ix, axis=axis))\n return record.reshape([a.shape[i] if i != axis else 1\n for i in range(a.ndim)])\n\n n_obs = arrays[0].shape[axis]\n assert all(a.shape[axis] == n_obs for a in arrays), \"data must be aligned on sample axis\"\n for i in range(int(split_per) * n_obs):\n split_ix = seeded_rand.randint(n_obs + i)\n prev_w = weights[split_ix]\n q = seeded_rand.rand()\n weights[split_ix] = q * prev_w\n weights = np.append(weights, (1. - q) * prev_w)\n arrays = [np.append(a, mytake(a, split_ix, axis=axis),\n axis=axis) for a in arrays]\n return arrays, weights\n\n\ndef _rough_check(a, b, compare_assert=partial(assert_allclose, atol=1e-5),\n key=lambda x: x, w=None):\n check_a = key(a)\n check_b = key(b)\n try:\n if np.array(check_a != check_b).any(): # try strict equality for string types\n compare_assert(check_a, check_b)\n except AttributeError: # masked array\n compare_assert(check_a, check_b)\n except (TypeError, ValueError): # nested data structure\n for a_i, b_i in zip(check_a, check_b):\n _rough_check(a_i, b_i, compare_assert=compare_assert)\n\n# diff from test_stats:\n# n_args=2, weight_arg='w', default_axis=None\n# ma_safe = False, nan_safe = False\ndef _weight_checked(fn, n_args=2, default_axis=None, key=lambda x: x, weight_arg='w',\n squeeze=True, silent=False,\n ones_test=True, const_test=True, dup_test=True,\n split_test=True, dud_test=True, ma_safe=False, ma_very_safe=False, nan_safe=False,\n split_per=1.0, seed=0, compare_assert=partial(assert_allclose, atol=1e-5)):\n \"\"\"runs fn on its arguments 2 or 3 ways, checks that the results are the same,\n then returns the same thing it would have returned before\"\"\"\n @wraps(fn)\n def wrapped(*args, **kwargs):\n result = fn(*args, **kwargs)\n\n arrays = args[:n_args]\n rest = args[n_args:]\n weights = kwargs.get(weight_arg, None)\n axis = kwargs.get('axis', default_axis)\n\n chked = _chk_weights(arrays, weights=weights, axis=axis, force_weights=True, mask_screen=True)\n arrays, weights, axis = chked[:-2], chked[-2], chked[-1]\n if squeeze:\n arrays = [np.atleast_1d(a.squeeze()) for a in arrays]\n\n try:\n # WEIGHTS CHECK 1: EQUAL WEIGHTED OBESERVATIONS\n args = tuple(arrays) + rest\n if ones_test:\n kwargs[weight_arg] = weights\n _rough_check(result, fn(*args, **kwargs), key=key)\n if const_test:\n kwargs[weight_arg] = weights * 101.0\n _rough_check(result, fn(*args, **kwargs), key=key)\n kwargs[weight_arg] = weights * 0.101\n try:\n _rough_check(result, fn(*args, **kwargs), key=key)\n except Exception as e:\n raise type(e)((e, arrays, weights)) from e\n\n # WEIGHTS CHECK 2: ADDL 0-WEIGHTED OBS\n if dud_test:\n # add randomly resampled rows, weighted at 0\n dud_arrays, dud_weights = _rand_split(arrays, weights, axis, split_per=split_per, seed=seed)\n dud_weights[:weights.size] = weights # not exactly 1 because of masked arrays\n dud_weights[weights.size:] = 0\n dud_args = tuple(dud_arrays) + rest\n kwargs[weight_arg] = dud_weights\n _rough_check(result, fn(*dud_args, **kwargs), key=key)\n # increase the value of those 0-weighted rows\n for 
a in dud_arrays:\n indexer = [slice(None)] * a.ndim\n indexer[axis] = slice(weights.size, None)\n indexer = tuple(indexer)\n a[indexer] = a[indexer] * 101\n dud_args = tuple(dud_arrays) + rest\n _rough_check(result, fn(*dud_args, **kwargs), key=key)\n # set those 0-weighted rows to NaNs\n for a in dud_arrays:\n indexer = [slice(None)] * a.ndim\n indexer[axis] = slice(weights.size, None)\n indexer = tuple(indexer)\n a[indexer] = a[indexer] * np.nan\n if kwargs.get(\"nan_policy\", None) == \"omit\" and nan_safe:\n dud_args = tuple(dud_arrays) + rest\n _rough_check(result, fn(*dud_args, **kwargs), key=key)\n # mask out those nan values\n if ma_safe:\n dud_arrays = [np.ma.masked_invalid(a) for a in dud_arrays]\n dud_args = tuple(dud_arrays) + rest\n _rough_check(result, fn(*dud_args, **kwargs), key=key)\n if ma_very_safe:\n kwargs[weight_arg] = None\n _rough_check(result, fn(*dud_args, **kwargs), key=key)\n del dud_arrays, dud_args, dud_weights\n\n # WEIGHTS CHECK 3: DUPLICATE DATA (DUMB SPLITTING)\n if dup_test:\n dup_arrays = [np.append(a, a, axis=axis) for a in arrays]\n dup_weights = np.append(weights, weights) / 2.0\n dup_args = tuple(dup_arrays) + rest\n kwargs[weight_arg] = dup_weights\n _rough_check(result, fn(*dup_args, **kwargs), key=key)\n del dup_args, dup_arrays, dup_weights\n\n # WEIGHT CHECK 3: RANDOM SPLITTING\n if split_test and split_per > 0:\n split_arrays, split_weights = _rand_split(arrays, weights, axis, split_per=split_per, seed=seed)\n split_args = tuple(split_arrays) + rest\n kwargs[weight_arg] = split_weights\n _rough_check(result, fn(*split_args, **kwargs), key=key)\n except NotImplementedError as e:\n # when some combination of arguments makes weighting impossible,\n # this is the desired response\n if not silent:\n warnings.warn(\"%s NotImplemented weights: %s\" % (fn.__name__, e))\n return result\n return wrapped\n\n\nwcdist = _weight_checked(cdist, default_axis=1, squeeze=False)\nwcdist_no_const = _weight_checked(cdist, default_axis=1, squeeze=False, const_test=False)\nwpdist = _weight_checked(pdist, default_axis=1, squeeze=False, n_args=1)\nwpdist_no_const = _weight_checked(pdist, default_axis=1, squeeze=False, const_test=False, n_args=1)\nwrogerstanimoto = _weight_checked(rogerstanimoto)\nwmatching = whamming = _weight_checked(hamming, dud_test=False)\nwyule = _weight_checked(yule)\nwdice = _weight_checked(dice)\nwcityblock = _weight_checked(cityblock)\nwchebyshev = _weight_checked(chebyshev)\nwcosine = _weight_checked(cosine)\nwcorrelation = _weight_checked(correlation)\nwkulsinski = _weight_checked(kulsinski)\nwminkowski = _weight_checked(minkowski, const_test=False)\nwjaccard = _weight_checked(jaccard)\nweuclidean = _weight_checked(euclidean, const_test=False)\nwsqeuclidean = _weight_checked(sqeuclidean, const_test=False)\nwbraycurtis = _weight_checked(braycurtis)\nwcanberra = _weight_checked(canberra, const_test=False)\nwsokalsneath = _weight_checked(sokalsneath)\nwsokalmichener = _weight_checked(sokalmichener)\nwrussellrao = _weight_checked(russellrao)\n\n\nclass TestCdist(object):\n\n def setup_method(self):\n self.rnd_eo_names = ['random-float32-data', 'random-int-data',\n 'random-uint-data', 'random-double-data',\n 'random-bool-data']\n self.valid_upcasts = {'bool': [np.uint, np.int_, np.float32, np.double],\n 'uint': [np.int_, np.float32, np.double],\n 'int': [np.float32, np.double],\n 'float32': [np.double]}\n\n def test_cdist_extra_args(self):\n # Tests that args and kwargs are correctly handled\n def _my_metric(x, y, arg, kwarg=1, kwarg2=2):\n 
return arg + kwarg + kwarg2\n\n X1 = [[1., 2., 3.], [1.2, 2.3, 3.4], [2.2, 2.3, 4.4]]\n X2 = [[7., 5., 8.], [7.5, 5.8, 8.4], [5.5, 5.8, 4.4]]\n kwargs = {'N0tV4l1D_p4raM': 3.14, \"w\":np.arange(3)}\n args = [3.14] * 200\n with suppress_warnings() as w:\n w.filter(DeprecationWarning)\n for metric in _METRICS_NAMES:\n assert_raises(TypeError, cdist, X1, X2,\n metric=metric, **kwargs)\n assert_raises(TypeError, cdist, X1, X2,\n metric=eval(metric), **kwargs)\n assert_raises(TypeError, cdist, X1, X2,\n metric=\"test_\" + metric, **kwargs)\n assert_raises(TypeError, cdist, X1, X2,\n metric=metric, *args)\n assert_raises(TypeError, cdist, X1, X2,\n metric=eval(metric), *args)\n assert_raises(TypeError, cdist, X1, X2,\n metric=\"test_\" + metric, *args)\n\n assert_raises(TypeError, cdist, X1, X2, _my_metric)\n assert_raises(TypeError, cdist, X1, X2, _my_metric, *args)\n assert_raises(TypeError, cdist, X1, X2, _my_metric, **kwargs)\n assert_raises(TypeError, cdist, X1, X2, _my_metric,\n kwarg=2.2, kwarg2=3.3)\n assert_raises(TypeError, cdist, X1, X2, _my_metric, 1, 2, kwarg=2.2)\n\n assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1, 2.2, 3.3)\n assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1, 2.2)\n assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1)\n assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1,\n kwarg=2.2, kwarg2=3.3)\n\n # this should work\n assert_allclose(cdist(X1, X2, metric=_my_metric,\n arg=1.1, kwarg2=3.3), 5.4)\n\n def test_cdist_euclidean_random_unicode(self):\n eps = 1e-07\n X1 = eo['cdist-X1']\n X2 = eo['cdist-X2']\n Y1 = wcdist_no_const(X1, X2, 'euclidean')\n Y2 = wcdist_no_const(X1, X2, 'test_euclidean')\n _assert_within_tol(Y1, Y2, eps, verbose > 2)\n\n @pytest.mark.parametrize(\"p\", [1.0, 1.23, 2.0, 3.8, 4.6, np.inf])\n def test_cdist_minkowski_random(self, p):\n eps = 1e-07\n X1 = eo['cdist-X1']\n X2 = eo['cdist-X2']\n Y1 = wcdist_no_const(X1, X2, 'minkowski', p=p)\n Y2 = wcdist_no_const(X1, X2, 'test_minkowski', p=p)\n _assert_within_tol(Y1, Y2, eps, verbose > 2)\n\n def test_cdist_cosine_random(self):\n eps = 1e-07\n X1 = eo['cdist-X1']\n X2 = eo['cdist-X2']\n Y1 = wcdist(X1, X2, 'cosine')\n\n # Naive implementation\n def norms(X):\n return np.linalg.norm(X, axis=1).reshape(-1, 1)\n\n Y2 = 1 - np.dot((X1 / norms(X1)), (X2 / norms(X2)).T)\n\n _assert_within_tol(Y1, Y2, eps, verbose > 2)\n\n def test_cdist_mahalanobis(self):\n # 1-dimensional observations\n x1 = np.array([[2], [3]])\n x2 = np.array([[2], [5]])\n dist = cdist(x1, x2, metric='mahalanobis')\n assert_allclose(dist, [[0.0, np.sqrt(4.5)], [np.sqrt(0.5), np.sqrt(2)]])\n\n # 2-dimensional observations\n x1 = np.array([[0, 0], [-1, 0]])\n x2 = np.array([[0, 2], [1, 0], [0, -2]])\n dist = cdist(x1, x2, metric='mahalanobis')\n rt2 = np.sqrt(2)\n assert_allclose(dist, [[rt2, rt2, rt2], [2, 2 * rt2, 2]])\n\n # Too few observations\n assert_raises(ValueError,\n cdist, [[0, 1]], [[2, 3]], metric='mahalanobis')\n\n def test_cdist_custom_notdouble(self):\n class myclass(object):\n pass\n\n def _my_metric(x, y):\n if not isinstance(x[0], myclass) or not isinstance(y[0], myclass):\n raise ValueError(\"Type has been changed\")\n return 1.123\n data = np.array([[myclass()]], dtype=object)\n cdist_y = cdist(data, data, metric=_my_metric)\n right_y = 1.123\n assert_equal(cdist_y, right_y, verbose=verbose > 2)\n\n def _check_calling_conventions(self, X1, X2, metric, eps=1e-07, **kwargs):\n # helper function for test_cdist_calling_conventions\n try:\n y1 = cdist(X1, X2, metric=metric, **kwargs)\n y2 = 
cdist(X1, X2, metric=eval(metric), **kwargs)\n y3 = cdist(X1, X2, metric=\"test_\" + metric, **kwargs)\n except Exception as e:\n e_cls = e.__class__\n if verbose > 2:\n print(e_cls.__name__)\n print(e)\n assert_raises(e_cls, cdist, X1, X2, metric=metric, **kwargs)\n assert_raises(e_cls, cdist, X1, X2, metric=eval(metric), **kwargs)\n assert_raises(e_cls, cdist, X1, X2, metric=\"test_\" + metric, **kwargs)\n else:\n _assert_within_tol(y1, y2, rtol=eps, verbose_=verbose > 2)\n _assert_within_tol(y1, y3, rtol=eps, verbose_=verbose > 2)\n\n def test_cdist_calling_conventions(self):\n # Ensures that specifying the metric with a str or scipy function\n # gives the same behaviour (i.e. same result or same exception).\n # NOTE: The correctness should be checked within each metric tests.\n for eo_name in self.rnd_eo_names:\n # subsampling input data to speed-up tests\n # NOTE: num samples needs to be > than dimensions for mahalanobis\n X1 = eo[eo_name][::5, ::-2]\n X2 = eo[eo_name][1::5, ::2]\n for metric in _METRICS_NAMES:\n if verbose > 2:\n print(\"testing: \", metric, \" with: \", eo_name)\n if metric == 'wminkowski':\n continue\n if metric in {'dice', 'yule', 'kulsinski', 'matching',\n 'rogerstanimoto', 'russellrao', 'sokalmichener',\n 'sokalsneath'} and 'bool' not in eo_name:\n # python version permits non-bools e.g. for fuzzy logic\n continue\n self._check_calling_conventions(X1, X2, metric)\n\n # Testing built-in metrics with extra args\n if metric == \"seuclidean\":\n X12 = np.vstack([X1, X2]).astype(np.double)\n V = np.var(X12, axis=0, ddof=1)\n self._check_calling_conventions(X1, X2, metric, V=V)\n elif metric == \"mahalanobis\":\n X12 = np.vstack([X1, X2]).astype(np.double)\n V = np.atleast_2d(np.cov(X12.T))\n VI = np.array(np.linalg.inv(V).T)\n self._check_calling_conventions(X1, X2, metric, VI=VI)\n\n def test_cdist_dtype_equivalence(self):\n # Tests that the result is not affected by type up-casting\n eps = 1e-07\n tests = [(eo['random-bool-data'], self.valid_upcasts['bool']),\n (eo['random-uint-data'], self.valid_upcasts['uint']),\n (eo['random-int-data'], self.valid_upcasts['int']),\n (eo['random-float32-data'], self.valid_upcasts['float32'])]\n for metric in _METRICS_NAMES:\n for test in tests:\n X1 = test[0][::5, ::-2]\n X2 = test[0][1::5, ::2]\n try:\n y1 = cdist(X1, X2, metric=metric)\n except Exception as e:\n e_cls = e.__class__\n if verbose > 2:\n print(e_cls.__name__)\n print(e)\n for new_type in test[1]:\n X1new = new_type(X1)\n X2new = new_type(X2)\n assert_raises(e_cls, cdist, X1new, X2new, metric=metric)\n else:\n for new_type in test[1]:\n y2 = cdist(new_type(X1), new_type(X2), metric=metric)\n _assert_within_tol(y1, y2, eps, verbose > 2)\n\n def test_cdist_out(self):\n # Test that out parameter works properly\n eps = 1e-07\n X1 = eo['cdist-X1']\n X2 = eo['cdist-X2']\n out_r, out_c = X1.shape[0], X2.shape[0]\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning,\n message=\"'wminkowski' metric is deprecated\")\n for metric in _METRICS_NAMES:\n kwargs = dict()\n if metric in ['minkowski', 'wminkowski']:\n kwargs['p'] = 1.23\n if metric == 'wminkowski':\n kwargs['w'] = 1.0 / X1.std(axis=0)\n out1 = np.empty((out_r, out_c), dtype=np.double)\n Y1 = cdist(X1, X2, metric, **kwargs)\n Y2 = cdist(X1, X2, metric, out=out1, **kwargs)\n # test that output is numerically equivalent\n _assert_within_tol(Y1, Y2, eps, verbose > 2)\n # test that Y_test1 and out1 are the same object\n assert_(Y2 is out1)\n # test for incorrect shape\n out2 = np.empty((out_r-1, out_c+1), 
dtype=np.double)\n assert_raises(ValueError,\n cdist, X1, X2, metric, out=out2, **kwargs)\n # test for C-contiguous order\n out3 = np.empty(\n (2 * out_r, 2 * out_c), dtype=np.double)[::2, ::2]\n out4 = np.empty((out_r, out_c), dtype=np.double, order='F')\n assert_raises(ValueError,\n cdist, X1, X2, metric, out=out3, **kwargs)\n assert_raises(ValueError,\n cdist, X1, X2, metric, out=out4, **kwargs)\n # test for incorrect dtype\n out5 = np.empty((out_r, out_c), dtype=np.int64)\n assert_raises(ValueError,\n cdist, X1, X2, metric, out=out5, **kwargs)\n\n def test_striding(self):\n # test that striding is handled correct with calls to\n # _copy_array_if_base_present\n eps = 1e-07\n X1 = eo['cdist-X1'][::2, ::2]\n X2 = eo['cdist-X2'][::2, ::2]\n X1_copy = X1.copy()\n X2_copy = X2.copy()\n\n # confirm equivalence\n assert_equal(X1, X1_copy)\n assert_equal(X2, X2_copy)\n # confirm contiguity\n assert_(not X1.flags.c_contiguous)\n assert_(not X2.flags.c_contiguous)\n assert_(X1_copy.flags.c_contiguous)\n assert_(X2_copy.flags.c_contiguous)\n\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \"'wminkowski' metric is deprecated\")\n for metric in _METRICS_NAMES:\n kwargs = dict()\n if metric in ['minkowski', 'wminkowski']:\n kwargs['p'] = 1.23\n if metric == 'wminkowski':\n kwargs['w'] = 1.0 / X1.std(axis=0)\n Y1 = cdist(X1, X2, metric, **kwargs)\n Y2 = cdist(X1_copy, X2_copy, metric, **kwargs)\n # test that output is numerically equivalent\n _assert_within_tol(Y1, Y2, eps, verbose > 2)\n\nclass TestPdist(object):\n\n def setup_method(self):\n self.rnd_eo_names = ['random-float32-data', 'random-int-data',\n 'random-uint-data', 'random-double-data',\n 'random-bool-data']\n self.valid_upcasts = {'bool': [np.uint, np.int_, np.float32, np.double],\n 'uint': [np.int_, np.float32, np.double],\n 'int': [np.float32, np.double],\n 'float32': [np.double]}\n\n def test_pdist_extra_args(self):\n # Tests that args and kwargs are correctly handled\n def _my_metric(x, y, arg, kwarg=1, kwarg2=2):\n return arg + kwarg + kwarg2\n\n X1 = [[1., 2.], [1.2, 2.3], [2.2, 2.3]]\n kwargs = {'N0tV4l1D_p4raM': 3.14, \"w\":np.arange(2)}\n args = [3.14] * 200\n with suppress_warnings() as w:\n w.filter(DeprecationWarning)\n for metric in _METRICS_NAMES:\n assert_raises(TypeError, pdist, X1, metric=metric, **kwargs)\n assert_raises(TypeError, pdist, X1,\n metric=eval(metric), **kwargs)\n assert_raises(TypeError, pdist, X1,\n metric=\"test_\" + metric, **kwargs)\n assert_raises(TypeError, pdist, X1, metric=metric, *args)\n assert_raises(TypeError, pdist, X1, metric=eval(metric), *args)\n assert_raises(TypeError, pdist, X1,\n metric=\"test_\" + metric, *args)\n\n assert_raises(TypeError, pdist, X1, _my_metric)\n assert_raises(TypeError, pdist, X1, _my_metric, *args)\n assert_raises(TypeError, pdist, X1, _my_metric, **kwargs)\n assert_raises(TypeError, pdist, X1, _my_metric,\n kwarg=2.2, kwarg2=3.3)\n assert_raises(TypeError, pdist, X1, _my_metric, 1, 2, kwarg=2.2)\n\n assert_raises(TypeError, pdist, X1, _my_metric, 1.1, 2.2, 3.3)\n assert_raises(TypeError, pdist, X1, _my_metric, 1.1, 2.2)\n assert_raises(TypeError, pdist, X1, _my_metric, 1.1)\n assert_raises(TypeError, pdist, X1, _my_metric, 1.1,\n kwarg=2.2, kwarg2=3.3)\n\n # these should work\n assert_allclose(pdist(X1, metric=_my_metric,\n arg=1.1, kwarg2=3.3), 5.4)\n\n def test_pdist_euclidean_random(self):\n eps = 1e-07\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-euclidean']\n Y_test1 = wpdist_no_const(X, 'euclidean')\n _assert_within_tol(Y_test1, 
Y_right, eps)\n\n def test_pdist_euclidean_random_u(self):\n eps = 1e-07\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-euclidean']\n Y_test1 = wpdist_no_const(X, 'euclidean')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_euclidean_random_float32(self):\n eps = 1e-07\n X = np.float32(eo['pdist-double-inp'])\n Y_right = eo['pdist-euclidean']\n Y_test1 = wpdist_no_const(X, 'euclidean')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_euclidean_random_nonC(self):\n eps = 1e-07\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-euclidean']\n Y_test2 = wpdist_no_const(X, 'test_euclidean')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n @pytest.mark.slow\n def test_pdist_euclidean_iris_double(self):\n eps = 1e-07\n X = eo['iris']\n Y_right = eo['pdist-euclidean-iris']\n Y_test1 = wpdist_no_const(X, 'euclidean')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n @pytest.mark.slow\n def test_pdist_euclidean_iris_float32(self):\n eps = 1e-06\n X = np.float32(eo['iris'])\n Y_right = eo['pdist-euclidean-iris']\n Y_test1 = wpdist_no_const(X, 'euclidean')\n _assert_within_tol(Y_test1, Y_right, eps, verbose > 2)\n\n @pytest.mark.slow\n def test_pdist_euclidean_iris_nonC(self):\n # Test pdist(X, 'test_euclidean') [the non-C implementation] on the\n # Iris data set.\n eps = 1e-07\n X = eo['iris']\n Y_right = eo['pdist-euclidean-iris']\n Y_test2 = wpdist_no_const(X, 'test_euclidean')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_seuclidean_random(self):\n eps = 1e-05\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-seuclidean']\n Y_test1 = pdist(X, 'seuclidean')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_seuclidean_random_float32(self):\n eps = 1e-05\n X = np.float32(eo['pdist-double-inp'])\n Y_right = eo['pdist-seuclidean']\n Y_test1 = pdist(X, 'seuclidean')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n # Check no error is raise when V has float32 dtype (#11171).\n V = np.var(X, axis=0, ddof=1)\n Y_test2 = pdist(X, 'seuclidean', V=V)\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_seuclidean_random_nonC(self):\n # Test pdist(X, 'test_sqeuclidean') [the non-C implementation]\n eps = 1e-05\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-seuclidean']\n Y_test2 = pdist(X, 'test_seuclidean')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_seuclidean_iris(self):\n eps = 1e-05\n X = eo['iris']\n Y_right = eo['pdist-seuclidean-iris']\n Y_test1 = pdist(X, 'seuclidean')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_seuclidean_iris_float32(self):\n # Tests pdist(X, 'seuclidean') on the Iris data set (float32).\n eps = 1e-05\n X = np.float32(eo['iris'])\n Y_right = eo['pdist-seuclidean-iris']\n Y_test1 = pdist(X, 'seuclidean')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_seuclidean_iris_nonC(self):\n # Test pdist(X, 'test_seuclidean') [the non-C implementation] on the\n # Iris data set.\n eps = 1e-05\n X = eo['iris']\n Y_right = eo['pdist-seuclidean-iris']\n Y_test2 = pdist(X, 'test_seuclidean')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_cosine_random(self):\n eps = 1e-08\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-cosine']\n Y_test1 = wpdist(X, 'cosine')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_cosine_random_float32(self):\n eps = 1e-08\n X = np.float32(eo['pdist-double-inp'])\n Y_right = eo['pdist-cosine']\n Y_test1 = wpdist(X, 'cosine')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_cosine_random_nonC(self):\n # 
Test pdist(X, 'test_cosine') [the non-C implementation]\n eps = 1e-08\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-cosine']\n Y_test2 = wpdist(X, 'test_cosine')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n @pytest.mark.slow\n def test_pdist_cosine_iris(self):\n eps = 1e-08\n X = eo['iris']\n Y_right = eo['pdist-cosine-iris']\n Y_test1 = wpdist(X, 'cosine')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n @pytest.mark.slow\n def test_pdist_cosine_iris_float32(self):\n eps = 1e-07\n X = np.float32(eo['iris'])\n Y_right = eo['pdist-cosine-iris']\n Y_test1 = wpdist(X, 'cosine')\n _assert_within_tol(Y_test1, Y_right, eps, verbose > 2)\n\n @pytest.mark.slow\n def test_pdist_cosine_iris_nonC(self):\n eps = 1e-08\n X = eo['iris']\n Y_right = eo['pdist-cosine-iris']\n Y_test2 = wpdist(X, 'test_cosine')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_cosine_bounds(self):\n # Test adapted from @joernhees's example at gh-5208: case where\n # cosine distance used to be negative. XXX: very sensitive to the\n # specific norm computation.\n x = np.abs(np.random.RandomState(1337).rand(91))\n X = np.vstack([x, x])\n assert_(wpdist(X, 'cosine')[0] >= 0,\n msg='cosine distance should be non-negative')\n\n def test_pdist_cityblock_random(self):\n eps = 1e-06\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-cityblock']\n Y_test1 = wpdist_no_const(X, 'cityblock')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_cityblock_random_float32(self):\n eps = 1e-06\n X = np.float32(eo['pdist-double-inp'])\n Y_right = eo['pdist-cityblock']\n Y_test1 = wpdist_no_const(X, 'cityblock')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_cityblock_random_nonC(self):\n eps = 1e-06\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-cityblock']\n Y_test2 = wpdist_no_const(X, 'test_cityblock')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n @pytest.mark.slow\n def test_pdist_cityblock_iris(self):\n eps = 1e-14\n X = eo['iris']\n Y_right = eo['pdist-cityblock-iris']\n Y_test1 = wpdist_no_const(X, 'cityblock')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n @pytest.mark.slow\n def test_pdist_cityblock_iris_float32(self):\n eps = 1e-06\n X = np.float32(eo['iris'])\n Y_right = eo['pdist-cityblock-iris']\n Y_test1 = wpdist_no_const(X, 'cityblock')\n _assert_within_tol(Y_test1, Y_right, eps, verbose > 2)\n\n @pytest.mark.slow\n def test_pdist_cityblock_iris_nonC(self):\n # Test pdist(X, 'test_cityblock') [the non-C implementation] on the\n # Iris data set.\n eps = 1e-14\n X = eo['iris']\n Y_right = eo['pdist-cityblock-iris']\n Y_test2 = wpdist_no_const(X, 'test_cityblock')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_correlation_random(self):\n eps = 1e-07\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-correlation']\n Y_test1 = wpdist(X, 'correlation')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_correlation_random_float32(self):\n eps = 1e-07\n X = np.float32(eo['pdist-double-inp'])\n Y_right = eo['pdist-correlation']\n Y_test1 = wpdist(X, 'correlation')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_correlation_random_nonC(self):\n eps = 1e-07\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-correlation']\n Y_test2 = wpdist(X, 'test_correlation')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n @pytest.mark.slow\n def test_pdist_correlation_iris(self):\n eps = 1e-08\n X = eo['iris']\n Y_right = eo['pdist-correlation-iris']\n Y_test1 = wpdist(X, 'correlation')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n @pytest.mark.slow\n 
def test_pdist_correlation_iris_float32(self):\n eps = 1e-07\n X = eo['iris']\n Y_right = np.float32(eo['pdist-correlation-iris'])\n Y_test1 = wpdist(X, 'correlation')\n _assert_within_tol(Y_test1, Y_right, eps, verbose > 2)\n\n @pytest.mark.slow\n def test_pdist_correlation_iris_nonC(self):\n eps = 1e-08\n X = eo['iris']\n Y_right = eo['pdist-correlation-iris']\n Y_test2 = wpdist(X, 'test_correlation')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n @pytest.mark.parametrize(\"p\", [1.0, 2.0, 3.2, np.inf])\n def test_pdist_minkowski_random_p(self, p):\n eps = 1e-05\n X = eo['pdist-double-inp']\n Y1 = wpdist_no_const(X, 'minkowski', p=p)\n Y2 = wpdist_no_const(X, 'test_minkowski', p=p)\n _assert_within_tol(Y1, Y2, eps)\n\n def test_pdist_minkowski_random(self):\n eps = 1e-05\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-minkowski-3.2']\n Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_minkowski_random_float32(self):\n eps = 1e-05\n X = np.float32(eo['pdist-double-inp'])\n Y_right = eo['pdist-minkowski-3.2']\n Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_minkowski_random_nonC(self):\n eps = 1e-05\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-minkowski-3.2']\n Y_test2 = wpdist_no_const(X, 'test_minkowski', p=3.2)\n _assert_within_tol(Y_test2, Y_right, eps)\n\n @pytest.mark.slow\n def test_pdist_minkowski_3_2_iris(self):\n eps = 1e-07\n X = eo['iris']\n Y_right = eo['pdist-minkowski-3.2-iris']\n Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)\n _assert_within_tol(Y_test1, Y_right, eps)\n\n @pytest.mark.slow\n def test_pdist_minkowski_3_2_iris_float32(self):\n eps = 1e-06\n X = np.float32(eo['iris'])\n Y_right = eo['pdist-minkowski-3.2-iris']\n Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)\n _assert_within_tol(Y_test1, Y_right, eps)\n\n @pytest.mark.slow\n def test_pdist_minkowski_3_2_iris_nonC(self):\n eps = 1e-07\n X = eo['iris']\n Y_right = eo['pdist-minkowski-3.2-iris']\n Y_test2 = wpdist_no_const(X, 'test_minkowski', p=3.2)\n _assert_within_tol(Y_test2, Y_right, eps)\n\n @pytest.mark.slow\n def test_pdist_minkowski_5_8_iris(self):\n eps = 1e-07\n X = eo['iris']\n Y_right = eo['pdist-minkowski-5.8-iris']\n Y_test1 = wpdist_no_const(X, 'minkowski', p=5.8)\n _assert_within_tol(Y_test1, Y_right, eps)\n\n @pytest.mark.slow\n def test_pdist_minkowski_5_8_iris_float32(self):\n eps = 1e-06\n X = np.float32(eo['iris'])\n Y_right = eo['pdist-minkowski-5.8-iris']\n Y_test1 = wpdist_no_const(X, 'minkowski', p=5.8)\n _assert_within_tol(Y_test1, Y_right, eps, verbose > 2)\n\n @pytest.mark.slow\n def test_pdist_minkowski_5_8_iris_nonC(self):\n eps = 1e-07\n X = eo['iris']\n Y_right = eo['pdist-minkowski-5.8-iris']\n Y_test2 = wpdist_no_const(X, 'test_minkowski', p=5.8)\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_mahalanobis(self):\n # 1-dimensional observations\n x = np.array([2.0, 2.0, 3.0, 5.0]).reshape(-1, 1)\n dist = pdist(x, metric='mahalanobis')\n assert_allclose(dist, [0.0, np.sqrt(0.5), np.sqrt(4.5),\n np.sqrt(0.5), np.sqrt(4.5), np.sqrt(2.0)])\n\n # 2-dimensional observations\n x = np.array([[0, 0], [-1, 0], [0, 2], [1, 0], [0, -2]])\n dist = pdist(x, metric='mahalanobis')\n rt2 = np.sqrt(2)\n assert_allclose(dist, [rt2, rt2, rt2, rt2, 2, 2 * rt2, 2, 2, 2 * rt2, 2])\n\n # Too few observations\n assert_raises(ValueError,\n wpdist, [[0, 1], [2, 3]], metric='mahalanobis')\n\n def test_pdist_hamming_random(self):\n eps = 1e-07\n X = 
eo['pdist-boolean-inp']\n Y_right = eo['pdist-hamming']\n Y_test1 = wpdist(X, 'hamming')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_hamming_random_float32(self):\n eps = 1e-07\n X = np.float32(eo['pdist-boolean-inp'])\n Y_right = eo['pdist-hamming']\n Y_test1 = wpdist(X, 'hamming')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_hamming_random_nonC(self):\n eps = 1e-07\n X = eo['pdist-boolean-inp']\n Y_right = eo['pdist-hamming']\n Y_test2 = wpdist(X, 'test_hamming')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_dhamming_random(self):\n eps = 1e-07\n X = np.float64(eo['pdist-boolean-inp'])\n Y_right = eo['pdist-hamming']\n Y_test1 = wpdist(X, 'hamming')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_dhamming_random_float32(self):\n eps = 1e-07\n X = np.float32(eo['pdist-boolean-inp'])\n Y_right = eo['pdist-hamming']\n Y_test1 = wpdist(X, 'hamming')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_dhamming_random_nonC(self):\n eps = 1e-07\n X = np.float64(eo['pdist-boolean-inp'])\n Y_right = eo['pdist-hamming']\n Y_test2 = wpdist(X, 'test_hamming')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_jaccard_random(self):\n eps = 1e-08\n X = eo['pdist-boolean-inp']\n Y_right = eo['pdist-jaccard']\n Y_test1 = wpdist(X, 'jaccard')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_jaccard_random_float32(self):\n eps = 1e-08\n X = np.float32(eo['pdist-boolean-inp'])\n Y_right = eo['pdist-jaccard']\n Y_test1 = wpdist(X, 'jaccard')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_jaccard_random_nonC(self):\n eps = 1e-08\n X = eo['pdist-boolean-inp']\n Y_right = eo['pdist-jaccard']\n Y_test2 = wpdist(X, 'test_jaccard')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_djaccard_random(self):\n eps = 1e-08\n X = np.float64(eo['pdist-boolean-inp'])\n Y_right = eo['pdist-jaccard']\n Y_test1 = wpdist(X, 'jaccard')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_djaccard_random_float32(self):\n eps = 1e-08\n X = np.float32(eo['pdist-boolean-inp'])\n Y_right = eo['pdist-jaccard']\n Y_test1 = wpdist(X, 'jaccard')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_djaccard_allzeros(self):\n eps = 1e-08\n Y = pdist(np.zeros((5, 3)), 'jaccard')\n _assert_within_tol(np.zeros(10), Y, eps)\n\n def test_pdist_djaccard_random_nonC(self):\n eps = 1e-08\n X = np.float64(eo['pdist-boolean-inp'])\n Y_right = eo['pdist-jaccard']\n Y_test2 = wpdist(X, 'test_jaccard')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_jensenshannon_random(self):\n eps = 1e-08\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-jensenshannon']\n Y_test1 = pdist(X, 'jensenshannon')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_jensenshannon_random_float32(self):\n eps = 1e-07\n X = np.float32(eo['pdist-double-inp'])\n Y_right = eo['pdist-jensenshannon']\n Y_test1 = pdist(X, 'jensenshannon')\n _assert_within_tol(Y_test1, Y_right, eps, verbose > 2)\n\n def test_pdist_jensenshannon_random_nonC(self):\n eps = 1e-08\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-jensenshannon']\n Y_test2 = pdist(X, 'test_jensenshannon')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_jensenshannon_iris(self):\n if _is_32bit():\n # Test failing on 32-bit Linux on Azure otherwise, see gh-12810\n eps = 1.5e-10\n else:\n eps = 1e-12\n\n X = eo['iris']\n Y_right = eo['pdist-jensenshannon-iris']\n Y_test1 = pdist(X, 'jensenshannon')\n _assert_within_tol(Y_test1, 
Y_right, eps)\n\n def test_pdist_jensenshannon_iris_float32(self):\n eps = 1e-06\n X = np.float32(eo['iris'])\n Y_right = eo['pdist-jensenshannon-iris']\n Y_test1 = pdist(X, 'jensenshannon')\n _assert_within_tol(Y_test1, Y_right, eps, verbose > 2)\n\n def test_pdist_jensenshannon_iris_nonC(self):\n eps = 5e-12\n X = eo['iris']\n Y_right = eo['pdist-jensenshannon-iris']\n Y_test2 = pdist(X, 'test_jensenshannon')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_djaccard_allzeros_nonC(self):\n eps = 1e-08\n Y = pdist(np.zeros((5, 3)), 'test_jaccard')\n _assert_within_tol(np.zeros(10), Y, eps)\n\n def test_pdist_chebyshev_random(self):\n eps = 1e-08\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-chebyshev']\n Y_test1 = pdist(X, 'chebyshev')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_chebyshev_random_float32(self):\n eps = 1e-07\n X = np.float32(eo['pdist-double-inp'])\n Y_right = eo['pdist-chebyshev']\n Y_test1 = pdist(X, 'chebyshev')\n _assert_within_tol(Y_test1, Y_right, eps, verbose > 2)\n\n def test_pdist_chebyshev_random_nonC(self):\n eps = 1e-08\n X = eo['pdist-double-inp']\n Y_right = eo['pdist-chebyshev']\n Y_test2 = pdist(X, 'test_chebyshev')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_chebyshev_iris(self):\n eps = 1e-15\n X = eo['iris']\n Y_right = eo['pdist-chebyshev-iris']\n Y_test1 = pdist(X, 'chebyshev')\n _assert_within_tol(Y_test1, Y_right, eps)\n\n def test_pdist_chebyshev_iris_float32(self):\n eps = 1e-06\n X = np.float32(eo['iris'])\n Y_right = eo['pdist-chebyshev-iris']\n Y_test1 = pdist(X, 'chebyshev')\n _assert_within_tol(Y_test1, Y_right, eps, verbose > 2)\n\n def test_pdist_chebyshev_iris_nonC(self):\n eps = 1e-15\n X = eo['iris']\n Y_right = eo['pdist-chebyshev-iris']\n Y_test2 = pdist(X, 'test_chebyshev')\n _assert_within_tol(Y_test2, Y_right, eps)\n\n def test_pdist_matching_mtica1(self):\n # Test matching(*,*) with mtica example #1 (nums).\n m = wmatching(np.array([1, 0, 1, 1, 0]),\n np.array([1, 1, 0, 1, 1]))\n m2 = wmatching(np.array([1, 0, 1, 1, 0], dtype=bool),\n np.array([1, 1, 0, 1, 1], dtype=bool))\n assert_allclose(m, 0.6, rtol=0, atol=1e-10)\n assert_allclose(m2, 0.6, rtol=0, atol=1e-10)\n\n def test_pdist_matching_mtica2(self):\n # Test matching(*,*) with mtica example #2.\n m = wmatching(np.array([1, 0, 1]),\n np.array([1, 1, 0]))\n m2 = wmatching(np.array([1, 0, 1], dtype=bool),\n np.array([1, 1, 0], dtype=bool))\n assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)\n assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)\n\n def test_pdist_jaccard_mtica1(self):\n m = wjaccard(np.array([1, 0, 1, 1, 0]),\n np.array([1, 1, 0, 1, 1]))\n m2 = wjaccard(np.array([1, 0, 1, 1, 0], dtype=bool),\n np.array([1, 1, 0, 1, 1], dtype=bool))\n assert_allclose(m, 0.6, rtol=0, atol=1e-10)\n assert_allclose(m2, 0.6, rtol=0, atol=1e-10)\n\n def test_pdist_jaccard_mtica2(self):\n m = wjaccard(np.array([1, 0, 1]),\n np.array([1, 1, 0]))\n m2 = wjaccard(np.array([1, 0, 1], dtype=bool),\n np.array([1, 1, 0], dtype=bool))\n assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)\n assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)\n\n def test_pdist_yule_mtica1(self):\n m = wyule(np.array([1, 0, 1, 1, 0]),\n np.array([1, 1, 0, 1, 1]))\n m2 = wyule(np.array([1, 0, 1, 1, 0], dtype=bool),\n np.array([1, 1, 0, 1, 1], dtype=bool))\n if verbose > 2:\n print(m)\n assert_allclose(m, 2, rtol=0, atol=1e-10)\n assert_allclose(m2, 2, rtol=0, atol=1e-10)\n\n def test_pdist_yule_mtica2(self):\n m = wyule(np.array([1, 0, 1]),\n np.array([1, 1, 0]))\n m2 = 
wyule(np.array([1, 0, 1], dtype=bool),\n np.array([1, 1, 0], dtype=bool))\n if verbose > 2:\n print(m)\n assert_allclose(m, 2, rtol=0, atol=1e-10)\n assert_allclose(m2, 2, rtol=0, atol=1e-10)\n\n def test_pdist_dice_mtica1(self):\n m = wdice(np.array([1, 0, 1, 1, 0]),\n np.array([1, 1, 0, 1, 1]))\n m2 = wdice(np.array([1, 0, 1, 1, 0], dtype=bool),\n np.array([1, 1, 0, 1, 1], dtype=bool))\n if verbose > 2:\n print(m)\n assert_allclose(m, 3 / 7, rtol=0, atol=1e-10)\n assert_allclose(m2, 3 / 7, rtol=0, atol=1e-10)\n\n def test_pdist_dice_mtica2(self):\n m = wdice(np.array([1, 0, 1]),\n np.array([1, 1, 0]))\n m2 = wdice(np.array([1, 0, 1], dtype=bool),\n np.array([1, 1, 0], dtype=bool))\n if verbose > 2:\n print(m)\n assert_allclose(m, 0.5, rtol=0, atol=1e-10)\n assert_allclose(m2, 0.5, rtol=0, atol=1e-10)\n\n def test_pdist_sokalsneath_mtica1(self):\n m = sokalsneath(np.array([1, 0, 1, 1, 0]),\n np.array([1, 1, 0, 1, 1]))\n m2 = sokalsneath(np.array([1, 0, 1, 1, 0], dtype=bool),\n np.array([1, 1, 0, 1, 1], dtype=bool))\n if verbose > 2:\n print(m)\n assert_allclose(m, 3 / 4, rtol=0, atol=1e-10)\n assert_allclose(m2, 3 / 4, rtol=0, atol=1e-10)\n\n def test_pdist_sokalsneath_mtica2(self):\n m = wsokalsneath(np.array([1, 0, 1]),\n np.array([1, 1, 0]))\n m2 = wsokalsneath(np.array([1, 0, 1], dtype=bool),\n np.array([1, 1, 0], dtype=bool))\n if verbose > 2:\n print(m)\n assert_allclose(m, 4 / 5, rtol=0, atol=1e-10)\n assert_allclose(m2, 4 / 5, rtol=0, atol=1e-10)\n\n def test_pdist_rogerstanimoto_mtica1(self):\n m = wrogerstanimoto(np.array([1, 0, 1, 1, 0]),\n np.array([1, 1, 0, 1, 1]))\n m2 = wrogerstanimoto(np.array([1, 0, 1, 1, 0], dtype=bool),\n np.array([1, 1, 0, 1, 1], dtype=bool))\n if verbose > 2:\n print(m)\n assert_allclose(m, 3 / 4, rtol=0, atol=1e-10)\n assert_allclose(m2, 3 / 4, rtol=0, atol=1e-10)\n\n def test_pdist_rogerstanimoto_mtica2(self):\n m = wrogerstanimoto(np.array([1, 0, 1]),\n np.array([1, 1, 0]))\n m2 = wrogerstanimoto(np.array([1, 0, 1], dtype=bool),\n np.array([1, 1, 0], dtype=bool))\n if verbose > 2:\n print(m)\n assert_allclose(m, 4 / 5, rtol=0, atol=1e-10)\n assert_allclose(m2, 4 / 5, rtol=0, atol=1e-10)\n\n def test_pdist_russellrao_mtica1(self):\n m = wrussellrao(np.array([1, 0, 1, 1, 0]),\n np.array([1, 1, 0, 1, 1]))\n m2 = wrussellrao(np.array([1, 0, 1, 1, 0], dtype=bool),\n np.array([1, 1, 0, 1, 1], dtype=bool))\n if verbose > 2:\n print(m)\n assert_allclose(m, 3 / 5, rtol=0, atol=1e-10)\n assert_allclose(m2, 3 / 5, rtol=0, atol=1e-10)\n\n def test_pdist_russellrao_mtica2(self):\n m = wrussellrao(np.array([1, 0, 1]),\n np.array([1, 1, 0]))\n m2 = wrussellrao(np.array([1, 0, 1], dtype=bool),\n np.array([1, 1, 0], dtype=bool))\n if verbose > 2:\n print(m)\n assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)\n assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)\n\n @pytest.mark.slow\n def test_pdist_canberra_match(self):\n D = eo['iris']\n if verbose > 2:\n print(D.shape, D.dtype)\n eps = 1e-10\n y1 = wpdist_no_const(D, \"canberra\")\n y2 = wpdist_no_const(D, \"test_canberra\")\n _assert_within_tol(y1, y2, eps, verbose > 2)\n\n def test_pdist_canberra_ticket_711(self):\n # Test pdist(X, 'canberra') to see if Canberra gives the right result\n # as reported on gh-1238.\n eps = 1e-8\n pdist_y = wpdist_no_const(([3.3], [3.4]), \"canberra\")\n right_y = 0.01492537\n _assert_within_tol(pdist_y, right_y, eps, verbose > 2)\n\n def test_pdist_custom_notdouble(self):\n # tests that when using a custom metric the data type is not altered\n class myclass(object):\n pass\n\n def 
_my_metric(x, y):\n if not isinstance(x[0], myclass) or not isinstance(y[0], myclass):\n raise ValueError(\"Type has been changed\")\n return 1.123\n data = np.array([[myclass()], [myclass()]], dtype=object)\n pdist_y = pdist(data, metric=_my_metric)\n right_y = 1.123\n assert_equal(pdist_y, right_y, verbose=verbose > 2)\n\n def _check_calling_conventions(self, X, metric, eps=1e-07, **kwargs):\n # helper function for test_pdist_calling_conventions\n try:\n y1 = pdist(X, metric=metric, **kwargs)\n y2 = pdist(X, metric=eval(metric), **kwargs)\n y3 = pdist(X, metric=\"test_\" + metric, **kwargs)\n except Exception as e:\n e_cls = e.__class__\n if verbose > 2:\n print(e_cls.__name__)\n print(e)\n assert_raises(e_cls, pdist, X, metric=metric, **kwargs)\n assert_raises(e_cls, pdist, X, metric=eval(metric), **kwargs)\n assert_raises(e_cls, pdist, X, metric=\"test_\" + metric, **kwargs)\n else:\n _assert_within_tol(y1, y2, rtol=eps, verbose_=verbose > 2)\n _assert_within_tol(y1, y3, rtol=eps, verbose_=verbose > 2)\n\n def test_pdist_calling_conventions(self):\n # Ensures that specifying the metric with a str or scipy function\n # gives the same behaviour (i.e. same result or same exception).\n # NOTE: The correctness should be checked within each metric tests.\n # NOTE: Extra args should be checked with a dedicated test\n for eo_name in self.rnd_eo_names:\n # subsampling input data to speed-up tests\n # NOTE: num samples needs to be > than dimensions for mahalanobis\n X = eo[eo_name][::5, ::2]\n for metric in _METRICS_NAMES:\n if metric == 'wminkowski':\n continue\n if verbose > 2:\n print(\"testing: \", metric, \" with: \", eo_name)\n if metric in {'dice', 'yule', 'kulsinski', 'matching',\n 'rogerstanimoto', 'russellrao', 'sokalmichener',\n 'sokalsneath'} and 'bool' not in eo_name:\n # python version permits non-bools e.g. 
for fuzzy logic\n continue\n self._check_calling_conventions(X, metric)\n\n # Testing built-in metrics with extra args\n if metric == \"seuclidean\":\n V = np.var(X.astype(np.double), axis=0, ddof=1)\n self._check_calling_conventions(X, metric, V=V)\n elif metric == \"mahalanobis\":\n V = np.atleast_2d(np.cov(X.astype(np.double).T))\n VI = np.array(np.linalg.inv(V).T)\n self._check_calling_conventions(X, metric, VI=VI)\n\n def test_pdist_dtype_equivalence(self):\n # Tests that the result is not affected by type up-casting\n eps = 1e-07\n tests = [(eo['random-bool-data'], self.valid_upcasts['bool']),\n (eo['random-uint-data'], self.valid_upcasts['uint']),\n (eo['random-int-data'], self.valid_upcasts['int']),\n (eo['random-float32-data'], self.valid_upcasts['float32'])]\n for metric in _METRICS_NAMES:\n for test in tests:\n X1 = test[0][::5, ::2]\n try:\n y1 = pdist(X1, metric=metric)\n except Exception as e:\n e_cls = e.__class__\n if verbose > 2:\n print(e_cls.__name__)\n print(e)\n for new_type in test[1]:\n X2 = new_type(X1)\n assert_raises(e_cls, pdist, X2, metric=metric)\n else:\n for new_type in test[1]:\n y2 = pdist(new_type(X1), metric=metric)\n _assert_within_tol(y1, y2, eps, verbose > 2)\n\n def test_pdist_out(self):\n # Test that out parameter works properly\n eps = 1e-07\n X = eo['random-float32-data'][::5, ::2]\n out_size = int((X.shape[0] * (X.shape[0] - 1)) / 2)\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \"'wminkowski' metric is deprecated\")\n for metric in _METRICS_NAMES:\n kwargs = dict()\n if metric in ['minkowski', 'wminkowski']:\n kwargs['p'] = 1.23\n if metric == 'wminkowski':\n kwargs['w'] = 1.0 / X.std(axis=0)\n out1 = np.empty(out_size, dtype=np.double)\n Y_right = pdist(X, metric, **kwargs)\n Y_test1 = pdist(X, metric, out=out1, **kwargs)\n # test that output is numerically equivalent\n _assert_within_tol(Y_test1, Y_right, eps)\n # test that Y_test1 and out1 are the same object\n assert_(Y_test1 is out1)\n # test for incorrect shape\n out2 = np.empty(out_size + 3, dtype=np.double)\n assert_raises(ValueError, pdist, X, metric, out=out2, **kwargs)\n # test for (C-)contiguous output\n out3 = np.empty(2 * out_size, dtype=np.double)[::2]\n assert_raises(ValueError, pdist, X, metric, out=out3, **kwargs)\n # test for incorrect dtype\n out5 = np.empty(out_size, dtype=np.int64)\n assert_raises(ValueError, pdist, X, metric, out=out5, **kwargs)\n\n def test_striding(self):\n # test that striding is handled correct with calls to\n # _copy_array_if_base_present\n eps = 1e-07\n X = eo['random-float32-data'][::5, ::2]\n X_copy = X.copy()\n\n # confirm contiguity\n assert_(not X.flags.c_contiguous)\n assert_(X_copy.flags.c_contiguous)\n\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning,\n message=\"'wminkowski' metric is deprecated\")\n for metric in _METRICS_NAMES:\n kwargs = dict()\n if metric in ['minkowski', 'wminkowski']:\n kwargs['p'] = 1.23\n if metric == 'wminkowski':\n kwargs['w'] = 1.0 / X.std(axis=0)\n Y1 = pdist(X, metric, **kwargs)\n Y2 = pdist(X_copy, metric, **kwargs)\n # test that output is numerically equivalent\n _assert_within_tol(Y1, Y2, eps, verbose > 2)\n\nclass TestSomeDistanceFunctions(object):\n\n def setup_method(self):\n # 1D arrays\n x = np.array([1.0, 2.0, 3.0])\n y = np.array([1.0, 1.0, 5.0])\n # 3x1 arrays\n x31 = x[:, np.newaxis]\n y31 = y[:, np.newaxis]\n # 1x3 arrays\n x13 = x31.T\n y13 = y31.T\n\n self.cases = [(x, y), (x31, y31), (x13, y13)]\n\n def test_minkowski(self):\n for x, y in self.cases:\n dist1 
= wminkowski(x, y, p=1)\n assert_almost_equal(dist1, 3.0)\n dist1p5 = wminkowski(x, y, p=1.5)\n assert_almost_equal(dist1p5, (1.0 + 2.0**1.5)**(2. / 3))\n wminkowski(x, y, p=2)\n\n # Check that casting input to minimum scalar type doesn't affect result\n # (issue #10262). This could be extended to more test inputs with\n # np.min_scalar_type(np.max(input_matrix)).\n a = np.array([352, 916])\n b = np.array([350, 660])\n assert_equal(minkowski(a, b),\n minkowski(a.astype('uint16'), b.astype('uint16')))\n\n def test_old_wminkowski(self):\n with suppress_warnings() as wrn:\n wrn.filter(DeprecationWarning,\n message=\".*wminkowski is deprecated\")\n w = np.array([1.0, 2.0, 0.5])\n for x, y in self.cases:\n dist1 = old_wminkowski(x, y, p=1, w=w)\n assert_almost_equal(dist1, 3.0)\n dist1p5 = old_wminkowski(x, y, p=1.5, w=w)\n assert_almost_equal(dist1p5, (2.0**1.5+1.0)**(2./3))\n dist2 = old_wminkowski(x, y, p=2, w=w)\n assert_almost_equal(dist2, np.sqrt(5))\n\n # test weights Issue #7893\n arr = np.arange(4)\n w = np.full_like(arr, 4)\n assert_almost_equal(old_wminkowski(arr, arr + 1, p=2, w=w), 8.0)\n assert_almost_equal(wminkowski(arr, arr + 1, p=2, w=w), 4.0)\n\n def test_euclidean(self):\n for x, y in self.cases:\n dist = weuclidean(x, y)\n assert_almost_equal(dist, np.sqrt(5))\n\n def test_sqeuclidean(self):\n for x, y in self.cases:\n dist = wsqeuclidean(x, y)\n assert_almost_equal(dist, 5.0)\n\n def test_cosine(self):\n for x, y in self.cases:\n dist = wcosine(x, y)\n assert_almost_equal(dist, 1.0 - 18.0 / (np.sqrt(14) * np.sqrt(27)))\n\n def test_correlation(self):\n xm = np.array([-1.0, 0, 1.0])\n ym = np.array([-4.0 / 3, -4.0 / 3, 5.0 - 7.0 / 3])\n for x, y in self.cases:\n dist = wcorrelation(x, y)\n assert_almost_equal(dist, 1.0 - np.dot(xm, ym) / (norm(xm) * norm(ym)))\n\n def test_correlation_positive(self):\n # Regression test for gh-12320 (negative return value due to rounding\n x = np.array([0., 0., 0., 0., 0., 0., -2., 0., 0., 0., -2., -2., -2.,\n 0., -2., 0., -2., 0., 0., -1., -2., 0., 1., 0., 0., -2.,\n 0., 0., -2., 0., -2., -2., -2., -2., -2., -2., 0.])\n y = np.array([1., 1., 1., 1., 1., 1., -1., 1., 1., 1., -1., -1., -1.,\n 1., -1., 1., -1., 1., 1., 0., -1., 1., 2., 1., 1., -1.,\n 1., 1., -1., 1., -1., -1., -1., -1., -1., -1., 1.])\n dist = correlation(x, y)\n assert 0 <= dist <= 10 * np.finfo(np.float64).eps\n\n def test_mahalanobis(self):\n x = np.array([1.0, 2.0, 3.0])\n y = np.array([1.0, 1.0, 5.0])\n vi = np.array([[2.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 2.0]])\n for x, y in self.cases:\n dist = mahalanobis(x, y, vi)\n assert_almost_equal(dist, np.sqrt(6.0))\n\n\nclass TestSquareForm(object):\n checked_dtypes = [np.float64, np.float32, np.int32, np.int8, bool]\n\n def test_squareform_matrix(self):\n for dtype in self.checked_dtypes:\n self.check_squareform_matrix(dtype)\n\n def test_squareform_vector(self):\n for dtype in self.checked_dtypes:\n self.check_squareform_vector(dtype)\n\n def check_squareform_matrix(self, dtype):\n A = np.zeros((0, 0), dtype=dtype)\n rA = squareform(A)\n assert_equal(rA.shape, (0,))\n assert_equal(rA.dtype, dtype)\n\n A = np.zeros((1, 1), dtype=dtype)\n rA = squareform(A)\n assert_equal(rA.shape, (0,))\n assert_equal(rA.dtype, dtype)\n\n A = np.array([[0, 4.2], [4.2, 0]], dtype=dtype)\n rA = squareform(A)\n assert_equal(rA.shape, (1,))\n assert_equal(rA.dtype, dtype)\n assert_array_equal(rA, np.array([4.2], dtype=dtype))\n\n def check_squareform_vector(self, dtype):\n v = np.zeros((0,), dtype=dtype)\n rv = squareform(v)\n 
assert_equal(rv.shape, (1, 1))\n assert_equal(rv.dtype, dtype)\n assert_array_equal(rv, [[0]])\n\n v = np.array([8.3], dtype=dtype)\n rv = squareform(v)\n assert_equal(rv.shape, (2, 2))\n assert_equal(rv.dtype, dtype)\n assert_array_equal(rv, np.array([[0, 8.3], [8.3, 0]], dtype=dtype))\n\n def test_squareform_multi_matrix(self):\n for n in range(2, 5):\n self.check_squareform_multi_matrix(n)\n\n def check_squareform_multi_matrix(self, n):\n X = np.random.rand(n, 4)\n Y = wpdist_no_const(X)\n assert_equal(len(Y.shape), 1)\n A = squareform(Y)\n Yr = squareform(A)\n s = A.shape\n k = 0\n if verbose >= 3:\n print(A.shape, Y.shape, Yr.shape)\n assert_equal(len(s), 2)\n assert_equal(len(Yr.shape), 1)\n assert_equal(s[0], s[1])\n for i in range(0, s[0]):\n for j in range(i + 1, s[1]):\n if i != j:\n assert_equal(A[i, j], Y[k])\n k += 1\n else:\n assert_equal(A[i, j], 0)\n\n\nclass TestNumObsY(object):\n\n def test_num_obs_y_multi_matrix(self):\n for n in range(2, 10):\n X = np.random.rand(n, 4)\n Y = wpdist_no_const(X)\n assert_equal(num_obs_y(Y), n)\n\n def test_num_obs_y_1(self):\n # Tests num_obs_y(y) on a condensed distance matrix over 1\n # observations. Expecting exception.\n assert_raises(ValueError, self.check_y, 1)\n\n def test_num_obs_y_2(self):\n # Tests num_obs_y(y) on a condensed distance matrix over 2\n # observations.\n assert_(self.check_y(2))\n\n def test_num_obs_y_3(self):\n assert_(self.check_y(3))\n\n def test_num_obs_y_4(self):\n assert_(self.check_y(4))\n\n def test_num_obs_y_5_10(self):\n for i in range(5, 16):\n self.minit(i)\n\n def test_num_obs_y_2_100(self):\n # Tests num_obs_y(y) on 100 improper condensed distance matrices.\n # Expecting exception.\n a = set([])\n for n in range(2, 16):\n a.add(n * (n - 1) / 2)\n for i in range(5, 105):\n if i not in a:\n assert_raises(ValueError, self.bad_y, i)\n\n def minit(self, n):\n assert_(self.check_y(n))\n\n def bad_y(self, n):\n y = np.random.rand(n)\n return num_obs_y(y)\n\n def check_y(self, n):\n return num_obs_y(self.make_y(n)) == n\n\n def make_y(self, n):\n return np.random.rand((n * (n - 1)) // 2)\n\n\nclass TestNumObsDM(object):\n\n def test_num_obs_dm_multi_matrix(self):\n for n in range(1, 10):\n X = np.random.rand(n, 4)\n Y = wpdist_no_const(X)\n A = squareform(Y)\n if verbose >= 3:\n print(A.shape, Y.shape)\n assert_equal(num_obs_dm(A), n)\n\n def test_num_obs_dm_0(self):\n # Tests num_obs_dm(D) on a 0x0 distance matrix. 
Expecting exception.\n assert_(self.check_D(0))\n\n def test_num_obs_dm_1(self):\n # Tests num_obs_dm(D) on a 1x1 distance matrix.\n assert_(self.check_D(1))\n\n def test_num_obs_dm_2(self):\n assert_(self.check_D(2))\n\n def test_num_obs_dm_3(self):\n assert_(self.check_D(2))\n\n def test_num_obs_dm_4(self):\n assert_(self.check_D(4))\n\n def check_D(self, n):\n return num_obs_dm(self.make_D(n)) == n\n\n def make_D(self, n):\n return np.random.rand(n, n)\n\n\ndef is_valid_dm_throw(D):\n return is_valid_dm(D, throw=True)\n\n\nclass TestIsValidDM(object):\n\n def test_is_valid_dm_improper_shape_1D_E(self):\n D = np.zeros((5,), dtype=np.double)\n assert_raises(ValueError, is_valid_dm_throw, (D))\n\n def test_is_valid_dm_improper_shape_1D_F(self):\n D = np.zeros((5,), dtype=np.double)\n assert_equal(is_valid_dm(D), False)\n\n def test_is_valid_dm_improper_shape_3D_E(self):\n D = np.zeros((3, 3, 3), dtype=np.double)\n assert_raises(ValueError, is_valid_dm_throw, (D))\n\n def test_is_valid_dm_improper_shape_3D_F(self):\n D = np.zeros((3, 3, 3), dtype=np.double)\n assert_equal(is_valid_dm(D), False)\n\n def test_is_valid_dm_nonzero_diagonal_E(self):\n y = np.random.rand(10)\n D = squareform(y)\n for i in range(0, 5):\n D[i, i] = 2.0\n assert_raises(ValueError, is_valid_dm_throw, (D))\n\n def test_is_valid_dm_nonzero_diagonal_F(self):\n y = np.random.rand(10)\n D = squareform(y)\n for i in range(0, 5):\n D[i, i] = 2.0\n assert_equal(is_valid_dm(D), False)\n\n def test_is_valid_dm_asymmetric_E(self):\n y = np.random.rand(10)\n D = squareform(y)\n D[1, 3] = D[3, 1] + 1\n assert_raises(ValueError, is_valid_dm_throw, (D))\n\n def test_is_valid_dm_asymmetric_F(self):\n y = np.random.rand(10)\n D = squareform(y)\n D[1, 3] = D[3, 1] + 1\n assert_equal(is_valid_dm(D), False)\n\n def test_is_valid_dm_correct_1_by_1(self):\n D = np.zeros((1, 1), dtype=np.double)\n assert_equal(is_valid_dm(D), True)\n\n def test_is_valid_dm_correct_2_by_2(self):\n y = np.random.rand(1)\n D = squareform(y)\n assert_equal(is_valid_dm(D), True)\n\n def test_is_valid_dm_correct_3_by_3(self):\n y = np.random.rand(3)\n D = squareform(y)\n assert_equal(is_valid_dm(D), True)\n\n def test_is_valid_dm_correct_4_by_4(self):\n y = np.random.rand(6)\n D = squareform(y)\n assert_equal(is_valid_dm(D), True)\n\n def test_is_valid_dm_correct_5_by_5(self):\n y = np.random.rand(10)\n D = squareform(y)\n assert_equal(is_valid_dm(D), True)\n\n\ndef is_valid_y_throw(y):\n return is_valid_y(y, throw=True)\n\n\nclass TestIsValidY(object):\n # If test case name ends on \"_E\" then an exception is expected for the\n # given input, if it ends in \"_F\" then False is expected for the is_valid_y\n # check. 
Otherwise the input is expected to be valid.\n\n def test_is_valid_y_improper_shape_2D_E(self):\n y = np.zeros((3, 3,), dtype=np.double)\n assert_raises(ValueError, is_valid_y_throw, (y))\n\n def test_is_valid_y_improper_shape_2D_F(self):\n y = np.zeros((3, 3,), dtype=np.double)\n assert_equal(is_valid_y(y), False)\n\n def test_is_valid_y_improper_shape_3D_E(self):\n y = np.zeros((3, 3, 3), dtype=np.double)\n assert_raises(ValueError, is_valid_y_throw, (y))\n\n def test_is_valid_y_improper_shape_3D_F(self):\n y = np.zeros((3, 3, 3), dtype=np.double)\n assert_equal(is_valid_y(y), False)\n\n def test_is_valid_y_correct_2_by_2(self):\n y = self.correct_n_by_n(2)\n assert_equal(is_valid_y(y), True)\n\n def test_is_valid_y_correct_3_by_3(self):\n y = self.correct_n_by_n(3)\n assert_equal(is_valid_y(y), True)\n\n def test_is_valid_y_correct_4_by_4(self):\n y = self.correct_n_by_n(4)\n assert_equal(is_valid_y(y), True)\n\n def test_is_valid_y_correct_5_by_5(self):\n y = self.correct_n_by_n(5)\n assert_equal(is_valid_y(y), True)\n\n def test_is_valid_y_2_100(self):\n a = set([])\n for n in range(2, 16):\n a.add(n * (n - 1) / 2)\n for i in range(5, 105):\n if i not in a:\n assert_raises(ValueError, self.bad_y, i)\n\n def bad_y(self, n):\n y = np.random.rand(n)\n return is_valid_y(y, throw=True)\n\n def correct_n_by_n(self, n):\n y = np.random.rand((n * (n - 1)) // 2)\n return y\n\n\ndef test_bad_p():\n # Raise ValueError if p < 1.\n p = 0.5\n assert_raises(ValueError, wminkowski, [1, 2], [3, 4], p)\n assert_raises(ValueError, wminkowski, [1, 2], [3, 4], p, [1, 1])\n\n\ndef test_sokalsneath_all_false():\n # Regression test for ticket #876\n assert_raises(ValueError, sokalsneath, [False, False, False], [False, False, False])\n\n\ndef test_canberra():\n # Regression test for ticket #1430.\n assert_equal(wcanberra([1, 2, 3], [2, 4, 6]), 1)\n assert_equal(wcanberra([1, 1, 0, 0], [1, 0, 1, 0]), 2)\n\n\ndef test_braycurtis():\n # Regression test for ticket #1430.\n assert_almost_equal(wbraycurtis([1, 2, 3], [2, 4, 6]), 1. 
/ 3, decimal=15)\n assert_almost_equal(wbraycurtis([1, 1, 0, 0], [1, 0, 1, 0]), 0.5, decimal=15)\n\n\ndef test_euclideans():\n # Regression test for ticket #1328.\n x1 = np.array([1, 1, 1])\n x2 = np.array([0, 0, 0])\n\n # Basic test of the calculation.\n assert_almost_equal(wsqeuclidean(x1, x2), 3.0, decimal=14)\n assert_almost_equal(weuclidean(x1, x2), np.sqrt(3), decimal=14)\n\n # Check flattening for (1, N) or (N, 1) inputs\n assert_almost_equal(weuclidean(x1[np.newaxis, :], x2[np.newaxis, :]),\n np.sqrt(3), decimal=14)\n assert_almost_equal(wsqeuclidean(x1[np.newaxis, :], x2[np.newaxis, :]),\n 3.0, decimal=14)\n assert_almost_equal(wsqeuclidean(x1[:, np.newaxis], x2[:, np.newaxis]),\n 3.0, decimal=14)\n\n # Distance metrics only defined for vectors (= 1-D)\n x = np.arange(4).reshape(2, 2)\n assert_raises(ValueError, weuclidean, x, x)\n assert_raises(ValueError, wsqeuclidean, x, x)\n\n # Another check, with random data.\n rs = np.random.RandomState(1234567890)\n x = rs.rand(10)\n y = rs.rand(10)\n d1 = weuclidean(x, y)\n d2 = wsqeuclidean(x, y)\n assert_almost_equal(d1**2, d2, decimal=14)\n\n\ndef test_hamming_unequal_length():\n # Regression test for gh-4290.\n x = [0, 0, 1]\n y = [1, 0, 1, 0]\n # Used to give an AttributeError from ndarray.mean called on bool\n assert_raises(ValueError, whamming, x, y)\n\n\ndef test_hamming_string_array():\n # https://github.com/scikit-learn/scikit-learn/issues/4014\n a = np.array(['eggs', 'spam', 'spam', 'eggs', 'spam', 'spam', 'spam',\n 'spam', 'spam', 'spam', 'spam', 'eggs', 'eggs', 'spam',\n 'eggs', 'eggs', 'eggs', 'eggs', 'eggs', 'spam'],\n dtype='|S4')\n b = np.array(['eggs', 'spam', 'spam', 'eggs', 'eggs', 'spam', 'spam',\n 'spam', 'spam', 'eggs', 'spam', 'eggs', 'spam', 'eggs',\n 'spam', 'spam', 'eggs', 'spam', 'spam', 'eggs'],\n dtype='|S4')\n desired = 0.45\n assert_allclose(whamming(a, b), desired)\n\n\ndef test_minkowski_w():\n # Regression test for gh-8142.\n arr_in = np.array([[83.33333333, 100., 83.33333333, 100., 36.,\n 60., 90., 150., 24., 48.],\n [83.33333333, 100., 83.33333333, 100., 36.,\n 60., 90., 150., 24., 48.]])\n p0 = pdist(arr_in, metric='minkowski', p=1, w=None)\n c0 = cdist(arr_in, arr_in, metric='minkowski', p=1, w=None)\n p1 = pdist(arr_in, metric='minkowski', p=1)\n c1 = cdist(arr_in, arr_in, metric='minkowski', p=1)\n\n assert_allclose(p0, p1, rtol=1e-15)\n assert_allclose(c0, c1, rtol=1e-15)\n\n\ndef test_sqeuclidean_dtypes():\n # Assert that sqeuclidean returns the right types of values.\n # Integer types should be converted to floating for stability.\n # Floating point types should be the same as the input.\n x = [1, 2, 3]\n y = [4, 5, 6]\n\n for dtype in [np.int8, np.int16, np.int32, np.int64]:\n d = wsqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))\n assert_(np.issubdtype(d.dtype, np.floating))\n\n for dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:\n d1 = wsqeuclidean([0], np.asarray([-1], dtype=dtype))\n d2 = wsqeuclidean(np.asarray([-1], dtype=dtype), [0])\n\n assert_equal(d1, d2)\n assert_equal(d1, np.float64(np.iinfo(dtype).max)**2)\n\n dtypes = [np.float32, np.float64, np.complex64, np.complex128]\n for dtype in ['float16', 'float128']:\n # These aren't present in older numpy versions; float128 may also not\n # be present on all platforms.\n if hasattr(np, dtype):\n dtypes.append(getattr(np, dtype))\n\n for dtype in dtypes:\n d = wsqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))\n assert_equal(d.dtype, dtype)\n\n\ndef test_sokalmichener():\n # Test that 
sokalmichener has the same result for bool and int inputs.\n p = [True, True, False]\n q = [True, False, True]\n x = [int(b) for b in p]\n y = [int(b) for b in q]\n dist1 = sokalmichener(p, q)\n dist2 = sokalmichener(x, y)\n # These should be exactly the same.\n assert_equal(dist1, dist2)\n\n\ndef test_modifies_input():\n # test whether cdist or pdist modifies input arrays\n X1 = np.asarray([[1., 2., 3.],\n [1.2, 2.3, 3.4],\n [2.2, 2.3, 4.4],\n [22.2, 23.3, 44.4]])\n X1_copy = X1.copy()\n with suppress_warnings() as w:\n w.filter(message=\"'wminkowski' metric is deprecated\")\n for metric in _METRICS_NAMES:\n kwargs = {\"w\": 1.0 / X1.std(axis=0)} if metric == \"wminkowski\" else {}\n cdist(X1, X1, metric, **kwargs)\n pdist(X1, metric, **kwargs)\n assert_array_equal(X1, X1_copy)\n\n\ndef test_Xdist_deprecated_args():\n # testing both cdist and pdist deprecated warnings\n X1 = np.asarray([[1., 2., 3.],\n [1.2, 2.3, 3.4],\n [2.2, 2.3, 4.4],\n [22.2, 23.3, 44.4]])\n weights = np.arange(3)\n for metric in _METRICS_NAMES:\n kwargs = {\"w\": weights} if metric == \"wminkowski\" else dict()\n with suppress_warnings() as w:\n w.filter(DeprecationWarning,\n message=\"'wminkowski' metric is deprecated\")\n with pytest.raises(TypeError):\n cdist(X1, X1, metric, 2., **kwargs)\n\n with pytest.raises(TypeError):\n pdist(X1, metric, 2., **kwargs)\n\n for arg in [\"p\", \"V\", \"VI\"]:\n kwargs = {arg:\"foo\"}\n\n if metric == \"wminkowski\":\n if \"p\" in kwargs or \"w\" in kwargs:\n continue\n kwargs[\"w\"] = weights\n\n if((arg == \"V\" and metric == \"seuclidean\") or\n (arg == \"VI\" and metric == \"mahalanobis\") or\n (arg == \"p\" and metric == \"minkowski\")):\n continue\n\n with suppress_warnings() as w:\n w.filter(DeprecationWarning,\n message=\"'wminkowski' metric is deprecated\")\n with pytest.raises(TypeError):\n cdist(X1, X1, metric, **kwargs)\n\n with pytest.raises(TypeError):\n pdist(X1, metric, **kwargs)\n\n\ndef test_Xdist_non_negative_weights():\n X = eo['random-float32-data'][::5, ::2]\n w = np.ones(X.shape[1])\n w[::5] = -w[::5]\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning,\n message=\"'wminkowski' metric is deprecated\")\n for metric in _METRICS_NAMES:\n if metric in ['seuclidean', 'mahalanobis', 'jensenshannon']:\n continue\n\n for m in [metric, eval(metric), \"test_\" + metric]:\n assert_raises(ValueError, pdist, X, m, w=w)\n assert_raises(ValueError, cdist, X, X, m, w=w)\n\n\ndef test__validate_vector():\n x = [1, 2, 3]\n y = _validate_vector(x)\n assert_array_equal(y, x)\n\n y = _validate_vector(x, dtype=np.float64)\n assert_array_equal(y, x)\n assert_equal(y.dtype, np.float64)\n\n x = [1]\n y = _validate_vector(x)\n assert_equal(y.ndim, 1)\n assert_equal(y, x)\n\n x = 1\n y = _validate_vector(x)\n assert_equal(y.ndim, 1)\n assert_equal(y, [x])\n\n x = np.arange(5).reshape(1, -1, 1)\n y = _validate_vector(x)\n assert_equal(y.ndim, 1)\n assert_array_equal(y, x[0, :, 0])\n\n x = [[1, 2], [3, 4]]\n assert_raises(ValueError, _validate_vector, x)\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.dot", "numpy.random.rand", "numpy.testing.suppress_warnings", "numpy.finfo", "numpy.int_", "scipy.spatial.distance.wminkowski", "numpy.issubdtype", "numpy.full_like", "numpy.linalg.norm", "scipy.spatial.distance.is_valid_y", "numpy.empty", "scipy.spatial.distance.minkowski", "scipy.spatial.distance.num_obs_y", "scipy.spatial.distance.is_valid_dm", "numpy.nonzero", "numpy.take", "numpy.arange", "numpy.sqrt", "numpy.append", "numpy.linalg.inv", "numpy.vstack", "numpy.array", "scipy.spatial.distance.sokalmichener", "numpy.testing.assert_equal", "numpy.testing.assert_almost_equal", "numpy.zeros", "scipy.spatial.distance.squareform", "numpy.ma.masked_invalid", "numpy.testing.assert_", "numpy.float64", "numpy.float32", "numpy.loadtxt", "numpy.bool_", "scipy.spatial.distance._validate_vector", "numpy.iinfo", "scipy.spatial.distance.cdist", "numpy.ma.getmask", "scipy.spatial.distance.pdist", "numpy.cov", "numpy.asarray", "numpy.random.RandomState", "scipy.spatial.distance.mahalanobis", "numpy.sum", "numpy.uint", "numpy.ones", "numpy.testing.assert_array_equal", "scipy.spatial.distance.correlation", "scipy.spatial.distance.num_obs_dm", "numpy.atleast_1d", "numpy.ravel", "numpy.abs", "numpy.intp", "numpy.asanyarray", "numpy.var" ] ]
alisure-fork/PoolNet
[ "b951056a7bcfb1f80024da9df5145289205e3391" ]
[ "main_my3_bs1_Res50.py" ]
[ "import os\nimport cv2\nimport math\nimport time\nimport torch\nimport random\nimport numbers\nimport numpy as np\nfrom torch import nn\nfrom PIL import Image\nimport scipy.misc as sm\nfrom torch.nn import init\nfrom torch.optim import Adam\nfrom torch.utils import data\nfrom torch.backends import cudnn\nfrom torchvision import transforms\nimport torchvision.utils as vutils\nfrom alisuretool.Tools import Tools\nfrom collections import OrderedDict\nfrom torch.autograd import Variable\nfrom dataset.dataset import ImageDataTrain\nfrom torch.nn import utils, functional as F\n\n\nclass RandomHorizontalFlip(object):\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n if random.random() < 0.5:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\n return {'image': img, 'label': mask}\n\n pass\n\n\nclass ImageDataTrain2(data.Dataset):\n\n def __init__(self, data_root, data_list):\n self.sal_root = data_root\n self.sal_source = data_list\n with open(self.sal_source, 'r') as f:\n self.sal_list = [x.strip() for x in f.readlines()]\n self.sal_num = len(self.sal_list)\n\n self.transform1 = transforms.Compose([RandomHorizontalFlip()])\n self.transform2 = transforms.Compose([\n transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.transform3 = transforms.Compose([transforms.ToTensor()])\n pass\n\n def __getitem__(self, item):\n im_name = self.sal_list[item % self.sal_num].split()[0]\n gt_name = self.sal_list[item % self.sal_num].split()[1]\n image = Image.open(os.path.join(self.sal_root, im_name)).convert(\"RGB\")\n label = Image.open(os.path.join(self.sal_root, gt_name)).convert(\"L\")\n\n if image.size == label.size:\n sample = {'image': image, 'label': label}\n sample = self.transform1(sample)\n image, label = sample['image'], sample['label']\n\n image = self.transform2(image)\n label = self.transform3(label)\n else:\n Tools.print('IMAGE ERROR, PASSING {} {}'.format(im_name, gt_name))\n image, label = self.__getitem__(np.random.randint(0, self.sal_num))\n pass\n return image, label\n\n def __len__(self):\n return self.sal_num\n\n pass\n\n\nclass ImageDataTest(data.Dataset):\n\n def __init__(self, sal_mode):\n self.data_source = self.get_test_info(sal_mode)\n self.data_root = self.data_source[\"image_root\"]\n with open(self.data_source[\"image_source\"], 'r') as f:\n self.image_list = [x.strip() for x in f.readlines()]\n self.image_num = len(self.image_list)\n\n self.transform = transforms.Compose([\n transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n pass\n\n def __getitem2__(self, item):\n name = self.image_list[item % self.image_num]\n image = Image.open(os.path.join(self.data_root, name)).convert(\"RGB\")\n image = self.transform(image)\n return image, name\n\n def __getitem__(self, item):\n name = self.image_list[item % self.image_num]\n im = cv2.imread(os.path.join(self.data_root, name))\n in_ = np.array(im, dtype=np.float32)\n in_ -= np.array((104.00699, 116.66877, 122.67892))\n in_ = in_.transpose((2, 0, 1))\n in_ = torch.Tensor(in_)\n return in_, name\n\n def __len__(self):\n return self.image_num\n\n @staticmethod\n def get_test_info(sal_mode='e'):\n result = {}\n if sal_mode == 'e':\n result[\"image_root\"] = './data/ECSSD/Imgs/'\n result[\"image_source\"] = './data/ECSSD/test.lst'\n elif sal_mode == 'p':\n image_root, image_source = './data/PASCALS/Imgs/', './data/PASCALS/test.lst'\n result[\"image_root\"] = 
image_root\n result[\"image_source\"] = image_source\n elif sal_mode == 'd':\n image_root, image_source = './data/DUTOMRON/Imgs/', './data/DUTOMRON/test.lst'\n result[\"image_root\"] = image_root\n result[\"image_source\"] = image_source\n elif sal_mode == 'h':\n image_root, image_source = './data/HKU-IS/Imgs/', './data/HKU-IS/test.lst'\n result[\"image_root\"] = image_root\n result[\"image_source\"] = image_source\n elif sal_mode == 's':\n image_root, image_source = './data/SOD/Imgs/', './data/SOD/test.lst'\n result[\"image_root\"] = image_root\n result[\"image_source\"] = image_source\n elif sal_mode == 't':\n image_root, image_source = './data/DUTS/DUTS-TE/DUTS-TE-Image/', './data/DUTS/DUTS-TE/test.lst'\n mask_root = './data/DUTS/DUTS-TE/DUTS-TE-Mask/'\n result[\"image_root\"] = image_root\n result[\"mask_root\"] = mask_root\n result[\"image_source\"] = image_source\n elif sal_mode == 'm_r': # for speed test\n image_root, image_source = './data/MSRA/Imgs_resized/', './data/MSRA/test_resized.lst'\n result[\"image_root\"] = image_root\n result[\"image_source\"] = image_source\n else:\n raise Exception(\".................\")\n return result\n\n pass\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change\n self.bn1 = nn.BatchNorm2d(planes,)\n self.conv2 = nn.Conv2d(planes, planes, 3, stride=1, padding=dilation, bias=False, dilation=dilation)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n\n for i in self.bn1.parameters():\n i.requires_grad = False\n for i in self.bn2.parameters():\n i.requires_grad = False\n for i in self.bn3.parameters():\n i.requires_grad = False\n pass\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n pass\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)\n\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=2)\n\n for i in self.bn1.parameters():\n i.requires_grad = False\n\n self.weight_init(self.modules())\n pass\n\n @staticmethod\n def weight_init(modules):\n for m in modules:\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, 0.01)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n pass\n pass\n\n def _make_layer(self, block, planes, blocks, 
stride=1, dilation=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:\n downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1,\n stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion))\n for i in downsample._modules['1'].parameters():\n i.requires_grad = False\n layers = [block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample)]\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, dilation=dilation))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n tmp_x = []\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n tmp_x.append(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n tmp_x.append(x)\n x = self.layer2(x)\n tmp_x.append(x)\n x = self.layer3(x)\n tmp_x.append(x)\n x = self.layer4(x)\n tmp_x.append(x)\n\n return tmp_x\n\n def load_pretrained_model(self, pretrained_model=\"./pretrained/resnet50_caffe.pth\"):\n self.load_state_dict(torch.load(pretrained_model), strict=False)\n pass\n pass\n\n\nclass DeepPoolLayer(nn.Module):\n\n def __init__(self, k, k_out, need_x2, need_fuse):\n super(DeepPoolLayer, self).__init__()\n self.need_x2 = need_x2\n self.need_fuse = need_fuse\n\n self.pool2 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.pool4 = nn.AvgPool2d(kernel_size=4, stride=4)\n self.pool8 = nn.AvgPool2d(kernel_size=8, stride=8)\n self.conv1 = nn.Conv2d(k, k, 3, 1, 1, bias=False)\n self.conv2 = nn.Conv2d(k, k, 3, 1, 1, bias=False)\n self.conv3 = nn.Conv2d(k, k, 3, 1, 1, bias=False)\n\n self.conv_sum = nn.Conv2d(k, k_out, 3, 1, 1, bias=False)\n if self.need_fuse:\n self.conv_sum_c = nn.Conv2d(k_out, k_out, 3, 1, 1, bias=False)\n self.relu = nn.ReLU()\n pass\n\n def forward(self, x, x2=None, x3=None):\n x_size = x.size()\n\n y1 = self.conv1(self.pool2(x))\n y2 = self.conv2(self.pool4(x))\n y3 = self.conv3(self.pool8(x))\n res = torch.add(x, F.interpolate(y1, x_size[2:], mode='bilinear', align_corners=True))\n res = torch.add(res, F.interpolate(y2, x_size[2:], mode='bilinear', align_corners=True))\n res = torch.add(res, F.interpolate(y3, x_size[2:], mode='bilinear', align_corners=True))\n res = self.relu(res)\n\n if self.need_x2:\n res = F.interpolate(res, x2.size()[2:], mode='bilinear', align_corners=True)\n\n res = self.conv_sum(res)\n\n if self.need_fuse:\n res = self.conv_sum_c(torch.add(torch.add(res, x2), x3))\n return res\n\n pass\n\n\nclass PoolNet(nn.Module):\n\n def __init__(self):\n super(PoolNet, self).__init__()\n # BASE\n self.resnet = ResNet(Bottleneck, [3, 4, 6, 3])\n\n # Convert\n self.relu = nn.ReLU(inplace=True)\n self.convert1 = nn.Conv2d(64, 128, 1, 1, bias=False)\n self.convert2 = nn.Conv2d(256, 256, 1, 1, bias=False)\n self.convert3 = nn.Conv2d(512, 256, 1, 1, bias=False)\n self.convert4 = nn.Conv2d(1024, 512, 1, 1, bias=False)\n self.convert5 = nn.Conv2d(2048, 512, 1, 1, bias=False)\n\n # PPM\n ind = 512\n self.ppms_pre = nn.Conv2d(2048, ind, 1, 1, bias=False) # 2048 -> 512\n self.ppm1 = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Conv2d(ind, ind, 1, 1, bias=False), nn.ReLU(inplace=True))\n self.ppm2 = nn.Sequential(nn.AdaptiveAvgPool2d(3), nn.Conv2d(ind, ind, 1, 1, bias=False), nn.ReLU(inplace=True))\n self.ppm3 = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Conv2d(ind, ind, 1, 1, bias=False), nn.ReLU(inplace=True))\n self.ppm_cat = nn.Sequential(nn.Conv2d(ind * 4, ind, 3, 1, 1, bias=False), nn.ReLU(inplace=True))\n\n # INFO\n out_dim = [128, 256, 
256, 512]\n self.info1 = nn.Sequential(nn.Conv2d(ind, out_dim[0], 3, 1, 1, bias=False), nn.ReLU(inplace=True))\n self.info2 = nn.Sequential(nn.Conv2d(ind, out_dim[1], 3, 1, 1, bias=False), nn.ReLU(inplace=True))\n self.info3 = nn.Sequential(nn.Conv2d(ind, out_dim[2], 3, 1, 1, bias=False), nn.ReLU(inplace=True))\n self.info4 = nn.Sequential(nn.Conv2d(ind, out_dim[3], 3, 1, 1, bias=False), nn.ReLU(inplace=True))\n\n # DEEP POOL\n deep_pool = [[512, 512, 256, 256, 128], [512, 256, 256, 128, 128]]\n self.deep_pool5 = DeepPoolLayer(deep_pool[0][0], deep_pool[1][0], False, True)\n self.deep_pool4 = DeepPoolLayer(deep_pool[0][1], deep_pool[1][1], True, True)\n self.deep_pool3 = DeepPoolLayer(deep_pool[0][2], deep_pool[1][2], True, True)\n self.deep_pool2 = DeepPoolLayer(deep_pool[0][3], deep_pool[1][3], True, True)\n self.deep_pool1 = DeepPoolLayer(deep_pool[0][4], deep_pool[1][4], False, False)\n\n # ScoreLayer\n score = 128\n self.score = nn.Conv2d(score, 1, 1, 1)\n\n self.weight_init(self.modules())\n pass\n\n @staticmethod\n def weight_init(modules):\n for m in modules:\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, 0.01)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n pass\n pass\n\n def forward(self, x):\n # BASE\n feature1, feature2, feature3, feature4, _feature5 = self.resnet(x)\n feature1 = self.relu(self.convert1(feature1))\n feature2 = self.relu(self.convert2(feature2))\n feature3 = self.relu(self.convert3(feature3))\n feature4 = self.relu(self.convert4(feature4))\n feature5 = self.relu(self.convert5(_feature5))\n\n x_size = x.size()[2:]\n feature1_size = feature1.size()[2:]\n feature2_size = feature2.size()[2:]\n feature3_size = feature3.size()[2:]\n feature4_size = feature4.size()[2:]\n feature5_size = feature5.size()[2:]\n\n # PPM\n feature5_p1 = self.ppms_pre(_feature5)\n ppm_list = [feature5_p1,\n F.interpolate(self.ppm1(feature5_p1), feature5_size, mode='bilinear', align_corners=True),\n F.interpolate(self.ppm2(feature5_p1), feature5_size, mode='bilinear', align_corners=True),\n F.interpolate(self.ppm3(feature5_p1), feature5_size, mode='bilinear', align_corners=True)]\n ppm_cat = self.ppm_cat(torch.cat(ppm_list, dim=1))\n\n # INFO\n info1 = self.info1(F.interpolate(ppm_cat, feature1_size, mode='bilinear', align_corners=True))\n info2 = self.info2(F.interpolate(ppm_cat, feature2_size, mode='bilinear', align_corners=True))\n info3 = self.info3(F.interpolate(ppm_cat, feature3_size, mode='bilinear', align_corners=True))\n info4 = self.info4(F.interpolate(ppm_cat, feature4_size, mode='bilinear', align_corners=True))\n\n # DEEP POOL\n merge = self.deep_pool5(feature5, feature4, info4) # A + F\n merge = self.deep_pool4(merge, feature3, info3) # A + F\n merge = self.deep_pool3(merge, feature2, info2) # A + F\n merge = self.deep_pool2(merge, feature1, info1) # A + F\n merge = self.deep_pool1(merge) # A\n\n # ScoreLayer\n merge = self.score(merge)\n if x_size is not None:\n merge = F.interpolate(merge, x_size, mode='bilinear', align_corners=True)\n return merge\n\n pass\n\n\nclass Solver(object):\n\n def __init__(self, train_loader, epoch, iter_size, save_folder, show_every, lr, wd, lr_decay_epoch):\n self.train_loader = train_loader\n self.iter_size = iter_size\n self.epoch = epoch\n self.show_every = show_every\n self.save_folder = save_folder\n\n self.wd = wd\n self.lr = lr\n self.lr_decay_epoch = lr_decay_epoch\n\n self.net = self.build_model()\n self.optimizer = 
Adam(filter(lambda p: p.requires_grad, self.net.parameters()),\n lr=self.lr, weight_decay=self.wd)\n # self.optimizer = Adam(self.net.parameters(), lr=self.lr, weight_decay=self.wd)\n pass\n\n def build_model(self):\n net = PoolNet().cuda()\n net.eval() # use_global_stats = True\n net.resnet.load_pretrained_model()\n self._print_network(net, 'PoolNet Structure')\n return net\n\n def train(self):\n iter_num = len(self.train_loader.dataset)\n ave_grad = 0\n for epoch in range(self.epoch):\n r_sal_loss = 0\n self.net.zero_grad()\n for i, (sal_image, sal_label) in enumerate(self.train_loader):\n sal_image, sal_label = sal_image.cuda(), sal_label.cuda()\n\n sal_pred = self.net(sal_image)\n sal_loss_fuse = F.binary_cross_entropy_with_logits(sal_pred, sal_label, reduction='sum')\n\n sal_loss = sal_loss_fuse / self.iter_size\n r_sal_loss += sal_loss.data\n\n sal_loss.backward()\n\n ave_grad += 1\n\n # accumulate gradients as done in DSS\n if ave_grad % self.iter_size == 0:\n self.optimizer.step()\n self.optimizer.zero_grad()\n ave_grad = 0\n pass\n\n if i % self.show_every == 0:\n Tools.print('epoch: [{:2d}/{:2d}], lr={:.6f} iter:[{:5d}/{:5d}] || Sal:{:10.4f}'.format(\n epoch, self.epoch, self.lr, i, iter_num, r_sal_loss / self.show_every))\n r_sal_loss = 0\n pass\n pass\n\n torch.save(self.net.state_dict(), '{}/epoch_{}.pth'.format(self.save_folder, epoch + 1))\n\n if epoch in self.lr_decay_epoch:\n self.lr = self.lr * 0.1\n self.optimizer = Adam(filter(lambda p: p.requires_grad, self.net.parameters()),\n lr=self.lr, weight_decay=self.wd)\n # self.optimizer = Adam(self.net.parameters(), lr=self.lr, weight_decay=self.wd)\n pass\n pass\n\n torch.save(self.net.state_dict(), '{}/final.pth'.format(self.save_folder))\n pass\n\n @staticmethod\n def test(model_path, test_loader, result_fold):\n Tools.print('Loading trained model from {}'.format(model_path))\n net = PoolNet().cuda()\n net.eval()\n net.load_state_dict(torch.load(model_path))\n\n time_s = time.time()\n img_num = len(test_loader)\n for i, (images, names) in enumerate(test_loader):\n if i % 100 == 0:\n Tools.print(\"test {} {}\".format(i, img_num))\n with torch.no_grad():\n images = images.cuda()\n pred = net(images)\n pred = np.squeeze(torch.sigmoid(pred).cpu().data.numpy()) * 255\n cv2.imwrite(os.path.join(result_fold, names[0][:-4] + '.png'), pred)\n time_e = time.time()\n Tools.print('Speed: %f FPS' % (img_num / (time_e - time_s)))\n Tools.print('Test Done!')\n pass\n\n @classmethod\n def eval(cls, label_list, eval_list, th_num=25):\n epoch_mae = 0.0\n epoch_prec = np.zeros(shape=(th_num,)) + 1e-6\n epoch_recall = np.zeros(shape=(th_num,)) + 1e-6\n for i, (label_name, eval_name) in enumerate(zip(label_list, eval_list)):\n # Tools.print(\"{} {}\".format(label_name, eval_name))\n if i % 100 == 0:\n Tools.print(\"eval {} {}\".format(i, len(label_list)))\n\n im_label = np.asarray(Image.open(label_name).convert(\"L\")) / 255\n im_eval = np.asarray(Image.open(eval_name).convert(\"L\")) / 255\n\n mae = cls._eval_mae(im_eval, im_label)\n prec, recall = cls._eval_pr(im_eval, im_label, th_num)\n epoch_mae += mae\n epoch_prec += prec\n epoch_recall += recall\n pass\n\n avg_mae = epoch_mae/len(label_list)\n avg_prec, avg_recall = epoch_prec/len(label_list), epoch_recall/len(label_list)\n score4 = (1 + 0.3) * avg_prec * avg_recall / (0.3 * avg_prec + avg_recall)\n return avg_mae, score4.max(), np.mean(score4)\n\n @staticmethod\n def _eval_mae(y_pred, y):\n return np.abs(y_pred - y).mean()\n\n @staticmethod\n def _eval_pr(y_pred, y, th_num=100):\n 
prec, recall = np.zeros(shape=(th_num,)), np.zeros(shape=(th_num,))\n th_list = np.linspace(0, 1 - 1e-10, th_num)\n for i in range(th_num):\n y_temp = y_pred >= th_list[i]\n tp = (y_temp * y).sum()\n prec[i], recall[i] = tp / (y_temp.sum() + 1e-20), tp / y.sum()\n pass\n return prec, recall\n\n @staticmethod\n def _print_network(model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n Tools.print(name)\n Tools.print(model)\n Tools.print(\"The number of parameters: {}\".format(num_params))\n pass\n\n pass\n\n\ndef my_train(run_name, lr=5e-5, lr_decay_epoch=[20, ], wd=5e-4, epoch=30, iter_size=10, show_every=50):\n train_root, train_list = \"./data/DUTS/DUTS-TR\", \"./data/DUTS/DUTS-TR/train_pair.lst\"\n save_folder = Tools.new_dir('./results/{}'.format(run_name))\n\n dataset = ImageDataTrain(train_root, train_list)\n train_loader = data.DataLoader(dataset=dataset, batch_size=1, shuffle=True, num_workers=8)\n\n train = Solver(train_loader, epoch, iter_size, save_folder, show_every, lr, wd, lr_decay_epoch)\n train.train()\n pass\n\n\ndef my_test(run_name=\"run-6\", sal_mode=\"t\", model_path='./results/run-6/epoch_22.pth'):\n result_fold = Tools.new_dir(\"./results/test/{}/{}\".format(run_name, sal_mode))\n\n dataset = ImageDataTest(sal_mode)\n test_loader = data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1)\n Solver.test(model_path, test_loader, result_fold)\n\n label_list = [os.path.join(dataset.data_source[\"mask_root\"],\n \"{}.png\".format(os.path.splitext(_)[0])) for _ in dataset.image_list]\n eval_list = [os.path.join(result_fold, \"{}.png\".format(os.path.splitext(_)[0])) for _ in dataset.image_list]\n mae, score_max, score_mean = Solver.eval(label_list, eval_list)\n Tools.print(\"{} {} {}\".format(mae, score_max, score_mean))\n pass\n\n\n\"\"\"\n1 2020-08-25 12:09:19 0.09290798801529357 0.7799409876268433 0.6638541039865719\n2 2020-08-25 12:15:50 0.0767591619275389 0.8244083528593976 0.710306342252603\n3 2020-08-25 12:59:07 0.06114729114043897 0.8137821245940858 0.7243047221178921\n4 2020-08-25 13:39:03 0.05508279566306756 0.8395519307893983 0.7529511719970813\n6 2020-08-25 14:31:28 0.05038543232953151 0.8492332687482643 0.7817261060934924\n7 2020-08-25 16:16:35 0.04884050921221514 0.8494100661920133 0.7814156208512428\n14 2020-08-25 18:01:48 0.05409165836261161 0.7890804447979982 0.739574602883087\n17 2020-08-25 19:40:28 0.04831605306314065 0.8552850196469899 0.7929382611034238\n25 2020-08-25 23:11:28 0.039914510669972945 0.8675700565181682 0.8170381191272179\n30 2020-08-26 11:11:15 0.03872968022633624 0.8682404968758722 0.8208055790036504\n\"\"\"\n\n\nif __name__ == '__main__':\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n _run_name = \"run-bs1-Res50-3\"\n # my_train(run_name=_run_name, lr=5e-5, lr_decay_epoch=[20, ], wd=5e-4, epoch=30, iter_size=10, show_every=1000)\n my_test(run_name=_run_name, sal_mode=\"t\", model_path='./results/{}/epoch_30.pth'.format(_run_name))\n pass\n" ]
[ [ "torch.cat", "torch.nn.BatchNorm2d", "numpy.mean", "torch.load", "torch.sigmoid", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "numpy.random.randint", "torch.utils.data.DataLoader", "torch.Tensor", "torch.nn.functional.binary_cross_entropy_with_logits", "numpy.array", "numpy.zeros", "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.functional.interpolate", "torch.no_grad", "torch.add", "numpy.abs", "torch.nn.AdaptiveAvgPool2d", "numpy.linspace" ] ]
fabianp/scipy-lecture-notes
[ "716752791585c5d127b4fc524ed732de8c928fdc" ]
[ "pyplots/image_spectral_clustering.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scikits.learn.feature_extraction import image\nfrom scikits.learn.cluster import spectral_clustering\n\n################################################################################\nl = 100\nx, y = np.indices((l, l))\n\ncenter1 = (28, 24)\ncenter2 = (40, 50)\ncenter3 = (67, 58)\ncenter4 = (24, 70)\n\nradius1, radius2, radius3, radius4 = 16, 14, 15, 14\n\ncircle1 = (x - center1[0])**2 + (y - center1[1])**2 < radius1**2\ncircle2 = (x - center2[0])**2 + (y - center2[1])**2 < radius2**2\ncircle3 = (x - center3[0])**2 + (y - center3[1])**2 < radius3**2\ncircle4 = (x - center4[0])**2 + (y - center4[1])**2 < radius4**2\n\n################################################################################\n# 4 circles\nimg = circle1 + circle2 + circle3 + circle4\nmask = img.astype(bool)\nimg = img.astype(float)\n\nimg += 1 + 0.2*np.random.randn(*img.shape)\n\n# Convert the image into a graph with the value of the gradient on the\n# edges.\ngraph = image.img_to_graph(img, mask=mask)\n\n# Take a decreasing function of the gradient: we take it weakly\n# dependant from the gradient the segmentation is close to a voronoi\ngraph.data = np.exp(-graph.data/graph.data.std())\n\n# Force the solver to be arpack, since amg is numerically\n# unstable on this example\nlabels = spectral_clustering(graph, k=4, mode='arpack')\nlabel_im = -np.ones(mask.shape)\nlabel_im[mask] = labels\n\nplt.figure(figsize=(6, 3))\nplt.subplot(121)\nplt.imshow(img, cmap=plt.cm.spectral, interpolation='nearest')\nplt.axis('off')\nplt.subplot(122)\nplt.imshow(label_im, cmap=plt.cm.spectral, interpolation='nearest')\nplt.axis('off')\n\nplt.subplots_adjust(wspace=0, hspace=0., top=0.99, bottom=0.01, left=0.01, right=0.99)\n\n" ]
[ [ "numpy.ones", "numpy.random.randn", "matplotlib.pyplot.figure", "numpy.indices", "matplotlib.pyplot.imshow", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.axis", "matplotlib.pyplot.subplot" ] ]
llayer/cmsopen
[ "876e35eb4dfdf727ae91be5c2db29fc087325db7" ]
[ "inferno/training/train.py" ]
[ "from pytorch_inferno.inferno import *\nfrom pytorch_inferno.callback import *\nfrom pytorch_inferno.model_wrapper import *\nfrom pytorch_inferno.data import *\nfrom fastcore.all import partialler\nfrom torch import optim, autograd, nn, Tensor\nimport pandas as pd\nimport numpy as np\n\nimport training.hep_model as hep_model\n\ndef train_inferno(data, args, epochs=100 ):\n \n # Set up network\n lr = args[\"inferno_lr\"]\n temp = args[\"temperature\"]\n neurons = args[\"inferno_neurons\"]\n bins = args[\"inferno_bins\"]\n nfeat = len(args[\"features\"])\n \n print(\"*********************\")\n print(\"Summary INFERNO model\")\n print(\"*********************\")\n print(\"Learning rate\", lr)\n print(\"Temperature\", temp)\n print(\"Neurons\", neurons)\n print(\"Bins\", bins)\n \n if args[\"use_softhist\"] == False:\n \n net_inferno = nn.Sequential(nn.Linear(nfeat,neurons), nn.ReLU(),\n nn.Linear(neurons,neurons), nn.ReLU(),\n nn.Linear(neurons,bins), VariableSoftmax(temp))\n\n else:\n net_inferno = nn.Sequential(nn.Linear(nfeat,neurons), nn.ReLU(),\n nn.Linear(neurons,neurons), nn.ReLU(),\n nn.Linear(neurons,1), nn.Sigmoid())\n \n lt = LossTracker()\n sb = SaveBest(args[\"outpath\"] + \"/weights/best_inferno.h5\")\n hep_inf = hep_model.HEPInferno(b_true=args[\"b_true\"], \n mu_true=args[\"mu_true\"],\n n_shape_systs=len(args[\"shape_syst\"]),\n n_weight_systs=len(args[\"weight_syst\"]),\n shape_norm_sigma=args[\"shape_norm_sigma\"],\n is_sig_shape = args[\"is_sig_shape\"],\n s_norm_sigma = list(args[\"s_norm_sigma\"].values()),\n b_norm_sigma = list(args[\"b_norm_sigma\"].values()),\n b_rate_param = args[\"b_rate_param\"],\n use_hist=args[\"use_softhist\"],\n bins = bins,\n sigmoid_delta = args[\"sigmoid_delta\"],\n ignore_shape_norm=args[\"ignore_shape_norm\"],\n asymm_shape_norm = args[\"asymm_shape_norm\"],\n store_significance = args[\"store_significance\"],\n interp_algo=args[\"interp_algo\"])\n #init_net(net_inferno)\n model_inferno = ModelWrapper(net_inferno)\n\n model_inferno.fit(epochs, data=data, opt=partialler(optim.Adam,lr=lr), loss=None,\n cbs=[hep_inf, lt, sb]) \n \n idx_best = lt.losses[\"val\"].index(sb.min_loss)\n print(\"Evaluating best INFERNO model from epoch\", idx_best, \"with loss\", lt.losses[\"val\"][idx_best])\n \n shapes = {\"bkg\" : hep_inf.val_shapes[\"bkg\"],\n \"sig\" : hep_inf.val_shapes[\"sig\"],\n \"bkg_trn\": hep_inf.trn_shapes[\"bkg\"],\n \"sig_trn\": hep_inf.trn_shapes[\"sig\"],\n #\"sig_up\" : hep_inf.val_shapes[\"sig_up\"],\n #\"sig_down\" : hep_inf.val_shapes[\"sig_down\"]\n \"up\" : hep_inf.val_shapes[\"up\"],\n \"down\" : hep_inf.val_shapes[\"down\"]\n }\n \n return model_inferno, {\"loss\":lt, \"idx_best\": idx_best, \"covs\": hep_inf.covs, \"shapes\" : shapes}\n\n\ndef train_bce(data, args, epochs=100):\n \n lr = args[\"bce_lr\"]\n neurons = args[\"bce_neurons\"]\n nfeat = len(args[\"features\"]) \n neurons = args[\"bce_neurons\"]\n bins = args[\"bce_bins\"]\n \n \"\"\"\n net_bce = nn.Sequential(nn.Linear(nfeat,neurons), nn.ReLU(),\n nn.Linear(neurons,8), nn.ReLU(),\n nn.Linear(8,1), nn.Sigmoid())\n \"\"\"\n net_bce = nn.Sequential(nn.Linear(nfeat,neurons), nn.ReLU(),\n nn.Linear(neurons,neurons), nn.ReLU(),\n nn.Linear(neurons,1), nn.Sigmoid())\n \n print(\"*********************\")\n print(\"Summary BCE model\")\n print(\"*********************\")\n print(\"Learning rate\", lr)\n print(\"Neurons\", neurons)\n print(\"Bins\", bins)\n \n #init_net(net) \n ct = hep_model.HEPInferno(b_true=args[\"b_true\"], \n mu_true=args[\"mu_true\"],\n 
n_shape_systs=len(args[\"shape_syst\"]),\n n_weight_systs=len(args[\"weight_syst\"]),\n shape_norm_sigma=args[\"shape_norm_sigma\"],\n is_sig_shape = args[\"is_sig_shape\"],\n s_norm_sigma = list(args[\"s_norm_sigma\"].values()),\n b_norm_sigma = list(args[\"b_norm_sigma\"].values()),\n b_rate_param = args[\"b_rate_param\"],\n bins = bins,\n use_hist=True,\n ignore_loss=True,\n ignore_shape_norm=args[\"ignore_shape_norm\"],\n asymm_shape_norm = args[\"asymm_shape_norm\"],\n store_significance = args[\"store_significance\"],\n interp_algo=args[\"interp_algo\"])\n lt = LossTracker()\n sb = SaveBest(args[\"outpath\"] + \"/weights/best_bce.h5\")\n model_bce = ModelWrapper(net_bce)\n model_bce.fit(epochs, data=data, opt=partialler(optim.Adam, lr=lr), loss=nn.BCELoss(),\n cbs=[lt, ct, sb])\n\n idx_best = lt.losses[\"val\"].index(sb.min_loss)\n print(\"Evaluating best BCE model from epoch\", idx_best, \"with loss\", lt.losses[\"val\"][idx_best])\n \n shapes = {\"bkg\" : ct.val_shapes[\"bkg\"],\n \"sig\" : ct.val_shapes[\"sig\"],\n \"bkg_trn\": ct.trn_shapes[\"bkg\"],\n \"sig_trn\": ct.trn_shapes[\"sig\"],\n #\"sig_up\" : ct.val_shapes[\"sig_up\"],\n #\"sig_down\" : ct.val_shapes[\"sig_down\"],\n \"up\" : ct.val_shapes[\"up\"],\n \"down\" : ct.val_shapes[\"down\"],\n }\n \n #bce_trn_covs = ct.covs[\"trn\"]\n #bce_val_covs = ct.covs[\"val\"]\n return model_bce, {\"loss\":lt, \"idx_best\": idx_best, \"covs\": ct.covs, \"shapes\": shapes}\n\n\n\n#\n# Predict test set\n#\n\ndef pred_test(model, test_dl, use_hist=False, name=\"inferno\", bins=10.):\n\n if use_hist == True:\n preds = model._predict_dl(test_dl).squeeze() \n else:\n preds = model._predict_dl(test_dl, pred_cb=InfernoPred())\n \n df = pd.DataFrame({'pred':preds})\n df['gen_target'] = test_dl.dataset.y\n \n if (\"inferno\" in name) & (use_hist == False):\n \n #Sort according to signal fraction\n sig = df[df[\"gen_target\"]==1][\"pred\"]\n bkg = df[df[\"gen_target\"]==0][\"pred\"]\n x_range = (0.,bins)\n sig_h = np.histogram(sig, bins=bins, range=x_range, density=True)[0]\n bkg_h = np.histogram(bkg, bins=bins, range=x_range, density=True)[0]\n sig_bkg = sig_h/(bkg_h+10e-7)\n sor = np.argsort(sig_bkg)\n inv_d = dict(enumerate(np.argsort(sig_bkg))) \n order_d = {v: k for k, v in inv_d.items()}\n df['pred_sorted'] = df[\"pred\"].replace(order_d)\n else:\n order_d = {}\n\n return df, order_d\n\n\n#\n# Predict the nominal samples\n#\n\ndef pred_nominal(samples, features, model, scaler, name, sort_bins = False, use_hist = False, order_d = None):\n \n print(\"*********************\")\n print(\"Predicting\", name, \"samples\")\n \n #\"TTJets_signal\"\n for s in samples:\n X = samples[s][features].values\n X = scaler.transform(X)\n loader = WeightedDataLoader(DataSet(X, None, None), batch_size=1000)\n if use_hist == True:\n samples[s][name] = model._predict_dl(loader)\n else:\n samples[s][name] = model._predict_dl(loader, pred_cb=InfernoPred())\n if sort_bins == True:\n samples[s][name] = samples[s][name].replace(order_d)\n \n \n\n\n\n\n" ]
[ [ "torch.nn.Linear", "numpy.histogram", "torch.nn.Sigmoid", "pandas.DataFrame", "torch.nn.ReLU", "numpy.argsort", "torch.nn.BCELoss" ] ]
jcbird/ppv
[ "d550f4fff9cb0309d43b0d51e1406355ee0231be" ]
[ "src/ppv/groups.py" ]
[ "from . import ppv\nfrom . import plate\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.time import Time\nfrom astropy.table import vstack, Column\nfrom copy import copy\n\n\ndef available_plateruns():\n return ppv.available_plateruns()\n\n\ndef indx_in_plateruns(fieldname):\n return np.where(ppv._names_array == fieldname)[0]\n\n\ndef plates_of_field(fieldname): # returns a list\n plates = ppv.allplate_summary['plateid'][indx_in_plateruns(fieldname)]\n return plates.tolist()\n\n\ndef in_platerun(run_name):\n return np.where(ppv._platerun_array == run_name)[0]\n\n\nclass Field:\n \"\"\"\n Class to act as interface to fields.\n \"\"\"\n\n def __init__(self, fieldname):\n \"\"\"\n fieldname is string. Look for fieldnames in ppv.allplate_summary OR\n in plate_run.fieldnames\n \"\"\"\n self.name = fieldname\n self._platenums = plates_of_field(self.name)\n self._summary_indx = indx_in_plateruns(self.name)\n self.ra, self.dec = self._center()\n self.center = SkyCoord(self.ra * u.deg, self.dec * u.deg,\n obstime=Time(2015.5, format='decimalyear'))\n # TODO check epoch of field designation\n self.platerun, self.programname = self.meta()\n self._radius = 1.49 * u.degree\n self._plates = [plate.Plate(platenum) for platenum in self._platenums]\n # self._plugHoles = [plate.get_table(platenum) for platenum in self._plates]\n\n def __repr__(self):\n return f'Field({self.name!r})'\n\n def __str__(self):\n first = f'Field: {self.name!r}, RA: {self.ra!r}, Dec: {self.dec!r}\\n'\n second = f'Plate Numbers: {self._platenums!r}'\n return first + second\n\n @property\n def plates(self):\n return self._plates\n\n def _center(self):\n ra = ppv.allplate_summary['raCen'][self._summary_indx][0]\n dec = ppv.allplate_summary['decCen'][self._summary_indx][0]\n return ra, dec\n\n def _construct_skycoords(self):\n self._plugcoords = [SkyCoord(phole['target_ra'] * u.degree,\n phole['target_dec'] * u.degree,\n obstime=Time(2015.5, format='decimalyear')) for \n phole in self._plugHoles]\n return self._plugcoords\n\n\n @property\n def plate_data(self):\n \"\"\"\n Table(s) of data from yanny files describing plate(s).\n \"\"\"\n return self._plugHoles\n\n @property\n def plugged_coords(self):\n \"\"\"\n self.targets = self._load_table()\n Coordinates of targets assigned fibers on plate\n \"\"\"\n try:\n return self._plugcoords\n except AttributeError:\n return self._construct_skycoords()\n\n @property\n def targets(self):\n try:\n return self._targets\n except AttributeError:\n self._targets = self._load_table()\n return self._targets\n\n @property\n def science_targets(self):\n try:\n return self._science_targets\n except AttributeError:\n _tmp = self._load_table()\n self._science_targets = _tmp[_tmp['targettype'] == 'science']\n return self._science_targets\n\n def _load_table(self):\n \"\"\"\n Takes all converts plates in field and combines target tables.\n Creates new column with the fieldname. 
While repititive, this will\n make Plateruns much easier to implement.\n \"\"\"\n\n table = copy(vstack([pl.targets for pl in self.plates]))\n N_targets = len(table)\n field_column = Column(data=[self.name] * N_targets,\n name='field',\n dtype='S200')\n table.add_column(field_column)\n table.sort('catalogid') # sort by catalogID\n return table\n\n def _contains(self, catIDs):\n \"\"\"\n Checks for membership in a plate based on catalogID.\n\n Parameters\n ----------\n catIDs : array-like\n List of catalogIDs.\n\n # TODO make util function that checks if catIDs is a scalar or array\n \"\"\"\n try: # already array-like\n return np.in1d(self.targets['catalogid'], catIDs)\n except TypeError:\n return np.in1d(self.targets['catalogid'], np.array([catIDs]))\n\n def get_targets(self, catalogIDs):\n \"\"\"\n Return rows of the Field targets table given a list of catalogIDs\n \"\"\"\n return self.targets[self._contains(catalogIDs)]\n\n def meta(self):\n prun = ppv.allplate_summary['platerun'][self._summary_indx][0]\n programname = ppv.allplate_summary['programname'][self._summary_indx][0]\n return prun, programname\n\n def contains(self, catIDs):\n \"\"\"\n Checks for membership in a plate based on catalogID.\n ALL catIDs must be in plate to return True.\n\n Parameters\n ----------\n catIDs : array-like\n List of catalogIDs.\n\n \"\"\"\n try: # already array-like\n return np.in1d(catIDs, self.targets['catalogid'])\n except TypeError:\n return np.in1d(np.array([catIDs]), self.targets['catalogid'])\n\n\nclass Platerun:\n \"\"\"\n Class to act as interface to platerun.\n \"\"\"\n\n def __init__(self, run_name):\n if _check_platerun(run_name):\n pass # all is well, platerun available\n self.name = run_name\n self.fieldnames = self._get_fields()\n\n def _get_fields(self):\n idx = in_platerun(self.name)\n names = ppv.allplate_summary['name'].astype('U')[idx]\n return np.unique(names) # no field repeats\n\n @property\n def platesummary(self):\n return ppv.allplate_summary[in_platerun(self.name)]\n\n\n def load_fields(self):\n return [Field(fname) for fname in\n self.fieldnames]\n\n @property\n def fields(self):\n try:\n return self._fields\n except AttributeError:\n self._fields = self.load_fields()\n return self._fields\n\n @property\n def targets(self):\n try:\n return self._targets\n except AttributeError:\n print(f\"\"\"Please be patient.\n Initial target loading can take up to 1 second per field.\n Loading target data from {len(self.fieldnames)} Fields...\"\"\", flush=True)\n self._targets = self._load_table()\n return self._targets\n\n def _load_table(self):\n \"\"\"\n Takes all fields within platerun and combines target tables.\n \"\"\"\n\n table = vstack([field.targets for field in self.fields])\n table.sort('catalogid') # sort by catalogID\n return table\n\n def _contains(self, catIDs):\n \"\"\"\n Checks for membership in a plate based on catalogID.\n\n Parameters\n ----------\n catIDs : array-like\n List of catalogIDs.\n\n # TODO make util function that checks if catIDs is a scalar or array\n \"\"\"\n try: # already array-like\n return np.in1d(self.targets['catalogid'], catIDs)\n except TypeError:\n return np.in1d(self.targets['catalogid'], np.array([catIDs]))\n\n def get_targets(self, catalogIDs):\n \"\"\"\n Return rows of the Platerun targets table given a list of catalogIDs\n \"\"\"\n return self.targets[self._contains(catalogIDs)]\n\n\nclass PlateRunMissingError(Exception):\n def __init__(self, run_name):\n if run_name:\n self.message = run_name\n else:\n self.message = None\n\n def 
__str__(self):\n if self.message:\n first = f'\\nPlateRunMissingError, {self.message} is not an available platerun'\n second = f'\\n ======================================================== \\n'\n third = ' Run ppv.ppv.update_platefiles() to update your platerun summary file and try again.'\n fourth = f'\\n ======================================================== \\n'\n return first + second + third + fourth\n # return f'PlateRunMissingError, {self.message} is not an available platerun'\n else:\n return 'PlateRunMissingError has been raised'\n\n\ndef _check_platerun(run_name):\n if run_name in available_plateruns():\n return True # All is well\n else: # platerun NOT available\n raise PlateRunMissingError(run_name)\n\n\n\n\n\n" ]
[ [ "numpy.where", "numpy.array", "numpy.unique", "numpy.in1d" ] ]
burhanmudassar/smallObject
[ "55b6022eeeb8b0a34c8bf435505d0969d78a8e26" ]
[ "lib/layers/functions/detection.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Function\nfrom torch.autograd import Variable\nfrom lib.utils.box_utils import decode,nms\n# from lib.utils.nms.nms_wrapper import nms\nfrom lib.utils.timer import Timer\nfrom lib.nms.gpu_nms import gpu_nms\nfrom lib.nms.cpu_nms import cpu_nms\nfrom lib.nms.py_cpu_nms import py_cpu_nms\nimport numpy as np\n\nclass Detect(Function):\n \"\"\"At test time, Detect is the final layer of SSD. Decode location preds,\n apply non-maximum suppression to location predictions based on conf\n scores and threshold to a top_k number of output predictions for both\n confidence score and locations.\n \"\"\"\n def __init__(self, cfg, priors):\n self.num_classes = cfg.NUM_CLASSES\n self.background_label = cfg.BACKGROUND_LABEL\n self.conf_thresh = cfg.SCORE_THRESHOLD\n self.nms_thresh = cfg.IOU_THRESHOLD \n self.top_k = cfg.MAX_DETECTIONS \n self.variance = cfg.VARIANCE\n self.priors = priors\n\n # def forward(self, predictions, prior):\n # \"\"\"\n # Args:\n # loc_data: (tensor) Loc preds from loc layers\n # Shape: [batch,num_priors*4]\n # conf_data: (tensor) Shape: Conf preds from conf layers\n # Shape: [batch*num_priors,num_classes]\n # prior_data: (tensor) Prior boxes and variances from priorbox layers\n # Shape: [1,num_priors,4]\n # \"\"\"\n # loc, conf = predictions\n\n # loc_data = loc.data\n # conf_data = conf.data\n # prior_data = prior.data\n\n # num = loc_data.size(0) # batch size\n # num_priors = prior_data.size(0)\n # self.boxes = torch.zeros(1, num_priors, 4)\n # self.scores = torch.zeros(1, num_priors, self.num_classes)\n\n # if num == 1:\n # # size batch x num_classes x num_priors\n # conf_preds = conf_data.unsqueeze(0)\n\n # else:\n # conf_preds = conf_data.view(num, num_priors,\n # self.num_classes)\n # self.boxes.expand_(num, num_priors, 4)\n # self.scores.expand_(num, num_priors, self.num_classes)\n\n # # Decode predictions into bboxes.\n # for i in range(num):\n # decoded_boxes = decode(loc_data[i], prior_data, self.variance)\n # # For each class, perform nms\n # conf_scores = conf_preds[i].clone()\n # '''\n # c_mask = conf_scores.gt(self.thresh)\n # decoded_boxes = decoded_boxes[c_mask]\n # conf_scores = conf_scores[c_mask]\n # '''\n\n # conf_scores = conf_preds[i].clone()\n # num_det = 0\n # for cl in range(1, self.num_classes):\n # c_mask = conf_scores[cl].gt(self.conf_thresh)\n # scores = conf_scores[cl][c_mask]\n # if scores.dim() == 0:\n # continue\n # l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)\n # boxes = decoded_boxes[l_mask].view(-1, 4)\n # ids, count = nms(boxes, scores, self.nms_thresh, self.top_k)\n # self.output[i, cl, :count] = \\\n # torch.cat((scores[ids[:count]].unsqueeze(1),\n # boxes[ids[:count]]), 1)\n\n # return self.output\n\n\n def forward(self, predictions):\n \"\"\"\n Args:\n loc_data: (tensor) Loc preds from loc layers\n Shape: [batch,num_priors*4]\n conf_data: (tensor) Shape: Conf preds from conf layers\n Shape: [batch*num_priors,num_classes]\n prior_data: (tensor) Prior boxes and variances from priorbox layers\n Shape: [1,num_priors,4]\n \"\"\"\n loc, conf = predictions\n\n loc_data = loc.data\n conf_data = conf.data\n prior_data = self.priors.data\n\n num = loc_data.size(0) # batch size\n num_priors = prior_data.size(0)\n #self.output.zero_()\n if num == 1:\n # size batch x num_classes x num_priors\n conf_preds = conf_data.t().contiguous().unsqueeze(0)\n else:\n conf_preds = conf_data.view(num, num_priors,\n self.num_classes).transpose(2, 1)\n 
#self.output.expand_(num, self.num_classes, self.top_k, 5)\n output = torch.zeros(num, self.num_classes, self.top_k, 5)\n\n _t = {'decode': Timer(), 'misc': Timer(), 'box_mask':Timer(), 'score_mask':Timer(),'nms':Timer(), 'cpu':Timer(),'sort':Timer()}\n gpunms_time = 0\n scores_time=0\n box_time=0\n cpu_tims=0\n sort_time=0\n decode_time=0\n _t['misc'].tic()\n # Decode predictions into bboxes.\n for i in range(num):\n _t['decode'].tic()\n decoded_boxes = decode(loc_data[i], prior_data, self.variance)\n decode_time += _t['decode'].toc()\n # For each class, perform nms\n conf_scores = conf_preds[i].clone()\n num_det = 0\n for cl in range(1, self.num_classes):\n _t['cpu'].tic()\n c_mask = conf_scores[cl].gt(self.conf_thresh).nonzero().view(-1)\n cpu_tims+=_t['cpu'].toc()\n if c_mask.size(0) == 0:\n continue\n _t['score_mask'].tic()\n scores = conf_scores[cl][c_mask]\n scores_time+=_t['score_mask'].toc()\n if scores.size(0) == 0:\n continue\n _t['box_mask'].tic()\n # l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)\n # boxes = decoded_boxes[l_mask].view(-1, 4)\n boxes = decoded_boxes[c_mask, :]\n box_time+=_t['box_mask'].toc()\n # idx of highest scoring and non-overlapping boxes per class\n _t['nms'].tic()\n # cls_dets = torch.cat((boxes, scores), 1)\n # _, order = torch.sort(scores, 0, True)\n # cls_dets = cls_dets[order]\n # keep = nms(cls_dets, self.nms_thresh)\n # cls_dets = cls_dets[keep.view(-1).long()]\n try:\n new_boxes = boxes * 300\n # ids, count = nms(boxes, scores, self.nms_thresh, self.top_k)\n ids = gpu_nms(torch.cat((new_boxes, scores.unsqueeze(1)), 1).cpu().numpy(), self.nms_thresh)\n # new_ids_cpu = cpu_nms(torch.cat((boxes, scores.unsqueeze(1)), 1).cpu().numpy(), self.nms_thresh)\n\n if len(ids) > self.top_k:\n count = self.top_k\n else:\n count = len(ids)\n\n except:\n print(c_mask.size())\n print(boxes.size())\n print(scores.size())\n\n gpunms_time += _t['nms'].toc()\n output[i, cl, :count] = \\\n torch.cat((scores[ids[:count]].unsqueeze(1),\n boxes[ids[:count]]), 1)\n nms_time= _t['misc'].toc()\n # print(nms_time, cpu_tims, scores_time,box_time,gpunms_time)\n # flt = self.output.view(-1, 5)\n # _, idx = flt[:, 0].sort(0)\n # _, rank = idx.sort(0)\n # flt[(rank >= self.top_k).unsqueeze(1).expand_as(flt)].fill_(0)\n return output" ]
[ [ "torch.zeros" ] ]
caiyancheng/heyhey
[ "7756afdbe27beed2d6ac5a4ed9876fecebd32251" ]
[ "baselines/main.py" ]
[ "import matplotlib.pyplot as plt\nimport torch\nimport numpy as np\nfrom torchvision import models\nfrom torchvision import transforms\nfrom torchvision import models\nfrom PIL import Image\nfrom torch.utils.data import DataLoader\nfrom net.fcn import FCN32,FCN16,FCN8\nfrom net.linknet import LINKNET\nfrom net.segnet import SEGNET\nfrom net.pspnet import PSPNET\nfrom net.icnet import ICNET, ICLoss\nfrom net.unet import UNET\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport tqdm \nfrom torchvision.utils import make_grid\nimport time \nfrom torchsummary import summary\nfrom data import myCityscapes\nfrom config import device\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n#设置太大的BATCH会爆显存\nBATCH = 8\n\ndef labelTransform(x):\n x = transforms.Resize(256)(x)\n x = np.array(x)\n x = torch.LongTensor(x)\n #第7类为道路\n mask = torch.LongTensor([[7]]).expand_as(x)\n y = (x==mask).long()\n return y \n\nmyTransform = transforms.Compose([\n transforms.Resize(256),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\n#mode = 'foggy' or 'fine'\ndataset = myCityscapes('./datasets/', split='train', mode='fine', target_type='semantic',transform=myTransform,target_transform=labelTransform)\ndataloader = DataLoader(dataset, batch_size=BATCH,shuffle=True)\nvalidset = myCityscapes('./datasets/', split='val', mode='fine', target_type='semantic',transform=myTransform,target_transform=labelTransform)\nvalidloader = DataLoader(validset, batch_size=BATCH, shuffle=True)\n\n#fcn = models.segmentation.fcn_resnet50(pretrained=True).to(device)\n\nclass SegModel():\n def __init__(self):\n #demo使用很少的训练轮次\n self.epoch = 10\n self.lr = 1e-4\n\n #FCN/SEGNET基于vgg16进行初始化\n self.model = FCN8().to(device)\n self.model.init_vgg16_params()\n #self.model = SEGNET().to(device)\n #self.model.init_vgg16_params()\n\n #使用残差卷积的LNINNET\n #self.model = LINKNET().to(device)\n\n #用resnet作为backbone并加入金字塔的PSPNET\n #self.model = PSPNET().to(device)\n\n #self.model = UNET().to(device)\n self.critirion = nn.CrossEntropyLoss()\n\n #self.model = ICNET().to(device)\n #self.critirion = ICLoss()\n\n self.optimizer = torch.optim.Adam(self.model.parameters(),lr=self.lr)\n \n def train(self,dataloader,validloader):\n train_acces = []\n valid_acces = []\n losses = []\n for e in range(self.epoch):\n Loss = 0\n \n #共2975张图片\n for i, data in tqdm.tqdm(enumerate(dataloader)):\n #demo用比较少图片的训练\n if i > 50:\n break\n\n self.optimizer.zero_grad()\n x,y = data\n x = x.to(device)\n y = y.to(device)\n\n out = self.model(x)\n \n loss = self.critirion(out,y)\n loss.backward()\n self.optimizer.step()\n\n Loss += loss\n\n Loss /= len(dataloader)\n losses.append(Loss)\n print(\"epoch: \",e, \"loss: \",Loss.item())\n train_acc,_,_ = self.getMetrics(dataloader)\n valid_acc,_,_ = self.getMetrics(validloader)\n train_acces.append(train_acc)\n valid_acces.append(valid_acc)\n\n savepath = \"results/\" + str(e) + \".png\"\n self.getPicture(dataloader, savepath)\n\n plt.figure()\n plt.plot(train_acces)\n plt.plot(valid_acces)\n plt.legend([\"train\",\"valid\"])\n plt.savefig(\"results/acc.png\")\n\n plt.figure()\n plt.plot(losses)\n plt.savefig(\"results/loss.png\")\n\n def test(self,dataloader):\n acc,miou,fps = self.getMetrics(dataloader)\n print(\"accuracy: \", acc, \"MIou: \", miou, \"fps: \",fps) \n\n @torch.no_grad()\n def getMetrics(self,dataloader):\n correct = 0\n tot_pixel = 0\n tot_time = 0\n tot_num = 0\n\n tot_intersect = 0\n tot_union = 0\n \n for i,data in enumerate(dataloader):\n 
#选取部分计算评价指标\n if i > 5:\n break\n x,y = data\n B = x.shape[0]\n x = x.to(device)\n y = y.to(device)\n\n tot_num += 1\n\n start_time = time.time()\n #ICNET取第一个元素为输出\n out = self.model(x)\n if isinstance(out,list):\n out = out[0]\n end_time = time.time()\n tot_time += end_time - start_time\n\n pred = torch.argmax(out,dim=1)\n \n correct += torch.sum(pred == y)\n tot_pixel += torch.sum((y>=0))\n\n pred = pred.bool()\n y = y.bool()\n tot_intersect += torch.sum(pred & y) \n tot_union += torch.sum(pred | y)\n\n acc = (correct / tot_pixel).item()\n miou = (tot_intersect / tot_union).item()\n fps = (tot_num*BATCH) / tot_time\n \n return acc,miou,fps\n \n\n def getPicture(self,dataloader,path):\n for i,data in enumerate(dataloader):\n if i>0:\n break\n x,y = data\n x = x.to(device)\n \n plt.figure() \n plt.subplot(2,1,1)\n out = self.model(x)\n if isinstance(out,list):\n out = out[0]\n pred = torch.argmax(out,dim=1).float().detach().cpu()\n pred = pred.unsqueeze(dim=1)\n pic = make_grid(pred,padding=2).permute(1,2,0)\n plt.imshow(pic)\n plt.axis('off')\n\n plt.subplot(2,1,2)\n y = y.float().detach().cpu()\n y = y.unsqueeze(dim=1)\n pic = make_grid(y,padding=2).permute(1,2,0)\n plt.imshow(pic)\n plt.axis('off')\n plt.savefig(path)\n\nseg = SegModel()\n#summary(seg.model, (3,256,512))\nseg.train(dataloader,validloader)\nseg.test(validloader) #使用valloader而不使用testloader,猜测testdata标注有误\ntorch.save(seg.model.state_dict(), 'pth/segnet.pth')\n" ]
[ [ "matplotlib.pyplot.subplot", "numpy.array", "torch.argmax", "matplotlib.pyplot.savefig", "torch.no_grad", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "torch.LongTensor", "torch.utils.data.DataLoader", "matplotlib.pyplot.imshow", "torch.nn.CrossEntropyLoss", "matplotlib.pyplot.axis", "torch.sum" ] ]
gargnupur/tools
[ "fc7be06c76a6c85a71221a610409d0c46ddea710" ]
[ "perf_dashboard/regression_alerts/views.py" ]
[ "# Copyright Istio Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.shortcuts import render\nfrom helpers import download\nimport pandas as pd\nimport os\n\n\ncwd = os.getcwd()\nperf_data_path = cwd + \"/perf_data/\"\ncurrent_release = [os.getenv('CUR_RELEASE')]\n\n\n# Create your views here.\ndef cur_alert(request):\n cur_release_names, cur_release_dates, master_release_names, master_release_dates = download.download_benchmark_csv(40)\n\n cur_pattern_mixer_base_p90 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, '_mixer_base', 'p90')\n cur_pattern_mixer_serveronly_p90 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, '_mixer_serveronly', 'p90')\n cur_pattern_mixer_both_p90 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, '_mixer_both', 'p90')\n cur_pattern_none_serveronly_p90 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, '_none_serveronly', 'p90')\n cur_pattern_none_both_p90 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, '_none_both', 'p90')\n cur_pattern_v2_serveronly_p90 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, 'nullvm_serveronly', 'p90')\n cur_pattern_v2_both_p90 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, 'nullvm_both', 'p90')\n\n cur_pattern_mixer_base_p99 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, '_mixer_base', 'p99')\n cur_pattern_mixer_serveronly_p99 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, '_mixer_serveronly', 'p99')\n cur_pattern_mixer_both_p99 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, '_mixer_both', 'p99')\n cur_pattern_none_serveronly_p99 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, '_none_serveronly', 'p99')\n cur_pattern_none_both_p99 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, '_none_both', 'p99')\n cur_pattern_v2_serveronly_p99 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, 'nullvm_serveronly', 'p99')\n cur_pattern_v2_both_p99 = get_mixer_mode_y_series(cur_release_names, cur_release_dates, 'nullvm_both', 'p99')\n\n context = {'current_release': current_release,\n 'cur_pattern_mixer_base_p90': cur_pattern_mixer_base_p90,\n 'cur_pattern_mixer_serveronly_p90': cur_pattern_mixer_serveronly_p90,\n 'cur_pattern_mixer_both_p90': cur_pattern_mixer_both_p90,\n 'cur_pattern_none_serveronly_p90': cur_pattern_none_serveronly_p90,\n 'cur_pattern_none_both_p90': cur_pattern_none_both_p90,\n 'cur_pattern_v2_serveronly_p90': cur_pattern_v2_serveronly_p90,\n 'cur_pattern_v2_both_p90': cur_pattern_v2_both_p90,\n 'cur_pattern_mixer_base_p99': cur_pattern_mixer_base_p99,\n 'cur_pattern_mixer_serveronly_p99': cur_pattern_mixer_serveronly_p99,\n 'cur_pattern_mixer_both_p99': cur_pattern_mixer_both_p99,\n 'cur_pattern_none_serveronly_p99': cur_pattern_none_serveronly_p99,\n 'cur_pattern_none_both_p99': cur_pattern_none_both_p99,\n 'cur_pattern_v2_serveronly_p99': cur_pattern_v2_serveronly_p99,\n 'cur_pattern_v2_both_p99': 
cur_pattern_v2_both_p99\n }\n return render(request, \"cur_alert.html\", context=context)\n\n\n# Create your views here.\ndef master_alert(request):\n cur_release_names, cur_release_dates, master_release_names, master_release_dates = download.download_benchmark_csv(40)\n\n master_pattern_mixer_base_p90 = get_mixer_mode_y_series(master_release_names, master_release_dates, '_mixer_base', 'p90')\n master_pattern_mixer_serveronly_p90 = get_mixer_mode_y_series(master_release_names, master_release_dates, '_mixer_serveronly', 'p90')\n master_pattern_mixer_both_p90 = get_mixer_mode_y_series(master_release_names, master_release_dates, '_mixer_both', 'p90')\n master_pattern_none_serveronly_p90 = get_mixer_mode_y_series(master_release_names, master_release_dates, '_none_serveronly', 'p90')\n master_pattern_none_both_p90 = get_mixer_mode_y_series(master_release_names, master_release_dates, '_none_both', 'p90')\n master_pattern_v2_serveronly_p90 = get_mixer_mode_y_series(master_release_names, master_release_dates, 'nullvm_serveronly', 'p90')\n master_pattern_v2_both_p90 = get_mixer_mode_y_series(master_release_names, master_release_dates, 'nullvm_both', 'p90')\n\n master_pattern_mixer_base_p99 = get_mixer_mode_y_series(master_release_names, master_release_dates, '_mixer_base', 'p99')\n master_pattern_mixer_serveronly_p99 = get_mixer_mode_y_series(master_release_names, master_release_dates, '_mixer_serveronly', 'p99')\n master_pattern_mixer_both_p99 = get_mixer_mode_y_series(master_release_names, master_release_dates, '_mixer_both', 'p99')\n master_pattern_none_serveronly_p99 = get_mixer_mode_y_series(master_release_names, master_release_dates, '_none_serveronly', 'p99')\n master_pattern_none_both_p99 = get_mixer_mode_y_series(master_release_names, master_release_dates, '_none_both', 'p99')\n master_pattern_v2_serveronly_p99 = get_mixer_mode_y_series(master_release_names, master_release_dates, 'nullvm_serveronly', 'p99')\n master_pattern_v2_both_p99 = get_mixer_mode_y_series(master_release_names, master_release_dates, 'nullvm_both', 'p99')\n\n context = {'current_release': current_release,\n 'master_pattern_mixer_base_p90': master_pattern_mixer_base_p90,\n 'master_pattern_mixer_serveronly_p90': master_pattern_mixer_serveronly_p90,\n 'master_pattern_mixer_both_p90': master_pattern_mixer_both_p90,\n 'master_pattern_none_serveronly_p90': master_pattern_none_serveronly_p90,\n 'master_pattern_none_both_p90': master_pattern_none_both_p90,\n 'master_pattern_v2_serveronly_p90': master_pattern_v2_serveronly_p90,\n 'master_pattern_v2_both_p90': master_pattern_v2_both_p90,\n 'master_pattern_mixer_base_p99': master_pattern_mixer_base_p99,\n 'master_pattern_mixer_serveronly_p99': master_pattern_mixer_serveronly_p99,\n 'master_pattern_mixer_both_p99': master_pattern_mixer_both_p99,\n 'master_pattern_none_serveronly_p99': master_pattern_none_serveronly_p99,\n 'master_pattern_none_both_p99': master_pattern_none_both_p99,\n 'master_pattern_v2_serveronly_p99': master_pattern_v2_serveronly_p99,\n 'master_pattern_v2_both_p99': master_pattern_v2_both_p99\n }\n return render(request, \"master_alert.html\", context=context)\n\n\n# Helpers\ndef get_latency_y_data_point(df, mixer_mode, quantiles):\n y_series_data = []\n data = df.query('ActualQPS == 1000 and NumThreads == 16 and Labels.str.endswith(@mixer_mode)')\n if not data[quantiles].head().empty:\n y_series_data.append(data[quantiles].head(1).values[0]/1000)\n else:\n y_series_data.append('null')\n return y_series_data\n\n\ndef get_mixer_mode_y_series(release_names, 
release_dates, mixer_mode, quantiles):\n pattern_data = [[]] * len(release_names)\n for i in range(len(release_names)):\n try:\n df = pd.read_csv(perf_data_path + release_names[i] + \".csv\")\n except Exception as e:\n print(e)\n pattern_data[i] = release_dates[i] + [\"null\"]\n else:\n pattern_data[i] = release_dates[i] + get_latency_y_data_point(df, mixer_mode, quantiles)\n return pattern_data\n" ]
[ [ "pandas.read_csv" ] ]
pjstanle/reV
[ "c22c620749747022a65d2a98a99beef804849ee6" ]
[ "reV/offshore/offshore.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nreV offshore wind analysis module. This module uses the NRWAL library to\nassess offshore losses and LCOE to complement the simple SAM windpower module.\n\nEverything in this module operates on the native wind resource resolution.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport logging\nfrom warnings import warn\n\nfrom reV.generation.generation import Gen\nfrom reV.handlers.outputs import Outputs\nfrom reV.utilities.exceptions import (OffshoreWindInputWarning,\n OffshoreWindInputError)\nfrom reV.utilities import log_versions\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Offshore:\n \"\"\"Framework to handle offshore wind analysis.\"\"\"\n\n # Default columns from the offshore wind data table to join to the\n # offshore meta data\n DEFAULT_META_COLS = ('config', )\n\n # Default keys from the NRWAL config to export as new datasets\n # in the reV output h5\n DEFAULT_NRWAL_KEYS = ('total_losses', 'array', 'export')\n\n def __init__(self, gen_fpath, offshore_fpath, nrwal_configs,\n project_points, offshore_meta_cols=None,\n offshore_nrwal_keys=None, nrwal_lcoe_key='lcoe',\n nrwal_loss_key='total_losses', run_all=False):\n \"\"\"\n Parameters\n ----------\n gen_fpath : str\n Full filepath to reV gen h5 output file.\n offshore_fpath : str\n Full filepath to offshore wind farm data file. Needs \"gid\" and\n \"config\" columns matching the project points input.\n nrwal_configs : dict\n Dictionary lookup of config_id values mapped to config filepaths.\n The same config_id values will be used from the sam_files lookup\n in project_points\n project_points : reV.config.project_points.ProjectPoints\n Instantiated project points instance.\n offshore_meta_cols : list | tuple | None\n Column labels from offshore_fpath to pass through to the output\n meta data. None (default) will use class variable\n DEFAULT_META_COLS, and any additional cols requested here will be\n added to DEFAULT_META_COLS.\n offshore_nrwal_keys : list | tuple | None\n Equation labels from the NRWAL configs to pass through to the\n output h5 file. None will use class variable DEFAULT_NRWAL_KEYS,\n and any additional cols requested here will be added to\n DEFAULT_NRWAL_KEYS.\n nrwal_lcoe_key : str\n Key in the NRWAL config for final LCOE output value. Can be\n changed and runtime for different NRWAL configs using this kwarg.\n nrwal_loss_key : str\n Key in the NRWAL config for final capacity factor losses output\n value. 
Can be changed and runtime for different NRWAL configs\n using this kwarg.\n run_all : bool\n Flag to run nrwal econ for all generation sites and ignore the\n offshore flag\n \"\"\"\n\n log_versions(logger)\n\n # delayed NRWAL import to cause less errors with old reV installs\n # if not running offshore.\n from NRWAL import NrwalConfig\n\n self._gen_fpath = gen_fpath\n self._offshore_fpath = offshore_fpath\n self._project_points = project_points\n self._meta_out = None\n self._time_index = None\n self._lcoe_key = nrwal_lcoe_key\n self._loss_key = nrwal_loss_key\n self._run_all = run_all\n\n self._nrwal_configs = {k: NrwalConfig(v) for k, v in\n nrwal_configs.items()}\n\n self._offshore_meta_cols = offshore_meta_cols\n if self._offshore_meta_cols is None:\n self._offshore_meta_cols = list(self.DEFAULT_META_COLS)\n else:\n self._offshore_meta_cols = list(self._offshore_meta_cols)\n self._offshore_meta_cols += list(self.DEFAULT_META_COLS)\n self._offshore_meta_cols = list(set(self._offshore_meta_cols))\n\n self._offshore_nrwal_keys = offshore_nrwal_keys\n if self._offshore_nrwal_keys is None:\n self._offshore_nrwal_keys = list(self.DEFAULT_NRWAL_KEYS)\n else:\n self._offshore_nrwal_keys = list(self._offshore_nrwal_keys)\n self._offshore_nrwal_keys += list(self.DEFAULT_NRWAL_KEYS)\n self._offshore_nrwal_keys = list(set(self._offshore_nrwal_keys))\n\n out = self._parse_gen_data(self._gen_fpath, run_all=run_all)\n self._meta_source, self._onshore_mask = out[:2]\n self._offshore_mask, self._cf_mean = out[2:]\n\n self._offshore_data = self._parse_offshore_data(self._offshore_fpath)\n self._system_inputs = self._parse_system_inputs()\n self._preflight_checks()\n\n logger.info('Initialized offshore wind farm aggregation module with '\n '{} onshore resource points, {} offshore resource points.'\n .format(len(self.meta_source_onshore),\n len(self.meta_source_offshore)))\n\n self._out = {self._lcoe_key: np.full(len(self._offshore_data), np.nan),\n self._loss_key: np.full(len(self._offshore_data), np.nan)}\n for key in self._offshore_nrwal_keys:\n if key in self._offshore_data:\n self._out[key] = self._offshore_data[key].values\n else:\n self._out[key] = np.full(len(self._offshore_data), np.nan,\n dtype=np.float32)\n\n @staticmethod\n def _parse_gen_data(gen_fpath, run_all=False):\n \"\"\"Parse cf meta dataframe and get masks for onshore/offshore points.\n\n Parameters\n ----------\n gen_fpath : str\n Full filepath to reV gen h5 output file.\n\n Returns\n -------\n meta : pd.DataFrame\n Full meta data from gen_fpath with \"offshore\" column.\n onshore_mask : pd.Series\n Boolean series indicating where onshore sites are.\n offshore_mask : pd.Series\n Boolean series indicating where offshore sites are.\n cf_mean : np.ndarray\n 1D array of mean capacity factor values corresponding to the\n un-masked meta data\n run_all : bool\n Flag to run nrwal econ for all generation sites and ignore the\n offshore flag\n \"\"\"\n\n with Outputs(gen_fpath, mode='r') as out:\n meta = out.meta\n if 'cf_mean_raw' in out.dsets:\n cf_mean = out['cf_mean_raw']\n elif 'cf_mean' in out.dsets:\n cf_mean = out['cf_mean']\n else:\n msg = ('Could not find cf_mean or cf_mean_raw in file: {}'\n .format(gen_fpath))\n logger.error(msg)\n raise OffshoreWindInputError(msg)\n\n msg = ('Could not find \"gid\" column in source '\n 'capacity factor meta data!')\n assert 'gid' in meta, msg\n\n # currently an assumption of sorted gids in the reV gen output\n msg = ('Source capacity factor meta data is not ordered!')\n assert list(meta['gid']) 
== sorted(list(meta['gid'])), msg\n\n if 'offshore' not in meta and not run_all:\n e = ('Offshore module cannot run without \"offshore\" flag in meta '\n 'data of gen_fpath: {}'.format(gen_fpath))\n logger.error(e)\n raise KeyError(e)\n elif run_all:\n meta['offshore'] = 1\n\n onshore_mask = meta['offshore'] == 0\n offshore_mask = meta['offshore'] == 1\n\n logger.info('Finished parsing reV gen output for resource gid '\n '{} through {} with {} offshore points.'\n .format(meta['gid'].values.min(),\n meta['gid'].values.max(), offshore_mask.sum()))\n logger.info('Offshore capacity factor has min / median / mean / max: '\n '{:.3f} / {:.3f} / {:.3f} / {:.3f}'\n .format(cf_mean.min(), np.median(cf_mean),\n np.mean(cf_mean), cf_mean.max()))\n\n return meta, onshore_mask, offshore_mask, cf_mean\n\n def _parse_offshore_data(self, offshore_fpath,\n required_columns=('gid', 'config')):\n \"\"\"Parse the offshore data file for offshore farm site data and coords.\n\n Parameters\n ----------\n offshore_fpath : str\n Full filepath to offshore wind farm data file.\n required_columns : tuple | list\n List of column names that must be in the offshore data in\n order to run the reV offshore module.\n\n Returns\n -------\n offshore_data : pd.DataFrame\n Dataframe of extracted offshore farm data. Each row is a farm and\n columns are farm data attributes.\n \"\"\"\n\n offshore_data = pd.read_csv(offshore_fpath)\n\n if 'dist_l_to_ts' in offshore_data:\n if offshore_data['dist_l_to_ts'].sum() > 0:\n w = ('Possible incorrect Offshore data input! \"dist_l_to_ts\" '\n '(distance land to transmission) input is non-zero. '\n 'Most reV runs set this to zero and input the cost '\n 'of transmission from landfall tie-in to '\n 'transmission feature in the supply curve module.')\n logger.warning(w)\n warn(w, OffshoreWindInputWarning)\n\n for c in required_columns:\n if c not in offshore_data:\n msg = ('Did not find required \"{}\" column in offshore_data!'\n .format(c))\n logger.error(msg)\n raise KeyError(msg)\n\n available_gids = list(offshore_data['gid'].values)\n missing = set(self.offshore_res_gids) - set(available_gids)\n if any(missing):\n msg = ('The following gids were requested in the reV project '\n 'points input but were not available in the offshore data '\n 'input: {}'.format(missing))\n logger.error(msg)\n raise OffshoreWindInputError(msg)\n\n # only keep the offshore data corresponding to relevant project points\n mask = offshore_data['gid'].isin(self.offshore_res_gids)\n offshore_data = offshore_data[mask]\n offshore_data = offshore_data.sort_values('gid')\n offshore_data = offshore_data.reset_index(drop=True)\n\n return offshore_data\n\n def _parse_system_inputs(self):\n \"\"\"Get the system inputs dict (SAM tech inputs) from project points.\n\n Returns\n -------\n system_inputs : pd.DataFrame\n DataFrame of SAM config inputs (columns) for every offshore\n resource gid (row). 
Index is resource gids and there is also\n a column \"gid\" with the copied gids.\n \"\"\"\n\n system_inputs = {}\n\n for gid in self.offshore_res_gids:\n system_inputs[gid] = self._project_points[gid][1]\n\n if ('wind_turbine_powercurve_powerout' in system_inputs[gid]\n and 'turbine_capacity' not in system_inputs[gid]):\n # convert from SAM kw powercurve to MW.\n arr = system_inputs[gid]['wind_turbine_powercurve_powerout']\n cap_kw = np.max(arr)\n cap_mw = cap_kw / 1000\n system_inputs[gid]['turbine_capacity'] = cap_mw\n\n system_inputs = pd.DataFrame(system_inputs).T\n system_inputs = system_inputs.sort_index()\n system_inputs['gid'] = system_inputs.index.values\n system_inputs.index.name = 'gid'\n\n return system_inputs\n\n def _preflight_checks(self):\n \"\"\"Run some preflight checks on the offshore inputs\"\"\"\n sam_configs = {k: v for k, v in\n self._project_points.sam_inputs.items()\n if k in self._nrwal_configs}\n for cid, sys_in in sam_configs.items():\n if 'turbine_capacity' not in sys_in:\n msg = ('System input key \"turbine_capacity\" not found in '\n 'SAM system inputs for \"{}\". Calculating from turbine '\n 'power curves.'.format(cid))\n logger.warning(msg)\n warn(msg, OffshoreWindInputWarning)\n\n loss1 = sys_in.get('wind_farm_losses_percent', 0)\n loss2 = sys_in.get('turb_generic_loss', 0)\n if loss1 != 0 or loss2 != 0:\n msg = ('Wind farm loss for config \"{}\" is not 0. The offshore '\n 'module uses gross capacity factors from reV '\n 'generation and applies losses from the NRWAL equations'\n .format(cid))\n logger.warning(msg)\n warn(msg, OffshoreWindInputWarning)\n\n available_ids = list(self._nrwal_configs.keys())\n requested_ids = list(self._offshore_data['config'].values)\n missing = set(requested_ids) - set(available_ids)\n if any(missing):\n msg = ('The following config ids were requested in the offshore '\n 'data input but were not available in the NRWAL config '\n 'input dict: {}'.format(missing))\n logger.error(msg)\n raise OffshoreWindInputError(msg)\n\n check_gid_order = (self._offshore_data['gid'].values\n == self._system_inputs['gid'].values)\n msg = 'Offshore and system input dataframes had bad order'\n assert (check_gid_order).all(), msg\n\n if 'gcf' in self._offshore_data:\n msg = 'Offshore data input already had gross capacity factor!'\n logger.error(msg)\n raise OffshoreWindInputError(msg)\n\n # store capacity factor under gcf (gross cf) and also under cf_mean\n # (useful for non-orca configs)\n self._offshore_data['gcf'] = self._cf_mean[self._offshore_mask]\n self._offshore_data['cf_mean'] = self._cf_mean[self._offshore_mask]\n\n for config_id, nrwal_config in self._nrwal_configs.items():\n system_vars = [var for var in nrwal_config.required_inputs\n if var not in self._offshore_data]\n missing_vars = [var for var in nrwal_config.required_inputs\n if var not in self._offshore_data\n and var not in self._system_inputs]\n\n if any(missing_vars):\n msg = ('Could not find required input variables {} '\n 'for NRWAL config \"{}\" in either the offshore '\n 'data or the SAM system data!'\n .format(missing_vars, config_id))\n logger.error(msg)\n raise OffshoreWindInputError(msg)\n\n for var in system_vars:\n sys_data_arr = self._system_inputs[var].values\n self._offshore_data[var] = sys_data_arr\n\n missing = [c for c in self._offshore_meta_cols\n if c not in self._offshore_data]\n if any(missing):\n msg = ('Could not find requested offshore pass through columns '\n 'in offshore input data: {}'.format(missing))\n logger.error(msg)\n raise 
OffshoreWindInputError(msg)\n\n @property\n def time_index(self):\n \"\"\"Get the source time index.\"\"\"\n if self._time_index is None:\n with Outputs(self._gen_fpath, mode='r') as out:\n self._time_index = out.time_index\n\n return self._time_index\n\n @property\n def meta_source_full(self):\n \"\"\"Get the full meta data (onshore + offshore)\"\"\"\n return self._meta_source\n\n @property\n def meta_source_onshore(self):\n \"\"\"Get the onshore only meta data.\"\"\"\n return self._meta_source[self._onshore_mask]\n\n @property\n def meta_source_offshore(self):\n \"\"\"Get the offshore only meta data.\"\"\"\n return self._meta_source[self._offshore_mask]\n\n @property\n def meta_out(self):\n \"\"\"Get the combined onshore and offshore meta data.\"\"\"\n if self._meta_out is None:\n self._meta_out = self.meta_source_full.copy()\n for col in self._offshore_meta_cols:\n if col not in self._meta_out:\n self._meta_out[col] = np.nan\n\n # note that this assumes that offshore data has been reduced\n # to only those rows with gids in meta_out and is sorted by gid\n data = self._offshore_data[col]\n self._meta_out.loc[self._offshore_mask, col] = data.values\n\n return self._meta_out\n\n @property\n def meta_out_onshore(self):\n \"\"\"Get the onshore only meta data.\"\"\"\n return self.meta_out[self._onshore_mask]\n\n @property\n def meta_out_offshore(self):\n \"\"\"Get the output offshore meta data.\"\"\"\n return self.meta_out[self._offshore_mask]\n\n @property\n def onshore_res_gids(self):\n \"\"\"Get a list of resource gids for the onshore sites.\"\"\"\n return self.meta_source_onshore['gid'].values.tolist()\n\n @property\n def offshore_res_gids(self):\n \"\"\"Get a list of resource gids for the offshore sites.\"\"\"\n return self.meta_source_offshore['gid'].values.tolist()\n\n @property\n def outputs(self):\n \"\"\"Get a dict of offshore outputs\"\"\"\n return self._out\n\n def run_nrwal(self):\n \"\"\"Run offshore analysis via the NRWAL analysis library\"\"\"\n from NRWAL import Equation\n\n for i, (cid, nrwal_config) in enumerate(self._nrwal_configs.items()):\n mask = self._offshore_data['config'].values == cid\n logger.info('Running offshore config {} of {}: \"{}\" and applying '\n 'to {} out of {} offshore gids'\n .format(i + 1, len(self._nrwal_configs), cid,\n mask.sum(), len(mask)))\n\n outs = nrwal_config.eval(inputs=self._offshore_data)\n\n # pylint: disable=C0201,C0206\n for name in self._out.keys():\n value = None\n\n if name in outs:\n value = outs[name]\n self._out[name][mask] = value[mask]\n\n elif name in nrwal_config.keys():\n value = nrwal_config[name]\n if isinstance(value, Equation):\n msg = ('Cannot retrieve Equation \"{}\" from NRWAL. 
'\n 'Must be a number!'.format(name))\n assert not any(value.variables), msg\n value = value.eval()\n if np.issubdtype(type(value), np.number):\n value *= np.ones(len(self._offshore_data))\n if not isinstance(value, np.ndarray):\n msg = ('NRWAL key \"{}\" returned bad type of \"{}\", '\n 'needs to be numeric or an output array.'\n .format(name, type(value)))\n logger.error(msg)\n raise TypeError(msg)\n self._out[name][mask] = value[mask]\n\n elif name not in self._offshore_data:\n msg = ('Could not find \"{}\" in the output dict of NRWAL '\n 'config {}'.format(name, cid))\n logger.warning(msg)\n warn(msg)\n\n logger.debug('NRWAL output \"{}\": {}'.format(name, value))\n\n def check_outputs(self):\n \"\"\"Check the nrwal outputs for nan values and raise errors if found.\"\"\"\n for name, arr in self._out.items():\n if np.isnan(arr).all():\n msg = ('Output array \"{}\" is all NaN! Probably was not '\n 'found in the available NRWAL keys.'.format(name))\n logger.warning(msg)\n warn(msg)\n elif np.isnan(arr).any():\n mask = np.isnan(arr)\n nan_meta = self.meta_out_offshore[mask]\n nan_gids = nan_meta['gid'].values\n msg = ('NaN values ({} out of {}) persist in offshore '\n 'output \"{}\"!'\n .format(np.isnan(arr).sum(), len(arr), name))\n logger.warning(msg)\n logger.warning('This is the offshore meta that is causing NaN '\n 'outputs: {}'.format(nan_meta))\n logger.warning('These are the resource gids causing NaN '\n 'outputs: {}'.format(nan_gids))\n warn(msg)\n\n def write_to_gen_fpath(self):\n \"\"\"Save offshore outputs to input generation fpath file. This will\n overwrite data!\"\"\"\n\n loss_mult = 1 - self._out[self._loss_key]\n loss_mult = np.where(np.isnan(loss_mult), 1, loss_mult)\n\n with Outputs(self._gen_fpath, 'a') as f:\n meta_attrs = f.get_attrs('meta')\n del f._h5['meta']\n f._set_meta('meta', self.meta_out, attrs=meta_attrs)\n\n if 'lcoe_fcr' in f:\n lcoe = f['lcoe_fcr']\n lcoe[self._offshore_mask] = self._out[self._lcoe_key]\n f['lcoe_fcr'] = lcoe\n elif 'lcoe_fcr' not in f and self._run_all:\n f._add_dset('lcoe_fcr', self._out[self._lcoe_key], np.float32,\n attrs={'units': 'dol/MWh', 'scale_factor': 1})\n elif 'lcoe_fcr' not in f and not self._run_all:\n lcoe = np.zeros(len(self.meta_out), dtype=np.float32)\n lcoe[self._offshore_mask] = self._out[self._lcoe_key]\n f._add_dset('lcoe_fcr', lcoe, np.float32,\n attrs={'units': 'dol/MWh', 'scale_factor': 1})\n\n f._add_dset('cf_mean_raw', self._cf_mean, f.dtypes['cf_mean'],\n attrs=f.attrs['cf_mean'])\n self._cf_mean[self._offshore_mask] *= loss_mult\n f['cf_mean'] = self._cf_mean\n\n profiles = None\n if 'cf_profile_raw' in f.dsets:\n profiles = f['cf_profile_raw']\n elif 'cf_profile' in f.dsets:\n profiles = f['cf_profile']\n if profiles is not None:\n f._add_dset('cf_profile_raw', profiles, f.dtypes['cf_profile'],\n attrs=f.attrs['cf_profile'])\n profiles[:, self._offshore_mask] *= loss_mult\n f['cf_profile'] = profiles\n\n for key, arr in self._out.items():\n if key not in (self._lcoe_key, ):\n if key not in f.dsets:\n data = np.full(len(f.meta), np.nan).astype(np.float32)\n else:\n data = f[key]\n\n data[self._offshore_mask] = arr\n f._add_dset(key, data, np.float32,\n attrs={'scale_factor': 1})\n\n @classmethod\n def run(cls, gen_fpath, offshore_fpath, sam_files, nrwal_configs,\n points, offshore_meta_cols=None, offshore_nrwal_keys=None,\n nrwal_lcoe_key='lcoe', nrwal_loss_key='total_losses',\n run_all=False):\n \"\"\"\n Parameters\n ----------\n gen_fpath : str\n Full filepath to reV gen h5 output file.\n offshore_fpath : 
str\n Full filepath to offshore wind farm data file. Needs \"gid\" and\n \"config\" columns matching the project points input.\n sam_files : dict\n Dictionary lookup of config_id values mapped to config filepaths.\n The same config_id values will be used from the nrwal_configs\n lookup input.\n nrwal_configs : dict\n Dictionary lookup of config_id values mapped to config filepaths.\n The same config_id values will be used from the sam_files lookup\n in project_points\n points : str\n reV project points to analyze. Has to be a string file path to a\n project points csv with \"gid\" and \"config\" columns. The config\n column maps to the sam_files and nrwal_configs inputs.\n offshore_meta_cols : list | tuple | None\n Column labels from offshore_fpath to pass through to the output\n meta data. None (default) will use class variable\n DEFAULT_META_COLS, and any additional cols requested here will be\n added to DEFAULT_META_COLS.\n offshore_nrwal_keys : list | tuple | None\n Equation labels from the NRWAL configs to pass through to the\n output h5 file. None will use class variable DEFAULT_NRWAL_KEYS,\n and any additional cols requested here will be added to\n DEFAULT_NRWAL_KEYS.\n nrwal_lcoe_key : str\n Key in the NRWAL config for final LCOE output value. Can be\n changed and runtime for different NRWAL configs using this kwarg.\n nrwal_loss_key : str\n Key in the NRWAL config for final capacity factor losses output\n value. Can be changed and runtime for different NRWAL configs\n using this kwarg.\n run_all : bool\n Flag to run nrwal econ for all generation sites and ignore the\n offshore flag\n\n Returns\n -------\n offshore : Offshore\n Instantiated Offshore analysis object.\n \"\"\"\n\n points_range = None\n pc = Gen.get_pc(points, points_range, sam_files, 'windpower')\n\n offshore = cls(gen_fpath, offshore_fpath, nrwal_configs,\n pc.project_points,\n offshore_meta_cols=offshore_meta_cols,\n offshore_nrwal_keys=offshore_nrwal_keys,\n nrwal_lcoe_key=nrwal_lcoe_key,\n nrwal_loss_key=nrwal_loss_key,\n run_all=run_all)\n\n if any(offshore.offshore_res_gids):\n offshore.run_nrwal()\n offshore.check_outputs()\n offshore.write_to_gen_fpath()\n\n logger.info('Offshore wind gen/econ module complete!')\n\n return offshore\n" ]
[ [ "numpy.max", "numpy.isnan", "numpy.median", "pandas.DataFrame", "numpy.mean", "pandas.read_csv" ] ]
lrusnac/pandas
[ "a170e977dc8cc270bdcdee904658f9b6e20c8e86" ]
[ "pandas/core/arrays/timedeltas.py" ]
[ "from datetime import timedelta\nfrom typing import List, Optional, Union\n\nimport numpy as np\n\nfrom pandas._libs import lib, tslibs\nfrom pandas._libs.tslibs import (\n BaseOffset,\n NaT,\n NaTType,\n Period,\n Tick,\n Timedelta,\n Timestamp,\n iNaT,\n to_offset,\n)\nfrom pandas._libs.tslibs.conversion import precision_from_unit\nfrom pandas._libs.tslibs.fields import get_timedelta_field\nfrom pandas._libs.tslibs.timedeltas import (\n array_to_timedelta64,\n ints_to_pytimedelta,\n parse_timedelta_unit,\n)\nfrom pandas.compat.numpy import function as nv\n\nfrom pandas.core.dtypes.common import (\n DT64NS_DTYPE,\n TD64NS_DTYPE,\n is_categorical_dtype,\n is_dtype_equal,\n is_float_dtype,\n is_integer_dtype,\n is_object_dtype,\n is_scalar,\n is_string_dtype,\n is_timedelta64_dtype,\n is_timedelta64_ns_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\nfrom pandas.core.dtypes.generic import ABCSeries, ABCTimedeltaIndex\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core import nanops\nfrom pandas.core.algorithms import checked_add_with_arr\nfrom pandas.core.arrays import IntegerArray, datetimelike as dtl\nfrom pandas.core.arrays._ranges import generate_regular_range\nimport pandas.core.common as com\nfrom pandas.core.construction import extract_array\nfrom pandas.core.ops.common import unpack_zerodim_and_defer\n\n\ndef _field_accessor(name: str, alias: str, docstring: str):\n def f(self) -> np.ndarray:\n values = self.asi8\n result = get_timedelta_field(values, alias)\n if self._hasnans:\n result = self._maybe_mask_results(\n result, fill_value=None, convert=\"float64\"\n )\n\n return result\n\n f.__name__ = name\n f.__doc__ = f\"\\n{docstring}\\n\"\n return property(f)\n\n\nclass TimedeltaArray(dtl.TimelikeOps):\n \"\"\"\n Pandas ExtensionArray for timedelta data.\n\n .. versionadded:: 0.24.0\n\n .. warning::\n\n TimedeltaArray is currently experimental, and its API may change\n without warning. In particular, :attr:`TimedeltaArray.dtype` is\n expected to change to be an instance of an ``ExtensionDtype``\n subclass.\n\n Parameters\n ----------\n values : array-like\n The timedelta data.\n\n dtype : numpy.dtype\n Currently, only ``numpy.dtype(\"timedelta64[ns]\")`` is accepted.\n freq : Offset, optional\n copy : bool, default False\n Whether to copy the underlying array of data.\n\n Attributes\n ----------\n None\n\n Methods\n -------\n None\n \"\"\"\n\n _typ = \"timedeltaarray\"\n _scalar_type = Timedelta\n _recognized_scalars = (timedelta, np.timedelta64, Tick)\n _is_recognized_dtype = is_timedelta64_dtype\n\n __array_priority__ = 1000\n # define my properties & methods for delegation\n _other_ops: List[str] = []\n _bool_ops: List[str] = []\n _object_ops = [\"freq\"]\n _field_ops = [\"days\", \"seconds\", \"microseconds\", \"nanoseconds\"]\n _datetimelike_ops = _field_ops + _object_ops + _bool_ops\n _datetimelike_methods = [\n \"to_pytimedelta\",\n \"total_seconds\",\n \"round\",\n \"floor\",\n \"ceil\",\n ]\n\n # Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray)\n # operates pointwise.\n\n def _box_func(self, x) -> Union[Timedelta, NaTType]:\n return Timedelta(x, unit=\"ns\")\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"\n The dtype for the TimedeltaArray.\n\n .. 
warning::\n\n A future version of pandas will change dtype to be an instance\n of a :class:`pandas.api.extensions.ExtensionDtype` subclass,\n not a ``numpy.dtype``.\n\n Returns\n -------\n numpy.dtype\n \"\"\"\n return TD64NS_DTYPE\n\n # ----------------------------------------------------------------\n # Constructors\n\n def __init__(self, values, dtype=TD64NS_DTYPE, freq=lib.no_default, copy=False):\n values = extract_array(values)\n\n inferred_freq = getattr(values, \"_freq\", None)\n explicit_none = freq is None\n freq = freq if freq is not lib.no_default else None\n\n if isinstance(values, type(self)):\n if explicit_none:\n # dont inherit from values\n pass\n elif freq is None:\n freq = values.freq\n elif freq and values.freq:\n freq = to_offset(freq)\n freq, _ = dtl.validate_inferred_freq(freq, values.freq, False)\n values = values._data\n\n if not isinstance(values, np.ndarray):\n msg = (\n f\"Unexpected type '{type(values).__name__}'. 'values' must be a \"\n \"TimedeltaArray ndarray, or Series or Index containing one of those.\"\n )\n raise ValueError(msg)\n if values.ndim not in [1, 2]:\n raise ValueError(\"Only 1-dimensional input arrays are supported.\")\n\n if values.dtype == \"i8\":\n # for compat with datetime/timedelta/period shared methods,\n # we can sometimes get here with int64 values. These represent\n # nanosecond UTC (or tz-naive) unix timestamps\n values = values.view(TD64NS_DTYPE)\n\n _validate_td64_dtype(values.dtype)\n dtype = _validate_td64_dtype(dtype)\n\n if freq == \"infer\":\n msg = (\n \"Frequency inference not allowed in TimedeltaArray.__init__. \"\n \"Use 'pd.array()' instead.\"\n )\n raise ValueError(msg)\n\n if copy:\n values = values.copy()\n if freq:\n freq = to_offset(freq)\n\n self._data = values\n self._dtype = dtype\n self._freq = freq\n\n if inferred_freq is None and freq is not None:\n type(self)._validate_frequency(self, freq)\n\n @classmethod\n def _simple_new(\n cls, values, freq: Optional[BaseOffset] = None, dtype=TD64NS_DTYPE\n ) -> \"TimedeltaArray\":\n assert dtype == TD64NS_DTYPE, dtype\n assert isinstance(values, np.ndarray), type(values)\n if values.dtype != TD64NS_DTYPE:\n assert values.dtype == \"i8\"\n values = values.view(TD64NS_DTYPE)\n\n result = object.__new__(cls)\n result._data = values\n result._freq = to_offset(freq)\n result._dtype = TD64NS_DTYPE\n return result\n\n @classmethod\n def _from_sequence(\n cls, data, *, dtype=TD64NS_DTYPE, copy: bool = False\n ) -> \"TimedeltaArray\":\n if dtype:\n _validate_td64_dtype(dtype)\n\n data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=None)\n freq, _ = dtl.validate_inferred_freq(None, inferred_freq, False)\n\n return cls._simple_new(data, freq=freq)\n\n @classmethod\n def _from_sequence_not_strict(\n cls,\n data,\n dtype=TD64NS_DTYPE,\n copy: bool = False,\n freq=lib.no_default,\n unit=None,\n ) -> \"TimedeltaArray\":\n if dtype:\n _validate_td64_dtype(dtype)\n\n explicit_none = freq is None\n freq = freq if freq is not lib.no_default else None\n\n freq, freq_infer = dtl.maybe_infer_freq(freq)\n\n data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit)\n freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)\n if explicit_none:\n freq = None\n\n result = cls._simple_new(data, freq=freq)\n\n if inferred_freq is None and freq is not None:\n # this condition precludes `freq_infer`\n cls._validate_frequency(result, freq)\n\n elif freq_infer:\n # Set _freq directly to bypass duplicative _validate_frequency\n # check.\n result._freq = 
to_offset(result.inferred_freq)\n\n return result\n\n @classmethod\n def _generate_range(cls, start, end, periods, freq, closed=None):\n\n periods = dtl.validate_periods(periods)\n if freq is None and any(x is None for x in [periods, start, end]):\n raise ValueError(\"Must provide freq argument if no data is supplied\")\n\n if com.count_not_none(start, end, periods, freq) != 3:\n raise ValueError(\n \"Of the four parameters: start, end, periods, \"\n \"and freq, exactly three must be specified\"\n )\n\n if start is not None:\n start = Timedelta(start)\n\n if end is not None:\n end = Timedelta(end)\n\n left_closed, right_closed = dtl.validate_endpoints(closed)\n\n if freq is not None:\n index = generate_regular_range(start, end, periods, freq)\n else:\n index = np.linspace(start.value, end.value, periods).astype(\"i8\")\n\n if not left_closed:\n index = index[1:]\n if not right_closed:\n index = index[:-1]\n\n return cls._simple_new(index, freq=freq)\n\n # ----------------------------------------------------------------\n # DatetimeLike Interface\n\n def _unbox_scalar(self, value, setitem: bool = False) -> np.timedelta64:\n if not isinstance(value, self._scalar_type) and value is not NaT:\n raise ValueError(\"'value' should be a Timedelta.\")\n self._check_compatible_with(value, setitem=setitem)\n return np.timedelta64(value.value, \"ns\")\n\n def _scalar_from_string(self, value):\n return Timedelta(value)\n\n def _check_compatible_with(self, other, setitem: bool = False):\n # we don't have anything to validate.\n pass\n\n # ----------------------------------------------------------------\n # Array-Like / EA-Interface Methods\n\n def astype(self, dtype, copy: bool = True):\n # We handle\n # --> timedelta64[ns]\n # --> timedelta64\n # DatetimeLikeArrayMixin super call handles other cases\n dtype = pandas_dtype(dtype)\n\n if is_timedelta64_dtype(dtype) and not is_timedelta64_ns_dtype(dtype):\n # by pandas convention, converting to non-nano timedelta64\n # returns an int64-dtyped array with ints representing multiples\n # of the desired timedelta unit. 
This is essentially division\n if self._hasnans:\n # avoid double-copying\n result = self._data.astype(dtype, copy=False)\n return self._maybe_mask_results(\n result, fill_value=None, convert=\"float64\"\n )\n result = self._data.astype(dtype, copy=copy)\n return result.astype(\"i8\")\n elif is_timedelta64_ns_dtype(dtype):\n if copy:\n return self.copy()\n return self\n return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy=copy)\n\n def __iter__(self):\n if self.ndim > 1:\n for i in range(len(self)):\n yield self[i]\n else:\n # convert in chunks of 10k for efficiency\n data = self.asi8\n length = len(self)\n chunksize = 10000\n chunks = int(length / chunksize) + 1\n for i in range(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, length)\n converted = ints_to_pytimedelta(data[start_i:end_i], box=True)\n yield from converted\n\n # ----------------------------------------------------------------\n # Reductions\n\n def sum(\n self,\n *,\n axis=None,\n dtype=None,\n out=None,\n keepdims: bool = False,\n initial=None,\n skipna: bool = True,\n min_count: int = 0,\n ):\n nv.validate_sum(\n (), dict(dtype=dtype, out=out, keepdims=keepdims, initial=initial)\n )\n\n result = nanops.nansum(\n self._ndarray, axis=axis, skipna=skipna, min_count=min_count\n )\n return self._wrap_reduction_result(axis, result)\n\n def std(\n self,\n axis=None,\n dtype=None,\n out=None,\n ddof: int = 1,\n keepdims: bool = False,\n skipna: bool = True,\n ):\n nv.validate_stat_ddof_func(\n (), dict(dtype=dtype, out=out, keepdims=keepdims), fname=\"std\"\n )\n\n result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)\n if axis is None or self.ndim == 1:\n return self._box_func(result)\n return self._from_backing_data(result)\n\n # ----------------------------------------------------------------\n # Rendering Methods\n\n def _formatter(self, boxed=False):\n from pandas.io.formats.format import get_format_timedelta64\n\n return get_format_timedelta64(self, box=True)\n\n def _format_native_types(self, na_rep=\"NaT\", date_format=None, **kwargs):\n from pandas.io.formats.format import get_format_timedelta64\n\n formatter = get_format_timedelta64(self._data, na_rep)\n return np.array([formatter(x) for x in self._data.ravel()]).reshape(self.shape)\n\n # ----------------------------------------------------------------\n # Arithmetic Methods\n\n def _add_offset(self, other):\n assert not isinstance(other, Tick)\n raise TypeError(\n f\"cannot add the type {type(other).__name__} to a {type(self).__name__}\"\n )\n\n def _add_period(self, other: Period):\n \"\"\"\n Add a Period object.\n \"\"\"\n # We will wrap in a PeriodArray and defer to the reversed operation\n from .period import PeriodArray\n\n i8vals = np.broadcast_to(other.ordinal, self.shape)\n oth = PeriodArray(i8vals, freq=other.freq)\n return oth + self\n\n def _add_datetime_arraylike(self, other):\n \"\"\"\n Add DatetimeArray/Index or ndarray[datetime64] to TimedeltaArray.\n \"\"\"\n if isinstance(other, np.ndarray):\n # At this point we have already checked that dtype is datetime64\n from pandas.core.arrays import DatetimeArray\n\n other = DatetimeArray(other)\n\n # defer to implementation in DatetimeArray\n return other + self\n\n def _add_datetimelike_scalar(self, other):\n # adding a timedeltaindex to a datetimelike\n from pandas.core.arrays import DatetimeArray\n\n assert other is not NaT\n other = Timestamp(other)\n if other is NaT:\n # In this case we specifically interpret NaT as a datetime, not\n # the timedelta 
interpretation we would get by returning self + NaT\n result = self.asi8.view(\"m8[ms]\") + NaT.to_datetime64()\n return DatetimeArray(result)\n\n i8 = self.asi8\n result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan)\n result = self._maybe_mask_results(result)\n dtype = DatetimeTZDtype(tz=other.tz) if other.tz else DT64NS_DTYPE\n return DatetimeArray(result, dtype=dtype, freq=self.freq)\n\n def _addsub_object_array(self, other, op):\n # Add or subtract Array-like of objects\n try:\n # TimedeltaIndex can only operate with a subset of DateOffset\n # subclasses. Incompatible classes will raise AttributeError,\n # which we re-raise as TypeError\n return super()._addsub_object_array(other, op)\n except AttributeError as err:\n raise TypeError(\n f\"Cannot add/subtract non-tick DateOffset to {type(self).__name__}\"\n ) from err\n\n @unpack_zerodim_and_defer(\"__mul__\")\n def __mul__(self, other) -> \"TimedeltaArray\":\n if is_scalar(other):\n # numpy will accept float and int, raise TypeError for others\n result = self._data * other\n freq = None\n if self.freq is not None and not isna(other):\n freq = self.freq * other\n return type(self)(result, freq=freq)\n\n if not hasattr(other, \"dtype\"):\n # list, tuple\n other = np.array(other)\n if len(other) != len(self) and not is_timedelta64_dtype(other.dtype):\n # Exclude timedelta64 here so we correctly raise TypeError\n # for that instead of ValueError\n raise ValueError(\"Cannot multiply with unequal lengths\")\n\n if is_object_dtype(other.dtype):\n # this multiplication will succeed only if all elements of other\n # are int or float scalars, so we will end up with\n # timedelta64[ns]-dtyped result\n result = [self[n] * other[n] for n in range(len(self))]\n result = np.array(result)\n return type(self)(result)\n\n # numpy will accept float or int dtype, raise TypeError for others\n result = self._data * other\n return type(self)(result)\n\n __rmul__ = __mul__\n\n @unpack_zerodim_and_defer(\"__truediv__\")\n def __truediv__(self, other):\n # timedelta / X is well-defined for timedelta-like or numeric X\n\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n if other is NaT:\n # specifically timedelta64-NaT\n result = np.empty(self.shape, dtype=np.float64)\n result.fill(np.nan)\n return result\n\n # otherwise, dispatch to Timedelta implementation\n return self._data / other\n\n elif lib.is_scalar(other):\n # assume it is numeric\n result = self._data / other\n freq = None\n if self.freq is not None:\n # Tick division is not implemented, so operate on Timedelta\n freq = self.freq.delta / other\n return type(self)(result, freq=freq)\n\n if not hasattr(other, \"dtype\"):\n # e.g. 
list, tuple\n other = np.array(other)\n\n if len(other) != len(self):\n raise ValueError(\"Cannot divide vectors with unequal lengths\")\n\n elif is_timedelta64_dtype(other.dtype):\n # let numpy handle it\n return self._data / other\n\n elif is_object_dtype(other.dtype):\n # We operate on raveled arrays to avoid problems in inference\n # on NaT\n srav = self.ravel()\n orav = other.ravel()\n result = [srav[n] / orav[n] for n in range(len(srav))]\n result = np.array(result).reshape(self.shape)\n\n # We need to do dtype inference in order to keep DataFrame ops\n # behavior consistent with Series behavior\n inferred = lib.infer_dtype(result)\n if inferred == \"timedelta\":\n flat = result.ravel()\n result = type(self)._from_sequence(flat).reshape(result.shape)\n elif inferred == \"floating\":\n result = result.astype(float)\n\n return result\n\n else:\n result = self._data / other\n return type(self)(result)\n\n @unpack_zerodim_and_defer(\"__rtruediv__\")\n def __rtruediv__(self, other):\n # X / timedelta is defined only for timedelta-like X\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n if other is NaT:\n # specifically timedelta64-NaT\n result = np.empty(self.shape, dtype=np.float64)\n result.fill(np.nan)\n return result\n\n # otherwise, dispatch to Timedelta implementation\n return other / self._data\n\n elif lib.is_scalar(other):\n raise TypeError(\n f\"Cannot divide {type(other).__name__} by {type(self).__name__}\"\n )\n\n if not hasattr(other, \"dtype\"):\n # e.g. list, tuple\n other = np.array(other)\n\n if len(other) != len(self):\n raise ValueError(\"Cannot divide vectors with unequal lengths\")\n\n elif is_timedelta64_dtype(other.dtype):\n # let numpy handle it\n return other / self._data\n\n elif is_object_dtype(other.dtype):\n # Note: unlike in __truediv__, we do not _need_ to do type\n # inference on the result. It does not raise, a numeric array\n # is returned. GH#23829\n result = [other[n] / self[n] for n in range(len(self))]\n return np.array(result)\n\n else:\n raise TypeError(\n f\"Cannot divide {other.dtype} data by {type(self).__name__}\"\n )\n\n @unpack_zerodim_and_defer(\"__floordiv__\")\n def __floordiv__(self, other):\n\n if is_scalar(other):\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n if other is NaT:\n # treat this specifically as timedelta-NaT\n result = np.empty(self.shape, dtype=np.float64)\n result.fill(np.nan)\n return result\n\n # dispatch to Timedelta implementation\n result = other.__rfloordiv__(self._data)\n return result\n\n # at this point we should only have numeric scalars; anything\n # else will raise\n result = self.asi8 // other\n result[self._isnan] = iNaT\n freq = None\n if self.freq is not None:\n # Note: freq gets division, not floor-division\n freq = self.freq / other\n if freq.nanos == 0 and self.freq.nanos != 0:\n # e.g. 
if self.freq is Nano(1) then dividing by 2\n # rounds down to zero\n freq = None\n return type(self)(result.view(\"m8[ns]\"), freq=freq)\n\n if not hasattr(other, \"dtype\"):\n # list, tuple\n other = np.array(other)\n if len(other) != len(self):\n raise ValueError(\"Cannot divide with unequal lengths\")\n\n elif is_timedelta64_dtype(other.dtype):\n other = type(self)(other)\n\n # numpy timedelta64 does not natively support floordiv, so operate\n # on the i8 values\n result = self.asi8 // other.asi8\n mask = self._isnan | other._isnan\n if mask.any():\n result = result.astype(np.float64)\n result[mask] = np.nan\n return result\n\n elif is_object_dtype(other.dtype):\n result = [self[n] // other[n] for n in range(len(self))]\n result = np.array(result)\n if lib.infer_dtype(result, skipna=False) == \"timedelta\":\n result, _ = sequence_to_td64ns(result)\n return type(self)(result)\n return result\n\n elif is_integer_dtype(other.dtype) or is_float_dtype(other.dtype):\n result = self._data // other\n return type(self)(result)\n\n else:\n dtype = getattr(other, \"dtype\", type(other).__name__)\n raise TypeError(f\"Cannot divide {dtype} by {type(self).__name__}\")\n\n @unpack_zerodim_and_defer(\"__rfloordiv__\")\n def __rfloordiv__(self, other):\n\n if is_scalar(other):\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n if other is NaT:\n # treat this specifically as timedelta-NaT\n result = np.empty(self.shape, dtype=np.float64)\n result.fill(np.nan)\n return result\n\n # dispatch to Timedelta implementation\n result = other.__floordiv__(self._data)\n return result\n\n raise TypeError(\n f\"Cannot divide {type(other).__name__} by {type(self).__name__}\"\n )\n\n if not hasattr(other, \"dtype\"):\n # list, tuple\n other = np.array(other)\n\n if len(other) != len(self):\n raise ValueError(\"Cannot divide with unequal lengths\")\n\n elif is_timedelta64_dtype(other.dtype):\n other = type(self)(other)\n # numpy timedelta64 does not natively support floordiv, so operate\n # on the i8 values\n result = other.asi8 // self.asi8\n mask = self._isnan | other._isnan\n if mask.any():\n result = result.astype(np.float64)\n result[mask] = np.nan\n return result\n\n elif is_object_dtype(other.dtype):\n result = [other[n] // self[n] for n in range(len(self))]\n result = np.array(result)\n return result\n\n else:\n dtype = getattr(other, \"dtype\", type(other).__name__)\n raise TypeError(f\"Cannot divide {dtype} by {type(self).__name__}\")\n\n @unpack_zerodim_and_defer(\"__mod__\")\n def __mod__(self, other):\n # Note: This is a naive implementation, can likely be optimized\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n return self - (self // other) * other\n\n @unpack_zerodim_and_defer(\"__rmod__\")\n def __rmod__(self, other):\n # Note: This is a naive implementation, can likely be optimized\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n return other - (other // self) * self\n\n @unpack_zerodim_and_defer(\"__divmod__\")\n def __divmod__(self, other):\n # Note: This is a naive implementation, can likely be optimized\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n\n res1 = self // other\n res2 = self - res1 * other\n return res1, res2\n\n @unpack_zerodim_and_defer(\"__rdivmod__\")\n def __rdivmod__(self, other):\n # Note: This is a naive implementation, can likely be optimized\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n\n res1 = other // self\n res2 = other - 
res1 * self\n return res1, res2\n\n def __neg__(self) -> \"TimedeltaArray\":\n if self.freq is not None:\n return type(self)(-self._data, freq=-self.freq)\n return type(self)(-self._data)\n\n def __pos__(self) -> \"TimedeltaArray\":\n return type(self)(self._data, freq=self.freq)\n\n def __abs__(self) -> \"TimedeltaArray\":\n # Note: freq is not preserved\n return type(self)(np.abs(self._data))\n\n # ----------------------------------------------------------------\n # Conversion Methods - Vectorized analogues of Timedelta methods\n\n def total_seconds(self) -> np.ndarray:\n \"\"\"\n Return total duration of each element expressed in seconds.\n\n This method is available directly on TimedeltaArray, TimedeltaIndex\n and on Series containing timedelta values under the ``.dt`` namespace.\n\n Returns\n -------\n seconds : [ndarray, Float64Index, Series]\n When the calling object is a TimedeltaArray, the return type\n is ndarray. When the calling object is a TimedeltaIndex,\n the return type is a Float64Index. When the calling object\n is a Series, the return type is Series of type `float64` whose\n index is the same as the original.\n\n See Also\n --------\n datetime.timedelta.total_seconds : Standard library version\n of this method.\n TimedeltaIndex.components : Return a DataFrame with components of\n each Timedelta.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))\n >>> s\n 0 0 days\n 1 1 days\n 2 2 days\n 3 3 days\n 4 4 days\n dtype: timedelta64[ns]\n\n >>> s.dt.total_seconds()\n 0 0.0\n 1 86400.0\n 2 172800.0\n 3 259200.0\n 4 345600.0\n dtype: float64\n\n **TimedeltaIndex**\n\n >>> idx = pd.to_timedelta(np.arange(5), unit='d')\n >>> idx\n TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq=None)\n\n >>> idx.total_seconds()\n Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0],\n dtype='float64')\n \"\"\"\n return self._maybe_mask_results(1e-9 * self.asi8, fill_value=None)\n\n def to_pytimedelta(self) -> np.ndarray:\n \"\"\"\n Return Timedelta Array/Index as object ndarray of datetime.timedelta\n objects.\n\n Returns\n -------\n datetimes : ndarray\n \"\"\"\n return tslibs.ints_to_pytimedelta(self.asi8)\n\n days = _field_accessor(\"days\", \"days\", \"Number of days for each element.\")\n seconds = _field_accessor(\n \"seconds\",\n \"seconds\",\n \"Number of seconds (>= 0 and less than 1 day) for each element.\",\n )\n microseconds = _field_accessor(\n \"microseconds\",\n \"microseconds\",\n \"Number of microseconds (>= 0 and less than 1 second) for each element.\",\n )\n nanoseconds = _field_accessor(\n \"nanoseconds\",\n \"nanoseconds\",\n \"Number of nanoseconds (>= 0 and less than 1 microsecond) for each element.\",\n )\n\n @property\n def components(self):\n \"\"\"\n Return a dataframe of the components (days, hours, minutes,\n seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.\n\n Returns\n -------\n a DataFrame\n \"\"\"\n from pandas import DataFrame\n\n columns = [\n \"days\",\n \"hours\",\n \"minutes\",\n \"seconds\",\n \"milliseconds\",\n \"microseconds\",\n \"nanoseconds\",\n ]\n hasnans = self._hasnans\n if hasnans:\n\n def f(x):\n if isna(x):\n return [np.nan] * len(columns)\n return x.components\n\n else:\n\n def f(x):\n return x.components\n\n result = DataFrame([f(x) for x in self], columns=columns)\n if not hasnans:\n result = result.astype(\"int64\")\n return result\n\n\n# 
---------------------------------------------------------------------\n# Constructor Helpers\n\n\ndef sequence_to_td64ns(data, copy=False, unit=None, errors=\"raise\"):\n \"\"\"\n Parameters\n ----------\n data : list-like\n copy : bool, default False\n unit : str, optional\n The timedelta unit to treat integers as multiples of. For numeric\n data this defaults to ``'ns'``.\n Must be un-specified if the data contains a str and ``errors==\"raise\"``.\n errors : {\"raise\", \"coerce\", \"ignore\"}, default \"raise\"\n How to handle elements that cannot be converted to timedelta64[ns].\n See ``pandas.to_timedelta`` for details.\n\n Returns\n -------\n converted : numpy.ndarray\n The sequence converted to a numpy array with dtype ``timedelta64[ns]``.\n inferred_freq : Tick or None\n The inferred frequency of the sequence.\n\n Raises\n ------\n ValueError : Data cannot be converted to timedelta64[ns].\n\n Notes\n -----\n Unlike `pandas.to_timedelta`, if setting ``errors=ignore`` will not cause\n errors to be ignored; they are caught and subsequently ignored at a\n higher level.\n \"\"\"\n inferred_freq = None\n if unit is not None:\n unit = parse_timedelta_unit(unit)\n\n # Unwrap whatever we have into a np.ndarray\n if not hasattr(data, \"dtype\"):\n # e.g. list, tuple\n if np.ndim(data) == 0:\n # i.e. generator\n data = list(data)\n data = np.array(data, copy=False)\n elif isinstance(data, ABCSeries):\n data = data._values\n elif isinstance(data, (ABCTimedeltaIndex, TimedeltaArray)):\n inferred_freq = data.freq\n data = data._data\n elif isinstance(data, IntegerArray):\n data = data.to_numpy(\"int64\", na_value=tslibs.iNaT)\n elif is_categorical_dtype(data.dtype):\n data = data.categories.take(data.codes, fill_value=NaT)._values\n copy = False\n\n # Convert whatever we have into timedelta64[ns] dtype\n if is_object_dtype(data.dtype) or is_string_dtype(data.dtype):\n # no need to make a copy, need to convert if string-dtyped\n data = objects_to_td64ns(data, unit=unit, errors=errors)\n copy = False\n\n elif is_integer_dtype(data.dtype):\n # treat as multiples of the given unit\n data, copy_made = ints_to_td64ns(data, unit=unit)\n copy = copy and not copy_made\n\n elif is_float_dtype(data.dtype):\n # cast the unit, multiply base/frac separately\n # to avoid precision issues from float -> int\n mask = np.isnan(data)\n m, p = precision_from_unit(unit or \"ns\")\n base = data.astype(np.int64)\n frac = data - base\n if p:\n frac = np.round(frac, p)\n data = (base * m + (frac * m).astype(np.int64)).view(\"timedelta64[ns]\")\n data[mask] = iNaT\n copy = False\n\n elif is_timedelta64_dtype(data.dtype):\n if data.dtype != TD64NS_DTYPE:\n # non-nano unit\n # TODO: watch out for overflows\n data = data.astype(TD64NS_DTYPE)\n copy = False\n\n else:\n # This includes datetime64-dtype, see GH#23539, GH#29794\n raise TypeError(f\"dtype {data.dtype} cannot be converted to timedelta64[ns]\")\n\n data = np.array(data, copy=copy)\n\n assert data.dtype == \"m8[ns]\", data\n return data, inferred_freq\n\n\ndef ints_to_td64ns(data, unit=\"ns\"):\n \"\"\"\n Convert an ndarray with integer-dtype to timedelta64[ns] dtype, treating\n the integers as multiples of the given timedelta unit.\n\n Parameters\n ----------\n data : numpy.ndarray with integer-dtype\n unit : str, default \"ns\"\n The timedelta unit to treat integers as multiples of.\n\n Returns\n -------\n numpy.ndarray : timedelta64[ns] array converted from data\n bool : whether a copy was made\n \"\"\"\n copy_made = False\n unit = unit if unit is not None else 
\"ns\"\n\n if data.dtype != np.int64:\n # converting to int64 makes a copy, so we can avoid\n # re-copying later\n data = data.astype(np.int64)\n copy_made = True\n\n if unit != \"ns\":\n dtype_str = f\"timedelta64[{unit}]\"\n data = data.view(dtype_str)\n\n # TODO: watch out for overflows when converting from lower-resolution\n data = data.astype(\"timedelta64[ns]\")\n # the astype conversion makes a copy, so we can avoid re-copying later\n copy_made = True\n\n else:\n data = data.view(\"timedelta64[ns]\")\n\n return data, copy_made\n\n\ndef objects_to_td64ns(data, unit=None, errors=\"raise\"):\n \"\"\"\n Convert a object-dtyped or string-dtyped array into an\n timedelta64[ns]-dtyped array.\n\n Parameters\n ----------\n data : ndarray or Index\n unit : str, default \"ns\"\n The timedelta unit to treat integers as multiples of.\n Must not be specified if the data contains a str.\n errors : {\"raise\", \"coerce\", \"ignore\"}, default \"raise\"\n How to handle elements that cannot be converted to timedelta64[ns].\n See ``pandas.to_timedelta`` for details.\n\n Returns\n -------\n numpy.ndarray : timedelta64[ns] array converted from data\n\n Raises\n ------\n ValueError : Data cannot be converted to timedelta64[ns].\n\n Notes\n -----\n Unlike `pandas.to_timedelta`, if setting `errors=ignore` will not cause\n errors to be ignored; they are caught and subsequently ignored at a\n higher level.\n \"\"\"\n # coerce Index to np.ndarray, converting string-dtype if necessary\n values = np.array(data, dtype=np.object_, copy=False)\n\n result = array_to_timedelta64(values, unit=unit, errors=errors)\n return result.view(\"timedelta64[ns]\")\n\n\ndef _validate_td64_dtype(dtype):\n dtype = pandas_dtype(dtype)\n if is_dtype_equal(dtype, np.dtype(\"timedelta64\")):\n # no precision disallowed GH#24806\n msg = (\n \"Passing in 'timedelta' dtype with no precision is not allowed. \"\n \"Please pass in 'timedelta64[ns]' instead.\"\n )\n raise ValueError(msg)\n\n if not is_dtype_equal(dtype, TD64NS_DTYPE):\n raise ValueError(f\"dtype {dtype} cannot be converted to timedelta64[ns]\")\n\n return dtype\n" ]
[ [ "pandas.core.nanops.nansum", "pandas.core.dtypes.common.is_string_dtype", "pandas._libs.tslibs.NaT.to_datetime64", "pandas.core.ops.common.unpack_zerodim_and_defer", "pandas.core.arrays.datetimelike.validate_inferred_freq", "pandas._libs.tslibs.Timedelta", "pandas.core.arrays.datetimelike.maybe_infer_freq", "pandas.core.construction.extract_array", "pandas.core.dtypes.common.is_float_dtype", "pandas._libs.lib.is_scalar", "numpy.broadcast_to", "numpy.dtype", "pandas.core.dtypes.missing.isna", "pandas._libs.lib.infer_dtype", "numpy.empty", "pandas.core.arrays._ranges.generate_regular_range", "pandas.core.dtypes.common.pandas_dtype", "numpy.ndim", "pandas.core.dtypes.common.is_object_dtype", "pandas.core.dtypes.common.is_integer_dtype", "pandas._libs.tslibs.conversion.precision_from_unit", "pandas.core.dtypes.common.is_dtype_equal", "pandas._libs.tslibs.timedeltas.parse_timedelta_unit", "numpy.array", "pandas.core.dtypes.common.is_categorical_dtype", "pandas._libs.tslibs.to_offset", "pandas.core.dtypes.common.is_scalar", "pandas.core.arrays.datetimelike.validate_periods", "pandas.core.arrays.datetimelike.validate_endpoints", "numpy.round", "pandas._libs.tslibs.timedeltas.array_to_timedelta64", "pandas.core.algorithms.checked_add_with_arr", "pandas.core.common.count_not_none", "pandas._libs.tslibs.fields.get_timedelta_field", "pandas._libs.tslibs.timedeltas.ints_to_pytimedelta", "numpy.timedelta64", "pandas.core.dtypes.dtypes.DatetimeTZDtype", "pandas.core.dtypes.common.is_timedelta64_ns_dtype", "numpy.isnan", "pandas.core.arrays.datetimelike.DatetimeLikeArrayMixin.astype", "pandas.core.arrays.DatetimeArray", "pandas.io.formats.format.get_format_timedelta64", "pandas._libs.tslibs.ints_to_pytimedelta", "numpy.abs", "pandas.core.dtypes.common.is_timedelta64_dtype", "numpy.linspace", "pandas.core.nanops.nanstd", "pandas._libs.tslibs.Timestamp" ] ]
Nikoleta-v3/APIs
[ "1493c9ad1c7d9135aae57ac069674fa97f6a26fc" ]
[ "src/arcas/tools.py" ]
[ "import hashlib\nimport itertools\nfrom xml.etree import ElementTree\n\nimport pandas as pd\nimport requests\n\nimport ratelimit\n\n\nclass APIError(Exception):\n \"\"\"An API Error Exception.\"\"\"\n\n def __init__(self, status):\n self.status = status\n\n def __str__(self):\n return \"APIError: status={}\".format(self.status)\n\n\nclass Api():\n\n def __init__(self, standard):\n \"\"\"Initializations\"\"\"\n self.standard = standard\n\n def create_url_search(self, parameters):\n \"\"\"Creates the search url, combining the standard url and various\n search parameters.\"\"\"\n url = self.standard\n url += parameters[0]\n for i in parameters[1:]:\n url += '&{}'.format(i)\n return url\n\n @staticmethod\n def keys():\n \"\"\"\n Fields we are keeping from arXiv results.\n \"\"\"\n keys = ['url', 'key', 'unique_key', 'title', 'author', 'abstract', 'doi',\n 'date', 'journal', 'provenance', 'category', 'score', 'open_access']\n return keys\n\n @staticmethod\n @ratelimit.rate_limited(3)\n def make_request(url):\n \"\"\"Request from an API and returns response.\"\"\"\n response = requests.get(url, stream=True)\n if response.status_code != 200:\n raise APIError(response.status_code)\n return response\n\n @staticmethod\n def xml_to_dict(record):\n \"\"\"Xml response with information on article to dictionary\"\"\"\n d = {}\n for at in record.iter():\n key = at.tag.split('}')[-1]\n if key in d and at.text is not None:\n d[key] += ', {}'.format(at.text)\n else:\n d.update({key: at.text})\n return d\n\n @staticmethod\n def to_dataframe(raw_article):\n pass\n\n @staticmethod\n def parse(root):\n pass\n\n @staticmethod\n def parameters_fix(author=None, title=None, abstract=None, year=None,\n records=None, start=None, category=None, journal=None):\n pass\n\n @staticmethod\n def get_root(response):\n root = ElementTree.parse(response.raw).getroot()\n return root\n\n @staticmethod\n def lower_case(post):\n post = dict((k.lower() if isinstance(k, str) else k,\n v.lower() if isinstance(v, str) else v) for k, v in\n post.items())\n return post\n\n @staticmethod\n def create_keys(raw_article):\n \"\"\"\n Returns public key 'AuthorYear' and\n unique key hash('Author''Title''Year''Abstract')\n \"\"\"\n try:\n full_name = raw_article['author'][0].split(' ')\n except (TypeError, IndexError) as e:\n full_name = [None]\n year = raw_article['date']\n string = '{}{}{}{}'.format(full_name[-1], raw_article['title'], year,\n raw_article['abstract'])\n\n hash_object = hashlib.md5(string.encode('utf-8'))\n\n key = '{}{}'.format(full_name[-1], year)\n unique_key = hash_object.hexdigest()\n\n return key, unique_key\n\n def dict_to_dataframe(self, raw_article):\n \"\"\"\n Takes a dictionary and returns a dataframe\n \"\"\"\n values = []\n for key in self.keys():\n if type(raw_article[key]) is not list:\n values.append([raw_article[key]])\n else:\n values.append(raw_article[key])\n data = []\n for row in itertools.product(*values):\n data.append(row)\n df = pd.DataFrame(data, columns=self.keys())\n return df\n\n @staticmethod\n def export(df, filename):\n \"\"\" Write the results to a json file\n \"\"\"\n df.to_json(filename)\n\n def run(self, url, arguments, validate):\n \"\"\"Putting everything together. 
Makes the request,\n transforms from xml to dict to a standardized format and output to\n json file.\n \"\"\"\n response = self.make_request(url)\n root = self.get_root(response)\n raw_articles = self.parse(root)\n if not raw_articles:\n raise ValueError('Empty results at {}'.format(url))\n else:\n dfs = []\n for raw_article in raw_articles:\n df = self.to_dataframe(raw_article)\n dfs.append(df)\n df = pd.concat(dfs, ignore_index=True)\n\n self.export(df, filename=arguments['-f'])\n" ]
[ [ "pandas.concat" ] ]
jfear/larval_gonad_ovary
[ "b0941dbdd450aae5efd6ff60632e6eec7574ab69" ]
[ "scrnaseq-wf/scripts/raw_by_cluster.py" ]
[ "import pandas as pd\n\nraw = pd.read_parquet(snakemake.input.raw[0])\nclusters = pd.read_parquet(snakemake.input.cluster[0])\nraw_cluster = raw.T.join(clusters).groupby('cluster').sum().T\nraw_cluster.columns = [f'clus{x}' for x in raw_cluster.columns]\nraw_cluster.to_parquet(snakemake.output[0])\n" ]
[ [ "pandas.read_parquet" ] ]
chendeheng611/pymatting
[ "06689a44e34eabc5edb81c7bd99e1f039796bd15" ]
[ "pymatting/alpha/estimate_alpha_lkm.py" ]
[ "from pymatting.laplacian.lkm_laplacian import lkm_laplacian\nfrom pymatting.util.util import trimap_split\nfrom pymatting.solver.cg import cg\nimport numpy as np\n\n\ndef estimate_alpha_lkm(image, trimap, laplacian_kwargs={}, cg_kwargs={}):\n \"\"\"\n Estimate alpha from an input image and an input trimap using Learning Based Digital Matting as proposed by :cite:`he2010fast`.\n\n Parameters\n ----------\n image: numpy.ndarray\n Image with shape :math:`h \\\\times w \\\\times d` for which the alpha matte should be estimated\n trimap: numpy.ndarray\n Trimap with shape :math:`h \\\\times w \\\\times 1` of the image\n laplacian_kwargs: dictionary\n Arguments passed to the :code:`lkm_laplacian` function\n cg_kwargs: dictionary\n Arguments passed to the :code:`cg` solver\n\n Returns\n -------\n alpha: numpy.ndarray\n Estimated alpha matte\n\n Example\n -------\n >>> from pymatting import *\n >>> image = load_image(\"data/lemur/lemur.png\", \"RGB\")\n >>> trimap = load_image(\"data/lemur/lemur_trimap.png\", \"GRAY\")\n >>> alpha = estimate_alpha_lkm(\n ... image,\n ... trimap,\n ... laplacian_kwargs={\"epsilon\": 1e-6, \"radius\": 15},\n ... cg_kwargs={\"maxiter\":2000})\n\n \"\"\"\n L_matvec, diag_L = lkm_laplacian(image, **laplacian_kwargs)\n\n is_fg, is_bg, is_known, is_unknown = trimap_split(trimap)\n\n lambda_value = 100.0\n\n c = lambda_value * is_known\n b = lambda_value * is_fg\n\n inv_diag_A = 1.0 / (diag_L + c)\n\n def A_matvec(x):\n return L_matvec(x) + c * x\n\n def jacobi(x):\n return inv_diag_A * x\n\n x = cg(A_matvec, b, M=jacobi, **cg_kwargs)\n\n alpha = np.clip(x, 0, 1).reshape(trimap.shape)\n\n return alpha\n" ]
[ [ "numpy.clip" ] ]
bask0/h2m
[ "4505b7958b3bd524b059d9585294f27e8a22fc1e" ]
[ "src/dataprocessing/datasets/ds_swe_monthly.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nPreprocess snow water equivalent dataset.\n\nCRUNCEP/v8/:\nhttp://www.globsnow.info/\n\nIn:\nSpatial: 0.25 deg\nTemporal: daily\n\nOut:\nSpatial: 1 deg\nTemporal: monthly\n\nSteps:\n1) Harmonize\n2) Gapfilling: Fill SWE values with 0 where 24 days of data are missing (SWE\n has missing values where no snow, and entire Southern Hemisphere) and the\n mean over the same window iw below 10 in the snow cover fraction dataset.\n This is a very conservative gapfilling for pixel-time-steps where we are\n very confident that no snow is present.\n\n\"\"\"\n\nimport os\nimport xarray as xr\nimport logging\nimport numpy as np\nimport pandas as pd\n\nfrom utils.pyutils import exit_if_exists, rm_existing\nfrom utils.cdo_wrappers import cdo_gridbox\nfrom dataprocessing.plotting import plot_var\nfrom dataprocessing.datasets.config import \\\n years_targets, \\\n dir_source, \\\n dir_target, \\\n overwrite\n\nlogging.info('Processing dataset: swe')\nfiles_in = [os.path.join(\n dir_source,\n '0d25_daily/Globsnow_SWE/v2/Data/SWE.1440.720.{:4d}.nc'.format(y))\n for y in years_targets\n]\nfile_out = os.path.join(\n dir_target, 'processed/1d/monthly/swe.nc'\n)\nfile_tmp0 = file_out.replace('.nc', '_tmp0.nc')\nfile_tmp1 = file_out.replace('.nc', '_tmp1.nc')\n\nfiles_scf_in = [os.path.join(\n dir_source,\n '1d00_8daily/MODIS/MOD10C2.006/Data/Eight_Day_CMG_Snow_Cover/Eight_Day_CMG_Snow_Cover.360.180.{:4d}.nc'.format(y))\n for y in years_targets\n]\nfiles_scf_out = os.path.join(\n dir_target, 'processed/1d/8daily/swe.nc'\n)\n\nexit_if_exists(file_out, overwrite)\nos.makedirs(os.path.dirname(file_out), exist_ok=True)\nos.makedirs(os.path.dirname(files_scf_out), exist_ok=True)\n\nscf = xr.open_mfdataset(files_scf_in)\nscf = scf.rename({\n 'Eight_Day_CMG_Snow_Cover': 'val',\n 'latitude': 'lat',\n 'longitude': 'lon'})\nscf.to_netcdf(files_scf_out)\n\n# Drop SWE_var as not needed, stack datasets.\n# Set -1 and -2 to nan as these are masked / non-land values.\nswe_stack = []\nfor p in files_in:\n d = xr.open_dataset(p).drop('SWE_var').rename({'SWE': 'val'})\n d = d.where(d.val != -2, np.nan)\n d = d.where(d.val != -1, np.nan)\n swe_stack.append(d)\n\n# Fix missing attributes for lat and lon (because of cdo).\nlat_attrs = dict(\n long_name='Latitude',\n standard_name='latitude',\n units='degrees_north',\n axis='Y',\n valid_min=-90.0,\n valid_max=90.0\n)\nlon_attrs = dict(\n long_name='Longitude',\n standard_name='longitude',\n units='degrees_east',\n axis='X',\n modulo=360.0,\n topology='circular',\n valid_min=-180.0,\n valid_max=180.0,\n)\nswe = xr.concat(swe_stack, dim='time')\nswe.lat.attrs.update(lat_attrs)\nswe.lon.attrs.update(lon_attrs)\nswe.to_netcdf(file_tmp0)\nswe.close()\n\n# Remap to 1° resolution.\ncdo_gridbox(\n in_files=file_tmp0,\n out_files=file_tmp1,\n nlat=4,\n nlon=4,\n remap_alg='mean')\n\n# Find pixel-time-steps with where the mean in a rolling window of 3\n# observations (24 days) is below a threshold.\nscf = xr.open_dataset(files_scf_out)\nscf_threshold = 10\nscf_red = scf.rolling(time=3, min_periods=3, center=True).mean()\n# Put last observarion back as this is dropped above.\nfill_slice = scf_red.isel(time=0)\nfill_slice['time'] = [np.datetime64('{}-12-31'.format(years_targets[-1]))]\nscf_red = scf_red.merge(fill_slice)\nscf_red_mask = scf_red < scf_threshold\n\n# Resample to daily resolution. 
This will take the nearest neighbor in the time\n# dimension, but only if in range of 4 days, such that missing values persist.\nscf_red_mask_1d = scf_red_mask.resample(time='1D').nearest(\n tolerance='4D').sel(time=slice('{}-01-01'.format(years_targets[0]), None))\n\n# Get mask per pixel-time-step where in a window of 24 days window all pixels are\n# missing. We only fill those values.\nswe = xr.open_dataset(file_tmp1)\nswe_missing = swe.isnull()\nswe_num_missing = swe_missing.rolling(time=24).sum().sel(\n time=slice('{}-01-01'.format(years_targets[0]), None))\nswe_num_missing_mask = swe_num_missing == 24\n\n# Fill gaps with 0.\nscf_red_mask_1d['time'] = swe_num_missing_mask.time\nfill_mask = scf_red_mask_1d.val * swe_num_missing_mask.val.astype(np.float)\n\nswe_timesubs = swe.sel(time=slice('{}-01-01'.format(years_targets[0]), None))\nswe_gapfiled = swe_timesubs.where(1-fill_mask, 0)\nscf_land_mask = scf.val.notnull().any('time')\nswe_gapfiled = swe_gapfiled.where(scf_land_mask, np.nan)\n\n# to monthly.\nswe_gapfiled = swe_gapfiled.resample(time='MS', keep_attrs=True, skipna=True).mean()\nmonth_bounds = np.concatenate((\n pd.date_range(\n start='{:d}-01-01'.format(years_targets[0]),\n end='{:d}-12-31'.format(\n years_targets[-1]), freq='MS').values.reshape(-1, 1),\n pd.date_range(\n start='{:d}-01-01'.format(years_targets[0]),\n end='{:d}-12-31'.format(years_targets[-1]), freq='M').values.reshape(-1, 1)), axis=1)\n\nmonth_bounds = xr.DataArray(\n month_bounds, coords=[swe_gapfiled.time, xr.IndexVariable('bounds', [0, 1])])\nswe_gapfiled['time_bnds'] = month_bounds\nswe_gapfiled['time'] = swe_gapfiled.time + pd.Timedelta(15, 'D')\n\nswe_gapfiled.to_netcdf(file_out)\n\nrm_existing(file_tmp0)\nrm_existing(file_tmp1)\nrm_existing(files_scf_out)\n\nplot_path = __file__.replace('.py', '.jpg')\nplot_var(path=file_out, plot_path=plot_path)\n\nlogging.info('Done processing dataset: swe')\n" ]
[ [ "pandas.Timedelta" ] ]
densmirn/sdc
[ "30e53955a88506a5134d75d843205dbd5d576051" ]
[ "examples/series/series_astype.py" ]
[ "# *****************************************************************************\n# Copyright (c) 2020, Intel Corporation All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\n\"\"\"\nExpect Series\n0 3.000000\n1 -10.000000\n2 nan\n3 0.000000\n4 92.000000\ndtype: object\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom numba import njit\n\n\n@njit\ndef series_astype():\n series = pd.Series([3, -10, np.nan, 0, 92])\n\n return series.astype(str)\n\n\nprint(series_astype())\n" ]
[ [ "pandas.Series" ] ]
slamavl/quantarhei
[ "d822bc2db86152c418e330a9152e7866869776f7", "d822bc2db86152c418e330a9152e7866869776f7" ]
[ "quantarhei/qm/liouvillespace/tdredfieldtensor.py", "quantarhei/qm/liouvillespace/rates/ratematrix.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy\nimport scipy\n\nfrom .redfieldtensor import RedfieldRelaxationTensor\nfrom ...core.time import TimeDependent\n\nclass TDRedfieldRelaxationTensor(RedfieldRelaxationTensor, TimeDependent):\n \n \n \n def _implementation(self, ham, sbi):\n \"\"\" Reference implementation, completely in Python\n \n Implementation of Redfield relaxation tensor according to \n \n V. May and O. Kuehn, Charge and Energy Transfer Dynamics in Molecular\n System, Wiley-VCH, Berlin, 2000, 1st edition, chapter 3.8.2.\n In particular we refer to Eq. (3.8.13) on page 132\n \n We assume the system-bath interaction operator in form of Eq. (3.5.30)\n with the bath part specified through two-point correlation functions.\n We construct operators K_{m} introduced in Eq. (3.5.30) and \n operators \\Lambda_{m} of Eq. (3.8.11). \n \n We do not delete the imaginary part of the tensor (as is done later \n in Section 3.8.3 to get the so-called \"Multi-level Redfield\n Equations\"). Such a deletion can be done later manually. \n \n \n \"\"\"\n #\n # dimension of the Hamiltonian (includes excitons\n # with all multiplicities specified at its creation)\n #\n Na = ham.dim #data.shape[0]\n \n # time axis\n ta = sbi.TimeAxis\n \n #\n # is this beyond single excitation band?\n #\n multi_ex = False\n \n # figure out if the aggregate specifies more than one exciton band\n if sbi.aggregate is not None:\n agg = sbi.aggregate\n if agg.mult > 1:\n multi_ex = True \n \n #\n # shorten the interval of integration if a cut-off time is set\n #\n if self._has_cutoff_time:\n # index of the cut-off time on the time axis\n tcut = ta.nearest(self.cutoff_time)\n # select the section of the time axis up to the cut-off time\n tm = ta.data[0:tcut]\n # length of the section corresponds to the index of cut-off time\n length = tcut\n else:\n # if cut-off time is not set then we take the whole time axis\n tm = ta.data\n # and the length corresponds to the length of the time axis\n length = ta.length\n\n #\n # Get eigenenergies and transformation matrix of the Hamiltonian\n #\n if True:\n hD, SS = numpy.linalg.eigh(ham.data) \n \n #\n # Find all transition frequencies\n # \n Om = numpy.zeros((Na, Na))\n for a in range(Na):\n for b in range(Na):\n Om[a,b] = hD[a] - hD[b]\n \n # number of baths - one per monomer \n Nb = sbi.N\n\n self.Nt = length\n Nt = self.Nt\n \n #\n # Site K_m operators \n #\n\n Km = numpy.zeros((Nb, Na, Na), dtype=numpy.float64) \n # Transform site operators \n S1 = scipy.linalg.inv(SS)\n #FIXME: SBI should also be basis controlled\n for ns in range(Nb): \n Km[ns,:,:] = numpy.dot(S1, numpy.dot(sbi.KK[ns,:,:],SS))\n \n #\n # \\Lambda_m operator\n #\n \n # Integrals of correlation functions from the set \n Lm = numpy.zeros((Nt, Nb, Na, Na), dtype=numpy.complex128)\n for ms in range(Nb):\n #for ns in range(Nb):\n if not multi_ex:\n ns = ms\n \n # correlation function of site ns (if ns == ms)\n # or a cross-correlation function of sites ns and ms\n \n #FIXME: reaching correct correlation function is a nightmare!!!\n rc1 = sbi.CC.get_coft(ms, ns) \n \n for a in range(Na):\n for b in range(Na):\n \n # argument of the integration\n eexp = numpy.exp(-1.0j*Om[a,b]*tm) \n rc = rc1[0:length]*eexp\n \n # spline integration instead of FFT\n rr = numpy.real(rc)\n ri = numpy.imag(rc)\n sr = scipy.interpolate.UnivariateSpline(tm,\n rr, s=0).antiderivative()(tm)\n si = scipy.interpolate.UnivariateSpline(tm,\n ri, s=0).antiderivative()(tm)\n \n # we take the last value (integral to infinity)\n # #### cc_mnab = 
(sr[length-1] + 1.0j*si[length-1]) \n cc_mnab = (sr + 1.0j*si)\n \n # \\Lambda_m operators\n Lm[:,ms,a,b] += cc_mnab*Km[ns,a,b] \n \n \n # create the Hermite conjuged version of \\Lamnda_m\n Ld = numpy.zeros((Nt, Nb, Na, Na), dtype=numpy.complex128)\n for tt in range(Nt):\n for ms in range(Nb):\n Ld[tt, ms, :, :] += numpy.conj(numpy.transpose(Lm[tt,ms,:,:])) \n \n if self.as_operators:\n \n # save the operators - propagation methods must know about them\n self.Km = Km\n self.Lm = Lm\n self.Ld = Ld\n \n else:\n \n # save the relaxation tensor\n RR = self._convert_operators_2_tensor(Km, Lm, Ld)\n \n\n if True:\n self.data = RR\n self._data_initialized = True\n\n self._is_initialized = True\n\n \n def _convert_operators_2_tensor(self, Km, Lm, Ld):\n \"\"\"Converts operator representation to the tensor one\n \n Convertes operator representation of the Redfield tensor\n into a truely tensor representation\n \n Parameters\n ----------\n \n Km : 3D array\n K_m operators\n \n Lm : 3D array\n \\Lambda_m operators\n \n Ld : 3D array\n Hermite conjuget \\Lambda_m operators\n \n \"\"\" \n \n Na = self.Hamiltonian.data.shape[0]\n Nb = self.SystemBathInteraction.N\n Nt = self.Nt\n \n RR = numpy.zeros((Nt, Na, Na, Na, Na), dtype=numpy.complex128)\n \n for m in range(Nb):\n #print(\"m =\", m ,\"of\", Nb)\n for tt in range(Nt):\n KmLm = numpy.dot(Km[m,:,:],Lm[tt,m,:,:])\n LdKm = numpy.dot(Ld[tt,m,:,:],Km[m,:,:])\n for a in range(Na):\n for b in range(Na):\n for c in range(Na):\n for d in range(Na):\n \n RR[tt,a,b,c,d] += (Km[m,a,c]*Ld[tt,m,d,b] \n + Lm[tt,m,a,c]*Km[m,d,b])\n if b == d:\n RR[tt,a,b,c,d] -= KmLm[a,c] \n if a == c:\n RR[tt,a,b,c,d] -= LdKm[d,b]\n \n return RR\n \n \n def transform(self, SS, inv=None):\n \"\"\"Transformation of the tensor by a given matrix\n \n \n This function transforms the Operator into a different basis, using\n a given transformation matrix.\n \n Parameters\n ----------\n \n SS : matrix, numpy.ndarray\n transformation matrix\n \n inv : matrix, numpy.ndarray\n inverse of the transformation matrix\n \n \"\"\" \n\n if inv is None:\n S1 = numpy.linalg.inv(SS)\n else:\n S1 = inv\n dim = SS.shape[0]\n \n\n if not self._data_initialized:\n for tt in range(self.Nt):\n for m in range(self.Km.shape[0]):\n self.Lm[tt, m, :, :] = \\\n numpy.dot(S1,numpy.dot(self.Lm[tt, m, :, :],SS))\n self.Ld[tt, m, :, :] = \\\n numpy.dot(S1,numpy.dot(self.Ld[tt, m, :, :],SS)) \n for m in range(self.Km.shape[0]):\n self.Km[m, :, :] = numpy.dot(S1,numpy.dot(self.Km[m, :, :],SS))\n \n return\n \n \n if (self.manager.warn_about_basis_change):\n print(\"\\nQr >>> Relaxation tensor '%s' changes basis\" %self.name)\n \n \n for tt in range(self.Nt):\n for c in range(dim):\n for d in range(dim):\n self._data[tt,:,:,c,d] = \\\n numpy.dot(S1,numpy.dot(self._data[tt,:,:,c,d],SS))\n \n for a in range(dim):\n for b in range(dim):\n self._data[tt,a,b,:,:] = \\\n numpy.dot(S1,numpy.dot(self._data[tt,a,b,:,:],SS))\n\n \n def secularize(self):\n \"\"\"Secularizes the relaxation tensor\n\n\n \"\"\"\n if self.as_operators:\n raise Exception(\"Cannot be secularized in an opeator form\")\n \n else:\n N = self.data.shape[1]\n for ii in range(N):\n for jj in range(N):\n for kk in range(N):\n for ll in range(N):\n if not (((ii == jj) and (kk == ll)) \n or ((ii == kk) and (jj == ll))) :\n self.data[:,ii,jj,kk,ll] = 0", "# -*- coding: utf-8 -*-\nimport numpy\n\nfrom ....core.matrixdata import MatrixData\n\nclass RateMatrix(MatrixData):\n \"\"\"Represents a population transfer rate matrix\n \n \n \"\"\"\n\n def __init__(self, 
dim=None, data=None):\n \n self.N = 0\n \n if dim is not None:\n self.N = dim\n \n if data is not None:\n # check if data are rectangular\n if data.shape[0] != data.shape[1]:\n raise Exception(\"Expecting rectangular matrix\")\n \n if self.N == 0:\n self.N = data.shape[0]\n self.data = data\n elif self.N != dim:\n raise Exception(\"Inconsistent data dimensions\")\n \n else:\n if self.N == 0:\n raise Exception(\"One of the arguments has to be specified\")\n \n else:\n self.data = numpy.zeros((self.N,self.N), dtype=numpy.float64)\n \n \n \n def set_rate(self, pos, value):\n \"\"\" Sets a value of a rate between two states\n \n Diagonal depopulation rates are automatically updated\n \n \n \"\"\"\n N = pos[0]\n M = pos[1]\n orig_val = self.data[N,M]\n self.data[N,M] = value\n \n self.data[M,M] += orig_val\n self.data[M,M] -= value\n \n " ]
[ [ "numpy.dot", "numpy.zeros", "scipy.interpolate.UnivariateSpline", "numpy.linalg.eigh", "numpy.exp", "numpy.real", "numpy.transpose", "scipy.linalg.inv", "numpy.imag", "numpy.linalg.inv" ], [ "numpy.zeros" ] ]
pblottiere/openspectra
[ "cae5dddd6cba9722a57e28ecc54525dfbe8584ea" ]
[ "openspectra/openspectra_file.py" ]
[ "# Developed by Joseph M. Conti and Joseph W. Boardman on 1/21/19 6:29 PM.\n# Last modified 1/21/19 6:29 PM\n# Copyright (c) 2019. All rights reserved.\nimport copy\nimport logging\nimport math\nimport re\nfrom abc import ABC, abstractmethod\nfrom math import cos, sin\nfrom pathlib import Path\nfrom typing import List, Union, Tuple, Dict, Callable\n\nimport numpy as np\n\nfrom openspectra.utils import LogHelper, Logger\n\n\nclass LinearImageStretch(ABC):\n\n @staticmethod\n def create_default_stretch(parameters:str):\n result:LinearImageStretch = None\n if parameters is not None:\n if not re.match(\".*linear$\", parameters):\n raise OpenSpectraHeaderError(\"Only 'linear' 'default stretch' is supported, got: {0}\", parameters)\n else:\n parts = re.split(\"\\s+\", parameters)\n if re.match(\"[0-9]*[\\.][0-9]*%\", parts[0]):\n result = PercentageStretch(float(re.split(\"%\", parts[0])[0]))\n elif len(parts) == 3:\n result = ValueStretch(float(parts[0]), float(parts[1]))\n else:\n raise OpenSpectraHeaderError(\"'default stretch' value is malformed, value was: {0}\", parameters)\n\n return result\n\n @abstractmethod\n def percentage(self) -> Union[int, float]:\n raise NotImplementedError(\"Method not implemented on this class\")\n\n @abstractmethod\n def low(self) -> Union[int, float]:\n raise NotImplementedError(\"Method not implemented on this class\")\n\n @abstractmethod\n def high(self) -> Union[int, float]:\n raise NotImplementedError(\"Method not implemented on this class\")\n\n\nclass PercentageStretch(LinearImageStretch):\n\n def __init__(self, percentage:Union[int, float]):\n self.__stretch = percentage\n\n def __str__(self):\n return \"{0}% linear\".format(self.__stretch)\n\n def percentage(self) -> Union[int, float]:\n return self.__stretch\n\n def low(self) -> Union[int, float]:\n raise NotImplementedError(\"Method not implemented on this sub-class\")\n\n def high(self) -> Union[int, float]:\n raise NotImplementedError(\"Method not implemented on this sub-class\")\n\n\nclass ValueStretch(LinearImageStretch):\n\n def __init__(self, low:Union[int, float], high:Union[int, float]):\n self.__low = low\n self.__high = high\n\n def __str__(self):\n return \"{0} {1} linear\".format(self.__low, self.__high)\n\n def percentage(self) -> Union[int, float]:\n raise NotImplementedError(\"Method not implemented on this sub-class\")\n\n def low(self) -> Union[int, float]:\n return self.__low\n\n def high(self) -> Union[int, float]:\n return self.__high\n\n\nclass OpenSpectraHeader:\n \"\"\"A class that reads, validates and makes open spectra header file details available\"\"\"\n\n __LOG:Logger = LogHelper.logger(\"OpenSpectraHeader\")\n\n _BAND_NAMES = \"band names\"\n _BANDS = \"bands\"\n __DATA_TYPE = \"data type\"\n _HEADER_OFFSET = \"header offset\"\n _INTERLEAVE = \"interleave\"\n _LINES = \"lines\"\n __REFLECTANCE_SCALE_FACTOR = \"reflectance scale factor\"\n _SAMPLES = \"samples\"\n _WAVELENGTHS = \"wavelength\"\n __WAVELENGTH_UNITS = \"wavelength units\"\n _MAP_INFO = \"map info\"\n __SENSOR_TYPE = \"sensor type\"\n __BYTE_ORDER = \"byte order\"\n __FILE_TYPE = \"file type\"\n __DESCRIPTION = \"description\"\n __DATA_IGNORE_VALUE = \"data ignore value\"\n __DEFAULT_STRETCH = \"default stretch\"\n _BAD_BAND_LIST = \"bbl\"\n __COORD_SYSTEM_STR = \"coordinate system string\"\n\n __READ_AS_STRING = [__DESCRIPTION, __COORD_SYSTEM_STR]\n __SUPPORTED_FIELDS = [_BAND_NAMES, _BANDS, __DATA_TYPE, _HEADER_OFFSET, _INTERLEAVE, _LINES,\n __REFLECTANCE_SCALE_FACTOR, _SAMPLES, _WAVELENGTHS, 
__WAVELENGTH_UNITS,\n _MAP_INFO, __SENSOR_TYPE, __BYTE_ORDER, __FILE_TYPE, __DESCRIPTION,\n __DATA_IGNORE_VALUE, __DEFAULT_STRETCH, _BAD_BAND_LIST, __COORD_SYSTEM_STR]\n\n _DATA_TYPE_DIC:Dict[str, type] = {\n \"1\": np.uint8,\n \"2\": np.int16,\n \"3\": np.int32,\n \"4\": np.float32,\n \"5\": np.float64,\n \"6\": np.complex64,\n \"9\": np.complex128,\n \"12\": np.uint16,\n \"13\": np.uint32,\n \"14\": np.int64,\n \"15\": np.uint64}\n\n BIL_INTERLEAVE:str = \"bil\"\n BSQ_INTERLEAVE:str = \"bsq\"\n BIP_INTERLEAVE:str = \"bip\"\n\n class MapInfo:\n \"\"\"\"A simple class for holding map info from a header file\"\"\"\n\n __LOG: Logger = LogHelper.logger(\"OpenSpectraHeader.MapInfo\")\n\n def __init__(self, map_info_list:List[str]=None, map_info=None):\n if map_info_list is not None:\n self.__init_from_list(map_info_list)\n elif map_info is not None:\n self.__init_from_map_info(map_info)\n else:\n raise ValueError(\"One of map_info_list or map_info must be passed\")\n\n def __init_from_list(self, map_info_list:List[str]):\n # Example [UTM, 1.000, 1.000, 620006.407, 2376995.930, 7.8000000000e+000, 7.8000000000e+000, 4,\n # North, WGS-84, units=Meters, rotation=29.00000000]\n list_size = len(map_info_list)\n if list_size < 7:\n raise OpenSpectraHeaderError(\n \"Found map info but expected it to have at lease 7 elements, only found {0}\".format(len(map_info_list)))\n\n # grab the minimal data\n self.__projection_name:str = map_info_list[0]\n self.__x_reference_pixel:float = float(map_info_list[1])\n self.__y_reference_pixel:float = float(map_info_list[2])\n self.__x_zero_coordinate:float = float(map_info_list[3])\n self.__y_zero_coordinate:float = float(map_info_list[4])\n self.__x_pixel_size:float = float(map_info_list[5])\n self.__y_pixel_size:float = float(map_info_list[6])\n\n self.__projection_zone:int = None\n self.__projection_area:str = None\n self.__datum:str = None\n self.__units:str = None\n self.__rotation:float = None\n self.__rotation_deg:float = None\n\n index = 7\n if self.__projection_name == \"UTM\":\n if list_size < 9:\n raise OpenSpectraHeaderError(\"Map info projection was UTM but zone and area parameters are missing\");\n\n self.__projection_zone = int(map_info_list[index])\n index += 1\n self.__projection_area = map_info_list[index].strip()\n index += 1\n\n if list_size > index + 1:\n self.__datum = map_info_list[index].strip()\n index += 1\n\n for index in range(index, list_size):\n pair = re.split(\"=\", map_info_list[index])\n if len(pair) == 2:\n name:str = pair[0].strip()\n value:str = pair[1].strip()\n if name == \"units\":\n self.__units:str = value\n elif name == \"rotation\":\n # convert rotation angle to radians for compatibility\n # with the math cos and sin functions\n self.__rotation_deg = float(value)\n self.__rotation:float = math.radians(self.__rotation_deg)\n else:\n OpenSpectraHeader.MapInfo.__LOG.warning(\n \"Ignoring unexpected map info item with name: {0}, value: {1}\".format(name, value))\n else:\n OpenSpectraHeader.MapInfo.__LOG.warning(\n \"Could not split map info item: {0}\".format(map_info_list[index]))\n\n def __init_from_map_info(self, map_info):\n self.__projection_name = map_info.projection_name()\n self.__x_reference_pixel = map_info.x_reference_pixel()\n self.__y_reference_pixel = map_info.y_reference_pixel()\n self.__x_zero_coordinate = map_info.x_zero_coordinate()\n self.__y_zero_coordinate = map_info.y_zero_coordinate()\n self.__x_pixel_size = map_info.x_pixel_size()\n self.__y_pixel_size = map_info.y_pixel_size()\n self.__projection_zone 
= map_info.projection_zone()\n self.__projection_area = map_info.projection_area()\n self.__datum = map_info.datum()\n self.__units = map_info.units()\n self.__rotation_deg = map_info.rotation_deg()\n self.__rotation = map_info.rotation()\n\n def __str__(self) -> str:\n param_list = [\n self.__projection_name,\n \"{:.03f}\".format(self.__x_reference_pixel),\n \"{:.03f}\".format(self.__y_reference_pixel),\n \"{:.03f}\".format(self.__x_zero_coordinate),\n \"{:.03f}\".format(self.__y_zero_coordinate),\n OpenSpectraHeader.MapInfo.__format_pixel_size(self.__x_pixel_size),\n OpenSpectraHeader.MapInfo.__format_pixel_size(self.__y_pixel_size),\n \"{:d}\".format(self.__projection_zone),\n self.__projection_area,\n self.__datum,\n \"units={}\".format(self.__units)]\n\n if self.__rotation is not None:\n param_list.append(\"rotation={:.08f}\".format(self.__rotation_deg))\n\n return \"{\" + \", \".join(param_list) + \"}\"\n\n @staticmethod\n def __format_pixel_size(value:float) -> str:\n val_str = \"{:.010e}\".format(value)\n if \"e+\" in val_str:\n parts = val_str.split(\"e+\")\n if len(parts[1]) < 3:\n parts[1] = \"0\".join(parts[1])\n\n val_str = \"e+\".join(parts)\n elif \"e-\" in val_str:\n parts = val_str.split(\"e-\")\n if len(parts[1]) < 3:\n parts[1] = \"0\".join(parts[1])\n val_str = \"e-\".join(parts)\n\n return val_str\n\n def calculate_coordinates(self, x_pixels:Union[int, float, np.ndarray],\n y_pixels:Union[int, float, np.ndarray]) ->\\\n Tuple[Union[float, np.ndarray], Union[float, np.ndarray]]:\n\n x_coords = (x_pixels - (self.__x_reference_pixel - 1)) * self.__x_pixel_size\n y_coords = (y_pixels - (self.__y_reference_pixel - 1)) * self.__y_pixel_size\n\n x_coords_rot = x_coords\n y_coords_rot = y_coords\n if self.__rotation is not None:\n # This implementation is for counterclockwise rotation\n x_coords_rot = x_coords * cos(self.__rotation) + y_coords * sin(self.__rotation)\n y_coords_rot = -x_coords * sin(self.__rotation) + y_coords * cos(self.__rotation)\n\n x_coords = x_coords_rot + self.__x_zero_coordinate\n y_coords = self.__y_zero_coordinate - y_coords_rot\n\n return x_coords, y_coords\n\n def projection_name(self) -> str:\n return self.__projection_name\n\n def x_reference_pixel(self) -> float:\n return self.__x_reference_pixel\n\n def y_reference_pixel(self) -> float:\n return self.__y_reference_pixel\n\n def x_zero_coordinate(self) -> float:\n return self.__x_zero_coordinate\n\n def y_zero_coordinate(self) -> float:\n return self.__y_zero_coordinate\n\n def x_pixel_size(self) -> float:\n return self.__x_pixel_size\n\n def y_pixel_size(self) -> float:\n return self.__y_pixel_size\n\n def projection_zone(self) -> int:\n return self.__projection_zone\n\n def projection_area(self) -> str:\n return self.__projection_area\n\n def datum(self) -> str:\n return self.__datum\n\n def units(self) -> str:\n return self.__units\n\n def rotation(self) -> float:\n return self.__rotation\n\n def rotation_deg(self) -> float:\n return self.__rotation_deg\n\n def __init__(self, file_name:str=None, props:Dict[str, Union[str, List[str]]]=None,\n unsupported_props:Dict[str, str]=None):\n if file_name is None and props is None:\n raise OpenSpectraHeaderError(\n \"Creating a OpenSpectraHeader requires either a file name or dict of properties\")\n\n self.__path:Path = None\n self.__props:Dict[str, Union[str, List[str]]] = None\n # Anything we don't support but don't want to loose when making a copy\n self.__unsupported_props:Dict[str, Union[str, List[str]]] = None\n\n if file_name is not None:\n 
self.__path = Path(file_name)\n self.__props = dict()\n self.__unsupported_props = dict()\n else:\n self.__props = copy.deepcopy(props)\n self.__unsupported_props = copy.deepcopy(unsupported_props)\n\n self.__byte_order:int = -1\n self.__interleave:str = None\n self.__samples:int = 0\n self.__lines:int = 0\n self.__band_count:int = 0\n self.__wavelengths:np.array = None\n self.__band_labels:List[Tuple[str, str]] = None\n self.__header_offset:int = 0\n self.__reflectance_scale_factor:np.float64 = None\n self.__map_info:OpenSpectraHeader.MapInfo = None\n self.__data_ignore_value: Union[int, float] = None\n self.__default_stretch:LinearImageStretch = None\n self.__bad_band_list:List[bool] = None\n\n def _get_props(self) -> Dict[str, Union[str, List[str]]]:\n return self.__props\n\n def _get_unsupported_props(self) -> Dict[str, Union[str, List[str]]]:\n return self.__unsupported_props\n\n def _set_unsupported_props(self, props:Dict[str, Union[str, List[str]]]):\n self.__unsupported_props = props\n\n def _get_prop(self, key:str) -> Union[str, List[str]]:\n result = self.__props.get(key)\n if result is not None and isinstance(result, list):\n result = result[:]\n\n return result\n\n def _update_prop(self, key:str, value:Union[int, str, List[str], np.ndarray], validate:bool=True):\n new_value = None\n if isinstance(value, int) :\n new_value = str(value)\n elif isinstance(value, np.ndarray):\n array_list = list(value)\n new_value = [str(item) for item in array_list]\n else:\n new_value = value\n\n self.__props[key] = new_value\n if validate:\n self.__validate()\n\n def dump(self) -> str:\n return \"Props:\\n\" + str(self.__props)\n\n def load(self):\n if self.__path is not None:\n if self.__path.exists() and self.__path.is_file():\n OpenSpectraHeader.__LOG.info(\"Opening file {0} with mode {1}\", self.__path.name, self.__path.stat().st_mode)\n\n with self.__path.open() as headerFile:\n for line in headerFile:\n line = line.rstrip()\n if re.search(\"=\", line) is not None:\n line_pair:List[str] = re.split(\"=\", line, 1)\n key = line_pair[0].strip()\n value = line_pair[1].lstrip()\n\n if key in OpenSpectraHeader.__SUPPORTED_FIELDS:\n if re.search(\"{\", value):\n if key in OpenSpectraHeader.__READ_AS_STRING:\n str_val = self.__read_bracket_str(value, headerFile)\n self.__props[key] = str_val\n else:\n list_value = self.__read_bracket_list(value, headerFile)\n self.__props[key] = list_value\n else:\n self.__props[key] = value\n else:\n if re.search(\"{\", value):\n list_value = self.__read_bracket_list(value, headerFile)\n self.__unsupported_props[key] = list_value\n else:\n self.__unsupported_props[key] = value\n else:\n raise OpenSpectraHeaderError(\"File {0} not found\".format(self.__path.name))\n\n # else we must have been initialized with a set of props\n # now verify what we read makes sense and do some conversion to data type we want\n self.__validate()\n\n def byte_order(self) -> int:\n return self.__byte_order\n\n def bad_band_list(self) -> List[bool]:\n \"\"\"Return the bad band list that can be used to mask a numpy array\n In the header '1' means the band is good and '0' means it's bad. But for an array\n mask True means the value is masked. 
So in the list returned here '1' from\n the list in the header is converted to False and '0' to True\"\"\"\n return self.__bad_band_list\n\n def band_label(self, band:int) -> Tuple[str, str]:\n \"\"\"Returns a tuple with the band name and wavelength\"\"\"\n return self.__band_labels[band]\n\n def band_labels(self) -> List[Tuple[str, str]]:\n \"\"\"Returns a list of tuples, each tuple is the band name and wavelength \"\"\"\n return self.__band_labels\n\n def band_name(self, band:int) -> str:\n \"\"\"Returns the band name for the given band index\"\"\"\n return self.__props.get(OpenSpectraHeader._BAND_NAMES)[band]\n\n def band_names(self) -> List[str]:\n \"\"\"Returns a list of strings of the band names\"\"\"\n return self.__props.get(OpenSpectraHeader._BAND_NAMES)\n\n def coordinate_system_string(self) -> str:\n return self.__props.get(OpenSpectraHeader.__COORD_SYSTEM_STR)\n\n def data_ignore_value(self) -> Union[int, float]:\n return self.__data_ignore_value\n\n def data_type(self) -> np.dtype.type:\n data_type = self.__props.get(OpenSpectraHeader.__DATA_TYPE)\n return self._DATA_TYPE_DIC.get(data_type)\n\n def default_stretch(self) -> LinearImageStretch:\n return self.__default_stretch\n\n def description(self) -> str:\n return self.__props.get(self.__DESCRIPTION)\n\n def samples(self) -> int:\n return self.__samples\n\n def lines(self) -> int:\n return self.__lines\n\n def band_count(self) -> int:\n return self.__band_count\n\n def file_type(self) -> str:\n return self.__props.get(OpenSpectraHeader.__FILE_TYPE)\n\n def wavelengths(self) -> np.array:\n return self.__wavelengths\n\n def wavelength_units(self) -> str:\n return self.__props.get(OpenSpectraHeader.__WAVELENGTH_UNITS)\n\n def interleave(self) -> str:\n return self.__interleave\n\n def header_offset(self) -> int:\n return self.__header_offset\n\n def sensor_type(self) -> str:\n return self.__props.get(OpenSpectraHeader.__SENSOR_TYPE)\n\n def reflectance_scale_factor(self) -> np.float64:\n return self.__reflectance_scale_factor\n\n def map_info(self) -> MapInfo:\n return self.__map_info\n\n def unsupported_props(self) -> Dict[str, str]:\n return copy.deepcopy(self.__unsupported_props)\n\n @staticmethod\n def __read_bracket_str(value, header_file, strip_bracket:bool=True) -> str:\n str_val = value\n if strip_bracket:\n str_val = value.strip(\"{\").strip()\n\n # check for closing } on same line\n if re.search(\"}\", str_val):\n if strip_bracket:\n str_val = str_val.strip(\"}\").strip()\n else:\n str_val += \"\\n\"\n for line in header_file:\n str_val += line.rstrip()\n if re.search(\"}\", str_val):\n if strip_bracket:\n str_val = str_val.rstrip(\"}\").rstrip()\n break\n else:\n str_val += \"\\n\"\n\n return str_val\n\n @staticmethod\n def __read_bracket_list(value, header_file) -> List[str]:\n done = False\n line = value.strip(\"{\").strip()\n list_value = list()\n\n # check for closing } on same line\n if re.search(\"}\", line):\n line = line.strip(\"}\").strip()\n done = True\n\n # if there are any entries on the first line handle them\n if line:\n elements = line.split(\",\")\n if len(elements) > 0:\n list_value = elements\n\n if not done:\n section = \"\"\n for line in header_file:\n section += line.rstrip()\n if re.search(\"}\", line):\n section = section.rstrip(\"}\").rstrip()\n break\n\n list_value += section.split(\",\")\n\n list_value = [str.strip(item) for item in list_value]\n return list_value\n\n def __validate(self):\n self.__byte_order = int(self.__props.get(OpenSpectraHeader.__BYTE_ORDER))\n if self.__byte_order != 
1 and self.__byte_order != 0:\n raise OpenSpectraHeaderError(\"Valid values for byte order in header are '0' or '1'. Value is: {0}\".\n format(self.__props.get(OpenSpectraHeader.__BYTE_ORDER)))\n\n interleave:str = self.__props.get(OpenSpectraHeader._INTERLEAVE)\n if interleave is None or len(interleave) != 3:\n raise OpenSpectraHeaderError(\"Interleave format missing from header file. Must be one of {}, {}, or {}\".format(\n OpenSpectraHeader.BIP_INTERLEAVE, OpenSpectraHeader.BSQ_INTERLEAVE, OpenSpectraHeader.BIL_INTERLEAVE))\n\n interleave = interleave.lower()\n if interleave == OpenSpectraHeader.BIP_INTERLEAVE:\n self.__interleave = OpenSpectraHeader.BIP_INTERLEAVE\n elif interleave == OpenSpectraHeader.BSQ_INTERLEAVE:\n self.__interleave = OpenSpectraHeader.BSQ_INTERLEAVE\n elif interleave == OpenSpectraHeader.BIL_INTERLEAVE:\n self.__interleave = OpenSpectraHeader.BIL_INTERLEAVE\n else:\n raise OpenSpectraHeaderError(\"Unknown interleave format in header file. Value is: {}. Must be one of {}, {}, or {}\".\n format(self.__props.get(OpenSpectraHeader._INTERLEAVE),\n OpenSpectraHeader.BIP_INTERLEAVE, OpenSpectraHeader.BSQ_INTERLEAVE, OpenSpectraHeader.BIL_INTERLEAVE))\n\n self.__samples = int(self.__props.get(OpenSpectraHeader._SAMPLES))\n self.__lines = int(self.__props.get(OpenSpectraHeader._LINES))\n self.__band_count = int(self.__props.get(OpenSpectraHeader._BANDS))\n\n if self.__samples is None or self.__samples <= 0:\n raise OpenSpectraHeaderError(\"Value for 'samples' in header is not valid: {0}\"\n .format(self.__samples))\n\n if self.__lines is None or self.__lines <= 0:\n raise OpenSpectraHeaderError(\"Value for 'lines' in header is not valid: {0}\"\n .format(self.__lines))\n\n if self.__band_count is None or self.__band_count <= 0:\n raise OpenSpectraHeaderError(\"Value for 'bands' in header is not valid: {0}\"\n .format(self.__band_count))\n\n band_names = self.__props.get(OpenSpectraHeader._BAND_NAMES)\n wavelengths_str = self.__props.get(OpenSpectraHeader._WAVELENGTHS)\n\n # possible to have only bands or wavelenghts or both or neither\n if band_names is None:\n band_names = [\"Band \" + index for index in np.arange(\n 1, self.__band_count + 1, 1, np.int16).astype(str)]\n else:\n if len(band_names) != self.__band_count:\n raise OpenSpectraHeaderError(\n \"Number of 'band names' {0} does not match number of bands {1}\".\n format(len(band_names), self.__band_count))\n\n if wavelengths_str is None:\n wavelengths_str = np.arange(\n 1, self.__band_count + 1, 1, np.float64).astype(str)\n else:\n if len(wavelengths_str) != self.__band_count:\n raise OpenSpectraHeaderError(\n \"Number of wavelengths {0} does not match number of bands {1}\".\n format(len(wavelengths_str), self.__band_count))\n\n self.__wavelengths = np.array(wavelengths_str, np.float64)\n self.__band_labels = list(zip(band_names, wavelengths_str))\n\n self.__header_offset = int(self.__props.get(OpenSpectraHeader._HEADER_OFFSET))\n\n if OpenSpectraHeader.__REFLECTANCE_SCALE_FACTOR in self.__props:\n self.__reflectance_scale_factor = np.float64(\n self.__props[OpenSpectraHeader.__REFLECTANCE_SCALE_FACTOR])\n\n # map info = {UTM, 1.000, 1.000, 620006.407, 2376995.930, 7.8000000000e+000, 7.8000000000e+000,\n # 4, North, WGS-84, units=Meters, rotation=29.00000000}\n map_info_list = self.__props.get(OpenSpectraHeader._MAP_INFO)\n if map_info_list is not None:\n self.__map_info:OpenSpectraHeader.MapInfo = OpenSpectraHeader.MapInfo(map_info_list)\n else:\n OpenSpectraHeader.__LOG.debug(\"Optional map info section not 
found\")\n\n data_type = self.__props.get(OpenSpectraHeader.__DATA_TYPE)\n if data_type not in OpenSpectraHeader._DATA_TYPE_DIC:\n raise OpenSpectraHeaderError(\"Specified 'data type' not recognized, value was: {0}\".format(data_type))\n\n interleave = self.__props.get(OpenSpectraHeader._INTERLEAVE)\n if not (interleave == OpenSpectraHeader.BIL_INTERLEAVE or\n interleave == OpenSpectraHeader.BIP_INTERLEAVE or\n interleave == OpenSpectraHeader.BSQ_INTERLEAVE):\n raise OpenSpectraHeaderError(\"Specified 'interleave' not recognized, value was: {0}\".format(interleave))\n\n data_ignore_value = self.__props.get(OpenSpectraHeader.__DATA_IGNORE_VALUE)\n if data_ignore_value is not None:\n if re.match(\"^[+-]?[0-9]*$\", data_ignore_value):\n self.__data_ignore_value = int(data_ignore_value)\n elif re.match(\"^[+-]?[0-9]*[\\.][0-9]*$\", data_ignore_value):\n self.__data_ignore_value = float(data_ignore_value)\n else:\n raise OpenSpectraHeaderError(\"Couldn't parse 'data ignore value' as a float or int, value was: {0}\", data_ignore_value)\n\n default_stretch:str = self.__props.get(OpenSpectraHeader.__DEFAULT_STRETCH)\n self.__default_stretch = LinearImageStretch.create_default_stretch(default_stretch)\n\n bad_band_list:list = self.__props.get(OpenSpectraHeader._BAD_BAND_LIST)\n if bad_band_list is not None:\n if len(bad_band_list) != self.__band_count:\n raise OpenSpectraHeaderError(\"Bad band list, 'bbl' length did not match band count\")\n\n if not all(item == \"0\" or item == \"1\" for item in bad_band_list):\n raise OpenSpectraHeaderError(\"Bad band list 'bbl' should only have value of 0 or 1, list is: {0}\".format(bad_band_list))\n\n # remember that \"1\" means the band is good, \"0\" means it's bad so\n # but True in a numpy mask means the value is masked so flip the values\n self.__bad_band_list = [not bool(int(item)) for item in bad_band_list]\n\n\nclass MutableOpenSpectraHeader(OpenSpectraHeader):\n\n __LOG:Logger = LogHelper.logger(\"MutableOpenSpectraHeader\")\n\n def __init__(self, source_file_name:str=None, os_header:OpenSpectraHeader=None):\n # Could initialize with neither but for now we don't support creating an entire header from scratch\n if source_file_name is None and os_header is None:\n raise OpenSpectraHeaderError(\n \"Creating a MutableOpenSpectraHeader requires starting with a file or another OpenSpectra header\")\n\n if source_file_name is not None:\n super().__init__(source_file_name)\n else:\n super().__init__(props=os_header._get_props(),\n unsupported_props=os_header._get_unsupported_props())\n\n super().load()\n\n @staticmethod\n def __convert_bool_value(value:bool) -> str:\n # Maintain the naming convention that a False, meaning not masked or good band, is a '1'\n # in the header file and '0' is True\n if value:\n return \"0\"\n else:\n return \"1\"\n\n @staticmethod\n def __format_list(items:List, format_func:Callable) -> str:\n return \", \".join([format_func(item) for item in items])\n\n @staticmethod\n def __convert_data_type(data_type:type) -> str:\n for key, val in OpenSpectraHeader._DATA_TYPE_DIC.items():\n if val == data_type:\n return key\n\n def load(self):\n # prevent parent's load from being called\n pass\n\n def save(self, base_file_name:str):\n file_name = base_file_name + \".hdr\"\n MutableOpenSpectraHeader.__LOG.debug(\"saving header file: {}\", file_name)\n with open(file_name, \"wt\") as out_file:\n out_file.write(\"OpenSpectra\\n\")\n out_file.write(\"description = {0}{1}{2}\\n\".format(\"{\", self.description(), \"}\"))\n out_file.write(\"samples 
= {0}\\n\".format(self.samples()))\n out_file.write(\"lines = {0}\\n\".format(self.lines()))\n out_file.write(\"bands = {0}\\n\".format(self.band_count()))\n out_file.write(\"header offset = {0}\\n\".format(self.header_offset()))\n out_file.write(\"file type = {0}\\n\".format(self.file_type()))\n out_file.write(\"data type = {0}\\n\".format(self.__convert_data_type(self.data_type())))\n out_file.write(\"interleave = {0}\\n\".format(self.interleave()))\n\n if self.sensor_type() is not None:\n out_file.write(\"sensor type = {0}\\n\".format(self.sensor_type()))\n\n out_file.write(\"byte order = {0}\\n\".format(self.byte_order()))\n out_file.write(\"wavelength units = {0}\\n\".format(self.wavelength_units()))\n\n if self.reflectance_scale_factor() is not None:\n out_file.write(\"reflectance scale factor = {0}\\n\".format(self.reflectance_scale_factor()))\n\n if self.map_info() is not None:\n out_file.write(\"map info = {0}\\n\".format(self.map_info()))\n\n if self.coordinate_system_string() is not None:\n out_file.write(\"coordinate system string = {0}{1}{2}\\n\".format(\"{\", self.coordinate_system_string(), \"}\"))\n\n if self.data_ignore_value() is not None:\n out_file.write(\"data ignore value = {0}\\n\".format(self.data_ignore_value()))\n\n if self.default_stretch() is not None:\n out_file.write(\"default stretch = {0}\\n\".format(self.default_stretch()))\n\n if self.band_names() is not None:\n out_file.write(\"band names = {0}{1}{2}\\n\".format(\"{\\n \", self.__format_list(self.band_names(), \"{}\".format), \"}\"))\n\n out_file.write(\"wavelength = {0}{1}{2}\\n\".format(\"{\\n \", self.__format_list(self.wavelengths(), \"{:.06f}\".format), \"}\"))\n\n if self.bad_band_list() is not None:\n out_file.write(\"bbl = {0}{1}{2}\\n\".format(\"{\\n \", self.__format_list(self.bad_band_list(), self.__convert_bool_value), \"}\"))\n\n for key, value in self._get_unsupported_props().items():\n if isinstance(value, list):\n out_file.write(\"{0} = {1}{2}{3}\\n\".format(key, \"{\", \",\".join(value), \"}\"))\n else:\n out_file.write(\"{0} = {1}\\n\".format(key, value))\n\n out_file.flush()\n\n def set_lines(self, lines:int):\n self._update_prop(self._LINES, lines)\n\n def set_samples(self, samples:int):\n self._update_prop(self._SAMPLES, samples)\n\n def set_bands(self, band_count:int, bands_names:List[str], wavelengths:np.ndarray, bad_bands:List[bool]=None):\n if bands_names is not None and len(bands_names) != band_count:\n raise OpenSpectraHeaderError(\"Length of bands_names doesn't match band_count\")\n\n if len(wavelengths.shape) != 1:\n raise OpenSpectraHeaderError(\"wave_lengths should be one dimensional\")\n\n if wavelengths.size != band_count:\n raise OpenSpectraHeaderError(\"Length of wavelengths doesn't match band_count\")\n\n if bad_bands is not None and len(bad_bands) != band_count:\n raise OpenSpectraHeaderError(\"Length of bad_bands doesn't match band_count\")\n\n self._update_prop(self._BANDS, band_count, False)\n if bands_names is not None:\n self._update_prop(self._BAND_NAMES, bands_names, False)\n\n if bad_bands is not None:\n self._update_prop(self._BAD_BAND_LIST,\n [MutableOpenSpectraHeader.__convert_bool_value(bad_band) for bad_band in bad_bands],\n False)\n self._update_prop(self._WAVELENGTHS, wavelengths)\n\n def set_interleave(self, interleave:str):\n self._update_prop(self._INTERLEAVE, interleave)\n\n def set_header_offset(self, offset:int):\n self._update_prop(self._HEADER_OFFSET, offset)\n\n def set_x_reference(self, x_pixel:float, x_cooridinate:float):\n map_info = 
self.map_info()\n if map_info is not None:\n map_info_list = self._get_prop(self._MAP_INFO)\n map_info_list[1] = str(x_pixel)\n map_info_list[3] = str(x_cooridinate)\n self._update_prop(self._MAP_INFO, map_info_list)\n\n def set_y_reference(self, y_pixel:float, y_cooridinate:float):\n map_info = self.map_info()\n if map_info is not None:\n map_info_list = self._get_prop(self._MAP_INFO)\n map_info_list[2] = str(y_pixel)\n map_info_list[4] = str(y_cooridinate)\n self._update_prop(self._MAP_INFO, map_info_list)\n\n def set_unsupported_props(self, props:Dict[str, Union[str, List[str]]]):\n self._set_unsupported_props(props)\n\n\nclass Shape:\n\n def __init__(self, x, y, z):\n self.__shape = (x, y, z)\n self.__size = x * y * z\n\n def shape(self) -> (int, int, int):\n return self.__shape\n\n def size(self) -> int:\n return self.__size\n\n def lines(self) -> int:\n pass\n\n def samples(self) -> int:\n pass\n\n def bands(self) -> int:\n pass\n\n\nclass BILShape(Shape):\n\n def __init__(self, lines, samples, bands):\n super().__init__(lines, bands, samples)\n\n def lines(self) -> int:\n return self.shape()[0]\n\n def samples(self) -> int:\n return self.shape()[2]\n\n def bands(self) -> int:\n return self.shape()[1]\n\n\nclass BQSShape(Shape):\n\n def __init__(self, lines, samples, bands):\n super().__init__(bands, lines, samples)\n\n def lines(self) -> int:\n return self.shape()[1]\n\n def samples(self) -> int:\n return self.shape()[2]\n\n def bands(self) -> int:\n return self.shape()[0]\n\n\nclass BIPShape(Shape):\n\n def __init__(self, lines, samples, bands):\n super().__init__(lines, samples, bands)\n\n def lines(self) -> int:\n return self.shape()[0]\n\n def samples(self) -> int:\n return self.shape()[1]\n\n def bands(self) -> int:\n return self.shape()[2]\n\n\nclass FileModel:\n\n def __init__(self, path:Path, header:OpenSpectraHeader):\n self._file:np.ndarray = None\n self._path = path\n self._offset:int = header.header_offset()\n\n # size in bytes of each data element in the file\n self._data_type = np.dtype(header.data_type())\n\n # TODO set based on byte_order from header but doesn't seem to make a difference\n self._data_type.newbyteorder(\"L\")\n # data_type.newbyteorder(\"B\")\n\n def load(self, shape:Shape):\n pass\n\n def file(self) -> np.ndarray:\n return self._file\n\n def name(self):\n return self._path.name\n\n def data_type(self):\n return self._file.dtype\n\n def _validate(self, shape:Shape):\n if self._file.size != shape.size():\n raise OpenSpectraFileError(\"Expected {0} data points but found {1}\".\n format(shape.size(), self._file.size))\n\n\nclass CubeSliceArgs():\n\n def __init__(self, lines:Tuple[int, int], samples:Tuple[int, int], bands:Union[Tuple[int, int], List[int]]):\n self.__line_arg = slice(lines[0], lines[1])\n self.__sample_arg = slice(samples[0], samples[1])\n\n self.__band_arg = None\n if isinstance(bands, Tuple):\n self.__band_arg = slice(bands[0], bands[1])\n elif isinstance(bands, List):\n self.__band_arg = bands\n\n def line_arg(self) -> slice:\n return self.__line_arg\n\n def sample_arg(self) -> slice:\n return self.__sample_arg\n\n def band_arg(self) -> Union[slice, List[int]]:\n return self.__band_arg\n\n\nclass FileTypeDelegate:\n\n def __init__(self, shape:Shape, file_model:FileModel):\n self.__shape = shape\n self._file_model = file_model\n\n def image(self, band:Union[int, tuple]) -> np.ndarray:\n \"\"\"bands are zero based here with a max value of len(band) - 1\n It's important to understand that selecting images with an int index\n returns a view 
of the underlying data while using a tuple returns a copy.\n See https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\n for more details\"\"\"\n pass\n\n def bands(self, line:Union[int, tuple, np.ndarray], sample:Union[int, tuple, np.ndarray]) -> np.ndarray:\n \"\"\"lines and samples are zero based here with a max value of len(line) - 1\n It's important to understand that selecting images with an int index\n returns a view of the underlying data while using a tuple or ndarray returns a copy.\n See https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\n for more details\"\"\"\n pass\n\n def cube(self, lines:Tuple[int, int], samples:Tuple[int, int],\n bands:Union[Tuple[int, int], List[int]]) -> np.ndarray:\n \"\"\"Return a sub-cube or the whole data cube depending on the argument values.\n lines, samples and bands are zero based here and work like the standard python and numpy slicing.\n lines and samples should be a tuple of integers where line[0] is the start line and\n line[1] is the end line and the last line included will be line[1] - 1. So the valid range\n for lines is 0 to the line count defined the in the files header.\n The same applies for samples. lines and samples are then selected contiguously\n from the start value to the end value - 1. Bands can be a tuple of 2 integers indicating the start and end\n bands as with lines and samples or a list of contiguous or non-contiguous integers to be selected. To select\n a single band with index of i pass a tuple of the form (i, i + 1).\n Using the start and end option for selecting contiguous bands will be more efficient since it will result\n in a numpy view being returned while the other two options result in a copy being returned. See\n https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html for more information\"\"\"\n pass\n\n def shape(self) -> Shape:\n return self.__shape\n\n\nclass BILFileDelegate(FileTypeDelegate):\n \"\"\"An 'interleave': 'bil' file\"\"\"\n\n def __init__(self, header:OpenSpectraHeader, file_model:FileModel):\n # inspect header info to make sure it's what we expect\n if header.interleave() != OpenSpectraHeader.BIL_INTERLEAVE:\n raise OpenSpectraFileError(\"Expected a file with interleave type 'bil' got {0}\".\n format(header.interleave()))\n\n super().__init__(\n BILShape(header.lines(), header.samples(), header.band_count()), file_model)\n\n def image(self, band:Union[int, tuple]) -> np.ndarray:\n \"\"\"It's important to understand that selecting images with an int index\n returns a view of the underlying data while using a tuple returns a copy.\n See https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\n for more details\"\"\"\n return self._file_model.file()[:, band, :]\n\n def bands(self, line:Union[int, tuple, np.ndarray], sample:Union[int, tuple, np.ndarray]) -> np.ndarray:\n \"\"\"It's important to understand that selecting images with an int index\n returns a view of the underlying data while using a tuple or ndarray returns a copy.\n See https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\n for more details\"\"\"\n return self._file_model.file()[line, :, sample]\n\n def cube(self, lines:Tuple[int, int], samples:Tuple[int, int],\n bands:Union[Tuple[int, int], List[int]]) -> np.ndarray:\n args = CubeSliceArgs(lines, samples, bands)\n return self._file_model.file()[args.line_arg(), args.band_arg(), args.sample_arg()]\n\n\nclass BQSFileDelegate(FileTypeDelegate):\n \"\"\"An 'interleave': 'bsq' file\"\"\"\n\n def __init__(self, header:OpenSpectraHeader, 
file_model:FileModel):\n # inspect header info to make sure it's what we expect\n if header.interleave() != OpenSpectraHeader.BSQ_INTERLEAVE:\n raise OpenSpectraFileError(\"Expected a file with interleave type 'bsq' got {0}\".\n format(header.interleave()))\n\n super().__init__(\n BQSShape(header.lines(), header.samples(), header.band_count()), file_model)\n\n def image(self, band:Union[int, tuple]) -> np.ndarray:\n \"\"\"It's important to understand that selecting images with an int index\n returns a view of the underlying data while using a tuple returns a copy.\n See https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\n for more details\"\"\"\n return self._file_model.file()[band, :, :]\n\n def bands(self, line:Union[int, tuple, np.ndarray], sample:Union[int, tuple, np.ndarray]) -> np.ndarray:\n \"\"\"It's important to understand that selecting images with an int index\n returns a view of the underlying data while using a tuple or ndarray returns a copy.\n See https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\n for more details\"\"\"\n return self._file_model.file()[:, line, sample]\n\n def cube(self, lines:Tuple[int, int], samples:Tuple[int, int],\n bands:Union[Tuple[int, int], List[int]]) -> np.ndarray:\n args = CubeSliceArgs(lines, samples, bands)\n return self._file_model.file()[args.band_arg(), args.line_arg(), args.sample_arg()]\n\n\nclass BIPFileDelegate(FileTypeDelegate):\n \"\"\"An 'interleave': 'bip' file\"\"\"\n\n def __init__(self, header:OpenSpectraHeader, file_model:FileModel):\n # inspect header info to make sure it's what we expect\n if header.interleave() != OpenSpectraHeader.BIP_INTERLEAVE:\n raise OpenSpectraFileError(\"Expected a file with interleave type 'bip' got {0}\".\n format(header.interleave()))\n\n super().__init__(\n BIPShape(header.lines(), header.samples(), header.band_count()), file_model)\n\n def image(self, band:Union[int, tuple]) -> np.ndarray:\n \"\"\"It's important to understand that selecting images with an int index\n returns a view of the underlying data while using a tuple returns a copy.\n See https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\n for more details\"\"\"\n return self._file_model.file()[:, :, band]\n\n def bands(self, line:Union[int, tuple, np.ndarray], sample:Union[int, tuple, np.ndarray]) -> np.ndarray:\n \"\"\"It's important to understand that selecting images with an int index\n returns a view of the underlying data while using a tuple or ndarray returns a copy.\n See https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\n for more details\"\"\"\n return self._file_model.file()[line, sample, :]\n\n def cube(self, lines:Tuple[int, int], samples:Tuple[int, int],\n bands:Union[Tuple[int, int], List[int]]) -> np.ndarray:\n args = CubeSliceArgs(lines, samples, bands)\n return self._file_model.file()[args.line_arg(), args.sample_arg(), args.band_arg()]\n\n\nclass MemoryModel(FileModel):\n\n def __init__(self, path:Path, header: OpenSpectraHeader):\n super().__init__(path, header)\n\n def load(self, shape:Shape):\n self._file = np.array([], self._data_type)\n with self._path.open(\"rb\") as file:\n file.seek(self._offset)\n bytes_in = file.read()\n while bytes_in:\n self._file = np.append(self._file, np.frombuffer(bytes_in, self._data_type))\n bytes_in = file.read()\n\n self._validate(shape)\n self._file = self._file.reshape(shape.shape())\n\n\nclass MappedModel(FileModel):\n\n def __init__(self, path:Path, header:OpenSpectraHeader):\n super().__init__(path, header)\n\n def load(self, 
shape:Shape):\n self._file = np.memmap(self._path, dtype = self._data_type, mode = 'r',\n offset=self._offset, shape = shape.shape())\n self._validate(shape)\n\n\nclass OpenSpectraFile:\n\n __LOG:Logger = LogHelper.logger(\"OpenSpectraFile\")\n\n def __init__(self, header:OpenSpectraHeader, file_delegate:FileTypeDelegate,\n memory_model:FileModel):\n self.__header = header\n self.__memory_model = memory_model\n self.__file_delegate = file_delegate\n self.__validate()\n\n if OpenSpectraFile.__LOG.isEnabledFor(logging.DEBUG):\n OpenSpectraFile.__LOG.debug(\"Shape: {0}\", self.__memory_model.file().shape)\n OpenSpectraFile.__LOG.debug(\"NDim: {0}\", self.__memory_model.file().ndim)\n OpenSpectraFile.__LOG.debug(\"Size: {0}\", self.__memory_model.file().size)\n OpenSpectraFile.__LOG.debug(\"Type: {0}\", self.__memory_model.file().dtype)\n\n def raw_image(self, band:Union[int, tuple]) -> np.ndarray:\n \"\"\"Return the image data for the given band.\n It's important to understand that selecting images with an int index\n returns a view of the underlying data while using a tuple returns a copy.\n See https://docs.scipy.org/doc/numpy-1.16.0/user/basics.indexing.html\n for more details\"\"\"\n return self.__file_delegate.image(band)\n\n def bands(self, line:Union[int, tuple, np.ndarray], sample:Union[int, tuple, np.ndarray]) -> np.ndarray:\n \"\"\"Return all of the band values for a given pixel. The number of lines and samples passed needs\n to be the same. The array returned will have a shape of (number of lines & samples, number of bands)\n It's important to understand that selecting bands with an int index\n returns a view of the underlying data while using a tuple or ndarray returns a copy.\n See https://docs.scipy.org/doc/numpy-1.16.0/user/basics.indexing.html\n for more details\"\"\"\n self.__validate_band_args(line, sample)\n bands = self.__file_delegate.bands(line, sample)\n\n # If the arguments were single ints the array of bands will be one\n # dimensional so reshape it so it's consistent with multi-point results\n if len(bands.shape) == 1:\n bands = bands.reshape(1, bands.size)\n return bands\n\n def cube(self, lines:Tuple[int, int], samples:Tuple[int, int],\n bands:Union[Tuple[int, int], List[int]]) -> np.ndarray:\n return self.__file_delegate.cube(lines, samples, bands)\n\n def name(self) -> str:\n return self.__memory_model.name()\n\n def header(self) -> OpenSpectraHeader:\n return self.__header\n\n def __validate(self):\n if self.__memory_model.data_type() != self.__header.data_type():\n raise TypeError(\"Header file type {0}, does not match actually data type {1}\",\n self.__header.data_type(), self.__memory_model.data_type())\n\n def __validate_band_args(self, line:Union[int, tuple, np.ndarray], sample:Union[int, tuple, np.ndarray]):\n if isinstance(line, int) and isinstance(sample, int):\n pass\n elif isinstance(line, tuple) and isinstance(sample, tuple):\n if len(line) != len(sample):\n raise ValueError(\"tuple arguments must have the same length\")\n\n elif isinstance(line, np.ndarray) and isinstance(sample, np.ndarray):\n if line.ndim != 1 or sample.ndim != 1:\n raise ValueError(\"ndarray arguments must have a dimension of 1\")\n\n if line.size != sample.size:\n raise ValueError(\"ndarray arguments must have the same size\")\n\n else:\n raise TypeError(\"'line' and 'sample' arguments must have the same type\")\n\n\nclass OpenSpectraFileFactory:\n \"\"\"An object oriented way to create an OpenSpectra file\"\"\"\n\n __LOG: logging.Logger = 
LogHelper.logger(\"OpenSpectraFileFactory\")\n\n MEMORY_MODEL:int = 0\n MAPPED_MODEL:int = 1\n\n @staticmethod\n def create_open_spectra_file(file_name, model=MAPPED_MODEL) -> OpenSpectraFile:\n path = Path(file_name)\n\n if path.exists() and path.is_file():\n OpenSpectraFileFactory.__LOG.info(\"Opening {0} with mode {1}\", path.name, path.stat().st_mode)\n\n header = OpenSpectraHeader(file_name + \".hdr\")\n header.load()\n file_type = header.interleave()\n\n memory_model = None\n if model == OpenSpectraFileFactory.MEMORY_MODEL:\n memory_model = MemoryModel(path, header)\n else:\n memory_model:FileModel = MappedModel(path, header)\n\n if file_type == OpenSpectraHeader.BIL_INTERLEAVE:\n file_delegate = BILFileDelegate(header, memory_model)\n elif file_type == OpenSpectraHeader.BSQ_INTERLEAVE:\n file_delegate = BQSFileDelegate(header, memory_model)\n elif file_type == OpenSpectraHeader.BIP_INTERLEAVE:\n file_delegate = BIPFileDelegate(header, memory_model)\n else:\n raise OpenSpectraHeaderError(\"Unexpected file type: {0}\".format(file_type))\n\n memory_model.load(file_delegate.shape())\n return OpenSpectraFile(header, file_delegate, memory_model)\n\n else:\n raise OpenSpectraFileError(\"File {0} not found\".format(path))\n\n\ndef create_open_spectra_file(file_name, model=OpenSpectraFileFactory.MAPPED_MODEL) -> OpenSpectraFile:\n \"\"\"A function based way to create an OpenSpectra file\"\"\"\n\n return OpenSpectraFileFactory.create_open_spectra_file(file_name, model)\n\n\nclass OpenSpectraHeaderError(Exception):\n \"\"\"Raised when there's a problem with the header file\"\"\"\n pass\n\n\nclass OpenSpectraFileError(Exception):\n \"\"\"Raised when there's a problem with the data file\"\"\"\n pass\n\n" ]
[ [ "numpy.float64", "numpy.array", "numpy.arange", "numpy.frombuffer" ] ]
IanHawke/numerical-methods-course
[ "f402faa23ab5ce6a72a7b3b5bf292d5f3c18c645" ]
[ "exercises/solution_03_07.py" ]
[ "import numpy as np\n\ndef simpson(f, a, b, nstrips):\n '''\n Compute the quadrature of f on [a, b].\n\n Parameters\n ----------\n\n f : function\n The integrand\n a : float\n The start of the domain\n b : float\n The end of the domain\n nstrips : int\n The number of strips\n\n Returns\n -------\n\n I : float\n The integral approximation\n '''\n x, dx = np.linspace(a, b, num=2*nstrips+1, endpoint=True, retstep=True)\n return dx / 3 * (f(x[0]) + f(x[-1]) + 4 * np.sum(f(x[1:-1:2]))+ 2 * np.sum(f(x[2:-1:2])))\n" ]
[ [ "numpy.linspace" ] ]
ponll/machine_learning_hello_world
[ "7c16ae4f37dab3e4e0495804943d760e9dfa0907" ]
[ "machine_learning_hello_world.py" ]
[ "'''\r\nfor a series of inputs x, there is an output y with the relationship y=f(x). Machine learning attempts to build a data model based \r\non features of the data, this statistical model g(x) is an approximation of f(x). In this case the output is a binary class - did\r\nthe passenger survive or not. The inputs include features like: did the passenger have a first, second or third class ticket, was \r\nthe passenger male or female and so on. \r\nBelow I use the test-train split approach with three different algorithms. The 'art' in supervised machine learning is getting the\r\nbest prediction accuracy without overfitting.\r\n\r\nThis problem is a classification problem as every passenger fell into one of two categories - they survived or they died\r\nso the 3 machine learning techniques used below are classification techniques, they are also supervised\r\nlearning techniques which means we need to break the original data set into a training dataset and a test\r\ndataset. The difference between classification and regression:\r\n\r\nRegression: the output variable takes continuous values.\r\n\r\nClassification: the output variable takes class labels.\r\n\r\n\r\nThe original data file is available from various sources, for example: http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic3.xls\r\nAfter downloading the file the only change I made was I converted it to a csv file.\r\n\r\nThe script was written and tested using idle in python 2. I wrote this before I discovered ipython/jupyter notebooks.\r\n'''\r\n\r\n#import the necessary libraries\r\nimport pandas as pd\r\nfrom sklearn import tree, preprocessing\r\nimport sklearn.ensemble as ske\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n#read the data into a pandas dataframe - this makes the preprocessing of the data easier\r\ndf = pd.read_csv('titanic_data.csv')\r\n#the data needs to be prepared for ML - drop fields which have lots of missing data, then drop rows with missing data\r\ndf = df.drop(['body','cabin','boat','home.dest','name','ticket'],axis=1)\r\ndf = df.dropna()\r\n\r\n#machine learning needs numerical values not strings\r\nle = preprocessing.LabelEncoder()\r\ndf.sex = le.fit_transform(df.sex)\r\ndf.embarked = le.fit_transform(df.embarked)\r\n'''\r\na row from the original data looked like:\r\npclass\tsurvived\tname\t sex\tage\tsibsp\tparch\tticket\tfare\t cabin\tembarked\tboat\tbody\thome.dest\r\n1\t1\tAllen, Miss. 
Elisabeth Walton\tfemale\t29\t0\t0\t24160\t211.3375\tB5\tS\t 2\t\tSt Louis, MO\r\n\r\na typical row now looks like:\r\npclass survived sex age sibsp parch fare embarked\r\n1 1 0 29.0000 0 0 211.3375 2\r\n'''\r\n#create two new numpy arrays, X has the survived column values removed and y is only the survived column values\r\nX = df.drop(['survived'], axis=1).values\r\ny = df['survived'].values\r\n\r\n#we are using supervised learning so we need training and test data, the test_size parameter determines the relative sizes of the training and test data sets\r\nX_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3, random_state=0)\r\n\r\n#use three different approaches and print out the success rate (1 = 100%), you can vary the parameters below and the test_size parameter above to try to\r\n#improve success rate\r\nclf_dt = tree.DecisionTreeClassifier(max_depth=10)\r\nclf_dt = clf_dt.fit(X_train, y_train)\r\nprint(clf_dt.score(X_test,y_test))\r\n \r\nclf_rf = ske.RandomForestClassifier(n_estimators=50)\r\nclf_rf = clf_rf.fit(X_train, y_train)\r\nprint(clf_rf.score(X_test,y_test))\r\n\r\nclf_gb = ske.GradientBoostingClassifier(n_estimators=50)\r\nclf_gb = clf_gb.fit(X_train, y_train)\r\nprint(clf_gb.score(X_test,y_test))\r\n\r\n'''\r\nI found the gradient boosting technique gave the best results (about 82% accuracy without any fine tuning) and the decision\r\ntree gave the worst results\r\n\r\nYou'll find plenty of examples online explaining what random forest, decision trees and gradient boosting are.\r\n'''\r\n\r\n\r\n\r\n" ]
[ [ "sklearn.preprocessing.LabelEncoder", "sklearn.ensemble.RandomForestClassifier", "sklearn.tree.DecisionTreeClassifier", "sklearn.model_selection.train_test_split", "pandas.read_csv", "sklearn.ensemble.GradientBoostingClassifier" ] ]
SeaOfOcean/mlflow
[ "83aabfd00096ed0939fc319bb20719957e573088" ]
[ "mlflow/pyfunc/scoring_server/__init__.py" ]
[ "\"\"\"\nScoring server for python model format.\nThe passed int model is expected to have function:\n predict(pandas.Dataframe) -> pandas.DataFrame\n\nInput, expected intext/csv or application/json format,\nis parsed into pandas.DataFrame and passed to the model.\n\nDefines two endpoints:\n /ping used for health check\n /invocations used for scoring\n\"\"\"\nfrom __future__ import print_function\n\nfrom collections import OrderedDict\nimport flask\nimport json\nfrom json import JSONEncoder\nimport logging\nimport numpy as np\nimport pandas as pd\nfrom six import reraise\nimport sys\nimport traceback\n\n# NB: We need to be careful what we import form mlflow here. Scoring server is used from within\n# model's conda environment. The version of mlflow doing the serving (outside) and the version of\n# mlflow in the model's conda environment (inside) can differ. We should therefore keep mlflow\n# dependencies to the minimum here.\n# ALl of the mlfow dependencies below need to be backwards compatible.\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.utils.proto_json_utils import NumpyEncoder\n\ntry:\n from mlflow.pyfunc import load_model\nexcept ImportError:\n from mlflow.pyfunc import load_pyfunc as load_model\nfrom mlflow.protos.databricks_pb2 import MALFORMED_REQUEST, BAD_REQUEST\nfrom mlflow.server.handlers import catch_mlflow_exception\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\n_SERVER_MODEL_PATH = \"__pyfunc_model_path__\"\n\nCONTENT_TYPE_CSV = \"text/csv\"\nCONTENT_TYPE_JSON = \"application/json\"\nCONTENT_TYPE_JSON_RECORDS_ORIENTED = \"application/json; format=pandas-records\"\nCONTENT_TYPE_JSON_SPLIT_ORIENTED = \"application/json; format=pandas-split\"\nCONTENT_TYPE_JSON_SPLIT_NUMPY = \"application/json-numpy-split\"\n\nCONTENT_TYPES = [\n CONTENT_TYPE_CSV,\n CONTENT_TYPE_JSON,\n CONTENT_TYPE_JSON_RECORDS_ORIENTED,\n CONTENT_TYPE_JSON_SPLIT_ORIENTED,\n CONTENT_TYPE_JSON_SPLIT_NUMPY\n]\n\n_logger = logging.getLogger(__name__)\n\n\ndef parse_json_input(json_input, orient=\"split\"):\n \"\"\"\n :param json_input: A JSON-formatted string representation of a Pandas DataFrame, or a stream\n containing such a string representation.\n :param orient: The Pandas DataFrame orientation of the JSON input. This is either 'split'\n or 'records'.\n \"\"\"\n # pylint: disable=broad-except\n try:\n return pd.read_json(json_input, orient=orient, dtype=False)\n except Exception:\n _handle_serving_error(\n error_message=(\n \"Failed to parse input as a Pandas DataFrame. Ensure that the input is\"\n \" a valid JSON-formatted Pandas DataFrame with the `{orient}` orient\"\n \" produced using the `pandas.DataFrame.to_json(..., orient='{orient}')`\"\n \" method.\".format(orient=orient)),\n error_code=MALFORMED_REQUEST)\n\n\ndef parse_csv_input(csv_input):\n \"\"\"\n :param csv_input: A CSV-formatted string representation of a Pandas DataFrame, or a stream\n containing such a string representation.\n \"\"\"\n # pylint: disable=broad-except\n try:\n return pd.read_csv(csv_input)\n except Exception:\n _handle_serving_error(\n error_message=(\n \"Failed to parse input as a Pandas DataFrame. 
Ensure that the input is\"\n \" a valid CSV-formatted Pandas DataFrame produced using the\"\n \" `pandas.DataFrame.to_csv()` method.\"),\n error_code=MALFORMED_REQUEST)\n\n\ndef parse_split_oriented_json_input_to_numpy(json_input):\n \"\"\"\n :param json_input: A JSON-formatted string representation of a Pandas DataFrame with split\n orient, or a stream containing such a string representation.\n \"\"\"\n # pylint: disable=broad-except\n try:\n json_input_list = json.loads(json_input, object_pairs_hook=OrderedDict)\n return pd.DataFrame(index=json_input_list['index'],\n data=np.array(json_input_list['data'], dtype=object),\n columns=json_input_list['columns']).infer_objects()\n except Exception:\n _handle_serving_error(\n error_message=(\n \"Failed to parse input as a Numpy. Ensure that the input is\"\n \" a valid JSON-formatted Pandas DataFrame with the split orient\"\n \" produced using the `pandas.DataFrame.to_json(..., orient='split')`\"\n \" method.\"\n ),\n error_code=MALFORMED_REQUEST)\n\n\ndef predictions_to_json(raw_predictions, output):\n predictions = _get_jsonable_obj(raw_predictions, pandas_orient=\"records\")\n json.dump(predictions, output, cls=NumpyEncoder)\n\n\ndef _handle_serving_error(error_message, error_code):\n \"\"\"\n Logs information about an exception thrown by model inference code that is currently being\n handled and reraises it with the specified error message. The exception stack trace\n is also included in the reraised error message.\n\n :param error_message: A message for the reraised exception.\n :param error_code: An appropriate error code for the reraised exception. This should be one of\n the codes listed in the `mlflow.protos.databricks_pb2` proto.\n \"\"\"\n traceback_buf = StringIO()\n traceback.print_exc(file=traceback_buf)\n reraise(MlflowException,\n MlflowException(\n message=error_message,\n error_code=error_code,\n stack_trace=traceback_buf.getvalue()))\n\n\ndef init(model):\n \"\"\"\n Initialize the server. Loads pyfunc model from the path.\n \"\"\"\n app = flask.Flask(__name__)\n\n @app.route('/ping', methods=['GET'])\n def ping(): # pylint: disable=unused-variable\n \"\"\"\n Determine if the container is working and healthy.\n We declare it healthy if we can load the model successfully.\n \"\"\"\n health = model is not None\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')\n\n @app.route('/invocations', methods=['POST'])\n @catch_mlflow_exception\n def transformation(): # pylint: disable=unused-variable\n \"\"\"\n Do an inference on a single batch of data. 
In this sample server,\n we take data as CSV or json, convert it to a Pandas DataFrame or Numpy,\n generate predictions and convert them back to json.\n \"\"\"\n # Convert from CSV to pandas\n if flask.request.content_type == CONTENT_TYPE_CSV:\n data = flask.request.data.decode('utf-8')\n csv_input = StringIO(data)\n data = parse_csv_input(csv_input=csv_input)\n elif flask.request.content_type in [CONTENT_TYPE_JSON, CONTENT_TYPE_JSON_SPLIT_ORIENTED]:\n data = parse_json_input(json_input=flask.request.data.decode('utf-8'),\n orient=\"split\")\n elif flask.request.content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:\n data = parse_json_input(json_input=flask.request.data.decode('utf-8'),\n orient=\"records\")\n elif flask.request.content_type == CONTENT_TYPE_JSON_SPLIT_NUMPY:\n data = parse_split_oriented_json_input_to_numpy(flask.request.data.decode('utf-8'))\n else:\n return flask.Response(\n response=(\"This predictor only supports the following content types,\"\n \" {supported_content_types}. Got '{received_content_type}'.\".format(\n supported_content_types=CONTENT_TYPES,\n received_content_type=flask.request.content_type)),\n status=415,\n mimetype='text/plain')\n\n # Do the prediction\n # pylint: disable=broad-except\n try:\n raw_predictions = model.predict(data)\n except Exception:\n _handle_serving_error(\n error_message=(\n \"Encountered an unexpected error while evaluating the model. Verify\"\n \" that the serialized input Dataframe is compatible with the model for\"\n \" inference.\"),\n error_code=BAD_REQUEST)\n result = StringIO()\n predictions_to_json(raw_predictions, result)\n return flask.Response(response=result.getvalue(), status=200, mimetype='application/json')\n\n return app\n\n\ndef _predict(model_uri, input_path, output_path, content_type, json_format):\n pyfunc_model = load_model(model_uri)\n if input_path is None:\n input_path = sys.stdin\n\n if content_type == \"json\":\n df = parse_json_input(input_path, orient=json_format)\n elif content_type == \"csv\":\n df = parse_csv_input(input_path)\n else:\n raise Exception(\"Unknown content type '{}'\".format(content_type))\n\n if output_path is None:\n predictions_to_json(pyfunc_model.predict(df), sys.stdout)\n else:\n with open(output_path, \"w\") as fout:\n predictions_to_json(pyfunc_model.predict(df), fout)\n\n\ndef _serve(model_uri, port, host):\n pyfunc_model = load_model(model_uri)\n init(pyfunc_model).run(port=port, host=host)\n\n\ndef _get_jsonable_obj(data, pandas_orient=\"records\"):\n \"\"\"Attempt to make the data json-able via standard library.\n Look for some commonly used types that are not jsonable and convert them into json-able ones.\n Unknown data types are returned as is.\n\n :param data: data to be converted, works with pandas and numpy, rest will be returned as is.\n :param pandas_orient: If `data` is a Pandas DataFrame, it will be converted to a JSON\n dictionary using this Pandas serialization orientation.\n \"\"\"\n if isinstance(data, np.ndarray):\n return data.tolist()\n if isinstance(data, pd.DataFrame):\n return data.to_dict(orient=pandas_orient)\n if isinstance(data, pd.Series):\n return pd.DataFrame(data).to_dict(orient=pandas_orient)\n else: # by default just return whatever this is and hope for the best\n return data\n" ]
[ [ "pandas.DataFrame", "pandas.read_json", "numpy.array", "pandas.read_csv" ] ]
jancervenka/airbus-ship-detection
[ "87cb1c786182afa248a324f65b23153aa998e6ae" ]
[ "asdc/tests/test_backend.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# 2020, Jan Cervenka\n\nimport json\nimport numpy as np\nfrom unittest import TestCase, main\nfrom .utils import MockRedis, MockKerasModel\nfrom ..service.constants import REQUEST_BATCH_SIZE, QUEUE_NAME\nfrom ..service.backend import RequestProcessor\n\n\nclass BackendProcessorTest(TestCase):\n \"\"\"\n Tests `backend.RequestProcessor` class.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Sets up the tests.\n \"\"\"\n\n self._test_image_shape = (10, 10, 3)\n self._test_n_output = 4\n\n # png image ones(5, 5, 3)\n self._test_image_b64 = ''.join(\n ('iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAIAAAD91JpzAAAAE',\n '0lEQVQIHWMEAgYGBkYgYGBgAAAAVgAJ0q8TBQAAAABJRU5ErkJggg=='))\n\n # decoded and resized self._test_image_b64\n self._test_image = np.ones(\n shape=self._test_image_shape).astype('float32') / 255\n\n self._db = MockRedis()\n self._model = MockKerasModel(\n image_shape=self._test_image_shape, n_output=self._test_n_output)\n\n self._request_processor = RequestProcessor(\n db=self._db, model=self._model)\n\n def test_get_requests(self):\n \"\"\"\n Tests correct number of values is retrieved from the\n Redis queue.\n \"\"\"\n\n self.assertListEqual(self._request_processor._get_requests(), [])\n\n def test_case_wrapper(n_to_push, n_expected):\n \"\"\"\n Test case wrapper.\n\n :param n_to_push: number of values to be pushed\n to the queue\n :param n_expected: number of values expected\n to be retrieved from the queue\n \"\"\"\n\n for _ in range(n_to_push):\n self._db.rpush(QUEUE_NAME, {})\n\n self.assertListEqual(\n self._request_processor._get_requests(), [{}] * n_expected)\n\n test_case_wrapper(3, 3)\n test_case_wrapper(REQUEST_BATCH_SIZE + 10, REQUEST_BATCH_SIZE)\n\n self._db.delete(QUEUE_NAME)\n\n def test_resize_if_required(self):\n \"\"\"\n Tests that `backend.RequestProcessor._resize_if_required`\n correctly resizes an image if necessary.\n \"\"\"\n\n expected = np.zeros(shape=self._test_image_shape)\n\n tests = (\n (expected, expected),\n (np.zeros(shape=(28, 28, 3)), expected),\n (np.zeros(shape=(5, 5, 3)), expected))\n\n for test_case_image, expected in tests:\n np.testing.assert_array_equal(\n self._request_processor._resize_if_required(test_case_image),\n expected)\n\n def test_create_image_failure(self):\n \"\"\"\n Tests that `backend.RequestProcessor._create_image` returns\n `None` if an image cannot be created.\n \"\"\"\n\n self.assertIsNone(self._request_processor._create_image('test'))\n\n def test_create_image_success(self):\n \"\"\"\n Tests that `backend.RequestProcessor._create_image` can\n successfully create an image.\n \"\"\"\n\n expected = self._test_image\n np.testing.assert_array_equal(\n self._request_processor._create_image(self._test_image_b64),\n expected)\n\n def test_prepare_ok_nok_requests_empty(self):\n \"\"\"\n Tests that `backend.RequestProcessor._prepare_ok_nok_requests`\n can handle empty requests.\n \"\"\"\n\n for result in self._request_processor._prepare_ok_nok_requests([]):\n self.assertListEqual(list(result), [])\n\n def test_prepare_ok_nok_requests(self):\n \"\"\"\n Test that `backend.RequestProcessor._prepare_ok_nok_requests`\n correctly prepares and filters ok and nok requests.\n \"\"\"\n\n test_case_requests = [\n {'id': 1, 'image_b64': self._test_image_b64},\n {'id': 2, 'image_b64': 'test'},\n {'id': 3, 'image_b64': 'test'},\n {'id': 4, 'image_b64': self._test_image_b64}]\n\n expected_ok = [(self._test_image, 1), (self._test_image, 4)]\n expected_nok = [(None, 2), (None, 3)]\n\n result_ok, result_nok = 
self._request_processor._prepare_ok_nok_requests(\n test_case_requests)\n\n def assert_prepared_requests_equal(result, expected):\n \"\"\"\n Asserts that a list produced by the function\n is equal to the expected list.\n \"\"\"\n\n for r_r, r_e in zip(result, expected):\n\n # compare id\n self.assertEqual(r_r[1], r_e[1])\n # compare arrays\n if isinstance(r_e[0], np.ndarray):\n np.testing.assert_array_equal(r_r[0], r_e[0])\n else:\n self.assertIsNone(r_e[0])\n self.assertIsNone(r_e[0])\n\n assert_prepared_requests_equal(list(result_ok), expected_ok)\n assert_prepared_requests_equal(list(result_nok), expected_nok)\n\n def test_process_nok_requests(self):\n \"\"\"\n Tests that `backend.RequestProcessor._process_nok_requests`\n correctly process nok requests and stores them in the Redis.\n \"\"\"\n\n test_case_nok_requests = ((None, 't_1'), (None, 't_2'))\n self._request_processor._process_nok_requests(test_case_nok_requests)\n\n for key in ('t_1', 't_2'):\n self.assertDictEqual(json.loads(self._db[key]), {'error': 'image_not_compatible'})\n\n self._db.delete('t_1')\n self._db.delete('t_2')\n\n def test_process_ok_requests(self):\n \"\"\"\n Tests that `backend.RequestProcessor._process_ok_requests`\n correctly process nok requests and stores them in the Redis.\n \"\"\"\n\n # tests empty requests, nothing to process, empty Redis\n self._request_processor._process_ok_requests(tuple())\n self.assertEqual(len(self._db), 0)\n\n # tests one requests\n test_case_ok_requests = ((self._test_image, 't_3'),)\n self._request_processor._process_ok_requests(test_case_ok_requests)\n self.assertDictEqual(json.loads(self._db['t_3']), {'prediction': 0.5})\n\n # tests n requests\n test_case_ok_requests = ((self._test_image, 't_4'),\n (self._test_image, 't_5'))\n self._request_processor._process_ok_requests(test_case_ok_requests)\n\n # test everything is stored, then cleaup\n for request_id in ('t_3', 't_4', 't_5'):\n self.assertDictEqual(json.loads(self._db[request_id]), {'prediction': 0.5})\n self._db.delete(request_id)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.ones", "numpy.zeros" ] ]
rohankumardubey/bayesianpy
[ "a5f69ac6153b010051019e442a27274e5f38eed2" ]
[ "bayesianpy/dask/cross_validation.py" ]
[ "import dask.array as da\nimport numpy as np\nimport numbers\nimport dask.dataframe as dd\nimport bayesianpy.utils.list\n\nclass KFold:\n\n def __init__(self, n_splits):\n if not isinstance(n_splits, numbers.Integral):\n raise ValueError('The number of folds must be of Integral type. '\n '%s of type %s was passed.'\n % (n_splits, type(n_splits)))\n n_splits = int(n_splits)\n\n if n_splits <= 1:\n raise ValueError(\n \"k-fold cross-validation requires at least one\"\n \" train/test split by setting n_splits=2 or more,\"\n \" got n_splits={0}.\".format(n_splits))\n\n #if not isinstance(shuffle, bool):\n # raise TypeError(\"shuffle must be True or False;\"\n # \" got {0}\".format(shuffle))\n\n self.n_splits = n_splits\n #self.shuffle = shuffle\n #self.random_state = random_state\n\n def split(self, ddf):\n partitions = ddf.random_split((1 / self.n_splits) * np.ones(self.n_splits, dtype=np.int))\n for split in range(self.n_splits):\n training_partitions = bayesianpy.utils.list.exclude(partitions, split)\n yield (dd.concat(training_partitions, axis=0, interleave_partitions=True), partitions[split])\n\n\n def get_n_splits(self):\n \"\"\"Returns the number of splitting iterations in the cross-validator\n Parameters\n ----------\n X : object\n Always ignored, exists for compatibility.\n y : object\n Always ignored, exists for compatibility.\n groups : object\n Always ignored, exists for compatibility.\n Returns\n -------\n n_splits : int\n Returns the number of splitting iterations in the cross-validator.\n \"\"\"\n return self.n_splits\n\n" ]
[ [ "numpy.ones" ] ]
maledicente/cursos
[ "00ace48da7e48b04485e4ca97b3ca9ba5f33a283" ]
[ "Data_Science/grafico-bar.py" ]
[ "from matplotlib import pyplot as plt\nmovies = [\"Annie Hall\", \"Ben-Hur\", \"Casablanca\", \"Gandhi\", \"West Side Story\"]\nnum_oscars = [5, 11, 3, 8, 10]\n\n# barras possuem o tamanho padrão de 0.8, então adicionaremos 0.1 às\n# coordenadas à esquerda para que cada barra seja centralizada\nxs = [i + 0.1 for i, _ in enumerate(movies)]\n\n# as barras do gráfico com as coordenadas x à esquerda [xs], alturas [num_oscars]\nplt.bar(xs, num_oscars)\nplt.ylabel(\"# de Premiações\")\nplt.title(\"Meus Filmes Favoritos\")\n\n# nomeia o eixo x com nomes de filmes na barra central\nplt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)\nplt.show()" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.bar", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
paulttt/mAP_3Dvolume
[ "ad54ba1126c3a1d19958be83f5568860c565842e" ]
[ "vol3d_eval_custom.py" ]
[ "import numpy as np\nimport datetime\nimport time\n\nclass VOL3Deval:\n # Interface for evaluating video instance segmentation on the YouTubeVIS dataset.\n #\n # The usage for YTVOSeval is as follows:\n # cocoGt=..., cocoDt=... # load dataset and results\n # E = VOL3Deval(cocoGt,cocoDt); # initialize YTVOSeval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, result_p, result_fn, score_p=None, iouType='segm', output_name=''):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType segm')\n # num_obj x {all, s, m ,l} x {id, size, IOU}\n\n # load false negative\n self.result_fn = result_fn\n self.result_p = result_p\n self.output_name = output_name\n self.output_writer = None\n\n # load detection\n self.cocoDt = result_p[:, :2] # detections COCO API\n self.D = self.cocoDt.shape[0]\n self.scores = score_p # detections COCO API\n if self.scores is None:\n self.scores = np.zeros(self.D)\n\n self.params = Params(iouType=iouType) # parameters\n self.th = self.params.iouThrs.repeat(self.D).reshape((-1, self.D)) #get same length as ious\n self.T = len(self.params.iouThrs)\n\n self.cocoGt = result_p[:, 2:].reshape(-1, 4, 3) # ground truth COCO API\n gid, gix = np.unique(np.hstack([self.result_fn[:, 2], self.cocoGt[:, 0, 0]]), return_index=True)\n gic = np.hstack([self.result_fn[:, 3], self.cocoGt[:, 0, 1]])[gix[gid > 0]]\n self.gid = gid[gid > 0].astype(int)\n self.gic = gic\n self.G = len(self.gid)\n\n self.eval = {} # accumulated evaluation results\n self.stats = [] # result summarization\n\n def get_dtm_by_area(self, area_id):\n \"\"\"\n For each instance, we need the number of true positives, false positives and false negatives\n at each IoU threshold.\n \"\"\"\n\n cocoGt = self.cocoGt[:, area_id]\n\n # gtIg: size self.G (include 0)\n gtIg = (self.gic <= self.params.areaRng[area_id][0])+(self.gic>self.params.areaRng[area_id][1])\n gtIg_id = self.gid[gtIg]\n\n # if no match in the area range, add back best\n match_id = cocoGt[:, 0].astype(int)\n match_iou = cocoGt[:, 2]\n match_iou[match_id == 0] = self.cocoGt[match_id == 0, 0, 2]\n match_id[match_id == 0] = self.cocoGt[match_id == 0, 0, 0]\n\n dtm = match_id * (match_iou >= self.th)\n # find detection outside the area range\n dtIg = (dtm > 0)*np.isin(dtm, gtIg_id).reshape(dtm.shape)\n a = (self.cocoDt[:, 1] <= self.params.areaRng[area_id][0])+(self.cocoDt[:, 1] > self.params.areaRng[area_id][1])\n dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.tile(a, (self.T, 1))))\n\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n npig = (gtIg == 0).sum()\n return tps, fps, npig\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n\n# if not self.evalImgs:\n# print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n T = len(p.iouThrs)\n R = len(p.recThrs)\n A = len(p.areaRng)\n precision = -np.ones((T, R, A)) #-1 for the precision of absent categories\n recall = -np.ones((T, A))\n scores = -np.ones((T, R, A))\n\n # create dictionary for future indexing\n _pe = self.params\n setA = set(map(tuple, _pe.areaRng))\n # get inds to evaluate\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n Nk = A0\n for a, a0 in enumerate(a_list):\n tps, fps, npig = self.get_dtm_by_area(a)\n if npig == 0:\n continue\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t, a] = rc[-1]\n else:\n recall[t, a] = 0\n\n # numpy is slow without cython optimization for 
accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = self.scores[pi]\n except:\n pass\n precision[t, :, a] = np.array(q)\n scores[t, :, a] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, A],\n# 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this function can *only* be applied on the default parameter setting\n '''\n def _summarize(ap=1, iouThr=None, areaRng='all', maxDets=100):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap == 1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:, :, aind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:, aind]\n if len(s[s > -1]) == 0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n\n msg = iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s)\n if self.output_writer is None:\n print(msg)\n else:\n self.output_writer.write(msg+'\\n')\n\n return mean_s\n\n def _summarizeDets():\n stats = np.zeros((10,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5)#, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75)#, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', iouThr=.75)#, maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', iouThr=.75)#, maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', iouThr=.75)#, maxDets=self.params.maxDets[2])\n # no recall\n \"\"\"\n stats[6] = _summarize(0)#, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, areaRng='small')\n stats[8] = _summarize(0, areaRng='medium')\n stats[9] = _summarize(0, areaRng='large')\n \"\"\"\n return stats\n\n if not self.eval:\n raise Exception('Please run accumulate() first')\n\n #self.output_writer = open(self.output_name+'_map.txt', 'w') if self.output_name != '' else None\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n\n self.stats = summarize()\n if self.output_writer is not None:\n self.output_writer.close()\n\n def save_match_p(self, output_name=''):\n header = '\\tprediction |\\t\\t gt all \\t\\t|\\t\\t gt small \\t\\t|\\t\\tgt medium \\t\\t|\\t gt large\\n' + \\\n 'ID\\tSIZE\\t| ID\\tSIZE\\tIoU\\t| ID\\tSIZE\\tIoU\\t| ID\\tSIZE\\tIoU\\t| ID\\tSIZE\\tIoU \\t\\n' + '-'*108\n rowformat = '%d\\t%4d\\t%d\\t%4d\\t%.4f\\t%d\\t%4d\\t%.4f\\t\\t%d\\t%4d\\t%.4f\\t%d\\t%4d\\t%.4f'\n np.savetxt(self.output_name+output_name+'_match_p.txt', self.result_p, fmt=rowformat, header=header)\n\n def save_match_fn(self, output_name=''):\n header = '\\tprediction \\t |\\t gt \\t\\n' + \\\n 
'ID\\tSIZE\\t| ID\\tSIZE\\tIoU \\n' + '-'*40\n rowformat = '%d\\t%4d\\t%d\\t%4d\\t%.4f'\n np.savetxt(self.output_name+output_name+'_match_fn.txt', self.result_fn, fmt=rowformat, header=header)\n\n def __str__(self):\n self.summarize()\n \n def summarize_without_file(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize(ap=1, iouThr=None, areaRng='all', maxDets=100):\n p = self.params\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(np.abs(p.iouThrs - iouThr) < 1e-5)[0]\n s = s[t]\n s = s[:, :, aind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(np.abs(p.iouThrs - iouThr) < 1e-5)[0]\n s = s[t]\n s = s[:, aind]\n if len(s[s > -1]) == 0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s > -1])\n\n return mean_s\n\n def _summarizeDets():\n stats = np.zeros((10,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5)#, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75)#, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, iouThr=.9) # , maxDets=self.params.maxDets[2])\n # stats[3] = _summarize(1, areaRng='small', iouThr=.75)#, maxDets=self.params.maxDets[2])\n # stats[4] = _summarize(1, areaRng='medium', iouThr=.75)#, maxDets=self.params.maxDets[2])\n # stats[5] = _summarize(1, areaRng='large', iouThr=.75)#, maxDets=self.params.maxDets[2])\n # no recall\n \"\"\"\n stats[6] = _summarize(0)#, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, areaRng='small')\n stats[8] = _summarize(0, areaRng='medium')\n stats[9] = _summarize(0, areaRng='large')\n \"\"\"\n return stats\n\n if not self.eval:\n raise Exception('Please run accumulate() first')\n\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n \n self.stats = _summarizeDets()\n \n def get_stats(self):\n stats = {}\n self.accumulate()\n self.summarize_without_file()\n stats['mAP @ 0.5:0.95'] = self.stats[0]\n stats['AP @ 0.5'] = self.stats[1]\n stats['AP @ 0.75'] = self.stats[2]\n stats['AP @ 0.9'] = self.stats[3]\n #stats['small @ 0.75'] = self.stats[3]\n #stats['medium @ 0.75'] = self.stats[4]\n #stats['large @ 0.75'] = self.stats[5]\n stats['Precision'] = self.eval['precision']\n stats['Average Precision'] = np.mean(self.eval['precision'], axis=1)\n stats['Recall'] = self.eval['recall']\n stats['Scores'] = self.eval['scores']\n return stats\n\n\nclass Params:\n '''\n Params for coco evaluation api\n '''\n def setDetParams(self):\n \t# np.arange causes trouble. the data point on arange is slightly larger than the true\n self.iouThrs = 0.5 + 0.05 * np.arange(0,10)\n self.recThrs = 0.01 * np.arange(0,101)\n self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 128 ** 2], [ 128 ** 2, 256 ** 2], [256 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'small', 'medium', 'large']\n\n def __init__(self, iouType='segm'):\n if iouType == 'segm' or iouType == 'bbox':\n self.setDetParams()\n else:\n raise Exception('iouType not supported')\n self.iouType = iouType\n # useSegm is deprecated\n self.useSegm = None\n" ]
[ [ "numpy.logical_not", "numpy.array", "numpy.savetxt", "numpy.zeros", "numpy.searchsorted", "numpy.ones", "numpy.tile", "numpy.mean", "numpy.where", "numpy.arange", "numpy.abs", "numpy.cumsum", "numpy.hstack", "numpy.spacing", "numpy.isin" ] ]
zrphercule/glow
[ "3dab5984680e9acc3dd2b96157ff205b1ef46c93" ]
[ "torch_glow/tests/nodes/quantized_conv2d_test.py" ]
[ "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport unittest\n\nimport torch\nfrom tests import utils\n\n\nclass TestQuantizedConv2d(utils.TorchGlowTestCase):\n @unittest.skip(reason=\"Requires freezing\")\n def test_quantized_conv2d_unpacked(self):\n \"\"\"Basic test of the PyTorch quantize::conv2d Node with unpacked weights on Glow.\"\"\"\n\n class SimpleQuantizedConvModel(torch.nn.Module):\n def __init__(self):\n super(SimpleQuantizedConvModel, self).__init__()\n\n def forward(self, a, w, b):\n qu = torch.nn.quantized.Quantize(1 / 16, 0, torch.quint8)\n qi = torch.nn.quantized.Quantize(1 / 16, 0, torch.qint8)\n dq = torch.nn.quantized.DeQuantize()\n conv = torch.nn.quantized.functional.conv2d\n return dq(conv(qu(a), qi(w), b))\n\n # TODO\n # Due to the quantization error between\n # PyTorch and Glow, we would like to use some\n # determined test data instead of random ones\n # x = torch.randn([3, 3, 3, 3])\n # w = torch.randn([3, 3, 3, 3])\n # b = torch.randn([3])\n\n x = torch.tensor([[[[5.0, 6.0], [7.0, 8.0]]]])\n w = torch.tensor([[[[1.0, 2.0], [3.0, 4.0]]]])\n b_zero = torch.zeros(1)\n b = torch.randn(1)\n\n utils.compare_tracing_methods(\n SimpleQuantizedConvModel(),\n x,\n w,\n b,\n fusible_ops={\n \"aten::quantize_per_tensor\",\n \"glow::unpacked_quantized_conv2d\",\n \"aten::dequantize\",\n },\n skip_to_glow=True,\n )\n\n utils.compare_tracing_methods(\n SimpleQuantizedConvModel(),\n x,\n w,\n b_zero,\n fusible_ops={\n \"aten::quantize_per_tensor\",\n \"glow::unpacked_quantized_conv2d\",\n \"aten::dequantize\",\n },\n skip_to_glow=True,\n )\n\n def test_quantized_conv2d_packed_groupwise(self):\n \"\"\"Basic test of PyTorch quantize::conv2d Node with packed weights on Glow.\"\"\"\n\n x = torch.tensor(range(5), dtype=torch.float)\n x = torch.cat((x, x, x, x, x))\n x = torch.cat((x, x, x))\n x = torch.reshape(x, [1, 3, 5, 5])\n q = torch.nn.quantized.Quantize(0.1, 2, torch.quint8)\n conv = torch.nn.Conv2d(3, 3, [2, 2], groups=3)\n dq = torch.nn.quantized.DeQuantize()\n\n # Due to the off-by-one error, we cannot let the weights, bias & input\n # to be totally random.\n conv.weight.data.fill_(2.0)\n conv.bias.data.fill_(1.0)\n\n model = torch.nn.Sequential(q, conv, dq)\n model.eval()\n model.qconfig = torch.quantization.get_default_qconfig(\"fbgemm\")\n\n torch.quantization.prepare(model, inplace=True)\n torch.quantization.convert(model, inplace=True)\n\n utils.compare_tracing_methods(\n model,\n x,\n fusible_ops={\n \"aten::quantize_per_tensor\",\n \"quantized::conv2d\",\n \"aten::dequantize\",\n },\n )\n\n def test_quantized_conv2d_packed_cut_q_dq(self):\n \"\"\"Basic test of PyTorch quantize::conv2d Node with packed weights on Glow, with quantize and dequantize excluded.\"\"\"\n\n x = torch.tensor(range(5), dtype=torch.float)\n x = torch.cat((x, x, x, x, x))\n x = torch.cat((x, x, x))\n x = torch.reshape(x, [1, 3, 5, 5])\n q = torch.nn.quantized.Quantize(0.1, 2, torch.quint8)\n conv = torch.nn.Conv2d(3, 3, [2, 2], groups=3)\n dq = torch.nn.quantized.DeQuantize()\n\n # Due to the off-by-one error, we cannot let the weights, bias & input\n # to be totally random.\n conv.weight.data.fill_(2.0)\n conv.bias.data.fill_(1.0)\n\n model = torch.nn.Sequential(q, conv, dq)\n model.eval()\n model.qconfig = torch.quantization.get_default_qconfig(\"fbgemm\")\n\n torch.quantization.prepare(model, inplace=True)\n torch.quantization.convert(model, inplace=True)\n\n utils.compare_tracing_methods(\n model,\n x,\n fusible_ops={\"quantized::conv2d\"},\n 
fusion_blocklist=[\"aten::quantize_per_tensor\", \"aten::dequantize\"],\n skip_to_glow=True,\n )\n\n def test_quantized_conv2d_packed_channelwise(self):\n \"\"\"Basic test of PyTorch quantize::conv2d Node with packed channelwise weights on Glow.\"\"\"\n\n with torch.no_grad():\n x = torch.randn([1, 4, 4, 4])\n\n conv = torch.nn.Conv2d(4, 2, [2, 2], groups=1)\n conv.weight.random_(-1, 1)\n conv.bias.data.random_(-1, 1)\n\n model = torch.quantization.QuantWrapper(conv)\n model.qconfig = torch.quantization.get_default_qconfig(\"fbgemm\")\n\n torch.quantization.prepare(model, inplace=True)\n # Calibration\n model.forward(x)\n torch.quantization.convert(model, inplace=True)\n\n # TODO: acuracy needs to be investigated. Average acuracy is decent\n # but some elements have errors (possibly from rounding differences)\n utils.compare_tracing_methods(\n model,\n x,\n fusible_ops={\n \"aten::quantize_per_tensor\",\n \"quantized::conv2d\",\n \"aten::dequantize\",\n },\n )\n\n def test_quantized_conv2d_packed_channelwise_serial_qconv(self):\n \"\"\"Test of serial structure PyTorch quantized::conv2d on Glow.\"\"\"\n\n class SerialQuantizedConvModel(torch.nn.Module):\n def __init__(self):\n super(SerialQuantizedConvModel, self).__init__()\n self.qconfig = torch.quantization.get_default_qconfig(\"fbgemm\")\n\n self.quant = torch.quantization.QuantStub()\n\n self.conv1 = torch.nn.Conv2d(4, 4, [2, 2], groups=1)\n self.conv1.weight.random_(-1, 1)\n self.conv1.bias.data.random_(-1, 1)\n\n self.conv2 = torch.nn.Conv2d(4, 2, [2, 2], groups=1)\n self.conv2.weight.random_(-1, 1)\n self.conv2.bias.data.random_(-1, 1)\n\n self.dequant = torch.quantization.DeQuantStub()\n\n def forward(self, x):\n x = self.quant(x)\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.dequant(x)\n return x\n\n with torch.no_grad():\n x = torch.randn([1, 4, 4, 4])\n model = SerialQuantizedConvModel()\n\n torch.quantization.prepare(model, inplace=True)\n # Calibration\n model.forward(x)\n torch.quantization.convert(model, inplace=True)\n\n utils.compare_tracing_methods(\n model,\n x,\n fusible_ops={\n \"aten::quantize_per_tensor\",\n \"quantized::conv2d\",\n \"aten::dequantize\",\n },\n )\n\n def test_quantized_conv2d_packed_channelwise_parallel_qconv(self):\n \"\"\"Test of parallel structure PyTorch quantized::conv2d on Glow.\"\"\"\n\n class ParallelQuantizedConvModel(torch.nn.Module):\n def __init__(self):\n super(ParallelQuantizedConvModel, self).__init__()\n self.qconfig = torch.quantization.get_default_qconfig(\"fbgemm\")\n\n self.quant = torch.quantization.QuantStub()\n\n self.conv1 = torch.nn.Conv2d(4, 2, [2, 2], groups=1)\n self.conv1.weight.random_(-1, 1)\n self.conv1.bias.data.random_(-1, 1)\n\n self.conv2 = torch.nn.Conv2d(4, 2, [2, 2], groups=1)\n self.conv2.weight.random_(-1, 1)\n self.conv2.bias.data.random_(-1, 1)\n\n self.cat = torch.ops.quantized.cat\n self.dequant = torch.quantization.DeQuantStub()\n self.dequant2 = torch.quantization.DeQuantStub()\n\n def forward(self, x):\n x = self.quant(x)\n x1 = self.conv1(x)\n x2 = self.conv2(x)\n x1 = self.dequant(x1)\n x2 = self.dequant2(x2)\n x = torch.cat((x1, x2), dim=0)\n return x\n\n with torch.no_grad():\n x = torch.randn([1, 4, 4, 4])\n model = ParallelQuantizedConvModel()\n\n torch.quantization.prepare(model, inplace=True)\n # Calibration\n model.forward(x)\n torch.quantization.convert(model, inplace=True)\n\n utils.compare_tracing_methods(\n model,\n x,\n fusible_ops={\n \"aten::quantize_per_tensor\",\n \"quantized::conv2d\",\n \"aten::dequantize\",\n },\n 
skip_to_glow=True,\n )\n" ]
[ [ "torch.zeros", "torch.cat", "torch.quantization.convert", "torch.quantization.QuantStub", "torch.nn.Sequential", "torch.quantization.prepare", "torch.no_grad", "torch.quantization.DeQuantStub", "torch.quantization.QuantWrapper", "torch.nn.Conv2d", "torch.nn.quantized.DeQuantize", "torch.tensor", "torch.quantization.get_default_qconfig", "torch.nn.quantized.Quantize", "torch.randn", "torch.reshape" ] ]
chris4540/DT2119-Final-Project
[ "a9e665d2fcc91442bcd80171fe557b09fcd71d00" ]
[ "scripts/get_train_dataset.py" ]
[ "\"\"\"\nGenerate the training dataset.\n\nThe number of phone should be 48 after using this script.\n\nRead data:\n>>> data = np.load('data/raw/full_traindata.npz')\n>>> data['phone_to_idx'].item() # the mapping\n>>> traindata = data['data'] # training features\n\n\nPrerequisite:\n data/raw/phone_to_idx.json\nGenerate by:\n ipython scripts/build_phone_to_idx.json can help to build the json\n\"\"\"\n\nimport os\nfrom preprocess import TIMITFeatureExtractor\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\nimport json\nfrom utils import map_phone_to_idx\n\nclass Config:\n dump_file_name = \"data/raw/full_traindata.npz\"\n phone_map_tsv = \"data/map/phones.60-48-39.map\"\n folder = os.path.join(\"TIMIT\", \"TRAIN\")\n\ndef extract_featurs_from(folder, phone_to_idx):\n # ===================================================\n data = list()\n cnt = 0\n for root, dirs, files in os.walk(folder):\n for f in files:\n fname = os.path.join(root, f)\n # skip if not a sound file\n if not f.endswith(\".WAV\"):\n continue\n # skip if SA sentances\n if f.startswith(\"SA\"):\n continue\n\n ext = TIMITFeatureExtractor(fname)\n extracted = ext.extract()\n\n phone = extracted['phone']\n\n # drop q phone\n idxs = np.argwhere(phone == 'q')\n phone = np.delete(phone, idxs)\n features = np.delete(extracted['features'], idxs, axis=0)\n assert len(phone) == features.shape[0]\n\n if '0' in phone:\n print(fname)\n raise IOError(\"Encounter 0 phone\")\n\n phone_idxs = map_phone_to_idx(phone, phone_to_idx)\n data.append({\n 'features': features,\n 'phone_idx': phone_idxs,\n 'file': extracted['file']\n })\n cnt += 1\n\n if cnt % 500 == 0:\n print(\"Processed %d data....\" % cnt)\n\n return data\n\nif __name__ == \"__main__\":\n # load the mapping\n df = pd.read_csv(Config.phone_map_tsv, sep=\"\\t\", index_col=0)\n df = df.dropna()\n df = df.drop('eval', axis=1)\n train_phn_idx = {k: i for i, k in enumerate(df['train'].unique())}\n df['train_idx'] = df['train'].map(train_phn_idx)\n phone_to_idx = df['train_idx'].to_dict()\n\n\n data = extract_featurs_from(Config.folder, phone_to_idx)\n print(\"Writing training data to %s ....\" % Config.dump_file_name)\n # saving\n kwargs = {\n 'data': data,\n 'phone_to_idx': train_phn_idx\n }\n np.savez(Config.dump_file_name, **kwargs)\n" ]
[ [ "pandas.read_csv", "numpy.savez", "numpy.delete", "numpy.argwhere" ] ]
kjersbry/pyRiemann
[ "f39734a5547d2c87a9b0585172e75beb0b1d5c71", "f39734a5547d2c87a9b0585172e75beb0b1d5c71" ]
[ "tests/test_clustering.py", "tests/test_spatialfilters.py" ]
[ "import numpy as np\nfrom numpy.testing import assert_array_equal\nimport pytest\nfrom pyriemann.clustering import Kmeans, KmeansPerClassTransform, Potato\n\n\ndef generate_cov(Nt, Ne):\n \"\"\"Generate a set of cavariances matrices for test purpose.\"\"\"\n rs = np.random.RandomState(1234)\n diags = 2.0 + 0.1 * rs.randn(Nt, Ne)\n A = 2*rs.rand(Ne, Ne) - 1\n A /= np.atleast_2d(np.sqrt(np.sum(A**2, 1))).T\n covmats = np.empty((Nt, Ne, Ne))\n for i in range(Nt):\n covmats[i] = np.dot(np.dot(A, np.diag(diags[i])), A.T)\n return covmats\n\n\ndef test_Kmeans_init():\n \"\"\"Test Kmeans\"\"\"\n covset = generate_cov(20, 3)\n labels = np.array([0, 1]).repeat(10)\n\n # init\n km = Kmeans(2)\n\n # fit\n km.fit(covset)\n\n # fit with init\n km = Kmeans(2, init=covset[0:2])\n km.fit(covset)\n\n # fit with labels\n km.fit(covset, y=labels)\n\n # predict\n km.predict(covset)\n\n # transform\n km.transform(covset)\n\n # n_jobs\n km = Kmeans(2, n_jobs=2)\n km.fit(covset)\n\n\ndef test_KmeansPCT_init():\n \"\"\"Test Kmeans PCT\"\"\"\n covset = generate_cov(20, 3)\n labels = np.array([0, 1]).repeat(10)\n\n # init\n km = KmeansPerClassTransform(2)\n\n # fit\n km.fit(covset, labels)\n\n # transform\n km.transform(covset)\n\n\ndef test_Potato_init():\n \"\"\"Test Potato\"\"\"\n covset = generate_cov(20, 3)\n labels = np.array([0, 1]).repeat(10)\n\n # init\n pt = Potato()\n\n # fit no labels\n pt.fit(covset)\n\n # fit with labels\n with pytest.raises(ValueError):\n pt.fit(covset, y=[1])\n\n with pytest.raises(ValueError):\n pt.fit(covset, y=[0] * 20)\n\n with pytest.raises(ValueError):\n pt.fit(covset, y=[0, 2, 3] + [1] * 17)\n\n pt.fit(covset, labels)\n\n # transform\n pt.transform(covset)\n pt.transform(covset[0][np.newaxis, ...]) # transform a single trial\n\n # predict\n pt.predict(covset)\n pt.predict(covset[0][np.newaxis, ...]) # predict a single trial\n\n # predict_proba\n pt.predict_proba(covset)\n pt.predict_proba(covset[0][np.newaxis, ...])\n\n # lower threshold\n pt = Potato(threshold=1)\n pt.fit(covset)\n\n # test positive labels\n pt = Potato(threshold=1, pos_label=2, neg_label=7)\n pt.fit(covset)\n assert_array_equal(np.unique(pt.predict(covset)), [2, 7])\n\n # test with custom positive label\n pt.fit(covset, y=[2]*20)\n\n # different positive and neg label\n with pytest.raises(ValueError):\n Potato(pos_label=0)\n", "import numpy as np\nfrom numpy.testing import assert_array_equal\nfrom pyriemann.spatialfilters import Xdawn, CSP, SPoC, BilinearFilter\nimport pytest\n\n\ndef generate_cov(Nt, Ne):\n \"\"\"Generate a set of cavariances matrices for test purpose\"\"\"\n rs = np.random.RandomState(1234)\n diags = 2.0 + 0.1 * rs.randn(Nt, Ne)\n A = 2*rs.rand(Ne, Ne) - 1\n A /= np.atleast_2d(np.sqrt(np.sum(A**2, 1))).T\n covmats = np.empty((Nt, Ne, Ne))\n for i in range(Nt):\n covmats[i] = np.dot(np.dot(A, np.diag(diags[i])), A.T)\n return covmats\n\n\ndef test_Xdawn_init():\n \"\"\"Test init of Xdawn\"\"\"\n xd = Xdawn()\n\n\ndef test_Xdawn_fit():\n \"\"\"Test Fit of Xdawn\"\"\"\n x = np.random.randn(100, 3, 10)\n labels = np.array([0, 1]).repeat(50)\n xd = Xdawn()\n xd.fit(x, labels)\n\n\ndef test_Xdawn_transform():\n \"\"\"Test transform of Xdawn\"\"\"\n x = np.random.randn(100, 3, 10)\n labels = np.array([0, 1]).repeat(50)\n xd = Xdawn()\n xd.fit(x, labels)\n xd.transform(x)\n\n\ndef test_Xdawn_baselinecov():\n \"\"\"Test cov precomputation\"\"\"\n x = np.random.randn(100, 3, 10)\n labels = np.array([0, 1]).repeat(50)\n baseline_cov = np.identity(3)\n xd = Xdawn(baseline_cov=baseline_cov)\n 
xd.fit(x, labels)\n xd.transform(x)\n\n\ndef test_CSP():\n \"\"\"Test CSP\"\"\"\n n_trials = 90\n X = generate_cov(n_trials, 3)\n labels = np.array([0, 1, 2]).repeat(n_trials // 3)\n\n # Test Init\n csp = CSP()\n assert csp.nfilter == 4\n assert csp.metric == 'euclid'\n assert csp.log\n csp = CSP(3, 'riemann', False)\n assert csp.nfilter == 3\n assert csp.metric == 'riemann'\n assert not csp.log\n\n with pytest.raises(TypeError):\n CSP('foo')\n\n with pytest.raises(ValueError):\n CSP(metric='foo')\n\n with pytest.raises(TypeError):\n CSP(log='foo')\n\n # Test fit\n csp = CSP()\n csp.fit(X, labels % 2) # two classes\n csp.fit(X, labels) # 3 classes\n\n with pytest.raises(ValueError):\n csp.fit(X, labels * 0.) # 1 class\n with pytest.raises(ValueError):\n csp.fit(X, labels[:1]) # unequal # of samples\n with pytest.raises(TypeError):\n csp.fit(X, 'foo') # y must be an array\n with pytest.raises(TypeError):\n csp.fit('foo', labels) # X must be an array\n with pytest.raises(ValueError):\n csp.fit(X[:, 0], labels)\n with pytest.raises(ValueError):\n csp.fit(X, X)\n\n assert_array_equal(csp.filters_.shape, [X.shape[1], X.shape[1]])\n assert_array_equal(csp.patterns_.shape, [X.shape[1], X.shape[1]])\n\n # Test transform\n Xt = csp.transform(X)\n assert_array_equal(Xt.shape, [len(X), X.shape[1]])\n\n with pytest.raises(TypeError):\n csp.transform('foo')\n with pytest.raises(ValueError):\n csp.transform(X[:, 1:, :]) # unequal # of chans\n\n csp.log = False\n Xt = csp.transform(X)\n\n\ndef test_Spoc():\n \"\"\"Test Spoc\"\"\"\n n_trials = 90\n X = generate_cov(n_trials, 3)\n labels = np.random.randn(n_trials)\n\n # Test Init\n spoc = SPoC()\n\n # Test fit\n spoc.fit(X, labels)\n\n\ndef test_BilinearFilter():\n \"\"\"Test Bilinear filter\"\"\"\n n_trials = 90\n X = generate_cov(n_trials, 3)\n labels = np.array([0, 1, 2]).repeat(n_trials // 3)\n filters = np.eye(3)\n # Test Init\n bf = BilinearFilter(filters)\n assert not bf.log\n with pytest.raises(TypeError):\n BilinearFilter('foo')\n\n with pytest.raises(TypeError):\n BilinearFilter(np.eye(3), log='foo')\n\n # test fit\n bf = BilinearFilter(filters)\n bf.fit(X, labels % 2)\n\n # Test transform\n Xt = bf.transform(X)\n assert_array_equal(Xt.shape, [len(X), filters.shape[0], filters.shape[0]])\n\n with pytest.raises(TypeError):\n bf.transform('foo')\n with pytest.raises(ValueError):\n bf.transform(X[:, 1:, :]) # unequal # of chans\n\n bf.log = True\n Xt = bf.transform(X)\n assert_array_equal(Xt.shape, [len(X), filters.shape[0]])\n\n filters = filters[0:2, :]\n bf = BilinearFilter(filters)\n Xt = bf.transform(X)\n assert_array_equal(Xt.shape, [len(X), filters.shape[0], filters.shape[0]])\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.random.RandomState", "numpy.sum", "numpy.diag" ], [ "numpy.array", "numpy.empty", "numpy.random.RandomState", "numpy.sum", "numpy.testing.assert_array_equal", "numpy.random.randn", "numpy.eye", "numpy.identity", "numpy.diag" ] ]
Jelso13/DancingLinksSudoku
[ "43b12afe514a055e2eb89bdff34002ca84d4531c" ]
[ "solve.py" ]
[ "import numpy as np\nimport sys\nfrom project.Sudoku import Sudoku\n\n\ndef sudoku_solver(sudoku):\n sudoku = np.array(sudoku)\n s = Sudoku()\n solExample, fExample = s.solve(sudoku.astype(int))\n x = s.returnSol(solExample, fExample)\n return x\n\nif __name__ == \"__main__\":\n args = sys.argv[1:]\n\n if len(args) < 1:\n print(\"Please provide a sudoku as a file or plain text...\")\n else:\n for arg in args:\n if arg[-4:] == \".txt\":\n s = []\n with open(arg, \"r\") as f:\n for line in f.readlines():\n s.append([int(d) for d in [c for c in line] if d.isdigit()])\n print(sudoku_solver(np.array(s)))\n else:\n print(sudoku_solver(np.reshape(np.array([int(c) for c in arg if c.isdigit()]), (-1, 9))))\n print()\n\n" ]
[ [ "numpy.array" ] ]
uhecr-project/fancy
[ "c6015b21fd88aecfb7e45f2aec438d5bda0df050" ]
[ "fancy/interfaces/integration.py" ]
[ "from re import T\nfrom scipy import integrate, interpolate\nimport h5py\nfrom tqdm import tqdm as progress_bar\n\nfrom ..detector.exposure import *\n\nfrom multiprocessing import Pool, cpu_count\n\n__all__ = ['ExposureIntegralTable']\n\n\nclass ExposureIntegralTable():\n \"\"\"\n Handles the building and storage of exposure integral tables \n that are passed to Stan to be interpolated over.\n \"\"\"\n\n # number of threads, take 3/4 so that CPU doesnt overload\n nthreads = int(cpu_count() * 0.75)\n\n\n def __init__(self, varpi=None, params=None, input_filename=None):\n \"\"\"\n Handles the building and storage of integral tables \n that are passed to Stan to be used in both simulation \n and sampling.\n \n :param kappa: an array of kappa values to evaluate the integral for\n :param varpi: an array of 3D unit vectors to pass to the integrand\n :param params: an array of other parameters to pass to the integrand\n :param input_filename: the filename to use to initialise the object\n \"\"\"\n\n self.varpi = varpi\n self.params = params\n\n self.table = []\n self.sim_table = []\n\n if input_filename != None:\n self.init_from_file(input_filename)\n\n def build_for_sim(self, kappa, alpha, B, D):\n \"\"\"\n Build the tabulated integrals to be used for simulations and posterior predictive checks.\n Save with the filename given.\n \n Expects self.kappa to be either a fixed value or a list of values of length equal to the \n total number of sources. The integral is evaluated once for each source. \n \"\"\"\n\n self.sim_kappa = kappa\n self.sim_alpha = alpha\n self.sim_B = B\n self.sim_D = D\n\n # single fixed kappa\n if isinstance(self.sim_kappa, int) or isinstance(\n self.sim_kappa, float):\n k = self.sim_kappa\n results = []\n\n for i in progress_bar(range(len(self.varpi)),\n desc='Precomputing exposure integral'):\n v = self.varpi[i]\n result, err = integrate.dblquad(integrand,\n 0,\n np.pi,\n lambda phi: 0,\n lambda phi: 2 * np.pi,\n args=(v, k, self.params))\n\n results.append(result)\n self.sim_table = results\n print()\n\n # different kappa for each source\n else:\n results = []\n for i in progress_bar(range(len(self.varpi)),\n desc='Precomputing exposure integral'):\n v = self.varpi[i]\n result, err = integrate.dblquad(integrand,\n 0,\n np.pi,\n lambda phi: 0,\n lambda phi: 2 * np.pi,\n args=(v, self.sim_kappa[i],\n self.params))\n\n results.append(result)\n\n self.sim_table = results\n print()\n\n def build_for_sim_parallel(self, kappa, alpha, B, D):\n \"\"\"\n Build the tabulated integrals to be used for simulations and posterior predictive checks.\n Save with the filename given.\n\n This parallelizes the exposure integral evaluation used to simulate events.\n \n Expects self.kappa to be either a fixed value or a list of values of length equal to the \n total number of sources. The integral is evaluated once for each source. 
\n \"\"\"\n\n self.sim_kappa = kappa\n self.sim_alpha = alpha\n self.sim_B = B\n self.sim_D = D\n\n # single fixed kappa\n if isinstance(self.sim_kappa, int) or isinstance(\n self.sim_kappa, float):\n k = self.sim_kappa\n\n args = [(v, k, self.params) for v in self.varpi]\n\n with Pool(self.nthreads) as mpool:\n results = list(progress_bar(\n mpool.imap(self.eps_per_source_sim, args), total=len(self.varpi),\n desc='Precomputing exposure integral'\n )) \n\n self.sim_table = results\n print()\n\n # different kappa for each source\n else:\n args = [(v, k, self.params) for v, k in zip(self.varpi, self.sim_kappa)]\n\n with Pool(self.nthreads) as mpool:\n results = list(progress_bar(\n mpool.imap(self.eps_per_source_sim, args), total=len(self.varpi),\n desc='Precomputing exposure integral'\n )) \n \n self.sim_table = results\n print()\n\n def eps_per_source_sim(self, args):\n '''\n The exposure integral using the source direction and simulated magnetic deflections\n for each source. \n\n :param: args : tuple containing source unit vector, simulated kappa, and self.params\n '''\n result, err = integrate.dblquad(integrand,\n 0,\n np.pi,\n lambda phi: 0,\n lambda phi: 2 * np.pi,\n args=args)\n return result\n\n def build_for_fit(self, kappa):\n \"\"\"\n Build the tabulated integrals to be interpolated over in the fit.\n Save with filename given.\n \n Expects self.kappa to be a list of kappa values to evaluate the integral for, \n for each source individually.\n \"\"\"\n\n self.kappa = kappa\n for i in progress_bar(range(len(self.varpi)),\n desc='Precomputing exposure integral'):\n v = self.varpi[i]\n\n results = []\n for k in self.kappa:\n result, err = integrate.dblquad(integrand,\n 0,\n np.pi,\n lambda phi: 0,\n lambda phi: 2 * np.pi,\n args=(v, k, self.params))\n\n results.append(result)\n self.table.append(np.asarray(results))\n print()\n self.table = np.asarray(self.table)\n\n def eps_per_source(self, v):\n '''\n Evaluate exposure integral per source. 
This corresponds to the inner for loop\n that contains the double integral evaluation for each kappa.\n\n :param: v : source unit vector\n '''\n\n results = []\n for k in self.kappa:\n result, err = integrate.dblquad(integrand,\n 0,\n np.pi,\n lambda phi: 0,\n lambda phi: 2 * np.pi,\n args=(v, k, self.params))\n\n results.append(result)\n\n return results\n\n def build_for_fit_parallel(self, kappa):\n \"\"\"\n Build the tabulated integrals to be interpolated over in the fit.\n Save with filename given.\n\n This is the parallelized version, using multiprocessing over each source\n in the provided source catalogue.\n\n For SBG, runtime decreases from 30 min -> 1.5 min with ~28 cores\n \n Expects self.kappa to be a list of kappa values to evaluate the integral for, \n for each source individually.\n \"\"\"\n\n self.kappa = kappa\n\n with Pool(self.nthreads) as mpool:\n results = list(progress_bar(\n mpool.imap(self.eps_per_source, self.varpi), total=len(self.varpi),\n desc='Precomputing exposure integral'\n ))\n self.table.append(np.asarray(results))\n print()\n self.table = np.asarray(self.table)\n\n def init_from_file(self, input_filename):\n \"\"\"\n Initialise the object from the given file.\n \"\"\"\n\n with h5py.File(input_filename, 'r') as f:\n\n self.varpi = f['varpi'][()]\n self.params = f['params'][()]\n self.kappa = f['main']['kappa'][()]\n self.table = f['main']['table'][()]\n\n if f['simulation']['kappa'][()] is not h5py.Empty('f'):\n try:\n self.sim_kappa = f['simulation']['kappa'][()]\n self.sim_table = f['simulation']['table'][()]\n self.sim_alpha = f['simulation']['alpha'][()]\n self.sim_B = f['simulation']['B'][()]\n self.sim_D = f['simulation']['D'][()]\n except:\n print(\"skipped simulation values\")\n\n def save(self, output_filename):\n \"\"\"\n Save the computed integral table(s) to a HDF5 file \n for later use as inputs.\n \n If no table is found, create an empty dataset.\n \n :param output_filename: the name of the file to write to \n \"\"\"\n\n with h5py.File(output_filename, 'w') as f:\n\n # common params\n f.create_dataset('varpi', data=self.varpi)\n f.create_dataset('params', data=self.params)\n\n # main interpolation table\n main = f.create_group('main')\n if self.table != []:\n main.create_dataset('kappa', data=self.kappa)\n main.create_dataset('table', data=self.table)\n else:\n main.create_dataset('kappa', data=h5py.Empty('f'))\n main.create_dataset('table', data=h5py.Empty('f'))\n\n # simulation table\n sim = f.create_group('simulation')\n if self.sim_table != []:\n sim.create_dataset('kappa', data=self.sim_kappa)\n sim.create_dataset('table', data=self.sim_table)\n sim.create_dataset('alpha', data=self.sim_alpha)\n sim.create_dataset('B', data=self.sim_B)\n sim.create_dataset('D', data=self.sim_D)\n else:\n sim.create_dataset('kappa', data=h5py.Empty('f'))\n sim.create_dataset('table', data=h5py.Empty('f'))\n sim.create_dataset('alpha', data=h5py.Empty('f'))\n sim.create_dataset('B', data=h5py.Empty('f'))\n sim.create_dataset('D', data=h5py.Empty('f'))\n" ]
[ [ "scipy.integrate.dblquad" ] ]
WENGIF/youmin_textclassifier
[ "15410aaba009019ec387a8e64aec4734ae396922" ]
[ "youmin_textclassifier/models/general.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\" 常规的算法,如lr、bayes等 \"\"\"\n\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.externals import joblib\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB, MultinomialNB\nfrom sklearn.svm import SVC\n\n\nclass GeneralModel:\n def __init__(self, name=\"lr\", model_params=None):\n self.model_params = model_params if model_params else {}\n self._name = name\n self.models = {\n \"mnb\": MultinomialNB, \n \"gnb\": GaussianNB, \n \"svm\": SVC,\n \"rf\": RandomForestClassifier,\n \"lr\": LogisticRegression,\n }\n\n def train(self, input_x, input_y, model_path):\n \"\"\"\n Args:\n input_x -- scipy.sparse.csr_matrix 或 numpy.matrix,如[array(100), array(100), ]\n input_y -- 统一预留参数,[\"label1\", \"label2\", ...]\n model_path -- 模型文件导出路径\n Returns:\n clf.get_params() -- 分类器训练参数字典\n \"\"\"\n self.clf = self.models[self._name](**self.model_params)\n self.clf.fit(input_x, input_y)\n joblib.dump(self.clf, model_path)\n return self.clf.get_params()\n\n def test(self, input_x, input_y, min_proba=0):\n \"\"\"\n Args:\n input_x, input_y -- 同self.train\n Kwargs:\n min_proba -- 测试概率阈值\n Returns:\n yt -- 真实标签列表,[\"label1\", \"label2\",]\n yp -- 预测标签列表,[\"label1\", \"label2\",]\n \"\"\"\n y_proba = self.clf.predict_proba(input_x)\n test_result = []\n for _r, _yp in enumerate(y_proba):\n maxv_ix = np.argmax(_yp)\n if _yp[maxv_ix] >= min_proba:\n test_result.append((input_y[_r], self.clf.classes_[maxv_ix]))\n return zip(*test_result)\n\n def load(self, model_path):\n self.clf = joblib.load(model_path)\n\n def predict(self, input_x):\n \"\"\"\n Args:\n input_x -- 同self.train\n Returns:\n predict_result -- [(label, proba),]\n \"\"\"\n y_val_proba = self.clf.predict_proba(input_x)\n maxv_ixs = np.argmax(y_val_proba, axis=1)\n predict_result = list(zip(self.clf.classes_[maxv_ixs],\n np.max(y_val_proba, axis=1)))\n return predict_result\n" ]
[ [ "sklearn.externals.joblib.load", "sklearn.externals.joblib.dump", "numpy.max", "numpy.argmax" ] ]
eric8607242/darts
[ "34c79a0956039f56a6a87bfb7f4b1ae2af615bea" ]
[ "cnn/donas_utils/optim.py" ]
[ "import logging \n\nimport torch\nimport torch.nn as nn\n\ndef get_lr_scheduler(optimizer, step_per_epoch, CONFIG):\n if CONFIG.lr_scheduler == \"cosine\":\n lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=step_per_epoch*CONFIG.epochs)\n elif CONFIG.lr_scheduler == \"step\":\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=CONFIG.step_size, gamma=CONFIG.decay_ratio, last_epoch=-1)\n\n return lr_scheduler\n \n\n\ndef get_optimizer(model, CONFIG, log_info=\"\"):\n if CONFIG.optimizer == \"sgd\":\n logging.info(\"{} optimizer: SGD\".format(log_info))\n optimizer = torch.optim.SGD(params=model.parameters(),\n lr=CONFIG.lr,\n momentum=CONFIG.momentum,\n weight_decay=CONFIG.weight_decay)\n\n elif CONFIG.optimizer == \"rmsprop\":\n logging.info(\"{} optimizer: RMSprop\".format(log_info))\n optimizer = torch.optim.RMSprop(model.parameters(),\n lr=CONFIG.lr,\n alpha=CONFIG.alpha,\n momentum=CONFIG.momentum,\n weight_decay=CONFIG.weight_decay)\n elif CONFIG.optimizer == \"adam\":\n logging.info(\"{} optimizer: Adam\".format(log_info))\n optimizer = torch.optim.Adam(model.parameters(),\n weight_decay=CONFIG.weight_decay,\n lr=CONFIG.lr,\n betas=(CONFIG.beta, 0.999))\n\n\n\n return optimizer\n\n\ndef cal_hc_loss(generate_hc, target_hc, alpha, loss_penalty):\n if generate_hc > target_hc + 0.1:\n return (generate_hc-target_hc)**2 * alpha * loss_penalty\n elif generate_hc < target_hc - 0.1:\n return (target_hc-generate_hc)**2 * alpha\n else:\n return torch.tensor(0)\n\nclass CrossEntropyLossSoft(torch.nn.modules.loss._Loss):\n def forward(self, output, target):\n output_log_prob = torch.nn.functional.log_softmax(output, dim=1)\n target = target.unsqueeze(1)\n output_log_prob = output_log_prob.unsqueeze(2)\n cross_entropy_loss = -torch.bmm(target, output_log_prob)\n return torch.mean(cross_entropy_loss)\n" ]
[ [ "torch.optim.lr_scheduler.StepLR", "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.bmm", "torch.nn.functional.log_softmax", "torch.tensor", "torch.mean" ] ]
ArjunVarma39/Tensorflow-2.0-Computer-Vision-Cookbook
[ "92ea6713f664cff9eccaaccea8ac756f808e2066" ]
[ "ch8/recipe2/unet.py" ]
[ "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_datasets as tfdata\nimport tensorflow_docs as tfdocs\nimport tensorflow_docs.plots\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.losses import \\\n SparseCategoricalCrossentropy\nfrom tensorflow.keras.models import *\nfrom tensorflow.keras.optimizers import RMSprop\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n\n\ndef normalize(input_image, input_mask):\n input_image = tf.cast(input_image, tf.float32) / 255.0\n input_mask -= 1\n\n return input_image, input_mask\n\n\[email protected]\ndef load_image(dataset_element, train=True):\n input_image = tf.image.resize(dataset_element['image'],\n (256, 256))\n input_mask = tf.image.resize(\n dataset_element['segmentation_mask'], (256, 256))\n\n if train and np.random.uniform() > 0.5:\n input_image = tf.image.flip_left_right(input_image)\n input_mask = tf.image.flip_left_right(input_mask)\n\n input_image, input_mask = normalize(input_image,\n input_mask)\n\n return input_image, input_mask\n\n\nclass UNet(object):\n def __init__(self,\n input_size=(256, 256, 3),\n output_channels=3):\n self.input_size = input_size\n self.output_channels = output_channels\n\n self.model = self._create_model()\n\n loss = SparseCategoricalCrossentropy(from_logits=True)\n self.model.compile(optimizer=RMSprop(),\n loss=loss,\n metrics=['accuracy'])\n\n @staticmethod\n def _downsample(filters, size, batch_norm=True):\n initializer = tf.random_normal_initializer(0.0, 0.02)\n\n layers = Sequential()\n layers.add(Conv2D(filters=filters,\n kernel_size=size,\n strides=2,\n padding='same',\n kernel_initializer=initializer,\n use_bias=False))\n\n if batch_norm:\n layers.add(BatchNormalization())\n\n layers.add(LeakyReLU())\n\n return layers\n\n @staticmethod\n def _upsample(filters, size, dropout=False):\n init = tf.random_normal_initializer(0.0, 0.02)\n\n layers = Sequential()\n layers.add(Conv2DTranspose(filters=filters,\n kernel_size=size,\n strides=2,\n padding='same',\n kernel_initializer=init,\n use_bias=False))\n\n layers.add(BatchNormalization())\n\n if dropout:\n layers.add(Dropout(rate=0.5))\n\n layers.add(ReLU())\n\n return layers\n\n def _create_model(self):\n down_stack = [self._downsample(64, 4,\n batch_norm=False)]\n for filters in (128, 256, 512, 512, 512, 512, 512):\n down_block = self._downsample(filters, 4)\n down_stack.append(down_block)\n\n up_stack = []\n for _ in range(3):\n up_block = self._upsample(512, 4, dropout=True)\n up_stack.append(up_block)\n\n for filters in (512, 256, 128, 64):\n up_block = self._upsample(filters, 4)\n up_stack.append(up_block)\n\n inputs = Input(shape=self.input_size)\n x = inputs\n\n skip_layers = []\n for down in down_stack:\n x = down(x)\n skip_layers.append(x)\n\n skip_layers = reversed(skip_layers[:-1])\n\n for up, skip_connection in zip(up_stack, skip_layers):\n x = up(x)\n x = Concatenate()([x, skip_connection])\n\n init = tf.random_normal_initializer(0.0, 0.02)\n output = Conv2DTranspose(\n filters=self.output_channels,\n kernel_size=3,\n strides=2,\n padding='same',\n kernel_initializer=init)(x)\n\n return Model(inputs, outputs=output)\n\n @staticmethod\n def _plot_model_history(model_history, metric, ylim=None):\n plt.style.use('seaborn-darkgrid')\n plotter = tfdocs.plots.HistoryPlotter()\n plotter.plot({'Model': model_history}, metric=metric)\n\n plt.title(f'{metric.upper()}')\n if ylim is None:\n plt.ylim([0, 1])\n else:\n plt.ylim(ylim)\n\n plt.savefig(f'{metric}.png')\n plt.close()\n\n def 
train(self, train_dataset, epochs, steps_per_epoch,\n validation_dataset, validation_steps):\n hist = \\\n self.model.fit(train_dataset,\n epochs=epochs,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps,\n validation_data=validation_dataset)\n\n self._plot_model_history(hist, 'loss', [0., 2.0])\n self._plot_model_history(hist, 'accuracy')\n\n @staticmethod\n def _process_mask(mask):\n mask = (mask.numpy() * 127.5).astype('uint8')\n mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)\n\n return mask\n\n def _save_image_and_masks(self, image,\n ground_truth_mask,\n prediction_mask,\n image_id):\n image = (image.numpy() * 255.0).astype('uint8')\n gt_mask = self._process_mask(ground_truth_mask)\n pred_mask = self._process_mask(prediction_mask)\n\n mosaic = np.hstack([image, gt_mask, pred_mask])\n mosaic = cv2.cvtColor(mosaic, cv2.COLOR_RGB2BGR)\n\n cv2.imwrite(f'mosaic_{image_id}.jpg', mosaic)\n\n @staticmethod\n def _create_mask(prediction_mask):\n prediction_mask = tf.argmax(prediction_mask, axis=-1)\n prediction_mask = prediction_mask[..., tf.newaxis]\n\n return prediction_mask[0]\n\n def _save_predictions(self, dataset, sample_size=1):\n for id, (image, mask) in \\\n enumerate(dataset.take(sample_size), start=1):\n pred_mask = self.model.predict(image)\n pred_mask = self._create_mask(pred_mask)\n\n image = image[0]\n ground_truth_mask = mask[0]\n\n self._save_image_and_masks(image,\n ground_truth_mask,\n pred_mask,\n image_id=id)\n\n def evaluate(self, test_dataset, sample_size=5):\n result = self.model.evaluate(test_dataset)\n print(f'Accuracy: {result[1] * 100:.2f}%')\n\n self._save_predictions(test_dataset, sample_size)\n\n\ndataset, info = tfdata.load('oxford_iiit_pet',\n with_info=True)\n\nTRAIN_SIZE = info.splits['train'].num_examples\nVALIDATION_SIZE = info.splits['test'].num_examples\nBATCH_SIZE = 64\nSTEPS_PER_EPOCH = TRAIN_SIZE // BATCH_SIZE\n\nVALIDATION_SUBSPLITS = 5\nVALIDATION_STEPS = VALIDATION_SIZE // BATCH_SIZE\nVALIDATION_STEPS //= VALIDATION_SUBSPLITS\n\nBUFFER_SIZE = 1000\ntrain_dataset = (dataset['train']\n .map(load_image, num_parallel_calls=AUTOTUNE)\n .cache()\n .shuffle(BUFFER_SIZE)\n .batch(BATCH_SIZE)\n .repeat()\n .prefetch(buffer_size=AUTOTUNE))\ntest_dataset = (dataset['test']\n .map(lambda d: load_image(d, train=False),\n num_parallel_calls=AUTOTUNE)\n .batch(BATCH_SIZE))\n\nunet = UNet()\nunet.train(train_dataset,\n epochs=50,\n steps_per_epoch=STEPS_PER_EPOCH,\n validation_steps=VALIDATION_STEPS,\n validation_dataset=test_dataset)\n\nunet.evaluate(test_dataset)\n" ]
[ [ "matplotlib.pyplot.savefig", "tensorflow.argmax", "matplotlib.pyplot.ylim", "matplotlib.pyplot.close", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "numpy.random.uniform", "matplotlib.pyplot.style.use", "tensorflow.keras.optimizers.RMSprop", "numpy.hstack", "tensorflow.image.resize", "tensorflow.image.flip_left_right", "tensorflow.cast", "tensorflow.random_normal_initializer" ] ]
WoodResourcesGroup/EPIC_AllPowerLabs
[ "bf3240672f02fa93243cb2241e9c49249ce710aa" ]
[ "px2db.py" ]
[ "#!/usr/bin/env python\n\nimport pandas as pd\nimport cec_utils as ut\nimport sys\n\nsrc_file = sys.argv[1]\ndbname = sys.argv[2]\nsch = sys.argv[3]\ntblname = sys.argv[4]\n\neng = ut.dbconfig(dbname)\ndata = pd.read_csv(src_file)\ndata.columns = [i.lower() for i in data.columns]\ndata.to_sql(tblname, eng, schema=sch, if_exists=\"replace\")\n" ]
[ [ "pandas.read_csv" ] ]
robertbsnook/booking_quote
[ "e6a8a97a82daf53fdf3b5b64450aed035a7eda12" ]
[ "src/booking/Package_booking.py" ]
[ "#!/usr/bin/env python\nimport datetime\nimport pandas as pd\nfrom tabulate import tabulate\n\n\nclass TravelRoute:\n def __init__(self, destination, dangerous, urgency, dimension, weight):\n self.destination = destination\n self.dangerous = dangerous\n self.urgency = urgency\n self.dimension = float(dimension)\n self.weight = float(weight)\n\n def __str__(self):\n return str(self.charge)\n\n\nclass Air(TravelRoute):\n def __init__(self, destination, dangerous, urgency, dimension, weight):\n super().__init__(destination, dangerous, urgency, dimension, weight)\n\n def charge(self):\n if self.dangerous == \"unsafe\":\n return 0\n elif self.weight*10 > self.dimension*20:\n return float(self.weight*10)\n else:\n return float(self.dimension * 20)\n\n\nclass Truck(TravelRoute):\n def __init__(self, destination, dangerous, urgency, dimension, weight):\n super().__init__(destination, dangerous, urgency, dimension, weight)\n\n def charge(self):\n if self.destination == 'overseas':\n return 0\n elif self.urgency == 'urgent':\n return 45\n else:\n return 25\n\n\nclass Boat(TravelRoute):\n def __init__(self, destination, dangerous, urgency, dimension, weight):\n super().__init__(destination, dangerous, urgency, dimension, weight)\n\n def charge(self):\n if self.destination == 'in-country':\n return 0\n elif self.urgency == 'urgent':\n return 0\n else:\n return 30\n\n\n\n\ndef urgent_check(delivery_date):\n future = datetime.datetime.today() + datetime.timedelta(days=3)\n if delivery_date > future:\n return \"not urgent\"\n else:\n return \"urgent\"\n\n\ndef destination_check():\n while True:\n dest = input(\"Is this package remaining in (c)ountry, or (o)verseas: \").lower()\n if dest == 'c':\n return 'in-country'\n elif dest == 'o':\n return 'overseas'\n else:\n print(\"Use 'c' or 'o'.\")\n\n\ndef danger_check():\n while True:\n danger = input(\"Does the package contain anything dangerous (y/n): \").lower()\n if danger == 'n':\n return 'Safe'\n elif danger == 'y':\n return 'unsafe'\n else:\n print(\"Is it safe or unsafe? 
(y/n)\")\n\n\ndef next_customer():\n next_c = input(\"Is there another customer: (y/n)\").lower()\n if next_c == 'y':\n return True\n else:\n return False\n\n\ndef delivery_options(destination, dangerous, urgency, dimension, weight):\n options = {}\n air_option = Air(destination, dangerous, urgency, dimension, weight)\n truck_option = Truck(destination, dangerous, urgency, dimension, weight)\n boat_option = Boat(destination, dangerous, urgency, dimension, weight)\n if air_option.charge() > 0.0:\n options['Air'] = air_option.charge()\n if truck_option.charge() > 0.0:\n options['Truck'] = truck_option.charge()\n if boat_option.charge() > 0.0:\n options['Boat'] = boat_option.charge()\n df2 = pd.DataFrame(list(options.items()), columns=['Option', 'Cost'])\n print(tabulate(df2, tablefmt='psql'))\n selection = 0\n while selection == 0:\n try:\n delivery_choice = input(\"Choose the delivery method:\")\n delivery_choice = int(delivery_choice)\n\n if delivery_choice < 0 or delivery_choice > df2.last_valid_index():\n print(\"Please select a valid method of transport\")\n else:\n selection = 1\n except ValueError:\n print('Please enter a valid shipping option')\n df2_option = df2.at[delivery_choice, 'Option']\n df2_cost = df2.at[delivery_choice, 'Cost']\n return df2_option, df2_cost\n\n\ndef print_customer(df):\n row = df.tail(1).transpose()\n print(\"Order ID:\", df.last_valid_index())\n print(tabulate(row, tablefmt='psql'))\n\n\ndef get_name():\n while True:\n try:\n name = input(\"Please enter customer name: \")\n if not name:\n raise ValueError(\"Please enter a valid name. Cannot be blank\")\n else:\n break\n except ValueError as e:\n print(e)\n return name\n\n\ndef get_description():\n while True:\n try:\n description = input(\"General description of package: \")\n if not description:\n raise ValueError(\"Please enter a description. Cannot be blank\")\n else:\n break\n except ValueError as e:\n print(e)\n return description\n\n\ndef get_delivery_date():\n day = 0\n while day == 0:\n d_date = input(\"When do they want the package to arrive: yyyy/dd/mm \")\n try:\n d_date = datetime.datetime.strptime(d_date, '%Y/%m/%d')\n if d_date <= datetime.datetime.today():\n print(\"Please enter a delivery date at least one day in advance.\")\n else:\n day = 1\n except ValueError:\n print(\"Incorrect date format, should be YYYY/MM/DD.\")\n return d_date\n\n\ndef get_dimensions():\n print(\"Minimum dimension size is 0.1 meter.\\n \"\n \"Anything smaller should be rounded up to 0.1.\\n\"\n \"Minimum overall size is 0.5m\")\n while True:\n try:\n length = float(input(\"L: \"))\n if not length:\n raise ValueError(\"Please enter a length.\")\n elif length < 0.1:\n print(\"Please enter a dimension greater than 0.0999.\")\n else:\n break\n except ValueError as e:\n print(e)\n while True:\n try:\n width = float(input(\"W: \"))\n if not width:\n raise ValueError(\"Please enter a width.\")\n elif width < 0.1:\n print(\"Please enter a dimension greater than 0.0999.\")\n else:\n break\n except ValueError as e:\n print(e)\n while True:\n try:\n height = float(input(\"H: \"))\n if not height:\n raise ValueError(\"Please enter a height.\")\n elif height < 0.1:\n print(\"Please enter a dimension greater than 0.0999.\")\n else:\n break\n except ValueError as e:\n print(e)\n if length*width*height < 0.5:\n dimension = 0.5\n else:\n dimension = length*width*height\n return dimension\n\n\ndef size_check(dimension):\n if dimension > 124.999:\n print(\"Sorry, but this package is too large to be shipped by our methods. 
Please reduce the size to less \"\n \"than 5x5x5\")\n return False\n else:\n return True\n\n\ndef get_weight():\n while True:\n try:\n weight = float(input(\"How many kilograms does it weigh: \"))\n if not weight:\n raise ValueError(\"Please enter a weight. Cannot be blank\")\n elif weight <= 0:\n print(\"Please enter a positive weight.\")\n else:\n break\n except ValueError as e:\n print(e)\n return weight\n\n\ndef weight_check(weight):\n if weight > 9.999:\n print(\"Sorry, but this package weighs too much. Please reduce the weight to under 10kg\")\n return False\n else:\n return True\n\n\ndef main():\n customer = True\n while customer:\n customer_name = get_name()\n destination = destination_check()\n package_desc = get_description()\n dangerous = danger_check()\n delivery_date = get_delivery_date()\n urgency = urgent_check(delivery_date)\n weight = get_weight()\n weight_check(weight)\n dimension = get_dimensions()\n df = pd.read_csv('booking_quotes.csv', index_col=0)\n df.index.name = 'ID'\n df = df.reset_index(drop=True)\n new_row = {'Customer_Name': customer_name.title(),\n 'Destination': destination,\n 'Package_desc': package_desc,\n 'Dangerous': dangerous,\n 'Delivery_date': delivery_date.date(),\n 'Urgency': urgency,\n 'Weight': weight,\n 'Size': round(dimension,2),\n 'Shipping_option': '',\n 'Cost': ''}\n df = df.append(new_row, ignore_index=True)\n print_customer(df)\n d_option, d_cost = delivery_options(destination, dangerous, urgency, dimension, weight)\n df.at[df.last_valid_index(), 'Shipping_option'] = d_option\n df.at[df.last_valid_index(), 'Cost'] = d_cost\n df.to_csv('booking_quotes.csv', index=True)\n print_customer(df)\n customer = next_customer()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_csv" ] ]
katherinelamb/data
[ "cb0cdcd73bc06f13e94b4b02fbd0203cc972560e" ]
[ "scripts/india_plfs/daily_wage_data/preprocess.py" ]
[ "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Classes and methods to import Average wage/salary earnings from Periodic Labour Force Survey (PLFS)\"\"\"\n\n__author__ = [\"Thejesh GN <[email protected]>\"]\n\nimport os\nimport json\nimport csv\nimport pandas as pd\nimport numpy as np\nimport urllib.request\nfrom os import path\n\nINDIA_ISO_CODES = {\n \"Andhra Pradesh\": \"IN-AP\",\n \"Arunachal Pradesh\": \"IN-AR\",\n \"Assam\": \"IN-AS\",\n \"Bihar\": \"IN-BR\",\n \"Chattisgarh\": \"IN-CT\",\n \"Chhattisgarh\": \"IN-CT\",\n \"Goa\": \"IN-GA\",\n \"Gujarat\": \"IN-GJ\",\n \"Haryana\": \"IN-HR\",\n \"Himachal Pradesh\": \"IN-HP\",\n \"Jharkhand\": \"IN-JH\",\n \"Jharkhand#\": \"IN-JH\",\n \"Karnataka\": \"IN-KA\",\n \"Kerala\": \"IN-KL\",\n \"Madhya Pradesh\": \"IN-MP\",\n \"Madhya Pradesh#\": \"IN-MP\",\n \"Maharashtra\": \"IN-MH\",\n \"Manipur\": \"IN-MN\",\n \"Meghalaya\": \"IN-ML\",\n \"Mizoram\": \"IN-MZ\",\n \"Nagaland\": \"IN-NL\",\n \"Nagaland#\": \"IN-NL\",\n \"Odisha\": \"IN-OR\",\n \"Punjab\": \"IN-PB\",\n \"Rajasthan\": \"IN-RJ\",\n \"Sikkim\": \"IN-SK\",\n \"Tamil Nadu\": \"IN-TN\",\n \"Tamilnadu\": \"IN-TN\",\n \"Telengana\": \"IN-TG\",\n \"Telangana\": \"IN-TG\",\n \"Tripura\": \"IN-TR\",\n \"Uttarakhand\": \"IN-UT\",\n \"Uttar Pradesh\": \"IN-UP\",\n \"West Bengal\": \"IN-WB\",\n \"Andaman and Nicobar Islands\": \"IN-AN\",\n \"Andaman & Nicobar Islands\": \"IN-AN\",\n \"Andaman & N. 
Island\": \"IN-AN\",\n \"A & N Islands\": \"IN-AN\",\n \"Chandigarh\": \"IN-CH\",\n \"Dadra and Nagar Haveli\": \"IN-DN\",\n \"Dadra & Nagar Haveli\": \"IN-DN\",\n \"Dadar Nagar Haveli\": \"IN-DN\",\n \"Daman and Diu\": \"IN-DD\",\n \"Daman & Diu\": \"IN-DD\",\n \"Delhi\": \"IN-DL\",\n \"Jammu and Kashmir\": \"IN-JK\",\n \"Jammu & Kashmir\": \"IN-JK\",\n \"Ladakh\": \"IN-LA\",\n \"Lakshadweep\": \"IN-LD\",\n \"Lakshwadeep\": \"IN-LD\",\n \"Pondicherry\": \"IN-PY\",\n \"Puducherry\": \"IN-PY\",\n \"Puduchery\": \"IN-PY\",\n \"Dadra and Nagar Haveli and Daman and Diu\": \"IN-DH\",\n \"Telangana\": \"IN-TG\",\n \"all India\": \"IN\",\n \"all-India\": \"IN\",\n}\n\nDATASETS = [\n {\n \"period\": \"2017-07\",\n \"data_file\": \"Table_43_07_09_2017\"\n },\n {\n \"period\": \"2017-10\",\n \"data_file\": \"Table_43_10_12_2017\"\n },\n {\n \"period\": \"2018-01\",\n \"data_file\": \"Table_43_01_03_2018\"\n },\n {\n \"period\": \"2018-04\",\n \"data_file\": \"Table_43_04_06_2018\"\n },\n {\n \"period\": \"2018-07\",\n \"data_file\": \"Table_43_07_09_2018\"\n },\n {\n \"period\": \"2018-10\",\n \"data_file\": \"Table_43_10_12_2018\"\n },\n {\n \"period\": \"2019-01\",\n \"data_file\": \"Table_43_01_03_2019\"\n },\n {\n \"period\": \"2019-04\",\n \"data_file\": \"Table_43_04_06_2019\"\n },\n]\n\n\nclass PLFSDailyWageDataLoader:\n COLUMN_HEADERS = [\n \"period\",\n \"territory\",\n \"wage_rural_male\",\n \"wage_rural_female\",\n \"wage_rural_person\",\n \"wage_urban_male\",\n \"wage_urban_female\",\n \"wage_urban_person\",\n \"wage_total_male\",\n \"wage_total_female\",\n \"wage_total_person\",\n ]\n\n def __init__(self, source, period):\n self.source = source\n self.period = period\n self.raw_df = None\n self.clean_df = None\n\n def load(self):\n df = pd.read_excel(self.source)\n # Drop title rows in the top and rows after 41.\n # The actual data is between 4nd and 41st row. 
So keep only them.\n df = df.iloc[4:41]\n self.raw_df = df\n\n def _setup_location(self):\n self.clean_df[\"territory\"] = self.clean_df[\"territory\"].apply(\n lambda x: INDIA_ISO_CODES[x])\n\n def _make_column_numerical(self, column):\n self.clean_df[column] = self.clean_df[column].astype(str).str.replace(\n \",\", \"\")\n self.clean_df[column] = pd.to_numeric(self.clean_df[column])\n\n def process(self):\n # Set the date or period\n self.clean_df = self.raw_df\n self.clean_df.insert(loc=0, column=\"period\", value=self.period)\n\n # Rename columns\n self.clean_df.columns = self.COLUMN_HEADERS\n\n self._make_column_numerical(\"wage_rural_male\")\n self._make_column_numerical(\"wage_rural_female\")\n self._make_column_numerical(\"wage_rural_person\")\n self._make_column_numerical(\"wage_urban_male\")\n self._make_column_numerical(\"wage_urban_female\")\n self._make_column_numerical(\"wage_urban_person\")\n self._make_column_numerical(\"wage_total_male\")\n self._make_column_numerical(\"wage_total_female\")\n self._make_column_numerical(\"wage_total_person\")\n\n # Setup place ISO codes\n self._setup_location()\n\n def save(self, csv_file_path):\n if path.exists(csv_file_path):\n # If the file exists then append to the same\n self.clean_df.to_csv(csv_file_path,\n mode='a',\n index=False,\n header=False)\n else:\n self.clean_df.to_csv(csv_file_path, index=False, header=True)\n\n\ndef main():\n \"\"\"Runs the program.\"\"\"\n\n # If the final output csv already exists\n # Remove it, so it can be regenerated\n csv_file_path = os.path.join(os.path.dirname(__file__),\n \"./PLFSDailyWageData_India.csv\")\n if path.exists(csv_file_path):\n os.remove(csv_file_path)\n\n for dataset in DATASETS:\n period = dataset[\"period\"]\n data_file = dataset[\"data_file\"]\n data_file_path = os.path.join(\n os.path.dirname(__file__),\n \"data/{data_file}.xlsx\".format(data_file=data_file),\n )\n loader = PLFSDailyWageDataLoader(data_file_path, period)\n loader.load()\n loader.process()\n loader.save(csv_file_path)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_excel", "pandas.to_numeric" ] ]
astrojuanlu/d3rlpy
[ "e27852664647b7774f56ec775437b0ca73a24f3f" ]
[ "d3rlpy/algos/torch/dqn_impl.py" ]
[ "import copy\nfrom typing import Optional, Sequence\n\nimport numpy as np\nimport torch\nfrom torch.optim import Optimizer\n\nfrom ...gpu import Device\nfrom ...models.builders import create_discrete_q_function\nfrom ...models.encoders import EncoderFactory\nfrom ...models.optimizers import OptimizerFactory\nfrom ...models.q_functions import QFunctionFactory\nfrom ...models.torch import EnsembleDiscreteQFunction, EnsembleQFunction\nfrom ...preprocessing import Scaler\nfrom ...torch_utility import TorchMiniBatch, hard_sync, torch_api, train_api\nfrom .base import TorchImplBase\nfrom .utility import DiscreteQFunctionMixin\n\n\nclass DQNImpl(DiscreteQFunctionMixin, TorchImplBase):\n\n _learning_rate: float\n _optim_factory: OptimizerFactory\n _encoder_factory: EncoderFactory\n _q_func_factory: QFunctionFactory\n _gamma: float\n _n_critics: int\n _target_reduction_type: str\n _use_gpu: Optional[Device]\n _q_func: Optional[EnsembleDiscreteQFunction]\n _targ_q_func: Optional[EnsembleDiscreteQFunction]\n _optim: Optional[Optimizer]\n\n def __init__(\n self,\n observation_shape: Sequence[int],\n action_size: int,\n learning_rate: float,\n optim_factory: OptimizerFactory,\n encoder_factory: EncoderFactory,\n q_func_factory: QFunctionFactory,\n gamma: float,\n n_critics: int,\n target_reduction_type: str,\n use_gpu: Optional[Device],\n scaler: Optional[Scaler],\n ):\n super().__init__(\n observation_shape=observation_shape,\n action_size=action_size,\n scaler=scaler,\n action_scaler=None,\n )\n self._learning_rate = learning_rate\n self._optim_factory = optim_factory\n self._encoder_factory = encoder_factory\n self._q_func_factory = q_func_factory\n self._gamma = gamma\n self._n_critics = n_critics\n self._target_reduction_type = target_reduction_type\n self._use_gpu = use_gpu\n\n # initialized in build\n self._q_func = None\n self._targ_q_func = None\n self._optim = None\n\n def build(self) -> None:\n # setup torch models\n self._build_network()\n\n # setup target network\n self._targ_q_func = copy.deepcopy(self._q_func)\n\n if self._use_gpu:\n self.to_gpu(self._use_gpu)\n else:\n self.to_cpu()\n\n # setup optimizer after the parameters move to GPU\n self._build_optim()\n\n def _build_network(self) -> None:\n self._q_func = create_discrete_q_function(\n self._observation_shape,\n self._action_size,\n self._encoder_factory,\n self._q_func_factory,\n n_ensembles=self._n_critics,\n )\n\n def _build_optim(self) -> None:\n assert self._q_func is not None\n self._optim = self._optim_factory.create(\n self._q_func.parameters(), lr=self._learning_rate\n )\n\n @train_api\n @torch_api(scaler_targets=[\"obs_t\", \"obs_tpn\"])\n def update(self, batch: TorchMiniBatch) -> np.ndarray:\n assert self._optim is not None\n\n self._optim.zero_grad()\n\n q_tpn = self.compute_target(batch)\n\n loss = self.compute_loss(batch, q_tpn)\n\n loss.backward()\n self._optim.step()\n\n return loss.cpu().detach().numpy()\n\n def compute_loss(\n self,\n batch: TorchMiniBatch,\n q_tpn: torch.Tensor,\n ) -> torch.Tensor:\n assert self._q_func is not None\n return self._q_func.compute_error(\n obs_t=batch.observations,\n act_t=batch.actions.long(),\n rew_tp1=batch.next_rewards,\n q_tp1=q_tpn,\n ter_tp1=batch.terminals,\n gamma=self._gamma ** batch.n_steps,\n use_independent_target=self._target_reduction_type == \"none\",\n masks=batch.masks,\n )\n\n def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:\n assert self._targ_q_func is not None\n with torch.no_grad():\n next_actions = 
self._targ_q_func(batch.next_observations)\n max_action = next_actions.argmax(dim=1)\n return self._targ_q_func.compute_target(\n batch.next_observations,\n max_action,\n reduction=self._target_reduction_type,\n )\n\n def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:\n assert self._q_func is not None\n return self._q_func(x).argmax(dim=1)\n\n def _sample_action(self, x: torch.Tensor) -> torch.Tensor:\n return self._predict_best_action(x)\n\n def update_target(self) -> None:\n assert self._q_func is not None\n assert self._targ_q_func is not None\n hard_sync(self._targ_q_func, self._q_func)\n\n @property\n def q_function(self) -> EnsembleQFunction:\n assert self._q_func\n return self._q_func\n\n\nclass DoubleDQNImpl(DQNImpl):\n def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:\n assert self._targ_q_func is not None\n with torch.no_grad():\n action = self._predict_best_action(batch.next_observations)\n return self._targ_q_func.compute_target(\n batch.next_observations,\n action,\n reduction=self._target_reduction_type,\n )\n" ]
[ [ "torch.no_grad" ] ]
GSxiongkun/pyprobml
[ "a3fe8086844ae0885e3f21d30be5f2e6448cdeba" ]
[ "scripts/cifar_viz_tf.py" ]
[ "# Based on\n# https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_classification.ipynb\n# (MIT License)\n\nfrom __future__ import absolute_import, division, print_function\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfigdir = \"../figures\"\ndef save_fig(fname): plt.savefig(os.path.join(figdir, fname))\n\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\n\nprint(tf.__version__)\nnp.random.seed(0)\n\n\ndata = keras.datasets.cifar10\n\n(train_images, train_labels), (test_images, test_labels) = data.load_data()\n\nprint(np.shape(train_images))\nprint(np.shape(test_images))\n\n\n# For CIFAR:\n# (50000, 32, 32, 3)\n# (10000, 32, 32, 3)\n\nclass_names = ['plane', 'car', 'bird', 'cat', 'deer', 'dog',\n 'frog', 'horse', 'ship', 'truck']\n\nplt.figure(figsize=(10,10))\nfor i in range(25):\n plt.subplot(5,5,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(train_images[i])\n y = train_labels[i][0]\n plt.xlabel(class_names[y])\nsave_fig(\"cifar10-data.pdf\")\nplt.show()\n\n" ]
[ [ "numpy.random.seed", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "numpy.shape", "matplotlib.pyplot.figure", "matplotlib.pyplot.yticks", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow", "matplotlib.pyplot.xticks", "matplotlib.pyplot.subplot" ] ]
kimlindner/TURL
[ "72d3dad8073dfe821551b35d59c02febf71aad3a" ]
[ "run_table_RE_finetuning.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).\nGPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned\nusing a masked language modeling (MLM) loss.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport pickle\nimport random\nimport re\nimport shutil\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler\nfrom torch.utils.data.distributed import DistributedSampler\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept:\n from tensorboardX import SummaryWriter\n\nfrom tqdm import tqdm, trange\n\nfrom data_loader.data_loaders import *\nfrom data_loader.RE_data_loaders import *\nfrom model.configuration import TableConfig\nfrom model.model import HybridTableRE\nfrom model.transformers import BertTokenizer, WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup\nfrom model.optim import DenseSparseAdam\nfrom model.metric import *\nfrom utils.util import *\n\nlogger = logging.getLogger(__name__)\n\nMODEL_CLASSES = {\n 'RE': (TableConfig, HybridTableRE, BertTokenizer)\n}\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef _rotate_checkpoints(args, checkpoint_prefix, use_mtime=False):\n if not args.save_total_limit:\n return\n if args.save_total_limit <= 0:\n return\n\n # Check if we should delete older checkpoint(s)\n glob_checkpoints = glob.glob(os.path.join(args.output_dir, '{}-*'.format(checkpoint_prefix)))\n if len(glob_checkpoints) <= args.save_total_limit:\n return\n\n ordering_and_checkpoint_path = []\n for path in glob_checkpoints:\n if use_mtime:\n ordering_and_checkpoint_path.append((os.path.getmtime(path), path))\n else:\n regex_match = re.match('.*{}-([0-9]+)'.format(checkpoint_prefix), path)\n if regex_match and regex_match.groups():\n ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))\n\n checkpoints_sorted = sorted(ordering_and_checkpoint_path)\n checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]\n number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit)\n checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]\n for checkpoint in checkpoints_to_be_deleted:\n logger.info(\"Deleting older checkpoint [{}] due to args.save_total_limit\".format(checkpoint))\n shutil.rmtree(checkpoint)\n\n\ndef train(args, config, train_dataset, model, eval_dataset = None):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = 
SummaryWriter(os.path.join(args.output_dir, 'logs'))\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = RELoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, is_train=True)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.table.named_parameters() if (not any(nd in n for nd in no_decay))], 'weight_decay': args.weight_decay, 'lr': args.learning_rate},\n {'params': [p for n, p in model.table.named_parameters() if (any(nd in n for nd in no_decay))], 'weight_decay': 0.0, 'lr': args.learning_rate},\n {'params': [p for n, p in model.cls.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay, 'lr': args.learning_rate*10},\n {'params': [p for n, p in model.cls.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, 'lr': args.learning_rate*10}\n ]\n optimizer = DenseSparseAdam(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n # optimizer = AdamW(optimizer_grouped_parameters, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\n output_device=args.local_rank,\n find_unused_parameters=True)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n tr_map, logging_map = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0])\n set_seed(args) # Added here for reproducibility (even between python 2 and 3)\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n table_id, input_tok, input_tok_type, input_tok_pos, input_tok_mask, \\\n input_ent_text, input_ent_text_length, input_ent, input_ent_type, input_ent_mask, \\\n column_entity_mask, column_header_mask, labels_mask, labels = batch\n input_tok = input_tok.to(args.device)\n input_tok_type = input_tok_type.to(args.device)\n input_tok_pos = input_tok_pos.to(args.device)\n input_tok_mask = input_tok_mask.to(args.device)\n input_ent_text = input_ent_text.to(args.device)\n input_ent_text_length = input_ent_text_length.to(args.device)\n input_ent = input_ent.to(args.device)\n input_ent_type = input_ent_type.to(args.device)\n input_ent_mask = input_ent_mask.to(args.device)\n column_entity_mask = column_entity_mask.to(args.device)\n column_header_mask = column_header_mask.to(args.device)\n labels_mask = labels_mask.to(args.device)\n labels = labels.to(args.device)\n model.train()\n if args.mode == 1:\n input_ent_mask = input_ent_mask[:,:,input_tok_mask.shape[1]:]\n input_tok = None\n input_tok_type = None\n input_tok_pos = None\n input_tok_mask = None\n elif args.mode == 2:\n input_tok_mask = input_tok_mask[:,:,:input_tok_mask.shape[1]]\n input_ent_text = None\n input_ent_text_length = None\n input_ent = None\n input_ent_type = None\n input_ent_mask = None\n elif args.mode == 3:\n input_ent = None\n elif args.mode == 4:\n input_ent_mask = input_ent_mask[:,:,input_tok_mask.shape[1]:]\n input_tok = None\n input_tok_type = None\n input_tok_pos = None\n input_tok_mask = None\n input_ent = None\n elif args.mode == 5:\n input_ent_mask = input_ent_mask[:,:,input_tok_mask.shape[1]:]\n input_tok = None\n input_tok_type = None\n input_tok_pos = None\n input_tok_mask = None\n input_ent_text = None\n input_ent_text_length = None\n outputs = model(input_tok, input_tok_type, input_tok_pos, input_tok_mask,\\\n input_ent_text, input_ent_text_length, input_ent, input_ent_type, input_ent_mask, column_entity_mask, column_header_mask, labels_mask, labels)\n # model outputs are always tuple in transformers (see doc)\n loss = outputs[0]\n\n prediction_scores = outputs[1]\n ap = average_precision(prediction_scores.view(-1, config.class_num), labels.view((-1, config.class_num)))\n map = (ap*labels_mask.view(-1)).sum()/labels_mask.sum()\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n tr_map += map.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), 
args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well\n results = evaluate(args, config, eval_dataset, model)\n for key, value in results.items():\n tb_writer.add_scalar('eval_{}'.format(key), value, global_step)\n tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)\n tb_writer.add_scalar('map', (tr_map - logging_map)/(args.gradient_accumulation_steps*args.logging_steps), global_step)\n logging_map = tr_map\n logging_loss = tr_loss\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n checkpoint_prefix = 'checkpoint'\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, '{}-{}'.format(checkpoint_prefix, global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n torch.save(args, os.path.join(output_dir, 'training_args.bin'))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n _rotate_checkpoints(args, checkpoint_prefix)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, config, eval_dataset, model, prefix=\"\"):\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_output_dir = args.output_dir\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n model.eval()\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n eval_dataloader = RELoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, is_train=False)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n eval_map = 0.0\n nb_eval_steps = 0\n \n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n table_id, input_tok, input_tok_type, input_tok_pos, input_tok_mask, \\\n input_ent_text, input_ent_text_length, input_ent, input_ent_type, input_ent_mask, \\\n column_entity_mask, column_header_mask, labels_mask, labels = batch\n input_tok = input_tok.to(args.device)\n input_tok_type = input_tok_type.to(args.device)\n input_tok_pos = input_tok_pos.to(args.device)\n input_tok_mask = input_tok_mask.to(args.device)\n input_ent_text = input_ent_text.to(args.device)\n input_ent_text_length = input_ent_text_length.to(args.device)\n input_ent = input_ent.to(args.device)\n input_ent_type = input_ent_type.to(args.device)\n input_ent_mask 
= input_ent_mask.to(args.device)\n column_entity_mask = column_entity_mask.to(args.device)\n column_header_mask = column_header_mask.to(args.device)\n labels_mask = labels_mask.to(args.device)\n labels = labels.to(args.device)\n if args.mode == 1:\n input_ent_mask = input_ent_mask[:,:,input_tok_mask.shape[1]:]\n input_tok = None\n input_tok_type = None\n input_tok_pos = None\n input_tok_mask = None\n elif args.mode == 2:\n input_tok_mask = input_tok_mask[:,:,:input_tok_mask.shape[1]]\n input_ent_text = None\n input_ent_text_length = None\n input_ent = None\n input_ent_type = None\n input_ent_mask = None\n elif args.mode == 3:\n input_ent = None\n elif args.mode == 4:\n input_ent_mask = input_ent_mask[:,:,input_tok_mask.shape[1]:]\n input_tok = None\n input_tok_type = None\n input_tok_pos = None\n input_tok_mask = None\n input_ent = None\n elif args.mode == 5:\n input_ent_mask = input_ent_mask[:,:,input_tok_mask.shape[1]:]\n input_tok = None\n input_tok_type = None\n input_tok_pos = None\n input_tok_mask = None\n input_ent_text = None\n input_ent_text_length = None\n with torch.no_grad():\n outputs = model(input_tok, input_tok_type, input_tok_pos, input_tok_mask,\\\n input_ent_text, input_ent_text_length, input_ent, input_ent_type, input_ent_mask, column_entity_mask, column_header_mask, labels_mask, labels)\n loss = outputs[0]\n prediction_scores = outputs[1]\n # pdb.set_trace()\n ap = average_precision(prediction_scores.view(-1, config.class_num), labels.view((-1, config.class_num)))\n map = (ap*labels_mask.view(-1)).sum()/labels_mask.sum()\n eval_loss += loss.mean().item()\n eval_map += map.item()\n nb_eval_steps += 1\n \n eval_loss = eval_loss / nb_eval_steps\n eval_map = eval_map / nb_eval_steps\n\n result = {\n \"eval_loss\": eval_loss,\n \"eval_map\": eval_map,\n }\n logger.info(\"***** Eval results {} *****\".format(prefix))\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n return result\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\", default=None, type=str, required=True,\n help=\"The input data directory.\")\n parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--model_type\", default=\"bert\", type=str,\n help=\"The model architecture to be fine-tuned.\")\n parser.add_argument(\"--model_name_or_path\", default=\"bert-base-cased\", type=str,\n help=\"The model checkpoint for weights initialization.\")\n parser.add_argument(\"--mode\", default=0, type=int,\n help=\"0: use both;1: use table;2: use entity\")\n\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Optional pretrained config name or path if not the same as model_name_or_path\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Optional pretrained tokenizer name or path if not the same as model_name_or_path\")\n parser.add_argument(\"--cache_dir\", default=\"\", type=str,\n help=\"Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)\")\n parser.add_argument(\"--block_size\", default=-1, type=int,\n help=\"Optional input sequence length after tokenization.\"\n \"The training dataset will be truncated in block of this size for training.\"\n \"Default to the model max input length for single sentence inputs (take into account special tokens).\")\n 
parser.add_argument(\"--do_train\", action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--evaluate_during_training\", action='store_true',\n help=\"Run evaluation during training at each logging step.\")\n parser.add_argument(\"--do_lower_case\", action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=4, type=int,\n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--per_gpu_eval_batch_size\", default=4, type=int,\n help=\"Batch size per GPU/CPU for evaluation.\")\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float,\n help=\"Weight deay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--num_train_epochs\", default=1.0, type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--max_steps\", default=-1, type=int,\n help=\"If > 0: set total number of training steps to perform. Override num_train_epochs.\")\n parser.add_argument(\"--warmup_steps\", default=0, type=int,\n help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument('--logging_steps', type=int, default=50,\n help=\"Log every X updates steps.\")\n parser.add_argument('--save_steps', type=int, default=50,\n help=\"Save checkpoint every X updates steps.\")\n parser.add_argument('--save_total_limit', type=int, default=None,\n help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')\n parser.add_argument(\"--eval_all_checkpoints\", action='store_true',\n help=\"Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number\")\n parser.add_argument(\"--no_cuda\", action='store_true',\n help=\"Avoid using CUDA when available\")\n parser.add_argument('--overwrite_output_dir', action='store_true',\n help=\"Overwrite the content of the output directory\")\n parser.add_argument('--overwrite_cache', action='store_true',\n help=\"Overwrite the cached training and evaluation sets\")\n parser.add_argument('--seed', type=int, default=1,\n help=\"random seed for initialization\")\n\n parser.add_argument('--fp16', action='store_true',\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\")\n parser.add_argument('--fp16_opt_level', type=str, default='O1',\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\")\n parser.add_argument(\"--local_rank\", type=int, default=-1,\n help=\"For distributed training: local_rank\")\n parser.add_argument('--server_ip', type=str, default='', help=\"For distant debugging.\")\n parser.add_argument('--server_port', type=str, default='', help=\"For distant debugging.\")\n args = parser.parse_args()\n\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not 
args.overwrite_output_dir:\n raise ValueError(\"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.\".format(args.output_dir))\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend='nccl')\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)\n\n # Set seed\n set_seed(args)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab\n\n config_class, model_class, _ = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,\n cache_dir=args.cache_dir if args.cache_dir else None)\n type_vocab = load_relation_vocab(args.data_dir)\n config.class_num = len(type_vocab)\n config.mode = args.mode\n model = model_class(config, is_simple=True)\n if args.do_train:\n # lm_checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.model_name_or_path + '/**/' + WEIGHTS_NAME, recursive=True)))\n # logger.info(\"load pre-trained model from %s\", lm_checkpoints[-1])\n # lm_checkpoint = torch.load(os.path.join(lm_checkpoints[-1],\"pytorch_model.bin\"))\n lm_checkpoint = torch.load(os.path.join(args.model_name_or_path,\"pytorch_model.bin\"))\n model.load_pretrained(lm_checkpoint)\n model.to(args.device)\n\n if args.local_rank == 0:\n torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Training\n if args.do_train:\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache\n entity_vocab = load_entity_vocab(args.data_dir, ignore_bad_title=True, min_ent_count=2)\n # train_dataset = WikiCTDataset(args.data_dir, entity_vocab, type_vocab, max_input_tok=500, src=\"train\", max_length = [50, 10, 10], force_new=False, tokenizer = None)\n train_dataset = REDataset(args.data_dir, entity_vocab, type_vocab, max_input_tok=500, src=\"train\", max_length = [50, 10, 10], force_new=False, tokenizer = None)\n eval_dataset = REDataset(args.data_dir, entity_vocab, type_vocab, max_input_tok=500, src=\"dev\", max_length = [50, 10, 10], force_new=False, 
tokenizer = None)\n assert config.vocab_size == len(train_dataset.tokenizer), \\\n \"vocab size mismatch, vocab_size=%d\"%(len(train_dataset.tokenizer))\n\n if args.local_rank == 0:\n torch.distributed.barrier()\n\n global_step, tr_loss = train(args, config, train_dataset, model, eval_dataset=eval_dataset)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n\n # Saving best-practices: if you use save_pretrained for the model and tokenizer, you can reload them using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = model_class.from_pretrained(args.output_dir)\n model.to(args.device)\n\n\n # Evaluation\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else \"\"\n \n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n result = evaluate(args, config, eval_dataset, model, prefix=prefix)\n result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())\n results.update(result)\n\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.device", "torch.distributed.get_world_size", "torch.cuda.manual_seed_all", "torch.utils.data.RandomSampler", "numpy.random.seed", "torch.distributed.init_process_group", "torch.no_grad", "torch.utils.data.SequentialSampler", "torch.nn.parallel.DistributedDataParallel", "torch.cuda.device_count", "torch.manual_seed", "torch.cuda.set_device", "torch.cuda.is_available", "torch.utils.data.distributed.DistributedSampler", "torch.distributed.get_rank", "torch.distributed.barrier", "torch.nn.DataParallel" ] ]
StephenCurry-LH/ecg
[ "f6dffeb108515d7307773112482d4d8f81ba9442", "f6dffeb108515d7307773112482d4d8f81ba9442" ]
[ "models_test/resnet.py", "ecg-new/utils/dft.py" ]
[ "# -*- coding: utf-8 -*-\n'''\n@time: 2019/9/8 20:14\n直接修改torch的resnet\n@ author: javis\n'''\n\nimport torch.nn as nn\nimport math\nimport torch.utils.model_zoo as model_zoo\nfrom models_test.modules import *\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv1d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None,freq_select='top1'):\n super(BasicBlock, self).__init__()\n\n c2wh = dict([(64, 1250), (128, 625), (256, 313), (512, 157)])\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm1d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm1d(planes)\n self.downsample = downsample\n self.stride = stride\n self.dropout = nn.Dropout(.2)\n self.att=MultiSpectralDCTLayer(planes,c2wh[planes],freq_sel_method=freq_select)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.dropout(out)\n out = self.conv2(out)\n out = self.bn2(out)\n #out = self.att(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=7, bias=False, padding=3)\n self.bn1 = nn.BatchNorm1d(planes)\n self.conv2 = nn.Conv1d(planes, planes, kernel_size=11, stride=stride,\n padding=5, bias=False)\n self.bn2 = nn.BatchNorm1d(planes)\n self.conv3 = nn.Conv1d(planes, planes * 4, kernel_size=7, bias=False, padding=3)\n self.bn3 = nn.BatchNorm1d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dropout = nn.Dropout(.2)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.dropout(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, layers, num_classes=55):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv1d(8, 64, kernel_size=3, stride=2, padding=1,\n bias=False)\n self.bn1 = nn.BatchNorm1d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv1d):\n n = m.kernel_size[0] * m.kernel_size[0] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv1d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm1d(planes * block.expansion),\n )\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\n\ndef resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model\n\n\ndef resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model\n\n\ndef resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model\n\n\nif __name__ == '__main__':\n import torch\n\n x = torch.randn(1, 8, 60)\n m = resnet18(num_classes=2)\n out=m(x)\n torch.save(m.state_dict(),'../ecg.pth')\n print('saving')\n print(out)\n\n", "import math\nfrom math import pi\nfrom math import sin\nfrom math import cos\nfrom matplotlib import pyplot as plt\n\n# para=10+10j\n# print(abs(para))\n# sum=0\n# length=40\n# for i in range(length):\n# basis=math.cos(math.pi*2*(2*i)/length)-math.sin(math.pi*2*(2*i)/length)*1j\n#\n# sum+=(math.cos(math.pi*2*(2*i)/length+pi/6)*basis)\n# #print(sum)\n#\n# print(sum)\n# print (math.atan(10/17.3))\n# print (pi/6)\n#DFT y=sin (2*pi*x)\n\nplot_y=[]\nN=40\nK=40\nsum=0\nre=0\nim=0\nfor k in range(K): #频率\n for n in range(N): #第几个点\n re=cos(2*pi*2*n/K+pi/4)*cos(2*pi*k*n/K)\n im=cos(2*pi*2*n/K+pi/4)*sin(2*pi*k*n/K)*(-1j)\n #basis=cos(2*pi*n*k/K)-sin(2*pi*n*k/K)*1j\n #basis=cos(2*pi*k*n/40)\n\n sum+=re\n sum+=im\n\n\n print ('X'+str(k),sum)\n plot_y.append(sum)\n sum=0\n\nx=[i for i in range(len(plot_y))]\ny=list(map(abs,plot_y))\n\nplt.figure()\nplt.scatter(x,y)\nplt.savefig('40samples.png')\n\n\n\n# sum=0\n# re=0\n# im=0\n# N=400\n# K=400\n# for k in range(K):\n# for n in range(N):\n# re+=cos(2*pi*n)*cos(2*pi*k*n/40)\n# im+=cos(2*pi*n)*sin(2*pi*k*n/40)*1j*(-1)\n# sum=sum+re+im\n# print (' k= ',k,re,' ',im)\n# #print (k,'__',sum,abs(sum))\n# #sum = 0\n# re=0\n# im=0\n#\n# #计算实部与虚部" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.Conv1d", "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.AdaptiveAvgPool1d", "torch.nn.BatchNorm1d", "torch.nn.MaxPool1d", "torch.randn" ], [ "matplotlib.pyplot.scatter", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ] ]
sabopSCR/models
[ "97764e1526377bcb7e7f9b6ff847c33ce185753b" ]
[ "research/object_detection/inputs.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Model input function for tf-learn object detection model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nimport tensorflow.compat.v1 as tf\nfrom object_detection.builders import dataset_builder\nfrom object_detection.builders import image_resizer_builder\nfrom object_detection.builders import model_builder\nfrom object_detection.builders import preprocessor_builder\nfrom object_detection.core import box_list\nfrom object_detection.core import box_list_ops\nfrom object_detection.core import densepose_ops\nfrom object_detection.core import keypoint_ops\nfrom object_detection.core import preprocessor\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.data_decoders import tf_example_decoder\nfrom object_detection.protos import eval_pb2\nfrom object_detection.protos import image_resizer_pb2\nfrom object_detection.protos import input_reader_pb2\nfrom object_detection.protos import model_pb2\nfrom object_detection.protos import train_pb2\nfrom object_detection.utils import config_util\nfrom object_detection.utils import ops as util_ops\nfrom object_detection.utils import shape_utils\n\nHASH_KEY = 'hash'\nHASH_BINS = 1 << 31\nSERVING_FED_EXAMPLE_KEY = 'serialized_example'\n_LABEL_OFFSET = 1\n\n# A map of names to methods that help build the input pipeline.\nINPUT_BUILDER_UTIL_MAP = {\n 'dataset_build': dataset_builder.build,\n 'model_build': model_builder.build,\n}\n\n\ndef _multiclass_scores_or_one_hot_labels(multiclass_scores,\n groundtruth_boxes,\n groundtruth_classes, num_classes):\n \"\"\"Returns one-hot encoding of classes when multiclass_scores is empty.\"\"\"\n # Replace groundtruth_classes tensor with multiclass_scores tensor when its\n # non-empty. If multiclass_scores is empty fall back on groundtruth_classes\n # tensor.\n def true_fn():\n return tf.reshape(multiclass_scores,\n [tf.shape(groundtruth_boxes)[0], num_classes])\n def false_fn():\n return tf.one_hot(groundtruth_classes, num_classes)\n return tf.cond(tf.size(multiclass_scores) > 0, true_fn, false_fn)\n\n\ndef _convert_labeled_classes_to_k_hot(groundtruth_labeled_classes, num_classes,\n map_empty_to_ones=False):\n \"\"\"Returns k-hot encoding of the labeled classes.\n\n If map_empty_to_ones is enabled and the input labeled_classes is empty,\n this function assumes all classes are exhaustively labeled, thus returning\n an all-one encoding.\n\n Args:\n groundtruth_labeled_classes: a Tensor holding a sparse representation of\n labeled classes.\n num_classes: an integer representing the number of classes\n map_empty_to_ones: boolean (default: False). 
Set this to be True to default\n to an all-ones result if given an empty `groundtruth_labeled_classes`.\n Returns:\n A k-hot (and 0-indexed) tensor representation of\n `groundtruth_labeled_classes`.\n \"\"\"\n\n # If the input labeled_classes is empty, it assumes all classes are\n # exhaustively labeled, thus returning an all-one encoding.\n def true_fn():\n return tf.sparse_to_dense(\n groundtruth_labeled_classes - _LABEL_OFFSET, [num_classes],\n tf.constant(1, dtype=tf.float32),\n validate_indices=False)\n\n def false_fn():\n return tf.ones(num_classes, dtype=tf.float32)\n\n if map_empty_to_ones:\n return tf.cond(tf.size(groundtruth_labeled_classes) > 0, true_fn, false_fn)\n return true_fn()\n\n\ndef _remove_unrecognized_classes(class_ids, unrecognized_label):\n \"\"\"Returns class ids with unrecognized classes filtered out.\"\"\"\n\n recognized_indices = tf.squeeze(\n tf.where(tf.greater(class_ids, unrecognized_label)), -1)\n return tf.gather(class_ids, recognized_indices)\n\n\ndef assert_or_prune_invalid_boxes(boxes):\n \"\"\"Makes sure boxes have valid sizes (ymax >= ymin, xmax >= xmin).\n\n When the hardware supports assertions, the function raises an error when\n boxes have an invalid size. If assertions are not supported (e.g. on TPU),\n boxes with invalid sizes are filtered out.\n\n Args:\n boxes: float tensor of shape [num_boxes, 4]\n\n Returns:\n boxes: float tensor of shape [num_valid_boxes, 4] with invalid boxes\n filtered out.\n\n Raises:\n tf.errors.InvalidArgumentError: When we detect boxes with invalid size.\n This is not supported on TPUs.\n \"\"\"\n\n ymin, xmin, ymax, xmax = tf.split(\n boxes, num_or_size_splits=4, axis=1)\n\n height_check = tf.Assert(tf.reduce_all(ymax >= ymin), [ymin, ymax])\n width_check = tf.Assert(tf.reduce_all(xmax >= xmin), [xmin, xmax])\n\n with tf.control_dependencies([height_check, width_check]):\n boxes_tensor = tf.concat([ymin, xmin, ymax, xmax], axis=1)\n boxlist = box_list.BoxList(boxes_tensor)\n # TODO(b/149221748) Remove pruning when XLA supports assertions.\n boxlist = box_list_ops.prune_small_boxes(boxlist, 0)\n\n return boxlist.get()\n\n\ndef transform_input_data(tensor_dict,\n model_preprocess_fn,\n image_resizer_fn,\n num_classes,\n data_augmentation_fn=None,\n merge_multiple_boxes=False,\n retain_original_image=False,\n use_multiclass_scores=False,\n use_bfloat16=False,\n retain_original_image_additional_channels=False,\n keypoint_type_weight=None):\n \"\"\"A single function that is responsible for all input data transformations.\n\n Data transformation functions are applied in the following order.\n 1. If key fields.InputDataFields.image_additional_channels is present in\n tensor_dict, the additional channels will be merged into\n fields.InputDataFields.image.\n 2. data_augmentation_fn (optional): applied on tensor_dict.\n 3. model_preprocess_fn: applied only on image tensor in tensor_dict.\n 4. keypoint_type_weight (optional): If groundtruth keypoints are in\n the tensor dictionary, per-keypoint weights are produced. These weights are\n initialized by `keypoint_type_weight` (or ones if left None).\n Then, for all keypoints that are not visible, the weights are set to 0 (to\n avoid penalizing the model in a loss function).\n 5. image_resizer_fn: applied on original image and instance mask tensor in\n tensor_dict.\n 6. one_hot_encoding: applied to classes tensor in tensor_dict.\n 7. 
merge_multiple_boxes (optional): when groundtruth boxes are exactly the\n same they can be merged into a single box with an associated k-hot class\n label.\n\n Args:\n tensor_dict: dictionary containing input tensors keyed by\n fields.InputDataFields.\n model_preprocess_fn: model's preprocess function to apply on image tensor.\n This function must take in a 4-D float tensor and return a 4-D preprocess\n float tensor and a tensor containing the true image shape.\n image_resizer_fn: image resizer function to apply on groundtruth instance\n `masks. This function must take a 3-D float tensor of an image and a 3-D\n tensor of instance masks and return a resized version of these along with\n the true shapes.\n num_classes: number of max classes to one-hot (or k-hot) encode the class\n labels.\n data_augmentation_fn: (optional) data augmentation function to apply on\n input `tensor_dict`.\n merge_multiple_boxes: (optional) whether to merge multiple groundtruth boxes\n and classes for a given image if the boxes are exactly the same.\n retain_original_image: (optional) whether to retain original image in the\n output dictionary.\n use_multiclass_scores: whether to use multiclass scores as class targets\n instead of one-hot encoding of `groundtruth_classes`. When\n this is True and multiclass_scores is empty, one-hot encoding of\n `groundtruth_classes` is used as a fallback.\n use_bfloat16: (optional) a bool, whether to use bfloat16 in training.\n retain_original_image_additional_channels: (optional) Whether to retain\n original image additional channels in the output dictionary.\n keypoint_type_weight: A list (of length num_keypoints) containing\n groundtruth loss weights to use for each keypoint. If None, will use a\n weight of 1.\n\n Returns:\n A dictionary keyed by fields.InputDataFields containing the tensors obtained\n after applying all the transformations.\n\n Raises:\n KeyError: If both groundtruth_labeled_classes and groundtruth_image_classes\n are provided by the decoder in tensor_dict since both fields are\n considered to contain the same information.\n \"\"\"\n out_tensor_dict = tensor_dict.copy()\n\n input_fields = fields.InputDataFields\n labeled_classes_field = input_fields.groundtruth_labeled_classes\n image_classes_field = input_fields.groundtruth_image_classes\n verified_neg_classes_field = input_fields.groundtruth_verified_neg_classes\n not_exhaustive_field = input_fields.groundtruth_not_exhaustive_classes\n\n if (labeled_classes_field in out_tensor_dict and\n image_classes_field in out_tensor_dict):\n raise KeyError('groundtruth_labeled_classes and groundtruth_image_classes'\n 'are provided by the decoder, but only one should be set.')\n\n for field, map_empty_to_ones in [\n (labeled_classes_field, True),\n (image_classes_field, True),\n (verified_neg_classes_field, False),\n (not_exhaustive_field, False)]:\n if field in out_tensor_dict:\n out_tensor_dict[field] = _remove_unrecognized_classes(\n out_tensor_dict[field], unrecognized_label=-1)\n out_tensor_dict[field] = _convert_labeled_classes_to_k_hot(\n out_tensor_dict[field], num_classes, map_empty_to_ones)\n\n if input_fields.multiclass_scores in out_tensor_dict:\n out_tensor_dict[\n input_fields\n .multiclass_scores] = _multiclass_scores_or_one_hot_labels(\n out_tensor_dict[input_fields.multiclass_scores],\n out_tensor_dict[input_fields.groundtruth_boxes],\n out_tensor_dict[input_fields.groundtruth_classes],\n num_classes)\n\n if input_fields.groundtruth_boxes in out_tensor_dict:\n out_tensor_dict = 
util_ops.filter_groundtruth_with_nan_box_coordinates(\n out_tensor_dict)\n out_tensor_dict = util_ops.filter_unrecognized_classes(out_tensor_dict)\n\n if retain_original_image:\n out_tensor_dict[input_fields.original_image] = tf.cast(\n image_resizer_fn(out_tensor_dict[input_fields.image],\n None)[0], tf.uint8)\n\n if input_fields.image_additional_channels in out_tensor_dict:\n channels = out_tensor_dict[input_fields.image_additional_channels]\n out_tensor_dict[input_fields.image] = tf.concat(\n [out_tensor_dict[input_fields.image], channels], axis=2)\n if retain_original_image_additional_channels:\n out_tensor_dict[\n input_fields.image_additional_channels] = tf.cast(\n image_resizer_fn(channels, None)[0], tf.uint8)\n\n # Apply data augmentation ops.\n if data_augmentation_fn is not None:\n out_tensor_dict = data_augmentation_fn(out_tensor_dict)\n\n # Apply model preprocessing ops and resize instance masks.\n image = out_tensor_dict[input_fields.image]\n preprocessed_resized_image, true_image_shape = model_preprocess_fn(\n tf.expand_dims(tf.cast(image, dtype=tf.float32), axis=0))\n\n preprocessed_shape = tf.shape(preprocessed_resized_image)\n new_height, new_width = preprocessed_shape[1], preprocessed_shape[2]\n\n im_box = tf.stack([\n 0.0, 0.0,\n tf.to_float(new_height) / tf.to_float(true_image_shape[0, 0]),\n tf.to_float(new_width) / tf.to_float(true_image_shape[0, 1])\n ])\n\n if input_fields.groundtruth_boxes in tensor_dict:\n bboxes = out_tensor_dict[input_fields.groundtruth_boxes]\n boxlist = box_list.BoxList(bboxes)\n realigned_bboxes = box_list_ops.change_coordinate_frame(boxlist, im_box)\n\n realigned_boxes_tensor = realigned_bboxes.get()\n valid_boxes_tensor = assert_or_prune_invalid_boxes(realigned_boxes_tensor)\n out_tensor_dict[\n input_fields.groundtruth_boxes] = valid_boxes_tensor\n\n if input_fields.groundtruth_keypoints in tensor_dict:\n keypoints = out_tensor_dict[input_fields.groundtruth_keypoints]\n realigned_keypoints = keypoint_ops.change_coordinate_frame(keypoints,\n im_box)\n out_tensor_dict[\n input_fields.groundtruth_keypoints] = realigned_keypoints\n flds_gt_kpt = input_fields.groundtruth_keypoints\n flds_gt_kpt_vis = input_fields.groundtruth_keypoint_visibilities\n flds_gt_kpt_weights = input_fields.groundtruth_keypoint_weights\n if flds_gt_kpt_vis not in out_tensor_dict:\n out_tensor_dict[flds_gt_kpt_vis] = tf.ones_like(\n out_tensor_dict[flds_gt_kpt][:, :, 0],\n dtype=tf.bool)\n out_tensor_dict[flds_gt_kpt_weights] = (\n keypoint_ops.keypoint_weights_from_visibilities(\n out_tensor_dict[flds_gt_kpt_vis],\n keypoint_type_weight))\n\n dp_surface_coords_fld = input_fields.groundtruth_dp_surface_coords\n if dp_surface_coords_fld in tensor_dict:\n dp_surface_coords = out_tensor_dict[dp_surface_coords_fld]\n realigned_dp_surface_coords = densepose_ops.change_coordinate_frame(\n dp_surface_coords, im_box)\n out_tensor_dict[dp_surface_coords_fld] = realigned_dp_surface_coords\n\n if use_bfloat16:\n preprocessed_resized_image = tf.cast(\n preprocessed_resized_image, tf.bfloat16)\n if input_fields.context_features in out_tensor_dict:\n out_tensor_dict[input_fields.context_features] = tf.cast(\n out_tensor_dict[input_fields.context_features], tf.bfloat16)\n out_tensor_dict[input_fields.image] = tf.squeeze(\n preprocessed_resized_image, axis=0)\n out_tensor_dict[input_fields.true_image_shape] = tf.squeeze(\n true_image_shape, axis=0)\n if input_fields.groundtruth_instance_masks in out_tensor_dict:\n masks = out_tensor_dict[input_fields.groundtruth_instance_masks]\n _, 
resized_masks, _ = image_resizer_fn(image, masks)\n if use_bfloat16:\n resized_masks = tf.cast(resized_masks, tf.bfloat16)\n out_tensor_dict[\n input_fields.groundtruth_instance_masks] = resized_masks\n\n zero_indexed_groundtruth_classes = out_tensor_dict[\n input_fields.groundtruth_classes] - _LABEL_OFFSET\n if use_multiclass_scores:\n out_tensor_dict[\n input_fields.groundtruth_classes] = out_tensor_dict[\n input_fields.multiclass_scores]\n else:\n out_tensor_dict[input_fields.groundtruth_classes] = tf.one_hot(\n zero_indexed_groundtruth_classes, num_classes)\n out_tensor_dict.pop(input_fields.multiclass_scores, None)\n\n if input_fields.groundtruth_confidences in out_tensor_dict:\n groundtruth_confidences = out_tensor_dict[\n input_fields.groundtruth_confidences]\n # Map the confidences to the one-hot encoding of classes\n out_tensor_dict[input_fields.groundtruth_confidences] = (\n tf.reshape(groundtruth_confidences, [-1, 1]) *\n out_tensor_dict[input_fields.groundtruth_classes])\n else:\n groundtruth_confidences = tf.ones_like(\n zero_indexed_groundtruth_classes, dtype=tf.float32)\n out_tensor_dict[input_fields.groundtruth_confidences] = (\n out_tensor_dict[input_fields.groundtruth_classes])\n\n if merge_multiple_boxes:\n merged_boxes, merged_classes, merged_confidences, _ = (\n util_ops.merge_boxes_with_multiple_labels(\n out_tensor_dict[input_fields.groundtruth_boxes],\n zero_indexed_groundtruth_classes,\n groundtruth_confidences,\n num_classes))\n merged_classes = tf.cast(merged_classes, tf.float32)\n out_tensor_dict[input_fields.groundtruth_boxes] = merged_boxes\n out_tensor_dict[input_fields.groundtruth_classes] = merged_classes\n out_tensor_dict[input_fields.groundtruth_confidences] = (\n merged_confidences)\n if input_fields.groundtruth_boxes in out_tensor_dict:\n out_tensor_dict[input_fields.num_groundtruth_boxes] = tf.shape(\n out_tensor_dict[input_fields.groundtruth_boxes])[0]\n\n return out_tensor_dict\n\n\ndef pad_input_data_to_static_shapes(tensor_dict,\n max_num_boxes,\n num_classes,\n spatial_image_shape=None,\n max_num_context_features=None,\n context_feature_length=None,\n max_dp_points=336):\n \"\"\"Pads input tensors to static shapes.\n\n In case num_additional_channels > 0, we assume that the additional channels\n have already been concatenated to the base image.\n\n Args:\n tensor_dict: Tensor dictionary of input data\n max_num_boxes: Max number of groundtruth boxes needed to compute shapes for\n padding.\n num_classes: Number of classes in the dataset needed to compute shapes for\n padding.\n spatial_image_shape: A list of two integers of the form [height, width]\n containing expected spatial shape of the image.\n max_num_context_features (optional): The maximum number of context\n features needed to compute shapes padding.\n context_feature_length (optional): The length of the context feature.\n max_dp_points (optional): The maximum number of DensePose sampled points per\n instance. 
The default (336) is selected since the original DensePose paper\n (https://arxiv.org/pdf/1802.00434.pdf) indicates that the maximum number\n of samples per part is 14, and therefore 24 * 14 = 336 is the maximum\n sampler per instance.\n\n Returns:\n A dictionary keyed by fields.InputDataFields containing padding shapes for\n tensors in the dataset.\n\n Raises:\n ValueError: If groundtruth classes is neither rank 1 nor rank 2, or if we\n detect that additional channels have not been concatenated yet, or if\n max_num_context_features is not specified and context_features is in the\n tensor dict.\n \"\"\"\n if not spatial_image_shape or spatial_image_shape == [-1, -1]:\n height, width = None, None\n else:\n height, width = spatial_image_shape # pylint: disable=unpacking-non-sequence\n\n input_fields = fields.InputDataFields\n num_additional_channels = 0\n if input_fields.image_additional_channels in tensor_dict:\n num_additional_channels = shape_utils.get_dim_as_int(tensor_dict[\n input_fields.image_additional_channels].shape[2])\n\n # We assume that if num_additional_channels > 0, then it has already been\n # concatenated to the base image (but not the ground truth).\n num_channels = 3\n if input_fields.image in tensor_dict:\n num_channels = shape_utils.get_dim_as_int(\n tensor_dict[input_fields.image].shape[2])\n\n if num_additional_channels:\n if num_additional_channels >= num_channels:\n raise ValueError(\n 'Image must be already concatenated with additional channels.')\n\n if (input_fields.original_image in tensor_dict and\n shape_utils.get_dim_as_int(\n tensor_dict[input_fields.original_image].shape[2]) ==\n num_channels):\n raise ValueError(\n 'Image must be already concatenated with additional channels.')\n\n if input_fields.context_features in tensor_dict and (\n max_num_context_features is None):\n raise ValueError('max_num_context_features must be specified in the model '\n 'config if include_context is specified in the input '\n 'config')\n\n padding_shapes = {\n input_fields.image: [height, width, num_channels],\n input_fields.original_image_spatial_shape: [2],\n input_fields.image_additional_channels: [\n height, width, num_additional_channels\n ],\n input_fields.source_id: [],\n input_fields.filename: [],\n input_fields.key: [],\n input_fields.groundtruth_difficult: [max_num_boxes],\n input_fields.groundtruth_boxes: [max_num_boxes, 4],\n input_fields.groundtruth_classes: [max_num_boxes, num_classes],\n input_fields.groundtruth_instance_masks: [\n max_num_boxes, height, width\n ],\n input_fields.groundtruth_is_crowd: [max_num_boxes],\n input_fields.groundtruth_group_of: [max_num_boxes],\n input_fields.groundtruth_area: [max_num_boxes],\n input_fields.groundtruth_weights: [max_num_boxes],\n input_fields.groundtruth_confidences: [\n max_num_boxes, num_classes\n ],\n input_fields.num_groundtruth_boxes: [],\n input_fields.groundtruth_label_types: [max_num_boxes],\n input_fields.groundtruth_label_weights: [max_num_boxes],\n input_fields.true_image_shape: [3],\n input_fields.groundtruth_image_classes: [num_classes],\n input_fields.groundtruth_image_confidences: [num_classes],\n input_fields.groundtruth_labeled_classes: [num_classes],\n }\n\n if input_fields.original_image in tensor_dict:\n padding_shapes[input_fields.original_image] = [\n height, width,\n shape_utils.get_dim_as_int(tensor_dict[input_fields.\n original_image].shape[2])\n ]\n if input_fields.groundtruth_keypoints in tensor_dict:\n tensor_shape = (\n tensor_dict[input_fields.groundtruth_keypoints].shape)\n padding_shape 
= [max_num_boxes,\n shape_utils.get_dim_as_int(tensor_shape[1]),\n shape_utils.get_dim_as_int(tensor_shape[2])]\n padding_shapes[input_fields.groundtruth_keypoints] = padding_shape\n if input_fields.groundtruth_keypoint_visibilities in tensor_dict:\n tensor_shape = tensor_dict[input_fields.\n groundtruth_keypoint_visibilities].shape\n padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])]\n padding_shapes[input_fields.\n groundtruth_keypoint_visibilities] = padding_shape\n\n if input_fields.groundtruth_keypoint_weights in tensor_dict:\n tensor_shape = (\n tensor_dict[input_fields.groundtruth_keypoint_weights].shape)\n padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])]\n padding_shapes[input_fields.\n groundtruth_keypoint_weights] = padding_shape\n if input_fields.groundtruth_dp_num_points in tensor_dict:\n padding_shapes[\n input_fields.groundtruth_dp_num_points] = [max_num_boxes]\n padding_shapes[\n input_fields.groundtruth_dp_part_ids] = [\n max_num_boxes, max_dp_points]\n padding_shapes[\n input_fields.groundtruth_dp_surface_coords] = [\n max_num_boxes, max_dp_points, 4]\n if input_fields.groundtruth_track_ids in tensor_dict:\n padding_shapes[\n input_fields.groundtruth_track_ids] = [max_num_boxes]\n\n if input_fields.groundtruth_verified_neg_classes in tensor_dict:\n padding_shapes[\n input_fields.groundtruth_verified_neg_classes] = [num_classes]\n if input_fields.groundtruth_not_exhaustive_classes in tensor_dict:\n padding_shapes[\n input_fields.groundtruth_not_exhaustive_classes] = [num_classes]\n\n # Prepare for ContextRCNN related fields.\n if input_fields.context_features in tensor_dict:\n padding_shape = [max_num_context_features, context_feature_length]\n padding_shapes[input_fields.context_features] = padding_shape\n\n tensor_shape = tf.shape(\n tensor_dict[fields.InputDataFields.context_features])\n tensor_dict[fields.InputDataFields.valid_context_size] = tensor_shape[0]\n padding_shapes[fields.InputDataFields.valid_context_size] = []\n if fields.InputDataFields.context_feature_length in tensor_dict:\n padding_shapes[fields.InputDataFields.context_feature_length] = []\n if fields.InputDataFields.context_features_image_id_list in tensor_dict:\n padding_shapes[fields.InputDataFields.context_features_image_id_list] = [\n max_num_context_features]\n\n if input_fields.is_annotated in tensor_dict:\n padding_shapes[input_fields.is_annotated] = []\n\n padded_tensor_dict = {}\n for tensor_name in tensor_dict:\n padded_tensor_dict[tensor_name] = shape_utils.pad_or_clip_nd(\n tensor_dict[tensor_name], padding_shapes[tensor_name])\n\n # Make sure that the number of groundtruth boxes now reflects the\n # padded/clipped tensors.\n if input_fields.num_groundtruth_boxes in padded_tensor_dict:\n padded_tensor_dict[input_fields.num_groundtruth_boxes] = (\n tf.minimum(\n padded_tensor_dict[input_fields.num_groundtruth_boxes],\n max_num_boxes))\n return padded_tensor_dict\n\n\ndef augment_input_data(tensor_dict, data_augmentation_options):\n \"\"\"Applies data augmentation ops to input tensors.\n\n Args:\n tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields.\n data_augmentation_options: A list of tuples, where each tuple contains a\n function and a dictionary that contains arguments and their values.\n Usually, this is the output of core/preprocessor.build.\n\n Returns:\n A dictionary of tensors obtained by applying data augmentation ops to the\n input tensor dictionary.\n \"\"\"\n tensor_dict[fields.InputDataFields.image] = 
tf.expand_dims(\n tf.cast(tensor_dict[fields.InputDataFields.image], dtype=tf.float32), 0)\n\n include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks\n in tensor_dict)\n include_keypoints = (fields.InputDataFields.groundtruth_keypoints\n in tensor_dict)\n include_keypoint_visibilities = (\n fields.InputDataFields.groundtruth_keypoint_visibilities in tensor_dict)\n include_label_weights = (fields.InputDataFields.groundtruth_weights\n in tensor_dict)\n include_label_confidences = (fields.InputDataFields.groundtruth_confidences\n in tensor_dict)\n include_multiclass_scores = (fields.InputDataFields.multiclass_scores in\n tensor_dict)\n dense_pose_fields = [fields.InputDataFields.groundtruth_dp_num_points,\n fields.InputDataFields.groundtruth_dp_part_ids,\n fields.InputDataFields.groundtruth_dp_surface_coords]\n include_dense_pose = all(field in tensor_dict for field in dense_pose_fields)\n tensor_dict = preprocessor.preprocess(\n tensor_dict, data_augmentation_options,\n func_arg_map=preprocessor.get_default_func_arg_map(\n include_label_weights=include_label_weights,\n include_label_confidences=include_label_confidences,\n include_multiclass_scores=include_multiclass_scores,\n include_instance_masks=include_instance_masks,\n include_keypoints=include_keypoints,\n include_keypoint_visibilities=include_keypoint_visibilities,\n include_dense_pose=include_dense_pose))\n tensor_dict[fields.InputDataFields.image] = tf.squeeze(\n tensor_dict[fields.InputDataFields.image], axis=0)\n return tensor_dict\n\n\ndef _get_labels_dict(input_dict):\n \"\"\"Extracts labels dict from input dict.\"\"\"\n required_label_keys = [\n fields.InputDataFields.num_groundtruth_boxes,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n fields.InputDataFields.groundtruth_weights,\n ]\n labels_dict = {}\n for key in required_label_keys:\n labels_dict[key] = input_dict[key]\n\n optional_label_keys = [\n fields.InputDataFields.groundtruth_confidences,\n fields.InputDataFields.groundtruth_labeled_classes,\n fields.InputDataFields.groundtruth_keypoints,\n fields.InputDataFields.groundtruth_instance_masks,\n fields.InputDataFields.groundtruth_area,\n fields.InputDataFields.groundtruth_is_crowd,\n fields.InputDataFields.groundtruth_group_of,\n fields.InputDataFields.groundtruth_difficult,\n fields.InputDataFields.groundtruth_keypoint_visibilities,\n fields.InputDataFields.groundtruth_keypoint_weights,\n fields.InputDataFields.groundtruth_dp_num_points,\n fields.InputDataFields.groundtruth_dp_part_ids,\n fields.InputDataFields.groundtruth_dp_surface_coords,\n fields.InputDataFields.groundtruth_track_ids,\n fields.InputDataFields.groundtruth_verified_neg_classes,\n fields.InputDataFields.groundtruth_not_exhaustive_classes\n ]\n\n for key in optional_label_keys:\n if key in input_dict:\n labels_dict[key] = input_dict[key]\n if fields.InputDataFields.groundtruth_difficult in labels_dict:\n labels_dict[fields.InputDataFields.groundtruth_difficult] = tf.cast(\n labels_dict[fields.InputDataFields.groundtruth_difficult], tf.int32)\n return labels_dict\n\n\ndef _replace_empty_string_with_random_number(string_tensor):\n \"\"\"Returns string unchanged if non-empty, and random string tensor otherwise.\n\n The random string is an integer 0 and 2**63 - 1, casted as string.\n\n\n Args:\n string_tensor: A tf.tensor of dtype string.\n\n Returns:\n out_string: A tf.tensor of dtype string. 
If string_tensor contains the empty\n string, out_string will contain a random integer casted to a string.\n Otherwise string_tensor is returned unchanged.\n\n \"\"\"\n\n empty_string = tf.constant('', dtype=tf.string, name='EmptyString')\n\n random_source_id = tf.as_string(\n tf.random_uniform(shape=[], maxval=2**63 - 1, dtype=tf.int64))\n\n out_string = tf.cond(\n tf.equal(string_tensor, empty_string),\n true_fn=lambda: random_source_id,\n false_fn=lambda: string_tensor)\n\n return out_string\n\n\ndef _get_features_dict(input_dict, include_source_id=False):\n \"\"\"Extracts features dict from input dict.\"\"\"\n\n source_id = _replace_empty_string_with_random_number(\n input_dict[fields.InputDataFields.source_id])\n\n hash_from_source_id = tf.string_to_hash_bucket_fast(source_id, HASH_BINS)\n features = {\n fields.InputDataFields.image:\n input_dict[fields.InputDataFields.image],\n HASH_KEY: tf.cast(hash_from_source_id, tf.int32),\n fields.InputDataFields.true_image_shape:\n input_dict[fields.InputDataFields.true_image_shape],\n fields.InputDataFields.original_image_spatial_shape:\n input_dict[fields.InputDataFields.original_image_spatial_shape]\n }\n if include_source_id:\n features[fields.InputDataFields.source_id] = source_id\n if fields.InputDataFields.original_image in input_dict:\n features[fields.InputDataFields.original_image] = input_dict[\n fields.InputDataFields.original_image]\n if fields.InputDataFields.image_additional_channels in input_dict:\n features[fields.InputDataFields.image_additional_channels] = input_dict[\n fields.InputDataFields.image_additional_channels]\n if fields.InputDataFields.context_features in input_dict:\n features[fields.InputDataFields.context_features] = input_dict[\n fields.InputDataFields.context_features]\n if fields.InputDataFields.valid_context_size in input_dict:\n features[fields.InputDataFields.valid_context_size] = input_dict[\n fields.InputDataFields.valid_context_size]\n if fields.InputDataFields.context_features_image_id_list in input_dict:\n features[fields.InputDataFields.context_features_image_id_list] = (\n input_dict[fields.InputDataFields.context_features_image_id_list])\n return features\n\n\ndef create_train_input_fn(train_config, train_input_config,\n model_config):\n \"\"\"Creates a train `input` function for `Estimator`.\n\n Args:\n train_config: A train_pb2.TrainConfig.\n train_input_config: An input_reader_pb2.InputReader.\n model_config: A model_pb2.DetectionModel.\n\n Returns:\n `input_fn` for `Estimator` in TRAIN mode.\n \"\"\"\n\n def _train_input_fn(params=None):\n return train_input(train_config, train_input_config, model_config,\n params=params)\n\n return _train_input_fn\n\n\ndef train_input(train_config, train_input_config,\n model_config, model=None, params=None, input_context=None):\n \"\"\"Returns `features` and `labels` tensor dictionaries for training.\n\n Args:\n train_config: A train_pb2.TrainConfig.\n train_input_config: An input_reader_pb2.InputReader.\n model_config: A model_pb2.DetectionModel.\n model: A pre-constructed Detection Model.\n If None, one will be created from the config.\n params: Parameter dictionary passed from the estimator.\n input_context: optional, A tf.distribute.InputContext object used to\n shard filenames and compute per-replica batch_size when this function\n is being called per-replica.\n\n Returns:\n A tf.data.Dataset that holds (features, labels) tuple.\n\n features: Dictionary of feature tensors.\n features[fields.InputDataFields.image] is a [batch_size, H, W, C]\n float32 
tensor with preprocessed images.\n features[HASH_KEY] is a [batch_size] int32 tensor representing unique\n identifiers for the images.\n features[fields.InputDataFields.true_image_shape] is a [batch_size, 3]\n int32 tensor representing the true image shapes, as preprocessed\n images could be padded.\n features[fields.InputDataFields.original_image] (optional) is a\n [batch_size, H, W, C] float32 tensor with original images.\n labels: Dictionary of groundtruth tensors.\n labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size]\n int32 tensor indicating the number of groundtruth boxes.\n labels[fields.InputDataFields.groundtruth_boxes] is a\n [batch_size, num_boxes, 4] float32 tensor containing the corners of\n the groundtruth boxes.\n labels[fields.InputDataFields.groundtruth_classes] is a\n [batch_size, num_boxes, num_classes] float32 one-hot tensor of\n classes.\n labels[fields.InputDataFields.groundtruth_weights] is a\n [batch_size, num_boxes] float32 tensor containing groundtruth weights\n for the boxes.\n -- Optional --\n labels[fields.InputDataFields.groundtruth_instance_masks] is a\n [batch_size, num_boxes, H, W] float32 tensor containing only binary\n values, which represent instance masks for objects.\n labels[fields.InputDataFields.groundtruth_keypoints] is a\n [batch_size, num_boxes, num_keypoints, 2] float32 tensor containing\n keypoints for each box.\n labels[fields.InputDataFields.groundtruth_weights] is a\n [batch_size, num_boxes, num_keypoints] float32 tensor containing\n groundtruth weights for the keypoints.\n labels[fields.InputDataFields.groundtruth_visibilities] is a\n [batch_size, num_boxes, num_keypoints] bool tensor containing\n groundtruth visibilities for each keypoint.\n labels[fields.InputDataFields.groundtruth_labeled_classes] is a\n [batch_size, num_classes] float32 k-hot tensor of classes.\n labels[fields.InputDataFields.groundtruth_dp_num_points] is a\n [batch_size, num_boxes] int32 tensor with the number of sampled\n DensePose points per object.\n labels[fields.InputDataFields.groundtruth_dp_part_ids] is a\n [batch_size, num_boxes, max_sampled_points] int32 tensor with the\n DensePose part ids (0-indexed) per object.\n labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a\n [batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the\n DensePose surface coordinates. 
The format is (y, x, v, u), where (y, x)\n are normalized image coordinates and (v, u) are normalized surface part\n coordinates.\n labels[fields.InputDataFields.groundtruth_track_ids] is a\n [batch_size, num_boxes] int32 tensor with the track ID for each object.\n\n Raises:\n TypeError: if the `train_config`, `train_input_config` or `model_config`\n are not of the correct type.\n \"\"\"\n if not isinstance(train_config, train_pb2.TrainConfig):\n raise TypeError('For training mode, the `train_config` must be a '\n 'train_pb2.TrainConfig.')\n if not isinstance(train_input_config, input_reader_pb2.InputReader):\n raise TypeError('The `train_input_config` must be a '\n 'input_reader_pb2.InputReader.')\n if not isinstance(model_config, model_pb2.DetectionModel):\n raise TypeError('The `model_config` must be a '\n 'model_pb2.DetectionModel.')\n\n if model is None:\n model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](\n model_config, is_training=True).preprocess\n else:\n model_preprocess_fn = model.preprocess\n\n num_classes = config_util.get_number_of_classes(model_config)\n\n def transform_and_pad_input_data_fn(tensor_dict):\n \"\"\"Combines transform and pad operation.\"\"\"\n data_augmentation_options = [\n preprocessor_builder.build(step)\n for step in train_config.data_augmentation_options\n ]\n data_augmentation_fn = functools.partial(\n augment_input_data,\n data_augmentation_options=data_augmentation_options)\n\n image_resizer_config = config_util.get_image_resizer_config(model_config)\n image_resizer_fn = image_resizer_builder.build(image_resizer_config)\n keypoint_type_weight = train_input_config.keypoint_type_weight or None\n transform_data_fn = functools.partial(\n transform_input_data, model_preprocess_fn=model_preprocess_fn,\n image_resizer_fn=image_resizer_fn,\n num_classes=num_classes,\n data_augmentation_fn=data_augmentation_fn,\n merge_multiple_boxes=train_config.merge_multiple_label_boxes,\n retain_original_image=train_config.retain_original_images,\n use_multiclass_scores=train_config.use_multiclass_scores,\n use_bfloat16=train_config.use_bfloat16,\n keypoint_type_weight=keypoint_type_weight)\n\n tensor_dict = pad_input_data_to_static_shapes(\n tensor_dict=transform_data_fn(tensor_dict),\n max_num_boxes=train_input_config.max_number_of_boxes,\n num_classes=num_classes,\n spatial_image_shape=config_util.get_spatial_image_size(\n image_resizer_config),\n max_num_context_features=config_util.get_max_num_context_features(\n model_config),\n context_feature_length=config_util.get_context_feature_length(\n model_config))\n include_source_id = train_input_config.include_source_id\n return (_get_features_dict(tensor_dict, include_source_id),\n _get_labels_dict(tensor_dict))\n reduce_to_frame_fn = get_reduce_to_frame_fn(train_input_config, True)\n\n dataset = INPUT_BUILDER_UTIL_MAP['dataset_build'](\n train_input_config,\n transform_input_data_fn=transform_and_pad_input_data_fn,\n batch_size=params['batch_size'] if params else train_config.batch_size,\n input_context=input_context,\n reduce_to_frame_fn=reduce_to_frame_fn)\n return dataset\n\n\ndef create_eval_input_fn(eval_config, eval_input_config, model_config):\n \"\"\"Creates an eval `input` function for `Estimator`.\n\n Args:\n eval_config: An eval_pb2.EvalConfig.\n eval_input_config: An input_reader_pb2.InputReader.\n model_config: A model_pb2.DetectionModel.\n\n Returns:\n `input_fn` for `Estimator` in EVAL mode.\n \"\"\"\n\n def _eval_input_fn(params=None):\n return eval_input(eval_config, eval_input_config, 
model_config,\n params=params)\n\n return _eval_input_fn\n\n\ndef eval_input(eval_config, eval_input_config, model_config,\n model=None, params=None, input_context=None):\n \"\"\"Returns `features` and `labels` tensor dictionaries for evaluation.\n\n Args:\n eval_config: An eval_pb2.EvalConfig.\n eval_input_config: An input_reader_pb2.InputReader.\n model_config: A model_pb2.DetectionModel.\n model: A pre-constructed Detection Model.\n If None, one will be created from the config.\n params: Parameter dictionary passed from the estimator.\n input_context: optional, A tf.distribute.InputContext object used to\n shard filenames and compute per-replica batch_size when this function\n is being called per-replica.\n\n Returns:\n A tf.data.Dataset that holds (features, labels) tuple.\n\n features: Dictionary of feature tensors.\n features[fields.InputDataFields.image] is a [1, H, W, C] float32 tensor\n with preprocessed images.\n features[HASH_KEY] is a [1] int32 tensor representing unique\n identifiers for the images.\n features[fields.InputDataFields.true_image_shape] is a [1, 3]\n int32 tensor representing the true image shapes, as preprocessed\n images could be padded.\n features[fields.InputDataFields.original_image] is a [1, H', W', C]\n float32 tensor with the original image.\n labels: Dictionary of groundtruth tensors.\n labels[fields.InputDataFields.groundtruth_boxes] is a [1, num_boxes, 4]\n float32 tensor containing the corners of the groundtruth boxes.\n labels[fields.InputDataFields.groundtruth_classes] is a\n [num_boxes, num_classes] float32 one-hot tensor of classes.\n labels[fields.InputDataFields.groundtruth_area] is a [1, num_boxes]\n float32 tensor containing object areas.\n labels[fields.InputDataFields.groundtruth_is_crowd] is a [1, num_boxes]\n bool tensor indicating if the boxes enclose a crowd.\n labels[fields.InputDataFields.groundtruth_difficult] is a [1, num_boxes]\n int32 tensor indicating if the boxes represent difficult instances.\n -- Optional --\n labels[fields.InputDataFields.groundtruth_instance_masks] is a\n [1, num_boxes, H, W] float32 tensor containing only binary values,\n which represent instance masks for objects.\n labels[fields.InputDataFields.groundtruth_weights] is a\n [batch_size, num_boxes, num_keypoints] float32 tensor containing\n groundtruth weights for the keypoints.\n labels[fields.InputDataFields.groundtruth_visibilities] is a\n [batch_size, num_boxes, num_keypoints] bool tensor containing\n groundtruth visibilities for each keypoint.\n labels[fields.InputDataFields.groundtruth_group_of] is a [1, num_boxes]\n bool tensor indicating if the box covers more than 5 instances of the\n same class which heavily occlude each other.\n labels[fields.InputDataFields.groundtruth_labeled_classes] is a\n [num_boxes, num_classes] float32 k-hot tensor of classes.\n labels[fields.InputDataFields.groundtruth_dp_num_points] is a\n [batch_size, num_boxes] int32 tensor with the number of sampled\n DensePose points per object.\n labels[fields.InputDataFields.groundtruth_dp_part_ids] is a\n [batch_size, num_boxes, max_sampled_points] int32 tensor with the\n DensePose part ids (0-indexed) per object.\n labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a\n [batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the\n DensePose surface coordinates. 
The format is (y, x, v, u), where (y, x)\n are normalized image coordinates and (v, u) are normalized surface part\n coordinates.\n labels[fields.InputDataFields.groundtruth_track_ids] is a\n [batch_size, num_boxes] int32 tensor with the track ID for each object.\n\n Raises:\n TypeError: if the `eval_config`, `eval_input_config` or `model_config`\n are not of the correct type.\n \"\"\"\n params = params or {}\n if not isinstance(eval_config, eval_pb2.EvalConfig):\n raise TypeError('For eval mode, the `eval_config` must be a '\n 'train_pb2.EvalConfig.')\n if not isinstance(eval_input_config, input_reader_pb2.InputReader):\n raise TypeError('The `eval_input_config` must be a '\n 'input_reader_pb2.InputReader.')\n if not isinstance(model_config, model_pb2.DetectionModel):\n raise TypeError('The `model_config` must be a '\n 'model_pb2.DetectionModel.')\n\n if eval_config.force_no_resize:\n arch = model_config.WhichOneof('model')\n arch_config = getattr(model_config, arch)\n image_resizer_proto = image_resizer_pb2.ImageResizer()\n image_resizer_proto.identity_resizer.CopyFrom(\n image_resizer_pb2.IdentityResizer())\n arch_config.image_resizer.CopyFrom(image_resizer_proto)\n\n if model is None:\n model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](\n model_config, is_training=False).preprocess\n else:\n model_preprocess_fn = model.preprocess\n\n def transform_and_pad_input_data_fn(tensor_dict):\n \"\"\"Combines transform and pad operation.\"\"\"\n num_classes = config_util.get_number_of_classes(model_config)\n\n image_resizer_config = config_util.get_image_resizer_config(model_config)\n image_resizer_fn = image_resizer_builder.build(image_resizer_config)\n keypoint_type_weight = eval_input_config.keypoint_type_weight or None\n\n data_augmentation_options = [\n preprocessor_builder.build(step)\n for step in eval_config.data_augmentation_options\n ]\n data_augmentation_fn = functools.partial(\n augment_input_data,\n data_augmentation_options=data_augmentation_options)\n\n transform_data_fn = functools.partial(\n transform_input_data, model_preprocess_fn=model_preprocess_fn,\n image_resizer_fn=image_resizer_fn,\n num_classes=num_classes,\n data_augmentation_fn=data_augmentation_fn,\n retain_original_image=eval_config.retain_original_images,\n retain_original_image_additional_channels=\n eval_config.retain_original_image_additional_channels,\n keypoint_type_weight=keypoint_type_weight)\n tensor_dict = pad_input_data_to_static_shapes(\n tensor_dict=transform_data_fn(tensor_dict),\n max_num_boxes=eval_input_config.max_number_of_boxes,\n num_classes=config_util.get_number_of_classes(model_config),\n spatial_image_shape=config_util.get_spatial_image_size(\n image_resizer_config),\n max_num_context_features=config_util.get_max_num_context_features(\n model_config),\n context_feature_length=config_util.get_context_feature_length(\n model_config))\n include_source_id = eval_input_config.include_source_id\n return (_get_features_dict(tensor_dict, include_source_id),\n _get_labels_dict(tensor_dict))\n\n reduce_to_frame_fn = get_reduce_to_frame_fn(eval_input_config, False)\n\n dataset = INPUT_BUILDER_UTIL_MAP['dataset_build'](\n eval_input_config,\n batch_size=params['batch_size'] if params else eval_config.batch_size,\n transform_input_data_fn=transform_and_pad_input_data_fn,\n input_context=input_context,\n reduce_to_frame_fn=reduce_to_frame_fn)\n return dataset\n\n\ndef create_predict_input_fn(model_config, predict_input_config):\n \"\"\"Creates a predict `input` function for `Estimator`.\n\n 
Args:\n model_config: A model_pb2.DetectionModel.\n predict_input_config: An input_reader_pb2.InputReader.\n\n Returns:\n `input_fn` for `Estimator` in PREDICT mode.\n \"\"\"\n\n def _predict_input_fn(params=None):\n \"\"\"Decodes serialized tf.Examples and returns `ServingInputReceiver`.\n\n Args:\n params: Parameter dictionary passed from the estimator.\n\n Returns:\n `ServingInputReceiver`.\n \"\"\"\n del params\n example = tf.placeholder(dtype=tf.string, shape=[], name='tf_example')\n\n num_classes = config_util.get_number_of_classes(model_config)\n model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](\n model_config, is_training=False).preprocess\n\n image_resizer_config = config_util.get_image_resizer_config(model_config)\n image_resizer_fn = image_resizer_builder.build(image_resizer_config)\n\n transform_fn = functools.partial(\n transform_input_data, model_preprocess_fn=model_preprocess_fn,\n image_resizer_fn=image_resizer_fn,\n num_classes=num_classes,\n data_augmentation_fn=None)\n\n decoder = tf_example_decoder.TfExampleDecoder(\n load_instance_masks=False,\n num_additional_channels=predict_input_config.num_additional_channels)\n input_dict = transform_fn(decoder.decode(example))\n images = tf.cast(input_dict[fields.InputDataFields.image], dtype=tf.float32)\n images = tf.expand_dims(images, axis=0)\n true_image_shape = tf.expand_dims(\n input_dict[fields.InputDataFields.true_image_shape], axis=0)\n\n return tf.estimator.export.ServingInputReceiver(\n features={\n fields.InputDataFields.image: images,\n fields.InputDataFields.true_image_shape: true_image_shape},\n receiver_tensors={SERVING_FED_EXAMPLE_KEY: example})\n\n return _predict_input_fn\n\n\ndef get_reduce_to_frame_fn(input_reader_config, is_training):\n \"\"\"Returns a function reducing sequence tensors to single frame tensors.\n\n If the input type is not TF_SEQUENCE_EXAMPLE, the tensors are passed through\n this function unchanged. Otherwise, when in training mode, a single frame is\n selected at random from the sequence example, and the tensors for that frame\n are converted to single frame tensors, with all associated context features.\n In evaluation mode all frames are converted to single frame tensors with\n copied context tensors. After the sequence example tensors are converted into\n one or many single frame tensors, the images from each frame are decoded.\n\n Args:\n input_reader_config: An input_reader_pb2.InputReader.\n is_training: Whether we are in training mode.\n\n Returns:\n `reduce_to_frame_fn` for the dataset builder\n \"\"\"\n if input_reader_config.input_type != (\n input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE')):\n return lambda dataset, dataset_map_fn, batch_size, config: dataset\n else:\n def reduce_to_frame(dataset, dataset_map_fn, batch_size,\n input_reader_config):\n \"\"\"Returns a function reducing sequence tensors to single frame tensors.\n\n Args:\n dataset: A tf dataset containing sequence tensors.\n dataset_map_fn: A function that handles whether to\n map_with_legacy_function for this dataset\n batch_size: used if map_with_legacy_function is true to determine\n num_parallel_calls\n input_reader_config: used if map_with_legacy_function is true to\n determine num_parallel_calls\n\n Returns:\n A tf dataset containing single frame tensors.\n \"\"\"\n if is_training:\n def get_single_frame(tensor_dict):\n \"\"\"Returns a random frame from a sequence.\n\n Picks a random frame and returns slices of sequence tensors\n corresponding to the random frame. 
Returns non-sequence tensors\n unchanged.\n\n Args:\n tensor_dict: A dictionary containing sequence tensors.\n\n Returns:\n Tensors for a single random frame within the sequence.\n \"\"\"\n num_frames = tf.cast(\n tf.shape(tensor_dict[fields.InputDataFields.source_id])[0],\n dtype=tf.int32)\n if input_reader_config.frame_index == -1:\n frame_index = tf.random.uniform((), minval=0, maxval=num_frames,\n dtype=tf.int32)\n else:\n frame_index = tf.constant(input_reader_config.frame_index,\n dtype=tf.int32)\n out_tensor_dict = {}\n for key in tensor_dict:\n if key in fields.SEQUENCE_FIELDS:\n # Slice random frame from sequence tensors\n out_tensor_dict[key] = tensor_dict[key][frame_index]\n else:\n # Copy all context tensors.\n out_tensor_dict[key] = tensor_dict[key]\n return out_tensor_dict\n dataset = dataset_map_fn(dataset, get_single_frame, batch_size,\n input_reader_config)\n else:\n dataset = dataset_map_fn(dataset, util_ops.tile_context_tensors,\n batch_size, input_reader_config)\n dataset = dataset.unbatch()\n # Decode frame here as SequenceExample tensors contain encoded images.\n dataset = dataset_map_fn(dataset, util_ops.decode_image, batch_size,\n input_reader_config)\n return dataset\n return reduce_to_frame\n" ]
[ [ "tensorflow.compat.v1.string_to_hash_bucket_fast", "tensorflow.compat.v1.ones_like", "tensorflow.compat.v1.equal", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.estimator.export.ServingInputReceiver", "tensorflow.compat.v1.to_float", "tensorflow.compat.v1.ones", "tensorflow.compat.v1.constant", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.random.uniform", "tensorflow.compat.v1.split", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.greater", "tensorflow.compat.v1.control_dependencies", "tensorflow.compat.v1.expand_dims", "tensorflow.compat.v1.gather", "tensorflow.compat.v1.reduce_all", "tensorflow.compat.v1.random_uniform", "tensorflow.compat.v1.squeeze", "tensorflow.compat.v1.one_hot", "tensorflow.compat.v1.size", "tensorflow.compat.v1.concat", "tensorflow.compat.v1.minimum" ] ]
SinisterFour/FinancialOracleServer
[ "54bc7d6845b340c0bcc991f94c53c07292135ed4" ]
[ "app/test_endpoint.py" ]
[ "import requests\nimport json\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n\nPROD = \"TODO: SET THIS WHEN AVAILABLE\"\nLOCALHOST = \"http://127.0.0.1:5000\"\n\ndef get_linear_regression_test(environment):\n link = environment + \"/linear_reggression\"\n reader = np.genfromtxt(\"./datasets/imdb-movies-clean-data.csv\", delimiter=\",\")\n \n params = {\n \"train\": [],\n \"test\": [],\n \"train_inpc\": [],\n \"test_inpc\": [],\n }\n\n params[\"train\"] = reader[1:, :-2].tolist() # Train\n params[\"train_inpc\"] = reader[1:, -1:].tolist() # Train inpc\n params[\"test_inpc\"] = [153, 26435, 6000000, 1927]\n\n req = requests.get(url=link, json=params)\n # print(json.loads(req.content.decode(\"utf-8\")))\n print(req.content.decode(\"utf-8\"))\n\n\ndef get_polynomial_regression_test(environment):\n link = environment + \"/linear_reggression\"\n params = {\n \"train\": [\n [\"Columna1\", \"Columna2\", \"Columna3\", \"...\"],\n [1, 1, 2000, 48.80116016],\n [2, 1, 2000, 49.1371793],\n [1, 2, 2000, 49.41320941],\n ],\n \"test\": [\n [\"Columna1\", \"Columna2\", \"Columna3\", \"...\"],\n [1, 1, 2000, 48.80116016],\n [2, 1, 2000, 49.1371793],\n [1, 2, 2000, 49.41320941],\n ],\n \"train_inpc\": [0, 1, 2, 3, 2],\n \"test_inpc\": [0, 1, 2, 3, 2],\n }\n\n req = requests.get(url=link, json=params)\n data = req.json\n print(json.loads(req.content))\n\n\nget_linear_regression_test(LOCALHOST)\n" ]
[ [ "numpy.genfromtxt" ] ]
oliverphilcox/ChempyMulti
[ "1ab0d0c56a03c4f4b710ee8f0142bcccc7e84e22" ]
[ "Chempy/wrapper.py" ]
[ "import numpy as np \nfrom .weighted_yield import SSP, lifetime_Argast, lifetime_Raiteri\nfrom .imf import IMF\nfrom .yields import SN2_feedback, AGB_feedback, SN1a_feedback, Hypernova_feedback\n\nclass SSP_wrap():\n '''\n This is the wrapper around the SSP function. It preloads the needed classes and calls all nucleosynthetic enrichment processes when the enrichment is calculated.\n '''\n def __init__(self, a):\n '''\n Upon initialization the default IMF, CC-SN yields, SN Ia yields and AGB yields is loaded.\n\n INPUT:\n \n a = Modelparameter class. So the default IMF etc are loaded. If we want other yield sets etc. loaded we need to specify that in paramter.py\n '''\n\n ## loading the IMF and the yieldsets prescribed in a (containing all the model parameters)\n basic_imf = IMF(a.mmin,a.mmax,a.mass_steps)\n getattr(basic_imf, a.imf_type_name)(a.imf_parameter)\n basic_sn2 = SN2_feedback()\n getattr(basic_sn2, a.yield_table_name_sn2)()\n basic_1a = SN1a_feedback()\n getattr(basic_1a, a.yield_table_name_1a)()\n basic_agb = AGB_feedback()\n getattr(basic_agb, a.yield_table_name_agb)()\n ## mixing of Nomoto CC-SN and HN yields\n if a.yield_table_name_sn2 == 'Nomoto2013':\n basic_hn = Hypernova_feedback()\n getattr(basic_hn, a.yield_table_name_hn)()\n for item in basic_sn2.metallicities:\n x = np.copy(basic_sn2.table[item])\n y = np.copy(basic_hn.table[item])\n for jtem in basic_hn.masses:\n basic_sn2.table[item]['mass_in_remnants'][np.where(basic_sn2.table[item]['Mass']==jtem)] = a.sn2_to_hn * (x['mass_in_remnants'][np.where(x['Mass']==jtem)]) + (1-a.sn2_to_hn) * (y['mass_in_remnants'][np.where(y['Mass']==jtem)])\n basic_sn2.table[item]['unprocessed_mass_in_winds'][np.where(basic_sn2.table[item]['Mass']==jtem)] = a.sn2_to_hn * (x['unprocessed_mass_in_winds'][np.where(x['Mass']==jtem)]) + (1-a.sn2_to_hn) * (y['unprocessed_mass_in_winds'][np.where(y['Mass']==jtem)])\n hn_mass = []\n sn_mass = []\n for stem in basic_sn2.elements:\n sn_mass.append(x[stem][np.where(x['Mass']==jtem)])\n hn_mass.append(y[stem][np.where(y['Mass']==jtem)])\n basic_sn2.table[item][stem][np.where(basic_sn2.table[item]['Mass']==jtem)]= a.sn2_to_hn * (x[stem][np.where(x['Mass']==jtem)]) + (1-a.sn2_to_hn) * (y[stem][np.where(y['Mass']==jtem)])\n ## to pass the information on to the feedback calculation\n self.a = a\n self.imf = basic_imf\n self.sn2 = basic_sn2\n self.sn1a = basic_1a\n self.agb = basic_agb\n\n def calculate_feedback(self, z, elements, element_fractions, time_steps):\n '''\n The feedback is calculated for the initializes SSP.\n\n INPUT:\n \n z = metallicity of the SSP in mass fraction (not normed to solar!)\n \n elements = which elements to follow\n \n element_fractions = the birth material of the SSP in the same order as 'elements'\n \n time_steps = the time-steps for which the enrichment of the SSP should be calculated (usually the time-steps until the end of the chempy simulation)\n '''\n basic_ssp = SSP(False, float(z), np.copy(self.imf.x), np.copy(self.imf.dm), np.copy(self.imf.dn), np.copy(time_steps), list(elements), str(self.a.stellar_lifetimes), str(self.a.interpolation_scheme), bool(self.a.only_net_yields_in_process_tables))\n basic_ssp.sn2_feedback(list(self.sn2.elements), dict(self.sn2.table), np.copy(self.sn2.metallicities), float(self.a.sn2mmin), float(self.a.sn2mmax),list(element_fractions))\n basic_ssp.agb_feedback(list(self.agb.elements), dict(self.agb.table), list(self.agb.metallicities), float(self.a.agbmmin), float(self.a.agbmmax),np.hstack(element_fractions))\n 
basic_ssp.sn1a_feedback(list(self.sn1a.elements), list(self.sn1a.metallicities), dict(self.sn1a.table), str(self.a.time_delay_functional_form), float(self.a.sn1ammin), float(self.a.sn1ammax), self.a.sn1a_parameter, float(self.a.total_mass), bool(self.a.stochastic_IMF))\n basic_ssp.bh_feedback(float(self.a.bhmmin),float(self.a.bhmmax),list(elements), np.hstack(element_fractions) , float(self.a.percentage_of_bh_mass))\n \n # exposing these tables to the outside wrapper\n self.table = basic_ssp.table\n self.sn2_table = basic_ssp.sn2_table\n self.agb_table = basic_ssp.agb_table\n self.sn1a_table = basic_ssp.sn1a_table\n self.bh_table = basic_ssp.bh_table\n self.inverse_imf = basic_ssp.inverse_imf\n\ndef initialise_stuff(a):\n '''\n Convenience function initialising the solar abundance, SFR and infall with the default values provided in parameter.py as a\n '''\n from .solar_abundance import solar_abundances\n from .sfr import SFR \n from .infall import INFALL\n\n basic_solar = solar_abundances()\n getattr(basic_solar, a.solar_abundance_name)()\n \n basic_sfr = SFR(a.start,a.end,a.time_steps)\n if a.basic_sfr_name == 'gamma_function':\n getattr(basic_sfr, a.basic_sfr_name)(S0 = a.S_0 * a.mass_factor,a_parameter = a.a_parameter, loc = a.sfr_beginning, scale = a.sfr_scale)\n elif a.basic_sfr_name == 'model_A':\n basic_sfr.model_A(a.mass_factor*a.S_0,a.t_0,a.t_1)\n elif a.basic_sfr_name == 'prescribed':\n basic_sfr.prescribed(a.mass_factor, a.name_of_file)\n elif a.basic_sfr_name == 'doubly_peaked':\n basic_sfr.doubly_peaked(S0 = a.mass_factor*a.S_0, peak_ratio = a.peak_ratio, decay = a.sfr_decay, t0 = a.sfr_t0, peak1t0 = a.peak1t0, peak1sigma = a.peak1sigma)\n \n basic_sfr.sfr = np.divide(basic_sfr.sfr,sum(basic_sfr.sfr))\n \n \n basic_infall = INFALL(np.copy(basic_sfr.t),np.copy(basic_sfr.sfr))\n if a.basic_infall_name == 'exponential':\n getattr(basic_infall, a.basic_infall_name)((a.infall_amplitude,a.tau_infall,a.infall_time_offset,a.c_infall,a.norm_infall))\n elif a.basic_infall_name == 'gamma_function':\n getattr(basic_infall, a.basic_infall_name)(mass_factor = a.norm_infall, a_parameter = a.infall_a_parameter, loc = a.infall_beginning, scale = a.infall_scale)\n elif a.basic_infall_name == 'sfr_related':\n getattr(basic_infall, a.basic_infall_name)()\n\n\n return basic_solar, basic_sfr, basic_infall\n\ndef Chempy(a):\n '''\n Chemical evolution run with the default parameters using the net yields.\n\n INPUT: \n \n a = ModelParameters() from parameter.py\n\n OUTPUT:\n \n cube = The ISM evolution class\n \n abundances = The abundances of the ISM\n '''\n from .infall import PRIMORDIAL_INFALL\n from .time_integration import ABUNDANCE_MATRIX\n from .making_abundances import mass_fraction_to_abundances\n from numpy.lib.recfunctions import append_fields \n basic_solar, basic_sfr, basic_infall = initialise_stuff(a)\n elements_to_trace = a.elements_to_trace\n basic_primordial = PRIMORDIAL_INFALL(list(elements_to_trace),np.copy(basic_solar.table))\n basic_primordial.primordial()\n \n # Needed a rescaling for the shortened sfr \n gas_reservoir_mass_factor = a.gas_reservoir_mass_factor / a.shortened_sfr_rescaling\n #sfr_factor_for_cosmic_accretion = a.sfr_factor_for_cosmic_accretion / a.shortened_sfr_rescaling\n gas_at_start = a.gas_at_start / a.shortened_sfr_rescaling\n \n cube = ABUNDANCE_MATRIX(np.copy(basic_sfr.t),np.copy(basic_sfr.sfr),np.copy(basic_infall.infall),list(elements_to_trace),\n 
list(basic_primordial.symbols),list(basic_primordial.fractions),float(gas_at_start),list(basic_primordial.symbols),list(basic_primordial.fractions),\n float(gas_reservoir_mass_factor),float(a.outflow_feedback_fraction),bool(a.check_processes),float(a.starformation_efficiency),float(a.gas_power),\n float(a.sfr_factor_for_cosmic_accretion), list(basic_primordial.symbols), list(basic_primordial.fractions))\n\n basic_ssp = SSP_wrap(a)\n for i in range(len(basic_sfr.t)-1):\n j = len(basic_sfr.t)-i\n element_fractions = []\n for item in elements_to_trace:\n element_fractions.append(float(np.copy(cube.cube[item][max(i-1,0)]/cube.cube['gas'][max(i-1,0)])))## gas element fractions from one time step before \n if element_fractions[-1]<0:\n print('-ve Error')\n #raise Exception('-ve Error')\n metallicity = float(cube.cube['Z'][i])\n #print(metallicity) \n \n time_steps = np.copy(basic_sfr.t[:j])\n basic_ssp.calculate_feedback(float(metallicity), list(elements_to_trace), list(element_fractions), np.copy(time_steps)) \n cube.advance_one_step(i+1,np.copy(basic_ssp.table),np.copy(basic_ssp.sn2_table),np.copy(basic_ssp.agb_table),np.copy(basic_ssp.sn1a_table),np.copy(basic_ssp.bh_table))\n if cube.cube['gas'][i] < 0:\n print(i, basic_sfr.t[i])\n print('gas became negative. returning -inf')\n return -np.inf, [0]\n if cube.gas_reservoir['gas'][i] < 0:\n print('gas_reservoir became negative. returning -inf')\n return -np.inf, [0]\n\n abundances,elements,numbers = mass_fraction_to_abundances(np.copy(cube.cube),np.copy(basic_solar.table))\n weights = cube.cube['sfr']\n abundances = append_fields(abundances,'weights',weights)\n abundances = append_fields(abundances,'time', cube.cube['time'])\n abundances = np.array(abundances)\n \n for element in elements: \n if element != 'Fe':\n try:\n abundances[element] -= abundances['Fe']\n except RuntimeWarning: # Remove error from first Fe abundance = -inf\n pass\n #TEST output\n #print('Chempy output')\n #print(abundances[:][-1])\n \n return cube, abundances\n\ndef Chempy_all_times(a):\n '''\n Chemical evolution run with the default parameters using the net yields.\n\n INPUT: \n \n a = ModelParameters() from parameter.py\n\n OUTPUT:\n \n cube = The ISM evolution class\n \n abundances = The abundances of the ISM\n '''\n from .infall import PRIMORDIAL_INFALL\n from .time_integration import ABUNDANCE_MATRIX\n from .making_abundances import mass_fraction_to_abundances\n from numpy.lib.recfunctions import append_fields \n basic_solar, basic_sfr, basic_infall = initialise_stuff(a)\n elements_to_trace = a.elements_to_trace\n basic_primordial = PRIMORDIAL_INFALL(list(elements_to_trace),np.copy(basic_solar.table))\n basic_primordial.primordial()\n gas_reservoir_mass_factor = a.gas_reservoir_mass_factor / a.shortened_sfr_rescaling\n gas_at_start = a.gas_at_start / a.shortened_sfr_rescaling # unlikely to be needed unless a.gas_at_start is non-zero\n \n cube = ABUNDANCE_MATRIX(np.copy(basic_sfr.t),np.copy(basic_sfr.sfr),np.copy(basic_infall.infall),list(elements_to_trace),list(basic_primordial.symbols),\n list(basic_primordial.fractions),float(gas_at_start),list(basic_primordial.symbols),list(basic_primordial.fractions),float(gas_reservoir_mass_factor),\n float(a.outflow_feedback_fraction),bool(a.check_processes),float(a.starformation_efficiency),float(a.gas_power),\n float(a.sfr_factor_for_cosmic_accretion),list(basic_primordial.symbols), list(basic_primordial.fractions))\n\n basic_ssp = SSP_wrap(a)\n \n for i in range(len(basic_sfr.t)-1):\n j = len(basic_sfr.t)-i\n 
element_fractions = []\n for item in elements_to_trace:\n element_fractions.append(float(np.copy(cube.cube[item][max(i-1,0)]/cube.cube['gas'][max(i-1,0)])))## gas element fractions from one time step before \n if element_fractions[-1]<0:\n print('-ve Error')\n #raise Exception('-ve Error')\n metallicity = float(cube.cube['Z'][i])\n #print(metallicity) \n \n time_steps = np.copy(basic_sfr.t[:j])\n basic_ssp.calculate_feedback(float(metallicity), list(elements_to_trace), list(element_fractions), np.copy(time_steps)) \n cube.advance_one_step(i+1,np.copy(basic_ssp.table),np.copy(basic_ssp.sn2_table),np.copy(basic_ssp.agb_table),np.copy(basic_ssp.sn1a_table),np.copy(basic_ssp.bh_table))\n \n \n for item in elements_to_trace:\n if cube.cube[item][i]<0:\n print(i,item)\n print('element %s became negative. returning -inf'%item)\n return -np.inf,[0]\n if cube.cube['gas'][i] < 0:\n print(i, basic_sfr.t[i])\n print('gas became negative. returning -inf')\n return -np.inf, [0]\n if cube.gas_reservoir['gas'][i] < 0:\n print('gas_reservoir became negative. returning -inf')\n return -np.inf, [0]\n\n abundances,elements,numbers = mass_fraction_to_abundances(np.copy(cube.cube),np.copy(basic_solar.table))\n weights = cube.cube['sfr']\n abundances = append_fields(abundances,'weights',weights)\n abundances = append_fields(abundances,'time', cube.cube['time'])\n abundances = np.array(abundances)\n \n for element in elements: \n if element != 'Fe':\n filt = np.where(np.logical_not(np.isfinite(abundances['Fe'])))\n abundances[element][filt]=np.inf\n filt2 = np.where(np.isfinite(abundances['Fe']))\n abundances[element][filt2]-=abundances['Fe'][filt2]\n #TEST output\n #print('Chempy output')\n #print(abundances[:][-1])\n \n return cube, abundances\n\n\n\ndef Chempy_gross(a):\n '''\n Chemical evolution run with the default parameters but now using solar scaled material (testing the worse case when total yields provided).\n\n INPUT: \n \n a = ModelParameters() from parameter.py\n\n OUTPUT:\n \n cube = The ISM evolution class\n \n abundances = The abundances of the ISM\n '''\n from infall import PRIMORDIAL_INFALL\n from time_integration import ABUNDANCE_MATRIX\n from making_abundances import mass_fraction_to_abundances\n from numpy.lib.recfunctions import append_fields \n basic_solar, basic_sfr, basic_infall = initialise_stuff(a)\n elements_to_trace = a.elements_to_trace\n basic_primordial = PRIMORDIAL_INFALL(list(elements_to_trace),np.copy(basic_solar.table))\n basic_primordial.primordial(0)\n gas_reservoir_mass_factor = a.gas_reservoir_mass_factor / a.shortened_sfr_rescaling\n gas_at_start = a.gas_at_start / a.shortened_sfr_rescaling\n cube = ABUNDANCE_MATRIX(np.copy(basic_sfr.t),np.copy(basic_sfr.sfr),np.copy(basic_infall.infall),list(elements_to_trace),list(basic_primordial.symbols),\n list(basic_primordial.fractions),float(gas_at_start),list(basic_primordial.symbols),list(basic_primordial.fractions),float(gas_reservoir_mass_factor),\n float(a.outflow_feedback_fraction),bool(a.check_processes),float(a.starformation_efficiency),float(a.gas_power), float(a.sfr_factor_for_cosmic_accretion), \n list(basic_primordial.symbols), list(basic_primordial.fractions))\n basic_ssp = SSP_wrap(a)\n for i in range(len(basic_sfr.t)-1):\n j = len(basic_sfr.t)-i\n metallicity = float(cube.cube['Z'][i])\n solar_scaled_material = PRIMORDIAL_INFALL(list(elements_to_trace),np.copy(basic_solar.table))\n solar_scaled_material.solar(np.log10(metallicity/basic_solar.z))\n element_fractions = list(solar_scaled_material.fractions)\n for item 
in elements_to_trace:\n element_fractions.append(float(np.copy(cube.cube[item][max(i-1,0)]/cube.cube['gas'][max(i-1,0)])))## gas element fractions from one time step before \n time_steps = np.copy(basic_sfr.t[:j])\n basic_ssp.calculate_feedback(float(metallicity), list(elements_to_trace), list(element_fractions), np.copy(time_steps))\n cube.advance_one_step(i+1,np.copy(basic_ssp.table),np.copy(basic_ssp.sn2_table),np.copy(basic_ssp.agb_table),np.copy(basic_ssp.sn1a_table))\n abundances,elements,numbers = mass_fraction_to_abundances(np.copy(cube.cube),np.copy(basic_solar.table))\n weights = cube.cube['sfr']\n abundances = append_fields(abundances,'weights',weights)\n abundances = np.array(abundances)\n\n return cube, abundances\n\n\ndef multi_star_optimization():\n '''\n This function will optimize the parameters of all stars in a hierachical manner (similar to gibbs sampling)\n\n INPUT: \n\n a = will be loaded from parameter.py (prepare all variables there)\n\n OUTPUT:\n\n log_list = a list of intermediate results (so far only for debugging)\n '''\n import time\n import multiprocessing as mp\n from .optimization import minimizer_initial, minimizer_global, minimizer_local\n from .cem_function import global_optimization_error_returned\n from .parameter import ModelParameters\n \n # For testing\n import warnings\n warnings.filterwarnings(\"ignore\")\n \n a = ModelParameters()\n print(a.stellar_identifier_list)\n start_time = time.time()\n\n log_list = []\n # I: Minimization for each star seperately\n # 1: for each star make initial conditions (each star needs other model parameters) \n parameter_list = []\n for item in a.stellar_identifier_list:\n parameter_list.append(item)\n # 2: call posterior_function_for_minimization with scipy.optimize.minimize in multiprocess for each star and recover the found parameters\n p = mp.Pool(len(parameter_list))\n t = p.map(minimizer_initial, parameter_list)\n p.close()\n p.join()\n result = np.vstack(t)\n\n log_list.append(np.copy(result))\n log_list.append('initial minimization')\n initial = time.time()\n print('first minimization for each star separately took: %2.f seconds' %(initial - start_time))\n\n # IV: repeat II and III until posterior does not change much\n result[:,:len(a.SSP_parameters)] = np.mean(result[:,:len(a.SSP_parameters)], axis = 0)\n posteriors = []\n counter = 0\n while True:\n counter += 1\n if len(posteriors) > 1:\n if np.abs(posteriors[-1] - posteriors[-2]) < a.gibbs_sampler_tolerance:\n break\n if len(posteriors) > a.gibbs_sampler_maxiter:\n break\n\n initial = time.time()\n # II: Global parameter minimization:\n # 1: only SSP parameters free. 
Use mean SSP parameter values and individual (but fixed ISM parameter values)\n changing_parameter = result[0,:len(a.SSP_parameters)]\n # 2: Call each star in multiprocess but only return the predictions\n # 3: Calculate the likelihood for each star and optimize the common model error (is all done within minimizer global, which is calling 'global optimization')\n x = minimizer_global(changing_parameter, a.tol_minimization, a.maxiter_minimization, a.verbose, result)\n\n # 4: return global SSP parameters and common model error\n posterior, error_list, elements = global_optimization_error_returned(x, result)\n posteriors.append(posterior)\n print(posteriors)\n\n global_iteration1 = time.time()\n print('step %d global minimization took: %2.f seconds' %(counter, global_iteration1 - initial)) \n\n # III: Local parameter minimization:\n # 1: Use fixed global parameters and fixed common errors make initial conditions\n result[:,:len(a.SSP_parameters)] = x\n\n log_list.append((np.copy(x),posterior))\n log_list.append('step %d global minimization' %(counter))\n\n p0_list = []\n parameter_list = []\n x_list = []\n error_list_mp = []\n element_list_mp = []\n\n for i,item in enumerate(a.stellar_identifier_list):\n parameter_list.append(item)\n p0_list.append(result[i,len(a.SSP_parameters):])\n x_list.append(x)\n error_list_mp.append(error_list)\n element_list_mp.append(elements)\n\n args = zip(p0_list,parameter_list,x_list,error_list_mp,element_list_mp)\n\n # 2: Minimize each star ISM parameters in multiprocess\n p = mp.Pool(len(parameter_list))\n t = p.map(minimizer_local, args)\n p.close()\n p.join()\n local_parameters = np.vstack(t)\n result[:,len(a.SSP_parameters):] = local_parameters\n\n log_list.append(np.copy(result))\n log_list.append('step %d local minimization' %(counter))\n local_iteration1 = time.time()\n print('step %d local minimization took: %2.f seconds' %(counter, local_iteration1 - global_iteration1)) \n\n log_list.append(posteriors)\n print(log_list)\n\n # V: MCMC run\n ## reshape the result to have global parameters in the front and the local parameters following\n changing_parameter = list(result[0,:len(a.SSP_parameters)])\n for i in range(result.shape[0]):\n changing_parameter.append(list(result[i,len(a.SSP_parameters):]))\n changing_parameter = np.hstack(changing_parameter)\n ## jitter the parameters to initialise the chain (add a validation later, i.e. testing that the particular parameters yield a result)\n mcmc_multi(changing_parameter, error_list, elements)\n # 1: Free all parameters and optimize common error (SSP should be the same for all stars)\n # 2: Plug everything into emcee and sample the posterior\n return log_list\n\ndef mcmc(a):\n '''\n Convenience function to use the MCMC. A subdirectory mcmc/ will be created in the current directory and intermediate chains will be stored there.\n \n The MCMC will sample the volume of best posterior for the likelihood functions that are declared in parameter.py. Default is ['sol_norm','gas_reservoir','sn_ratio'] which corresponds to 'Sun+' from the paper.\n '''\n import time\n import os\n import multiprocessing as mp\n from .optimization import creating_chain, posterior_probability\n import emcee\n\n start1 = time.time()\n directory = 'mcmc/'\n if os.path.exists(directory):\n if a.verbose:\n print('%s already existed. 
Content might be overwritten' %(directory))\n else:\n os.makedirs(directory)\n \n a.check_processes = False\n a.number_of_models_overplotted = 1\n a.only_net_yields_in_process_tables = False\n a.testing_output = False\n a.summary_pdf = False\n a.nthreads = mp.cpu_count()\n if a.nthreads == 4:\n a.nthreads = 2\n \n chain = creating_chain(a,np.copy(a.p0))\n sampler = emcee.EnsembleSampler(a.nwalkers,a.ndim,posterior_probability,threads=a.nthreads, args = [a])\n pos,prob,state,blobs = sampler.run_mcmc(chain,a.mburn)\n \n mean_prob = mean_prob_beginning = np.zeros((a.m))\n posterior_list = []\n posterior_std_list = []\n for i in range(a.m):\n print('step ', i+1 , 'of ',a.m)\n pos, prob, state, blobs = sampler.run_mcmc(pos, a.save_state_every, rstate0=state, lnprob0=prob, blobs0 = blobs, storechain = True)\n np.save('%s/flatchain' %(directory),sampler.chain)\n np.save('%s/flatlnprobability' %(directory),sampler.lnprobability)\n np.save('%s/flatblobs' %(directory),sampler.blobs)\n posterior = np.load('%s/flatlnprobability.npy' %(directory))\n posterior_list.append(np.mean(posterior, axis = 0)[-1])\n posterior_std_list.append(np.std(posterior, axis = 0)[-1])\n np.save('%s/flatmeanposterior' %(directory), posterior_list)\n np.save('%s/flatstdposterior' %(directory), posterior_std_list)\n print(np.mean(posterior, axis = 0)[0], np.mean(posterior, axis = 0)[-1])\n \n if i>202:\n print('posterior -1, -100, -200',np.mean(posterior, axis = 0)[-1], np.mean(posterior, axis = 0)[-100], np.mean(posterior, axis = 0)[-200])\n print('posterior 0, 100, 200',np.mean(posterior, axis = 0)[0], np.mean(posterior, axis = 0)[100], np.mean(posterior, axis = 0)[200])\n #print(\"Mean acceptance fraction:\", sampler.acceptance_fraction)\n elapsed1 = (time.time() - start1)\n print('calculation so far took', elapsed1, ' seconds')\n if i>a.min_mcmc_iterations and np.abs(np.mean(posterior, axis = 0)[-1] - np.mean(posterior, axis = 0)[-100]) < a.mcmc_tolerance and np.abs(np.mean(posterior, axis = 0)[-1] - np.mean(posterior, axis = 0)[-200]) < a.mcmc_tolerance:\n break\n if a.send_email:\n send_email(a.nthreads, i, np.mean(posterior, axis = 0)[0], np.mean(posterior, axis = 0)[-1], a, elapsed1)\n\n\n\ndef mcmc_multi(changing_parameter, error_list, elements):\n '''\n Convenience function to use the MCMC for multiple zones (and therefore multiple observations). A subdirectory mcmc/ will be created in the current directory and intermediate chains will be stored there.\n The MCMC will sample the volume of best posterior for the likelihood functions that are declared in parameter.py. \n Default is a list of Proto-sun, Arcturus and B-stars. The MCMC uses many walkers and can use multiple threads. Each walker will evaluate a series of Chempy zones and add their posterior together which then will be returned.\n \n INPUT:\n\n changing_parameter = the parameter vector for initialization (will usually be found from minimization before). 
The initial chain will be created by jittering slightly the initial parameter guess\n\n error_list = the vector of element errors\n\n elements = the corresponding element symbols\n\n OUTPUT:\n\n The function will create a folder and store the chain as well as the predicted element values\n\n The MCMC stops when the convergence criteria is met, which is when the median posterior of all walkers does not change much inbetween 200 steps anymore.\n '''\n import time\n import os\n import multiprocessing as mp\n from .cem_function import posterior_function_many_stars\n from .parameter import ModelParameters\n import emcee\n\n a = ModelParameters()\n start1 = time.time()\n directory = 'mcmc/'\n if os.path.exists(directory):\n if a.verbose:\n print('%s already existed. Content might be overwritten' %(directory))\n else:\n os.makedirs(directory)\n \n nthreads = mp.cpu_count()\n if nthreads == 4:\n nthreads = 2\n ndim = len(changing_parameter)\n a.nwalkers = max(a.nwalkers, int(ndim*2))\n chain = np.empty(shape = (a.nwalkers,ndim))\n \n for i in range(a.nwalkers):\n result = -np.inf\n while result == -np.inf:\n jitter = np.random.normal(loc = 0, scale = 0.001, size = ndim)\n result, dummy = posterior_function_many_stars(changing_parameter + jitter,error_list,elements)\n chain[i] = changing_parameter + jitter\n print('Chain created')\n sampler = emcee.EnsembleSampler(a.nwalkers,ndim,posterior_function_many_stars,threads=nthreads, args = [error_list,elements])\n pos,prob,state,blobs = sampler.run_mcmc(chain,a.mburn)\n \n mean_prob = mean_prob_beginning = np.zeros((a.m))\n posterior_list = []\n posterior_std_list = []\n for i in range(a.m):\n print('step ', i+1 , 'of ',a.m)\n pos, prob, state, blobs = sampler.run_mcmc(pos, a.save_state_every, rstate0=state, lnprob0=prob, blobs0 = blobs, storechain = True)\n np.save('%s/flatchain' %(directory),sampler.chain)\n np.save('%s/flatlnprobability' %(directory),sampler.lnprobability)\n np.save('%s/flatblobs' %(directory),sampler.blobs)\n posterior = np.load('%s/flatlnprobability.npy' %(directory))\n posterior_list.append(np.mean(posterior, axis = 0)[-1])\n posterior_std_list.append(np.std(posterior, axis = 0)[-1])\n np.save('%s/flatmeanposterior' %(directory), posterior_list)\n np.save('%s/flatstdposterior' %(directory), posterior_std_list)\n print(np.mean(posterior, axis = 0)[0], np.mean(posterior, axis = 0)[-1])\n \n if i>202:\n print('posterior -1, -100, -200',np.mean(posterior, axis = 0)[-1], np.mean(posterior, axis = 0)[-100], np.mean(posterior, axis = 0)[-200])\n print('posterior 0, 100, 200',np.mean(posterior, axis = 0)[0], np.mean(posterior, axis = 0)[100], np.mean(posterior, axis = 0)[200])\n #print(\"Mean acceptance fraction:\", sampler.acceptance_fraction)\n elapsed1 = (time.time() - start1)\n print('calculation so far took', elapsed1, ' seconds')\n if i>a.min_mcmc_iterations and np.abs(np.mean(posterior, axis = 0)[-1] - np.mean(posterior, axis = 0)[-100]) < a.mcmc_tolerance and np.abs(np.mean(posterior, axis = 0)[-1] - np.mean(posterior, axis = 0)[-200]) < a.mcmc_tolerance:\n break\n if a.send_email:\n send_email(nthreads, i, np.mean(posterior, axis = 0)[0], np.mean(posterior, axis = 0)[-1], a, elapsed1)\n\n\ndef send_email(thread_count, iteration_count, posterior_beginning, posterior_end, parameters, time):\n from email.MIMEMultipart import MIMEMultipart\n from email.MIMEText import MIMEText\n import smtplib\n\n\n fromaddr = \"[email protected]\"\n toaddr = \"[email protected]\"\n msg = MIMEMultipart()\n msg['From'] = fromaddr\n msg['To'] = toaddr\n 
msg['Subject'] = \"Threads = %d, Run finished after %.2f hours\" %(thread_count, time/3600.)\n body = \"After %.1f hours %d threads produced %d iterations.\\n The posterior at beginning was: %.2f. The posterior now is: %.2f.\\n The stellar identifier list = %s.\\n The error marginalization is %s \\n The yields are: %s %s %s \\n \" %(time/3600., thread_count, iteration_count, posterior_beginning, posterior_end, str(parameters.stellar_identifier_list), str(parameters.error_marginalization), parameters.yield_table_name_sn2, parameters.yield_table_name_agb, parameters.yield_table_name_1a)\n msg.attach(MIMEText(body, 'plain'))\n\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(\"[email protected]\", \"MPIA_Server_runs\")\n text = msg.as_string()\n server.sendmail(fromaddr, toaddr, text) \n \ndef mcmc_quick(changing_parameter,elements,preload):\n '''\n Convenience function to use the MCMC for one zone. A subdirectory mcmc/ will be created in the current directory and intermediate chains will be stored there.\n The MCMC will sample the volume of best posterior for the likelihood functions that are declared in parameter.py. \n This is a cut down version to speed up MCMC for one star only\n INPUT:\n\n changing_parameter = the parameter vector for initialization (will usually be found from minimization before). The initial chain will be created by jittering slightly the initial parameter guess\n\n error_list = the vector of element errors\n\n elements = the corresponding element symbols\n\n OUTPUT:\n\n The function will create a folder and store the chain as well as the predicted element values\n\n The MCMC stops when the convergence criteria is met, which is when the median posterior of all walkers does not change much inbetween 200 steps anymore.\n '''\n import time\n import os\n import multiprocessing as mp\n from .cem_function import posterior_function_mcmc_quick\n from .score_function import preload_params_mcmc\n from .parameter import ModelParameters\n import emcee\n\n a = ModelParameters()\n start1 = time.time()\n directory = 'mcmc/'\n if os.path.exists(directory):\n if a.verbose:\n print('%s already existed. 
Content might be overwritten' %(directory))\n else:\n os.makedirs(directory)\n \n nthreads = mp.cpu_count()\n if nthreads == 4:\n nthreads = 2\n ndim = len(changing_parameter)\n a.nwalkers = max(a.nwalkers, int(ndim*2))\n chain = np.empty(shape = (a.nwalkers,ndim))\n \n for i in range(a.nwalkers):\n result = -np.inf\n while result == -np.inf:\n jitter = np.random.normal(loc = 0, scale = 0.001, size = ndim)\n result = posterior_function_mcmc_quick(changing_parameter + jitter,elements,preload)\n chain[i] = changing_parameter + jitter\n\n pool=mp.Pool()\n sampler = emcee.EnsembleSampler(a.nwalkers,ndim,posterior_function_mcmc_quick,threads=nthreads, args = [elements,preload],pool=pool)\n pos,prob,state,blobs = sampler.run_mcmc(chain,a.mburn)\n \n \n mean_prob = mean_prob_beginning = np.zeros((a.m))\n posterior_list = []\n posterior_std_list = []\n for i in range(a.m):\n print('step ', i+1 , 'of ',a.m)\n pos, prob, state, blobs = sampler.run_mcmc(pos, a.save_state_every, rstate0=state, lnprob0=prob, blobs0 = blobs, storechain = True)\n # np.save('%s/flatchain' %(directory),sampler.chain)\n # np.save('%s/flatlnprobability' %(directory),sampler.lnprobability)\n # np.save('%s/flatblobs' %(directory),sampler.blobs)\n # posterior = np.load('%s/flatlnprobability.npy' %(directory))\n posterior = sampler.lnprobability\n posterior_list.append(np.mean(posterior, axis = 0)[-1])\n posterior_std_list.append(np.std(posterior, axis = 0)[-1])\n # np.save('%s/flatmeanposterior' %(directory), posterior_list)\n # np.save('%s/flatstdposterior' %(directory), posterior_std_list)\n print(np.mean(posterior, axis = 0)[0], np.mean(posterior, axis = 0)[-1])\n \n #if i>202:\n #print('posterior -1, -100, -200',np.mean(posterior, axis = 0)[-1], np.mean(posterior, axis = 0)[-100], np.mean(posterior, axis = 0)[-200])\n #print('posterior 0, 100, 200',np.mean(posterior, axis = 0)[0], np.mean(posterior, axis = 0)[100], np.mean(posterior, axis = 0)[200])\n #print(\"Mean acceptance fraction:\", sampler.acceptance_fraction)\n elapsed1 = (time.time() - start1)\n print('calculation so far took', elapsed1, ' seconds')\n if i>a.min_mcmc_iterations and np.abs(np.mean(posterior, axis = 0)[-1] - np.mean(posterior, axis = 0)[-100]) < a.mcmc_tolerance and np.abs(np.mean(posterior, axis = 0)[-1] - np.mean(posterior, axis = 0)[-200]) < a.mcmc_tolerance:\n break\n np.save('%s/flatchain' %(directory),sampler.chain)\n np.save('%s/flatlnprobability' %(directory),sampler.lnprobability)\n np.save('%s/flatblobs' %(directory),sampler.blobs)\n posterior = sampler.lnprobability\n #posterior = np.load('%s/flatlnprobability.npy' %(directory))\n posterior_list.append(np.mean(posterior, axis = 0)[-1])\n posterior_std_list.append(np.std(posterior, axis = 0)[-1])\n np.save('%s/flatmeanposterior' %(directory), posterior_list)\n np.save('%s/flatstdposterior' %(directory), posterior_std_list)\n pool.close() \n if a.send_email:\n send_email(nthreads, i, np.mean(posterior, axis = 0)[0], np.mean(posterior, axis = 0)[-1], a, elapsed1)\n\n\n \ndef single_star_optimization():\n '''\n This function will optimize the parameters of a single zone quickly\n\n INPUT: \n\n a = will be loaded from parameter.py (prepare all variables there)\n\n OUTPUT:\n\n log_list = a list of intermediate results (so far only for debugging)\n '''\n import time\n #import multiprocessing as mp\n from .optimization import minimizer_initial_quick\n from .cem_function import global_optimization_error_returned\n from .parameter import ModelParameters\n from .score_function import 
preload_params_mcmc\n \n # For testing\n import warnings\n warnings.filterwarnings(\"ignore\")\n \n a = ModelParameters()\n preload = preload_params_mcmc()\n \n print(a.stellar_identifier_list)\n start_time = time.time()\n\n log_list = []\n # I: Minimization for each star seperately\n # 1: for each star make initial conditions (each star needs other model parameters) \n parameter_list = []\n for item in a.stellar_identifier_list:\n parameter_list.append(item)\n # 2: call posterior_function_for_minimization with scipy.optimize.minimize in multiprocess for each star and recover the found parameters\n #p = mp.Pool(len(parameter_list))\n #t = p.map(minimizer_initial_quick, parameter_list)\n #p.close()\n #p.join()\n #result = np.vstack(t)\n \n result = minimizer_initial_quick(parameter_list)\n \n log_list.append(np.copy(result))\n log_list.append('initial minimization')\n initial = time.time()\n print('first minimization for each star separately took: %2.f seconds' %(initial - start_time))\n\n # IV: repeat II and III until posterior does not change much\n #result[:,:len(a.SSP_parameters)] = np.mean(result[:,:len(a.SSP_parameters)], axis = 0)\n #posteriors = []\n #counter = 0\n #while True:\n # counter += 1\n # if len(posteriors) > 1:\n # if np.abs(posteriors[-1] - posteriors[-2]) < a.gibbs_sampler_tolerance:\n # break\n # if len(posteriors) > a.gibbs_sampler_maxiter:\n # break\n\n # initial = time.time()\n # II: Global parameter minimization:\n # 1: only SSP parameters free. Use mean SSP parameter values and individual (but fixed ISM parameter values)\n # changing_parameter = result[0,:len(a.SSP_parameters)]\n # 2: Call each star in multiprocess but only return the predictions\n # 3: Calculate the likelihood for each star and optimize the common model error (is all done within minimizer global, which is calling 'global optimization')\n # x = minimizer_global(changing_parameter, a.tol_minimization, a.maxiter_minimization, a.verbose, result)\n\n # 4: return global SSP parameters and common model error\n # posterior, error_list, elements = global_optimization_error_returned(x, result)\n # posteriors.append(posterior)\n # print(posteriors)\n\n # global_iteration1 = time.time()\n # print('step %d global minimization took: %2.f seconds' %(counter, global_iteration1 - initial)) \n\n # III: Local parameter minimization:\n # 1: Use fixed global parameters and fixed common errors make initial conditions\n # result[:,:len(a.SSP_parameters)] = x\n\n # log_list.append((np.copy(x),posterior))\n # log_list.append('step %d global minimization' %(counter))\n\n # p0_list = []\n # parameter_list = []\n # x_list = []\n # error_list_mp = []\n # element_list_mp = []\n\n # for i,item in enumerate(a.stellar_identifier_list):\n # parameter_list.append(item)\n # p0_list.append(result[i,len(a.SSP_parameters):])\n # x_list.append(x)\n # error_list_mp.append(error_list)\n # element_list_mp.append(elements)\n\n # args = zip(p0_list,parameter_list,x_list,error_list_mp,element_list_mp)\n\n # 2: Minimize each star ISM parameters in multiprocess\n # p = mp.Pool(len(parameter_list))\n # t = p.map(minimizer_local, args)\n # p.close()\n # p.join()\n # local_parameters = np.vstack(t)\n # result[:,len(a.SSP_parameters):] = local_parameters\n\n # log_list.append(np.copy(result))\n # log_list.append('step %d local minimization' %(counter))\n # local_iteration1 = time.time()\n # print('step %d local minimization took: %2.f seconds' %(counter, local_iteration1 - global_iteration1)) \n\n #log_list.append(posteriors)\n 
#print(log_list)\n\n # V: MCMC run\n ## reshape the result to have global parameters in the front and the local parameters following\n #changing_parameter = list(result[0,:len(a.SSP_parameters)])\n \n elements = np.unique(a.elements_to_trace,preload.wildcard.dtype.names)\n changing_parameter = list(result)\n\n #for i in range(result.shape[0]):\n # changing_parameter.append(list(result[i,len(a.SSP_parameters):]))\n \n changing_parameter = np.hstack(changing_parameter)\n \n ## jitter the parameters to initialise the chain (add a validation later, i.e. testing that the particular parameters yield a result)\n \n mcmc_quick(changing_parameter, elements,preload)\n \n # 1: Free all parameters and optimize common error (SSP should be the same for all stars)\n # 2: Plug everything into emcee and sample the posterior\n return log_list\n\n\ndef scoring_wrapper():\n \"\"\"\n NO LONGER USED \n This function will calculate Bayes and CV scores for yield set, using the code in score_function.py.\n \n The neural network must be trained beforehand using training_data and create_network \n \n Main outputs are labelled .npz files in the Scores/ file.\n \n MUST set a.UseNeural = True for this and select correct dataset.\n \"\"\"\n from Chempy.neural import training_data,create_network\n import time\n from Chempy.parameter import ModelParameters\n from Chempy.score_function import CV_wrapper, Bayes_wrapper\n init_time = time.time()\n a = ModelParameters()\n \n print('Step 1 (at time %.2f s): Calculate Bayes score' %(time.time()-init_time))\n Bayes_wrapper()\n \n print('Step 2 (at time %.2f s): Calculate cross-validation score' %(time.time()-init_time))\n CV_wrapper()\n \n print('Process complete in time %.2f s' %(time.time()-init_time)) \n return None \n \n" ]
[ [ "numpy.random.normal", "numpy.array", "numpy.empty", "numpy.zeros", "numpy.lib.recfunctions.append_fields", "numpy.copy", "numpy.load", "numpy.save", "numpy.mean", "numpy.std", "numpy.where", "numpy.unique", "numpy.isfinite", "numpy.abs", "numpy.hstack", "numpy.log10", "numpy.vstack" ] ]
yellowzp/algorithm_model_learning
[ "4c8c2e29f4724fe565e73bb243709d7c302178c9" ]
[ "sklearn/util.py" ]
[ "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\n\n\nclass Util(object):\n \"\"\"\n utility tools\n \"\"\"\n\n @classmethod\n def plot(cls, points, lines, title=\"test\", x_label=\"X\", y_label=\"Y\", save_path=\"\"):\n \"\"\"\n 绘制二维坐标图\n :param points:\n :param lines:\n :param title:\n :param x_label:\n :param y_label:\n :param save_path:\n :return:\n \"\"\"\n point_color = \"blue\"\n color_list = [\"green\", \"red\", \"cyan\", \"yellow\", \"purple\", \"springgreen\", \"orange\", \"lightcoral\", \"peru\", \"tan\", \"gold\"]\n # points\n x_list, y_list = points\n plt.scatter(x_list, y_list, s=20, c=point_color, alpha=.5)\n # lines\n for idx, line in enumerate(lines):\n color = color_list[idx % len(color_list)]\n x_list, y_list = line\n plt.plot(x_list, y_list, color)\n plt.title(title)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.legend()\n plt.show()\n if save_path:\n plt.savefig(save_path)\n return True\n\n\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter" ] ]
vlouf/cpol_processing
[ "097994422c46773754e04a3d4911b81c01673fa5" ]
[ "cpol_processing/production.py" ]
[ "\"\"\"\nCPOL Level 1b main production line. These are the drivers function.\n\n@title: production\n@author: Valentin Louf\n@email: [email protected]\n@copyright: Valentin Louf (2017-2021)\n@institution: Bureau of Meteorology and Monash University\n@date: 30/03/2021\n\n.. autosummary::\n :toctree: generated/\n\n _mkdir\n buffer\n process_and_save\n production_line\n\"\"\"\n# Python Standard Library\nimport gc\nimport os\nimport time\nimport uuid\nimport datetime\nimport traceback\nimport warnings\n\n# Other Libraries\nimport pyart\nimport cftime\nimport numpy as np\n\n# Custom modules.\nfrom . import attenuation\nfrom . import cfmetadata\nfrom . import filtering\nfrom . import hydrometeors\nfrom . import phase\nfrom . import radar_codes\nfrom . import velocity\n\n\ndef _mkdir(dir):\n \"\"\"\n Make directory. Might seem redundant but you might have concurrency issue\n when dealing with multiprocessing.\n \"\"\"\n if os.path.exists(dir):\n return None\n\n try:\n os.mkdir(dir)\n except FileExistsError:\n pass\n\n return None\n\n\ndef buffer(func):\n \"\"\"\n Decorator to catch and kill error message. Almost want to name the function\n dont_fail.\n \"\"\"\n\n def wrapper(*args, **kwargs):\n try:\n rslt = func(*args, **kwargs)\n except Exception:\n traceback.print_exc()\n rslt = None\n return rslt\n\n return wrapper\n\n\nclass Chronos():\n def __init__(self, messg=None):\n self.messg = messg\n def __enter__(self):\n self.start = time.time()\n def __exit__(self, ntype, value, traceback):\n self.time = time.time() - self.start\n if self.messg is not None:\n print(f\"{self.messg} took {self.time:.2f}s.\")\n else:\n print(f\"Processed in {self.time:.2f}s.\")\n\n\n@buffer\ndef process_and_save(\n radar_file_name: str, outpath: str, sound_dir: str = None, do_dealiasing: bool = True, instrument: str = \"CPOL\",\n) -> None:\n \"\"\"\n Call processing function and write data.\n\n Parameters:\n ===========\n radar_file_name: str\n Name of the input radar file.\n outpath: str\n Path for saving output data.\n sound_dir: str\n Path to radiosoundings directory.\n instrument: str\n Name of radar (only CPOL will change something).\n do_dealiasing: bool\n Dealias velocity.\n \"\"\"\n today = datetime.datetime.utcnow()\n if instrument == \"CPOL\":\n is_cpol = True\n else:\n is_cpol = False\n\n # Create directories.\n _mkdir(outpath)\n outpath = os.path.join(outpath, \"v{}\".format(today.strftime(\"%Y\")))\n _mkdir(outpath)\n outpath_ppi = os.path.join(outpath, \"ppi\")\n _mkdir(outpath_ppi)\n tick = time.time()\n\n # Business start here.\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n radar = production_line(radar_file_name, sound_dir, is_cpol=is_cpol, do_dealiasing=do_dealiasing)\n # Business over.\n gc.collect()\n\n if radar is None:\n print(f\"{radar_file_name} has not been processed. 
Check logs.\")\n return None\n\n radar_start_date = cftime.num2pydate(radar.time[\"data\"][0], radar.time[\"units\"])\n radar_end_date = cftime.num2pydate(radar.time[\"data\"][-1], radar.time[\"units\"])\n outpath_ppi = os.path.join(outpath_ppi, str(radar_start_date.year))\n _mkdir(outpath_ppi)\n outpath_ppi = os.path.join(outpath_ppi, radar_start_date.strftime(\"%Y%m%d\"))\n _mkdir(outpath_ppi)\n\n # Generate output file name.\n if instrument == \"CPOL\":\n outfilename = \"twp10cpolppi.b1.{}00.nc\".format(radar_start_date.strftime(\"%Y%m%d.%H%M\"))\n else:\n outfilename = \"cfrad.\" + radar_start_date.strftime(\"%Y%m%d_%H%M%S\") + \".nc\"\n\n outfilename = os.path.join(outpath_ppi, outfilename)\n\n # Check if output file already exists.\n if os.path.isfile(outfilename):\n print(f\"Output file {outfilename} already exists.\")\n return None\n\n if is_cpol:\n # Lat/lon informations\n latitude = radar.gate_latitude[\"data\"]\n longitude = radar.gate_longitude[\"data\"]\n maxlon = longitude.max()\n minlon = longitude.min()\n maxlat = latitude.max()\n minlat = latitude.min()\n origin_altitude = \"50\"\n origin_latitude = \"-12.2491\"\n origin_longitude = \"131.0444\"\n\n unique_id = str(uuid.uuid4())\n metadata = {\n \"Conventions\": \"CF-1.6, ACDD-1.3\",\n \"acknowledgement\": \"This work has been supported by the U.S. Department of Energy Atmospheric Systems Research Program through the grant DE-SC0014063. Data may be freely distributed.\",\n \"country\": \"Australia\",\n \"creator_email\": \"[email protected]\",\n \"creator_name\": \"Commonwealth of Australia, Bureau of Meteorology, Science and Innovation, Research, Weather and Environmental Prediction, Radar Science and Nowcasting\",\n \"creator_url\": \"http://www.bom.gov.au/australia/radar/\",\n \"date_created\": today.isoformat(),\n \"geospatial_bounds\": f\"POLYGON(({minlon:0.6} {minlat:0.6},{minlon:0.6} {maxlat:0.6},{maxlon:0.6} {maxlat:0.6},{maxlon:0.6} {minlat:0.6},{minlon:0.6} {minlat:0.6}))\",\n \"geospatial_lat_max\": f\"{maxlat:0.6}\",\n \"geospatial_lat_min\": f\"{minlat:0.6}\",\n \"geospatial_lat_units\": \"degrees_north\",\n \"geospatial_lon_max\": f\"{maxlon:0.6}\",\n \"geospatial_lon_min\": f\"{minlon:0.6}\",\n \"geospatial_lon_units\": \"degrees_east\",\n \"history\": \"created by Valentin Louf on raijin.nci.org.au at \" + today.isoformat() + \" using Py-ART\",\n \"id\": unique_id,\n \"institution\": \"Bureau of Meteorology\",\n \"instrument\": \"radar\",\n \"instrument_name\": \"CPOL\",\n \"instrument_type\": \"radar\",\n \"keywords\": \"radar, tropics, Doppler, dual-polarization\",\n \"license\": \"CC BY-NC-SA 4.0\",\n \"naming_authority\": \"au.gov.bom\",\n \"origin_altitude\": origin_altitude,\n \"origin_latitude\": origin_latitude,\n \"origin_longitude\": origin_longitude,\n \"platform_is_mobile\": \"false\",\n \"processing_level\": \"b1\",\n \"project\": \"CPOL\",\n \"publisher_name\": \"NCI\",\n \"publisher_url\": \"nci.gov.au\",\n \"product_version\": f\"v{today.year}.{today.month:02}\",\n \"references\": \"doi:10.1175/JTECH-D-18-0007.1\",\n \"site_name\": \"Gunn Pt\",\n \"source\": \"radar\",\n \"state\": \"NT\",\n \"standard_name_vocabulary\": \"CF Standard Name Table v71\",\n \"summary\": \"Volumetric scan from CPOL dual-polarization Doppler radar (Darwin, Australia)\",\n \"time_coverage_start\": radar_start_date.isoformat(),\n \"time_coverage_end\": radar_end_date.isoformat(),\n \"time_coverage_duration\": \"P10M\",\n \"time_coverage_resolution\": \"PT10M\",\n \"title\": \"radar PPI volume from CPOL\",\n 
\"uuid\": unique_id,\n \"version\": radar.metadata[\"version\"],\n }\n\n radar.metadata = metadata\n\n # Write results\n with Chronos(f\"Writing {outfilename}\"):\n pyart.io.write_cfradial(outfilename, radar, format=\"NETCDF4\")\n print(\"%s processed in %0.2fs.\" % (os.path.basename(radar_file_name), (time.time() - tick)))\n\n # Free memory\n del radar\n\n return None\n\n\ndef production_line(\n radar_file_name: str, sound_dir: str, is_cpol: bool = True, do_dealiasing: bool = True\n) -> pyart.core.radar.Radar:\n \"\"\"\n Production line for correcting and estimating CPOL data radar parameters.\n The naming convention for these parameters is assumed to be DBZ, ZDR, VEL,\n PHIDP, KDP, SNR, RHOHV, and NCP. KDP, NCP, and SNR are optional and can be\n recalculated.\n\n Parameters:\n ===========\n radar_file_name: str\n Name of the input radar file.\n sound_dir: str\n Path to radiosounding directory.\n is_cpol: bool\n Name of radar (only CPOL will change something).\n do_dealiasing: bool\n Dealias velocity.\n\n Returns:\n ========\n radar: pyart.core.radar.Radar\n Py-ART radar structure.\n\n PLAN:\n =====\n 01/ Read input radar file.\n 02/ Check if radar file OK (no problem with azimuth and reflectivity).\n 03/ Get radar date.\n 04/ Check if NCP field exists (creating a fake one if it doesn't)\n 05/ Check if RHOHV field exists (creating a fake one if it doesn't)\n 06/ Compute SNR and temperature using radiosoundings.\n 07/ Correct RHOHV using Ryzhkov algorithm.\n 08/ Create gatefilter (remove noise and incorrect data).\n 09/ Correct ZDR using Ryzhkov algorithm.\n 10/ Compute Giangrande's PHIDP using pyart.\n 11/ Unfold velocity.\n 12/ Compute attenuation for ZH\n 13/ Compute attenuation for ZDR\n 14/ Estimate Hydrometeors classification using csu toolbox.\n 15/ Estimate Rainfall rate using csu toolbox.\n 16/ Removing fake/temporary fieds.\n 17/ Rename fields to pyart standard names.\n \"\"\"\n FIELDS_NAMES = [\n (\"VEL\", \"velocity\"),\n (\"VEL_UNFOLDED\", \"corrected_velocity\"),\n (\"DBZ\", \"total_power\"),\n (\"DBZ_CORR\", \"corrected_reflectivity\"),\n (\"RHOHV_CORR\", \"cross_correlation_ratio\"),\n (\"ZDR\", \"differential_reflectivity\"),\n (\"ZDR_CORR_ATTEN\", \"corrected_differential_reflectivity\"),\n (\"PHIDP\", \"differential_phase\"),\n (\"PHIDP_BRINGI\", \"bringi_differential_phase\"),\n (\"PHIDP_GG\", \"giangrande_differential_phase\"),\n (\"PHIDP_VAL\", \"corrected_differential_phase\"),\n (\"KDP\", \"specific_differential_phase\"),\n (\"KDP_BRINGI\", \"bringi_specific_differential_phase\"),\n (\"KDP_GG\", \"giangrande_specific_differential_phase\"),\n (\"KDP_VAL\", \"corrected_specific_differential_phase\"),\n (\"WIDTH\", \"spectrum_width\"),\n (\"SNR\", \"signal_to_noise_ratio\"),\n (\"NCP\", \"normalized_coherent_power\"),\n (\"DBZV\", \"reflectivity_v\"),\n (\"WRADV\", \"spectrum_width_v\"),\n (\"SNRV\", \"signal_to_noise_ratio_v\"),\n (\"SQIV\", \"normalized_coherent_power_v\"),\n ]\n\n # List of keys that we'll keep in the output radar dataset.\n OUTPUT_RADAR_FLD = [\n \"corrected_differential_phase\",\n \"corrected_differential_reflectivity\",\n \"corrected_reflectivity\",\n \"corrected_specific_differential_phase\",\n \"corrected_velocity\",\n \"cross_correlation_ratio\",\n \"differential_phase\",\n \"differential_reflectivity\",\n \"radar_echo_classification\",\n \"radar_estimated_rain_rate\",\n \"signal_to_noise_ratio\",\n \"spectrum_width\",\n \"total_power\",\n \"velocity\",\n ]\n\n # !!! 
READING THE RADAR !!!\n if is_cpol:\n radar = pyart.io.read(radar_file_name)\n else:\n radar = radar_codes.read_radar(radar_file_name)\n\n pos = radar.range['data'] < 3e3\n for k in radar.fields.keys():\n radar.fields[k]['data'][:, pos] = np.NaN\n\n # Correct data type manually\n try:\n radar.longitude[\"data\"] = np.ma.masked_invalid(radar.longitude[\"data\"].astype(np.float32))\n radar.latitude[\"data\"] = np.ma.masked_invalid(radar.latitude[\"data\"].astype(np.float32))\n radar.altitude[\"data\"] = np.ma.masked_invalid(radar.altitude[\"data\"].astype(np.int32))\n except Exception:\n pass\n\n # Check if radar reflecitivity field is correct.\n if not radar_codes.check_reflectivity(radar):\n raise TypeError(f\"Reflectivity field is empty in {radar_file_name}.\")\n\n if not radar_codes.check_azimuth(radar):\n raise TypeError(f\"Azimuth field is empty in {radar_file_name}.\")\n\n if not radar_codes.check_year(radar):\n print(f\"{radar_file_name} date probably wrong. Had to correct century.\")\n\n new_azimuth, azi_has_changed = radar_codes.correct_azimuth(radar)\n if azi_has_changed:\n radar.azimuth[\"data\"] = new_azimuth\n\n # Getting radar's date and time.\n radar_start_date = cftime.num2pydate(radar.time[\"data\"][0], radar.time[\"units\"])\n radar.time[\"units\"] = radar.time[\"units\"].replace(\"since\", \"since \")\n\n # Correct Doppler velocity units.\n try:\n radar.fields[\"VEL\"][\"units\"] = \"m s-1\"\n vel_missing = False\n except KeyError:\n vel_missing = True\n\n # Looking for RHOHV field\n # For CPOL, season 09/10, there are no RHOHV fields before March!!!!\n try:\n radar.fields[\"RHOHV\"]\n fake_rhohv = False # Don't need to delete this field cause it's legit.\n except KeyError:\n # Creating a fake RHOHV field.\n fake_rhohv = True # We delete this fake field later.\n rho = pyart.config.get_metadata(\"cross_correlation_ratio\")\n rho[\"data\"] = np.ones_like(radar.fields[\"DBZ\"][\"data\"])\n radar.add_field(\"RHOHV\", rho)\n radar.add_field(\"RHOHV_CORR\", rho)\n\n # Compute SNR and extract radiosounding temperature.\n # Requires radiosoundings\n if sound_dir is not None:\n radiosonde_fname = radar_codes.get_radiosoundings(sound_dir, radar_start_date)\n try:\n height, temperature, snr = radar_codes.snr_and_sounding(radar, radiosonde_fname)\n radar.add_field(\"temperature\", temperature, replace_existing=True)\n radar.add_field(\"height\", height, replace_existing=True)\n except ValueError:\n traceback.print_exc()\n print(f\"Impossible to compute SNR {radar_file_name}\")\n return None\n\n # Looking for SNR\n try:\n radar.fields[\"SNR\"]\n except KeyError:\n radar.add_field(\"SNR\", snr, replace_existing=True)\n\n # Correct RHOHV\n if not fake_rhohv:\n rho_corr = radar_codes.correct_rhohv(radar)\n radar.add_field_like(\"RHOHV\", \"RHOHV_CORR\", rho_corr, replace_existing=True)\n\n # Correct ZDR\n corr_zdr = radar_codes.correct_zdr(radar)\n radar.add_field_like(\"ZDR\", \"ZDR_CORR\", corr_zdr, replace_existing=True)\n\n # GateFilter\n if is_cpol:\n gatefilter = filtering.do_gatefilter_cpol(\n radar, refl_name=\"DBZ\", phidp_name=\"PHIDP\", rhohv_name=\"RHOHV_CORR\", zdr_name=\"ZDR\"\n )\n else:\n gatefilter = filtering.do_gatefilter(\n radar, refl_name=\"DBZ\", phidp_name=\"PHIDP\", rhohv_name=\"RHOHV_CORR\", zdr_name=\"ZDR\"\n )\n\n # Check if NCP exists.\n try:\n radar.fields[\"NCP\"]\n fake_ncp = False\n except KeyError:\n fake_ncp = True\n ncp = pyart.config.get_metadata(\"normalized_coherent_power\")\n ncp[\"data\"] = np.zeros_like(radar.fields[\"RHOHV\"][\"data\"])\n 
ncp[\"data\"][gatefilter.gate_included] = 1\n radar.add_field(\"NCP\", ncp)\n\n with Chronos(f\"PHIDP for {os.path.basename(radar_file_name)}\"):\n phidp, kdp = phase.phidp_giangrande(radar, gatefilter)\n radar.add_field(\"PHIDP_VAL\", phidp)\n radar.add_field(\"KDP_VAL\", kdp)\n kdp_field_name = \"KDP_VAL\"\n phidp_field_name = \"PHIDP_VAL\"\n\n # Unfold VELOCITY\n if do_dealiasing:\n with Chronos(f\"UNRAVEL for {os.path.basename(radar_file_name)}\"):\n if not vel_missing:\n if is_cpol:\n vdop_unfold = velocity.unravel(radar, gatefilter, nyquist=13.3)\n else:\n vdop_unfold = velocity.unravel(radar, gatefilter)\n radar.add_field(\"VEL_UNFOLDED\", vdop_unfold, replace_existing=True)\n\n # Correct attenuation ZH and ZDR and hardcode gatefilter\n zh_corr = attenuation.correct_attenuation_zh_pyart(radar, gatefilter, phidp_field=phidp_field_name)\n radar.add_field_like(\"DBZ\", \"DBZ_CORR\", zh_corr)\n\n zdr_corr = attenuation.correct_attenuation_zdr(radar, gatefilter, phidp_name=phidp_field_name)\n radar.add_field(\"ZDR_CORR_ATTEN\", zdr_corr)\n\n # Hydrometeors classification\n hydro_class = hydrometeors.hydrometeor_classification(\n radar, gatefilter, kdp_name=kdp_field_name, zdr_name=\"ZDR_CORR_ATTEN\"\n )\n\n radar.add_field(\"radar_echo_classification\", hydro_class, replace_existing=True)\n\n # Rainfall rate\n rainfall = hydrometeors.rainfall_rate(\n radar, gatefilter, kdp_name=kdp_field_name, refl_name=\"DBZ_CORR\", zdr_name=\"ZDR_CORR_ATTEN\"\n )\n radar.add_field(\"radar_estimated_rain_rate\", rainfall)\n\n # Removing fake and useless fields.\n if fake_ncp:\n radar.fields.pop(\"NCP\")\n\n if fake_rhohv:\n radar.fields.pop(\"RHOHV\")\n radar.fields.pop(\"RHOHV_CORR\")\n\n # Remove obsolete fields:\n for obsolete_key in [\"Refl\", \"PHI_UNF\", \"PHI_CORR\", \"height\", \"TH\", \"TV\", \"ZDR_CORR\", \"RHOHV\"]:\n try:\n radar.fields.pop(obsolete_key)\n except KeyError:\n continue\n\n # Change the temporary working name of fields to the one define by the user.\n for old_key, new_key in FIELDS_NAMES:\n try:\n radar.add_field(new_key, radar.fields.pop(old_key), replace_existing=True)\n except KeyError:\n continue\n\n # Delete working variables.\n if is_cpol:\n for k in list(radar.fields.keys()):\n if k not in OUTPUT_RADAR_FLD:\n radar.fields.pop(k)\n\n # Correct the standard_name metadata:\n cfmetadata.correct_standard_name(radar)\n # ACDD-1.3 compliant metadata:\n cfmetadata.coverage_content_type(radar)\n cfmetadata.correct_units(radar)\n\n return radar\n" ]
[ [ "numpy.zeros_like", "numpy.ones_like" ] ]
mirand863/hiclass
[ "49a4bcded7e0a50621f2efa79145dd2d60a87332" ]
[ "tests/test_BinaryPolicy.py" ]
[ "from pathlib import Path\nfrom scipy.sparse import csr_matrix\nimport networkx as nx\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\n\nfrom hiclass.BinaryPolicy import (\n BinaryPolicy,\n ExclusivePolicy,\n LessExclusivePolicy,\n ExclusiveSiblingsPolicy,\n InclusivePolicy,\n LessInclusivePolicy,\n SiblingsPolicy,\n)\n\nfixtures_loc = Path(__file__).parent / \"fixtures\"\n\n\[email protected]\ndef digraph():\n return nx.DiGraph(\n [\n (\"r\", \"1\"),\n (\"r\", \"2\"),\n (\"1\", \"1.1\"),\n (\"1\", \"1.2\"),\n (\"2\", \"2.1\"),\n (\"2\", \"2.2\"),\n (\"2.1\", \"2.1.1\"),\n (\"2.1\", \"2.1.2\"),\n (\"2.2\", \"2.2.1\"),\n (\"2.2\", \"2.2.2\"),\n ]\n )\n\n\[email protected]\ndef features_1d():\n return np.array(\n [\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n ]\n )\n\n\[email protected]\ndef features_2d():\n return np.array(\n [\n [1, 2],\n [3, 4],\n [5, 6],\n [7, 8],\n [9, 10],\n [11, 12],\n [13, 14],\n [15, 16],\n ]\n )\n\n\[email protected]\ndef features_sparse():\n return csr_matrix(\n [\n [1, 2],\n [3, 4],\n [5, 6],\n [7, 8],\n [9, 10],\n [11, 12],\n [13, 14],\n [15, 16],\n ]\n )\n\n\[email protected]\ndef labels():\n return np.array(\n [\n [\"1\", \"1.1\"],\n [\"1\", \"1.2\"],\n [\"2\", \"2.1\"],\n [\"2\", \"2.2\"],\n [\"2.1\", \"2.1.1\"],\n [\"2.1\", \"2.1.2\"],\n [\"2.2\", \"2.2.1\"],\n [\"2.2\", \"2.2.2\"],\n ]\n )\n\n\ndef test_binary_policy_positive_examples(digraph, features_1d, labels):\n policy = BinaryPolicy(digraph, features_1d, labels)\n with pytest.raises(NotImplementedError):\n policy.positive_examples(\"1\")\n\n\ndef test_binary_policy_negative_examples(digraph, features_1d, labels):\n policy = BinaryPolicy(digraph, features_1d, labels)\n with pytest.raises(NotImplementedError):\n policy.negative_examples(\"1\")\n\n\ndef test_exclusive_policy_positive_examples_1(digraph, features_1d, labels):\n policy = ExclusivePolicy(digraph, features_1d, labels)\n ground_truth = [True, True, False, False, False, False, False, False]\n result = policy.positive_examples(\"1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_exclusive_policy_positive_examples_2(digraph, features_1d, labels):\n policy = ExclusivePolicy(digraph, features_1d, labels)\n ground_truth = [False, False, True, True, False, False, False, False]\n result = policy.positive_examples(\"2\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_exclusive_policy_positive_examples_3(digraph, features_1d, labels):\n policy = ExclusivePolicy(digraph, features_1d, labels)\n ground_truth = [False, False, True, False, True, True, False, False]\n result = policy.positive_examples(\"2.1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_exclusive_policy_negative_examples_1(digraph, features_1d, labels):\n policy = ExclusivePolicy(digraph, features_1d, labels)\n ground_truth = [False, False, True, True, True, True, True, True]\n result = policy.negative_examples(\"1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_exclusive_policy_negative_examples_2(digraph, features_1d, labels):\n policy = ExclusivePolicy(digraph, features_1d, labels)\n ground_truth = [True, True, False, False, True, True, True, True]\n result = policy.negative_examples(\"2\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_exclusive_policy_negative_examples_3(digraph, features_1d, labels):\n policy = ExclusivePolicy(digraph, features_1d, labels)\n ground_truth = [True, True, False, True, False, False, True, True]\n result = policy.negative_examples(\"2.1\")\n 
assert_array_equal(ground_truth, result)\n\n\ndef test_less_exclusive_policy_negative_examples_1(digraph, features_1d, labels):\n policy = LessExclusivePolicy(digraph, features_1d, labels)\n ground_truth = [False, False, True, True, True, True, True, True]\n result = policy.negative_examples(\"1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_less_exclusive_policy_negative_examples_2(digraph, features_1d, labels):\n policy = LessExclusivePolicy(digraph, features_1d, labels)\n ground_truth = [True, True, False, False, False, False, False, False]\n result = policy.negative_examples(\"2\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_less_exclusive_policy_negative_examples_3(digraph, features_1d, labels):\n policy = LessExclusivePolicy(digraph, features_1d, labels)\n ground_truth = [True, True, False, True, False, False, True, True]\n result = policy.negative_examples(\"2.1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_exclusive_siblings_policy_negative_examples_1(digraph, features_1d, labels):\n policy = ExclusiveSiblingsPolicy(digraph, features_1d, labels)\n ground_truth = [False, False, True, True, False, False, False, False]\n result = policy.negative_examples(\"1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_exclusive_siblings_policy_negative_examples_2(digraph, features_1d, labels):\n policy = ExclusiveSiblingsPolicy(digraph, features_1d, labels)\n ground_truth = [True, True, False, False, False, False, False, False]\n result = policy.negative_examples(\"2\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_exclusive_siblings_policy_negative_examples_3(digraph, features_1d, labels):\n policy = ExclusiveSiblingsPolicy(digraph, features_1d, labels)\n ground_truth = [False, False, False, True, False, False, True, True]\n result = policy.negative_examples(\"2.1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_inclusive_policy_positive_examples_1(digraph, features_1d, labels):\n policy = InclusivePolicy(digraph, features_1d, labels)\n ground_truth = [True, True, False, False, False, False, False, False]\n result = policy.positive_examples(\"1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_inclusive_policy_positive_examples_2(digraph, features_1d, labels):\n policy = InclusivePolicy(digraph, features_1d, labels)\n ground_truth = [False, False, True, True, True, True, True, True]\n result = policy.positive_examples(\"2\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_inclusive_policy_positive_examples_3(digraph, features_1d, labels):\n policy = InclusivePolicy(digraph, features_1d, labels)\n ground_truth = [False, False, True, False, True, True, False, False]\n result = policy.positive_examples(\"2.1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_inclusive_policy_negative_examples_1(digraph, features_1d, labels):\n policy = InclusivePolicy(digraph, features_1d, labels)\n ground_truth = [False, False, True, True, True, True, True, True]\n result = policy.negative_examples(\"1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_inclusive_policy_negative_examples_2(digraph, features_1d, labels):\n policy = InclusivePolicy(digraph, features_1d, labels)\n ground_truth = [True, True, False, False, False, False, False, False]\n result = policy.negative_examples(\"2\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_inclusive_policy_negative_examples_3(digraph, features_1d, labels):\n policy = InclusivePolicy(digraph, features_1d, labels)\n ground_truth = [True, True, False, 
False, False, False, True, True]\n result = policy.negative_examples(\"2.1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_less_inclusive_policy_negative_examples_1(digraph, features_1d, labels):\n policy = LessInclusivePolicy(digraph, features_1d, labels)\n ground_truth = [False, False, True, True, True, True, True, True]\n result = policy.negative_examples(\"1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_less_inclusive_policy_negative_examples_2(digraph, features_1d, labels):\n policy = LessInclusivePolicy(digraph, features_1d, labels)\n ground_truth = [True, True, False, False, False, False, False, False]\n result = policy.negative_examples(\"2\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_less_inclusive_policy_negative_examples_3(digraph, features_1d, labels):\n policy = LessInclusivePolicy(digraph, features_1d, labels)\n ground_truth = [True, True, False, True, False, False, True, True]\n result = policy.negative_examples(\"2.1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_siblings_policy_negative_examples_1(digraph, features_1d, labels):\n policy = SiblingsPolicy(digraph, features_1d, labels)\n ground_truth = [False, False, True, True, True, True, True, True]\n result = policy.negative_examples(\"1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_siblings_policy_negative_examples_2(digraph, features_1d, labels):\n policy = SiblingsPolicy(digraph, features_1d, labels)\n ground_truth = [True, True, False, False, False, False, False, False]\n result = policy.negative_examples(\"2\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_siblings_policy_negative_examples_3(digraph, features_1d, labels):\n policy = SiblingsPolicy(digraph, features_1d, labels)\n ground_truth = [False, False, False, True, False, False, True, True]\n result = policy.negative_examples(\"2.1\")\n assert_array_equal(ground_truth, result)\n\n\ndef test_siblings_get_binary_examples_1d_1(digraph, features_1d, labels):\n policy = SiblingsPolicy(digraph, features_1d, labels)\n ground_truth_x = [1, 2, 3, 4, 5, 6, 7, 8]\n ground_truth_y = [1, 1, 0, 0, 0, 0, 0, 0]\n x, y = policy.get_binary_examples(\"1\")\n assert_array_equal(ground_truth_x, x)\n assert_array_equal(ground_truth_y, y)\n\n\ndef test_siblings_get_binary_examples_1d_2(digraph, features_1d, labels):\n policy = SiblingsPolicy(digraph, features_1d, labels)\n ground_truth_x = [3, 4, 5, 6, 7, 8, 1, 2]\n ground_truth_y = [1, 1, 1, 1, 1, 1, 0, 0]\n x, y = policy.get_binary_examples(\"2\")\n assert_array_equal(ground_truth_x, x)\n assert_array_equal(ground_truth_y, y)\n\n\ndef test_siblings_get_binary_examples_1d_3(digraph, features_1d, labels):\n policy = SiblingsPolicy(digraph, features_1d, labels)\n ground_truth_x = [3, 5, 6, 4, 7, 8]\n ground_truth_y = [1, 1, 1, 0, 0, 0]\n x, y = policy.get_binary_examples(\"2.1\")\n assert_array_equal(ground_truth_x, x)\n assert_array_equal(ground_truth_y, y)\n\n\ndef test_siblings_get_binary_examples_2d_1(digraph, features_2d, labels):\n policy = SiblingsPolicy(digraph, features_2d, labels)\n ground_truth_x = [\n [1, 2],\n [3, 4],\n [5, 6],\n [7, 8],\n [9, 10],\n [11, 12],\n [13, 14],\n [15, 16],\n ]\n ground_truth_y = [1, 1, 0, 0, 0, 0, 0, 0]\n x, y = policy.get_binary_examples(\"1\")\n assert_array_equal(ground_truth_x, x)\n assert_array_equal(ground_truth_y, y)\n\n\ndef test_siblings_get_binary_examples_2d_2(digraph, features_2d, labels):\n policy = SiblingsPolicy(digraph, features_2d, labels)\n ground_truth_x = [\n [5, 6],\n [7, 8],\n [9, 10],\n [11, 
12],\n [13, 14],\n [15, 16],\n [1, 2],\n [3, 4],\n ]\n ground_truth_y = [1, 1, 1, 1, 1, 1, 0, 0]\n x, y = policy.get_binary_examples(\"2\")\n assert_array_equal(ground_truth_x, x)\n assert_array_equal(ground_truth_y, y)\n\n\ndef test_siblings_get_binary_examples_2d_3(digraph, features_2d, labels):\n policy = SiblingsPolicy(digraph, features_2d, labels)\n ground_truth_x = [[5, 6], [9, 10], [11, 12], [7, 8], [13, 14], [15, 16]]\n ground_truth_y = [1, 1, 1, 0, 0, 0]\n x, y = policy.get_binary_examples(\"2.1\")\n assert_array_equal(ground_truth_x, x)\n assert_array_equal(ground_truth_y, y)\n\n\ndef test_siblings_get_binary_examples_sparse_1(digraph, features_sparse, labels):\n policy = SiblingsPolicy(digraph, features_sparse, labels)\n ground_truth_x = [\n [1, 2],\n [3, 4],\n [5, 6],\n [7, 8],\n [9, 10],\n [11, 12],\n [13, 14],\n [15, 16],\n ]\n ground_truth_y = [1, 1, 0, 0, 0, 0, 0, 0]\n x, y = policy.get_binary_examples(\"1\")\n assert_array_equal(ground_truth_x, x.todense())\n assert_array_equal(ground_truth_y, y)\n\n\ndef test_siblings_get_binary_examples_sparse_2(digraph, features_sparse, labels):\n policy = SiblingsPolicy(digraph, features_sparse, labels)\n ground_truth_x = [\n [5, 6],\n [7, 8],\n [9, 10],\n [11, 12],\n [13, 14],\n [15, 16],\n [1, 2],\n [3, 4],\n ]\n ground_truth_y = [1, 1, 1, 1, 1, 1, 0, 0]\n x, y = policy.get_binary_examples(\"2\")\n assert_array_equal(ground_truth_x, x.todense())\n assert_array_equal(ground_truth_y, y)\n\n\ndef test_siblings_get_binary_examples_sparse_3(digraph, features_sparse, labels):\n policy = SiblingsPolicy(digraph, features_sparse, labels)\n ground_truth_x = [[5, 6], [9, 10], [11, 12], [7, 8], [13, 14], [15, 16]]\n ground_truth_y = [1, 1, 1, 0, 0, 0]\n x, y = policy.get_binary_examples(\"2.1\")\n assert_array_equal(ground_truth_x, x.todense())\n assert_array_equal(ground_truth_y, y)\n" ]
[ [ "numpy.array", "scipy.sparse.csr_matrix", "numpy.testing.assert_array_equal" ] ]
sergeLabo/depthai_blazepose_labo
[ "c9d6808c1e0b406c541e012df28ac7cd7ebfa422" ]
[ "ge_osc.py" ]
[ "\n# Echap pour finir proprement le script\n# Espace pour bascule, plein écran / normal\n\n\nimport os\nfrom time import time, sleep\nfrom threading import Thread\n\nimport cv2\nimport numpy as np\n\nfrom oscpy.server import OSCThreadServer\n\nfrom pynput.mouse import Button, Controller\n\nfrom filtre import moving_average\nfrom my_config import MyConfig\n\n\nglobal GE_LOOP\nGE_LOOP = 1\n\n\n\nclass GrandeEchelleViewer:\n \"\"\"Affichage dans une fenêtre OpenCV, et gestion des fenêtres\"\"\"\n global GE_LOOP\n\n def __init__(self, config):\n\n self.config = config\n\n freq = int(self.config['histopocene']['frame_rate_du_film'])\n if freq != 0:\n self.tempo = int(1000/freq)\n else:\n print(\"Le frame rate du film est à 0 !\")\n os._exit(0)\n\n self.info = int(self.config['histopocene']['info'])\n self.mode_expo = int(self.config['histopocene']['mode_expo'])\n self.full_screen = int(self.config['histopocene']['full_screen'])\n if self.mode_expo:\n self.info = 0\n self.full_screen = 1\n self.create_window()\n self.mouse = Controller()\n\n def create_window(self):\n cv2.namedWindow('histopocene', cv2.WND_PROP_FULLSCREEN)\n\n def set_window(self):\n \"\"\" from pynput.mouse import Button, Controller\n mouse = Controller()\n mouse.position = (50,60)\n \"\"\"\n if self.full_screen:\n cv2.setWindowProperty( 'histopocene',\n cv2.WND_PROP_FULLSCREEN,\n cv2.WINDOW_FULLSCREEN)\n x, y, w, h = cv2.getWindowImageRect('histopocene')\n self.mouse.position = (w, h)\n else:\n cv2.setWindowProperty( 'histopocene',\n cv2.WND_PROP_FULLSCREEN,\n cv2.WINDOW_NORMAL)\n\n def run(self):\n \"\"\"Boucle infinie du script\"\"\"\n global GE_LOOP\n\n while GE_LOOP:\n self.video.set(cv2.CAP_PROP_POS_FRAMES, self.frame_nbr)\n ret, img = self.video.read()\n\n if self.mode_expo:\n self.info = 0\n self.full_screen = 1\n self.set_window()\n\n if ret:\n if self.info:\n img = self.draw_text(img, self.frame_nbr)\n # # print(self.frame_nbr)\n cv2.imshow('histopocene', img)\n\n k = cv2.waitKey(10)\n # Space pour full screen or not\n if k == 32: # space\n if not self.mode_expo:\n if self.full_screen == 1:\n self.full_screen = 0\n elif self.full_screen == 0:\n self.full_screen = 1\n self.set_window()\n # Esc to exit\n if k == 27:\n GE_LOOP = 0\n\n self.video.release()\n cv2.destroyAllWindows()\n\n\n\nclass GrandeEchelle(GrandeEchelleViewer):\n\n global GE_LOOP\n\n def __init__(self, current_dir, config):\n\n self.config = config\n\n # Fenêtres OpenCV\n GrandeEchelleViewer.__init__(self, config)\n\n\n osc = OSCThreadServer()\n sock = osc.listen(address='127.0.0.1', port=8000, default=True)\n @osc.address(b'/depth')\n def callback(*values):\n depth = int(values[0])\n # # print(f\"depth: {depth}\")\n self.get_frame(depth)\n\n self.frame_nbr = 0\n self.last_time = time()\n self.raz = int(self.config['histopocene']['raz'])\n\n film = str(current_dir) + \"/\" + self.config['histopocene']['film']\n print(\"Le film est:\", film)\n self.video = cv2.VideoCapture(film)\n self.lenght = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))\n print(\"Longueur du film :\", self.lenght) # 38400\n\n self.profondeur_mini = int(self.config['histopocene']['profondeur_mini'])\n self.profondeur_maxi = int(self.config['histopocene']['profondeur_maxi'])\n self.largeur_maxi = int(self.config['histopocene']['largeur_maxi'])\n self.pile_size = int(self.config['histopocene']['pile_size'])\n self.lissage = int(self.config['histopocene']['lissage'])\n print(\"self.lissage\", self.lissage)\n self.depth = 1\n self.histo = [self.profondeur_mini + 1000]*self.pile_size\n\n 
# Stockage des 8 dernières valeurs de frame\n self.slow_size = int(self.config['histopocene']['slow_size'])\n self.histo_slow = [0]*self.slow_size\n\n def get_frame(self, depth):\n \"\"\" Appelé à chaque réception de depth dans receive 'depth',\n longueur en mm\n 160 frame pour 12 000 ans\n 39750 frame pour 300 cm\n 1 cm pour 132 frames\n \"\"\"\n # # print(\"depth\", depth)\n # Mise à jour de la pile\n self.histo.append(depth)\n del self.histo[0]\n\n try:\n depth = int(moving_average( np.array(self.histo),\n self.lissage,\n type_='simple')[0])\n except:\n print(\"Erreur moving_average depth\")\n\n # Pour bien comprendre\n mini = self.profondeur_mini + 500 # frame 0 si mini\n maxi = self.profondeur_maxi - 500 # frame lenght si maxi\n lenght = self.lenght\n\n # Voir le dessin\n # (x1, y1, x2, y2) = (mini, 0, maxi, lenght)\n a, b = get_a_b(mini, lenght, maxi, 0)\n frame = int(a*depth + b)\n print(\"frame\", frame)\n # Pour ne jamais planté\n if frame < 0:\n frame = 0\n if frame >= lenght:\n frame = lenght - 1\n\n # Pile des 8 dernières valeurs lissées\n self.histo_slow.append(frame)\n del self.histo_slow[0]\n try:\n frame = int(moving_average( np.array(self.histo_slow),\n self.slow_size - 1,\n type_='simple')[0])\n except:\n print(\"Erreur moving_average depth\")\n\n # Si pas de nouvelle frame en self.raz secondes, remise à 0\n if time() - self.last_time > self.raz:\n # Si tout près del'écran et non capturé\n if frame > self.lenght - 500:\n frame = self.lenght - 1\n else:\n frame = 0\n\n self.last_time = time()\n\n self.frame_nbr = frame\n\n def draw_text(self, img, frame):\n d = { \"Frame du Film\": frame,\n \"Profondeur mini\": self.profondeur_mini,\n \"Profondeur maxi\": self.profondeur_maxi,\n \"X maxi\": self.largeur_maxi,\n \"Taille pile\": self.pile_size,\n \"Lissage\": self.lissage}\n i = 0\n for key, val in d.items():\n text = key + \" : \" + str(val)\n cv2.putText(img, # image\n text,\n (30, 150*i+200), # position\n cv2.FONT_HERSHEY_SIMPLEX, # police\n 2, # taille police\n (0, 255, 0), # couleur\n 6) # épaisseur\n i += 1\n\n return img\n\n\n\ndef get_a_b(x1, y1, x2, y2):\n a = (y1 - y2)/(x1 - x2)\n b = y1 - a*x1\n return a, b\n\n\n\nif __name__ == '__main__':\n current_dir = '/media/data/3D/projets/depthai_blazepose_labo'\n\n mc = MyConfig('/media/data/3D/projets/depthai_blazepose_labo/grande_echelle.ini')\n config = mc.conf\n\n ge = GrandeEchelle(current_dir, config)\n # run est dans Viewer\n ge.run()\n" ]
[ [ "numpy.array" ] ]
sousouhou/BA-ScaleFreeNetwork
[ "1c22f05b414dd57fdd1a70d738069a96fa69f6aa" ]
[ "BA-ScaleFreeNetwork.py" ]
[ "# barabasi albert model algorithm to generate a scale free network.\r\n# Refer to https://en.wikipedia.org/wiki/Barab%C3%A1si%E2%80%93Albert_model\r\n\r\nimport numpy as np\r\nimport random\r\n\r\n# Generated graph is bidirectional, edges are in pairs a->b and b->a\r\n# Every node is not self-linked.\r\ndef BA_SFN(N, M0, m) :\r\n \"\"\"\r\n N, number of nodes in the final network.\r\n M0, initial connected network of M0 nodes.\r\n m, Each new node is connected to m existing nodes.\r\n \"\"\"\r\n assert( M0 < N )\r\n assert( m <= M0 )\r\n \r\n #adjacency matrix\r\n AM = np.zeros((N,N))\r\n \r\n for i in range(0, M0):\r\n for j in range(i+1,M0):\r\n AM[i,j] = 1\r\n AM[j,i] = 1\r\n \r\n # add 'c' node\r\n for c in range(M0,N):\r\n Allk = np.sum(AM) # all degree Eki\r\n ki = np.sum(AM , axis = 1) # ki each degree for node i\r\n \r\n pi = np.zeros(c,dtype=np.float) # probability\r\n for i in range(0,c):\r\n pi[i] = ki[i]/(Allk*1.0)\r\n # print pi\r\n \r\n # connect m edges.\r\n for d in range(0,m):\r\n rand01 = random.random() #[0,1.0)\r\n \r\n sumpi = 0.0\r\n for g in range(0,c):\r\n sumpi += pi[g]\r\n if sumpi>rand01 : # connect 'c' node with 'g' node.\r\n AM[c,g] = 1\r\n AM[g,c] = 1 \r\n break\r\n \r\n return AM \r\n \r\n \r\nif __name__ == '__main__':\r\n\r\n re_AM = BA_SFN(200,3,2)\r\n \r\n pfile = open(\"result-scale-free-network.txt\", \"w\")\r\n \r\n pfile.write(\"Adjacency matrix: \\n\")\r\n for i in range(0, re_AM.shape[0] ) :\r\n for j in range(0, re_AM.shape[1] ) :\r\n pfile.write(\"%.0f \"%re_AM[i,j])\r\n pfile.write(\"\\n\")\r\n \r\n pfile.write(\"\\n----------\\nAdjacency list: \\n\")\r\n for i in range(0, re_AM.shape[0] ) :\r\n pfile.write(\"%3d -> \"%i )\r\n for j in range(0, re_AM.shape[1] ) : \r\n if 1 == re_AM[i,j] :\r\n pfile.write(\"%d \"%j )\r\n pfile.write(\"\\n\") \r\n \r\n \r\n pfile.close()\r\n\r\n \r\n \r\n \r\n \r\n " ]
[ [ "numpy.sum", "numpy.zeros" ] ]
tatuanb/monai_V1
[ "ff9bbfa82763de46cbac75553e340633e3d84ecb", "ff9bbfa82763de46cbac75553e340633e3d84ecb" ]
[ "tests/test_dataloader.py", "tests/test_handler_hausdorff_distance.py" ]
[ "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport unittest\n\nimport numpy as np\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.data import CacheDataset, DataLoader, Dataset\nfrom monai.transforms import Compose, DataStatsd, Randomizable, SimulateDelayd\nfrom monai.utils import set_determinism\n\nTEST_CASE_1 = [[{\"image\": np.asarray([1, 2, 3])}, {\"image\": np.asarray([4, 5])}]]\n\nTEST_CASE_2 = [[{\"label\": torch.as_tensor([[3], [2]])}, {\"label\": np.asarray([[1], [2]])}]]\n\n\nclass TestDataLoader(unittest.TestCase):\n def test_values(self):\n datalist = [\n {\"image\": \"spleen_19.nii.gz\", \"label\": \"spleen_label_19.nii.gz\"},\n {\"image\": \"spleen_31.nii.gz\", \"label\": \"spleen_label_31.nii.gz\"},\n ]\n transform = Compose(\n [\n DataStatsd(keys=[\"image\", \"label\"], data_shape=False, value_range=False, data_value=True),\n SimulateDelayd(keys=[\"image\", \"label\"], delay_time=0.1),\n ]\n )\n dataset = CacheDataset(data=datalist, transform=transform, cache_rate=0.5, cache_num=1)\n n_workers = 0 if sys.platform == \"win32\" else 2\n dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=n_workers)\n for d in dataloader:\n self.assertEqual(d[\"image\"][0], \"spleen_19.nii.gz\")\n self.assertEqual(d[\"image\"][1], \"spleen_31.nii.gz\")\n self.assertEqual(d[\"label\"][0], \"spleen_label_19.nii.gz\")\n self.assertEqual(d[\"label\"][1], \"spleen_label_31.nii.gz\")\n\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2])\n def test_exception(self, datalist):\n dataset = Dataset(data=datalist, transform=None)\n dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)\n with self.assertRaisesRegex((TypeError, RuntimeError), \"Collate error on the key\"):\n for _ in dataloader:\n pass\n\n\nclass _RandomDataset(torch.utils.data.Dataset, Randomizable):\n def __getitem__(self, index):\n return self.R.randint(0, 1000, (1,))\n\n def __len__(self):\n return 8\n\n\nclass TestLoaderRandom(unittest.TestCase):\n \"\"\"\n Testing data loader working with the randomizable interface\n \"\"\"\n\n def setUp(self):\n set_determinism(0)\n\n def tearDown(self):\n set_determinism(None)\n\n def test_randomize(self):\n dataset = _RandomDataset()\n dataloader = DataLoader(dataset, batch_size=2, num_workers=3)\n output = []\n for _ in range(2):\n for batch in dataloader:\n output.extend(batch.data.numpy().flatten().tolist())\n self.assertListEqual(output, [594, 170, 524, 778, 370, 906, 292, 589, 762, 763, 156, 886, 42, 405, 221, 166])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT 
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\nfrom ignite.engine import Engine\n\nfrom monai.handlers import HausdorffDistance\n\n\ndef create_spherical_seg_3d(\n radius: float = 20.0, centre: Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int, int, int] = (99, 99, 99)\n) -> np.ndarray:\n \"\"\"\n Return a 3D image with a sphere inside. Voxel values will be\n 1 inside the sphere, and 0 elsewhere.\n\n Args:\n radius: radius of sphere (in terms of number of voxels, can be partial)\n centre: location of sphere centre.\n im_shape: shape of image to create\n\n See also:\n :py:meth:`~create_test_image_3d`\n \"\"\"\n # Create image\n image = np.zeros(im_shape, dtype=np.int32)\n spy, spx, spz = np.ogrid[\n -centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2]\n ]\n circle = (spx * spx + spy * spy + spz * spz) <= radius * radius\n\n image[circle] = 1\n image[~circle] = 0\n return image\n\n\nsampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0)\n# test input a list of channel-first tensor\nsampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)]\nsampler_sphere_zeros = torch.zeros_like(sampler_sphere)\n\nTEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt]\nTEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt]\nTEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt]\nTEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros]\n\n\nclass TestHandlerHausdorffDistance(unittest.TestCase):\n # TODO test multi node Hausdorff Distance\n\n def test_compute(self):\n hd_metric = HausdorffDistance(include_background=True)\n\n def _val_func(engine, batch):\n pass\n\n engine = Engine(_val_func)\n hd_metric.attach(engine, \"hausdorff_distance\")\n\n y_pred, y = TEST_SAMPLE_1\n hd_metric.update([y_pred, y])\n self.assertEqual(hd_metric.compute(), 10)\n y_pred, y = TEST_SAMPLE_2\n hd_metric.update([y_pred, y])\n self.assertEqual(hd_metric.compute(), 5)\n y_pred, y = TEST_SAMPLE_3\n hd_metric.update([y_pred, y])\n self.assertEqual(hd_metric.compute(), float(\"inf\"))\n y_pred, y = TEST_SAMPLE_4\n hd_metric.update([y_pred, y])\n self.assertEqual(hd_metric.compute(), float(\"inf\"))\n\n def test_shape_mismatch(self):\n hd_metric = HausdorffDistance(include_background=True)\n with self.assertRaises((AssertionError, ValueError)):\n y_pred = TEST_SAMPLE_1[0]\n y = torch.ones((1, 1, 10, 10, 10))\n hd_metric.update([y_pred, y])\n\n def test_reduction(self):\n hd_metric = HausdorffDistance(include_background=True, reduction=\"mean_channel\")\n\n def _val_func(engine, batch):\n pass\n\n engine = Engine(_val_func)\n hd_metric.attach(engine, \"hausdorff_distance\")\n\n y_pred, y = TEST_SAMPLE_1\n hd_metric.update([y_pred, y])\n y_pred, y = TEST_SAMPLE_2\n hd_metric.update([y_pred, y])\n torch.testing.assert_allclose(hd_metric.compute().float(), torch.tensor([10.0, 0.0]))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.as_tensor", "numpy.asarray" ], [ "torch.zeros_like", "torch.tensor", "numpy.zeros", "torch.ones" ] ]
Acornagain/lux
[ "161b6c3dbeccb23ff7eb0e6f12b7c59691ee8df9" ]
[ "lux/executor/PandasExecutor.py" ]
[ "# Copyright 2019-2020 The Lux Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nfrom lux.vis.VisList import VisList\nfrom lux.vis.Vis import Vis\nfrom lux.core.frame import LuxDataFrame\nfrom lux.executor.Executor import Executor\nfrom lux.utils import utils\nfrom lux.utils.date_utils import is_datetime_series\nfrom lux.utils.utils import check_import_lux_widget, check_if_id_like, is_numeric_nan_column\nimport warnings\nimport lux\n\n\nclass PandasExecutor(Executor):\n \"\"\"\n Given a Vis objects with complete specifications, fetch and process data using Pandas dataframe operations.\n \"\"\"\n\n def __init__(self):\n self.name = \"PandasExecutor\"\n warnings.formatwarning = lux.warning_format\n\n def __repr__(self):\n return f\"<PandasExecutor>\"\n\n @staticmethod\n def execute_sampling(ldf: LuxDataFrame):\n \"\"\"\n Compute and cache a sample for the overall dataframe\n\n - When # of rows exceeds lux.config.sampling_start, take 75% df as sample\n - When # of rows exceeds lux.config.sampling_cap, cap the df at {lux.config.sampling_cap} rows\n\n lux.config.sampling_start = 100k rows\n lux.config.sampling_cap = 1M rows\n\n Parameters\n ----------\n ldf : LuxDataFrame\n \"\"\"\n ldf.history.freeze()\n\n SAMPLE_FLAG = lux.config.sampling\n SAMPLE_START = lux.config.sampling_start\n SAMPLE_CAP = lux.config.sampling_cap\n SAMPLE_FRAC = 0.75\n\n if SAMPLE_FLAG and len(ldf) > SAMPLE_CAP:\n if ldf._sampled is None: # memoize unfiltered sample df\n ldf._sampled = ldf.sample(n=SAMPLE_CAP, random_state=1)\n ldf._message.add_unique(\n f\"Large dataframe detected: Lux is only visualizing a sample capped at {SAMPLE_CAP} rows.\",\n priority=99,\n )\n elif SAMPLE_FLAG and len(ldf) > SAMPLE_START:\n if ldf._sampled is None: # memoize unfiltered sample df\n ldf._sampled = ldf.sample(frac=SAMPLE_FRAC, random_state=1)\n ldf._message.add_unique(\n f\"Large dataframe detected: Lux is visualizing a sample of {SAMPLE_FRAC}% of the dataframe ({len(ldf._sampled)} rows).\",\n priority=99,\n )\n else:\n ldf._sampled = ldf\n ldf.history.unfreeze()\n\n @staticmethod\n def execute_approx_sample(ldf: LuxDataFrame):\n \"\"\"\n Compute and cache an approximate sample of the overall dataframe\n for the purpose of early pruning of the visualization search space\n\n Parameters\n ----------\n ldf : LuxDataFrame\n \"\"\"\n ldf.history.freeze()\n if ldf._approx_sample is None:\n if len(ldf._sampled) > lux.config.early_pruning_sample_start:\n ldf._approx_sample = ldf._sampled.sample(\n n=lux.config.early_pruning_sample_cap, random_state=1\n )\n else:\n ldf._approx_sample = ldf._sampled\n ldf.history.unfreeze()\n\n\n @staticmethod\n def execute(vislist: VisList, ldf: LuxDataFrame, approx=False):\n \"\"\"\n Given a VisList, fetch the data required to render the vis.\n 1) Apply filters\n 2) Retrieve relevant attribute\n 3) Perform vis-related processing (aggregation, binning)\n 4) return a DataFrame with relevant results\n\n Parameters\n ----------\n vislist: list[lux.Vis]\n vis list that contains 
lux.Vis objects for visualization.\n ldf : lux.core.frame\n LuxDataFrame with specified intent.\n\n Returns\n -------\n None\n \"\"\"\n ldf.history.freeze()\n PandasExecutor.execute_sampling(ldf)\n for vis in vislist:\n # The vis data starts off being original or sampled dataframe\n vis._vis_data = ldf._sampled\n # Approximating vis for early pruning\n if approx:\n vis._original_df = vis._vis_data\n PandasExecutor.execute_approx_sample(ldf)\n vis._vis_data = ldf._approx_sample\n vis.approx = True\n filter_executed = PandasExecutor.execute_filter(vis)\n # Select relevant data based on attribute information\n attributes = set([])\n for clause in vis._inferred_intent:\n if clause.attribute != \"Record\":\n attributes.add(clause.attribute)\n # TODO: Add some type of cap size on Nrows ?\n vis._vis_data = vis.data[list(attributes)]\n\n if vis.mark == \"bar\" or vis.mark == \"line\" or vis.mark == \"geographical\":\n PandasExecutor.execute_aggregate(vis, isFiltered=filter_executed)\n elif vis.mark == \"histogram\":\n PandasExecutor.execute_binning(ldf, vis)\n elif vis.mark == \"heatmap\":\n # Early pruning based on interestingness of scatterplots\n if approx:\n vis._mark = \"scatter\"\n else:\n vis._mark = \"heatmap\"\n PandasExecutor.execute_2D_binning(vis)\n vis.data.clear_intent() # Ensure that intent is not propogated to the vis data\n ldf.history.unfreeze()\n\n\n @staticmethod\n def execute_aggregate(vis: Vis, isFiltered=True):\n \"\"\"\n Aggregate data points on an axis for bar or line charts\n\n Parameters\n ----------\n vis: lux.Vis\n lux.Vis object that represents a visualization\n ldf : lux.core.frame\n LuxDataFrame with specified intent.\n\n Returns\n -------\n None\n \"\"\"\n import numpy as np\n\n x_attr = vis.get_attr_by_channel(\"x\")[0]\n y_attr = vis.get_attr_by_channel(\"y\")[0]\n has_color = False\n groupby_attr = \"\"\n measure_attr = \"\"\n attr_unique_vals = []\n if x_attr.aggregation is None or y_attr.aggregation is None:\n return\n if y_attr.aggregation != \"\":\n groupby_attr = x_attr\n measure_attr = y_attr\n agg_func = y_attr.aggregation\n if x_attr.aggregation != \"\":\n groupby_attr = y_attr\n measure_attr = x_attr\n agg_func = x_attr.aggregation\n if groupby_attr.attribute in vis.data.unique_values.keys():\n attr_unique_vals = vis.data.unique_values.get(groupby_attr.attribute)\n # checks if color is specified in the Vis\n if len(vis.get_attr_by_channel(\"color\")) == 1:\n color_attr = vis.get_attr_by_channel(\"color\")[0]\n color_attr_vals = vis.data.unique_values[color_attr.attribute]\n color_cardinality = len(color_attr_vals)\n # NOTE: might want to have a check somewhere to not use categorical variables with greater than some number of categories as a Color variable----------------\n has_color = True\n else:\n color_cardinality = 1\n if measure_attr != \"\":\n if measure_attr.attribute == \"Record\":\n # need to get the index name so that we can rename the index column to \"Record\"\n # if there is no index, default to \"index\"\n index_name = vis.data.index.name\n if index_name == None:\n index_name = \"index\"\n\n vis._vis_data = vis.data.reset_index()\n # if color is specified, need to group by groupby_attr and color_attr\n\n if has_color:\n vis._vis_data = (\n vis.data.groupby(\n [groupby_attr.attribute, color_attr.attribute], dropna=False, history=False\n )\n .count()\n .reset_index()\n .rename(columns={index_name: \"Record\"})\n )\n vis._vis_data = vis.data[[groupby_attr.attribute, color_attr.attribute, \"Record\"]]\n else:\n vis._vis_data = (\n 
vis.data.groupby(groupby_attr.attribute, dropna=False, history=False)\n .count()\n .reset_index()\n .rename(columns={index_name: \"Record\"})\n )\n vis._vis_data = vis.data[[groupby_attr.attribute, \"Record\"]]\n else:\n # if color is specified, need to group by groupby_attr and color_attr\n if has_color:\n groupby_result = vis.data.groupby(\n [groupby_attr.attribute, color_attr.attribute], dropna=False, history=False\n )\n else:\n groupby_result = vis.data.groupby(\n groupby_attr.attribute, dropna=False, history=False\n )\n groupby_result = groupby_result.agg(agg_func)\n intermediate = groupby_result.reset_index()\n vis._vis_data = intermediate.__finalize__(vis.data)\n result_vals = list(vis.data[groupby_attr.attribute])\n # create existing group by attribute combinations if color is specified\n # this is needed to check what combinations of group_by_attr and color_attr values have a non-zero number of elements in them\n if has_color:\n res_color_combi_vals = []\n result_color_vals = list(vis.data[color_attr.attribute])\n for i in range(0, len(result_vals)):\n res_color_combi_vals.append([result_vals[i], result_color_vals[i]])\n # For filtered aggregation that have missing groupby-attribute values, set these aggregated value as 0, since no datapoints\n if isFiltered or has_color and attr_unique_vals:\n N_unique_vals = len(attr_unique_vals)\n if len(result_vals) != N_unique_vals * color_cardinality:\n columns = vis.data.columns\n if has_color:\n df = pd.DataFrame(\n {\n columns[0]: attr_unique_vals * color_cardinality,\n columns[1]: pd.Series(color_attr_vals).repeat(N_unique_vals),\n }\n )\n vis._vis_data = vis.data.merge(\n df,\n on=[columns[0], columns[1]],\n how=\"right\",\n suffixes=[\"\", \"_right\"],\n )\n for col in columns[2:]:\n vis.data[col] = vis.data[col].fillna(0) # Triggers __setitem__\n assert len(list(vis.data[groupby_attr.attribute])) == N_unique_vals * len(\n color_attr_vals\n ), f\"Aggregated data missing values compared to original range of values of `{groupby_attr.attribute, color_attr.attribute}`.\"\n\n # Keep only the three relevant columns not the *_right columns resulting from merge\n vis._vis_data = vis.data.iloc[:, :3]\n\n else:\n df = pd.DataFrame({columns[0]: attr_unique_vals})\n\n vis._vis_data = vis.data.merge(\n df, on=columns[0], how=\"right\", suffixes=[\"\", \"_right\"]\n )\n\n for col in columns[1:]:\n vis.data[col] = vis.data[col].fillna(0)\n assert (\n len(list(vis.data[groupby_attr.attribute])) == N_unique_vals\n ), f\"Aggregated data missing values compared to original range of values of `{groupby_attr.attribute}`.\"\n\n vis._vis_data = vis._vis_data.dropna(subset=[measure_attr.attribute])\n try:\n vis._vis_data = vis._vis_data.sort_values(by=groupby_attr.attribute, ascending=True)\n except TypeError:\n warnings.warn(\n f\"\\nLux detects that the attribute '{groupby_attr.attribute}' maybe contain mixed type.\"\n + f\"\\nTo visualize this attribute, you may want to convert the '{groupby_attr.attribute}' into a uniform type as follows:\"\n + f\"\\n\\tdf['{groupby_attr.attribute}'] = df['{groupby_attr.attribute}'].astype(str)\"\n )\n vis._vis_data[groupby_attr.attribute] = vis._vis_data[groupby_attr.attribute].astype(str)\n vis._vis_data = vis._vis_data.sort_values(by=groupby_attr.attribute, ascending=True)\n vis._vis_data = vis._vis_data.reset_index()\n vis._vis_data = vis._vis_data.drop(columns=\"index\")\n\n @staticmethod\n def execute_binning(ldf: LuxDataFrame, vis: Vis):\n \"\"\"\n Binning of data points for generating histograms\n\n Parameters\n 
----------\n vis: lux.Vis\n lux.Vis object that represents a visualization\n ldf : lux.core.frame\n LuxDataFrame with specified intent.\n\n Returns\n -------\n None\n \"\"\"\n import numpy as np\n\n bin_attribute = list(filter(lambda x: x.bin_size != 0, vis._inferred_intent))[0]\n bin_attr = bin_attribute.attribute\n series = vis.data[bin_attr]\n\n if series.hasnans:\n ldf._message.add_unique(\n f\"The column <code>{bin_attr}</code> contains missing values, not shown in the displayed histogram.\",\n priority=100,\n )\n series = series.dropna()\n if pd.api.types.is_object_dtype(series):\n series = series.astype(\"float\", errors=\"ignore\")\n\n counts, bin_edges = np.histogram(series, bins=bin_attribute.bin_size)\n # bin_edges of size N+1, so need to compute bin_start as the bin location\n bin_start = bin_edges[0:-1]\n binned_result = np.array([bin_start, counts]).T\n vis._vis_data = pd.DataFrame(binned_result, columns=[bin_attr, \"Number of Records\"])\n\n @staticmethod\n def execute_filter(vis: Vis) -> bool:\n \"\"\"\n Apply a Vis's filter to vis.data\n\n Parameters\n ----------\n vis : Vis\n\n Returns\n -------\n bool\n Boolean flag indicating if any filter was applied\n \"\"\"\n assert (\n vis.data is not None\n ), \"execute_filter assumes input vis.data is populated (if not, populate with LuxDataFrame values)\"\n filters = utils.get_filter_specs(vis._inferred_intent)\n\n if filters:\n # TODO: Need to handle OR logic\n for filter in filters:\n vis._vis_data = PandasExecutor.apply_filter(\n vis.data, filter.attribute, filter.filter_op, filter.value\n )\n return True\n else:\n return False\n\n @staticmethod\n def apply_filter(df: pd.DataFrame, attribute: str, op: str, val: object) -> pd.DataFrame:\n \"\"\"\n Helper function for applying filter to a dataframe\n\n Parameters\n ----------\n df : pandas.DataFrame\n Dataframe to filter on\n attribute : str\n Filter attribute\n op : str\n Filter operation, '=', '<', '>', '<=', '>=', '!='\n val : object\n Filter value\n\n Returns\n -------\n df: pandas.DataFrame\n Dataframe resulting from the filter operation\n \"\"\"\n # Handling NaN filter values\n if utils.like_nan(val):\n if op != \"=\" and op != \"!=\":\n warnings.warn(\"Filter on NaN must be used with equality operations (i.e., `=` or `!=`)\")\n else:\n if op == \"=\":\n return df[df[attribute].isna()]\n elif op == \"!=\":\n return df[~df[attribute].isna()]\n # Applying filter in regular, non-NaN cases\n if op == \"=\":\n return df[df[attribute] == val]\n elif op == \"<\":\n return df[df[attribute] < val]\n elif op == \">\":\n return df[df[attribute] > val]\n elif op == \"<=\":\n return df[df[attribute] <= val]\n elif op == \">=\":\n return df[df[attribute] >= val]\n elif op == \"!=\":\n return df[df[attribute] != val]\n return df\n\n @staticmethod\n def execute_2D_binning(vis: Vis) -> None:\n \"\"\"\n Apply 2D binning (heatmap) to vis.data\n\n Parameters\n ----------\n vis : Vis\n \"\"\"\n pd.reset_option(\"mode.chained_assignment\")\n with pd.option_context(\"mode.chained_assignment\", None):\n x_attr = vis.get_attr_by_channel(\"x\")[0].attribute\n y_attr = vis.get_attr_by_channel(\"y\")[0].attribute\n\n vis._vis_data[\"xBin\"] = pd.cut(vis._vis_data[x_attr], bins=lux.config.heatmap_bin_size)\n vis._vis_data[\"yBin\"] = pd.cut(vis._vis_data[y_attr], bins=lux.config.heatmap_bin_size)\n\n color_attr = vis.get_attr_by_channel(\"color\")\n if len(color_attr) > 0:\n color_attr = color_attr[0]\n groups = vis._vis_data.groupby([\"xBin\", \"yBin\"], history=False)[color_attr.attribute]\n if 
color_attr.data_type == \"nominal\":\n # Compute mode and count. Mode aggregates each cell by taking the majority vote for the category variable. In cases where there is ties across categories, pick the first item (.iat[0])\n result = groups.agg(\n [\n (\"count\", \"count\"),\n (color_attr.attribute, lambda x: pd.Series.mode(x).iat[0]),\n ]\n ).reset_index()\n elif color_attr.data_type == \"quantitative\" or color_attr.data_type == \"temporal\":\n # Compute the average of all values in the bin\n result = groups.agg(\n [(\"count\", \"count\"), (color_attr.attribute, \"mean\")]\n ).reset_index()\n result = result.dropna()\n else:\n groups = vis._vis_data.groupby([\"xBin\", \"yBin\"], history=False)[x_attr]\n result = groups.count().reset_index(name=x_attr)\n result = result.rename(columns={x_attr: \"count\"})\n result = result[result[\"count\"] != 0]\n\n # convert type to facilitate weighted correlation interestingess calculation\n result[\"xBinStart\"] = result[\"xBin\"].apply(lambda x: x.left).astype(\"float\")\n result[\"xBinEnd\"] = result[\"xBin\"].apply(lambda x: x.right)\n\n result[\"yBinStart\"] = result[\"yBin\"].apply(lambda x: x.left).astype(\"float\")\n result[\"yBinEnd\"] = result[\"yBin\"].apply(lambda x: x.right)\n\n vis._vis_data = result.drop(columns=[\"xBin\", \"yBin\"])\n\n #######################################################\n ############ Metadata: data type, model #############\n #######################################################\n def compute_dataset_metadata(self, ldf: LuxDataFrame):\n ldf.history.freeze()\n ldf._data_type = {}\n self.compute_data_type(ldf)\n ldf.history.unfreeze()\n\n def compute_data_type(self, ldf: LuxDataFrame):\n ldf.history.freeze()\n from pandas.api.types import is_datetime64_any_dtype as is_datetime\n\n for attr in list(ldf.columns):\n if attr in ldf._type_override:\n ldf._data_type[attr] = ldf._type_override[attr]\n else:\n temporal_var_list = [\"month\", \"year\", \"day\", \"date\", \"time\", \"weekday\"]\n if is_datetime(ldf[attr]):\n ldf._data_type[attr] = \"temporal\"\n elif self._is_datetime_string(ldf[attr]):\n ldf._data_type[attr] = \"temporal\"\n elif isinstance(attr, pd._libs.tslibs.timestamps.Timestamp):\n ldf._data_type[attr] = \"temporal\"\n elif str(attr).lower() in temporal_var_list:\n ldf._data_type[attr] = \"temporal\"\n elif self._is_datetime_number(ldf[attr]):\n ldf._data_type[attr] = \"temporal\"\n elif self._is_geographical_attribute(ldf[attr]):\n ldf._data_type[attr] = \"geographical\"\n elif pd.api.types.is_float_dtype(ldf.dtypes[attr]):\n\n if ldf.cardinality[attr] != len(ldf) and (ldf.cardinality[attr] < 20):\n ldf._data_type[attr] = \"nominal\"\n else:\n ldf._data_type[attr] = \"quantitative\"\n elif pd.api.types.is_integer_dtype(ldf.dtypes[attr]):\n # See if integer value is quantitative or nominal by checking if the ratio of cardinality/data size is less than 0.4 and if there are less than 10 unique values\n if ldf.pre_aggregated:\n if ldf.cardinality[attr] == len(ldf): # TODO why does this make sense?\n ldf._data_type[attr] = \"nominal\"\n if ldf.cardinality[attr] / len(ldf) < 0.4 and ldf.cardinality[attr] < 20:\n ldf._data_type[attr] = \"nominal\"\n else:\n ldf._data_type[attr] = \"quantitative\"\n if check_if_id_like(ldf, attr):\n ldf._data_type[attr] = \"id\"\n # Eliminate this clause because a single NaN value can cause the dtype to be object\n elif pd.api.types.is_string_dtype(ldf.dtypes[attr]):\n # Check first if it's castable to float after removing NaN\n is_numeric_nan, series = 
is_numeric_nan_column(ldf[attr])\n if is_numeric_nan:\n # int columns gets coerced into floats if contain NaN\n ldf._data_type[attr] = \"quantitative\"\n # min max was not computed since object type, so recompute here\n ldf._min_max[attr] = (\n series.min(),\n series.max(),\n )\n elif check_if_id_like(ldf, attr):\n ldf._data_type[attr] = \"id\"\n else:\n ldf._data_type[attr] = \"nominal\"\n # check if attribute is any type of datetime dtype\n elif is_datetime_series(ldf.dtypes[attr]):\n ldf._data_type[attr] = \"temporal\"\n else:\n ldf._data_type[attr] = \"nominal\"\n if not pd.api.types.is_integer_dtype(ldf.index) and ldf.index.name:\n ldf._data_type[ldf.index.name] = \"nominal\"\n\n non_datetime_attrs = []\n for attr in ldf.columns:\n if ldf._data_type[attr] == \"temporal\" and not is_datetime(ldf[attr]):\n non_datetime_attrs.append(attr)\n ldf.history.unfreeze()\n\n warn_msg = \"\"\n if len(non_datetime_attrs) == 1:\n warn_msg += f\"\\nLux detects that the attribute '{non_datetime_attrs[0]}' may be temporal.\\n\"\n elif len(non_datetime_attrs) > 1:\n warn_msg += f\"\\nLux detects that attributes {non_datetime_attrs} may be temporal.\\n\"\n if len(non_datetime_attrs) > 0:\n warn_msg += \"To display visualizations for these attributes accurately, please convert temporal attributes to Pandas Datetime objects using the pd.to_datetime function and provide a 'format' parameter to specify the datetime format of the attribute.\\nFor example, you can convert a year-only attribute (e.g., 1998, 1971, 1982) to Datetime type by specifying the `format` as '%Y'.\\n\\nHere is a starter template that you can use for converting the temporal fields:\\n\"\n for attr in non_datetime_attrs:\n warn_msg += f\"\\tdf['{attr}'] = pd.to_datetime(df['{attr}'], format='<replace-with-datetime-format>')\\n\"\n warn_msg += \"\\nSee more at: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html\"\n warn_msg += f\"\\nIf {attr} is not a temporal attribute, please use override Lux's automatically detected type:\"\n warn_msg += f\"\\n\\tdf.set_data_type({{'{attr}':'quantitative'}})\"\n warnings.warn(warn_msg, stacklevel=2)\n\n @staticmethod\n def _is_datetime_string(series):\n if series.dtype == object:\n not_numeric = False\n try:\n pd.to_numeric(series)\n except Exception as e:\n not_numeric = True\n\n datetime_col = None\n if not_numeric:\n try:\n datetime_col = pd.to_datetime(series)\n except Exception as e:\n return False\n if datetime_col is not None:\n return True\n return False\n\n @staticmethod\n def _is_geographical_attribute(series):\n # run detection algorithm\n name = str(series.name).lower()\n return utils.like_geo(name)\n\n @staticmethod\n def _is_datetime_number(series):\n is_int_dtype = pd.api.types.is_integer_dtype(series.dtype)\n if is_int_dtype:\n try:\n temp = series.astype(str)\n pd.to_datetime(temp)\n return True\n except Exception:\n return False\n return False\n\n def compute_stats(self, ldf: LuxDataFrame):\n ldf.history.freeze()\n # precompute statistics\n ldf.unique_values = {}\n ldf._min_max = {}\n ldf.cardinality = {}\n ldf._length = len(ldf)\n\n for attribute in ldf.columns:\n\n if isinstance(attribute, pd._libs.tslibs.timestamps.Timestamp):\n # If timestamp, make the dictionary keys the _repr_ (e.g., TimeStamp('2020-04-05 00.000')--> '2020-04-05')\n attribute_repr = str(attribute._date_repr)\n else:\n attribute_repr = attribute\n\n ldf.unique_values[attribute_repr] = list(ldf[attribute].unique())\n ldf.cardinality[attribute_repr] = 
len(ldf.unique_values[attribute_repr])\n\n if pd.api.types.is_float_dtype(ldf.dtypes[attribute]) or pd.api.types.is_integer_dtype(\n ldf.dtypes[attribute]\n ):\n ldf._min_max[attribute_repr] = (\n ldf[attribute].min(),\n ldf[attribute].max(),\n )\n\n if not pd.api.types.is_integer_dtype(ldf.index):\n index_column_name = ldf.index.name\n ldf.unique_values[index_column_name] = list(ldf.index)\n ldf.cardinality[index_column_name] = len(ldf.index)\n \n ldf.history.unfreeze()" ]
[ [ "pandas.to_datetime", "numpy.histogram", "pandas.reset_option", "numpy.array", "pandas.cut", "pandas.DataFrame", "pandas.api.types.is_datetime64_any_dtype", "pandas.api.types.is_float_dtype", "pandas.option_context", "pandas.api.types.is_string_dtype", "pandas.api.types.is_object_dtype", "pandas.to_numeric", "pandas.Series", "pandas.Series.mode", "pandas.api.types.is_integer_dtype" ] ]
shubaoyu/CRSLab
[ "a05730e8b2c03df278587be34923fa818945d4c4" ]
[ "crslab/model/crs/tgredial/tg_policy.py" ]
[ "# @Time : 2020/12/9\n# @Author : Yuanhang Zhou\n# @Email : [email protected]\n\n# UPDATE:\n# @Time : 2021/1/7, 2020/12/15, 2021/1/4\n# @Author : Xiaolei Wang, Yuanhang Zhou, Yuanhang Zhou\n# @Email : [email protected], sdzyh002@gmail, [email protected]\n\nr\"\"\"\nTGReDial_Policy\n===============\nReferences:\n Zhou, Kun, et al. `\"Towards Topic-Guided Conversational Recommender System.\"`_ in COLING 2020.\n\n.. _`\"Towards Topic-Guided Conversational Recommender System.\"`:\n https://www.aclweb.org/anthology/2020.coling-main.365/\n\n\"\"\"\n\nimport os\n\nimport torch\nfrom torch import nn\nfrom transformers import BertModel\n\nfrom crslab.config import PRETRAIN_PATH\nfrom crslab.data import dataset_language_map\nfrom crslab.model.base import BaseModel\nfrom crslab.model.pretrained_models import resources\n\n\nclass TGPolicyModel(BaseModel):\n def __init__(self, opt, device, vocab, side_data):\n \"\"\"\n\n Args:\n opt (dict): A dictionary record the hyper parameters.\n device (torch.device): A variable indicating which device to place the data and model.\n vocab (dict): A dictionary record the vocabulary information.\n side_data (dict): A dictionary record the side data.\n \n \"\"\"\n self.topic_class_num = vocab['n_topic']\n self.n_sent = opt.get('n_sent', 10)\n\n language = dataset_language_map[opt['dataset']]\n resource = resources['bert'][language]\n dpath = os.path.join(PRETRAIN_PATH, \"bert\", language)\n super(TGPolicyModel, self).__init__(opt, device, dpath, resource)\n\n def build_model(self, *args, **kwargs):\n \"\"\"build model\"\"\"\n self.context_bert = BertModel.from_pretrained(self.dpath)\n self.topic_bert = BertModel.from_pretrained(self.dpath)\n self.profile_bert = BertModel.from_pretrained(self.dpath)\n\n self.bert_hidden_size = self.context_bert.config.hidden_size\n self.state2topic_id = nn.Linear(self.bert_hidden_size * 3,\n self.topic_class_num)\n\n self.loss = nn.CrossEntropyLoss()\n\n def guide(self, batch, mode):\n # conv_id, message_id, context, context_mask, topic_path_kw, tp_mask, user_profile, profile_mask, y = batch\n context, context_mask, topic_path_kw, tp_mask, user_profile, profile_mask, y = batch\n\n context_rep = self.context_bert(\n context,\n context_mask).pooler_output # (bs, hidden_size)\n\n topic_rep = self.topic_bert(\n topic_path_kw,\n tp_mask).pooler_output # (bs, hidden_size)\n\n bs = user_profile.shape[0] // self.n_sent\n profile_rep = self.profile_bert(user_profile, profile_mask).pooler_output # (bs, word_num, hidden)\n profile_rep = profile_rep.view(bs, self.n_sent, -1)\n profile_rep = torch.mean(profile_rep, dim=1) # (bs, hidden)\n\n state_rep = torch.cat((context_rep, topic_rep, profile_rep), dim=1) # [bs, hidden_size*3]\n topic_scores = self.state2topic_id(state_rep)\n topic_loss = self.loss(topic_scores, y)\n\n return topic_loss, topic_scores\n" ]
[ [ "torch.nn.Linear", "torch.mean", "torch.cat", "torch.nn.CrossEntropyLoss" ] ]
kentakuramochi/deep_learning_from_scratch
[ "8b78369f6da316a6c14c16f729c658d959454b0e" ]
[ "chap5/gradient_check.py" ]
[ "# !usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys, os\nsys.path.append(os.pardir)\nimport numpy as np\nfrom dataset.mnist import load_mnist\nfrom two_layer_net import TwoLayerNet\n\n(x_train, t_train), (x_test, t_text) = \\\n load_mnist(normalize=True, one_hot_label=True)\n\nnetwork = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)\n\nx_batch = x_train[:3]\nt_batch = t_train[:3]\n\ngrad_numerical = network.numerical_gradient(x_batch, t_batch)\ngrad_backprop = network.gradient(x_batch, t_batch)\n\nfor key in grad_numerical.keys():\n grad_bp = grad_backprop[key]\n grad_num = grad_numerical[key]\n diff = np.average(np.abs(grad_bp - grad_num))\n print(\"{0} : {1}\".format(key, str(diff)))\n" ]
[ [ "numpy.abs" ] ]
google/seqio
[ "ef7e20b18a0aa071f329bf2b9310ed0fad3dc7a2" ]
[ "seqio/dataset_providers.py" ]
[ "# Copyright 2021 The SeqIO Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Classes for data loading and processing.\n\nDefines Tasks, TaskRegistry, Mixture, and MixtureRegistry\n\"\"\"\n\nimport abc\nimport collections\nimport dataclasses\nimport functools\nimport inspect\nimport json\nimport os\nimport re\nfrom typing import Any, Callable, Iterable, Mapping, MutableMapping, Optional, Sequence, Tuple, Type, Union\n\nfrom absl import logging\nimport numpy as np\nfrom packaging import version\nfrom seqio import metrics as metrics_lib\nfrom seqio import utils\nfrom seqio.feature_converters import FeatureConverter\nfrom seqio.vocabularies import PassThroughVocabulary\nfrom seqio.vocabularies import Vocabulary\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets as tfds\nimport typing_extensions\n\n\n_DEFAULT_FEATURE_KEYS = [\"inputs\", \"targets\"]\n\n_VALID_TASK_NAME_REGEX = re.compile(r\"^[\\w\\d\\.\\:_]+$\")\n_MAX_EXAMPLES_TO_MEM_CACHE = 10000\nSHUFFLE_BUFFER_SIZE = 1000\n\nDatasetReaderType = Callable[[Union[str, Iterable[str]]], tf.data.Dataset]\nDecodeFnType = Callable[..., Mapping[str, tf.train.Feature]]\n\n\[email protected](frozen=True)\nclass Feature:\n \"\"\"A container for attributes of output features of data providers.\"\"\"\n vocabulary: Vocabulary\n add_eos: bool = True\n required: bool = True\n dtype: tf.DType = tf.int32\n rank: int = 1\n\n\[email protected](frozen=True)\nclass ContinuousFeature(Feature):\n \"\"\"A container for multi-modal output features of data providers.\"\"\"\n vocabulary: Vocabulary = PassThroughVocabulary(size=0)\n add_eos: bool = False\n\n\[email protected](frozen=True)\nclass ShardInfo:\n \"\"\"A container for specifying sharding info.\"\"\"\n index: int\n num_shards: int\n\n\nclass DatasetProviderBase(metaclass=abc.ABCMeta):\n \"\"\"Abstract base for classes that provide a tf.data.Dataset.\"\"\"\n\n @abc.abstractproperty\n def output_features(self) -> Mapping[str, Feature]:\n raise NotImplementedError\n\n @abc.abstractproperty\n def splits(self) -> Sequence[str]:\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_dataset(\n self,\n sequence_length: int,\n split: str,\n use_cached: bool = False,\n shuffle: bool = True,\n seed: Optional[int] = None,\n shard_info: Optional[ShardInfo] = None,\n num_epochs: int = 1\n ) -> tf.data.Dataset:\n \"\"\"Returns the requested tf.data.Dataset.\"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def num_input_examples(self, split: str) -> int:\n raise NotImplementedError\n\n\nclass DatasetProviderRegistry(object):\n \"\"\"Base for registry of data providers.\n\n Subclasses must wrap `get` method to override the return type for pytype.\n TODO(adarob): Remove the need to override `get`.\n \"\"\"\n # Class variables must be defined in subclasses.\n _REGISTRY: MutableMapping[str, DatasetProviderBase]\n _PROVIDER_TYPE: Type[DatasetProviderBase]\n\n @classmethod\n def add_provider(cls, name: str, provider):\n \"\"\"Adds a data provider instance to the 
registry.\"\"\"\n if name in cls._REGISTRY:\n raise ValueError(\"Attempting to register duplicate provider: %s\" % name)\n if not isinstance(provider, cls._PROVIDER_TYPE):\n raise ValueError(\n \"Attempting to register a class of an invalid type. \"\n \"Expecting instance of %s, got %s\" %\n (cls._PROVIDER_TYPE, type(provider).__name__))\n\n cls._REGISTRY[name] = provider\n\n @classmethod\n def add(\n cls,\n name: str,\n provider_cls,\n *provider_args,\n **provider_kwargs\n ):\n \"\"\"Instantiates and adds provider to the registry.\"\"\"\n if not issubclass(provider_cls, cls._PROVIDER_TYPE):\n raise ValueError(\n \"Attempting to register a class of an invalid type. \"\n \"Expecting instance of %s, got %s\" %\n (cls._PROVIDER_TYPE, provider_cls))\n provider = provider_cls(*provider_args, **provider_kwargs)\n cls.add_provider(name, provider)\n return provider\n\n @classmethod\n def remove(cls, name):\n \"\"\"Remove provider from the registry, if it exists.\"\"\"\n if name in cls._REGISTRY:\n del cls._REGISTRY[name]\n\n @classmethod\n def get(cls, name):\n \"\"\"Returns provider from the registry.\"\"\"\n if name not in cls._REGISTRY:\n raise ValueError(\"Provider name not registered: %s\" % name)\n return cls._REGISTRY[name]\n\n @classmethod\n def names(cls):\n \"\"\"Returns all provider names in registry.\"\"\"\n return cls._REGISTRY.keys()\n\n @classmethod\n def reset(cls):\n \"\"\"Removes all of the registered tasks.\"\"\"\n cls._REGISTRY = {}\n\n @classmethod\n def get_dataset(\n cls,\n name,\n sequence_length,\n split,\n use_cached=False,\n shuffle=True,\n seed=None,\n shard_info=None,\n num_epochs=1):\n \"\"\"Returns the requested tf.data.Dataset.\"\"\"\n return cls.get(name).get_dataset(\n sequence_length=sequence_length, split=split, use_cached=use_cached,\n shuffle=shuffle, seed=seed, shard_info=shard_info,\n num_epochs=num_epochs)\n\n\n# =============================== DataSources ==================================\n\n\nclass DataSourceInterface(typing_extensions.Protocol):\n \"\"\"Interface for DataSource.\"\"\"\n\n def num_input_examples(self, split: str) -> int:\n ...\n\n @property\n def caching_permitted(self) -> bool:\n ...\n\n @property\n def splits(self) -> Sequence[str]:\n ...\n\n @property\n def supports_arbitrary_sharding(self) -> bool:\n ...\n\n @property\n def output_features(self) -> Mapping[str, Feature]:\n ...\n\n def list_shards(self, split: str) -> Sequence[str]:\n ...\n\n def get_dataset(\n self,\n split: str,\n shuffle: bool = True,\n seed: Optional[int] = None,\n shard_info: Optional[ShardInfo] = None\n ) -> tf.data.Dataset:\n ...\n\n\nclass DataSource(DatasetProviderBase):\n \"\"\"A `DatasetProvider` that provides raw data from an input source.\n\n Inherits all abstract methods and properties of `DatasetProviderBase` except\n those overidden below.\n \"\"\"\n\n def __init__(\n self,\n splits: Iterable[str],\n num_input_examples: Optional[Mapping[str, int]] = None,\n caching_permitted: bool = True):\n self._splits = tuple(splits)\n self._num_input_examples = (\n dict(num_input_examples) if num_input_examples is not None else None)\n self._caching_permitted = caching_permitted\n\n @property\n def caching_permitted(self) -> bool:\n \"\"\"Indicates whether this data source may be cached.\n\n Caching may be prohibited for the sake of data versioning rigor or as a\n matter of policy for certain datasets.\n \"\"\"\n return self._caching_permitted\n\n @property\n def splits(self) -> Sequence[str]:\n return self._splits\n\n @abc.abstractproperty\n def 
supports_arbitrary_sharding(self) -> bool:\n \"\"\"Whether supports sharding beyond those available in `list_shards`.\"\"\"\n raise NotImplementedError\n\n @property\n def output_features(self) -> Mapping[str, Feature]:\n \"\"\"Override unused property of `DatasetProviderBase`.\"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def list_shards(self, split: str) -> Sequence[str]:\n \"\"\"Returns string identifiers of input shards.\"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_dataset(\n self,\n split: str,\n shuffle: bool = True,\n seed: Optional[int] = None,\n shard_info: Optional[ShardInfo] = None\n ) -> tf.data.Dataset:\n \"\"\"Overrides base class to add shard identifier and remove use_cached.\n\n Args:\n split: string, the split to return.\n shuffle: bool, whether to shuffle the input source.\n seed: tf.int64 scalar tf.Tensor (or None) for shuffling input source.\n shard_info: optional specification for loading a shard of the split.\n \"\"\"\n raise NotImplementedError\n\n def num_input_examples(self, split: str) -> Optional[int]:\n if self._num_input_examples is None:\n return None\n return self._num_input_examples[split]\n\n\ndef _get_name(function):\n \"\"\"Returns the name of a (possibly partially applied) function.\"\"\"\n if isinstance(function, functools.partial):\n return function.func.__name__\n else:\n return function.__name__\n\n\ndef _validate_args(fn, expected_pos_args):\n \"\"\"Ensure function has exactly expected positional args.\"\"\"\n argspec = inspect.getfullargspec(fn)\n expected_pos_args = tuple(expected_pos_args)\n actual_args = tuple(argspec.args)\n if actual_args[:len(expected_pos_args)] != expected_pos_args:\n raise ValueError(\n \"'%s' must have positional args %s, got: %s\" % (\n _get_name(fn), expected_pos_args, actual_args))\n actual_pos_args = tuple(\n argspec.args[:-len(argspec.defaults)]\n if argspec.defaults else argspec.args)\n if actual_pos_args != expected_pos_args[:len(actual_pos_args)]:\n raise ValueError(\n \"'%s' may only have positional args %s, got: %s\" % (\n _get_name(fn), expected_pos_args, actual_pos_args))\n\n\nclass DatasetFnCallable(typing_extensions.Protocol):\n\n def __call__(self,\n split: str,\n shuffle_files: bool,\n seed: Optional[int] = None) -> tf.data.Dataset:\n ...\n\n\nclass FunctionDataSource(DataSource):\n \"\"\"A `DataSource` that uses a function to provide the input data.\"\"\"\n\n def __init__(\n self,\n dataset_fn: DatasetFnCallable,\n splits: Iterable[str],\n num_input_examples: Optional[Mapping[str, int]] = None,\n caching_permitted: bool = True\n ):\n \"\"\"FunctionDataSource constructor.\n\n Args:\n dataset_fn: a function with the signature `dataset_fn(split,\n shuffle_files)' (and optionally the variable `seed`) that returns a\n `tf.data.Dataset`.\n splits: an iterable of applicable string split names.\n num_input_examples: dict or None, an optional dictionary mapping split to\n its size in number of input examples (before preprocessing). 
The\n `num_input_examples` method will return None if not provided.\n caching_permitted: indicates whether this data source may be cached.\n Default True.\n \"\"\"\n _validate_args(dataset_fn, [\"split\", \"shuffle_files\"])\n self._dataset_fn = dataset_fn\n super().__init__(\n splits=splits,\n num_input_examples=num_input_examples,\n caching_permitted=caching_permitted)\n\n @property\n def supports_arbitrary_sharding(self) -> bool:\n return False\n\n def get_dataset(\n self,\n split: str,\n shuffle: bool = True,\n seed: Optional[int] = None,\n shard_info: Optional[ShardInfo] = None\n ) -> tf.data.Dataset:\n if shard_info and shard_info.num_shards > 1:\n raise ValueError(\n \"`FunctionDataSource` does not support low-level sharding. Use \"\n \"tf.data.Dataset.shard instead.\")\n\n if seed is None:\n ds = self._dataset_fn(split=split, shuffle_files=shuffle)\n else:\n _validate_args(self._dataset_fn, [\"split\", \"shuffle_files\", \"seed\"])\n ds = self._dataset_fn(split=split, shuffle_files=shuffle, seed=seed)\n return ds\n\n def list_shards(self, split: str) -> Sequence[str]:\n return [split]\n\n\nclass TfdsDataSource(DataSource):\n \"\"\"A `DataSource` that uses TensorFlow Datasets to provide the input data.\"\"\"\n\n def __init__(\n self,\n tfds_name: str,\n tfds_data_dir: Optional[str] = None,\n splits: Optional[Union[Iterable[str], Mapping[str, str]]] = None,\n caching_permitted: bool = True,\n decoders: Optional[tfds.typing.TreeDict[tfds.decode.Decoder]] = None,\n ):\n \"\"\"TfdsTask constructor.\n\n Args:\n tfds_name: string, the name and version number of a TFDS dataset,\n optionally with a config.\n tfds_data_dir: string, an optional path to a specific TFDS data directory\n to use.\n splits: an iterable of allowable string split names, a dict mapping\n allowable canonical splits (e.g., 'validation') to TFDS splits or slices\n (e.g., 'train[':1%']), or None. 
The default, None, uses all available\n splits from the TFDS dataset info.\n caching_permitted: indicates whether this data source may be cached.\n Default True.\n decoders: dict (optional), mapping from features to tfds.decode.Decoders,\n such as tfds.decode.SkipDecoding() for skipping image byte decoding\n \"\"\"\n if \":\" not in tfds_name:\n raise ValueError(\"TFDS name must contain a version number, got: %s\" %\n tfds_name)\n\n if splits and not isinstance(splits, dict):\n splits = {k: k for k in splits}\n\n self._tfds_dataset = utils.LazyTfdsLoader(\n tfds_name,\n data_dir=tfds_data_dir,\n split_map=splits if isinstance(splits, dict) else None,\n decoders=decoders)\n\n # If splits are not provided, we pass an empty tuple and use the lazy\n # lookup in the `splits` property.\n super().__init__(splits=splits or (), caching_permitted=caching_permitted)\n\n @property\n def splits(self):\n \"\"\"Overrides since we can't call `info.splits` until after init.\"\"\"\n return self._splits or self._tfds_dataset.info.splits\n\n @property\n def tfds_dataset(self):\n return self._tfds_dataset\n\n @property\n def supports_arbitrary_sharding(self) -> bool:\n return False\n\n def get_dataset(\n self,\n split: str,\n shuffle: bool = True,\n seed: Optional[int] = None,\n shard_info: Optional[ShardInfo] = None\n ) -> tf.data.Dataset:\n return self.tfds_dataset.load(\n split, shuffle_files=shuffle, seed=seed, shard_info=shard_info)\n\n def num_input_examples(self, split: str) -> int:\n \"\"\"Overrides since we can't call `info.splits` until after init.\"\"\"\n return self.tfds_dataset.size(split)\n\n def list_shards(self, split: str) -> Sequence[str]:\n def _get_filename(info):\n if isinstance(info, dict): # this is true for unit tests\n return info[\"filename\"]\n return info.filename # TFDS FileInstruction\n return [_get_filename(info) for info in self.tfds_dataset.files(split)]\n\n\nclass FileDataSource(DataSource):\n \"\"\"A `DataSource` that reads a file to provide the input dataset.\"\"\"\n\n def __init__(self,\n read_file_fn: Callable[[tf.data.Dataset], tf.data.Dataset],\n split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],\n num_input_examples: Optional[Mapping[str, int]] = None,\n caching_permitted: bool = True,\n file_shuffle_buffer_size: Optional[int] = None):\n \"\"\"FileDataSource constructor.\n\n Args:\n read_file_fn: a callable for creating a `tf.data.Dataset` from a\n `tf.data.Dataset` of file paths, e.g., `tf.data.TFRecordDataset`.\n split_to_filepattern: a mapping from split names to filepatterns to be\n expanded with glob.\n num_input_examples: dict or None, an optional dictionary mapping split to\n its size in number of input examples (before preprocessing). The\n `num_input_examples` method will return None if not provided.\n caching_permitted: indicates whether this data source may be cached.\n Default True.\n file_shuffle_buffer_size: The buffer size to shuffle files when needed. If\n None, the number of files is used as buffer size for a perfect shuffle\n (default and recommended). 
A value of 16 may be explicitly set to\n replicate earlier behavior.\n \"\"\"\n self._split_to_filepattern = split_to_filepattern\n self._reader = read_file_fn\n self._file_shuffle_buffer_size = file_shuffle_buffer_size\n super().__init__(\n splits=split_to_filepattern.keys(),\n num_input_examples=num_input_examples,\n caching_permitted=caching_permitted)\n\n @property\n def supports_arbitrary_sharding(self) -> bool:\n return False\n\n def get_dataset(\n self,\n split: str,\n shuffle: bool = True,\n seed: Optional[int] = None,\n shard_info: Optional[ShardInfo] = None\n ) -> tf.data.Dataset:\n files = self.list_shards(split)\n\n if not files:\n raise ValueError(\n \"No file is found for the file pattern: \"\n f\"{self._split_to_filepattern[split]}.\"\n )\n files_ds = tf.data.Dataset.from_tensor_slices(np.array(files, dtype=str))\n\n if shard_info:\n if len(files) < shard_info.num_shards:\n raise ValueError(\n f\"Dataset has too few files to shard. {len(files)} files vs \"\n f\"{shard_info.num_shards} shards requested.\")\n files_ds = files_ds.shard(shard_info.num_shards, shard_info.index)\n\n if shuffle:\n if self._file_shuffle_buffer_size:\n logging.warning(\n \"`file_shuffle_buffer_size` is explicitly set to %d; this may lead \"\n \"to an imperfect file shuffle. Leave `file_shuffle_buffer_size` \"\n \"unset for a perfect shuffle.\", self._file_shuffle_buffer_size)\n file_shuffle_buffer_size = self._file_shuffle_buffer_size or len(files)\n files_ds = files_ds.shuffle(\n buffer_size=file_shuffle_buffer_size, seed=seed)\n\n return files_ds.interleave(\n self._reader,\n cycle_length=16,\n block_length=16,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n def list_shards(self, split: str) -> Sequence[str]:\n return tf.io.gfile.glob(self._split_to_filepattern[split])\n\n\nclass TextLineDataSource(FileDataSource):\n \"\"\"A `FileDataSource` that reads lines of text from a file as input.\"\"\"\n\n def __init__(self,\n split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],\n skip_header_lines: int = 0,\n num_input_examples: Optional[Mapping[str, int]] = None,\n caching_permitted: bool = True,\n file_shuffle_buffer_size: Optional[int] = None):\n \"\"\"TextLineDataSource constructor.\n\n Args:\n split_to_filepattern: a mapping from split names to filepatterns to be\n expanded with glob.\n skip_header_lines: int, number of header lines to skip in each source\n file.\n num_input_examples: dict or None, an optional dictionary mapping split to\n its size in number of input examples (before preprocessing). The\n `num_input_examples` method will return None if not provided.\n caching_permitted: indicates whether this data source may be cached.\n Default True.\n file_shuffle_buffer_size: The buffer size to shuffle files when needed. If\n None, the number of files is used as buffer size for a perfect shuffle\n (default and recommended). 
A value of 16 may be explicitly set to\n replicate earlier behavior.\n \"\"\"\n # Used during caching.\n self._skip_header_lines = skip_header_lines\n\n def read_file_fn(filepattern):\n return tf.data.TextLineDataset(filepattern).skip(skip_header_lines)\n\n super().__init__(\n read_file_fn=read_file_fn,\n split_to_filepattern=split_to_filepattern,\n num_input_examples=num_input_examples,\n caching_permitted=caching_permitted,\n file_shuffle_buffer_size=file_shuffle_buffer_size)\n\n\nclass TFExampleDataSource(FileDataSource):\n \"\"\"A `FileDataSource` that reads files of tf.train.Example protos as input.\"\"\"\n\n def __init__(self,\n split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],\n feature_description: Mapping[str, Union[tf.io.FixedLenFeature,\n tf.io.VarLenFeature]],\n reader_cls: DatasetReaderType = tf.data.TFRecordDataset,\n num_input_examples: Optional[Mapping[str, int]] = None,\n caching_permitted: bool = True,\n file_shuffle_buffer_size: Optional[int] = None):\n \"\"\"TFExampleDataSource constructor.\n\n Args:\n split_to_filepattern: dict of string (split name) to either string\n (filename or filepattern) or list of strings (filenames or\n filepatterns).\n feature_description: dict, a mapping of string feature keys to\n `tf.io.FixedLenFeature` or `tf.io.VarLenFeature` values.\n reader_cls: `tf.data.Dataset`, a dataset class to read the input files.\n num_input_examples: dict or None, an optional dictionary mapping split to\n its size in number of input examples (before preprocessing). The\n `num_input_examples` method will return None if not provided.\n caching_permitted: indicates whether this data source may be cached.\n Default True.\n file_shuffle_buffer_size: The buffer size to shuffle files when needed. If\n None, the number of files is used as buffer size for a perfect shuffle\n (default and recommended). A value of 16 may be explicitly set to\n replicate earlier behavior.\n \"\"\"\n\n def parse_fn(*args):\n pb = args[-1] # Some readers have more than 1 arg.\n return tf.io.parse_single_example(pb, feature_description)\n\n def read_file_fn(filepattern):\n return reader_cls(filepattern).map(\n parse_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n super().__init__(\n read_file_fn=read_file_fn,\n split_to_filepattern=split_to_filepattern,\n num_input_examples=num_input_examples,\n caching_permitted=caching_permitted,\n file_shuffle_buffer_size=file_shuffle_buffer_size)\n\n\nclass ProtoDataSource(FileDataSource):\n \"\"\"A `FileDataSource` that reads files of arbitrary protos as input.\"\"\"\n\n def __init__(self,\n split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],\n decode_proto_fn: DecodeFnType,\n reader_cls: DatasetReaderType = tf.data.TFRecordDataset,\n num_input_examples: Optional[Mapping[str, int]] = None,\n caching_permitted: bool = True,\n file_shuffle_buffer_size: Optional[int] = None):\n \"\"\"ProtoDataSource constructor.\n\n Args:\n split_to_filepattern: dict of string (split name) to either string\n (filename or filepattern) or list of strings (filenames or\n filepatterns).\n decode_proto_fn: a callable to parse a serialized proto to features.\n reader_cls: `tf.data.Dataset`, a dataset class to read the input files.\n num_input_examples: dict or None, an optional dictionary mapping split to\n its size in number of input examples (before preprocessing). 
The\n `num_input_examples` method will return None if not provided.\n caching_permitted: indicates whether this data source may be cached.\n Default True.\n file_shuffle_buffer_size: The buffer size to shuffle files when needed. If\n None, the number of files is used as buffer size for a perfect shuffle\n (default and recommended). A value of 16 may be explicitly set to\n replicate earlier behavior.\n \"\"\"\n\n def read_file_fn(filepattern: Union[str, Iterable[str]]):\n return reader_cls(filepattern).map(\n decode_proto_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n super().__init__(\n read_file_fn=read_file_fn,\n split_to_filepattern=split_to_filepattern,\n num_input_examples=num_input_examples,\n caching_permitted=caching_permitted,\n file_shuffle_buffer_size=file_shuffle_buffer_size)\n\n\n# ========================== Offline Caching Helpers ===========================\n\n\ndef _rename_plaintext_to_pretokenized(\n dataset: tf.data.Dataset) -> tf.data.Dataset:\n \"\"\"Rename cached _plaintext features to new _pretokenized standard.\"\"\"\n def _rename(inputs):\n outputs = {}\n for k, v in inputs.items():\n if k.endswith(\"_plaintext\"):\n k = k[:-len(\"plaintext\")] + \"pretokenized\"\n outputs[k] = v\n return outputs\n return dataset.map(\n _rename, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\nclass _CachedDataSource(FileDataSource):\n \"\"\"A `FileDataSource` for reading datasets cached offline.\"\"\"\n\n def __init__(self,\n cache_dir: str,\n split: str,\n file_shuffle_buffer_size: Optional[int] = None):\n\n with tf.io.gfile.GFile(utils.get_cached_info_path(cache_dir, split)) as f:\n split_info = json.load(f)\n features = split_info[\"features\"]\n\n with tf.io.gfile.GFile(utils.get_cached_stats_path(cache_dir, split)) as f:\n stats = json.load(f)\n\n version_when_cached = version.Version(\n split_info.get(\"seqio_version\", \"0.pre\"))\n version_with_true_dtypes = version.Version(\"0.0.0\")\n if version_when_cached < version_with_true_dtypes:\n # Assume that all int64 features are really int32.\n for name, feat in features.items():\n if feat[\"dtype\"] == \"int64\":\n logging.info(\"Casting cached '%s' to int32.\", name)\n feat[\"dtype\"] = \"int32\"\n\n # Use `FixedLenSequenceFeature` for sequences with variable length.\n def _feature_config(shape, dtype):\n if dtype in (\"int32\", \"bool\"):\n # int32 and bool are stored as int64 in the tf.train.Example protobuf.\n # TODO(adarob): Support other conversions.\n dtype = \"int64\"\n if shape and shape[0] is None:\n return tf.io.FixedLenSequenceFeature(\n shape[1:], dtype, allow_missing=True)\n return tf.io.FixedLenFeature(shape, dtype)\n\n feature_description = {\n feat: _feature_config(**desc) for feat, desc in features.items()\n }\n\n def read_file_fn(filepattern):\n ds = tf.data.TFRecordDataset(filepattern)\n ds = ds.map(\n lambda pb: tf.io.parse_single_example(pb, feature_description),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n # Cast features back to the types from the info JSON since some features\n # must be cast for storage (e.g., in32 is stored as int64).\n ds = ds.map(\n lambda x: {k: tf.cast(v, features[k][\"dtype\"]) for k, v in x.items()},\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n # Legacy cached datasets may use old \"_plaintext\" suffix. 
Rename to\n # \"_pretokenized\".\n ds = _rename_plaintext_to_pretokenized(ds)\n return ds\n\n split_to_filepattern = {\n split: \"%s-*-of-*%d\" % (\n utils.get_cached_tfrecord_prefix(cache_dir, split),\n split_info[\"num_shards\"])\n }\n\n super().__init__(\n read_file_fn=read_file_fn,\n split_to_filepattern=split_to_filepattern,\n num_input_examples={split: stats[\"examples\"]},\n file_shuffle_buffer_size=file_shuffle_buffer_size)\n\n\nclass CacheDatasetPlaceholder(object):\n \"\"\"A placeholder to signal when in the pipeline offline caching will occur.\"\"\"\n\n def __init__(self,\n required=False,\n file_shuffle_buffer_size: Optional[int] = None):\n \"\"\"CacheDatasetPlaceholder constructor.\n\n Args:\n required: whether the dataset must be accessed in its cached form, and\n on-the-fly preprocessing is disallowed.\n file_shuffle_buffer_size: The buffer size to shuffle files when needed. If\n None, the number of files is used as buffer size for a perfect shuffle\n (default and recommended). A value of 16 may be explicitly set to\n replicate earlier behavior.\n \"\"\"\n self._required = required\n self._file_shuffle_buffer_size = file_shuffle_buffer_size\n\n @property\n def required(self):\n return self._required\n\n @property\n def file_shuffle_buffer_size(self):\n return self._file_shuffle_buffer_size\n\n def __call__(self, dataset):\n raise RuntimeError(\"`CacheDatasetPlaceholder` should never be called.\")\n\n\n# ================================ Tasks =======================================\n\nMetricFnCallable = Callable[..., Mapping[str, Union[metrics_lib.MetricValue,\n float]]]\n\n\nclass Task(DatasetProviderBase):\n \"\"\"A class to manage a dataset and its related metrics.\"\"\"\n\n def __init__(\n self,\n name: str,\n source: DataSource,\n output_features: Mapping[str, Feature],\n preprocessors: Optional[Sequence[Callable[..., tf.data.Dataset]]] = None,\n postprocess_fn: Optional[Callable[..., Any]] = None,\n metric_fns: Optional[Sequence[MetricFnCallable]] = None,\n shuffle_buffer_size: Optional[int] = SHUFFLE_BUFFER_SIZE):\n \"\"\"Task constructor.\n\n Args:\n name: a unique name for the Task.\n source: a `DataSource` that provides a raw `tf.data.Dataset`.\n output_features: dict(str, Feature), output features of the Task to be\n passed to the model. After preprocessing, examples will be validated to\n ensure they include features that match this specification. Note that\n additional features may be included (e.g., for evaluation), but they\n will not be passed to the model.\n preprocessors: list(callable), an optional list of functions that receive\n a tf.data.Dataset and return a tf.data.Dataset. These will be executed\n sequentially and the final dataset must include features matching\n `output_features`.\n postprocess_fn: callable, an optional function that receives decoded model\n outputs and converts them to a form that is ready for evaluation using\n the metric functions in `metric_fns`.\n metric_fns: list(callable), an optional list of metric functions with the\n signature `metric_fn(targets, predictions)` to use during evaluation. If\n undefined or empty, no evaluation will occur on the task.\n shuffle_buffer_size: an optional integer to set the shuffle buffer size.\n If None, shuffling will be disallowed.\n \"\"\"\n if not _VALID_TASK_NAME_REGEX.match(name):\n raise ValueError(\n \"Task name '%s' contains invalid characters. 
Must match regex: %s\" % (\n name, _VALID_TASK_NAME_REGEX.pattern))\n\n metric_fns = metric_fns or []\n self._predict_metric_fns = []\n self._score_metric_fns = []\n for metric_fn in metric_fns:\n pos_args = tuple(\n key for key, param in inspect.signature(metric_fn).parameters.items()\n if param.default == inspect.Parameter.empty\n )\n if pos_args == (\"targets\", \"scores\"):\n self._score_metric_fns.append(metric_fn)\n elif pos_args == (\"targets\", \"predictions\"):\n self._predict_metric_fns.append(metric_fn)\n else:\n raise ValueError(\n \"Metric functions must have positional arguments matching either \"\n \"('targets', 'predictions') or ('targets', 'scores'). \"\n f\"Got: {pos_args}\")\n\n self._name = name\n self._source = source\n\n # Find optional CacheDatasetPlaceholder.\n preprocessors = tuple(preprocessors or [])\n cache_step_idxs = [\n i for i, p in enumerate(preprocessors)\n if isinstance(p, CacheDatasetPlaceholder)\n ]\n if len(cache_step_idxs) > 1:\n raise ValueError(\n \"`CacheDatasetPlaceholder` can appear at most once in the \"\n f\"preprocessing pipeline. Found {len(cache_step_idxs)} in '{name}'.\")\n cache_step_idx = cache_step_idxs[0] if cache_step_idxs else None\n if cache_step_idx is not None:\n if not source.caching_permitted:\n raise ValueError(\n f\"Caching was requested for '{name}', but the underlying data \"\n \"source prohibits caching. Please remove `CacheDatasetPlaceholder` \"\n \"and try again.\")\n for prep in preprocessors[:cache_step_idx]:\n prep_args = inspect.signature(prep).parameters.keys()\n if \"sequence_length\" in prep_args:\n raise ValueError(\n f\"'{_get_name(prep)}' has a `sequence_length` argument but occurs \"\n f\"before `CacheDatasetPlaceholder` in '{name}'. This is not \"\n \"allowed since the sequence length is specified at run time.\")\n if \"seed\" in prep_args or \"seeds\" in prep_args:\n logging.warning(\n \"'%s' has a `seed(s)` argument but occurs before \"\n \"`CacheDatasetPlaceholder` in '%s'. 
This is not recommended \"\n \"since the same samples will be used each epoch when reading \"\n \"from the cache.\", _get_name(prep), name)\n self._cache_step_idx = cache_step_idx\n self._preprocessors = preprocessors\n\n self._metric_fns = tuple(metric_fns)\n self._postprocess_fn = postprocess_fn\n\n self._cache_dir = None\n self._stats = {}\n self._shuffle_buffer_size = shuffle_buffer_size\n\n self._output_features = collections.OrderedDict(\n sorted(list(output_features.items()))\n )\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def metric_fns(self) -> Sequence[MetricFnCallable]:\n \"\"\"List of all metric functions.\"\"\"\n return self._predict_metric_fns + self._score_metric_fns\n\n @property\n def score_metric_fns(self) -> Sequence[MetricFnCallable]:\n \"\"\"List of metric functions that use log likelihood scores.\"\"\"\n return self._score_metric_fns\n\n @property\n def predict_metric_fns(self) -> Sequence[MetricFnCallable]:\n \"\"\"List of metric functions that use model predictions.\"\"\"\n return self._predict_metric_fns\n\n @property\n def output_features(self) -> Mapping[str, Feature]:\n return self._output_features\n\n @property\n def splits(self) -> Sequence[str]:\n s = self.source.splits\n if not s:\n raise ValueError(f\"Task {self.name} has no splits\")\n return s\n\n @property\n def source(self) -> DataSource:\n return self._source\n\n @property\n def preprocessors(self) -> Sequence[Callable[..., tf.data.Dataset]]:\n return self._preprocessors\n\n @property\n def postprocessor(self) -> Callable[..., Any]:\n return self._postprocess_fn\n\n def num_input_examples(self, split: str) -> Optional[int]:\n return self.source.num_input_examples(split)\n\n def _preprocess_dataset(\n self,\n dataset: tf.data.Dataset,\n preprocessors: Sequence[Callable[..., tf.data.Dataset]],\n sequence_length: Optional[Mapping[str, int]] = None) -> tf.data.Dataset:\n \"\"\"Sequentially applies preprocessors.\"\"\"\n for prep_fn in preprocessors:\n # prep_fn must not rely on variable length keyword args such as **kwargs.\n fn_args = set(inspect.signature(prep_fn).parameters.keys())\n kwargs = {}\n if \"sequence_length\" in fn_args:\n kwargs[\"sequence_length\"] = sequence_length\n if \"output_features\" in fn_args:\n kwargs[\"output_features\"] = self.output_features\n dataset = prep_fn(dataset, **kwargs)\n return dataset\n\n def _validate_preprocessing(\n self, dataset: tf.data.Dataset\n ) -> tf.data.Dataset:\n \"\"\"Validates preprocessed dataset, raising Exceptions if needed.\n\n Args:\n dataset: a tf.data.Dataset to validate.\n\n Returns:\n a validated tf.data.Dataset.\n \"\"\"\n actual_specs = dataset.element_spec\n for feat, feat_spec in self.output_features.items():\n if feat not in actual_specs:\n if feat_spec.required:\n raise ValueError(\n \"Task dataset is missing expected output feature after \"\n f\"preprocessing: {feat}\")\n else:\n # It's ok that this feature does not exist.\n continue\n actual_spec = actual_specs[feat]\n if feat_spec.dtype != actual_spec.dtype:\n raise ValueError(\n f\"Task dataset has incorrect type for feature '{feat}' after \"\n f\"preprocessing: Got {actual_spec.dtype.name}, expected \"\n f\"{feat_spec.dtype.name}\")\n if feat_spec.rank != actual_spec.shape.rank:\n raise ValueError(\n f\"Task dataset has incorrect rank for feature '{feat}' after \"\n f\"preprocessing: Got {actual_spec.shape.rank}, expected \"\n f\"{feat_spec.rank}\")\n\n return dataset\n\n def _trim_output_features(\n self,\n dataset: tf.data.Dataset,\n sequence_length: 
Optional[Mapping[str, int]]\n ) -> tf.data.Dataset:\n \"\"\"Trim output features to sequence length.\"\"\"\n def _trim(k: str, v: tf.Tensor) -> tf.Tensor:\n if (k not in self.output_features or not sequence_length or\n k not in sequence_length or sequence_length[k] is None):\n return v\n return v[:sequence_length[k]]\n\n return dataset.map(\n lambda ex: {k: _trim(k, v) for k, v in ex.items()},\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n def preprocess_precache(\n self,\n dataset: tf.data.Dataset,\n seed: Optional[int] = None\n ) -> tf.data.Dataset:\n \"\"\"Runs preprocessing steps before the optional CacheDatasetPlaceholder.\"\"\"\n if not self.supports_caching:\n return dataset\n\n with utils.map_seed_manager(seed):\n return self._preprocess_dataset(\n dataset,\n self._preprocessors[:self._cache_step_idx],\n )\n\n def preprocess_postcache(\n self,\n dataset: tf.data.Dataset,\n sequence_length: Optional[Mapping[str, int]],\n seed: Optional[int] = None\n ) -> tf.data.Dataset:\n \"\"\"Runs preprocessing steps after the optional CacheDatasetPlaceholder.\n\n Args:\n dataset: a tf.data.Dataset\n sequence_length: dict mapping feature key to int length for that feature.\n If None, the features will not be truncated.\n seed: an optional random seed for deterministic preprocessing.\n\n Returns:\n a tf.data.Dataset\n \"\"\"\n start_idx = 0\n if self.supports_caching:\n # Skip a sufficient number of seeds to avoid duplicating any from\n # pre-cache preprocessing.\n seed = None if seed is None else seed + 42 * self._cache_step_idx\n start_idx = self._cache_step_idx + 1\n with utils.map_seed_manager(seed):\n dataset = self._preprocess_dataset(\n dataset,\n self._preprocessors[start_idx:],\n sequence_length=sequence_length,\n )\n return dataset\n\n @property\n def cache_dir(self) -> Optional[str]:\n \"\"\"Returns the cache directory (or None), initializing if needed.\"\"\"\n if not self._cache_dir:\n # See if cached data exists in any of the cache directories.\n potential_cache_dirs = [\n os.path.join(d, utils.get_task_dir_from_name(self.name))\n for d in utils.get_global_cache_dirs()\n ]\n for cache_dir in potential_cache_dirs:\n try:\n if tf.io.gfile.exists(os.path.join(cache_dir, \"COMPLETED\")):\n self._cache_dir = cache_dir\n logging.info(\"'%s' is cached at %s.\", self.name, self.cache_dir)\n break\n except tf.errors.PermissionDeniedError:\n logging.warning(\n \"Permission denied for global cache folder: %s\", cache_dir)\n except tf.errors.FailedPreconditionError as e:\n logging.warning(\n \"Failed precondition for global cache folder: %s with %r\",\n cache_dir, e)\n\n if not self._cache_dir:\n logging.info(\n \"'%s' does not exist in any task cache directories (searched %s).\",\n self.name,\n potential_cache_dirs,\n )\n return self._cache_dir\n\n @property\n def supports_caching(self) -> bool:\n \"\"\"Whether or not this task supports offline caching.\"\"\"\n return self._cache_step_idx is not None\n\n @property\n def requires_caching(self) -> bool:\n \"\"\"Whether or not this task requires offline caching.\"\"\"\n return (self._cache_step_idx is not None and\n self.preprocessors[self._cache_step_idx].required) # pytype: disable=attribute-error # bind-properties\n\n def assert_cached(self) -> None:\n \"\"\"Raises an assertion error if cached dataset does not exist.\"\"\"\n assert self.cache_dir, (\n f\"'{self.name}' does not exist in any of the task cache directories.\")\n\n def get_cached_stats(self,\n split: str = tfds.Split.TRAIN\n ) -> Mapping[str, Union[int, float]]:\n 
\"\"\"Returns basic statistics for cached dataset.\"\"\"\n self.assert_cached()\n if split not in self._stats:\n stats_path = utils.get_cached_stats_path(self.cache_dir, split)\n if not tf.io.gfile.exists(stats_path):\n raise ValueError(\n \"Stats do not exist for '%s' split: %s\" % (self.name, split))\n with tf.io.gfile.GFile(stats_path) as f:\n self._stats[split] = json.load(f)\n return self._stats[split]\n\n def get_dataset(\n self,\n sequence_length: Optional[Mapping[str, int]],\n split: str = tfds.Split.TRAIN,\n use_cached: bool = False,\n shuffle: bool = True,\n shuffle_buffer_size: Optional[int] = None,\n seed: Optional[int] = None,\n shard_info: Optional[ShardInfo] = None,\n num_epochs: Optional[int] = 1\n ) -> tf.data.Dataset:\n \"\"\"Returns a tf.data.Dataset from cache or generated on the fly.\n\n Args:\n sequence_length: dict mapping feature key to maximum int length for that\n feature. If longer after preprocessing, the feature will be truncated.\n May be set to None to avoid truncation.\n split: string, the split to return.\n use_cached: bool, whether to use the cached dataset instead of processing\n it on the fly. Defaults to False.\n shuffle: bool, whether to shuffle the dataset. Only used when generating\n on the fly (use_cached=False).\n shuffle_buffer_size: an integer or None to use task-specific buffer size.\n seed: tf.int64 scalar tf.Tensor (or None) for shuffling tf.data.\n shard_info: optional specification for loading a shard of the split. If\n the Task's DataSource contains at least the number of shards in the\n specification, it will be passed the shard info to avoid loading the\n full source dataset. Otherwise, the full source dataset will be loaded\n and sharded at the individual examples.\n num_epochs: the number of times to iterate through the dataset, or `None`\n to repeat indefinitely. Note that the repeat occurs in the pipeline\n after offline caching, but before applying potentially stochastic\n post-cache preprocessors and is therefore typically preferred to calling\n `repeat()` on the returned dataset. Defaults to `1`.\n\n Returns:\n A tf.data.Dataset.\n \"\"\"\n if use_cached and not self.supports_caching:\n logging.warning(\n \"Task '%s' does not support caching. 
Switching to on-the-fly \"\n \"preprocessing.\", self.name)\n use_cached = False\n elif self.requires_caching and not use_cached:\n raise ValueError(\n f\"Task '{self.name}' requires caching, but was called with \"\n \"`use_cached=False`.\")\n\n if use_cached:\n file_shuffle_buffer_size = self.preprocessors[\n self._cache_step_idx].file_shuffle_buffer_size # pytype: disable=attribute-error\n source = self._get_cached_source(split, file_shuffle_buffer_size)\n else:\n source = self.source\n\n if source.supports_arbitrary_sharding:\n shard_data_source = True\n elif shard_info:\n # Whether we should shard at source or on the examples from the source.\n shard_data_source = (\n len(self.source.list_shards(split=split)) >= shard_info.num_shards)\n logging.info(\"Sharding at the %s: %d of %d\",\n \"data source\" if shard_data_source else \"examples\",\n shard_info.index, shard_info.num_shards)\n else:\n # No sharding.\n shard_data_source = False\n shard_info = ShardInfo(0, 1)\n\n if shard_data_source:\n ds = source.get_dataset(\n split=split, shuffle=shuffle, seed=seed, shard_info=shard_info)\n else:\n ds = source.get_dataset(split=split, shuffle=shuffle, seed=seed)\n ds = ds.shard(shard_info.num_shards, shard_info.index)\n\n if ((use_cached and\n self.get_cached_stats(split)[\"examples\"] < _MAX_EXAMPLES_TO_MEM_CACHE)\n or (self.num_input_examples(split) and\n self.num_input_examples(split) < _MAX_EXAMPLES_TO_MEM_CACHE)):\n logging.info(\n \"Automatically caching small dataset in memory: '%s:%s'\",\n self.name, split)\n ds = ds.cache()\n\n if not use_cached:\n ds = self.preprocess_precache(ds, seed=seed)\n\n ds = ds.prefetch(tf.data.experimental.AUTOTUNE)\n\n # We repeat before calling any (potentially) stochastic post-cache\n # preprocessing in order to take new samples each epoch.\n ds = ds.repeat(num_epochs)\n\n # Post cache processing.\n ds = self.preprocess_postcache(\n ds, sequence_length=sequence_length, seed=seed)\n ds = self._validate_preprocessing(ds)\n ds = self._trim_output_features(ds, sequence_length=sequence_length)\n\n if shuffle:\n if self._shuffle_buffer_size is None:\n raise ValueError(\n f\"Shuffling is disallowed for Task '{self.name}' since its \"\n \"`shuffle_buffer_size` was set to `None` on construction.\")\n shuffle_buffer_size = shuffle_buffer_size or self._shuffle_buffer_size\n # Shuffle before mixing since preprocessor can output multiple\n # (correlated) examples per input.\n ds = ds.shuffle(shuffle_buffer_size, seed=seed)\n\n return ds.prefetch(tf.data.experimental.AUTOTUNE)\n\n def _get_cached_source(\n self,\n split,\n file_shuffle_buffer_size: Optional[int] = None) -> _CachedDataSource:\n \"\"\"Returns a DataSource to read cached files for split.\"\"\"\n self.assert_cached()\n return _CachedDataSource(\n self.cache_dir,\n split,\n file_shuffle_buffer_size=file_shuffle_buffer_size)\n\n def postprocess_fn(self, decoded_model_output: Any,\n **postprocess_kwargs) -> Any:\n \"\"\"Returns the model output after applying the postprocess function.\"\"\"\n if self._postprocess_fn:\n return self._postprocess_fn(decoded_model_output, **postprocess_kwargs)\n return decoded_model_output\n\n\nclass TaskRegistry(DatasetProviderRegistry):\n \"\"\"Registry of Tasks.\"\"\"\n _REGISTRY = {}\n _PROVIDER_TYPE = Task\n\n @classmethod\n def add(\n cls,\n name: str,\n source: DataSourceInterface,\n output_features: Mapping[str, Feature],\n preprocessors: Optional[Sequence[Callable[..., tf.data.Dataset]]] = None,\n postprocess_fn: Optional[Callable[..., Any]] = None,\n metric_fns: 
Optional[Sequence[MetricFnCallable]] = None,\n **kwargs) -> Task:\n \"\"\"See `Task` constructor for docstring.\"\"\"\n return super().add(name, Task, name, source, output_features, preprocessors,\n postprocess_fn, metric_fns, **kwargs)\n\n @classmethod\n def get(cls, name) -> Task:\n return super().get(name)\n\n\n# ================================ Mixtures ====================================\nSampleFn = Callable[[Sequence[tf.data.Dataset], Sequence[float], Optional[int]],\n tf.data.Dataset]\n\n\nclass Mixture(DatasetProviderBase):\n \"\"\"Class for mixing multiple tasks.\"\"\"\n\n def __init__(self,\n name: str,\n tasks: Union[Sequence[str],\n Sequence[Tuple[str, Union[int, float,\n Callable[[Task],\n float]]]]],\n default_rate: Optional[Union[float, Callable[[Task],\n float]]] = None,\n sample_fn: SampleFn = tf.data.experimental.sample_from_datasets):\n \"\"\"Mixture constructor.\n\n A mixture specifies a set of tasks with associated mixing rates.\n\n Mixing happens on preprocessed tokenized examples.\n\n The mixing rates represent relative numbers of examples to use from their\n associated tasks. Setting the mixing rates to be equal to the numbers of\n examples in the tasks will result in each task going through an epoch in\n about the same amount of time - i.e. all examples are sampled equally across\n all tasks.\n\n Rates can be expressed either as absolute numbers or as functions that\n receive the Task as an argument.\n\n Args:\n name: string, a unique name for the Mixture.\n tasks: a list where each element is either a string (task name) or a pair\n whose first element is the task name and whose second element is either\n a float (rate) or a function from Task to float.\n default_rate: a float or a function from Task to float. This specifies the\n default rate if rates are not provided in the `tasks` argument.\n sample_fn: SampleFn callable that implements sampling logic to interleave\n multiple datasets into a single dataset.\n \"\"\"\n self._task_to_rate = {}\n self._tasks = []\n self._sub_mixtures = []\n self._name = name\n self._sample_fn = sample_fn\n for t in tasks:\n if isinstance(t, str):\n task_name = t\n rate = default_rate\n if default_rate is None:\n raise ValueError(\"need a rate for each task\")\n else:\n task_name, rate = t\n\n if task_name in TaskRegistry.names():\n self._tasks.append(TaskRegistry.get(task_name))\n self._task_to_rate[task_name] = rate\n else:\n self._sub_mixtures.append(MixtureRegistry.get(task_name)) # pytype:disable=name-error\n self._task_to_rate[task_name] = rate\n\n if len(set(tuple(t.output_features) for t in self.tasks)) != 1:\n raise ValueError(\n \"All Tasks in a Mixture must have the same output features.\"\n )\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def tasks(self) -> Sequence[Task]:\n sub_tasks = (mix.tasks for mix in self._sub_mixtures)\n return list(sorted(set(sum(sub_tasks, self._tasks)), key=lambda t: t.name))\n\n @property\n def total_rate(self) -> float:\n return sum(float(rate(TaskRegistry.get(name)) if callable(rate) else rate)\n for name, rate in self._task_to_rate.items())\n\n def get_rate(self, task: Task) -> float:\n \"\"\"Computes the mixing rate for the given task.\"\"\"\n value = 0.0\n\n for mix in self._sub_mixtures:\n if task in mix.tasks:\n rate = self._task_to_rate[mix.name]\n value += rate * mix.get_rate(task) / mix.total_rate\n\n if task.name in self._task_to_rate:\n rate = self._task_to_rate[task.name]\n value += float(rate(task) if callable(rate) else rate)\n\n return value\n\n 
def num_input_examples(self, split: str) -> int:\n return sum(t.num_input_examples(split) for t in self.tasks)\n\n @property\n def splits(self) -> Sequence[str]:\n splits = set()\n for task in self.tasks:\n splits.update(task.splits)\n return tuple(splits)\n\n @property\n def output_features(self) -> Mapping[str, Feature]:\n # We require all tasks to have the same output_features in __init__\n # so we can just get the output_features for the 0th task\n return self.tasks[0].output_features\n\n def _check_compatible_features(self) -> None:\n \"\"\"Throw Exception if features across tasks have different vocabs or dtypes.\"\"\"\n for name, feature in self.tasks[0].output_features.items():\n for task in self.tasks[1:]:\n if task.output_features[name].vocabulary != feature.vocabulary:\n raise ValueError(\n \"Features across tasks in a mixture must use the same vocabulary.\"\n )\n if task.output_features[name].dtype != feature.dtype:\n raise ValueError(\n \"Features across tasks in a mixture must use the same dtype.\"\n )\n\n def get_dataset(\n self,\n sequence_length: Optional[Mapping[str, int]],\n split: str = tfds.Split.TRAIN,\n use_cached: bool = False,\n shuffle: bool = True,\n seed: Optional[int] = None,\n shard_info: Optional[ShardInfo] = None,\n num_epochs: Optional[int] = None,\n copy_pretokenized: bool = False,\n compute_stats_empirically: bool = False,\n passthrough_features: Optional[Sequence[str]] = None\n ) -> tf.data.Dataset:\n \"\"\"Returns the dataset of mixed tasks using the object-specified rates.\n\n Args:\n sequence_length: dict mapping feature key to maximum int length for that\n feature. If longer after preprocessing, the feature will be truncated.\n May be set to None to avoid truncation.\n split: string, the split to return for all tasks.\n use_cached: bool, whether to use the cached dataset instead of processing\n it on the fly. Defaults to False.\n shuffle: bool, whether to shuffle the dataset. Only used when generating\n on the fly (use_cached=False).\n seed: tf.int64 scalar tf.Tensor (or None) for shuffling tf.data.\n shard_info: optional specification for loading a shard of the split.\n num_epochs: the number of times to iterate through the dataset, or `None`\n to repeat indefinitely. Note that the repeat occurs in the pipeline\n after offline caching, but before applying potentially stochastic\n post-cache preprocessors and is therefore typically preferred to calling\n `repeat()` on the returned dataset. Defaults to `None`.\n copy_pretokenized: bool, whether to pass through copies of pretokenized\n features a \"_pretokenized\" suffix added to the key.\n compute_stats_empirically: a boolean - does not work on TPU\n passthrough_features: a list of additional features that will be kept\n after the feature filtering. 
If set to be None, then only the\n output_features defined for the mixture will be kept.\n \"\"\"\n self._check_compatible_features()\n tasks = []\n for task in self.tasks:\n if split not in task.splits:\n logging.warning(\n \"Task %s has no '%s' split, skipping.\", task.name, split\n )\n continue\n tasks.append(task)\n if not tasks:\n raise ValueError(\"No datasets have a '{}' split\".format(split))\n\n output_feature_keys = set(self.output_features.keys())\n if copy_pretokenized:\n output_feature_keys.update(\n {f + \"_pretokenized\" for f in output_feature_keys})\n\n if passthrough_features:\n output_feature_keys.update(passthrough_features)\n\n def filter_features(ex):\n return {k: v for k, v in ex.items() if k in output_feature_keys}\n\n datasets = [\n task.get_dataset( # pylint:disable=g-complex-comprehension\n sequence_length,\n split=split,\n use_cached=use_cached,\n shuffle=shuffle,\n seed=seed,\n shard_info=shard_info,\n num_epochs=num_epochs)\n .map(filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n for task in tasks]\n rates = [self.get_rate(task) for task in tasks]\n # Sample from the dataset with the rates rates\n if seed is not None:\n sample_seed = seed\n elif shuffle:\n sample_seed = None\n else:\n sample_seed = 42\n dataset = self._sample_fn(datasets, rates, sample_seed)\n if (split == \"train\" and use_cached and\n all(t.supports_caching for t in tasks)):\n _log_mixing_proportions(tasks, datasets, rates, dataset, sequence_length,\n compute_stats_empirically)\n return dataset\n\n\ndef _log_padding_fractions(dataset, sequence_length, num_examples=100):\n \"\"\"Empirically compute the fraction of padding - log the results.\n\n Args:\n dataset: a tf.data.Dataset\n sequence_length: dict from string to int (packed lengths)\n num_examples: an integer\n \"\"\"\n logging.info(\"computing padding fractions\")\n keys = sequence_length.keys()\n padding_frac = {k: 0 for k in keys}\n for ex in tfds.as_numpy(dataset.take(num_examples)):\n for k in keys:\n padding_frac[k] += 1 - (sequence_length[k] / len(ex[k]))\n for k in keys:\n logging.info(\"%s padding fraction = %g\", k, padding_frac[k])\n\n\ndef _log_mixing_proportions(\n tasks, datasets, rates, mixed_dataset,\n sequence_length, compute_stats_empirically):\n \"\"\"Log information about the mixing proportions.\n\n Called from Mixture.get_dataset.\n\n Args:\n tasks: a list of Task\n datasets: a list of tf.data.Dataset\n rates: a list of floats\n mixed_dataset: a tf.data.Dataset\n sequence_length: dict from string to int (packed lengths)\n compute_stats_empirically: a boolean - does not work on TPU\n \"\"\"\n def _normalize(l):\n denom = sum(l)\n if not denom:\n return l\n return [x / denom for x in l]\n # compute some stats about the mixture\n examples_fraction = _normalize(rates)\n if compute_stats_empirically:\n stats_examples = 100\n mean_inputs_length = []\n mean_targets_length = []\n for dataset in datasets:\n inputs_sum = 0\n targets_sum = 0\n for ex in tfds.as_numpy(dataset.take(stats_examples)):\n # Some tasks, like LMs, don't have inputs.\n if \"inputs\" in ex:\n inputs_sum += ex[\"inputs\"].size\n targets_sum += ex[\"targets\"].size\n mean_inputs_length.append(inputs_sum / float(stats_examples))\n mean_targets_length.append(targets_sum / float(stats_examples))\n else:\n def _estimated_mean_length(task, key):\n if key not in sequence_length:\n return 0\n if (task.supports_caching and\n task._cache_step_idx < len(task._preprocessors) - 1): # pylint:disable=protected-access\n # There is processing after 
caching, so we can't rely on the stats.\n return sequence_length[key]\n # Some tasks, like LMs, don't have inputs.\n if key + \"_tokens\" in task.get_cached_stats(\"train\"):\n return min(sequence_length[key],\n (task.get_cached_stats(\"train\")[key + \"_tokens\"] /\n task.get_cached_stats(\"train\")[\"examples\"]))\n else:\n return 0\n\n mean_inputs_length = [_estimated_mean_length(task, \"inputs\")\n for task in tasks]\n mean_targets_length = [_estimated_mean_length(task, \"targets\")\n for task in tasks]\n inputs_fraction = _normalize(\n [l * r for l, r in zip(mean_inputs_length, rates)])\n targets_fraction = _normalize(\n [l * r for l, r in zip(mean_targets_length, rates)])\n logging.info(\"%12s %12s %12s %12s %12s %12s %s\",\n \"rate\", \"ex.frac.\", \"inp.frac.\", \"tgt.frac.\",\n \"inp.len.\", \"tgt.len\", \"task\")\n for i in range(len(rates)):\n logging.info(\"%12g %12g %12g %12g %12g %12g %s\",\n rates[i], examples_fraction[i],\n inputs_fraction[i], targets_fraction[i],\n mean_inputs_length[i], mean_targets_length[i],\n tasks[i].name)\n if compute_stats_empirically:\n _log_padding_fractions(mixed_dataset, sequence_length)\n\n\nclass MixtureRegistry(DatasetProviderRegistry):\n \"\"\"Registry of Mixtures.\"\"\"\n _REGISTRY = {}\n _PROVIDER_TYPE = Mixture\n\n @classmethod\n def add(cls, name, tasks, default_rate=None, **kwargs) -> Mixture:\n \"\"\"See `Mixture` constructor for docstring.\"\"\"\n return super().add(name, Mixture, name, tasks, default_rate, **kwargs)\n\n @classmethod\n def get(cls, name) -> Mixture:\n return super().get(name)\n\n\ndef get_mixture_or_task(task_or_mixture_name):\n \"\"\"Return the Task or Mixture from the appropriate registry.\"\"\"\n mixtures = MixtureRegistry.names()\n tasks = TaskRegistry.names()\n if task_or_mixture_name in mixtures:\n if task_or_mixture_name in tasks:\n logging.warning(\"%s is both a Task and a Mixture, returning Mixture\",\n task_or_mixture_name)\n return MixtureRegistry.get(task_or_mixture_name)\n if task_or_mixture_name in tasks:\n return TaskRegistry.get(task_or_mixture_name)\n else:\n raise ValueError(\n \"No Task or Mixture found with name '%s'. Available:\\n - %s\" %\n (task_or_mixture_name, \"\\n - \".join(sorted(mixtures) + sorted(tasks))))\n\n\ndef get_subtasks(task_or_mixture):\n \"\"\"Returns all the Tasks in a Mixture as a list or the Task itself.\"\"\"\n if isinstance(task_or_mixture, Task):\n return [task_or_mixture]\n else:\n return task_or_mixture.tasks\n\n\ndef get_dataset(mixture_or_task_name: str,\n task_feature_lengths: Mapping[str, int],\n feature_converter: FeatureConverter,\n dataset_split: str = \"train\",\n use_cached: bool = False,\n shuffle: bool = False,\n num_epochs: Optional[int] = 1,\n shard_info: Optional[ShardInfo] = None,\n verbose: bool = True,\n seed: Optional[int] = None) -> tf.data.Dataset:\n \"\"\"Get processed dataset with the model features.\n\n In order to use options specific to a feature converter, e.g., packing,\n `feature_converter` instance should be instantiated with those options before\n being pased to this function.\n\n Getting sharded datasets is supported. To use this feature, pass in\n `shard_info`, with shard_index and num_shards information. Sharding is done\n before the feature converter stage. 
Therefore, if packing is used it will be\n done on the sharded dataset.\n\n Args:\n mixture_or_task_name: mixture or task name for the Task API.\n task_feature_lengths: dict mapping task feature key to its sequence length.\n This specifies the sequence length of the dataset from the Task API.\n feature_converter: a feature converter object to use to convert the task\n features to model features. Must be a subclass of FeatureConverter.\n dataset_split: the split to use.\n use_cached: whether to use the cached dataset instead of processing it on\n the fly.\n shuffle: whether to shuffle the dataset.\n num_epochs: the number of times to iterate through the dataset, or `None` to\n repeat indefinitely. Note that the repeat occurs in the pipeline after\n offline caching, but before applying potentially stochastic post-cache\n preprocessors and is therefore typically preferred to calling `repeat()`\n on the returned dataset. Defaults to `1`.\n shard_info: number of shards and shard index information.\n verbose: if true, log the feature shapes.\n seed: a random seed for shuffling tf.data.\n\n Returns:\n ds: the processed dataset.\n \"\"\"\n if not isinstance(feature_converter, FeatureConverter):\n raise TypeError(\n \"feature_converter should be an instance of FeatureConverter.\")\n\n mixture_or_task = get_mixture_or_task(mixture_or_task_name)\n\n ds = mixture_or_task.get_dataset(\n task_feature_lengths,\n split=dataset_split,\n use_cached=use_cached,\n shuffle=shuffle,\n seed=seed,\n shard_info=shard_info,\n num_epochs=num_epochs)\n\n ds = feature_converter(ds, task_feature_lengths=task_feature_lengths)\n\n if verbose:\n logging.info(\n \"The output dataset from seqio.get_dataset has the following features\")\n for feature_name, tensor_spec in ds.element_spec.items():\n logging.info(\"feature: %s \\t shape: %s \\t dtype: %s\", feature_name,\n tensor_spec.shape.as_list(), tensor_spec.dtype.name)\n return ds\n" ]
[ [ "tensorflow.compat.v2.io.gfile.glob", "numpy.array", "tensorflow.compat.v2.data.TFRecordDataset", "tensorflow.compat.v2.data.TextLineDataset", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.io.parse_single_example", "tensorflow.compat.v2.io.gfile.exists", "tensorflow.compat.v2.io.FixedLenSequenceFeature", "tensorflow.compat.v2.io.FixedLenFeature", "tensorflow.compat.v2.io.gfile.GFile" ] ]
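The Task/Mixture code in the record above is driven through its registries. A minimal, illustrative sketch of calling it, assuming a task named "my_task" has already been registered via TaskRegistry.add and that the feature keys and lengths shown here apply (the task name, keys, and lengths are hypothetical, not taken from the record):

# Hypothetical usage of the module defined in the record above.
task = get_mixture_or_task("my_task")  # "my_task" is an assumed, pre-registered task name
ds = task.get_dataset(
    sequence_length={"inputs": 512, "targets": 128},  # assumed output feature keys
    split="train",
    use_cached=False,
    shuffle=True,
    seed=42)
for ex in ds.take(1):
    # Inspect one preprocessed, trimmed example.
    print({k: v.shape for k, v in ex.items()})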
BZ-2453/DeepBass
[ "a0f4ab8613994a8cfcf732fa2b2b2840313264d4" ]
[ "src/model/nsynth/utils.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utility functions for NSynth.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport importlib\nimport os\n\n# internal imports\nimport librosa\nimport numpy as np\nfrom six.moves import range # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\nslim = tf.contrib.slim\n\n\ndef shell_path(path):\n return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))\n\n\n#===============================================================================\n# WaveNet Functions\n#===============================================================================\ndef get_module(module_path):\n \"\"\"Imports module from NSynth directory.\n\n Args:\n module_path: Path to module separated by dots.\n -> \"configs.linear\"\n\n Returns:\n module: Imported module.\n \"\"\"\n \n module = importlib.import_module(module_path)\n return module\n\n\ndef load_audio(path, sample_length=64000, sr=16000):\n \"\"\"Loading of a wave file.\n\n Args:\n path: Location of a wave file to load.\n sample_length: The truncated total length of the final wave file.\n sr: Samples per a second.\n\n Returns:\n out: The audio in samples from -1.0 to 1.0\n \"\"\"\n audio, _ = librosa.load(path, sr=sr)\n audio = audio[:sample_length]\n return audio\n\n\ndef mu_law(x, mu=255, int8=False):\n \"\"\"A TF implementation of Mu-Law encoding.\n\n Args:\n x: The audio samples to encode.\n mu: The Mu to use in our Mu-Law.\n int8: Use int8 encoding.\n\n Returns:\n out: The Mu-Law encoded int8 data.\n \"\"\"\n \n x = tf.clip_by_value(x, -1, 0.999)\n out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)\n out = tf.floor(out * 128)\n if int8:\n out = tf.cast(out, tf.int8)\n return out\n\n\ndef inv_mu_law(x, mu=255):\n \"\"\"A TF implementation of inverse Mu-Law.\n\n Args:\n x: The Mu-Law samples to decode.\n mu: The Mu we used to encode these samples.\n\n Returns:\n out: The decoded data.\n \"\"\"\n x = tf.cast(x, tf.float32)\n out = (x + 0.5) * 2. / (mu + 1)\n out = tf.sign(out) / mu * ((1 + mu)**tf.abs(out) - 1)\n out = tf.where(tf.equal(x, 0), x, out)\n return out\n\n\ndef inv_mu_law_numpy(x, mu=255.0):\n \"\"\"A numpy implementation of inverse Mu-Law.\n\n Args:\n x: The Mu-Law samples to decode.\n mu: The Mu we used to encode these samples.\n\n Returns:\n out: The decoded data.\n \"\"\"\n x = np.array(x).astype(np.float32)\n out = (x + 0.5) * 2. 
/ (mu + 1)\n out = np.sign(out) / mu * ((1 + mu)**np.abs(out) - 1)\n out = np.where(np.equal(x, 0), x, out)\n return out\n\n\ndef trim_for_encoding(wav_data, sample_length, hop_length=512):\n \"\"\"Make sure audio is a even multiple of hop_size.\n\n Args:\n wav_data: 1-D or 2-D array of floats.\n sample_length: Max length of audio data.\n hop_length: Pooling size of WaveNet autoencoder.\n\n Returns:\n wav_data: Trimmed array.\n sample_length: Length of trimmed array.\n \"\"\"\n if wav_data.ndim == 1:\n # Max sample length is the data length\n if sample_length > wav_data.size:\n sample_length = wav_data.size\n # Multiple of hop_length\n sample_length = (sample_length // hop_length) * hop_length\n # Trim\n wav_data = wav_data[:sample_length]\n # Assume all examples are the same length\n elif wav_data.ndim == 2:\n # Max sample length is the data length\n if sample_length > wav_data[0].size:\n sample_length = wav_data[0].size\n # Multiple of hop_length\n sample_length = (sample_length // hop_length) * hop_length\n # Trim\n wav_data = wav_data[:, :sample_length]\n\n return wav_data, sample_length\n\n\n#===============================================================================\n# Baseline Functions\n#===============================================================================\n#---------------------------------------------------\n# Pre/Post-processing\n#---------------------------------------------------\ndef get_optimizer(learning_rate, hparams):\n \"\"\"Get the tf.train.Optimizer for this optimizer string.\n\n Args:\n learning_rate: The learning_rate tensor.\n hparams: TF.HParams object with the optimizer and momentum values.\n\n Returns:\n optimizer: The tf.train.Optimizer based on the optimizer string.\n \"\"\"\n return {\n \"rmsprop\":\n tf.RMSPropOptimizer(\n learning_rate,\n decay=0.95,\n momentum=hparams.momentum,\n epsilon=1e-4),\n \"adam\":\n tf.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-8),\n \"adagrad\":\n tf.AdagradOptimizer(learning_rate, initial_accumulator_value=1.0),\n \"mom\":\n tf.MomentumOptimizer(learning_rate, momentum=hparams.momentum),\n \"sgd\":\n tf.GradientDescentOptimizer(learning_rate)\n }.get(hparams.optimizer)\n\n\ndef specgram(audio,\n n_fft=512,\n hop_length=None,\n mask=True,\n log_mag=True,\n re_im=False,\n dphase=True,\n mag_only=False):\n \"\"\"Spectrogram using librosa.\n\n Args:\n audio: 1-D array of float32 sound samples.\n n_fft: Size of the FFT.\n hop_length: Stride of FFT. Defaults to n_fft/2.\n mask: Mask the phase derivative by the magnitude.\n log_mag: Use the logamplitude.\n re_im: Output Real and Imag. instead of logMag and dPhase.\n dphase: Use derivative of phase instead of phase.\n mag_only: Don't return phase.\n\n Returns:\n specgram: [n_fft/2 + 1, audio.size / hop_length, 2]. The first channel is\n the logamplitude and the second channel is the derivative of phase.\n \"\"\"\n if not hop_length:\n hop_length = int(n_fft / 2.)\n\n fft_config = dict(\n n_fft=n_fft, win_length=n_fft, hop_length=hop_length, center=True)\n\n spec = librosa.stft(audio, **fft_config)\n\n if re_im:\n re = spec.real[:, :, np.newaxis]\n im = spec.imag[:, :, np.newaxis]\n spec_real = np.concatenate((re, im), axis=2)\n\n else:\n mag, phase = librosa.core.magphase(spec)\n phase_angle = np.angle(phase)\n\n # Magnitudes, scaled 0-1\n if log_mag:\n mag = (librosa.power_to_db(\n mag**2, amin=1e-13, top_db=120., ref=np.max) / 120.) 
+ 1\n else:\n mag /= mag.max()\n\n if dphase:\n # Derivative of phase\n phase_unwrapped = np.unwrap(phase_angle)\n p = phase_unwrapped[:, 1:] - phase_unwrapped[:, :-1]\n p = np.concatenate([phase_unwrapped[:, 0:1], p], axis=1) / np.pi\n else:\n # Normal phase\n p = phase_angle / np.pi\n # Mask the phase\n if log_mag and mask:\n p = mag * p\n # Return Mag and Phase\n p = p.astype(np.float32)[:, :, np.newaxis]\n mag = mag.astype(np.float32)[:, :, np.newaxis]\n if mag_only:\n spec_real = mag[:, :, np.newaxis]\n else:\n spec_real = np.concatenate((mag, p), axis=2)\n return spec_real\n\n\ndef inv_magphase(mag, phase_angle):\n phase = np.cos(phase_angle) + 1.j * np.sin(phase_angle)\n return mag * phase\n\n\ndef griffin_lim(mag, phase_angle, n_fft, hop, num_iters):\n \"\"\"Iterative algorithm for phase retrieval from a magnitude spectrogram.\n\n Args:\n mag: Magnitude spectrogram.\n phase_angle: Initial condition for phase.\n n_fft: Size of the FFT.\n hop: Stride of FFT. Defaults to n_fft/2.\n num_iters: Griffin-Lim iterations to perform.\n\n Returns:\n audio: 1-D array of float32 sound samples.\n \"\"\"\n fft_config = dict(n_fft=n_fft, win_length=n_fft, hop_length=hop, center=True)\n ifft_config = dict(win_length=n_fft, hop_length=hop, center=True)\n complex_specgram = inv_magphase(mag, phase_angle)\n for i in range(num_iters):\n audio = librosa.istft(complex_specgram, **ifft_config)\n if i != num_iters - 1:\n complex_specgram = librosa.stft(audio, **fft_config)\n _, phase = librosa.magphase(complex_specgram)\n phase_angle = np.angle(phase)\n complex_specgram = inv_magphase(mag, phase_angle)\n return audio\n\n\ndef ispecgram(spec,\n n_fft=512,\n hop_length=None,\n mask=True,\n log_mag=True,\n re_im=False,\n dphase=True,\n mag_only=True,\n num_iters=1000):\n \"\"\"Inverse Spectrogram using librosa.\n\n Args:\n spec: 3-D specgram array [freqs, time, (mag_db, dphase)].\n n_fft: Size of the FFT.\n hop_length: Stride of FFT. Defaults to n_fft/2.\n mask: Reverse the mask of the phase derivative by the magnitude.\n log_mag: Use the logamplitude.\n re_im: Output Real and Imag. instead of logMag and dPhase.\n dphase: Use derivative of phase instead of phase.\n mag_only: Specgram contains no phase.\n num_iters: Number of griffin-lim iterations for mag_only.\n\n Returns:\n audio: 1-D array of sound samples. 
Peak normalized to 1.\n \"\"\"\n if not hop_length:\n hop_length = n_fft // 2\n\n ifft_config = dict(win_length=n_fft, hop_length=hop_length, center=True)\n\n if mag_only:\n mag = spec[:, :, 0]\n phase_angle = np.pi * np.random.rand(*mag.shape)\n elif re_im:\n spec_real = spec[:, :, 0] + 1.j * spec[:, :, 1]\n else:\n mag, p = spec[:, :, 0], spec[:, :, 1]\n if mask and log_mag:\n p /= (mag + 1e-13 * np.random.randn(*mag.shape))\n if dphase:\n # Roll up phase\n phase_angle = np.cumsum(p * np.pi, axis=1)\n else:\n phase_angle = p * np.pi\n\n # Magnitudes\n if log_mag:\n mag = (mag - 1.0) * 120.0\n mag = 10**(mag / 20.0)\n phase = np.cos(phase_angle) + 1.j * np.sin(phase_angle)\n spec_real = mag * phase\n\n if mag_only:\n audio = griffin_lim(\n mag, phase_angle, n_fft, hop_length, num_iters=num_iters)\n else:\n audio = librosa.core.istft(spec_real, **ifft_config)\n return np.squeeze(audio / audio.max())\n\n\ndef batch_specgram(audio,\n n_fft=512,\n hop_length=None,\n mask=True,\n log_mag=True,\n re_im=False,\n dphase=True,\n mag_only=False):\n assert len(audio.shape) == 2\n batch_size = audio.shape[0]\n res = []\n for b in range(batch_size):\n res.append(\n specgram(audio[b], n_fft, hop_length, mask, log_mag, re_im, dphase,\n mag_only))\n return np.array(res)\n\n\ndef batch_ispecgram(spec,\n n_fft=512,\n hop_length=None,\n mask=True,\n log_mag=True,\n re_im=False,\n dphase=True,\n mag_only=False,\n num_iters=1000):\n assert len(spec.shape) == 4\n batch_size = spec.shape[0]\n res = []\n for b in range(batch_size):\n res.append(\n ispecgram(spec[b, :, :, :], n_fft, hop_length, mask, log_mag, re_im,\n dphase, mag_only, num_iters))\n return np.array(res)\n\n\ndef tf_specgram(audio,\n n_fft=512,\n hop_length=None,\n mask=True,\n log_mag=True,\n re_im=False,\n dphase=True,\n mag_only=False):\n return tf.py_func(batch_specgram, [\n audio, n_fft, hop_length, mask, log_mag, re_im, dphase, mag_only\n ], tf.float32)\n\n\ndef tf_ispecgram(spec,\n n_fft=512,\n hop_length=None,\n mask=True,\n pad=True,\n log_mag=True,\n re_im=False,\n dphase=True,\n mag_only=False,\n num_iters=1000):\n dims = spec.get_shape().as_list()\n # Add back in nyquist frequency\n x = spec if not pad else tf.concat(\n [spec, tf.zeros([dims[0], 1, dims[2], dims[3]])], 1)\n audio = tf.py_func(batch_ispecgram, [\n x, n_fft, hop_length, mask, log_mag, re_im, dphase, mag_only, num_iters\n ], tf.float32)\n return audio\n\n\n#---------------------------------------------------\n# Summaries\n#---------------------------------------------------\ndef form_image_grid(input_tensor, grid_shape, image_shape, num_channels):\n \"\"\"Arrange a minibatch of images into a grid to form a single image.\n\n Args:\n input_tensor: Tensor. Minibatch of images to format, either 4D\n ([batch size, height, width, num_channels]) or flattened\n ([batch size, height * width * num_channels]).\n grid_shape: Sequence of int. The shape of the image grid,\n formatted as [grid_height, grid_width].\n image_shape: Sequence of int. The shape of a single image,\n formatted as [image_height, image_width].\n num_channels: int. 
The number of channels in an image.\n\n Returns:\n Tensor representing a single image in which the input images have been\n arranged into a grid.\n\n Raises:\n ValueError: The grid shape and minibatch size don't match, or the image\n shape and number of channels are incompatible with the input tensor.\n \"\"\"\n if grid_shape[0] * grid_shape[1] != int(input_tensor.get_shape()[0]):\n raise ValueError(\"Grid shape incompatible with minibatch size.\")\n if len(input_tensor.get_shape()) == 2:\n num_features = image_shape[0] * image_shape[1] * num_channels\n if int(input_tensor.get_shape()[1]) != num_features:\n raise ValueError(\"Image shape and number of channels incompatible with \"\n \"input tensor.\")\n elif len(input_tensor.get_shape()) == 4:\n if (int(input_tensor.get_shape()[1]) != image_shape[0] or\n int(input_tensor.get_shape()[2]) != image_shape[1] or\n int(input_tensor.get_shape()[3]) != num_channels):\n raise ValueError(\"Image shape and number of channels incompatible with \"\n \"input tensor.\")\n else:\n raise ValueError(\"Unrecognized input tensor format.\")\n height, width = grid_shape[0] * image_shape[0], grid_shape[1] * image_shape[1]\n input_tensor = tf.reshape(input_tensor,\n grid_shape + image_shape + [num_channels])\n input_tensor = tf.transpose(input_tensor, [0, 1, 3, 2, 4])\n input_tensor = tf.reshape(\n input_tensor, [grid_shape[0], width, image_shape[0], num_channels])\n input_tensor = tf.transpose(input_tensor, [0, 2, 1, 3])\n input_tensor = tf.reshape(input_tensor, [1, height, width, num_channels])\n return input_tensor\n\n\ndef specgram_summaries(spec,\n name,\n hparams,\n rows=4,\n columns=4,\n image=True,\n phase=True,\n audio=True):\n \"\"\"Post summaries of a specgram (Image and Audio).\n\n For image summaries, creates a rows x columns composite image from the batch.\n Also can create audio summaries for raw audio, but hparams.raw_audio must be\n True.\n Args:\n spec: Batch of spectrograms.\n name: String prepended to summaries.\n hparams: Hyperparamenters.\n rows: Int, number of rows in image.\n columns: Int, number of columns in image.\n image: Bool, create image summary.\n phase: Bool, create image summary from second channel in the batch.\n audio: Bool, create audio summaries for each spectrogram in the batch.\n \"\"\"\n batch_size, n_freq, n_time, unused_channels = spec.get_shape().as_list()\n # Must divide minibatch evenly\n b = min(batch_size, rows * columns)\n\n if hparams.raw_audio:\n spec = tf.squeeze(spec)\n spec /= tf.expand_dims(tf.reduce_max(spec, axis=1), axis=1)\n tf.summary.audio(\n name, tf.squeeze(spec), hparams.samples_per_second, max_outputs=b)\n else:\n if image:\n if b % columns != 0:\n rows = np.floor(np.sqrt(b))\n columns = rows\n else:\n rows = b / columns\n tf.summary.image(\"Mag/%s\" % name,\n form_image_grid(spec[:b, :, :, :1], [rows, columns],\n [n_freq, n_time], 1))\n if phase:\n tf.summary.image(\"Phase/%s\" % name,\n form_image_grid(spec[:b, :, :, 1:], [rows, columns],\n [n_freq, n_time], 1))\n if audio:\n tf.summary.audio(\n name,\n tf_ispecgram(\n spec,\n n_fft=hparams.n_fft,\n hop_length=hparams.hop_length,\n mask=hparams.mask,\n log_mag=hparams.log_mag,\n pad=hparams.pad,\n re_im=hparams.re_im,\n dphase=hparams.dphase,\n mag_only=hparams.mag_only),\n hparams.samples_per_second,\n max_outputs=b)\n\n\ndef calculate_softmax_and_summaries(logits, one_hot_labels, name):\n \"\"\"Calculate the softmax cross entropy loss and associated summaries.\n\n Args:\n logits: Tensor of logits, first dimension is batch size.\n one_hot_labels: 
Tensor of one hot encoded categorical labels. First\n dimension is batch size.\n name: Name to use as prefix for summaries.\n\n Returns:\n loss: Dimensionless tensor representing the mean negative\n log-probability of the true class.\n \"\"\"\n loss = tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=one_hot_labels)\n loss = tf.reduce_mean(loss)\n softmax_summaries(loss, logits, one_hot_labels, name)\n return loss\n\n\ndef calculate_sparse_softmax_and_summaries(logits, labels, name):\n \"\"\"Calculate the softmax cross entropy loss and associated summaries.\n\n Args:\n logits: Tensor of logits, first dimension is batch size.\n labels: Tensor of categorical labels [ints]. First\n dimension is batch size.\n name: Name to use as prefix for summaries.\n\n Returns:\n loss: Dimensionless tensor representing the mean negative\n log-probability of the true class.\n \"\"\"\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=labels)\n loss = tf.reduce_mean(loss)\n softmax_summaries(loss, logits, labels, name)\n return loss\n\n\ndef softmax_summaries(loss, logits, one_hot_labels, name=\"softmax\"):\n \"\"\"Create the softmax summaries for this cross entropy loss.\n\n Args:\n loss: Cross-entropy loss.\n logits: The [batch_size, classes] float tensor representing the logits.\n one_hot_labels: The float tensor representing actual class ids. If this is\n [batch_size, classes], then we take the argmax of it first.\n name: Prepended to summary scope.\n \"\"\"\n tf.summary.scalar(name + \"_loss\", loss)\n\n one_hot_labels = tf.cond(\n tf.equal(tf.rank(one_hot_labels),\n 2), lambda: tf.to_int32(tf.argmax(one_hot_labels, 1)),\n lambda: tf.to_int32(one_hot_labels))\n\n in_top_1 = tf.nn.in_top_k(logits, one_hot_labels, 1)\n tf.summary.scalar(name + \"_precision@1\",\n tf.reduce_mean(tf.to_float(in_top_1)))\n in_top_5 = tf.nn.in_top_k(logits, one_hot_labels, 5)\n tf.summary.scalar(name + \"_precision@5\",\n tf.reduce_mean(tf.to_float(in_top_5)))\n\n\ndef calculate_l2_and_summaries(predicted_vectors, true_vectors, name):\n \"\"\"Calculate L2 loss and associated summaries.\n\n Args:\n predicted_vectors: Tensor of predictions, first dimension is batch size.\n true_vectors: Tensor of labels, first dimension is batch size.\n name: Name to use as prefix for summaries.\n\n Returns:\n loss: Dimensionless tensor representing the mean euclidean distance\n between true and predicted.\n \"\"\"\n loss = tf.reduce_mean((predicted_vectors - true_vectors)**2)\n tf.summary.scalar(name + \"_loss\", loss, name=\"loss\")\n tf.summary.scalar(\n name + \"_prediction_mean_squared_norm\",\n tf.reduce_mean(tf.nn.l2_loss(predicted_vectors)),\n name=name + \"_prediction_mean_squared_norm\")\n tf.summary.scalar(\n name + \"_label_mean_squared_norm\",\n tf.reduce_mean(tf.nn.l2_loss(true_vectors)),\n name=name + \"_label_mean_squared_norm\")\n return loss\n\n\ndef frequency_weighted_cost_mask(peak=10.0, hz_flat=1000, sr=16000, n_fft=512):\n \"\"\"Calculates a mask to weight lower frequencies higher.\n\n Piecewise linear approximation. 
Assumes magnitude is in log scale.\n Args:\n peak: Cost increase at 0 Hz.\n hz_flat: Hz at which cost increase is 0.\n sr: Sample rate.\n n_fft: FFT size.\n\n Returns:\n Constant tensor [1, N_freq, 1] of cost weighting.\n \"\"\"\n n = int(n_fft / 2)\n cutoff = np.where(\n librosa.core.fft_frequencies(sr=sr, n_fft=n_fft) >= hz_flat)[0][0]\n mask = np.concatenate([np.linspace(peak, 1.0, cutoff), np.ones(n - cutoff)])\n return tf.constant(mask[np.newaxis, :, np.newaxis], dtype=tf.float32)\n\n\n#---------------------------------------------------\n# Neural Nets\n#---------------------------------------------------\ndef pitch_embeddings(batch,\n timesteps=1,\n n_pitches=128,\n dim_embedding=128,\n reuse=False):\n \"\"\"Get a embedding of each pitch note.\n\n Args:\n batch: NSynthDataset batch dictionary.\n timesteps: Number of timesteps to replicate across.\n n_pitches: Number of one-hot embeddings.\n dim_embedding: Dimension of linear projection of one-hot encoding.\n reuse: Reuse variables.\n\n Returns:\n embedding: A tensor of shape [batch_size, 1, timesteps, dim_embedding].\n \"\"\"\n batch_size = batch[\"pitch\"].get_shape().as_list()[0]\n with tf.variable_scope(\"PitchEmbedding\", reuse=reuse):\n w = tf.get_variable(\n name=\"embedding_weights\",\n shape=[n_pitches, dim_embedding],\n initializer=tf.random_normal_initializer())\n one_hot_pitch = tf.reshape(batch[\"pitch\"], [batch_size])\n one_hot_pitch = tf.one_hot(one_hot_pitch, depth=n_pitches)\n embedding = tf.matmul(one_hot_pitch, w)\n embedding = tf.reshape(embedding, [batch_size, 1, 1, dim_embedding])\n if timesteps > 1:\n embedding = tf.tile(embedding, [1, 1, timesteps, 1])\n return embedding\n\n\ndef slim_batchnorm_arg_scope(is_training, activation_fn=None):\n \"\"\"Create a scope for applying BatchNorm in slim.\n\n This scope also applies Glorot initializiation to convolutional weights.\n Args:\n is_training: Whether this is a training run.\n activation_fn: Whether we apply an activation_fn to the convolution result.\n\n Returns:\n scope: Use this scope to automatically apply BatchNorm and Xavier Init to\n slim.conv2d and slim.fully_connected.\n \"\"\"\n batch_norm_params = {\n \"is_training\": is_training,\n \"decay\": 0.999,\n \"epsilon\": 0.001,\n \"variables_collections\": {\n \"beta\": None,\n \"gamma\": None,\n \"moving_mean\": \"moving_vars\",\n \"moving_variance\": \"moving_vars\",\n }\n }\n\n with slim.arg_scope(\n [slim.conv2d, slim.fully_connected, slim.conv2d_transpose],\n weights_initializer=slim.initializers.xavier_initializer(),\n activation_fn=activation_fn,\n normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params) as scope:\n return scope\n\n\ndef conv2d(x,\n kernel_size,\n stride,\n channels,\n is_training,\n scope=\"conv2d\",\n batch_norm=False,\n residual=False,\n gated=False,\n activation_fn=tf.nn.relu,\n resize=False,\n transpose=False,\n stacked_layers=1):\n \"\"\"2D-Conv with optional batch_norm, gating, residual.\n\n Args:\n x: Tensor input [MB, H, W, CH].\n kernel_size: List [H, W].\n stride: List [H, W].\n channels: Int, output channels.\n is_training: Whether to collect stats for BatchNorm.\n scope: Enclosing scope name.\n batch_norm: Apply batch normalization\n residual: Residual connections, have stacked_layers >= 2.\n gated: Gating ala Wavenet.\n activation_fn: Nonlinearity function.\n resize: On transposed convolution, do ImageResize instead of conv_transpose.\n transpose: Use conv_transpose instead of conv.\n stacked_layers: Number of layers before a residual connection.\n\n 
Returns:\n x: Tensor output.\n \"\"\"\n # For residual\n x0 = x\n # Choose convolution function\n conv_fn = slim.conv2d_transpose if transpose else slim.conv2d\n # Double output channels for gates\n num_outputs = channels * 2 if gated else channels\n normalizer_fn = slim.batch_norm if batch_norm else None\n\n with tf.variable_scope(scope + \"_Layer\"):\n # Apply a stack of convolutions Before adding residual\n for layer_idx in range(stacked_layers):\n with slim.arg_scope(\n slim_batchnorm_arg_scope(is_training, activation_fn=None)):\n # Use interpolation to upsample instead of conv_transpose\n if transpose and resize:\n unused_mb, h, w, unused_ch = x.get_shape().as_list()\n x = tf.image.resize_images(\n x, size=[h * stride[0], w * stride[1]], method=0)\n stride_conv = [1, 1]\n else:\n stride_conv = stride\n\n x = conv_fn(\n inputs=x,\n stride=stride_conv,\n kernel_size=kernel_size,\n num_outputs=num_outputs,\n normalizer_fn=normalizer_fn,\n biases_initializer=tf.zeros_initializer(),\n scope=scope)\n\n if gated:\n with tf.variable_scope(\"Gated\"):\n x1, x2 = x[:, :, :, :channels], x[:, :, :, channels:]\n if activation_fn:\n x1, x2 = activation_fn(x1), tf.sigmoid(x2)\n else:\n x2 = tf.sigmoid(x2)\n x = x1 * x2\n\n # Apply residual to last layer before the last nonlinearity\n if residual and (layer_idx == stacked_layers - 1):\n with tf.variable_scope(\"Residual\"):\n # Don't upsample residual in time\n if stride[0] == 1 and stride[1] == 1:\n channels_in = x0.get_shape().as_list()[-1]\n # Make n_channels match for residual\n if channels != channels_in:\n x0 = slim.conv2d(\n inputs=x0,\n stride=[1, 1],\n kernel_size=[1, 1],\n num_outputs=channels,\n normalizer_fn=None,\n activation_fn=None,\n biases_initializer=tf.zeros_initializer,\n scope=scope + \"_residual\")\n x += x0\n else:\n x += x0\n if activation_fn and not gated:\n x = activation_fn(x)\n return x\n\n\ndef leaky_relu(leak=0.1):\n \"\"\"Leaky ReLU activation function.\n\n Args:\n leak: float. 
Slope for the negative part of the leaky ReLU function.\n Defaults to 0.1.\n\n Returns:\n A lambda computing the leaky ReLU function with the specified slope.\n \"\"\"\n return lambda x: tf.maximum(x, leak * x)\n\n\ndef causal_linear(x, n_inputs, n_outputs, name, filter_length, rate,\n batch_size):\n \"\"\"Applies dilated convolution using queues.\n\n Assumes a filter_length of 3.\n\n Args:\n x: The [mb, time, channels] tensor input.\n n_inputs: The input number of channels.\n n_outputs: The output number of channels.\n name: The variable scope to provide to W and biases.\n filter_length: The length of the convolution, assumed to be 3.\n rate: The rate or dilation\n batch_size: Non-symbolic value for batch_size.\n\n Returns:\n y: The output of the operation\n (init_1, init_2): Initialization operations for the queues\n (push_1, push_2): Push operations for the queues\n \"\"\"\n assert filter_length == 3\n\n # create queue\n q_1 = tf.FIFOQueue(rate, dtypes=tf.float32, shapes=(batch_size, 1, n_inputs))\n q_2 = tf.FIFOQueue(rate, dtypes=tf.float32, shapes=(batch_size, 1, n_inputs))\n init_1 = q_1.enqueue_many(tf.zeros((rate, batch_size, 1, n_inputs)))\n init_2 = q_2.enqueue_many(tf.zeros((rate, batch_size, 1, n_inputs)))\n state_1 = q_1.dequeue()\n push_1 = q_1.enqueue(x)\n state_2 = q_2.dequeue()\n push_2 = q_2.enqueue(state_1)\n\n # get pretrained weights\n w = tf.get_variable(\n name=name + \"/W\",\n shape=[1, filter_length, n_inputs, n_outputs],\n dtype=tf.float32)\n b = tf.get_variable(\n name=name + \"/biases\", shape=[n_outputs], dtype=tf.float32)\n w_q_2 = tf.slice(w, [0, 0, 0, 0], [-1, 1, -1, -1])\n w_q_1 = tf.slice(w, [0, 1, 0, 0], [-1, 1, -1, -1])\n w_x = tf.slice(w, [0, 2, 0, 0], [-1, 1, -1, -1])\n\n # perform op w/ cached states\n y = tf.nn.bias_add(\n tf.matmul(state_2[:, 0, :], w_q_2[0][0]) + tf.matmul(\n state_1[:, 0, :], w_q_1[0][0]) + tf.matmul(x[:, 0, :], w_x[0][0]), b)\n\n y = tf.expand_dims(y, 1)\n return y, (init_1, init_2), (push_1, push_2)\n\n\ndef linear(x, n_inputs, n_outputs, name):\n \"\"\"Simple linear layer.\n\n Args:\n x: The [mb, time, channels] tensor input.\n n_inputs: The input number of channels.\n n_outputs: The output number of channels.\n name: The variable scope to provide to W and biases.\n\n Returns:\n y: The output of the operation.\n \"\"\"\n w = tf.get_variable(\n name=name + \"/W\", shape=[1, 1, n_inputs, n_outputs], dtype=tf.float32)\n b = tf.get_variable(\n name=name + \"/biases\", shape=[n_outputs], dtype=tf.float32)\n y = tf.nn.bias_add(tf.matmul(x[:, 0, :], w[0][0]), b)\n y = tf.expand_dims(y, 1)\n return y\n" ]
[ [ "tensorflow.nn.in_top_k", "tensorflow.nn.softmax_cross_entropy_with_logits", "numpy.random.rand", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.MomentumOptimizer", "numpy.sign", "tensorflow.clip_by_value", "numpy.cos", "tensorflow.to_float", "tensorflow.tile", "numpy.cumsum", "tensorflow.one_hot", "tensorflow.cast", "tensorflow.random_normal_initializer", "numpy.concatenate", "tensorflow.rank", "numpy.sin", "numpy.angle", "numpy.log", "tensorflow.AdamOptimizer", "tensorflow.sigmoid", "tensorflow.argmax", "tensorflow.FIFOQueue", "tensorflow.transpose", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.squeeze", "numpy.sqrt", "tensorflow.RMSPropOptimizer", "tensorflow.GradientDescentOptimizer", "tensorflow.floor", "numpy.array", "numpy.equal", "tensorflow.zeros", "tensorflow.abs", "tensorflow.summary.scalar", "tensorflow.py_func", "tensorflow.expand_dims", "tensorflow.maximum", "numpy.random.randn", "tensorflow.nn.l2_loss", "numpy.unwrap", "tensorflow.get_variable", "tensorflow.image.resize_images", "tensorflow.to_int32", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.zeros_initializer", "tensorflow.equal", "numpy.ones", "tensorflow.AdagradOptimizer", "tensorflow.sign", "tensorflow.reduce_max", "numpy.abs", "tensorflow.slice", "numpy.linspace", "tensorflow.reduce_mean" ] ]
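The NSynth utilities in the record above chain together for spectrogram analysis and Griffin-Lim resynthesis. A rough sketch under assumed inputs (the path "example.wav" is hypothetical, and the shapes follow the specgram docstring above):

# Hypothetical usage of the nsynth/utils.py helpers from the record above.
audio = load_audio("example.wav", sample_length=16000, sr=16000)  # assumed local wav file
audio, length = trim_for_encoding(audio, sample_length=16000, hop_length=512)
spec = specgram(audio, n_fft=512)  # [n_fft/2 + 1, time, 2]: log-magnitude and phase derivative
recon = ispecgram(spec, n_fft=512, num_iters=10)  # magnitude-only Griffin-Lim reconstruction
print(spec.shape, recon.shape)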
GUZHIXIANG/DAA_taguchi
[ "5c77f0a326b53e0cc908cf08714fd470870877ec" ]
[ "HDP_HSMM/basic/distributions.py" ]
[ "from __future__ import division\nimport numpy as np\nnp.seterr(divide='ignore')\nfrom numpy import newaxis as na\nfrom numpy.core.umath_tests import inner1d\nimport scipy.stats as stats\nimport scipy.special as special\nimport scipy.linalg\nimport matplotlib.pyplot as plt\nimport copy\nfrom abstractions import GibbsSampling, MeanField, Collapsed, MaxLikelihood, MAP, DurationDistribution\nfrom util import sample_niw, invwishart_entropy, invwishart_log_partitionfunction, getdatasize, flattendata_distribution, getdatadimension, combinedata_distribution, multivariate_t_loglik\n\n\n##############################################\n# Mixins for making duratino distributions #\n##############################################\nclass _StartAtOneMixin(object):\n def log_likelihood(self,x,*args,**kwargs):\n return super(_StartAtOneMixin,self).log_likelihood(x-1,*args,**kwargs)\n def log_sf(self,x,*args,**kwargs):\n return super(_StartAtOneMixin,self).log_sf(x-1,*args,**kwargs)\n def rvs(self,size=None):\n return super(_StartAtOneMixin,self).rvs(size)+1\n def rvs_given_greater_than(self,x):\n return super(_StartAtOneMixin,self).rvs_given_greater_than(x-1)+1\n def resample(self,data=[],*args,**kwargs):\n if isinstance(data,np.ndarray):\n return super(_StartAtOneMixin,self).resample(data-1,*args,**kwargs)\n else:\n return super(_StartAtOneMixin,self).resample([d-1 for d in data],*args,**kwargs)\n def max_likelihood(self,data,weights=None,*args,**kwargs):\n if weights is not None:\n raise NotImplementedError\n else:\n if isinstance(data,np.ndarray):\n return super(_StartAtOneMixin,self).max_likelihood(data-1,weights=None,*args,**kwargs)\n else:\n return super(_StartAtOneMixin,self).max_likelihood([d-1 for d in data],weights=None,*args,**kwargs)\n\n\n##########################################################\n# Multivariate Gaussian distribution classes #\n##########################################################\nclass _GaussianBase(object):\n @property\n def params(self):\n return dict(mu=self.mu,sigma=self.sigma)\n ### internals\n def getsigma(self):\n return self._sigma\n def setsigma(self,sigma):\n self._sigma = sigma\n self._sigma_chol = None\n sigma = property(getsigma,setsigma)\n @property\n def sigma_chol(self):\n if self._sigma_chol is None:\n self._sigma_chol = np.linalg.cholesky(self._sigma)\n return self._sigma_chol\n ### distribution stuff\n def rvs(self,size=None):\n size = 1 if size is None else size\n size = size + (self.mu.shape[0],) if isinstance(size,tuple) else (size,self.mu.shape[0])\n return self.mu + np.random.normal(size=size).dot(self.sigma_chol.T)\n def log_likelihood(self,x):\n mu, sigma, D = self.mu, self.sigma, self.mu.shape[0]\n sigma_chol = self.sigma_chol\n bads = np.isnan(np.atleast_2d(x)).any(axis=1)\n x = np.nan_to_num(x).reshape((-1,D)) - mu\n xs = scipy.linalg.solve_triangular(sigma_chol,x.T,lower=True)\n out = -1./2. * inner1d(xs.T,xs.T) - D/2*np.log(2*np.pi) \\\n - np.log(sigma_chol.diagonal()).sum()\n out[bads] = 0\n return out\n ### plotting\n def plot(self,data=None,indices=None,color='b',plot_params=True,label=''):\n from util import project_data, plot_gaussian_projection, plot_gaussian_2D\n if data is not None:\n data = flattendata_distribution(data)\n D = self.mu.shape[0]\n if D > 2 and ((not hasattr(self,'plotting_subspace_basis'))\n or (self.plotting_subspace_basis.shape[1] != D)):\n # TODO improve this bookkeeping. need a notion of collection. 
it's\n # totally potentially broken and confusing to set class members like\n # this!\n subspace = np.random.randn(D,2)\n self.__class__.plotting_subspace_basis = np.linalg.qr(subspace)[0].T.copy()\n if data is not None:\n if D > 2:\n data = project_data(data,self.plotting_subspace_basis)\n plt.plot(data[:,0],data[:,1],marker='.',linestyle=' ',color=color)\n if plot_params:\n if D > 2:\n plot_gaussian_projection(self.mu,self.sigma,self.plotting_subspace_basis,\n color=color,label=label)\n else:\n plot_gaussian_2D(self.mu,self.sigma,color=color,label=label)\n def to_json_dict(self):\n D = self.mu.shape[0]\n assert D == 2\n U,s,_ = np.linalg.svd(self.sigma)\n U /= np.linalg.det(U)\n theta = np.arctan2(U[0,0],U[0,1])*180/np.pi\n return {'x':self.mu[0],'y':self.mu[1],'rx':np.sqrt(s[0]),'ry':np.sqrt(s[1]),\n 'theta':theta}\n\nclass Gaussian(_GaussianBase, GibbsSampling, MeanField, Collapsed, MAP, MaxLikelihood):\n '''\n Multivariate Gaussian distribution class.\n NOTE: Only works for 2 or more dimensions. For a scalar Gaussian, use one of\n the scalar classes. Uses a conjugate Normal/Inverse-Wishart prior.\n Hyperparameters mostly follow Gelman et al.'s notation in Bayesian Data\n Analysis, except sigma_0 is proportional to expected covariance matrix:\n nu_0, sigma_0\n mu_0, kappa_0\n Parameters are mean and covariance matrix:\n mu, sigma\n '''\n def __init__(self,mu=None,sigma=None,\n mu_0=None,sigma_0=None,kappa_0=None,nu_0=None,\n kappa_mf=None,nu_mf=None):\n self.mu = mu\n self.sigma = sigma\n self.mu_0 = mu_0\n self.sigma_0 = sigma_0\n self.kappa_0 = kappa_0\n self.nu_0 = nu_0\n self.kappa_mf = kappa_mf if kappa_mf is not None else kappa_0\n self.nu_mf = nu_mf if nu_mf is not None else nu_0\n self.mu_mf = mu\n self.sigma_mf = sigma\n if (mu,sigma) == (None,None) and None not in (mu_0,sigma_0,kappa_0,nu_0):\n self.resample() # initialize from prior\n @property\n def hypparams(self):\n return dict(mu_0=self.mu_0,sigma_0=self.sigma_0,kappa_0=self.kappa_0,nu_0=self.nu_0)\n @property\n def num_parameters(self):\n D = len(self.mu)\n return D*(D+1)/2\n @staticmethod\n def _get_statistics(data,D=None):\n n = getdatasize(data)\n if n > 0:\n D = getdatadimension(data) if D is None else D\n if isinstance(data,np.ndarray):\n xbar = np.reshape(data,(-1,D)).mean(0)\n centered = data - xbar\n sumsq = np.dot(centered.T,centered)\n else:\n xbar = sum(np.reshape(d,(-1,D)).sum(0) for d in data) / n\n sumsq = sum(np.dot((np.reshape(d,(-1,D))-xbar).T,(np.reshape(d,(-1,D))-xbar))\n for d in data)\n else:\n xbar, sumsq = None, None\n return n, xbar, sumsq\n @staticmethod\n def _get_weighted_statistics(data,weights,D=None):\n # NOTE: _get_statistics is special case with all weights being 1\n # this is kept as a separate method for speed and modularity\n if isinstance(data,np.ndarray):\n neff = weights.sum()\n if neff > 0:\n D = getdatadimension(data) if D is None else D\n xbar = np.dot(weights,np.reshape(data,(-1,D))) / neff\n centered = np.reshape(data,(-1,D)) - xbar\n sumsq = np.dot(centered.T,(weights[:,na] * centered))\n else:\n xbar, sumsq = None, None\n else:\n neff = sum(w.sum() for w in weights)\n if neff > 0:\n D = getdatadimension(data) if D is None else D\n xbar = sum(np.dot(w,np.reshape(d,(-1,D))) for w,d in zip(weights,data)) / neff\n sumsq = sum(np.dot((np.reshape(d,(-1,D))-xbar).T,w[:,na]*(np.reshape(d,(-1,D))-xbar))\n for w,d in zip(weights,data))\n else:\n xbar, sumsq = None, None\n return neff, xbar, sumsq\n def _posterior_hypparams(self,n,xbar,sumsq):\n mu_0, sigma_0, kappa_0, nu_0 = self.mu_0, 
self.sigma_0, self.kappa_0, self.nu_0\n if n > 0:\n mu_n = self.kappa_0 / (self.kappa_0 + n) * self.mu_0 + n / (self.kappa_0 + n) * xbar\n kappa_n = self.kappa_0 + n\n nu_n = self.nu_0 + n\n sigma_n = self.sigma_0 + sumsq + \\\n self.kappa_0*n/(self.kappa_0+n) * np.outer(xbar-self.mu_0,xbar-self.mu_0)\n return mu_n, sigma_n, kappa_n, nu_n\n else:\n return mu_0, sigma_0, kappa_0, nu_0\n def empirical_bayes(self,data):\n D = getdatadimension(data)\n self.kappa_0 = 0\n self.nu_0 = 0\n self.mu_0 = np.zeros(D)\n self.sigma_0 = np.zeros((D,D))\n self.mu_0, self.sigma_0, self.kappa_0, self.nu_0 = \\\n self._posterior_hypparams(*self._get_statistics(data))\n if (self.mu,self.sigma) == (None,None):\n self.resample() # intialize from prior\n return self\n ### Gibbs sampling\n def resample(self,data=[]):\n D = len(self.mu_0)\n self.mu_mf, self.sigma_mf = self.mu, self.sigma = \\\n sample_niw(*self._posterior_hypparams(*self._get_statistics(data,D)))\n return self\n def copy_sample(self):\n new = copy.copy(self)\n new.mu = self.mu.copy()\n new.sigma = self.sigma.copy()\n return new\n ### Mean Field\n # NOTE my sumsq is Bishop's Nk*Sk\n def _get_sigma_mf(self):\n return self._sigma_mf\n def _set_sigma_mf(self,val):\n self._sigma_mf = val\n self._sigma_mf_chol = None\n sigma_mf = property(_get_sigma_mf,_set_sigma_mf)\n @property\n def sigma_mf_chol(self):\n if self._sigma_mf_chol is None:\n self._sigma_mf_chol = np.linalg.cholesky(self.sigma_mf)\n return self._sigma_mf_chol\n def meanfieldupdate(self,data,weights):\n # update\n D = len(self.mu_0)\n self.mu_mf, self.sigma_mf, self.kappa_mf, self.nu_mf = \\\n self._posterior_hypparams(*self._get_weighted_statistics(data,weights,D))\n self.mu, self.sigma = self.mu_mf, self.sigma_mf/(self.nu_mf - D - 1) # for plotting\n def get_vlb(self):\n # return avg energy plus entropy, our contribution to the mean field\n # variational lower bound\n D = len(self.mu_0)\n loglmbdatilde = self._loglmbdatilde()\n # see Eq. 10.77 in Bishop\n q_entropy = -0.5 * (loglmbdatilde + D * (np.log(self.kappa_mf/(2*np.pi))-1)) \\\n + invwishart_entropy(self.sigma_mf,self.nu_mf)\n # see Eq. 10.74 in Bishop, we aren't summing over K\n p_avgengy = 0.5 * (D * np.log(self.kappa_0/(2*np.pi)) + loglmbdatilde \\\n - D*self.kappa_0/self.kappa_mf - self.kappa_0*self.nu_mf*\\\n np.dot(self.mu_mf -\n self.mu_0,np.linalg.solve(self.sigma_mf,self.mu_mf - self.mu_0))) \\\n + invwishart_log_partitionfunction(self.sigma_0,self.nu_0) \\\n + (self.nu_0 - D - 1)/2*loglmbdatilde - 1/2*self.nu_mf*\\\n np.linalg.solve(self.sigma_mf,self.sigma_0).trace()\n return p_avgengy + q_entropy\n def expected_log_likelihood(self,x):\n mu_n, sigma_n, kappa_n, nu_n = self.mu_mf, self.sigma_mf, self.kappa_mf, self.nu_mf\n D = len(mu_n)\n x = np.reshape(x,(-1,D)) - mu_n # x is now centered\n xs = np.linalg.solve(self.sigma_mf_chol,x.T)\n # see Eqs. 10.64, 10.67, and 10.71 in Bishop\n return self._loglmbdatilde()/2 - D/(2*kappa_n) - nu_n/2 * \\\n inner1d(xs.T,xs.T) - D/2*np.log(2*np.pi)\n def _loglmbdatilde(self):\n # see Eq. 
10.65 in Bishop\n D = len(self.mu_0)\n chol = self.sigma_mf_chol\n return special.digamma((self.nu_mf-np.arange(D))/2).sum() \\\n + D*np.log(2) - 2*np.log(chol.diagonal()).sum()\n ### Collapsed\n def log_marginal_likelihood(self,data):\n n, D = getdatasize(data), len(self.mu_0)\n return self._log_partition_function(*self._posterior_hypparams(*self._get_statistics(data))) \\\n - self._log_partition_function(self.mu_0,self.sigma_0,self.kappa_0,self.nu_0) \\\n - n*D/2 * np.log(2*np.pi)\n def _log_partition_function(self,mu,sigma,kappa,nu):\n D = len(mu)\n chol = np.linalg.cholesky(sigma)\n return nu*D/2*np.log(2) + special.multigammaln(nu/2,D) + D/2*np.log(2*np.pi/kappa) \\\n - nu*np.log(chol.diagonal()).sum()\n def log_predictive_studentt_datapoints(self,datapoints,olddata):\n D = len(self.mu_0)\n mu_n, sigma_n, kappa_n, nu_n = self._posterior_hypparams(*self._get_statistics(olddata,D))\n return multivariate_t_loglik(datapoints,nu_n-D+1,mu_n,(kappa_n+1)/(kappa_n*(nu_n-D+1))*sigma_n)\n def log_predictive_studentt(self,newdata,olddata):\n # an alternative computation to the generic log_predictive, which is implemented\n # in terms of log_marginal_likelihood. mostly for testing, I think\n newdata = np.atleast_2d(newdata)\n return sum(self.log_predictive_studentt_datapoints(d,combinedata_distribution((olddata,newdata[:i])))[0]\n for i,d in enumerate(newdata))\n ### Max likelihood\n # NOTE: could also use sumsq/(n-1) as the covariance estimate, which would\n # be unbiased but not max likelihood, but if we're in the regime where that\n # matters we've got bigger problems!\n def max_likelihood(self,data,weights=None):\n D = getdatadimension(data)\n if weights is None:\n n, muhat, sumsq = self._get_statistics(data)\n else:\n n, muhat, sumsq = self._get_weighted_statistics(data,weights)\n # this SVD is necessary to check if the max likelihood solution is\n # degenerate, which can happen in the EM algorithm\n if n < D or (np.linalg.svd(sumsq,compute_uv=False) > 1e-6).sum() < D:\n # broken!\n self.mu = 99999999*np.ones(D)\n self.sigma = np.eye(D)\n self.broken = True\n else:\n self.mu = muhat\n self.sigma = sumsq/n\n return self\n def MAP(self,data,weights=None):\n # max likelihood with prior pseudocounts included in data\n if weights is None:\n n, muhat, sumsq = self._get_statistics(data)\n else:\n n, muhat, sumsq = self._get_weighted_statistics(data,weights)\n\n self.mu, self.sigma, _, _ = self._posterior_hypparams(n,muhat,sumsq)\n return self\n\n\n##########################################################\n# Scalar Gaussian distribution classes #\n##########################################################\nclass _ScalarGaussianBase(object):\n @property\n def params(self):\n return dict(mu=self.mu,sigmasq=self.sigmasq)\n def rvs(self,size=None):\n return np.sqrt(self.sigmasq)*np.random.normal(size=size)+self.mu\n def log_likelihood(self,x):\n x = np.reshape(x,(-1,1))\n return (-0.5*(x-self.mu)**2/self.sigmasq - np.log(np.sqrt(2*np.pi*self.sigmasq))).ravel()\n def __repr__(self):\n return self.__class__.__name__ + '(mu=%f,sigmasq=%f)' % (self.mu,self.sigmasq)\n def plot(self,data=None,indices=None,color='b',plot_params=True,label=None):\n data = np.concatenate(data) if data is not None else None\n indices = np.concatenate(indices) if indices is not None else None\n if data is not None:\n assert indices is not None\n plt.plot(indices,data,color=color,marker='x',linestyle='')\n if plot_params:\n assert indices is not None\n if len(indices) > 1:\n from util import rle\n vals, lens = 
rle(np.diff(indices))\n starts = np.concatenate(((0,),lens.cumsum()[:-1]))\n for start, blocklen in zip(starts[vals == 1], lens[vals == 1]):\n plt.plot(indices[start:start+blocklen],\n np.repeat(self.mu,blocklen),color=color,linestyle='--')\n else:\n plt.plot(indices,[self.mu],color=color,marker='+')\n\n# TODO meanfield, max_likelihood\nclass ScalarGaussianNIX(_ScalarGaussianBase, GibbsSampling, Collapsed):\n '''\n Conjugate Normal-(Scaled-)Inverse-ChiSquared prior. (Another parameterization is the\n Normal-Inverse-Gamma.)\n '''\n def __init__(self,mu=None,sigmasq=None,mu_0=None,kappa_0=None,sigmasq_0=None,nu_0=None):\n self.mu = mu\n self.sigmasq = sigmasq\n self.mu_0 = mu_0\n self.kappa_0 = kappa_0\n self.sigmasq_0 = sigmasq_0\n self.nu_0 = nu_0\n if (mu,sigmasq) == (None,None) and None not in (mu_0,kappa_0,sigmasq_0,nu_0):\n self.resample() # intialize from prior\n @property\n def hypparams(self):\n return dict(mu_0=self.mu_0,kappa_0=self.kappa_0,\n sigmasq_0=self.sigmasq_0,nu_0=self.nu_0)\n def _posterior_hypparams(self,n,ybar,sumsqc):\n mu_0, kappa_0, sigmasq_0, nu_0 = self.mu_0, self.kappa_0, self.sigmasq_0, self.nu_0\n if n > 0:\n kappa_n = kappa_0 + n\n mu_n = (kappa_0 * mu_0 + n * ybar) / kappa_n\n nu_n = nu_0 + n\n sigmasq_n = 1/nu_n * (nu_0 * sigmasq_0 + sumsqc + kappa_0 * n / (kappa_0 + n) * (ybar - mu_0)**2)\n return mu_n, kappa_n, sigmasq_n, nu_n\n else:\n return mu_0, kappa_0, sigmasq_0, nu_0\n ### Gibbs sampling\n def resample(self,data=[]):\n mu_n, kappa_n, sigmasq_n, nu_n = self._posterior_hypparams(*self._get_statistics(data))\n self.sigmasq = nu_n * sigmasq_n / np.random.chisquare(nu_n)\n self.mu = np.sqrt(self.sigmasq / kappa_n) * np.random.randn() + mu_n\n return self\n def _get_statistics(self,data):\n assert isinstance(data,np.ndarray) or \\\n (isinstance(data,list) and all((isinstance(d,np.ndarray))\n for d in data)) or \\\n (isinstance(data,int) or isinstance(data,float))\n n = getdatasize(data)\n if n > 0:\n if isinstance(data,np.ndarray):\n ybar = data.mean()\n sumsqc = ((data-ybar)**2).sum()\n elif isinstance(data,list):\n ybar = sum(d.sum() for d in data)/n\n sumsqc = sum(np.sum((d-ybar)**2) for d in data)\n else:\n ybar = data\n sumsqc = 0\n else:\n ybar = None\n sumsqc = None\n return n, ybar, sumsqc\n ### Collapsed\n def log_marginal_likelihood(self,data):\n n = getdatasize(data)\n mu_0, kappa_0, sigmasq_0, nu_0 = self.mu_0, self.kappa_0, self.sigmasq_0, self.nu_0\n mu_n, kappa_n, sigmasq_n, nu_n = self._posterior_hypparams(*self._get_statistics(data))\n return special.gammaln(nu_n/2) - special.gammaln(nu_0/2) \\\n + 0.5*(np.log(kappa_0) - np.log(kappa_n) \\\n + nu_0 * (np.log(nu_0) + np.log(sigmasq_0)) \\\n - nu_n * (np.log(nu_n) + np.log(sigmasq_n)) \\\n - n*np.log(np.pi))\n def log_predictive_single(self,y,olddata):\n # mostly for testing or speed\n mu_n, kappa_n, sigmasq_n, nu_n = self._posterior_hypparams(*self._get_statistics(olddata))\n return stats.t.logpdf(y,nu_n,loc=mu_n,scale=np.sqrt((1+kappa_n)*sigmasq_n/kappa_n))\n\n\n##########################################################\n# Poisson distribution classes #\n##########################################################\nclass Poisson(GibbsSampling, Collapsed):\n '''\n Poisson distribution with a conjugate Gamma prior.\n NOTE: the support is {0,1,2,...}\n Hyperparameters (following Wikipedia's notation):\n alpha_0, beta_0\n Parameter is the mean/variance parameter:\n lmbda\n '''\n def __init__(self,lmbda=None,alpha_0=None,beta_0=None):\n self.lmbda = lmbda\n self.alpha_0 = alpha_0\n self.beta_0 = 
beta_0\n if lmbda is None and None not in (alpha_0,beta_0):\n self.resample() # intialize from prior\n @property\n def params(self):\n return dict(lmbda=self.lmbda)\n @property\n def hypparams(self):\n return dict(alpha_0=self.alpha_0,beta_0=self.beta_0)\n def log_sf(self,x):\n return stats.poisson.logsf(x,self.lmbda)\n def _posterior_hypparams(self,n,tot):\n return self.alpha_0 + tot, self.beta_0 + n\n def rvs(self,size=None):\n return np.random.poisson(self.lmbda,size=size)\n def log_likelihood(self,x):\n lmbda = self.lmbda\n x = np.array(x,ndmin=1)\n raw = np.empty(x.shape)\n raw[x>=0] = -lmbda + x[x>=0]*np.log(lmbda) - special.gammaln(x[x>=0]+1)\n raw[x<0] = -np.inf\n return raw if isinstance(x,np.ndarray) else raw[0]\n ### Gibbs Sampling\n def resample(self,data=[]):\n alpha_n, beta_n = self._posterior_hypparams(*self._get_statistics(data))\n self.lmbda = np.random.gamma(alpha_n,1/beta_n)\n return self\n def _get_statistics(self,data):\n if isinstance(data,np.ndarray):\n n = data.shape[0]\n tot = data.sum()\n elif isinstance(data,list):\n n = sum(d.shape[0] for d in data)\n tot = sum(d.sum() for d in data)\n else:\n assert isinstance(data,int)\n n = 1\n tot = data\n return n, tot\n def _get_weighted_statistics(self,data,weights):\n pass # TODO\n ### Collapsed\n def log_marginal_likelihood(self,data):\n return self._log_partition_function(*self._posterior_hypparams(*self._get_statistics(data))) \\\n - self._log_partition_function(self.alpha_0,self.beta_0) \\\n - self._get_sum_of_gammas(data)\n def _log_partition_function(self,alpha,beta):\n return special.gammaln(alpha) - alpha * np.log(beta)\n def _get_sum_of_gammas(self,data):\n if isinstance(data,np.ndarray):\n return special.gammaln(data+1).sum()\n elif isinstance(data,list):\n return sum(special.gammaln(d+1).sum() for d in data)\n else:\n assert isinstance(data,int)\n return special.gammaln(data+1)\n ### Max likelihood\n def max_likelihood(self,data,weights=None):\n if weights is None:\n n, tot = self._get_statistics(data)\n else:\n n, tot = self._get_weighted_statistics(data,weights)\n self.lmbda = tot/n\n\nclass PoissonDuration(_StartAtOneMixin,Poisson,DurationDistribution):\n pass\n" ]
[ [ "numpy.dot", "numpy.core.umath_tests.inner1d", "numpy.random.chisquare", "numpy.linalg.qr", "numpy.outer", "numpy.concatenate", "numpy.random.normal", "numpy.empty", "numpy.nan_to_num", "numpy.log", "numpy.random.poisson", "numpy.seterr", "numpy.eye", "numpy.arange", "numpy.sqrt", "numpy.linalg.cholesky", "numpy.atleast_2d", "numpy.array", "numpy.reshape", "numpy.zeros", "numpy.random.randn", "numpy.linalg.det", "numpy.diff", "numpy.arctan2", "numpy.linalg.svd", "scipy.stats.poisson.logsf", "scipy.special.gammaln", "scipy.special.multigammaln", "numpy.random.gamma", "numpy.sum", "matplotlib.pyplot.plot", "numpy.ones", "numpy.linalg.solve", "numpy.repeat" ] ]
Lelin-HUNUST/VISTA
[ "7bf34132d719cb0e5e803b92cd15451df58a9a5d", "7bf34132d719cb0e5e803b92cd15451df58a9a5d" ]
[ "det3d/models/backbones/scn.py", "det3d/datasets/utils/eval.py" ]
[ "import time\n\nimport numpy as np\nimport spconv.pytorch as spconv \nfrom spconv.pytorch import ops\nfrom spconv.pytorch import SparseConv3d, SubMConv3d\nimport torch\nfrom det3d.models.utils import Empty, change_default_args\nfrom det3d.torchie.cnn import constant_init, kaiming_init\nfrom det3d.torchie.trainer import load_checkpoint\nfrom torch import nn\nfrom torch.nn import BatchNorm1d\nfrom torch.nn import functional as F\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom .. import builder\nfrom ..registry import BACKBONES\nfrom ..utils import build_conv_layer, build_norm_layer\n\ndef replace_feature(out, new_features):\n if \"replace_feature\" in out.__dir__():\n # spconv 2.x behaviour\n return out.replace_feature(new_features)\n else:\n out.features = new_features\n return out\n\ndef conv3x3(in_planes, out_planes, stride=1, indice_key=None, bias=True):\n \"\"\"3x3 convolution with padding\"\"\"\n return spconv.SubMConv3d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=bias,\n indice_key=indice_key,\n )\n\n\ndef conv1x1(in_planes, out_planes, stride=1, indice_key=None, bias=True):\n \"\"\"1x1 convolution\"\"\"\n return spconv.SubMConv3d(\n in_planes,\n out_planes,\n kernel_size=1,\n stride=stride,\n padding=1,\n bias=bias,\n indice_key=indice_key,\n )\n\n\nclass SparseBasicBlock(spconv.SparseModule):\n expansion = 1\n\n def __init__(\n self,\n inplanes,\n planes,\n stride=1,\n norm_cfg=None,\n downsample=None,\n indice_key=None,\n ):\n super(SparseBasicBlock, self).__init__()\n\n if norm_cfg is None:\n #norm_cfg = dict(type=\"BN1d\", eps=1e-3, momentum=0.01)\n norm_cfg = dict(type=\"BN1d\", eps=1e-5, momentum=0.1)\n\n bias = norm_cfg is not None\n\n self.conv1 = conv3x3(inplanes, planes, stride, indice_key=indice_key, bias=bias)\n self.bn1 = build_norm_layer(norm_cfg, planes)[1]\n self.relu = nn.ReLU()\n self.conv2 = conv3x3(planes, planes, indice_key=indice_key, bias=bias)\n self.bn2 = build_norm_layer(norm_cfg, planes)[1]\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = replace_feature(out, self.bn1(out.features))\n out = replace_feature(out, self.relu(out.features))\n\n out = self.conv2(out)\n out = replace_feature(out, self.bn2(out.features))\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out = replace_feature(out, out.features + identity.features)\n out = replace_feature(out, self.relu(out.features))\n\n return out\n\n\[email protected]_module\nclass SpMiddleFHD(nn.Module):\n def __init__(\n self, num_input_features=128, norm_cfg=None, name=\"SpMiddleFHD\", **kwargs\n ):\n super(SpMiddleFHD, self).__init__()\n self.name = name\n\n self.dcn = None\n self.zero_init_residual = False\n\n if norm_cfg is None:\n norm_cfg = dict(type=\"BN1d\", eps=1e-3, momentum=0.01)\n\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 16, 3, bias=False, indice_key=\"subm0\"),\n build_norm_layer(norm_cfg, 16)[1],\n nn.ReLU(),\n SubMConv3d(16, 16, 3, bias=False, indice_key=\"subm0\"),\n build_norm_layer(norm_cfg, 16)[1],\n nn.ReLU(),\n SparseConv3d(\n 16, 32, 3, 2, padding=1, bias=False\n ), # [1600, 1200, 41] -> [800, 600, 21]\n build_norm_layer(norm_cfg, 32)[1],\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\", bias=False),\n build_norm_layer(norm_cfg, 32)[1],\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\", bias=False),\n build_norm_layer(norm_cfg, 32)[1],\n nn.ReLU(),\n SparseConv3d(\n 32, 64, 3, 2, 
padding=1, bias=False\n ), # [800, 600, 21] -> [400, 300, 11]\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\", bias=False),\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\", bias=False),\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\", bias=False),\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SparseConv3d(\n 64, 64, 3, 2, padding=[0, 1, 1], bias=False\n ), # [400, 300, 11] -> [200, 150, 5]\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\", bias=False),\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\", bias=False),\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\", bias=False),\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SparseConv3d(\n 64, 64, (3, 1, 1), (2, 1, 1), bias=False\n ), # [200, 150, 5] -> [200, 150, 2]\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n )\n\n def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = logging.getLogger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n kaiming_init(m)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n\n if self.dcn is not None:\n for m in self.modules():\n if isinstance(m, Bottleneck) and hasattr(m, \"conv2_offset\"):\n constant_init(m.conv2_offset, 0)\n\n if self.zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n constant_init(m.norm3, 0)\n elif isinstance(m, BasicBlock):\n constant_init(m.norm2, 0)\n else:\n raise TypeError(\"pretrained must be a str or None\")\n\n def forward(self, voxel_features, coors, batch_size, input_shape):\n\n # input: # [41, 1600, 1408]\n sparse_shape = np.array(input_shape[::-1]) + [1, 0, 0]\n coors = coors.int()\n\n ret = spconv.SparseConvTensor(voxel_features, coors, sparse_shape, batch_size)\n ret = self.middle_conv(ret)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n\n return ret\n\n\[email protected]_module\nclass SpMiddleFHDNobn(nn.Module):\n def __init__(\n self, num_input_features=128, norm_cfg=None, name=\"SpMiddleFHD\", **kwargs\n ):\n super(SpMiddleFHDNobn, self).__init__()\n self.name = name\n\n self.dcn = None\n self.zero_init_residual = False\n\n if norm_cfg is None:\n norm_cfg = dict(type=\"BN1d\", eps=1e-3, momentum=0.01)\n\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 16, 3, bias=True, indice_key=\"subm0\"),\n # build_norm_layer(norm_cfg, 16)[1],\n nn.ReLU(),\n SubMConv3d(16, 16, 3, bias=True, indice_key=\"subm0\"),\n # build_norm_layer(norm_cfg, 16)[1],\n nn.ReLU(),\n SparseConv3d(\n 16, 32, 3, 2, padding=1, bias=True\n ), # [1600, 1200, 41] -> [800, 600, 21]\n # build_norm_layer(norm_cfg, 32)[1],\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\", bias=True),\n # build_norm_layer(norm_cfg, 32)[1],\n nn.ReLU(),\n SubMConv3d(32, 32, 3, indice_key=\"subm1\", bias=True),\n # build_norm_layer(norm_cfg, 32)[1],\n nn.ReLU(),\n SparseConv3d(\n 32, 64, 3, 2, padding=1, bias=True\n ), # [800, 600, 21] -> [400, 300, 11]\n # build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\", bias=True),\n # build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\", bias=True),\n # 
build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm2\", bias=True),\n # build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SparseConv3d(\n 64, 64, 3, 2, padding=[0, 1, 1], bias=True\n ), # [400, 300, 11] -> [200, 150, 5]\n # build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\", bias=True),\n # build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\", bias=True),\n # build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, indice_key=\"subm3\", bias=True),\n # build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SparseConv3d(\n 64, 64, (3, 1, 1), (2, 1, 1), bias=True\n ), # [200, 150, 5] -> [200, 150, 2]\n # build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n )\n\n def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = logging.getLogger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n kaiming_init(m)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n\n if self.dcn is not None:\n for m in self.modules():\n if isinstance(m, Bottleneck) and hasattr(m, \"conv2_offset\"):\n constant_init(m.conv2_offset, 0)\n\n if self.zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n constant_init(m.norm3, 0)\n elif isinstance(m, BasicBlock):\n constant_init(m.norm2, 0)\n else:\n raise TypeError(\"pretrained must be a str or None\")\n\n def forward(self, voxel_features, coors, batch_size, input_shape):\n\n # input: # [41, 1600, 1408]\n sparse_shape = np.array(input_shape[::-1]) + [1, 0, 0]\n coors = coors.int()\n\n ret = spconv.SparseConvTensor(voxel_features, coors, sparse_shape, batch_size)\n ret = self.middle_conv(ret)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n\n return ret\n\n\[email protected]_module\nclass SpMiddleResNetFHD(nn.Module):\n def __init__(\n self, num_input_features=128, norm_cfg=None, name=\"SpMiddleResNetFHD\", **kwargs\n ):\n super(SpMiddleResNetFHD, self).__init__()\n self.name = name\n\n self.dcn = None\n self.zero_init_residual = False\n\n if norm_cfg is None:\n #norm_cfg = dict(type=\"BN1d\", eps=1e-3, momentum=0.01)\n norm_cfg = dict(type=\"BN1d\", eps=1e-5, momentum=0.1)\n # input: # [1600, 1200, 41]\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 16, 3, bias=False, indice_key=\"res0\"),\n build_norm_layer(norm_cfg, 16)[1],\n nn.ReLU(),\n SparseBasicBlock(16, 16, norm_cfg=norm_cfg, indice_key=\"res0\"),\n SparseBasicBlock(16, 16, norm_cfg=norm_cfg, indice_key=\"res0\"),\n SparseConv3d(\n 16, 32, 3, 2, padding=1, bias=False\n ), # [1600, 1200, 41] -> [800, 600, 21]\n build_norm_layer(norm_cfg, 32)[1],\n nn.ReLU(),\n SparseBasicBlock(32, 32, norm_cfg=norm_cfg, indice_key=\"res1\"),\n SparseBasicBlock(32, 32, norm_cfg=norm_cfg, indice_key=\"res1\"),\n SparseConv3d(\n 32, 64, 3, 2, padding=1, bias=False\n ), # [800, 600, 21] -> [400, 300, 11]\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SparseBasicBlock(64, 64, norm_cfg=norm_cfg, indice_key=\"res2\"),\n SparseBasicBlock(64, 64, norm_cfg=norm_cfg, indice_key=\"res2\"),\n SparseConv3d(\n 64, 128, 3, 2, padding=[0, 1, 1], bias=False\n ), # [400, 300, 11] -> [200, 150, 5]\n build_norm_layer(norm_cfg, 128)[1],\n nn.ReLU(),\n SparseBasicBlock(128, 128, norm_cfg=norm_cfg, indice_key=\"res3\"),\n SparseBasicBlock(128, 128, norm_cfg=norm_cfg, 
indice_key=\"res3\"),\n SparseConv3d(\n 128, 128, (3, 1, 1), (2, 1, 1), bias=False\n ), # [200, 150, 5] -> [200, 150, 2]\n build_norm_layer(norm_cfg, 128)[1],\n nn.ReLU(),\n )\n self.mode = kwargs.get('mode', 'bev')\n\n def forward(self, voxel_features, coors, batch_size, input_shape):\n\n # input: # [41, 1600, 1408]\n sparse_shape = np.array(input_shape[::-1]) + [1, 0, 0]\n\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, sparse_shape, batch_size)\n ret = self.middle_conv(ret)\n ret = ret.dense()\n\n N, C, D, H, W = ret.shape\n if self.mode == 'bev':\n ret = ret.view(N, C * D, H, W)\n\n return ret\n\n\[email protected]_module\nclass RCNNSpMiddleFHD(nn.Module):\n def __init__(\n self, num_input_features=128, norm_cfg=None, name=\"RCNNSpMiddleFHD\", **kwargs\n ):\n super(RCNNSpMiddleFHD, self).__init__()\n self.name = name\n\n self.dcn = None\n self.zero_init_residual = False\n\n if norm_cfg is None:\n norm_cfg = dict(type=\"BN1d\", eps=1e-3, momentum=0.01)\n\n self.middle_conv = spconv.SparseSequential(\n SubMConv3d(num_input_features, 16, 3, bias=False, indice_key=\"subm0\"),\n build_norm_layer(norm_cfg, 16)[1],\n nn.ReLU(),\n SubMConv3d(16, 16, 3, bias=False, indice_key=\"subm0\"),\n build_norm_layer(norm_cfg, 16)[1],\n nn.ReLU(),\n SparseConv3d(\n 16, 32, 3, 2, padding=1, bias=False\n ), # [32, 80, 41] -> [16, 40, 21]\n build_norm_layer(norm_cfg, 32)[1],\n nn.ReLU(),\n SubMConv3d(32, 32, 3, bias=False, indice_key=\"subm1\"),\n build_norm_layer(norm_cfg, 32)[1],\n nn.ReLU(),\n # SubMConv3d(32, 32, 3, bias=False, indice_key=\"subm1\"),\n # build_norm_layer(norm_cfg, 32)[1],\n # nn.ReLU(),\n SparseConv3d(\n 32, 64, 3, 2, bias=False, padding=1\n ), # [16, 40, 21] -> [8, 20, 11]\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, bias=False, indice_key=\"subm2\"),\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n # SubMConv3d(64, 64, 3, bias=False, indice_key=\"subm2\"),\n # build_norm_layer(norm_cfg, 64)[1],\n # nn.ReLU(),\n # SubMConv3d(64, 64, 3, bias=False, indice_key=\"subm2\"),\n # build_norm_layer(norm_cfg, 64)[1],\n # nn.ReLU(),\n SparseConv3d(\n 64, 64, 3, 2, bias=False, padding=[1, 1, 0]\n ), # [8, 20, 11] -> [4, 10, 5]\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n SubMConv3d(64, 64, 3, bias=False, indice_key=\"subm3\"),\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n # SubMConv3d(64, 64, 3, bias=False, indice_key=\"subm3\"),\n # build_norm_layer(norm_cfg, 64)[1],\n # nn.ReLU(),\n # SubMConv3d(64, 64, 3, bias=False, indice_key=\"subm3\"),\n # build_norm_layer(norm_cfg, 64)[1],\n # nn.ReLU(),\n SparseConv3d(\n 64, 64, (1, 1, 3), (1, 1, 2), bias=False\n ), # [4, 10, 5] -> [4, 10, 2]\n build_norm_layer(norm_cfg, 64)[1],\n nn.ReLU(),\n )\n\n def forward(self, voxel_features, coors, batch_size, input_shape):\n\n # input: # [41, 1600, 1408]\n sparse_shape = np.array(input_shape[::-1]) + [0, 0, 1]\n\n # coors[:, 1] += 1\n coors = coors.int()\n ret = spconv.SparseConvTensor(voxel_features, coors, sparse_shape, batch_size)\n\n ret = self.middle_conv(ret)\n\n ret = ret.dense()\n\n #ret = ret.permute(0, 1, 4, 2, 3).contiguous()\n #N, C, W, D, H = ret.shape\n N, C, D, H, W = ret.shape\n #while 1: print(N, C, D, H, W)\n ret = ret.view(N, C * D, H, W)\n #ret = ret.view(N, C * W, D, H)\n\n return ret\n", "import numpy as np\nimport numba\n\nfrom det3d.core import box_np_ops\n\n\ndef get_split_parts(num, num_part):\n same_part = num // num_part\n remain_num = num % num_part\n if remain_num == 0:\n return [same_part] * num_part\n else:\n 
return [same_part] * num_part + [remain_num]\n\n\ndef prepare_data(gt_annos, dt_annos, current_class, difficulty=None, clean_data=None):\n gt_datas_list = []\n dt_datas_list = []\n total_dc_num = []\n ignored_gts, ignored_dets, dontcares = [], [], []\n total_num_valid_gt = 0\n for i in range(len(gt_annos)):\n rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty)\n num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets\n ignored_gts.append(np.array(ignored_gt, dtype=np.int64))\n ignored_dets.append(np.array(ignored_det, dtype=np.int64))\n if len(dc_bboxes) == 0:\n dc_bboxes = np.zeros((0, 4)).astype(np.float64)\n else:\n dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)\n total_dc_num.append(dc_bboxes.shape[0])\n dontcares.append(dc_bboxes)\n total_num_valid_gt += num_valid_gt\n gt_datas = np.concatenate(\n [gt_annos[i][\"bbox\"], gt_annos[i][\"alpha\"][..., np.newaxis]], 1\n )\n dt_datas = np.concatenate(\n [\n dt_annos[i][\"bbox\"],\n dt_annos[i][\"alpha\"][..., np.newaxis],\n dt_annos[i][\"score\"][..., np.newaxis],\n ],\n 1,\n )\n gt_datas_list.append(gt_datas)\n dt_datas_list.append(dt_datas)\n total_dc_num = np.stack(total_dc_num, axis=0)\n return (\n gt_datas_list,\n dt_datas_list,\n ignored_gts,\n ignored_dets,\n dontcares,\n total_dc_num,\n total_num_valid_gt,\n )\n\n\ndef calculate_iou_partly(\n gt_annos, dt_annos, metric, num_parts=50, z_axis=1, z_center=1.0\n):\n \"\"\"fast iou algorithm. this function can be used independently to\n do result analysis.\n Args:\n gt_annos: dict, must from get_label_annos() in kitti_common.py\n dt_annos: dict, must from get_label_annos() in kitti_common.py\n metric: eval type. 0: bbox, 1: bev, 2: 3d\n num_parts: int. a parameter for fast calculate algorithm\n z_axis: height axis. kitti camera use 1, lidar use 2.\n \"\"\"\n assert len(gt_annos) == len(dt_annos)\n total_dt_num = np.stack([len(a[\"name\"]) for a in dt_annos], 0)\n total_gt_num = np.stack([len(a[\"name\"]) for a in gt_annos], 0)\n num_examples = len(gt_annos)\n split_parts = get_split_parts(num_examples, num_parts)\n parted_overlaps = []\n example_idx = 0\n bev_axes = list(range(3))\n bev_axes.pop(z_axis)\n split_parts = [i for i in split_parts if i != 0]\n for num_part in split_parts:\n gt_annos_part = gt_annos[example_idx : example_idx + num_part]\n dt_annos_part = dt_annos[example_idx : example_idx + num_part]\n if metric == 0:\n gt_boxes = np.concatenate([a[\"bbox\"] for a in gt_annos_part], 0)\n dt_boxes = np.concatenate([a[\"bbox\"] for a in dt_annos_part], 0)\n overlap_part = image_box_overlap(gt_boxes, dt_boxes)\n elif metric == 1:\n loc = np.concatenate([a[\"location\"][:, bev_axes] for a in gt_annos_part], 0)\n dims = np.concatenate(\n [a[\"dimensions\"][:, bev_axes] for a in gt_annos_part], 0\n )\n rots = np.concatenate([a[\"rotation_y\"] for a in gt_annos_part], 0)\n gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1)\n loc = np.concatenate([a[\"location\"][:, bev_axes] for a in dt_annos_part], 0)\n dims = np.concatenate(\n [a[\"dimensions\"][:, bev_axes] for a in dt_annos_part], 0\n )\n rots = np.concatenate([a[\"rotation_y\"] for a in dt_annos_part], 0)\n dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1)\n overlap_part = bev_box_overlap(gt_boxes, dt_boxes).astype(np.float64)\n elif metric == 2:\n loc = np.concatenate([a[\"location\"] for a in gt_annos_part], 0)\n dims = np.concatenate([a[\"dimensions\"] for a in gt_annos_part], 0)\n rots = np.concatenate([a[\"rotation_y\"] for a in gt_annos_part], 0)\n gt_boxes 
= np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1)\n loc = np.concatenate([a[\"location\"] for a in dt_annos_part], 0)\n dims = np.concatenate([a[\"dimensions\"] for a in dt_annos_part], 0)\n rots = np.concatenate([a[\"rotation_y\"] for a in dt_annos_part], 0)\n dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1)\n overlap_part = box3d_overlap(\n gt_boxes, dt_boxes, z_axis=z_axis, z_center=z_center\n ).astype(np.float64)\n else:\n raise ValueError(\"unknown metric\")\n parted_overlaps.append(overlap_part)\n example_idx += num_part\n\n overlaps = []\n example_idx = 0\n for j, num_part in enumerate(split_parts):\n gt_annos_part = gt_annos[example_idx : example_idx + num_part]\n dt_annos_part = dt_annos[example_idx : example_idx + num_part]\n gt_num_idx, dt_num_idx = 0, 0\n for i in range(num_part):\n gt_box_num = total_gt_num[example_idx + i]\n dt_box_num = total_dt_num[example_idx + i]\n overlaps.append(\n parted_overlaps[j][\n gt_num_idx : gt_num_idx + gt_box_num,\n dt_num_idx : dt_num_idx + dt_box_num,\n ]\n )\n gt_num_idx += gt_box_num\n dt_num_idx += dt_box_num\n example_idx += num_part\n\n return overlaps, parted_overlaps, total_gt_num, total_dt_num\n\n\n#@numba.jit(nopython=True)\ndef compute_statistics_jit(\n overlaps,\n gt_datas,\n dt_datas,\n ignored_gt,\n ignored_det,\n dc_bboxes,\n metric,\n min_overlap,\n thresh=0,\n compute_fp=False,\n compute_aos=False,\n):\n\n det_size = dt_datas.shape[0]\n gt_size = gt_datas.shape[0]\n dt_scores = dt_datas[:, -1]\n dt_alphas = dt_datas[:, 4]\n gt_alphas = gt_datas[:, 4]\n dt_bboxes = dt_datas[:, :4]\n # gt_bboxes = gt_datas[:, :4]\n\n assigned_detection = [False] * det_size\n ignored_threshold = [False] * det_size\n if compute_fp:\n for i in range(det_size):\n if dt_scores[i] < thresh:\n ignored_threshold[i] = True\n NO_DETECTION = -10000000\n tp, fp, fn, similarity = 0, 0, 0, 0\n # thresholds = [0.0]\n # delta = [0.0]\n thresholds = np.zeros((gt_size,))\n thresh_idx = 0\n delta = np.zeros((gt_size,))\n delta_idx = 0\n for i in range(gt_size):\n if ignored_gt[i] == -1:\n continue\n det_idx = -1\n valid_detection = NO_DETECTION\n max_overlap = 0\n assigned_ignored_det = False\n\n for j in range(det_size):\n if ignored_det[j] == -1:\n continue\n if assigned_detection[j]:\n continue\n if ignored_threshold[j]:\n continue\n overlap = overlaps[j, i]\n dt_score = dt_scores[j]\n if (\n not compute_fp\n and (overlap > min_overlap)\n and dt_score > valid_detection\n ):\n det_idx = j\n valid_detection = dt_score\n elif (\n compute_fp\n and (overlap > min_overlap)\n and (overlap > max_overlap or assigned_ignored_det)\n and ignored_det[j] == 0\n ):\n max_overlap = overlap\n det_idx = j\n valid_detection = 1\n assigned_ignored_det = False\n elif (\n compute_fp\n and (overlap > min_overlap)\n and (valid_detection == NO_DETECTION)\n and ignored_det[j] == 1\n ):\n det_idx = j\n valid_detection = 1\n assigned_ignored_det = True\n\n if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:\n fn += 1\n elif (valid_detection != NO_DETECTION) and (\n ignored_gt[i] == 1 or ignored_det[det_idx] == 1\n ):\n assigned_detection[det_idx] = True\n elif valid_detection != NO_DETECTION:\n # only a tp add a threshold.\n tp += 1\n # thresholds.append(dt_scores[det_idx])\n thresholds[thresh_idx] = dt_scores[det_idx]\n thresh_idx += 1\n if compute_aos:\n # delta.append(gt_alphas[i] - dt_alphas[det_idx])\n delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx]\n delta_idx += 1\n\n assigned_detection[det_idx] = True\n if compute_fp:\n for i in 
range(det_size):\n if not (\n assigned_detection[i]\n or ignored_det[i] == -1\n or ignored_det[i] == 1\n or ignored_threshold[i]\n ):\n fp += 1\n nstuff = 0\n if metric == 0:\n overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0)\n for i in range(dc_bboxes.shape[0]):\n for j in range(det_size):\n if assigned_detection[j]:\n continue\n if ignored_det[j] == -1 or ignored_det[j] == 1:\n continue\n if ignored_threshold[j]:\n continue\n if overlaps_dt_dc[j, i] > min_overlap:\n assigned_detection[j] = True\n nstuff += 1\n fp -= nstuff\n if compute_aos:\n tmp = np.zeros((fp + delta_idx,))\n # tmp = [0] * fp\n for i in range(delta_idx):\n tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0\n # tmp.append((1.0 + np.cos(delta[i])) / 2.0)\n # assert len(tmp) == fp + tp\n # assert len(delta) == tp\n if tp > 0 or fp > 0:\n similarity = np.sum(tmp)\n else:\n similarity = -1\n return tp, fp, fn, similarity, thresholds[:thresh_idx]\n\n\[email protected](nopython=True)\ndef image_box_overlap(boxes, query_boxes, criterion=-1):\n N = boxes.shape[0]\n K = query_boxes.shape[0]\n overlaps = np.zeros((N, K), dtype=boxes.dtype)\n for k in range(K):\n qbox_area = (query_boxes[k, 2] - query_boxes[k, 0]) * (\n query_boxes[k, 3] - query_boxes[k, 1]\n )\n for n in range(N):\n iw = min(boxes[n, 2], query_boxes[k, 2]) - max(\n boxes[n, 0], query_boxes[k, 0]\n )\n if iw > 0:\n ih = min(boxes[n, 3], query_boxes[k, 3]) - max(\n boxes[n, 1], query_boxes[k, 1]\n )\n if ih > 0:\n if criterion == -1:\n ua = (\n (boxes[n, 2] - boxes[n, 0]) * (boxes[n, 3] - boxes[n, 1])\n + qbox_area\n - iw * ih\n )\n elif criterion == 0:\n ua = (boxes[n, 2] - boxes[n, 0]) * (boxes[n, 3] - boxes[n, 1])\n elif criterion == 1:\n ua = qbox_area\n else:\n ua = 1.0\n overlaps[n, k] = iw * ih / ua\n return overlaps\n\n\ndef bev_box_overlap(boxes, qboxes, criterion=-1, stable=False):\n if stable:\n riou = box_np_ops.riou_cc(boxes, qboxes)\n else:\n from det3d.ops.nms.nms_gpu import rotate_iou_gpu_eval\n riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)\n return riou\n\n\[email protected](nopython=True, parallel=True)\ndef box3d_overlap_kernel(boxes, qboxes, rinc, criterion=-1, z_axis=1, z_center=1.0):\n \"\"\"\n z_axis: the z (height) axis.\n z_center: unified z (height) center of box.\n \"\"\"\n N, K = boxes.shape[0], qboxes.shape[0]\n for i in range(N):\n for j in range(K):\n if rinc[i, j] > 0:\n min_z = min(\n boxes[i, z_axis] + boxes[i, z_axis + 3] * (1 - z_center),\n qboxes[j, z_axis] + qboxes[j, z_axis + 3] * (1 - z_center),\n )\n max_z = max(\n boxes[i, z_axis] - boxes[i, z_axis + 3] * z_center,\n qboxes[j, z_axis] - qboxes[j, z_axis + 3] * z_center,\n )\n iw = min_z - max_z\n if iw > 0:\n area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]\n area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]\n inc = iw * rinc[i, j]\n if criterion == -1:\n ua = area1 + area2 - inc\n elif criterion == 0:\n ua = area1\n elif criterion == 1:\n ua = area2\n else:\n ua = 1.0\n rinc[i, j] = inc / ua\n else:\n rinc[i, j] = 0.0\n\n\ndef box3d_overlap(boxes, qboxes, criterion=-1, z_axis=1, z_center=1.0):\n \"\"\"kitti camera format z_axis=1.\n \"\"\"\n bev_axes = list(range(7))\n bev_axes.pop(z_axis + 3)\n bev_axes.pop(z_axis)\n from det3d.ops.nms.nms_gpu import rotate_iou_gpu_eval\n rinc = rotate_iou_gpu_eval(boxes[:, bev_axes], qboxes[:, bev_axes], 2)\n box3d_overlap_kernel(boxes, qboxes, rinc, criterion, z_axis, z_center)\n return rinc\n" ]
[ [ "torch.nn.ReLU", "numpy.array" ], [ "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.stack", "numpy.cos" ] ]
apaszke/jax
[ "a3997ba98c67f96ac1b44fa3e6f2420ea4ccacdd" ]
[ "jax/interpreters/xla.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom collections import defaultdict, deque, namedtuple\nimport itertools as it\nimport operator as op\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Set, Type, Tuple\nfrom warnings import warn\n\nfrom absl import logging\nimport numpy as np\n\nfrom ..config import flags, bool_env, config\nfrom .. import core\nfrom .. import ad_util\nfrom .. import dtypes\nfrom .. import lazy\nfrom .. import linear_util as lu\nfrom jax._src import source_info_util\nfrom ..abstract_arrays import (ConcreteArray, ShapedArray, AbstractToken,\n make_shaped_array, array_types, raise_to_shaped,\n abstract_token)\nfrom ..core import Literal, pp_eqn_compact\nfrom ..pprint_util import pp\nfrom ..util import (partial, partialmethod, cache, prod, unzip2,\n extend_name_stack, wrap_name, safe_zip, safe_map)\nfrom ..lib import xla_bridge as xb\nfrom ..lib import xla_client as xc\nfrom . import partial_eval as pe\nfrom . import ad\nfrom . import masking\n\nmap, unsafe_map = safe_map, map\nzip, unsafe_zip = safe_zip, zip\n\nxe = xc._xla\nxops = xc._xla.ops\n\n# Types\nBackend = Any # xc.LocalBackend (why does mypy not like this?)\nDevice = Any # xc.Device\nPyLocalBuffer = Any\n\nXlaOp = Any # xla_extension.XlaOp\nXlaShape = Any # xla_client.Shape\nXlaComputationBuilder = Any # xla_bridge._JaxComputationBuilder\nXlaExecutable = Any # xla_extension.LocalExecutable\n\nFLAGS = flags.FLAGS\nflags.DEFINE_bool('jax_debug_nans',\n bool_env('JAX_DEBUG_NANS', False),\n 'Add nan checks to every operation.')\nflags.DEFINE_bool('jax_log_compiles',\n bool_env('JAX_LOG_COMPILES', False),\n 'Print a message each time a `jit` computation is compiled.')\n\n# This flag is set on exit; no logging should be attempted\n_on_exit = False\n\ndef identity(x): return x\n\n_scalar_types = dtypes.python_scalar_dtypes.keys()\n\n# unit representation\ndef _make_unit_constant(c): return xb.constant(c, np.zeros((), dtype=np.dtype('bool')))\ndef _make_unit_shape(_): return (xc.Shape.array_shape(np.dtype('bool'), ()),)\ndef _device_put_unit(_, device):\n backend = xb.get_device_backend(device)\n return (backend.buffer_from_pyval(np.zeros((), dtype=np.dtype('bool')),\n device),)\ndef _make_array_shape(a):\n if a.dtype is dtypes.float0:\n return (xc.Shape.array_shape(np.dtype('bool'), a.shape),)\n else:\n return (xc.Shape.array_shape(a.dtype, a.shape),)\n\n### handlers\n\nxb.register_constant_handler(core.Unit, lambda c, *_: _make_unit_constant(c))\n\ndef aval_to_xla_shapes(aval):\n try:\n return xla_shape_handlers[type(aval)](aval)\n except KeyError as err:\n raise TypeError(f\"No xla_shape_handler for type: {type(aval)}\") from err\n\nxla_shape_handlers: Dict[Type[core.AbstractValue], Callable] = {\n core.AbstractUnit: _make_unit_shape,\n ShapedArray: _make_array_shape,\n ConcreteArray: _make_array_shape,\n}\n\ndef aval_to_result_handler(device: Optional[Device], aval: core.AbstractValue) -> Callable:\n try:\n return 
xla_result_handlers[type(aval)](device, aval)\n except KeyError as err:\n raise TypeError(f\"No xla_result_handler for type: {type(aval)}\") from err\n\ndef array_result_handler(device: Optional[Device], aval: core.ShapedArray):\n if aval.dtype is dtypes.float0:\n return lambda _: np.zeros(aval.shape, dtypes.float0)\n return partial(make_device_array, raise_to_shaped(aval), device,\n lazy.array(aval.shape))\n\n\nxla_result_handlers: Dict[Type[core.AbstractValue], Callable[..., Callable]] = {\n core.AbstractUnit: lambda _, __: lambda _: core.unit,\n ShapedArray: array_result_handler,\n ConcreteArray: array_result_handler,\n}\n\ndef device_put(x, device: Optional[Device] = None) -> Tuple[Any]:\n x = canonicalize_dtype(x)\n try:\n return device_put_handlers[type(x)](x, device)\n except KeyError as err:\n raise TypeError(f\"No device_put handler for type: {type(x)}\") from err\n\ndef _device_put_array(x, device: Optional[Device]):\n backend = xb.get_device_backend(device)\n if x.dtype is dtypes.float0:\n x = np.zeros(x.shape, dtype=np.dtype(bool))\n return (backend.buffer_from_pyval(x, device),)\n\ndef _device_put_scalar(x, device):\n return _device_put_array(dtypes.coerce_to_array(x), device)\n\ndevice_put_handlers: Dict[Any, Callable[[Any, Optional[Device]], Tuple[Any]]] = {\n core.Unit: _device_put_unit\n}\ndevice_put_handlers.update((t, _device_put_array) for t in array_types)\ndevice_put_handlers.update((t, _device_put_scalar) for t in _scalar_types)\n\n# TODO(mattjj): try to remove this canonicalize_dtype stuff\ndef canonicalize_dtype(x):\n typ = type(x)\n handler = canonicalize_dtype_handlers.get(typ)\n if handler: return handler(x)\n for typ in typ.mro():\n handler = canonicalize_dtype_handlers.get(typ)\n if handler: return handler(x)\n raise TypeError(f\"No canonicalize_dtype handler for type: {type(x)}\")\n\ndef _canonicalize_ndarray_dtype(x):\n return np.asarray(x, dtypes.canonicalize_dtype(dtypes.result_type(x)))\n\ndef _canonicalize_python_scalar_dtype(typ, x):\n return np.asarray(\n x, dtypes.canonicalize_dtype(dtypes.python_scalar_dtypes[typ]))\n\ncanonicalize_dtype_handlers: Dict[Any, Callable] = {core.Unit: identity}\ncanonicalize_dtype_handlers.update(\n (t, _canonicalize_ndarray_dtype) for t in array_types)\ncanonicalize_dtype_handlers.update(\n (t, partial(_canonicalize_python_scalar_dtype, t)) for t in _scalar_types)\n\ndef abstractify(x) -> core.AbstractValue:\n typ = type(x)\n aval_fn = pytype_aval_mappings.get(typ)\n if aval_fn: return aval_fn(x)\n for typ in typ.mro():\n aval_fn = pytype_aval_mappings.get(typ)\n if aval_fn: return aval_fn(x)\n raise TypeError(f\"Argument '{x}' of type '{type(x)}' is not a valid JAX type\")\n\ndef _make_abstract_python_scalar(typ, _):\n return ShapedArray((), dtypes.python_scalar_dtypes[typ], weak_type=True)\n\npytype_aval_mappings: Dict[Any, Callable[[Any], core.AbstractValue]] = {\n core.Unit: lambda _: core.abstract_unit,\n}\npytype_aval_mappings.update((t, make_shaped_array) for t in array_types)\npytype_aval_mappings.update(\n (t, partial(_make_abstract_python_scalar, t)) for t in _scalar_types)\n\n# We can optionally set a Jaxpr rewriter that can be applied just before\n# compilation. 
This mechanism is used for compiling id_tap, we can\n# remove it once we bring the id_tap implementation into the core.\noutfeed_rewriter: Optional[Callable[[core.Jaxpr], core.Jaxpr]] = None\ndef apply_outfeed_rewriter(jaxpr: core.Jaxpr) -> core.Jaxpr:\n if outfeed_rewriter is not None:\n return outfeed_rewriter(jaxpr)\n else:\n return jaxpr\n\noutfeed_primitives: Set[core.Primitive] = set()\ndef jaxpr_uses_outfeed(jaxpr: core.Jaxpr) -> bool:\n \"\"\"Finds if there are outfeed primitives anywhere inside a Jaxpr.\"\"\"\n return any(primitive_uses_outfeed(eqn.primitive, eqn.params)\n for eqn in jaxpr.eqns)\n\ndef _param_uses_outfeed(param):\n if type(param) is core.Jaxpr:\n if jaxpr_uses_outfeed(param):\n return True\n elif type(param) is core.ClosedJaxpr:\n if jaxpr_uses_outfeed(param.jaxpr):\n return True\n return False\n\ndef primitive_uses_outfeed(prim: core.Primitive, params: Dict) -> bool:\n if prim in outfeed_primitives:\n return True\n for param in params.values():\n if isinstance(param, tuple):\n if any(unsafe_map(_param_uses_outfeed, param)):\n return True\n elif _param_uses_outfeed(param):\n return True\n return False\n\n### op-by-op execution\n\ndef arg_spec(x):\n aval = abstractify(x)\n try:\n return aval, x._device\n except:\n return aval, None\n\ndef apply_primitive(prim, *args, **params):\n \"\"\"Impl rule that compiles and runs a single primitive 'prim' using XLA.\"\"\"\n compiled_fun = xla_primitive_callable(prim, *unsafe_map(arg_spec, args), **params)\n return compiled_fun(*args)\n\n\ndef _partition_outputs(avals, outs):\n nouts = [aval._num_buffers for aval in avals]\n if not core.skip_checks:\n assert sum(nouts) == len(outs), f\"Internal error: sum(nouts)={sum(nouts)} should equal len(outs)={len(outs)}.\"\n outs = iter(outs)\n return [[next(outs) for _ in range(nout)] for nout in nouts]\n\n\n@cache()\ndef xla_primitive_callable(prim, *arg_specs: Tuple[core.AbstractValue,\n Optional[Device]], **params):\n avals, arg_devices = unzip2(arg_specs)\n donated_invars = (False,) * len(arg_specs)\n device = _device_from_arg_devices(arg_devices)\n backend = xb.get_device_backend(device)\n if primitive_uses_outfeed(prim, params):\n # We use the _xla_callable path, where we pre-process the primitives\n def prim_fun(*args):\n return prim.bind(*args, **params)\n return _xla_callable(lu.wrap_init(prim_fun), device, None, \"prim\", donated_invars,\n *arg_specs)\n aval_out = prim.abstract_eval(*avals, **params)\n if not prim.multiple_results:\n handle_result = aval_to_result_handler(device, aval_out)\n else:\n handlers = map(partial(aval_to_result_handler, device), aval_out)\n handle_result = lambda *bufs:\\\n tuple(handler(*bs) for handler, bs in zip(handlers, _partition_outputs(aval_out, bufs)))\n tuple_args = len(avals) > 100\n if prim in initial_style_translations:\n nreps = initial_style_primitive_replicas(params)\n else:\n nreps = 1\n\n if nreps > xb.device_count(backend):\n raise ValueError(\n f\"compiling a primitive computation `{prim}` that requires {nreps} \"\n f\"replicas, but only {xb.device_count(backend)} XLA devices are \"\n f\"available on backend {backend.platform}.\")\n built_c = primitive_computation(prim, AxisEnv(nreps, (), (), None), backend,\n tuple_args, *avals, **params)\n options = xb.get_compile_options(\n num_replicas=nreps,\n num_partitions=1,\n device_assignment=device and (device.id,))\n options.parameter_is_tupled_arguments = tuple_args\n compiled = backend_compile(backend, built_c, options)\n if nreps == 1:\n return partial(_execute_compiled_primitive, 
prim, compiled, handle_result)\n else:\n return partial(_execute_replicated_primitive, prim, compiled, handle_result)\n\ndef _device_from_arg_devices(devices: Sequence[Optional[Device]]) -> Optional[Device]:\n \"\"\"Given devices of inputs, determine where to perform a computation.\n\n Args:\n devices: list where each element is a either a `Device` instance or `None`.\n Returns:\n A `Device` instance or None.\n Raises:\n ValueError if input devices are inconsistent.\n \"\"\"\n try:\n device, = {d for d in devices if d is not None} or (None,)\n return device\n except ValueError as err:\n msg = \"primitive arguments must be colocated on the same device, got {}\"\n raise ValueError(msg.format(\", \".join(map(str, devices)))) from err\n\n@cache()\ndef primitive_computation(prim, axis_env, backend, tuple_args, *avals, **params):\n c = xb.make_computation_builder(f\"primitive_computation_{prim.name}\")\n c.set_op_metadata(xc.OpMetadata(\n op_type=prim.name,\n op_name=str(pp_eqn_compact(prim.name, params))))\n platform = xb.get_backend(backend).platform\n xla_args, _ = _xla_callable_args(c, avals, tuple_args)\n # return val always set as a side-effect on c\n if prim in backend_specific_translations[platform]:\n rule = backend_specific_translations[platform][prim]\n ans = rule(c, *xla_args, **params)\n elif prim in translations:\n rule = translations[prim]\n ans = rule(c, *xla_args, **params)\n elif prim in initial_style_translations:\n rule = initial_style_translations[prim]\n ans = rule(c, axis_env, extend_name_stack(prim.name), avals, backend,\n *xla_args, **params)\n else:\n raise NotImplementedError(f\"XLA translation rule for {prim} not found\")\n assert isinstance(ans, xe.XlaOp)\n c.clear_op_metadata()\n try:\n return c.build()\n except RuntimeError as e:\n msg = (\" \".join(map(str, e.args)) + \"\\n\"\n \"This is a bug in JAX's shape-checking rules; please report it!\\n\"\n \"https://github.com/google/jax/issues\\n\")\n raise RuntimeError(msg) from e\n\ndef primitive_subcomputation(prim, *avals, **params):\n axis_env = AxisEnv(1, (), (), None)\n return primitive_computation(prim, axis_env, None, False, *avals, **params)\n\ndef backend_compile(backend, built_c, options):\n # we use a separate function call to ensure that XLA compilation appears\n # separately in Python profiling results\n return backend.compile(built_c, compile_options=options)\n\ndef _execute_compiled_primitive(prim, compiled, result_handler, *args):\n device, = compiled.local_devices()\n input_bufs = list(it.chain.from_iterable(device_put(x, device) for x in args if x is not token))\n out_bufs = compiled.execute(input_bufs)\n if FLAGS.jax_debug_nans: check_nans(prim, out_bufs)\n return result_handler(*out_bufs)\n\ndef _execute_replicated_primitive(prim, compiled, result_handler, *args):\n input_bufs = [\n list(it.chain.from_iterable(device_put(x, device) for x in args if x is not token))\n for device in compiled.local_devices()]\n out_bufs = compiled.execute_on_local_devices(input_bufs)[0]\n return result_handler(*out_bufs)\n\n\ndef check_nans(prim, bufs):\n for buf in bufs:\n # TODO(jblespiau): We can simply use buf.xla_shape() when version 0.1.58 is\n # the default.\n _check_nans(prim.name, getattr(buf, \"xla_shape\", buf.shape)(), buf)\n\ndef _check_nans(name, xla_shape, buf):\n assert not xla_shape.is_tuple()\n if dtypes.issubdtype(xla_shape.element_type(), np.inexact):\n if np.any(np.isnan(buf.to_py())):\n raise FloatingPointError(f\"invalid value (nan) encountered in {name}\")\n\n### compiling jaxprs\n\ndef 
prefetch(x):\n if isinstance(x, DeviceArray):\n x.copy_to_host_async()\n return x\n\ndef jaxpr_literals(jaxpr):\n \"\"\"Generates all the literals inside a jaxpr, including nested subjaxprs.\"\"\"\n for eqn in jaxpr.eqns:\n for v in eqn.invars:\n if type(v) is core.Literal:\n yield v.val\n for subjaxpr in core.subjaxprs(jaxpr):\n yield from jaxpr_literals(subjaxpr)\n\n\ndef _flatmap(func: Callable, vars: Sequence):\n return list(it.chain.from_iterable(map(func, vars)))\n\ndef _partitionmap(func: Callable, vars: Sequence, nodes: Sequence):\n return map(func, vars, _partition_outputs([v.aval for v in vars], nodes))\n\ndef jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, name_stack, *args):\n if backend not in ('cpu', 'gpu', 'tpu'):\n platform = xb.get_backend(backend).platform # canonicalize\n else:\n platform = backend\n\n def read(v):\n if type(v) is Literal:\n return [xb.constant(c, canonicalize_dtype(v.val))]\n else:\n return env[v]\n\n def aval(v):\n if type(v) is Literal:\n return abstractify(v.val)\n else:\n return v.aval\n\n def write(v, node):\n assert node is not None\n env[v] = node\n\n env = {}\n _partitionmap(write, [core.unitvar], [_make_unit_constant(c)])\n _partitionmap(write, jaxpr.constvars, consts)\n _partitionmap(write, jaxpr.invars, args)\n for eqn in jaxpr.eqns:\n frame = source_info_util.user_frame(eqn.source_info)\n c.set_op_metadata(xc.OpMetadata(\n op_type=eqn.primitive.name,\n op_name=str(pp(name_stack) >> pp_eqn_compact(\n eqn.primitive.name, eqn.params)),\n source_file=frame.file_name if frame else None,\n source_line=frame.line_num if frame else None))\n in_nodes = _flatmap(read, eqn.invars)\n if eqn.primitive in backend_specific_translations[platform]:\n rule = backend_specific_translations[platform][eqn.primitive]\n ans = rule(c, *in_nodes, **eqn.params)\n elif eqn.primitive in translations:\n ans = translations[eqn.primitive](c, *in_nodes, **eqn.params)\n elif eqn.primitive in initial_style_translations:\n new_params = check_backend_params(eqn.params, backend)\n rule = initial_style_translations[eqn.primitive]\n ans = rule(c, axis_env, extend_name_stack(name_stack, eqn.primitive.name),\n map(aval, eqn.invars), backend, *in_nodes, **new_params)\n elif eqn.primitive in parallel_translations:\n rule = parallel_translations[eqn.primitive]\n ans = rule(c, *in_nodes, axis_env=axis_env, platform=platform, **eqn.params)\n elif eqn.primitive in call_translations:\n new_params = check_backend_params(eqn.params, backend)\n rule = call_translations[eqn.primitive]\n ans = rule(c, axis_env, in_nodes,\n name_stack, backend=backend, **new_params)\n else:\n raise NotImplementedError(\n f\"XLA translation rule for primitive '{eqn.primitive.name}' not found\")\n\n assert isinstance(ans, xe.XlaOp)\n c.get_shape(ans) # force xla to do shape error checking\n if eqn.primitive.multiple_results or any(v.aval._num_buffers > 1 for v in eqn.outvars):\n out_nodes = xla_destructure(c, ans)\n else:\n out_nodes = [ans]\n c.clear_op_metadata()\n _partitionmap(write, eqn.outvars, out_nodes)\n return _flatmap(read, jaxpr.outvars)\n\n\ndef xla_destructure(c, ans):\n num_elements = len(c.get_shape(ans).tuple_shapes())\n return [xops.GetTupleElement(ans, i) for i in range(num_elements)]\n\ndef check_backend_params(params, outer_backend):\n # For nested calls, the outermost call sets the backend for all inner calls;\n # it's an error if the inner call has a conflicting explicit backend spec.\n inner_backend = params.get('backend', None)\n if inner_backend and inner_backend != outer_backend:\n 
raise ValueError(\n f\"Outer-jit backend specification {outer_backend} must match explicit \"\n f\"inner-jit backend specification {inner_backend}.\")\n return {k: params[k] for k in params if k != 'backend'}\n\n\nAxisEnv = namedtuple('AxisEnv', ['nreps', 'names', 'sizes', 'devices'])\n\ndef extend_axis_env(env, name, size):\n return AxisEnv(env.nreps, env.names + (name,), env.sizes + (size,), env.devices)\n\ndef axis_read(axis_env, axis_name):\n try:\n return max(i for i, name in enumerate(axis_env.names) if name == axis_name)\n except ValueError:\n raise NameError(\"unbound axis name: {}\".format(axis_name)) from None\n\ndef axis_groups(axis_env, name):\n if isinstance(name, (list, tuple)):\n mesh_axes = tuple(unsafe_map(partial(axis_read, axis_env), name))\n else:\n mesh_axes = (axis_read(axis_env, name),)\n return _axis_groups(axis_env.nreps, axis_env.sizes, mesh_axes)\n\ndef _axis_groups(nrep, mesh_spec, mesh_axes):\n trailing_size, ragged = divmod(nrep, prod(mesh_spec))\n assert not ragged\n full_spec = list(mesh_spec) + [trailing_size]\n iota = np.arange(prod(full_spec)).reshape(full_spec)\n groups = np.reshape(\n np.moveaxis(iota, mesh_axes, np.arange(len(mesh_axes))),\n (prod(np.take(full_spec, mesh_axes)), -1))\n return tuple(unsafe_map(tuple, groups.T))\n\ndef jaxpr_replicas(jaxpr):\n \"\"\"The number of replicas needed for a jaxpr.\n\n For a eqn, multiply the `axis_size` with the `jaxpr_replicas` of the\n subjaxprs. For a list of eqns, take the maximum number of replicas.\n \"\"\"\n return max(it.chain([1], (eqn_replicas(eqn) for eqn in jaxpr.eqns)))\n\n# TODO(mattjj): this function assumes that only pmap has a parameter named\n# axis_size, and that it corresponds to cross-replica mapping\ndef eqn_replicas(eqn):\n call_jaxpr = eqn.params.get(\"call_jaxpr\")\n if call_jaxpr:\n return eqn.params.get('axis_size', 1) * jaxpr_replicas(call_jaxpr)\n elif eqn.primitive in initial_style_translations:\n return initial_style_primitive_replicas(eqn.params)\n else:\n return 1\n\ndef initial_style_primitive_replicas(params):\n nums = (jaxpr_replicas(param if type(param) is core.Jaxpr else param.jaxpr)\n for param in params.values()\n if type(param) in (core.Jaxpr, core.ClosedJaxpr))\n return max(it.chain([1], nums))\n\n# TODO(mattjj,skyewm): the functions here are utilities for checking if\n# not-yet-supported features are used with multi-host programming\n\ndef jaxpr_has_pmap(jaxpr):\n \"\"\"Whether there is an xla_pmap primitive anywhere inside a Jaxpr.\"\"\"\n for eqn in jaxpr.eqns:\n if 'xla_pmap' in eqn.primitive.name:\n return True\n for subjaxpr in core.subjaxprs(jaxpr):\n if jaxpr_has_pmap(subjaxpr):\n return True\n return False\n\n\ndef jaxpr_collectives(jaxpr):\n \"\"\"Generates all the collective primitives anywhere inside a Jaxpr.\"\"\"\n for eqn in jaxpr.eqns:\n if eqn.primitive in parallel_translations:\n yield eqn.primitive\n for subjaxpr in core.subjaxprs(jaxpr):\n yield from jaxpr_collectives(subjaxpr)\n\n\n### xla_call underlying jit\n\ndef _xla_call_impl(fun: lu.WrappedFun, *args, device, backend, name, donated_invars):\n compiled_fun = _xla_callable(fun, device, backend, name, donated_invars,\n *unsafe_map(arg_spec, args))\n try:\n return compiled_fun(*args)\n except FloatingPointError:\n assert FLAGS.jax_debug_nans # compiled_fun can only raise in this case\n print(\"Invalid value encountered in the output of a jit function. 
\"\n \"Calling the de-optimized version.\")\n # We want to run the wrapped function again (after _xla_callable already ran\n # it), but linear_util.WrappedFun instances are meant to be run only once.\n # In addition to re-executing the Python code, which is usually undesirable\n # but which FLAGS.jax_debug_nans is meant to opt into, we'll be re-executing\n # any linear_util.py-style side effects, i.e. re-populating Stores created\n # by any transformation_with_aux's applied to fun. Since this is\n # intentional here, to avoid \"Store occupied\" errors we reset the stores to\n # be empty.\n for store in fun.stores: store.reset()\n return fun.call_wrapped(*args) # probably won't return\n\ndef flatten_shape(s: XlaShape) -> Sequence[Tuple[Sequence[int], XlaShape]]:\n \"\"\"Expands a given shape tree into a flat list of indices to arrays.\n\n Given the following computation:\n\n >>> c = xc.XlaBuilder(\"example\")\n >>> p0 = xb.parameter(c, 1, xc.shape_from_pyval(jnp.ones([1])))\n >>> p1 = xb.parameter(c, 2, xc.shape_from_pyval(jnp.ones([2])))\n >>> p2 = xb.parameter(c, 3, xc.shape_from_pyval(jnp.ones([3])))\n >>> o = xops.Tuple(c, [p0, p1, p2])\n\n We can query the arrays in the output tuple:\n\n >>> flatten_shape(c.GetShape(o))\n (((0,), f32[1]{0}),\n ((1,), f32[2]{0}),\n ((2,), f32[3]{0}))\n\n Or the arrays in one of the parameters (which is itself an array):\n\n >>> flatten_shape(c.GetShape(p0))\n (((), f32[1]{0}),)\n\n Args\n s: The input shape.\n\n Returns:\n An iterable of pairs of indices and shapes for each array within the shape\n tree.\n \"\"\"\n def _flatten_shape(s, index):\n if s.is_array():\n yield index, s\n else:\n assert s.is_tuple()\n for i, sub in enumerate(s.tuple_shapes()):\n subindex = index + (i,)\n if sub.is_tuple():\n yield from _flatten_shape(sub, subindex)\n else:\n yield subindex, sub\n return tuple(_flatten_shape(s, index=()))\n\ndef _xla_consts(c, consts):\n unique_consts = {id(const): const for const in consts}\n xla_consts = {\n id_: xb.constant(c, const) for id_, const in unique_consts.items()}\n return [xla_consts[id(const)] for const in consts]\n\[email protected]\ndef _xla_callable(fun: lu.WrappedFun, device, backend, name, donated_invars, *arg_specs):\n if device is not None and backend is not None:\n raise ValueError(\"can't specify both a device and a backend for jit, \"\n \"got device={} and backend={}\".format(device, backend))\n\n abstract_args, arg_devices = unzip2(arg_specs)\n if config.omnistaging_enabled:\n jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(fun, abstract_args)\n if any(isinstance(c, core.Tracer) for c in consts):\n raise core.UnexpectedTracerError(\"Encountered an unexpected tracer.\")\n else:\n pvals: Sequence[pe.PartialVal] = [pe.PartialVal.unknown(aval) for aval in abstract_args]\n jaxpr, pvals, consts = pe.trace_to_jaxpr( # type: ignore\n fun, pvals, instantiate=False, stage_out=True, bottom=True) # type: ignore\n map(prefetch, it.chain(consts, jaxpr_literals(jaxpr)))\n jaxpr = apply_outfeed_rewriter(jaxpr)\n\n nreps = jaxpr_replicas(jaxpr)\n device = _xla_callable_device(nreps, backend, device, arg_devices)\n backend = device.platform if device else backend\n if config.omnistaging_enabled:\n result_handlers = map(partial(aval_to_result_handler, device), out_avals)\n else:\n out_avals = [pval.get_aval() for pval in pvals]\n result_handlers = map(partial(_pval_to_result_handler, device), pvals) # type: ignore\n\n # Computations that only produce constants and/or only rearrange their inputs,\n # which are often produced from 
partial evaluation, don't need compilation,\n # and don't need to force their (potentially lazy) arguments.\n if not jaxpr.eqns:\n return partial(_execute_trivial, jaxpr, device, consts, out_avals, result_handlers)\n\n if not _on_exit:\n log_priority = logging.WARNING if FLAGS.jax_log_compiles else logging.DEBUG\n logging.log(log_priority, \"Compiling %s for args %s.\", fun.__name__, abstract_args)\n\n if nreps > 1:\n warn(f\"The jitted function {fun.__name__} includes a pmap. Using \"\n \"jit-of-pmap can lead to inefficient data movement, as the outer jit \"\n \"does not preserve sharded data representations and instead collects \"\n \"input and output arrays onto a single device. \"\n \"Consider removing the outer jit unless you know what you're doing. \"\n \"See https://github.com/google/jax/issues/2926.\")\n\n if nreps > xb.device_count(backend):\n raise ValueError(\n f\"compiling computation that requires {nreps} replicas, but only \"\n f\"{xb.device_count(backend)} XLA devices are available\")\n\n if xb.host_count() > 1 and (nreps > 1 or jaxpr_has_pmap(jaxpr)):\n raise NotImplementedError(\n \"jit of multi-host pmap not implemented (and jit-of-pmap can cause \"\n \"extra data movement anyway, so maybe you don't want it after all).\")\n\n tuple_args = len(abstract_args) > 100 # pass long arg lists as tuple for TPU\n\n c = xb.make_computation_builder(\"jit_{}\".format(fun.__name__))\n xla_consts = _xla_consts(c, consts)\n xla_args, donated_invars = _xla_callable_args(c, abstract_args, tuple_args, donated_invars=donated_invars)\n out_nodes = jaxpr_subcomp(\n c, jaxpr, backend, AxisEnv(nreps, (), (), None), xla_consts,\n extend_name_stack(wrap_name(name, 'jit')), *xla_args)\n out_tuple = xops.Tuple(c, out_nodes)\n backend = xb.get_backend(backend)\n if backend.platform in (\"gpu\", \"tpu\"):\n donated_invars = set_up_aliases(c, xla_args, out_tuple, donated_invars, tuple_args)\n if any(donated_invars):\n # TODO(tomhennigan): At call time we should mark these buffers as deleted.\n unused_donations = [str(c.GetShape(a))\n for a, d in zip(xla_args, donated_invars) if d]\n warn(\"Some donated buffers were not usable: {}\".format(\", \".join(unused_donations)))\n built = c.build(out_tuple)\n\n options = xb.get_compile_options(\n num_replicas=nreps,\n num_partitions=1,\n device_assignment=(device.id,) if device else None)\n options.parameter_is_tupled_arguments = tuple_args\n compiled = backend_compile(backend, built, options)\n if nreps == 1:\n return partial(_execute_compiled, compiled, out_avals, result_handlers)\n else:\n return partial(_execute_replicated, compiled, out_avals, result_handlers)\n\ndef set_up_aliases(c, xla_args, out_tuple, donated_args, tuple_args):\n \"\"\"Configures input/output \"must\" aliasing based on `donated_args`.\"\"\"\n # First for every input array add it to `donations` iff it is a member of\n # `donated_args`.\n donations = defaultdict(deque)\n for arg_index, arg in enumerate(xla_args):\n if donated_args[arg_index]:\n for param_index, element in flatten_shape(c.GetShape(arg)):\n key = (element.dimensions(), element.numpy_dtype())\n if tuple_args:\n param_number = 0\n param_index = (arg_index,) + tuple(param_index)\n donations[key].append((param_number, param_index, arg_index))\n else:\n param_number = arg_index\n donations[key].append((param_number, param_index, arg_index))\n\n # Consume donations for outputs.\n out_donated_args = list(donated_args)\n for output_index, element in flatten_shape(c.GetShape(out_tuple)):\n key = (element.dimensions(), 
element.numpy_dtype())\n if donations.get(key, ()):\n param_number, param_index, arg_index = donations[key].popleft()\n out_donated_args[arg_index] = False\n c.setup_alias(output_index, param_number, param_index)\n\n return tuple(out_donated_args)\n\ndef _xla_callable_device(nreps, backend, device, arg_devices):\n if nreps > 1:\n if device is not None or backend is not None:\n raise ValueError(f\"can't specify device or backend for jit-of-pmap, \"\n f\"got device={device} and backend={backend}\")\n return None\n else:\n if device is None and backend is None:\n return _device_from_arg_devices(arg_devices)\n elif device is not None and backend is None:\n return device\n elif device is None and backend is not None:\n return xb.get_backend(backend).get_default_device_assignment(1)[0]\n else:\n assert False # Unreachable given the error check in _xla_callable\n\n# Used within _xla_callable_args and _xla_param to distinguish between None (no\n# sharding annotation set) and replicated.\n_replicated_param = object()\n\ndef _xla_callable_args(\n c, avals, tuple_args, replicated=None,\n partitions: Optional[Sequence[Optional[Sequence[int]]]] = None,\n donated_invars = None):\n assert partitions is None or len(partitions) == len(avals)\n if not tuple_args:\n if replicated is None:\n replicated = [None] * len(avals)\n if partitions is None:\n parts: List[object] = [None] * len(avals)\n else:\n parts = [_replicated_param if part is None else part\n for part in partitions]\n counts = it.count()\n xla_args = [_xla_param(c, next(counts), xla_shape, r, p)\n if a is not abstract_token else xops.CreateToken(c)\n for (a, r, p) in safe_zip(avals, replicated, parts)\n for xla_shape in aval_to_xla_shapes(a)]\n if donated_invars is not None:\n donated_invars = [d\n for (a, r, p, d) in safe_zip(avals, replicated, parts, donated_invars)\n for xla_shape in aval_to_xla_shapes(a)]\n return xla_args, donated_invars\n else:\n if replicated is not None:\n replicated = [r for a, r in zip(avals, replicated)\n if a is not abstract_token]\n tuple_parts = tuple(partitions) if partitions is not None else None\n tuple_shape = xc.Shape.tuple_shape(\n [shape for a in avals for shape in aval_to_xla_shapes(a) if a is not abstract_token])\n tuple_param = _xla_param(c, 0, tuple_shape, replicated, tuple_parts)\n xla_inputs = iter(xla_destructure(c, tuple_param))\n xla_args = [next(xla_inputs) if a is not abstract_token else\n xops.CreateToken(c) for a in avals]\n assert next(xla_inputs, None) is None\n return xla_args, donated_invars\n\ndef _xla_param(builder, param_num, xla_shape, replicated, partitions):\n make_param = partial(xb.parameter, builder, param_num, xla_shape,\n replicated=replicated)\n if partitions is None:\n return make_param()\n elif partitions is _replicated_param:\n return xb.with_sharding(builder, None, make_param)\n else:\n return xb.with_sharding(builder, partitions, make_param)\n\ndef _execute_compiled(compiled: XlaExecutable, avals, handlers, *args):\n device, = compiled.local_devices()\n input_bufs = list(it.chain.from_iterable(device_put(x, device) for x in args if x is not token))\n out_bufs = compiled.execute(input_bufs)\n if FLAGS.jax_debug_nans: check_nans(xla_call_p, out_bufs)\n return [handler(*bs) for handler, bs in zip(handlers, _partition_outputs(avals, out_bufs))]\n\ndef _execute_replicated(compiled: XlaExecutable, avals, handlers, *args):\n input_bufs = [\n list(it.chain.from_iterable(device_put(x, device) for x in args if x is not token))\n for device in compiled.local_devices()]\n out_bufs = 
compiled.execute_on_local_devices(input_bufs)[0]\n if FLAGS.jax_debug_nans: check_nans(xla_call_p, out_bufs)\n return [handler(*bs) for handler, bs in zip(handlers, _partition_outputs(avals, out_bufs))]\n\ndef _execute_trivial(jaxpr, device: Optional[Device], consts, avals, handlers, *args):\n env = {core.unitvar: core.unit}\n map(env.setdefault, jaxpr.invars, args)\n map(env.setdefault, jaxpr.constvars, consts)\n outs = [canonicalize_dtype(v.val) if type(v) is Literal else env[v]\n for v in jaxpr.outvars]\n return [_copy_device_array_to_device(x, device) if type_is_device_array(x)\n else h(*device_put(x, device)) for h, x in zip(handlers, outs)]\n\nxla_call_p = core.CallPrimitive('xla_call')\nxla_call = xla_call_p.bind\nxla_call_p.def_impl(_xla_call_impl)\n\ndef _xla_call_partial_eval_update_params(params, in_unknowns):\n call_jaxpr = params['call_jaxpr']\n donated_invars = params['donated_invars']\n if not in_unknowns and donated_invars:\n # JaxprTrace.post_process_call creates a call with no input tracers\n new_donated_invars = (False,) * len(call_jaxpr.invars)\n else:\n # JaxprTrace.process_call drops known input tracers\n donated_invars = [d for d, uk in zip(donated_invars, in_unknowns) if uk]\n new_donated_invars = ((False,) * (len(call_jaxpr.invars) - len(donated_invars))\n + tuple(donated_invars))\n return dict(params, donated_invars=new_donated_invars)\npe.call_param_updaters[xla_call_p] = _xla_call_partial_eval_update_params\n\ndef _xla_call_jvp_update_params(params, nz_tangents):\n donated_invars = params['donated_invars']\n donated_tangents = [d for d, nz in zip(donated_invars, nz_tangents) if nz]\n new_donated_invars = (*donated_invars, *donated_tangents)\n return dict(params, donated_invars=new_donated_invars)\nad.call_param_updaters[xla_call_p] = _xla_call_jvp_update_params\n\ndef _xla_call_transpose_update_params(params, undef_primals, nonzero_cts):\n donated_invars = params['donated_invars']\n donated_primals = [d for d, u in zip(donated_invars, undef_primals) if not u]\n donated_cotangents = [False for nz in nonzero_cts if nz]\n return dict(params, donated_invars=(*donated_primals, *donated_cotangents))\nad.call_transpose_param_updaters[xla_call_p] = _xla_call_transpose_update_params\n\n\ndef _xla_call_translation_rule(c, axis_env,\n in_nodes, name_stack, backend, name,\n call_jaxpr, donated_invars, device=None):\n del device, donated_invars # Ignored.\n subc = xb.make_computation_builder(f\"jit_{name}\")\n args = [xb.parameter(subc, i, c.get_shape(n)) for i, n in enumerate(in_nodes)]\n out_nodes = jaxpr_subcomp(subc, call_jaxpr, backend, axis_env, (),\n extend_name_stack(name_stack, wrap_name(name, 'jit')), *args)\n subc = subc.build(xops.Tuple(subc, out_nodes))\n return xops.Call(c, subc, list(in_nodes))\nad.primitive_transposes[xla_call_p] = partial(ad.call_transpose, xla_call_p)\n\n\n### translation tables\n\ntranslations: Dict[core.Primitive, Callable] = {}\nparallel_translations: Dict[core.Primitive, Callable] = {}\ninitial_style_translations: Dict[core.Primitive, Callable] = {}\ncall_translations: Dict[core.Primitive, Callable] = {}\nbackend_specific_translations: Dict[str, Dict[core.Primitive, Callable]] = defaultdict(dict)\n\ncall_translations[xla_call_p] = _xla_call_translation_rule\n\ndef zeros_like_translation_rule(c, x):\n shape = c.get_shape(x)\n assert not shape.is_tuple()\n zero = xb.constant(c, np.array(0, shape.element_type()))\n return xops.Broadcast(zero, shape.dimensions())\ntranslations[ad_util.zeros_like_p] = zeros_like_translation_rule\n\ndef 
add_jaxvals_translation_rule(c, x, y):\n shape = c.get_shape(x)\n assert not shape.is_tuple()\n return xops.Add(x, y)\ntranslations[ad_util.add_jaxvals_p] = add_jaxvals_translation_rule\n\ntranslations[ad_util.stop_gradient_p] = lambda c, x: x\n\n\[email protected]\ndef _tuple_output(*args, **kwargs):\n ans = yield args, kwargs\n yield (ans,)\n\ndef lower_fun(fun, multiple_results, parallel=False):\n # This function can only be used to lower functions that take JAX array types\n # as arguments (and e.g. don't accept unit values), because it assumes it can\n # map from XLA types to JAX types. In general that mapping is not possible (as\n # the mapping from JAX types to XLA types is not invertible), but for now at\n # least we assume that the mapping from JAX *array* types to XLA array types\n # is invertible. This assumption is unchecked!\n # TODO(mattjj): remove assumption can map XLA array types to JAX array types\n def f(c, *xla_args, **params):\n # TODO(mattjj): revise this 'calling convention'\n avals = [_array_aval_from_xla_shape(c.get_shape(x)) for x in xla_args]\n if parallel:\n axis_env = params.pop('axis_env')\n del params['platform']\n else:\n axis_env = AxisEnv(1, (), (), None)\n wrapped_fun = lu.wrap_init(fun, params)\n if not multiple_results:\n wrapped_fun = _tuple_output(wrapped_fun)\n if config.omnistaging_enabled:\n jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrapped_fun, avals)\n outs = jaxpr_subcomp(c, jaxpr, None, axis_env, _xla_consts(c, consts), '',\n *xla_args)\n else:\n pvals = [pe.PartialVal.unknown(a) for a in avals]\n jaxpr, _, consts = pe.trace_to_jaxpr(wrapped_fun, pvals, instantiate=True,\n stage_out=True) # type: ignore\n xla_consts = _xla_consts(c, consts)\n outs = jaxpr_subcomp(c, jaxpr, None, axis_env, xla_consts, '', *xla_args)\n if multiple_results:\n return xops.Tuple(c, outs)\n else:\n assert len(outs) == 1, outs\n return outs[0]\n return f\n\ndef _array_aval_from_xla_shape(xla_shape):\n # This function instantiates the assumption that we can map fro XLA array\n # types to JAX array types.\n # TODO(mattjj): remove assumption can map XLA array types to JAX array types\n assert not xla_shape.is_tuple()\n return ShapedArray(xla_shape.dimensions(), xla_shape.numpy_dtype())\n\ndef lower_fun_initial_style(fun):\n def f(c, axis_env, name_stack, avals, backend, *xla_args, **params):\n if config.omnistaging_enabled:\n jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(lu.wrap_init(fun, params), avals)\n outs = jaxpr_subcomp(c, jaxpr, backend, axis_env, _xla_consts(c, consts),\n name_stack, *xla_args)\n else:\n pvals = [pe.PartialVal.unknown(a) for a in avals]\n jaxpr, _, consts = pe.trace_to_jaxpr(\n lu.wrap_init(fun, params), pvals, instantiate=True, stage_out=True) # type: ignore\n xla_consts = _xla_consts(c, consts)\n outs = jaxpr_subcomp(c, jaxpr, backend, axis_env, xla_consts, name_stack,\n *xla_args)\n return xops.Tuple(c, outs)\n return f\n\n\n### device-persistent data\n\nclass Token(object): pass\ntoken = Token()\n\npytype_aval_mappings[Token] = lambda _: abstract_token\ncore.pytype_aval_mappings[Token] = lambda _: abstract_token\nxla_shape_handlers[AbstractToken] = lambda _: (xc.Shape.token_shape(),)\nxla_result_handlers[AbstractToken] = lambda _, __: lambda _: token\ncanonicalize_dtype_handlers[Token] = identity\n\n\ndef _forward_method(attrname, self, fun, *args):\n return fun(getattr(self, attrname), *args)\n_forward_to_value = partial(_forward_method, \"_value\")\n\n\ndef make_device_array(aval: core.ShapedArray, device: Optional[Device],\n lazy_expr: 
lazy.LazyExpr, device_buffer: PyLocalBuffer):\n \"\"\"Returns a DeviceArray implementation based on arguments.\n\n External users should not call the `DeviceArray` constructor as it's an\n internal implementation detail. This is added to smooth the transition to a\n C++ equivalent implementation.\n \"\"\"\n return DeviceArray(aval, device, lazy_expr, device_buffer)\n\n\ndef type_is_device_array(x):\n \"\"\"Returns `True` if `x` is a non-sharded DeviceArray.\n\n Use this function instead of `type(x) is Devicearray`.\n \"\"\"\n # TODO(jblespiau): Extend to also support `PyBuffer`.\n return type(x) is DeviceArray\n\n\nclass DeviceArray:\n \"\"\"A DeviceArray is an ndarray backed by a single device memory buffer.\"\"\"\n # We don't subclass ndarray because that would open up a host of issues,\n # but lax_numpy.py overrides isinstance behavior and attaches ndarray methods.\n __slots__ = [\n \"aval\", \"device_buffer\", \"_npy_value\", \"_device\", \"_lazy_expr\",\n \"__weakref__\"\n ]\n __array_priority__ = 100\n\n # DeviceArray has methods that are dynamically populated in lax_numpy.py,\n # and this annotation is needed to make pytype happy.\n _HAS_DYNAMIC_ATTRIBUTES = True\n\n def __init__(self, aval: core.ShapedArray, device: Optional[Device],\n lazy_expr: lazy.LazyExpr,\n device_buffer: PyLocalBuffer):\n self.aval = aval\n self.device_buffer = device_buffer\n self._device = device\n self._lazy_expr = lazy_expr\n\n self._npy_value = None\n if not core.skip_checks:\n assert type(aval) is ShapedArray\n npy_value = self._value\n assert npy_value.dtype == aval.dtype and npy_value.shape == aval.shape\n assert (device is None) or device is device_buffer.device()\n\n def _check_if_deleted(self):\n if self.device_buffer is deleted_buffer:\n raise ValueError(\"DeviceArray has been deleted.\")\n\n def block_until_ready(self):\n \"\"\"Blocks the caller until the buffer's value has been computed on device.\n\n This method is mostly useful for timing microbenchmarks that wish to\n time how long a computation takes, without transferring the result back\n to the host.\n\n Returns the buffer object (`self`).\n \"\"\"\n self._check_if_deleted()\n self.device_buffer.block_host_until_ready()\n return self\n\n @property\n def _value(self):\n self._check_if_deleted()\n if self._npy_value is None:\n if is_device_constant(self):\n self._npy_value = lazy.eval_lexpr(self._lazy_expr, None)\n else:\n self._npy_value = _force(self).device_buffer.to_py()\n self._npy_value.flags.writeable = False\n return self._npy_value\n\n @property\n def shape(self):\n return self.aval.shape\n\n @property\n def dtype(self):\n return self.aval.dtype\n\n @property\n def size(self):\n return prod(self.aval.shape)\n\n @property\n def ndim(self):\n return len(self.aval.shape)\n\n def copy(self):\n \"\"\"Returns an ndarray (backed by host memory, not device memory).\"\"\"\n return np.asarray(self)\n\n def copy_to_host_async(self):\n \"\"\"Requests a copy of the buffer to the host.\"\"\"\n self._check_if_deleted()\n if self._npy_value is None and not is_device_constant(self):\n self.device_buffer.copy_to_host_async()\n\n def delete(self):\n \"\"\"Deletes the device array and any cached copy on the host.\n\n It is an error to access the contents of a `DeviceArray` after it has\n been deleted.\n\n Use of this method is optional; device buffers will be reclaimed\n automatically by Python when a DeviceArray object is garbage collected.\n However, it is sometimes useful to have more explicit control over the\n time of deletion.\n \"\"\"\n 
self.device_buffer.delete()\n self.device_buffer = deleted_buffer\n self._npy_value = None\n\n def __repr__(self):\n line_width = np.get_printoptions()['linewidth']\n prefix = '{}('.format(self.__class__.__name__)\n s = np.array2string(self._value, prefix=prefix, suffix=',',\n separator=', ', max_line_width=line_width)\n dtype_str = 'dtype={})'.format(self.dtype.name)\n last_line_len = len(s) - s.rfind('\\n') + 1\n sep = ' '\n if last_line_len + len(dtype_str) + 1 > line_width:\n sep = ' ' * len(prefix)\n return \"{}{},{}{}\".format(prefix, s, sep, dtype_str)\n\n def item(self):\n if dtypes.issubdtype(self.dtype, np.complexfloating):\n return complex(self)\n elif dtypes.issubdtype(self.dtype, np.floating):\n return float(self)\n elif dtypes.issubdtype(self.dtype, np.integer):\n return int(self)\n elif dtypes.issubdtype(self.dtype, np.bool_):\n return bool(self)\n else:\n raise TypeError(self.dtype)\n\n def __len__(self):\n try:\n return self.aval.shape[0]\n except IndexError as err:\n raise TypeError(\"len() of unsized object\") from err # same as numpy error\n\n def __iter__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\") # same as numpy error\n else:\n return self._value.__iter__()\n\n def __reversed__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\")\n else:\n return reversed(self._value)\n\n def __format__(self, format_spec):\n # Simulates behavior of https://github.com/numpy/numpy/pull/9883\n if self.ndim == 0:\n return format(self._value[()], format_spec)\n else:\n return format(self._value, format_spec)\n\n def __array__(self, dtype=None, context=None):\n return np.asarray(self._value, dtype=dtype)\n\n @property\n def __cuda_array_interface__(self):\n return _force(self).device_buffer.__cuda_array_interface__\n\n __str__ = partialmethod(_forward_to_value, str)\n __bool__ = __nonzero__ = partialmethod(_forward_to_value, bool)\n def __float__(self): return self._value.__float__()\n def __int__(self): return self._value.__int__()\n def __complex__(self): return self._value.__complex__()\n __hex__ = partialmethod(_forward_to_value, hex)\n __oct__ = partialmethod(_forward_to_value, oct)\n __index__ = partialmethod(_forward_to_value, op.index)\n def tobytes(self, order='C'): return self._value.tobytes(order)\n def tolist(self): return self._value.tolist()\n\n # pickle saves and loads just like an ndarray\n __reduce__ = partialmethod(_forward_to_value, op.methodcaller(\"__reduce__\"))\n\n # clobbered when jax.numpy is imported, but useful in tests\n def __eq__(self, other): return self._value == other\n\n def __hash__(self):\n raise TypeError(\"JAX DeviceArray, like numpy.ndarray, is not hashable.\")\n\n # The following methods are dynamically overridden in lax_numpy.py.\n def __getitem__(self, i): raise NotImplementedError\n\nclass DeletedBuffer(object): pass\ndeleted_buffer = DeletedBuffer()\n\nclass DeviceConstant(object):\n __slots__ = [\"_device\"]\n def __init__(self, device=None): self._device = device\n def device(self): return self._device\n def to_py(self): return None\n\ndef is_device_constant(x):\n return type_is_device_array(x) and type(x.device_buffer) is DeviceConstant\n\ncore.literalable_types.add(DeviceArray)\ncore.pytype_aval_mappings[DeviceArray] = ConcreteArray\npytype_aval_mappings[DeviceArray] = op.attrgetter('aval')\ncanonicalize_dtype_handlers[DeviceArray] = identity\n\ndef _device_array_constant_handler(c, val, canonicalize_types=True):\n if is_device_constant(val):\n return lazy.stage_lexpr(c, 
val._lazy_expr, None)\n else:\n base_val = xb.constant(c, val.device_buffer.to_py())\n return lazy.stage_lexpr(c, val._lazy_expr, base_val)\nxb.register_constant_handler(DeviceArray, _device_array_constant_handler)\n\ndef _device_put_device_array(x: DeviceArray, device: Optional[Device]):\n x = _copy_device_array_to_device(x, device)\n return (_force(x).device_buffer,)\ndevice_put_handlers[DeviceArray] = _device_put_device_array\n\ndef _copy_device_array_to_device(x: DeviceArray, device: Optional[xc.Device]) -> DeviceArray:\n if device is None:\n # no copying to be done because there's no target specified\n return x\n elif is_device_constant(x):\n # create a new DeviceArray with the same lazy expr, no copying\n return DeviceArray(x.aval, device, x._lazy_expr, DeviceConstant(device))\n elif xb.get_device_backend(device).platform == x.device_buffer.platform():\n # source and target platforms are the same\n if x.device_buffer.device() == device:\n # no copying to be done because source equals target\n if x._device == device:\n return x\n else:\n moved_buf = x.device_buffer # We need to change stickyness\n else:\n # move the buffer with a device-to-device copy\n moved_buf = x.device_buffer.copy_to_device(device)\n else:\n # buffers from different XLA backends are passed through the host.\n backend = xb.get_device_backend(device)\n moved_buf = backend.buffer_from_pyval(x.device_buffer.to_py(), device)\n return DeviceArray(x.aval, device, x._lazy_expr, moved_buf)\n\ndef _force(x: DeviceArray) -> DeviceArray:\n if lazy.is_trivial(x._lazy_expr):\n return x\n else:\n # force x on the device where it lives, but preserve stickiness on result\n if x._device:\n device = x._device\n else:\n device = x.device_buffer.device()\n force_fun = _lazy_force_computation(x.aval, device, x._lazy_expr)\n result = force_fun(x)\n return DeviceArray(x.aval, x._device, lazy.array(x.aval.shape), result)\n\n@cache()\ndef _lazy_force_computation(aval: core.ShapedArray,\n device: Device, lexpr: lazy.LazyExpr\n ) -> Callable[[DeviceArray], PyLocalBuffer]:\n c = xb.make_computation_builder(\"lazy_force\")\n if lazy.is_constant(lexpr):\n param = None\n else:\n idxs = [(src, dst) for dst, src in enumerate(lexpr.dims) if src is not None]\n param_shape = [None] * len(idxs)\n for src, dst in idxs:\n param_shape[src] = aval.shape[dst]\n param = xb.parameter(c, 0, xc.Shape.array_shape(aval.dtype, param_shape))\n xla_out = lazy.stage_lexpr(c, lexpr, param)\n built_c = c.build(xla_out)\n\n device = _device_from_arg_devices([device])\n options = xb.get_compile_options(\n num_replicas=1,\n num_partitions=1,\n device_assignment=device and (device.id,))\n compiled = backend_compile(xb.get_device_backend(device), built_c, options)\n\n force_fun: Callable[[DeviceArray], DeviceArray]\n if lazy.is_constant(lexpr):\n def force_fun(_):\n return compiled.execute([])[0]\n else:\n def force_fun(x):\n return compiled.execute([x.device_buffer])[0]\n return force_fun\n\n\ndef _device_put_impl(x, device: Optional[Device] = None):\n if type_is_device_array(x):\n return _copy_device_array_to_device(x, device)\n\n try:\n a = abstractify(x)\n except TypeError as err:\n raise TypeError(\n f\"Argument '{x}' of type {type(x)} is not a valid JAX type\") from err\n return aval_to_result_handler(device, a)(*device_put(x, device))\n\ndevice_put_p = core.Primitive('device_put')\ndevice_put_p.def_impl(_device_put_impl)\ndevice_put_p.def_abstract_eval(lambda x, device=None: x)\ntranslations[device_put_p] = lambda c, x, device=None: x\nad.deflinear(device_put_p, 
lambda cotangent, **kwargs: [cotangent])\nmasking.defvectorized(device_put_p)\n\n\ndef _remat_translation_rule(c, axis_env, in_nodes,\n name_stack, backend, name, call_jaxpr,\n device=None, concrete=None):\n \"\"\"Lower remat to a Conditional which always returns true. This:\n 1. Circumvents common subexpression elimination.\n 2. In common case of `jax.grad(jax.remat(f))`, ensures the remat blocks\n occur after the primal blocks, because cotangent is an input to the\n Conditional.\"\"\"\n del device, concrete # Unused.\n # Fake condition which always selects True branch.\n rng = xops.RngUniform(xb.constant(c, np.array(0, dtype=np.float32)),\n xb.constant(c, np.array(1, dtype=np.float32)),\n xc.Shape.array_shape(xc.PrimitiveType.F32, []))\n pred = xops.Lt(rng, xb.constant(c, np.array(2, dtype=np.float32)))\n\n true_op = xops.Tuple(c, in_nodes)\n remat_subc = xb.make_computation_builder(\"remat_call_subcomputation\")\n input_op = xb.parameter(remat_subc, 0, c.get_shape(true_op), replicated=[])\n args = [xops.GetTupleElement(input_op, i) for i in range(len(in_nodes))]\n out_nodes = jaxpr_subcomp(remat_subc, call_jaxpr, backend, axis_env, (),\n extend_name_stack(name_stack, wrap_name(name, 'remat')),\n *args)\n out_node_shapes = [remat_subc.get_shape(o) for o in out_nodes]\n remat_subc = remat_subc.build(xops.Tuple(remat_subc, out_nodes))\n\n false_op = true_op\n dummy_subc = xb.make_computation_builder(\"remat_call_dummy_subcomputation\")\n xb.parameter(dummy_subc, 0, c.get_shape(false_op), replicated=[])\n\n def zeros(xla_shape):\n if xla_shape.is_array():\n shape, dtype = xla_shape.dimensions(), xla_shape.numpy_dtype()\n zero = xb.constant(dummy_subc, np.array(0, dtype=dtype))\n return xops.Broadcast(zero, shape)\n else:\n # It is a token\n return xops.CreateToken(dummy_subc)\n out_nodes = [zeros(s) for s in out_node_shapes]\n dummy_subc = dummy_subc.build(xops.Tuple(dummy_subc, out_nodes))\n\n return xops.Conditional(pred, true_op, remat_subc, false_op, dummy_subc)\ncall_translations[pe.remat_call_p] = _remat_translation_rule\n\n\ndef _call_translation_rule(c, axis_env, in_nodes, name_stack,\n *, backend, call_jaxpr):\n subc = xb.make_computation_builder(\"core_call\")\n args = [xb.parameter(subc, i, c.GetShape(n)) for i, n in enumerate(in_nodes)]\n out_nodes = jaxpr_subcomp(subc, call_jaxpr, backend, axis_env, (),\n extend_name_stack(name_stack, 'core_call'), *args)\n subc = subc.Build(xops.Tuple(subc, out_nodes))\n return xops.Call(c, subc, list(in_nodes))\ncall_translations[core.call_p] = _call_translation_rule\n\n\[email protected]_omnistaging_disabler\ndef omnistaging_disabler() -> None:\n global _pval_to_result_handler\n\n def _pval_to_result_handler(device, pval):\n pv, const = pval\n if pv is None:\n const = _device_put_impl(const, device) if device else const\n return lambda _: const\n else:\n return aval_to_result_handler(device, pv)\n\n pe.staged_out_calls.add(xla_call_p) # type: ignore\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.zeros", "numpy.take", "numpy.get_printoptions", "numpy.dtype", "numpy.array2string" ] ]
multinucliated/ARC
[ "7c545605eb25ba3d76c2ba12b23b26d548a31373" ]
[ "src/manual_solve.py" ]
[ "#!/usr/bin/python\n\nimport json\nimport os\nimport re\n\nimport numpy as np\n\n\n### YOUR CODE HERE: write at least three functions which solve\n### specific tasks by transforming the input x and returning the\n### result. Name them according to the task ID as in the three\n### examples below. Delete the three examples. The tasks you choose\n### must be in the data/training directory, not data/evaluation.\n\n# def solve_6a1e5592(x):\n# return x\n#\n#\n# def solve_b2862040(x):\n# return x\n#\n#\n# def solve_05269061(x):\n# return x\n\n\ndef solve_045e512c(x):\n print(x)\n return x\n\n\ndef main():\n # Find all the functions defined in this file whose names are\n # like solve_abcd1234(), and run them.\n\n # regex to match solve_* functions and extract task IDs\n p = r\"solve_([a-f0-9]{8})\"\n tasks_solvers = []\n # globals() gives a dict containing all global names (variables\n # and functions), as name: value pairs.\n for name in globals():\n m = re.match(p, name)\n if m:\n # if the name fits the pattern eg solve_abcd1234\n ID = m.group(1) # just the task ID\n solve_fn = globals()[name] # the fn itself\n tasks_solvers.append((ID, solve_fn))\n\n for ID, solve_fn in tasks_solvers:\n # for each task, read the data and call test()\n directory = os.path.join(\"..\", \"data\", \"training\")\n json_filename = os.path.join(directory, ID + \".json\")\n data = read_ARC_JSON(json_filename)\n test(ID, solve_fn, data)\n\n\ndef read_ARC_JSON(filepath):\n \"\"\"Given a filepath, read in the ARC task data which is in JSON\n format. Extract the train/test input/output pairs of\n grids. Convert each grid to np.array and return train_input,\n train_output, test_input, test_output.\"\"\"\n\n # Open the JSON file and load it \n data = json.load(open(filepath))\n\n # Extract the train/test input/output grids. Each grid will be a\n # list of lists of ints. We convert to Numpy.\n train_input = [np.array(data['train'][i]['input']) for i in range(len(data['train']))]\n train_output = [np.array(data['train'][i]['output']) for i in range(len(data['train']))]\n test_input = [np.array(data['test'][i]['input']) for i in range(len(data['test']))]\n test_output = [np.array(data['test'][i]['output']) for i in range(len(data['test']))]\n\n return (train_input, train_output, test_input, test_output)\n\n\ndef test(taskID, solve, data):\n \"\"\"Given a task ID, call the given solve() function on every\n example in the task data.\"\"\"\n print(taskID)\n train_input, train_output, test_input, test_output = data\n print(\"Training grids\")\n for x, y in zip(train_input, train_output):\n yhat = solve(x)\n show_result(x, y, yhat)\n print(\"Test grids\")\n for x, y in zip(test_input, test_output):\n yhat = solve(x)\n show_result(x, y, yhat)\n\n\ndef show_result(x, y, yhat):\n print(\"Input\")\n print(x)\n print(\"Correct output\")\n print(y)\n print(\"Our output\")\n print(yhat)\n print(\"Correct?\")\n if y.shape != yhat.shape:\n print(f\"False. Incorrect shape: {y.shape} v {yhat.shape}\")\n else:\n print(np.all(y == yhat))\n\n\nif __name__ == \"__main__\": main()\n\n\n[[3,3,3,3,3,3,3,4,3,4,3]\n,[3,3,3,3,3,3,3,3,3,3,3]\n,[3,3,3,3,1,3,3,3,3,3,3]\n,[3,3,3,3,3,3,3,3,3,3,3]\n,[3,3,1,3,3,3,1,4,3,4,3]\n,[3,3,3,3,3,3,3,3,3,3,3]\n,[3,3,3,3,1,3,2,3,3,3,2]\n,[3,3,3,3,3,3,3,3,3,3,3]\n,[8,3,3,3,8,3,3,3,3,3,3]\n,[3,3,3,3,3,3,3,3,3,3,3]\n,[8,3,3,3,8,3,2,3,3,3,2]\n,[3,3,3,3,3,3,3,3,3,3,3]]" ]
[ [ "numpy.all", "numpy.array" ] ]
kpaxiotis/3DWaveletVivadoHLS
[ "bcc33f49be9d58db401aac224aba573bc0bda802" ]
[ "c_files/power_read.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nprj = [2, 5, 8, 12, 14]\nnum_of_prj = 4\nsol_list = []\npower = []\n\n# for i in range(1, num_of_prj):\nfor i in prj:\n\n directory = \"C:/thesisRepo/c_files/dwt_prj\" + str(i) + \"/solution1/project1/project1.runs/impl_1/design_1_wrapper_power_routed.rpt\"\n sol_list.append(\"sol\" + str(i))\n \n \n with open(directory) as myfile:\n power.append(myfile.readlines()[32].split()[6])\n #print (myfile.readlines()[32].split()[6])\n\npower = [float(i) for i in power] \n\nplt.bar(sol_list, power)\n\nfor a,b in zip(sol_list, power):\n plt.text(a,b,str(b)+ \"__\" + str(round((b - power[0])/power[0] * 100, 2))+ \"%\")\nplt.show()" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.bar" ] ]
shifty21/neural-numpy
[ "85c97f19c830f82b92b43986bdc2efcb6e114076" ]
[ "src/utils/fixed_point.py" ]
[ "import numpy as np\nfrom rig.type_casts import NumpyFixToFloatConverter, NumpyFloatToFixConverter\n\n\nclass FixedPoint:\n def __init__(self):\n # self.float_to_fixed = NumpyFloatToFixConverter(signed=True, n_bits=8, n_frac=4)\n self.float_to_fixed = NumpyFloatToFixConverter(\n signed=True, n_bits=8, n_frac=4)\n self.fixed_to_float = NumpyFixToFloatConverter(4)\n self.float64_to_fixed = NumpyFloatToFixConverter(\n signed=True, n_bits=8, n_frac=7)\n # self.fixed_to_float = NumpyFixToFloatConverter(7)\n\n def convert_float_to_fixed(self, array):\n return self.float_to_fixed(array)\n\n def convert_fixed_to_float(self, array):\n return self.fixed_to_float(array)\n\n def convert_float4_to_fixed(self, array):\n return self.float64_to_fixed(array)\n\n def right_shift(self, array):\n return np.right_shift(array, 5)\n" ]
[ [ "numpy.right_shift" ] ]
siavash-khodadadeh/MetaLearning-TF2.0
[ "de852bd3b2ff46f8d390cebf561add3a166ee855", "de852bd3b2ff46f8d390cebf561add3a166ee855" ]
[ "models/lasiumprotonetsvae/protonets_vae_mini_imagenet.py", "models/sml/sml_celeba_cactus.py" ]
[ "import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nfrom databases import MiniImagenetDatabase\n\nfrom models.lasiumprotonetsvae.database_parsers import MiniImagenetParser\nfrom models.lasiumprotonetsvae.protonets_vae import ProtoNetsVAE\nfrom models.lasiumprotonetsvae.vae import VAE, AudioCallback\n\nclass MiniImagenetModel(tf.keras.Model):\n name = 'MiniImagenetModel'\n def __init__(self, *args, **kwargs):\n\n super(MiniImagenetModel, self).__init__(*args, **kwargs)\n self.max_pool = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))\n self.conv1 = tf.keras.layers.Conv2D(32, 3, name='conv1')\n self.bn1 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn1')\n # self.bn1 = tf.keras.layers.LayerNormalization(center=True, scale=False, name='bn1')\n self.conv2 = tf.keras.layers.Conv2D(32, 3, name='conv2')\n self.bn2 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn2')\n # self.bn2 = tf.keras.layers.LayerNormalization(center=True, scale=False, name='bn2')\n self.conv3 = tf.keras.layers.Conv2D(32, 3, name='conv3')\n self.bn3 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn3')\n # self.bn3 = tf.keras.layers.LayerNormalization(center=True, scale=False, name='bn3')\n self.conv4 = tf.keras.layers.Conv2D(32, 3, name='conv4')\n self.bn4 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn4')\n # self.bn4 = tf.keras.layers.LayerNormalization(center=True, scale=False, name='bn4')\n self.flatten = tf.keras.layers.Flatten(name='flatten')\n\n def conv_block(self, features, conv, bn=None, training=False):\n conv_out = conv(features)\n batch_normalized_out = bn(conv_out, training=training)\n batch_normalized_out = self.max_pool(batch_normalized_out)\n return tf.keras.activations.relu(batch_normalized_out)\n\n def get_features(self, inputs, training=False):\n import numpy as np\n image = inputs\n c1 = self.conv_block(image, self.conv1, self.bn1, training=training)\n c2 = self.conv_block(c1, self.conv2, self.bn2, training=training)\n c3 = self.conv_block(c2, self.conv3, self.bn3, training=training)\n c4 = self.conv_block(c3, self.conv4, self.bn4, training=training)\n c4 = tf.reshape(c4, [-1, np.prod([int(dim) for dim in c4.get_shape()[1:]])])\n f = self.flatten(c4)\n return f\n\n def call(self, inputs, training=False):\n out = self.get_features(inputs, training=training)\n\n return out\n\ndef get_encoder(latent_dim):\n encoder_inputs = keras.Input(shape=(84, 84, 3))\n x = layers.Conv2D(64, 4, activation=None, strides=2, padding=\"same\", use_bias=False)(encoder_inputs)\n x = layers.BatchNormalization()(x)\n x = layers.ReLU()(x)\n\n x = layers.Conv2D(128, 4, activation=None, strides=2, padding=\"same\", use_bias=False)(x)\n x = layers.BatchNormalization()(x)\n x = layers.ReLU()(x)\n\n x = layers.Conv2D(256, 4, activation=None, strides=2, padding=\"same\", use_bias=False)(x)\n x = layers.BatchNormalization()(x)\n x = layers.ReLU()(x)\n\n x = layers.Conv2D(256, 4, activation=None, strides=2, padding=\"same\", use_bias=False)(x)\n x = layers.BatchNormalization()(x)\n x = layers.ReLU()(x)\n\n x = layers.Conv2D(512, 4, activation=None, strides=2, padding=\"same\", use_bias=False)(x)\n x = layers.BatchNormalization()(x)\n x = layers.ReLU()(x)\n\n x = layers.Flatten()(x)\n z_mean = layers.Dense(latent_dim, name=\"z_mean\")(x)\n z_log_var = layers.Dense(latent_dim, name=\"z_log_var\")(x)\n\n encoder = keras.Model(encoder_inputs, [z_mean, z_log_var], name=\"encoder\")\n 
encoder.summary()\n\n return encoder\n\n\ndef get_decoder(latent_dim):\n latent_inputs = keras.Input(shape=(latent_dim,))\n x = layers.Dense(7 * 7 * 64, activation=\"relu\")(latent_inputs)\n x = layers.Reshape((7, 7, 64))(x)\n x = layers.Conv2DTranspose(512, 4, activation=None, strides=3, padding=\"same\", use_bias=False)(x)\n x = layers.BatchNormalization()(x)\n x = layers.ReLU()(x)\n\n x = layers.Conv2DTranspose(256, 4, activation=None, strides=2, padding=\"same\", use_bias=False)(x)\n x = layers.BatchNormalization()(x)\n x = layers.ReLU()(x)\n\n x = layers.Conv2DTranspose(256, 4, activation=None, strides=2, padding=\"same\", use_bias=False)(x)\n x = layers.BatchNormalization()(x)\n x = layers.ReLU()(x)\n\n x = layers.Conv2DTranspose(128, 4, activation=None, strides=1, padding=\"same\", use_bias=False)(x)\n x = layers.BatchNormalization()(x)\n x = layers.ReLU()(x)\n\n x = layers.Conv2DTranspose(64, 4, activation=None, strides=1, padding=\"same\", use_bias=False)(x)\n x = layers.BatchNormalization()(x)\n x = layers.ReLU()(x)\n\n decoder_outputs = layers.Conv2DTranspose(3, 4, activation=\"sigmoid\", padding=\"same\")(x)\n decoder = keras.Model(latent_inputs, decoder_outputs, name=\"decoder\")\n decoder.summary()\n\n return decoder\n\n\nif __name__ == '__main__':\n# import tensorflow as tf\n# gpus = tf.config.experimental.list_physical_devices('GPU')\n# tf.config.experimental.set_memory_growth(gpus[0], True)\n# tf.config.experimental_run_functions_eagerly(True)\n\n mini_imagenet_database = MiniImagenetDatabase()\n shape = (84, 84, 3)\n latent_dim = 512\n mini_imagenet_encoder = get_encoder(latent_dim)\n mini_imagenet_decoder = get_decoder(latent_dim)\n mini_imagenet_parser = MiniImagenetParser(shape=shape)\n\n vae = VAE(\n 'mini_imagenet',\n image_shape=shape,\n latent_dim=latent_dim,\n database=mini_imagenet_database,\n parser=mini_imagenet_parser,\n encoder=mini_imagenet_encoder,\n decoder=mini_imagenet_decoder,\n visualization_freq=1,\n learning_rate=0.001,\n )\n vae.perform_training(epochs=500, checkpoint_freq=5)\n vae.load_latest_checkpoint()\n vae.visualize_meta_learning_task()\n\n proto_vae = ProtoNetsVAE(\n vae=vae,\n latent_algorithm='p2',\n database=mini_imagenet_database,\n network_cls=MiniImagenetModel,\n n=5,\n k=1,\n k_val_ml=5,\n k_val_train=5,\n k_val_val=5,\n k_val_test=5,\n k_test=5,\n meta_batch_size=4,\n save_after_iterations=1000,\n meta_learning_rate=0.001,\n report_validation_frequency=200,\n log_train_images_after_iteration=200,\n number_of_tasks_val=100,\n number_of_tasks_test=1000,\n experiment_name='proto_vae_mini_imagenet',\n val_seed=42\n )\n\n proto_vae.visualize_meta_learning_task(shape, num_tasks_to_visualize=2)\n\n proto_vae.train(iterations=60000)\n proto_vae.evaluate(-1, seed=42)", "import tensorflow as tf\n\nfrom models.sml.sml import SML\nfrom networks.maml_umtra_networks import MiniImagenetModel\nfrom databases import CelebADatabase, LFWDatabase\n\n\ndef run_celeba():\n celeba_database = CelebADatabase()\n base_model = tf.keras.applications.VGG19(weights='imagenet')\n feature_model = tf.keras.models.Model(inputs=base_model.input, outputs=base_model.layers[24].output)\n\n sml = SML(\n database=celeba_database,\n target_database=LFWDatabase(),\n network_cls=MiniImagenetModel,\n n=5,\n k=1,\n k_val_ml=5,\n k_val_val=15,\n k_val_test=15,\n k_test=15,\n meta_batch_size=4,\n num_steps_ml=5,\n lr_inner_ml=0.05,\n num_steps_validation=5,\n save_after_iterations=15000,\n meta_learning_rate=0.001,\n n_clusters=500,\n feature_model=feature_model,\n # 
feature_size=288,\n feature_size=4096,\n input_shape=(224, 224, 3),\n preprocess_function=tf.keras.applications.vgg19.preprocess_input,\n log_train_images_after_iteration=1000,\n number_of_tasks_val=100,\n number_of_tasks_test=1000,\n clip_gradients=True,\n report_validation_frequency=250,\n experiment_name='cactus_celeba_original3'\n )\n sml.train(iterations=60000)\n sml.evaluate(iterations=50, seed=42)\n\n\nif __name__ == '__main__':\n run_celeba()\n" ]
[ [ "tensorflow.keras.activations.relu", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.MaxPool2D", "tensorflow.keras.layers.Reshape", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.Model", "tensorflow.keras.layers.Conv2DTranspose", "tensorflow.keras.Input", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.ReLU" ], [ "tensorflow.keras.applications.VGG19", "tensorflow.keras.models.Model" ] ]
gfardell/TomoPhantom
[ "454876a9383dbe031a4449631a7a7914e3bc9f43" ]
[ "Demos/Python/2D/Model2D_t.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nGPLv3 license (ASTRA toolbox)\nNote that the TomoPhantom package is released under Apache License, Version 2.0\n\nScript to generate temporal (2D + time) analytical phantoms and their sinograms\nIf one needs to modify/add phantoms, please edit Phantom2DLibrary.dat or\nPhantom3DLibrary.dat\n! Note that all temporal phantoms start from no. 100 \n\n>>>>> Optional dependencies (reconstruction mainly): <<<<<\n1. ASTRA toolbox: conda install -c astra-toolbox astra-toolbox\n2. tomobar: conda install -c dkazanc tomobar\nor install from https://github.com/dkazanc/ToMoBAR\n\n\n@author: Daniil Kazantsev\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport tomophantom\nfrom tomophantom import TomoP2D\n\nmodel = 102 # note that the selected model is temporal (2D + time)\nN_size = 512 # set dimension of the phantom\n# one can specify an exact path to the parameters file\n# path_library2D = '../../../PhantomLibrary/models/Phantom2DLibrary.dat'\npath = os.path.dirname(tomophantom.__file__)\npath_library2D = os.path.join(path, \"Phantom2DLibrary.dat\")\n#This will generate a N_size x N_size x Time frames phantom (2D + time)\nphantom_2Dt = TomoP2D.ModelTemporal(model, N_size, path_library2D)\n\nplt.close('all')\nplt.figure(1)\nplt.rcParams.update({'font.size': 21})\nplt.title('{}''{}'.format('2D+t phantom using model no.',model))\nfor sl in range(0,np.shape(phantom_2Dt)[0]):\n im = phantom_2Dt[sl,:,:]\n plt.imshow(im, vmin=0, vmax=1)\n plt.pause(.1)\n plt.draw\n\n# create sinogram analytically\nangles_num = int(0.5*np.pi*N_size); # angles number\nangles = np.linspace(0,180,angles_num,dtype='float32')\nangles_rad = angles*(np.pi/180)\nP = int(np.sqrt(2)*N_size) #detectors\n\nsino = TomoP2D.ModelSinoTemporal(model, N_size, P, angles, path_library2D)\n\nplt.figure(2)\nplt.rcParams.update({'font.size': 21})\nplt.title('{}''{}'.format('2D+t sinogram of model no.',model))\nfor sl in range(0,np.shape(phantom_2Dt)[0]):\n im = sino[sl,:,:].transpose()\n plt.imshow(im, vmin=0, vmax=180)\n plt.pause(.1)\n plt.draw\n#%%\nprint (\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\nprint (\"Reconstructing analytical sinogram using FBP (tomobar)...\")\nprint (\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n# initialise tomobar reconstruction class ONCE\nfrom tomobar.methodsDIR import RecToolsDIR\nRectoolsDIR = RecToolsDIR(DetectorsDimH = P, # DetectorsDimH # detector dimension (horizontal)\n DetectorsDimV = None, # DetectorsDimV # detector dimension (vertical) for 3D case only\n CenterRotOffset = None, # Center of Rotation (CoR) scalar (for 3D case only)\n AnglesVec = angles_rad, # array of angles in radians\n ObjSize = N_size, # a scalar to define reconstructed object dimensions\n device_projector='cpu')\n\nFBPrec = RectoolsDIR.FBP(sino[15,:,:].transpose()) # reconstruct one frame\n\nplt.figure(3) \nplt.imshow(FBPrec, vmin=0, vmax=1)\nplt.title('FBP Reconstructed Phantom')\n#%%\n" ]
[ [ "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.title", "matplotlib.pyplot.close", "numpy.shape", "matplotlib.pyplot.figure", "matplotlib.pyplot.pause", "numpy.sqrt", "numpy.linspace", "matplotlib.pyplot.imshow" ] ]
sdcubber/kaggle_carvana
[ "44f6c7f1e80be2caa3c7ad4c7fb69067af45fe8f" ]
[ "src/main.py" ]
[ "from data.config import *\n# custom modules\nimport time\nimport models.models as mo\nimport models.model_utils as mu\nimport processing.processing_utils as pu\nimport processing.augmentation as pa\nfrom data.data_utils import CarvanaDataset\n\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\nfrom PIL import Image\nimport h5py\n\n\ndef run_experiment(parser):\n args = parser.parse_args()\n timestamp = datetime.now()\n file = timestamp.strftime('log_%H_%M_%d_%m_%Y_{}.log'.format(args.arch))\n log = pu.Logger()\n log.open(os.path.join(OUTPUT_LOG_PATH, file), mode='w')\n\n log.write(str(args) + \"\\n\")\n\n # define loss function (criterion) and optimizer\n criterion = mu.BCELoss2D()\n model = mo.UNet128()\n # initialize model with pretrained weights\n #model = torch.load('../models/UNet_128_1024_best_weights_0.004352842413936742.torch')\n #optimizer = torch.optim.SGD(model.parameters(),\n # lr=args.lr,\n # momentum=args.momentum,\n # weight_decay=args.weight_decay)\n optimizer = torch.optim.Adam(model.parameters(),\n lr=args.lr)\n\n if GPU_AVAIL:\n model = model.cuda()\n criterion = criterion.cuda()\n log.write(\"Using GPU...\")\n\n # --- TRAINING --- #\n\n # Type of car rotations\n rot_id = [args.rotation] if args.rotation else range(1, 17)\n\n # Data augmentation\n common_trans = [lambda x,y: pa.randomBrightness(x,y),\n lambda x,y: pa.randomHue(x,y),\n lambda x,y: pa.randomHorizontalFlip(x,y)]\n\n input_trans = transforms.Compose([\n transforms.Lambda(lambda x: pa.resize_cv2(x, args.im_size, args.im_size)),\n ])\n mask_trans = transforms.Compose([\n transforms.Lambda(lambda x: pa.resize_cv2(x, args.im_size, args.im_size)),\n ])\n\n # split data set for training and valid\n train_ids, valid_ids = pu.train_valid_split(TRAIN_MASKS_CSV, rotation_ids=rot_id,\n valid=args.valid_size)\n\n # preparing data flow for training the network\n dset_train = CarvanaDataset(im_dir=TRAIN_IMG_PATH,\n ids_list=train_ids,\n mask_dir=TRAIN_MASKS_PATH,\n common_transforms=common_trans,\n input_transforms=input_trans,\n mask_transforms=mask_trans,\n rotation_ids=rot_id,\n weighted=args.weighted,\n debug=args.debug)\n\n dset_valid = CarvanaDataset(im_dir=TRAIN_IMG_PATH,\n ids_list=valid_ids,\n mask_dir=TRAIN_MASKS_PATH,\n common_transforms=common_trans,\n input_transforms=input_trans,\n mask_transforms=mask_trans,\n rotation_ids=rot_id,\n weighted=args.weighted,\n debug=args.debug)\n\n train_loader = DataLoader(dset_train,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=args.workers,\n pin_memory=GPU_AVAIL)\n valid_loader = DataLoader(dset_valid,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.workers,\n pin_memory=GPU_AVAIL)\n\n start_time = time.time()\n best_dice, best_loss = mu.train(train_loader, valid_loader, model, criterion, optimizer, args, log)\n elapsed_time = time.time() - start_time\n print('Elapsed time for training: {} minutes'.format(np.round(elapsed_time/60, 2)))\n print('Time per epoch: {} seconds'.format(elapsed_time/args.epochs))\n # ---------------------------------------------------------------------------------#\n\n # --- TESTING --- #\n dset_train_full = CarvanaDataset(im_dir=TRAIN_IMG_PATH,\n input_transforms=input_trans,\n rotation_ids=rot_id,\n debug=args.debug)\n train_full_loader = DataLoader(dset_train_full,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.workers,\n pin_memory=GPU_AVAIL)\n\n dset_test = CarvanaDataset(im_dir=TEST_IMG_PATH,\n input_transforms=input_trans,\n rotation_ids=rot_id,\n 
debug=args.debug)\n test_loader = DataLoader(dset_test,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.workers,\n pin_memory=GPU_AVAIL)\n\n log.write('Predicting training data...\\n')\n train_idx, rle_encoded_predictions_train, output_train = mu.predict(model, train_full_loader, args, log)\n log.write('Predicting test data...\\n')\n test_idx, rle_encoded_predictions, output_test = mu.predict(model, test_loader, args, log)\n\n # Store rle encoded outputs\n output_file_train = os.path.join(OUTPUT_SUB_PATH, 'train', 'TRAIN_{}_{:.5f}_{:.5f}.gz'\n .format(timestamp.strftime('%H_%M_%d_%m_%Y_{}'.format(args.arch)), best_dice,\n best_loss))\n output_file = os.path.join(OUTPUT_SUB_PATH, 'test', '{}_{:.5f}_{:.5f}.gz'\n .format(timestamp.strftime('%H_%M_%d_%m_%Y_{}'.format(args.arch)), best_dice, best_loss))\n\n if args.store_probabilities:\n log.write('Storing predicted probabilities...\\n')\n # Store output probabilities\n # see https://stackoverflow.com/questions/20928136/input-and-output-numpy-arrays-to-h5py\n # and https://stackoverflow.com/questions/22400652/compress-numpy-arrays-efficiently\n # for reading in the .h5 files\n h5f = h5py.File('../models/probabilities_{}_{:.5f}_{:.5f}.h5'.format(timestamp.strftime('%H_%M_%d_%m_%Y_{}'.format(args.arch)), best_dice,\n best_loss), 'w')\n h5f.create_dataset('TRAIN', data=np.concatenate([output_train], axis=0), compression='gzip', compression_opts=9)\n h5f.create_dataset('TEST', data=np.concatenate([output_test], axis=0), compression='gzip', compression_opts=9)\n h5f.close()\n\n log.write('Writing encoded csv files for training data..\\n')\n pu.make_prediction_file(output_file_train, TRAIN_MASKS_CSV, train_idx, rle_encoded_predictions_train)\n log.write('Writing encoded csv files for test data...\\n')\n pu.make_prediction_file(output_file, SAMPLE_SUB_CSV, test_idx, rle_encoded_predictions)\n log.write('Done!')\n # --------------------------------------------------------------------------------#\n\ndef main():\n prs = argparse.ArgumentParser(description='Kaggle: Carvana car segmentation challenge')\n prs.add_argument('message', default=' ', type=str, help='Message to describe experiment in spreadsheet')\n prs.add_argument('im_size', default=256, type=int, help='image size (default: 256)')\n prs.add_argument('arch', default='UNet', help='Model architecture ')\n prs.add_argument('epochs', default=30, type=int, help='Number of total epochs to run')\n prs.add_argument('-j', '--workers', default=3, type=int, metavar='N', help='Number of data loading workers')\n prs.add_argument('-lr', '--lr', default=0.01, type=float, metavar='LR', help='Initial learning rate')\n prs.add_argument('-b', '--batch_size', default=16, type=int, metavar='N', help='Mini-batch size (default: 16)')\n prs.add_argument('-rot', '--rotation', default=None, type=int, help='Type of car rotation. 
Default None returns all rotations.')\n prs.add_argument('--start_epoch', default=0, type=int, metavar='N', help='Manual epoch number (useful on restarts)')\n prs.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')\n prs.add_argument('--valid_size', default=0.1, type=float, metavar='M', help='Validation set size')\n prs.add_argument('--weight_decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)')\n prs.add_argument('--resume', default='', type=str, metavar='PATH', help='Path to latest checkpoint (default: none)')\n prs.add_argument('-db', '--debug', action='store_true', help='Debug mode.')\n prs.add_argument('-we', '--weighted', action='store_true', help='Use weighted loss.')\n prs.add_argument('-sp', '--store_probabilities', action='store_true', help='Store predicted probabilities')\n prs.add_argument('-ac', '--n_acc', type=int, default=1, help='Number of batches to accumulate gradients.')\n run_experiment(prs)\n\n\nif __name__ == '__main__':\n # random.seed(123456789) # Fix seed\n sys.exit(main())\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
cphatak/SLADS-Net
[ "025b1bc3fbfd8018dc50ad1b95c3459e8c8d5d1f" ]
[ "code/computeFeatures.py" ]
[ "#! /usr/bin/env python3\nimport numpy as np\nimport numpy.matlib as matlib\nfrom computeDifference import computeDifference\n\n\ndef computeFeatures(MeasuredValues,MeasuredIdxs,UnMeasuredIdxs,SizeImage,NeighborValues,NeighborWeights,NeighborDistances,TrainingInfo,ReconValues,ReconImage,Resolution,ImageType):\n Feature=np.zeros((np.shape(UnMeasuredIdxs)[0],6))\n\n # Compute st div features\n Feature[:,0],Feature[:,1]=computeStDivFeatures(NeighborValues,NeighborWeights,TrainingInfo,ReconValues,ImageType)\n \n # Compute distance/density features\n Feature[:,2],Feature[:,3]=computeDensityDistanceFeatures(NeighborDistances,NeighborWeights,SizeImage,TrainingInfo,ReconValues,ImageType)\n\n # Compute gradient features\n GradientImageX,GradientImageY=computeGradientFeatures(MeasuredValues,MeasuredIdxs,UnMeasuredIdxs,SizeImage,TrainingInfo,ReconImage,ImageType)\n Feature[:,4] = GradientImageY[UnMeasuredIdxs[:,0],UnMeasuredIdxs[:,1]]\n Feature[:,5] = GradientImageX[UnMeasuredIdxs[:,0],UnMeasuredIdxs[:,1]]\n\n\n PolyFeatures = computePolyFeatures(Feature)\n return PolyFeatures\n\ndef computeGradientFeatures(MeasuredValues,MeasuredIdxs,UnMeasuredIdxs,SizeImage,TrainingInfo,ReconImage,ImageType):\n GradientImageX,GradientImageY = np.gradient(ReconImage)\n if ImageType=='D':\n GradientImageX[GradientImageX!=0]=1\n GradientImageY[GradientImageY!=0]=1\n elif ImageType=='C':\n GradientImageX=abs(GradientImageX)\n GradientImageY=abs(GradientImageY)\n return(GradientImageX,GradientImageY)\n\n\ndef computeStDivFeatures(NeighborValues,NeighborWeights,TrainingInfo,ReconValues,ImageType):\n \n DiffVect = computeDifference(NeighborValues,np.transpose(matlib.repmat(ReconValues,np.shape(NeighborValues)[1],1)),ImageType)\n Feature_0 = np.sum(NeighborWeights*DiffVect,axis=1)\n Feature_1 = np.sqrt((1/TrainingInfo.NumNbrs)*np.sum(np.power(DiffVect,2),axis=1))\n return(Feature_0,Feature_1)\n\n\ndef computeDensityDistanceFeatures(NeighborDistances,NeighborWeights,SizeImage,TrainingInfo,ReconValues,ImageType):\n \n CutoffDist = np.ceil(np.sqrt((TrainingInfo.FeatDistCutoff/100)*(SizeImage[0]*SizeImage[1]/np.pi)))\n Feature_2 = NeighborDistances[:,0]\n NeighborsInCircle=np.sum(NeighborDistances<=CutoffDist,axis=1)\n Feature_3 = (1+(np.pi*(np.power(CutoffDist,2))))/(1+NeighborsInCircle)\n return(Feature_2,Feature_3)\n\n#def computePolyFeatures(Feature):\n# \n# PolyFeatures = np.hstack([np.ones((np.shape(Feature)[0],1)),Feature])\n# for i in range(0,np.shape(Feature)[1]):\n# for j in range(i,np.shape(Feature)[1]):\n# Temp = Feature[:,i]*Feature[:,j]\n# PolyFeatures = np.column_stack([PolyFeatures,Feature[:,i]*Feature[:,j]])\n#\n# return PolyFeatures\n \n \nfrom sklearn.kernel_approximation import RBFSampler\n \ndef computePolyFeatures(Feature):\n \n rbf_feature = RBFSampler(gamma=0.01, n_components=50 ,random_state=1)\n PolyFeatures = rbf_feature.fit_transform(Feature)\n\n# PolyFeatures = np.copy(Feature)\n \n return PolyFeatures \n \n \n \n \n \n \n \n \n\n" ]
[ [ "sklearn.kernel_approximation.RBFSampler", "numpy.sum", "numpy.shape", "numpy.power", "numpy.sqrt", "numpy.gradient" ] ]
bahleg/pt.darts
[ "44c7d46b74dc99cbe9fd535af7179988f77b1b9c" ]
[ "models/cnn_var_naive/search_cells.py" ]
[ "\"\"\" CNN cell for architecture search \"\"\"\nimport torch\nimport torch.nn as nn\nimport models.cnn_var_naive.ops as ops\n\n\nclass SearchCell(nn.Module):\n \"\"\" Cell for search\n Each edge is mixed and continuous relaxed.\n \"\"\"\n def __init__(self, n_nodes, C_pp, C_p, C, reduction_p, reduction):\n \"\"\"\n Args:\n n_nodes: # of intermediate n_nodes\n C_pp: C_out[k-2]\n C_p : C_out[k-1]\n C : C_in[k] (current)\n reduction_p: flag for whether the previous cell is reduction cell or not\n reduction: flag for whether the current cell is reduction cell or not\n \"\"\"\n super().__init__()\n self.reduction = reduction\n self.n_nodes = n_nodes\n\n # If previous cell is reduction cell, current input size does not match with\n # output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing.\n if reduction_p:\n self.preproc0 = ops.FactorizedReduce(C_pp, C, affine=False)\n else:\n self.preproc0 = ops.StdConv(C_pp, C, 1, 1, 0, affine=False)\n self.preproc1 = ops.StdConv(C_p, C, 1, 1, 0, affine=False)\n\n # generate dag\n self.dag = nn.ModuleList()\n for i in range(self.n_nodes):\n self.dag.append(nn.ModuleList())\n for j in range(2+i): # include 2 input nodes\n # reduction should be used only for input node\n stride = 2 if reduction and j < 2 else 1\n op = ops.MixedOp(C, stride)\n self.dag[i].append(op)\n\n def forward(self, s0, s1, w_dag):\n s0 = self.preproc0(s0)\n s1 = self.preproc1(s1)\n\n states = [s0, s1]\n for edges, w_list in zip(self.dag, w_dag):\n s_cur = sum(edges[i](s, w) for i, (s, w) in enumerate(zip(states, w_list)))\n states.append(s_cur)\n\n s_out = torch.cat(states[2:], dim=1)\n return s_out\n" ]
[ [ "torch.cat", "torch.nn.ModuleList" ] ]
guangxingli/python-neo
[ "88b58a43e50bef3788e42504f47e507d169c5802" ]
[ "neo/test/coretest/test_base.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nTests of the neo.core.baseneo.BaseNeo class and related functions\n\"\"\"\n\nfrom datetime import datetime, date, time, timedelta\nfrom decimal import Decimal\nfrom fractions import Fraction\nimport sys\n\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\n\nimport numpy as np\nimport quantities as pq\n\ntry:\n from IPython.lib.pretty import pretty\nexcept ImportError as err:\n HAVE_IPYTHON = False\nelse:\n HAVE_IPYTHON = True\n\nfrom neo.core.baseneo import (BaseNeo, _check_annotations,\n merge_annotations, merge_annotation)\nfrom neo.test.tools import assert_arrays_equal\n\n\nif sys.version_info[0] >= 3:\n _bytes = bytes\n\n long = int\n\n def bytes(s):\n return _bytes(s, encoding='ascii')\n\n\nclass Test_check_annotations(unittest.TestCase):\n '''\n TestCase to make sure _check_annotations works\n '''\n def setUp(self):\n self.values = [1, 2.2, 3 + 2j,\n 'test', r'test', b'test',\n None,\n datetime(year=2008, month=12, day=3, hour=10, minute=4),\n timedelta(weeks=2, days=7, hours=18, minutes=28,\n seconds=18, milliseconds=28, microseconds=45),\n time(hour=10, minute=4),\n Decimal(\"3.14\"), Fraction(13, 21),\n np.array([1.1, 1.2, 1.3]),\n np.array([1, 2, 3]),\n np.array('test', dtype='S'),\n np.array([True, False])]\n\n def test__check_annotations__invalid_ValueError(self):\n value = set([])\n self.assertRaises(ValueError, _check_annotations, value)\n\n def test__check_annotations__invalid_dtype_ValueError(self):\n value = np.array([], dtype='O')\n self.assertRaises(ValueError, _check_annotations, value)\n\n def test__check_annotations__valid_dtypes(self):\n for value in self.values:\n _check_annotations(value)\n\n def test__check_annotations__list(self):\n _check_annotations(self.values)\n\n def test__check_annotations__tuple(self):\n _check_annotations(tuple(self.values))\n _check_annotations((self.values, self.values))\n\n def test__check_annotations__dict(self):\n names = ['value%s' % i for i in range(len(self.values))]\n values = dict(zip(names, self.values))\n _check_annotations(values)\n\n\nclass TestBaseNeo(unittest.TestCase):\n '''\n TestCase to make sure basic initialization and methods work\n '''\n def test_init(self):\n '''test to make sure initialization works properly'''\n base = BaseNeo(name='a base', description='this is a test')\n self.assertEqual(base.name, 'a base')\n self.assertEqual(base.description, 'this is a test')\n self.assertEqual(base.file_origin, None)\n\n def test_annotate(self):\n '''test to make sure annotation works properly'''\n base = BaseNeo()\n base.annotate(test1=1, test2=1)\n result1 = {'test1': 1, 'test2': 1}\n\n self.assertDictEqual(result1, base.annotations)\n\n base.annotate(test3=2, test4=3)\n result2 = {'test3': 2, 'test4': 3}\n result2a = dict(list(result1.items()) + list(result2.items()))\n\n self.assertDictContainsSubset(result1, base.annotations)\n self.assertDictContainsSubset(result2, base.annotations)\n self.assertDictEqual(result2a, base.annotations)\n\n base.annotate(test1=5, test2=8)\n result3 = {'test1': 5, 'test2': 8}\n result3a = dict(list(result3.items()) + list(result2.items()))\n\n self.assertDictContainsSubset(result2, base.annotations)\n self.assertDictContainsSubset(result3, base.annotations)\n self.assertDictEqual(result3a, base.annotations)\n\n self.assertNotEqual(base.annotations['test1'], result1['test1'])\n self.assertNotEqual(base.annotations['test2'], result1['test2'])\n\n def test__children(self):\n base = BaseNeo()\n\n 
self.assertEqual(base._single_parent_objects, ())\n self.assertEqual(base._multi_parent_objects, ())\n\n self.assertEqual(base._single_parent_containers, ())\n self.assertEqual(base._multi_parent_containers, ())\n\n self.assertEqual(base._parent_objects, ())\n self.assertEqual(base._parent_containers, ())\n\n self.assertEqual(base.parents, ())\n\n\nclass Test_BaseNeo_merge_annotations_merge(unittest.TestCase):\n '''\n TestCase to make sure merge_annotations and merge methods work\n '''\n def setUp(self):\n self.name1 = 'a base 1'\n self.name2 = 'a base 2'\n self.description1 = 'this is a test 1'\n self.description2 = 'this is a test 2'\n self.base1 = BaseNeo(name=self.name1, description=self.description1)\n self.base2 = BaseNeo(name=self.name2, description=self.description2)\n\n def test_merge_annotations__dict(self):\n self.base1.annotations = {'val0': 'val0', 'val1': 1,\n 'val2': 2.2, 'val3': 'test1',\n 'val4': [.4], 'val5': {0: 0, 1: {0: 0}},\n 'val6': np.array([0, 1, 2])}\n self.base2.annotations = {'val2': 2.2, 'val3': 'test2',\n 'val4': [4, 4.4], 'val5': {1: {1: 1}, 2: 2},\n 'val6': np.array([4, 5, 6]), 'val7': True}\n\n ann1 = self.base1.annotations\n ann2 = self.base2.annotations\n ann1c = self.base1.annotations.copy()\n ann2c = self.base2.annotations.copy()\n\n targ = {'val0': 'val0', 'val1': 1, 'val2': 2.2, 'val3': 'test1;test2',\n 'val4': [.4, 4, 4.4], 'val5': {0: 0, 1: {0: 0, 1: 1}, 2: 2},\n 'val7': True}\n\n self.base1.merge_annotations(self.base2)\n\n val6t = np.array([0, 1, 2, 4, 5, 6])\n val61 = ann1.pop('val6')\n val61c = ann1c.pop('val6')\n val62 = ann2.pop('val6')\n val62c = ann2c.pop('val6')\n\n self.assertEqual(ann1, self.base1.annotations)\n self.assertNotEqual(ann1c, self.base1.annotations)\n self.assertEqual(ann2c, self.base2.annotations)\n self.assertEqual(targ, self.base1.annotations)\n\n assert_arrays_equal(val61, val6t)\n self.assertRaises(AssertionError, assert_arrays_equal, val61c, val6t)\n assert_arrays_equal(val62, val62c)\n\n self.assertEqual(self.name1, self.base1.name)\n self.assertEqual(self.name2, self.base2.name)\n self.assertEqual(self.description1, self.base1.description)\n self.assertEqual(self.description2, self.base2.description)\n\n def test_merge_annotations__func__dict(self):\n ann1 = {'val0': 'val0', 'val1': 1, 'val2': 2.2, 'val3': 'test1',\n 'val4': [.4], 'val5': {0: 0, 1: {0: 0}},\n 'val6': np.array([0, 1, 2])}\n ann2 = {'val2': 2.2, 'val3': 'test2',\n 'val4': [4, 4.4], 'val5': {1: {1: 1}, 2: 2},\n 'val6': np.array([4, 5, 6]), 'val7': True}\n\n ann1c = ann1.copy()\n ann2c = ann2.copy()\n\n targ = {'val0': 'val0', 'val1': 1, 'val2': 2.2, 'val3': 'test1;test2',\n 'val4': [.4, 4, 4.4], 'val5': {0: 0, 1: {0: 0, 1: 1}, 2: 2},\n 'val7': True}\n\n res = merge_annotations(ann1, ann2)\n\n val6t = np.array([0, 1, 2, 4, 5, 6])\n val6r = res.pop('val6')\n val61 = ann1.pop('val6')\n val61c = ann1c.pop('val6')\n val62 = ann2.pop('val6')\n val62c = ann2c.pop('val6')\n\n self.assertEqual(ann1, ann1c)\n self.assertEqual(ann2, ann2c)\n self.assertEqual(res, targ)\n\n assert_arrays_equal(val6r, val6t)\n self.assertRaises(AssertionError, assert_arrays_equal, val61, val6t)\n assert_arrays_equal(val61, val61c)\n assert_arrays_equal(val62, val62c)\n\n def test_merge_annotation__func__str(self):\n ann1 = 'test1'\n ann2 = 'test2'\n\n targ = 'test1;test2'\n\n res = merge_annotation(ann1, ann2)\n\n self.assertEqual(res, targ)\n\n def test_merge_annotation__func__ndarray(self):\n ann1 = np.array([0, 1, 2])\n ann2 = np.array([4, 5, 6])\n\n ann1c = ann1.copy()\n ann2c = 
ann2.copy()\n\n targ = np.array([0, 1, 2, 4, 5, 6])\n\n res = merge_annotation(ann1, ann2)\n\n assert_arrays_equal(res, targ)\n assert_arrays_equal(ann1, ann1c)\n assert_arrays_equal(ann2, ann2c)\n\n def test_merge_annotation__func__list(self):\n ann1 = [0, 1, 2]\n ann2 = [4, 5, 6]\n\n ann1c = ann1[:]\n ann2c = ann2[:]\n\n targ = [0, 1, 2, 4, 5, 6]\n\n res = merge_annotation(ann1, ann2)\n\n self.assertEqual(res, targ)\n self.assertEqual(ann1, ann1c)\n self.assertEqual(ann2, ann2c)\n\n def test_merge_annotation__func__dict(self):\n ann1 = {0: 0, 1: {0: 0}}\n ann2 = {1: {1: 1}, 2: 2}\n\n ann1c = ann1.copy()\n ann2c = ann2.copy()\n\n targ = {0: 0, 1: {0: 0, 1: 1}, 2: 2}\n\n res = merge_annotation(ann1, ann2)\n\n self.assertEqual(res, targ)\n self.assertEqual(ann1, ann1c)\n self.assertEqual(ann2, ann2c)\n\n def test_merge_annotation__func__int(self):\n ann1 = 1\n ann2 = 1\n ann3 = 3\n\n targ = 1\n\n res = merge_annotation(ann1, ann2)\n\n self.assertEqual(res, targ)\n self.assertRaises(AssertionError, merge_annotation, ann1, ann3)\n\n def test_merge_annotation__func__float(self):\n ann1 = 1.1\n ann2 = 1.1\n ann3 = 1.3\n\n targ = 1.1\n\n res = merge_annotation(ann1, ann2)\n\n self.assertEqual(res, targ)\n self.assertRaises(AssertionError, merge_annotation, ann1, ann3)\n\n def test_merge_annotation__func__bool(self):\n ann1 = False\n ann2 = False\n ann3 = True\n ann4 = True\n\n targ1 = False\n targ2 = True\n\n res1 = merge_annotation(ann1, ann2)\n res2 = merge_annotation(ann3, ann4)\n\n self.assertEqual(res1, targ1)\n self.assertEqual(res2, targ2)\n self.assertRaises(AssertionError, merge_annotation, ann1, ann3)\n self.assertRaises(AssertionError, merge_annotation, ann2, ann4)\n\n\nclass TestBaseNeoCoreTypes(unittest.TestCase):\n '''\n TestCase to make sure annotations are properly checked for core built-in\n python data types\n '''\n def setUp(self):\n '''create the instance to be tested, called before every test'''\n self.base = BaseNeo()\n\n def test_python_nonetype(self):\n '''test to make sure None type data is accepted'''\n value = None\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_int(self):\n '''test to make sure int type data is accepted'''\n value = 10\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_long(self):\n '''test to make sure long type data is accepted'''\n value = long(7)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_float(self):\n '''test to make sure float type data is accepted'''\n value = 9.2\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_complex(self):\n '''test to make sure complex type data is accepted'''\n value = complex(23.17, 11.29)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_string(self):\n '''test to make sure string type data is accepted'''\n value = 'this is a test'\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, 
self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_unicode(self):\n '''test to make sure unicode type data is accepted'''\n value = u'this is also a test'\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_bytes(self):\n '''test to make sure bytes type data is accepted'''\n value = bytes('1,2,3,4,5')\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n\nclass TestBaseNeoStandardLibraryTypes(unittest.TestCase):\n '''\n TestCase to make sure annotations are properly checked for data types from\n the python standard library that are not core built-in data types\n '''\n def setUp(self):\n '''create the instance to be tested, called before every test'''\n self.base = BaseNeo()\n\n def test_python_fraction(self):\n '''test to make sure Fraction type data is accepted'''\n value = Fraction(13, 21)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_decimal(self):\n '''test to make sure Decimal type data is accepted'''\n value = Decimal(\"3.14\")\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_datetime(self):\n '''test to make sure datetime type data is accepted'''\n value = datetime(year=2008, month=12, day=3, hour=10, minute=4)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_date(self):\n '''test to make sure date type data is accepted'''\n value = date(year=2008, month=12, day=3)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_time(self):\n '''test to make sure time type data is accepted'''\n value = time(hour=10, minute=4)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_timedelta(self):\n '''test to make sure timedelta type data is accepted'''\n value = timedelta(weeks=2, days=7, hours=18, minutes=28,\n seconds=18, milliseconds=28,\n microseconds=45)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n\nclass TestBaseNeoContainerTypes(unittest.TestCase):\n '''\n TestCase to make sure annotations are properly checked for data type\n inside python built-in container types\n '''\n def setUp(self):\n '''create the instance to be tested, called before every test'''\n self.base = BaseNeo()\n\n def test_python_list(self):\n '''test to make sure list type data is accepted'''\n value = [None, 10, 9.2, complex(23, 11),\n ['this is a test', bytes('1,2,3,4,5')],\n [Fraction(13, 21), Decimal(\"3.14\")]]\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertListEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n 
def test_python_tuple(self):\n '''test to make sure tuple type data is accepted'''\n value = (None, 10, 9.2, complex(23, 11),\n ('this is a test', bytes('1,2,3,4,5')),\n (Fraction(13, 21), Decimal(\"3.14\")))\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertTupleEqual(value, self.base.annotations['data'])\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_dict(self):\n '''test to make sure dict type data is accepted'''\n value = {'NoneType': None, 'int': 10, 'float': 9.2,\n 'complex': complex(23, 11),\n 'dict1': {'string': 'this is a test',\n 'bytes': bytes('1,2,3,4,5')},\n 'dict2': {'Fraction': Fraction(13, 21),\n 'Decimal': Decimal(\"3.14\")}}\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_python_set(self):\n '''test to make sure set type data is rejected'''\n value = set([None, 10, 9.2, complex(23, 11)])\n self.assertRaises(ValueError, self.base.annotate, data=value)\n\n def test_python_frozenset(self):\n '''test to make sure frozenset type data is rejected'''\n value = frozenset([None, 10, 9.2, complex(23, 11)])\n self.assertRaises(ValueError, self.base.annotate, data=value)\n\n def test_python_iter(self):\n '''test to make sure iter type data is rejected'''\n value = iter([None, 10, 9.2, complex(23, 11)])\n self.assertRaises(ValueError, self.base.annotate, data=value)\n\n\nclass TestBaseNeoNumpyArrayTypes(unittest.TestCase):\n '''\n TestCase to make sure annotations are properly checked for numpy arrays\n '''\n def setUp(self):\n '''create the instance to be tested, called before every test'''\n self.base = BaseNeo()\n\n def test_numpy_array_int(self):\n '''test to make sure int type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.int)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_uint(self):\n '''test to make sure uint type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.uint)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_int0(self):\n '''test to make sure int0 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.int0)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_uint0(self):\n '''test to make sure uint0 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.uint0)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_int8(self):\n '''test to make sure int8 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.int8)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_uint8(self):\n '''test to make sure uint8 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.uint8)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_int16(self):\n '''test to make sure int16 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.int16)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def 
test_numpy_array_uint16(self):\n '''test to make sure uint16 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.uint16)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_int32(self):\n '''test to make sure int32 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.int32)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_uint32(self):\n '''test to make sure uint32 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.uint32)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_int64(self):\n '''test to make sure int64 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.int64)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_uint64(self):\n '''test to make sure uint64 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.uint64)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_float(self):\n '''test to make sure float type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.float)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_floating(self):\n '''test to make sure floating type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.floating)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_double(self):\n '''test to make sure double type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.double)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_float16(self):\n '''test to make sure float16 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.float16)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_float32(self):\n '''test to make sure float32 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.float32)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_float64(self):\n '''test to make sure float64 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.float64)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n @unittest.skipUnless(hasattr(np, \"float128\"), \"float128 not available\")\n def test_numpy_array_float128(self):\n '''test to make sure float128 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.float128)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_complex(self):\n '''test to make sure complex type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.complex)\n self.base.annotate(data=value)\n result = {'data': value}\n 
self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_complex64(self):\n '''test to make sure complex64 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.complex64)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_complex128(self):\n '''test to make sure complex128 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.complex128)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n @unittest.skipUnless(hasattr(np, \"complex256\"),\n \"complex256 not available\")\n def test_numpy_scalar_complex256(self):\n '''test to make sure complex256 type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.complex256)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_bool(self):\n '''test to make sure bool type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.bool)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_str(self):\n '''test to make sure str type numpy arrays are accepted'''\n value = np.array([1, 2, 3, 4, 5], dtype=np.str)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_string0(self):\n '''test to make sure string0 type numpy arrays are accepted'''\n if sys.version_info[0] >= 3:\n dtype = np.str0\n else:\n dtype = np.string0\n value = np.array([1, 2, 3, 4, 5], dtype=dtype)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n\nclass TestBaseNeoNumpyScalarTypes(unittest.TestCase):\n '''\n TestCase to make sure annotations are properly checked for numpy scalars\n '''\n def setUp(self):\n '''create the instance to be tested, called before every test'''\n self.base = BaseNeo()\n\n def test_numpy_scalar_int(self):\n '''test to make sure int type numpy scalars are accepted'''\n value = np.array(99, dtype=np.int)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_uint(self):\n '''test to make sure uint type numpy scalars are accepted'''\n value = np.array(99, dtype=np.uint)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_int0(self):\n '''test to make sure int0 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.int0)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_uint0(self):\n '''test to make sure uint0 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.uint0)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_int8(self):\n '''test to make sure int8 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.int8)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_uint8(self):\n '''test to make sure uint8 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.uint8)\n self.base.annotate(data=value)\n 
result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_int16(self):\n '''test to make sure int16 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.int16)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_uint16(self):\n '''test to make sure uint16 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.uint16)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_int32(self):\n '''test to make sure int32 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.int32)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_uint32(self):\n '''test to make sure uint32 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.uint32)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_int64(self):\n '''test to make sure int64 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.int64)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_uint64(self):\n '''test to make sure uint64 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.uint64)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_float(self):\n '''test to make sure float type numpy scalars are accepted'''\n value = np.array(99, dtype=np.float)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_floating(self):\n '''test to make sure floating type numpy scalars are accepted'''\n value = np.array(99, dtype=np.floating)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_double(self):\n '''test to make sure double type numpy scalars are accepted'''\n value = np.array(99, dtype=np.double)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_float16(self):\n '''test to make sure float16 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.float16)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_float32(self):\n '''test to make sure float32 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.float32)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_float64(self):\n '''test to make sure float64 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.float64)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n @unittest.skipUnless(hasattr(np, \"float128\"), \"float128 not available\")\n def test_numpy_scalar_float128(self):\n '''test to make sure float128 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.float128)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def 
test_numpy_scalar_complex(self):\n '''test to make sure complex type numpy scalars are accepted'''\n value = np.array(99, dtype=np.complex)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_complex64(self):\n '''test to make sure complex64 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.complex64)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_complex128(self):\n '''test to make sure complex128 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.complex128)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n @unittest.skipUnless(hasattr(np, \"complex256\"), \"complex256 not available\")\n def test_numpy_scalar_complex256(self):\n '''test to make sure complex256 type numpy scalars are accepted'''\n value = np.array(99, dtype=np.complex256)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_bool(self):\n '''test to make sure bool type numpy scalars are rejected'''\n value = np.array(99, dtype=np.bool)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_array_str(self):\n '''test to make sure str type numpy scalars are accepted'''\n value = np.array(99, dtype=np.str)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_numpy_scalar_string0(self):\n '''test to make sure string0 type numpy scalars are rejected'''\n if sys.version_info[0] >= 3:\n dtype = np.str0\n else:\n dtype = np.string0\n value = np.array(99, dtype=dtype)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n\nclass TestBaseNeoQuantitiesArrayTypes(unittest.TestCase):\n '''\n TestCase to make sure annotations are properly checked for quantities\n arrays\n '''\n def setUp(self):\n '''create the instance to be tested, called before every test'''\n self.base = BaseNeo()\n\n def test_quantities_array_int(self):\n '''test to make sure int type quantites arrays are accepted'''\n value = pq.Quantity([1, 2, 3, 4, 5], dtype=np.int, units=pq.s)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_quantities_array_uint(self):\n '''test to make sure uint type quantites arrays are accepted'''\n value = pq.Quantity([1, 2, 3, 4, 5], dtype=np.uint, units=pq.meter)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_quantities_array_float(self):\n '''test to make sure float type quantites arrays are accepted'''\n value = [1, 2, 3, 4, 5] * pq.kg\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_quantities_array_str(self):\n '''test to make sure str type quantites arrays are accepted'''\n value = pq.Quantity([1, 2, 3, 4, 5], dtype=np.str, units=pq.meter)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n\nclass TestBaseNeoQuantitiesScalarTypes(unittest.TestCase):\n '''\n TestCase to make sure annotations are properly checked for quantities\n scalars\n '''\n def setUp(self):\n 
'''create the instance to be tested, called before every test'''\n self.base = BaseNeo()\n\n def test_quantities_scalar_int(self):\n '''test to make sure int type quantites scalars are accepted'''\n value = pq.Quantity(99, dtype=np.int, units=pq.s)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_quantities_scalar_uint(self):\n '''test to make sure uint type quantites scalars are accepted'''\n value = pq.Quantity(99, dtype=np.uint, units=pq.meter)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_quantities_scalar_float(self):\n '''test to make sure float type quantites scalars are accepted'''\n value = 99 * pq.kg\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n def test_quantities_scalar_str(self):\n '''test to make sure str type quantites scalars are accepted'''\n value = pq.Quantity(99, dtype=np.str, units=pq.meter)\n self.base.annotate(data=value)\n result = {'data': value}\n self.assertDictEqual(result, self.base.annotations)\n\n\nclass TestBaseNeoUserDefinedTypes(unittest.TestCase):\n '''\n TestCase to make sure annotations are properly checked for arbitrary\n objects\n '''\n\n def setUp(self):\n '''create the instance to be tested, called before every test'''\n self.base = BaseNeo()\n\n def test_my_class(self):\n '''test to make sure user defined class type data is rejected'''\n class Foo(object):\n pass\n value = Foo()\n self.assertRaises(ValueError, self.base.annotate, data=value)\n\n def test_my_class_list(self):\n '''test to make sure user defined class type data is rejected'''\n class Foo(object):\n pass\n value = [Foo(), Foo(), Foo()]\n self.assertRaises(ValueError, self.base.annotate, data=value)\n\n\[email protected](HAVE_IPYTHON, \"requires IPython\")\nclass Test_pprint(unittest.TestCase):\n def test__pretty(self):\n name = 'an object'\n description = 'this is a test'\n obj = BaseNeo(name=name, description=description)\n res = pretty(obj)\n targ = \"BaseNeo name: '%s' description: '%s'\" % (name, description)\n self.assertEqual(res, targ)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array" ] ]
malihass/PelePhysics
[ "1215a6b65818ddfe9705af07f5774d74c4fe949a" ]
[ "Support/Fuego/Pythia/pythia-0.5/packages/fuego/fuego/serialization/f/FPickler.py" ]
[ "#!/usr/bin/env python\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Michael A.G. Aivazis\n# California Institute of Technology\n# (C) 1998-2003 All Rights Reserved\n#\n# <LicenseText>\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\nfrom __future__ import print_function\n\nimport sys\nfrom builtins import object, range, zip\nfrom collections import defaultdict\n\nimport numpy as np\nfrom pyre.handbook.constants.fundamental import avogadro, boltzmann\nfrom pyre.handbook.constants.fundamental import gas_constant as R\nfrom pyre.units.energy import J, cal, erg, kcal, kJ\nfrom pyre.units.length import cm\nfrom pyre.units.pressure import atm\nfrom pyre.units.SI import kelvin, meter, mole, second\nfrom weaver.mills.CMill import CMill\n\nsmallnum = 1e-100\nR = 8.31451e7 * erg / (mole / kelvin)\nRc = 1.98721558317399617591 * cal / mole / kelvin\nPatm = 1013250.0\nsym = \"\"\nfsym = \"_\"\n\n\nclass speciesDb(object):\n def __init__(self, id, name, mwt):\n self.id = id\n self.symbol = name\n self.weight = mwt\n return\n\n\nclass FPickler(CMill):\n def __init__(self):\n CMill.__init__(self)\n self.species = []\n self.nSpecies = 0\n self.reactionIndex = []\n self.lowT = 100.0\n self.highT = 10000.0\n # print 'INIT PICKLER'\n return\n\n def _setSpecies(self, mechanism):\n \"\"\"For internal use\"\"\"\n import pyre\n\n periodic = pyre.handbook.periodicTable()\n\n nSpecies = len(mechanism.species())\n self.species = [0.0 for x in range(nSpecies)]\n\n for species in mechanism.species():\n weight = 0.0\n for elem, coef in species.composition:\n aw = mechanism.element(elem).weight\n if not aw:\n aw = periodic.symbol(elem.capitalize()).atomicWeight\n weight += coef * aw\n\n tempsp = speciesDb(species.id, species.symbol, weight)\n self.species[species.id] = tempsp\n\n self.nSpecies = nSpecies\n return\n\n def _statics(self, mechanism):\n self._write()\n self._write(\"! Inverse molecular weights\")\n self._write(\n \"double precision, parameter :: imw(%d) = (/ &\" % (self.nSpecies)\n )\n self._indent()\n for i in range(0, self.nSpecies):\n species = self.species[i]\n text = \"1.d0 / %fd0\" % (species.weight)\n if i < self.nSpecies - 1:\n text += \", & \"\n else:\n text += \" /) \"\n self._write(text + \"! 
%s\" % species.symbol)\n self._outdent()\n\n self._write()\n self._write(\"type :: nonsquare_matrix_double\")\n self._write(\" double precision, allocatable :: vector(:)\")\n self._write(\"end type nonsquare_matrix_double\")\n self._write()\n self._write(\"type :: nonsquare_matrix_int\")\n self._write(\" integer, allocatable :: vector(:)\")\n self._write(\"end type nonsquare_matrix_int\")\n\n nReactions = len(mechanism.reaction())\n self._write()\n self._write(\n \"double precision, save :: fwd_A(%d), fwd_beta(%d), fwd_Ea(%d)\"\n % (nReactions, nReactions, nReactions)\n )\n self._write(\n \"double precision, save :: low_A(%d), low_beta(%d), low_Ea(%d)\"\n % (nReactions, nReactions, nReactions)\n )\n self._write(\n \"double precision, save :: rev_A(%d), rev_beta(%d), rev_Ea(%d)\"\n % (nReactions, nReactions, nReactions)\n )\n self._write(\n \"double precision, save :: troe_a(%d),troe_Ts(%d), troe_Tss(%d), troe_Tsss(%d)\"\n % (nReactions, nReactions, nReactions, nReactions)\n )\n self._write(\n \"double precision, save :: sri_a(%d), sri_b(%d), sri_c(%d), sri_d(%d), sri_e(%d)\"\n % (nReactions, nReactions, nReactions, nReactions, nReactions)\n )\n self._write(\n \"double precision, save :: activation_units(%d), prefactor_units(%d), phase_units(%d)\"\n % (nReactions, nReactions, nReactions)\n )\n self._write(\n \"integer, save :: is_PD(%d), troe_len(%d), sri_len(%d), nTB(%d)\"\n % (nReactions, nReactions, nReactions, nReactions)\n )\n self._write(\"type(nonsquare_matrix_double) :: TB(%d)\" % (nReactions))\n self._write(\"type(nonsquare_matrix_int) :: TBid(%d)\" % (nReactions))\n\n self._write()\n self._write(\n \"double precision, save :: fwd_A_DEF(%d), fwd_beta_DEF(%d), fwd_Ea_DEF(%d)\"\n % (nReactions, nReactions, nReactions)\n )\n self._write(\n \"double precision, save :: low_A_DEF(%d), low_beta_DEF(%d), low_Ea_DEF(%d)\"\n % (nReactions, nReactions, nReactions)\n )\n self._write(\n \"double precision, save :: rev_A_DEF(%d), rev_beta_DEF(%d), rev_Ea_DEF(%d)\"\n % (nReactions, nReactions, nReactions)\n )\n self._write(\n \"double precision, save :: troe_a_DEF(%d),troe_Ts_DEF(%d), troe_Tss_DEF(%d), troe_Tsss_DEF(%d)\"\n % (nReactions, nReactions, nReactions, nReactions)\n )\n self._write(\n \"double precision, save :: sri_a_DEF(%d), sri_b_DEF(%d), sri_c_DEF(%d), sri_d_DEF(%d), sri_e_DEF(%d)\"\n % (nReactions, nReactions, nReactions, nReactions, nReactions)\n )\n self._write(\n \"double precision, save :: activation_units_DEF(%d), prefactor_units_DEF(%d), phase_units_DEF(%d)\"\n % (nReactions, nReactions, nReactions)\n )\n self._write(\n \"integer, save :: is_PD_DEF(%d), troe_len_DEF(%d), sri_len_DEF(%d), nTB_DEF(%d)\"\n % (nReactions, nReactions, nReactions, nReactions)\n )\n self._write(\n \"type(nonsquare_matrix_double) :: TB_DEF(%d)\" % (nReactions)\n )\n self._write(\n \"type(nonsquare_matrix_int) :: TBid_DEF(%d)\" % (nReactions)\n )\n\n self._write()\n self._write(\"! 
productionRate() static variables\")\n self._write(\"double precision, save :: T_save = -1\")\n self._write(\"double precision, save :: k_f_save(%d)\" % nReactions)\n self._write(\"double precision, save :: Kc_save(%d)\" % nReactions)\n\n # build reverse reaction map\n # rmap = {}\n # for i, reaction in zip(range(nReactions), mechanism.reaction()):\n # rmap[reaction.orig_id-1] = i\n #\n # self._write('integer, parameter :: rxn_map(%d) = (/ %s /)' % (nReactions, \",\".join(str(rmap[x]) for x in range(len(rmap)))))\n self._write()\n\n return\n\n def _renderDocument(self, mechanism, options=None):\n\n self._setSpecies(mechanism)\n\n self.reactionIndex = mechanism._sort_reactions()\n\n self._start_module()\n # self._includes()\n # self._declarations(mechanism)\n self._statics(mechanism)\n self._module_contains(mechanism)\n self._ckinit(mechanism)\n\n # self._main(mechanism)\n\n # chemkin wrappers\n self._ckindx(mechanism)\n # self._ckxnum(mechanism)\n # self._cksnum(mechanism)\n self._cksyme(mechanism)\n self._cksyms(mechanism)\n self._ckrp(mechanism)\n\n # self._ckpx(mechanism)\n self._ckpy(mechanism)\n # self._vckpy(mechanism)\n # self._ckpc(mechanism)\n # self._ckrhox(mechanism)\n self._ckrhoy(mechanism)\n # self._ckrhoc(mechanism)\n self._ckwt(mechanism)\n # self._ckawt(mechanism)\n # self._ckmmwy(mechanism)\n # self._ckmmwx(mechanism)\n # self._ckmmwc(mechanism)\n self._ckytx(mechanism)\n self._vckytx(mechanism)\n # self._ckytcp(mechanism)\n self._ckytcr(mechanism)\n self._ckxty(mechanism)\n # self._ckxtcp(mechanism)\n # self._ckxtcr(mechanism)\n # self._ckctx(mechanism)\n # self._ckcty(mechanism)\n\n # self._ckcpor(mechanism)\n # self._ckhort(mechanism)\n # self._cksor(mechanism)\n\n # self._ckcvml(mechanism)\n # self._ckcpml(mechanism)\n # self._ckuml(mechanism)\n # self._ckhml(mechanism)\n # self._ckgml(mechanism)\n # self._ckaml(mechanism)\n # self._cksml(mechanism)\n\n self._ckcvms(mechanism)\n self._ckcpms(mechanism)\n self._ckums(mechanism)\n self._ckhms(mechanism)\n self._vckhms(mechanism)\n # self._ckgms(mechanism)\n # self._ckams(mechanism)\n # self._cksms(mechanism)\n\n # self._ckcpbl(mechanism)\n self._ckcpbs(mechanism)\n # self._ckcvbl(mechanism)\n self._ckcvbs(mechanism)\n\n # self._ckhbml(mechanism)\n # self._ckhbms(mechanism)\n # self._ckubml(mechanism)\n self._ckubms(mechanism)\n # self._cksbml(mechanism)\n # self._cksbms(mechanism)\n # self._ckgbml(mechanism)\n # self._ckgbms(mechanism)\n # self._ckabml(mechanism)\n # self._ckabms(mechanism)\n\n self._ckwc(mechanism)\n # self._ckwyp(mechanism)\n # self._ckwxp(mechanism)\n # self._ckwyr(mechanism)\n # self._vckwyr(mechanism)\n # self._ckwxr(mechanism)\n\n # self._ckqc(mechanism)\n # self._ckkfkr(mechanism)\n # self._ckqyp(mechanism)\n # self._ckqxp(mechanism)\n # self._ckqyr(mechanism)\n # self._ckqxr(mechanism)\n\n # self._cknu(mechanism)\n # self._ckncf(mechanism)\n\n # self._ckabe(mechanism)\n\n # self._ckeqc(mechanism)\n # self._ckeqyp(mechanism)\n # self._ckeqxp(mechanism)\n # self._ckeqyr(mechanism)\n # self._ckeqxr(mechanism)\n\n # Fuego Functions\n self._productionRate(mechanism)\n # self._vproductionRate(mechanism)\n # self._DproductionRate(mechanism)\n # self._ajac(mechanism)\n # self._dthermodT(mechanism)\n # self._progressRate(mechanism)\n # self._progressRateFR(mechanism)\n # self._equilibriumConstants(mechanism)\n self._thermo(mechanism)\n self._molecularWeight(mechanism)\n # self._atomicWeight(mechanism)\n self._T_given_ey(mechanism)\n # self._T_given_hy(mechanism)\n # 
self._getCriticalParameters(mechanism)\n self._trans(mechanism)\n self._end_module()\n return\n\n def _start_module(self):\n self._rep += [\n \"module fuego_module\",\n \"\",\n \" implicit none\",\n \" private\",\n \" public :: ckcpms\",\n \" public :: ckums\",\n \" public :: ckrhoy\",\n \" public :: ckcvms\",\n \" public :: ckxty\",\n \" public :: ckytcr\",\n \" public :: ckytx\",\n \" public :: ckhms\",\n \" public :: vckytx\",\n \" public :: vckhms\",\n \" public :: ckcvbs\",\n \" public :: ckubms\",\n \" public :: ckcpbs\",\n \" public :: ckpy\",\n \" public :: get_t_given_ey\",\n \" public :: cksyme\",\n \" public :: cksyms\",\n \" public :: ckwt\",\n \" public :: ckrp\",\n \" public :: ckwc\",\n \" public :: ckindx\",\n \" public :: ckinit\",\n \" public :: ckfinalize\",\n \" public :: egtransetCOFTD\",\n \" public :: egtransetKTDIF\",\n \" public :: egtransetCOFD\",\n \" public :: egtransetCOFLAM\",\n \" public :: egtransetCOFETA\",\n \" public :: egtransetNLIN\",\n \" public :: egtransetZROT\",\n \" public :: egtransetPOL\",\n \" public :: egtransetDIP\",\n \" public :: egtransetSIG\",\n \" public :: egtransetEPS\",\n \" public :: egtransetWT\",\n \" public :: egtransetNLITE\",\n \" public :: egtransetKK\",\n \" public :: egtransetNO\",\n \" public :: egtransetLENRMC\",\n \" public :: egtransetLENIMC\",\n ]\n return\n\n def _module_contains(self, mechanism):\n self._rep += [\"contains\"]\n\n nReactions = len(mechanism.reaction())\n self._write()\n # self._write('void GET_REACTION_MAP(int *rmap)')\n # self._write('{')\n # self._indent()\n # self._write('for (int i=0; i<%d; ++i) {' % (nReactions))\n # self._indent()\n # self._write('rmap[i] = rxn_map[i];')\n # self._outdent()\n # self._write('}')\n # self._outdent()\n # self._write('}')\n # self._write()\n # self._write(\"\")\n # self._write(\"#include <ReactionData.H>\")\n # self._write(\"double* GetParamPtr(int reaction_id,\")\n # self._write(\" REACTION_PARAMETER param_id,\")\n # self._write(\" int species_id,\")\n # self._write(\" int get_default)\")\n # self._write(\"{\")\n # self._write(\" double* ret = 0;\")\n # self._write(\" if (reaction_id<0 || reaction_id>=%d) {\" % (nReactions))\n # self._write(\" printf(\\\"Bad reaction id = %d\\\",reaction_id);\")\n # self._write(\" abort();\")\n # self._write(\" };\")\n # self._write(\" int mrid = rxn_map[reaction_id];\")\n # self._write()\n # self._write(\" if (param_id == THIRD_BODY) {\")\n # self._write(\" if (species_id<0 || species_id>=%d) {\" % (self.nSpecies))\n # self._write(\" printf(\\\"GetParamPtr: Bad species id = %d\\\",species_id);\")\n # self._write(\" abort();\")\n # self._write(\" }\")\n # self._write(\" if (get_default) {\")\n # self._write(\" for (int i=0; i<nTB_DEF[mrid]; ++i) {\")\n # self._write(\" if (species_id == TBid_DEF[mrid][i]) {\")\n # self._write(\" ret = &(TB_DEF[mrid][i]);\")\n # self._write(\" }\")\n # self._write(\" }\")\n # self._write(\" }\")\n # self._write(\" else {\")\n # self._write(\" for (int i=0; i<nTB[mrid]; ++i) {\")\n # self._write(\" if (species_id == TBid[mrid][i]) {\")\n # self._write(\" ret = &(TB[mrid][i]);\")\n # self._write(\" }\")\n # self._write(\" }\")\n # self._write(\" }\")\n # self._write(\" if (ret == 0) {\")\n # self._write(\" printf(\\\"GetParamPtr: No TB for reaction id = %d\\\",reaction_id);\")\n # self._write(\" abort();\")\n # self._write(\" }\")\n # self._write(\" }\")\n # self._write(\" else {\")\n # self._write(\" if ( param_id == FWD_A) {ret = (get_default ? 
&(fwd_A_DEF[mrid]) : &(fwd_A[mrid]));}\")\n # self._write(\" else if (param_id == FWD_BETA) {ret = (get_default ? &(fwd_beta_DEF[mrid]) : &(fwd_beta[mrid]));}\")\n # self._write(\" else if (param_id == FWD_EA) {ret = (get_default ? &(fwd_Ea_DEF[mrid]) : &(fwd_Ea[mrid]));}\")\n # self._write(\" else if (param_id == LOW_A) {ret = (get_default ? &(low_A_DEF[mrid]) : &(low_A[mrid]));}\")\n # self._write(\" else if (param_id == LOW_BETA) {ret = (get_default ? &(low_beta_DEF[mrid]) : &(low_beta[mrid]));}\")\n # self._write(\" else if (param_id == LOW_EA) {ret = (get_default ? &(low_Ea_DEF[mrid]) : &(low_Ea[mrid]));}\")\n # self._write(\" else if (param_id == REV_A) {ret = (get_default ? &(rev_A_DEF[mrid]) : &(rev_A[mrid]));}\")\n # self._write(\" else if (param_id == REV_BETA) {ret = (get_default ? &(rev_beta_DEF[mrid]) : &(rev_beta[mrid]));}\")\n # self._write(\" else if (param_id == REV_EA) {ret = (get_default ? &(rev_Ea_DEF[mrid]) : &(rev_Ea[mrid]));}\")\n # self._write(\" else if (param_id == TROE_A) {ret = (get_default ? &(troe_a_DEF[mrid]) : &(troe_a[mrid]));}\")\n # self._write(\" else if (param_id == TROE_TS) {ret = (get_default ? &(troe_Ts_DEF[mrid]) : &(troe_Ts[mrid]));}\")\n # self._write(\" else if (param_id == TROE_TSS) {ret = (get_default ? &(troe_Tss_DEF[mrid]) : &(troe_Tss[mrid]));}\")\n # self._write(\" else if (param_id == TROE_TSSS) {ret = (get_default ? &(troe_Tsss_DEF[mrid]) : &(troe_Tsss[mrid]));}\")\n # self._write(\" else if (param_id == SRI_A) {ret = (get_default ? &(sri_a_DEF[mrid]) : &(sri_a[mrid]));}\")\n # self._write(\" else if (param_id == SRI_B) {ret = (get_default ? &(sri_b_DEF[mrid]) : &(sri_b[mrid]));}\")\n # self._write(\" else if (param_id == SRI_C) {ret = (get_default ? &(sri_c_DEF[mrid]) : &(sri_c[mrid]));}\")\n # self._write(\" else if (param_id == SRI_D) {ret = (get_default ? &(sri_d_DEF[mrid]) : &(sri_d[mrid]));}\")\n # self._write(\" else if (param_id == SRI_E) {ret = (get_default ? 
&(sri_e_DEF[mrid]) : &(sri_e[mrid]));}\")\n # self._write(\" else {\")\n # self._write(\" printf(\\\"GetParamPtr: Unknown parameter id\\\");\")\n # self._write(\" abort();\")\n # self._write(\" }\")\n # self._write(\" }\")\n # self._write(\" return ret;\")\n # self._write(\"}\")\n # self._write()\n # self._write(\"void ResetAllParametersToDefault()\")\n # self._write(\"{\")\n # self._write(\" for (int i=0; i<%d; i++) {\" % (nReactions))\n # self._write(\" if (nTB[i] != 0) {\")\n # self._write(\" nTB[i] = 0;\")\n # self._write(\" free(TB[i]);\")\n # self._write(\" free(TBid[i]);\")\n # self._write(\" }\")\n # self._write(\"\")\n # self._write(\" fwd_A[i] = fwd_A_DEF[i];\")\n # self._write(\" fwd_beta[i] = fwd_beta_DEF[i];\")\n # self._write(\" fwd_Ea[i] = fwd_Ea_DEF[i];\")\n # self._write(\"\")\n # self._write(\" low_A[i] = low_A_DEF[i];\")\n # self._write(\" low_beta[i] = low_beta_DEF[i];\")\n # self._write(\" low_Ea[i] = low_Ea_DEF[i];\")\n # self._write(\"\")\n # self._write(\" rev_A[i] = rev_A_DEF[i];\")\n # self._write(\" rev_beta[i] = rev_beta_DEF[i];\")\n # self._write(\" rev_Ea[i] = rev_Ea_DEF[i];\")\n # self._write(\"\")\n # self._write(\" troe_a[i] = troe_a_DEF[i];\")\n # self._write(\" troe_Ts[i] = troe_Ts_DEF[i];\")\n # self._write(\" troe_Tss[i] = troe_Tss_DEF[i];\")\n # self._write(\" troe_Tsss[i] = troe_Tsss_DEF[i];\")\n # self._write(\"\")\n # self._write(\" sri_a[i] = sri_a_DEF[i];\")\n # self._write(\" sri_b[i] = sri_b_DEF[i];\")\n # self._write(\" sri_c[i] = sri_c_DEF[i];\")\n # self._write(\" sri_d[i] = sri_d_DEF[i];\")\n # self._write(\" sri_e[i] = sri_e_DEF[i];\")\n # self._write(\"\")\n # self._write(\" is_PD[i] = is_PD_DEF[i];\")\n # self._write(\" troe_len[i] = troe_len_DEF[i];\")\n # self._write(\" sri_len[i] = sri_len_DEF[i];\")\n # self._write(\"\")\n # self._write(\" activation_units[i] = activation_units_DEF[i];\")\n # self._write(\" prefactor_units[i] = prefactor_units_DEF[i];\")\n # self._write(\" phase_units[i] = phase_units_DEF[i];\")\n # self._write(\"\")\n # self._write(\" nTB[i] = nTB_DEF[i];\")\n # self._write(\" if (nTB[i] != 0) {\")\n # self._write(\" TB[i] = (double *) malloc(sizeof(double) * nTB[i]);\")\n # self._write(\" TBid[i] = (int *) malloc(sizeof(int) * nTB[i]);\")\n # self._write(\" for (int j=0; j<nTB[i]; j++) {\")\n # self._write(\" TB[i][j] = TB_DEF[i][j];\")\n # self._write(\" TBid[i][j] = TBid_DEF[i][j];\")\n # self._write(\" }\")\n # self._write(\" }\")\n # self._write(\" }\")\n # self._write(\"}\")\n # self._write()\n self._write(\"subroutine SetAllDefaults()\")\n self._write()\n self._write(\" implicit none\")\n self._write()\n self._write(\" integer :: i, j\")\n self._write()\n self._write(\" do i=1, %d\" % (nReactions))\n self._write(\" if (nTB_DEF(i) /= 0) then\")\n self._write(\" nTB_DEF(i) = 0\")\n self._write(\n \" if (allocated(TB_DEF(i) % vector)) deallocate(TB_DEF(i) % vector)\"\n )\n self._write(\n \" if (allocated(TBid_DEF(i) % vector)) deallocate(TBid_DEF(i) % vector)\"\n )\n self._write(\" end if\")\n self._write(\"\")\n self._write(\" fwd_A_DEF(i) = fwd_A(i)\")\n self._write(\" fwd_beta_DEF(i) = fwd_beta(i)\")\n self._write(\" fwd_Ea_DEF(i) = fwd_Ea(i)\")\n self._write(\"\")\n self._write(\" low_A_DEF(i) = low_A(i)\")\n self._write(\" low_beta_DEF(i) = low_beta(i)\")\n self._write(\" low_Ea_DEF(i) = low_Ea(i)\")\n self._write(\"\")\n self._write(\" rev_A_DEF(i) = rev_A(i)\")\n self._write(\" rev_beta_DEF(i) = rev_beta(i)\")\n self._write(\" rev_Ea_DEF(i) = rev_Ea(i)\")\n self._write(\"\")\n self._write(\" troe_a_DEF(i) 
= troe_a(i)\")\n self._write(\" troe_Ts_DEF(i) = troe_Ts(i)\")\n self._write(\" troe_Tss_DEF(i) = troe_Tss(i)\")\n self._write(\" troe_Tsss_DEF(i) = troe_Tsss(i)\")\n self._write(\"\")\n self._write(\" sri_a_DEF(i) = sri_a(i)\")\n self._write(\" sri_b_DEF(i) = sri_b(i)\")\n self._write(\" sri_c_DEF(i) = sri_c(i)\")\n self._write(\" sri_d_DEF(i) = sri_d(i)\")\n self._write(\" sri_e_DEF(i) = sri_e(i)\")\n self._write(\"\")\n self._write(\" is_PD_DEF(i) = is_PD(i)\")\n self._write(\" troe_len_DEF(i) = troe_len(i)\")\n self._write(\" sri_len_DEF(i) = sri_len(i)\")\n self._write(\"\")\n self._write(\" activation_units_DEF(i) = activation_units(i)\")\n self._write(\" prefactor_units_DEF(i) = prefactor_units(i)\")\n self._write(\" phase_units_DEF(i) = phase_units(i)\")\n self._write(\"\")\n self._write(\" nTB_DEF(i) = nTB(i)\")\n self._write(\" if (nTB_DEF(i) /= 0) then\")\n self._write(\n \" if (.not. allocated(TB_DEF(i) % vector)) allocate(TB_DEF(i) % vector(nTB_DEF(i)))\"\n )\n self._write(\n \" if (.not. allocated(TBid_DEF(i) % vector)) allocate(TBid_DEF(i) % vector(nTB_DEF(i)))\"\n )\n self._write(\" do j=1, nTB_DEF(i)\")\n self._write(\" TB_DEF(i) % vector(j) = TB(i) % vector(j)\")\n self._write(\n \" TBid_DEF(i) % vector(j) = TBid(i) % vector(j)\"\n )\n self._write(\" end do\")\n self._write(\" end if\")\n self._write(\" end do\")\n self._write()\n self._write(\"end subroutine\")\n self._write()\n\n return\n\n def _end_module(self):\n self._rep += [\"\", \"end module fuego_module\"]\n return\n\n # def _includes(self):\n # self._rep += [\n # '#include <math.h>',\n # '#include <stdio.h>',\n # '#include <string.h>',\n # '#include <stdlib.h>'\n # ]\n # return\n\n # def _declarations(self, mechanism):\n # self._rep += [\n # '',\n # '#if defined(BL_FORT_USE_UPPERCASE)',\n # #'#define CKINDX CKINDX',\n # #'#define CKINIT CKINIT',\n # #'#define CKFINALIZE CKFINALIZE',\n # #'#define CKXNUM CKXNUM',\n # #'#define CKSYME CKSYME',\n # #'#define CKSYMS CKSYMS',\n # #'#define CKRP CKRP',\n # #'#define CKPX CKPX',\n # #'#define CKPY CKPY',\n # #'#define CKPC CKPC',\n # #'#define CKRHOX CKRHOX',\n # #'#define CKRHOY CKRHOY',\n # #'#define CKRHOC CKRHOC',\n # #'#define CKWT CKWT',\n # #'#define CKAWT CKAWT',\n # #'#define CKMMWY CKMMWY',\n # #'#define CKMMWX CKMMWX',\n # #'#define CKMMWC CKMMWC',\n # #'#define CKYTX CKYTX',\n # #'#define CKYTCP CKYTCP',\n # #'#define CKYTCR CKYTCR',\n # #'#define CKXTY CKXTY',\n # #'#define CKXTCP CKXTCP',\n # #'#define CKXTCR CKXTCR',\n # #'#define CKCTX CKCTX',\n # #'#define CKCTY CKCTY',\n # #'#define CKCPOR CKCPOR',\n # #'#define CKHORT CKHORT',\n # #'#define CKSOR CKSOR',\n # #'#define CKCVML CKCVML',\n # #'#define CKCPML CKCPML',\n # #'#define CKUML CKUML',\n # #'#define CKHML CKHML',\n # #'#define CKGML CKGML',\n # #'#define CKAML CKAML',\n # #'#define CKSML CKSML',\n # #'#define CKCVMS CKCVMS',\n # #'#define CKCPMS CKCPMS',\n # #'#define CKUMS CKUMS',\n # #'#define CKHMS CKHMS',\n # #'#define CKGMS CKGMS',\n # #'#define CKAMS CKAMS',\n # #'#define CKSMS CKSMS',\n # #'#define CKCPBL CKCPBL',\n # #'#define CKCPBS CKCPBS',\n # #'#define CKCVBL CKCVBL',\n # #'#define CKCVBS CKCVBS',\n # #'#define CKHBML CKHBML',\n # #'#define CKHBMS CKHBMS',\n # #'#define CKUBML CKUBML',\n # #'#define CKUBMS CKUBMS',\n # #'#define CKSBML CKSBML',\n # #'#define CKSBMS CKSBMS',\n # #'#define CKGBML CKGBML',\n # #'#define CKGBMS CKGBMS',\n # #'#define CKABML CKABML',\n # #'#define CKABMS CKABMS',\n # #'#define CKWC CKWC',\n # #'#define CKWYP CKWYP',\n # #'#define CKWXP CKWXP',\n # 
#'#define CKWYR CKWYR',\n # #'#define CKWXR CKWXR',\n # #'#define CKQC CKQC',\n # #'#define CKKFKR CKKFKR',\n # #'#define CKQYP CKQYP',\n # #'#define CKQXP CKQXP',\n # #'#define CKQYR CKQYR',\n # #'#define CKQXR CKQXR',\n # #'#define CKNU CKNU',\n # #'#define CKNCF CKNCF',\n # #'#define CKABE CKABE',\n # #'#define CKEQC CKEQC',\n # #'#define CKEQYP CKEQYP',\n # #'#define CKEQXP CKEQXP',\n # #'#define CKEQYR CKEQYR',\n # #'#define CKEQXR CKEQXR',\n # #'#define DWDOT DWDOT',\n # '#define VCKHMS VCKHMS',\n # #'#define VCKPY VCKPY',\n # #'#define VCKWYR VCKWYR',\n # '#define VCKYTX VCKYTX',\n # #'#define GET_T_GIVEN_EY GET_T_GIVEN_EY',\n # #'#define GET_T_GIVEN_HY GET_T_GIVEN_HY',\n # #'#define GET_REACTION_MAP GET_REACTION_MAP',\n # #'#define GET_CRITPARAMS GET_CRITPARAMS',\n # '#elif defined(BL_FORT_USE_LOWERCASE)',\n # #'#define CKINDX ckindx',\n # #'#define CKINIT ckinit',\n # #'#define CKFINALIZE ckfinalize',\n # #'#define CKXNUM ckxnum',\n # #'#define CKSYME cksyme',\n # #'#define CKSYMS cksyms',\n # #'#define CKRP ckrp',\n # #'#define CKPX ckpx',\n # #'#define CKPY ckpy',\n # #'#define CKPC ckpc',\n # #'#define CKRHOX ckrhox',\n # #'#define CKRHOY ckrhoy',\n # #'#define CKRHOC ckrhoc',\n # #'#define CKWT ckwt',\n # #'#define CKAWT ckawt',\n # #'#define CKMMWY ckmmwy',\n # #'#define CKMMWX ckmmwx',\n # #'#define CKMMWC ckmmwc',\n # #'#define CKYTX ckytx',\n # #'#define CKYTCP ckytcp',\n # #'#define CKYTCR ckytcr',\n # #'#define CKXTY ckxty',\n # #'#define CKXTCP ckxtcp',\n # #'#define CKXTCR ckxtcr',\n # #'#define CKCTX ckctx',\n # #'#define CKCTY ckcty',\n # #'#define CKCPOR ckcpor',\n # #'#define CKHORT ckhort',\n # #'#define CKSOR cksor',\n # #'#define CKCVML ckcvml',\n # #'#define CKCPML ckcpml',\n # #'#define CKUML ckuml',\n # #'#define CKHML ckhml',\n # #'#define CKGML ckgml',\n # #'#define CKAML ckaml',\n # #'#define CKSML cksml',\n # #'#define CKCVMS ckcvms',\n # #'#define CKCPMS ckcpms',\n # #'#define CKUMS ckums',\n # #'#define CKHMS ckhms',\n # #'#define CKGMS ckgms',\n # #'#define CKAMS ckams',\n # #'#define CKSMS cksms',\n # #'#define CKCPBL ckcpbl',\n # #'#define CKCPBS ckcpbs',\n # #'#define CKCVBL ckcvbl',\n # #'#define CKCVBS ckcvbs',\n # #'#define CKHBML ckhbml',\n # #'#define CKHBMS ckhbms',\n # #'#define CKUBML ckubml',\n # #'#define CKUBMS ckubms',\n # #'#define CKSBML cksbml',\n # #'#define CKSBMS cksbms',\n # #'#define CKGBML ckgbml',\n # #'#define CKGBMS ckgbms',\n # #'#define CKABML ckabml',\n # #'#define CKABMS ckabms',\n # #'#define CKWC ckwc',\n # #'#define CKWYP ckwyp',\n # #'#define CKWXP ckwxp',\n # #'#define CKWYR ckwyr',\n # #'#define CKWXR ckwxr',\n # #'#define CKQC ckqc',\n # #'#define CKKFKR ckkfkr',\n # #'#define CKQYP ckqyp',\n # #'#define CKQXP ckqxp',\n # #'#define CKQYR ckqyr',\n # #'#define CKQXR ckqxr',\n # #'#define CKNU cknu',\n # #'#define CKNCF ckncf',\n # #'#define CKABE ckabe',\n # #'#define CKEQC ckeqc',\n # #'#define CKEQYP ckeqyp',\n # #'#define CKEQXP ckeqxp',\n # #'#define CKEQYR ckeqyr',\n # #'#define CKEQXR ckeqxr',\n # #'#define DWDOT dwdot',\n # '#define VCKHMS vckhms',\n # #'#define VCKPY vckpy',\n # #'#define VCKWYR vckwyr',\n # '#define VCKYTX vckytx',\n # #'#define GET_T_GIVEN_EY get_t_given_ey',\n # #'#define GET_T_GIVEN_HY get_t_given_hy',\n # #'#define GET_REACTION_MAP get_reaction_map',\n # #'#define GET_CRITPARAMS get_critparams',\n # '#elif defined(BL_FORT_USE_UNDERSCORE)',\n # #'#define CKINDX ckindx_',\n # #'#define CKINIT ckinit_',\n # #'#define CKFINALIZE ckfinalize_',\n # #'#define CKXNUM ckxnum_',\n # #'#define 
CKSYME cksyme_',\n # #'#define CKSYMS cksyms_',\n # #'#define CKRP ckrp_',\n # #'#define CKPX ckpx_',\n # #'#define CKPY ckpy_',\n # #'#define CKPC ckpc_',\n # #'#define CKRHOX ckrhox_',\n # #'#define CKRHOY ckrhoy_',\n # #'#define CKRHOC ckrhoc_',\n # #'#define CKWT ckwt_',\n # #'#define CKAWT ckawt_',\n # #'#define CKMMWY ckmmwy_',\n # #'#define CKMMWX ckmmwx_',\n # #'#define CKMMWC ckmmwc_',\n # #'#define CKYTX ckytx_',\n # #'#define CKYTCP ckytcp_',\n # #'#define CKYTCR ckytcr_',\n # #'#define CKXTY ckxty_',\n # #'#define CKXTCP ckxtcp_',\n # #'#define CKXTCR ckxtcr_',\n # #'#define CKCTX ckctx_',\n # #'#define CKCTY ckcty_',\n # #'#define CKCPOR ckcpor_',\n # #'#define CKHORT ckhort_',\n # #'#define CKSOR cksor_',\n # #'#define CKCVML ckcvml_',\n # #'#define CKCPML ckcpml_',\n # #'#define CKUML ckuml_',\n # #'#define CKHML ckhml_',\n # #'#define CKGML ckgml_',\n # #'#define CKAML ckaml_',\n # #'#define CKSML cksml_',\n # #'#define CKCVMS ckcvms_',\n # #'#define CKCPMS ckcpms_',\n # #'#define CKUMS ckums_',\n # #'#define CKHMS ckhms_',\n # #'#define CKGMS ckgms_',\n # #'#define CKAMS ckams_',\n # #'#define CKSMS cksms_',\n # #'#define CKCPBL ckcpbl_',\n # #'#define CKCPBS ckcpbs_',\n # #'#define CKCVBL ckcvbl_',\n # #'#define CKCVBS ckcvbs_',\n # #'#define CKHBML ckhbml_',\n # #'#define CKHBMS ckhbms_',\n # #'#define CKUBML ckubml_',\n # #'#define CKUBMS ckubms_',\n # #'#define CKSBML cksbml_',\n # #'#define CKSBMS cksbms_',\n # #'#define CKGBML ckgbml_',\n # #'#define CKGBMS ckgbms_',\n # #'#define CKABML ckabml_',\n # #'#define CKABMS ckabms_',\n # #'#define CKWC ckwc_',\n # #'#define CKWYP ckwyp_',\n # #'#define CKWXP ckwxp_',\n # #'#define CKWYR ckwyr_',\n # #'#define CKWXR ckwxr_',\n # #'#define CKQC ckqc_',\n # #'#define CKKFKR ckkfkr_',\n # #'#define CKQYP ckqyp_',\n # #'#define CKQXP ckqxp_',\n # #'#define CKQYR ckqyr_',\n # #'#define CKQXR ckqxr_',\n # #'#define CKNU cknu_',\n # #'#define CKNCF ckncf_',\n # #'#define CKABE ckabe_',\n # #'#define CKEQC ckeqc_',\n # #'#define CKEQYP ckeqyp_',\n # #'#define CKEQXP ckeqxp_',\n # #'#define CKEQYR ckeqyr_',\n # #'#define CKEQXR ckeqxr_',\n # #'#define DWDOT dwdot_',\n # '#define VCKHMS vckhms_',\n # #'#define VCKPY vckpy_',\n # #'#define VCKWYR vckwyr_',\n # '#define VCKYTX vckytx_',\n # #'#define GET_T_GIVEN_EY get_t_given_ey_',\n # #'#define GET_T_GIVEN_HY get_t_given_hy_',\n # #'#define GET_REACTION_MAP get_reaction_map_',\n # #'#define GET_CRITPARAMS get_critparams_',\n # '#endif','',\n # self.line('function declarations'),\n # #'#if defined(BL_FORT_USE_UPPERCASE)',\n # #'#define egtransetEPS EGTRANSETEPS',\n # #'#elif defined(BL_FORT_USE_LOWERCASE)',\n # #'#define egtransetEPS egtranseteps',\n # #'#elif defined(BL_FORT_USE_UNDERSCORE)',\n # #'#define egtransetEPS egtranseteps_',\n # #'#endif',\n # #'void egtransetEPS(double * EPS);',\n # #'#if defined(BL_FORT_USE_UPPERCASE)',\n # #'#define egtransetSIG EGTRANSETSIG',\n # #'#elif defined(BL_FORT_USE_LOWERCASE)',\n # #'#define egtransetSIG egtransetsig',\n # #'#elif defined(BL_FORT_USE_UNDERSCORE)',\n # #'#define egtransetSIG egtransetsig_',\n # #'#endif',\n # #'void egtransetSIG(double* SIG);',\n # #'void atomicWeight(double * restrict awt);',\n # #'void molecularWeight(double * restrict wt);',\n # #'void gibbs(double * restrict species, double * restrict tc);',\n # #'void helmholtz(double * restrict species, double * restrict tc);',\n # #'void speciesInternalEnergy(double * restrict species, double * restrict tc);',\n # 'void speciesEnthalpy(double * restrict species, double * 
restrict tc);',\n # #'void speciesEntropy(double * restrict species, double * restrict tc);',\n # #'void cp_R(double * restrict species, double * restrict tc);',\n # #'void cv_R(double * restrict species, double * restrict tc);',\n # #'void equilibriumConstants(double * restrict kc, double * restrict g_RT, double T);',\n # #'void productionRate(double * restrict wdot, double * restrict sc, double T);',\n # #'void comp_k_f(double * restrict tc, double invT, double * restrict k_f);',\n # #'void comp_Kc(double * restrict tc, double invT, double * restrict Kc);',\n # #'void comp_qfqr(double * restrict q_f, double * restrict q_r, double * restrict sc, double * restrict tc, double invT);',\n # #'void progressRate(double * restrict qdot, double * restrict speciesConc, double T);',\n # #'void progressRateFR(double * restrict q_f, double * restrict q_r, double * restrict speciesConc, double T);',\n # #'void CKINIT'+sym+'();',\n # #'void CKFINALIZE'+sym+'();',\n # #'void CKINDX'+sym+'(int * iwrk, double * restrict rwrk, int * mm, int * kk, int * ii, int * nfit );',\n # #'void CKXNUM'+sym+'(char * line, int * nexp, int * lout, int * nval, double * restrict rval, int * kerr, int lenline);',\n # #'void CKSNUM'+sym+'(char * line, int * nexp, int * lout, char * kray, int * nn, int * knum, int * nval, double * restrict rval, int * kerr, int lenline, int lenkray);',\n # #'void CKSYME(int * kname, int * lenkname);',\n # #'void CKSYMS(int * kname, int * lenkname);',\n # ##'void CKSYMS'+sym+'(char * cckwrk, int * lout, char * kname, int * kerr, int lencck, int lenkname);',\n # #'void CKRP'+sym+'(int * ickwrk, double * restrict rckwrk, double * restrict ru, double * restrict ruc, double * restrict pa);',\n # #'void CKPX'+sym+'(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict P);',\n # #'void CKPY'+sym+'(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict P);',\n # #'void CKPC'+sym+'(double * restrict rho, double * restrict T, double * restrict c, int * iwrk, double * restrict rwrk, double * restrict P);',\n # #'void CKRHOX'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict rho);',\n # #'void CKRHOY'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict rho);',\n # #'void CKRHOC'+sym+'(double * restrict P, double * restrict T, double * restrict c, int * iwrk, double * restrict rwrk, double * restrict rho);',\n # #'void CKWT'+sym+'(int * iwrk, double * restrict rwrk, double * restrict wt);',\n # #'void CKAWT'+sym+'(int * iwrk, double * restrict rwrk, double * restrict awt);',\n # #'void CKMMWY'+sym+'(double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wtm);',\n # #'void CKMMWX'+sym+'(double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wtm);',\n # #'void CKMMWC'+sym+'(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict wtm);',\n # #'void CKYTX'+sym+'(double * restrict y, int * iwrk, double * restrict rwrk, double * restrict x);',\n # #'void CKYTCP'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict c);',\n # #'void CKYTCR'+sym+'(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict c);',\n # #'void CKXTY'+sym+'(double * restrict x, int * 
iwrk, double * restrict rwrk, double * restrict y);',\n # #'void CKXTCP'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict c);',\n # #'void CKXTCR'+sym+'(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict c);',\n # #'void CKCTX'+sym+'(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict x);',\n # #'void CKCTY'+sym+'(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict y);',\n # #'void CKCPOR'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cpor);',\n # #'void CKHORT'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict hort);',\n # #'void CKSOR'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sor);',\n # #\n # #'void CKCVML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvml);',\n # #'void CKCPML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvml);',\n # #'void CKUML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict uml);',\n # #'void CKHML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict uml);',\n # #'void CKGML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict gml);',\n # #'void CKAML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict aml);',\n # #'void CKSML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sml);',\n # #\n # #'void CKCVMS'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvms);',\n # #'void CKCPMS'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvms);',\n # #'void CKUMS'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ums);',\n # #'void CKHMS'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ums);',\n # #'void CKGMS'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict gms);',\n # #'void CKAMS'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ams);',\n # #'void CKSMS'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sms);',\n # #\n # #'void CKCPBL'+sym+'(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict cpbl);',\n # #'void CKCPBS'+sym+'(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict cpbs);',\n # #'void CKCVBL'+sym+'(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict cpbl);',\n # #'void CKCVBS'+sym+'(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict cpbs);',\n # #\n # #'void CKHBML'+sym+'(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict hbml);',\n # #'void CKHBMS'+sym+'(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict hbms);',\n # #'void CKUBML'+sym+'(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict ubml);',\n # #'void CKUBMS'+sym+'(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict ubms);',\n # #'void CKSBML'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * 
iwrk, double * restrict rwrk, double * restrict sbml);',\n # #'void CKSBMS'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict sbms);',\n # #'void CKGBML'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict gbml);',\n # #'void CKGBMS'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict gbms);',\n # #'void CKABML'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict abml);',\n # #'void CKABMS'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict abms);',\n\n # #\n # #'void CKWC'+sym+'(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict wdot);',\n # #'void CKWYP'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wdot);',\n # #'void CKWXP'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wdot);',\n # #'void CKWYR'+sym+'(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wdot);',\n # #'void CKWXR'+sym+'(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wdot);',\n\n # #\n # #'void CKQC'+sym+'(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict qdot);',\n # #'void CKKFKR(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict q_f, double * restrict q_r);',\n # #'void CKQYP'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict qdot);',\n # #'void CKQXP'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict qdot);',\n # #'void CKQYR'+sym+'(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict qdot);',\n # #'void CKQXR'+sym+'(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict qdot);',\n # #\n # #'void CKNU'+sym+'(int * kdim, int * iwrk, double * restrict rwrk, int * nuki);',\n # #'void CKNCF'+sym+'(int * mdim, int * iwrk, double * restrict rwrk, int * ncf);',\n # #\n # #'void CKABE'+sym+'(int * iwrk, double * restrict rwrk, double * restrict a, double * restrict b, double * restrict e );',\n # #'void CKEQC'+sym+'(double * restrict T, double * restrict C , int * iwrk, double * restrict rwrk, double * restrict eqcon );',\n # #'void CKEQYP'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict eqcon);',\n # #'void CKEQXP'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict eqcon);',\n # #'void CKEQYR'+sym+'(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict eqcon);',\n # #'void CKEQXR'+sym+'(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict eqcon);',\n # #'void DWDOT(double * restrict J, 
double * restrict sc, double * restrict T, int * consP);',\n # #'void aJacobian(double * restrict J, double * restrict sc, double T, int consP);',\n # #'void dcvpRdT(double * restrict species, double * restrict tc);',\n # #'void GET_T_GIVEN_EY(double * restrict e, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict t, int *ierr);',\n # #'void GET_T_GIVEN_HY(double * restrict h, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict t, int *ierr);',\n # #'void GET_REACTION_MAP(int * restrict rmap);',\n # #self.line('vector version'),\n # #'void vproductionRate(int npt, double * restrict wdot, double * restrict c, double * restrict T);',\n # 'void VCKHMS'+sym+'(int * restrict np, double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ums);',\n # #'void VCKPY'+sym+'(int * restrict np, double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict P);',\n # #'void VCKWYR'+sym+'(int * restrict np, double * restrict rho, double * restrict T,',\n # #' double * restrict y, int * restrict iwrk, double * restrict rwrk,',\n # #' double * restrict wdot);',\n # 'void VCKYTX'+sym+'(int * restrict np, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict x);',\n # #'void vcomp_k_f(int npt, double * restrict k_f_s, double * restrict tc, double * restrict invT);',\n # #'void vcomp_gibbs(int npt, double * restrict g_RT, double * restrict tc);',\n # #'void vcomp_Kc(int npt, double * restrict Kc_s, double * restrict g_RT, double * restrict invT);',\n # #\n # #'void GET_CRITPARAMS(double * restrict Tci, double * restrict ai, double * restrict bi, double * restrict acentric_i);',\n # ]\n # #nReactions = len(mechanism.reaction())\n # #if nReactions <= 50:\n # # self._rep += [\n # # 'void vcomp_wdot(int npt, double * restrict wdot, double * restrict mixture, double * restrict sc,',\n # # ' double * restrict k_f_s, double * restrict Kc_s,',\n # # ' double * restrict tc, double * restrict invT, double * restrict T);',\n # # ]\n # #else:\n # # for i in range(0,nReactions,50):\n # # self._rep += [\n # # 'void vcomp_wdot_%d_%d(int npt, double * restrict wdot, double * restrict mixture, double * restrict sc,' % (i+1,min(i+50,nReactions)),\n # # ' double * restrict k_f_s, double * restrict Kc_s,',\n # # ' double * restrict tc, double * restrict invT, double * restrict T);',\n # # ]\n # return\n\n # def _main(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('optional test program'))\n # self._write('int main()')\n # self._write('{')\n # self._indent()\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n\n # # declarations\n # self._write('int species;')\n # self._write('int reaction;')\n\n # self._write('double T;')\n # self._write('double q_dot[%d];' % nReactions)\n # self._write('double wdot[%d];' % nSpecies)\n # self._write('double sc[%d];' % nSpecies)\n # self._write('double uml[%d];' % nSpecies)\n # self._write('double rckdummy[%d];' % nSpecies)\n # self._write('int ickdummy[%d];' % nSpecies)\n\n # # set the temperature\n # self._write()\n # self._write('T = 1000.0;')\n\n # # compute ckuml\n # self._write()\n # self._write(self.line('compute the internal energy'))\n # self._write('CKUML(&T, ickdummy, rckdummy, uml);')\n #\n # # print\n # self._write()\n # self._write('for (species = 0; species < %d; ++species) {' % nSpecies)\n # self._indent()\n # self._write('printf(\" e: %5d %15.7e\\\\n\", species+1, 
uml[species]);')\n # self._outdent()\n # self._write('}')\n\n # # compute the gibbs free energy\n # # self._write()\n # # self._write(self.line('compute the Gibbs free energy'))\n # # self._write('gibbs(g_RT, T);')\n\n # # compute the equilibrium constants\n # # self._write()\n # # self._write(self.line('compute the equilibrium constants'))\n # # self._write('equilibriumConstants(kc, g_RT, T);')\n\n # self._write('for (species = 0; species < %d; ++species) {' % nSpecies)\n # self._indent()\n # self._write('sc[species] = 1.0e6;')\n # self._outdent()\n # self._write('}')\n\n # # compute the production rates\n # self._write()\n # self._write(self.line('compute the production rate'))\n # self._write('productionRate(wdot, sc, T);')\n\n # # compute the progress rates\n # # self._write()\n # # self._write(self.line('compute the progress rates'))\n # # self._write('progressRate(q_dot, sc, T);')\n\n # # print\n # self._write()\n # self._write('for (species = 0; species < %d; ++species) {' % nSpecies)\n # self._indent()\n # self._write('printf(\"%5d %15.7e\\\\n\", species+1, wdot[species]);')\n # self._outdent()\n # self._write('}')\n\n # # print\n # # self._write()\n # # self._write('for (reaction = 0; reaction < %d; ++reaction) {' % nReactions)\n # # self._indent()\n # # self._write('printf(\"%5d | %15.7e\\\\n\", reaction+1, q_dot[reaction]);')\n # # self._write('}')\n # # self._outdent()\n\n # # done\n # self._write()\n # self._write('return 0;')\n\n # self._outdent()\n # self._write('}')\n # return\n\n def _ckinit(self, mechanism):\n\n nElement = len(mechanism.element())\n nSpecies = len(mechanism.species())\n nReactions = len(mechanism.reaction())\n\n self._write()\n self._write(\"! Finalizes parameter database\")\n self._write(\"subroutine ckfinalize()\")\n self._write()\n self._write(\" implicit none\")\n self._write()\n self._write(\" integer :: i\")\n self._write()\n self._write(\" do i=1, %d\" % nReactions)\n self._write(\n \" if (allocated(TB(i) % vector)) deallocate(TB(i) % vector)\"\n )\n self._write(\" !TB(i) = 0\")\n self._write(\n \" if (allocated(TBid(i) % vector)) deallocate(TBid(i) % vector)\"\n )\n self._write(\" !TBid(i) = 0\")\n self._write(\" nTB(i) = 0\")\n self._write()\n self._write(\n \" if (allocated(TB_DEF(i) % vector)) deallocate(TB_DEF(i) % vector)\"\n )\n self._write(\" !TB_DEF(i) = 0\")\n self._write(\n \" if (allocated(TBid_DEF(i) % vector)) deallocate(TBid_DEF(i) % vector)\"\n )\n self._write(\" !TBid_DEF(i) = 0\")\n self._write(\" nTB_DEF(i) = 0\")\n self._write(\" end do\")\n self._write()\n self._write(\"end subroutine\")\n self._write()\n self._write(\"! Initializes parameter database\")\n self._write(\"subroutine ckinit\" + sym + \"()\")\n self._write()\n self._write(\" implicit none\")\n self._write()\n\n self._indent()\n\n # build reverse reaction map\n rmap = {}\n for i, reaction in zip(list(range(nReactions)), mechanism.reaction()):\n rmap[reaction.orig_id - 1] = i\n\n for j in range(nReactions):\n reaction = mechanism.reaction()[rmap[j]]\n id = reaction.id # - 1\n\n A, beta, E = reaction.arrhenius\n self._write(\n \"! 
(%d): %s\" % (reaction.orig_id - 1, reaction.equation())\n )\n mynumber = format(A, \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"fwd_A(%d) = %s\" % (id, mynumber))\n mynumber = format(beta, \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"fwd_beta(%d) = %s\" % (id, mynumber))\n mynumber = format(E, \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"fwd_Ea(%d) = %s\" % (id, mynumber))\n\n dim = self._phaseSpaceUnits(reaction.reactants)\n thirdBody = reaction.thirdBody\n low = reaction.low\n if not thirdBody:\n uc = self._prefactorUnits(\n reaction.units[\"prefactor\"], 1 - dim\n ) # Case 3 !PD, !TB\n elif not low:\n uc = self._prefactorUnits(\n reaction.units[\"prefactor\"], -dim\n ) # Case 2 !PD, TB\n else:\n uc = self._prefactorUnits(\n reaction.units[\"prefactor\"], 1 - dim\n ) # Case 1 PD, TB\n low_A, low_beta, low_E = low\n mynumber = format(low_A, \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"low_A(%d) = %s\" % (id, mynumber))\n mynumber = format(low_beta, \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"low_beta(%d) = %s\" % (id, mynumber))\n mynumber = format(low_E, \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"low_Ea(%d) = %s\" % (id, mynumber))\n if reaction.troe:\n troe = reaction.troe\n ntroe = len(troe)\n is_troe = True\n mynumber = format(troe[0], \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"troe_a(%d) = %s\" % (id, mynumber))\n if ntroe > 1:\n mynumber = format(troe[1], \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"troe_Tsss(%d) = %s\" % (id, mynumber))\n if ntroe > 2:\n mynumber = format(troe[2], \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"troe_Ts(%d) = %s\" % (id, mynumber))\n if ntroe > 3:\n mynumber = format(troe[3], \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"troe_Tss(%d) = %s\" % (id, mynumber))\n self._write(\"troe_len(%d) = %d\" % (id, ntroe))\n if reaction.sri:\n sri = reaction.sri\n nsri = len(sri)\n is_sri = True\n mynumber = format(sri[0], \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"sri_a(%d) = %s\" % (id, mynumber))\n if nsri > 1:\n mynumber = format(sri[1], \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"sri_b(%d) = %s\" % (id, mynumber))\n if nsri > 2:\n mynumber = format(sri[2], \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"sri_c(%d) = %s\" % (id, mynumber))\n if nsri > 3:\n mynumber = format(sri[3], \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"sri_d(%d) = %s\" % (id, mynumber))\n if nsri > 4:\n mynumber = format(sri[4], \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"sri_e(%d) = %s\" % (id, mynumber))\n self._write(\"sri_len(%d) = %d\" % (id, nsri))\n\n mynumber = format(uc.value, \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + 
\"d0\"\n self._write(\"prefactor_units(%d) = %s\" % (id, mynumber))\n aeuc = self._activationEnergyUnits(reaction.units[\"activation\"])\n mynumber = format((aeuc / (Rc / kelvin)), \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\"activation_units(%d) = %s\" % (id, mynumber))\n self._write(\"phase_units(%d) = 1d-%d\" % (id, dim * 6))\n\n if low:\n self._write(\"is_PD(%d) = 1\" % (id))\n else:\n self._write(\"is_PD(%d) = 0\" % (id))\n\n if thirdBody:\n efficiencies = reaction.efficiencies\n self._write(\"nTB(%d) = %d\" % (id, len(efficiencies)))\n self._write(\n \"if (.not. allocated(TB(%d) %% vector)) allocate(TB(%d) %% vector(%d))\"\n % (id, id, len(efficiencies))\n )\n self._write(\n \"if (.not. allocated(TBid(%d) %% vector)) allocate(TBid(%d) %% vector(%d))\"\n % (id, id, len(efficiencies))\n )\n for i, eff in enumerate(efficiencies):\n symbol, efficiency = eff\n mynumber = format(\n mechanism.species(symbol).id, \".17g\"\n ).replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\n \"TBid(%d) %% vector(%d) = %s\" % (id, i + 1, mynumber)\n )\n mynumber = format(efficiency, \".17g\").replace(\"e\", \"d\")\n if \"d\" not in mynumber:\n mynumber = mynumber + \"d0\"\n self._write(\n \"TB(%d) %% vector(%d) = %s ! %s\"\n % (id, i + 1, mynumber, symbol)\n )\n else:\n self._write(\"nTB(%d) = 0\" % (id))\n\n self._write()\n\n self._write(\"call SetAllDefaults()\")\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n self._write()\n\n return\n\n def _thermo(self, mechanism):\n speciesInfo = self._analyzeThermodynamics(mechanism)\n\n self._gibbs(speciesInfo)\n # self._helmholtz(speciesInfo)\n self._cv(speciesInfo)\n self._cp(speciesInfo)\n self._speciesInternalEnergy(speciesInfo)\n self._speciesEnthalpy(speciesInfo)\n # self._speciesEntropy(speciesInfo)\n\n return\n\n def _trans(self, mechanism):\n speciesTransport = self._analyzeTransport(mechanism)\n NLITE = 0\n idxLightSpecs = []\n for spec in self.species:\n if spec.weight < 5.0:\n NLITE += 1\n idxLightSpecs.append(spec.id)\n self._miscTransInfo(KK=self.nSpecies, NLITE=NLITE)\n self._wt()\n self._eps(speciesTransport)\n self._sig(speciesTransport)\n self._dip(speciesTransport)\n self._pol(speciesTransport)\n self._zrot(speciesTransport)\n self._nlin(speciesTransport)\n\n self._viscosity(speciesTransport, NTFit=50)\n self._diffcoefs(speciesTransport, NTFit=50)\n self._lightSpecs(idxLightSpecs)\n self._thermaldiffratios(speciesTransport, idxLightSpecs, NTFit=50)\n\n return\n\n # def _dthermodT(self, mechanism):\n # speciesInfo = self._analyzeThermodynamics(mechanism)\n # self._dcvpdT(speciesInfo)\n # return\n\n # def _ckxnum(self, mechanism):\n # self._write()\n # # self._write()\n # # self._write(self.line(' strtok_r: re-entrant (threadsafe) version of strtok, helper function for tokenizing strings '))\n # # self._write('char *strtok_r(char *s, const char *delim, char **save_ptr)')\n # # self._write('{')\n # # self._indent()\n # # self._write('char *token;')\n # # self._write()\n # # self._write('if (s == NULL)')\n # # self._indent()\n # # self._write('s = *save_ptr;')\n # # self._outdent()\n # # self._write()\n # # self._write('/* Scan leading delimiters. 
*/')\n # # self._write('s += strspn (s, delim);')\n # # self._write('if (*s == \\'\\\\0\\')')\n # # self._write('{')\n # # self._indent()\n # # self._write('*save_ptr = s;')\n # # self._write('return NULL;')\n # # self._outdent()\n # # self._write('}')\n # # self._write()\n # # self._write('/* Find the end of the token. */')\n # # self._write('token = s;')\n # # self._write('s = strpbrk (token, delim);')\n # # self._write('if (s == NULL)')\n # # self._indent()\n # # self._write('/* This token finishes the string. */')\n # # self._write('*save_ptr = __rawmemchr (token, \\'\\\\0\\');')\n # # self._outdent()\n # # self._write('else')\n # # self._write('{')\n # # self._indent()\n # # self._write('/* Terminate the token and make *SAVE_PTR point past it. */')\n # # self._write('*s = \\'\\\\0\\';')\n # # self._write('*save_ptr = s + 1;')\n # # self._outdent()\n # # self._write('}')\n # # self._write('return token;')\n # # self._outdent()\n # # self._write('}')\n # # self._write()\n\n # self._write()\n # self._write()\n # self._write(self.line(' ckxnum... for parsing strings '))\n # self._write('void CKXNUM'+sym+'(char * line, int * nexp, int * lout, int * nval, double * restrict rval, int * kerr, int lenline )')\n # self._write('{')\n # self._indent()\n # self._write('int n,i; /*Loop Counters */')\n # self._write('char cstr[1000];')\n # self._write('char *saveptr;')\n # self._write('char *p; /*String Tokens */')\n # self._write(self.line(' Strip Comments '))\n # self._write('for (i=0; i<lenline; ++i) {')\n # self._indent()\n # self._write('if (line[i]==\\'!\\') {')\n # self._indent()\n # self._write('break;')\n # self._outdent()\n # self._write('}')\n # self._write('cstr[i] = line[i];')\n # self._outdent()\n # self._write('}')\n # self._write('cstr[i] = \\'\\\\0\\';')\n # self._write()\n # self._write('p = strtok_r(cstr,\" \", &saveptr);')\n # self._write('if (!p) {')\n # self._indent()\n # self._write('*nval = 0;')\n # self._write('*kerr = 1;')\n # self._write('return;')\n # self._outdent()\n # self._write('}')\n # self._write('for (n=0; n<*nexp; ++n) {')\n # self._indent()\n # self._write('rval[n] = atof(p);')\n # self._write('p = strtok_r(NULL, \\\" \\\", &saveptr);')\n # self._write('if (!p) break;')\n # self._outdent()\n # self._write('}')\n # self._write('*nval = n+1;')\n # self._write('if (*nval < *nexp) *kerr = 1;')\n # self._write('return;')\n # self._outdent()\n # self._write('}')\n # return\n\n # def _cksnum(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(' cksnum... for parsing strings '))\n # self._write('void CKSNUM'+sym+'(char * line, int * nexp, int * lout, char * kray, int * nn, int * knum, int * nval, double * restrict rval, int * kerr, int lenline, int lenkray)')\n # self._write('{')\n # self._indent()\n #\n # self._write(self.line('Not done yet ...'))\n #\n # # done\n # self._outdent()\n # self._write('}')\n # return\n\n def _ckrp(self, mechanism):\n self._write()\n self._write(\"! 
Returns R, Rc, Patm\")\n self._write(\"subroutine ckrp\" + sym + \"(ickwrk, rckwrk, ru, ruc, pa)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"integer, intent(in) :: ickwrk\")\n self._write(\"double precision, intent(in) :: rckwrk\")\n self._write(\"double precision, intent(out) :: ru\")\n self._write(\"double precision, intent(out) :: ruc\")\n self._write(\"double precision, intent(out) :: pa\")\n self._write()\n self._write(\"ru = %fd0 \" % ((R * mole * kelvin / erg).value))\n self._write(\"ruc = %.20fd0 \" % ((Rc * mole * kelvin / cal)))\n self._write(\"pa = %fd0 \" % (Patm))\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n return\n\n def _cksyme(self, mechanism):\n nElement = len(mechanism.element())\n self._write()\n self._write(\"! Returns the char strings of element names\")\n self._write(\"subroutine cksyme\" + sym + \"(kname, plenkname)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"integer, intent(out) :: kname(plenkname*%d)\" % nElement)\n self._write(\"integer, intent(in) :: plenkname\")\n self._write()\n self._write(\"integer :: i\")\n self._write(\"integer :: lenkname\")\n self._write()\n self._write(\"lenkname = plenkname\")\n self._write()\n self._write(\"!clear kname\")\n self._write(\"do i=1, lenkname*%d\" % nElement)\n self._indent()\n self._write(\"kname(i) = ichar(' ')\")\n self._outdent()\n self._write(\"end do\")\n self._write()\n for element in mechanism.element():\n self._write(\"! %s \" % element.symbol)\n ii = 1\n for char in element.symbol:\n self._write(\n \"kname(%d*lenkname+%d) = ichar('%s')\"\n % (element.id, ii, char.capitalize())\n )\n ii = ii + 1\n self._write(\n \"kname(%d*lenkname+%d) = ichar(' ')\" % (element.id, ii)\n )\n\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n return\n\n def _cksyms(self, mechanism):\n nSpecies = len(mechanism.species())\n self._write()\n self._write(\"! Returns the char strings of species names\")\n self._write(\"subroutine cksyms\" + sym + \"(kname, plenkname)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"integer, intent(out) :: kname(plenkname*%d)\" % nSpecies)\n self._write(\"integer, intent(in) :: plenkname\")\n self._write()\n self._write(\"integer :: i\")\n self._write(\"integer :: lenkname\")\n self._write()\n self._write(\"lenkname = plenkname\")\n self._write()\n self._write(\"!clear kname\")\n self._write(\"do i=1, lenkname*%d\" % nSpecies)\n self._indent()\n self._write(\"kname(i) = ichar(' ')\")\n self._outdent()\n self._write(\"end do\")\n self._write()\n for species in mechanism.species():\n self._write(\"! %s \" % species.symbol)\n ii = 1\n for char in species.symbol:\n self._write(\n \"kname(%d*lenkname+%d) = ichar('%s')\"\n % (species.id, ii, char.capitalize())\n )\n ii = ii + 1\n self._write(\n \"kname(%d*lenkname+%d) = ichar(' ')\" % (species.id, ii)\n )\n\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n return\n\n def _ckindx(self, mechanism):\n self._write()\n self._write(\"! 
A few mechanism parameters\")\n self._write(\n \"subroutine ckindx\" + sym + \"(iwrk, rwrk, mm, kk, ii, nfit)\"\n )\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"integer, intent(out) :: mm\")\n self._write(\"integer, intent(out) :: kk\")\n self._write(\"integer, intent(out) :: ii\")\n self._write(\"integer, intent(out) :: nfit\")\n self._write()\n self._write(\"mm = %d\" % len(mechanism.element()))\n self._write(\"kk = %d\" % len(mechanism.species()))\n self._write(\"ii = %d\" % len(mechanism.reaction()))\n self._write(\"nfit = -1\" + \" ! Why do you need this anyway?\")\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n return\n\n # def _ckpx(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Compute P = rhoRT/W(x)'))\n # self._write('void CKPX'+sym+'(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict P)')\n # self._write('{')\n # self._indent()\n\n # self._write('double XW = 0;'+\n # self.line(' To hold mean molecular wt'))\n #\n # # molecular weights of all species\n # for species in self.species:\n # self._write('XW += x[%d]*%f; ' % (\n # species.id, species.weight) + self.line('%s' % species.symbol))\n\n # self._write(\n # '*P = *rho * %g * (*T) / XW; ' % (R*kelvin*mole/erg)\n # + self.line('P = rho*R*T/W'))\n #\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n # return\n\n def _ckpy(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\"! Compute P = rhoRT/W(y)\")\n self._write(\"subroutine ckpy\" + sym + \"(rho, T, y, iwrk, rwrk, P)\")\n self._write()\n self._indent()\n\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(in) :: rho\")\n self._write(\"double precision, intent(in) :: T\")\n self._write(\"double precision, intent(in) :: y(%d)\" % nSpec)\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(out) :: P\")\n self._write()\n self._write(\"double precision :: YOW \" + \"! for computing mean MW\")\n self._write()\n self._write(\"YOW = 0.d0\")\n self._write()\n\n # molecular weights of all species\n for species in self.species:\n self._write(\n \"YOW = YOW + (y(%d) * imw(%d)) \"\n % (species.id + 1, species.id + 1)\n + \"! %s\" % species.symbol\n )\n\n self._write()\n self._write(\"! YOW holds the reciprocal of the mean molecular wt\")\n expression = format((R * kelvin * mole / erg).value, \"15.8e\").replace(\n \"e\", \"d\"\n )\n self._write(\"P = rho *%s * T * YOW \" % expression + \"! 
P = rho*R*T/W\")\n\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n # def _vckpy(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Compute P = rhoRT/W(y)'))\n # self._write('void VCKPY'+sym+'(int * restrict np, double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict P)')\n # self._write('{')\n # self._indent()\n\n # species = self.species\n # nSpec = len(species)\n # self._write('double YOW[*np];')\n # self._write('for (int i=0; i<(*np); i++) {')\n # self._indent()\n # self._write('YOW[i] = 0.0;')\n # self._outdent()\n # self._write('}')\n # self._write('')\n # self._write('for (int n=0; n<%d; n++) {' % (nSpec))\n # self._indent()\n # self._write('for (int i=0; i<(*np); i++) {')\n # self._indent()\n # self._write('YOW[i] += y[n*(*np)+i] * imw[n];')\n # self._outdent()\n # self._write('}')\n # self._outdent()\n # self._write('}')\n\n # self._write('')\n\n # self._write('for (int i=0; i<(*np); i++) {')\n # self._indent()\n # self._write(\n # 'P[i] = rho[i] * %g * T[i] * YOW[i]; ' % (R*kelvin*mole/erg)\n # + self.line('P = rho*R*T/W'))\n # self._outdent()\n # self._write('}')\n #\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckpc(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Compute P = rhoRT/W(c)'))\n # self._write('void CKPC'+sym+'(double * restrict rho, double * restrict T, double * restrict c, int * iwrk, double * restrict rwrk, double * restrict P)')\n #\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write(self.line('See Eq 5 in CK Manual'))\n # self._write('double W = 0;')\n # self._write('double sumC = 0;')\n #\n # # molecular weights of all species\n # for species in self.species:\n # self._write('W += c[%d]*%f; ' % (\n # species.id, species.weight) + self.line('%s' % species.symbol))\n\n # self._write()\n # nSpecies = len(mechanism.species())\n # self._write('for (id = 0; id < %d; ++id) {' % nSpecies)\n # self._indent()\n # self._write('sumC += c[id];')\n # self._outdent()\n # self._write('}')\n\n # self.line('W/sumC holds the mean molecular wt')\n # self._write(\n # '*P = *rho * %g * (*T) * sumC / W; ' % (R*kelvin*mole/erg)\n # + self.line('P = rho*R*T/W'))\n #\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckrhox(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Compute rho = PW(x)/RT'))\n # self._write('void CKRHOX'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict rho)')\n # self._write('{')\n # self._indent()\n\n # self._write('double XW = 0;'+\n # self.line(' To hold mean molecular wt'))\n #\n # # molecular weights of all species\n # for species in self.species:\n # self._write('XW += x[%d]*%f; ' % (\n # species.id, species.weight) + self.line('%s' % species.symbol))\n\n # self._write(\n # '*rho = *P * XW / (%g * (*T)); ' % (R*kelvin*mole/erg)\n # + self.line('rho = P*W/(R*T)'))\n #\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n # return\n\n def _ckrhoy(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\"! 
Compute rho = P*W(y)/RT\")\n self._write(\"subroutine ckrhoy\" + sym + \"(P, T, y, iwrk, rwrk, rho)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(in) :: P\")\n self._write(\"double precision, intent(in) :: T\")\n self._write(\"double precision, intent(in) :: y(%d)\" % nSpec)\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(out) :: rho\")\n self._write()\n self._write(\"double precision :: YOW, tmp(%d)\" % nSpec)\n self._write(\"integer :: i\")\n self._write()\n self._write(\"YOW = 0.d0\")\n self._write()\n self._write(\"do i=1, %d\" % nSpec)\n self._indent()\n self._write(\"tmp(i) = y(i) * imw(i)\")\n self._outdent()\n self._write(\"end do\")\n self._write(\"do i=1, %d\" % nSpec)\n self._indent()\n self._write(\"YOW = YOW + tmp(i)\")\n self._outdent()\n self._write(\"end do\")\n self._write()\n expression = format((R * kelvin * mole / erg).value, \"15.8e\").replace(\n \"e\", \"d\"\n )\n self._write(\n \"rho = P / (%s * T * YOW) \" % expression + \"! rho = P*W/(R*T)\"\n )\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n return\n\n # def _ckrhoc(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Compute rho = P*W(c)/(R*T)'))\n # self._write('void CKRHOC'+sym+'(double * restrict P, double * restrict T, double * restrict c, int * iwrk, double * restrict rwrk, double * restrict rho)')\n #\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write(self.line('See Eq 5 in CK Manual'))\n # self._write('double W = 0;')\n # self._write('double sumC = 0;')\n #\n # # molecular weights of all species\n # for species in self.species:\n # self._write('W += c[%d]*%f; ' % (\n # species.id, species.weight) + self.line('%s' % species.symbol))\n\n # self._write()\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('sumC += c[id];')\n # self._outdent()\n # self._write('}')\n\n # self.line('W/sumC holds the mean molecular wt')\n # self._write(\n # '*rho = *P * W / (sumC * (*T) * %g); ' % (R*kelvin*mole/erg)\n # + self.line('rho = PW/(R*T)'))\n #\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n def _ckwt(self, mechanism):\n nSpecies = len(mechanism.species())\n self._write()\n self._write(\"! 
get molecular weight for all species\")\n self._write(\"subroutine ckwt\" + sym + \"(iwrk, rwrk, wt)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(out) :: wt(%d)\" % nSpecies)\n self._write()\n self._write(\"call molecularWeight(wt)\")\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n return\n\n # def _ckawt(self, mechanism):\n\n # self._write()\n # self._write()\n # self._write(self.line('get atomic weight for all elements'))\n # self._write('void CKAWT'+sym+'(int * iwrk, double * restrict rwrk, double * restrict awt)')\n # self._write('{')\n # self._indent()\n\n # # call atomicWeight\n # self._write('atomicWeight(awt);')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n #\n # def _ckcvml(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('get specific heat at constant volume as a function '))\n # self._write(self.line('of T for all species (molar units)'))\n # self._write('void CKCVML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvml)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n #\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n #\n # # call routine\n # self._write('cv_R(cvml, tc);')\n #\n # # convert cv/R to cv\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('cvml[id] *= %g;' % (R*kelvin*mole/erg) )\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n #\n # def _ckcpml(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('get specific heat at constant pressure as a '))\n # self._write(self.line('function of T for all species (molar units)'))\n # self._write('void CKCPML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cpml)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n #\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n #\n # # call routine\n # self._write('cp_R(cpml, tc);')\n #\n # # convert cp/R to cp\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('cpml[id] *= %g;' % (R*kelvin*mole/erg) )\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n #\n # def _ckuml(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('get internal energy as a function '))\n # self._write(self.line('of T for all species (molar units)'))\n # self._write('void CKUML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict uml)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n #\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + 
self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double RT = %g*tT; ' % (R*kelvin*mole/erg)\n # + self.line('R*T'))\n #\n # # call routine\n # self._write('speciesInternalEnergy(uml, tc);')\n #\n # # convert e/RT to e with molar units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('uml[id] *= RT;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n #\n # def _ckhml(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('get enthalpy as a function '))\n # self._write(self.line('of T for all species (molar units)'))\n # self._write('void CKHML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict hml)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n #\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double RT = %g*tT; ' % (R*kelvin*mole/erg)\n # + self.line('R*T'))\n #\n # # call routine\n # self._write('speciesEnthalpy(hml, tc);')\n #\n # # convert h/RT to h with molar units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('hml[id] *= RT;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n #\n # def _ckgml(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('get standard-state Gibbs energy as a function '))\n # self._write(self.line('of T for all species (molar units)'))\n # self._write('void CKGML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict gml)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n #\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double RT = %g*tT; ' % (R*kelvin*mole/erg)\n # + self.line('R*T'))\n #\n # # call routine\n # self._write('gibbs(gml, tc);')\n #\n # # convert g/RT to g with molar units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('gml[id] *= RT;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n #\n # def _ckaml(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('get standard-state Helmholtz free energy as a '))\n # self._write(self.line('function of T for all species (molar units)'))\n # self._write('void CKAML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict aml)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n #\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, 
tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double RT = %g*tT; ' % (R*kelvin*mole/erg)\n # + self.line('R*T'))\n #\n # # call routine\n # self._write('helmholtz(aml, tc);')\n #\n # # convert A/RT to A with molar units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('aml[id] *= RT;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _cksml(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns the standard-state entropies in molar units'))\n # self._write('void CKSML'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sml)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n #\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n #\n # # call routine\n # self._write('speciesEntropy(sml, tc);')\n #\n # # convert s/R to s\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('sml[id] *= %g;' % (R*kelvin*mole/erg) )\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n def _ckums(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\"! Returns internal energy in mass units (Eq 30.)\")\n self._write(\"subroutine ckums\" + sym + \"(T, iwrk, rwrk, ums)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(in) :: T\")\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(inout) :: ums(%d)\" % nSpec)\n self._write()\n self._write(\"double precision :: tT, tc(5)\")\n self._write(\"double precision :: RT\")\n self._write(\"integer :: i\")\n self._write()\n\n # get temperature cache\n self._write(\"tT = T \" + \"! temporary temperature\")\n self._write(\n \"tc = (/ 0.d0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT /) \"\n + \"! temperature cache\"\n )\n expression = format((R * kelvin * mole / erg).value, \"15.8e\").replace(\n \"e\", \"d\"\n )\n self._write(\"RT =%s * tT \" % expression + \"! R*T\")\n\n # call routine\n self._write()\n self._write(\"call speciesInternalEnergy(ums, tc)\")\n self._write()\n\n self._write(\"do i=1, %d\" % (nSpec))\n self._indent()\n self._write(\"ums(i) = ums(i) * (RT * imw(i))\")\n self._outdent()\n self._write(\"end do\")\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n def _ckhms(self, mechanism):\n species = self.species\n nSpec = len(species)\n self._write()\n self._write(\"! 
Returns enthalpy in mass units (Eq 27.)\")\n self._write(\"subroutine ckhms\" + sym + \"(T, iwrk, rwrk, hms)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n\n # get temperature cache\n self._write(\"double precision, intent(in) :: T\")\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(inout) :: hms(%d)\" % nSpec)\n self._write()\n self._write(\"double precision :: tT, RT\")\n self._write(\"double precision :: tc(5), h(%d)\" % nSpec)\n self._write(\"integer :: i\")\n self._write()\n self._write(\"tT = T \" + \"! temporary temperature\")\n self._write(\n \"tc = (/ 0.d0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT /) \"\n + \"! temperature cache\"\n )\n expression = format((R * kelvin * mole / erg).value, \"15.8e\").replace(\n \"e\", \"d\"\n )\n self._write(\"RT =%s * tT \" % expression + \"! R*T\")\n\n # call routine\n self._write()\n self._write(\"call speciesEnthalpy(hms, tc)\")\n self._write()\n self._write(\"do i=1, %d\" % (nSpec))\n self._indent()\n self._write(\"hms(i) = hms(i) * (RT * imw(i))\")\n self._outdent()\n self._write(\"end do\")\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n def _vckhms(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\"! Returns enthalpy in mass units (Eq 27.)\")\n self._write(\"subroutine vckhms\" + sym + \"(np, T, iwrk, rwrk, hms)\")\n self._indent()\n self._write()\n self._write(\"implicit none\")\n self._write()\n self._write(\"integer, intent(in) :: np\")\n self._write(\"double precision, intent(in) :: T(np)\")\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(inout) :: hms(np,%d)\" % nSpec)\n self._write()\n self._write(\"double precision :: tc(5), h(%d)\" % nSpec)\n self._write(\"integer :: i, n\")\n self._write()\n self._write(\"do i=1, np\")\n self._indent()\n self._write(\"tc(1) = 0.d0\")\n self._write(\"tc(2) = T(i)\")\n self._write(\"tc(3) = T(i)*T(i)\")\n self._write(\"tc(4) = T(i)*T(i)*T(i)\")\n self._write(\"tc(5) = T(i)*T(i)*T(i)*T(i)\")\n self._write()\n self._write(\"call speciesEnthalpy(h, tc)\")\n self._write()\n for ispec in range(nSpec):\n self._write(\"hms(i, %d) = h(%d)\" % (ispec + 1, ispec + 1))\n self._outdent()\n self._write(\"end do\")\n self._write()\n self._write(\"do n=1, %d\" % (nSpec))\n self._indent()\n self._write(\"do i=1, np\")\n self._indent()\n expression = format((R * kelvin * mole / erg).value, \"15.8e\").replace(\n \"e\", \"d\"\n )\n self._write(\"hms(i,n) = hms(i,n) * (%s * T(i) * imw(n))\" % expression)\n self._outdent()\n self._write(\"end do\")\n self._outdent()\n self._write(\"end do\")\n\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n # def _ckams(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns helmholtz in mass units (Eq 32.)'))\n # self._write('void CKAMS'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ams)')\n # self._write('{')\n # self._indent()\n\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double RT = %g*tT; ' % (R*kelvin*mole/erg)\n # + self.line('R*T'))\n #\n # # call routine\n # self._write('helmholtz(ams, 
tc);')\n #\n # species = self.species\n # nSpec = len(species)\n # self._write('for (int i = 0; i < %d; i++)' % (nSpec))\n # self._write('{')\n # self._indent()\n # self._write('ams[i] *= RT*imw[i];')\n # self._outdent()\n # self._write('}')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckgms(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns gibbs in mass units (Eq 31.)'))\n # self._write('void CKGMS'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict gms)')\n # self._write('{')\n # self._indent()\n\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double RT = %g*tT; ' % (R*kelvin*mole/erg)\n # + self.line('R*T'))\n #\n # # call routine\n # self._write('gibbs(gms, tc);')\n #\n # species = self.species\n # nSpec = len(species)\n # self._write('for (int i = 0; i < %d; i++)' % (nSpec))\n # self._write('{')\n # self._indent()\n # self._write('gms[i] *= RT*imw[i];')\n # self._outdent()\n # self._write('}')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n def _ckcvms(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\"! Returns the specific heats at constant volume\")\n self._write(\"! in mass units (Eq. 29)\")\n self._write(\"subroutine ckcvms\" + sym + \"(T, iwrk, rwrk, cvms)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(in) :: T\")\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(inout) :: cvms(%d)\" % nSpec)\n self._write()\n self._write(\"double precision :: tT, tc(5)\")\n self._write()\n\n # get temperature cache\n self._write(\"tT = T \" + \"! temporary temperature\")\n self._write(\n \"tc = (/ 0.d0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT /) \"\n + \"! temperature cache\"\n )\n self._write()\n\n # call routine\n self._write(\"call cv_R(cvms, tc)\")\n self._write()\n\n # convert cv/R to cv with mass units\n self._write(\"! multiply by R/molecularweight\")\n for species in self.species:\n ROW = format(\n ((R * kelvin * mole / erg) / species.weight).value, \"20.15e\"\n ).replace(\"e\", \"d\")\n self._write(\n \"cvms(%d) = cvms(%d) * %s \"\n % (species.id + 1, species.id + 1, ROW)\n + \"!%s\" % species.symbol\n )\n\n self._outdent()\n\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n def _ckcpms(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\"! Returns the specific heats at constant pressure\")\n self._write(\"! in mass units (Eq. 26)\")\n self._write(\"subroutine ckcpms\" + sym + \"(T, iwrk, rwrk, cpms)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(in) :: T\")\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(inout) :: cpms(%d)\" % nSpec)\n self._write()\n self._write(\"double precision :: tT, tc(5)\")\n self._write()\n\n # get temperature cache\n self._write(\"tT = T \" + \"! temporary temperature\")\n self._write(\n \"tc = (/ 0.d0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT /) \"\n + \"! 
temperature cache\"\n )\n\n # call routine\n self._write()\n self._write(\"call cp_R(cpms, tc)\")\n self._write()\n\n # convert cp/R to cp with mass units\n self._write(\"! multiply by R/molecularweight\")\n for species in self.species:\n ROW = format(\n ((R * kelvin * mole / erg) / species.weight).value, \"20.15e\"\n ).replace(\"e\", \"d\")\n self._write(\n \"cpms(%d) = cpms(%d) * %s \"\n % (species.id + 1, species.id + 1, ROW)\n + \"! %s\" % species.symbol\n )\n\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n # def _cksms(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns the entropies in mass units (Eq 28.)'))\n # self._write('void CKSMS'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sms)')\n # self._write('{')\n # self._indent()\n\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n #\n # # call routine\n # self._write('speciesEntropy(sms, tc);')\n #\n\n # # convert s/R to s with mass units\n # self._write(self.line('multiply by R/molecularweight'))\n # for species in self.species:\n # ROW = (R*kelvin*mole/erg) / species.weight\n # self._write('sms[%d] *= %20.15e; ' % (\n # species.id, ROW) + self.line('%s' % species.symbol))\n\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n #\n # def _ckcpbl(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns the mean specific heat at CP (Eq. 33)'))\n # self._write('void CKCPBL'+sym+'(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict cpbl)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write('double result = 0; ')\n #\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double cpor[%d]; ' % self.nSpecies + self.line(' temporary storage'))\n #\n # # call routine\n # self._write('cp_R(cpor, tc);')\n #\n # # dot product\n # self._write()\n # self._write(self.line('perform dot product'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('result += x[id]*cpor[id];')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('*cpbl = result * %g;' % (R*kelvin*mole/erg) )\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n def _ckcpbs(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\"! Returns the mean specific heat at CP (Eq. 
34)\")\n self._write(\"subroutine ckcpbs\" + sym + \"(T, y, iwrk, rwrk, cpbs)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n\n self._write(\"double precision, intent(in) :: T\")\n self._write(\"double precision, intent(in) :: y(%d)\" % self.nSpecies)\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(out) :: cpbs\")\n self._write()\n self._write(\"double precision :: cpor(%d)\" % self.nSpecies)\n self._write(\"double precision :: tresult(%d)\" % self.nSpecies)\n self._write(\"double precision :: tT, tc(5)\")\n self._write(\"double precision :: res\")\n self._write(\"integer :: i\")\n self._write()\n self._write(\"res = 0.d0\")\n self._write()\n\n # get temperature cache\n self._write(\"tT = T \" + \"! temporary temperature\")\n self._write(\n \"tc = (/ 0.d0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT /) \"\n + \"! temperature cache\"\n )\n\n # call routine\n self._write()\n self._write(\"call cp_R(cpor, tc)\")\n self._write()\n\n self._write(\"do i=1, %d\" % nSpec)\n self._indent()\n self._write(\"tresult(i) = cpor(i) * y(i) * imw(i)\")\n self._outdent()\n self._write(\"end do\")\n self._write(\"do i=1, %d\" % nSpec)\n self._indent()\n self._write(\"res = res + tresult(i)\")\n self._outdent()\n self._write(\"end do\")\n self._write()\n expression = format((R * kelvin * mole / erg).value, \"15.8e\").replace(\n \"e\", \"d\"\n )\n self._write(\"cpbs = res *%s\" % expression)\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n # def _ckcvbl(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns the mean specific heat at CV (Eq. 35)'))\n # self._write('void CKCVBL'+sym+'(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict cvbl)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write('double result = 0; ')\n #\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double cvor[%d]; ' % self.nSpecies + self.line(' temporary storage'))\n #\n # # call routine\n # self._write('cv_R(cvor, tc);')\n #\n # # dot product\n # self._write()\n # self._write(self.line('perform dot product'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('result += x[id]*cvor[id];')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('*cvbl = result * %g;' % (R*kelvin*mole/erg) )\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n def _ckcvbs(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\"! Returns the mean specific heat at CV (Eq. 
36)\")\n self._write(\"subroutine ckcvbs\" + sym + \"(T, y, iwrk, rwrk, cvbs)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n\n self._write(\"double precision, intent(in) :: T\")\n self._write(\"double precision, intent(in) :: y(%d)\" % nSpec)\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(out) :: cvbs\")\n self._write()\n self._write(\"double precision :: cvor(%d)\" % nSpec)\n self._write(\"double precision :: tT, tc(5)\")\n self._write(\"double precision :: res\")\n self._write()\n self._write(\"res = 0.d0\")\n\n # get temperature cache\n self._write(\"tT = T \" + \"! temporary temperature\")\n self._write(\n \"tc = (/ 0.d0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT /) \"\n + \"! temperature cache\"\n )\n\n # call routine\n self._write()\n self._write(\"call cv_R(cvor, tc)\")\n self._write()\n\n # do dot product\n self._write(\"! multiply by y/molecularweight\")\n for species in self.species:\n self._write(\n \"res = res + (cvor(%d) * y(%d) * imw(%d)) \"\n % (species.id + 1, species.id + 1, species.id + 1)\n + \"! %s\" % species.symbol\n )\n\n self._write()\n expression = format((R * kelvin * mole / erg).value, \"15.8e\").replace(\n \"e\", \"d\"\n )\n self._write(\"cvbs = res *%s\" % expression)\n\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n # def _ckhbml(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns the mean enthalpy of the mixture in molar units'))\n # self._write('void CKHBML'+sym+'(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict hbml)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write('double result = 0; ')\n #\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double hml[%d]; ' % self.nSpecies + self.line(' temporary storage'))\n # self._write(\n # 'double RT = %g*tT; ' % (R*kelvin*mole/erg)\n # + self.line('R*T'))\n #\n # # call routine\n # self._write('speciesEnthalpy(hml, tc);')\n #\n # # dot product\n # self._write()\n # self._write(self.line('perform dot product'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('result += x[id]*hml[id];')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('*hbml = result * RT;')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckhbms(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns mean enthalpy of mixture in mass units'))\n # self._write('void CKHBMS'+sym+'(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict hbms)')\n # self._write('{')\n # self._indent()\n\n # self._write('double result = 0;')\n #\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double hml[%d], tmp[%d]; ' % (self.nSpecies,self.nSpecies) + self.line(' temporary storage'))\n #\n # self._write(\n # 'double RT = %g*tT; ' % (R*kelvin*mole/erg)\n # + self.line('R*T'))\n #\n # # call routine\n # 
self._write('speciesEnthalpy(hml, tc);')\n\n # self._write('int id;')\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('tmp[id] = y[id]*hml[id]*imw[id];')\n # self._outdent()\n # self._write('}')\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('result += tmp[id];')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # # finally, multiply by RT\n # self._write('*hbms = result * RT;')\n #\n # self._outdent()\n\n # self._write('}')\n #\n # return\n #\n # def _ckubml(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('get mean internal energy in molar units'))\n # self._write('void CKUBML'+sym+'(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict ubml)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write('double result = 0; ')\n #\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double uml[%d]; ' % self.nSpecies + self.line(' temporary energy array'))\n # self._write(\n # 'double RT = %g*tT; ' % (R*kelvin*mole/erg)\n # + self.line('R*T'))\n #\n # # call routine\n # self._write('speciesInternalEnergy(uml, tc);')\n #\n # # dot product\n # self._write()\n # self._write(self.line('perform dot product'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('result += x[id]*uml[id];')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('*ubml = result * RT;')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n def _ckubms(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\"! get mean internal energy in mass units\")\n self._write(\"subroutine ckubms\" + sym + \"(T, y, iwrk, rwrk, ubms)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(in) :: T\")\n self._write(\"double precision, intent(in) :: y(%d)\" % nSpec)\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(out) :: ubms\")\n self._write()\n self._write(\n \"double precision :: ums(%d)\" % nSpec + \" ! temporary energy array\"\n )\n self._write(\"double precision :: res\")\n self._write(\"double precision :: RT, tT, tc(5)\")\n self._write()\n self._write(\"res = 0.d0\")\n self._write()\n\n # get temperature cache\n self._write(\"tT = T \" + \"! temporary temperature\")\n self._write(\n \"tc = (/ 0.d0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT /) \"\n + \"! temperature cache\"\n )\n\n expression = format((R * kelvin * mole / erg).value, \"15.8e\").replace(\n \"e\", \"d\"\n )\n self._write(\"RT =%s * tT \" % expression + \"! R*T\")\n\n # call routine\n self._write()\n self._write(\"call speciesInternalEnergy(ums, tc)\")\n self._write()\n\n # convert e/RT to e with mass units\n self._write(\"! perform dot product + scaling by wt\")\n for species in self.species:\n self._write(\n \"res = res + (y(%d) * ums(%d) * imw(%d)) \"\n % (species.id + 1, species.id + 1, species.id + 1)\n + \"! 
%s\" % species.symbol\n )\n\n self._write()\n # finally, multiply by RT\n self._write(\"ubms = res * RT\")\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n # def _cksbml(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('get mixture entropy in molar units'))\n # self._write('void CKSBML'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict sbml)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write('double result = 0; ')\n #\n # # get temperature cache\n # self._write(self.line('Log of normalized pressure in cgs units dynes/cm^2 by Patm'))\n # self._write( 'double logPratio = log ( *P / 1013250.0 ); ')\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double sor[%d]; ' % self.nSpecies + self.line(' temporary storage'))\n #\n #\n # # call routine\n # self._write('speciesEntropy(sor, tc);')\n #\n # # Equation 42\n # self._write()\n # self._write(self.line('Compute Eq 42'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('result += x[id]*(sor[id]-log((x[id]+%g))-logPratio);' %\n # smallnum )\n # self._outdent()\n # self._write('}')\n\n # self._write()\n #\n # self._write('*sbml = result * %g;' % (R*kelvin*mole/erg) )\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _cksbms(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('get mixture entropy in mass units'))\n # self._write('void CKSBMS'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict sbms)')\n # self._write('{')\n # self._indent()\n\n # self._write('double result = 0; ')\n #\n # # get temperature cache\n # self._write(self.line('Log of normalized pressure in cgs units dynes/cm^2 by Patm'))\n # self._write( 'double logPratio = log ( *P / 1013250.0 ); ')\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double sor[%d]; ' % self.nSpecies + self.line(' temporary storage'))\n # self._write(\n # 'double x[%d]; ' % self.nSpecies + self.line(' need a ytx conversion'))\n\n # self._write('double YOW = 0; '+self.line('See Eq 4, 6 in CK Manual'))\n #\n #\n # # compute inverse of mean molecular weight first (eq 3)\n # self._write(self.line('Compute inverse of mean molecular wt first'))\n # for species in self.species:\n # self._write('YOW += y[%d]*imw[%d]; ' % (\n # species.id, species.id) + self.line('%s' % species.symbol))\n\n # # now to ytx\n # self._write(self.line('Now compute y to x conversion'))\n # for species in self.species:\n # self._write('x[%d] = y[%d]/(%f*YOW); ' % (\n # species.id, species.id, species.weight) )\n #\n # # call routine\n # self._write('speciesEntropy(sor, tc);')\n #\n # # Equation 42 and 43\n # self._write(self.line('Perform computation in Eq 42 and 43'))\n # for species in self.species:\n # self._write('result += x[%d]*(sor[%d]-log((x[%d]+%g))-logPratio);' %\n # (species.id, species.id, species.id, smallnum) )\n\n # self._write(self.line('Scale by R/W'))\n # self._write('*sbms = result * %g * YOW;' % 
(R*kelvin*mole/erg) )\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckgbml(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns mean gibbs free energy in molar units'))\n # self._write('void CKGBML'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict gbml)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write('double result = 0; ')\n #\n # # get temperature cache\n # self._write(self.line('Log of normalized pressure in cgs units dynes/cm^2 by Patm'))\n # self._write( 'double logPratio = log ( *P / 1013250.0 ); ')\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double RT = %g*tT; ' % (R*kelvin*mole/erg)\n # + self.line('R*T'))\n # self._write(\n # 'double gort[%d]; ' % self.nSpecies + self.line(' temporary storage'))\n #\n # # call routine\n # self._write(self.line('Compute g/RT'))\n # self._write('gibbs(gort, tc);')\n #\n # # Equation 44\n # self._write()\n # self._write(self.line('Compute Eq 44'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('result += x[id]*(gort[id]+log((x[id]+%g))+logPratio);' %\n # smallnum )\n # self._outdent()\n # self._write('}')\n\n # self._write()\n #\n # self._write('*gbml = result * RT;')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckgbms(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns mixture gibbs free energy in mass units'))\n # self._write('void CKGBMS'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict gbms)')\n # self._write('{')\n # self._indent()\n\n # self._write('double result = 0; ')\n #\n # # get temperature cache\n # self._write(self.line('Log of normalized pressure in cgs units dynes/cm^2 by Patm'))\n # self._write( 'double logPratio = log ( *P / 1013250.0 ); ')\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double RT = %g*tT; ' % (R*kelvin*mole/erg)\n # + self.line('R*T'))\n # self._write(\n # 'double gort[%d]; ' % self.nSpecies + self.line(' temporary storage'))\n # self._write(\n # 'double x[%d]; ' % self.nSpecies + self.line(' need a ytx conversion'))\n\n # self._write(\n # 'double YOW = 0; '\n # + self.line('To hold 1/molecularweight'))\n #\n #\n # # compute inverse of mean molecular weight first (eq 3)\n # self._write(self.line('Compute inverse of mean molecular wt first'))\n # for species in self.species:\n # self._write('YOW += y[%d]*imw[%d]; ' % (\n # species.id, species.id) + self.line('%s' % species.symbol))\n\n # # now to ytx\n # self._write(self.line('Now compute y to x conversion'))\n # for species in self.species:\n # self._write('x[%d] = y[%d]/(%f*YOW); ' % (\n # species.id, species.id, species.weight) )\n #\n # # call routine\n # self._write('gibbs(gort, tc);')\n #\n # # Equation 42 and 43\n # self._write(self.line('Perform computation in Eq 44'))\n # for species in self.species:\n # self._write('result += x[%d]*(gort[%d]+log((x[%d]+%g))+logPratio);' %\n # (species.id, species.id, species.id, 
smallnum) )\n\n # self._write(self.line('Scale by RT/W'))\n # self._write('*gbms = result * RT * YOW;')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n #\n\n # def _ckabml(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns mean helmholtz free energy in molar units'))\n # self._write('void CKABML'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict abml)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write('double result = 0; ')\n #\n # # get temperature cache\n # self._write(self.line('Log of normalized pressure in cgs units dynes/cm^2 by Patm'))\n # self._write( 'double logPratio = log ( *P / 1013250.0 ); ')\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double RT = %g*tT; ' % (R*kelvin*mole/erg)\n # + self.line('R*T'))\n # self._write(\n # 'double aort[%d]; ' % self.nSpecies + self.line(' temporary storage'))\n #\n # # call routine\n # self._write(self.line('Compute g/RT'))\n # self._write('helmholtz(aort, tc);')\n #\n # # Equation 44\n # self._write()\n # self._write(self.line('Compute Eq 44'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('result += x[id]*(aort[id]+log((x[id]+%g))+logPratio);' %\n # smallnum )\n # self._outdent()\n # self._write('}')\n\n # self._write()\n #\n # self._write('*abml = result * RT;')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n #\n\n # def _ckabms(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns mixture helmholtz free energy in mass units'))\n # self._write('void CKABMS'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict abms)')\n # self._write('{')\n # self._indent()\n\n # self._write('double result = 0; ')\n #\n # # get temperature cache\n # self._write(self.line('Log of normalized pressure in cgs units dynes/cm^2 by Patm'))\n # self._write( 'double logPratio = log ( *P / 1013250.0 ); ')\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double RT = %g*tT; ' % (R*kelvin*mole/erg)\n # + self.line('R*T'))\n # self._write(\n # 'double aort[%d]; ' % self.nSpecies + self.line(' temporary storage'))\n # self._write(\n # 'double x[%d]; ' % self.nSpecies + self.line(' need a ytx conversion'))\n\n # self._write(\n # 'double YOW = 0; '\n # + self.line('To hold 1/molecularweight'))\n #\n #\n # # compute inverse of mean molecular weight first (eq 3)\n # self._write(self.line('Compute inverse of mean molecular wt first'))\n # for species in self.species:\n # self._write('YOW += y[%d]*imw[%d]; ' % (\n # species.id, species.id) + self.line('%s' % species.symbol))\n\n # # now to ytx\n # self._write(self.line('Now compute y to x conversion'))\n # for species in self.species:\n # self._write('x[%d] = y[%d]/(%f*YOW); ' % (\n # species.id, species.id, species.weight) )\n #\n # # call routine\n # self._write('helmholtz(aort, tc);')\n #\n # # Equation 42 and 43\n # self._write(self.line('Perform computation in Eq 44'))\n # for species in self.species:\n # 
self._write('result += x[%d]*(aort[%d]+log((x[%d]+%g))+logPratio);' %\n # (species.id, species.id, species.id, smallnum) )\n\n # self._write(self.line('Scale by RT/W'))\n # self._write('*abms = result * RT * YOW;')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n #\n\n def _ckwc(self, mechanism):\n self._write()\n self._write(\"! compute the production rate for each species\")\n self._write(\"subroutine ckwc\" + sym + \"(T, C, iwrk, rwrk, wdot)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(in) :: T\")\n self._write(\"double precision, intent(inout) :: C(%d)\" % self.nSpecies)\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\n \"double precision, intent(inout) :: wdot(%d)\" % self.nSpecies\n )\n self._write()\n self._write(\"integer :: id\" + \" ! loop counter\")\n\n # convert C to SI units\n self._write()\n self._write(\"! convert to SI\")\n self._write(\"do id = 1, %d\" % self.nSpecies)\n self._indent()\n self._write(\"C(id) = C(id) * 1.0d6\")\n self._outdent()\n self._write(\"end do\")\n\n # call productionRate\n self._write()\n self._write(\"! convert to chemkin units\")\n self._write(\"call productionRate(wdot, C, T)\")\n\n # convert C and wdot to chemkin units\n self._write()\n self._write(\"! convert to chemkin units\")\n self._write(\"do id=1, %d\" % self.nSpecies)\n self._indent()\n self._write(\"C(id) = C(id) * 1.0d-6\")\n self._write(\"wdot(id) = wdot(id) * 1.0d-6\")\n self._outdent()\n self._write(\"end do\")\n\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n # def _ckwyp(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns the molar production rate of species'))\n # self._write(self.line('Given P, T, and mass fractions'))\n # self._write('void CKWYP'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wdot)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n\n # self._write('double c[%d]; ' % self.nSpecies + self.line('temporary storage'))\n # self._write('double YOW = 0; ')\n # self._write('double PWORT; ')\n #\n # # compute inverse of mean molecular weight first (eq 3)\n # self._write(self.line('Compute inverse of mean molecular wt first'))\n # for species in self.species:\n # self._write('YOW += y[%d]*imw[%d]; ' % (\n # species.id, species.id) + self.line('%s' % species.symbol))\n\n # self._write(self.line('PW/RT (see Eq. 
7)'))\n # self._write('PWORT = (*P)/(YOW * %g * (*T)); ' % (R*kelvin*mole/erg) )\n #\n # self._write(self.line('multiply by 1e6 so c goes to SI'))\n # self._write('PWORT *= 1e6; ')\n\n # # now compute conversion\n # self._write(self.line('Now compute conversion (and go to SI)'))\n # for species in self.species:\n # self._write('c[%d] = PWORT * y[%d]*imw[%d]; ' % (\n # species.id, species.id, species.id) )\n\n # # call productionRate\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('productionRate(wdot, c, *T);')\n\n # # convert wdot to chemkin units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('wdot[id] *= 1.0e-6;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckwxp(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns the molar production rate of species'))\n # self._write(self.line('Given P, T, and mole fractions'))\n # self._write('void CKWXP'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wdot)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n\n # self._write('double c[%d]; ' % self.nSpecies + self.line('temporary storage'))\n #\n # self._write('double PORT = 1e6 * (*P)/(%g * (*T)); ' % (R*kelvin*mole/erg) +\n # self.line('1e6 * P/RT so c goes to SI units'))\n #\n # # now compute conversion\n # self._write()\n # self._write(self.line('Compute conversion, see Eq 10'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('c[id] = x[id]*PORT;')\n # self._outdent()\n # self._write('}')\n #\n # # call productionRate\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('productionRate(wdot, c, *T);')\n\n # # convert wdot to chemkin units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('wdot[id] *= 1.0e-6;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckwyr(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns the molar production rate of species'))\n # self._write(self.line('Given rho, T, and mass fractions'))\n # self._write('void CKWYR'+sym+'(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wdot)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n\n # self._write('double c[%d]; ' % self.nSpecies + self.line('temporary storage'))\n\n # # now compute conversion\n # self._write(self.line('See Eq 8 with an extra 1e6 so c goes to SI'))\n # for species in self.species:\n # self._write('c[%d] = 1e6 * (*rho) * y[%d]*imw[%d]; ' % (\n # species.id, species.id, species.id) )\n #\n # # call productionRate\n # self._write()\n # self._write(self.line('call productionRate'))\n # self._write('productionRate(wdot, c, *T);')\n\n # # convert wdot to chemkin units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('wdot[id] *= 1.0e-6;')\n # self._outdent()\n # 
self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _vckwyr(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns the molar production rate of species'))\n # self._write(self.line('Given rho, T, and mass fractions'))\n # self._write('void VCKWYR'+sym+'(int * restrict np, double * restrict rho, double * restrict T,')\n # self._write('\t double * restrict y, int * restrict iwrk, double * restrict rwrk,')\n # self._write('\t double * restrict wdot)')\n # self._write('{')\n # self._indent()\n\n # self._write('double c[%d*(*np)]; ' % self.nSpecies + self.line('temporary storage'))\n\n # # now compute conversion\n # self._write(self.line('See Eq 8 with an extra 1e6 so c goes to SI'))\n # self._write('for (int n=0; n<%d; n++) {' % self.nSpecies)\n # self._indent()\n # self._write('for (int i=0; i<(*np); i++) {')\n # self._indent()\n # self._write('c[n*(*np)+i] = 1.0e6 * rho[i] * y[n*(*np)+i] * imw[n];')\n # self._outdent()\n # self._write('}')\n # self._outdent()\n # self._write('}')\n\n # # call productionRate\n # self._write()\n # self._write(self.line('call productionRate'))\n # self._write('vproductionRate(*np, wdot, c, T);')\n\n # # convert wdot to chemkin units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (int i=0; i<%d*(*np); i++) {' % self.nSpecies)\n # self._indent()\n # self._write('wdot[i] *= 1.0e-6;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckwxr(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('Returns the molar production rate of species'))\n # self._write(self.line('Given rho, T, and mole fractions'))\n # self._write('void CKWXR'+sym+'(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wdot)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n\n # self._write('double c[%d]; ' % self.nSpecies + self.line('temporary storage'))\n #\n # self._write('double XW = 0; '+self.line('See Eq 4, 11 in CK Manual'))\n # self._write('double ROW; ')\n #\n # # compute mean molecular weight first (eq 3)\n # self._write(self.line('Compute mean molecular wt first'))\n # for species in self.species:\n # self._write('XW += x[%d]*%f; ' % (\n # species.id, species.weight) + self.line('%s' % species.symbol))\n\n # # now compute conversion\n # self._write(self.line('Extra 1e6 factor to take c to SI'))\n # self._write('ROW = 1e6*(*rho) / XW;')\n # self._write()\n # self._write(self.line('Compute conversion, see Eq 11'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('c[id] = x[id]*ROW;')\n # self._outdent()\n # self._write('}')\n #\n # # call productionRate\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('productionRate(wdot, c, *T);')\n\n # # convert wdot to chemkin units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('wdot[id] *= 1.0e-6;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _cknu(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReaction = len(mechanism.reaction())\n\n # self._write()\n # self._write()\n # self._write(self.line('Returns the stoichiometric 
coefficients'))\n # self._write(self.line('of the reaction mechanism. (Eq 50)'))\n # self._write('void CKNU'+sym+'(int * kdim, int * iwrk, double * restrict rwrk, int * nuki)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write('int kd = (*kdim); ')\n # self._write(self.line('Zero nuki'))\n # self._write('for (id = 0; id < %d * kd; ++ id) {' % (nSpecies) )\n # self._indent()\n # self._write(' nuki[id] = 0; ')\n # self._outdent()\n # self._write('}')\n #\n # for reaction in mechanism.reaction():\n\n # self._write()\n # self._write(self.line('reaction %d: %s' % (reaction.id, reaction.equation())))\n\n # for symbol, coefficient in reaction.reactants:\n # self._write(\n # \"nuki[ %d * kd + %d ] += -%d ;\"\n # % (mechanism.species(symbol).id, reaction.id-1, coefficient))\n\n # for symbol, coefficient in reaction.products:\n # self._write(\n # \"nuki[ %d * kd + %d ] += +%d ;\"\n # % (mechanism.species(symbol).id, reaction.id-1, coefficient))\n #\n # # done\n # self._outdent()\n # self._write('}')\n\n # return\n\n # def _ckncf(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nElement = len(mechanism.element())\n\n # self._write()\n # self._write()\n # self._write(self.line('Returns the elemental composition '))\n # self._write(self.line('of the speciesi (mdim is num of elements)'))\n # self._write('void CKNCF'+sym+'(int * mdim, int * iwrk, double * restrict rwrk, int * ncf)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write('int kd = (*mdim); ')\n # self._write(self.line('Zero ncf'))\n # self._write('for (id = 0; id < kd * %d; ++ id) {' % (self.nSpecies) )\n # self._indent()\n # self._write(' ncf[id] = 0; ')\n # self._outdent()\n # self._write('}')\n #\n # self._write()\n # for species in mechanism.species():\n # self._write(self.line('%s' % species.symbol))\n # for elem, coef in species.composition:\n # self._write('ncf[ %d * kd + %d ] = %d; ' % (\n # species.id, mechanism.element(elem).id, coef) +\n # self.line('%s' % elem) )\n #\n # self._write()\n #\n # # done\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckabe(self, mechanism):\n\n # nElement = len(mechanism.element())\n\n # self._write()\n # self._write()\n # self._write(self.line('Returns the arrehenius coefficients '))\n # self._write(self.line('for all reactions'))\n # self._write('void CKABE'+sym+'(int * iwrk, double * restrict rwrk, double * restrict a, double * restrict b, double * restrict e)')\n # self._write('{')\n # self._indent()\n\n # self._write('for (int i=0; i<%d; ++i) {' % len(mechanism.reaction()) )\n # self._indent()\n # self._write(\"a[i] = fwd_A[i];\")\n # self._write(\"b[i] = fwd_beta[i];\")\n # self._write(\"e[i] = fwd_Ea[i];\")\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n #\n #\n # def _ckmmwy(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('given y[species]: mass fractions'))\n # self._write(self.line('returns mean molecular weight (gm/mole)'))\n # self._write('void CKMMWY'+sym+'(double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wtm)')\n # self._write('{')\n # self._indent()\n # species = self.species\n # nSpec = len(species)\n # self._write('double YOW = 0;')\n # self._write('double tmp[%d];' % (nSpec))\n # self._write('')\n # self._write('for (int i = 0; i < %d; i++)' % (nSpec))\n # 
self._write('{')\n # self._indent()\n # self._write('tmp[i] = y[i]*imw[i];')\n # self._outdent()\n # self._write('}')\n # self._write('for (int i = 0; i < %d; i++)' % (nSpec))\n # self._write('{')\n # self._indent()\n # self._write('YOW += tmp[i];')\n # self._outdent()\n # self._write('}')\n # self._write('')\n # self._write('*wtm = 1.0 / YOW;')\n # self._write('return;')\n # self._outdent()\n # self._write('}')\n # return\n\n # def _ckmmwx(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('given x[species]: mole fractions'))\n # self._write(self.line('returns mean molecular weight (gm/mole)'))\n # self._write('void CKMMWX'+sym+'(double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wtm)')\n # self._write('{')\n # self._indent()\n\n # self._write('double XW = 0;'+self.line(' see Eq 4 in CK Manual'))\n #\n # # molecular weights of all species\n # for species in self.species:\n # self._write('XW += x[%d]*%f; ' % (\n # species.id, species.weight) + self.line('%s' % species.symbol))\n\n # self._write('*wtm = XW;')\n #\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckmmwc(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('given c[species]: molar concentration'))\n # self._write(self.line('returns mean molecular weight (gm/mole)'))\n # self._write('void CKMMWC'+sym+'(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict wtm)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write(self.line('See Eq 5 in CK Manual'))\n # self._write('double W = 0;')\n # self._write('double sumC = 0;')\n #\n # # molecular weights of all species\n # for species in self.species:\n # self._write('W += c[%d]*%f; ' % (\n # species.id, species.weight) + self.line('%s' % species.symbol))\n\n # self._write()\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('sumC += c[id];')\n # self._outdent()\n # self._write('}')\n\n # self._write(self.line(' CK provides no guard against divison by zero'))\n # self._write('*wtm = W/sumC;')\n #\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n def _ckytx(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\n \"! 
convert y[species] (mass fracs) to x[species] (mole fracs)\"\n )\n self._write(\"subroutine ckytx\" + sym + \"(y, iwrk, rwrk, x)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(in) :: y(%d)\" % (nSpec))\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(out) :: x(%d)\" % (nSpec))\n self._write()\n self._write(\"double precision :: YOW, YOWINV\")\n self._write(\"integer :: i\")\n self._write()\n self._write(\"YOW = 0.d0\")\n self._write()\n self._write(\"do i=1, %d\" % (nSpec))\n self._indent()\n self._write(\"YOW = YOW + y(i) * imw(i)\")\n self._outdent()\n self._write(\"end do\")\n self._write()\n self._write(\"YOWINV = 1.d0 / YOW\")\n self._write()\n self._write(\"do i=1, %d\" % (nSpec))\n self._indent()\n self._write(\"x(i) = y(i) * imw(i) * YOWINV\")\n self._outdent()\n self._write(\"end do\")\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n return\n\n def _vckytx(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\n \"! convert y(npoints,species) (mass fracs) to x(npoints,species) (mole fracs)\"\n )\n self._write(\"subroutine vckytx\" + sym + \"(np, y, iwrk, rwrk, x)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"integer, intent(in) :: np\")\n self._write(\"double precision, intent(in) :: y(np,%d)\" % (nSpec))\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(inout) :: x(np,%d)\" % (nSpec))\n self._write()\n self._write(\"double precision :: YOW(np)\")\n self._write(\"integer :: i, n\")\n self._write()\n self._write(\"do i=1, np\")\n self._indent()\n self._write(\"YOW(i) = 0.d0\")\n self._outdent()\n self._write(\"end do\")\n self._write()\n self._write(\"do n=1, %d\" % (nSpec))\n self._indent()\n self._write(\"do i=1, np\")\n self._indent()\n self._write(\"x(i,n) = y(i,n) * imw(n)\")\n self._write(\"YOW(i) = YOW(i) + x(i,n)\")\n self._outdent()\n self._write(\"end do\")\n self._outdent()\n self._write(\"end do\")\n self._write()\n self._write(\"do i=1, np\")\n self._indent()\n self._write(\"YOW(i) = 1.d0 / YOW(i)\")\n self._outdent()\n self._write(\"end do\")\n self._write()\n self._write(\"do n=1, %d\" % (nSpec))\n self._indent()\n self._write(\"do i=1, np\")\n self._indent()\n self._write(\"x(i,n) = x(i,n) * YOW(i)\")\n self._outdent()\n self._write(\"end do\")\n self._outdent()\n self._write(\"end do\")\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n return\n\n # def _ckytcp(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'convert y[species] (mass fracs) to c[species] (molar conc)'))\n # self._write('void CKYTCP'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict c)')\n # self._write('{')\n # self._indent()\n\n # species = self.species\n # nSpec = len(species)\n # self._write('double YOW = 0;')\n # self._write('double PWORT;')\n # self._write('')\n # self._write(self.line('Compute inverse of mean molecular wt first'))\n # self._write('for (int i = 0; i < %d; i++)' % (nSpec))\n # self._write('{')\n # self._indent()\n # self._write('c[i] = y[i]*imw[i];')\n # self._outdent()\n # self._write('}')\n # self._write('for (int i = 0; i < %d; i++)' % (nSpec))\n # self._write('{')\n # self._indent()\n # 
self._write('YOW += c[i];')\n # self._outdent()\n # self._write('}')\n # self._write('')\n # self._write(self.line('PW/RT (see Eq. 7)'))\n # self._write('PWORT = (*P)/(YOW * %g * (*T)); ' % (R*kelvin*mole/erg) )\n\n # # now compute conversion\n # self._write(self.line('Now compute conversion'))\n # self._write('')\n # self._write('for (int i = 0; i < %d; i++)' % (nSpec))\n # self._write('{')\n # self._indent()\n # self._write('c[i] = PWORT * y[i] * imw[i];')\n # self._outdent()\n # self._write('}')\n # self._write('return;')\n # self._outdent()\n # self._write('}')\n # return\n\n def _ckytcr(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\n \"! convert y[species] (mass fracs) to c[species] (molar conc)\"\n )\n self._write(\"subroutine ckytcr\" + sym + \"(rho, T, y, iwrk, rwrk, c)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(in) :: rho\")\n self._write(\"double precision, intent(in) :: T\")\n self._write(\"double precision, intent(in) :: y(%d)\" % nSpec)\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(out) :: c(%d)\" % nSpec)\n self._write()\n self._write(\"integer :: i\")\n self._write()\n self._write(\"do i=1, %d\" % nSpec)\n self._indent()\n self._write(\"c(i) = rho * y(i) * imw(i)\")\n self._outdent()\n self._write(\"end do\")\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n return\n\n def _ckxty(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\n \"! convert x[species] (mole fracs) to y[species] (mass fracs)\"\n )\n self._write(\"subroutine ckxty\" + sym + \"(x, iwrk, rwrk, y)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(in) :: x(%d)\" % nSpec)\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(out) :: y(%d)\" % nSpec)\n self._write()\n self._write(\"double precision :: XW, XWinv\")\n self._write()\n\n self._write(\"XW = 0.d0 \" + \"! See Eq 4, 9 in CK Manual\")\n self._write()\n\n # compute mean molecular weight first (eq 3)\n self._write(\"! Compute mean molecular wt first\")\n for species in self.species:\n expression = format(species.weight, \"15.8e\").replace(\"e\", \"d\")\n self._write(\n \"XW = XW + (x(%d) *%s) \" % (species.id + 1, expression)\n + \"! %s\" % species.symbol\n )\n\n # now compute conversion\n self._write()\n self._write(\"! 
Now compute conversion\")\n self._write(\"XWinv = 1.d0 / XW\")\n for species in self.species:\n expression = format(species.weight, \"15.8e\").replace(\"e\", \"d\")\n self._write(\n \"y(%d) = x(%d) *%s * XWinv \"\n % (species.id + 1, species.id + 1, expression)\n )\n\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n # def _ckxtcp(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'convert x[species] (mole fracs) to c[species] (molar conc)'))\n # self._write('void CKXTCP'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict c)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write('double PORT = (*P)/(%g * (*T)); ' % (R*kelvin*mole/erg) +\n # self.line('P/RT'))\n # # now compute conversion\n # self._write()\n # self._write(self.line('Compute conversion, see Eq 10'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('c[id] = x[id]*PORT;')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckxtcr(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'convert x[species] (mole fracs) to c[species] (molar conc)'))\n # self._write('void CKXTCR'+sym+'(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict c)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write('double XW = 0; '+self.line('See Eq 4, 11 in CK Manual'))\n # self._write('double ROW; ')\n #\n # # compute mean molecular weight first (eq 3)\n # self._write(self.line('Compute mean molecular wt first'))\n # for species in self.species:\n # self._write('XW += x[%d]*%f; ' % (\n # species.id, species.weight) + self.line('%s' % species.symbol))\n\n # # now compute conversion\n # self._write('ROW = (*rho) / XW;')\n # self._write()\n # self._write(self.line('Compute conversion, see Eq 11'))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('c[id] = x[id]*ROW;')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckctx(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'convert c[species] (molar conc) to x[species] (mole fracs)'))\n # self._write('void CKCTX'+sym+'(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict x)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n # self._write('double sumC = 0; ')\n\n # self._write()\n # self._write(self.line('compute sum of c '))\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('sumC += c[id];')\n # self._outdent()\n # self._write('}')\n\n # # now compute conversion\n # self._write()\n # self._write(self.line(' See Eq 13 '))\n # self._write('double sumCinv = 1.0/sumC;')\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('x[id] = c[id]*sumCinv;')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckcty(self, mechanism):\n # self._write()\n # 
self._write()\n # self._write(self.line(\n # 'convert c[species] (molar conc) to y[species] (mass fracs)'))\n # self._write('void CKCTY'+sym+'(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict y)')\n # self._write('{')\n # self._indent()\n\n # self._write('double CW = 0; '+self.line('See Eq 12 in CK Manual'))\n #\n # # compute denominator in eq 12\n # self._write(self.line('compute denominator in eq 12 first'))\n # for species in self.species:\n # self._write('CW += c[%d]*%f; ' % (\n # species.id, species.weight) + self.line('%s' % species.symbol))\n\n # # now compute conversion\n # self._write(self.line('Now compute conversion'))\n # self._write('double CWinv = 1.0/CW;')\n # for species in self.species:\n # self._write('y[%d] = c[%d]*%f*CWinv; ' % (\n # species.id, species.id, species.weight) )\n\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckcpor(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('get Cp/R as a function of T '))\n # self._write(self.line('for all species (Eq 19)'))\n # self._write('void CKCPOR'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cpor)')\n # self._write('{')\n # self._indent()\n\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n #\n # # call routine\n # self._write('cp_R(cpor, tc);')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n #\n # def _ckhort(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('get H/RT as a function of T '))\n # self._write(self.line('for all species (Eq 20)'))\n # self._write('void CKHORT'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict hort)')\n # self._write('{')\n # self._indent()\n\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n #\n # # call routine\n # self._write('speciesEnthalpy(hort, tc);')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _cksor(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('get S/R as a function of T '))\n # self._write(self.line('for all species (Eq 21)'))\n # self._write('void CKSOR'+sym+'(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sor)')\n # self._write('{')\n # self._indent()\n\n # # get temperature cache\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n #\n # # call routine\n # self._write('speciesEntropy(sor, tc);')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckqc(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n\n # self._write()\n # self._write()\n # self._write(self.line('Returns the rate of progress for each reaction'))\n # self._write('void CKQC'+sym+'(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict qdot)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n\n # # convert C to SI units\n # self._write()\n # 
self._write(self.line('convert to SI'))\n # self._write('for (id = 0; id < %d; ++id) {' % nSpecies)\n # self._indent()\n # self._write('C[id] *= 1.0e6;')\n # self._outdent()\n # self._write('}')\n #\n # # call productionRate\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('progressRate(qdot, C, *T);')\n\n # # convert C to chemkin units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % nSpecies)\n # self._indent()\n # self._write('C[id] *= 1.0e-6;')\n # self._outdent()\n # self._write('}')\n\n # # convert qdot to chemkin units\n # self._write()\n # self._write('for (id = 0; id < %d; ++id) {' % nReactions)\n # self._indent()\n # self._write('qdot[id] *= 1.0e-6;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n #\n # def _ckkfkr(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n #\n # self._write()\n # self._write()\n # self._write(self.line('Returns the progress rates of each reactions'))\n # self._write(self.line('Given P, T, and mole fractions'))\n # self._write('void CKKFKR'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict q_f, double * restrict q_r)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n\n # self._write('double c[%d]; ' % nSpecies + self.line('temporary storage'))\n #\n # self._write('double PORT = 1e6 * (*P)/(%g * (*T)); ' % (R*kelvin*mole/erg) +\n # self.line('1e6 * P/RT so c goes to SI units'))\n #\n # # now compute conversion\n # self._write()\n # self._write(self.line('Compute conversion, see Eq 10'))\n # self._write('for (id = 0; id < %d; ++id) {' % nSpecies)\n # self._indent()\n # self._write('c[id] = x[id]*PORT;')\n # self._outdent()\n # self._write('}')\n #\n # # call progressRateFR\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('progressRateFR(q_f, q_r, c, *T);')\n\n # # convert qdot to chemkin units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % nReactions )\n # self._indent()\n # self._write('q_f[id] *= 1.0e-6;')\n # self._write('q_r[id] *= 1.0e-6;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckqyp(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n #\n # self._write()\n # self._write()\n # self._write(self.line('Returns the progress rates of each reactions'))\n # self._write(self.line('Given P, T, and mass fractions'))\n # self._write('void CKQYP'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict qdot)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n\n # self._write('double c[%d]; ' % nSpecies + self.line('temporary storage'))\n # self._write('double YOW = 0; ')\n # self._write('double PWORT; ')\n #\n # # compute inverse of mean molecular weight first (eq 3)\n # self._write(self.line('Compute inverse of mean molecular wt first'))\n # for species in self.species:\n # self._write('YOW += y[%d]*imw[%d]; ' % (\n # species.id, species.id) + self.line('%s' % species.symbol))\n\n # self._write(self.line('PW/RT (see Eq. 
7)'))\n # self._write('PWORT = (*P)/(YOW * %g * (*T)); ' % (R*kelvin*mole/erg) )\n #\n # self._write(self.line('multiply by 1e6 so c goes to SI'))\n # self._write('PWORT *= 1e6; ')\n\n # # now compute conversion\n # self._write(self.line('Now compute conversion (and go to SI)'))\n # for species in self.species:\n # self._write('c[%d] = PWORT * y[%d]*imw[%d]; ' % (\n # species.id, species.id, species.id) )\n\n # # call progressRate\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('progressRate(qdot, c, *T);')\n\n # # convert qdot to chemkin units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % nReactions )\n # self._indent()\n # self._write('qdot[id] *= 1.0e-6;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckqxp(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n #\n # self._write()\n # self._write()\n # self._write(self.line('Returns the progress rates of each reactions'))\n # self._write(self.line('Given P, T, and mole fractions'))\n # self._write('void CKQXP'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict qdot)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n\n # self._write('double c[%d]; ' % nSpecies + self.line('temporary storage'))\n #\n # self._write('double PORT = 1e6 * (*P)/(%g * (*T)); ' % (R*kelvin*mole/erg) +\n # self.line('1e6 * P/RT so c goes to SI units'))\n #\n # # now compute conversion\n # self._write()\n # self._write(self.line('Compute conversion, see Eq 10'))\n # self._write('for (id = 0; id < %d; ++id) {' % nSpecies)\n # self._indent()\n # self._write('c[id] = x[id]*PORT;')\n # self._outdent()\n # self._write('}')\n #\n # # call progressRate\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('progressRate(qdot, c, *T);')\n\n # # convert qdot to chemkin units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % nReactions )\n # self._indent()\n # self._write('qdot[id] *= 1.0e-6;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckqyr(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n #\n # self._write()\n # self._write()\n # self._write(self.line('Returns the progress rates of each reactions'))\n # self._write(self.line('Given rho, T, and mass fractions'))\n # self._write('void CKQYR'+sym+'(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict qdot)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n\n # self._write('double c[%d]; ' % nSpecies + self.line('temporary storage'))\n\n # # now compute conversion\n # self._write(self.line('See Eq 8 with an extra 1e6 so c goes to SI'))\n # for species in self.species:\n # self._write('c[%d] = 1e6 * (*rho) * y[%d]*imw[%d]; ' % (\n # species.id, species.id, species.id) )\n #\n # # call progressRate\n # self._write()\n # self._write(self.line('call progressRate'))\n # self._write('progressRate(qdot, c, *T);')\n\n # # convert qdot to chemkin units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # 
self._write('for (id = 0; id < %d; ++id) {' % nReactions )\n # self._indent()\n # self._write('qdot[id] *= 1.0e-6;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckqxr(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n #\n # self._write()\n # self._write()\n # self._write(self.line('Returns the progress rates of each reactions'))\n # self._write(self.line('Given rho, T, and mole fractions'))\n # self._write('void CKQXR'+sym+'(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict qdot)')\n # self._write('{')\n # self._indent()\n\n # self._write('int id; ' + self.line('loop counter'))\n\n # self._write('double c[%d]; ' % nSpecies + self.line('temporary storage'))\n #\n # self._write('double XW = 0; '+self.line('See Eq 4, 11 in CK Manual'))\n # self._write('double ROW; ')\n #\n # # compute mean molecular weight first (eq 3)\n # self._write(self.line('Compute mean molecular wt first'))\n # for species in self.species:\n # self._write('XW += x[%d]*%f; ' % (\n # species.id, species.weight) + self.line('%s' % species.symbol))\n\n # # now compute conversion\n # self._write(self.line('Extra 1e6 factor to take c to SI'))\n # self._write('ROW = 1e6*(*rho) / XW;')\n # self._write()\n # self._write(self.line('Compute conversion, see Eq 11'))\n # self._write('for (id = 0; id < %d; ++id) {' % nSpecies)\n # self._indent()\n # self._write('c[id] = x[id]*ROW;')\n # self._outdent()\n # self._write('}')\n #\n # # call progressRate\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('progressRate(qdot, c, *T);')\n\n # # convert qdot to chemkin units\n # self._write()\n # self._write(self.line('convert to chemkin units'))\n # self._write('for (id = 0; id < %d; ++id) {' % nReactions )\n # self._indent()\n # self._write('qdot[id] *= 1.0e-6;')\n # self._outdent()\n # self._write('}')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n #\n # def __ckeqcontent(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n\n # self._write(\n # 'double tT = *T; '\n # + self.line('temporary temperature'))\n # self._write(\n # 'double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; '\n # + self.line('temperature cache'))\n # self._write(\n # 'double gort[%d]; ' % nSpecies + self.line(' temporary storage'))\n\n # # compute the gibbs free energy\n # self._write()\n # self._write(self.line('compute the Gibbs free energy'))\n # self._write('gibbs(gort, tc);')\n\n # # compute the equilibrium constants\n # self._write()\n # self._write(self.line('compute the equilibrium constants'))\n # self._write('equilibriumConstants(eqcon, gort, tT);')\n\n # for reaction in mechanism.reaction():\n\n # self._write()\n # self._write(self.line('reaction %d: %s' % (reaction.id, reaction.equation())))\n\n # somepow = 0\n # for symbol, coefficient in reaction.reactants:\n # somepow = somepow - coefficient\n\n # for symbol, coefficient in reaction.products:\n # somepow = somepow + coefficient\n\n # if somepow == 0:\n # self._write(self.line(\n # 'eqcon[%d] *= %g; ' % (reaction.id-1, (1e-6)**somepow) ) )\n #\n # else:\n # self._write( 'eqcon[%d] *= %g; ' % (reaction.id-1, (1e-6)**somepow) )\n\n # def _ckeqc(self, mechanism):\n\n # self._write()\n # self._write()\n # self._write(self.line('Returns the equil constants for each reaction'))\n # self._write('void 
CKEQC'+sym+'(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict eqcon)')\n # self._write('{')\n # self._indent()\n\n # self.__ckeqcontent(mechanism)\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n #\n # def _ckeqyp(self, mechanism):\n\n # import pyre\n # periodic = pyre.handbook.periodicTable()\n\n # self._write()\n # self._write()\n # self._write(self.line('Returns the equil constants for each reaction'))\n # self._write(self.line('Given P, T, and mass fractions'))\n # self._write('void CKEQYP'+sym+'(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict eqcon)')\n # self._write('{')\n # self._indent()\n\n # self.__ckeqcontent(mechanism)\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckeqxp(self, mechanism):\n\n # import pyre\n # periodic = pyre.handbook.periodicTable()\n\n # self._write()\n # self._write()\n # self._write(self.line('Returns the equil constants for each reaction'))\n # self._write(self.line('Given P, T, and mole fractions'))\n # self._write('void CKEQXP'+sym+'(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict eqcon)')\n # self._write('{')\n # self._indent()\n\n # self.__ckeqcontent(mechanism)\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckeqyr(self, mechanism):\n\n # import pyre\n # periodic = pyre.handbook.periodicTable()\n\n # self._write()\n # self._write()\n # self._write(self.line('Returns the equil constants for each reaction'))\n # self._write(self.line('Given rho, T, and mass fractions'))\n # self._write('void CKEQYR'+sym+'(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict eqcon)')\n # self._write('{')\n # self._indent()\n\n # self.__ckeqcontent(mechanism)\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ckeqxr(self, mechanism):\n\n # import pyre\n # periodic = pyre.handbook.periodicTable()\n\n # self._write()\n # self._write()\n # self._write(self.line('Returns the equil constants for each reaction'))\n # self._write(self.line('Given rho, T, and mole fractions'))\n # self._write('void CKEQXR'+sym+'(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict eqcon)')\n # self._write('{')\n # self._indent()\n\n # self.__ckeqcontent(mechanism)\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # Fu#ego Extensions. 
All functions in this section has the fe prefix\n # Al#l fuctions in this section uses the standard fuego chemkin functions\n # def _ck_eytt(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # lowT,highT,dummy = self._analyzeThermodynamics(mechanism)\n #\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'get temperature given internal energy in mass units and mass fracs'))\n # self._write('int feeytt'+fsym+'(double * restrict e, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict t)')\n # self._write('{')\n # self._indent()\n\n # self._write('const int maxiter = 50;')\n # self._write('const double tol = 0.001;')\n # self._write('double ein = *e;')\n # self._write('double tmin = %g; // max lower bound for thermo def' % lowT)\n # self._write('double tmax = %g; // min upper bound for thermo def' % highT)\n # self._write('double e1,emin,emax,cv,t1,dt;')\n # self._write('int i; // loop counter')\n # self._write('CKUBMS'+sym+'(&tmin, y, iwrk, rwrk, &emin);')\n # self._write('CKUBMS'+sym+'(&tmax, y, iwrk, rwrk, &emax);')\n # self._write('if (ein < emin) {')\n # self._indent()\n # self._write(self.line('Linear Extrapolation below tmin'))\n # self._write('CKCVBS'+sym+'(&tmin, y, iwrk, rwrk, &cv);')\n # self._write('*t = tmin - (emin-ein)/cv;')\n # self._write('return 1;')\n # self._outdent()\n # self._write('}')\n #\n # self._write('if (ein > emax) {')\n # self._indent()\n # self._write(self.line('Linear Extrapolation above tmax'))\n # self._write('CKCVBS'+sym+'(&tmax, y, iwrk, rwrk, &cv);')\n # self._write('*t = tmax - (emax-ein)/cv;')\n # self._write('return 1;')\n # self._outdent()\n # self._write('}')\n\n # self._write('t1 = tmin + (tmax-tmin)/(emax-emin)*(ein-emin);')\n # self._write('for (i = 0; i < maxiter; ++i) {')\n # self._indent()\n # self._write('CKUBMS'+sym+'(&t1,y,iwrk,rwrk,&e1);')\n # self._write('CKCVBS'+sym+'(&t1,y,iwrk,rwrk,&cv);')\n # self._write('dt = (ein - e1) / cv;')\n # self._write('if (dt > 100) { dt = 100; }')\n # self._write('else if (dt < -100) { dt = -100; }')\n # self._write('else if (fabs(dt) < tol) break;')\n # self._write('t1 += dt;')\n # self._outdent()\n # self._write('}')\n #\n # self._write('*t = t1;')\n # self._write('return 0;')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ck_hytt(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # lowT,highT,dummy = self._analyzeThermodynamics(mechanism)\n #\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'get temperature given enthalpy in mass units and mass fracs'))\n # self._write('int fehytt'+fsym+'(double * restrict h, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict t)')\n # self._write('{')\n # self._indent()\n\n # self._write('const int maxiter = 50;')\n # self._write('const double tol = 0.001;')\n # self._write('double hin = *h;')\n # self._write('double tmin = %g; // max lower bound for thermo def' % lowT)\n # self._write('double tmax = %g; // min upper bound for thermo def' % highT)\n # self._write('double h1,hmin,hmax,cp,t1,dt;')\n # self._write('int i; // loop counter')\n # self._write('CKHBMS'+sym+'(&tmin, y, iwrk, rwrk, &hmin);')\n # self._write('CKHBMS'+sym+'(&tmax, y, iwrk, rwrk, &hmax);')\n # self._write('if (hin < hmin) {')\n # self._indent()\n # self._write(self.line('Linear Extrapolation below tmin'))\n # self._write('CKCPBS'+sym+'(&tmin, y, iwrk, rwrk, &cp);')\n # self._write('*t = tmin - (hmin-hin)/cp;')\n # self._write('return 1;')\n # self._outdent()\n # 
self._write('}')\n #\n # self._write('if (hin > hmax) {')\n # self._indent()\n # self._write(self.line('Linear Extrapolation above tmax'))\n # self._write('CKCPBS'+sym+'(&tmax, y, iwrk, rwrk, &cp);')\n # self._write('*t = tmax - (hmax-hin)/cp;')\n # self._write('return 1;')\n # self._outdent()\n # self._write('}')\n\n # self._write('t1 = tmin + (tmax-tmin)/(hmax-hmin)*(hin-hmin);')\n # self._write('for (i = 0; i < maxiter; ++i) {')\n # self._indent()\n # self._write('CKHBMS'+sym+'(&t1,y,iwrk,rwrk,&h1);')\n # self._write('CKCPBS'+sym+'(&t1,y,iwrk,rwrk,&cp);')\n # self._write('dt = (hin - h1) / cp;')\n # self._write('if (dt > 100) { dt = 100; }')\n # self._write('else if (dt < -100) { dt = -100; }')\n # self._write('else if (fabs(dt) < tol) break;')\n # self._write('t1 += dt;')\n # self._outdent()\n # self._write('}')\n #\n # self._write('*t = t1;')\n # self._write('return 0;')\n #\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ck_phity(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'convert phi[species] (specific mole nums) to y[species] (mass fracs)'))\n # self._write('void fephity'+fsym+'(double * restrict phi, int * iwrk, double * restrict rwrk, double * restrict y)')\n # self._write('{')\n # self._indent()\n\n # self._write('double XW = 0; ')\n # self._write('int id; ' + self.line('loop counter'))\n #\n # # compute mean molecular weight first (eq 3)\n # self._write(self.line('Compute mean molecular wt first'))\n # for species in self.species:\n # self._write('y[%d] = phi[%d]*%f; XW += y[%d]; ' % (\n # species.id, species.id, species.weight, species.id) +\n # self.line('%s' % species.symbol))\n\n # self._write('for (id = 0; id < %d; ++id) {' % self.nSpecies)\n # self._indent()\n # self._write('y[id] = y[id]/XW;')\n # self._outdent()\n # self._write('}')\n #\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ck_ytphi(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'convert y[species] (mass fracs) to phi[species] (specific mole num)'))\n # self._write('void feytphi'+fsym+'(double * restrict y, int * iwrk, double * restrict rwrk, double * restrict phi)')\n # self._write('{')\n # self._indent()\n\n # for species in self.species:\n # self._write('phi[%d] = y[%d]/%15.8e; ' % (\n # species.id, species.id, species.weight/1000.0) +\n # self.line('%s (wt in kg)' % species.symbol))\n\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ck_ctyr(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'reverse of ytcr, useful for rate computations'))\n # self._write('void fectyr'+fsym+'(double * restrict c, double * restrict rho, int * iwrk, double * restrict rwrk, double * restrict y)')\n # self._write('{')\n # self._indent()\n\n # # now compute conversion\n # for species in self.species:\n # self._write('y[%d] = c[%d] * %f / (*rho); ' % (\n # species.id, species.id, species.weight) )\n #\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ck_cvrhs(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'ddebdf compatible right hand side of CV burner'))\n # self._write(self.line(\n # 'rwrk[0] and rwrk[1] should contain rho and ene respectively'))\n # self._write(self.line(\n # 'working variable phi contains specific mole numbers'))\n # self._write('void 
fecvrhs'+fsym+'(double * restrict time, double * restrict phi, double * restrict phidot, double * restrict rwrk, int * iwrk)')\n\n # self._write('{')\n # self._indent()\n # # main body\n # self._write('double rho,ene; ' + self.line('CV Parameters'))\n # self._write('double y[%s], wdot[%s]; ' % (self.nSpecies, self.nSpecies) +\n # self.line('temporary storage'))\n # self._write('int i; ' + self.line('Loop counter'))\n # self._write('double temperature,pressure; ' + self.line('temporary var'))\n # self._write('rho = rwrk[0];')\n # self._write('ene = rwrk[1];')\n # self._write('fephity'+fsym+'(phi, iwrk, rwrk, y);')\n # self._write('feeytt'+fsym+'(&ene, y, iwrk, rwrk, &temperature);')\n # self._write('CKPY'+sym+'(&rho, &temperature, y, iwrk, rwrk, &pressure);')\n # self._write('CKWYP'+sym+'(&pressure, &temperature, y, iwrk, rwrk, wdot);')\n # self._write('for (i=0; i<%s; ++i) phidot[i] = wdot[i] / (rho/1000.0); ' % self.nSpecies)\n # self._write()\n # self._write('return;')\n\n # self._outdent()\n # self._write('}')\n # return\n\n # def _ck_cvdim(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'returns the dimensionality of the cv burner (number of species)'))\n # self._write('int fecvdim'+fsym+'()')\n\n # self._write('{')\n # self._indent()\n # # main body\n # self._write('return %d;' % self.nSpecies)\n\n # self._outdent()\n # self._write('}')\n # return\n\n # def _ck_zndrhs(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'ddebdf compatible right hand side of ZND solver'))\n # self._write(self.line( 'rwrk[0] : scaling factor for pressure'))\n # self._write(self.line( 'rwrk[1] : preshock density (g/cc) '))\n # self._write(self.line( 'rwrk[2] : detonation velocity (cm/s) '))\n # self._write(self.line( 'solution vector: [P; rho; y0 ... 
ylast] '))\n # self._write('void fezndrhs'+fsym+'(double * restrict time, double * restrict z, double * restrict zdot, double * restrict rwrk, int * iwrk)')\n\n # self._write('{')\n # self._indent()\n # # main body\n # self._write('double psc,rho1,udet; ' + self.line('ZND Parameters'))\n # self._write('double wt[%s], hms[%s], wdot[%s]; ' %\n # (self.nSpecies, self.nSpecies, self.nSpecies) +\n # self.line('temporary storage'))\n # self._write('int i; ' + self.line('Loop counter'))\n # self._write(self.line('temporary variables'))\n # self._write('double ru, T, uvel, wtm, p, rho, gam, son, xm, sum, drdy, eta, cp, cv ;')\n # self._write('double * restrict y; ' + self.line('mass frac pointer'))\n # self._write()\n # self._write('ru = %g;' % (R * mole * kelvin / erg))\n # self._write()\n # self._write('psc = rwrk[0];')\n # self._write('rho1 = rwrk[1];')\n # self._write('udet = rwrk[2];')\n # self._write()\n # self._write('p = z[0] * psc;')\n # self._write('rho = z[1];')\n # self._write()\n # self._write('y = &z[3];')\n # self._write()\n # self._write('CKMMWY'+sym+'(y, 0, 0, &wtm);')\n # self._write()\n # self._write('T = p * wtm / rho / ru;')\n # self._write()\n # self._write('uvel = (rho1 * udet)/ rho;')\n # self._write()\n # self._write('CKCPBS'+sym+'(&T, y, 0, 0, &cp);')\n # self._write('CKCVBS'+sym+'(&T, y, 0, 0, &cv);')\n # self._write('gam = cp/cv;')\n # self._write()\n # self._write('son = sqrt(fabs(gam*ru*T/wtm));')\n # self._write('xm = uvel/son;')\n # self._write()\n # self._write('CKHMS'+sym+'(&T, 0, 0, hms);')\n # self._write('CKWT'+sym+'(0, 0, wt);')\n # self._write('CKWYP'+sym+'(&p, &T, y, 0, 0, wdot);')\n # self._write()\n # self._write('sum = 0.0;')\n # self._write('for (i=0; i<%s; ++i) {' % self.nSpecies)\n # self._indent()\n # self._write('zdot[i+3] = wdot[i] * wt[i] / rho;')\n # self._write('drdy = -rho * wtm / wt[i];')\n # self._write('sum += -( drdy + rho * hms[i]/ (cp*T) ) * zdot[i+3];')\n # self._outdent()\n # self._write('}')\n # self._write()\n # self._write('eta = 1.0 - xm*xm;')\n # self._write('zdot[0] = -(uvel*uvel/eta/psc)*sum;')\n # self._write('zdot[1] = -sum/eta;')\n # self._write('zdot[2] = uvel;')\n # self._write()\n # self._write('return;')\n\n # self._outdent()\n # self._write('}')\n # return\n\n # def _ck_znddim(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'returns the dimensionality of the ZND solver (3+number of species)'))\n # self._write('int feznddim'+fsym+'()')\n\n # self._write('{')\n # self._indent()\n # # main body\n # self._write('return %d;' % (self.nSpecies + 3) )\n\n # self._outdent()\n # self._write('}')\n # return\n #\n # def _ck_mechfile(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'returns the name of the source mechanism file '))\n # self._write('char* femechfile'+fsym+'()')\n\n # self._write('{')\n # self._indent()\n # # main body\n # self._write('return \"%s\";' % mechanism.name())\n\n # self._outdent()\n # self._write('}')\n # return\n\n # def _ck_symnum(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'returns the species number'))\n # self._write('int fesymnum'+fsym+'(const char* s1)')\n\n # self._write('{')\n # self._indent()\n #\n # for species in self.species:\n # self._write('if (strcmp(s1, \"%s\")==0) return %d; ' % (\n # species.symbol, species.id))\n\n # self._write(self.line( 'species name not found' ))\n # self._write('return -1;')\n\n # self._outdent()\n # self._write('}')\n # return\n #\n # def 
_ck_symname(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line(\n # 'returns the species name'))\n # self._write('char* fesymname'+fsym+'(int sn)')\n\n # self._write('{')\n # self._indent()\n\n # for species in self.species:\n # self._write('if (sn==%d) return \"%s\"; ' % (\n # species.id, species.symbol))\n\n # self._write(self.line( 'species name not found' ))\n # self._write('return \"NOTFOUND\";')\n\n # self._outdent()\n # self._write('}')\n # return\n\n # Fuego's core routines section begins here\n def _molecularWeight(self, mechanism):\n import pyre\n\n periodic = pyre.handbook.periodicTable()\n nSpecies = len(mechanism.species())\n self._write()\n self._write(\"! save molecular weights into array\")\n self._write(\"subroutine molecularWeight(wt)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(out) :: wt(%d)\" % nSpecies)\n self._write()\n\n # wtTab=np.zeros(nSpecies)\n # molecular weights of all species\n for species in mechanism.species():\n weight = 0.0 # species.molecularWeight()\n for elem, coef in species.composition:\n aw = mechanism.element(elem).weight\n if not aw:\n aw = periodic.symbol(elem.capitalize()).atomicWeight\n weight += coef * aw\n\n self._write(\n \"wt(%d) = %fd0 \" % (species.id + 1, weight)\n + \"! %s\" % species.symbol\n )\n\n self._write()\n self._outdent()\n self._write(\"end subroutine\")\n return\n\n # def _atomicWeight(self, mechanism):\n\n # self._write()\n # self._write()\n # self._write(self.line('save atomic weights into array'))\n # self._write('void atomicWeight(double * restrict awt)')\n # self._write('{')\n # self._indent()\n # import pyre\n # periodic = pyre.handbook.periodicTable()\n # for element in mechanism.element():\n # aw = mechanism.element(element.symbol).weight\n # if not aw:\n # aw = periodic.symbol(element.symbol.capitalize()).atomicWeight\n\n # self._write('awt[%d] = %f; ' % (\n # element.id, aw) + self.line('%s' % element.symbol))\n\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n def _productionRate(self, mechanism):\n\n nSpecies = len(mechanism.species())\n nReactions = len(mechanism.reaction())\n\n itroe = self.reactionIndex[0:2]\n isri = self.reactionIndex[1:3]\n ilindemann = self.reactionIndex[2:4]\n i3body = self.reactionIndex[3:5]\n isimple = self.reactionIndex[4:6]\n ispecial = self.reactionIndex[5:7]\n\n if len(self.reactionIndex) != 7:\n print(\"\\n\\nCheck this!!!\\n\")\n sys.exit(1)\n\n ntroe = itroe[1] - itroe[0]\n nsri = isri[1] - isri[0]\n nlindemann = ilindemann[1] - ilindemann[0]\n n3body = i3body[1] - i3body[0]\n nsimple = isimple[1] - isimple[0]\n nspecial = ispecial[1] - ispecial[0]\n\n # main function\n self._write()\n self._write(\"! 
compute the production rate for each species\")\n self._write(\"subroutine productionRate(wdot, sc, T)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n\n self._write(\"double precision, intent(inout) :: wdot(%d)\" % nSpecies)\n self._write(\"double precision, intent(in) :: sc(%d)\" % nSpecies)\n self._write(\"double precision, intent(in) :: T\")\n self._write()\n self._write(\"double precision :: tc(5)\")\n self._write(\"double precision :: invT\")\n self._write(\n \"double precision :: qdot, q_f(%d), q_r(%d)\"\n % (nReactions, nReactions)\n )\n self._write(\"integer :: i\")\n\n self._write()\n self._write(\"tc = (/ log(T), T, T*T, T*T*T, T*T*T*T /)\")\n self._write(\"invT = 1.d0 / tc(2)\")\n\n self._write()\n self._write(\"if (T /= T_save) then\")\n self._indent()\n self._write(\"T_save = T\")\n self._write(\"call comp_k_f(tc,invT,k_f_save)\")\n self._write(\"call comp_Kc(tc,invT,Kc_save)\")\n self._outdent()\n self._write(\"end if\")\n\n self._write()\n self._write(\"call comp_qfqr(q_f, q_r, sc, tc, invT)\")\n\n self._write()\n self._write(\"do i=1, %d\" % nSpecies)\n self._indent()\n self._write(\"wdot(i) = 0.d0\")\n self._outdent()\n self._write(\"end do\")\n\n for i in range(nReactions):\n self._write()\n self._write(\"qdot = q_f(%d)-q_r(%d)\" % (i + 1, i + 1))\n reaction = mechanism.reaction(id=i)\n agents = list(set(reaction.reactants + reaction.products))\n agents = sorted(agents, key=lambda x: mechanism.species(x[0]).id)\n # note that a species might appear as both reactant and product\n # a species might alos appear twice or more on on each side\n # agents is a set that contains unique (symbol, coefficient)\n for a in agents:\n symbol, coefficient = a\n for b in reaction.reactants:\n if b == a:\n if coefficient == 1:\n self._write(\n \"wdot(%d) = wdot(%d) - qdot\"\n % (\n mechanism.species(symbol).id + 1,\n mechanism.species(symbol).id + 1,\n )\n )\n else:\n self._write(\n \"wdot(%d) = wdot(%d) - (%d * qdot)\"\n % (\n mechanism.species(symbol).id + 1,\n mechanism.species(symbol).id + 1,\n coefficient,\n )\n )\n for b in reaction.products:\n if b == a:\n if coefficient == 1:\n self._write(\n \"wdot(%d) = wdot(%d) + qdot\"\n % (\n mechanism.species(symbol).id + 1,\n mechanism.species(symbol).id + 1,\n )\n )\n else:\n self._write(\n \"wdot(%d) = wdot(%d) + (%d * qdot)\"\n % (\n mechanism.species(symbol).id + 1,\n mechanism.species(symbol).id + 1,\n coefficient,\n )\n )\n\n self._write()\n # self._write('return')\n self._outdent()\n\n self._write(\"end subroutine\")\n\n # k_f function\n self._write()\n self._write(\"subroutine comp_k_f(tc, invT, k_f)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(in) :: tc(5)\")\n self._write(\"double precision, intent(in) :: invT\")\n self._write(\"double precision, intent(out) :: k_f(%d)\" % nReactions)\n self._write()\n self._write(\"integer :: i\")\n # self._outdent()\n # self._write('#ifdef __INTEL_COMPILER')\n # self._indent()\n # self._write('#pragma simd')\n # self._outdent()\n # self._write('#endif')\n # self._indent()\n self._write()\n self._write(\"do i=1, %d\" % nReactions)\n self._indent()\n self._write(\n \"k_f(i) = prefactor_units(i)*fwd_A(i)*exp(fwd_beta(i)*tc(1)-activation_units(i)*fwd_Ea(i)*invT)\"\n )\n self._outdent()\n self._write(\"end do\")\n self._write()\n self._outdent()\n self._write(\"end subroutine\")\n\n # Kc\n self._write()\n self._write(\"subroutine comp_Kc(tc, invT, Kc)\")\n self._write()\n 
self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(in) :: tc(5)\")\n self._write(\"double precision, intent(in) :: invT\")\n self._write(\"double precision, intent(inout) :: Kc(%d)\" % nReactions)\n self._write()\n self._write(\"double precision :: g_RT(%d)\" % nSpecies)\n self._write(\"double precision :: refC, refCinv\")\n self._write(\"integer :: i\")\n self._write()\n\n self._write(\"! compute the Gibbs free energy\")\n self._write(\"call gibbs(g_RT, tc)\")\n self._write()\n\n for reaction in mechanism.reaction():\n KcExpArg = self._sortedKcExpArg(mechanism, reaction)\n self._write(\"Kc(%d) = %s\" % (reaction.id, KcExpArg))\n\n self._write()\n\n # self._outdent()\n # self._write('#ifdef __INTEL_COMPILER')\n # self._indent()\n # self._write(' #pragma simd')\n # self._outdent()\n # self._write('#endif')\n # self._indent()\n self._write(\"do i=1, %d\" % nReactions)\n self._indent()\n self._write(\"Kc(i) = exp(Kc(i))\")\n self._outdent()\n self._write(\"end do\")\n\n self._write()\n\n self._write(\n \"! reference concentration: P_atm / (RT) in inverse mol/m^3\"\n )\n self._write(\"refC = %gd0 / %gd0 * invT\" % (atm.value, R.value))\n self._write(\"refCinv = 1.d0 / refC\")\n\n self._write()\n\n for reaction in mechanism.reaction():\n KcConv = self._KcConv(mechanism, reaction)\n if KcConv:\n self._write(\n \"Kc(%d) = Kc(%d) * (%s)\"\n % (reaction.id, reaction.id, KcConv)\n )\n\n self._write()\n self._outdent()\n self._write(\"end subroutine\")\n\n # qdot\n nclassd = nReactions - nspecial\n nCorr = n3body + ntroe + nsri + nlindemann\n self._write()\n self._write(\"subroutine comp_qfqr(qf, qr, sc, tc, invT)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(out) :: qf(%d)\" % nReactions)\n self._write(\"double precision, intent(out) :: qr(%d)\" % nReactions)\n self._write(\"double precision, intent(in) :: sc(%d)\" % nSpecies)\n self._write(\"double precision, intent(in) :: tc(5)\")\n self._write(\"double precision, intent(in) :: invT\")\n self._write()\n self._write(\"double precision :: T\")\n self._write(\"double precision :: mixture\")\n self._write(\"double precision :: Corr(%d)\" % nclassd)\n if ntroe > 0:\n self._write(\"double precision :: alpha_troe(%d)\" % ntroe)\n self._write(\n \"double precision :: redP, F, logPred, logFcent, troe_c, troe_n, troe, F_troe\"\n )\n if nsri > 0:\n self._write(\"double precision :: alpha_sri(%d)\" % nsri)\n self._write(\"double precision :: X, F_sri\")\n if nlindemann > 0:\n if nlindemann > 1:\n self._write(\n \"double precision :: alpha_lindemann(%d)\" % nlindemann\n )\n else:\n self._write(\"double precision :: alpha_lindemann\")\n\n self._write(\"double precision :: tmp1, tmp2, tmp3\")\n self._write(\"integer :: i\")\n self._write()\n\n for i in range(nclassd):\n reaction = mechanism.reaction(id=i)\n self._write(\n \"! reaction %d: %s\" % (reaction.id, reaction.equation())\n )\n self._write(\n \"qf(%d) = %s\"\n % (\n i + 1,\n self._sortedPhaseSpace(mechanism, reaction.reactants),\n )\n )\n if reaction.reversible:\n self._write(\n \"qr(%d) = %s\"\n % (\n i + 1,\n self._sortedPhaseSpace(mechanism, reaction.products),\n )\n )\n else:\n self._write(\"qr(%d) = 0.d0\" % (i + 1))\n if reaction.rev:\n print(\"reaction.rev not finished\")\n sys.exit(1)\n\n self._write()\n self._write(\"T = tc(2)\")\n self._write()\n self._write(\"! 
compute the mixture concentration\")\n self._write(\"mixture = 0.d0\")\n self._write(\"do i=1, %d\" % nSpecies)\n self._indent()\n self._write(\"mixture = mixture + sc(i)\")\n self._outdent()\n self._write(\"end do\")\n\n self._write()\n self._write(\"do i=1, %d\" % nclassd)\n self._indent()\n self._write(\"Corr(i) = 1.d0\")\n self._outdent()\n self._write(\"end do\")\n\n if ntroe > 0:\n self._write()\n self._write(\"! troe\")\n # self._write(\"{\")\n # self._indent()\n alpha_d = {}\n for i in range(itroe[0], itroe[1]):\n ii = i - itroe[0] + 1\n reaction = mechanism.reaction(id=i)\n if reaction.thirdBody:\n alpha = self._enhancement(mechanism, reaction)\n if alpha in alpha_d:\n self._write(\n \"alpha_troe(%d) = %s\" % (ii, alpha_d[alpha])\n )\n else:\n self._write(\"alpha_troe(%d) = %s\" % (ii, alpha))\n alpha_d[alpha] = \"alpha_troe(%d)\" % ii\n self._write()\n\n # if ntroe >= 4:\n # self._outdent()\n # self._outdent()\n # self._write('#ifdef __INTEL_COMPILER')\n # self._indent()\n # self._indent()\n # self._write(' #pragma simd')\n # self._outdent()\n # self._outdent()\n # self._write('#endif')\n # self._indent()\n # self._indent()\n self._write(\"do i=%d, %d\" % (itroe[0] + 1, itroe[1]))\n self._indent()\n self._write(\n \"redP = alpha_troe(i-%d) / k_f_save(i) * phase_units(i) * low_A(i) * exp(low_beta(i) * tc(1) - activation_units(i) * low_Ea(i) *invT)\"\n % itroe[0]\n )\n self._write(\"F = redP / (1.d0 + redP)\")\n self._write(\"logPred = log10(redP)\")\n self._write()\n self._write(\"if (abs(troe_Tsss(i)) > 1.d-100) then\")\n self._write(\" tmp1 = (1.d0-troe_a(i))*exp(-T/troe_Tsss(i))\")\n self._write(\"else\")\n self._write(\" tmp1 = 0.d0\")\n self._write(\"end if\")\n self._write()\n self._write(\"if (abs(troe_Ts(i)) > 1.d-100) then\")\n self._write(\" tmp2 = troe_a(i) * exp(-T/troe_Ts(i))\")\n self._write(\"else\")\n self._write(\" tmp2 = 0.d0\")\n self._write(\"end if\")\n self._write()\n self._write(\"if (troe_len(i) == 4) then\")\n self._write(\" tmp3 = exp(-troe_Tss(i) * invT)\")\n self._write(\"else\")\n self._write(\" tmp3 = 0.d0\")\n self._write(\"end if\")\n self._write()\n self._write(\"logFcent = log10(tmp1+tmp2+tmp3)\")\n self._write(\"troe_c = -0.4d0 - 0.67d0 * logFcent\")\n self._write(\"troe_n = 0.75d0 - 1.27d0 * logFcent\")\n self._write(\n \"troe = (troe_c + logPred) / (troe_n - 0.14d0*(troe_c + logPred))\"\n )\n self._write(\"F_troe = 10.d0 ** (logFcent / (1.d0 + troe*troe))\")\n self._write(\"Corr(i) = F * F_troe\")\n self._outdent()\n self._write(\"end do\")\n # self._outdent()\n # self._write(\"}\")\n\n if nsri > 0:\n self._write()\n self._write(\"! 
SRI\")\n # self._write(\"{\")\n # self._indent()\n alpha_d = {}\n for i in range(isri[0], isri[1]):\n ii = i - isri[0] + 1\n reaction = mechanism.reaction(id=i)\n if reaction.thirdBody:\n alpha = self._enhancement(mechanism, reaction)\n if alpha in alpha_d:\n self._write(\n \"alpha_sri(%d) = %s\" % (ii, alpha_d[alpha])\n )\n else:\n self._write(\"alpha_sri(%d) = %s\" % (ii, alpha))\n alpha_d[alpha] = \"alpha_sri(%d)\" % ii\n\n # if nsri >= 4:\n # self._outdent()\n # self._outdent()\n # self._write('#ifdef __INTEL_COMPILER')\n # self._indent()\n # self._indent()\n # self._write(' #pragma simd')\n # self._outdent()\n # self._outdent()\n # self._write('#endif')\n # self._indent()\n # self._indent()\n self._write(\"do i=%d, %d\" % (isri[0] + 1, isri[1]))\n self._write()\n self._indent()\n self._write(\n \"redP = alpha_sri(i-%d) / k_f_save(i) * phase_units(i) * low_A(i) * exp(low_beta(i) * tc(1) - activation_units(i) * low_Ea(i) *invT)\"\n % itroe[0]\n )\n self._write(\"F = redP / (1.d0 + redP)\")\n self._write(\"logPred = log10(redP)\")\n self._write(\"X = 1.d0 / (1.d0 + logPred*logPred)\")\n self._write()\n self._write(\"if (sri_c(i) > 1.d-100) then\")\n self._write(\" tmp1 = exp(T/sri_c(i))\")\n self._write(\"else\")\n self._write(\" tmp1 = 0.d0\")\n self._write(\"end if\")\n self._write()\n self._write(\"if (sri_len(i) > 3.d0) then\")\n self._write(\" tmp2 = sri_d(i)*exp(sri_e(i)*tc(1))\")\n self._write(\"else\")\n self._write(\" tmp2 = 0.d0\")\n self._write(\"end if\")\n self._write()\n self._write(\n \"F_sri = exp(X*log(sri_a(i)*exp(-sri_b(i)*invT)+tmp1)*tmp2)\"\n )\n self._write(\"Corr(i) = F * F_sri\")\n self._outdent()\n self._write(\"end do\")\n\n # self._outdent()\n # self._write(\"}\")\n\n if nlindemann > 0:\n self._write()\n self._write(\"! Lindemann\")\n # self._write(\"{\")\n # self._indent()\n\n for i in range(ilindemann[0], ilindemann[1]):\n ii = i - ilindemann[0] + 1\n reaction = mechanism.reaction(id=i)\n if reaction.thirdBody:\n alpha = self._enhancement(mechanism, reaction)\n if nlindemann > 1:\n self._write(\"alpha_lindemann(%d) = %s\" % (ii, alpha))\n else:\n self._write(\"alpha_lindemann = %s\" % (alpha))\n\n if nlindemann == 1:\n self._write(\n \"redP = alpha_lindemann / k_f_save(%d) * phase_units(%d) * low_A(%d) * exp(low_beta(%d) * tc(1) - activation_units(%d) * low_Ea(%d) * invT)\"\n % (\n ilindemann[0],\n ilindemann[0],\n ilindemann[0],\n ilindemann[0],\n ilindemann[0],\n ilindemann[0],\n )\n )\n self._write(\"Corr(%d) = redP / (1.d0 + redP)\" % ilindemann[0])\n else:\n # if nlindemann >= 4:\n # self._outdent()\n # self._write('#ifdef __INTEL_COMPILER')\n # self._indent()\n # self._write(' #pragma simd')\n # self._outdent()\n # self._write('#endif')\n # self._indent()\n self._write(\"do i=%d, %d\" % (ilindemann[0] + 1, ilindemann[1]))\n self._write()\n self._indent()\n self._write(\n \"redP = alpha_lindemann(i-%d) / k_f_save(i) * phase_units(i) * low_A(i) * exp(low_beta(i) * tc(1) - activation_units(i) * low_Ea(i) * invT)\"\n % ilindemann[0]\n )\n self._write(\"Corr(i) = redP / (1.d0 + redP)\")\n self._outdent()\n self._write(\"end do\")\n\n self._outdent()\n\n if n3body > 0:\n self._write()\n self._write(\"! 
simple three-body correction\")\n alpha_save = \"\"\n for i in range(i3body[0], i3body[1]):\n reaction = mechanism.reaction(id=i)\n if reaction.thirdBody:\n alpha = self._enhancement(mechanism, reaction)\n if alpha != alpha_save:\n alpha_save = alpha\n # self._write(\"alpha_tb = %s\" % alpha)\n self._write(\"Corr(%d) = %s\" % (i + 1, alpha))\n\n self._write()\n self._write(\"do i=1, %d\" % nclassd)\n self._indent()\n self._write(\"qf(i) = qf(i) * (Corr(i) * k_f_save(i))\")\n self._write(\"qr(i) = qr(i) * (Corr(i) * k_f_save(i) / Kc_save(i))\")\n self._outdent()\n self._write(\"end do\")\n\n # if nspecial > 0:\n\n # print \"\\n\\n ***** WARNING: %d unclassified reactions\\n\" % nspecial\n\n # self._write()\n # self._write(self.line('unclassified reactions'))\n # self._write('{')\n # self._indent()\n\n # self._write(self.line(\"reactions: %d to %d\" % (ispecial[0]+1,ispecial[1])))\n\n # self._write('double Kc; ' + self.line('equilibrium constant'))\n # self._write('double k_f; ' + self.line('forward reaction rate'))\n # self._write('double k_r; ' + self.line('reverse reaction rate'))\n # self._write('double q_f; ' + self.line('forward progress rate'))\n # self._write('double q_r; ' + self.line('reverse progress rate'))\n # self._write('double phi_f; '\n # + self.line('forward phase space factor'))\n # self._write('double phi_r; ' + self.line('reverse phase space factor'))\n # self._write('double alpha; ' + self.line('enhancement'))\n\n # self._write('double redP; ' + self.line('reduced pressure'))\n # self._write('double logPred; ' + self.line('log of above'))\n # self._write('double F; ' + self.line('fallof rate enhancement'))\n # self._write()\n # self._write('double F_troe; ' + self.line('TROE intermediate'))\n # self._write('double logFcent; ' + self.line('TROE intermediate'))\n # self._write('double troe; ' + self.line('TROE intermediate'))\n # self._write('double troe_c; ' + self.line('TROE intermediate'))\n # self._write('double troe_n; ' + self.line('TROE intermediate'))\n\n # for i in range(ispecial[0],ispecial[1]):\n # self._write()\n # reaction = mechanism.reaction(id=i)\n # self._write(self.line('reaction %d: %s' % (reaction.id, reaction.equation())))\n\n # # compute the rates\n # self._forwardRate(mechanism, reaction)\n # self._reverseRate(mechanism, reaction)\n\n # # store the progress rate\n # self._write(\"qf[%d] = q_f;\" % i)\n # self._write(\"qr[%d] = q_r;\" % i)\n\n # self._outdent()\n # self._write('}')\n\n self._write()\n self._outdent()\n self._write(\"end subroutine\")\n\n return\n\n # def _DproductionRate(self, mechanism):\n\n # species_list = [x.symbol for x in mechanism.species()]\n # nSpecies = len(species_list)\n\n # self._write()\n # self._write(self.line('compute the reaction Jacobian'))\n # self._write('void DWDOT(double * restrict J, double * restrict sc, double * restrict Tp, int * consP)')\n # self._write('{')\n # self._indent()\n\n # self._write('double c[%d];' % (nSpecies))\n # self._write()\n # self._write('for (int k=0; k<%d; k++) {' % nSpecies)\n # self._indent()\n # self._write('c[k] = 1.e6 * sc[k];')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('aJacobian(J, c, *Tp, *consP);')\n\n # self._write()\n # self._write('/* dwdot[k]/dT */')\n # self._write('for (int k=0; k<%d; k++) {' % nSpecies)\n # self._indent()\n # self._write('J[%d+k] *= 1.e-6;' % (nSpecies*(nSpecies+1)))\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('/* dTdot/d[X] */')\n # self._write('for (int k=0; k<%d; k++) {' % 
nSpecies)\n # self._indent()\n # self._write('J[k*%d+%d] *= 1.e6;' % (nSpecies+1, nSpecies))\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _ajac(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n\n # self._write()\n # self._write(self.line('compute the reaction Jacobian'))\n # self._write('void aJacobian(double * restrict J, double * restrict sc, double T, int consP)')\n # self._write('{')\n # self._indent()\n\n # self._write('for (int i=0; i<%d; i++) {' % (nSpecies+1)**2)\n # self._indent()\n # self._write('J[i] = 0.0;')\n # self._outdent()\n # self._write('}')\n #\n # self._write()\n\n # self._write('double wdot[%d];' % (nSpecies))\n # self._write('for (int k=0; k<%d; k++) {' % (nSpecies))\n # self._indent()\n # self._write('wdot[k] = 0.0;')\n # self._outdent()\n # self._write('}')\n #\n # self._write()\n\n # self._write('double tc[] = { log(T), T, T*T, T*T*T, T*T*T*T }; /*temperature cache */')\n # self._write('double invT = 1.0 / tc[1];')\n # self._write('double invT2 = invT * invT;')\n\n # self._write()\n\n # self._write(self.line('reference concentration: P_atm / (RT) in inverse mol/m^3'))\n # self._write('double refC = %g / %g / T;' % (atm.value, R.value))\n # self._write('double refCinv = 1.0 / refC;')\n\n # self._write()\n\n # self._write(self.line('compute the mixture concentration'))\n # self._write('double mixture = 0.0;')\n # self._write('for (int k = 0; k < %d; ++k) {' % nSpecies)\n # self._indent()\n # self._write('mixture += sc[k];')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n\n # self._write(self.line('compute the Gibbs free energy'))\n # self._write('double g_RT[%d];' % (nSpecies))\n # self._write('gibbs(g_RT, tc);')\n\n # self._write()\n\n # self._write(self.line('compute the species enthalpy'))\n # self._write('double h_RT[%d];' % (nSpecies))\n # self._write('speciesEnthalpy(h_RT, tc);')\n\n # self._write()\n\n # self._write('double phi_f, k_f, k_r, phi_r, Kc, q, q_nocor, Corr, alpha;')\n # self._write('double dlnkfdT, dlnk0dT, dlnKcdT, dkrdT, dqdT;')\n # self._write('double dqdci, dcdc_fac, dqdc[%d];' % (nSpecies))\n # self._write('double Pr, fPr, F, k_0, logPr;')\n # self._write('double logFcent, troe_c, troe_n, troePr_den, troePr, troe;')\n # self._write('double Fcent1, Fcent2, Fcent3, Fcent;')\n # self._write('double dlogFdc, dlogFdn, dlogFdcn_fac;')\n # self._write('double dlogPrdT, dlogfPrdT, dlogFdT, dlogFcentdT, dlogFdlogPr, dlnCorrdT;')\n # self._write('const double ln10 = log(10.0);')\n # self._write('const double log10e = 1.0/log(10.0);')\n\n # for i, reaction in zip(range(nReactions), mechanism.reaction()):\n\n # lt = reaction.lt\n # if lt:\n # print \"Landau-Teller reactions are not supported\"\n # sys.exit(1)\n\n # self._write(self.line('reaction %d: %s' % (i+1, reaction.equation())))\n # if reaction.low: # case 1\n # self._write(self.line('a pressure-fall-off reaction'))\n # self._ajac_reaction(mechanism, reaction, 1)\n # elif reaction.thirdBody: # case 2\n # self._write(self.line('a third-body and non-pressure-fall-off reaction'))\n # self._ajac_reaction(mechanism, reaction, 2)\n # else: # case 3\n # self._write(self.line('a non-third-body and non-pressure-fall-off reaction'))\n # self._ajac_reaction(mechanism, reaction, 3)\n # self._write()\n\n # self._write('double c_R[%d], dcRdT[%d], e_RT[%d];' % (nSpecies, nSpecies, nSpecies))\n # self._write('double * eh_RT;')\n # 
self._write('if (consP) {')\n # self._indent()\n\n # self._write('cp_R(c_R, tc);')\n # self._write('dcvpRdT(dcRdT, tc);')\n # self._write('eh_RT = &h_RT[0];');\n\n # self._outdent()\n # self._write('}')\n # self._write('else {')\n # self._indent()\n\n # self._write('cv_R(c_R, tc);')\n # self._write('dcvpRdT(dcRdT, tc);')\n # self._write('speciesInternalEnergy(e_RT, tc);');\n # self._write('eh_RT = &e_RT[0];');\n\n # self._outdent()\n # self._write('}')\n\n # self._write()\n\n # self._write('double cmix = 0.0, ehmix = 0.0, dcmixdT=0.0, dehmixdT=0.0;')\n # self._write('for (int k = 0; k < %d; ++k) {' % nSpecies)\n # self._indent()\n # self._write('cmix += c_R[k]*sc[k];')\n # self._write('dcmixdT += dcRdT[k]*sc[k];')\n # self._write('ehmix += eh_RT[k]*wdot[k];')\n # self._write('dehmixdT += invT*(c_R[k]-eh_RT[k])*wdot[k] + eh_RT[k]*J[%d+k];' % \\\n # (nSpecies*(nSpecies+1)))\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('double cmixinv = 1.0/cmix;')\n # self._write('double tmp1 = ehmix*cmixinv;')\n # self._write('double tmp3 = cmixinv*T;')\n # self._write('double tmp2 = tmp1*tmp3;')\n # self._write('double dehmixdc;')\n\n # self._write('/* dTdot/d[X] */')\n # self._write('for (int k = 0; k < %d; ++k) {' % nSpecies)\n # self._indent()\n # self._write('dehmixdc = 0.0;')\n # self._write('for (int m = 0; m < %d; ++m) {' % nSpecies)\n # self._indent()\n # self._write('dehmixdc += eh_RT[m]*J[k*%s+m];' % (nSpecies+1))\n # self._outdent()\n # self._write('}')\n # self._write('J[k*%d+%d] = tmp2*c_R[k] - tmp3*dehmixdc;' % (nSpecies+1,nSpecies))\n # self._outdent()\n # self._write('}')\n\n # self._write('/* dTdot/dT */')\n # self._write('J[%d] = -tmp1 + tmp2*dcmixdT - tmp3*dehmixdT;' % \\\n # (nSpecies*(nSpecies+1)+nSpecies))\n\n # self._outdent()\n # self._write('}')\n # return\n\n # def _ajac_reaction(self, mechanism, reaction, rcase):\n\n # if rcase == 1: # pressure-dependent reaction\n # isPD = True\n # if reaction.thirdBody:\n # has_alpha = True\n # self._write('/* also 3-body */')\n # else:\n # has_alpha = False\n # self._write('/* non 3-body */')\n # print 'FIXME: pressure dependent non-3-body reaction in _ajac_reaction'\n # sys.exit(1)\n # elif rcase == 2: # third-body and non-pressure-dependent reaction\n # isPD = False\n # has_alpha = True\n # elif rcase == 3: # simple non-third and non-pressure-dependent reaction\n # isPD = False\n # has_alpha = False\n # else:\n # print '_ajac_reaction: wrong case ', rcase\n # exit(1)\n\n # nSpecies = len(mechanism.species())\n # rea_dict = {}\n # pro_dict = {}\n # all_dict = {}\n # sumNuk = 0\n # for symbol, coefficient in reaction.reactants:\n # k = mechanism.species(symbol).id\n # sumNuk -= coefficient\n # if k in rea_dict:\n # coe_old = rea_dict[k][1]\n # rea_dict[k] = (symbol, coefficient+coe_old)\n # else:\n # rea_dict[k] = (symbol, coefficient)\n # for symbol, coefficient in reaction.products:\n # k = mechanism.species(symbol).id\n # sumNuk += coefficient\n # if k in pro_dict:\n # coe_old = pro_dict[k][1]\n # pro_dict[k] = (symbol, coefficient+coe_old)\n # else:\n # pro_dict[k] = (symbol, coefficient)\n # for k in range(nSpecies):\n # if k in rea_dict and k in pro_dict:\n # sr, nur = rea_dict[k]\n # sp, nup = pro_dict[k]\n # all_dict[k] = (sr, nup-nur)\n # elif k in rea_dict:\n # sr, nur = rea_dict[k]\n # all_dict[k] = (sr, -nur)\n # elif k in pro_dict:\n # sp, nup = pro_dict[k]\n # all_dict[k] = (sp, nup)\n\n # sorted_reactants = sorted(rea_dict.values())\n # sorted_products = sorted(pro_dict.values())\n\n # if not 
reaction.reversible:\n # if isPD or has_alpha:\n # print 'FIXME: inreversible reaction in _ajac_reaction may not work'\n # self._write('/* FIXME: inreversible reaction in _ajac_reaction may not work*/')\n # for k in range(nSpecies):\n # if k in sorted_reactants and k in sorted_products:\n # print 'FIXME: inreversible reaction in _ajac_reaction may not work'\n # self._write('/* FIXME: inreversible reaction in _ajac_reaction may not work*/')\n\n # if isPD:\n # Corr_s = 'Corr *'\n # elif has_alpha:\n # Corr_s = 'alpha * '\n # else:\n # Corr_s = ''\n\n # if has_alpha:\n # self._write(\"/* 3-body correction factor */\")\n # self._write(\"alpha = %s;\" % self._enhancement(mechanism, reaction))\n\n # # forward\n # self._write('/* forward */')\n # self._write(\"phi_f = %s;\" % self._sortedPhaseSpace(mechanism, sorted_reactants))\n # #\n # self._write(\"k_f = prefactor_units[%d] * fwd_A[%d]\" % (reaction.id-1,reaction.id-1))\n # self._write(\" * exp(fwd_beta[%d] * tc[0] - activation_units[%d] * fwd_Ea[%d] * invT);\"\n # %(reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write(\"dlnkfdT = fwd_beta[%d] * invT + activation_units[%d] * fwd_Ea[%d] * invT2;\"\n # %(reaction.id-1,reaction.id-1,reaction.id-1))\n\n # if isPD:\n # self._write('/* pressure-fall-off */')\n # self._write(\"k_0 = low_A[%d] * exp(low_beta[%d] * tc[0] - activation_units[%d] * low_Ea[%d] * invT);\"\n # %(reaction.id-1,reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write('Pr = phase_units[%d] * alpha / k_f * k_0;' % (reaction.id-1))\n # self._write('fPr = Pr / (1.0+Pr);')\n # self._write(\"dlnk0dT = low_beta[%d] * invT + activation_units[%d] * low_Ea[%d] * invT2;\"\n # %(reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write('dlogPrdT = log10e*(dlnk0dT - dlnkfdT);')\n # self._write('dlogfPrdT = dlogPrdT / (1.0+Pr);')\n # if reaction.sri:\n # self._write('/* SRI form */')\n # print \"FIXME: sri not supported in _ajac_reaction yet\"\n # sys.exit(1)\n # elif reaction.troe:\n # self._write('/* Troe form */')\n # troe = reaction.troe\n # self._write(\"logPr = log10(Pr);\")\n # self._write('Fcent1 = (fabs(troe_Tsss[%d]) > 1.e-100 ? (1.-troe_a[%d])*exp(-T/troe_Tsss[%d]) : 0.);'\n # %(reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write('Fcent2 = (fabs(troe_Ts[%d]) > 1.e-100 ? troe_a[%d] * exp(-T/troe_Ts[%d]) : 0.);'\n # %(reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write('Fcent3 = (troe_len[%d] == 4 ? exp(-troe_Tss[%d] * invT) : 0.);'\n # %(reaction.id-1,reaction.id-1))\n # self._write('Fcent = Fcent1 + Fcent2 + Fcent3;')\n # self._write(\"logFcent = log10(Fcent);\")\n # self._write(\"troe_c = -.4 - .67 * logFcent;\")\n # self._write(\"troe_n = .75 - 1.27 * logFcent;\")\n # self._write(\"troePr_den = 1.0 / (troe_n - .14*(troe_c + logPr));\")\n # self._write(\"troePr = (troe_c + logPr) * troePr_den;\")\n # self._write(\"troe = 1.0 / (1.0 + troePr*troePr);\")\n # self._write(\"F = pow(10.0, logFcent * troe);\")\n\n # self._write(\"dlogFcentdT = log10e/Fcent*( \")\n # self._write(\" (fabs(troe_Tsss[%d]) > 1.e-100 ? -Fcent1/troe_Tsss[%d] : 0.)\"\n # %(reaction.id-1,reaction.id-1))\n # self._write(\" + (fabs(troe_Ts[%d]) > 1.e-100 ? -Fcent2/troe_Ts[%d] : 0.)\"\n # %(reaction.id-1,reaction.id-1))\n # self._write(\" + (troe_len[%d] == 4 ? Fcent3*troe_Tss[%d]*invT2 : 0.) 
);\"\n # %(reaction.id-1,reaction.id-1))\n\n # self._write(\"dlogFdcn_fac = 2.0 * logFcent * troe*troe * troePr * troePr_den;\")\n # self._write('dlogFdc = -troe_n * dlogFdcn_fac * troePr_den;')\n # self._write('dlogFdn = dlogFdcn_fac * troePr;')\n # self._write('dlogFdlogPr = dlogFdc;')\n # self._write('dlogFdT = dlogFcentdT*(troe - 0.67*dlogFdc - 1.27*dlogFdn) + dlogFdlogPr * dlogPrdT;')\n # else:\n # self._write('/* Lindemann form */')\n # self._write('F = 1.0;')\n # self._write('dlogFdlogPr = 0.0;')\n # self._write('dlogFdT = 0.0;')\n\n # # reverse\n # if not reaction.reversible:\n # self._write('/* rate of progress */')\n # if (not has_alpha) and (not isPD):\n # self._write('q = k_f*phi_f;')\n # else:\n # self._write('q_nocor = k_f*phi_f;')\n # if isPD:\n # self._write('Corr = fPr * F;')\n # self._write('q = Corr * q_nocor;')\n # else:\n # self._write('q = alpha * q_nocor;')\n\n # if isPD:\n # self._write('dlnCorrdT = ln10*(dlogfPrdT + dlogFdT);')\n # self._write('dqdT = %sdlnkfdT*k_f*phi_f + dlnCorrdT*q;' % Corr_s)\n # else:\n # self._write('dqdT = %sdlnkfdT*k_f*phi_f;' % Corr_s)\n # else:\n # self._write('/* reverse */')\n # self._write(\"phi_r = %s;\" % self._sortedPhaseSpace(mechanism, sorted_products))\n # self._write('Kc = %s;' % self._sortedKc(mechanism, reaction))\n # self._write('k_r = k_f / Kc;')\n\n # dlnKcdT_s = 'invT * ('\n # terms = []\n # for symbol, coefficient in sorted(sorted_reactants,\n # key=lambda x: mechanism.species(x[0]).id):\n # k = mechanism.species(symbol).id\n # if coefficient == 1:\n # terms.append('h_RT[%d]' % (k))\n # else:\n # terms.append('%d*h_RT[%d]' % (coefficient, k))\n # dlnKcdT_s += '-(' + ' + '.join(terms) + ')'\n # terms = []\n # for symbol, coefficient in sorted(sorted_products,\n # key=lambda x: mechanism.species(x[0]).id):\n # k = mechanism.species(symbol).id\n # if coefficient == 1:\n # terms.append('h_RT[%d]' % (k))\n # else:\n # terms.append('%d*h_RT[%d]' % (coefficient, k))\n # dlnKcdT_s += ' + (' + ' + '.join(terms) + ')'\n # if sumNuk > 0:\n # dlnKcdT_s += ' - %d' % sumNuk\n # elif sumNuk < 0:\n # dlnKcdT_s += ' + %d' % (-sumNuk)\n # dlnKcdT_s += ')'\n # self._write('dlnKcdT = %s;' % dlnKcdT_s)\n\n # self._write('dkrdT = (dlnkfdT - dlnKcdT)*k_r;')\n\n # self._write('/* rate of progress */')\n # if (not has_alpha) and (not isPD):\n # self._write('q = k_f*phi_f - k_r*phi_r;')\n # else:\n # self._write('q_nocor = k_f*phi_f - k_r*phi_r;')\n # if isPD:\n # self._write('Corr = fPr * F;')\n # self._write('q = Corr * q_nocor;')\n # else:\n # self._write('q = alpha * q_nocor;')\n\n # if isPD:\n # self._write('dlnCorrdT = ln10*(dlogfPrdT + dlogFdT);')\n # self._write('dqdT = %s(dlnkfdT*k_f*phi_f - dkrdT*phi_r) + dlnCorrdT*q;' % Corr_s)\n # else:\n # self._write('dqdT = %s(dlnkfdT*k_f*phi_f - dkrdT*phi_r);' % Corr_s)\n\n # self._write(\"/* update wdot */\")\n # for k in sorted(all_dict.keys()):\n # s, nu = all_dict[k]\n # if nu == 1:\n # self._write('wdot[%d] += q; /* %s */' % (k, s))\n # elif nu == -1:\n # self._write('wdot[%d] -= q; /* %s */' % (k, s))\n # elif nu > 0:\n # self._write('wdot[%d] += %.17g * q; /* %s */' % (k, nu, s))\n # elif nu < 0:\n # self._write('wdot[%d] -= %.17g * q; /* %s */' % (k, -nu, s))\n\n # if isPD:\n # self._write('/* for convenience */')\n # self._write('k_f *= Corr;')\n # if reaction.reversible:\n # self._write('k_r *= Corr;')\n # elif has_alpha:\n # self._write('/* for convenience */')\n # self._write('k_f *= alpha;')\n # if reaction.reversible:\n # self._write('k_r *= alpha;')\n # else:\n # self._write('k_r = 
0.0;')\n\n # if isPD:\n # self._write('dcdc_fac = q/alpha*(1.0/(Pr+1.0) + dlogFdlogPr);')\n\n # def dqdc_simple(dqdc_s, k):\n # if dqdc_s == \"0\":\n # dqdc_s = ''\n # if k in sorted(rea_dict.keys()):\n # dps = self._DphaseSpace(mechanism,sorted_reactants,rea_dict[k][0])\n # if dps == \"1.0\":\n # dps_s = ''\n # else:\n # dps_s = '*'+dps\n # dqdc_s += ' + k_f%s' % dps_s\n # if reaction.reversible:\n # if k in sorted(pro_dict.keys()):\n # dps = self._DphaseSpace(mechanism,sorted_products,pro_dict[k][0])\n # if dps == \"1.0\":\n # dps_s = ''\n # else:\n # dps_s = '*'+dps\n # dqdc_s += ' - k_r%s' % dps_s\n # return dqdc_s\n\n # if has_alpha or isPD:\n\n # self._write('if (consP) {')\n # self._indent()\n\n # for k in range(nSpecies):\n # dqdc_s = self._Denhancement(mechanism,reaction,k,True)\n # if dqdc_s != \"0\":\n # if isPD:\n # if dqdc_s == \"1\":\n # dqdc_s ='dcdc_fac'\n # else:\n # dqdc_s +='*dcdc_fac'\n # elif has_alpha:\n # if dqdc_s == \"1\":\n # dqdc_s ='q_nocor'\n # else:\n # dqdc_s +='*q_nocor'\n\n # dqdc_s = dqdc_simple(dqdc_s,k)\n # if dqdc_s:\n # symb_k = self.species[k].symbol\n # self._write('/* d()/d[%s] */' % symb_k)\n # self._write('dqdci = %s;' % (dqdc_s))\n # #\n # for m in sorted(all_dict.keys()):\n # if all_dict[m][1] != 0:\n # s1 = 'J[%d] += %.17g * dqdci;' % (k*(nSpecies+1)+m, all_dict[m][1])\n # s1 = s1.replace('+= 1 *', '+=').replace('+= -1 *', '-=')\n # s2 = '/* dwdot[%s]/d[%s] */' % (all_dict[m][0], symb_k)\n # self._write(s1.ljust(30) + s2)\n\n # self._outdent()\n # self._write('}')\n # self._write('else {')\n # self._indent()\n\n # for k in range(nSpecies):\n # dqdc_s = self._Denhancement(mechanism,reaction,k,False)\n # if dqdc_s != '0':\n # if dqdc_s == '1':\n # dqdc_s ='dcdc_fac'\n # elif isPD:\n # dqdc_s +='*dcdc_fac'\n # elif has_alpha:\n # if dqdc_s == '1':\n # dqdc_s ='q_nocor'\n # else:\n # dqdc_s +='*q_nocor'\n # dqdc_s = dqdc_simple(dqdc_s,k)\n # if dqdc_s:\n # self._write('dqdc[%d] = %s;' % (k,dqdc_s))\n\n # self._write('for (int k=0; k<%d; k++) {' % nSpecies)\n # self._indent()\n # for m in sorted(all_dict.keys()):\n # if all_dict[m][1] != 0:\n # s1 = 'J[%d*k+%d] += %.17g * dqdc[k];' % ((nSpecies+1), m, all_dict[m][1])\n # s1 = s1.replace('+= 1 *', '+=').replace('+= -1 *', '-=')\n # self._write(s1)\n # self._outdent()\n # self._write('}')\n\n # self._outdent()\n # self._write('}')\n\n # for m in sorted(all_dict.keys()):\n # if all_dict[m][1] != 0:\n # s1 = 'J[%d] += %.17g * dqdT; /* dwdot[%s]/dT */' % \\\n # (nSpecies*(nSpecies+1)+m, all_dict[m][1], all_dict[m][0])\n # s1 = s1.replace('+= 1 *', '+=').replace('+= -1 *', '-=')\n # self._write(s1)\n\n # else:\n\n # for k in range(nSpecies):\n # dqdc_s = dqdc_simple('',k)\n # if dqdc_s:\n # self._write('/* d()/d[%s] */' % all_dict[k][0])\n # self._write('dqdci = %s;' % (dqdc_s))\n # if reaction.reversible or k in rea_dict:\n # for m in sorted(all_dict.keys()):\n # if all_dict[m][1] != 0:\n # s1 = 'J[%d] += %.17g * dqdci;' % (k*(nSpecies+1)+m, all_dict[m][1])\n # s1 = s1.replace('+= 1 *', '+=').replace('+= -1 *', '-=')\n # s2 = '/* dwdot[%s]/d[%s] */' % (all_dict[m][0], all_dict[k][0])\n # self._write(s1.ljust(30) + s2)\n # self._write('/* d()/dT */')\n # for m in sorted(all_dict.keys()):\n # if all_dict[m][1] != 0:\n # s1 = 'J[%d] += %.17g * dqdT;' % (nSpecies*(nSpecies+1)+m, all_dict[m][1])\n # s1 = s1.replace('+= 1 *', '+=').replace('+= -1 *', '-=').replace('+= -1 *', '-=')\n # s2 = '/* dwdot[%s]/dT */' % (all_dict[m][0])\n # self._write(s1.ljust(30) + s2)\n\n # return\n\n # def _vproductionRate(self, 
mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n\n # itroe = self.reactionIndex[0:2]\n # isri = self.reactionIndex[1:3]\n # ilindemann = self.reactionIndex[2:4]\n # i3body = self.reactionIndex[3:5]\n # isimple = self.reactionIndex[4:6]\n # ispecial = self.reactionIndex[5:7]\n #\n # ntroe = itroe[1] - itroe[0]\n # nsri = isri[1] - isri[0]\n # nlindemann = ilindemann[1] - ilindemann[0]\n # n3body = i3body[1] - i3body[0]\n # nsimple = isimple[1] - isimple[0]\n # nspecial = ispecial[1] - ispecial[0]\n\n # self._write()\n # self._write()\n # self._write(self.line('compute the production rate for each species'))\n # self._write('void vproductionRate(int npt, double * restrict wdot, double * restrict sc, double * restrict T)')\n # self._write('{')\n # self._indent()\n\n # self._write('double k_f_s[%d*npt], Kc_s[%d*npt], mixture[npt], g_RT[%d*npt];'\n # % (nReactions, nReactions, nSpecies))\n # self._write('double tc[5*npt], invT[npt];')\n\n # self._write()\n\n # self._outdent()\n # self._write('#ifdef __INTEL_COMPILER')\n # self._indent()\n # self._write(' #pragma simd')\n # self._outdent()\n # self._write('#endif')\n # self._indent()\n # self._write('for (int i=0; i<npt; i++) {')\n # self._indent()\n # self._write('tc[0*npt+i] = log(T[i]);')\n # self._write('tc[1*npt+i] = T[i];')\n # self._write('tc[2*npt+i] = T[i]*T[i];')\n # self._write('tc[3*npt+i] = T[i]*T[i]*T[i];')\n # self._write('tc[4*npt+i] = T[i]*T[i]*T[i]*T[i];')\n # self._write('invT[i] = 1.0 / T[i];')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('for (int i=0; i<npt; i++) {')\n # self._indent()\n # self._write('mixture[i] = 0.0;')\n # self._outdent()\n # self._write('}')\n #\n # self._write()\n # self._write('for (int n=0; n<%d; n++) {' % nSpecies)\n # self._indent()\n # self._write('for (int i=0; i<npt; i++) {')\n # self._indent()\n # self._write('mixture[i] += sc[n*npt+i];')\n # self._write('wdot[n*npt+i] = 0.0;')\n # self._outdent()\n # self._write('}')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('vcomp_k_f(npt, k_f_s, tc, invT);')\n # self._write()\n # self._write('vcomp_gibbs(npt, g_RT, tc);')\n # self._write()\n # self._write('vcomp_Kc(npt, Kc_s, g_RT, invT);')\n # self._write()\n # if nReactions <= 50:\n # self._write('vcomp_wdot(npt, wdot, mixture, sc, k_f_s, Kc_s, tc, invT, T);')\n # else:\n # for i in range(0,nReactions,50):\n # self._write('vcomp_wdot_%d_%d(npt, wdot, mixture, sc, k_f_s, Kc_s, tc, invT, T);' % (i+1,min(i+50,nReactions)))\n\n # self._outdent()\n # self._write('}')\n\n # self._write()\n\n # self._write('void vcomp_k_f(int npt, double * restrict k_f_s, double * restrict tc, double * restrict invT)')\n # self._write('{')\n # self._write('#ifdef __INTEL_COMPILER')\n # self._indent()\n # self._write('#pragma simd')\n # self._outdent()\n # self._write('#endif')\n # self._indent()\n # self._write('for (int i=0; i<npt; i++) {')\n # self._indent()\n # for reaction in mechanism.reaction():\n # self._write(\"k_f_s[%d*npt+i] = prefactor_units[%d] * fwd_A[%d] * exp(fwd_beta[%d] * tc[i] - activation_units[%d] * fwd_Ea[%d] * invT[i]);\"\n # % (reaction.id-1,reaction.id-1,reaction.id-1,\n # reaction.id-1,reaction.id-1,reaction.id-1))\n # self._outdent()\n # self._write('}')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n\n # self._write('void vcomp_gibbs(int npt, double * restrict g_RT, double * restrict tc)')\n # self._write('{')\n # self._indent()\n # self._write(self.line('compute the Gibbs 
free energy'))\n # self._write('for (int i=0; i<npt; i++) {')\n # self._indent()\n # self._write('double tg[5], g[%d];' % nSpecies)\n # self._write('tg[0] = tc[0*npt+i];')\n # self._write('tg[1] = tc[1*npt+i];')\n # self._write('tg[2] = tc[2*npt+i];')\n # self._write('tg[3] = tc[3*npt+i];')\n # self._write('tg[4] = tc[4*npt+i];')\n # self._write()\n # self._write('gibbs(g, tg);')\n # self._write()\n # for ispec in range(nSpecies):\n # self._write('g_RT[%d*npt+i] = g[%d];' % (ispec, ispec))\n # self._outdent()\n # self._write('}')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n\n # self._write('void vcomp_Kc(int npt, double * restrict Kc_s, double * restrict g_RT, double * restrict invT)')\n # self._write('{')\n # self._write('#ifdef __INTEL_COMPILER')\n # self._indent()\n # self._write('#pragma simd')\n # self._outdent()\n # self._write('#endif')\n # self._indent()\n # self._write('for (int i=0; i<npt; i++) {')\n # self._indent()\n # self._write(self.line('reference concentration: P_atm / (RT) in inverse mol/m^3'))\n # self._write('double refC = (101325. / 8.31451) * invT[i];');\n # self._write('double refCinv = 1.0 / refC;');\n # self._write()\n # for reaction in mechanism.reaction():\n # K_c = self._vKc(mechanism, reaction)\n # self._write(\"Kc_s[%d*npt+i] = %s;\" % (reaction.id-1,K_c))\n # self._outdent()\n # self._write('}')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # if nReactions <= 50:\n # self._write('void vcomp_wdot(int npt, double * restrict wdot, double * restrict mixture, double * restrict sc,')\n # self._write('\t\tdouble * restrict k_f_s, double * restrict Kc_s,')\n # self._write('\t\tdouble * restrict tc, double * restrict invT, double * restrict T)')\n # self._write('{')\n # self._vcomp_wdot(mechanism,0,nReactions)\n # self._write('}')\n # else:\n # for i in range(0,nReactions,50):\n # nr = min(50, nReactions-i)\n # self._write('void vcomp_wdot_%d_%d(int npt, double * restrict wdot, double * restrict mixture, double * restrict sc,' % (i+1,i+nr))\n # self._write('\t\tdouble * restrict k_f_s, double * restrict Kc_s,')\n # self._write('\t\tdouble * restrict tc, double * restrict invT, double * restrict T)')\n # self._write('{')\n # self._vcomp_wdot(mechanism,i,nr)\n # self._write('}')\n # self._write()\n\n # return\n\n # def _vcomp_wdot(self, mechanism, istart, nr):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n\n # itroe = self.reactionIndex[0:2]\n # isri = self.reactionIndex[1:3]\n # ilindemann = self.reactionIndex[2:4]\n # i3body = self.reactionIndex[3:5]\n # isimple = self.reactionIndex[4:6]\n # ispecial = self.reactionIndex[5:7]\n #\n # ntroe = itroe[1] - itroe[0]\n # nsri = isri[1] - isri[0]\n # nlindemann = ilindemann[1] - ilindemann[0]\n # n3body = i3body[1] - i3body[0]\n # nsimple = isimple[1] - isimple[0]\n # nspecial = ispecial[1] - ispecial[0]\n\n # self._write('#ifdef __INTEL_COMPILER')\n # self._indent()\n # self._write('#pragma simd')\n # self._outdent()\n # self._write('#endif')\n # self._indent()\n # self._write('for (int i=0; i<npt; i++) {')\n # self._indent()\n\n # self._write('double qdot, q_f, q_r, phi_f, phi_r, k_f, k_r, Kc;')\n # if istart < isimple[0]:\n # self._write('double alpha;')\n # if istart < i3body[0]:\n # self._write('double redP, F;')\n # if istart < ilindemann[0]:\n # self._write('double logPred;')\n # if ntroe>0:\n # self._write('double logFcent, troe_c, troe_n, troe, F_troe;')\n # if nsri>0:\n # self._write('double X, F_sri;')\n\n # first_id = istart + 1\n # 
last_id = istart + nr\n\n # for reaction in mechanism.reaction():\n\n # if reaction.id < first_id or reaction.id > last_id:\n # continue\n\n # self._write()\n # self._write(self.line('reaction %d: %s' % (reaction.id, reaction.equation())))\n\n # # compute the rates\n # self._vforwardRate(mechanism, reaction)\n # self._vreverseRate(mechanism, reaction)\n\n # # store the progress rate\n # self._write(\"qdot = q_f - q_r;\")\n\n # agents = list(set(reaction.reactants + reaction.products))\n # agents = sorted(agents, key=lambda x: mechanism.species(x[0]).id)\n # # note that a species might appear as both reactant and product\n # # a species might alos appear twice or more on on each side\n # # agents is a set that contains unique (symbol, coefficient)\n # for a in agents:\n # symbol, coefficient = a\n # for b in reaction.reactants:\n # if b == a:\n # if coefficient == 1:\n # self._write(\"wdot[%d*npt+i] -= qdot;\"\n # % (mechanism.species(symbol).id))\n # else:\n # self._write(\"wdot[%d*npt+i] -= %d * qdot;\"\n # % (mechanism.species(symbol).id, coefficient))\n # for b in reaction.products:\n # if b == a:\n # if coefficient == 1:\n # self._write(\"wdot[%d*npt+i] += qdot;\"\n # % (mechanism.species(symbol).id))\n # else:\n # self._write(\"wdot[%d*npt+i] += %d * qdot;\"\n # % (mechanism.species(symbol).id, coefficient))\n\n # self._outdent()\n # self._write('}')\n # self._outdent()\n\n # return\n\n # def _progressRate(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n\n # itroe = self.reactionIndex[0:2]\n # isri = self.reactionIndex[1:3]\n # ilindemann = self.reactionIndex[2:4]\n # i3body = self.reactionIndex[3:5]\n # isimple = self.reactionIndex[4:6]\n # ispecial = self.reactionIndex[5:7]\n\n # if len(self.reactionIndex) != 7:\n # print '\\n\\nCheck this!!!\\n'\n # sys.exit(1)\n #\n # ntroe = itroe[1] - itroe[0]\n # nsri = isri[1] - isri[0]\n # nlindemann = ilindemann[1] - ilindemann[0]\n # n3body = i3body[1] - i3body[0]\n # nsimple = isimple[1] - isimple[0]\n # nspecial = ispecial[1] - ispecial[0]\n\n # self._write()\n # self._write()\n # self._write(self.line('compute the progress rate for each reaction'))\n # self._write('void progressRate(double * restrict qdot, double * restrict sc, double T)')\n # self._write('{')\n # self._indent()\n\n # self._write('double tc[] = { log(T), T, T*T, T*T*T, T*T*T*T }; /*temperature cache */')\n # self._write('double invT = 1.0 / tc[1];')\n #\n # self._write()\n # self._write('if (T != T_save)')\n # self._write('{')\n # self._indent()\n # self._write('T_save = T;')\n # self._write('comp_k_f(tc,invT,k_f_save);');\n # self._write('comp_Kc(tc,invT,Kc_save);');\n # self._outdent()\n # self._write(\"}\")\n\n # self._write()\n # self._write('double q_f[%d], q_r[%d];' % (nReactions,nReactions))\n # self._write('comp_qfqr(q_f, q_r, sc, tc, invT);');\n\n # self._write()\n # self._write('for (int i = 0; i < %d; ++i) {' % nReactions)\n # self._indent()\n # self._write('qdot[i] = q_f[i] - q_r[i];')\n # self._outdent()\n # self._write('}')\n\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _initializeRateCalculation(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n\n # # declarations\n # self._write()\n # self._write('int id; ' + self.line('loop counter'))\n\n # self._write('double mixture; '\n # + self.line('mixture concentration'))\n # self._write('double g_RT[%d]; ' % nSpecies\n # + self.line('Gibbs 
free energy'))\n\n # self._write('double Kc; ' + self.line('equilibrium constant'))\n # self._write('double k_f; ' + self.line('forward reaction rate'))\n # self._write('double k_r; ' + self.line('reverse reaction rate'))\n # self._write('double q_f; ' + self.line('forward progress rate'))\n # self._write('double q_r; ' + self.line('reverse progress rate'))\n # self._write('double phi_f; '\n # + self.line('forward phase space factor'))\n # self._write('double phi_r; '\n # + self.line('reverse phase space factor'))\n # self._write('double alpha; ' + self.line('enhancement'))\n\n # self._write('double redP; ' + self.line('reduced pressure'))\n # self._write('double logPred; ' + self.line('log of above'))\n # self._write('double F; '\n # + self.line('fallof rate enhancement'))\n # self._write()\n # self._write('double F_troe; ' + self.line('TROE intermediate'))\n # self._write('double logFcent; ' + self.line('TROE intermediate'))\n # self._write('double troe; ' + self.line('TROE intermediate'))\n # self._write('double troe_c; ' + self.line('TROE intermediate'))\n # self._write('double troe_n; ' + self.line('TROE intermediate'))\n # self._write()\n\n # self._write(\n # 'double tc[] = { log(T), T, T*T, T*T*T, T*T*T*T }; '\n # + self.line('temperature cache'))\n\n # self._write()\n # self._write('double invT = 1.0 / tc[1];')\n\n # # compute the reference concentration\n # self._write()\n # self._write(self.line('reference concentration: P_atm / (RT) in inverse mol/m^3'))\n # self._write('double refC = %g / %g / T;' % (atm.value, R.value))\n # self._write('double refCinv = 1 / refC;')\n\n # # compute the mixture concentration\n # self._write()\n # self._write(self.line('compute the mixture concentration'))\n # self._write('mixture = 0.0;')\n # self._write('for (id = 0; id < %d; ++id) {' % nSpecies)\n # self._indent()\n # self._write('mixture += sc[id];')\n # self._outdent()\n # self._write('}')\n\n # # compute the Gibbs free energies\n # self._write()\n # self._write(self.line('compute the Gibbs free energy'))\n # self._write('gibbs(g_RT, tc);')\n #\n # return\n\n # def _forwardRate(self, mechanism, reaction):\n\n # lt = reaction.lt\n # if lt:\n # import pyre\n # pyre.debug.Firewall.hit(\"Landau-Teller reactions are not supported yet\")\n # return self._landau(reaction)\n\n # dim = self._phaseSpaceUnits(reaction.reactants)\n\n # phi_f = self._phaseSpace(mechanism, reaction.reactants)\n # self._write(\"phi_f = %s;\" % phi_f)\n\n # thirdBody = reaction.thirdBody\n # if not thirdBody:\n # self._write(\"k_f = k_f_save[%d];\" % (reaction.id-1))\n # self._write(\"q_f = phi_f * k_f;\")\n # return\n #\n # alpha = self._enhancement(mechanism, reaction)\n # self._write(\"alpha = %s;\" % alpha)\n\n # sri = reaction.sri\n # low = reaction.low\n # troe = reaction.troe\n\n # if not low:\n # self._write(\"k_f = alpha * k_f_save[%d];\" % (reaction.id-1))\n # self._write(\"q_f = phi_f * k_f;\")\n # return\n\n # self._write(\"k_f = k_f_save[%d];\" % (reaction.id-1))\n\n # self._write(\"redP = alpha / k_f * phase_units[%d] * low_A[%d] * exp(low_beta[%d] * tc[0] - activation_units[%d] * low_Ea[%d] *invT);\"\n # %(reaction.id-1,reaction.id-1,reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write(\"F = redP / (1 + redP);\")\n\n # if sri:\n # self._write(\"logPred = log10(redP);\")\n # self._write(\"X = 1.0 / (1.0 + logPred*logPred);\")\n # self._write(\"F_sri = exp(X * log(sri_a[%d] * exp(-sri_b[%d]/T)\"\n # % (reaction.id-1,reaction.id-1))\n # self._write(\" + (sri_c[%d] > 1.e-100 ? exp(T/sri_c[%d]) : 0.) 
)\"\n # % (reaction.id-1,reaction.id-1))\n # self._write(\" * (sri_len[%d] > 3 ? sri_d[%d]*exp(sri_e[%d]*tc[0]) : 1);\"\n # % (reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write(\"F *= F_sri;\")\n\n # elif troe:\n # self._write(\"logPred = log10(redP);\")\n\n # self._write('logFcent = log10(')\n # self._write(' (fabs(troe_Tsss[%d]) > 1.e-100 ? (1-troe_a[%d])*exp(-T/troe_Tsss[%d]) : 0) '\n # % (reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write(' + (fabs(troe_Ts[%d]) > 1.e-100 ? troe_a[%d] * exp(-T/troe_Ts[%d]) : 0) '\n # % (reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write(' + (troe_len[%d] == 4 ? exp(-troe_Tss[%d] * invT) : 0) );'\n # % (reaction.id-1,reaction.id-1))\n # self._write(\"troe_c = -.4 - .67 * logFcent;\")\n # self._write(\"troe_n = .75 - 1.27 * logFcent;\")\n # self._write(\"troe = (troe_c + logPred) / (troe_n - .14*(troe_c + logPred));\")\n # self._write(\"F_troe = pow(10, logFcent / (1.0 + troe*troe));\")\n # self._write(\"F *= F_troe;\")\n\n # self._write(\"k_f *= F;\")\n # self._write(\"q_f = phi_f * k_f;\")\n # return\n #\n\n # def _vforwardRate(self, mechanism, reaction):\n\n # lt = reaction.lt\n # if lt:\n # import pyre\n # pyre.debug.Firewall.hit(\"Landau-Teller reactions are not supported yet\")\n # return self._landau(reaction)\n\n # dim = self._phaseSpaceUnits(reaction.reactants)\n\n # phi_f = self._vphaseSpace(mechanism, reaction.reactants)\n # self._write(\"phi_f = %s;\" % phi_f)\n #\n # thirdBody = reaction.thirdBody\n # if not thirdBody:\n # self._write(\"k_f = k_f_s[%d*npt+i];\" % (reaction.id-1))\n # self._write(\"q_f = phi_f * k_f;\")\n # return\n #\n # alpha = self._venhancement(mechanism, reaction)\n # self._write(\"alpha = %s;\" % alpha)\n\n # sri = reaction.sri\n # low = reaction.low\n # troe = reaction.troe\n\n # if not low:\n # self._write(\"k_f = alpha * k_f_s[%d*npt+i];\" % (reaction.id-1))\n # self._write(\"q_f = phi_f * k_f;\")\n # return\n\n # self._write(\"k_f = k_f_s[%d*npt+i];\" % (reaction.id-1))\n # self._write(\"redP = alpha / k_f * phase_units[%d] * low_A[%d] * exp(low_beta[%d] * tc[i] - activation_units[%d] * low_Ea[%d] * invT[i]);\"\n # % (reaction.id-1,reaction.id-1,reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write(\"F = redP / (1 + redP);\")\n\n # if sri:\n # self._write(\"logPred = log10(redP);\")\n # self._write(\"X = 1.0 / (1.0 + logPred*logPred);\")\n # self._write(\"F_sri = exp(X * log(sri_a[%d] * exp(-sri_b[%d]/T[i])\"\n # % (reaction.id-1,reaction.id-1))\n # self._write(\" + (sri_c[%d] > 1.e-100 ? exp(T[i]/sri_c[%d]) : 0.) )\"\n # % (reaction.id-1,reaction.id-1))\n # self._write(\" * (sri_len[%d] > 3 ? sri_d[%d]*exp(sri_e[%d]*tc[i]) : 1.);\"\n # % (reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write(\"F *= F_sri;\")\n\n # elif troe:\n # self._write(\"logPred = log10(redP);\")\n\n # self._write('logFcent = log10(')\n # self._write(' (fabs(troe_Tsss[%d]) > 1.e-100 ? (1.-troe_a[%d])*exp(-T[i]/troe_Tsss[%d]) : 0.) '\n # % (reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write(' + (fabs(troe_Ts[%d]) > 1.e-100 ? troe_a[%d] * exp(-T[i]/troe_Ts[%d]) : 0.) '\n # % (reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write(' + (troe_len[%d] == 4 ? exp(-troe_Tss[%d] * invT[i]) : 0.) 
);'\n # % (reaction.id-1,reaction.id-1))\n #\n # d = .14\n # self._write(\"troe_c = -.4 - .67 * logFcent;\")\n # self._write(\"troe_n = .75 - 1.27 * logFcent;\")\n # self._write(\"troe = (troe_c + logPred) / (troe_n - .14*(troe_c + logPred));\")\n # self._write(\"F_troe = pow(10., logFcent / (1.0 + troe*troe));\")\n # self._write(\"F *= F_troe;\")\n\n # self._write(\"k_f *= F;\")\n # self._write(\"q_f = phi_f * k_f;\")\n # return\n #\n\n # def _reverseRate(self, mechanism, reaction):\n # if not reaction.reversible:\n # self._write(\"q_r = 0.0;\")\n # return\n\n # phi_r = self._phaseSpace(mechanism, reaction.products)\n # self._write(\"phi_r = %s;\" % phi_r)\n\n # if reaction.rlt:\n # import pyre\n # pyre.debug.Firewall.hit(\"Landau-Teller reactions are not supported yet\")\n # return\n\n # if reaction.rev:\n\n # self._write(\"k_r = prefactor_units[%d] * rev_A[%d] * exp(rev_beta[%d] * tc[0] - activation_units[%d] * rev_Ea[%d] * invT);\"\n # % (reaction.id-1,reaction.id-1,reaction.id-1,reaction.id-1,reaction.id-1))\n\n # thirdBody = reaction.thirdBody\n # if thirdBody:\n # self._write(\"k_r *= alpha;\")\n\n # self._write(\"q_r[%d] = phi_r * k_r;\" % (reaction.id - 1))\n # return\n #\n # self._write(\"Kc = Kc_save[%d];\" % (reaction.id-1))\n\n # self._write(\"k_r = k_f / Kc;\")\n # self._write(\"q_r = phi_r * k_r;\")\n\n # return\n\n # def _vreverseRate(self, mechanism, reaction):\n # if not reaction.reversible:\n # self._write(\"q_r = 0.0;\")\n # return\n\n # phi_r = self._vphaseSpace(mechanism, reaction.products)\n # self._write(\"phi_r = %s;\" % phi_r)\n\n # if reaction.rlt:\n # import pyre\n # pyre.debug.Firewall.hit(\"Landau-Teller reactions are not supported yet\")\n # return\n\n # if reaction.rev:\n\n # self._write(\"k_r = prefactor_units[%d] * rev_A[%d] * exp(rev_beta[%d] * tc[i] - activation_units[%d] * rev_Ea[%d] * invT[i]);\"\n # % (reaction.id-1,reaction.id-1,reaction.id-1,reaction.id-1,reaction.id-1))\n\n # thirdBody = reaction.thirdBody\n # if thirdBody:\n # self._write(\"k_r *= alpha;\")\n\n # self._write(\"q_f = phi_r * k_r;\")\n # return\n #\n # self._write(\"Kc = Kc_s[%d*npt+i];\" % (reaction.id-1))\n\n # self._write(\"k_r = k_f / Kc;\")\n # self._write(\"q_r = phi_r * k_r;\")\n\n # return\n\n # def _progressRateFR(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n\n # itroe = self.reactionIndex[0:2]\n # isri = self.reactionIndex[1:3]\n # ilindemann = self.reactionIndex[2:4]\n # i3body = self.reactionIndex[3:5]\n # isimple = self.reactionIndex[4:6]\n # ispecial = self.reactionIndex[5:7]\n\n # if len(self.reactionIndex) != 7:\n # print '\\n\\nCheck this!!!\\n'\n # sys.exit(1)\n #\n # ntroe = itroe[1] - itroe[0]\n # nsri = isri[1] - isri[0]\n # nlindemann = ilindemann[1] - ilindemann[0]\n # n3body = i3body[1] - i3body[0]\n # nsimple = isimple[1] - isimple[0]\n # nspecial = ispecial[1] - ispecial[0]\n\n # self._write()\n # self._write()\n # self._write(self.line('compute the progress rate for each reaction'))\n # self._write('void progressRateFR(double * restrict q_f, double * restrict q_r, double * restrict sc, double T)')\n # self._write('{')\n # self._indent()\n\n # self._write('double tc[] = { log(T), T, T*T, T*T*T, T*T*T*T }; /*temperature cache */')\n # self._write('double invT = 1.0 / tc[1];')\n #\n # self._write()\n # self._write('if (T != T_save)')\n # self._write('{')\n # self._indent()\n # self._write('T_save = T;')\n # self._write('comp_k_f(tc,invT,k_f_save);');\n # self._write('comp_Kc(tc,invT,Kc_save);');\n # 
self._outdent()\n # self._write(\"}\")\n\n # self._write()\n # self._write('comp_qfqr(q_f, q_r, sc, tc, invT);');\n\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _getCriticalParameters(self, mechanism):\n #\n #\n # TabulatedCriticalParams = {\n # \"H2\":{'Tci':33.145,\"Pci\":12.964,\"wt\":2.01588,\"acentric_factor\":-0.219},\n # \"O2\":{'Tci':154.581,\"Pci\":50.4304658,\"wt\":31.9988,\"acentric_factor\":0.0222},\n # \"H2O\":{'Tci':647.096,\"Pci\":220.640,\"wt\":18.015340,\"acentric_factor\":0.3443},\n # \"N2\":{'Tci':126.192,\"Pci\":33.958,\"wt\":28.013400,\"acentric_factor\":0.0372},\n # \"CH4\":{'Tci':190.56,\"Pci\":45.99,\"wt\":16.043030,\"acentric_factor\":0.011},\n # \"C2H6\":{'Tci':305.32,\"Pci\":48.72,\"wt\":30.070120,\"acentric_factor\":0.099},\n # \"C3H8\":{'Tci':369.83,\"Pci\":42.48,\"wt\":44.097210,\"acentric_factor\":0.152},\n # \"CO2\":{'Tci':304.12,\"Pci\":73.74,\"wt\":44.009950,\"acentric_factor\":0.225},\n # \"He\":{'Tci':5.1953,\"Pci\":2.2746,\"wt\":4.002602,\"acentric_factor\":-0.382},\n # \"CO\":{'Tci':132.85,\"Pci\":34.94,\"wt\":28.010,\"acentric_factor\":0.045},\n # \"AR\":{'Tci':150.86,\"Pci\":48.98,\"wt\":39.948,\"acentric_factor\":-0.002},\n # \"NO\":{'Tci':180.0,\"Pci\":64.80,\"wt\":30.006,\"acentric_factor\":0.582},\n # \"CH3OH\":{'Tci':512.64,\"Pci\":80.97,\"wt\":32.042,\"acentric_factor\":0.565},\n # \"C2H2\":{'Tci':308.30,\"Pci\":61.14,\"wt\":26.038,\"acentric_factor\":0.189},\n # \"C2H4\":{'Tci':282.34,\"Pci\":50.41,\"wt\":28.054,\"acentric_factor\":0.087},\n # \"N2O\":{'Tci':309.60,\"Pci\":72.55,\"wt\":44.013,\"acentric_factor\":0.162}\n # }\n #\n # nSpecies = len(mechanism.species())\n # self._write()\n # self._write()\n # self._write(self.line('compute the critical parameters for each species'))\n # self._write('void GET_CRITPARAMS(double * restrict Tci, double * restrict ai, double * restrict bi, double * restrict acentric_i)')\n # self._write('{')\n # self._write()\n # self._indent()\n #\n #\n # self._write('double EPS[%d];'%nSpecies)\n # self._write('double SIG[%d];' %nSpecies)\n # self._write('double wt[%d];' %nSpecies)\n # self._write('double avogadro = 6.02214199e23;')\n # self._write('double boltzmann = 1.3806503e-16; //we work in CGS')\n # self._write('double Rcst = 83.144598; //in bar [CGS] !')\n #\n # self._write()\n\n # self._write('egtransetEPS(EPS);')\n # self._write('egtransetSIG(SIG);')\n # self._write('molecularWeight(wt);')\n\n # for species in mechanism.species():\n\n # if species.symbol in TabulatedCriticalParams:\n #\n # self._write()\n # self._write(self.line('species %d: %s' % (species.id, species.symbol)))\n # self._write(self.line('Imported from NIST'))\n # self._write('Tci[%d] = %f ; ' % (\n # species.id,TabulatedCriticalParams[species.symbol][\"Tci\"]))\n # self._write('ai[%d] = 1e6 * 0.42748 * pow(Rcst,2.0) * pow(Tci[%d],2.0) / (pow(%f,2.0) * %f); ' % (\n # species.id,species.id,TabulatedCriticalParams[species.symbol][\"wt\"],TabulatedCriticalParams[species.symbol][\"Pci\"]))\n # self._write('bi[%d] = 0.08664 * Rcst * Tci[%d] / (%f * %f); ' % (\n # species.id,species.id,TabulatedCriticalParams[species.symbol][\"wt\"],TabulatedCriticalParams[species.symbol][\"Pci\"]))\n # self._write('acentric_i[%d] = %f ;'\n # %(species.id,TabulatedCriticalParams[species.symbol][\"acentric_factor\"]))\n # else:\n #\n # self._write()\n # self._write(self.line('species %d: %s' % (species.id, species.symbol)))\n # self._write('Tci[%d] = 1.316 * EPS[%d] ; ' % (\n # 
species.id,species.id))\n # self._write('ai[%d] = (5.55 * pow(avogadro,2.0) * EPS[%d]*boltzmann * pow(1e-8*SIG[%d],3.0) ) / (pow(wt[%d],2.0)); ' % (\n # species.id,species.id,species.id,species.id))\n # self._write('bi[%d] = 0.855 * avogadro * pow(1e-8*SIG[%d],3.0) / (wt[%d]); ' % (\n # species.id,species.id,species.id))\n # self._write('acentric_i[%d] = 0.0 ;'\n # %(species.id))\n #\n # self._write()\n # self._write('return;')\n # self._outdent()\n # self._write('}')\n\n # return\n\n # def _initializeRateCalculationFR(self, mechanism):\n\n # nSpecies = len(mechanism.species())\n # nReactions = len(mechanism.reaction())\n\n # # declarations\n # self._write()\n # self._write('int id; ' + self.line('loop counter'))\n\n # self._write('double mixture; '\n # + self.line('mixture concentration'))\n # self._write('double g_RT[%d]; ' % nSpecies\n # + self.line('Gibbs free energy'))\n\n # self._write('double Kc; ' + self.line('equilibrium constant'))\n # self._write('double k_f; ' + self.line('forward reaction rate'))\n # self._write('double k_r; ' + self.line('reverse reaction rate'))\n # self._write('double phi_f; '\n # + self.line('forward phase space factor'))\n # self._write('double phi_r; '\n # + self.line('reverse phase space factor'))\n # self._write('double alpha; ' + self.line('enhancement'))\n\n # self._write('double redP; ' + self.line('reduced pressure'))\n # self._write('double logPred; ' + self.line('log of above'))\n # self._write('double F; '\n # + self.line('fallof rate enhancement'))\n # self._write()\n # self._write('double F_troe; ' + self.line('TROE intermediate'))\n # self._write('double logFcent; ' + self.line('TROE intermediate'))\n # self._write('double troe; ' + self.line('TROE intermediate'))\n # self._write('double troe_c; ' + self.line('TROE intermediate'))\n # self._write('double troe_n; ' + self.line('TROE intermediate'))\n # self._write()\n\n # self._write(\n # 'double tc[] = { log(T), T, T*T, T*T*T, T*T*T*T }; '\n # + self.line('temperature cache'))\n\n # self._write()\n # self._write('double invT = 1.0 / tc[1];')\n\n # # compute the reference concentration\n # self._write()\n # self._write(self.line('reference concentration: P_atm / (RT) in inverse mol/m^3'))\n # self._write('double refC = %g / %g / T;' % (atm.value, R.value))\n\n # # compute the mixture concentration\n # self._write()\n # self._write(self.line('compute the mixture concentration'))\n # self._write('mixture = 0.0;')\n # self._write('for (id = 0; id < %d; ++id) {' % nSpecies)\n # self._indent()\n # self._write('mixture += sc[id];')\n # self._outdent()\n # self._write('}')\n\n # # compute the Gibbs free energies\n # self._write()\n # self._write(self.line('compute the Gibbs free energy'))\n # self._write('gibbs(g_RT, tc);')\n #\n # return\n\n # def _forwardRateFR(self, mechanism, reaction):\n\n # lt = reaction.lt\n # if lt:\n # import pyre\n # pyre.debug.Firewall.hit(\"Landau-Teller reactions are not supported yet\")\n # return self._landau(reaction)\n\n # dim = self._phaseSpaceUnits(reaction.reactants)\n\n # phi_f = self._phaseSpace(mechanism, reaction.reactants)\n # self._write(\"phi_f = %s;\" % phi_f)\n\n # thirdBody = reaction.thirdBody\n # if not thirdBody:\n # self._write(\"k_f = k_f_save[%d];\" % (reaction.id-1))\n # self._write(\"q_f[%d] = phi_f * k_f;\" % (reaction.id - 1))\n # return\n #\n # alpha = self._enhancement(mechanism, reaction)\n # self._write(\"alpha = %s;\" % alpha)\n\n # sri = reaction.sri\n # low = reaction.low\n # troe = reaction.troe\n\n # if not low:\n # self._write(\"k_f = 
alpha * k_f_save[%d];\" % (reaction.id-1))\n # self._write(\"q_f[%d] = phi_f * k_f;\" % (reaction.id - 1))\n # return\n\n # self._write(\"k_f = k_f_save[%d];\" % (reaction.id-1))\n\n # self._write(\"redP = alpha / k_f * phase_units[%d] * low_A[%d] * exp(low_beta[%d] * tc[0] - activation_units[%d] * low_Ea[%d] *invT);\"\n # % (reaction.id-1,reaction.id-1,reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write(\"F = redP / (1 + redP);\")\n\n # if sri:\n # self._write(\"logPred = log10(redP);\")\n # self._write(\"X = 1.0 / (1.0 + logPred*logPred);\")\n # self._write(\"F_sri = exp(X * log(sri_a[%d] * exp(-sri_b[%d]/T)\"\n # % (reaction.id-1,reaction.id-1))\n # self._write(\" + (sri_c[%d] > 1.e-100 ? exp(T/sri_c[%d]) : 0.) )\"\n # % (reaction.id-1,reaction.id-1))\n # self._write(\" * (sri_len[%d] > 3 ? sri_d[%d]*exp(sri_e[%d]*tc[0]) : 1.);\"\n # % (reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write(\"F *= F_sri;\")\n\n # elif troe:\n # self._write(\"logPred = log10(redP);\")\n\n # self._write('logFcent = log10(')\n # self._write(' (fabs(troe_Tsss[%d]) > 1.e-100 ? (1.-troe_a[%d])*exp(-T/troe_Tsss[%d]) : 0.) '\n # % (reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write(' + (fabs(troe_Ts[%d]) > 1.e-100 ? troe_a[%d] * exp(-T/troe_Ts[%d]) : 0.) '\n # % (reaction.id-1,reaction.id-1,reaction.id-1))\n # self._write(' + (troe_len[%d] == 4 ? exp(-troe_Tss[%d] * invT) : 0) );'\n # % (reaction.id-1,reaction.id-1))\n\n # d = .14\n # self._write(\"troe_c = -.4 - .67 * logFcent;\")\n # self._write(\"troe_n = .75 - 1.27 * logFcent;\")\n # self._write(\"troe = (troe_c + logPred) / (troe_n - .14*(troe_c + logPred));\")\n # self._write(\"F_troe = pow(10, logFcent / (1.0 + troe*troe));\")\n # self._write(\"F *= F_troe;\")\n\n # self._write(\"k_f *= F;\")\n # self._write(\"q_f[%d] = phi_f * k_f;\" % (reaction.id - 1))\n # return\n #\n\n # def _reverseRateFR(self, mechanism, reaction):\n # if not reaction.reversible:\n # self._write(\"q_r[%d] = 0.0;\" % (reaction.id - 1))\n # return\n\n # phi_r = self._phaseSpace(mechanism, reaction.products)\n # self._write(\"phi_r = %s;\" % phi_r)\n\n # if reaction.rlt:\n # import pyre\n # pyre.debug.Firewall.hit(\"Landau-Teller reactions are not supported yet\")\n # return\n\n # if reaction.rev:\n # self._write(\"k_r = prefactor_units[%d] * rev_A[%d] * exp(rev_beta[%d] * tc[0] - activation_units[%d] * rev_Ea[%d] * invT);\"\n # % (reaction.id-1,reaction.id-1,reaction.id-1,reaction.id-1,reaction.id-1))\n\n # thirdBody = reaction.thirdBody\n # if thirdBody:\n # self._write(\"k_r *= alpha;\")\n\n # self._write(\"q_r[%d] = phi_r * k_r;\" % (reaction.id - 1))\n # return\n #\n # self._write(\"Kc = Kc_save[%d];\" % (reaction.id-1))\n\n # self._write(\"k_r = k_f / Kc;\")\n\n # self._write(\"q_r[%d] = phi_r * k_r;\" % (reaction.id - 1))\n\n # return\n\n def _prefactorUnits(self, code, exponent):\n\n if code == \"mole/cm**3\":\n units = mole / cm**3\n elif code == \"moles\":\n units = mole / cm**3\n elif code == \"molecules\":\n import pyre\n\n units = 1.0 / avogadro / cm**3\n else:\n import pyre\n\n pyre.debug.Firewall.hit(\"unknown prefactor units '%s'\" % code)\n return 1\n\n return units**exponent / second\n\n def _activationEnergyUnits(self, code):\n if code == \"cal/mole\":\n units = cal / mole\n elif code == \"kcal/mole\":\n units = kcal / mole\n elif code == \"joules/mole\":\n units = J / mole\n elif code == \"kjoules/mole\":\n units = kJ / mole\n elif code == \"kelvins\":\n units = Rc * kelvin\n else:\n pyre.debug.Firewall.hit(\n \"unknown activation energy 
units '%s'\" % code\n )\n return 1\n\n return units\n\n # def _equilibriumConstants(self, mechanism):\n # self._write()\n # self._write()\n # self._write(self.line('compute the equilibrium constants for each reaction'))\n # self._write('void equilibriumConstants(double * restrict kc, double * restrict g_RT, double T)')\n # self._write('{')\n # self._indent()\n\n # # compute the reference concentration\n # self._write(self.line('reference concentration: P_atm / (RT) in inverse mol/m^3'))\n # self._write('double refC = %g / %g / T;' % (atm.value, R.value))\n\n # # compute the equilibrium constants\n # for reaction in mechanism.reaction():\n # self._write()\n # self._write(self.line('reaction %d: %s' % (reaction.id, reaction.equation())))\n\n # K_c = self._Kc(mechanism, reaction)\n # self._write(\"kc[%d] = %s;\" % (reaction.id - 1, K_c))\n\n # self._write()\n # self._write('return;')\n # self._outdent()\n\n # self._write('}')\n\n # return\n\n # def _phaseSpace(self, mechanism, reagents):\n\n # phi = []\n\n # for symbol, coefficient in reagents:\n # conc = \"sc[%d]\" % mechanism.species(symbol).id\n # phi += [conc] * coefficient\n\n # return \"*\".join(phi)\n\n def _sortedPhaseSpace(self, mechanism, reagents):\n\n phi = []\n\n for symbol, coefficient in sorted(\n reagents, key=lambda x: mechanism.species(x[0]).id\n ):\n conc = \"sc(%d)\" % (mechanism.species(symbol).id + 1)\n phi += [conc] * coefficient\n\n return \"*\".join(phi)\n\n # def _DphaseSpace(self, mechanism, reagents, r):\n\n # phi = []\n\n # for symbol, coefficient in sorted(reagents,key=lambda x:mechanism.species(x[0]).id):\n # if symbol == r:\n # if coefficient > 1:\n # conc = \"sc[%d]\" % mechanism.species(symbol).id\n # phi += [\"%d\" % coefficient]\n # phi += [conc] * (coefficient-1)\n # else:\n # conc = \"sc[%d]\" % mechanism.species(symbol).id\n # phi += [conc] * coefficient\n\n # if phi:\n # return \"*\".join(phi)\n # else:\n # return \"1.0\"\n\n # def _vphaseSpace(self, mechanism, reagents):\n\n # phi = []\n\n # for symbol, coefficient in sorted(reagents,key=lambda x:mechanism.species(x[0]).id):\n # conc = \"sc[%d*npt+i]\" % mechanism.species(symbol).id\n # phi += [conc] * coefficient\n\n # return \"*\".join(phi)\n\n def _phaseSpaceUnits(self, reagents):\n dim = 0\n for symbol, coefficient in reagents:\n dim += coefficient\n\n return dim\n\n def _enhancement(self, mechanism, reaction):\n thirdBody = reaction.thirdBody\n if not thirdBody:\n import pyre\n\n pyre.debug.Firewall.hit(\n \"_enhancement called for a reaction without a third body\"\n )\n return\n\n species, coefficient = thirdBody\n efficiencies = reaction.efficiencies\n\n if not efficiencies:\n if species == \"<mixture>\":\n return \"mixture\"\n return \"sc(%d+1)\" % mechanism.species(species).id\n\n alpha = [\"mixture\"]\n for i, eff in enumerate(efficiencies):\n symbol, efficiency = eff\n factor = \"(TB(%d) %% vector(%d) - 1)\" % (reaction.id, i + 1)\n conc = \"sc(%d)\" % (mechanism.species(symbol).id + 1)\n alpha.append(\"%s*%s\" % (factor, conc))\n\n return \" + \".join(alpha).replace(\"+ -\", \"- \")\n\n # def _Denhancement(self, mechanism, reaction, kid, consP):\n # thirdBody = reaction.thirdBody\n # if not thirdBody:\n # import pyre\n # pyre.debug.Firewall.hit(\"_enhancement called for a reaction without a third body\")\n # return\n\n # species, coefficient = thirdBody\n # efficiencies = reaction.efficiencies\n\n # if not efficiencies:\n # if species == \"<mixture>\":\n # if consP:\n # return \"0\"\n # else:\n # return \"1\"\n # elif 
mechanism.species(species).id == kid:\n # return \"1\"\n # else:\n # return \"0\"\n # else:\n # if consP:\n # for i, eff in enumerate(efficiencies):\n # symbol, efficiency = eff\n # if mechanism.species(symbol).id == kid:\n # return \"(TB[%d][%d] - 1)\" % (reaction.id-1, i)\n # return \"0\"\n # else:\n # for i, eff in enumerate(efficiencies):\n # symbol, efficiency = eff\n # if mechanism.species(symbol).id == kid:\n # return \"TB[%d][%d]\" % (reaction.id-1,i)\n # return \"1\"\n\n # def _venhancement(self, mechanism, reaction):\n # thirdBody = reaction.thirdBody\n # if not thirdBody:\n # import pyre\n # pyre.debug.Firewall.hit(\"_enhancement called for a reaction without a third body\")\n # return\n\n # species, coefficient = thirdBody\n # efficiencies = reaction.efficiencies\n\n # if not efficiencies:\n # if species == \"<mixture>\":\n # return \"mixture[i]\"\n # return \"sc[%d*npt+i]\" % mechanism.species(species).id\n\n # alpha = [\"mixture[i]\"]\n # for i, eff in enumerate(efficiencies):\n # symbol, efficiency = eff\n # factor = \"(TB[%d][%d] - 1)\" % (reaction.id-1,i)\n # conc = \"sc[%d*npt+i]\" % mechanism.species(symbol).id\n # alpha.append(\"%s*%s\" % (factor, conc))\n\n # return \" + \".join(alpha)\n\n def _cv(self, speciesInfo):\n\n self._write()\n self._write()\n self._write(\"! compute Cv/R at the given temperature\")\n self._write(\"! tc contains precomputed powers of T, tc[0] = log(T)\")\n self._generateThermoRoutine(\"cv_R\", self._cvNASA, speciesInfo)\n\n return\n\n def _cp(self, speciesInfo):\n\n self._write()\n self._write()\n self._write(\"! compute Cp/R at the given temperature\")\n self._write(\"! tc contains precomputed powers of T, tc[0] = log(T)\")\n self._generateThermoRoutine(\"cp_R\", self._cpNASA, speciesInfo)\n\n return\n\n # def _dcvpdT(self, speciesInfo):\n\n # self._write()\n # self._write()\n # self._write(self.line('compute d(Cp/R)/dT and d(Cv/R)/dT at the given temperature'))\n # self._write(self.line('tc contains precomputed powers of T, tc[0] = log(T)'))\n # self._generateThermoRoutine(\"dcvpRdT\", self._dcpdTNASA, speciesInfo)\n\n # return\n\n def _gibbs(self, speciesInfo):\n self._write()\n self._write(\"! compute the g/(RT) at the given temperature\")\n self._write(\"! tc contains precomputed powers of T, tc[0] = log(T)\")\n self._generateThermoRoutine(\"gibbs\", self._gibbsNASA, speciesInfo, 1)\n\n return\n\n # def _helmholtz(self, speciesInfo):\n\n # self._write()\n # self._write()\n # self._write(self.line('compute the a/(RT) at the given temperature'))\n # self._write(self.line('tc contains precomputed powers of T, tc[0] = log(T)'))\n # self._generateThermoRoutine(\"helmholtz\", self._helmholtzNASA, speciesInfo, 1)\n\n # return\n\n # def _speciesEntropy(self, speciesInfo):\n\n # self._write()\n # self._write()\n # self._write(self.line('compute the S/R at the given temperature (Eq 21)'))\n # self._write(self.line('tc contains precomputed powers of T, tc[0] = log(T)'))\n # self._generateThermoRoutine(\"speciesEntropy\", self._entropyNASA, speciesInfo)\n\n # return\n\n def _speciesInternalEnergy(self, speciesInfo):\n\n self._write()\n self._write(\"! compute the e/(RT) at the given temperature\")\n self._write(\"! tc contains precomputed powers of T, tc[0] = log(T)\")\n self._generateThermoRoutine(\n \"speciesInternalEnergy\", self._internalEnergy, speciesInfo, 1\n )\n\n return\n\n def _speciesEnthalpy(self, speciesInfo):\n\n self._write()\n self._write(\"! compute the h/(RT) at the given temperature (Eq 20)\")\n self._write(\"! 
tc contains precomputed powers of T, tc(1) = log(T)\")\n self._generateThermoRoutine(\n \"speciesEnthalpy\", self._enthalpyNASA, speciesInfo, 1\n )\n\n return\n\n def _generateThermoRoutine(\n self, name, expressionGenerator, speciesInfo, needsInvT=0\n ):\n\n lowT, highT, midpoints = speciesInfo\n\n self._write(\"subroutine %s(species, tc)\" % name)\n self._write()\n self._indent()\n self._write(\"implicit none\")\n\n # declarations\n species = self.species\n nSpec = len(species)\n self._write()\n self._write(\"double precision, intent(out) :: species(%d)\" % nSpec)\n self._write(\"double precision, intent(in) :: tc(5)\")\n self._write()\n self._write(\"double precision :: T\")\n if needsInvT != 0:\n self._write(\"double precision :: invT\")\n if needsInvT == 2:\n self._write(\"double precision :: invT2\")\n\n # temperature check\n # self._write()\n # self._write(self.line('check the temperature value'))\n # self._write('if (T < %g || T > %g) {' % (lowT, highT))\n # self._indent()\n # self._write(\n # 'fprintf(stderr, \"temperature %%g is outside the range (%g, %g)\", T);'\n # % (lowT, highT))\n # self._write('return;')\n # self._outdent()\n # self._write('}')\n\n self._write()\n self._write(\"T = tc(2)\")\n if needsInvT != 0:\n self._write(\"invT = 1.d0 / T\")\n if needsInvT == 2:\n self._write(\"invT2 = invT * invT\")\n\n for midT, speciesList in list(midpoints.items()):\n\n self._write()\n self._write(\"! species with midpoint at T=%g kelvin\" % midT)\n self._write(\"if (T < %fd0) then\" % midT)\n self._indent()\n\n for species, lowRange, highRange in speciesList:\n self._write(\n \"! species %d: %s\" % ((species.id + 1), species.symbol)\n )\n self._write(\"species(%d) = &\" % (species.id + 1))\n self._indent()\n expressionGenerator(lowRange.parameters)\n self._outdent()\n\n self._outdent()\n self._write(\"else\")\n self._indent()\n\n for species, lowRange, highRange in speciesList:\n self._write(\n \"!species %d: %s\" % ((species.id + 1), species.symbol)\n )\n self._write(\"species(%d) = &\" % (species.id + 1))\n self._indent()\n expressionGenerator(highRange.parameters)\n self._outdent()\n\n self._outdent()\n self._write(\"end if\")\n\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n def _miscTransInfo(self, KK, NLITE, NO=4):\n\n self._write()\n LENIMC = 4 * KK + NLITE\n self._generateTransRoutineInteger(\n [\n \"egtransetLENIMC\",\n \"EGTRANSETLENIMC\",\n \"egtransetlenimc\",\n \"egtransetlenimc_\",\n \"LENIMC\",\n ],\n LENIMC,\n )\n\n self._write()\n LENRMC = (19 + 2 * NO + NO * NLITE) * KK + (15 + NO) * KK**2\n self._generateTransRoutineInteger(\n [\n \"egtransetLENRMC\",\n \"EGTRANSETLENRMC\",\n \"egtransetlenrmc\",\n \"egtransetlenrmc_\",\n \"LENRMC\",\n ],\n LENRMC,\n )\n\n self._write()\n self._generateTransRoutineInteger(\n [\n \"egtransetNO\",\n \"EGTRANSETNO\",\n \"egtransetno\",\n \"egtransetno_\",\n \"NO\",\n ],\n NO,\n )\n\n self._write()\n self._generateTransRoutineInteger(\n [\n \"egtransetKK\",\n \"EGTRANSETKK\",\n \"egtransetkk\",\n \"egtransetkk_\",\n \"KK\",\n ],\n KK,\n )\n\n self._write()\n self._generateTransRoutineInteger(\n [\n \"egtransetNLITE\",\n \"EGTRANSETNLITE\",\n \"egtransetnlite\",\n \"egtransetnlite_\",\n \"NLITE\",\n ],\n NLITE,\n )\n\n # self._write()\n # self._write()\n # self._write(self.line('Patm in ergs/cm3'))\n # self._write('#if defined(BL_FORT_USE_UPPERCASE)')\n # self._write('#define egtransetPATM EGTRANSETPATM')\n # self._write('#elif defined(BL_FORT_USE_LOWERCASE)')\n # self._write('#define egtransetPATM 
egtransetpatm')\n # self._write('#elif defined(BL_FORT_USE_UNDERSCORE)')\n # self._write('#define egtransetPATM egtransetpatm_')\n # self._write('#endif')\n\n self._write()\n self._write(\"subroutine egtransetPATM(PATM)\")\n self._indent()\n self._write()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(out) :: PATM\")\n self._write()\n self._write(\"PATM = 0.1013250000000000d+07\")\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n def _wt(self):\n self._write()\n self._write(\"! the molecular weights in g/mol\")\n\n # self._write('#if defined(BL_FORT_USE_UPPERCASE)')\n # self._write('#define egtransetWT EGTRANSETWT')\n # self._write('#elif defined(BL_FORT_USE_LOWERCASE)')\n # self._write('#define egtransetWT egtransetwt')\n # self._write('#elif defined(BL_FORT_USE_UNDERSCORE)')\n # self._write('#define egtransetWT egtransetwt_')\n # self._write('#endif')\n\n self._write(\"subroutine %s(%s)\" % (\"egtransetWT\", \"WT\"))\n self._indent()\n self._write()\n self._write(\"implicit none\")\n self._write()\n self._write(\n \"double precision, intent(out) :: %s(%d)\"\n % (\"WT\", len(self.species))\n )\n self._write()\n for species in self.species:\n self._write(\n \"%s(%d) = %s\"\n % (\n \"WT\",\n species.id + 1,\n format(float(species.weight), \".8e\").replace(\"e\", \"d\"),\n )\n )\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n def _eps(self, speciesTransport):\n self._write()\n self._write(\"! the lennard-jones potential well depth eps/kb in K\")\n\n # i=0\n # expression=[]\n # for species in mechanism.species():\n # expression[i] = float(species.trans[0].eps)\n # i++\n self._generateTransRoutineSimple(\n [\n \"egtransetEPS\",\n \"EGTRANSETEPS\",\n \"egtranseteps\",\n \"egtranseteps_\",\n \"EPS\",\n ],\n 1,\n speciesTransport,\n )\n\n return\n\n def _sig(self, speciesTransport):\n self._write()\n self._write(\"! the lennard-jones collision diameter in Angstroms\")\n self._generateTransRoutineSimple(\n [\n \"egtransetSIG\",\n \"EGTRANSETSIG\",\n \"egtransetsig\",\n \"egtransetsig_\",\n \"SIG\",\n ],\n 2,\n speciesTransport,\n )\n\n return\n\n def _dip(self, speciesTransport):\n self._write()\n self._write(\"! the dipole moment in Debye\")\n self._generateTransRoutineSimple(\n [\n \"egtransetDIP\",\n \"EGTRANSETDIP\",\n \"egtransetdip\",\n \"egtransetdip_\",\n \"DIP\",\n ],\n 3,\n speciesTransport,\n )\n\n return\n\n def _pol(self, speciesTransport):\n self._write()\n self._write(\"! the polarizability in cubic Angstroms\")\n self._generateTransRoutineSimple(\n [\n \"egtransetPOL\",\n \"EGTRANSETPOL\",\n \"egtransetpol\",\n \"egtransetpol_\",\n \"POL\",\n ],\n 4,\n speciesTransport,\n )\n\n return\n\n def _zrot(self, speciesTransport):\n self._write()\n self._write(\"! the rotational relaxation collision number at 298 K\")\n self._generateTransRoutineSimple(\n [\n \"egtransetZROT\",\n \"EGTRANSETZROT\",\n \"egtransetzrot\",\n \"egtransetzrot_\",\n \"ZROT\",\n ],\n 5,\n speciesTransport,\n )\n\n return\n\n def _nlin(self, speciesTransport):\n self._write()\n self._write(\"! 
0: monoatomic, 1: linear, 2: nonlinear\")\n # self._generateTransRoutineSimple([\"egtransetNLIN\", \"EGTRANSETNLIN\", \"egtransetNLIN\", \"egtransetNLIN_\", \"NLIN\"], 0, speciesTransport)\n\n # self._write('#if defined(BL_FORT_USE_UPPERCASE)')\n # self._write('#define egtransetNLIN EGTRANSETNLIN')\n # self._write('#elif defined(BL_FORT_USE_LOWERCASE)')\n # self._write('#define egtransetNLIN egtransetnlin')\n # self._write('#elif defined(BL_FORT_USE_UNDERSCORE)')\n # self._write('#define egtransetNLIN egtransetnlin_')\n # self._write('#endif')\n\n self._write(\"subroutine egtransetNLIN(NLIN)\")\n self._indent()\n self._write()\n self._write(\"implicit none\")\n self._write()\n self._write(\n \"integer, intent(out) :: NLIN(%d)\" % (len(speciesTransport))\n )\n self._write()\n for species in speciesTransport:\n self._write(\n \"%s(%d) = %d\"\n % (\"NLIN\", species.id + 1, int(speciesTransport[species][0]))\n )\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n def _viscosity(self, speciesTransport, NTFit):\n\n # compute single constants in g/cm/s\n kb = 1.3806503e-16\n Na = 6.02214199e23\n RU = 8.31447e7\n # conversion coefs\n AtoCM = 1.0e-8\n DEBYEtoCGS = 1.0e-18\n # temperature increment\n dt = (self.highT - self.lowT) / (NTFit - 1)\n # factor dependent upon the molecule\n m_crot = np.zeros(self.nSpecies)\n m_cvib = np.zeros(self.nSpecies)\n isatm = np.zeros(self.nSpecies)\n for spec in speciesTransport:\n if int(speciesTransport[spec][0]) == 0:\n m_crot[spec.id] = 0.0\n m_cvib[spec.id] = 0.0\n isatm[spec.id] = 0.0\n elif int(speciesTransport[spec][0]) == 1:\n m_crot[spec.id] = 1.0\n m_cvib[spec.id] = 5.0 / 2.0\n isatm[spec.id] = 1.0\n else:\n m_crot[spec.id] = 1.5\n m_cvib[spec.id] = 3.0\n isatm[spec.id] = 1.0\n # viscosities coefs (4 per spec)\n cofeta = {}\n # conductivities coefs (4 per spec)\n coflam = {}\n for spec in speciesTransport:\n spvisc = []\n spcond = []\n tlog = []\n for n in range(NTFit):\n t = self.lowT + dt * n\n # variables\n # eq. (2)\n tr = t / float(speciesTransport[spec][1])\n conversion = (\n DEBYEtoCGS * DEBYEtoCGS / (AtoCM / AtoCM / AtoCM / kb)\n )\n dst = (\n 0.5\n * conversion\n * float(speciesTransport[spec][3]) ** 2\n / (\n float(speciesTransport[spec][1])\n * float(speciesTransport[spec][2]) ** 3\n )\n )\n # viscosity of spec at t\n # eq. (1)\n conversion = AtoCM * AtoCM\n visc = (\n (5.0 / 16.0)\n * np.sqrt(\n (np.pi * self.species[spec.id].weight * kb * t / Na)\n )\n / (\n self.om22_CHEMKIN(tr, dst)\n * np.pi\n * float(speciesTransport[spec][2])\n * float(speciesTransport[spec][2])\n * conversion\n )\n )\n # conductivity of spec at t\n # eq. (30)\n conversion = AtoCM * AtoCM\n m_red = self.species[spec.id].weight / (2.0 * Na)\n diffcoef = (\n (3.0 / 16.0)\n * np.sqrt(2.0 * np.pi * kb**3 * t**3 / m_red)\n / (\n 10.0\n * np.pi\n * self.om11_CHEMKIN(tr, dst)\n * float(speciesTransport[spec][2])\n * float(speciesTransport[spec][2])\n * conversion\n )\n )\n # eq. (19)\n cv_vib_R = (\n self._getCVdRspecies(t, spec) - m_cvib[spec.id]\n ) * isatm[spec.id]\n rho_atm = 10.0 * self.species[spec.id].weight / (RU * t)\n f_vib = rho_atm * diffcoef / visc\n # eq. (20)\n A = 2.5 - f_vib\n # eqs. (21) + (32-33)\n cv_rot_R = m_crot[spec.id]\n # note: the T corr is not applied in CANTERA\n B = float(speciesTransport[spec][5]) * self.Fcorr(\n 298.0, float(speciesTransport[spec][1])\n ) / self.Fcorr(t, float(speciesTransport[spec][1])) + (\n 2.0 / np.pi\n ) * (\n (5.0 / 3.0) * cv_rot_R + f_vib\n )\n # eq. 
(18)\n f_rot = f_vib * (1.0 + 2.0 / np.pi * A / B)\n # eq. (17)\n cv_trans_R = 3.0 / 2.0\n f_trans = (\n 5.0\n / 2.0\n * (1.0 - 2.0 / np.pi * A / B * cv_rot_R / cv_trans_R)\n )\n if int(speciesTransport[spec][0]) == 0:\n cond = (\n ((visc * RU / self.species[spec.id].weight))\n * (5.0 / 2.0)\n * cv_trans_R\n )\n else:\n cond = ((visc * RU / self.species[spec.id].weight)) * (\n f_trans * cv_trans_R\n + f_rot * cv_rot_R\n + f_vib * cv_vib_R\n )\n\n # log transformation for polyfit\n tlog.append(np.log(t))\n spvisc.append(np.log(visc))\n spcond.append(np.log(cond))\n\n cofeta[spec.id] = np.polyfit(tlog, spvisc, 3)\n coflam[spec.id] = np.polyfit(tlog, spcond, 3)\n\n # header for visco\n self._write()\n self._write()\n self._write(\"! Poly fits for the viscosities, dim NO*KK\")\n # self._write('#if defined(BL_FORT_USE_UPPERCASE)')\n # self._write('#define egtransetCOFETA EGTRANSETCOFETA')\n # self._write('#elif defined(BL_FORT_USE_LOWERCASE)')\n # self._write('#define egtransetCOFETA egtransetcofeta')\n # self._write('#elif defined(BL_FORT_USE_UNDERSCORE)')\n # self._write('#define egtransetCOFETA egtransetcofeta_')\n # self._write('#endif')\n\n # visco coefs\n self._write(\"subroutine egtransetCOFETA(COFETA)\")\n self._indent()\n self._write()\n self._write(\"implicit none\")\n self._write()\n self._write(\n \"double precision, intent(out) :: COFETA(%d)\"\n % (len(self.species) * 4)\n )\n self._write()\n for spec in self.species:\n for i in range(4):\n self._write(\n \"%s(%d) = %s\"\n % (\n \"COFETA\",\n spec.id * 4 + i + 1,\n format(cofeta[spec.id][3 - i], \".8e\").replace(\n \"e\", \"d\"\n ),\n )\n )\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n # header for cond\n self._write()\n self._write()\n self._write(\"! Poly fits for the conductivities, dim NO*KK\")\n # self._write('#if defined(BL_FORT_USE_UPPERCASE)')\n # self._write('#define egtransetCOFLAM EGTRANSETCOFLAM')\n # self._write('#elif defined(BL_FORT_USE_LOWERCASE)')\n # self._write('#define egtransetCOFLAM egtransetcoflam')\n # self._write('#elif defined(BL_FORT_USE_UNDERSCORE)')\n # self._write('#define egtransetCOFLAM egtransetcoflam_')\n # self._write('#endif')\n\n # visco coefs\n self._write(\"subroutine egtransetCOFLAM(COFLAM)\")\n self._indent()\n self._write()\n self._write(\"implicit none\")\n self._write()\n self._write(\n \"double precision, intent(out) :: COFLAM(%d)\"\n % (len(self.species) * 4)\n )\n self._write()\n for spec in self.species:\n for i in range(4):\n self._write(\n \"%s(%d) = %s\"\n % (\n \"COFLAM\",\n spec.id * 4 + i + 1,\n format(coflam[spec.id][3 - i], \".8e\").replace(\n \"e\", \"d\"\n ),\n )\n )\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n def _thermaldiffratios(self, speciesTransport, lightSpecList, NTFit):\n\n # This is an overhaul of CHEMKIN version III\n # REORDERING OF SPECS\n specOrdered = []\n for i in range(self.nSpecies):\n for spec in speciesTransport:\n if spec.id == i:\n specOrdered.append(spec)\n break\n\n # compute single constants in g/cm/s\n kb = 1.3806503e-16\n # conversion coefs\n DEBYEtoCGS = 1.0e-18\n AtoCM = 1.0e-8\n # temperature increment\n dt = (self.highT - self.lowT) / (NTFit - 1)\n # diff ratios (4 per spec pair involving light species)\n coftd = []\n k = -1\n for i, spec1 in enumerate(specOrdered):\n if i != spec1.id:\n print(\"Problem in _thermaldiffratios computation\")\n stop\n if spec1.id in lightSpecList:\n k = k + 1\n if lightSpecList[k] != spec1.id:\n print(\"Problem in _thermaldiffratios computation\")\n 
stop\n coftd.append([])\n epsi = float(speciesTransport[spec1][1]) * kb\n sigi = float(speciesTransport[spec1][2]) * AtoCM\n poli = (\n float(speciesTransport[spec1][4]) * AtoCM * AtoCM * AtoCM\n )\n # eq. (12)\n poliRed = poli / sigi**3\n for j, spec2 in enumerate(specOrdered):\n if j != spec2.id:\n print(\"Problem in _thermaldiffratios computation\")\n stop\n # eq. (53)\n Wji = (\n self.species[spec2.id].weight\n - self.species[spec1.id].weight\n ) / (\n self.species[spec1.id].weight\n + self.species[spec2.id].weight\n )\n epsj = float(speciesTransport[spec2][1]) * kb\n sigj = float(speciesTransport[spec2][2]) * AtoCM\n dipj = float(speciesTransport[spec2][3]) * DEBYEtoCGS\n # eq. (13)\n dipjRed = dipj / np.sqrt(epsj * sigj**3)\n epsRatio = epsj / epsi\n tse = 1.0 + 0.25 * poliRed * dipjRed**2 * np.sqrt(\n epsRatio\n )\n eok = tse**2 * np.sqrt(\n float(speciesTransport[spec1][1])\n * float(speciesTransport[spec2][1])\n )\n # enter the loop on temperature\n spthdiffcoef = []\n tTab = []\n for n in range(NTFit):\n t = self.lowT + dt * n\n tslog = np.log(t) - np.log(eok)\n # eq. (53)\n thdifcoeff = (\n 15.0\n / 2.0\n * Wji\n * (2.0 * self.astar(tslog) + 5.0)\n * (6.0 * self.cstar(tslog) - 5.0)\n / (\n self.astar(tslog)\n * (\n 16.0 * self.astar(tslog)\n - 12.0 * self.bstar(tslog)\n + 55.0\n )\n )\n )\n\n # log transformation for polyfit\n tTab.append(t)\n spthdiffcoef.append(thdifcoeff)\n\n coftd[k].append(np.polyfit(tTab, spthdiffcoef, 3))\n\n # header for thermal diff ratios\n self._write()\n self._write()\n self._write(\"! Poly fits for thermal diff ratios, dim NO*NLITE*KK\")\n # self._write('#if defined(BL_FORT_USE_UPPERCASE)')\n # self._write('#define egtransetCOFTD EGTRANSETCOFTD')\n # self._write('#elif defined(BL_FORT_USE_LOWERCASE)')\n # self._write('#define egtransetCOFTD egtransetcoftd')\n # self._write('#elif defined(BL_FORT_USE_UNDERSCORE)')\n # self._write('#define egtransetCOFTD egtransetcoftd_')\n # self._write('#endif')\n\n # visco coefs\n self._write(\"subroutine egtransetCOFTD(COFTD)\")\n self._indent()\n self._write()\n self._write(\"implicit none\")\n self._write()\n self._write(\n \"double precision, intent(out) :: COFTD(%d)\"\n % (len(coftd) * self.nSpecies * 4)\n )\n self._write()\n for i in range(len(coftd)):\n for j in range(self.nSpecies):\n for k in range(4):\n self._write(\n \"%s(%d) = %s\"\n % (\n \"COFTD\",\n i * 4 * self.nSpecies + j * 4 + k + 1,\n format(coftd[i][j][3 - k], \".8e\").replace(\n \"e\", \"d\"\n ),\n )\n )\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n def _diffcoefs(self, speciesTransport, NTFit):\n\n # REORDERING OF SPECS\n specOrdered = []\n for i in range(self.nSpecies):\n for spec in speciesTransport:\n if spec.id == i:\n specOrdered.append(spec)\n break\n # checks\n # for spec in speciesTransport:\n # print spec.symbol, spec.id\n # for i in range(self.nSpecies):\n # print i, specOrdered[i].id, specOrdered[i].symbol\n # stop\n\n # compute single constants in g/cm/s\n kb = 1.3806503e-16\n Na = 6.02214199e23\n # conversion coefs\n AtoCM = 1.0e-8\n DEBYEtoCGS = 1.0e-18\n PATM = 0.1013250000000000e07\n # temperature increment\n dt = (self.highT - self.lowT) / (NTFit - 1)\n # diff coefs (4 per spec pair)\n cofd = []\n for i, spec1 in enumerate(specOrdered):\n cofd.append([])\n if i != spec1.id:\n print(\"Problem in _diffcoefs computation\")\n stop\n for j, spec2 in enumerate(specOrdered[0 : i + 1]):\n if j != spec2.id:\n print(\"Problem in _diffcoefs computation\")\n stop\n # eq. 
(9)\n sigm = (\n 0.5\n * (\n float(speciesTransport[spec1][2])\n + float(speciesTransport[spec2][2])\n )\n * AtoCM\n ) * self.Xi(spec1, spec2, speciesTransport) ** (1.0 / 6.0)\n # eq. (4)\n m_red = (\n self.species[spec1.id].weight\n * self.species[spec2.id].weight\n ) / (\n (\n self.species[spec1.id].weight\n + self.species[spec2.id].weight\n )\n / Na\n )\n # eq. (8) & (14)\n epsm_k = (\n np.sqrt(\n float(speciesTransport[spec1][1])\n * float(speciesTransport[spec2][1])\n )\n * self.Xi(spec1, spec2, speciesTransport) ** 2.0\n )\n\n # eq. (15)\n conversion = DEBYEtoCGS * DEBYEtoCGS / kb\n dst = (\n 0.5\n * conversion\n * float(speciesTransport[spec1][3])\n * float(speciesTransport[spec2][3])\n / (epsm_k * sigm**3)\n )\n if self.Xi_bool(spec1, spec2, speciesTransport) == False:\n dst = 0.0\n # enter the loop on temperature\n spdiffcoef = []\n tlog = []\n for n in range(NTFit):\n t = self.lowT + dt * n\n tr = t / epsm_k\n # eq. (3)\n # note: these are \"corrected\" in CHEMKIN not in CANTERA... we chose not to\n difcoeff = (\n 3.0\n / 16.0\n * 1\n / PATM\n * (\n np.sqrt(2.0 * np.pi * t**3 * kb**3 / m_red)\n / (\n np.pi\n * sigm\n * sigm\n * self.om11_CHEMKIN(tr, dst)\n )\n )\n )\n\n # log transformation for polyfit\n tlog.append(np.log(t))\n spdiffcoef.append(np.log(difcoeff))\n\n cofd[i].append(np.polyfit(tlog, spdiffcoef, 3))\n\n # use the symmetry for upper triangular terms\n # note: starting with this would be preferable (only one bigger loop)\n # note2: or write stuff differently !\n # for i,spec1 in enumerate(specOrdered):\n # for j,spec2 in enumerate(specOrdered[i+1:]):\n # cofd[i].append(cofd[spec2.id][spec1.id])\n\n # header for diffusion coefs\n self._write()\n self._write(\"! Poly fits for the diffusion coefficients, dim NO*KK*KK\")\n # self._write('#if defined(BL_FORT_USE_UPPERCASE)')\n # self._write('#define egtransetCOFD EGTRANSETCOFD')\n # self._write('#elif defined(BL_FORT_USE_LOWERCASE)')\n # self._write('#define egtransetCOFD egtransetcofd')\n # self._write('#elif defined(BL_FORT_USE_UNDERSCORE)')\n # self._write('#define egtransetCOFD egtransetcofd_')\n # self._write('#endif')\n\n # coefs\n self._write(\"subroutine egtransetCOFD(COFD)\")\n self._indent()\n self._write()\n self._write(\"implicit none\")\n self._write()\n cofd_size = 0\n for i, spec1 in enumerate(specOrdered):\n for j, spec2 in enumerate(specOrdered[0 : i + 1]):\n for k in range(4):\n cofd_size = cofd_size + 1\n for j, spec2 in enumerate(specOrdered[i + 1 :]):\n for k in range(4):\n cofd_size = cofd_size + 1\n self._write(\"double precision, intent(out) :: COFD(%d)\" % (cofd_size))\n self._write()\n for i, spec1 in enumerate(specOrdered):\n # for j,spec2 in enumerate(specOrdered):\n for j, spec2 in enumerate(specOrdered[0 : i + 1]):\n for k in range(4):\n # self._write('%s[%d] = %.8E;' % ('COFD', i*self.nSpecies*4+j*4+k, cofd[j][i][3-k]))\n self._write(\n \"%s(%d) = %s\"\n % (\n \"COFD\",\n i * self.nSpecies * 4 + j * 4 + k + 1,\n format(cofd[i][j][3 - k], \".8e\").replace(\"e\", \"d\"),\n )\n )\n for j, spec2 in enumerate(specOrdered[i + 1 :]):\n for k in range(4):\n self._write(\n \"%s(%d) = %s\"\n % (\n \"COFD\",\n i * self.nSpecies * 4 + (j + i + 1) * 4 + k + 1,\n format(cofd[j + i + 1][i][3 - k], \".8e\").replace(\n \"e\", \"d\"\n ),\n )\n )\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n def _lightSpecs(self, speclist):\n\n # header\n self._write()\n self._write(\"! 
List of specs with small weight, dim NLITE\")\n # self._write('#if defined(BL_FORT_USE_UPPERCASE)')\n # self._write('#define egtransetKTDIF EGTRANSETKTDIF')\n # self._write('#elif defined(BL_FORT_USE_LOWERCASE)')\n # self._write('#define egtransetKTDIF egtransetktdif')\n # self._write('#elif defined(BL_FORT_USE_UNDERSCORE)')\n # self._write('#define egtransetKTDIF egtransetktdif_')\n # self._write('#endif')\n\n # coefs\n self._write(\"subroutine egtransetKTDIF(KTDIF)\")\n self._indent()\n self._write()\n self._write(\"implicit none\")\n self._write()\n self._write(\"integer, intent(out) :: KTDIF(%d)\" % len(speclist))\n self._write()\n for i in range(len(speclist)):\n self._write(\"%s(%d) = %d\" % (\"KTDIF\", i + 1, speclist[i] + 1))\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n def astar(self, tslog):\n\n aTab = [\n 0.1106910525e01,\n -0.7065517161e-02,\n -0.1671975393e-01,\n 0.1188708609e-01,\n 0.7569367323e-03,\n -0.1313998345e-02,\n 0.1720853282e-03,\n ]\n\n B = aTab[6]\n for i in range(6):\n B = aTab[5 - i] + B * tslog\n\n return B\n\n def bstar(self, tslog):\n\n bTab = [\n 0.1199673577e01,\n -0.1140928763e00,\n -0.2147636665e-02,\n 0.2512965407e-01,\n -0.3030372973e-02,\n -0.1445009039e-02,\n 0.2492954809e-03,\n ]\n\n B = bTab[6]\n for i in range(6):\n B = bTab[5 - i] + B * tslog\n\n return B\n\n def cstar(self, tslog):\n\n cTab = [\n 0.8386993788e00,\n 0.4748325276e-01,\n 0.3250097527e-01,\n -0.1625859588e-01,\n -0.2260153363e-02,\n 0.1844922811e-02,\n -0.2115417788e-03,\n ]\n\n B = cTab[6]\n for i in range(6):\n B = cTab[5 - i] + B * tslog\n\n return B\n\n def Xi(self, spec1, spec2, speciesTransport):\n\n dipmin = 1e-20\n # 1 is polar, 2 is nonpolar\n # err in eq. (11) ?\n if (float(speciesTransport[spec2][3]) < dipmin) and (\n float(speciesTransport[spec1][3]) > dipmin\n ):\n xi = 1.0 + 1.0 / 4.0 * self.redPol(\n spec2, speciesTransport\n ) * self.redDip(spec1, speciesTransport) * self.redDip(\n spec1, speciesTransport\n ) * np.sqrt(\n float(speciesTransport[spec1][1])\n / float(speciesTransport[spec2][1])\n )\n # 1 is nonpolar, 2 is polar\n elif (float(speciesTransport[spec2][3]) > dipmin) and (\n float(speciesTransport[spec1][3]) < dipmin\n ):\n xi = 1.0 + 1.0 / 4.0 * self.redPol(\n spec1, speciesTransport\n ) * self.redDip(spec2, speciesTransport) * self.redDip(\n spec2, speciesTransport\n ) * np.sqrt(\n float(speciesTransport[spec2][1])\n / float(speciesTransport[spec1][1])\n )\n # normal case, either both polar or both nonpolar\n else:\n xi = 1.0\n\n return xi\n\n def Xi_bool(self, spec1, spec2, speciesTransport):\n\n dipmin = 1e-20\n # 1 is polar, 2 is nonpolar\n # err in eq. 
(11) ?\n if (float(speciesTransport[spec2][3]) < dipmin) and (\n float(speciesTransport[spec1][3]) > dipmin\n ):\n xi_b = False\n # 1 is nonpolar, 2 is polar\n elif (float(speciesTransport[spec2][3]) > dipmin) and (\n float(speciesTransport[spec1][3]) < dipmin\n ):\n xi_b = False\n # normal case, either both polar or both nonpolar\n else:\n xi_b = True\n\n return xi_b\n\n def redPol(self, spec, speciesTransport):\n\n return (\n float(speciesTransport[spec][4])\n / float(speciesTransport[spec][2]) ** 3.0\n )\n\n def redDip(self, spec, speciesTransport):\n\n # compute single constants in g/cm/s\n kb = 1.3806503e-16\n # conversion coefs\n AtoCM = 1.0e-8\n DEBYEtoCGS = 1.0e-18\n convert = DEBYEtoCGS / np.sqrt(kb * AtoCM**3.0)\n return (\n convert\n * float(speciesTransport[spec][3])\n / np.sqrt(\n float(speciesTransport[spec][1])\n * float(speciesTransport[spec][2]) ** 3.0\n )\n )\n\n def Fcorr(self, t, eps_k):\n\n thtwo = 3.0 / 2.0\n return (\n 1\n + np.pi ** (thtwo) / 2.0 * np.sqrt((eps_k / t))\n + (np.pi**2 / 4.0 + 2.0) * ((eps_k / t))\n + ((np.pi * eps_k / t)) ** (thtwo)\n )\n\n # def om11(self, tr, dst):\n\n # # This is an overhaul of CANTERA version 2.3\n # #range of dst\n # dstTab = [0.0, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5]\n\n # #range of tr\n # trTab = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,\n # 1.2, 1.4, 1.6, 1.8, 2.0, 2.5, 3.0, 3.5, 4.0,\n # 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 12.0, 14.0, 16.0,\n # 18.0, 20.0, 25.0, 30.0, 35.0, 40.0, 50.0, 75.0, 100.0]\n\n # #tab of astar corresp. to (tr, dst)\n # #CANTERA\n # astarTab = [1.0065, 1.0840, 1.0840, 1.0840, 1.0840, 1.0840, 1.0840, 1.0840,\n # 1.0231, 1.0660, 1.0380, 1.0400, 1.0430, 1.0500, 1.0520, 1.0510,\n # 1.0424, 1.0450, 1.0480, 1.0520, 1.0560, 1.0650, 1.0660, 1.0640,\n # 1.0719, 1.0670, 1.0600, 1.0550, 1.0580, 1.0680, 1.0710, 1.0710,\n # 1.0936, 1.0870, 1.0770, 1.0690, 1.0680, 1.0750, 1.0780, 1.0780,\n # 1.1053, 1.0980, 1.0880, 1.0800, 1.0780, 1.0820, 1.0840, 1.0840,\n # 1.1104, 1.1040, 1.0960, 1.0890, 1.0860, 1.0890, 1.0900, 1.0900,\n # 1.1114, 1.1070, 1.1000, 1.0950, 1.0930, 1.0950, 1.0960, 1.0950,\n # 1.1104, 1.1070, 1.1020, 1.0990, 1.0980, 1.1000, 1.1000, 1.0990,\n # 1.1086, 1.1060, 1.1020, 1.1010, 1.1010, 1.1050, 1.1050, 1.1040,\n # 1.1063, 1.1040, 1.1030, 1.1030, 1.1040, 1.1080, 1.1090, 1.1080,\n # 1.1020, 1.1020, 1.1030, 1.1050, 1.1070, 1.1120, 1.1150, 1.1150,\n # 1.0985, 1.0990, 1.1010, 1.1040, 1.1080, 1.1150, 1.1190, 1.1200,\n # 1.0960, 1.0960, 1.0990, 1.1030, 1.1080, 1.1160, 1.1210, 1.1240,\n # 1.0943, 1.0950, 1.0990, 1.1020, 1.1080, 1.1170, 1.1230, 1.1260,\n # 1.0934, 1.0940, 1.0970, 1.1020, 1.1070, 1.1160, 1.1230, 1.1280,\n # 1.0926, 1.0940, 1.0970, 1.0990, 1.1050, 1.1150, 1.1230, 1.1300,\n # 1.0934, 1.0950, 1.0970, 1.0990, 1.1040, 1.1130, 1.1220, 1.1290,\n # 1.0948, 1.0960, 1.0980, 1.1000, 1.1030, 1.1120, 1.1190, 1.1270,\n # 1.0965, 1.0970, 1.0990, 1.1010, 1.1040, 1.1100, 1.1180, 1.1260,\n # 1.0997, 1.1000, 1.1010, 1.1020, 1.1050, 1.1100, 1.1160, 1.1230,\n # 1.1025, 1.1030, 1.1040, 1.1050, 1.1060, 1.1100, 1.1150, 1.1210,\n # 1.1050, 1.1050, 1.1060, 1.1070, 1.1080, 1.1110, 1.1150, 1.1200,\n # 1.1072, 1.1070, 1.1080, 1.1080, 1.1090, 1.1120, 1.1150, 1.1190,\n # 1.1091, 1.1090, 1.1090, 1.1100, 1.1110, 1.1130, 1.1150, 1.1190,\n # 1.1107, 1.1110, 1.1110, 1.1110, 1.1120, 1.1140, 1.1160, 1.1190,\n # 1.1133, 1.1140, 1.1130, 1.1140, 1.1140, 1.1150, 1.1170, 1.1190,\n # 1.1154, 1.1150, 1.1160, 1.1160, 1.1160, 1.1170, 1.1180, 1.1200,\n # 1.1172, 1.1170, 1.1170, 1.1180, 1.1180, 1.1180, 1.1190, 1.1200,\n # 1.1186, 1.1190, 
1.1190, 1.1190, 1.1190, 1.1190, 1.1200, 1.1210,\n # 1.1199, 1.1200, 1.1200, 1.1200, 1.1200, 1.1210, 1.1210, 1.1220,\n # 1.1223, 1.1220, 1.1220, 1.1220, 1.1220, 1.1230, 1.1230, 1.1240,\n # 1.1243, 1.1240, 1.1240, 1.1240, 1.1240, 1.1240, 1.1250, 1.1250,\n # 1.1259, 1.1260, 1.1260, 1.1260, 1.1260, 1.1260, 1.1260, 1.1260,\n # 1.1273, 1.1270, 1.1270, 1.1270, 1.1270, 1.1270, 1.1270, 1.1280,\n # 1.1297, 1.1300, 1.1300, 1.1300, 1.1300, 1.1300, 1.1300, 1.1290,\n # 1.1339, 1.1340, 1.1340, 1.1350, 1.1350, 1.1340, 1.1340, 1.1320,\n # 1.1364, 1.1370, 1.1370, 1.1380, 1.1390, 1.1380, 1.1370, 1.1350,\n # 1.14187, 1.14187, 1.14187, 1.14187, 1.14187, 1.14187, 1.14187,\n # 1.14187]\n\n # #Find for each fixed tr the poly of deg 6 in dst approx astar values\n # #store the poly coefs in m_apoly\n # m_apoly = []\n # for i in range(37):\n # dstDeg = 6\n # #Polynomial coefficients, highest power first\n # polycoefs = np.polyfit(dstTab,astarTab[8*(i+1):8*(i+2)],dstDeg)\n # m_apoly.append(polycoefs)\n\n # #Find 3 referenced temp points around tr\n # for i in range(37):\n # if tr<trTab[i]:\n # break\n # i1 = max(i-1, 0)\n # i2 = i1+3\n # if (i2 > 36):\n # i2 = 36\n # i1 = i2 - 3\n # #compute astar value for these 3 points\n # values = []\n # for j in range(i1,i2):\n # if (dst == 0.0):\n # values.append(astarTab[8*(j+1)])\n # else:\n # poly6 = np.poly1d(m_apoly[j])\n # values.append(poly6(dst))\n\n # #interpolate to find real tr value\n # trTab_log = []\n # for j in range(len(trTab)):\n # trTab_log.append(np.log(trTab[j]))\n\n # astar_interp = self.quadInterp(np.log(tr), trTab_log[i1:i2], values)\n\n # return self.om22(tr,dst)/astar_interp\n\n def om11_CHEMKIN(self, tr, dst):\n\n # This is an overhaul of CANTERA version 2.3\n # range of dst\n dstTab = [0.0, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5]\n\n # range of tr\n trTab = [\n 0.1,\n 0.2,\n 0.3,\n 0.4,\n 0.5,\n 0.6,\n 0.7,\n 0.8,\n 0.9,\n 1.0,\n 1.2,\n 1.4,\n 1.6,\n 1.8,\n 2.0,\n 2.5,\n 3.0,\n 3.5,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 12.0,\n 14.0,\n 16.0,\n 18.0,\n 20.0,\n 25.0,\n 30.0,\n 35.0,\n 40.0,\n 50.0,\n 75.0,\n 100.0,\n ]\n\n # tab of omega11 corresp. 
to (tr, dst)\n # CANTERA\n omegaTab = [\n 4.008,\n 4.002,\n 4.655,\n 5.52,\n 6.454,\n 8.214,\n 9.824,\n 11.31,\n 3.130,\n 3.164,\n 3.355,\n 3.721,\n 4.198,\n 5.23,\n 6.225,\n 7.160,\n 2.649,\n 2.657,\n 2.77,\n 3.002,\n 3.319,\n 4.054,\n 4.785,\n 5.483,\n 2.314,\n 2.32,\n 2.402,\n 2.572,\n 2.812,\n 3.386,\n 3.972,\n 4.539,\n 2.066,\n 2.073,\n 2.14,\n 2.278,\n 2.472,\n 2.946,\n 3.437,\n 3.918,\n 1.877,\n 1.885,\n 1.944,\n 2.06,\n 2.225,\n 2.628,\n 3.054,\n 3.747,\n 1.729,\n 1.738,\n 1.79,\n 1.893,\n 2.036,\n 2.388,\n 2.763,\n 3.137,\n 1.6122,\n 1.622,\n 1.67,\n 1.76,\n 1.886,\n 2.198,\n 2.535,\n 2.872,\n 1.517,\n 1.527,\n 1.572,\n 1.653,\n 1.765,\n 2.044,\n 2.35,\n 2.657,\n 1.44,\n 1.45,\n 1.49,\n 1.564,\n 1.665,\n 1.917,\n 2.196,\n 2.4780,\n 1.3204,\n 1.33,\n 1.364,\n 1.425,\n 1.51,\n 1.72,\n 1.956,\n 2.199,\n 1.234,\n 1.24,\n 1.272,\n 1.324,\n 1.394,\n 1.573,\n 1.777,\n 1.99,\n 1.168,\n 1.176,\n 1.202,\n 1.246,\n 1.306,\n 1.46,\n 1.64,\n 1.827,\n 1.1166,\n 1.124,\n 1.146,\n 1.185,\n 1.237,\n 1.372,\n 1.53,\n 1.7,\n 1.075,\n 1.082,\n 1.102,\n 1.135,\n 1.181,\n 1.3,\n 1.441,\n 1.592,\n 1.0006,\n 1.005,\n 1.02,\n 1.046,\n 1.08,\n 1.17,\n 1.278,\n 1.397,\n 0.95,\n 0.9538,\n 0.9656,\n 0.9852,\n 1.012,\n 1.082,\n 1.168,\n 1.265,\n 0.9131,\n 0.9162,\n 0.9256,\n 0.9413,\n 0.9626,\n 1.019,\n 1.09,\n 1.17,\n 0.8845,\n 0.8871,\n 0.8948,\n 0.9076,\n 0.9252,\n 0.972,\n 1.03,\n 1.098,\n 0.8428,\n 0.8446,\n 0.850,\n 0.859,\n 0.8716,\n 0.9053,\n 0.9483,\n 0.9984,\n 0.813,\n 0.8142,\n 0.8183,\n 0.825,\n 0.8344,\n 0.8598,\n 0.8927,\n 0.9316,\n 0.7898,\n 0.791,\n 0.794,\n 0.7993,\n 0.8066,\n 0.8265,\n 0.8526,\n 0.8836,\n 0.7711,\n 0.772,\n 0.7745,\n 0.7788,\n 0.7846,\n 0.8007,\n 0.822,\n 0.8474,\n 0.7555,\n 0.7562,\n 0.7584,\n 0.7619,\n 0.7667,\n 0.78,\n 0.7976,\n 0.8189,\n 0.7422,\n 0.743,\n 0.7446,\n 0.7475,\n 0.7515,\n 0.7627,\n 0.7776,\n 0.796,\n 0.72022,\n 0.7206,\n 0.722,\n 0.7241,\n 0.7271,\n 0.7354,\n 0.7464,\n 0.76,\n 0.7025,\n 0.703,\n 0.704,\n 0.7055,\n 0.7078,\n 0.7142,\n 0.7228,\n 0.7334,\n 0.68776,\n 0.688,\n 0.6888,\n 0.6901,\n 0.6919,\n 0.697,\n 0.704,\n 0.7125,\n 0.6751,\n 0.6753,\n 0.676,\n 0.677,\n 0.6785,\n 0.6827,\n 0.6884,\n 0.6955,\n 0.664,\n 0.6642,\n 0.6648,\n 0.6657,\n 0.6669,\n 0.6704,\n 0.6752,\n 0.681,\n 0.6414,\n 0.6415,\n 0.6418,\n 0.6425,\n 0.6433,\n 0.6457,\n 0.649,\n 0.653,\n 0.6235,\n 0.6236,\n 0.6239,\n 0.6243,\n 0.6249,\n 0.6267,\n 0.629,\n 0.632,\n 0.60882,\n 0.6089,\n 0.6091,\n 0.6094,\n 0.61,\n 0.6112,\n 0.613,\n 0.6154,\n 0.5964,\n 0.5964,\n 0.5966,\n 0.597,\n 0.5972,\n 0.5983,\n 0.600,\n 0.6017,\n 0.5763,\n 0.5763,\n 0.5764,\n 0.5766,\n 0.5768,\n 0.5775,\n 0.5785,\n 0.58,\n 0.5415,\n 0.5415,\n 0.5416,\n 0.5416,\n 0.5418,\n 0.542,\n 0.5424,\n 0.543,\n 0.518,\n 0.518,\n 0.5182,\n 0.5184,\n 0.5184,\n 0.5185,\n 0.5186,\n 0.5187,\n ]\n\n # First test on tr\n if tr > 75.0:\n omeg12 = (\n 0.623\n - 0.136e-2 * tr\n + 0.346e-5 * tr * tr\n - 0.343e-8 * tr * tr * tr\n )\n else:\n # Find tr idx in trTab\n if tr <= 0.2:\n ii = 1\n else:\n ii = 36\n for i in range(1, 37):\n if (tr > trTab[i - 1]) and (tr <= trTab[i]):\n ii = i\n break\n # Find dst idx in dstTab\n if abs(dst) >= 1.0e-5:\n if dst <= 0.25:\n kk = 1\n else:\n kk = 6\n for i in range(1, 7):\n if (dstTab[i - 1] < dst) and (dstTab[i] >= dst):\n kk = i\n break\n # Find surrounding values and interpolate\n # First on dst\n vert = np.zeros(3)\n for i in range(3):\n arg = np.zeros(3)\n val = np.zeros(3)\n for k in range(3):\n arg[k] = dstTab[kk - 1 + k]\n val[k] = omegaTab[8 * (ii - 1 + i) + (kk - 1 + k)]\n vert[i] = 
self.qinterp(dst, arg, val)\n # Second on tr\n arg = np.zeros(3)\n for i in range(3):\n arg[i] = trTab[ii - 1 + i]\n omeg12 = self.qinterp(tr, arg, vert)\n else:\n arg = np.zeros(3)\n val = np.zeros(3)\n for i in range(3):\n arg[i] = trTab[ii - 1 + i]\n val[i] = omegaTab[8 * (ii - 1 + i)]\n omeg12 = self.qinterp(tr, arg, val)\n\n return omeg12\n\n def om22_CHEMKIN(self, tr, dst):\n\n # This is an overhaul of CANTERA version 2.3\n # range of dst\n dstTab = [0.0, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5]\n\n # range of tr\n trTab = [\n 0.1,\n 0.2,\n 0.3,\n 0.4,\n 0.5,\n 0.6,\n 0.7,\n 0.8,\n 0.9,\n 1.0,\n 1.2,\n 1.4,\n 1.6,\n 1.8,\n 2.0,\n 2.5,\n 3.0,\n 3.5,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 12.0,\n 14.0,\n 16.0,\n 18.0,\n 20.0,\n 25.0,\n 30.0,\n 35.0,\n 40.0,\n 50.0,\n 75.0,\n 100.0,\n ]\n\n # tab of omega22 corresp. to (tr, dst)\n # CANTERA\n omegaTab = [\n 4.1005,\n 4.266,\n 4.833,\n 5.742,\n 6.729,\n 8.624,\n 10.34,\n 11.89,\n 3.2626,\n 3.305,\n 3.516,\n 3.914,\n 4.433,\n 5.57,\n 6.637,\n 7.618,\n 2.8399,\n 2.836,\n 2.936,\n 3.168,\n 3.511,\n 4.329,\n 5.126,\n 5.874,\n 2.531,\n 2.522,\n 2.586,\n 2.749,\n 3.004,\n 3.64,\n 4.282,\n 4.895,\n 2.2837,\n 2.277,\n 2.329,\n 2.46,\n 2.665,\n 3.187,\n 3.727,\n 4.249,\n 2.0838,\n 2.081,\n 2.13,\n 2.243,\n 2.417,\n 2.862,\n 3.329,\n 3.786,\n 1.922,\n 1.924,\n 1.97,\n 2.072,\n 2.225,\n 2.614,\n 3.028,\n 3.435,\n 1.7902,\n 1.795,\n 1.84,\n 1.934,\n 2.07,\n 2.417,\n 2.788,\n 3.156,\n 1.6823,\n 1.689,\n 1.733,\n 1.82,\n 1.944,\n 2.258,\n 2.596,\n 2.933,\n 1.5929,\n 1.601,\n 1.644,\n 1.725,\n 1.838,\n 2.124,\n 2.435,\n 2.746,\n 1.4551,\n 1.465,\n 1.504,\n 1.574,\n 1.67,\n 1.913,\n 2.181,\n 2.451,\n 1.3551,\n 1.365,\n 1.4,\n 1.461,\n 1.544,\n 1.754,\n 1.989,\n 2.228,\n 1.28,\n 1.289,\n 1.321,\n 1.374,\n 1.447,\n 1.63,\n 1.838,\n 2.053,\n 1.2219,\n 1.231,\n 1.259,\n 1.306,\n 1.37,\n 1.532,\n 1.718,\n 1.912,\n 1.1757,\n 1.184,\n 1.209,\n 1.251,\n 1.307,\n 1.451,\n 1.618,\n 1.795,\n 1.0933,\n 1.1,\n 1.119,\n 1.15,\n 1.193,\n 1.304,\n 1.435,\n 1.578,\n 1.0388,\n 1.044,\n 1.059,\n 1.083,\n 1.117,\n 1.204,\n 1.31,\n 1.428,\n 0.99963,\n 1.004,\n 1.016,\n 1.035,\n 1.062,\n 1.133,\n 1.22,\n 1.319,\n 0.96988,\n 0.9732,\n 0.983,\n 0.9991,\n 1.021,\n 1.079,\n 1.153,\n 1.236,\n 0.92676,\n 0.9291,\n 0.936,\n 0.9473,\n 0.9628,\n 1.005,\n 1.058,\n 1.121,\n 0.89616,\n 0.8979,\n 0.903,\n 0.9114,\n 0.923,\n 0.9545,\n 0.9955,\n 1.044,\n 0.87272,\n 0.8741,\n 0.878,\n 0.8845,\n 0.8935,\n 0.9181,\n 0.9505,\n 0.9893,\n 0.85379,\n 0.8549,\n 0.858,\n 0.8632,\n 0.8703,\n 0.8901,\n 0.9164,\n 0.9482,\n 0.83795,\n 0.8388,\n 0.8414,\n 0.8456,\n 0.8515,\n 0.8678,\n 0.8895,\n 0.916,\n 0.82435,\n 0.8251,\n 0.8273,\n 0.8308,\n 0.8356,\n 0.8493,\n 0.8676,\n 0.8901,\n 0.80184,\n 0.8024,\n 0.8039,\n 0.8065,\n 0.8101,\n 0.8201,\n 0.8337,\n 0.8504,\n 0.78363,\n 0.784,\n 0.7852,\n 0.7872,\n 0.7899,\n 0.7976,\n 0.8081,\n 0.8212,\n 0.76834,\n 0.7687,\n 0.7696,\n 0.7712,\n 0.7733,\n 0.7794,\n 0.7878,\n 0.7983,\n 0.75518,\n 0.7554,\n 0.7562,\n 0.7575,\n 0.7592,\n 0.7642,\n 0.7711,\n 0.7797,\n 0.74364,\n 0.7438,\n 0.7445,\n 0.7455,\n 0.747,\n 0.7512,\n 0.7569,\n 0.7642,\n 0.71982,\n 0.72,\n 0.7204,\n 0.7211,\n 0.7221,\n 0.725,\n 0.7289,\n 0.7339,\n 0.70097,\n 0.7011,\n 0.7014,\n 0.7019,\n 0.7026,\n 0.7047,\n 0.7076,\n 0.7112,\n 0.68545,\n 0.6855,\n 0.6858,\n 0.6861,\n 0.6867,\n 0.6883,\n 0.6905,\n 0.6932,\n 0.67232,\n 0.6724,\n 0.6726,\n 0.6728,\n 0.6733,\n 0.6743,\n 0.6762,\n 0.6784,\n 0.65099,\n 0.651,\n 0.6512,\n 0.6513,\n 0.6516,\n 0.6524,\n 0.6534,\n 0.6546,\n 0.61397,\n 0.6141,\n 
0.6143,\n 0.6145,\n 0.6147,\n 0.6148,\n 0.6148,\n 0.6147,\n 0.5887,\n 0.5889,\n 0.5894,\n 0.59,\n 0.5903,\n 0.5901,\n 0.5895,\n 0.5885,\n ]\n\n # First test on tr\n if tr > 75.0:\n omeg12 = (\n 0.703\n - 0.146e-2 * tr\n + 0.357e-5 * tr * tr\n - 0.343e-8 * tr * tr * tr\n )\n else:\n # Find tr idx in trTab\n if tr <= 0.2:\n ii = 1\n else:\n ii = 36\n for i in range(1, 37):\n if (tr > trTab[i - 1]) and (tr <= trTab[i]):\n ii = i\n break\n # Find dst idx in dstTab\n if abs(dst) >= 1.0e-5:\n if dst <= 0.25:\n kk = 1\n else:\n kk = 6\n for i in range(1, 7):\n if (dstTab[i - 1] < dst) and (dstTab[i] >= dst):\n kk = i\n break\n # Find surrounding values and interpolate\n # First on dst\n vert = np.zeros(3)\n for i in range(3):\n arg = np.zeros(3)\n val = np.zeros(3)\n for k in range(3):\n arg[k] = dstTab[kk - 1 + k]\n val[k] = omegaTab[8 * (ii - 1 + i) + (kk - 1 + k)]\n vert[i] = self.qinterp(dst, arg, val)\n # Second on tr\n arg = np.zeros(3)\n for i in range(3):\n arg[i] = trTab[ii - 1 + i]\n omeg12 = self.qinterp(tr, arg, vert)\n else:\n arg = np.zeros(3)\n val = np.zeros(3)\n for i in range(3):\n arg[i] = trTab[ii - 1 + i]\n val[i] = omegaTab[8 * (ii - 1 + i)]\n omeg12 = self.qinterp(tr, arg, val)\n\n return omeg12\n\n # def om22(self, tr, dst):\n\n # # This is an overhaul of CANTERA version 2.3\n # #range of dst\n # dstTab = [0.0, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5]\n\n # #range of tr\n # trTab = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,\n # 1.2, 1.4, 1.6, 1.8, 2.0, 2.5, 3.0, 3.5, 4.0,\n # 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 12.0, 14.0, 16.0,\n # 18.0, 20.0, 25.0, 30.0, 35.0, 40.0, 50.0, 75.0, 100.0]\n\n # #tab of omega22 corresp. to (tr, dst)\n # #CANTERA\n # omegaTab = [4.1005, 4.266, 4.833, 5.742, 6.729, 8.624, 10.34, 11.89,\n # 3.2626, 3.305, 3.516, 3.914, 4.433, 5.57, 6.637, 7.618,\n # 2.8399, 2.836, 2.936, 3.168, 3.511, 4.329, 5.126, 5.874,\n # 2.531, 2.522, 2.586, 2.749, 3.004, 3.64, 4.282, 4.895,\n # 2.2837, 2.277, 2.329, 2.46, 2.665, 3.187, 3.727, 4.249,\n # 2.0838, 2.081, 2.13, 2.243, 2.417, 2.862, 3.329, 3.786,\n # 1.922, 1.924, 1.97, 2.072, 2.225, 2.614, 3.028, 3.435,\n # 1.7902, 1.795, 1.84, 1.934, 2.07, 2.417, 2.788, 3.156,\n # 1.6823, 1.689, 1.733, 1.82, 1.944, 2.258, 2.596, 2.933,\n # 1.5929, 1.601, 1.644, 1.725, 1.838, 2.124, 2.435, 2.746,\n # 1.4551, 1.465, 1.504, 1.574, 1.67, 1.913, 2.181, 2.451,\n # 1.3551, 1.365, 1.4, 1.461, 1.544, 1.754, 1.989, 2.228,\n # 1.28, 1.289, 1.321, 1.374, 1.447, 1.63, 1.838, 2.053,\n # 1.2219, 1.231, 1.259, 1.306, 1.37, 1.532, 1.718, 1.912,\n # 1.1757, 1.184, 1.209, 1.251, 1.307, 1.451, 1.618, 1.795,\n # 1.0933, 1.1, 1.119, 1.15, 1.193, 1.304, 1.435, 1.578,\n # 1.0388, 1.044, 1.059, 1.083, 1.117, 1.204, 1.31, 1.428,\n # 0.99963, 1.004, 1.016, 1.035, 1.062, 1.133, 1.22, 1.319,\n # 0.96988, 0.9732, 0.983, 0.9991, 1.021, 1.079, 1.153, 1.236,\n # 0.92676, 0.9291, 0.936, 0.9473, 0.9628, 1.005, 1.058, 1.121,\n # 0.89616, 0.8979, 0.903, 0.9114, 0.923, 0.9545, 0.9955, 1.044,\n # 0.87272, 0.8741, 0.878, 0.8845, 0.8935, 0.9181, 0.9505, 0.9893,\n # 0.85379, 0.8549, 0.858, 0.8632, 0.8703, 0.8901, 0.9164, 0.9482,\n # 0.83795, 0.8388, 0.8414, 0.8456, 0.8515, 0.8678, 0.8895, 0.916,\n # 0.82435, 0.8251, 0.8273, 0.8308, 0.8356, 0.8493, 0.8676, 0.8901,\n # 0.80184, 0.8024, 0.8039, 0.8065, 0.8101, 0.8201, 0.8337, 0.8504,\n # 0.78363, 0.784, 0.7852, 0.7872, 0.7899, 0.7976, 0.8081, 0.8212,\n # 0.76834, 0.7687, 0.7696, 0.7712, 0.7733, 0.7794, 0.7878, 0.7983,\n # 0.75518, 0.7554, 0.7562, 0.7575, 0.7592, 0.7642, 0.7711, 0.7797,\n # 0.74364, 0.7438, 0.7445, 
0.7455, 0.747, 0.7512, 0.7569, 0.7642,\n # 0.71982, 0.72, 0.7204, 0.7211, 0.7221, 0.725, 0.7289, 0.7339,\n # 0.70097, 0.7011, 0.7014, 0.7019, 0.7026, 0.7047, 0.7076, 0.7112,\n # 0.68545, 0.6855, 0.6858, 0.6861, 0.6867, 0.6883, 0.6905, 0.6932,\n # 0.67232, 0.6724, 0.6726, 0.6728, 0.6733, 0.6743, 0.6762, 0.6784,\n # 0.65099, 0.651, 0.6512, 0.6513, 0.6516, 0.6524, 0.6534, 0.6546,\n # 0.61397, 0.6141, 0.6143, 0.6145, 0.6147, 0.6148, 0.6148, 0.6147,\n # 0.5887, 0.5889, 0.5894, 0.59, 0.5903, 0.5901, 0.5895, 0.5885]\n\n # #Find for each fixed tr the poly of deg 6 in dst approx omega22 values\n # #store the poly coefs in m_o22poly\n # m_o22poly = []\n # for i in range(37):\n # dstDeg = 6\n # #Polynomial coefficients, highest power first\n # polycoefs = np.polyfit(dstTab,omegaTab[8*i:8*(i+1)],dstDeg)\n # m_o22poly.append(polycoefs)\n\n # #Find 3 referenced temp points around tr\n # for i in range(37):\n # if tr<trTab[i]:\n # break\n # i1 = max(i-1, 0)\n # i2 = i1+3\n # if (i2 > 36):\n # i2 = 36\n # i1 = i2 - 3\n # #compute omega22 value for these 3 points\n # values = []\n # for j in range(i1,i2):\n # if (dst == 0.0):\n # values.append(omegaTab[8*j])\n # else:\n # poly6 = np.poly1d(m_o22poly[j])\n # values.append(poly6(dst))\n\n # #interpolate to find real tr value\n # trTab_log = []\n # for j in range(len(trTab)):\n # trTab_log.append(np.log(trTab[j]))\n # #print trTab_log[i1:i2], values\n # om22_interp = self.quadInterp(np.log(tr), trTab_log[i1:i2], values)\n\n # return om22_interp\n\n def qinterp(self, x0, x, y):\n\n val1 = y[0] + (x0 - x[0]) * (y[1] - y[0]) / (x[1] - x[0])\n val2 = y[1] + (x0 - x[1]) * (y[2] - y[1]) / (x[2] - x[1])\n fac1 = (x0 - x[0]) / ((x[1] - x[0]) / 2.0)\n fac2 = (x[2] - x0) / ((x[2] - x[1]) / 2.0)\n if x0 >= x[1]:\n val = (val1 * fac2 + val2) / (1.0 + fac2)\n else:\n val = (val1 + val2 * fac1) / (1.0 + fac1)\n return val\n\n # def quadInterp(self, x0, x, y):\n\n # dx21 = x[1] - x[0]\n # dx32 = x[2] - x[1]\n # dx31 = dx21 + dx32\n # dy32 = y[2] - y[1]\n # dy21 = y[1] - y[0]\n # a = (dx21*dy32 - dy21*dx32)/(dx21*dx31*dx32)\n # return a*(x0 - x[0])*(x0 - x[1]) + (dy21/dx21)*(x0 - x[1]) + y[1]\n\n def _generateTransRoutineSimple(self, nametab, id, speciesTransport):\n\n # self._write('#if defined(BL_FORT_USE_UPPERCASE)')\n # self._write('#define %s %s' % (nametab[0], nametab[1]))\n # self._write('#elif defined(BL_FORT_USE_LOWERCASE)')\n # self._write('#define %s %s' % (nametab[0], nametab[2]))\n # self._write('#elif defined(BL_FORT_USE_UNDERSCORE)')\n # self._write('#define %s %s' % (nametab[0], nametab[3]))\n # self._write('#endif')\n\n self._write(\"subroutine %s(%s)\" % (nametab[0], nametab[4]))\n self._indent()\n self._write()\n self._write(\"implicit none\")\n self._write()\n self._write(\n \"double precision, intent(out) :: %s(%d)\"\n % (nametab[4], len(speciesTransport))\n )\n self._write()\n for species in speciesTransport:\n self._write(\n \"%s(%d) = %s\"\n % (\n nametab[4],\n species.id + 1,\n format(\n float(speciesTransport[species][id]), \".8e\"\n ).replace(\"e\", \"d\"),\n )\n )\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n def _generateTransRoutineInteger(self, nametab, expression):\n\n # self._write('#if defined(BL_FORT_USE_UPPERCASE)')\n # self._write('#define %s %s' % (nametab[0], nametab[1]))\n # self._write('#elif defined(BL_FORT_USE_LOWERCASE)')\n # self._write('#define %s %s' % (nametab[0], nametab[2]))\n # self._write('#elif defined(BL_FORT_USE_UNDERSCORE)')\n # self._write('#define %s %s' % (nametab[0], 
nametab[3]))\n # self._write('#endif')\n\n self._write(\"subroutine %s(%s)\" % (nametab[0], nametab[4]))\n self._indent()\n self._write()\n self._write(\"implicit none\")\n self._write()\n self._write(\"integer, intent(out) :: %s\" % nametab[4])\n self._write()\n self._write(\"%s = %d\" % (nametab[4], expression))\n self._outdent()\n self._write()\n self._write(\"end subroutine\")\n\n return\n\n def _getCVdRspecies(self, t, species):\n\n models = species.thermo\n m1 = models[0]\n m2 = models[1]\n\n if m1.lowT < m2.lowT:\n lowRange = m1\n highRange = m2\n else:\n lowRange = m2\n highRange = m1\n\n low = lowRange.lowT\n mid = lowRange.highT\n high = highRange.highT\n\n if t < mid:\n parameters = lowRange.parameters\n else:\n parameters = highRange.parameters\n\n return (\n (parameters[0] - 1.0)\n + parameters[1] * t\n + parameters[2] * t * t\n + parameters[3] * t * t * t\n + parameters[4] * t * t * t * t\n )\n\n def _analyzeThermodynamics(self, mechanism):\n lowT = 0.0\n highT = 1000000.0\n\n midpoints = {}\n\n for species in mechanism.species():\n\n models = species.thermo\n if len(models) > 2:\n print(\"species: \", species)\n import pyre\n\n pyre.debug.Firewall.hit(\n \"unsupported configuration in species.thermo\"\n )\n return\n\n m1 = models[0]\n m2 = models[1]\n\n if m1.lowT < m2.lowT:\n lowRange = m1\n highRange = m2\n else:\n lowRange = m2\n highRange = m1\n\n low = lowRange.lowT\n mid = lowRange.highT\n high = highRange.highT\n\n if low > lowT:\n lowT = low\n if high < highT:\n highT = high\n\n midpoints.setdefault(mid, []).append(\n (species, lowRange, highRange)\n )\n\n self.lowT = lowT\n self.highT = highT\n return lowT, highT, midpoints\n\n def _analyzeTransport(self, mechanism):\n\n transdata = {}\n\n for species in mechanism.species():\n\n models = species.trans\n if len(models) > 2:\n print(\"species: \", species)\n import pyre\n\n pyre.debug.Firewall.hit(\n \"unsupported configuration in species.trans\"\n )\n return\n\n m1 = models[0]\n\n lin = m1.parameters[0]\n eps = m1.eps\n sig = m1.sig\n dip = m1.dip\n pol = m1.pol\n zrot = m1.zrot\n\n # print \"TRANSPORT DATA FOR SPEC\", species.symbol, \" is\", lin, eps, sig, dip, pol, zrot\n\n transdata[species] = [lin, eps, sig, dip, pol, zrot]\n\n return transdata\n\n # def _Kc(self, mechanism, reaction):\n\n # dim = 0\n # dG = \"\"\n\n # terms = []\n # for symbol, coefficient in reaction.reactants:\n # if coefficient == 1:\n # factor = \"\"\n # else:\n # factor = \"%d * \" % coefficient\n #\n # terms.append(\"%sg_RT[%d]\" % (factor, mechanism.species(symbol).id))\n # dim -= coefficient\n # dG += '(' + ' + '.join(terms) + ')'\n\n # # flip the signs\n # terms = []\n # for symbol, coefficient in reaction.products:\n # if coefficient == 1:\n # factor = \"\"\n # else:\n # factor = \"%d * \" % coefficient\n # terms.append(\"%sg_RT[%d]\" % (factor, mechanism.species(symbol).id))\n # dim += coefficient\n # dG += ' - (' + ' + '.join(terms) + ')'\n\n # K_p = 'exp(' + dG + ')'\n\n # if dim == 0:\n # conversion = \"\"\n # elif dim > 0:\n # conversion = \"*\".join([\"refC\"] * dim) + ' * '\n # else:\n # conversion = \"1.0 / (\" + \"*\".join([\"refC\"] * abs(dim)) + ') * '\n\n # K_c = conversion + K_p\n\n # return K_c\n\n def _KcConv(self, mechanism, reaction):\n dim = 0\n for symbol, coefficient in reaction.reactants:\n dim -= coefficient\n # flip the signs\n for symbol, coefficient in reaction.products:\n dim += coefficient\n\n if dim == 0:\n conversion = \"\"\n elif dim > 0:\n conversion = \"*\".join([\"refC\"] * dim)\n else:\n conversion = 
\"*\".join([\"refCinv\"] * abs(dim))\n\n return conversion\n\n def _sortedKcExpArg(self, mechanism, reaction):\n\n nSpecies = len(mechanism.species())\n\n terms = []\n for i in range(nSpecies):\n terms.append(\"\")\n for symbol, coefficient in reaction.reactants:\n if coefficient == 1:\n factor = \" + \"\n else:\n factor = \" + %d*\" % coefficient\n i = mechanism.species(symbol).id\n terms[i] += \"%sg_RT(%d)\" % (factor, i + 1)\n\n for symbol, coefficient in reaction.products:\n if coefficient == 1:\n factor = \" - \" # flip the signs\n else:\n factor = \" - %d*\" % coefficient\n i = mechanism.species(symbol).id\n terms[i] += \"%sg_RT(%d)\" % (factor, i + 1)\n\n dG = \"\"\n for i in range(nSpecies):\n if terms[i]:\n dG += terms[i]\n if dG[0:3] == \" + \":\n return dG[3:]\n else:\n return \"-\" + dG[3:]\n\n # def _sortedKc(self, mechanism, reaction):\n # conv = self._KcConv(mechanism,reaction)\n # exparg = self._sortedKcExpArg(mechanism,reaction)\n # if conv:\n # return conv + ' * exp('+exparg+')'\n # else:\n # return 'exp('+exparg+')'\n\n # def _vKc(self, mechanism, reaction):\n\n # dim = 0\n # dG = \"\"\n\n # terms = []\n # for symbol, coefficient in sorted(reaction.reactants,\n # key=lambda x: mechanism.species(x[0]).id):\n # if coefficient == 1:\n # factor = \"\"\n # else:\n # factor = \"%d * \" % coefficient\n #\n # terms.append(\"%sg_RT[%d*npt+i]\" % (factor, mechanism.species(symbol).id))\n # dim -= coefficient\n # dG += '(' + ' + '.join(terms) + ')'\n\n # # flip the signs\n # terms = []\n # for symbol, coefficient in sorted(reaction.products,\n # key=lambda x: mechanism.species(x[0]).id):\n # if coefficient == 1:\n # factor = \"\"\n # else:\n # factor = \"%d * \" % coefficient\n # terms.append(\"%sg_RT[%d*npt+i]\" % (factor, mechanism.species(symbol).id))\n # dim += coefficient\n # dG += ' - (' + ' + '.join(terms) + ')'\n\n # K_p = 'exp(' + dG + ')'\n\n # if dim == 0:\n # conversion = \"\"\n # elif dim > 0:\n # conversion = \"*\".join([\"refC\"] * dim) + ' * '\n # else:\n # conversion = \"*\".join([\"refCinv\"] * abs(dim)) + ' * '\n\n # K_c = conversion + K_p\n\n # return K_c\n\n # def _Kc_exparg(self, mechanism, reaction):\n\n # dG = \"\"\n\n # terms = []\n # for symbol, coefficient in reaction.reactants:\n # if coefficient == 1:\n # factor = \"\"\n # else:\n # factor = \"%d * \" % coefficient\n #\n # terms.append(\"%sg_RT[%d]\" % (factor, mechanism.species(symbol).id))\n # dG += '(' + ' + '.join(terms) + ')'\n\n # # flip the signs\n # terms = []\n # for symbol, coefficient in reaction.products:\n # if coefficient == 1:\n # factor = \"\"\n # else:\n # factor = \"%d * \" % coefficient\n # terms.append(\"%sg_RT[%d]\" % (factor, mechanism.species(symbol).id))\n # dG += ' - (' + ' + '.join(terms) + ')'\n\n # K_p = 'exp(' + dG + ')'\n\n # return dG\n\n def _cpNASA(self, parameters):\n self._write(\"%s &\" % (\"%+15.8e\" % parameters[0]).replace(\"e\", \"d\"))\n self._write(\n \"%s &\" % (\"%+15.8e * tc(2)\" % parameters[1]).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\" % (\"%+15.8e * tc(3)\" % parameters[2]).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\" % (\"%+15.8e * tc(4)\" % parameters[3]).replace(\"e\", \"d\")\n )\n self._write(\n \"%s\" % (\"%+15.8e * tc(5)\" % parameters[4]).replace(\"e\", \"d\")\n )\n return\n\n # def _dcpdTNASA(self, parameters):\n # self._write('%+15.8e' % parameters[1])\n # self._write('%+15.8e * tc[1]' % (parameters[2]*2.))\n # self._write('%+15.8e * tc[2]' % (parameters[3]*3.))\n # self._write('%+15.8e * tc[3];' % (parameters[4]*4.))\n # 
return\n\n def _cvNASA(self, parameters):\n self._write(\n \"%s &\" % (\"%+15.8e\" % (parameters[0] - 1.0)).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\" % (\"%+15.8e * tc(2)\" % parameters[1]).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\" % (\"%+15.8e * tc(3)\" % parameters[2]).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\" % (\"%+15.8e * tc(4)\" % parameters[3]).replace(\"e\", \"d\")\n )\n self._write(\n \"%s\" % (\"%+15.8e * tc(5)\" % parameters[4]).replace(\"e\", \"d\")\n )\n return\n\n def _enthalpyNASA(self, parameters):\n self._write(\"%s &\" % (\"%+15.8e\" % parameters[0]).replace(\"e\", \"d\"))\n self._write(\n \"%s &\"\n % (\"%+15.8e * tc(2)\" % ((parameters[1] / 2))).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\"\n % (\"%+15.8e * tc(3)\" % ((parameters[2] / 3))).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\"\n % (\"%+15.8e * tc(4)\" % ((parameters[3] / 4))).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\"\n % (\"%+15.8e * tc(5)\" % ((parameters[4] / 5))).replace(\"e\", \"d\")\n )\n self._write(\n \"%s\" % (\"%+15.8e * invT\" % (parameters[5])).replace(\"e\", \"d\")\n )\n return\n\n def _internalEnergy(self, parameters):\n self._write(\n \"%s &\" % (\"%+15.8e\" % (parameters[0] - 1.0)).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\"\n % (\"%+15.8e * tc(2)\" % ((parameters[1] / 2))).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\"\n % (\"%+15.8e * tc(3)\" % ((parameters[2] / 3))).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\"\n % (\"%+15.8e * tc(4)\" % ((parameters[3] / 4))).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\"\n % (\"%+15.8e * tc(5)\" % ((parameters[4] / 5))).replace(\"e\", \"d\")\n )\n self._write(\n \"%s\" % (\"%+15.8e * invT\" % (parameters[5])).replace(\"e\", \"d\")\n )\n return\n\n def _gibbsNASA(self, parameters):\n self._write(\n \"%s &\" % (\"%+20.15e * invT\" % (parameters[5])).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\"\n % (\"%+20.15e\" % (parameters[0] - parameters[6])).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\" % (\"%+20.15e * tc(1)\" % (-parameters[0])).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\"\n % (\"%+20.15e * tc(2)\" % ((-parameters[1] / 2))).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\"\n % (\"%+20.15e * tc(3)\" % ((-parameters[2] / 6))).replace(\"e\", \"d\")\n )\n self._write(\n \"%s &\"\n % (\"%+20.15e * tc(4)\" % ((-parameters[3] / 12))).replace(\"e\", \"d\")\n )\n self._write(\n \"%s\"\n % (\"%+20.15e * tc(5)\" % ((-parameters[4] / 20))).replace(\"e\", \"d\")\n )\n return\n\n # def _helmholtzNASA(self, parameters):\n # self._write('%+15.8e * invT' % parameters[5])\n # self._write('%+15.8e' % (parameters[0] - parameters[6] - 1.0))\n # self._write('%+15.8e * tc[0]' % (-parameters[0]))\n # self._write('%+15.8e * tc[1]' % (-parameters[1]/2))\n # self._write('%+15.8e * tc[2]' % (-parameters[2]/6))\n # self._write('%+15.8e * tc[3]' % (-parameters[3]/12))\n # self._write('%+15.8e * tc[4];' % (-parameters[4]/20))\n # return\n\n # def _entropyNASA(self, parameters):\n # self._write('%+15.8e * tc[0]' % parameters[0])\n # self._write('%+15.8e * tc[1]' % (parameters[1]))\n # self._write('%+15.8e * tc[2]' % (parameters[2]/2))\n # self._write('%+15.8e * tc[3]' % (parameters[3]/3))\n # self._write('%+15.8e * tc[4]' % (parameters[4]/4))\n # self._write('%+15.8e ;' % (parameters[6]))\n # return\n\n def _T_given_ey(self, mechanism):\n nSpec = len(self.species)\n self._write()\n self._write(\n \"! 
get temperature given internal energy in mass units and mass fracs\"\n )\n self._write(\"subroutine get_t_given_ey(e, y, iwrk, rwrk, t, ierr)\")\n self._write()\n self._indent()\n self._write(\"implicit none\")\n self._write()\n self._write(\"double precision, intent(in) :: e\")\n self._write(\"double precision, intent(in) :: y(%d)\" % nSpec)\n self._write(\"integer, intent(in) :: iwrk\")\n self._write(\"double precision, intent(in) :: rwrk\")\n self._write(\"double precision, intent(out) :: t\")\n self._write(\"integer, intent(out) :: ierr\")\n self._write()\n self._outdent()\n self._write(\"#ifdef CONVERGENCE\")\n self._indent()\n self._write(\"integer, parameter :: maxiter = 5000\")\n self._write(\"double precision, parameter tol = 1.d-12\")\n self._outdent()\n self._write(\"#else\")\n self._indent()\n self._write(\"integer, parameter :: maxiter = 200\")\n self._write(\"double precision, parameter :: tol = 1.d-6\")\n self._outdent()\n self._write(\"#endif\")\n self._write()\n self._indent()\n self._write(\"double precision :: ein\")\n self._write(\n \"double precision, parameter :: tmin = 90.d0\"\n + \" ! max lower bound for thermo def\"\n )\n self._write(\n \"double precision, parameter :: tmax = 4000.d0\"\n + \" ! min upper bound for thermo def\"\n )\n self._write(\"double precision :: e1,emin,emax,cv,t1,dt\")\n self._write(\"integer :: i\" + \" ! loop counter\")\n self._write()\n self._write(\"ein = e\")\n self._write()\n self._write(\"call ckubms(tmin, y, iwrk, rwrk, emin)\")\n self._write(\"call ckubms(tmax, y, iwrk, rwrk, emax)\")\n self._write()\n self._write(\"if (ein < emin) then\")\n self._indent()\n self._write(\"! Linear Extrapolation below tmin\")\n self._write(\"call ckcvbs(tmin, y, iwrk, rwrk, cv)\")\n self._write(\"t = tmin - (emin-ein) / cv\")\n self._write(\"ierr = 1\")\n self._write(\"return\")\n self._outdent()\n self._write(\"end if\")\n self._write(\"if (ein > emax) then\")\n self._indent()\n self._write(\"! Linear Extrapolation above tmax\")\n self._write(\"call ckcvbs(tmax, y, iwrk, rwrk, cv)\")\n self._write(\"t = tmax - (emax - ein) / cv\")\n self._write(\"ierr = 1\")\n self._write(\"return\")\n self._outdent()\n self._write(\"end if\")\n self._write(\"t1 = t\")\n self._write(\"if (t1 < tmin .or. 
t1 > tmax) then\")\n self._indent()\n self._write(\"t1 = tmin + (tmax-tmin)/(emax-emin)*(ein-emin)\")\n self._outdent()\n self._write(\"end if\")\n self._write(\"do i=1, maxiter\")\n self._indent()\n self._write(\"call ckubms(t1,y,iwrk,rwrk,e1)\")\n self._write(\"call ckcvbs(t1,y,iwrk,rwrk,cv)\")\n self._write(\"dt = (ein - e1) / cv\")\n self._write(\"if (dt > 100.d0) then\")\n self._indent()\n self._write(\"dt = 100.d0\")\n self._outdent()\n self._write(\"else if (dt < -100.d0) then\")\n self._indent()\n self._write(\"dt = -100.d0\")\n self._outdent()\n self._write(\"else if (abs(dt) < tol) then\")\n self._indent()\n self._write(\"exit\")\n self._outdent()\n self._write(\"else if (t1+dt == t1) then\")\n self._indent()\n self._write(\"exit\")\n self._outdent()\n self._write(\"end if\")\n self._write(\"t1 = t1 + dt\")\n self._outdent()\n self._write(\"end do\")\n self._write()\n self._write(\"t = t1\")\n self._write(\"ierr = 0\")\n self._write()\n self._outdent()\n self._write(\"end subroutine\")\n\n # def _T_given_hy(self, mechanism):\n # self._write(self.line(' get temperature given enthalpy in mass units and mass fracs'))\n # self._write('void GET_T_GIVEN_HY(double * restrict h, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict t, int * ierr)')\n # self._write('{')\n # self._write('#ifdef CONVERGENCE')\n # self._indent()\n # self._write('const int maxiter = 5000;')\n # self._write('const double tol = 1.e-12;')\n # self._outdent()\n # self._write('#else')\n # self._indent()\n # self._write('const int maxiter = 200;')\n # self._write('const double tol = 1.e-6;')\n # self._outdent()\n # self._write('#endif')\n # self._indent()\n # self._write('double hin = *h;')\n # self._write('double tmin = 90;'+self.line('max lower bound for thermo def'))\n # self._write('double tmax = 4000;'+self.line('min upper bound for thermo def'))\n # self._write('double h1,hmin,hmax,cp,t1,dt;')\n # self._write('int i;'+self.line(' loop counter'))\n # self._write('CKHBMS(&tmin, y, iwrk, rwrk, &hmin);')\n # self._write('CKHBMS(&tmax, y, iwrk, rwrk, &hmax);')\n # self._write('if (hin < hmin) {')\n # self._indent()\n # self._write(self.line('Linear Extrapolation below tmin'))\n # self._write('CKCPBS(&tmin, y, iwrk, rwrk, &cp);')\n # self._write('*t = tmin - (hmin-hin)/cp;')\n # self._write('*ierr = 1;')\n # self._write('return;')\n # self._outdent()\n # self._write('}')\n # self._write('if (hin > hmax) {')\n # self._indent()\n # self._write(self.line('Linear Extrapolation above tmax'))\n # self._write('CKCPBS(&tmax, y, iwrk, rwrk, &cp);')\n # self._write('*t = tmax - (hmax-hin)/cp;')\n # self._write('*ierr = 1;')\n # self._write('return;')\n # self._outdent()\n # self._write('}')\n # self._write('t1 = *t;')\n # self._write('if (t1 < tmin || t1 > tmax) {')\n # self._indent()\n # self._write('t1 = tmin + (tmax-tmin)/(hmax-hmin)*(hin-hmin);')\n # self._outdent()\n # self._write('}')\n # self._write('for (i = 0; i < maxiter; ++i) {')\n # self._indent()\n # self._write('CKHBMS(&t1,y,iwrk,rwrk,&h1);')\n # self._write('CKCPBS(&t1,y,iwrk,rwrk,&cp);')\n # self._write('dt = (hin - h1) / cp;')\n # self._write('if (dt > 100.) { dt = 100.; }')\n # self._write('else if (dt < -100.) 
{ dt = -100.; }')\n # self._write('else if (fabs(dt) < tol) break;')\n # self._write('else if (t1+dt == t1) break;')\n # self._write('t1 += dt;')\n # self._outdent()\n # self._write('}')\n # self._write('*t = t1;')\n # self._write('*ierr = 0;')\n # self._write('return;')\n # self._outdent()\n # self._write('}')\n\n\n# version\n# __id__ = \"$Id$\"\n\n# End of file\n" ]
[ [ "numpy.log", "numpy.sqrt", "numpy.polyfit", "numpy.zeros" ] ]
benodry/tensorpack-medical
[ "c9d4006262ab16dc04980215778a740343ea5dcd" ]
[ "examples/AutomaticViewPlanning/DQN/sampleTrain.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: sampleTrain.py\n# Author: Amir Alansary <[email protected]>\n\n\n\nimport warnings\nwarnings.simplefilter(\"ignore\", category=ResourceWarning)\n\n\n\nimport numpy as np\nimport SimpleITK as sitk\nfrom tensorpack import logger\nfrom IPython.core.debugger import set_trace\n\n\n__all__ = ['files', 'filesCardio', 'filesFetalUS', 'filesListBrainMRPlane',\n 'filesCardioPlane', 'filesListCardioMRPlane', 'NiftiImage']\n\n#######################################################################\n## list file/directory names\nimport glob\nimport os\nimport re\n\ndef alphanum_key(s):\n \"\"\" Turn a string into a list of string and number chunks.\n \"z23a\" -> [\"z\", 23, \"a\"]\n \"\"\"\n return [ tryint(c) for c in re.split('([0-9]+)', s) ]\n\ndef tryint(s):\n try:\n return int(s)\n except:\n return s\n\ndef listFiles(dirpath, dirnames):\n curpath = os.getcwd()\n os.chdir(os.path.abspath(dirpath))\n f = glob.glob(dirnames)\n f.sort(key=alphanum_key)\n os.chdir(curpath)\n return f\n\n\n#######################################################################\n## extract points from xml file\nimport xml.etree.ElementTree as ET\n\ndef extractPointsXML(filename):\n\n tree = ET.parse(filename)\n root = tree.getroot()\n\n x = []\n y = []\n z = []\n\n for point in root[1].findall('time_series/point'):\n x.append(float(point[2].text))\n y.append(float(point[3].text))\n z.append(float(point[4].text))\n\n return x,y,z\n\n#######################################################################\n## extract points from txt file\ndef extractPointsTXT(filename):\n x = []\n y = []\n z = []\n with open(filename) as f:\n for line in f:\n point = line.split()\n x.append(float(point[0]))\n y.append(float(point[1]))\n z.append(float(point[2]))\n\n return x,y,z\n\n#######################################################################\n## extract points from vtk file\ndef getLandmarksFromVTKFile(file):\n ''' 0-2 RV insert points\n 1 -> RV lateral wall turning point\n 3 -> LV lateral wall mid-point\n 4 -> apex\n 5-> center of the mitral valve\n '''\n with open(file) as fp:\n landmarks = []\n for i, line in enumerate(fp):\n if i == 5:\n landmarks.append([float(k) for k in line.split()])\n elif i == 6:\n landmarks.append([float(k) for k in line.split()])\n elif i > 6:\n landmarks = np.asarray(landmarks).reshape((-1,3))\n landmarks[:,[0, 1]] = -landmarks[:,[0, 1]]\n return landmarks\n\n#######################################################################\n## extract points from txt file\ndef getLandmarksFromTXTFile(file):\n ''' 1->3 Splenium of corpus callosum\n (outer aspect, inferior tip and inner aspect (1,2,3)),\n 4,5 Genu of corpus callosum (outer and inner aspect (4,5)),\n 6,7 Superior and inferior aspect of pons (6,7),\n 8,16 Superior and inferior aspect cerebellum (8,16),\n 9 Fourth ventricle (9),\n 10->13 Putamen posterior and anterior (10,11)(left), (12,13)(right),\n 14,15 Anterior and posterior commissure (14,15),\n 17,18 Anterior tip of lateral ventricle (left and right) (17,18),\n 19,20 Inferior tip of lateral ventricle (left and right) (19,20)\n '''\n with open(file) as fp:\n landmarks = []\n for i, line in enumerate(fp):\n landmarks.append([float(k) for k in line.split(',')])\n landmarks = np.asarray(landmarks).reshape((-1,3))\n return landmarks\n#######################################################################\n\nclass files(object):\n \"\"\" A class for managing train files\n\n Attributes:\n directory: input data directo\n \"\"\"\n\n def 
__init__(self, directory=None):\n\n assert directory, 'There is no directory containing training files given'\n\n self.dir = directory\n\n # todo make it generic for directories and files with different scenarios\n self.images_list = self._listImages()\n self.landmarks_list = self._listLandmarks()\n self.all_landmarks_list = self._listLandmarks_all()\n\n\n def _listImages(self):\n\n childDirs = listFiles(self.dir,'*')\n\n image_files = []\n\n for child in childDirs:\n dir_path = os.path.join(self.dir, child)\n if not(os.path.isdir(dir_path)): continue\n # todo: extend to all nifti image extensions\n file_name = listFiles(dir_path,'*.nii.gz')\n file_path = os.path.join(dir_path, file_name[0])\n image_files.append(file_path)\n\n return image_files\n\n\n def _listLandmarks(self):\n\n childDirs = listFiles(self.dir,'*')\n landmarks = []\n\n for child in childDirs:\n dir_path = os.path.join(self.dir, child)\n if not(os.path.isdir(dir_path)): continue\n\n file_name = listFiles(dir_path,'*.mps')\n file_path = os.path.join(dir_path, file_name[0])\n points = np.array(extractPointsXML(file_path))\n landmarks.append(np.array(points[:,2]))\n\n return landmarks\n\n\n def _listLandmarks_all(self):\n # extend directory path\n current_dir = self.dir + '/landmarks'\n childDirs = listFiles(current_dir,'*.txt')\n landmarks = []\n\n for child in childDirs:\n file_name = os.path.join(current_dir, child)\n file_path = os.path.join(current_dir, file_name)\n points = np.array(extractPointsTXT(file_path))\n landmark = np.array(points) # all landmark point\n landmarks.append(landmark)\n\n return landmarks\n\n\n def sample_random(self):\n \"\"\" return a random sampled ImageRecord from the list of files\n \"\"\"\n # todo: fix seed for a fair comparison between models\n random_idx = np.random.randint(low=0, high=len(self.images_list))\n sitk_image, image = NiftiImage().decode(self.images_list[random_idx])\n landmark = np.array(sitk_image.TransformPhysicalPointToIndex(self.landmarks_list[random_idx]))\n return image, landmark, random_idx\n\n\n def sample_circular(self,shuffle=False):\n \"\"\" return a random sampled ImageRecord from the list of files\n \"\"\"\n if shuffle:\n indexes = rng.choice(x,len(x),replace=False)\n else:\n indexes = np.arange(self.num_files)\n\n while True:\n for idx in indexes:\n sitk_image, image = NiftiImage().decode(self.images_list[idx])\n landmark = np.array(sitk_image.TransformPhysicalPointToIndex(self.landmarks_list[idx]))\n image_filename = self.images_list[idx][:-7]\n yield image, landmark, image_filename\n\n @property\n def num_files(self):\n return len(self.images_list)\n\n # def _get_target_loc(self,filename):\n # ''' return the center of mass of a given label (target location)\n # '''\n # label_image = NiftiImage().decode_nifti(self.label_file)\n # return np.round(center_of_mass(label_image.data))\n###############################################################################\n\nclass filesListCardioMRPlane(files):\n \"\"\" A class for managing train files for Ozan mri cardio data\n\n Attributes:\n directory: input data directo\n \"\"\"\n def __init__(self, directory=None, files_list=None):\n\n assert directory, 'There is no directory containing training files given'\n assert files_list, 'There is no directory containing files list'\n\n self.dir = directory\n self.files_list = [line.split('\\n')[0] for line in open(files_list)]\n # todo make it generic for directories and files with different scenarios\n # self.images_3d_list = self._listImages('/3DLV/')\n self.images_3d_list = 
self._listImages('/3DLV_iso_1mm/')\n self.images_2ch_list = self._listImages('/2CH_rreg/')\n self.images_4ch_list = self._listImages('/4CH_rreg_fix_orientation/')\n self.landmarks_list = self._listLandmarks('/landmarks_new/')\n\n\n @property\n def num_files(self):\n return len(self.files_list)\n\n def _listImages(self, suffix):\n # extend directory path\n current_dir = self.dir + suffix\n image_files = []\n for filename in self.files_list:\n file_path = os.path.join(current_dir, filename) #+ '.nii.gz')\n image_files.append(file_path)\n\n return image_files\n\n def _listLandmarks(self, suffix):\n # extend directory path\n current_dir = self.dir + suffix\n landmark_files = []\n for filename in self.files_list:\n filename = filename[:-7] + '.vtk'\n file_path = os.path.join(current_dir, filename)\n landmark_files.append(file_path)\n\n return landmark_files\n\n\n def sample_circular(self,shuffle=False):\n \"\"\" return a random sampled ImageRecord from the list of files\n \"\"\"\n if shuffle:\n indexes = rng.choice(x,len(x),replace=False)\n else:\n indexes = np.arange(self.num_files)\n\n while True:\n for idx in indexes:\n # print('============================================')\n # print('images_3d_list[idx] {} \\nimages_2ch_list[idx] {} \\nimages_4ch_list[idx] {}'.format(self.images_3d_list[idx].split('/')[-1],self.images_2ch_list[idx].split('/')[-1],self.images_4ch_list[idx].split('/')[-1]))\n\n sitk_image3d, _ =NiftiImage().decode(self.images_3d_list[idx])\n sitk_image2ch, _=NiftiImage().decode(self.images_2ch_list[idx])\n sitk_image4ch, _=NiftiImage().decode(self.images_4ch_list[idx])\n landmarks = getLandmarksFromVTKFile(self.landmarks_list[idx])\n filename = self.images_3d_list[idx][:-7]\n # transform landmarks to image coordinates\n landmarks = [sitk_image3d.TransformPhysicalPointToContinuousIndex(point) for point in landmarks]\n\n yield sitk_image3d, sitk_image2ch, sitk_image4ch, landmarks, filename\n\n\n\n###############################################################################\n\nclass filesListBrainMRPlane(files):\n \"\"\" A class for managing train files for Ozan mri cardio data\n\n Attributes:\n directory: input data directo\n \"\"\"\n def __init__(self, directory=None, files_list=None):\n\n assert directory, 'There is no directory containing training files given'\n assert files_list, 'There is no directory containing files list'\n\n self.dir = directory\n self.files_list = [line.split('\\n')[0] for line in open(files_list)]\n # todo make it generic for directories and files with different scenarios\n # self.images_3d_list = self._listImages('/3DLV/')\n\n self.images_3d_list = self._listImages('/Normalized_MNI/')\n self.landmarks_list = self._listLandmarks('/LandmarksMAN/VoxelCoordinates/')\n\n\n @property\n def num_files(self):\n return len(self.files_list)\n\n def _listImages(self, suffix):\n # extend directory path\n current_dir = self.dir + suffix\n image_files = []\n for filename in self.files_list:\n file_path = os.path.join(current_dir, filename) #+ '.nii.gz')\n image_files.append(file_path)\n\n return image_files\n\n def _listLandmarks(self, suffix):\n # extend directory path\n current_dir = self.dir + suffix\n landmark_files = []\n for filename in self.files_list:\n filename = filename[:-32] + '.txt'\n file_path = os.path.join(current_dir, filename)\n landmark_files.append(file_path)\n\n return landmark_files\n\n\n def sample_circular(self,shuffle=False):\n \"\"\" return a random sampled ImageRecord from the list of files\n \"\"\"\n if shuffle:\n indexes = 
rng.choice(x,len(x),replace=False)\n else:\n indexes = np.arange(self.num_files)\n\n while True:\n for idx in indexes:\n # print('============================================')\n # print('images_3d_list[idx] {} \\nimages_2ch_list[idx] {} \\nimages_4ch_list[idx] {}'.format(self.images_3d_list[idx].split('/')[-1],self.images_2ch_list[idx].split('/')[-1],self.images_4ch_list[idx].split('/')[-1]))\n\n sitk_image3d, _ =NiftiImage().decode(self.images_3d_list[idx])\n landmarks = getLandmarksFromTXTFile(self.landmarks_list[idx])\n filename = self.images_3d_list[idx][:-7]\n\n yield sitk_image3d, landmarks, filename\n\n\n\n\n\n\n\n\n\n\n###############################################################################\n\nclass filesCardioPlane(object):\n \"\"\" A class for managing train files for Ozan mri cardio data\n\n Attributes:\n directory: input data directory\n \"\"\"\n\n def __init__(self, directory=None):\n\n assert directory, 'There is no directory containing training files given'\n\n self.dir = directory\n\n # todo make it generic for directories and files with different scenarios\n self.images_3d_list = self._listImages('/3DLV_1mm_iso/')\n # self.images_3d_list = self._listImages('/3DLV/')\n self.images_2ch_list = self._listImages('/2CH_rreg/')\n self.images_4ch_list = self._listImages('/4CH_rreg_fix_orient/')\n\n def _listImages(self,suffix):\n # extend directory path\n current_dir = self.dir + suffix\n childDirs = listFiles(current_dir,'*.nii.gz')\n image_files = []\n\n for child in childDirs:\n file_name = os.path.join(current_dir, child)\n file_path = os.path.join(current_dir, file_name)\n image_files.append(file_path)\n\n return image_files\n\n\n def sample_circular(self,shuffle=False):\n \"\"\" return a random sampled ImageRecord from the list of files\n \"\"\"\n if shuffle:\n indexes = rng.choice(x,len(x),replace=False)\n else:\n indexes = np.arange(self.num_files)\n\n while True:\n for idx in indexes:\n # print('============================================')\n # print('images_3d_list[idx] {} \\nimages_2ch_list[idx] {} \\nimages_4ch_list[idx] {}'.format(self.images_3d_list[idx].split('/')[-1],self.images_2ch_list[idx].split('/')[-1],self.images_4ch_list[idx].split('/')[-1]))\n\n sitk_image3d, _ =NiftiImage().decode(self.images_3d_list[idx])\n sitk_image2ch, _=NiftiImage().decode(self.images_2ch_list[idx])\n sitk_image4ch, _=NiftiImage().decode(self.images_4ch_list[idx])\n image_filename = self.images_3d_list[idx][:-7]\n\n yield sitk_image3d, sitk_image2ch, sitk_image4ch, image_filename\n\n @property\n def num_files(self):\n return len(self.images_3d_list)\n\n###############################################################################\n###############################################################################\n\nclass filesCardio(files):\n \"\"\" A class for managing train files for Ozan mri cardio data\n\n Attributes:\n directory: input data directo\n \"\"\"\n\n def _listImages(self):\n # extend directory path\n current_dir = self.dir + '/images'\n childDirs = listFiles(current_dir,'*.nii.gz')\n image_files = []\n\n for child in childDirs:\n file_name = os.path.join(current_dir, child)\n file_path = os.path.join(current_dir, file_name)\n image_files.append(file_path)\n\n return image_files\n\n\n def _listLandmarks(self):\n # extend directory path\n current_dir = self.dir + '/landmarks'\n childDirs = listFiles(current_dir,'*.txt')\n landmarks = []\n\n for child in childDirs:\n file_name = os.path.join(current_dir, child)\n file_path = os.path.join(current_dir, file_name)\n 
points = np.array(extractPointsTXT(file_path))\n landmark = np.array(points[:,0]) # landmark point 0\n landmarks.append(landmark)\n\n return landmarks\n\n\n def _listLandmarks_all(self):\n # extend directory path\n current_dir = self.dir + '/landmarks'\n childDirs = listFiles(current_dir,'*.txt')\n landmarks = []\n\n for child in childDirs:\n file_name = os.path.join(current_dir, child)\n file_path = os.path.join(current_dir, file_name)\n points = np.array(extractPointsTXT(file_path))\n landmark = np.array(points) # all landmark point\n landmarks.append(landmark)\n\n return landmarks\n\n def sample_circular(self,shuffle=False):\n \"\"\" return a random sampled ImageRecord from the list of files\n \"\"\"\n if shuffle:\n indexes = rng.choice(x,len(x),replace=False)\n else:\n indexes = np.arange(self.num_files)\n\n while True:\n for idx in indexes:\n sitk_image, image = NiftiImage().decode(self.images_list[idx])\n landmark = self.landmarks_list[idx]\n image_filename = self.images_list[idx][:-7]\n yield image, landmark, image_filename\n\n###############################################################################\n\nclass filesFetalUS(files):\n \"\"\" A class for managing train files for Ozan mri cardio data\n\n Attributes:\n directory: input data directo\n \"\"\"\n\n def _listImages(self):\n # extend directory path\n current_dir = self.dir + '/images'\n childDirs = listFiles(current_dir,'*.nii.gz')\n image_files = []\n\n for child in childDirs:\n file_name = os.path.join(current_dir, child)\n file_path = os.path.join(current_dir, file_name)\n image_files.append(file_path)\n\n return image_files\n\n\n def _listLandmarks(self):\n # extend directory path\n current_dir = self.dir + '/landmarks'\n childDirs = listFiles(current_dir,'*.txt')\n landmarks = []\n\n for child in childDirs:\n file_name = os.path.join(current_dir, child)\n file_path = os.path.join(current_dir, file_name)\n points = np.array(extractPointsTXT(file_path))\n landmark = np.array(points[:,12]) # landmark point 12\n landmarks.append(landmark)\n\n return landmarks\n\n def _listLandmarks_all(self):\n # extend directory path\n current_dir = self.dir + '/landmarks'\n childDirs = listFiles(current_dir,'*.txt')\n landmarks = []\n\n for child in childDirs:\n file_name = os.path.join(current_dir, child)\n file_path = os.path.join(current_dir, file_name)\n points = np.array(extractPointsTXT(file_path))\n landmark = np.array(points) # all landmark point\n landmarks.append(landmark)\n\n return landmarks\n\n def sample_circular(self,shuffle=False):\n \"\"\" return a random sampled ImageRecord from the list of files\n \"\"\"\n if shuffle:\n indexes = rng.choice(x,len(x),replace=False)\n else:\n indexes = np.arange(self.num_files)\n\n while True:\n for idx in indexes:\n sitk_image, image = NiftiImage().decode(self.images_list[idx])\n landmark = self.landmarks_list[idx]\n image_filename = self.images_list[idx][:-7]\n yield image, landmark, image_filename\n\n\n def sample_circular_all_landmarks(self,shuffle=False):\n \"\"\" return a random sampled ImageRecord from the list of files\n \"\"\"\n if shuffle:\n indexes = rng.choice(x,len(x),replace=False)\n else:\n indexes = np.arange(self.num_files)\n\n while True:\n for idx in indexes:\n sitk_image, image = NiftiImage().decode(self.images_list[idx])\n landmarks = self.all_landmarks_list[idx]\n image_filename = self.images_list[idx][:-7]\n yield image, landmarks, image_filename\n\n# ===================================================================\n# ====================== Nifti Image Class 
==========================\n# ===================================================================\nclass ImageRecord(object):\n '''image object to contain height,width, depth and name '''\n pass\n\n\nclass NiftiImage(object):\n \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\"\n def __init__(self):\n pass\n\n def _is_nifti(self,filename):\n \"\"\"Determine if a file contains a nifti format image.\n Args\n filename: string, path of the image file\n Returns\n boolean indicating if the image is a nifti\n \"\"\"\n extensions = ['.nii','.nii.gz','.img','.hdr']\n return any(i in filename for i in extensions)\n\n def decode(self, filename,label=False):\n \"\"\" decode a single nifti image\n Args\n filename: string for input images\n label: True if nifti image is label\n Returns\n image: an image container with attributes; name, data, dims\n \"\"\"\n image = ImageRecord()\n image.name = filename\n assert self._is_nifti(image.name), \"unknown image format for %r\" % image.name\n\n if label:\n sitk_image = sitk.ReadImage(image.name, sitk.sitkInt8)\n else:\n sitk_image = sitk.ReadImage(image.name, sitk.sitkFloat32)\n np_image = sitk.GetArrayFromImage(sitk_image)\n # threshold image between p10 and p98 then re-scale [0-255]\n p0 = np_image.min().astype('float')\n p10 = np.percentile(np_image,10)\n p99 = np.percentile(np_image,99)\n p100 = np_image.max().astype('float')\n # logger.info('p0 {} , p5 {} , p10 {} , p90 {} , p98 {} , p100 {}'.format(p0,p5,p10,p90,p98,p100))\n sitk_image = sitk.Threshold(sitk_image,\n lower=p10,\n upper=p100,\n outsideValue=p10)\n sitk_image = sitk.Threshold(sitk_image,\n lower=p0,\n upper=p99,\n outsideValue=p99)\n sitk_image = sitk.RescaleIntensity(sitk_image,\n outputMinimum=0,\n outputMaximum=255)\n\n # Convert from [depth, width, height] to [width, height, depth]\n # stupid simpleitk\n image.data = sitk.GetArrayFromImage(sitk_image).transpose(2,1,0)#.astype('uint8')\n image.dims = np.shape(image.data)\n\n return sitk_image, image\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.percentile", "numpy.shape", "numpy.arange" ] ]
dilayercelik/OOP
[ "c57aaa0acc38bafc6d9fd69a10926c62612e722d" ]
[ "Udacity_AI_Python/Gaussian Class/answer.py" ]
[ "import math\nimport matplotlib.pyplot as plt\n\nclass Gaussian():\n \"\"\" Gaussian distribution class for calculating and \n visualizing a Gaussian distribution.\n \n Attributes:\n mean (float) representing the mean value of the distribution\n stdev (float) representing the standard deviation of the distribution\n data_list (list of floats) a list of floats extracted from the data file\n \n \"\"\"\n def __init__(self, mu = 0, sigma = 1):\n \n self.mean = mu\n self.stdev = sigma\n self.data = []\n\n \n def calculate_mean(self):\n \n \"\"\"Function to calculate the mean of the data set.\n \n Args: \n None\n \n Returns: \n float: mean of the data set\n \n \"\"\"\n \n avg = 1.0 * sum(self.data) / len(self.data)\n \n self.mean = avg\n \n return self.mean\n\n\n\n def calculate_stdev(self, sample=True):\n\n \"\"\"Function to calculate the standard deviation of the data set.\n \n Args: \n sample (bool): whether the data represents a sample or population\n \n Returns: \n float: standard deviation of the data set\n \n \"\"\"\n\n if sample:\n n = len(self.data) - 1\n else:\n n = len(self.data)\n \n mean = self.mean\n \n sigma = 0\n \n for d in self.data:\n sigma += (d - mean) ** 2\n \n sigma = math.sqrt(sigma / n)\n \n self.stdev = sigma\n \n return self.stdev\n \n\n def read_data_file(self, file_name, sample=True):\n \n \"\"\"Function to read in data from a txt file. The txt file should have\n one number (float) per line. The numbers are stored in the data attribute. \n After reading in the file, the mean and standard deviation are calculated\n \n Args:\n file_name (string): name of a file to read from\n \n Returns:\n None\n \n \"\"\"\n \n with open(file_name) as file:\n data_list = []\n line = file.readline()\n while line:\n data_list.append(int(line))\n line = file.readline()\n file.close()\n \n self.data = data_list\n self.mean = self.calculate_mean()\n self.stdev = self.calculate_stdev(sample)\n \n \n def plot_histogram(self):\n \"\"\"Function to output a histogram of the instance variable data using \n matplotlib pyplot library.\n \n Args:\n None\n \n Returns:\n None\n \"\"\"\n plt.hist(self.data)\n plt.title('Histogram of Data')\n plt.xlabel('data')\n plt.ylabel('count')\n \n \n \n def pdf(self, x):\n \"\"\"Probability density function calculator for the gaussian distribution.\n \n Args:\n x (float): point for calculating the probability density function\n \n \n Returns:\n float: probability density function output\n \"\"\"\n \n return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)\n \n\n def plot_histogram_pdf(self, n_spaces = 50):\n\n \"\"\"Function to plot the normalized histogram of the data and a plot of the \n probability density function along the same range\n \n Args:\n n_spaces (int): number of data points \n \n Returns:\n list: x values for the pdf plot\n list: y values for the pdf plot\n \n \"\"\"\n \n mu = self.mean\n sigma = self.stdev\n\n min_range = min(self.data)\n max_range = max(self.data)\n \n # calculates the interval between x values\n interval = 1.0 * (max_range - min_range) / n_spaces\n\n x = []\n y = []\n \n # calculate the x values to visualize\n for i in range(n_spaces):\n tmp = min_range + interval*i\n x.append(tmp)\n y.append(self.pdf(tmp))\n\n # make the plots\n fig, axes = plt.subplots(2,sharex=True)\n fig.subplots_adjust(hspace=.5)\n axes[0].hist(self.data, density=True)\n axes[0].set_title('Normed Histogram of Data')\n axes[0].set_ylabel('Density')\n\n axes[1].plot(x, y)\n axes[1].set_title('Normal 
Distribution for \\n Sample Mean and Sample Standard Deviation')\n axes[1].set_ylabel('Density')\n plt.show()\n\n return x, y\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
shahad-mahmud/asr_tf
[ "130124ccaf23fabe3e7a6f138d9403a7c0946ef3" ]
[ "tensorflow_asr/losses/rnnt_loss.py" ]
[ "# Copyright 2020 Huy Le Nguyen (@usimarit) and M. Yusuf Sarıgöz (@monatis)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# RNNT loss implementation in pure TensorFlow is borrowed from [iamjanvijay's repo](https://github.com/iamjanvijay/rnnt)\n\nimport tensorflow as tf\nfrom tensorflow.python.ops.gen_array_ops import matrix_diag_part_v2\nfrom ..utils import env_util\n\nlogger = tf.get_logger()\n\nLOG_0 = float(\"-inf\")\n\ntry:\n from warprnnt_tensorflow import rnnt_loss as warp_rnnt_loss\n use_warprnnt = True\n logger.info(\"Use RNNT loss in WarpRnnt\")\nexcept ImportError:\n logger.info(\"Use RNNT loss in TensorFlow\")\n use_warprnnt = False\n\n\nclass RnntLoss(tf.keras.losses.Loss):\n def __init__(self, blank=0, global_batch_size=None, name=None):\n super(RnntLoss, self).__init__(reduction=tf.keras.losses.Reduction.NONE, name=name)\n self.blank = blank\n self.global_batch_size = global_batch_size\n\n def call(self, y_true, y_pred):\n loss = rnnt_loss(\n logits=y_pred[\"logits\"],\n logit_length=y_pred[\"logits_length\"],\n labels=y_true[\"labels\"],\n label_length=y_true[\"labels_length\"],\n blank=self.blank,\n name=self.name\n )\n return tf.nn.compute_average_loss(loss, global_batch_size=self.global_batch_size)\n\n\[email protected]\ndef rnnt_loss(logits, labels, label_length, logit_length, blank=0, name=None):\n if use_warprnnt:\n return rnnt_loss_warprnnt(logits=logits, labels=labels,\n label_length=label_length, logit_length=logit_length, blank=blank)\n else:\n return rnnt_loss_tf(logits=logits, labels=labels, label_length=label_length, logit_length=logit_length, name=name)\n\n\ndef rnnt_loss_warprnnt(logits, labels, label_length, logit_length, blank=0):\n if not env_util.has_devices([\"GPU\", \"TPU\"]):\n logits = tf.nn.log_softmax(logits)\n loss = warp_rnnt_loss(\n acts=tf.cast(logits, tf.float32),\n label_lengths=tf.cast(label_length, tf.int32),\n labels=tf.cast(labels, tf.int32),\n input_lengths=tf.cast(logit_length, tf.int32),\n blank_label=blank\n )\n return loss\n\n\ndef nan_to_zero(input_tensor):\n return tf.where(tf.math.is_nan(input_tensor), tf.zeros_like(input_tensor), input_tensor)\n\n\ndef reduce_logsumexp(input_tensor, axis):\n maximum = tf.reduce_max(input_tensor, axis=axis)\n input_tensor = nan_to_zero(input_tensor - maximum)\n return tf.math.log(tf.reduce_sum(tf.exp(input_tensor), axis=axis)) + maximum\n\n\ndef extract_diagonals(log_probs):\n time_steps = tf.shape(log_probs)[1] # T\n output_steps = tf.shape(log_probs)[2] # U + 1\n reverse_log_probs = tf.reverse(log_probs, axis=[-1])\n paddings = [[0, 0], [0, 0], [time_steps - 1, 0]]\n padded_reverse_log_probs = tf.pad(reverse_log_probs, paddings,\n 'CONSTANT', constant_values=LOG_0)\n diagonals = matrix_diag_part_v2(padded_reverse_log_probs, k=(0, time_steps + output_steps - 2),\n padding_value=LOG_0)\n\n return tf.transpose(diagonals, perm=[1, 0, 2])\n\n\ndef transition_probs(one_hot_labels, log_probs):\n \"\"\"\n :return: blank_probs with shape batch_size x input_max_len x target_max_len\n truth_probs with 
shape batch_size x input_max_len x (target_max_len-1)\n \"\"\"\n blank_probs = log_probs[:, :, :, 0]\n truth_probs = tf.reduce_sum(tf.multiply(log_probs[:, :, :-1, :], one_hot_labels), axis=-1)\n\n return blank_probs, truth_probs\n\n\ndef forward_dp(bp_diags, tp_diags, batch_size, input_max_len, target_max_len):\n \"\"\"\n :return: forward variable alpha with shape batch_size x input_max_len x target_max_len\n \"\"\"\n\n def next_state(x, trans_probs):\n blank_probs = trans_probs[0]\n truth_probs = trans_probs[1]\n\n x_b = tf.concat([LOG_0 * tf.ones(shape=[batch_size, 1]), x[:, :-1] + blank_probs], axis=1)\n x_t = x + truth_probs\n\n x = tf.math.reduce_logsumexp(tf.stack([x_b, x_t], axis=0), axis=0)\n return x\n\n initial_alpha = tf.concat(\n [tf.zeros(shape=[batch_size, 1]), tf.ones(shape=[batch_size, input_max_len - 1]) * LOG_0], axis=1)\n\n fwd = tf.scan(next_state, (bp_diags[:-1, :, :-1], tp_diags), initializer=initial_alpha)\n\n alpha = tf.transpose(tf.concat([tf.expand_dims(initial_alpha, axis=0), fwd], axis=0), perm=[1, 2, 0])\n alpha = matrix_diag_part_v2(alpha, k=(0, target_max_len - 1), padding_value=LOG_0)\n alpha = tf.transpose(tf.reverse(alpha, axis=[1]), perm=[0, 2, 1])\n\n return alpha\n\n\ndef backward_dp(bp_diags, tp_diags, batch_size, input_max_len, target_max_len, label_length, logit_length, blank_sl):\n \"\"\"\n :return: backward variable beta with shape batch_size x input_max_len x target_max_len\n \"\"\"\n\n def next_state(x, mask_and_trans_probs):\n mask_s, blank_probs_s, truth_probs = mask_and_trans_probs\n\n beta_b = tf.concat([x[:, 1:] + blank_probs_s, LOG_0 * tf.ones(shape=[batch_size, 1])], axis=1)\n beta_t = tf.concat([x[:, :-1] + truth_probs, LOG_0 * tf.ones(shape=[batch_size, 1])], axis=1)\n\n beta_next = reduce_logsumexp(tf.stack([beta_b, beta_t], axis=0), axis=0)\n masked_beta_next = \\\n nan_to_zero(beta_next * tf.expand_dims(mask_s, axis=1)) + nan_to_zero(x * tf.expand_dims((1.0 - mask_s), axis=1))\n return tf.reshape(masked_beta_next, shape=tf.shape(x))\n\n # Initial beta for batches.\n initial_beta_mask = tf.one_hot(logit_length - 1, depth=input_max_len + 1)\n initial_beta = tf.expand_dims(blank_sl, axis=1) * initial_beta_mask + nan_to_zero(LOG_0 * (1.0 - initial_beta_mask))\n\n # Mask for scan iterations.\n mask = tf.sequence_mask(logit_length + label_length - 1, input_max_len + target_max_len - 2, dtype=tf.dtypes.float32)\n mask = tf.transpose(mask, perm=[1, 0])\n\n bwd = tf.scan(next_state, (mask, bp_diags[:-1, :, :], tp_diags), initializer=initial_beta, reverse=True)\n\n beta = tf.transpose(tf.concat([bwd, tf.expand_dims(initial_beta, axis=0)], axis=0), perm=[1, 2, 0])[:, :-1, :]\n beta = matrix_diag_part_v2(beta, k=(0, target_max_len - 1), padding_value=LOG_0)\n beta = tf.transpose(tf.reverse(beta, axis=[1]), perm=[0, 2, 1])\n\n return beta\n\n\ndef compute_rnnt_loss_and_grad_helper(logits, labels, label_length, logit_length):\n batch_size = tf.shape(logits)[0]\n input_max_len = tf.shape(logits)[1]\n target_max_len = tf.shape(logits)[2]\n vocab_size = tf.shape(logits)[3]\n\n one_hot_labels = tf.one_hot(tf.tile(tf.expand_dims(labels, axis=1),\n multiples=[1, input_max_len, 1]), depth=vocab_size)\n\n log_probs = tf.nn.log_softmax(logits)\n blank_probs, truth_probs = transition_probs(one_hot_labels, log_probs)\n bp_diags = extract_diagonals(blank_probs)\n tp_diags = extract_diagonals(truth_probs)\n\n label_mask = tf.expand_dims(tf.sequence_mask(label_length + 1, maxlen=target_max_len, dtype=tf.float32), axis=1)\n small_label_mask = 
tf.expand_dims(tf.sequence_mask(label_length, maxlen=target_max_len, dtype=tf.float32), axis=1)\n input_mask = tf.expand_dims(tf.sequence_mask(logit_length, maxlen=input_max_len, dtype=tf.float32), axis=2)\n small_input_mask = tf.expand_dims(tf.sequence_mask(logit_length - 1, maxlen=input_max_len, dtype=tf.float32), axis=2)\n mask = label_mask * input_mask\n grad_blank_mask = (label_mask * small_input_mask)[:, :-1, :]\n grad_truth_mask = (small_label_mask * input_mask)[:, :, :-1]\n\n alpha = forward_dp(bp_diags, tp_diags, batch_size, input_max_len, target_max_len) * mask\n\n indices = tf.stack([logit_length - 1, label_length], axis=1)\n blank_sl = tf.gather_nd(blank_probs, indices, batch_dims=1)\n\n beta = backward_dp(bp_diags, tp_diags, batch_size, input_max_len,\n target_max_len, label_length, logit_length, blank_sl) * mask\n beta = tf.where(tf.math.is_nan(beta), tf.zeros_like(beta), beta)\n final_state_probs = beta[:, 0, 0]\n\n # Compute gradients of loss w.r.t. blank log-probabilities.\n grads_blank = -tf.exp(\n (\n alpha[:, :-1, :] + beta[:, 1:, :]\n - tf.reshape(final_state_probs, shape=[batch_size, 1, 1])\n + blank_probs[:, :-1, :]\n ) * grad_blank_mask\n ) * grad_blank_mask\n grads_blank = tf.concat([grads_blank, tf.zeros(shape=(batch_size, 1, target_max_len))], axis=1)\n last_grads_blank = -1 * tf.scatter_nd(\n tf.concat([tf.reshape(tf.range(batch_size, dtype=tf.int64), shape=[batch_size, 1]),\n tf.cast(indices, dtype=tf.int64)], axis=1),\n tf.ones(batch_size, dtype=tf.float32),\n [batch_size, input_max_len, target_max_len]\n )\n grads_blank = grads_blank + last_grads_blank\n\n # Compute gradients of loss w.r.t. truth log-probabilities.\n grads_truth = -tf.exp(\n (\n alpha[:, :, :-1] + beta[:, :, 1:]\n - tf.reshape(final_state_probs, shape=[batch_size, 1, 1])\n + truth_probs\n )\n * grad_truth_mask\n ) * grad_truth_mask\n\n # Compute gradients of loss w.r.t. 
activations.\n a = tf.tile(tf.reshape(tf.range(target_max_len - 1, dtype=tf.int64), shape=(1, 1, target_max_len - 1, 1)),\n multiples=[batch_size, 1, 1, 1])\n b = tf.cast(tf.reshape(labels - 1, shape=(batch_size, 1, target_max_len - 1, 1)), dtype=tf.int64)\n if not env_util.has_devices([\"GPU\", \"TPU\"]):\n b = tf.where(tf.equal(b, -1), tf.zeros_like(b), b) # for cpu testing (index -1 on cpu will raise errors)\n c = tf.concat([a, b], axis=3)\n d = tf.tile(c, multiples=(1, input_max_len, 1, 1))\n e = tf.tile(tf.reshape(tf.range(input_max_len, dtype=tf.int64), shape=(1, input_max_len, 1, 1)),\n multiples=(batch_size, 1, target_max_len - 1, 1))\n f = tf.concat([e, d], axis=3)\n g = tf.tile(tf.reshape(tf.range(batch_size, dtype=tf.int64), shape=(batch_size, 1, 1, 1)),\n multiples=[1, input_max_len, target_max_len - 1, 1])\n scatter_idx = tf.concat([g, f], axis=3)\n # TODO - improve the part of code for scatter_idx computation.\n probs = tf.exp(log_probs)\n grads_truth_scatter = tf.scatter_nd(scatter_idx, grads_truth,\n [batch_size, input_max_len, target_max_len, vocab_size - 1])\n grads = tf.concat([tf.reshape(grads_blank, shape=(batch_size, input_max_len, target_max_len, -1)),\n grads_truth_scatter], axis=3)\n grads_logits = grads - probs * (tf.reduce_sum(grads, axis=3, keepdims=True))\n\n loss = -final_state_probs\n return loss, grads_logits\n\n\ndef rnnt_loss_tf(logits, labels, label_length, logit_length, name=None):\n name = \"rnnt_loss\" if name is None else name\n with tf.name_scope(name):\n logits = tf.convert_to_tensor(logits, name=\"logits\")\n labels = tf.convert_to_tensor(labels, name=\"labels\")\n label_length = tf.convert_to_tensor(label_length, name=\"label_length\")\n logit_length = tf.convert_to_tensor(logit_length, name=\"logit_length\")\n\n args = [logits, labels, label_length, logit_length]\n\n @tf.custom_gradient\n def compute_rnnt_loss_and_grad(logits_t, labels_t, label_length_t, logit_length_t):\n \"\"\"Compute RNN-T loss and gradients.\"\"\"\n logits_t.set_shape(logits.shape)\n labels_t.set_shape(labels.shape)\n label_length_t.set_shape(label_length.shape)\n logit_length_t.set_shape(logit_length.shape)\n kwargs = dict(logits=logits_t, labels=labels_t, label_length=label_length_t, logit_length=logit_length_t)\n result = compute_rnnt_loss_and_grad_helper(**kwargs)\n\n def grad(grad_loss):\n grads = [tf.reshape(grad_loss, [-1, 1, 1, 1]) * result[1]]\n grads += [None] * (len(args) - len(grads))\n return grads\n\n return result[0], grad\n\n return compute_rnnt_loss_and_grad(*args)\n" ]
[ [ "tensorflow.exp", "tensorflow.ones", "tensorflow.reshape", "tensorflow.scatter_nd", "tensorflow.reverse", "tensorflow.zeros_like", "tensorflow.python.ops.gen_array_ops.matrix_diag_part_v2", "tensorflow.stack", "tensorflow.tile", "tensorflow.one_hot", "tensorflow.cast", "tensorflow.shape", "tensorflow.concat", "tensorflow.transpose", "tensorflow.math.is_nan", "tensorflow.pad", "tensorflow.scan", "tensorflow.nn.log_softmax", "tensorflow.get_logger", "tensorflow.zeros", "tensorflow.range", "tensorflow.expand_dims", "tensorflow.gather_nd", "tensorflow.name_scope", "tensorflow.reduce_sum", "tensorflow.nn.compute_average_loss", "tensorflow.sequence_mask", "tensorflow.multiply", "tensorflow.convert_to_tensor", "tensorflow.equal", "tensorflow.reduce_max" ] ]
Wetterprophet/fingerspelling-gesture-glove
[ "7652c5025526f56273c4287b4b77b830a4979d0b" ]
[ "arduino_python/computeCentroid.py" ]
[ "import numpy as np\nimport arduino_python.measureData as mD\nimport arduino_python.DBA_multivariate as DBA\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\n\n\ndef main(file_name, pins, smoothing):\n\n \"\"\"\n Returns dictionary of centroids of training data by class labels\n Using multivariate DBA\n \"\"\"\n #for easier testing, just read in saved data\n training_data, target_values = mD.read_data(file_name)\n unique_classes = np.unique(target_values)\n\n #sort data by class label\n sorted_data = sort_data(training_data, target_values, unique_classes)\n\n #order data to shape (dimension, length) & normalize & smooth out\n dim_data = {class_label : np.array([normalize(order_by_dim(series, smoothing)) for series in sorted_data[class_label]])\n for class_label in unique_classes}\n\n\n centroids = compute_centroids(dim_data)\n\n #plotting for visualization and improvement purposes\n plot_centroids(centroids, unique_classes, pins)\n\n return centroids, unique_classes\n\n\n\ndef normalize(series):\n\n \"\"\"\n Normalize each dimension of a series instance individually\n Using z transformation\n \"\"\"\n #normalize dimension with z transformation\n #or return array of zeros if standard deviation is 0\n norm_series = np.array([stats.zscore(dimension) if np.std(dimension) != 0 else np.zeros(len(dimension)) for dimension in series])\n\n return norm_series\n\n\ndef order_by_dim(series, smoothing):\n\n \"\"\"\n Reorder a series to an array of values per dimension\n Needed for DBA and DTW on Arduino\n\n Smoothing is applied here\n \"\"\"\n\n ordered_series = []\n for i in range(len(series[0])):\n ordered_series.append([pin[i] if pin[i] > smoothing else 0 for pin in series])\n\n return np.array(ordered_series)\n\ndef sort_data(training_data, target_values, unique_classes):\n\n \"\"\"\n Returns a dictionary with instances by class labels\n \"\"\"\n\n sorted_data = {label : training_data[target_values==label] for label in unique_classes }\n\n return sorted_data\n\n\ndef compute_centroids(sorted_data):\n\n \"\"\"\n Returns a dictionary with centroid per class\n Computed with multivariate DBA\n \"\"\"\n\n all_centroids = {}\n for label in sorted_data.keys():\n centroid = DBA.performDBA(sorted_data[label])\n\n #round down to .2 float, multiply with 100\n #and convert to int for saving memory on Arduino\n all_centroids[label] = (np.around(centroid, 2) * 100).astype(int)\n\n return all_centroids\n\ndef plot_centroids(centroids, unique_classes, pins):\n\n \"\"\"\n Plots centroid of each dimension (input pin) per class\n Returns none\n \"\"\"\n\n n_dims = len(pins)\n num_plots = len(unique_classes)\n\n fig = plt.figure()\n for num, label in enumerate(centroids.keys()):\n ax = fig.add_subplot(num_plots, 1, num+1)\n for dim in range(0,n_dims):\n ax.plot(range(0,centroids[label].shape[1]), centroids[label][dim], label = pins[dim])\n ax.set_title(label)\n plt.legend(loc = 'best')\n plt.show()\n\n return None\n\n#function for testing purposes\n#and script creation\ndef c_array(centroids):\n\n \"\"\"\n Returns new dictionary with a ready-to-copy string representation of data for use as array in C\n \"\"\"\n c_centroids = {}\n for label in centroids.keys():\n c_centroid = str(centroids[label].tolist()).replace('[', '{')\n c_centroid = c_centroid.replace(']', '}')\n c_centroids[label] = c_centroid\n\n return c_centroids\n\n\n\n#default setting if called on its own\nif __name__ == '__main__':\n main('latext.txt', pins = ['A0', 'A2'], smoothing = 500)\n" ]
[ [ "scipy.stats.zscore", "numpy.array", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.std", "matplotlib.pyplot.show", "numpy.around", "numpy.unique" ] ]
spragunr/echolocation
[ "f78bd53afc3ff965f38f871fe3463da3a2820a35" ]
[ "view.py" ]
[ "import h5py\nimport sys\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nimport numpy as np\n\ndata = h5py.File(sys.argv[1], 'r')\ni = 0\nwhile True:\n entered = input(\"which? (enter for next)\")\n if entered == \"\":\n i += 1\n else:\n i = entered\n rows = 5\n for row in range(rows): \n plt.subplot(rows,5,1 + 5 * row)\n rgb = plt.imshow(data['rgb'][i + row,...])\n \n plt.subplot(rows,5,2 + 5 * row)\n plt.plot(data['audio_aligned'][i + row,:,:])\n plt.ylim([-2**15, 2**15])\n \n plt.subplot(rows,5,3 + 5 * row)\n f, t, Sxx = signal.spectrogram(data['audio_aligned'][i + row,:,0], 44100, nperseg=256,\n noverlap =255)\n plt.pcolormesh(t, f, np.log(1 + Sxx))\n plt.ylabel('Frequency [Hz]')\n plt.xlabel('Time [sec]')\n \n plt.subplot(rows,5,4 + 5 * row)\n f, t, Sxx = signal.spectrogram(data['audio_aligned'][i + row,:,1], 44100, nperseg=256,\n noverlap =255)\n plt.pcolormesh(t, f, np.log(1 + Sxx))\n plt.ylabel('Frequency [Hz]')\n plt.xlabel('Time [sec]')\n \n plt.subplot(rows,5,5 + 5 * row)\n plt.imshow(data['depth'][i + row,...])\n plt.show()\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.subplot", "scipy.signal.spectrogram", "numpy.log", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow" ] ]
c-abird/meshio
[ "21301c3c5df3b196c60bea0cf71f27736f9a337e" ]
[ "meshio/h5m/_h5m.py" ]
[ "\"\"\"\nI/O for h5m, cf.\n<https://www.mcs.anl.gov/~fathom/moab-docs/html/h5mmain.html>.\n\"\"\"\nimport logging\nfrom datetime import datetime\n\nimport numpy\n\nfrom .. import __about__\nfrom .._helpers import register\nfrom .._mesh import CellBlock, Mesh\n\n# def _int_to_bool_list(num):\n# # From <https://stackoverflow.com/a/33608387/353337>.\n# bin_string = format(num, '04b')\n# return [x == '1' for x in bin_string[::-1]]\n\n\ndef read(filename):\n \"\"\"Reads H5M files, cf.\n https://trac.mcs.anl.gov/projects/ITAPS/wiki/MOAB/h5m.\n \"\"\"\n import h5py\n\n f = h5py.File(filename, \"r\")\n dset = f[\"tstt\"]\n\n points = dset[\"nodes\"][\"coordinates\"][()]\n # read point data\n point_data = {}\n if \"tags\" in dset[\"nodes\"]:\n for name, dataset in dset[\"nodes\"][\"tags\"].items():\n point_data[name] = dataset[()]\n\n # # Assert that the GLOBAL_IDs are contiguous.\n # point_gids = dset['nodes']['tags']['GLOBAL_ID'][()]\n # point_start_gid = dset['nodes']['coordinates'].attrs['start_id']\n # point_end_gid = point_start_gid + len(point_gids) - 1\n # assert all(point_gids == range(point_start_gid, point_end_gid + 1))\n\n h5m_to_meshio_type = {\n \"Edge2\": \"line\",\n \"Hex8\": \"hexahedron\",\n \"Prism6\": \"wedge\",\n \"Pyramid5\": \"pyramid\",\n \"Quad4\": \"quad\",\n \"Tri3\": \"triangle\",\n \"Tet4\": \"tetra\",\n }\n cells = []\n cell_data = {}\n for h5m_type, data in dset[\"elements\"].items():\n meshio_type = h5m_to_meshio_type[h5m_type]\n conn = data[\"connectivity\"]\n # Note that the indices are off by 1 in h5m.\n cells.append(CellBlock(meshio_type, conn[()] - 1))\n\n # TODO bring cell data back\n # if 'tags' in data:\n # for name, dataset in data['tags'].items():\n # cell_data[name] = dataset[()]\n\n # The `sets` in H5M are special in that they represent a segration of data\n # in the current file, particularly by a load balancer (Metis, Zoltan,\n # etc.). 
This segregation has no equivalent in other data types, but is\n # certainly worthwhile visualizing.\n # Hence, we will translate the sets into cell data with the prefix \"set::\"\n # here.\n field_data = {}\n # TODO deal with sets\n # if 'sets' in dset and 'contents' in dset['sets']:\n # # read sets\n # sets_contents = dset['sets']['contents'][()]\n # sets_list = dset['sets']['list'][()]\n # sets_tags = dset['sets']['tags']\n\n # cell_start_gid = conn.attrs['start_id']\n # cell_gids = cell_start_gid + elems['tags']['GLOBAL_ID'][()]\n # cell_end_gid = cell_start_gid + len(cell_gids) - 1\n # assert all(cell_gids == range(cell_start_gid, cell_end_gid + 1))\n\n # # create the sets\n # for key, value in sets_tags.items():\n # mod_key = 'set::' + key\n # cell_data[mod_key] = numpy.empty(len(cells), dtype=int)\n # end = 0\n # for k, row in enumerate(sets_list):\n # bits = _int_to_bool_list(row[3])\n # # is_owner = bits[0]\n # # is_unique = bits[1]\n # # is_ordered = bits[2]\n # is_range_compressed = bits[3]\n # if is_range_compressed:\n # start_gids = sets_contents[end:row[0]+1:2]\n # lengths = sets_contents[end+1:row[0]+1:2]\n # for start_gid, length in zip(start_gids, lengths):\n # end_gid = start_gid + length - 1\n # if start_gid >= cell_start_gid and \\\n # end_gid <= cell_end_gid:\n # i0 = start_gid - cell_start_gid\n # i1 = end_gid - cell_start_gid + 1\n # cell_data[mod_key][i0:i1] = value[k]\n # else:\n # # TODO deal with point data\n # raise RuntimeError('')\n # else:\n # gids = sets_contents[end:row[0]+1]\n # cell_data[mod_key][gids - cell_start_gid] = value[k]\n\n # end = row[0] + 1\n\n return Mesh(\n points, cells, point_data=point_data, cell_data=cell_data, field_data=field_data\n )\n\n\ndef write(filename, mesh, add_global_ids=True, compression=\"gzip\", compression_opts=4):\n \"\"\"Writes H5M files, cf.\n https://trac.mcs.anl.gov/projects/ITAPS/wiki/MOAB/h5m.\n \"\"\"\n import h5py\n\n f = h5py.File(filename, \"w\")\n\n tstt = f.create_group(\"tstt\")\n\n # The base index for h5m is 1.\n global_id = 1\n\n # add nodes\n nodes = tstt.create_group(\"nodes\")\n coords = nodes.create_dataset(\n \"coordinates\",\n data=mesh.points,\n compression=compression,\n compression_opts=compression_opts,\n )\n coords.attrs.create(\"start_id\", global_id)\n global_id += len(mesh.points)\n\n # Global tags\n tstt_tags = tstt.create_group(\"tags\")\n\n # The GLOBAL_ID associated with a point is used to identify points if\n # distributed across several processes. mbpart automatically adds them,\n # too.\n # Copy to pd to avoid changing point_data. 
The items are not deep-copied.\n pd = mesh.point_data.copy()\n if \"GLOBAL_ID\" not in pd and add_global_ids:\n pd[\"GLOBAL_ID\"] = numpy.arange(1, len(mesh.points) + 1)\n\n # add point data\n if pd:\n tags = nodes.create_group(\"tags\")\n for key, data in pd.items():\n if len(data.shape) == 1:\n dtype = data.dtype\n tags.create_dataset(\n key,\n data=data,\n compression=compression,\n compression_opts=compression_opts,\n )\n else:\n # H5M doesn't accept n-x-k arrays as data; it wants an n-x-1\n # array with k-tuples as entries.\n n, k = data.shape\n dtype = numpy.dtype((data.dtype, (k,)))\n dset = tags.create_dataset(\n key,\n (n,),\n dtype=dtype,\n compression=compression,\n compression_opts=compression_opts,\n )\n dset[:] = data\n\n # Create entry in global tags\n g = tstt_tags.create_group(key)\n g[\"type\"] = dtype\n # Add a class tag:\n # From\n # <http://lists.mcs.anl.gov/pipermail/moab-dev/2015/007104.html>:\n # ```\n # /* Was dense tag data in mesh database */\n # define mhdf_DENSE_TYPE 2\n # /** \\brief Was sparse tag data in mesh database */\n # #define mhdf_SPARSE_TYPE 1\n # /** \\brief Was bit-field tag data in mesh database */\n # #define mhdf_BIT_TYPE 0\n # /** \\brief Unused */\n # #define mhdf_MESH_TYPE 3\n #\n g.attrs[\"class\"] = 2\n\n # add elements\n elements = tstt.create_group(\"elements\")\n\n elem_dt = h5py.special_dtype(\n enum=(\n \"i\",\n {\n \"Edge\": 1,\n \"Tri\": 2,\n \"Quad\": 3,\n \"Polygon\": 4,\n \"Tet\": 5,\n \"Pyramid\": 6,\n \"Prism\": 7,\n \"Knife\": 8,\n \"Hex\": 9,\n \"Polyhedron\": 10,\n },\n )\n )\n\n tstt[\"elemtypes\"] = elem_dt\n\n tstt.create_dataset(\n \"history\",\n data=[\n __name__.encode(\"utf-8\"),\n __about__.__version__.encode(\"utf-8\"),\n str(datetime.now()).encode(\"utf-8\"),\n ],\n compression=compression,\n compression_opts=compression_opts,\n )\n\n # number of nodes to h5m name, element type\n meshio_to_h5m_type = {\n \"line\": {\"name\": \"Edge2\", \"type\": 1},\n \"triangle\": {\"name\": \"Tri3\", \"type\": 2},\n \"tetra\": {\"name\": \"Tet4\", \"type\": 5},\n }\n for key, data in mesh.cells:\n if key not in meshio_to_h5m_type:\n logging.warning(\"Unsupported H5M element type '%s'. Skipping.\", key)\n continue\n this_type = meshio_to_h5m_type[key]\n elem_group = elements.create_group(this_type[\"name\"])\n elem_group.attrs.create(\"element_type\", this_type[\"type\"], dtype=elem_dt)\n # h5m node indices are 1-based\n conn = elem_group.create_dataset(\n \"connectivity\",\n data=(data + 1),\n compression=compression,\n compression_opts=compression_opts,\n )\n conn.attrs.create(\"start_id\", global_id)\n global_id += len(data)\n\n # add cell data\n for cell_type, cd in mesh.cell_data.items():\n if cd:\n tags = elem_group.create_group(\"tags\")\n for key, value in cd.items():\n tags.create_dataset(\n key,\n data=value,\n compression=compression,\n compression_opts=compression_opts,\n )\n\n # add empty set -- MOAB wants this\n sets = tstt.create_group(\"sets\")\n sets.create_group(\"tags\")\n\n # set max_id\n tstt.attrs.create(\"max_id\", global_id, dtype=\"u8\")\n\n\nregister(\"h5m\", [\".h5m\"], read, {\"h5m\": write})\n" ]
[ [ "numpy.dtype" ] ]
MicahChambers/autodet
[ "9fea777f50b16f46d1eea0e5bbd5a7e983f3300a" ]
[ "autodet/Decoder.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom autodet.Residual import Residual\n\n\nclass Decoder(nn.Module):\n def __init__(self, ichannels, ochannels):\n super(Decoder, self).__init__()\n self.layer1 = Residual(ichannels=ichannels, ochannels=20, kradius=1)\n self.layer2 = Residual(ichannels=20, ochannels=ochannels, kradius=1)\n\n def forward(self, x):\n x = self.layer1(x)\n x = F.softsign(x)\n x = self.layer2(x)\n x = F.sigmoid(x)\n return x\n" ]
[ [ "torch.nn.functional.sigmoid", "torch.nn.functional.softsign" ] ]
Abrahamon/TransTrack
[ "d339429125c076192b6512ef85270bb373204e9e" ]
[ "models/reid/deformable_detrtrack_train.py" ]
[ "# Modified by Peize Sun, Rufeng Zhang\n# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\"\"\"\nDeformable DETR model and criterion classes.\n\"\"\"\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nimport math\n\nfrom util import box_ops\nfrom util.misc import (NestedTensor, nested_tensor_from_tensor_list,\n accuracy, get_world_size, interpolate,\n is_dist_avail_and_initialized, inverse_sigmoid)\n\nfrom .backbone import build_backbone\nfrom .matcher import build_matcher\nfrom .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm,\n dice_loss, sigmoid_focal_loss)\nfrom .deformable_transformer_track import build_deforamble_transformer\nimport copy\nfrom scipy.optimize import linear_sum_assignment\n\n\ndef _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\nclass DeformableDETR(nn.Module):\n \"\"\" This is the Deformable DETR module that performs object detection \"\"\"\n def __init__(self, backbone, transformer, num_classes, num_ids, reid_dim, num_queries, num_feature_levels,\n aux_loss=True, with_box_refine=False, two_stage=False, reid_shared=False):\n \"\"\" Initializes the model.\n Parameters:\n backbone: torch module of the backbone to be used. See backbone.py\n transformer: torch module of the transformer architecture. See transformer.py\n num_classes: number of object classes\n num_queries: number of object queries, ie detection slot. This is the maximal number of objects\n DETR can detect in a single image. 
For COCO, we recommend 100 queries.\n aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\n with_box_refine: iterative bounding box refinement\n two_stage: two-stage Deformable DETR\n \"\"\"\n super().__init__()\n self.num_queries = num_queries\n self.transformer = transformer\n hidden_dim = transformer.d_model\n self.class_embed = nn.Linear(hidden_dim, num_classes)\n self.reid_embed = MLP(hidden_dim, hidden_dim, reid_dim, 3)\n self.reid_cls = nn.Linear(reid_dim, num_ids)\n self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)\n self.num_feature_levels = num_feature_levels\n if not two_stage:\n self.query_embed = nn.Embedding(num_queries, hidden_dim*2)\n if num_feature_levels > 1:\n num_backbone_outs = len(backbone.strides)\n input_proj_list = []\n for _ in range(num_backbone_outs):\n in_channels = backbone.num_channels[_]\n input_proj_list.append(nn.Sequential(\n nn.Conv2d(in_channels, hidden_dim, kernel_size=1),\n nn.GroupNorm(32, hidden_dim),\n ))\n for _ in range(num_feature_levels - num_backbone_outs):\n input_proj_list.append(nn.Sequential(\n nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),\n nn.GroupNorm(32, hidden_dim),\n ))\n in_channels = hidden_dim\n self.input_proj = nn.ModuleList(input_proj_list)\n else:\n self.input_proj = nn.ModuleList([nn.Conv2d(backbone.num_channels[0], hidden_dim, kernel_size=1)])\n self.combine = nn.Conv2d(hidden_dim * 2, hidden_dim, kernel_size=1)\n\n self.backbone = backbone\n self.aux_loss = aux_loss\n self.with_box_refine = with_box_refine\n self.two_stage = two_stage\n\n prior_prob = 0.01\n bias_value = -math.log((1 - prior_prob) / prior_prob)\n self.class_embed.bias.data = torch.ones(num_classes) * bias_value\n self.reid_cls.bias.data = torch.ones(num_ids) * bias_value\n nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)\n for proj in self.input_proj:\n nn.init.xavier_uniform_(proj[0].weight, gain=1)\n nn.init.constant_(proj[0].bias, 0)\n self.emb_scale = math.sqrt(2) * math.log(num_ids - 1)\n\n # if two-stage, the last class_embed and bbox_embed is for region proposal generation\n num_pred = (transformer.decoder.num_layers + 1) if two_stage else transformer.decoder.num_layers\n if with_box_refine:\n self.class_embed = _get_clones(self.class_embed, num_pred)\n self.bbox_embed = _get_clones(self.bbox_embed, num_pred)\n self.reid_embed = _get_clones(self.reid_embed, num_pred)\n self.reid_cls = _get_clones(self.reid_cls, num_pred)\n nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)\n # hack implementation for iterative bounding box refinement\n self.transformer.decoder.bbox_embed = self.bbox_embed\n else:\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)\n self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])\n self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])\n self.reid_embed = nn.ModuleList([self.reid_embed for _ in range(num_pred)])\n self.reid_cls = nn.ModuleList([self.reid_cls for _ in range(num_pred)])\n self.transformer.decoder.bbox_embed = None\n if two_stage:\n # hack implementation for two-stage\n self.transformer.decoder.class_embed = self.class_embed\n for box_embed in self.bbox_embed:\n nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)\n \n @torch.no_grad()\n def randshift(self, samples, targets):\n bs = samples.tensors.shape[0]\n \n self.xshift = (100 * torch.rand(bs)).int()\n self.xshift *= (torch.randn(bs) > 
0.0).int() * 2 - 1 \n self.yshift = (100 * torch.rand(bs)).int()\n self.yshift *= (torch.randn(bs) > 0.0).int() * 2 - 1\n \n shifted_images = []\n new_targets = copy.deepcopy(targets)\n \n for i, (image, target) in enumerate(zip(samples.tensors, targets)):\n _, h, w = image.shape\n img_h, img_w = target['size']\n nopad_image = image[:, :img_h, :img_w]\n image_patch = \\\n nopad_image[:,\n max(0, -self.yshift[i]) : min(h, h - self.yshift[i]), \n max(0, -self.xshift[i]) : min(w, w - self.xshift[i])] \n \n _, patch_h, patch_w = image_patch.shape\n ratio_h, ratio_w = img_h / patch_h, img_w / patch_w \n shifted_image = F.interpolate(image_patch[None], size=(img_h, img_w))[0]\n pad_shifted_image = copy.deepcopy(image)\n pad_shifted_image[:, :img_h, :img_w] = shifted_image\n shifted_images.append(pad_shifted_image)\n \n scale = torch.tensor([img_w, img_h, img_w, img_h], device=image.device)[None]\n bboxes = target['boxes'] * scale\n bboxes -= torch.tensor([max(0, -self.xshift[i]), max(0, -self.yshift[i]), 0, 0], device=image.device)[None]\n bboxes *= torch.tensor([ratio_w, ratio_h, ratio_w, ratio_h], device=image.device)[None]\n shifted_bboxes = bboxes / scale\n new_targets[i]['boxes'] = shifted_bboxes\n \n new_samples = copy.deepcopy(samples)\n new_samples.tensors = torch.stack(shifted_images, dim=0)\n \n return new_samples, new_targets\n \n def forward(self, samples_targets, pre_embed=None):\n if self.training:\n samples, targets = samples_targets \n pre_samples, pre_targets = self.randshift(samples, targets)\n out, _ = self.forward_train(samples, pre_samples)\n return out, None, None\n else:\n samples = samples_targets \n return self.forward_train(samples, samples)\n\n \n def forward_train(self, samples: NestedTensor, pre_samples: NestedTensor):\n \"\"\" The forward expects a NestedTensor, which consists of:\n - samples.tensor: batched images, of shape [batch_size x 3 x H x W]\n - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels\n It returns a dict with the following elements:\n - \"pred_logits\": the classification logits (including no-object) for all queries.\n Shape= [batch_size x num_queries x (num_classes + 1)]\n - \"pred_boxes\": The normalized boxes coordinates for all queries, represented as\n (center_x, center_y, height, width). These values are normalized in [0, 1],\n relative to the size of each individual image (disregarding possible padding).\n See PostProcess for information on how to retrieve the unnormalized bounding box.\n - \"aux_outputs\": Optional, only returned when auxilary losses are activated. 
It is a list of\n dictionnaries containing the two above keys for each decoder layer.\n \"\"\"\n if not isinstance(samples, NestedTensor):\n samples = nested_tensor_from_tensor_list(samples)\n features, pos = self.backbone(samples)\n \n if not isinstance(pre_samples, NestedTensor):\n pre_samples = nested_tensor_from_tensor_list(pre_samples)\n pre_feat, _ = self.backbone(pre_samples)\n \n srcs = []\n masks = []\n \n for l, (feat, feat2) in enumerate(zip(features, pre_feat)):\n src, mask = feat.decompose()\n src2, _ = feat2.decompose()\n srcs.append(self.combine(torch.cat([self.input_proj[l](src), self.input_proj[l](src2)], dim=1)))\n masks.append(mask)\n assert mask is not None\n\n if self.num_feature_levels > len(srcs):\n _len_srcs = len(srcs)\n for l in range(_len_srcs, self.num_feature_levels):\n if l == _len_srcs:\n src = self.combine(torch.cat([self.input_proj[l](features[-1].tensors), self.input_proj[l](pre_feat[-1].tensors)], dim=1))\n else:\n src = self.input_proj[l](srcs[-1])\n\n m = samples.mask\n mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0]\n pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype)\n srcs.append(src)\n masks.append(mask)\n pos.append(pos_l)\n \n query_embeds = None\n if not self.two_stage:\n query_embeds = self.query_embed.weight \n hs, hs_reid, init_reference, inter_references, enc_outputs_class, enc_outputs_coord_unact, _ = self.transformer(srcs, masks, pos, query_embeds) \n \n outputs_classes = []\n outputs_coords = []\n outputs_reids = []\n outputs_ids = []\n for lvl in range(hs.shape[0]):\n if lvl == 0:\n reference = init_reference\n else:\n reference = inter_references[lvl - 1]\n reference = inverse_sigmoid(reference)\n outputs_class = self.class_embed[lvl](hs[lvl])\n outputs_reid = self.reid_embed[lvl](hs_reid[lvl])\n outputs_id = self.reid_cls[lvl](self.emb_scale * F.normalize(outputs_reid, dim=2))\n tmp = self.bbox_embed[lvl](hs[lvl])\n if reference.shape[-1] == 4:\n tmp += reference\n else:\n assert reference.shape[-1] == 2\n tmp[..., :2] += reference\n outputs_coord = tmp.sigmoid()\n outputs_classes.append(outputs_class)\n outputs_coords.append(outputs_coord)\n outputs_reids.append(outputs_reid)\n outputs_ids.append(outputs_id)\n outputs_class = torch.stack(outputs_classes)\n outputs_coord = torch.stack(outputs_coords)\n outputs_reid = torch.stack(outputs_reids)\n outputs_id = torch.stack(outputs_ids)\n \n out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1], 'pred_reids': outputs_reid[-1], 'pred_ids': outputs_id[-1]}\n \n if self.aux_loss:\n out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord, outputs_id)\n\n\n if self.two_stage and self.training:\n enc_outputs_coord = enc_outputs_coord_unact.sigmoid()\n out['enc_outputs'] = {'pred_logits': enc_outputs_class, 'pred_boxes': enc_outputs_coord}\n return out, None\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_coord, outputs_id):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [{'pred_logits': a, 'pred_boxes': b, 'pred_ids': c}\n for a, b, c in zip(outputs_class[:-1], outputs_coord[:-1], outputs_id[:-1])]\n\n\n\nclass SetCriterion(nn.Module):\n \"\"\" This class computes the loss for DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / 
prediction (supervise class and box)\n \"\"\"\n def __init__(self, num_classes, num_ids, matcher, weight_dict, losses, focal_alpha=0.25):\n \"\"\" Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n focal_alpha: alpha in Focal Loss\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.matcher = matcher\n self.num_ids = num_ids\n self.weight_dict = weight_dict\n self.losses = losses\n self.focal_alpha = focal_alpha\n self.ce_id_loss = nn.CrossEntropyLoss(ignore_index=-1)\n\n def loss_labels(self, outputs, targets, indices, num_boxes, log=True):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert 'pred_logits' in outputs\n src_logits = outputs['pred_logits']\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(src_logits.shape[:2], self.num_classes,\n dtype=torch.int64, device=src_logits.device)\n target_classes[idx] = target_classes_o\n\n target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],\n dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device)\n target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)\n\n target_classes_onehot = target_classes_onehot[:,:,:-1]\n loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1]\n losses = {'loss_ce': loss_ce}\n\n if log:\n # TODO this should probably be a separate loss, not hacked in this one here\n losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]\n return losses\n\n def loss_ids_ce_pos(self, outputs, targets, indices, num_boxes, log=True):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert 'pred_ids' in outputs\n src_logits = outputs['pred_ids']\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"track_ids\"][J] for t, (_, J) in zip(targets, indices)])\n src_pos = src_logits[idx]\n\n loss_id = self.ce_id_loss(src_pos, target_classes_o)\n losses = {'loss_id': loss_id}\n\n #if log:\n # TODO this should probably be a separate loss, not hacked in this one here\n #losses['id_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]\n return losses\n\n @torch.no_grad()\n def loss_cardinality(self, outputs, targets, indices, num_boxes):\n \"\"\" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes\n This is not really a loss, it is intended for logging purposes only. 
It doesn't propagate gradients\n \"\"\"\n pred_logits = outputs['pred_logits']\n device = pred_logits.device\n tgt_lengths = torch.as_tensor([len(v[\"labels\"]) for v in targets], device=device)\n # Count the number of predictions that are NOT \"no-object\" (which is the last class)\n card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)\n card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())\n losses = {'cardinality_error': card_err}\n return losses\n\n def loss_boxes(self, outputs, targets, indices, num_boxes):\n \"\"\"Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n targets dicts must contain the key \"boxes\" containing a tensor of dim [nb_target_boxes, 4]\n The target boxes are expected in format (center_x, center_y, h, w), normalized by the image size.\n \"\"\"\n assert 'pred_boxes' in outputs\n idx = self._get_src_permutation_idx(indices)\n src_boxes = outputs['pred_boxes'][idx]\n target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)\n\n loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')\n\n losses = {}\n losses['loss_bbox'] = loss_bbox.sum() / num_boxes\n\n loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(\n box_ops.box_cxcywh_to_xyxy(src_boxes),\n box_ops.box_cxcywh_to_xyxy(target_boxes)))\n losses['loss_giou'] = loss_giou.sum() / num_boxes\n return losses\n\n def loss_masks(self, outputs, targets, indices, num_boxes):\n \"\"\"Compute the losses related to the masks: the focal loss and the dice loss.\n targets dicts must contain the key \"masks\" containing a tensor of dim [nb_target_boxes, h, w]\n \"\"\"\n assert \"pred_masks\" in outputs\n\n src_idx = self._get_src_permutation_idx(indices)\n tgt_idx = self._get_tgt_permutation_idx(indices)\n\n src_masks = outputs[\"pred_masks\"]\n\n # TODO use valid to mask invalid areas due to padding in loss\n target_masks, valid = nested_tensor_from_tensor_list([t[\"masks\"] for t in targets]).decompose()\n target_masks = target_masks.to(src_masks)\n\n src_masks = src_masks[src_idx]\n # upsample predictions to the target size\n src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],\n mode=\"bilinear\", align_corners=False)\n src_masks = src_masks[:, 0].flatten(1)\n\n target_masks = target_masks[tgt_idx].flatten(1)\n\n losses = {\n \"loss_mask\": sigmoid_focal_loss(src_masks, target_masks, num_boxes),\n \"loss_dice\": dice_loss(src_masks, target_masks, num_boxes),\n }\n return losses\n\n def _get_src_permutation_idx(self, indices):\n # permute predictions following indices\n batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n def _get_tgt_permutation_idx(self, indices):\n # permute targets following indices\n batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n return batch_idx, tgt_idx\n\n def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):\n loss_map = {\n 'labels': self.loss_labels,\n 'ids': self.loss_ids_ce_pos,\n 'cardinality': self.loss_cardinality,\n 'boxes': self.loss_boxes,\n 'masks': self.loss_masks\n }\n assert loss in loss_map, f'do you really want to compute {loss} loss?'\n return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)\n\n def forward(self, outputs, targets, pre_outputs=None, pre_targets=None):\n \"\"\" This performs the loss computation.\n 
Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n \"\"\"\n outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs' and k != 'enc_outputs'}\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n \n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n kwargs = {}\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes, **kwargs))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if 'aux_outputs' in outputs:\n for i, aux_outputs in enumerate(outputs['aux_outputs']):\n indices = self.matcher(aux_outputs, targets)\n\n for loss in self.losses:\n if loss == 'masks':\n # Intermediate masks losses are too costly to compute, we ignore them.\n continue\n kwargs = {}\n if loss == 'labels' or loss == 'ids':\n # Logging is enabled only for the last layer\n kwargs['log'] = False\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n l_dict = {k + f'_{i}': v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n if 'enc_outputs' in outputs:\n enc_outputs = outputs['enc_outputs']\n bin_targets = copy.deepcopy(targets)\n for bt in bin_targets:\n bt['labels'] = torch.zeros_like(bt['labels'])\n indices = self.matcher(enc_outputs, bin_targets)\n for loss in self.losses:\n if loss == 'masks':\n # Intermediate masks losses are too costly to compute, we ignore them.\n continue\n kwargs = {}\n if loss == 'labels':\n # Logging is enabled only for the last layer\n kwargs['log'] = False\n l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs)\n l_dict = {k + f'_enc': v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses\n\n\nclass PostProcess(nn.Module):\n \"\"\" This module converts the model's output into the format expected by the coco api\"\"\"\n\n @torch.no_grad()\n def forward(self, outputs, target_sizes):\n \"\"\" Perform the computation\n Parameters:\n outputs: raw outputs of the model\n target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch\n For evaluation, this must be the original image size (before any data augmentation)\n For visualization, this should be the image size after data augment, but before padding\n \"\"\"\n out_logits, out_bbox, out_reid = outputs['pred_logits'], outputs['pred_boxes'], outputs['pred_reids']\n\n assert len(out_logits) == len(target_sizes)\n assert target_sizes.shape[1] == 2\n\n prob = out_logits.sigmoid()\n topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)\n scores = topk_values\n topk_boxes = topk_indexes // out_logits.shape[2]\n labels = topk_indexes % out_logits.shape[2]\n reid_dim = out_reid.shape[2]\n boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)\n boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n reids 
= torch.gather(out_reid, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, reid_dim))\n reids = F.normalize(reids, dim=2)\n\n # and from relative [0, 1] to absolute [0, height] coordinates\n img_h, img_w = target_sizes.unbind(1)\n scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)\n boxes = boxes * scale_fct[:, None, :]\n\n results = [{'scores': s, 'labels': l, 'boxes': b, 'reids':r } for s, l, b, r in zip(scores, labels, boxes, reids)]\n\n return results\n\n\nclass MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x\n\n\ndef build(args):\n if args.dataset_file == 'coco':\n num_classes = 91\n elif args.dataset_file == 'mot':\n num_classes = 20\n elif args.dataset_file == \"coco_panoptic\":\n num_classes = 250\n else:\n num_classes = 20 \n device = torch.device(args.device)\n\n backbone = build_backbone(args)\n\n transformer = build_deforamble_transformer(args)\n model = DeformableDETR(\n backbone,\n transformer,\n num_classes=num_classes,\n num_ids=args.num_ids,\n reid_dim=args.reid_dim,\n reid_shared=args.reid_shared,\n num_queries=args.num_queries,\n num_feature_levels=args.num_feature_levels,\n aux_loss=args.aux_loss,\n with_box_refine=args.with_box_refine,\n two_stage=args.two_stage,\n )\n if args.masks:\n model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))\n matcher = build_matcher(args)\n weight_dict = {'loss_ce': args.cls_loss_coef, 'loss_bbox': args.bbox_loss_coef}\n weight_dict['loss_giou'] = args.giou_loss_coef\n weight_dict['loss_id'] = args.id_loss_coef\n if args.masks:\n weight_dict[\"loss_mask\"] = args.mask_loss_coef\n weight_dict[\"loss_dice\"] = args.dice_loss_coef\n # TODO this is a hack\n if args.aux_loss:\n aux_weight_dict = {}\n for i in range(args.dec_layers - 1):\n aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})\n aux_weight_dict.update({k + f'_enc': v for k, v in weight_dict.items()})\n weight_dict.update(aux_weight_dict)\n\n losses = ['labels', 'boxes', 'cardinality', 'ids']\n if args.masks:\n losses += [\"masks\"]\n # num_classes, matcher, weight_dict, losses, focal_alpha=0.25\n criterion = SetCriterion(num_classes, args.num_ids, matcher, weight_dict, losses, focal_alpha=args.focal_alpha)\n criterion.to(device)\n postprocessors = {'bbox': PostProcess()}\n if args.masks:\n postprocessors['segm'] = PostProcessSegm()\n if args.dataset_file == \"coco_panoptic\":\n is_thing_map = {i: i <= 90 for i in range(201)}\n postprocessors[\"panoptic\"] = PostProcessPanoptic(is_thing_map, threshold=0.85)\n\n return model, criterion, postprocessors\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.stack", "torch.nn.ModuleList", "torch.ones", "torch.nn.CrossEntropyLoss", "torch.nn.init.constant_", "torch.tensor", "torch.zeros_like", "torch.zeros", "torch.device", "torch.nn.functional.l1_loss", "torch.nn.GroupNorm", "torch.full_like", "torch.randn", "torch.nn.Conv2d", "torch.full", "torch.nn.functional.normalize", "torch.rand", "torch.no_grad", "torch.nn.functional.interpolate", "torch.nn.init.xavier_uniform_", "torch.distributed.all_reduce", "torch.nn.Embedding" ] ]
JeremyZhao1998/PaddleNLP
[ "5a34684a7f0c8a186043fed386be4b62cb85fb15" ]
[ "paddlenlp/ops/faster_transformer/sample/decoding_sample.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nimport os\nimport numpy as np\nfrom attrdict import AttrDict\nimport argparse\nimport time\n\nimport paddle\n\nimport yaml\nfrom pprint import pprint\n\nfrom paddlenlp.ops import FasterTransformer\n\nfrom paddlenlp.utils.log import logger\n\npaddle.seed(2)\nnp.random.seed(2)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--config\",\n default=\"./faster_transformer/sample/config/decoding.sample.yaml\",\n type=str,\n help=\"Path of the config file. \")\n parser.add_argument(\n \"--decoding_lib\",\n default=\"./build/lib/libdecoding_op.so\",\n type=str,\n help=\"Path of libdecoding_op.so. \")\n parser.add_argument(\n \"--use_fp16_decoding\",\n action=\"store_true\",\n help=\"Whether to use fp16 decoding to predict. \")\n args = parser.parse_args()\n return args\n\n\ndef do_predict(args):\n place = \"gpu\"\n place = paddle.set_device(place)\n\n # Define model\n transformer = FasterTransformer(\n src_vocab_size=args.src_vocab_size,\n trg_vocab_size=args.trg_vocab_size,\n max_length=args.max_length + 1,\n num_encoder_layers=args.n_layer,\n num_decoder_layers=args.n_layer,\n n_head=args.n_head,\n d_model=args.d_model,\n d_inner_hid=args.d_inner_hid,\n dropout=args.dropout,\n weight_sharing=args.weight_sharing,\n bos_id=args.bos_idx,\n eos_id=args.eos_idx,\n decoding_strategy=args.decoding_strategy,\n beam_size=args.beam_size,\n topk=args.topk,\n topp=args.topp,\n max_out_len=args.max_out_len,\n decoding_lib=args.decoding_lib,\n use_fp16_decoding=args.use_fp16_decoding)\n\n # Set evaluate mode\n transformer.eval()\n\n enc_output = paddle.randn(\n [args.infer_batch_size, args.max_length, args.d_model])\n if args.use_fp16_decoding:\n enc_output = paddle.cast(enc_output, \"float16\")\n mem_seq_len = paddle.randint(\n 1, args.max_length + 1, shape=[args.infer_batch_size], dtype=\"int32\")\n with paddle.no_grad():\n for i in range(100):\n # For warmup. \n if 50 == i:\n start = time.time()\n transformer.decoding(\n enc_output=enc_output, memory_seq_lens=mem_seq_len)\n logger.info(\"Average test time for decoding is %f ms\" % (\n (time.time() - start) / 50 * 1000))\n\n\nif __name__ == \"__main__\":\n ARGS = parse_args()\n yaml_file = ARGS.config\n with open(yaml_file, 'rt') as f:\n args = AttrDict(yaml.safe_load(f))\n pprint(args)\n args.decoding_lib = ARGS.decoding_lib\n args.use_fp16_decoding = ARGS.use_fp16_decoding\n\n do_predict(args)\n" ]
[ [ "numpy.random.seed" ] ]
bluetiger9/Vitis-AI
[ "f61061eef7550d98bf02a171604c9a9f283a7c47" ]
[ "tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/vitis_quantize.py" ]
[ "# Copyright 2019 Xilinx Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Quantization API functions for tf.keras models.\"\"\"\n\nimport os\nimport copy\nimport collections\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.base import quantize_annotate as quantize_annotate_mod\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.base import quantize_config as quantize_config_mod\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.common import vitis_quantize_aware_activation\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.common import vitis_quantize_wrapper\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.common import vitis_quantize_registry\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.common import vitis_quantizers\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.common import vitis_quantize_configs\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.optimizations import vitis_fast_finetune\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.optimizations import vitis_bias_correction\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.layers import vitis_quantize as vitis_quantize_layer\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.layers import vitis_activation\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.layers import vitis_pooling\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.eight_bit import vitis_8bit_quantize_strategy\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.eight_bit_fs import vitis_8bit_fs_quantize_strategy\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.utils import common_utils\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.utils import model_utils\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis import vitis_quantize_strategies\n\nlogger = common_utils.VAILogger\nkeras = tf.keras\n\n\ndef quantize_scope(*args):\n \"\"\"Scope which can be used to deserialize quantized Keras models and layers.\n\n Under `quantize_scope`, Keras methods such as `tf.keras.load_model` and\n `tf.keras.models.model_from_config` will be able to deserialize Keras models\n and layers which contain quantization classes such as `QuantizeConfig`\n and `Quantizer`.\n\n Example:\n\n ```python\n tf.keras.models.save_model(quantized_model, keras_file)\n\n with quantize_scope():\n loaded_model = tf.keras.models.load_model(keras_file)\n\n # If your quantized model uses custom objects such as a specific `Quantizer`,\n # you can pass them to quantize_scope to deserialize your model.\n with quantize_scope({'FixedRangeQuantizer', FixedRangeQuantizer}\n loaded_model 
= tf.keras.models.load_model(keras_file)\n ```\n\n For further understanding, see `tf.keras.utils.custom_object_scope`.\n\n Args:\n *args: Variable length list of dictionaries of `{name, class}` pairs to add\n to the scope created by this method.\n\n Returns:\n Object of type `CustomObjectScope` with quantization objects included.\n \"\"\"\n quantization_objects = {\n 'QuantizeAwareActivation':\n vitis_quantize_aware_activation.QuantizeAwareActivation,\n 'NoQuantizeActivation':\n vitis_quantize_aware_activation.NoQuantizeActivation,\n 'QuantizeWrapper':\n vitis_quantize_wrapper.QuantizeWrapper,\n }\n quantization_objects.update(vitis_quantizers._types_dict())\n quantization_objects.update(vitis_quantize_configs._types_dict())\n quantization_objects.update(vitis_quantize_layer._types_dict())\n quantization_objects.update(vitis_activation._types_dict())\n quantization_objects.update(vitis_pooling._types_dict())\n\n return tf.keras.utils.custom_object_scope(*(args + (quantization_objects,)))\n\n\nclass CollectQuantizeInfoCallback(keras.callbacks.Callback):\n \"\"\"Callback to collect the quantize info of each batch.\"\"\"\n\n def __init__(self):\n super(CollectQuantizeInfoCallback, self).__init__()\n self._quantize_info = collections.OrderedDict()\n\n def on_predict_batch_end(self, batch, logs=None):\n self._quantize_info[batch] = model_utils.get_quantize_info(self.model)\n\n @property\n def quantize_info(self):\n return self._quantize_info\n\n def get_last_quantize_info(self):\n return next(reversed(self._quantize_info.values()))\n\n def get_most_common_quantize_info(self):\n pos_map = {}\n for batch_quantize_info in self._quantize_info.values():\n for layer, q_info in batch_quantize_info.items():\n if q_info.get('type') == 'input':\n if layer not in pos_map:\n pos_map[layer] = {'input': []}\n pos = q_info['info']['quant_pos_var']\n pos_map[layer]['input'].append(pos)\n else:\n if layer not in pos_map:\n pos_map[layer] = {}\n for k, v in q_info.items():\n if not v:\n continue\n if k not in pos_map[layer]:\n pos_map[layer][k] = []\n pos = v['info']['quant_pos_var']\n pos_map[layer][k].append(pos)\n\n mc_pos_map = {}\n for layer, q_info in pos_map.items():\n mc_pos_map[layer] = {}\n for k, v in q_info.items():\n mc_pos_map[layer][k] = max(v, key=v.count)\n\n _, mc_quantize_info = self._quantize_info.popitem()\n for layer, q_info in mc_quantize_info.items():\n if q_info.get('type') == 'input':\n if q_info['info']['quant_pos_var'] != mc_pos_map[layer]['input']:\n q_info['info']['quant_pos_var'] = mc_pos_map[layer]['input']\n else:\n for k, v in q_info.items():\n if not v:\n continue\n if v['info']['quant_pos_var'] != mc_pos_map[layer][k]:\n v['info']['quant_pos_var'] = mc_pos_map[layer][k]\n\n return mc_quantize_info\n\n\nclass VitisQuantizer(object):\n \"\"\"Vitis Quantizer main APIs\"\"\"\n\n def __init__(self,\n float_model,\n quantize_strategy='8bit',\n custom_quantize_strategy=None,\n custom_objects={}):\n \"\"\"Init VitisQuantizer.\"\"\"\n self._float_model = float_model\n self._qat_model = None\n self._qcb_model = None\n self._qcbev_model = None\n self._analyse_model = None\n self._optimized_model = None\n self._candidate_layers = None\n self._layer_metadata = None\n\n # Custom objects\n self._custom_object_scope = tf.keras.utils.custom_object_scope(\n custom_objects)\n\n # Built-in quantize strategy\n self._quantize_strategy = vitis_quantize_strategies.get(quantize_strategy)\n\n # Custom quantize strategy\n if custom_quantize_strategy:\n if isinstance(custom_quantize_strategy, str):\n 
custom_quantize_strategy = common_utils.load_json(\n custom_quantize_strategy)\n self._quantize_strategy.update(custom_quantize_strategy)\n\n def _create_qat_model(self):\n \"\"\"Create quantize-aware training model.\"\"\"\n if not self._optimized_model:\n logger.error('Should call `optimize_model()` before `_create_qat_model`.')\n self._qat_model, self._layer_metadata = create_quantize_model(\n self._optimized_model,\n candidate_layers=self._candidate_layers,\n layer_metadata=self._layer_metadata,\n quantize_strategy=self._quantize_strategy,\n mode='QAT')\n\n def _run_model_with_collector(self, model, dataset, batch_size, steps):\n \"\"\"Run model with quantize info collector.\"\"\"\n collector = CollectQuantizeInfoCallback()\n model.predict(\n dataset,\n batch_size=batch_size,\n verbose=1,\n steps=steps,\n callbacks=[collector])\n return collector\n\n def _create_optimized_model(self):\n \"\"\"Create optimized model.\"\"\"\n self._optimized_model, self._layer_metadata = create_optimize_model(\n self._float_model,\n candidate_layers=self._candidate_layers,\n layer_metadata=self._layer_metadata,\n quantize_strategy=self._quantize_strategy)\n\n def _create_analysed_model(self, dataset):\n \"\"\"Create analysed model.\"\"\"\n self._analysed_model, self._layer_metadata = create_quantize_model(\n self._float_model,\n candidate_layers=self._candidate_layers,\n layer_metadata=self._layer_metadata,\n quantize_strategy=self._quantize_strategy,\n mode='ANALYSE')\n\n logger.info(\"Start Model Analyse...\")\n collector = self._run_model_with_collector(self._analysed_model, dataset,\n batch_size, steps)\n logger.info(\"Model Analyse Done.\")\n # model_info = collector.get_last_quantize_info()\n model_info = collector.get_most_common_quantize_info()\n return model_info\n\n def _freeze_quantize_info(self, quantize_info):\n \"\"\"Freeze the quantize info into the quantize calibrate and evaluate model.\"\"\"\n if not self._qcb_model:\n logger.error('No qcb_model found.')\n\n if not self._qcbev_model:\n logger.error('No qcbev_model found.')\n\n # Freeze the quantize info into the quantized model\n model_utils.set_quantize_info(self._qcb_model, quantize_info)\n model_utils.set_quantize_info(self._qcbev_model, quantize_info)\n\n def _calibrate_without_loss(self, calib_dataset, calib_batch_size,\n calib_steps):\n \"\"\"Calibrate model without loss, only with unlabeled dataset.\"\"\"\n # Create quantize calibration model\n if not self._optimized_model:\n logger.error(\n 'Should call `optimize_model()` before `_calibrate_without_loss`.')\n self._qcb_model, self._layer_metadata = create_quantize_model(\n self._optimized_model,\n candidate_layers=self._candidate_layers,\n layer_metadata=self._layer_metadata,\n quantize_strategy=self._quantize_strategy,\n mode='QCB')\n\n logger.info(\"Start Quantize Calibration...\")\n collector = self._run_model_with_collector(self._qcb_model, calib_dataset,\n calib_batch_size, calib_steps)\n\n # Create quantize calibration evaluation model\n self._qcbev_model = model_utils.clone_model_with_weights(self._qcb_model)\n model_utils.set_layer_mode(self._qcbev_model, 'QCBEV')\n\n if type(self._quantize_strategy\n ) == vitis_8bit_quantize_strategy.Vitis8BitQuantizeStrategy:\n # Freeze the quantize info into the model, now using most_common_quantize_info\n # last_quantize_info = collector.get_last_quantize_info()\n common_quantize_info = collector.get_most_common_quantize_info()\n self._freeze_quantize_info(common_quantize_info)\n\n logger.info(\"Quantize Calibration Done.\")\n\n def 
_calibrate_with_loss(self, loss, metrics, calib_dataset, eval_dataset,\n verbose):\n \"\"\"Calibrate model with loss and metrics to get better accuracy, need eval_dataset.\"\"\"\n self._calibrate_without_loss(calib_dataset, calib_batch_size, calib_steps)\n init_quantize_info = model_utils.get_quantize_info(self._qcbev_model)\n\n quantize_layers = {}\n for layer in self._qcb_model.layers:\n if model_utils.is_quantize_layer(layer):\n quantize_layers[layer.name] = layer\n\n def _recompile(model):\n \"\"\"Helper function to re-compile the model.\"\"\"\n # Must reset metrics to get accurate results\n for m in metrics:\n if not isinstance(m, str):\n m.reset_states()\n model.compile(loss=loss, metrics=metrics)\n\n def _evaluate(model):\n \"\"\"Helper function to evaluate model to get loss and accuracy.\"\"\"\n _recompile(model)\n if isinstance(eval_dataset, tuple):\n eval_images, eval_labels = eval_dataset\n return model.evaluate(\n eval_images, eval_labels, verbose=verbose, return_dict=True)\n else:\n return model.evaluate(eval_dataset, verbose=verbose, return_dict=True)\n\n def _print_results(results, title=''):\n \"\"\"Helper function to print evaluation results.\"\"\"\n pstr = '[' + title + ']: ' if title else ''\n for k, v in results.items():\n pstr += '\\t{}: {}'.format(k, v)\n print(pstr)\n\n # Get float results\n model_utils.set_layer_mode(self._qcb_model, 'ANALYSE')\n float_results = _evaluate(self._qcb_model)\n _print_results(float_results, 'float_results')\n\n # Get simple quantize calibrated results\n init_results = _evaluate(self._qcbev_model)\n _print_results(init_results, 'init_results')\n\n # Do quantize pos searching\n logger.info(\"Start Quantize Position Searching...\")\n model_utils.set_layer_mode(self._qcb_model, 'QCBEV')\n best_results = init_results\n best_quantize_info = copy.deepcopy(init_quantize_info)\n count = 0\n for name, layer in quantize_layers.items():\n count += 1\n logger.info('({}/{})Processing layer: {}'.format(count,\n len(quantize_layers),\n name))\n\n def _search_optimal_pos(init_quantize_info,\n init_results,\n layer_name,\n quantizer_name,\n delta=[-1, 1, 2]):\n new_best_results = init_results\n new_best_quantize_info = copy.deepcopy(init_quantize_info)\n\n tmp_quantize_info = copy.deepcopy(init_quantize_info)\n layer_info = tmp_quantize_info[layer_name]\n if quantizer_name == 'NoQuantizeActivation':\n return new_best_quantize_info, new_best_results\n elif quantizer_name == 'input':\n q_info = layer_info['info']\n else:\n q_info = layer_info[quantizer_name]['info']\n q_pos = q_info['quant_pos_var']\n\n for dt in delta:\n if verbose:\n logger.info('Try change {}.{}: {} -> {}'.format(\n layer_name, quantizer_name, q_pos, q_pos + dt))\n q_info['quant_pos_var'] = q_pos + dt\n model_utils.set_quantize_info(self._qcb_model, tmp_quantize_info)\n q_results = _evaluate(self._qcb_model)\n if q_results['loss'] < new_best_results['loss']:\n new_best_results = q_results\n new_best_quantize_info = copy.deepcopy(tmp_quantize_info)\n _print_results(new_best_results, 'Update Best Results')\n return new_best_quantize_info, new_best_results\n\n # Quantize Layer\n if isinstance(layer, vitis_quantize_layer.VitisQuantize):\n best_quantize_info, best_results = _search_optimal_pos(\n init_quantize_info=best_quantize_info,\n init_results=best_results,\n layer_name=layer.name,\n quantizer_name='input')\n # Quantize Wrappers\n elif isinstance(layer, vitis_quantize_wrapper.QuantizeWrapper):\n layer_info = best_quantize_info[layer.layer.name]\n for quantizer_name, q_info in 
layer_info.items():\n best_quantize_info, best_results = _search_optimal_pos(\n init_quantize_info=best_quantize_info,\n init_results=best_results,\n layer_name=layer.layer.name,\n quantizer_name=quantizer_name)\n\n logger.info(\"Quantize Position Searching Done.\")\n _print_results(best_results, 'Final Best Results')\n\n # Freeze the quantize info into the model, now using last_quantize_info\n self._freeze_quantize_info(best_quantize_info)\n\n def _parse_configs(self, configs, kwargs):\n \"\"\"Parse configs from arguments and update the quantize strategy.\"\"\"\n if not isinstance(configs, dict):\n logger.error('Configs should be a Dict.')\n configs = {}\n configs.update(kwargs)\n if configs:\n self._quantize_strategy.update(configs)\n\n # Public Interfaces\n def optimize_model(self, configs={}, **kwargs):\n \"\"\"Get optimized model.\n\n Available configs:\n * remove_dropout=True\n * fold_conv_bn=True\n * fold_bn=True\n * replace_relu6=False\n * include_cle=True\n * cle_steps=5\n \"\"\"\n # Configure the quantize strategy\n self._parse_configs(configs, kwargs)\n\n with self._custom_object_scope:\n logger.debug('Optimize Configurations:')\n self._quantize_strategy.get_optimize_pipeline().print_configs()\n\n self._create_optimized_model()\n return self._optimized_model\n\n def get_analysed_model(self, dataset):\n \"\"\"Get analysed model.\"\"\"\n if not self._analyse_model:\n with self._custom_object_scope:\n model_info = self._create_analysed_model(dataset)\n return self._analysed_model, model_info\n\n def get_qat_model(self,\n init_quant=False,\n calib_dataset=None,\n calib_batch_size=None,\n calib_steps=None,\n configs={},\n **kwargs):\n \"\"\"Get quantize-aware training model.\n\n Available configs:\n * input_bit=8\n * weight_bit=8\n * activation_bit=8\n * remove_dropout=True\n * fold_conv_bn=True\n * fold_bn=True\n * replace_relu6=False\n * include_cle=True\n * cle_steps=5\n * forced_cle=False\n * include_fast_ft=False\n * fast_ft_epochs=10\n \"\"\"\n with self._custom_object_scope:\n self._parse_configs(configs, kwargs)\n self.optimize_model()\n\n logger.debug('Quantize Pipeline Configurations:')\n self._quantize_strategy.get_quantize_registry().print_configs()\n self._quantize_strategy.get_quantize_pipeline().print_configs()\n\n logger.info('Start Generation of Quantize-aware Training Model.')\n if not self._qat_model:\n self._create_qat_model()\n\n # Do post training quantization to initialize the quantize-aware training model\n if init_quant:\n logger.info('Start Initialization with Quantize Calibration...')\n self.quantize_model(\n loss=None,\n metrics=None,\n calib_dataset=calib_dataset,\n calib_batch_size=calib_batch_size,\n calib_steps=calib_steps,\n eval_dataset=None,\n verbose=0)\n init_weights = self._qcbev_model.get_weights()\n self._qat_model.set_weights(init_weights)\n logger.info('Initialization with Quantize Calibration Done.')\n\n logger.info('Generation of Quantize-aware Training Model Done.')\n return self._qat_model\n\n def quantize_model(self,\n loss=None,\n metrics=None,\n calib_dataset=None,\n calib_batch_size=None,\n calib_steps=None,\n eval_dataset=None,\n verbose=0,\n configs={},\n **kwargs):\n \"\"\"Interface of Post-Training Quantize.\n \n Available configs:\n * input_bit=8\n * weight_bit=8\n * activation_bit=8\n * remove_dropout=True\n * fold_conv_bn=True\n * fold_bn=True\n * replace_relu6=False\n * include_cle=True\n * cle_steps=5\n * forced_cle=False\n * include_fast_ft=False\n * fast_ft_epochs=10\n * include_bias_corr=True\n \"\"\"\n if calib_dataset is 
None:\n logger.error(\n 'Need to assign `calib_dataset` for when calling quantize_model().')\n\n if loss and not eval_dataset:\n logger.error(\n 'Need to assign `eval_dataset` for when calling quantize_model(loss=loss_fn).'\n )\n\n # Configure the quantize strategy\n self._parse_configs(configs, kwargs)\n configs = self._quantize_strategy.get_configs()\n # Disable tf.logging warnings during quantization\n log_level = tf.get_logger().level\n tf.get_logger().setLevel('ERROR')\n\n with self._custom_object_scope:\n # Optimize model before quantization\n if not self._optimized_model:\n self.optimize_model()\n\n logger.debug('Quantize Pipeline Configurations:')\n self._quantize_strategy.get_quantize_registry().print_configs()\n self._quantize_strategy.get_quantize_pipeline().print_configs()\n\n if loss:\n self._calibrate_with_loss(loss, metrics, calib_dataset,\n calib_batch_size, calib_steps, eval_dataset,\n verbose)\n else:\n self._calibrate_without_loss(calib_dataset, calib_batch_size,\n calib_steps)\n\n # Post-quantize adjustment (Only for 8bit)\n if type(self._quantize_strategy\n ) == vitis_8bit_quantize_strategy.Vitis8BitQuantizeStrategy:\n logger.info(\"Start Post-Quantize Adjustment...\")\n quantize_info = model_utils.get_quantize_info(self._qcbev_model)\n adjust_sc = configs['quantize_pipeline_config']['adjust_shift_cut']\n adjust_sb = configs['quantize_pipeline_config']['adjust_shift_bias']\n adjusted_quantize_info = model_utils.post_quant_adjust(\n self._qcbev_model, quantize_info, adjust_sc, adjust_sb)\n self._freeze_quantize_info(adjusted_quantize_info)\n logger.info(\"Post-Quantize Adjustment Done.\")\n\n if logger.debug_enabled():\n model_utils.save_model(self._qcbev_model, 'calibrated_model.h5',\n './debug/')\n\n # Fast finetune\n include_fast_ft = configs['quantize_pipeline_config']['include_fast_ft']\n fast_ft_epochs = configs['quantize_pipeline_config']['fast_ft_epochs']\n if include_fast_ft:\n logger.info(\"Start Fast Finetuning...\")\n vitis_fast_finetune.fast_finetune(self._qcbev_model,\n self._optimized_model,\n calib_dataset, calib_batch_size,\n calib_steps, fast_ft_epochs)\n logger.info(\"Fast Finetuning Done.\")\n\n # # Bias correction\n # include_bias_corr = configs['quantize_pipeline_config'][\n # 'include_bias_corr']\n # if include_bias_corr:\n # logger.info(\"Start Bias Correction...\")\n # vitis_bias_correction.bias_correction(self._qcbev_model,\n # self._optimized_model,\n # calib_dataset, calib_batch_size,\n # calib_steps)\n # logger.info(\"Bias Correction Done.\")\n\n if type(self._quantize_strategy\n ) == vitis_8bit_quantize_strategy.Vitis8BitQuantizeStrategy:\n if logger.debug_enabled():\n quantize_info = model_utils.get_quantize_info(self._qcbev_model)\n model_utils.save_quantize_info(quantize_info, './debug/')\n\n logger.info(\"Quantization Finished.\")\n\n tf.get_logger().setLevel(log_level)\n return self._qcbev_model\n\n @staticmethod\n def get_deploy_model(model):\n \"\"\"Convert the QAT model to the deploy model which is compatible with the compiler\n and meet the DPU hardware constraints. 
\"\"\"\n deploy_model = model_utils.clone_model_with_weights(model)\n\n # Fold conv_bn_quantize layers\n deploy_model = model_utils.conv_bn_quantize_fold(deploy_model)\n\n # Convert quantize strategy\n deploy_model = model_utils.convert_quantize_strategy(\n deploy_model, conversion='8bit_tqt_to_8bit')\n\n # Remove dropout\n deploy_model = model_utils.remove_layer(deploy_model, 'Dropout')\n\n # Post-quant adjustment\n quantize_info = model_utils.get_quantize_info(deploy_model)\n adjusted_quantize_info = model_utils.post_quant_adjust(\n deploy_model,\n quantize_info,\n adjust_shift_cut=True,\n adjust_shift_bias=True)\n model_utils.set_quantize_info(deploy_model, adjusted_quantize_info)\n return deploy_model\n\n @staticmethod\n def dump_model(model,\n dataset=None,\n output_dir='./dump_results',\n dump_float=False,\n weights_only=False):\n \"\"\"Dump golden results of quantized model.\"\"\"\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if not weights_only and dataset is None:\n logger.error('`dataset` is needed to dump with activation.')\n\n logger.info(\"Start Dumping...\")\n model_utils.dump_model_weights(model, dump_float, output_dir)\n if not weights_only:\n model_utils.dump_model_activations(model, dataset, dump_float, output_dir)\n\n\ndef create_optimize_model(model, candidate_layers, layer_metadata,\n quantize_strategy):\n \"\"\"Optimize a `tf.keras` model before quantization, such as bn folding, \n activation folding.\"\"\"\n optimize_pipeline = quantize_strategy.get_optimize_pipeline()\n optimized_model, layer_metadata = optimize_pipeline.apply(\n model, candidate_layers, layer_metadata)\n return optimized_model, layer_metadata\n\n\ndef create_quantize_model(to_quantize, candidate_layers, layer_metadata,\n quantize_strategy, mode):\n \"\"\"Quantize a `tf.keras` model with the default quantization implementation.\n\n Quantization constructs a model which emulates quantization during training.\n This allows the model to learn parameters robust to quantization loss, and\n also model the accuracy of a quantized model.\n\n Note that this function removes the optimizer from the original model.\n\n The returned model copies over weights from the original model. So while\n it preserves the original weights, training it will not modify the weights\n of the original model.\n\n Args:\n to_quantize: tf.keras model to be quantized. It can have pre-trained\n weights.\n quantize_strategy: QuantizeStrategy constaining the configurations.\n\n Returns:\n Returns a new `tf.keras` model prepared for quantization.\n \"\"\"\n if to_quantize is None:\n logger.error('`to_quantize` cannot be None')\n\n if not isinstance(to_quantize, keras.Model):\n logger.error('`to_quantize` can only be a `tf.keras.Model` instance. 
'\n 'You passed an instance of type: {input}.'.format(\n input=to_quantize.__class__.__name__))\n\n if not isinstance(to_quantize, keras.Sequential) \\\n and not to_quantize._is_graph_network: # pylint: disable=protected-access\n logger.error('`to_quantize` can only either be a tf.keras Sequential or '\n 'Functional model.')\n\n AVAILABLE_MODES = ['QCB', 'QAT', 'ANALYSE', 'QCBEV']\n if mode not in AVAILABLE_MODES:\n logger.error('Mode `{}` is not valid, available modes are:{}.'.format(\n mode, AVAILABLE_MODES))\n\n return quantize_apply(to_quantize, candidate_layers, layer_metadata,\n quantize_strategy, mode)\n\n\ndef quantize_apply(model, candidate_layers, layer_metadata, quantize_strategy,\n mode):\n \"\"\"Quantize a `tf.keras` model that has been annotated for quantization.\n\n Quantization constructs a model which emulates quantization during training.\n This allows the model to learn parameters robust to quantization loss, and\n also model the accuracy of a quantized model.\n\n Note that this function removes the optimizer from the original model.\n\n The returned model copies over weights from the original model. So while\n it preserves the original weights, training it will not modify the weights\n of the original model.\n\n Args:\n model: A `tf.keras` Sequential or Functional model which has been annotated\n with `quantize_annotate`. It can have pre-trained weights.\n\n Returns:\n Returns a new `tf.keras` model in which the annotated layers have been\n prepared for quantization.\n \"\"\"\n if model is None:\n logger.error('`model` cannot be None')\n\n if not isinstance(model, keras.Model):\n logger.error('`model` can only be a `tf.keras.Model` instance.'\n 'You passed an instance of type: {input}.'.format(\n input=model.__class__.__name__))\n\n if not isinstance(model, keras.Sequential) \\\n and not model._is_graph_network: # pylint: disable=protected-access\n logger.error('Only tf.keras Sequential or Functional models are supported.')\n\n if not model.built:\n logger.error('`model` must be a built model. '\n 'been built yet. Please call `model.build(input_shape)` '\n 'before quantizing your model.')\n\n # 1. Create a copy of the model with the same weights. This ensures\n # modifications don't affect the original model, or its weights.\n try:\n model_copy = model_utils.clone_model_with_weights(model)\n except ValueError:\n logger.error(\n 'Unable to clone model. This generally happens if you used custom Keras layers or objects '\n 'in your model. Please wrap the functions in the custom_object_scope() with all the custom layers.'\n )\n\n # 2. Run the pipeline of quantize transforms.\n # Quantizable layers will be wrapped with QuantizeWrapper while others ramain float.\n quantize_pipeline = quantize_strategy.get_quantize_pipeline()\n quantized_model, layer_metadata = quantize_pipeline.apply(\n model_copy, candidate_layers, layer_metadata,\n quantize_strategy.get_quantize_registry(), mode)\n\n return quantized_model, layer_metadata\n" ]
[ [ "tensorflow.keras.utils.custom_object_scope", "tensorflow.get_logger" ] ]
quecloud/rpxdock
[ "41f7f98f5dacf24fc95897910263a0bec2209e59" ]
[ "rpxdock/homog.py" ]
[ "import numpy as np, itertools as it, functools as ft\n\ndef is_valid_quat_rot(quat):\n assert quat.shape[-1] == 4\n return np.isclose(1, np.linalg.norm(quat, axis=-1))\n\ndef quat_to_upper_half(quat):\n ineg0 = (quat[..., 0] < 0)\n ineg1 = (quat[..., 0] == 0) * (quat[..., 1] < 0)\n ineg2 = (quat[..., 0] == 0) * (quat[..., 1] == 0) * (quat[..., 2] < 0)\n ineg3 = ((quat[..., 0] == 0) * (quat[..., 1] == 0) * (quat[..., 2] == 0) * (quat[..., 3] < 0))\n # print(ineg0.shape)\n # print(ineg1.shape)\n # print(ineg2.shape)\n # print(ineg3.shape)\n ineg = ineg0 + ineg1 + ineg2 + ineg3\n quat = quat.copy()\n quat[ineg] = -quat[ineg]\n return quat\n\ndef rand_quat(shape=()):\n if isinstance(shape, int): shape = (shape, )\n q = np.random.randn(*shape, 4)\n q /= np.linalg.norm(q, axis=-1)[..., np.newaxis]\n return quat_to_upper_half(q)\n\ndef rot_to_quat(xform):\n x = np.asarray(xform)\n t0, t1, t2 = x[..., 0, 0], x[..., 1, 1], x[..., 2, 2]\n tr = t0 + t1 + t2\n quat = np.empty(x.shape[:-2] + (4, ))\n\n case0 = tr > 0\n S0 = np.sqrt(tr[case0] + 1) * 2\n quat[case0, 0] = 0.25 * S0\n quat[case0, 1] = (x[case0, 2, 1] - x[case0, 1, 2]) / S0\n quat[case0, 2] = (x[case0, 0, 2] - x[case0, 2, 0]) / S0\n quat[case0, 3] = (x[case0, 1, 0] - x[case0, 0, 1]) / S0\n\n case1 = ~case0 * (t0 >= t1) * (t0 >= t2)\n S1 = np.sqrt(1.0 + x[case1, 0, 0] - x[case1, 1, 1] - x[case1, 2, 2]) * 2\n quat[case1, 0] = (x[case1, 2, 1] - x[case1, 1, 2]) / S1\n quat[case1, 1] = 0.25 * S1\n quat[case1, 2] = (x[case1, 0, 1] + x[case1, 1, 0]) / S1\n quat[case1, 3] = (x[case1, 0, 2] + x[case1, 2, 0]) / S1\n\n case2 = ~case0 * (t1 > t0) * (t1 >= t2)\n S2 = np.sqrt(1.0 + x[case2, 1, 1] - x[case2, 0, 0] - x[case2, 2, 2]) * 2\n quat[case2, 0] = (x[case2, 0, 2] - x[case2, 2, 0]) / S2\n quat[case2, 1] = (x[case2, 0, 1] + x[case2, 1, 0]) / S2\n quat[case2, 2] = 0.25 * S2\n quat[case2, 3] = (x[case2, 1, 2] + x[case2, 2, 1]) / S2\n\n case3 = ~case0 * (t2 > t0) * (t2 > t1)\n S3 = np.sqrt(1.0 + x[case3, 2, 2] - x[case3, 0, 0] - x[case3, 1, 1]) * 2\n quat[case3, 0] = (x[case3, 1, 0] - x[case3, 0, 1]) / S3\n quat[case3, 1] = (x[case3, 0, 2] + x[case3, 2, 0]) / S3\n quat[case3, 2] = (x[case3, 1, 2] + x[case3, 2, 1]) / S3\n quat[case3, 3] = 0.25 * S3\n\n assert (np.sum(case0) + np.sum(case1) + np.sum(case2) + np.sum(case3) == np.prod(\n xform.shape[:-2]))\n\n return quat_to_upper_half(quat)\n\nxform_to_quat = rot_to_quat\n\ndef quat_to_rot(quat, dtype='f8', shape=(3, 3)):\n quat = np.asarray(quat)\n assert quat.shape[-1] == 4\n qr = quat[..., 0]\n qi = quat[..., 1]\n qj = quat[..., 2]\n qk = quat[..., 3]\n outshape = quat.shape[:-1]\n rot = np.zeros(outshape + shape, dtype=dtype)\n rot[..., 0, 0] = 1 - 2 * (qj**2 + qk**2)\n rot[..., 0, 1] = 2 * (qi * qj - qk * qr)\n rot[..., 0, 2] = 2 * (qi * qk + qj * qr)\n rot[..., 1, 0] = 2 * (qi * qj + qk * qr)\n rot[..., 1, 1] = 1 - 2 * (qi**2 + qk**2)\n rot[..., 1, 2] = 2 * (qj * qk - qi * qr)\n rot[..., 2, 0] = 2 * (qi * qk - qj * qr)\n rot[..., 2, 1] = 2 * (qj * qk + qi * qr)\n rot[..., 2, 2] = 1 - 2 * (qi**2 + qj**2)\n return rot\n\ndef quat_to_xform(quat, dtype='f8'):\n r = quat_to_rot(quat, dtype, shape=(4, 4))\n r[..., 3, 3] = 1\n return r\n\ndef quat_multiply(q, r):\n q, r = np.broadcast_arrays(q, r)\n q0, q1, q2, q3 = np.moveaxis(q, -1, 0)\n r0, r1, r2, r3 = np.moveaxis(r, -1, 0)\n assert np.all(q1 == q[..., 1])\n t = np.empty_like(q)\n t[..., 0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3\n t[..., 1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2\n t[..., 2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1\n t[..., 3] = r0 * 
q3 - r1 * q2 + r2 * q1 + r3 * q0\n return t\n\ndef h_rand_points(shape=(1, )):\n pts = np.ones(shape + (4, ))\n pts[..., 0] = np.random.randn(*shape)\n pts[..., 1] = np.random.randn(*shape)\n pts[..., 2] = np.random.randn(*shape)\n return pts\n\ndef guess_is_degrees(angle):\n return np.max(np.abs(angle)) > 2 * np.pi\n\ndef is_broadcastable(shp1, shp2):\n for a, b in zip(shp1[::-1], shp2[::-1]):\n if a == 1 or b == 1 or a == b:\n pass\n else:\n return False\n return True\n\ndef fast_axis_of(xforms):\n return np.stack((xforms[..., 2, 1] - xforms[..., 1, 2], xforms[..., 0, 2] - xforms[..., 2, 0],\n xforms[..., 1, 0] - xforms[..., 0, 1], np.zeros(xforms.shape[:-2])), axis=-1)\n\ndef is_homog_xform(xforms):\n return ((xforms.shape[-2:] == (4, 4)) and (np.allclose(1, np.linalg.det(xforms[..., :3, :3])))\n and (np.allclose(xforms[..., 3, :], [0, 0, 0, 1])))\n\ndef hinv(xforms):\n return np.linalg.inv(xforms)\n\ndef axis_angle_of_3x3(rots):\n axis = np.stack((\n rots[..., 2, 1] - rots[..., 1, 2],\n rots[..., 0, 2] - rots[..., 2, 0],\n rots[..., 1, 0] - rots[..., 0, 1],\n ), axis=-1)\n\n four_sin2 = np.sum(axis**2, axis=-1)\n sin_angl = np.clip(np.sqrt(four_sin2 / 4), -1, 1)\n tr = np.trace(rots, axis1=-1, axis2=-2)\n cos_angl = np.clip((tr - 1) / 2, -1, 1)\n angl = np.arctan2(sin_angl, cos_angl)\n axis = axis / np.linalg.norm(axis, axis=-1)[..., np.newaxis]\n return axis, angl\n\ndef angle_of_3x3(rots):\n return axis_angle_of_3x3(rots)[1]\n\ndef axis_angle_of(xforms):\n if xforms.shape[-1] == 3:\n return axis_angle_of_3x3(xforms)\n axis = fast_axis_of(xforms)\n four_sin2 = np.sum(axis**2, axis=-1)\n sin_angl = np.clip(np.sqrt(four_sin2 / 4), -1, 1)\n cos_angl = np.clip(np.trace(xforms, axis1=-1, axis2=-2) / 2 - 1, -1, 1)\n nonzero = ~((-0.00001 < sin_angl) * (sin_angl < 0.00001) * (cos_angl > 0.0))\n axis_nonzero = axis[nonzero]\n axis_nonzero = axis_nonzero / np.linalg.norm(axis_nonzero, axis=-1)[..., np.newaxis]\n axis[nonzero] = axis_nonzero\n axis[~nonzero] = [0, 0, 1, 0]\n # tr = 1 + 2*cos\n # cos = (tr-1)/2\n # tr-1 = 1 + 2*cos\n # cos = tr-2/2 = tr/2-1\n angl = np.arctan2(sin_angl, cos_angl)\n return axis, angl\n\ndef angle_of(xforms):\n if xforms.shape[-1] == 3:\n return angle_of_3x3(xforms)\n axis = fast_axis_of(xforms)\n four_sin2 = np.sum(axis**2, axis=-1)\n sin_angl = np.clip(np.sqrt(four_sin2 / 4), -1, 1)\n cos_angl = np.clip(np.trace(xforms, axis1=-1, axis2=-2) / 2 - 1, -1, 1)\n angl = np.arctan2(sin_angl, cos_angl)\n return angl\n\ndef rot(axis, angle, degrees='auto', dtype='f8', shape=(3, 3)):\n axis = np.array(axis, dtype=dtype)\n angle = np.array(angle, dtype=dtype)\n if degrees is 'auto': degrees = guess_is_degrees(angle)\n angle = angle * np.pi / 180.0 if degrees else angle\n if axis.shape and angle.shape and not is_broadcastable(axis.shape[:-1], angle.shape):\n raise ValueError('axis and angle not compatible: ' + str(axis.shape) + ' ' +\n str(angle.shape))\n axis /= np.linalg.norm(axis, axis=-1)[..., np.newaxis]\n a = np.cos(angle / 2.0)\n tmp = axis * -np.sin(angle / 2)[..., np.newaxis]\n b, c, d = tmp[..., 0], tmp[..., 1], tmp[..., 2]\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n outshape = angle.shape if angle.shape else axis.shape[:-1]\n rot3 = np.zeros(outshape + shape, dtype=dtype)\n rot3[..., 0, 0] = aa + bb - cc - dd\n rot3[..., 0, 1] = 2 * (bc + ad)\n rot3[..., 0, 2] = 2 * (bd - ac)\n rot3[..., 1, 0] = 2 * (bc - ad)\n rot3[..., 1, 1] = aa + cc - bb - dd\n rot3[..., 1, 2] = 2 * (cd + ab)\n rot3[..., 
2, 0] = 2 * (bd + ac)\n rot3[..., 2, 1] = 2 * (cd - ab)\n rot3[..., 2, 2] = aa + dd - bb - cc\n return rot3\n\ndef hrot(axis, angle, center=None, dtype='f8', **args):\n axis = np.array(axis, dtype=dtype)\n angle = np.array(angle, dtype=dtype)\n center = (np.array([0, 0, 0], dtype=dtype) if center is None else np.array(\n center, dtype=dtype))\n r = rot(axis, angle, dtype=dtype, shape=(4, 4), **args)\n x, y, z = center[..., 0], center[..., 1], center[..., 2]\n r[..., 0, 3] = x - r[..., 0, 0] * x - r[..., 0, 1] * y - r[..., 0, 2] * z\n r[..., 1, 3] = y - r[..., 1, 0] * x - r[..., 1, 1] * y - r[..., 1, 2] * z\n r[..., 2, 3] = z - r[..., 2, 0] * x - r[..., 2, 1] * y - r[..., 2, 2] * z\n r[..., 3, 3] = 1\n return r\n\ndef hpoint(point):\n point = np.asanyarray(point)\n if point.shape[-1] == 4: return point\n elif point.shape[-1] == 3:\n r = np.ones(point.shape[:-1] + (4, ))\n r[..., :3] = point\n return r\n else:\n raise ValueError('point must len 3 or 4')\n\ndef hvec(vec):\n vec = np.asanyarray(vec)\n if vec.shape[-1] == 4: return vec\n elif vec.shape[-1] == 3:\n r = np.zeros(vec.shape[:-1] + (4, ))\n r[..., :3] = vec\n return r\n else:\n raise ValueError('vec must len 3 or 4')\n\ndef hray(origin, direction):\n origin = hpoint(origin)\n direction = hnormalized(direction)\n s = np.broadcast(origin, direction).shape\n r = np.empty(s[:-1] + (4, 2))\n r[..., :origin.shape[-1], 0] = origin\n r[..., 3, 0] = 1\n r[..., :, 1] = direction\n return r\n\ndef hstub(u, v, w, cen=None):\n u, v, w = hpoint(u), hpoint(v), hpoint(w)\n assert u.shape == v.shape == w.shape\n if not cen: cen = u\n cen = hpoint(cen)\n assert cen.shape == u.shape\n stubs = np.empty(u.shape[:-1] + (4, 4))\n stubs[..., :, 0] = hnormalized(u - v)\n stubs[..., :, 2] = hnormalized(hcross(stubs[..., :, 0], w - v))\n stubs[..., :, 1] = hcross(stubs[..., :, 2], stubs[..., :, 0])\n stubs[..., :, 3] = hpoint(cen[..., :])\n return stubs\n\ndef htrans(trans, dtype='f8'):\n trans = np.asanyarray(trans)\n if trans.shape[-1] != 3:\n raise ValueError('trans should be shape (..., 3)')\n tileshape = trans.shape[:-1] + (1, 1)\n t = np.tile(np.identity(4, dtype), tileshape)\n t[..., :trans.shape[-1], 3] = trans\n return t\n\ndef hdot(a, b):\n a = np.asanyarray(a)\n b = np.asanyarray(b)\n return np.sum(a[..., :3] * b[..., :3], axis=-1)\n\ndef hcross(a, b):\n a = np.asanyarray(a)\n b = np.asanyarray(b)\n c = np.zeros(np.broadcast(a, b).shape, dtype=a.dtype)\n c[..., :3] = np.cross(a[..., :3], b[..., :3])\n return c\n\ndef hnorm(a):\n a = np.asanyarray(a)\n return np.sqrt(np.sum(a[..., :3] * a[..., :3], axis=-1))\n\ndef hnorm2(a):\n a = np.asanyarray(a)\n return np.sum(a[..., :3] * a[..., :3], axis=-1)\n\ndef hnormalized(a):\n a = np.asanyarray(a)\n if (not a.shape and len(a) == 3) or (a.shape and a.shape[-1] == 3):\n a, tmp = np.zeros(a.shape[:-1] + (4, )), a\n a[..., :3] = tmp\n return a / hnorm(a)[..., None]\n\ndef is_valid_rays(r):\n r = np.asanyarray(r)\n if r.shape[-2:] != (4, 2): return False\n if np.any(r[..., 3, :] != (1, 0)): return False\n if np.any(abs(np.linalg.norm(r[..., :3, 1], axis=-1) - 1) > 0.000001):\n return False\n return True\n\ndef rand_point(shape=()):\n if isinstance(shape, int): shape = (shape, )\n return hpoint(np.random.randn(*(shape + (3, ))))\n\ndef rand_vec(shape=()):\n if isinstance(shape, int): shape = (shape, )\n return hvec(np.random.randn(*(shape + (3, ))))\n\ndef rand_unit(shape=()):\n if isinstance(shape, int): shape = (shape, )\n return hnormalized(np.random.randn(*(shape + (3, ))))\n\ndef angle(u, v):\n d = 
hdot(hnormalized(u), hnormalized(v))\n # todo: handle special cases... 1,-1\n return np.arccos(np.clip(d, -1, 1))\n\ndef angle_degrees(u, v):\n return angle(u, v) * 180 / np.pi\n\ndef line_angle(u, v):\n a = angle(u, v)\n return np.minimum(a, np.pi - a)\n\ndef line_angle_degrees(u, v):\n a = angle(u, v)\n a = np.minimum(a, np.pi - a)\n return a * 180 / np.pi\n\ndef rand_ray(shape=(), cen=(0, 0, 0), sdev=1):\n if isinstance(shape, int): shape = (shape, )\n cen = np.asanyarray(cen)\n if cen.shape[-1] not in (3, 4):\n raise ValueError('cen must be len 3 or 4')\n shape = shape or cen.shape[:-1]\n cen = cen + np.random.randn(*(shape + (3, ))) * sdev\n norm = np.random.randn(*(shape + (3, )))\n norm /= np.linalg.norm(norm, axis=-1)[..., np.newaxis]\n r = np.zeros(shape + (4, 2))\n r[..., :3, 0] = cen\n r[..., 3, 0] = 1\n r[..., :3, 1] = norm\n return r\n\ndef rand_xform_aac(shape=(), axis=None, ang=None, cen=None):\n if isinstance(shape, int): shape = (shape, )\n if axis is None:\n axis = rand_unit(shape)\n if ang is None:\n ang = np.random.rand(*shape) * np.pi # todo: make uniform!\n if cen is None:\n cen = rand_point(shape)\n q = rand_quat(shape)\n return hrot(axis, ang, cen)\n\ndef rand_xform(shape=(), cart_cen=0, cart_sd=1):\n if isinstance(shape, int): shape = (shape, )\n q = rand_quat(shape)\n x = quat_to_xform(q)\n x[..., :3, 3] = np.random.randn(*shape, 3) * cart_sd + cart_cen\n return x\n\ndef proj_perp(u, v):\n u = np.asanyarray(u)\n v = np.asanyarray(v)\n return v - hdot(u, v)[..., None] / hnorm2(u)[..., None] * u\n\ndef point_in_plane(plane, pt):\n return np.abs(hdot(plane[..., :3, 1], pt[..., :3] - plane[..., :3, 0])) < 0.000001\n\ndef ray_in_plane(plane, ray):\n assert ray.shape[-2:] == (4, 2)\n return (point_in_plane(plane, ray[..., :3, 0]) *\n point_in_plane(plane, ray[..., :3, 0] + ray[..., :3, 1]))\n\ndef intersect_planes(plane1, plane2):\n \"\"\"intersect_Planes: find the 3D intersection of two planes\n Input: two planes represented by rays shape=(..., 4, 2)\n Output: *L = the intersection line (when it exists)\n Return: rays shape=(...,4,2), status\n 0 = intersection returned\n 1 = disjoint (no intersection)\n 2 = the two planes coincide\n \"\"\"\n if not is_valid_rays(plane1): raise ValueError('invalid plane1')\n if not is_valid_rays(plane2): raise ValueError('invalid plane2')\n shape1, shape2 = np.array(plane1.shape), np.array(plane2.shape)\n if np.any((shape1 != shape2) * (shape1 != 1) * (shape2 != 1)):\n raise ValueError('incompatible shapes for plane1, plane2:')\n p1, n1 = plane1[..., :3, 0], plane1[..., :3, 1]\n p2, n2 = plane2[..., :3, 0], plane2[..., :3, 1]\n shape = tuple(np.maximum(plane1.shape, plane2.shape))\n u = np.cross(n1, n2)\n abs_u = np.abs(u)\n planes_parallel = np.sum(abs_u, axis=-1) < 0.000001\n p2_in_plane1 = point_in_plane(plane1, p2)\n status = np.zeros(shape[:-2])\n status[planes_parallel] = 1\n status[planes_parallel * p2_in_plane1] = 2\n d1 = -hdot(n1, p1)\n d2 = -hdot(n2, p2)\n amax = np.argmax(abs_u, axis=-1)\n sel0, sel1, sel2 = amax == 0, amax == 1, amax == 2\n n1a, n2a, d1a, d2a, ua = (x[sel0] for x in (n1, n2, d1, d2, u))\n n1b, n2b, d1b, d2b, ub = (x[sel1] for x in (n1, n2, d1, d2, u))\n n1c, n2c, d1c, d2c, uc = (x[sel2] for x in (n1, n2, d1, d2, u))\n\n ay = (d2a * n1a[..., 2] - d1a * n2a[..., 2]) / ua[..., 0]\n az = (d1a * n2a[..., 1] - d2a * n1a[..., 1]) / ua[..., 0]\n bz = (d2b * n1b[..., 0] - d1b * n2b[..., 0]) / ub[..., 1]\n bx = (d1b * n2b[..., 2] - d2b * n1b[..., 2]) / ub[..., 1]\n cx = (d2c * n1c[..., 1] - d1c * n2c[..., 1]) / 
uc[..., 2]\n cy = (d1c * n2c[..., 0] - d2c * n1c[..., 0]) / uc[..., 2]\n isect_pt = np.empty(shape[:-2] + (3, ), dtype=plane1.dtype)\n isect_pt[sel0, 0] = 0\n isect_pt[sel0, 1] = ay\n isect_pt[sel0, 2] = az\n isect_pt[sel1, 0] = bx\n isect_pt[sel1, 1] = 0\n isect_pt[sel1, 2] = bz\n isect_pt[sel2, 0] = cx\n isect_pt[sel2, 1] = cy\n isect_pt[sel2, 2] = 0\n isect = hray(isect_pt, u)\n return isect, status\n\ndef axis_ang_cen_of_eig(xforms, debug=False):\n raise NotImplemented('this is a bad way to get rotation axis')\n axis, angle = axis_angle_of(xforms)\n # # seems to numerically unstable\n ev, cen = np.linalg.eig(xforms)\n # print(axis)\n # print(cen[..., 0])\n # print(cen[..., 1])\n # print(cen[..., 2])\n # axis = np.real(cen[..., 2])\n cen = np.real(cen[..., 3])\n cen /= cen[..., 3][..., None]\n # # todo: this is unstable.... fix?\n # cen = proj_perp(axis, cen) # move to reasonable position\n return axis, angle, cen\n\ndef axis_ang_cen_of_planes(xforms, debug=False):\n axis, angle = axis_angle_of(xforms)\n # sketchy magic points...\n p1 = (-32.09501046777237, 03.36227004372687, 35.34672781477340, 1)\n p2 = (21.15113978202345, 12.55664537217840, -37.48294301885574, 1)\n # p1 = rand_point()\n # p2 = rand_point()\n tparallel = hdot(axis, xforms[..., :, 3])[..., None] * axis\n q1 = xforms @ p1 - tparallel\n q2 = xforms @ p2 - tparallel\n n1 = hnormalized(q1 - p1)\n n2 = hnormalized(q2 - p2)\n c1 = (p1 + q1) / 2.0\n c2 = (p2 + q2) / 2.0\n plane1 = hray(c1, n1)\n plane2 = hray(c2, n2)\n isect, status = intersect_planes(plane1, plane2)\n return axis, angle, isect[..., :, 0]\n\naxis_ang_cen_of = axis_ang_cen_of_planes\n\ndef line_line_distance_pa(pt1, ax1, pt2, ax2):\n # point1, point2 = hpoint(point1), hpoint(point2)\n # axis1, axis2 = hnormalized(axis1), hnormalized(axis2)\n n = abs(hdot(pt2 - pt1, hcross(ax1, ax2)))\n d = hnorm(hcross(ax1, ax2))\n r = np.zeros_like(n)\n i = abs(d) > 0.00001\n r[i] = n[i] / d[i]\n pp = hnorm(proj_perp(ax1, pt2 - pt1))\n return np.where(np.abs(hdot(ax1, ax2)) > 0.9999, pp, r)\n\ndef line_line_distance(ray1, ray2):\n pt1, pt2 = ray1[..., :, 0], ray2[..., :, 0]\n ax1, ax2 = ray1[..., :, 1], ray2[..., :, 1]\n return line_line_distance_pa(pt1, ax1, pt2, ax2)\n\ndef line_line_closest_points_pa(pt1, ax1, pt2, ax2, verbose=0):\n C21 = pt2 - pt1\n M = hcross(ax1, ax2)\n m2 = np.sum(M**2, axis=-1)[..., None]\n R = hcross(C21, M / m2)\n t1 = hdot(R, ax2)[..., None]\n t2 = hdot(R, ax1)[..., None]\n Q1 = pt1 - t1 * ax1\n Q2 = pt2 - t2 * ax2\n if verbose:\n print('C21', C21)\n print('M', M)\n print('m2', m2)\n print('R', R)\n print('t1', t1)\n print('t2', t2)\n print('Q1', Q1)\n print('Q2', Q2)\n return Q1, Q2\n\ndef line_line_closest_points(ray1, ray2, verbose=0):\n \"currently errors if ax1==ax2\"\n # pt1, pt2 = hpoint(pt1), hpoint(pt2)\n # ax1, ax2 = hnormalized(ax1), hnormalized(ax2)\n pt1, pt2 = ray1[..., :, 0], ray2[..., :, 0]\n ax1, ax2 = ray1[..., :, 1], ray2[..., :, 1]\n return line_line_closest_points_pa(pt1, ax1, pt2, ax2)\n\ndef dihedral(p1, p2, p3, p4):\n p1, p2, p3, p4 = hpoint(p1), hpoint(p2), hpoint(p3), hpoint(p4)\n a = hnormalized(p2 - p1)\n b = hnormalized(p3 - p2)\n c = hnormalized(p4 - p3)\n x = np.clip(hdot(a, b) * hdot(b, c) - hdot(a, c), -1, 1)\n y = np.clip(hdot(a, hcross(b, c)), -1, 1)\n return np.arctan2(y, x)\n\ndef align_around_axis(axis, u, v):\n return hrot(axis, -dihedral(u, axis, [0, 0, 0, 0], v))\n\ndef align_vector(a, b):\n return hrot((hnormalized(a) + hnormalized(b)) / 2, np.pi)\n\ndef align_vectors(a1, a2, b1, b2):\n \"minimizes angular 
error\"\n a1, a2, b1, b2 = (hnormalized(v) for v in (a1, a2, b1, b2))\n aaxis = (a1 + a2) / 2.0\n baxis = (b1 + b2) / 2.0\n Xmiddle = align_vector(aaxis, baxis)\n Xaround = align_around_axis(baxis, Xmiddle @ a1, b1)\n X = Xaround @ Xmiddle\n assert (angle(b1, a1) + angle(b2, a2)) + 0.001 >= (angle(b1, X @ a1) + angle(b2, X @ a2))\n return X\n\ndef calc_dihedral_angle(p1, p2, p3, p4):\n p1, p2, p3, p4 = hpoint(p1), hpoint(p2), hpoint(p3), hpoint(p4)\n p1, p2, p3, p4 = p1.reshape(4), p2.reshape(4), p3.reshape(4), p4.reshape(4)\n # Calculate coordinates for vectors q1, q2 and q3\n q1 = np.subtract(p2, p1) # b - a\n q2 = np.subtract(p3, p2) # c - b\n q3 = np.subtract(p4, p3) # d - c\n q1_x_q2 = hcross(q1, q2)\n q2_x_q3 = hcross(q2, q3)\n n1 = hnormalized(q1_x_q2)\n n2 = hnormalized(q2_x_q3)\n u1 = n2\n u3 = hnormalized(q2)\n u2 = hcross(u3, u1)\n cos_theta = np.sum(n1 * u1)\n sin_theta = np.sum(n1 * u2)\n theta = -np.arctan2(sin_theta, cos_theta)\n return theta\n\ndef rotation_around_dof_for_target_angle(target_angle, dof_angle, fix_to_dof_angle):\n assert fix_to_dof_angle < np.pi / 2\n assert dof_angle <= np.pi / 2 + 0.00001\n assert target_angle <= np.pi\n\n if target_angle + dof_angle < fix_to_dof_angle:\n return np.array([-12345.0])\n\n hdof = np.sin(dof_angle)\n l_dof = np.cos(dof_angle)\n h_tgt = np.sin(target_angle)\n l_tgt = np.cos(target_angle)\n # print('l_dof', l_dof)\n # print('l_tgt', l_tgt)\n xdof = np.sin(fix_to_dof_angle) * l_dof\n ydof = np.cos(fix_to_dof_angle) * l_dof\n assert np.allclose(np.sqrt(xdof**2 + ydof**2), l_dof)\n ytgt = np.cos(target_angle)\n slope = -np.tan(np.pi / 2 - fix_to_dof_angle)\n\n # print('ytgt', ytgt, 'xdof', xdof, 'ydof', ydof)\n\n yhat = ytgt\n xhat = xdof + (ytgt - ydof) * slope\n lhat = np.sqrt(xhat**2 + yhat**2)\n if lhat > 0.999999:\n if lhat > 1.000001:\n return np.array([-12345.0])\n else:\n return np.array([0.0])\n\n hhat = np.sqrt(1.0 - lhat**2)\n ahat = np.arcsin(hhat / hdof)\n\n # print('xhat', xhat, 'yhat', yhat, 'slope', slope, 'lhat', lhat, 'hhat', hhat, 'ahat', ahat)\n\n # print('ytgt', ytgt)\n # print('xdof', xdof)\n # print('ydof', ydof)\n # print('xhat', xhat)\n # print('yhat', yhat)\n # print('ahat', ahat, np.degrees(ahat))\n\n return ahat\n\ndef xform_around_dof_for_vector_target_angle(fix, mov, dof, target_angle):\n if hdot(dof, fix) < 0:\n dof = -dof\n if angle(dof, mov) > np.pi / 2:\n mov = -mov\n dang = calc_dihedral_angle(fix, [0.0, 0.0, 0.0, 0.0], dof, mov)\n assert angle(dof, mov) <= np.pi / 2 + 0.000001\n ahat = rotation_around_dof_for_target_angle(target_angle, angle(mov, dof), angle(fix, dof))\n if ahat == -12345.0:\n return []\n elif ahat == 0:\n mov1 = (hrot(dof, 0.000 - dang) @ mov[..., None]).reshape(1, 4)\n mov2 = (hrot(dof, np.pi - dang) @ mov[..., None]).reshape(1, 4)\n if np.allclose(angle(fix, mov1), target_angle):\n return [hrot(dof, np.pi - dang)]\n return\n elif np.allclose(angle(fix, mov1), target_angle):\n return [hrot(dof, np.pi - dang)]\n else:\n return []\n else:\n angles = [-dang + ahat, -dang - ahat, np.pi - dang + ahat, np.pi - dang - ahat]\n moves = [(hrot(dof, ang + 0.000) @ mov[..., None]).reshape(1, 4) for ang in angles]\n assert (np.allclose(angle(moves[0], fix), angle(moves[1], fix))\n or np.allclose(angle(moves[2], fix), angle(moves[3], fix)))\n\n if np.allclose(angle(moves[0], fix), target_angle):\n return [hrot(dof, angles[0]), hrot(dof, angles[1])]\n elif np.allclose(angle(moves[2], fix), target_angle):\n return [hrot(dof, angles[2]), hrot(dof, angles[3])]\n else:\n return []\n\ndef 
align_lines_isect_axis2(pt1, ax1, pt2, ax2, ta1, tp1, ta2, sl2):\n ## make sure to align with smaller axis choice\n assert np.allclose(np.linalg.norm(tp1[..., :3]), 0.0)\n if angle(ax1, ax2) > np.pi / 2:\n ax2 = -ax2\n if angle(ta1, ta2) > np.pi / 2:\n ta2 = -ta2\n assert np.allclose(angle(ta1, ta2), angle(ax1, ax2))\n if abs(angle(ta1, ta2)) < 0.1:\n assert 0, 'case not tested'\n # vector delta between pt2 and pt1\n d = proj_perp(ax1, pt2 - pt1)\n Xalign = align_vectors(ax1, d, ta1, sl2) # align d to Y axis\n Xalign[..., :, 3] = -Xalign @ pt1\n slide_dist = (Xalign @ pt2)[..., 1]\n else:\n try:\n Xalign = align_vectors(ax1, ax2, ta1, ta2)\n # print(Xalign @ ax1, ta1)\n # assert np.allclose(Xalign @ ax1, ta1, atol=0.0001)\n # assert np.allclose(Xalign @ ax2, ta2, atol=0.0001)\n # print(Xalign)\n except AssertionError as e:\n print(\"align_vectors error\")\n print(\" \", ax1)\n print(\" \", ax2)\n print(\" \", ta1)\n print(\" \", ta2)\n raise e\n Xalign[..., :, 3] = -Xalign @ pt1 ## move pt1 to origin\n Xalign[..., 3, 3] = 1\n cen2_0 = Xalign @ pt2 # moving pt2 by Xalign\n D = np.stack([ta1[:3], sl2[:3], ta2[:3]]).T\n A1offset, slide_dist, _ = np.linalg.inv(D) @ cen2_0[:3]\n # print(A1offset, slide_dist)\n Xalign[..., :, 3] = Xalign[..., :, 3] - (A1offset * ta1)\n\n return Xalign, slide_dist\n\ndef expand_xforms(G, N=3, redundant_point=hpoint([1, 3, 10]), maxrad=9e9):\n # print('redundant_point', redundant_point)\n seenit = set()\n seenit.add(tuple(np.around(redundant_point).astype('i')[:3]))\n for Xs in it.chain(G, *(it.product(G, repeat=n) for n in range(2, N + 1))):\n X = Xs if isinstance(Xs, np.ndarray) else ft.reduce(np.matmul, Xs)\n if np.linalg.norm(X @ redundant_point - redundant_point) > maxrad: continue\n key = tuple(np.around(X @ redundant_point).astype('i')[:3])\n # print(key, X @ redundant_point)\n if key not in seenit:\n seenit.add(key)\n yield X\n" ]
[ [ "numpy.random.rand", "numpy.minimum", "numpy.tan", "numpy.cos", "numpy.zeros_like", "numpy.sin", "numpy.empty", "numpy.linalg.norm", "numpy.arcsin", "numpy.prod", "numpy.argmax", "numpy.sqrt", "numpy.around", "numpy.cross", "numpy.linalg.inv", "numpy.empty_like", "numpy.array", "numpy.zeros", "numpy.random.randn", "numpy.real", "numpy.linalg.det", "numpy.identity", "numpy.allclose", "numpy.stack", "numpy.subtract", "numpy.arctan2", "numpy.clip", "numpy.trace", "numpy.asarray", "numpy.broadcast_arrays", "numpy.broadcast", "numpy.sum", "numpy.ones", "numpy.any", "numpy.linalg.eig", "numpy.abs", "numpy.all", "numpy.moveaxis", "numpy.asanyarray", "numpy.maximum" ] ]
xairc/lung_nodule_detector
[ "4e77015680b3bbc89f80f4833bbc93b9fc3e9870" ]
[ "make_FROC_submit_native.py" ]
[ "import numpy as np\nimport sys\n\nsys.path.append('../')\n\nfrom training.layers import nms, iou, acc\nimport time\nimport multiprocessing as mp\nimport math\nimport SimpleITK as sitk\nimport os\nfrom config_training import config\nimport pandas\nimport csv\nimport io\n\nsave_dir = 'results/res18_split_focal/bbox/'\nsubmit_file = './luna_submission_res18_split_classify.csv'\nsid = './val9_sid.csv'\n\nval_num = np.load('val9.npy')\nluna_data = config['luna_data']\nluna_label = './labels/lunaqualified_all.csv'\nshorter_label = './labels/shorter.csv'\nresolution = np.array([1,1,1])\nannos = np.array(pandas.read_csv(luna_label))\n\nabbrevs = np.array(pandas.read_csv(shorter_label, header=None))\nnamelist = abbrevs[:, 1]\nids = list(abbrevs[:, 0])\n\ndef sigmoid(x):\n return 1 / (1 + math.exp(-x))\n\ndef load_itk_image(filename):\n with open(filename) as f:\n contents = f.readlines()\n line = [k for k in contents if k.startswith('TransformMatrix')][0]\n transformM = np.array(line.split(' = ')[1].split(' ')).astype('float')\n transformM = np.round(transformM)\n if np.any( transformM!=np.array([1,0,0, 0, 1, 0, 0, 0, 1])):\n isflip = True\n else:\n isflip = False\n\n itkimage = sitk.ReadImage(filename)\n numpyImage = sitk.GetArrayFromImage(itkimage)\n numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))\n numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))\n\n return numpyImage, numpyOrigin, numpySpacing, isflip\n\ndef convert_worldcoord(idx, pbb, filename_dict):\n sliceim, origin, spacing, isflip = load_itk_image(os.path.join(luna_data, filename_dict[idx] + '.mhd'))\n #Mask, extendbox = Mask_info(idx, filename_dict)\n ori_sliceim_shape_yx = sliceim.shape[1:3]\n for label in pbb:\n pos_ori = label[1:4]\n radious_ori = label[4]\n #pos_ori = pos_ori + extendbox[:, 0]\n pos_ori = pos_ori * resolution / spacing\n\n if isflip:\n pos_ori[1:] = ori_sliceim_shape_yx - pos_ori[1:]\n pos_ori[1] = pos_ori[1] * -1\n pos_ori[2] = pos_ori[2] * -1\n\n pos_ori = pos_ori * spacing\n pos_ori = pos_ori + origin\n pos_ori = pos_ori[::-1]\n\n radious_ori = radious_ori / spacing[1] * resolution[1]\n radious_ori = radious_ori * spacing[1]\n\n label[1:4] = pos_ori\n label[4] = radious_ori\n label[0] = sigmoid(label[0])\n return pbb\n\n\ndef duplicate_file(in_filename):\n out_filename = in_filename + '.bin'\n byte_string = ''\n\n with open(in_filename, 'r') as infile:\n with open(out_filename, 'wb') as outfile:\n char = infile.read(1)\n byte = ord(char)\n # print byte\n byte_string += chr(byte)\n while char != \"\":\n char = infile.read(1)\n if char != \"\":\n byte = ord(char)\n # print byte\n byte_string += chr(byte)\n outfile.write(byte_string)\n outfile.close()\n\nif __name__ == '__main__':\n pbb = []\n lbb = []\n filename_dict = {}\n csv_submit = []\n csv_sid = []\n\n print (\"datadir\", luna_data)\n\n for i in range(len(val_num)):\n pbb_item = np.load(save_dir + str(val_num[i]) + '_pbb.npy')\n lbb_item = np.load(save_dir + str(val_num[i]) + '_lbb.npy')\n\n filename_dict[i] = str(val_num[i])\n pbb_item = pbb_item[pbb_item[:, 0].argsort()[::-1]]\n pbb_append_list = []\n for item in pbb_item:\n\n #append nocule prob > 0.1\n if sigmoid(item[0]) < 0.1:\n continue\n\n #check overlap under 3mm\n is_overlap = False\n for appended in pbb_append_list:\n minimum_dist = 3\n dist = math.sqrt(\n math.pow(appended[0] - item[0], 2) + math.pow(appended[1] - item[1], 2) + math.pow(\n appended[2] - item[2], 2))\n if (dist < minimum_dist):\n is_overlap = True\n break;\n\n if not is_overlap:\n 
pbb_append_list.append(item)\n\n pbb.append(np.array(pbb_append_list))\n lbb.append(lbb_item)\n\n pbb = np.array(pbb)\n lbb = np.array(lbb)\n\n conf_th = 0.1\n nms_th = 0.3\n detect_th = 0.3\n\n for i in range(len(pbb)):\n nms_pbb = nms(pbb[i], nms_th)\n world_pbb = convert_worldcoord(i, nms_pbb, filename_dict)\n print (filename_dict[i])\n s_id = namelist[ids.index(int(filename_dict[i]))]\n #csv_sid.append([s_id.encode()])\n csv_sid.append([s_id])\n for candidate in world_pbb:\n csv_submit.append([s_id, candidate[1], candidate[2], candidate[3], candidate[0]])\n\n df_annos = pandas.DataFrame(csv_submit, columns=[\"seriesuid\", \"coordX\", \"coordY\", \"coordZ\", \"probability\"])\n df_annos.to_csv(submit_file, index=False)\n\n df_annos = pandas.DataFrame(csv_sid)\n df_annos.to_csv(sid, index=False, header=False)\n\n\n\n" ]
[ [ "numpy.array", "numpy.round", "pandas.DataFrame", "numpy.load", "pandas.read_csv" ] ]
eric-vader/HD-BO-Additive-Models
[ "0d7e1d46194af2e3d402631caec6e7be9a50376a" ]
[ "hdbo/febo/models/gpm.py" ]
[ "import numpy as np\nimport torch\nfrom febo.utils import locate, get_logger\nfrom febo.utils.config import ConfigField, Config, assign_config, Configurable\nfrom febo.models import ConfidenceBoundModel\nfrom febo.models.model import ModelConfig\n\nfrom stpy.gauss_procc import GaussianProcess\nfrom stpy.kernels import KernelFunction\nlogger = get_logger('model')\n\n\n\n\nclass StandaloneGPConfig(ModelConfig):\n\n kernels = ConfigField([('ard', {'variance': 2., 'lengthscale': 0.2 , 'ARD': True, 'groups':None})])\n noise_var = ConfigField(0.1)\n calculate_gradients = ConfigField(True, comment='Enable/Disable computation of gradient on each update.')\n optimize_bias = ConfigField(True)\n optimize_var = ConfigField(True)\n bias = ConfigField(0)\n\n\n\n@assign_config(StandaloneGPConfig)\nclass StandaloneGP(ConfidenceBoundModel):\n\n def __init__(self, domain):\n \"\"\"\n Args:\n d (int): input dimension\n \"\"\"\n self.domain = domain\n self.d = domain.d\n self.fit = False\n self.s = self.config.noise_var\n self.kernel_name = self.config.kernels[0][0]\n self.gamma = self.config.kernels[0][1]['lengthscale']\n\n if self.config.kernels[0][1]['groups'] is None:\n self.groups = self.config.kernels[0][1]['groups']\n else:\n self.groups = eval(self.config.kernels[0][1]['groups'])\n\n kernel = KernelFunction(kernel_name=self.kernel_name, gamma=torch.ones(self.d, dtype=torch.float64) * self.gamma, groups=self.groups)\n self.gp = GaussianProcess(kernel_custom=kernel, s=self.s, d=self.d)\n self._beta_cached = 2\n\n self.X = None\n self.Y = None\n\n def mean(self, X):\n \"\"\"\n Calculate predicted mean at X.\n\n Args:\n X: input narray of shape (N,d)\n\n Returns:\n Predicted mean, narray of shape (N,1)\n\n \"\"\"\n X = torch.from_numpy(X.reshape(-1,self.d))\n mean,_ = self.gp.mean_var(X)\n return mean.numpy()\n\n def var(self, X):\n \"\"\"\n Calculate predicted variance at X\n\n Args:\n X: input narray of shape (N,d)\n\n Returns:\n Predicted variance, narray of shape (N,1)\n\n \"\"\"\n X = torch.from_numpy(X.reshape(-1,self.d))\n mean,var = self.gp.mean_var(X)\n return var.numpy()\n\n\n def _beta(self):\n return self._beta_cached\n\n def mean_var(self, X):\n \"\"\"\n\n Calculate predicted mean and variance at X\n\n Args:\n X: input narray of shape (N,d)\n\n Returns:\n (mean, var) Predicted mean and variance, each narray of shape (N,1)\n\n \"\"\"\n X = torch.from_numpy(X.reshape(-1,self.d))\n mean,var = self.gp.mean_var(X)\n return (mean.numpy(),var.numpy())\n\n def sample(self, X=None):\n \"\"\"\n Returns a sample form the posterior. 
It should return a function ``def my_sample(X):``\n\n Args:\n X: if specified, the sample function is only valid for points X\n\n Returns (function):\n\n \"\"\"\n def sampler(X):\n X = torch.from_numpy(X.reshape(-1, self.d))\n f = self.gp.sample(X).numpy()\n return f\n\n # def sampler_coord(X):\n # X = torch.from_numpy(X.reshape(-1, self.d))\n # x,val = self.gp.sample_iteratively_max(X, multistart = 20, minimizer = \"coordinate-wise\", grid = 100).numpy()\n # return (x,val )\n return sampler\n\n\n\n def fit_gp(self):\n self.gp.fit_gp(self.X,self.Y)\n self.fit = True\n def add_data(self, X, Y):\n \"\"\"\n Add data to the model.\n\n Args:\n X: input narray of shape (N,d)\n Y: observation narray of shape (N,1)\n\n \"\"\"\n X = torch.from_numpy(X.reshape(-1,self.d))\n Y = torch.from_numpy(Y.reshape(-1,1))\n\n if self.X is None:\n self.X = X\n self.Y = Y\n else:\n self.X = torch.cat((self.X,X), dim=0)\n self.Y = torch.cat((self.Y, Y), dim=0)\n self.fit_gp()\n\n def info(self):\n return self.gp.description()\n\n @property\n def requires_std(self):\n return False\n" ]
[ [ "torch.cat", "torch.ones" ] ]
sheromon/wheat-detection
[ "b9ee9cb1c94c64b6635490f6986b34cf42b59b5c" ]
[ "wheat/visualization.py" ]
[ "\"\"\"A couple of functions for visualizing the dataset in Jupyter notebooks\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torchvision.transforms as T\nimport torchvision.transforms.functional as F\n\n\n# this conversion is needed because albumentations transforms return\n# images in uint8, but pytorch expects them to be floats in [0, 1]\nimage_float_to_int_transform = T.ConvertImageDtype(torch.uint8)\n\ndef show(imgs):\n \"\"\"Display a single or a list of torch tensor images.\n\n from https://pytorch.org/vision/stable/auto_examples/plot_visualization_utils.html\n \"\"\"\n if not isinstance(imgs, list):\n imgs = [imgs]\n _, axs = plt.subplots(ncols=len(imgs), squeeze=False)\n for i, img in enumerate(imgs):\n img = img.detach()\n img = F.to_pil_image(img)\n axs[0, i].imshow(np.asarray(img))\n axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])\n" ]
[ [ "numpy.asarray" ] ]
Cardio-AI/3d-mri-domain-adaptation
[ "2a1b8332039aa25b8291cfd746cbcf87f71068c2" ]
[ "src/utils/Loss_and_metrics.py" ]
[ "from tensorflow.keras import backend as K\nimport tensorflow as tf\nimport numpy as np\nimport tensorflow.keras as keras\nfrom functools import partial\nfrom tensorflow.keras.losses import mse\nfrom src.data.Dataset import get_metadata_maybe, ensure_dir\n\ndef max_volume_loss(min_probability=0.8,):\n \"\"\"\n Create a callable loss function which maximizes the probability values of y_pred\n There is additionally the possibility to weight high probabilities in\n :param min_probability:\n :return: loss function which maximize the number of voxels with a probability higher than the threshold\n \"\"\"\n\n def max_loss(y_true, y_pred):\n \"\"\"\n Maximize the foreground voxels in the middle slices with a probability higher than a given threshold.\n :param y_true:\n :param y_pred:\n :param weights:\n :return:\n \"\"\"\n\n # ignore background channel if given, we want to maximize the number of captured foreground voxel\n if y_pred.shape[-1] == 4:\n y_pred = y_pred[...,1:]\n y_pred = tf.cast(y_pred, dtype=tf.float32)\n\n sum_bigger_than = tf.reduce_max(y_pred, axis=-1)\n mask_bigger_than = tf.cast(sum_bigger_than > min_probability, tf.float32)\n sum_bigger_than = sum_bigger_than * mask_bigger_than\n\n return 1- tf.reduce_mean(sum_bigger_than)\n\n return max_loss\n\n\ndef loss_with_zero_mask(loss=mse, mask_smaller_than=0.01, weight_inplane=False,xy_shape=224):\n \"\"\"\n Loss-factory returns a loss which calculates a given loss-function (e.g. MSE) only for the region where y_true is greater than a given threshold\n This is necessary for our AX2SAX comparison, as we have different length of CMR stacks (AX2SAX gt is cropped at z = SAX.z + 20mm)\n Example in-plane weighting which is multiplied to each slice of the volume\n [[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [0. 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0. ]\n [0. 0.25 0.5 0.5 0.5 0.5 0.5 0.5 0.25 0. ]\n [0. 0.25 0.5 0.75 0.75 0.75 0.75 0.5 0.25 0. ]\n [0. 0.25 0.5 0.75 1. 1. 0.75 0.5 0.25 0. ]\n [0. 0.25 0.5 0.75 1. 1. 0.75 0.5 0.25 0. ]\n [0. 0.25 0.5 0.75 0.75 0.75 0.75 0.5 0.25 0. ]\n [0. 0.25 0.5 0.5 0.5 0.5 0.5 0.5 0.25 0. ]\n [0. 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0. ]\n [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]]\n :param loss: any callable loss function. e.g. 
tf.keras.losses\n :param mask_smaller_than: float, threshold to calculate the loss only for voxels where gt is greater\n :param weight_inplane: bool, apply in-plane weighting\n :param xy_shape: int, number of square in-plane pixels\n :return:\n \"\"\"\n\n # in-plane weighting, which helps to focus on the voxels close to the center\n x_shape = xy_shape\n y_shape = xy_shape\n temp = np.zeros((x_shape, y_shape))\n weights_distribution = np.linspace(0, 100, x_shape // 2)\n for i, l in enumerate(weights_distribution):\n temp[i:-i, i:-i] = l\n weights = temp[None, None, :, :]\n weights = tf.convert_to_tensor(weights, dtype=tf.float32)\n\n def my_loss(y_true, y_pred, weights_inplane=weights):\n \"\"\"\n wrapper to either calculate a loss only on areas where the gt is greater than mask_smaller_than\n and additionally weight the loss in-plane to increase the importance of the voxels close to the center\n :param y_true:\n :param y_pred:\n :return:\n \"\"\"\n y_pred = tf.cast(y_pred, dtype=tf.float32)\n y_true = tf.cast(y_true, dtype=tf.float32)\n mask = tf.squeeze(tf.cast((y_true > mask_smaller_than),tf.float32),axis=-1)\n\n if weight_inplane:\n return (loss(y_true, y_pred) * mask) * weights_inplane + K.epsilon()\n else:\n return loss(y_true, y_pred) * mask\n\n return my_loss\n\n\n# modified with dice coef applied\n# a weighted cross entropy loss function combined with the dice coef for faster learning\ndef weighted_cce_dice_coef(weights):\n \"\"\"\n A weighted version of keras.objectives.categorical_crossentropy\n \n Variables:\n weights: numpy array of shape (C,) where C is the number of classes\n \n Usage:\n weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.\n loss = weighted_categorical_crossentropy(weights)\n model.compile(loss=loss,optimizer='adam')\n \"\"\"\n \n weights = K.variable(weights)\n \n def loss(y_true, y_pred):\n # scale predictions so that the class probs of each sample sum to 1\n y_pred /= K.sum(y_pred, axis=-1, keepdims=True)\n # clip to prevent NaN's and Inf's\n y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())\n # calc\n loss = y_true * K.log(y_pred) * weights\n loss = -K.sum(loss, -1)\n return loss\n \n def cat_cross_entropy_dice_coef(y_true, y_pred):\n return loss(y_true, y_pred)- dice_coef(y_true, y_pred)\n \n return cat_cross_entropy_dice_coef\n\ndef dice_coef_background(y_true, y_pred):\n y_pred = y_pred[...,0]\n y_true = y_true[...,0]\n return dice_coef(y_true, y_pred)\n\ndef dice_coef_rv(y_true, y_pred):\n y_pred = y_pred[...,-3]\n y_true = y_true[...,-3]\n return dice_coef(y_true, y_pred)\n\ndef dice_coef_myo(y_true, y_pred):\n y_pred = y_pred[...,-2]\n y_true = y_true[...,-2]\n return dice_coef(y_true, y_pred)\n\ndef dice_coef_lv(y_true, y_pred):\n y_pred = y_pred[...,-1]\n y_true = y_true[...,-1]\n return dice_coef(y_true, y_pred)\n\n\n# ignore background score\ndef dice_coef_labels(y_true, y_pred):\n\n # ignore background, slice from the back to work with and without background channels\n y_pred = y_pred[...,-3:]\n y_true = y_true[...,-3:]\n \n return dice_coef(y_true, y_pred)\n\ndef dice_coef(y_true, y_pred):\n smooth = 1.\n \n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n\n\ndef dice_coef_squared(y_true, y_pred):\n smooth = 1.\n \n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. 
* intersection + smooth) / (K.sum(K.square(y_true_f)) + K.sum(K.square(y_pred_f)) + smooth)\n\n\ndef dice_numpy(y_true, y_pred, empty_score=1.0):\n\n \"\"\"\n Hard Dice for numpy ndarrays\n :param y_true:\n :param y_pred:\n :param empty_score:\n :return:\n \"\"\"\n\n im1 = np.asarray(y_true).astype(np.bool)\n im2 = np.asarray(y_pred).astype(np.bool)\n\n if im1.shape != im2.shape:\n raise ValueError(\"Shape mismatch: im1 and im2 must have the same shape.\")\n\n im_sum = im1.sum() + im2.sum()\n if im_sum == 0:\n return empty_score\n\n # Compute Dice coefficient\n intersection = np.logical_and(im1, im2)\n\n return 2. * intersection.sum() / im_sum\n\n\ndef bce_dice_loss(y_true, y_pred, w_bce=0.5, w_dice=1.):\n \"\"\"\n weighted binary cross entropy - dice coef loss\n uses all labels if shale labels ==3\n otherwise slice the background to ignore over-represented background class\n :param y_true:\n :param y_pred:\n :return:\n \"\"\"\n\n # use only the labels for the loss\n if y_pred.shape[-1] == 4:\n y_pred = y_pred[...,-3:]\n y_true = y_true[...,-3:]\n\n\n return w_bce * tf.keras.losses.binary_crossentropy(y_true, y_pred) - w_dice * dice_coef(y_true, y_pred)\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.keras.backend.sum", "numpy.asarray", "tensorflow.keras.backend.variable", "numpy.zeros", "tensorflow.keras.backend.flatten", "tensorflow.keras.backend.square", "numpy.logical_and", "tensorflow.keras.backend.epsilon", "tensorflow.reduce_max", "tensorflow.keras.backend.log", "numpy.linspace", "tensorflow.reduce_mean", "tensorflow.keras.losses.binary_crossentropy", "tensorflow.cast" ] ]
Dudestin/bisenetv2-tensorflow
[ "0c9761a7d0fd6ac4bdb3abb7195d01caf22d4716" ]
[ "tools/segcomp/freeze_segcomp_bisenetv2_model.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/1/15 上午10:49\n# @Author : MaybeShewill-CV\n# @Site : https://github.com/MaybeShewill-CV/bisenetv2-tensorflow\n# @File : freeze_segcomp_bisenetv2_model.py\n# @IDE: PyCharm\n\"\"\"\nFreeze bisenetv2 model\n\"\"\"\nimport argparse\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.tools import optimize_for_inference_lib\n\nfrom bisenet_model import bisenet_v2\nfrom local_utils.config_utils import parse_config_utils\n\nCFG = parse_config_utils.segcomp_cfg\n\n\ndef init_args():\n \"\"\"\n\n :return:\n \"\"\"\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--weights_path', type=str, help='The ckpt file path')\n parser.add_argument('--frozen_pb_file_path', type=str, help='The output frozen pb file path',\n default='./checkpoint/bisenetv2_segcomp_frozen.pb')\n parser.add_argument('--optimized_pb_file_path', type=str, help='The output frozen pb file path',\n default='./checkpoint/bisenetv2_segcomp_optimized.pb')\n\n return parser.parse_args()\n\n\ndef load_graph_from_ckpt_file(weights_path):\n \"\"\"\n\n :param weights_path:\n :return:\n \"\"\"\n # construct compute graph\n input_tensor = tf.placeholder(dtype=tf.float32, shape=[1, 512, 1024, 3], name='input_tensor')\n net = bisenet_v2.BiseNetV2(phase='test', cfg=CFG)\n prediction = net.inference(\n input_tensor=input_tensor,\n name='BiseNetV2',\n reuse=False\n )\n prediction = tf.squeeze(prediction, axis=0, name='final_output')\n prediction = tf.identity(prediction, name='final_output')\n\n sess_config = tf.ConfigProto(allow_soft_placement=True)\n sess_config.gpu_options.per_process_gpu_memory_fraction = 0.9\n sess_config.gpu_options.allow_growth = True\n sess_config.gpu_options.allocator_type = 'BFC'\n\n # define moving average version of the learned variables for eval\n with tf.variable_scope(name_or_scope='moving_avg'):\n variable_averages = tf.train.ExponentialMovingAverage(0.9995)\n variables_to_restore = variable_averages.variables_to_restore()\n\n saver = tf.train.Saver(variables_to_restore)\n # create a session\n sess = tf.Session(config=sess_config)\n\n # import best model\n saver.restore(sess, weights_path) # variables\n\n # get graph definition\n gd = graph_util.remove_training_nodes(sess.graph_def)\n\n return gd, sess, prediction\n\n\ndef freeze_model(output_pb_file_path, sess, graph_def):\n \"\"\"\n\n :param output_pb_file_path:\n :param sess\n :param graph_def:\n :return:\n \"\"\"\n converted_graph_def = graph_util.convert_variables_to_constants(\n sess, graph_def, [\"final_output\"])\n tf.train.write_graph(converted_graph_def, './', output_pb_file_path, as_text=False)\n\n return\n\n\ndef optimize_inference_model(frozen_pb_file_path, output_pb_file_path):\n \"\"\"\n\n :param frozen_pb_file_path:\n :param output_pb_file_path:\n :return:\n \"\"\"\n input_graph = tf.GraphDef()\n with tf.gfile.GFile(frozen_pb_file_path, \"rb\") as f:\n data2read = f.read()\n input_graph.ParseFromString(data2read)\n\n optimized_graph = optimize_for_inference_lib.optimize_for_inference(\n input_graph_def=input_graph,\n input_node_names=['input_tensor'],\n output_node_names=['final_output'],\n placeholder_type_enum=tf.float32.as_datatype_enum\n )\n\n with tf.gfile.GFile(output_pb_file_path, 'w') as f:\n f.write(optimized_graph.SerializeToString())\n return\n\n\nif __name__ == '__main__':\n \"\"\"\n test code\n \"\"\"\n args = init_args()\n\n bisenetv2_gd, bisenetv2_sess, _ = load_graph_from_ckpt_file(args.weights_path)\n\n 
freeze_model(\n output_pb_file_path=args.frozen_pb_file_path,\n sess=bisenetv2_sess,\n graph_def=bisenetv2_gd\n )\n\n # optimize_inference_model(\n # frozen_pb_file_path=args.frozen_pb_file_path,\n # output_pb_file_path=args.optimized_pb_file_path\n # )\n" ]
[ [ "tensorflow.GraphDef", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.gfile.GFile", "tensorflow.ConfigProto", "tensorflow.python.framework.graph_util.remove_training_nodes", "tensorflow.variable_scope", "tensorflow.squeeze", "tensorflow.placeholder", "tensorflow.train.write_graph", "tensorflow.train.ExponentialMovingAverage", "tensorflow.python.framework.graph_util.convert_variables_to_constants", "tensorflow.python.tools.optimize_for_inference_lib.optimize_for_inference", "tensorflow.identity" ] ]
LawrenceZ1A/MultipurposeProject
[ "54d5898301d01c33dd771b29e2e19e20d3875a21" ]
[ "Keras/how-to-forecast.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport sys\nimport warnings\n\nif not sys.warnoptions:\n warnings.simplefilter('ignore')\n\n\n# In[107]:\n\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom datetime import datetime, date, time, timedelta\n\nfrom tqdm import tqdm\nimport yfinance as yf\nsns.set()\ntf.compat.v1.set_random_seed(1234)\ntf.compat.v1.disable_v2_behavior()\n\n\n\n# In[230]:\nprint(\"It will save the data in the same folder as the script\")\nsymbol = input('Please input stock symbol:\\n').upper()\n\n\ndef get_minute_history_data(symbol, start_date:date, end_date:date):\n ticker = yf.Ticker(symbol)\n df_data = ticker.history(interval=\"1d\", start=str(start_date), end=str(end_date))\n df_data = df_data.round(2)\n return df_data\n\nSymbol = symbol\nnow = datetime.now()\nstart_date = date(now.year - 5, now.month, now.day)\nend_date = date(now.year, now.month, now.day)\ndf = get_minute_history_data(Symbol, start_date, end_date)\ndf = df.reset_index()\n\n\n# In[231]:\n\n\nminmax = MinMaxScaler().fit(df.iloc[:, 4:5].astype('float32')) # Close index\ndf_log = minmax.transform(df.iloc[:, 4:5].astype('float32')) # Close index\ndf_log = pd.DataFrame(df_log)\ndf_log.head()\n\n\n# In[232]:\n\n\nsimulation_size = 5\nnum_layers = 1\nsize_layer = 256\ntimestamp = 5\nepoch = 500\ndropout_rate = 1\ntest_size = 30\nlearning_rate = 0.005\n\ndf_train = df_log\ndf.shape, df_train.shape\ntf.compat.v1.disable_eager_execution()\n\n\n# In[233]:\n\n\nclass Model:\n def __init__(\n self,\n learning_rate,\n num_layers,\n size,\n size_layer,\n output_size,\n forget_bias = 0.1,\n ):\n def lstm_cell(size_layer):\n return tf.compat.v1.nn.rnn_cell.LSTMCell(size_layer, state_is_tuple = False)\n\n rnn_cells = tf.compat.v1.nn.rnn_cell.MultiRNNCell(\n [lstm_cell(size_layer) for _ in range(num_layers)],\n state_is_tuple = False,\n )\n \n self.X = tf.compat.v1.placeholder(tf.float32, (None, None, size))\n self.Y = tf.compat.v1.placeholder(tf.float32, (None, output_size))\n drop = tf.compat.v1.nn.rnn_cell.DropoutWrapper(\n rnn_cells, output_keep_prob = forget_bias\n )\n self.hidden_layer = tf.compat.v1.placeholder(\n tf.float32, (None, num_layers * 2 * size_layer)\n )\n self.outputs, self.last_state = tf.compat.v1.nn.dynamic_rnn(\n drop, self.X, initial_state = self.hidden_layer, dtype = tf.float32\n )\n self.logits = tf.compat.v1.layers.dense(self.outputs[-1], output_size)\n self.cost = tf.reduce_mean(input_tensor=tf.square(self.Y - self.logits))\n self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(\n self.cost\n )\n\ndef calculate_accuracy(real, predict):\n real = np.array(real) + 1\n predict = np.array(predict) + 1\n percentage = 1 - np.sqrt(np.mean(np.square((real - predict) / real)))\n return percentage * 100\n\ndef anchor(signal, weight):\n buffer = []\n last = signal[0]\n for i in signal:\n smoothed_val = last * weight + (1 - weight) * i\n buffer.append(smoothed_val)\n last = smoothed_val\n return buffer\n\n\n# In[234]:\n\n\ndef forecast():\n tf.compat.v1.reset_default_graph()\n modelnn = Model(\n learning_rate, num_layers, df_log.shape[1], size_layer, df_log.shape[1], dropout_rate\n )\n sess = tf.compat.v1.InteractiveSession()\n sess.run(tf.compat.v1.global_variables_initializer())\n date_ori = pd.to_datetime(df.iloc[:, 0]).tolist()\n\n pbar = tqdm(range(epoch), desc = 'train loop')\n for i in pbar:\n init_value = np.zeros((1, num_layers 
* 2 * size_layer))\n total_loss, total_acc = [], []\n for k in range(0, df_train.shape[0] - 1, timestamp):\n index = min(k + timestamp, df_train.shape[0] - 1)\n batch_x = np.expand_dims(\n df_train.iloc[k : index, :].values, axis = 0\n )\n batch_y = df_train.iloc[k + 1 : index + 1, :].values\n logits, last_state, _, loss = sess.run(\n [modelnn.logits, modelnn.last_state, modelnn.optimizer, modelnn.cost],\n feed_dict = {\n modelnn.X: batch_x,\n modelnn.Y: batch_y,\n modelnn.hidden_layer: init_value,\n },\n )\n init_value = last_state\n total_loss.append(loss)\n total_acc.append(calculate_accuracy(batch_y[:, 0], logits[:, 0]))\n pbar.set_postfix(cost = np.mean(total_loss), acc = np.mean(total_acc))\n\n future_day = test_size\n\n output_predict = np.zeros((df_train.shape[0] + future_day, df_train.shape[1]))\n output_predict[0] = df_train.iloc[0]\n upper_b = (df_train.shape[0] // timestamp) * timestamp\n init_value = np.zeros((1, num_layers * 2 * size_layer))\n\n for k in range(0, (df_train.shape[0] // timestamp) * timestamp, timestamp):\n out_logits, last_state = sess.run(\n [modelnn.logits, modelnn.last_state],\n feed_dict = {\n modelnn.X: np.expand_dims(\n df_train.iloc[k : k + timestamp], axis = 0\n ),\n modelnn.hidden_layer: init_value,\n },\n )\n init_value = last_state\n output_predict[k + 1 : k + timestamp + 1] = out_logits\n\n if upper_b != df_train.shape[0]:\n out_logits, last_state = sess.run(\n [modelnn.logits, modelnn.last_state],\n feed_dict = {\n modelnn.X: np.expand_dims(df_train.iloc[upper_b:], axis = 0),\n modelnn.hidden_layer: init_value,\n },\n )\n output_predict[upper_b + 1 : df_train.shape[0] + 1] = out_logits\n future_day -= 1\n date_ori.append(date_ori[-1] + timedelta(days = 1))\n\n init_value = last_state\n\n for i in range(future_day):\n o = output_predict[-future_day - timestamp + i:-future_day + i]\n out_logits, last_state = sess.run(\n [modelnn.logits, modelnn.last_state],\n feed_dict = {\n modelnn.X: np.expand_dims(o, axis = 0),\n modelnn.hidden_layer: init_value,\n },\n )\n init_value = last_state\n output_predict[-future_day + i] = out_logits[-1]\n date_ori.append(date_ori[-1] + timedelta(days = 1))\n\n output_predict = minmax.inverse_transform(output_predict)\n deep_future = anchor(output_predict[:, 0], 0.4)\n\n return deep_future\n\n\n# In[235]:\n\n\nresults = []\nfor i in range(simulation_size):\n print('simulation %d' % (i + 1))\n results.append(forecast())\n\n\n# In[236]:\n\n\ndate_ori = pd.to_datetime(df.iloc[:, 0]).tolist()\nfor i in range(test_size):\n date_ori.append(date_ori[-1] + timedelta(days=1))\ndate_ori = pd.Series(date_ori).dt.strftime(date_format='%Y-%m-%d %H:%M:%S').tolist()\ndate_ori[-5:]\n\n\n# In[237]:\n\n\naccepted_results = []\nfor r in results:\n if (\n np.array(\n r[-test_size:]\n ) < np.min(\n df['Close']\n )\n ).sum() == 0 and (\n np.array(\n r[-test_size:]\n ) > np.max(\n df['Close']\n ) * 2\n ).sum() == 0:\n accepted_results.append(r)\nlen(accepted_results)\n\n\n# In[238]:\n\n\naccuracies = [calculate_accuracy(df['Close'].values, r[:-test_size]) for r in accepted_results]\n\nplt.figure(figsize=(15, 5))\nfor no, r in enumerate(accepted_results):\n plt.plot(r, label='forecast %d'%(no + 1))\nplt.plot(df['Close'], label='true trend', c='black')\nplt.legend()\nplt.title('average accuracy: %.4f'%(np.mean(accuracies)))\n\nx_range_future = np.arange(len(results[0]))\nplt.xticks(x_range_future[::30], date_ori[::30])\n\nplt.savefig(symbol + '.png')\nplt.show()\n" ]
[ [ "tensorflow.compat.v1.nn.dynamic_rnn", "tensorflow.compat.v1.disable_v2_behavior", "numpy.min", "numpy.mean", "tensorflow.compat.v1.reset_default_graph", "sklearn.preprocessing.MinMaxScaler", "tensorflow.compat.v1.layers.dense", "tensorflow.compat.v1.set_random_seed", "matplotlib.pyplot.xticks", "tensorflow.compat.v1.placeholder", "numpy.max", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.train.AdamOptimizer", "pandas.DataFrame", "matplotlib.pyplot.savefig", "tensorflow.compat.v1.nn.rnn_cell.DropoutWrapper", "numpy.expand_dims", "pandas.to_datetime", "numpy.square", "numpy.array", "numpy.zeros", "tensorflow.compat.v1.InteractiveSession", "matplotlib.pyplot.figure", "tensorflow.compat.v1.nn.rnn_cell.LSTMCell", "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "pandas.Series", "tensorflow.square", "tensorflow.compat.v1.disable_eager_execution" ] ]
LASER-UMASS/Diva
[ "7dbb532cc6feec1e39c560b30a8e70f2691df365" ]
[ "Diva/models/term_encoder.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nfrom collections import defaultdict\nfrom time import time\nfrom itertools import chain\nfrom lark.tree import Tree\nimport os\nfrom gallina import traverse_postorder\nimport pdb\n\n\nnonterminals = [\n 'constr__constr',\n 'constructor_rel',\n 'constructor_var',\n 'constructor_meta',\n 'constructor_evar',\n 'constructor_sort',\n 'constructor_cast',\n 'constructor_prod',\n 'constructor_lambda',\n 'constructor_letin',\n 'constructor_app',\n 'constructor_const',\n 'constructor_ind',\n 'constructor_construct',\n 'constructor_case',\n 'constructor_fix',\n 'constructor_cofix',\n 'constructor_proj',\n 'constructor_ser_evar',\n 'constructor_prop',\n 'constructor_set',\n 'constructor_type',\n 'constructor_ulevel',\n 'constructor_vmcast',\n 'constructor_nativecast',\n 'constructor_defaultcast',\n 'constructor_revertcast',\n 'constructor_anonymous',\n 'constructor_name',\n 'constructor_constant',\n 'constructor_mpfile',\n 'constructor_mpbound',\n 'constructor_mpdot',\n 'constructor_dirpath',\n 'constructor_mbid',\n 'constructor_instance',\n 'constructor_mutind',\n 'constructor_letstyle',\n 'constructor_ifstyle',\n 'constructor_letpatternstyle',\n 'constructor_matchstyle',\n 'constructor_regularstyle',\n 'constructor_projection',\n 'bool',\n 'int',\n 'names__label__t',\n 'constr__case_printing',\n 'univ__universe__t',\n 'constr__pexistential___constr__constr',\n 'names__inductive',\n 'constr__case_info',\n 'names__constructor',\n 'constr__prec_declaration___constr__constr____constr__constr',\n 'constr__pfixpoint___constr__constr____constr__constr',\n 'constr__pcofixpoint___constr__constr____constr__constr',\n]\n\n\nclass InputOutputUpdateGate(nn.Module):\n\n def __init__(self, hidden_dim, nonlinear):\n super().__init__()\n self.nonlinear = nonlinear\n k = 1. / math.sqrt(hidden_dim)\n self.W = nn.Parameter(torch.Tensor(hidden_dim, len(nonterminals) + hidden_dim))\n nn.init.uniform_(self.W, -k, k)\n self.b = nn.Parameter(torch.Tensor(hidden_dim))\n nn.init.uniform_(self.b, -k, k)\n \n\n def forward(self, xh):\n return self.nonlinear(F.linear(xh, self.W, self.b))\n\n\nclass ForgetGates(nn.Module):\n\n def __init__(self, hidden_dim, opts):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.opts = opts\n k = 1. 
/ math.sqrt(hidden_dim)\n # the weight for the input\n self.W_if = nn.Parameter(torch.Tensor(hidden_dim, len(nonterminals)))\n nn.init.uniform_(self.W_if, -k, k)\n # the weight for the hidden\n self.W_hf = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))\n nn.init.uniform_(self.W_hf, -k, k)\n # the bias\n self.b_f = nn.Parameter(torch.Tensor(hidden_dim))\n nn.init.uniform_(self.b_f, -k, k)\n\n\n def forward(self, x, h_children, c_children):\n c_remain = torch.zeros(x.size(0), self.hidden_dim).to(self.opts.device)\n\n Wx = F.linear(x, self.W_if)\n all_h = list(chain(*h_children))\n if all_h == []:\n return c_remain\n Uh = F.linear(torch.stack(all_h), self.W_hf, self.b_f)\n i = 0\n for j, h in enumerate(h_children):\n if h == []:\n continue\n f_gates = torch.sigmoid(Wx[j] + Uh[i : i + len(h)])\n i += len(h)\n c_remain[j] = (f_gates * torch.stack(c_children[j])).sum(dim=0)\n \n return c_remain\n\n\nclass TermEncoder(nn.Module):\n\n def __init__(self, opts):\n super().__init__()\n self.opts = opts\n self.input_gate = InputOutputUpdateGate(opts.term_embedding_dim, nonlinear=torch.sigmoid)\n self.forget_gates = ForgetGates(opts.term_embedding_dim, opts)\n self.output_gate = InputOutputUpdateGate(opts.term_embedding_dim, nonlinear=torch.sigmoid)\n self.update_cell = InputOutputUpdateGate(opts.term_embedding_dim, nonlinear=torch.tanh)\n\n\n def forward(self, term_asts):\n # the height of a node determines when it can be processed\n height2nodes = defaultdict(set)\n\n def get_height(node):\n height2nodes[node.height].add(node)\n\n for ast in term_asts:\n traverse_postorder(ast, get_height)\n\n memory_cells = {} # node -> memory cell\n hidden_states = {} # node -> hidden state\n #return torch.zeros(len(term_asts), self.opts.term_embedding_dim).to(self.opts.device)\n\n # compute the embedding for each node\n for height in sorted(height2nodes.keys()):\n nodes_at_height = list(height2nodes[height])\n # sum up the hidden states of the children\n h_sum = []\n c_remains = []\n x = torch.zeros(len(nodes_at_height), len(nonterminals), device=self.opts.device) \\\n .scatter_(1, torch.tensor([nonterminals.index(node.data) for node in nodes_at_height], \n device=self.opts.device).unsqueeze(1), 1.0)\n\n h_sum = torch.zeros(len(nodes_at_height), self.opts.term_embedding_dim).to(self.opts.device)\n h_children = []\n c_children = []\n for j, node in enumerate(nodes_at_height):\n h_children.append([])\n c_children.append([])\n for c in node.children:\n h = hidden_states[c]\n h_sum[j] += h\n h_children[-1].append(h)\n c_children[-1].append(memory_cells[c])\n c_remains = self.forget_gates(x, h_children, c_children) \n\n # gates\n xh = torch.cat([x, h_sum], dim=1)\n i_gate = self.input_gate(xh)\n o_gate = self.output_gate(xh)\n u = self.update_cell(xh) \n cells = i_gate * u + c_remains\n hiddens = o_gate * torch.tanh(cells)\n\n\n for i, node in enumerate(nodes_at_height):\n memory_cells[node] = cells[i]\n hidden_states[node] = hiddens[i]\n \n return torch.stack([hidden_states[ast] for ast in term_asts])" ]
[ [ "torch.cat", "torch.stack", "torch.tanh", "torch.nn.functional.linear", "torch.nn.init.uniform_", "torch.Tensor" ] ]
larkinandy/Green-Space-Virtual-Reality
[ "f8679eea4e0aa388afdbf3b93d58ed375f57e4bd" ]
[ "Twitter_SMA/calcWeekly_tfidf.py" ]
[ "# calcWeekly_tfidf\n# created by Andrew Larkin\n# for Scoial Media Analytics course project\n# December 5, 2017\n\n# This script partitions Tweets from a raw csv file into weekly subsets,\n# writes the subsets to text files, and calculates weekly idf scores\n# for for each subset using trigrams\n\n# import modules \nimport pandas as ps\nfrom datetime import datetime\nfrom copy import deepcopy as dc\nfrom collections import defaultdict\nimport nltk as nk\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n# define input and output\noutputFolder = \"C:/users/user/desktop/\"\ninputFile = \"C:/users/larkinan/desktop/testOutputNoHashtag.csv\"\n\n\ndef getDayOfYear(dateString):\n return int(datetime.strptime(dateString,\"%Y-%m-%dT%H:%M:%S\").strftime('%j'))\n\n# if a string contains a hyperlink, find the location and remove for downstream text processing\ndef findHttp(inputString):\n httpLoc = str.find(inputString,\"http\")\n if(httpLoc > -1):\n return(inputString[0:httpLoc])\n else:\n return(inputString)\n\n# partition tweet dataset by day \ndef getTextForDay(dataset):\n accumDayText = \"\"\n numObs = 0\n numTweets = len(dataset['text'])\n stringList = list(dataset['text'])\n for singleText in range(0,numTweets):\n newString = stringList[singleText]\n newString = findHttp(newString)\n if(newString not in accumDayText):\n \n # test if the new Tweet content is not a duplicate\n shouldCopy = True\n words1 = list((newString.split()))\n testList = ' '.join(words1[1:max(len(words1)-3,3)])\n \n # text is unique add to daily tweet set\n if(testList in accumDayText):\n shouldCopy = False\n if(shouldCopy):\n accumDayText += newString\n accumDayText += \"\\n\" \n numObs +=1\n \n return([accumDayText,numObs])\n\n# patition tweet dataset by week\ndef partitionWeeklyTweets(rawData):\n rawData['dayOfYear'] = list(map(getDayOfYear, rawData['created']))\n uniqueDays = list(set(rawData['dayOfYear']))\n accumTexts = [] \n accumWeekTexts = [\"\"]\n numObs = [0]*len(uniqueDays)\n weekCounter = 0\n weekIndex = 0\n numWeekley = 0\n\n for dayIndex in range(0,len(uniqueDays)):\n tempCopy = dc(rawData)\n tempData = tempCopy.loc[tempCopy['dayOfYear'] == uniqueDays[dayIndex]]\n [dayText,numObs[dayIndex]] = getTextForDay(tempData)\n accumTexts.append(dayText)\n accumWeekTexts[weekIndex] += dayText\n weekCounter+=1\n numWeekley += numObs[dayIndex]\n # if new week, rest counter and start adding daily tweets to new week subset\n if(weekCounter == 7):\n weekIndex+=1\n weekCounter = 0\n print(\"num weekley\" + str(weekIndex) + \" : \" + str(numWeekley))\n numWeekley = 0\n accumWeekTexts.append(\"\")\n \n print(sum(numObs))\n print(numObs)\n for index in range(0,len(numObs)):\n if(numObs[index] < 100):\n print(index)\n print(len(numObs))\n return(accumWeekTexts)\n\ndef writeWeeklySet(accumWeekTexts):\n for weekText in accumWeekTexts:\n f = open(outputFolder + \"week\" + str(index) + \".txt\",'w')\n f.write(weekText)\n f.close()\n index+=1 \n\n# calculate tfidf scores for entire dataset, grouped by week. 
Based on script created by Mark Needham\n# http://www.markhneedham.com/blog/2015/02/15/pythonscikit-learn-calculating-tfidf-on-how-i-met-your-mother-transcripts/\ndef calc_tfidf(accumTexts):\n tfidf_matrix = tf.fit_transform(accumTexts)\n feature_names = tf.get_feature_names() \n print(len(feature_names))\n dense = tfidf_matrix.todense()\n for weekIndex in range(0,len(accumTexts)):\n print('values for week' + str(weekIndex))\n weekSet = dense[weekIndex].tolist()[0]\n phrase_scores = [pair for pair in zip(range(0, len(weekSet)), weekSet) if pair[1] > 0]\n sorted_phrase_scores = sorted(phrase_scores, key=lambda t: t[1] * -1)\n for phrase, score in [(feature_names[word_id], score) for (word_id, score) in sorted_phrase_scores][:10]:\n print('{0: <10} {1}'.format(phrase, score))\n\n# main script. Controls program directoin\ndef main():\n rawData = ps.read_csv(inputFile,encoding='latin') \n accumWeekTexts = partitionWeeklyTweets(rawData)\n writeWeeklySet(accumWeekTexts)\n calc_tfidf(accumWeekTexts)\n tf = TfidfVectorizer(analyzer='word', ngram_range=(3,3),min_df = 0,stop_words = 'english',use_idf=False)\n index = 0\n\n\nmain()\n\n\n# end of calcWeekly_tfidf" ]
[ [ "pandas.read_csv", "sklearn.feature_extraction.text.TfidfVectorizer" ] ]