Schema (one record per repository):
repo_name : string (lengths 6–130)
hexsha : list
file_path : list
code : list
apis : list
possible_versions : list
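For orientation, here is a minimal sketch of how one such record could be consumed in Python. Only the field names come from the schema above; the JSON Lines storage format and the file name `records.jsonl` are illustrative assumptions, not part of this dump.

```python
import json

# Hypothetical path; the dump's actual on-disk format is not specified here.
with open("records.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        # hexsha, file_path, code, apis are parallel lists: one entry per file.
        for path, source, apis in zip(record["file_path"],
                                      record["code"],
                                      record["apis"]):
            print(record["repo_name"], path, len(source.splitlines()), apis)
```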
jasmainak/ephypype
[ "257603cbb099cef7847a96c8eb141332fb85ebfa" ]
[ "ephypype/compute_inv_problem.py" ]
[ "# Created on Thu Oct 8 17:53:07 2015\n# @author: pasca\n\n\ndef compute_noise_cov(cov_fname, raw):\n \"\"\"\n Compute noise covariance data from a continuous segment of raw data.\n Employ empty room data (collected without the subject) to calculate\n the full noise covariance matrix.\n This is recommended for analyzing ongoing spontaneous activity.\n\n Inputs\n cov_fname : str\n noise covariance file name\n raw : Raw\n the raw data\n\n Output\n cov_fname : str\n noise covariance file name in which is saved the noise covariance\n matrix\n \"\"\"\n\n import os.path as op\n\n from mne import compute_raw_covariance, pick_types, write_cov\n from nipype.utils.filemanip import split_filename as split_f\n from ephypype.preproc import create_reject_dict\n\n print(('***** COMPUTE RAW COV *****' + cov_fname))\n\n if not op.isfile(cov_fname):\n\n data_path, basename, ext = split_f(raw.info['filename'])\n fname = op.join(data_path, '%s-cov.fif' % basename)\n\n reject = create_reject_dict(raw.info)\n\n picks = pick_types(raw.info, meg=True, ref_meg=False, exclude='bads')\n\n noise_cov = compute_raw_covariance(raw, picks=picks, reject=reject)\n\n write_cov(fname, noise_cov)\n\n else:\n print(('*** NOISE cov file %s exists!!!' % cov_fname))\n\n return cov_fname\n\n\ndef read_noise_cov(cov_fname, raw_info):\n \"\"\"\n Read a noise covariance matrix from cov_fname\n\n Inputs\n cov_fname : str\n noise covariance file name\n raw_info : dict\n dictionary containing the information about the raw data\n\n Outputs\n noise_cov : Covariance\n the noise covariance matrix\n \"\"\"\n import os.path as op\n import numpy as np\n import mne\n\n print(('***** READ RAW COV *****' + cov_fname))\n\n if not op.isfile(cov_fname):\n # create an Identity matrix\n picks = mne.pick_types(raw_info, meg=True, ref_meg=False,\n exclude='bads')\n ch_names = [raw_info['ch_names'][i] for i in picks]\n\n C = mne.Covariance(data=np.identity(len(picks)), names=ch_names,\n bads=[], projs=[], nfree=0)\n mne.write_cov(cov_fname, C)\n else:\n print(('*** noise covariance file %s exists!!!' % cov_fname))\n noise_cov = mne.read_cov(cov_fname)\n\n return noise_cov\n\n# TODO remove it!!! 
not used\n\n\ndef compute_ts_inv_sol(raw, fwd_filename, cov_fname, snr, inv_method, aseg):\n import os.path as op\n import numpy as np\n import mne\n from mne.minimum_norm import make_inverse_operator, apply_inverse_raw\n from nipype.utils.filemanip import split_filename as split_f\n\n print(('***** READ FWD SOL %s *****' % fwd_filename))\n forward = mne.read_forward_solution(fwd_filename)\n\n # Convert to surface orientation for cortically constrained\n # inverse modeling\n if not aseg:\n forward = mne.convert_forward_solution(forward, surf_ori=True)\n\n lambda2 = 1.0 / snr ** 2\n\n # compute inverse operator\n print('***** COMPUTE INV OP *****')\n inverse_operator = make_inverse_operator(raw.info, forward, cov_fname,\n loose=0.2, depth=0.8)\n\n # apply inverse operator to the time windows [t_start, t_stop]s\n # TEST\n t_start = 0 # sec\n t_stop = 3 # sec\n start, stop = raw.time_as_index([t_start, t_stop])\n print(('***** APPLY INV OP ***** [%d %d]sec' % (t_start, t_stop)))\n stc = apply_inverse_raw(raw, inverse_operator, lambda2, inv_method,\n label=None,\n start=start, stop=stop, pick_ori=None)\n\n print('***')\n print(('stc dim ' + str(stc.shape)))\n print('***')\n\n subj_path, basename, ext = split_f(raw.info['filename'])\n data = stc.data\n\n print(('data dim ' + str(data.shape)))\n\n # save results in .npy file that will be the input for spectral node\n print('***** SAVE SOL *****')\n ts_file = op.abspath(basename + '.npy')\n np.save(ts_file, data)\n\n return ts_file\n\n\n'''\n+---------------------+-----------+-----------+-----------+-----------------+--------------+\n| Inverse desired | Forward parameters allowed |\n+=====================+===========+===========+===========+=================+==============+\n| | **loose** | **depth** | **fixed** | **force_fixed** | **surf_ori** |\n+---------------------+-----------+-----------+-----------+-----------------+--------------+\n| | Loose constraint, | 0.2 | 0.8 | False | False | True |\n| | Depth weighted | | | | | |\n+---------------------+-----------+-----------+-----------+-----------------+--------------+\n| | Loose constraint | 0.2 | None | False | False | True |\n+---------------------+-----------+-----------+-----------+-----------------+--------------+\n| | Free orientation, | None | 0.8 | False | False | True |\n| | Depth weighted | | | | | |\n+---------------------+-----------+-----------+-----------+-----------------+--------------+\n| | Free orientation | None | None | False | False | True | False |\n+---------------------+-----------+-----------+-----------+-----------------+--------------+\n| | Fixed constraint, | None | 0.8 | True | False | True |\n| | Depth weighted | | | | | |\n+---------------------+-----------+-----------+-----------+-----------------+--------------+\n| | Fixed constraint | None | None | True | True | True |\n+---------------------+-----------+-----------+-----------+-----------------+--------------+ \n'''\n\n# TODO too long function -> put lines code about labels in a new little function\n\n\ndef compute_ROIs_inv_sol(raw_filename, sbj_id, sbj_dir, fwd_filename,\n cov_fname, is_epoched=False, events_id=[],\n t_min=None, t_max=None, is_evoked=False,\n snr=1.0, inv_method='MNE',\n parc='aparc', aseg=False, aseg_labels=[],\n save_stc=False, is_fixed=False):\n \"\"\"\n Compute the inverse solution on raw/epoched data and return the average\n time series computed in the N_r regions of the source space defined by\n the specified cortical parcellation\n\n Inputs\n raw_filename : str\n filename of the 
raw/epoched data\n sbj_id : str\n subject name\n sbj_dir : str\n Freesurfer directory\n fwd_filename : str\n filename of the forward operator\n cov_filename : str\n filename of the noise covariance matrix\n is_epoched : bool\n if True and events_id = None the input data are epoch data\n in the format -epo.fif\n if True and events_id is not None, the raw data are epoched\n according to events_id and t_min and t_max values\n events_id: dict\n the dict of events\n t_min, t_max: int\n define the time interval in which to epoch the raw data\n is_evoked: bool\n if True the raw data will be averaged according to the events\n contained in the dict events_id\n inv_method : str\n the inverse method to use; possible choices: MNE, dSPM, sLORETA\n snr : float\n the SNR value used to define the regularization parameter\n parc: str\n the parcellation defining the ROIs atlas in the source space\n aseg: bool\n if True a mixed source space will be created and the sub cortical\n regions defined in aseg_labels will be added to the source space\n aseg_labels: list\n list of substructures we want to include in the mixed source space\n save_stc: bool\n if True the stc will be saved\n\n Outputs\n ts_file : str\n filename of the file where are saved the ROIs time series\n labels_file : str\n filename of the file where are saved the ROIs of the parcellation\n label_names_file : str\n filename of the file where are saved the name of the ROIs of the\n parcellation\n label_coords_file : str\n filename of the file where are saved the coordinates of the\n centroid of the ROIs of the parcellation\n\n \"\"\"\n import os.path as op\n import numpy as np\n import mne\n\n from mne.io import read_raw_fif\n from mne import read_epochs\n from mne.minimum_norm import make_inverse_operator, apply_inverse_raw\n from mne.minimum_norm import apply_inverse_epochs, apply_inverse\n from mne import get_volume_labels_from_src\n\n from nipype.utils.filemanip import split_filename as split_f\n\n from ephypype.preproc import create_reject_dict\n from ephypype.source_space import create_MNI_label_files\n\n try:\n traits.undefined(events_id)\n except NameError:\n events_id = None\n\n print(('\\n*** READ raw filename %s ***\\n' % raw_filename))\n if is_epoched and events_id is None:\n epochs = read_epochs(raw_filename)\n info = epochs.info\n else:\n raw = read_raw_fif(raw_filename, preload=True)\n# raw.set_eeg_reference()\n info = raw.info\n\n subj_path, basename, ext = split_f(raw_filename)\n\n print(('\\n*** READ noise covariance %s ***\\n' % cov_fname))\n noise_cov = mne.read_cov(cov_fname)\n\n print(('\\n*** READ FWD SOL %s ***\\n' % fwd_filename))\n forward = mne.read_forward_solution(fwd_filename)\n\n if not aseg:\n print(('\\n*** fixed orientation {} ***\\n'.format(is_fixed)))\n forward = mne.convert_forward_solution(forward, surf_ori=True,\n force_fixed=is_fixed)\n\n lambda2 = 1.0 / snr ** 2\n\n # compute inverse operator\n print('\\n*** COMPUTE INV OP ***\\n')\n if is_fixed:\n loose = None\n depth = None\n pick_ori = None\n elif aseg:\n loose = 1\n depth = None\n pick_ori = None\n else:\n loose = 0.2\n depth = 0.8\n pick_ori = 'normal'\n\n print(('\\n *** loose {} depth {} ***\\n'.format(loose, depth)))\n inverse_operator = make_inverse_operator(info, forward, noise_cov,\n loose=loose, depth=depth,\n fixed=is_fixed)\n\n # apply inverse operator to the time windows [t_start, t_stop]s\n print('\\n*** APPLY INV OP ***\\n')\n if is_epoched and events_id is not None:\n events = mne.find_events(raw)\n picks = mne.pick_types(info, meg=True, 
eog=True, exclude='bads')\n reject = create_reject_dict(info)\n\n if is_evoked:\n epochs = mne.Epochs(raw, events, events_id, t_min, t_max,\n picks=picks, baseline=(None, 0), reject=reject)\n evoked = [epochs[k].average() for k in events_id]\n snr = 3.0\n lambda2 = 1.0 / snr ** 2\n\n ev_list = list(events_id.items())\n for k in range(len(events_id)):\n stc = apply_inverse(evoked[k], inverse_operator, lambda2,\n inv_method, pick_ori=pick_ori)\n\n print(('\\n*** STC for event %s ***\\n' % ev_list[k][0]))\n stc_file = op.abspath(basename + '_' + ev_list[k][0])\n\n print('***')\n print(('stc dim ' + str(stc.shape)))\n print('***')\n\n if not aseg:\n stc.save(stc_file)\n\n else:\n epochs = mne.Epochs(raw, events, events_id, t_min, t_max,\n picks=picks, baseline=(None, 0), reject=reject)\n stc = apply_inverse_epochs(epochs, inverse_operator, lambda2,\n inv_method, pick_ori=pick_ori)\n\n print('***')\n print(('len stc %d' % len(stc)))\n print('***')\n\n elif is_epoched and events_id is None:\n stc = apply_inverse_epochs(epochs, inverse_operator, lambda2,\n inv_method, pick_ori=pick_ori)\n print('***')\n print(('len stc %d' % len(stc)))\n print('***')\n else:\n stc = apply_inverse_raw(raw, inverse_operator, lambda2, inv_method,\n label=None,\n start=None, stop=None,\n buffer_size=1000,\n pick_ori=pick_ori) # None 'normal'\n\n print('***')\n print(('stc dim ' + str(stc.shape)))\n print('***')\n\n if not isinstance(stc, list):\n stc = [stc]\n\n if save_stc:\n for i in range(len(stc)):\n stc_file = op.abspath(basename + '_stc_' + str(i) + '.npy')\n np.save(stc_file, stc[i].data)\n\n # these coo are in MRI space and we have to convert to MNI space\n labels_cortex = mne.read_labels_from_annot(sbj_id, parc=parc,\n subjects_dir=sbj_dir)\n\n print(('\\n*** %d ***\\n' % len(labels_cortex)))\n\n src = inverse_operator['src']\n\n # allow_empty : bool -> Instead of emitting an error, return all-zero time\n # courses for labels that do not have any vertices in the source estimate\n\n if is_fixed:\n mode = 'mean_flip'\n else:\n mode = 'mean'\n\n label_ts = mne.extract_label_time_course(stc, labels_cortex, src,\n mode=mode,\n allow_empty=True,\n return_generator=False)\n\n # save results in .npy file that will be the input for spectral node\n print('\\n*** SAVE ROI TS ***\\n')\n print((len(label_ts)))\n\n ts_file = op.abspath(basename + '_ROI_ts.npy')\n np.save(ts_file, label_ts)\n\n if aseg:\n print(sbj_id)\n labels_aseg = get_volume_labels_from_src(src, sbj_id, sbj_dir)\n labels = labels_cortex + labels_aseg\n else:\n labels = labels_cortex\n labels_aseg = None\n\n print((labels[0].pos))\n print((len(labels)))\n\n\n# labels_file, label_names_file, label_coords_file = create_label_files(labels)\n labels_file, label_names_file, label_coords_file = \\\n create_MNI_label_files(forward, labels_cortex, labels_aseg,\n sbj_id, sbj_dir)\n\n return ts_file, labels_file, label_names_file, label_coords_file\n" ]
[ [ "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tanacchi/machine_learning_intro
[ "c3776fb54b499b106d7f84c3acf759e96b87f1f4", "c3776fb54b499b106d7f84c3acf759e96b87f1f4" ]
[ "pytorch/capter_8/frozen_lake_dqn.py", "samples/scipy/sample_01.py" ]
[ "import gym\nimport numpy as np\n\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch import optim\nfrom torch.nn import functional as F\n\n\nenv = gym.make('FrozenLake-v0')\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(16, 64)\n self.fc2 = nn.Linear(64, 64)\n self.fc3 = nn.Linear(64, 96)\n self.fc4 = nn.Linear(96, 96)\n self.fc5 = nn.Linear(96, 64)\n self.fc6 = nn.Linear(64, 64)\n self.fc7 = nn.Linear(64, 4)\n\n def forward(self, x):\n x = Variable(x)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.relu(self.fc4(x))\n x = F.relu(self.fc5(x))\n x = F.relu(self.fc6(x))\n x = self.fc7(x)\n return x\n\nmodel = Net()\n\n\ndef onehot2tensor(state):\n tmp = np.zeros(16)\n tmp[state] = 1\n vector = np.array(tmp, dtype='float32')\n tensor = torch.from_numpy(vector).float()\n return tensor\n\ndef applymodel(tensor):\n output_tensor = model(tensor)\n output_array = output_tensor.data.numpy()\n return output_tensor, output_array\n\ntotal_reward = 0.0\ncriterion = nn.MSELoss()\noptimizer = optim.Adam(model.parameters(), lr=0.01)\nsteps = 10000\n\nfor i_episode in range(steps):\n observation = env.reset()\n episode_reward = 0.0\n total_loss = 0.0\n\n for t in range(100):\n current_state = observation\n optimizer.zero_grad()\n current_tensor = onehot2tensor(current_state)\n current_output_tensor, current_output_array = \\\n applymodel(current_tensor)\n \n if np.random.rand() < 0.1:\n action = env.action_space.sample()\n else:\n action = np.argmax(current_output_array)\n\n observation, reward, done, info = env.step(action)\n observation_tensor = onehot2tensor(observation)\n observation_output_tensor, observation_output_array = \\\n applymodel(observation_tensor)\n\n q = reward + 0.99 * np.max(observation_output_array)\n q_array = np.copy(current_output_array)\n q_array[action] = q\n q_variable = Variable(torch.Tensor(q_array))\n\n loss = criterion(current_output_tensor, q_variable)\n loss.backward()\n optimizer.step()\n total_loss += loss.data.item()\n\n if done:\n episode_reward += reward\n\n total_reward += episode_reward\n\n if (i_episode + 1) % 1000 == 0:\n print(i_episode + 1, total_loss, total_reward)\n\nprint(total_reward / steps)\n\nobservation = env.reset()\n\nfor i_episode in range(100):\n env.render()\n current_state = observation\n current_tensor = onehot2tensor(current_state)\n current_output_tensor, current_output_array = \\\n applymodel(current_tensor)\n action = np.argmax(current_output_array)\n observation, reward, done, info = env.step(action)\n", "# page 8\nimport numpy as np\nfrom scipy import sparse\n\neye = np.eye(4)\nprint(\"NumPy array:\\n{}\".format(eye))\n\n\"\"\"\nNumPy array:\n[[1. 0. 0. 0.]\n [0. 1. 0. 0.]\n [0. 0. 1. 0.]\n [0. 0. 0. 1.]]\n\n\"\"\"\n\n\nsparse_matrix = sparse.csr_matrix(eye)\nprint(\"\\nScipy sparse CSR matrix:\\n{}\".format(sparse_matrix))\n\n\"\"\"\nScipy sparse CSR matrix:\n (0, 0)\t1.0\n (1, 1)\t1.0\n (2, 2)\t1.0\n (3, 3)\t1.0\n\n\"\"\"\n\n\ndata = np.ones(4) # [1. 1. 1. 1.]\nrow_indices = np.arange(4) # [0 1 2 3]\ncol_indices = np.arange(4)\neye_coo = sparse.coo_matrix((data, (row_indices, col_indices)))\nprint(\"COO representation:\\n{}\".format(eye_coo))\n\n\"\"\"\nCOO representation:\n (0, 0)\t1.0\n (1, 1)\t1.0\n (2, 2)\t1.0\n (3, 3)\t1.0\n\n\"\"\"\n" ]
[ [ "torch.Tensor", "torch.from_numpy", "torch.nn.Linear", "numpy.copy", "numpy.argmax", "numpy.max", "numpy.random.rand", "numpy.array", "numpy.zeros", "torch.nn.MSELoss", "torch.autograd.Variable" ], [ "scipy.sparse.coo_matrix", "numpy.arange", "numpy.eye", "scipy.sparse.csr_matrix", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
mlaai/mentornet
[ "76d6be2db1be39714dec6db6bb3bcbb77855ce6e", "76d6be2db1be39714dec6db6bb3bcbb77855ce6e" ]
[ "code/cifar_data_provider.py", "code/inception_model.py" ]
[ "# Copyright 2018 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Contains code for loading and preprocessing the CIFAR data.\"\"\"\n\nimport cifar100_dataset\nimport cifar10_dataset\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.slim.python.slim.data import dataset_data_provider\n\ndatasets_map = {\n 'cifar10': cifar10_dataset,\n 'cifar100': cifar100_dataset,\n}\n\n\ndef provide_resnet_data(dataset_name,\n split_name,\n batch_size,\n dataset_dir=None,\n num_epochs=None):\n \"\"\"Provides batches of CIFAR images for resnet.\n\n Args:\n dataset_name: Eiether 'cifar10' or 'cifar100'.\n split_name: Either 'train' or 'test'.\n batch_size: The number of images in each batch.\n dataset_dir: The directory where the MNIST data can be found.\n num_epochs: The number of times each data source is read. If left as None,\n the data will be cycled through indefinitely.\n\n Returns:\n images: A `Tensor` of size [batch_size, 32, 32, 1]\n one_hot_labels: A `Tensor` of size [batch_size, NUM_CLASSES], where\n each row has a single element set to one and the rest set to zeros.\n num_samples: The number of total samples in the dataset.\n num_classes: The number of total classes in the dataset.\n\n\n Raises:\n ValueError: If `split_name` is not either 'train' or 'test'.\n \"\"\"\n dataset = get_dataset(dataset_name, split_name, dataset_dir=dataset_dir)\n\n # num_epochs = 1 if split_name == 'test' else None\n provider = dataset_data_provider.DatasetDataProvider(\n dataset,\n common_queue_capacity=2 * batch_size,\n common_queue_min=batch_size,\n shuffle=(split_name == 'train'),\n num_epochs=num_epochs)\n if dataset_name == 'cifar100':\n [image, label] = provider.get(['image', 'fine_label'])\n else:\n [image, label] = provider.get(['image', 'label'])\n\n image = tf.to_float(image)\n\n image_size = 32\n if split_name == 'train':\n image = tf.image.resize_image_with_crop_or_pad(image, image_size + 4,\n image_size + 4)\n image = tf.random_crop(image, [image_size, image_size, 3])\n image = tf.image.random_flip_left_right(image)\n image = tf.image.per_image_standardization(image)\n else:\n image = tf.image.resize_image_with_crop_or_pad(image, image_size,\n image_size)\n image = tf.image.per_image_standardization(image)\n\n # Creates a QueueRunner for the pre-fetching operation.\n images, labels = tf.train.batch(\n [image, label],\n batch_size=batch_size,\n num_threads=1,\n capacity=5 * batch_size,\n allow_smaller_final_batch=True)\n\n one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)\n one_hot_labels = tf.squeeze(one_hot_labels, 1)\n return images, one_hot_labels, dataset.num_samples, dataset.num_classes\n\n\ndef provide_cifarnet_data(dataset_name,\n split_name,\n batch_size,\n dataset_dir=None,\n num_epochs=None):\n \"\"\"Provides batches of CIFAR images for cifarnet.\n\n Args:\n dataset_name: Eiether 'cifar10' or 'cifar100'.\n 
split_name: Either 'train' or 'test'.\n batch_size: The number of images in each batch.\n dataset_dir: The directory where the MNIST data can be found.\n num_epochs: The number of times each data source is read. If left as None,\n the data will be cycled through indefinitely.\n\n Returns:\n images: A `Tensor` of size [batch_size, 32, 32, 1]\n one_hot_labels: A `Tensor` of size [batch_size, NUM_CLASSES], where\n each row has a single element set to one and the rest set to zeros.\n num_samples: The number of total samples in the dataset.\n num_classes: The number of total classes in the dataset.\n\n Raises:\n ValueError: If `split_name` is not either 'train' or 'test'.\n \"\"\"\n dataset = get_dataset(dataset_name, split_name, dataset_dir=dataset_dir)\n # num_epochs = 1 if split_name == 'test' else None\n provider = dataset_data_provider.DatasetDataProvider(\n dataset,\n common_queue_capacity=2 * batch_size,\n common_queue_min=batch_size,\n shuffle=(split_name == 'train'),\n num_epochs=num_epochs)\n\n if dataset_name == 'cifar100':\n [image, label] = provider.get(['image', 'fine_label'])\n else:\n [image, label] = provider.get(['image', 'label'])\n\n image_size = 32\n image = tf.to_float(image)\n\n # preprocess the images.\n if split_name == 'train':\n padding = image_size / 4\n image = tf.pad(image, [[padding, padding], [padding, padding], [0, 0]])\n image = tf.random_crop(image, [image_size, image_size, 3])\n image = tf.image.random_flip_left_right(image)\n image = tf.image.per_image_standardization(image)\n else:\n image = tf.image.resize_image_with_crop_or_pad(image, image_size,\n image_size)\n image = tf.image.per_image_standardization(image)\n\n # Creates a QueueRunner for the pre-fetching operation.\n images, labels = tf.train.batch(\n [image, label],\n batch_size=batch_size,\n num_threads=1,\n capacity=5 * batch_size,\n allow_smaller_final_batch=True)\n\n one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)\n one_hot_labels = tf.squeeze(one_hot_labels, 1)\n return images, one_hot_labels, dataset.num_samples, dataset.num_classes\n\n\ndef get_dataset(name, split_name, **kwargs):\n \"\"\"Given a dataset name and a split_name returns a Dataset.\n\n Args:\n name: String, name of the dataset.\n split_name: A train/test split name.\n **kwargs: Extra kwargs for get_split, for example dataset_dir.\n\n Returns:\n A `Dataset` namedtuple.\n\n Raises:\n ValueError: if dataset unknown.\n \"\"\"\n if name not in datasets_map:\n raise ValueError('Name of dataset unknown %s' % name)\n dataset = datasets_map[name].get_split(split_name, **kwargs)\n dataset.name = name\n return dataset\n\n", "# Copyright 2018 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Contains a variant of the CIFAR-10 model definition.\"\"\"\n\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\ntrunc_normal = lambda stddev: tf.truncated_normal_initializer(stddev=stddev)\n\n\ndef cifarnet(images, num_classes=10, is_training=False,\n dropout_keep_prob=0.5,\n prediction_fn=slim.softmax,\n scope='CifarNet'):\n \"\"\"Creates a variant of the CifarNet model.\n\n Note that since the output is a set of 'logits', the values fall in the\n interval of (-infinity, infinity). Consequently, to convert the outputs to a\n probability distribution over the characters, one will need to convert them\n using the softmax function:\n\n logits = cifarnet.cifarnet(images, is_training=False)\n probabilities = tf.nn.softmax(logits)\n predictions = tf.argmax(logits, 1)\n\n Args:\n images: A batch of `Tensors` of size [batch_size, height, width, channels].\n num_classes: the number of classes in the dataset.\n is_training: specifies whether or not we're currently training the model.\n This variable will determine the behaviour of the dropout layer.\n dropout_keep_prob: the percentage of activation values that are retained.\n prediction_fn: a function to get predictions out of logits.\n scope: Optional variable_scope.\n\n Returns:\n logits: the pre-softmax activations, a tensor of size\n [batch_size, `num_classes`]\n end_points: a dictionary from components of the network to the corresponding\n activation.\n \"\"\"\n end_points = {}\n\n# Turn on if the batch norm is used.\n# batch_norm_params = {\n# # Decay for the moving averages.\n# 'decay': 0.9997,\n# # epsilon to prevent 0s in variance.\n# 'epsilon': 0.001,\n# # collection containing the moving mean and moving variance.\n# #'moving_vars': 'moving_vars',\n# }\n\n# with slim.arg_scope([slim.conv2d, slim.fully_connected],\n# normalizer_params=batch_norm_params,\n# normalizer_fn=slim.batch_norm):\n with tf.variable_scope(scope, 'CifarNet', [images, num_classes]):\n net = slim.conv2d(images, 64, [5, 5], scope='conv1')\n end_points['conv1'] = net\n net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')\n end_points['pool1'] = net\n net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm1')\n net = slim.conv2d(net, 64, [5, 5], scope='conv2')\n end_points['conv2'] = net\n net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm2')\n net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')\n end_points['pool2'] = net\n net = slim.flatten(net)\n end_points['Flatten'] = net\n net = slim.fully_connected(net, 384, scope='fc3')\n end_points['fc3'] = net\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout3')\n net = slim.fully_connected(net, 192, scope='fc4')\n end_points['fc4'] = net\n logits = slim.fully_connected(net, num_classes,\n biases_initializer=tf.zeros_initializer(),\n 
weights_initializer=trunc_normal(1/192.0),\n weights_regularizer=None,\n activation_fn=None,\n scope='logits')\n\n end_points['Logits'] = logits\n end_points['Predictions'] = prediction_fn(logits, scope='Predictions')\n\n return logits, end_points\n\ncifarnet.default_image_size = 32\n\n\ndef cifarnet_arg_scope(weight_decay=0.004):\n \"\"\"Defines the default cifarnet argument scope.\n\n Args:\n weight_decay: The weight decay to use for regularizing the model.\n\n Returns:\n An `arg_scope` to use for the inception v3 model.\n \"\"\"\n with slim.arg_scope(\n [slim.conv2d],\n weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),\n activation_fn=tf.nn.relu):\n with slim.arg_scope(\n [slim.fully_connected],\n biases_initializer=tf.constant_initializer(0.1),\n weights_initializer=trunc_normal(0.04),\n weights_regularizer=slim.l2_regularizer(weight_decay),\n activation_fn=tf.nn.relu) as sc:\n return sc\n" ]
[ [ "tensorflow.image.resize_image_with_crop_or_pad", "tensorflow.image.random_flip_left_right", "tensorflow.contrib.slim.one_hot_encoding", "tensorflow.squeeze", "tensorflow.random_crop", "tensorflow.image.per_image_standardization", "tensorflow.to_float", "tensorflow.pad", "tensorflow.contrib.slim.python.slim.data.dataset_data_provider.DatasetDataProvider", "tensorflow.train.batch" ], [ "tensorflow.contrib.slim.dropout", "tensorflow.contrib.slim.max_pool2d", "tensorflow.zeros_initializer", "tensorflow.contrib.slim.l2_regularizer", "tensorflow.truncated_normal_initializer", "tensorflow.constant_initializer", "tensorflow.contrib.slim.fully_connected", "tensorflow.nn.lrn", "tensorflow.contrib.slim.flatten", "tensorflow.contrib.slim.conv2d", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.10", "1.12" ] } ]
tdml13/NiftyNet
[ "b35fa19ca307e81d229e2fe8269a417724833da2", "b35fa19ca307e81d229e2fe8269a417724833da2", "b35fa19ca307e81d229e2fe8269a417724833da2", "b35fa19ca307e81d229e2fe8269a417724833da2", "b35fa19ca307e81d229e2fe8269a417724833da2", "b35fa19ca307e81d229e2fe8269a417724833da2", "b35fa19ca307e81d229e2fe8269a417724833da2", "b35fa19ca307e81d229e2fe8269a417724833da2", "b35fa19ca307e81d229e2fe8269a417724833da2", "b35fa19ca307e81d229e2fe8269a417724833da2", "b35fa19ca307e81d229e2fe8269a417724833da2" ]
[ "tests/handler_early_stopping_test.py", "niftynet/contrib/csv_reader/classification_application.py", "niftynet/contrib/csv_reader/applications_maybe/label_driven_registration.py", "niftynet/evaluation/segmentation_evaluations.py", "niftynet/application/regression_application.py", "tests/scalenet_test.py", "niftynet/contrib/segmentation_bf_aug/segmentation_application_bfaug.py", "niftynet/io/image_reader.py", "niftynet/engine/handler_sampler.py", "niftynet/contrib/csv_reader/sampler_uniform_v2_csv.py", "niftynet/network/scalenet.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom niftynet.engine.handler_early_stopping import check_should_stop\nfrom tests.niftynet_testcase import NiftyNetTestCase\n\nclass EarlyStopperTest(NiftyNetTestCase):\n\n def test_mean(self):\n should_stop = check_should_stop(mode='mean',\n performance_history=[1, 2, 1, 2, 1,\n 2, 1, 2, 3])\n self.assertTrue(should_stop)\n should_stop = check_should_stop(mode='mean',\n performance_history=[1, 2, 1, 2, 1, 2,\n 1, 2, 3, 0])\n self.assertFalse(should_stop)\n\n def test_robust_mean(self):\n should_stop = check_should_stop(mode='robust_mean',\n performance_history=[1, 2, 1, 2, 1, 2,\n 1, 200, -10, 1.4])\n self.assertFalse(should_stop)\n should_stop = check_should_stop(mode='robust_mean',\n performance_history=[1, 2, 1, 2, 1, 2,\n 1, 200, -10, 1.5])\n self.assertTrue(should_stop)\n\n def test_median(self):\n should_stop = check_should_stop(mode='median',\n performance_history=[1, 2, 1, 2, 1, 2,\n 1, 2, 3])\n self.assertTrue(should_stop)\n should_stop = check_should_stop(mode='median',\n performance_history=[1, 2, 1, 2, 1, 2,\n 1, 2, 3, 0])\n self.assertFalse(should_stop)\n\n def test_generalisation_loss(self):\n should_stop = check_should_stop(mode='generalisation_loss',\n performance_history=[1, 2, 1, 2, 1,\n 2, 1, 2, 3])\n self.assertTrue(should_stop)\n should_stop = check_should_stop(mode='generalisation_loss',\n performance_history=[1, 2, 1, 2, 3,\n 2, 1, 2, 1])\n self.assertFalse(should_stop)\n\n def test_validation_up(self):\n data = []\n for i in range(10):\n data.extend(np.arange(1, 9))\n data.extend(np.arange(2, 10)[::-1])\n should_stop = check_should_stop(mode='validation_up',\n performance_history=\n np.arange(0, 20) / 10.0)\n print(\"1 val\")\n self.assertTrue(should_stop)\n should_stop = check_should_stop(mode='validation_up',\n performance_history=np.arange(\n 0, 20)[::-1] / 10)\n print(\"2 val\")\n self.assertFalse(should_stop)\n\n should_stop = check_should_stop(mode='validation_up',\n performance_history=data,\n min_delta=0.2)\n print(\"3 val\")\n self.assertFalse(should_stop)\n\n def test_median_smoothing(self):\n data = []\n for i in range(10):\n data.extend(np.arange(0, 8))\n data.extend(np.arange(1, 9)[::-1])\n should_stop = \\\n check_should_stop(mode='median_smoothing',\n performance_history=np.arange(0, 20) / 10.0)\n self.assertTrue(should_stop)\n should_stop = check_should_stop(mode='median_smoothing',\n performance_history=np.arange(\n 0, 20)[::-1] / 10)\n self.assertFalse(should_stop)\n\n should_stop = check_should_stop(mode='median_smoothing',\n performance_history=data)\n self.assertFalse(should_stop)\n\n def test_weird_mode(self):\n with self.assertRaises(Exception):\n check_should_stop(mode='adslhfjdkas',\n performance_history=[1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# -*- coding: utf-8 -*-\n\"\"\"\nThis module defines an image-level classification application\nthat maps from images to scalar, multi-class labels.\n\nThis class is instantiated and initalized by the application_driver.\n\"\"\"\n\nimport os\n\nimport tensorflow as tf\n\nfrom niftynet.application.base_application import BaseApplication\nfrom niftynet.engine.application_factory import \\\n ApplicationNetFactory, InitializerFactory, OptimiserFactory\nfrom niftynet.engine.application_variables import \\\n CONSOLE, NETWORK_OUTPUT, TF_SUMMARIES\nfrom niftynet.contrib.csv_reader.sampler_resize_v2_csv import ResizeSamplerCSV as 
ResizeSampler\n# from niftynet.engine.windows_aggregator_classifier import \\\n# ClassifierSamplesAggregator\nfrom niftynet.io.image_reader import ImageReader\nfrom niftynet.contrib.csv_reader.csv_reader import CSVReader\nfrom niftynet.layer.discrete_label_normalisation import \\\n DiscreteLabelNormalisationLayer\nfrom niftynet.layer.histogram_normalisation import \\\n HistogramNormalisationLayer\nfrom niftynet.layer.binary_masking import BinaryMaskingLayer\nfrom niftynet.layer.post_processing import PostProcessingLayer\nfrom niftynet.layer.loss_classification import LossFunction\nfrom niftynet.layer.mean_variance_normalisation import \\\n MeanVarNormalisationLayer\nfrom niftynet.layer.rand_flip import RandomFlipLayer\nfrom niftynet.layer.rand_rotation import RandomRotationLayer\nfrom niftynet.layer.rand_spatial_scaling import RandomSpatialScalingLayer\nfrom niftynet.evaluation.classification_evaluator import ClassificationEvaluator\n\nSUPPORTED_INPUT = set(['image', 'label', 'sampler', 'inferred'])\n\n\nclass ClassificationApplication(BaseApplication):\n \"\"\"This class defines an application for image-level classification\n problems mapping from images to scalar labels.\n\n This is the application class to be instantiated by the driver\n and referred to in configuration files.\n\n Although structurally similar to segmentation, this application\n supports different samplers/aggregators (because patch-based\n processing is not appropriate), and monitoring metrics.\"\"\"\n\n REQUIRED_CONFIG_SECTION = \"CLASSIFICATION\"\n\n def __init__(self, net_param, action_param, action):\n super(ClassificationApplication, self).__init__()\n tf.logging.info('starting classification application')\n self.action = action\n\n self.net_param = net_param\n self.action_param = action_param\n\n self.data_param = None\n self.classification_param = None\n self.SUPPORTED_SAMPLING = {\n 'resize': (self.initialise_resize_sampler,\n self.initialise_resize_sampler),\n }\n\n def initialise_dataset_loader(\n self, data_param=None, task_param=None, data_partitioner=None):\n\n self.data_param = data_param\n self.classification_param = task_param\n\n if self.is_training:\n image_reader_names = ('image', 'sampler')\n csv_reader_names = ('label',)\n elif self.is_inference:\n image_reader_names = ('image',)\n elif self.is_evaluation:\n image_reader_names = ('image', 'inferred')\n csv_reader_names = ('label',)\n else:\n tf.logging.fatal(\n 'Action `%s` not supported. 
Expected one of %s',\n self.action, self.SUPPORTED_PHASES)\n raise ValueError\n try:\n reader_phase = self.action_param.dataset_to_infer\n except AttributeError:\n reader_phase = None\n file_lists = data_partitioner.get_file_lists_by(\n phase=reader_phase, action=self.action)\n self.readers = [\n ImageReader(image_reader_names).initialise(\n data_param, task_param, file_list) for file_list in file_lists]\n self.csv_readers = [\n CSVReader(csv_reader_names).initialise(\n data_param, task_param, file_list) for file_list in file_lists]\n\n foreground_masking_layer = BinaryMaskingLayer(\n type_str=self.net_param.foreground_type,\n multimod_fusion=self.net_param.multimod_foreground_type,\n threshold=0.0) \\\n if self.net_param.normalise_foreground_only else None\n\n mean_var_normaliser = MeanVarNormalisationLayer(\n image_name='image', binary_masking_func=foreground_masking_layer) \\\n if self.net_param.whitening else None\n histogram_normaliser = HistogramNormalisationLayer(\n image_name='image',\n modalities=vars(task_param).get('image'),\n model_filename=self.net_param.histogram_ref_file,\n binary_masking_func=foreground_masking_layer,\n norm_type=self.net_param.norm_type,\n cutoff=self.net_param.cutoff,\n name='hist_norm_layer') \\\n if (self.net_param.histogram_ref_file and\n self.net_param.normalisation) else None\n\n label_normaliser = DiscreteLabelNormalisationLayer(\n image_name='label',\n modalities=vars(task_param).get('label'),\n model_filename=self.net_param.histogram_ref_file) \\\n if (self.net_param.histogram_ref_file and\n task_param.label_normalisation) else None\n\n normalisation_layers = []\n if histogram_normaliser is not None:\n normalisation_layers.append(histogram_normaliser)\n if mean_var_normaliser is not None:\n normalisation_layers.append(mean_var_normaliser)\n if label_normaliser is not None:\n normalisation_layers.append(label_normaliser)\n\n augmentation_layers = []\n if self.is_training:\n train_param = self.action_param\n if train_param.random_flipping_axes != -1:\n augmentation_layers.append(RandomFlipLayer(\n flip_axes=train_param.random_flipping_axes))\n if train_param.scaling_percentage:\n augmentation_layers.append(RandomSpatialScalingLayer(\n min_percentage=train_param.scaling_percentage[0],\n max_percentage=train_param.scaling_percentage[1]))\n if train_param.rotation_angle or \\\n self.action_param.rotation_angle_x or \\\n self.action_param.rotation_angle_y or \\\n self.action_param.rotation_angle_z:\n rotation_layer = RandomRotationLayer()\n if train_param.rotation_angle:\n rotation_layer.init_uniform_angle(\n train_param.rotation_angle)\n else:\n rotation_layer.init_non_uniform_angle(\n self.action_param.rotation_angle_x,\n self.action_param.rotation_angle_y,\n self.action_param.rotation_angle_z)\n augmentation_layers.append(rotation_layer)\n\n # only add augmentation to first reader (not validation reader)\n self.readers[0].add_preprocessing_layers(\n normalisation_layers + augmentation_layers)\n\n for reader in self.readers[1:]:\n reader.add_preprocessing_layers(normalisation_layers)\n\n def initialise_resize_sampler(self):\n self.sampler = [[ResizeSampler(\n reader=image_reader,\n csv_reader=csv_reader,\n window_sizes=self.data_param,\n batch_size=self.net_param.batch_size,\n shuffle=self.is_training,\n queue_length=self.net_param.queue_length) for image_reader, csv_reader in\n zip(self.readers, self.csv_readers)]]\n\n def initialise_aggregator(self):\n self.output_decoder = ClassifierSamplesAggregator(\n image_reader=self.readers[0],\n 
output_path=self.action_param.save_seg_dir,\n postfix=self.action_param.output_postfix)\n\n def initialise_sampler(self):\n if self.is_training:\n self.SUPPORTED_SAMPLING[self.net_param.window_sampling][0]()\n else:\n self.SUPPORTED_SAMPLING[self.net_param.window_sampling][1]()\n\n def initialise_network(self):\n w_regularizer = None\n b_regularizer = None\n reg_type = self.net_param.reg_type.lower()\n decay = self.net_param.decay\n if reg_type == 'l2' and decay > 0:\n from tensorflow.contrib.layers.python.layers import regularizers\n w_regularizer = regularizers.l2_regularizer(decay)\n b_regularizer = regularizers.l2_regularizer(decay)\n elif reg_type == 'l1' and decay > 0:\n from tensorflow.contrib.layers.python.layers import regularizers\n w_regularizer = regularizers.l1_regularizer(decay)\n b_regularizer = regularizers.l1_regularizer(decay)\n\n self.net = ApplicationNetFactory.create(self.net_param.name)(\n num_classes=self.classification_param.num_classes,\n w_initializer=InitializerFactory.get_initializer(\n name=self.net_param.weight_initializer),\n b_initializer=InitializerFactory.get_initializer(\n name=self.net_param.bias_initializer),\n w_regularizer=w_regularizer,\n b_regularizer=b_regularizer,\n acti_func=self.net_param.activation_function)\n\n def add_confusion_matrix_summaries_(self,\n outputs_collector,\n net_out,\n data_dict):\n \"\"\" This method defines several monitoring metrics that\n are derived from the confusion matrix \"\"\"\n labels = tf.reshape(tf.cast(data_dict['label'], tf.int64), [-1])\n prediction = tf.reshape(tf.argmax(net_out, -1), [-1])\n num_classes = self.classification_param.num_classes\n conf_mat = tf.confusion_matrix(labels, prediction, num_classes)\n conf_mat = tf.to_float(conf_mat)\n if self.classification_param.num_classes == 2:\n outputs_collector.add_to_collection(\n var=conf_mat[1][1], name='true_positives',\n average_over_devices=True, summary_type='scalar',\n collection=TF_SUMMARIES)\n outputs_collector.add_to_collection(\n var=conf_mat[1][0], name='false_negatives',\n average_over_devices=True, summary_type='scalar',\n collection=TF_SUMMARIES)\n outputs_collector.add_to_collection(\n var=conf_mat[0][1], name='false_positives',\n average_over_devices=True, summary_type='scalar',\n collection=TF_SUMMARIES)\n outputs_collector.add_to_collection(\n var=conf_mat[0][0], name='true_negatives',\n average_over_devices=True, summary_type='scalar',\n collection=TF_SUMMARIES)\n else:\n outputs_collector.add_to_collection(\n var=conf_mat[tf.newaxis, :, :, tf.newaxis],\n name='confusion_matrix',\n average_over_devices=True, summary_type='image',\n collection=TF_SUMMARIES)\n\n outputs_collector.add_to_collection(\n var=tf.trace(conf_mat), name='accuracy',\n average_over_devices=True, summary_type='scalar',\n collection=TF_SUMMARIES)\n\n\n def connect_data_and_network(self,\n outputs_collector=None,\n gradients_collector=None):\n\n def switch_sampler(for_training):\n with tf.name_scope('train' if for_training else 'validation'):\n sampler = self.get_sampler()[0][0 if for_training else -1]\n return sampler.pop_batch_op()\n\n if self.is_training:\n if self.action_param.validation_every_n > 0:\n data_dict = tf.cond(tf.logical_not(self.is_validation),\n lambda: switch_sampler(for_training=True),\n lambda: switch_sampler(for_training=False))\n else:\n data_dict = switch_sampler(for_training=True)\n\n image = tf.cast(data_dict['image'], tf.float32)\n net_args = {'is_training': self.is_training,\n 'keep_prob': self.net_param.keep_prob}\n net_out = self.net(image, 
**net_args)\n\n with tf.name_scope('Optimiser'):\n optimiser_class = OptimiserFactory.create(\n name=self.action_param.optimiser)\n self.optimiser = optimiser_class.get_instance(\n learning_rate=self.action_param.lr)\n loss_func = LossFunction(\n n_class=self.classification_param.num_classes,\n loss_type=self.action_param.loss_type)\n data_loss = loss_func(\n prediction=net_out,\n ground_truth=data_dict.get('label', None))\n reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n if self.net_param.decay > 0.0 and reg_losses:\n reg_loss = tf.reduce_mean(\n [tf.reduce_mean(reg_loss) for reg_loss in reg_losses])\n loss = data_loss + reg_loss\n else:\n loss = data_loss\n grads = self.optimiser.compute_gradients(\n loss, colocate_gradients_with_ops=True)\n # collecting gradients variables\n gradients_collector.add_to_collection([grads])\n # collecting output variables\n outputs_collector.add_to_collection(\n var=data_loss, name='data_loss',\n average_over_devices=False, collection=CONSOLE)\n outputs_collector.add_to_collection(\n var=data_loss, name='data_loss',\n average_over_devices=True, summary_type='scalar',\n collection=TF_SUMMARIES)\n self.add_confusion_matrix_summaries_(outputs_collector,\n net_out,\n data_dict)\n else:\n # converting logits into final output for\n # classification probabilities or argmax classification labels\n data_dict = switch_sampler(for_training=False)\n image = tf.cast(data_dict['image'], tf.float32)\n net_args = {'is_training': self.is_training,\n 'keep_prob': self.net_param.keep_prob}\n net_out = self.net(image, **net_args)\n tf.logging.info(\n 'net_out.shape may need to be resized: %s', net_out.shape)\n output_prob = self.classification_param.output_prob\n num_classes = self.classification_param.num_classes\n if output_prob and num_classes > 1:\n post_process_layer = PostProcessingLayer(\n 'SOFTMAX', num_classes=num_classes)\n elif not output_prob and num_classes > 1:\n post_process_layer = PostProcessingLayer(\n 'ARGMAX', num_classes=num_classes)\n else:\n post_process_layer = PostProcessingLayer(\n 'IDENTITY', num_classes=num_classes)\n net_out = post_process_layer(net_out)\n\n outputs_collector.add_to_collection(\n var=net_out, name='window',\n average_over_devices=False, collection=NETWORK_OUTPUT)\n outputs_collector.add_to_collection(\n var=data_dict['image_location'], name='location',\n average_over_devices=False, collection=NETWORK_OUTPUT)\n self.initialise_aggregator()\n\n def interpret_output(self, batch_output):\n if not self.is_training:\n return self.output_decoder.decode_batch(\n batch_output['window'], batch_output['location'])\n return True\n\n def initialise_evaluator(self, eval_param):\n self.eval_param = eval_param\n self.evaluator = ClassificationEvaluator(self.readers[0],\n self.classification_param,\n eval_param)\n\n def add_inferred_output(self, data_param, task_param):\n return self.add_inferred_output_like(data_param, task_param, 'label')\n", "\"\"\"\nA preliminary re-implementation of:\n Hu et al., Weakly-Supervised Convolutional Neural Networks for\n Multimodal Image Registration, Medical Image Analysis (2018)\n https://doi.org/10.1016/j.media.2018.07.002\n\nThe original implementation and tutorial is available at:\n https://github.com/YipengHu/label-reg\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\n\nfrom niftynet.application.base_application import BaseApplication\nfrom niftynet.io.image_reader import ImageReader\nfrom 
niftynet.contrib.sampler_pairwise.sampler_pairwise_uniform import \\\n PairwiseUniformSampler\nfrom niftynet.contrib.sampler_pairwise.sampler_pairwise_resize import \\\n PairwiseResizeSampler\nfrom niftynet.contrib.csv_reader.csv_reader import CSVReader\nfrom niftynet.engine.application_factory import \\\n OptimiserFactory, ApplicationNetFactory\nfrom niftynet.engine.application_variables import \\\n NETWORK_OUTPUT, CONSOLE, TF_SUMMARIES\nfrom niftynet.engine.windows_aggregator_resize import ResizeSamplesAggregator\n\nfrom niftynet.layer.resampler import ResamplerLayer\nfrom niftynet.layer.pad import PadLayer\nfrom niftynet.layer.loss_segmentation import LossFunction\n\n\nSUPPORTED_INPUT = {'moving_image', 'moving_label',\n 'fixed_image', 'fixed_label'}\n\n\nclass RegApp(BaseApplication):\n\n REQUIRED_CONFIG_SECTION = \"REGISTRATION\"\n\n def __init__(self, net_param, action_param, action):\n BaseApplication.__init__(self)\n tf.logging.info('starting label-driven registration')\n self.action = action\n\n self.net_param = net_param\n self.action_param = action_param\n\n self.registration_param = None\n self.data_param = None\n\n def initialise_dataset_loader(\n self, data_param=None, task_param=None, data_partitioner=None):\n self.data_param = data_param\n self.registration_param = task_param\n\n if self.is_evaluation:\n NotImplementedError('Evaluation is not yet '\n 'supported in this application.')\n try:\n reader_phase = self.action_param.dataset_to_infer\n except AttributeError:\n reader_phase = None\n file_lists = data_partitioner.get_file_lists_by(\n phase=reader_phase, action=self.action)\n\n self.readers = []\n for file_list in file_lists:\n fixed_reader = ImageReader({'fixed_image', 'fixed_label'})\n fixed_reader.initialise(data_param, task_param, file_list)\n self.readers.append(fixed_reader)\n\n moving_reader = ImageReader({'moving_image', 'moving_label'})\n moving_reader.initialise(data_param, task_param, file_list)\n self.readers.append(moving_reader)\n\n # pad the fixed target only\n # moving image will be resampled to match the targets\n #volume_padding_layer = []\n #if self.net_param.volume_padding_size:\n # volume_padding_layer.append(PadLayer(\n # image_name=('fixed_image', 'fixed_label'),\n # border=self.net_param.volume_padding_size))\n\n #for reader in self.readers:\n # reader.add_preprocessing_layers(volume_padding_layer)\n\n\n def initialise_sampler(self):\n if self.is_training:\n self.sampler = []\n assert len(self.readers) >= 2, 'at least two readers are required'\n training_sampler = PairwiseUniformSampler(\n reader_0=self.readers[0],\n reader_1=self.readers[1],\n data_param=self.data_param,\n batch_size=self.net_param.batch_size)\n self.sampler.append(training_sampler)\n # adding validation readers if possible\n if len(self.readers) >= 4:\n validation_sampler = PairwiseUniformSampler(\n reader_0=self.readers[2],\n reader_1=self.readers[3],\n data_param=self.data_param,\n batch_size=self.net_param.batch_size)\n self.sampler.append(validation_sampler)\n else:\n self.sampler = PairwiseResizeSampler(\n reader_0=self.readers[0],\n reader_1=self.readers[1],\n data_param=self.data_param,\n batch_size=self.net_param.batch_size)\n\n def initialise_network(self):\n decay = self.net_param.decay\n self.net = ApplicationNetFactory.create(self.net_param.name)(decay)\n\n def connect_data_and_network(self,\n outputs_collector=None,\n gradients_collector=None):\n\n def switch_samplers(for_training):\n with tf.name_scope('train' if for_training else 'validation'):\n sampler = 
self.get_sampler()[0 if for_training else -1]\n return sampler() # returns image only\n\n if self.is_training:\n self.patience = self.action_param.patience\n if self.action_param.validation_every_n > 0:\n sampler_window = \\\n tf.cond(tf.logical_not(self.is_validation),\n lambda: switch_samplers(True),\n lambda: switch_samplers(False))\n else:\n sampler_window = switch_samplers(True)\n\n image_windows, _ = sampler_window\n # image_windows, locations = sampler_window\n\n # decode channels for moving and fixed images\n image_windows_list = [\n tf.expand_dims(img, axis=-1)\n for img in tf.unstack(image_windows, axis=-1)]\n fixed_image, fixed_label, moving_image, moving_label = \\\n image_windows_list\n\n # estimate ddf\n dense_field = self.net(fixed_image, moving_image)\n if isinstance(dense_field, tuple):\n dense_field = dense_field[0]\n\n # transform the moving labels\n resampler = ResamplerLayer(\n interpolation='linear', boundary='replicate')\n resampled_moving_label = resampler(moving_label, dense_field)\n\n # compute label loss (foreground only)\n loss_func = LossFunction(\n n_class=1,\n loss_type=self.action_param.loss_type,\n softmax=False)\n label_loss = loss_func(prediction=resampled_moving_label,\n ground_truth=fixed_label)\n\n dice_fg = 1.0 - label_loss\n # appending regularisation loss\n total_loss = label_loss\n reg_loss = tf.get_collection('bending_energy')\n if reg_loss:\n total_loss = total_loss + \\\n self.net_param.decay * tf.reduce_mean(reg_loss)\n\n self.total_loss = total_loss\n\n # compute training gradients\n with tf.name_scope('Optimiser'):\n optimiser_class = OptimiserFactory.create(\n name=self.action_param.optimiser)\n self.optimiser = optimiser_class.get_instance(\n learning_rate=self.action_param.lr)\n grads = self.optimiser.compute_gradients(\n total_loss, colocate_gradients_with_ops=True)\n gradients_collector.add_to_collection(grads)\n\n metrics_dice = loss_func(\n prediction=tf.to_float(resampled_moving_label >= 0.5),\n ground_truth=tf.to_float(fixed_label >= 0.5))\n metrics_dice = 1.0 - metrics_dice\n\n # command line output\n outputs_collector.add_to_collection(\n var=dice_fg, name='one_minus_data_loss',\n collection=CONSOLE)\n outputs_collector.add_to_collection(\n var=tf.reduce_mean(reg_loss), name='bending_energy',\n collection=CONSOLE)\n outputs_collector.add_to_collection(\n var=total_loss, name='total_loss', collection=CONSOLE)\n outputs_collector.add_to_collection(\n var=metrics_dice, name='ave_fg_dice', collection=CONSOLE)\n\n # for tensorboard\n outputs_collector.add_to_collection(\n var=dice_fg,\n name='data_loss',\n average_over_devices=True,\n summary_type='scalar',\n collection=TF_SUMMARIES)\n outputs_collector.add_to_collection(\n var=total_loss,\n name='total_loss',\n average_over_devices=True,\n summary_type='scalar',\n collection=TF_SUMMARIES)\n outputs_collector.add_to_collection(\n var=metrics_dice,\n name='averaged_foreground_Dice',\n average_over_devices=True,\n summary_type='scalar',\n collection=TF_SUMMARIES)\n\n # for visualisation debugging\n # resampled_moving_image = resampler(moving_image, dense_field)\n # outputs_collector.add_to_collection(\n # var=fixed_image, name='fixed_image',\n # collection=NETWORK_OUTPUT)\n # outputs_collector.add_to_collection(\n # var=fixed_label, name='fixed_label',\n # collection=NETWORK_OUTPUT)\n # outputs_collector.add_to_collection(\n # var=moving_image, name='moving_image',\n # collection=NETWORK_OUTPUT)\n # outputs_collector.add_to_collection(\n # var=moving_label, name='moving_label',\n # 
collection=NETWORK_OUTPUT)\n # outputs_collector.add_to_collection(\n # var=resampled_moving_image, name='resampled_image',\n # collection=NETWORK_OUTPUT)\n # outputs_collector.add_to_collection(\n # var=resampled_moving_label, name='resampled_label',\n # collection=NETWORK_OUTPUT)\n # outputs_collector.add_to_collection(\n # var=dense_field, name='ddf', collection=NETWORK_OUTPUT)\n # outputs_collector.add_to_collection(\n # var=locations, name='locations', collection=NETWORK_OUTPUT)\n\n # outputs_collector.add_to_collection(\n # var=shift[0], name='a', collection=CONSOLE)\n # outputs_collector.add_to_collection(\n # var=shift[1], name='b', collection=CONSOLE)\n else:\n image_windows, locations = self.sampler()\n image_windows_list = [\n tf.expand_dims(img, axis=-1)\n for img in tf.unstack(image_windows, axis=-1)]\n fixed_image, fixed_label, moving_image, moving_label = \\\n image_windows_list\n\n dense_field = self.net(fixed_image, moving_image)\n if isinstance(dense_field, tuple):\n dense_field = dense_field[0]\n\n # transform the moving labels\n resampler = ResamplerLayer(\n interpolation='linear', boundary='replicate')\n resampled_moving_image = resampler(moving_image, dense_field)\n resampled_moving_label = resampler(moving_label, dense_field)\n\n outputs_collector.add_to_collection(\n var=fixed_image, name='fixed_image',\n collection=NETWORK_OUTPUT)\n outputs_collector.add_to_collection(\n var=moving_image, name='moving_image',\n collection=NETWORK_OUTPUT)\n outputs_collector.add_to_collection(\n var=resampled_moving_image,\n name='resampled_moving_image',\n collection=NETWORK_OUTPUT)\n outputs_collector.add_to_collection(\n var=resampled_moving_label,\n name='resampled_moving_label',\n collection=NETWORK_OUTPUT)\n\n outputs_collector.add_to_collection(\n var=fixed_label, name='fixed_label',\n collection=NETWORK_OUTPUT)\n outputs_collector.add_to_collection(\n var=moving_label, name='moving_label',\n collection=NETWORK_OUTPUT)\n #outputs_collector.add_to_collection(\n # var=dense_field, name='field',\n # collection=NETWORK_OUTPUT)\n outputs_collector.add_to_collection(\n var=locations, name='locations',\n collection=NETWORK_OUTPUT)\n\n self.output_decoder = ResizeSamplesAggregator(\n image_reader=self.readers[0], # fixed image reader\n name='fixed_image',\n output_path=self.action_param.save_seg_dir,\n interp_order=self.action_param.output_interp_order)\n\n def interpret_output(self, batch_output):\n if self.is_training:\n return True\n return self.output_decoder.decode_batch(\n {'window_resampled':batch_output['resampled_moving_image']},\n batch_output['locations'])\n\n", "# -*- coding: utf-8 -*-\n\"\"\"\nThis module defines built-in evaluation functions for segmentation applications\n\nSegmentations can be evaluated at several scales:\n'foreground' refering to metrics computed once for a foreground label\n'label' refering to metrics computed once for each label (including background)\n'cc' referring to metrics computed once for each connected component set\n Connected components are defined by one-or-more connected\n components on the reference segmentation and one-or-more connected\n components on the infered segmentation.\n These sets are defined by a cc_func. Currently\n this is hard coded to be union_of_seg_for_each_ref_cc which takes each\n connected component on the reference segmentation and all connected\n components on the infered segmentation with any overlap. 
This will\n eventually be made into a factory option for different cc set definitions\n\nOverlap and distance measures can be computed at each of these levels by\nderiving from PerComponentEvaluation, which handles the logic of identifying\nwhich comparisons need to be done for each scale.\n\nOverlap and distance measures are computed in two convenience functions\n(compute_many_overlap_metrics and compute_many_distance_metrics) and wrapped\nby Evaluation classes\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import ndimage\n\nfrom niftynet.evaluation.base_evaluations import CachedSubanalysisEvaluation\nfrom niftynet.utilities.util_common import MorphologyOps, \\\n CachedFunction, CachedFunctionByID\nfrom niftynet.evaluation.base_evaluator import ScalarAggregator\n\n\nclass PerComponentEvaluation(CachedSubanalysisEvaluation):\n \"\"\"\n This class represents evaluations performed on binary segmentation\n components computed per label or per connected component. It encodes the\n generation of evaluation tasks. Derived classes should define the\n metric_name constant and the function metric_from_binarized()\n \"\"\"\n\n def subanalyses(self, subject_id, data):\n analyses = self.app_param.evaluation_units.split(',')\n tasks = []\n for analysis in analyses:\n if analysis in ['foreground', 'label']:\n labels = list(range(self.app_param.num_classes))\n if analysis == 'foreground':\n labels.remove(0)\n for label in labels:\n tasks.append({'subject_id': subject_id, 'label': label})\n elif analysis in ['cc']:\n cc_seg, cc_ref = \\\n connected_components(data['inferred'], data['label'],\n self.app_param.output_prob)\n cc_func = union_of_seg_for_each_ref_cc # TODO make into factory\n conncomps = cc_func(cc_seg, cc_ref)\n for conncomp in conncomps:\n tasks.append({'subject_id': subject_id,\n 'cc_labels': conncomps[conncomp]})\n # TODO save an index image from blobs_ref[0]\n return tasks\n\n def layer_op(self, subject_id, data, task):\n # We use a cached binarizer function so that the binarized\n # segmentation have the same python id\n if 'label' in task:\n binarizer = cached_label_binarizer(task['label'],\n self.app_param.output_prob)\n seg, ref = binarizer(data)\n metric_dict = {'subject_id': subject_id, 'label': task['label']}\n metric_dict.update(self.metric_dict_from_binarized(seg, ref))\n pdf = pd.DataFrame.from_records([metric_dict], ('subject_id', 'label'))\n return [pdf]\n elif 'cc_labels' in task:\n binarizer = cached_cc_binarizer(task['cc_labels'],\n self.app_param.output_prob)\n seg, ref = binarizer(data)\n r_str = '&'.join([str(l) for l in task['cc_labels'][1]])\n s_str = '&'.join([str(l) for l in task['cc_labels'][0]])\n cc_id = 'r%s_s%s' % (r_str, s_str)\n metric_dict = {'subject_id': subject_id, 'cc_id': cc_id}\n metric_dict.update(self.metric_dict_from_binarized(seg, ref))\n pdf = pd.DataFrame.from_records([metric_dict], ('subject_id', 'cc_id'))\n return [pdf]\n return []\n\n\n def metric_dict_from_binarized(self, seg, ref):\n \"\"\"\n Computes a metric from a binarized mask\n :param seg: numpy array with binary mask from inferred segmentation\n :param ref: numpy array with binary mask from reference segmentation\n :return: a dictionary of metric_name:metric_value\n \"\"\"\n raise NotImplementedError('Not implemented in abstract base class')\n\n\nclass PerComponentScalarEvaluation(PerComponentEvaluation):\n \"\"\" This class simplifies the implementation when the metric just returns a\n single scalar 
with the same name as the class name\"\"\"\n def __init__(self, *args, **kwargs):\n super(PerComponentScalarEvaluation, self).__init__(*args,\n **kwargs)\n self.metric_name = self.__class__.__name__\n\n def metric_dict_from_binarized(self, seg, ref):\n \"\"\" Wrap computed metric in dictionary for parent class \"\"\"\n metric_value = self.metric_from_binarized(seg, ref)\n return {self.metric_name: metric_value}\n\n def metric_from_binarized(self, seg, ref):\n \"\"\"\n Computer scalar metric value\n :param seg: numpy array with binary mask from inferred segmentation\n :param ref: numpy array with binary mask from reference segmentation\n :return: scalar metric value\n \"\"\"\n\n def get_aggregations(self):\n aggregations = []\n analyses = self.app_param.evaluation_units.split(',')\n for analysis in analyses:\n if analysis in ['foreground', 'label']:\n mean_agg = ScalarAggregator(self.metric_name,\n ('subject_id', 'label'),\n ('label',), np.mean,\n 'mean_' + self.metric_name)\n std_agg = ScalarAggregator(self.metric_name,\n ('subject_id', 'label'),\n ('label',), np.std,\n 'stdev_' + self.metric_name)\n aggregations.extend([mean_agg, std_agg])\n elif analysis in ['cc']:\n pass\n return aggregations\n\nclass BuiltinOverlapEvaluation(PerComponentScalarEvaluation):\n \"\"\"\n Wrapper class to encode many similar overlap metrics that can be computed\n from a confusion matrix\n Metrics computed in compute_many_overlap_metrics can be wrapped by\n overriding self.metric_name\n \"\"\"\n def metric_from_binarized(self, seg, ref):\n \"\"\"\n Computes a metric from a binarized mask by computing a confusion\n matrix and then delegating the metric computation\n :param seg: numpy array with binary mask from inferred segmentation\n :param ref: numpy array with binary mask from reference segmentation\n :return: scalar metric value\n \"\"\"\n lnot = np.logical_not\n land = np.logical_and\n conf_mat = np.array([[np.sum(land(lnot(seg), lnot(ref))),\n np.sum(land(lnot(seg), (ref)))],\n [np.sum(land((seg), lnot(ref))),\n np.sum(land((seg), (ref)))]])\n return self.metric_from_confusion_matrix(conf_mat)\n\n def metric_from_confusion_matrix(self, confusion_matrix):\n \"\"\"\n Compute metrics from a 2x2 confusion matrix\n :param confusion_matrix: 2x2 numpy array\n :return: scalar metric value\n \"\"\"\n\n\n#pylint: disable=missing-docstring,invalid-name\nclass n_pos_ref(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[0, 1] + M[1, 1]\n\n\nclass n_neg_ref(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[0, 0] + M[1, 0]\n\n\nclass n_pos_seg(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[1, 0] + M[1, 1]\n\n\nclass n_neg_seg(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[0, 0] + M[0, 1]\n\n\nclass fp(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[1, 0]\n\n\nclass fn(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[0, 1]\n\n\nclass tp(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[1, 1]\n\n\nclass tn(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[0, 0]\n\n\nclass n_intersection(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[1, 1]\n\n\nclass n_union(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[0, 1] + M[1, 0] + M[1, 1]\n\n\nclass specificity(BuiltinOverlapEvaluation):\n 
def metric_from_confusion_matrix(self, M):\n return M[0, 0] / (M[0, 0] + M[1, 0])\n\n\nclass sensitivity(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[1, 1] / (M[0, 1] + M[1, 1])\n\n\nclass accuracy(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return (M[1, 1] + M[0, 0]) / sum(sum(M))\n\n\nclass false_positive_rate(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[1, 0] / (M[0, 0] + M[1, 0])\n\n\nclass positive_predictive_values(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[1, 1] / (M[1, 0] + M[1, 1])\n\n\nclass negative_predictive_values(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[0, 0] / (M[0, 0] + M[0, 1])\n\n\nclass dice(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return 2 * M[1, 1] / (M[1, 1] * 2 + M[1, 0] + M[0, 1])\n\n\nDice = dice\n\n\nclass jaccard(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[1, 1] / (M[0, 1] + M[1, 0] + M[1, 1])\n\n\nintersection_over_union = jaccard\nJaccard = jaccard\n\n\nclass informedness(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[1, 1] / (M[0, 1] + M[1, 1]) + \\\n M[0, 0] / (M[0, 0] + M[1, 0]) - 1\n\n\nclass markedness(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return M[1, 1] / (M[1, 0] + M[1, 1]) + \\\n M[0, 0] / (M[0, 0] + M[0, 1]) - 1\n\n\nclass vol_diff(BuiltinOverlapEvaluation):\n def metric_from_confusion_matrix(self, M):\n return (M[1, 1] + M[1, 0]) / (M[0, 1] + M[1, 1])\n\n\n# Distance metrics as e.g. in 10.3978/j.issn.2223-4292.2015.08.02\nclass average_distance(PerComponentScalarEvaluation):\n def metric_from_binarized(self, seg, ref):\n ref_border_dist, seg_border_dist = border_distance(seg, ref, 8)\n border_ref, border_seg = borders(seg, ref, 8)\n return (np.sum(ref_border_dist) + np.sum(\n seg_border_dist)) / (np.sum(border_ref + border_seg))\n\n\nclass hausdorff_distance(PerComponentScalarEvaluation):\n def metric_from_binarized(self, seg, ref):\n ref_border_dist, seg_border_dist = border_distance(seg, ref, 8)\n return np.max([np.max(ref_border_dist), np.max(seg_border_dist)])\n\n\nclass hausdorff95_distance(PerComponentScalarEvaluation):\n def metric_from_binarized(self, seg, ref):\n ref_border_dist, seg_border_dist = border_distance(seg, ref, 8)\n border_ref, border_seg = borders(seg, ref, 8)\n seg_values = ref_border_dist[border_seg > 0]\n ref_values = seg_border_dist[border_ref > 0]\n if seg_values.size == 0 or ref_values.size == 0:\n return np.nan\n return np.max([np.percentile(seg_values, 95),\n np.percentile(ref_values, 95)])\n\n\n#pylint: enable=missing-docstring,invalid-name\n# Helper functions\n@CachedFunction\ndef cached_label_binarizer(label, output_prob):\n \"\"\"\n This class returns a function for binarizing an inferred segmentation\n for a specified label.\n This function is carefully designed to allow caching of unhashable numpy\n objects. Specifically, each call to cached_label_binarizer with the same\n (by-value) parameters will return the same (by python id) function\n object. 
This enables two calls to\n cached_label_binarizer(...)(numpy_object_1)\n with the same parameters from different metrics to use the cached result\n :param label: Which label to make foreground in the binary mask\n :param output_prob: Is the segmentation probabilistic (if so,\n argmax is used first to compute a label map)\n :return: a function for computing a binary label map\n \"\"\"\n @CachedFunctionByID\n def binarizer(data):\n \"\"\"\n This function binarizes a segmentation based on a specified\n label (defined by outer function)\n :param data: a data dictionary as built by ImageReader\n :return: a numpy array representing a binary label map\n \"\"\"\n if output_prob:\n out = np.argmax(data['inferred'], -1)\n else:\n out = data['inferred']\n return out == label, data['label'] == label\n\n return binarizer\n\n\n@CachedFunction\ndef cached_cc_binarizer(cc_labels, output_prob):\n \"\"\"\n This class returns a function for binarizing inferred and reference\n segmentations for a specified connected component set.\n This function is carefully designed to allow caching of unhashable numpy\n objects. Specifically, each call to cached_label_binarizer with the same\n (by-value) parameters will return the same (by python id) function\n object. This enables two calls to\n cached_cc_binarizer(...)(numpy_object_1)\n with the same parameters from different metrics to use the cached result\n :param cc_labels: [seg_label_list, ref_label_list] where each is a\n list of values to be considered foreground for this cc set\n :param output_prob: Is the segmentation probabilistic (if so,\n argmax is used first to compute a label map)\n :return: a function for computing a binary label map pair\n\n \"\"\"\n @CachedFunctionByID\n def binarizer(data):\n \"\"\"\n This function binarizes a multi-object segmentation and reference\n into a specified connected component set (defined by outer function)\n :param data: a data dictionary as built by ImageReader\n :return: two numpy arrays representing binary masks (from\n inferred and reference segmentations) for a connected component set\n \"\"\"\n cc_func = connected_components\n cc_seg, cc_ref = cc_func(data['inferred'], data['label'], output_prob)\n cc_seg_in = np.zeros_like(cc_seg[0])\n cc_ref_in = np.zeros_like(cc_ref[0])\n for i in cc_labels[0]:\n cc_seg_in[cc_seg[0] == i] = 1\n for i in cc_labels[1]:\n cc_ref_in[cc_ref[0] == i] = 1\n return cc_seg_in, cc_ref_in\n\n return binarizer\n\ndef union_of_seg_for_each_ref_cc(blobs_seg, blobs_ref):\n \"\"\"\n Constructs connected component sets to compute metrics for. 
Each\n reference connected component is paired with the union of inferred\n segmentation connected components with any overlap\n :param blobs_seg: tuple (numbered_cc_array, number_of_ccs)\n :param blobs_ref: tuple (numbered_cc_array, number_of_ccs)\n :return: dictionary {label:(ref_label_list, seg_label_list)}\n \"\"\"\n keys = {}\n for cc_id in range(1, blobs_ref[1] + 1):\n seg_idx = list(np.unique(blobs_seg[0][blobs_ref[0] == cc_id]))\n if 0 in seg_idx:\n seg_idx.remove(0)\n key = 'r' + str(cc_id) + '_c' + '_'.join([str(s) for s in seg_idx])\n keys[key] = ((cc_id,), tuple(seg_idx))\n return keys\n\n\n@CachedFunctionByID\ndef borders(seg, ref, neigh=8):\n \"\"\"\n This function determines the points that lie on the border of the\n inferred and reference segmentations\n :param seg: numpy array with binary mask from inferred segmentation\n :param ref: numpy array with binary mask from reference segmentation\n :param neigh: connectivity 4 or 8\n :return: numpy arrays of reference and inferred segmentation borders\n \"\"\"\n border_ref = MorphologyOps(ref[:, :, :, 0, 0], neigh).border_map()\n border_seg = MorphologyOps(seg[:, :, :, 0, 0], neigh).border_map()\n return border_ref, border_seg\n\n\n@CachedFunctionByID\ndef border_distance(seg, ref, neigh=8):\n \"\"\"\n This functions determines the distance at each seg border point to the\n nearest ref border point and vice versa\n :param seg: numpy array with binary mask from inferred segmentation\n :param ref: numpy array with binary mask from reference segmentation\n :param neigh: connectivity 4 or 8\n :return: numpy arrays for distance_from_ref_border, distance_from\n seg_border\n \"\"\"\n border_ref, border_seg = borders(seg, ref, neigh)\n distance_ref = ndimage.distance_transform_edt(1 - border_ref)\n distance_seg = ndimage.distance_transform_edt(1 - border_seg)\n distance_border_seg = border_ref * distance_seg\n distance_border_ref = border_seg * distance_ref\n return distance_border_ref, distance_border_seg\n\n\n@CachedFunctionByID\ndef connected_components(seg, ref, output_prob, neigh=8):\n \"\"\"\n Numbers connected components in the reference and inferred segmentations\n :param seg: numpy array with binary mask from inferred segmentation\n :param ref: numpy array with binary mask from reference segmentation\n :param output_prob: Is the segmentation probabilistic (if so,\n argmax is used first to compute a label map)\n :param neigh: connectivity 4 or 8\n :return: (cc_map_ref, count) numbered connected components from reference\n :return: (cc_map_seg, count) numbered connected components from inferred\n \"\"\"\n if output_prob:\n seg = np.argmax(seg, -1)\n blobs_ref = MorphologyOps(ref[:, :, :, 0, 0], neigh).foreground_component()\n blobs_seg = MorphologyOps(seg[:, :, :, 0, 0], neigh).foreground_component()\n\n return (blobs_ref[0][:, :, :, np.newaxis, np.newaxis], blobs_ref[1]), \\\n (blobs_seg[0][:, :, :, np.newaxis, np.newaxis], blobs_seg[1]),\n\n\n# TODO\n# per subject connected component related metrics\n# 'connected_elements': (self.connected_elements, 'TPc,FPc,FNc'),\n# 'outline_error': (self.outline_error, 'OER,OEFP,OEFN'),\n# 'detection_error': (self.detection_error, 'DE,DEFP,DEFN'),\n# list_labels\n# list connected components\n# TODO image_map outputs\n", "# -*- coding: utf-8 -*-\nimport tensorflow as tf\n\nfrom niftynet.application.base_application import BaseApplication\nfrom niftynet.engine.application_factory import \\\n ApplicationNetFactory, InitializerFactory, OptimiserFactory\nfrom niftynet.engine.application_variables 
import \\\n CONSOLE, NETWORK_OUTPUT, TF_SUMMARIES\nfrom niftynet.engine.sampler_grid_v2 import GridSampler\nfrom niftynet.engine.sampler_resize_v2 import ResizeSampler\nfrom niftynet.engine.sampler_uniform_v2 import UniformSampler\nfrom niftynet.engine.sampler_weighted_v2 import WeightedSampler\nfrom niftynet.engine.sampler_balanced_v2 import BalancedSampler\nfrom niftynet.engine.windows_aggregator_grid import GridSamplesAggregator\nfrom niftynet.engine.windows_aggregator_resize import ResizeSamplesAggregator\nfrom niftynet.io.image_reader import ImageReader\nfrom niftynet.layer.crop import CropLayer\nfrom niftynet.layer.histogram_normalisation import \\\n HistogramNormalisationLayer\nfrom niftynet.layer.loss_regression import LossFunction\nfrom niftynet.layer.mean_variance_normalisation import \\\n MeanVarNormalisationLayer\nfrom niftynet.layer.pad import PadLayer\nfrom niftynet.layer.post_processing import PostProcessingLayer\nfrom niftynet.layer.rand_flip import RandomFlipLayer\nfrom niftynet.layer.rand_rotation import RandomRotationLayer\nfrom niftynet.layer.rand_spatial_scaling import RandomSpatialScalingLayer\nfrom niftynet.layer.rgb_histogram_equilisation import \\\n RGBHistogramEquilisationLayer\nfrom niftynet.evaluation.regression_evaluator import RegressionEvaluator\nfrom niftynet.layer.rand_elastic_deform import RandomElasticDeformationLayer\nfrom niftynet.engine.windows_aggregator_identity import WindowAsImageAggregator\n\nSUPPORTED_INPUT = set(['image', 'output', 'weight', 'sampler', 'inferred'])\n\n\nclass RegressionApplication(BaseApplication):\n REQUIRED_CONFIG_SECTION = \"REGRESSION\"\n\n def __init__(self, net_param, action_param, action):\n BaseApplication.__init__(self)\n tf.logging.info('starting regression application')\n self.action = action\n\n self.net_param = net_param\n self.action_param = action_param\n\n self.data_param = None\n self.regression_param = None\n self.SUPPORTED_SAMPLING = {\n 'uniform': (self.initialise_uniform_sampler,\n self.initialise_grid_sampler,\n self.initialise_grid_aggregator),\n 'weighted': (self.initialise_weighted_sampler,\n self.initialise_grid_sampler,\n self.initialise_grid_aggregator),\n 'resize': (self.initialise_resize_sampler,\n self.initialise_resize_sampler,\n self.initialise_resize_aggregator),\n 'balanced': (self.initialise_balanced_sampler,\n self.initialise_grid_sampler,\n self.initialise_grid_aggregator),\n }\n\n def initialise_dataset_loader(\n self, data_param=None, task_param=None, data_partitioner=None):\n\n self.data_param = data_param\n self.regression_param = task_param\n\n # initialise input image readers\n if self.is_training:\n reader_names = ('image', 'output', 'weight', 'sampler')\n elif self.is_inference:\n # in the inference process use `image` input only\n reader_names = ('image',)\n elif self.is_evaluation:\n reader_names = ('image', 'output', 'inferred')\n else:\n tf.logging.fatal(\n 'Action `%s` not supported. 
Expected one of %s',\n self.action, self.SUPPORTED_PHASES)\n raise ValueError\n try:\n reader_phase = self.action_param.dataset_to_infer\n except AttributeError:\n reader_phase = None\n file_lists = data_partitioner.get_file_lists_by(\n phase=reader_phase, action=self.action)\n self.readers = [\n ImageReader(reader_names).initialise(\n data_param, task_param, file_list) for file_list in file_lists]\n\n # initialise input preprocessing layers\n mean_var_normaliser = MeanVarNormalisationLayer(image_name='image') \\\n if self.net_param.whitening else None\n histogram_normaliser = HistogramNormalisationLayer(\n image_name='image',\n modalities=vars(task_param).get('image'),\n model_filename=self.net_param.histogram_ref_file,\n norm_type=self.net_param.norm_type,\n cutoff=self.net_param.cutoff,\n name='hist_norm_layer') \\\n if (self.net_param.histogram_ref_file and\n self.net_param.normalisation) else None\n rgb_normaliser = RGBHistogramEquilisationLayer(\n image_name='image',\n name='rbg_norm_layer') if self.net_param.rgb_normalisation else None\n\n normalisation_layers = []\n if histogram_normaliser is not None:\n normalisation_layers.append(histogram_normaliser)\n if mean_var_normaliser is not None:\n normalisation_layers.append(mean_var_normaliser)\n if rgb_normaliser is not None:\n normalisation_layers.append(rgb_normaliser)\n\n volume_padding_layer = [PadLayer(\n image_name=SUPPORTED_INPUT,\n border=self.net_param.volume_padding_size,\n mode=self.net_param.volume_padding_mode,\n pad_to=self.net_param.volume_padding_to_size)\n ]\n\n # initialise training data augmentation layers\n augmentation_layers = []\n if self.is_training:\n train_param = self.action_param\n if train_param.random_flipping_axes != -1:\n augmentation_layers.append(RandomFlipLayer(\n flip_axes=train_param.random_flipping_axes))\n if train_param.scaling_percentage:\n augmentation_layers.append(RandomSpatialScalingLayer(\n min_percentage=train_param.scaling_percentage[0],\n max_percentage=train_param.scaling_percentage[1],\n antialiasing=train_param.antialiasing,\n isotropic=train_param.isotropic_scaling))\n if train_param.rotation_angle:\n rotation_layer = RandomRotationLayer()\n if train_param.rotation_angle:\n rotation_layer.init_uniform_angle(\n train_param.rotation_angle)\n augmentation_layers.append(rotation_layer)\n if train_param.do_elastic_deformation:\n spatial_rank = list(self.readers[0].spatial_ranks.values())[0]\n augmentation_layers.append(RandomElasticDeformationLayer(\n spatial_rank=spatial_rank,\n num_controlpoints=train_param.num_ctrl_points,\n std_deformation_sigma=train_param.deformation_sigma,\n proportion_to_augment=train_param.proportion_to_deform))\n\n # only add augmentation to first reader (not validation reader)\n self.readers[0].add_preprocessing_layers(\n volume_padding_layer + normalisation_layers + augmentation_layers)\n\n for reader in self.readers[1:]:\n reader.add_preprocessing_layers(\n volume_padding_layer + normalisation_layers)\n\n def initialise_uniform_sampler(self):\n self.sampler = [[UniformSampler(\n reader=reader,\n window_sizes=self.data_param,\n batch_size=self.net_param.batch_size,\n windows_per_image=self.action_param.sample_per_volume,\n queue_length=self.net_param.queue_length) for reader in\n self.readers]]\n\n def initialise_weighted_sampler(self):\n self.sampler = [[WeightedSampler(\n reader=reader,\n window_sizes=self.data_param,\n batch_size=self.net_param.batch_size,\n windows_per_image=self.action_param.sample_per_volume,\n queue_length=self.net_param.queue_length) 
for reader in\n self.readers]]\n\n def initialise_resize_sampler(self):\n self.sampler = [[ResizeSampler(\n reader=reader,\n window_sizes=self.data_param,\n batch_size=self.net_param.batch_size,\n shuffle=self.is_training,\n smaller_final_batch_mode=self.net_param.smaller_final_batch_mode,\n queue_length=self.net_param.queue_length) for reader in\n self.readers]]\n\n def initialise_grid_sampler(self):\n self.sampler = [[GridSampler(\n reader=reader,\n window_sizes=self.data_param,\n batch_size=self.net_param.batch_size,\n spatial_window_size=self.action_param.spatial_window_size,\n window_border=self.action_param.border,\n smaller_final_batch_mode=self.net_param.smaller_final_batch_mode,\n queue_length=self.net_param.queue_length) for reader in\n self.readers]]\n\n def initialise_balanced_sampler(self):\n self.sampler = [[BalancedSampler(\n reader=reader,\n window_sizes=self.data_param,\n batch_size=self.net_param.batch_size,\n windows_per_image=self.action_param.sample_per_volume,\n queue_length=self.net_param.queue_length) for reader in\n self.readers]]\n\n def initialise_grid_aggregator(self):\n self.output_decoder = GridSamplesAggregator(\n image_reader=self.readers[0],\n output_path=self.action_param.save_seg_dir,\n window_border=self.action_param.border,\n interp_order=self.action_param.output_interp_order,\n postfix=self.action_param.output_postfix,\n fill_constant=self.action_param.fill_constant)\n\n def initialise_resize_aggregator(self):\n self.output_decoder = ResizeSamplesAggregator(\n image_reader=self.readers[0],\n output_path=self.action_param.save_seg_dir,\n window_border=self.action_param.border,\n interp_order=self.action_param.output_interp_order,\n postfix=self.action_param.output_postfix)\n \n def initialise_identity_aggregator(self):\n self.output_decoder = WindowAsImageAggregator(\n image_reader=self.readers[0],\n output_path=self.action_param.save_seg_dir,\n postfix=self.action_param.output_postfix)\n\n def initialise_sampler(self):\n if self.is_training:\n self.SUPPORTED_SAMPLING[self.net_param.window_sampling][0]()\n elif self.is_inference:\n self.SUPPORTED_SAMPLING[self.net_param.window_sampling][1]()\n\n def initialise_aggregator(self):\n if self.net_param.force_output_identity_resizing:\n self.initialise_identity_aggregator()\n else:\n self.SUPPORTED_SAMPLING[self.net_param.window_sampling][2]()\n\n def initialise_network(self):\n w_regularizer = None\n b_regularizer = None\n reg_type = self.net_param.reg_type.lower()\n decay = self.net_param.decay\n if reg_type == 'l2' and decay > 0:\n from tensorflow.contrib.layers.python.layers import regularizers\n w_regularizer = regularizers.l2_regularizer(decay)\n b_regularizer = regularizers.l2_regularizer(decay)\n elif reg_type == 'l1' and decay > 0:\n from tensorflow.contrib.layers.python.layers import regularizers\n w_regularizer = regularizers.l1_regularizer(decay)\n b_regularizer = regularizers.l1_regularizer(decay)\n\n self.net = ApplicationNetFactory.create(self.net_param.name)(\n num_classes=1,\n w_initializer=InitializerFactory.get_initializer(\n name=self.net_param.weight_initializer),\n b_initializer=InitializerFactory.get_initializer(\n name=self.net_param.bias_initializer),\n w_regularizer=w_regularizer,\n b_regularizer=b_regularizer,\n acti_func=self.net_param.activation_function)\n\n def connect_data_and_network(self,\n outputs_collector=None,\n gradients_collector=None):\n\n def switch_sampler(for_training):\n with tf.name_scope('train' if for_training else 'validation'):\n sampler = 
self.get_sampler()[0][0 if for_training else -1]\n return sampler.pop_batch_op()\n\n if self.is_training:\n self.patience = self.action_param.patience\n self.mode = self.action_param.early_stopping_mode\n if self.action_param.validation_every_n > 0:\n data_dict = tf.cond(tf.logical_not(self.is_validation),\n lambda: switch_sampler(for_training=True),\n lambda: switch_sampler(for_training=False))\n else:\n data_dict = switch_sampler(for_training=True)\n\n image = tf.cast(data_dict['image'], tf.float32)\n net_args = {'is_training': self.is_training,\n 'keep_prob': self.net_param.keep_prob}\n net_out = self.net(image, **net_args)\n\n with tf.name_scope('Optimiser'):\n optimiser_class = OptimiserFactory.create(\n name=self.action_param.optimiser)\n self.optimiser = optimiser_class.get_instance(\n learning_rate=self.action_param.lr)\n loss_func = LossFunction(loss_type=self.action_param.loss_type)\n\n weight_map = data_dict.get('weight', None)\n border=self.regression_param.loss_border\n if border == None or tf.reduce_sum(tf.abs(border)) == 0:\n data_loss = loss_func(\n prediction=net_out,\n ground_truth=data_dict['output'],\n weight_map=weight_map)\n else:\n crop_layer = CropLayer(border)\n weight_map = None if weight_map is None else crop_layer(weight_map)\n data_loss = loss_func(\n prediction=crop_layer(net_out),\n ground_truth=crop_layer(data_dict['output']),\n weight_map=weight_map)\n reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n if self.net_param.decay > 0.0 and reg_losses:\n reg_loss = tf.reduce_mean(\n [tf.reduce_mean(reg_loss) for reg_loss in reg_losses])\n loss = data_loss + reg_loss\n else:\n loss = data_loss\n\n # Get all vars\n to_optimise = tf.trainable_variables()\n vars_to_freeze = \\\n self.action_param.vars_to_freeze or \\\n self.action_param.vars_to_restore\n if vars_to_freeze:\n import re\n var_regex = re.compile(vars_to_freeze)\n # Only optimise vars that are not frozen\n to_optimise = \\\n [v for v in to_optimise if not var_regex.search(v.name)]\n tf.logging.info(\n \"Optimizing %d out of %d trainable variables, \"\n \"the other variables are fixed (--vars_to_freeze %s)\",\n len(to_optimise),\n len(tf.trainable_variables()),\n vars_to_freeze)\n\n self.total_loss = loss\n\n grads = self.optimiser.compute_gradients(\n loss, var_list=to_optimise, colocate_gradients_with_ops=True)\n # collecting gradients variables\n gradients_collector.add_to_collection([grads])\n\n # collecting output variables\n outputs_collector.add_to_collection(\n var=self.total_loss, name='total_loss',\n average_over_devices=True, collection=CONSOLE)\n outputs_collector.add_to_collection(\n var=self.total_loss, name='total_loss',\n average_over_devices=True, summary_type='scalar',\n collection=TF_SUMMARIES)\n outputs_collector.add_to_collection(\n var=data_loss, name='loss',\n average_over_devices=False, collection=CONSOLE)\n outputs_collector.add_to_collection(\n var=data_loss, name='loss',\n average_over_devices=True, summary_type='scalar',\n collection=TF_SUMMARIES)\n\n\n elif self.is_inference:\n data_dict = switch_sampler(for_training=False)\n image = tf.cast(data_dict['image'], tf.float32)\n net_args = {'is_training': self.is_training,\n 'keep_prob': self.net_param.keep_prob}\n net_out = self.net(image, **net_args)\n net_out = PostProcessingLayer('IDENTITY')(net_out)\n\n outputs_collector.add_to_collection(\n var=net_out, name='window',\n average_over_devices=False, collection=NETWORK_OUTPUT)\n outputs_collector.add_to_collection(\n var=data_dict['image_location'], 
name='location',\n average_over_devices=False, collection=NETWORK_OUTPUT)\n self.initialise_aggregator()\n\n def interpret_output(self, batch_output):\n if self.is_inference:\n return self.output_decoder.decode_batch(\n {'window_reg':batch_output['window']}, batch_output['location'])\n return True\n\n def initialise_evaluator(self, eval_param):\n self.eval_param = eval_param\n self.evaluator = RegressionEvaluator(self.readers[0],\n self.regression_param,\n eval_param)\n\n def add_inferred_output(self, data_param, task_param):\n return self.add_inferred_output_like(data_param, task_param, 'output')\n", "from __future__ import absolute_import, print_function\n\nimport unittest\n\nimport os\nimport tensorflow as tf\nfrom tensorflow.contrib.layers.python.layers import regularizers\n\nfrom niftynet.network.scalenet import ScaleNet\nfrom tests.niftynet_testcase import NiftyNetTestCase\n\[email protected](os.environ.get('QUICKTEST', \"\").lower() == \"true\", 'Skipping slow tests')\nclass ScaleNetTest(NiftyNetTestCase):\n def test_3d_shape(self):\n input_shape = (2, 32, 32, 32, 4)\n x = tf.ones(input_shape)\n\n scalenet_layer = ScaleNet(num_classes=5)\n out = scalenet_layer(x, is_training=True)\n print(scalenet_layer.num_trainable_params())\n\n with self.cached_session() as sess:\n sess.run(tf.global_variables_initializer())\n out = sess.run(out)\n self.assertAllClose((2, 32, 32, 32, 5), out.shape)\n\n def test_2d_shape(self):\n input_shape = (2, 32, 32, 4)\n x = tf.ones(input_shape)\n\n scalenet_layer = ScaleNet(num_classes=5)\n out = scalenet_layer(x, is_training=True)\n print(scalenet_layer.num_trainable_params())\n\n with self.cached_session() as sess:\n sess.run(tf.global_variables_initializer())\n out = sess.run(out)\n self.assertAllClose((2, 32, 32, 5), out.shape)\n\n def test_3d_reg_shape(self):\n input_shape = (2, 32, 32, 32, 4)\n x = tf.ones(input_shape)\n\n scalenet_layer = ScaleNet(num_classes=5,\n w_regularizer=regularizers.l2_regularizer(\n 0.3))\n out = scalenet_layer(x, is_training=True)\n print(scalenet_layer.num_trainable_params())\n\n with self.cached_session() as sess:\n sess.run(tf.global_variables_initializer())\n out = sess.run(out)\n self.assertAllClose((2, 32, 32, 32, 5), out.shape)\n\n def test_2d_reg_shape(self):\n input_shape = (2, 32, 32, 4)\n x = tf.ones(input_shape)\n\n scalenet_layer = ScaleNet(num_classes=5,\n w_regularizer=regularizers.l2_regularizer(\n 0.3))\n out = scalenet_layer(x, is_training=True)\n print(scalenet_layer.num_trainable_params())\n\n with self.cached_session() as sess:\n sess.run(tf.global_variables_initializer())\n out = sess.run(out)\n self.assertAllClose((2, 32, 32, 5), out.shape)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "import tensorflow as tf\n\nfrom niftynet.application.segmentation_application import \\\n SegmentationApplication, SUPPORTED_INPUT\nfrom niftynet.io.image_reader import ImageReader\nfrom niftynet.layer.binary_masking import BinaryMaskingLayer\nfrom niftynet.layer.discrete_label_normalisation import \\\n DiscreteLabelNormalisationLayer\nfrom niftynet.layer.histogram_normalisation import \\\n HistogramNormalisationLayer\nfrom niftynet.layer.mean_variance_normalisation import \\\n MeanVarNormalisationLayer\nfrom niftynet.layer.pad import PadLayer\nfrom niftynet.layer.rand_bias_field import RandomBiasFieldLayer\nfrom niftynet.layer.rand_flip import RandomFlipLayer\nfrom niftynet.layer.rand_rotation import RandomRotationLayer\nfrom niftynet.layer.rand_spatial_scaling import RandomSpatialScalingLayer\n\n\nclass 
SegmentationApplicationBFAug(SegmentationApplication):\n REQUIRED_CONFIG_SECTION = \"SEGMENTATION\"\n\n def __init__(self, net_param, action_param, is_training):\n SegmentationApplication.__init__(\n self, net_param, action_param, is_training)\n tf.logging.info('starting segmentation application')\n\n def initialise_dataset_loader(\n self, data_param=None, task_param=None, data_partitioner=None):\n\n self.data_param = data_param\n self.segmentation_param = task_param\n\n # initialise input image readers\n if self.is_training:\n reader_names = ('image', 'label', 'weight', 'sampler')\n elif self.is_inference:\n # in the inference process use `image` input only\n reader_names = ('image',)\n elif self.is_evaluation:\n reader_names = ('image', 'label', 'inferred')\n else:\n tf.logging.fatal(\n 'Action `%s` not supported. Expected one of %s',\n self.action, self.SUPPORTED_PHASES)\n raise ValueError\n try:\n reader_phase = self.action_param.dataset_to_infer\n except AttributeError:\n reader_phase = None\n file_lists = data_partitioner.get_file_lists_by(\n phase=reader_phase, action=self.action)\n self.readers = [\n ImageReader(reader_names).initialise(\n data_param, task_param, file_list) for file_list in file_lists]\n\n foreground_masking_layer = None\n if self.net_param.normalise_foreground_only:\n foreground_masking_layer = BinaryMaskingLayer(\n type_str=self.net_param.foreground_type,\n multimod_fusion=self.net_param.multimod_foreground_type,\n threshold=0.0)\n\n mean_var_normaliser = MeanVarNormalisationLayer(\n image_name='image', binary_masking_func=foreground_masking_layer)\n histogram_normaliser = None\n if self.net_param.histogram_ref_file:\n histogram_normaliser = HistogramNormalisationLayer(\n image_name='image',\n modalities=vars(task_param).get('image'),\n model_filename=self.net_param.histogram_ref_file,\n binary_masking_func=foreground_masking_layer,\n norm_type=self.net_param.norm_type,\n cutoff=self.net_param.cutoff,\n name='hist_norm_layer')\n\n label_normaliser = None\n if self.net_param.histogram_ref_file:\n label_normaliser = DiscreteLabelNormalisationLayer(\n image_name='label',\n modalities=vars(task_param).get('label'),\n model_filename=self.net_param.histogram_ref_file)\n\n normalisation_layers = []\n if self.net_param.normalisation:\n normalisation_layers.append(histogram_normaliser)\n if self.net_param.whitening:\n normalisation_layers.append(mean_var_normaliser)\n if task_param.label_normalisation and \\\n (self.is_training or not task_param.output_prob):\n normalisation_layers.append(label_normaliser)\n\n augmentation_layers = []\n if self.is_training:\n if self.action_param.random_flipping_axes != -1:\n augmentation_layers.append(RandomFlipLayer(\n flip_axes=self.action_param.random_flipping_axes))\n if self.action_param.scaling_percentage:\n augmentation_layers.append(RandomSpatialScalingLayer(\n min_percentage=self.action_param.scaling_percentage[0],\n max_percentage=self.action_param.scaling_percentage[1]))\n if self.action_param.rotation_angle or \\\n self.action_param.rotation_angle_x or \\\n self.action_param.rotation_angle_y or \\\n self.action_param.rotation_angle_z:\n rotation_layer = RandomRotationLayer()\n if self.action_param.rotation_angle:\n rotation_layer.init_uniform_angle(\n self.action_param.rotation_angle)\n else:\n rotation_layer.init_non_uniform_angle(\n self.action_param.rotation_angle_x,\n self.action_param.rotation_angle_y,\n self.action_param.rotation_angle_z)\n augmentation_layers.append(rotation_layer)\n if 
self.action_param.bias_field_range:\n bias_field_layer = RandomBiasFieldLayer()\n bias_field_layer.init_order(self.action_param.bf_order)\n bias_field_layer.init_uniform_coeff(\n self.action_param.bias_field_range)\n augmentation_layers.append(bias_field_layer)\n\n volume_padding_layer = [PadLayer(\n image_name=SUPPORTED_INPUT,\n border=self.net_param.volume_padding_size,\n mode=self.net_param.volume_padding_mode,\n pad_to=self.net_param.volume_padding_to_size)\n ]\n\n self.readers[0].add_preprocessing_layers(\n volume_padding_layer + normalisation_layers + augmentation_layers)\n\n for reader in self.readers[1:]:\n reader.add_preprocessing_layers(\n volume_padding_layer + normalisation_layers)\n", "# -*- coding: utf-8 -*-\n\"\"\"This module loads images from csv files and outputs numpy arrays.\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nfrom copy import deepcopy\nimport argparse\nimport numpy as np\nimport pandas\nimport tensorflow as tf\nfrom six import string_types\n\nfrom niftynet.io.misc_io import dtype_casting\nfrom niftynet.io.image_sets_partitioner import COLUMN_UNIQ_ID\nfrom niftynet.io.image_type import ImageFactory\nfrom niftynet.layer.base_layer import Layer, DataDependentLayer, RandomisedLayer\nfrom niftynet.utilities.user_parameters_helper import make_input_tuple\nfrom niftynet.utilities.util_common import print_progress_bar, ParserNamespace\nfrom niftynet.io.image_sets_partitioner import ImageSetsPartitioner\nfrom niftynet.utilities.util_common import look_up_operations\n\nDEFAULT_INTERP_ORDER = 1\nSUPPORTED_DATA_SPEC = {\n\n 'csv_file', 'path_to_search', 'csv_data_file', 'filename_removefromid',\n 'filename_contains', 'filename_not_contains', 'to_ohe',\n 'interp_order', 'loader', 'pixdim', 'axcodes', 'spatial_window_size'}\n\n\ndef infer_tf_dtypes(image_array):\n \"\"\"\n Choosing a suitable tf dtype based on the dtype of input numpy array.\n \"\"\"\n return dtype_casting(\n image_array.dtype[0], image_array.interp_order[0], as_tf=True)\n\n\nclass ImageReader(Layer):\n \"\"\"\n For a concrete example::\n\n _input_sources define multiple modality mappings, e.g.,\n _input_sources {'image': ('T1', 'T2'), 'label': ('manual_map',)}\n\n means:\n\n 'image' consists of two components, formed by\n concatenating 'T1' and 'T2' input source images.\n 'label' consists of one component, loading from 'manual_map'\n\n :param self._names: a tuple of the output names of this reader.\n ``('image', 'labels')``\n\n :param self._shapes: the shapes after combining input sources\n ``{'image': (192, 160, 192, 1, 2), 'label': (192, 160, 192, 1, 1)}``\n\n :param self._dtypes: store the dictionary of tensorflow shapes\n ``{'image': tf.float32, 'label': tf.float32}``\n\n :param self.output_list: a list of dictionaries, with each item::\n\n {'image': <niftynet.io.image_type.SpatialImage4D object>,\n 'label': <niftynet.io.image_type.SpatialImage3D object>}\n\n \"\"\"\n\n def __init__(self, names=None):\n # list of file names\n self._file_list = None\n self._input_sources = None\n self._spatial_ranks = None\n self._shapes = None\n self._dtypes = None\n self._names = None\n if names:\n self.names = names\n\n # list of image objects\n self.output_list = None\n self.current_id = -1\n\n self.preprocessors = []\n super(ImageReader, self).__init__(name='image_reader')\n\n def initialise(self, data_param, task_param=None, file_list=None):\n \"\"\"\n ``task_param`` specifies how to combine user input modalities.\n e.g., for multimodal segmentation 'image' corresponds to multiple\n 
modality sections, 'label' corresponds to one modality section\n\n This function converts elements of ``file_list`` into\n dictionaries of image objects, and save them to ``self.output_list``.\n e.g.::\n\n data_param = {'T1': {'path_to_search': 'path/to/t1'}\n 'T2': {'path_to_search': 'path/to/t2'}}\n\n loads pairs of T1 and T1 images (grouped by matching the filename).\n The reader's output is in the form of\n ``{'T1': np.array, 'T2': np.array}``.\n If the (optional) ``task_param`` is specified::\n\n task_param = {'image': ('T1', 'T2')}\n\n the reader loads pairs of T1 and T2 and returns the concatenated\n image (both modalities should have the same spatial dimensions).\n The reader's output is in the form of ``{'image': np.array}``.\n\n\n :param data_param: dictionary of input sections\n :param task_param: dictionary of grouping\n :param file_list: a dataframe generated by ImagePartitioner\n for cross validation, so\n that the reader only loads files in training/inference phases.\n :return: the initialised reader instance\n \"\"\"\n data_param = param_to_dict(data_param)\n\n if not task_param:\n task_param = {mod: (mod,) for mod in list(data_param)}\n try:\n if not isinstance(task_param, dict):\n task_param = vars(task_param)\n except ValueError:\n tf.logging.fatal(\n \"To concatenate multiple input data arrays,\\n\"\n \"task_param should be a dictionary in the form:\\n\"\n \"{'new_modality_name': ['modality_1', 'modality_2',...]}.\")\n raise\n if file_list is None:\n # defaulting to all files detected by the input specification\n file_list = ImageSetsPartitioner().initialise(data_param).all_files\n if not self.names:\n # defaulting to load all sections defined in the task_param\n self.names = list(task_param)\n valid_names = [name for name in self.names\n if task_param.get(name, None)]\n if not valid_names:\n tf.logging.fatal(\"Reader requires task input keywords %s, but \"\n \"not exist in the config file.\\n\"\n \"Available task keywords: %s\",\n self.names, list(task_param))\n raise ValueError\n self.names = valid_names\n\n self._input_sources = dict((name, task_param.get(name))\n for name in self.names)\n required_sections = \\\n sum([list(task_param.get(name)) for name in self.names], [])\n\n for required in required_sections:\n try:\n if (file_list is None) or \\\n (required not in list(file_list)) or \\\n (file_list[required].isnull().all()):\n tf.logging.fatal('Reader required input section '\n 'name [%s], but in the filename list '\n 'the column is empty.', required)\n raise ValueError\n except (AttributeError, TypeError, ValueError):\n tf.logging.fatal(\n 'file_list parameter should be a '\n 'pandas.DataFrame instance and has input '\n 'section name [%s] as a column name.', required)\n if required_sections:\n tf.logging.fatal('Reader requires section(s): %s',\n required_sections)\n if file_list is not None:\n tf.logging.fatal('Configuration input sections are: %s',\n list(file_list))\n raise\n\n self.output_list, self._file_list = _filename_to_image_list(\n file_list, self._input_sources, data_param)\n for name in self.names:\n tf.logging.info(\n 'Image reader: loading %d subjects '\n 'from sections %s as input [%s]',\n len(self.output_list), self.input_sources[name], name)\n return self\n\n def prepare_preprocessors(self):\n \"\"\"\n Some preprocessors requires an initial step to initialise\n data dependent internal parameters.\n\n This function find these preprocessors and run the initialisations.\n \"\"\"\n for layer in self.preprocessors:\n if isinstance(layer, 
DataDependentLayer):\n layer.train(self.output_list)\n\n def add_preprocessing_layers(self, layers):\n \"\"\"\n Adding a ``niftynet.layer`` or a list of layers as preprocessing steps.\n \"\"\"\n assert self.output_list is not None, \\\n 'Please initialise the reader first, ' \\\n 'before adding preprocessors.'\n if isinstance(layers, Layer):\n self.preprocessors.append(layers)\n else:\n self.preprocessors.extend(layers)\n self.prepare_preprocessors()\n\n # pylint: disable=arguments-differ,too-many-branches\n def layer_op(self, idx=None, shuffle=True):\n \"\"\"\n this layer returns dictionaries::\n\n keys: self.output_fields\n values: image volume array\n\n \"\"\"\n if idx is None:\n if shuffle:\n # training, with random list output\n idx = np.random.randint(len(self.output_list))\n else:\n # testing, with sequential output\n # accessing self.current_id, not suitable for multi-thread\n idx = self.current_id + 1\n self.current_id = idx\n\n try:\n image_dict = self.output_list[idx]\n except (IndexError, TypeError):\n return -1, None, None\n\n image_data_dict = \\\n {field: image.get_data() for (field, image) in image_dict.items()}\n interp_order_dict = \\\n {field: image.interp_order for (\n field, image) in image_dict.items()}\n\n preprocessors = [deepcopy(layer) for layer in self.preprocessors]\n # dictionary of masks is cached\n mask = None\n for layer in preprocessors:\n # import time; local_time = time.time()\n if layer is None:\n continue\n if isinstance(layer, RandomisedLayer):\n if \"random_elastic_deformation\" not in layer.name:\n layer.randomise()\n else:\n layer.randomise(image_data_dict)\n\n image_data_dict = layer(image_data_dict, interp_order_dict)\n elif isinstance(layer, Layer):\n image_data_dict, mask = layer(image_data_dict, mask)\n # print('%s, %.3f sec'%(layer, -local_time + time.time()))\n return idx, image_data_dict, interp_order_dict\n\n @property\n def spatial_ranks(self):\n \"\"\"\n Number of spatial dimensions of the images.\n\n :return: integers of spatial rank\n \"\"\"\n if not self.output_list:\n tf.logging.fatal(\"Please initialise the reader first.\")\n raise RuntimeError\n if not self._spatial_ranks:\n first_image = self.output_list[0]\n self._spatial_ranks = {field: first_image[field].spatial_rank\n for field in self.names}\n return self._spatial_ranks\n\n @property\n def shapes(self):\n \"\"\"\n Image shapes before any preprocessing.\n\n :return: tuple of integers as image shape\n\n\n .. caution::\n\n To have fast access, the spatial dimensions are not accurate\n\n 1. only read from the first image in list\n 2. 
not considering effects of random augmentation layers\n but time and modality dimensions should be correct\n \"\"\"\n if not self.output_list:\n tf.logging.fatal(\"Please initialise the reader first.\")\n raise RuntimeError\n if not self._shapes:\n first_image = self.output_list[0]\n self._shapes = {field: first_image[field].shape\n for field in self.names}\n return self._shapes\n\n @property\n def tf_dtypes(self):\n \"\"\"\n Infer input data dtypes in TF\n (using the first image in the file list).\n \"\"\"\n if not self.output_list:\n tf.logging.fatal(\"Please initialise the reader first.\")\n raise RuntimeError\n if not self._dtypes:\n first_image = self.output_list[0]\n self._dtypes = {field: infer_tf_dtypes(first_image[field])\n for field in self.names}\n return self._dtypes\n\n @property\n def input_sources(self):\n \"\"\"\n returns mapping of input keywords and input sections\n e.g., input_sources::\n\n {'image': ('T1', 'T2'),\n 'label': ('manual_map',)}\n\n map task parameter keywords ``image`` and ``label`` to\n section names ``T1``, ``T2``, and ``manual_map`` respectively.\n \"\"\"\n if not self._input_sources:\n tf.logging.fatal(\"Please initialise the reader first.\")\n raise RuntimeError\n return self._input_sources\n\n @property\n def names(self):\n \"\"\"\n\n :return: the keys of ``self.input_sources`` dictionary\n \"\"\"\n return self._names\n\n @names.setter\n def names(self, fields_tuple):\n \"\"\"\n output_fields is a sequence of output names\n each name might correspond to a list of multiple input sources\n this should be specified in CUSTOM section in the config\n \"\"\"\n self._names = make_input_tuple(fields_tuple, string_types)\n\n @property\n def num_subjects(self):\n \"\"\"\n\n :return: number of subjects in the reader\n \"\"\"\n if not self.output_list:\n return 0\n return len(self.output_list)\n\n def get_subject_id(self, image_index):\n \"\"\"\n Given an integer id returns the subject id.\n \"\"\"\n try:\n return self._file_list.iloc[image_index][COLUMN_UNIQ_ID]\n except KeyError:\n tf.logging.warning('Unknown subject id in reader file list.')\n raise\n\n def get_image_index(self, subject_id):\n \"\"\"\n Given a subject id, return the file_list index\n :param subject_id: a string with the subject id\n :return: an int with the file list index\n \"\"\"\n return np.flatnonzero(self._file_list['subject_id'] == subject_id)[0]\n\n def get_subject(self, image_index=None):\n \"\"\"\n Given an integer id returns the corresponding row of the file list.\n returns: a dictionary of the row\n \"\"\"\n try:\n if image_index is None:\n return self._file_list.iloc[:].to_dict()\n return self._file_list.iloc[image_index].to_dict()\n except (KeyError, AttributeError):\n tf.logging.warning('Unknown subject id in reader file list.')\n raise\n\n\ndef _filename_to_image_list(file_list, mod_dict, data_param):\n \"\"\"\n Converting a list of filenames to a list of image objects,\n Properties (e.g. 
interp_order) are added to each object\n \"\"\"\n volume_list = []\n valid_idx = []\n for idx in range(len(file_list)):\n # create image instance for each subject\n print_progress_bar(idx, len(file_list),\n prefix='reading datasets headers',\n decimals=1, length=10, fill='*')\n\n # combine fieldnames and volumes as a dictionary\n _dict = {}\n for field, modalities in mod_dict.items():\n _dict[field] = _create_image(\n file_list, idx, modalities, data_param)\n\n # skipping the subject if there're missing image components\n if _dict and None not in list(_dict.values()):\n volume_list.append(_dict)\n valid_idx.append(idx)\n\n if not volume_list:\n tf.logging.fatal(\n \"Empty filename lists, please check the csv \"\n \"files. (removing csv_file keyword if it is\"\n \" in the config file \"\n \"to automatically search folders and generate new csv \"\n \"files again)\\n\\n\"\n \"Please note in the matched file names, each subject id are \"\n \"created by removing all keywords listed `filename_contains` \"\n \"in the config.\\n\\n\"\n \"E.g., `filename_contains=foo, bar` will match file \"\n \"foo_subject42_bar.nii.gz, and the subject id is _subject42_.\")\n raise IOError\n return volume_list, file_list.iloc[valid_idx]\n\n\ndef _create_image(file_list, idx, modalities, data_param):\n \"\"\"\n data_param consists of description of each modality\n This function combines modalities according to the 'modalities'\n parameter and create <niftynet.io.input_type.SpatialImage*D>\n \"\"\"\n try:\n file_path = tuple(file_list.iloc[idx][mod] for mod in modalities)\n any_missing = any([pandas.isnull(file_name) or not bool(file_name)\n for file_name in file_path])\n if any_missing:\n # todo: enable missing modalities again\n # the file_path of a multimodal image will contain `nan`, e.g.\n # this should be handled by `ImageFactory.create_instance`\n # ('testT1.nii.gz', 'testT2.nii.gz', nan, 'testFlair.nii.gz')\n return None\n\n interp_order, pixdim, axcodes, loader = [], [], [], []\n for mod in modalities:\n mod_spec = data_param[mod] \\\n if isinstance(data_param[mod], dict) else vars(data_param[mod])\n interp_order.append(mod_spec.get('interp_order',\n DEFAULT_INTERP_ORDER))\n pixdim.append(mod_spec.get('pixdim', None))\n axcodes.append(mod_spec.get('axcodes', None))\n loader.append(mod_spec.get('loader', None))\n\n except KeyError:\n tf.logging.fatal(\n \"Specified modality names %s \"\n \"not found in config: input sections %s.\",\n modalities, list(data_param))\n raise\n except AttributeError:\n tf.logging.fatal(\n \"Data params must contain: interp_order, pixdim, axcodes.\\n\"\n \"Reader must be initialised with a dataframe as file_list.\")\n raise\n\n image_properties = {'file_path': file_path,\n 'name': modalities,\n 'interp_order': interp_order,\n 'output_pixdim': pixdim,\n 'output_axcodes': axcodes,\n 'loader': loader}\n return ImageFactory.create_instance(**image_properties)\n\n\ndef param_to_dict(input_data_param):\n \"\"\"\n Validate the user input ``input_data_param``\n raise an error if it's invalid.\n\n :param input_data_param:\n :return: input data specifications as a nested dictionary\n \"\"\"\n error_msg = 'Unknown ``data_param`` type. 
' \\\n 'It should be a nested dictionary: '\\\n '{\"modality_name\": {\"input_property\": value}} '\\\n 'or a dictionary of: {\"modality_name\": '\\\n 'niftynet.utilities.util_common.ParserNamespace}'\n data_param = deepcopy(input_data_param)\n if isinstance(data_param, (ParserNamespace, argparse.Namespace)):\n data_param = vars(data_param)\n if not isinstance(data_param, dict):\n raise ValueError(error_msg)\n for mod in data_param:\n mod_param = data_param[mod]\n if isinstance(mod_param, (ParserNamespace, argparse.Namespace)):\n dict_param = vars(mod_param)\n elif isinstance(mod_param, dict):\n dict_param = mod_param\n else:\n raise ValueError(error_msg)\n for data_key in dict_param:\n look_up_operations(data_key, SUPPORTED_DATA_SPEC)\n data_param[mod] = dict_param\n return data_param\n", "# -*- coding: utf-8 -*-\n\"\"\"\nThis module implements a sampler threads controller.\n\"\"\"\nimport tensorflow as tf\n\n# from niftynet.engine.signal import SESS_STARTED\nfrom niftynet.engine.signal import SESS_FINISHED\nfrom niftynet.utilities.util_common import traverse_nested\n\n\nclass SamplerThreading(object):\n \"\"\"\n This class handles iteration events to start/stop samplers' threads.\n \"\"\"\n\n def __init__(self, **_unused):\n # SESS_STARTED.connect(self.start_sampler_threads)\n SESS_FINISHED.connect(self.stop_sampler_threads)\n\n def start_sampler_threads(self, _sender, **_unused_msg):\n \"\"\"\n Get samplers from application and try to run sampler's threads.\n\n (deprecating)\n\n :param sender:\n :param _unused_msg:\n :return:\n \"\"\"\n pass\n # try:\n # for sampler in traverse_nested(sender.get_sampler()):\n # if sampler is None:\n # continue\n # sampler.run_threads(self.num_threads)\n # tf.logging.info('filling queues (this can take a few minutes).')\n # except (NameError, TypeError, AttributeError, IndexError):\n # tf.logging.fatal(\n # \"samplers not running, pop_batch_op operations \"\n # \"are blocked.\")\n # raise\n\n def stop_sampler_threads(self, sender, **_unused_msg):\n \"\"\"\n Stop the sampler's threads\n\n :param sender: an instance of niftynet.application\n :param _unused_msg:\n :return:\n \"\"\"\n try:\n tf.logging.info('stopping sampling threads')\n for sampler in traverse_nested(sender.get_sampler()):\n if sampler is None:\n continue\n sampler.close_all()\n except (AttributeError, TypeError):\n pass\n", "# -*- coding: utf-8 -*-\n\"\"\"\nGenerating uniformly distributed image window from input image\nThis can also be considered as a \"random cropping\" layer of the\ninput image.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom niftynet.contrib.csv_reader.sampler_csv_rows import ImageWindowDatasetCSV\nfrom niftynet.engine.image_window import N_SPATIAL, LOCATION_FORMAT\n\n\nclass UniformSamplerCSV(ImageWindowDatasetCSV):\n \"\"\"\n This class generates samples by uniformly sampling each input volume\n currently the coordinates are randomised for spatial dims only,\n i.e., the first three dims of image.\n\n This layer can be considered as a \"random cropping\" layer of the\n input image.\n \"\"\"\n\n def __init__(self,\n reader,\n csv_reader,\n window_sizes,\n batch_size=1,\n windows_per_image=1,\n queue_length=10,\n num_threads=4,\n smaller_final_batch_mode='drop',\n name='uniform_sampler_v2'):\n ImageWindowDatasetCSV.__init__(\n self,\n reader=reader,\n csv_reader=csv_reader,\n window_sizes=window_sizes,\n batch_size=batch_size,\n windows_per_image=windows_per_image,\n 
queue_length=queue_length,\n num_threads=num_threads,\n shuffle=True,\n epoch=-1,\n smaller_final_batch_mode=smaller_final_batch_mode,\n name=name)\n\n tf.logging.info(\"initialised uniform sampler %s \", self.window.shapes)\n self.window_centers_sampler = rand_spatial_coordinates\n\n # pylint: disable=too-many-locals\n def layer_op(self, idx=None):\n \"\"\"\n This function generates sampling windows to the input buffer\n image data are from ``self.reader()``\n\n It first completes window shapes based on image data,\n then finds random coordinates based on the window shapes\n finally extract window with the coordinates and output\n a dictionary (required by input buffer).\n\n :return: output data dictionary\n ``{image_modality: data_array, image_location: n_samples * 7}``\n \"\"\"\n image_id, data, _ = self.reader(idx=idx, shuffle=True)\n image_shapes = dict(\n (name, data[name].shape) for name in self.window.names)\n static_window_shapes = self.window.match_image_shapes(image_shapes)\n # find random coordinates based on window and image shapes\n coordinates = self._spatial_coordinates_generator(\n subject_id=image_id,\n data=data,\n img_sizes=image_shapes,\n win_sizes=static_window_shapes,\n n_samples=self.window.n_samples)\n\n # initialise output dict, placeholders as dictionary keys\n # this dictionary will be used in\n # enqueue operation in the form of: `feed_dict=output_dict`\n output_dict = {}\n # fill output dict with data\n for name in list(data):\n coordinates_key = LOCATION_FORMAT.format(name)\n image_data_key = name\n\n # fill the coordinates\n location_array = coordinates[name]\n output_dict[coordinates_key] = location_array\n\n # fill output window array\n image_array = []\n for window_id in range(self.window.n_samples):\n x_start, y_start, z_start, x_end, y_end, z_end = \\\n location_array[window_id, 1:]\n try:\n image_window = data[name][\n x_start:x_end, y_start:y_end, z_start:z_end, ...]\n image_array.append(image_window[np.newaxis, ...])\n except ValueError:\n tf.logging.fatal(\n \"dimensionality miss match in input volumes, \"\n \"please specify spatial_window_size with a \"\n \"3D tuple and make sure each element is \"\n \"smaller than the image length in each dim. 
\"\n \"Current coords %s\", location_array[window_id])\n raise\n if len(image_array) > 1:\n output_dict[image_data_key] = \\\n np.concatenate(image_array, axis=0)\n else:\n output_dict[image_data_key] = image_array[0]\n # the output image shape should be\n # [enqueue_batch_size, x, y, z, time, modality]\n # where enqueue_batch_size = windows_per_image\n if self.csv_reader is not None:\n _, label_dict, _ = self.csv_reader(idx=image_id)\n output_dict.update(label_dict)\n for name in self.csv_reader.names:\n output_dict[name + '_location'] = output_dict['image_location']\n print(\"output_dict gotten\", output_dict.keys(), len(output_dict.keys()))\n return output_dict\n\n def _spatial_coordinates_generator(self,\n subject_id,\n data,\n img_sizes,\n win_sizes,\n n_samples=1):\n \"\"\"\n Generate spatial coordinates for sampling.\n\n Values in ``win_sizes`` could be different --\n for example in a segmentation network ``win_sizes`` could be\n ``{'training_image_spatial_window': (32, 32, 10),\n 'Manual_label_spatial_window': (16, 16, 10)}``\n (the network reduces x-y plane spatial resolution).\n\n This function handles this situation by first find the largest\n window across these window definitions, and generate the coordinates.\n These coordinates are then adjusted for each of the\n smaller window sizes (the output windows are almost concentric).\n \"\"\"\n\n assert data is not None, \"No input from image reader. Please check\" \\\n \"the configuration file.\"\n\n # infer the largest spatial window size and check image spatial shapes\n img_spatial_size, win_spatial_size = \\\n _infer_spatial_size(img_sizes, win_sizes)\n\n sampling_prior_map = None\n try:\n sampling_prior_map = data.get('sampler', None)\n except AttributeError:\n pass\n\n n_samples = max(n_samples, 1)\n window_centres = self.window_centers_sampler(\n n_samples, img_spatial_size, win_spatial_size, sampling_prior_map)\n assert window_centres.shape == (n_samples, N_SPATIAL), \\\n \"the coordinates generator should return \" \\\n \"{} samples of rank {} locations\".format(n_samples, N_SPATIAL)\n\n # adjust spatial coordinates based on each mod spatial window size\n all_coordinates = {}\n for mod in list(win_sizes):\n win_size = np.asarray(win_sizes[mod][:N_SPATIAL])\n half_win = np.floor(win_size / 2.0).astype(int)\n\n # Make starting coordinates of the window\n spatial_coords = np.zeros(\n (n_samples, N_SPATIAL * 2), dtype=np.int32)\n spatial_coords[:, :N_SPATIAL] = np.maximum(\n window_centres[:, :N_SPATIAL] - half_win[:N_SPATIAL], 0)\n\n # Make the opposite corner of the window is\n # just adding the mod specific window size\n spatial_coords[:, N_SPATIAL:] = \\\n spatial_coords[:, :N_SPATIAL] + win_size[:N_SPATIAL]\n assert np.all(spatial_coords[:, N_SPATIAL:] <= img_spatial_size), \\\n 'spatial coords: out of bounds.'\n\n # include subject id as the 1st column of all_coordinates values\n subject_id = np.ones((n_samples,), dtype=np.int32) * subject_id\n spatial_coords = np.append(\n subject_id[:, None], spatial_coords, axis=1)\n all_coordinates[mod] = spatial_coords\n\n return all_coordinates\n\n\ndef rand_spatial_coordinates(\n n_samples, img_spatial_size, win_spatial_size, sampler_map):\n \"\"\"\n Generate spatial coordinates from a discrete uniform distribution.\n\n :param n_samples: number of random coordinates to generate\n :param img_spatial_size: input image size\n :param win_spatial_size: input window size\n :param sampler_map: sampling prior map (not in use)\n :return: (n_samples, N_SPATIAL) coordinates representing 
sampling\n window centres relative to img_spatial_size\n \"\"\"\n tf.logging.debug('uniform sampler, prior %s ignored', sampler_map)\n\n # Sample coordinates at random\n half_win = np.floor(np.asarray(win_spatial_size) / 2.0).astype(np.int32)\n max_coords = np.zeros((n_samples, N_SPATIAL), dtype=np.int32)\n for (idx, (img, win)) in enumerate(\n zip(img_spatial_size[:N_SPATIAL], win_spatial_size[:N_SPATIAL])):\n max_coords[:, idx] = np.random.randint(\n 0, max(img - win + 1, 1), n_samples)\n max_coords[:, :N_SPATIAL] = \\\n max_coords[:, :N_SPATIAL] + half_win[:N_SPATIAL]\n return max_coords\n\n\ndef _infer_spatial_size(img_sizes, win_sizes):\n \"\"\"\n Utility function to find the spatial size of image,\n and the largest spatial window size across input sections.\n\n Raises NotImplementedError if the images have\n different spatial dimensions.\n\n :param img_sizes: dictionary of {'input_name': (img_size_x, img_size,y,...)}\n :param win_sizes: dictionary of {'input_name': (win_size_x, win_size_y,...)}\n :return: (image_spatial_size, window_largest_spatial_size)\n \"\"\"\n uniq_spatial_size = \\\n set([img_size[:N_SPATIAL] for img_size in list(img_sizes.values())])\n if len(uniq_spatial_size) != 1:\n tf.logging.fatal(\"Don't know how to generate sampling \"\n \"locations: Spatial dimensions of the \"\n \"grouped input sources are not \"\n \"consistent. %s\", uniq_spatial_size)\n raise NotImplementedError\n img_spatial_size = np.asarray(uniq_spatial_size.pop(), dtype=np.int32)\n\n # find the largest spatial window across input sections\n _win_spatial_sizes = \\\n [win_size[:N_SPATIAL] for win_size in win_sizes.values()]\n _win_spatial_sizes = np.asarray(_win_spatial_sizes, dtype=np.int32)\n win_spatial_size = np.max(_win_spatial_sizes, axis=0)\n\n assert all([img_spatial_size[i] >= win_spatial_size[i]\n for i in range(N_SPATIAL)]), \\\n \"window size {} is larger than image size {}\".format(\n win_spatial_size, img_spatial_size)\n\n return img_spatial_size, win_spatial_size\n", "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, division\n\nimport tensorflow as tf\nfrom niftynet.layer.base_layer import TrainableLayer\nfrom niftynet.layer.convolution import ConvolutionalLayer\nfrom niftynet.network.base_net import BaseNet\nfrom niftynet.network.highres3dnet import HighRes3DNet, HighResBlock\nfrom niftynet.utilities.util_common import look_up_operations\n\n\nclass ScaleNet(BaseNet):\n \"\"\"\n implementation of ScaleNet:\n Fidon et al., \"Scalable multimodal convolutional\n networks for brain tumour segmentation\", MICCAI '17\n\n ### Diagram\n\n INPUT --> [BACKEND] ----> [MERGING] ----> [FRONTEND] ---> OUTPUT\n\n [BACKEND] and [MERGING] are provided by the ScaleBlock below\n [FRONTEND]: it can be any NiftyNet network (default: HighRes3dnet)\n\n ### Constraints:\n - Input image size should be divisible by 8\n - more than one modality should be used\n \"\"\"\n\n def __init__(self,\n num_classes,\n w_initializer=None,\n w_regularizer=None,\n b_initializer=None,\n b_regularizer=None,\n acti_func='prelu',\n name='ScaleNet'):\n \"\"\"\n\n :param num_classes: int, number of channels of output\n :param w_initializer: weight initialisation for network\n :param w_regularizer: weight regularisation for network\n :param b_initializer: bias initialisation for network\n :param b_regularizer: bias regularisation for network\n :param acti_func: activation function to use\n :param name: layer name\n \"\"\"\n\n super(ScaleNet, self).__init__(\n num_classes=num_classes,\n 
w_initializer=w_initializer,\n w_regularizer=w_regularizer,\n b_initializer=b_initializer,\n b_regularizer=b_regularizer,\n acti_func=acti_func,\n name=name)\n\n self.n_features = 16\n\n def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):\n \"\"\"\n\n :param images: tensor, concatenation of multiple input modalities\n :param is_training: boolean, True if network is in training mode\n :param layer_id: not in use\n :param unused_kwargs:\n :return: predicted tensor\n \"\"\"\n n_modality = images.shape.as_list()[-1]\n rank = images.shape.ndims\n assert n_modality > 1\n roots = tf.split(images, n_modality, axis=rank - 1)\n for (idx, root) in enumerate(roots):\n conv_layer = ConvolutionalLayer(\n n_output_chns=self.n_features,\n kernel_size=3,\n w_initializer=self.initializers['w'],\n w_regularizer=self.regularizers['w'],\n acti_func=self.acti_func,\n name='conv_{}'.format(idx))\n roots[idx] = conv_layer(root, is_training)\n roots = tf.stack(roots, axis=-1)\n\n back_end = ScaleBlock('AVERAGE', n_layers=1)\n output_tensor = back_end(roots, is_training)\n\n front_end = HighRes3DNet(self.num_classes)\n output_tensor = front_end(output_tensor, is_training)\n return output_tensor\n\n\nSUPPORTED_OP = set(['MAX', 'AVERAGE'])\n\n\nclass ScaleBlock(TrainableLayer):\n \"\"\"\n Implementation of the ScaleBlock described in\n Fidon et al., \"Scalable multimodal convolutional\n networks for brain tumour segmentation\", MICCAI '17\n\n See Fig 2(a) for diagram details - SN BackEnd\n\n \"\"\"\n def __init__(self,\n func,\n n_layers=1,\n w_initializer=None,\n w_regularizer=None,\n acti_func='relu',\n name='scaleblock'):\n \"\"\"\n :param func: merging function (SUPPORTED_OP: MAX, AVERAGE)\n :param n_layers: int, number of layers\n :param w_initializer: weight initialisation for network\n :param w_regularizer: weight regularisation for network\n :param acti_func: activation function to use\n :param name: layer name\n \"\"\"\n self.func = look_up_operations(func.upper(), SUPPORTED_OP)\n super(ScaleBlock, self).__init__(name=name)\n self.n_layers = n_layers\n self.acti_func = acti_func\n\n self.initializers = {'w': w_initializer}\n self.regularizers = {'w': w_regularizer}\n\n def layer_op(self, input_tensor, is_training):\n \"\"\"\n\n :param input_tensor: tensor, input to the network\n :param is_training: boolean, True if network is in training mode\n :return: merged tensor after backend layers\n \"\"\"\n n_modality = input_tensor.shape.as_list()[-1]\n n_chns = input_tensor.shape.as_list()[-2]\n rank = input_tensor.shape.ndims\n perm = [i for i in range(rank)]\n perm[-2], perm[-1] = perm[-1], perm[-2]\n\n output_tensor = input_tensor\n for layer in range(self.n_layers):\n # modalities => feature channels\n output_tensor = tf.transpose(output_tensor, perm=perm)\n output_tensor = tf.unstack(output_tensor, axis=-1)\n for (idx, tensor) in enumerate(output_tensor):\n block_name = 'M_F_{}_{}'.format(layer, idx)\n highresblock_op = HighResBlock(\n n_output_chns=n_modality,\n kernels=(3, 1),\n with_res=True,\n w_initializer=self.initializers['w'],\n w_regularizer=self.regularizers['w'],\n acti_func=self.acti_func,\n name=block_name)\n output_tensor[idx] = highresblock_op(tensor, is_training)\n print(highresblock_op)\n output_tensor = tf.stack(output_tensor, axis=-1)\n\n # feature channels => modalities\n output_tensor = tf.transpose(output_tensor, perm=perm)\n output_tensor = tf.unstack(output_tensor, axis=-1)\n for (idx, tensor) in enumerate(output_tensor):\n block_name = 'F_M_{}_{}'.format(layer, 
idx)\n highresblock_op = HighResBlock(\n n_output_chns=n_chns,\n kernels=(3, 1),\n with_res=True,\n w_initializer=self.initializers['w'],\n w_regularizer=self.regularizers['w'],\n acti_func=self.acti_func,\n name=block_name)\n output_tensor[idx] = highresblock_op(tensor, is_training)\n print(highresblock_op)\n output_tensor = tf.stack(output_tensor, axis=-1)\n\n if self.func == 'MAX':\n output_tensor = tf.reduce_max(output_tensor, axis=-1)\n elif self.func == 'AVERAGE':\n output_tensor = tf.reduce_mean(output_tensor, axis=-1)\n return output_tensor\n" ]
[ [ "numpy.arange", "tensorflow.test.main" ], [ "tensorflow.contrib.layers.python.layers.regularizers.l1_regularizer", "tensorflow.confusion_matrix", "tensorflow.contrib.layers.python.layers.regularizers.l2_regularizer", "tensorflow.reduce_mean", "tensorflow.get_collection", "tensorflow.cast", "tensorflow.logging.info", "tensorflow.to_float", "tensorflow.name_scope", "tensorflow.argmax", "tensorflow.logical_not", "tensorflow.logging.fatal", "tensorflow.trace" ], [ "tensorflow.unstack", "tensorflow.reduce_mean", "tensorflow.get_collection", "tensorflow.expand_dims", "tensorflow.logging.info", "tensorflow.name_scope", "tensorflow.to_float", "tensorflow.logical_not" ], [ "numpy.unique", "scipy.ndimage.distance_transform_edt", "numpy.percentile", "numpy.max", "numpy.argmax", "numpy.zeros_like", "pandas.DataFrame.from_records", "numpy.sum" ], [ "tensorflow.contrib.layers.python.layers.regularizers.l1_regularizer", "tensorflow.contrib.layers.python.layers.regularizers.l2_regularizer", "tensorflow.reduce_mean", "tensorflow.get_collection", "tensorflow.cast", "tensorflow.logging.info", "tensorflow.name_scope", "tensorflow.trainable_variables", "tensorflow.logical_not", "tensorflow.logging.fatal", "tensorflow.abs" ], [ "tensorflow.global_variables_initializer", "tensorflow.contrib.layers.python.layers.regularizers.l2_regularizer", "tensorflow.ones", "tensorflow.test.main" ], [ "tensorflow.logging.info", "tensorflow.logging.fatal" ], [ "tensorflow.logging.warning", "tensorflow.logging.fatal", "pandas.isnull", "numpy.flatnonzero" ], [ "tensorflow.logging.info" ], [ "numpy.maximum", "numpy.asarray", "tensorflow.logging.debug", "numpy.ones", "numpy.all", "numpy.max", "numpy.append", "numpy.concatenate", "tensorflow.logging.info", "numpy.floor", "numpy.zeros", "tensorflow.logging.fatal" ], [ "tensorflow.reduce_max", "tensorflow.transpose", "tensorflow.unstack", "tensorflow.reduce_mean", "tensorflow.stack", "tensorflow.split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
andre-cavalheiro/datascience-project
[ "f9e7187faacc2e3e676c7db0e34e354e8c659c3c" ]
[ "src/libs/plot.py" ]
[ "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom pandas.plotting import register_matplotlib_converters\nfrom sklearn.tree import export_graphviz\nimport seaborn as sns\nfrom subprocess import call\nfrom sklearn.decomposition import PCA\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nfrom sklearn.neighbors import NearestNeighbors\n\ndef eps_plot(data, file = None):\n\tnn = NearestNeighbors(n_neighbors=2)\n\tnbrs = nn.fit(data)\n\tdistances, indices = nbrs.kneighbors(data)\n\tdistances = np.sort(distances, axis = 0)\n\tdistances = distances[:,1]\n\tplt.plot(distances)\n\tplt.xlabel('Data Points')\n\tplt.ylabel('Distances to Neighbors')\n\tif(file == None):\n\t\tplt.show()\n\telse:\n\t\tplt.savefig(file)\n\ndef correlation_matrix(data, name, file = None, annotTreshold = 20):\n\tannot = False if len(data.columns) > 20 else True\n\tfig = plt.figure(figsize=[25, 25])\n\tcorr_mtx = data.corr(method='spearman')\n\tsns.heatmap(corr_mtx, xticklabels=False, yticklabels=False, annot=None, cmap='Blues', vmin=-1,vmax=1)\n\t#plt.title('Correlation analysis of {}'.format(name))\n\t\n\tif(file == None):\n\t\tplt.show()\n\telse:\n\t\tplt.savefig(file)\n\tplt.close(fig=fig)\n\ndef sparsity(data, file = None):\n\tcolumns = data.select_dtypes(include='number').columns\n\trows, cols = len(columns)-1, len(columns)-1\n\tplt.figure()\n\tfig, axs = plt.subplots(rows, cols, figsize=(cols*4, rows*4), squeeze=False)\n\tfor i in range(len(columns)):\n\t var1 = columns[i]\n\t for j in range(i+1, len(columns)):\n\t var2 = columns[j]\n\t axs[i, j-1].set_title(\"%s x %s\"%(var1,var2))\n\t axs[i, j-1].set_xlabel(var1)\n\t axs[i, j-1].set_ylabel(var2)\n\t axs[i, j-1].scatter(data[var1], data[var2])\n\tfig.tight_layout()\n\n\tif(file == None):\n\t\tplt.show()\n\telse:\n\t\tplt.savefig(file)\n\tplt.close(fig=fig)\n\ndef sens_spec_scatter(inputF, file=None, name=\"Sensitivity and Sensitivity\", sensLabel = 'sensitivity', \\\n\t\t\t\t\t\tspecLabel = 'specificity', label = 'balancingStrategy'):\n\tdata = pd.read_csv(inputF, sep=',', encoding='utf-8')\n\tfig, ax = plt.subplots()\n\n\tplt.xlabel('Sensitivity')\n\tplt.ylabel('Specificity')\n\n\tfor index, row in data.iterrows():\n\t\tax.scatter(row[sensLabel], row[specLabel], label=row[label], edgecolors='none')\n\n\tplt.title('Sensitivity and specificity of {}'.format(name))\n\tax.legend()\n\tax.grid(True)\n\n\tif(file == None):\n\t\tplt.show()\n\telse:\n\t\tplt.savefig(file)\n\ndef decision_tree_visualizer(tree, dir, filename = \"dtree\", show = False):\n\tdot_file = '{}/{}.dot'.format(dir, filename)\n\tpng_file = '{}/{}.png'.format(dir, filename)\n\n\tdot_data = export_graphviz(tree, out_file=dot_file, filled=True, rounded=True, special_characters=True) \n\tcall(['dot', '-Tpng', dot_file, '-o', png_file, '-Gdpi=600'])\n\n\tif show:\n\t\tplt.figure(figsize = (14, 18))\n\t\tplt.imshow(plt.imread(png_file))\n\t\tplt.axis('off')\n\t\tplt.show()\n\ndef pca_plot(data, predict, file = None, title=None):\n\tpca = PCA(n_components=3)\n\tprincipalComponents = pca.fit_transform(data)\n\n\tprincipalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2', 'principal component 3'])\n\tfinalDf = pd.concat([principalDf, pd.DataFrame(predict)], axis=1)\n\tfinalDf.rename(columns={0: 'class'}, inplace=True)\n\n\tfig = plt.figure(figsize = (8,8))\n\tax = fig.add_subplot(1,1,1,projection='3d') \n\tax.set_xlabel('Principal Component 1', fontsize = 15)\n\tax.set_ylabel('Principal Component 2', fontsize = 
15)\n\tax.set_zlabel('Principal Component 3', fontsize = 15)\n\tax.set_title(title, fontsize = 20)\n\n\ttargets = np.arange(len(np.unique(predict)))\n\tfor target in targets:\n\t\tindicesToKeep = finalDf['class'] == target\n\t\tax.scatter(finalDf.loc[indicesToKeep, 'principal component 1'], finalDf.loc[indicesToKeep, 'principal component 2'] , zs= finalDf.loc[indicesToKeep, 'principal component 3'] , s = 50)\n\n\tax.legend(targets)\n\tax.grid()\n\n\tif(file == None):\n\t\tplt.show()\n\telse:\n\t\tplt.savefig('{}/{}.png'.format(dir, filename))\n\n\ndef pca_plot_3d(data, predict, dir, filename = \"pca3d\", title=None, show = False):\n\tif len(data.columns) < 3:\n\t\treturn \n\n\tpca = PCA(n_components=3)\n\tprincipalComponents = pca.fit_transform(data)\n\n\tprincipalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2', 'principal component 3'])\n\tfinalDf = pd.concat([principalDf, pd.DataFrame(predict)], axis=1)\n\tfinalDf.rename(columns={0: 'class'}, inplace=True)\n\n\tfig = plt.figure(figsize = (8,8))\n\tax = fig.add_subplot(1,1,1,projection='3d') \n\tax.set_xlabel('Principal Component 1', fontsize = 15)\n\tax.set_ylabel('Principal Component 2', fontsize = 15)\n\tax.set_zlabel('Principal Component 3', fontsize = 15)\n\tax.set_title(title, fontsize = 20)\n\n\ttargets = np.arange(len(np.unique(predict)))\n\tfor target in targets:\n\t\tindicesToKeep = finalDf['class'] == target\n\t\tax.scatter(finalDf.loc[indicesToKeep, 'principal component 1'], finalDf.loc[indicesToKeep, 'principal component 2'] , zs= finalDf.loc[indicesToKeep, 'principal component 3'] , s = 50)\n\n\tax.legend(targets)\n\tax.grid()\n\n\tif(show):\n\t\tplt.show()\n\telse:\n\t\tplt.savefig('{}/{}.png'.format(dir, filename))\n" ]
[ [ "sklearn.tree.export_graphviz", "pandas.read_csv", "numpy.unique", "matplotlib.pyplot.figure", "matplotlib.pyplot.imread", "matplotlib.pyplot.subplots", "numpy.sort", "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "matplotlib.pyplot.xlabel", "sklearn.neighbors.NearestNeighbors", "sklearn.decomposition.PCA", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
ceostroff/transformers
[ "3095ee9dab739f212a8753b5be4e1a72ba42e28e", "3095ee9dab739f212a8753b5be4e1a72ba42e28e", "3095ee9dab739f212a8753b5be4e1a72ba42e28e" ]
[ "src/transformers/models/albert/modeling_albert.py", "src/transformers/models/reformer/modeling_reformer.py", "tests/test_modeling_ctrl.py" ]
[ "# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch ALBERT model. \"\"\"\n\nimport math\nimport os\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPooling,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_albert import AlbertConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"AlbertConfig\"\n_TOKENIZER_FOR_DOC = \"AlbertTokenizer\"\n\n\nALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"albert-base-v1\",\n \"albert-large-v1\",\n \"albert-xlarge-v1\",\n \"albert-xxlarge-v1\",\n \"albert-base-v2\",\n \"albert-large-v2\",\n \"albert-xlarge-v2\",\n \"albert-xxlarge-v2\",\n # See all ALBERT models at https://huggingface.co/models?filter=albert\n]\n\n\ndef load_tf_weights_in_albert(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n print(name)\n\n for name, array in zip(names, arrays):\n original_name = name\n\n # If saved from the TF HUB module\n name = name.replace(\"module/\", \"\")\n\n # Renaming and simplifying\n name = name.replace(\"ffn_1\", \"ffn\")\n name = name.replace(\"bert/\", \"albert/\")\n name = name.replace(\"attention_1\", \"attention\")\n name = name.replace(\"transform/\", \"\")\n name = name.replace(\"LayerNorm_1\", \"full_layer_layer_norm\")\n name = name.replace(\"LayerNorm\", \"attention/LayerNorm\")\n name = name.replace(\"transformer/\", \"\")\n\n # The feed forward layer had an 'intermediate' step which has been abstracted away\n name = name.replace(\"intermediate/dense/\", \"\")\n name = name.replace(\"ffn/intermediate/output/dense/\", \"ffn_output/\")\n\n # ALBERT attention was split between self and output which have been abstracted away\n name = name.replace(\"/output/\", \"/\")\n name = name.replace(\"/self/\", \"/\")\n\n # The pooler is a linear layer\n name = name.replace(\"pooler/dense\", \"pooler\")\n\n # The classifier was simplified to predictions from cls/predictions\n name = name.replace(\"cls/predictions\", \"predictions\")\n name = name.replace(\"predictions/attention\", \"predictions\")\n\n # Naming was changed to be more explicit\n name = name.replace(\"embeddings/attention\", \"embeddings\")\n name = name.replace(\"inner_group_\", \"albert_layers/\")\n name = name.replace(\"group_\", \"albert_layer_groups/\")\n\n # Classifier\n if len(name.split(\"/\")) == 1 and (\"output_bias\" in name or \"output_weights\" in name):\n name = \"classifier/\" + name\n\n # No ALBERT model currently handles the next sentence prediction task\n if \"seq_relationship\" in name:\n name = name.replace(\"seq_relationship/output_\", \"sop_classifier/classifier/\")\n name = name.replace(\"weights\", \"weight\")\n\n name = name.split(\"/\")\n\n # Ignore the gradients applied by the LAMB/ADAM optimizers.\n if (\n \"adam_m\" in name\n or \"adam_v\" in name\n or \"AdamWeightDecayOptimizer\" in name\n or \"AdamWeightDecayOptimizer_1\" in name\n or \"global_step\" in name\n ):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n\n if m_name[-11:] == 
\"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {} from {}\".format(name, original_name))\n pointer.data = torch.from_numpy(array)\n\n return model\n\n\nclass AlbertEmbeddings(nn.Module):\n \"\"\"\n Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, :seq_length]\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass AlbertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.hidden_size = config.hidden_size\n self.attention_head_size = config.hidden_size // config.num_attention_heads\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.output_dropout = nn.Dropout(config.hidden_dropout_prob)\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.pruned_heads = set()\n\n # Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores\n 
def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.query = prune_linear_layer(self.query, index)\n self.key = prune_linear_layer(self.key, index)\n self.value = prune_linear_layer(self.value, index)\n self.dense = prune_linear_layer(self.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.num_attention_heads = self.num_attention_heads - len(heads)\n self.all_head_size = self.attention_head_size * self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(self, input_ids, attention_mask=None, head_mask=None, output_attentions=False):\n mixed_query_layer = self.query(input_ids)\n mixed_key_layer = self.key(input_ids)\n mixed_value_layer = self.value(input_ids)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.attention_dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n\n # Should find a better way to do this\n w = (\n self.dense.weight.t()\n .view(self.num_attention_heads, self.attention_head_size, self.hidden_size)\n .to(context_layer.dtype)\n )\n b = self.dense.bias.to(context_layer.dtype)\n\n projected_context_layer = torch.einsum(\"bfnd,ndh->bfh\", context_layer, w) + b\n projected_context_layer_dropout = self.output_dropout(projected_context_layer)\n layernormed_context_layer = self.LayerNorm(input_ids + projected_context_layer_dropout)\n return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,)\n\n\nclass AlbertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.config = config\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.attention = AlbertAttention(config)\n self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)\n self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)\n self.activation = ACT2FN[config.hidden_act]\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(\n self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False\n 
):\n attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions)\n\n ffn_output = apply_chunking_to_forward(\n self.ff_chunk,\n self.chunk_size_feed_forward,\n self.seq_len_dim,\n attention_output[0],\n )\n hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])\n\n return (hidden_states,) + attention_output[1:] # add attentions if we output them\n\n def ff_chunk(self, attention_output):\n ffn_output = self.ffn(attention_output)\n ffn_output = self.activation(ffn_output)\n ffn_output = self.ffn_output(ffn_output)\n return ffn_output\n\n\nclass AlbertLayerGroup(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])\n\n def forward(\n self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False\n ):\n layer_hidden_states = ()\n layer_attentions = ()\n\n for layer_index, albert_layer in enumerate(self.albert_layers):\n layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions)\n hidden_states = layer_output[0]\n\n if output_attentions:\n layer_attentions = layer_attentions + (layer_output[1],)\n\n if output_hidden_states:\n layer_hidden_states = layer_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if output_hidden_states:\n outputs = outputs + (layer_hidden_states,)\n if output_attentions:\n outputs = outputs + (layer_attentions,)\n return outputs # last-layer hidden state, (layer hidden states), (layer attentions)\n\n\nclass AlbertTransformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.config = config\n self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)\n self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n hidden_states = self.embedding_hidden_mapping_in(hidden_states)\n\n all_hidden_states = (hidden_states,) if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n for i in range(self.config.num_hidden_layers):\n # Number of layers in a hidden group\n layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)\n\n # Index of the hidden group\n group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))\n\n layer_group_output = self.albert_layer_groups[group_idx](\n hidden_states,\n attention_mask,\n head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],\n output_attentions,\n output_hidden_states,\n )\n hidden_states = layer_group_output[0]\n\n if output_attentions:\n all_attentions = all_attentions + layer_group_output[-1]\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions\n )\n\n\nclass AlbertPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = AlbertConfig\n base_model_prefix = \"albert\"\n authorized_missing_keys = 
[r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, (nn.Linear)) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\n@dataclass\nclass AlbertForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of :class:`~transformers.AlbertForPreTraining`.\n\n Args:\n loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):\n Total loss as the sum of the masked language modeling loss and the next sequence prediction\n (classification) loss.\n prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n sop_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation\n before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n prediction_logits: torch.FloatTensor = None\n sop_logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nALBERT_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Args:\n config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nALBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.AlbertTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.__call__` and :meth:`transformers.PreTrainedTokenizer.encode` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertModel(AlbertPreTrainedModel):\n\n config_class = AlbertConfig\n load_tf_weights = load_tf_weights_in_albert\n base_model_prefix = \"albert\"\n\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n\n self.config = config\n self.embeddings = AlbertEmbeddings(config)\n self.encoder = AlbertTransformer(config)\n if add_pooling_layer:\n self.pooler = nn.Linear(config.hidden_size, config.hidden_size)\n self.pooler_activation = nn.Tanh()\n else:\n self.pooler = None\n self.pooler_activation = None\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _resize_token_embeddings(self, new_num_tokens):\n old_embeddings = self.embeddings.word_embeddings\n new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)\n self.embeddings.word_embeddings = new_embeddings\n return self.embeddings.word_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} ALBERT has\n a different architecture in that its layers are shared across groups, which then has inner groups. If an ALBERT\n model has 12 hidden layers and 2 hidden groups, with two inner groups, there is a total of 4 different layers.\n\n These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer,\n while [2,3] correspond to the two inner groups of the second hidden layer.\n\n Any layer with in index other than [0,1,2,3] will result in an error. 
See base class PreTrainedModel for more\n information about head pruning\n \"\"\"\n for layer, heads in heads_to_prune.items():\n group_idx = int(layer / self.config.inner_group_num)\n inner_group_idx = int(layer - group_idx * self.config.inner_group_num)\n self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=BaseModelOutputWithPooling,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\n )\n encoder_outputs = self.encoder(\n embedding_output,\n extended_attention_mask,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = encoder_outputs[0]\n\n pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Albert Model with two heads on top as done during the pre-training: a `masked language modeling` head and a\n `sentence order prediction (classification)` head.\n \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForPreTraining(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.albert = AlbertModel(config)\n self.predictions = AlbertMLMHead(config)\n self.sop_classifier = AlbertSOPHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.predictions.decoder\n\n def 
get_input_embeddings(self):\n return self.albert.embeddings.word_embeddings\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n sentence_order_label=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n sentence_order_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):\n Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair\n (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``. ``0`` indicates original order (sequence\n A, then sequence B), ``1`` indicates switched order (sequence B, then sequence A).\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n\n Returns:\n\n Example::\n\n >>> from transformers import AlbertTokenizer, AlbertForPreTraining\n >>> import torch\n\n >>> tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')\n >>> model = AlbertForPreTraining.from_pretrained('albert-base-v2')\n\n >>> input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n >>> outputs = model(input_ids)\n\n >>> prediction_logits = outputs.prediction_logits\n >>> sop_logits = outputs.sop_logits\n\n \"\"\"\n\n if \"masked_lm_labels\" in kwargs:\n warnings.warn(\n \"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"masked_lm_labels\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output, pooled_output = outputs[:2]\n\n prediction_scores = self.predictions(sequence_output)\n sop_scores = self.sop_classifier(pooled_output)\n\n total_loss = None\n if labels is not None and sentence_order_label is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))\n total_loss = masked_lm_loss + sentence_order_loss\n\n if not return_dict:\n output = (prediction_scores, sop_scores) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return AlbertForPreTrainingOutput(\n loss=total_loss,\n prediction_logits=prediction_scores,\n sop_logits=sop_scores,\n hidden_states=outputs.hidden_states,\n 
attentions=outputs.attentions,\n )\n\n\nclass AlbertMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.LayerNorm = nn.LayerNorm(config.embedding_size)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n self.dense = nn.Linear(config.hidden_size, config.embedding_size)\n self.decoder = nn.Linear(config.embedding_size, config.vocab_size)\n self.activation = ACT2FN[config.hidden_act]\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.activation(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n hidden_states = self.decoder(hidden_states)\n\n prediction_scores = hidden_states\n\n return prediction_scores\n\n\nclass AlbertSOPHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.dropout = nn.Dropout(config.classifier_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, pooled_output):\n dropout_pooled_output = self.dropout(pooled_output)\n logits = self.classifier(dropout_pooled_output)\n return logits\n\n\n@add_start_docstrings(\n \"Albert Model with a `language modeling` head on top.\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForMaskedLM(AlbertPreTrainedModel):\n\n authorized_unexpected_keys = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.albert = AlbertModel(config, add_pooling_layer=False)\n self.predictions = AlbertMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.predictions.decoder\n\n def get_input_embeddings(self):\n return self.albert.embeddings.word_embeddings\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n \"\"\"\n if \"masked_lm_labels\" in kwargs:\n warnings.warn(\n \"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"masked_lm_labels\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_outputs = outputs[0]\n\n prediction_scores = self.predictions(sequence_outputs)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForSequenceClassification(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.albert = AlbertModel(config)\n self.dropout = nn.Dropout(config.classifier_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ...,\n config.num_labels - 1]``. 
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForTokenClassification(AlbertPreTrainedModel):\n\n authorized_unexpected_keys = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.albert = AlbertModel(config, add_pooling_layer=False)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForQuestionAnswering(AlbertPreTrainedModel):\n\n authorized_unexpected_keys = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.albert = AlbertModel(config, add_pooling_layer=False)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForMultipleChoice(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.albert = AlbertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where `num_choices` is the size of the second dimension of the input tensors. 
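# Editorial sketch (not part of the original file): the span loss in
# AlbertForQuestionAnswering.forward above clamps out-of-range positions to
# `ignored_index`, which CrossEntropyLoss then skips. Dummy numbers below.
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len = 3, 8
start_logits = torch.randn(batch_size, seq_len)
end_logits = torch.randn(batch_size, seq_len)
start_positions = torch.tensor([2, 11, 4])   # 11 lies outside the 8-token sequence
end_positions = torch.tensor([5, 12, 6])

ignored_index = start_logits.size(1)         # == seq_len, i.e. an invalid class id
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss)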
(see\n `input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n outputs = self.albert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n", "# coding=utf-8\n# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch REFORMER model. 
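# Editorial sketch (not part of the original file): AlbertForMultipleChoice above folds
# the choice dimension into the batch before the encoder and unfolds it for the softmax.
# Shapes are invented; the "encoder + classifier" is faked with random scores.
import torch

batch_size, num_choices, seq_len = 2, 4, 6
input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))

flat_input_ids = input_ids.view(-1, input_ids.size(-1))        # (batch * choices, seq_len)
assert flat_input_ids.shape == (batch_size * num_choices, seq_len)

flat_logits = torch.randn(batch_size * num_choices, 1)         # one score per flattened row
reshaped_logits = flat_logits.view(-1, num_choices)            # (batch, choices)
print(reshaped_logits.argmax(dim=-1))                          # predicted choice per example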
\"\"\"\n\nimport sys\nfrom collections import namedtuple\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom operator import mul\nfrom typing import List, Optional, Tuple\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.autograd.function import Function\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n DUMMY_INPUTS,\n DUMMY_MASK,\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n)\nfrom ...modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput\nfrom ...modeling_utils import PreTrainedModel, apply_chunking_to_forward\nfrom ...utils import logging\nfrom .configuration_reformer import ReformerConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"ReformerConfig\"\n_TOKENIZER_FOR_DOC = \"ReformerTokenizer\"\n\nREFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"google/reformer-crime-and-punishment\",\n \"google/reformer-enwik8\",\n # See all Reformer models at https://huggingface.co/models?filter=reformer\n]\n\n\n# Define named tuples for nn.Modules here\nLSHSelfAttentionOutput = namedtuple(\"LSHSelfAttentionOutput\", [\"hidden_states\", \"attention_probs\", \"buckets\"])\nLocalSelfAttentionOutput = namedtuple(\"LocalSelfAttentionOutput\", [\"hidden_states\", \"attention_probs\"])\nAttentionOutput = namedtuple(\"AttentionOutput\", [\"hidden_states\", \"attention_probs\", \"buckets\"])\nReformerOutput = namedtuple(\"ReformerOutput\", [\"hidden_states\", \"attn_output\", \"attention_probs\", \"buckets\"])\nReformerBackwardOutput = namedtuple(\n \"ReformerBackwardOutput\", [\"attn_output\", \"hidden_states\", \"grad_attn_output\", \"grad_hidden_states\"]\n)\nReformerEncoderOutput = namedtuple(\n \"ReformerEncoderOutput\",\n [\"hidden_states\", \"all_hidden_states\", \"all_attentions\", \"past_buckets_states\"],\n)\n\n\ndef _stable_argsort(vector, dim):\n # this function scales the vector so that torch.argsort is stable.\n # torch.argsort is not stable on its own\n scale_offset = torch.arange(vector.shape[dim], device=vector.device).view(1, 1, -1)\n scale_offset = scale_offset.expand(vector.shape)\n scaled_vector = vector.shape[dim] * vector + (scale_offset % vector.shape[dim])\n return torch.argsort(scaled_vector, dim=dim)\n\n\ndef _get_least_common_mult_chunk_len(config):\n attn_types = config.attn_layers\n attn_types_set = set(attn_types)\n if len(attn_types_set) == 1 and attn_types[0] == \"lsh\":\n return config.lsh_attn_chunk_length\n elif len(attn_types_set) == 1 and attn_types[0] == \"local\":\n return config.local_attn_chunk_length\n elif len(attn_types_set) == 2 and attn_types_set == set([\"lsh\", \"local\"]):\n return np.lcm(config.lsh_attn_chunk_length, config.local_attn_chunk_length)\n else:\n raise NotImplementedError(\n \"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {}. 
Select attn layer types from ['lsh', 'local'] only.\".format(\n config.attn_layers\n )\n )\n\n\ndef _get_min_chunk_len(config):\n attn_types = config.attn_layers\n attn_types_set = set(attn_types)\n if len(attn_types_set) == 1 and attn_types[0] == \"lsh\":\n return config.lsh_attn_chunk_length\n elif len(attn_types_set) == 1 and attn_types[0] == \"local\":\n return config.local_attn_chunk_length\n elif len(attn_types_set) == 2 and attn_types_set == set([\"lsh\", \"local\"]):\n return min(config.lsh_attn_chunk_length, config.local_attn_chunk_length)\n else:\n raise NotImplementedError(\n \"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {}. Select attn layer types from ['lsh', 'local'] only.\".format(\n config.attn_layers\n )\n )\n\n\nclass AxialPositionEmbeddings(nn.Module):\n \"\"\"\n Constructs axial position embeddings. Useful for very long input sequences to save memory and time.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.axial_pos_shape = config.axial_pos_shape\n self.axial_pos_embds_dim = config.axial_pos_embds_dim\n self.dropout = config.hidden_dropout_prob\n\n self.least_common_mult_chunk_length = _get_least_common_mult_chunk_len(config)\n self.weights = nn.ParameterList()\n\n assert (\n sum(self.axial_pos_embds_dim) == config.hidden_size\n ), \"Make sure that config.axial_pos_embds factors: {} sum to config.hidden_size: {}\".format(\n self.axial_pos_embds_dim, config.hidden_size\n )\n\n # create weights\n for axis, axial_pos_embd_dim in enumerate(self.axial_pos_embds_dim):\n # create expanded shapes\n ax_shape = [1] * len(self.axial_pos_shape)\n ax_shape[axis] = self.axial_pos_shape[axis]\n ax_shape = tuple(ax_shape) + (axial_pos_embd_dim,)\n\n # create tensor and init\n self.weights.append(nn.Parameter(torch.ones(ax_shape, dtype=torch.float32)))\n\n def forward(self, position_ids):\n # broadcast weights to correct shape\n batch_size = position_ids.shape[0]\n sequence_length = position_ids.shape[1]\n\n broadcasted_weights = [\n weight.expand((batch_size,) + self.axial_pos_shape + weight.shape[-1:]) for weight in self.weights\n ]\n\n if self.training is True:\n assert (\n reduce(mul, self.axial_pos_shape) == sequence_length\n ), \"If training, make sure that config.axial_pos_shape factors: {} multiply to sequence length. Got prod({}) != sequence_length: {}. 
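# Editorial sketch (not part of the original file): AxialPositionEmbeddings above stores
# one small parameter grid per axis instead of a full (seq_len, hidden_size) table.
# The numbers below are invented: axial_pos_shape=(4, 8), axial_pos_embds_dim=(3, 5).
import torch

d1, d2 = 4, 8                    # axial_pos_shape; d1 * d2 covers the padded sequence length
dim1, dim2 = 3, 5                # axial_pos_embds_dim; dim1 + dim2 == hidden_size

w1 = torch.randn(d1, 1, dim1)    # varies along axis 0 only
w2 = torch.randn(1, d2, dim2)    # varies along axis 1 only

table = torch.cat(
    [w1.expand(d1, d2, dim1), w2.expand(d1, d2, dim2)], dim=-1
).reshape(d1 * d2, dim1 + dim2)

# 4*3 + 8*5 = 52 parameters stand in for a dense 32 x 8 = 256 parameter table
print(table.shape)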
You might want to consider padding your sequence length to {} or changing config.axial_pos_shape.\".format(\n self.axial_pos_shape, self.axial_pos_shape, sequence_length, reduce(mul, self.axial_pos_shape)\n )\n if self.dropout > 0:\n weights = torch.cat(broadcasted_weights, dim=-1)\n # permute weights so that 2D correctly drops dims 1 and 2\n transposed_weights = weights.transpose(2, 1)\n # drop entire matrix of last two dims (prev dims 1 and 2)\n dropped_transposed_weights = nn.functional.dropout2d(\n transposed_weights, p=self.dropout, training=self.training\n )\n dropped_weights = dropped_transposed_weights.transpose(2, 1)\n\n position_encodings = torch.reshape(dropped_weights, (batch_size, sequence_length, -1))\n\n else:\n position_encodings = torch.cat(\n [torch.reshape(weight, (batch_size, sequence_length, -1)) for weight in broadcasted_weights],\n dim=-1,\n )\n\n else:\n assert (\n reduce(mul, self.axial_pos_shape) >= sequence_length\n ), \"Make sure that config.axial_pos_shape factors: {} multiply at least to max(sequence_length, least_common_mult_chunk_length): max({}, {})\".format(\n self.axial_pos_shape,\n sequence_length,\n self.least_common_mult_chunk_length,\n )\n\n # compute how many columns are needed\n max_position_id = position_ids.max().item()\n required_pos_encodings_columns = -(-(max_position_id + 1) // self.axial_pos_shape[1])\n\n # cut to columns that are needed\n position_encodings = torch.cat(\n [weight[:, :required_pos_encodings_columns] for weight in broadcasted_weights], dim=-1\n )\n position_encodings = torch.reshape(position_encodings, (batch_size, -1, position_encodings.shape[-1]))\n\n # select correct position encodings\n position_encodings = torch.cat(\n [\n torch.index_select(position_encodings[i], 0, position_ids[i]).unsqueeze(0)\n for i in range(batch_size)\n ],\n dim=0,\n )\n\n return position_encodings\n\n\nclass PositionEmbeddings(nn.Module):\n \"\"\"Constructs conventional position embeddings of shape `[max_pos_embeddings, hidden_size]`.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dropout = config.hidden_dropout_prob\n self.embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n\n def forward(self, position_ids):\n position_embeddings = self.embedding(position_ids)\n position_embeddings = nn.functional.dropout(position_embeddings, p=self.dropout, training=self.training)\n return position_embeddings\n\n\nclass ReformerEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.max_position_embeddings = config.max_position_embeddings\n self.dropout = config.hidden_dropout_prob\n\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)\n self.position_embeddings = (\n AxialPositionEmbeddings(config) if config.axial_pos_embds else PositionEmbeddings(config)\n )\n\n def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0):\n if input_ids is not None:\n input_shape = input_ids.size()\n device = input_ids.device\n else:\n input_shape = inputs_embeds.size()[:-1]\n device = inputs_embeds.device\n\n seq_length = input_shape[1]\n if position_ids is None:\n position_ids = torch.arange(\n start_idx_pos_encodings, start_idx_pos_encodings + seq_length, dtype=torch.long, device=device\n )\n position_ids = position_ids.unsqueeze(0).expand(input_shape)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n assert (\n 
position_ids.shape[-1] <= self.max_position_embeddings\n ), \"Sequence Length: {} has to be larger equal than config.max_position_embeddings: {}\".format(\n position_ids.shape[-1], self.max_position_embeddings\n )\n\n # dropout\n embeddings = nn.functional.dropout(inputs_embeds, p=self.dropout, training=self.training)\n\n # add positional embeddings\n position_embeddings = self.position_embeddings(position_ids)\n embeddings = embeddings + position_embeddings\n return embeddings\n\n\nclass EfficientAttentionMixin:\n \"\"\"\n A few utilities for nn.Modules in Reformer, to be used as a mixin.\n \"\"\"\n\n def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after):\n \"\"\"\n Used to implement attention between consecutive chunks.\n\n Args:\n vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...]\n num_chunks_before: chunks before current chunk to include in attention\n num_chunks_after: chunks after current chunk to include in attention\n\n Returns:\n tensor of shape [num_chunks, N * chunk_length, ...], where N = (1 + num_chunks_before + num_chunks_after).\n \"\"\"\n if num_chunks_before == 0 and num_chunks_after == 0:\n return vectors\n\n slices = []\n for i in range(-num_chunks_before, num_chunks_after + 1):\n if i == 0:\n slices.append(vectors)\n else:\n slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2))\n return torch.cat(slices, dim=3)\n\n def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size):\n \"\"\"\n splits hidden_size dim into attn_head_size and num_attn_heads\n \"\"\"\n new_x_shape = x.size()[:-1] + (num_attn_heads, attn_head_size)\n x = x.view(*new_x_shape)\n return x.transpose(2, 1)\n\n def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size):\n \"\"\"\n merges attn_head_size dim and num_attn_heads dim into hidden_size\n \"\"\"\n x = x.permute(0, 2, 1, 3)\n return torch.reshape(x, (x.size()[0], -1, num_attn_heads * attn_head_size))\n\n def _split_seq_length_dim_to(self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None):\n \"\"\"\n splits sequence length dim of vectors into `dim_factor_1` and `dim_factor_2` dims\n \"\"\"\n batch_size = vectors.shape[0]\n split_dim_shape = (batch_size, num_attn_heads, dim_factor_1, dim_factor_2)\n\n if len(vectors.shape) == 4:\n return torch.reshape(vectors, split_dim_shape + (attn_head_size,))\n elif len(vectors.shape) == 3:\n return torch.reshape(vectors, split_dim_shape)\n else:\n raise ValueError(\"Input vector rank should be one of [3, 4], but is: {}\".format(len(vectors.shape)))\n\n\nclass LSHSelfAttention(nn.Module, EfficientAttentionMixin):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.chunk_length = config.lsh_attn_chunk_length\n self.num_hashes = config.num_hashes\n self.num_buckets = config.num_buckets\n self.num_chunks_before = config.lsh_num_chunks_before\n self.num_chunks_after = config.lsh_num_chunks_after\n self.hash_seed = config.hash_seed\n self.is_decoder = config.is_decoder\n self.max_position_embeddings = config.max_position_embeddings\n\n self.dropout = config.lsh_attention_probs_dropout_prob\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = config.attention_head_size\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n self.hidden_size = config.hidden_size\n\n # projection matrices\n self.query_key = nn.Linear(self.hidden_size, self.all_head_size, bias=False)\n self.value = nn.Linear(self.hidden_size, 
self.all_head_size, bias=False)\n\n # save mask value here. Need fp32 and fp16 mask values\n self.register_buffer(\"self_mask_value_float16\", torch.tensor(-1e3))\n self.register_buffer(\"self_mask_value_float32\", torch.tensor(-1e5))\n self.register_buffer(\"mask_value_float16\", torch.tensor(-1e4))\n self.register_buffer(\"mask_value_float32\", torch.tensor(-1e9))\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n num_hashes=None,\n buckets=None,\n past_buckets_states=None,\n use_cache=False,\n output_attentions=False,\n **kwargs\n ):\n sequence_length = hidden_states.shape[1]\n batch_size = hidden_states.shape[0]\n\n # num hashes can optionally be overwritten by user\n num_hashes = num_hashes if num_hashes is not None else self.num_hashes\n\n do_cached_attention = use_cache and past_buckets_states[1] is not None\n\n # check if cache shall be used and that hidden states are already cached\n if do_cached_attention:\n assert (\n sequence_length == 1\n ), f\"At the moment, auto-regressive language generation is only possible one word at a time. Make sure that input sequence length {sequence_length} equals 1, when `past_buckets_states` is passed.\"\n past_buckets = past_buckets_states[0]\n past_states = past_buckets_states[1]\n\n # get query vector\n query_vectors = self.query_key(hidden_states)\n query_vectors = self._split_hidden_size_dim(\n query_vectors, self.num_attention_heads, self.attention_head_size\n )\n\n if past_buckets is not None:\n key_value_hidden_states, sorted_bucket_idx, buckets = self._get_relevant_hid_states_and_buckets(\n query_vectors=query_vectors,\n attention_mask=attention_mask,\n num_hashes=num_hashes,\n hidden_states=hidden_states,\n past_states=past_states,\n past_buckets=past_buckets,\n )\n\n query_key_vectors = self._query_per_attn_head(key_value_hidden_states)\n value_vectors = self._value_per_attn_head(key_value_hidden_states)\n\n # split key & value vectors by num hashes to apply\n # self attention on each separately\n query_key_vectors = self._split_seq_length_dim_to(\n query_key_vectors,\n num_hashes,\n -1,\n self.num_attention_heads,\n self.attention_head_size,\n )\n value_vectors = self._split_seq_length_dim_to(\n value_vectors,\n num_hashes,\n -1,\n self.num_attention_heads,\n self.attention_head_size,\n )\n # repeat query vectors across hash dimension\n query_vectors = query_vectors.unsqueeze(2).repeat(1, 1, num_hashes, 1, 1)\n else:\n key_value_hidden_states = torch.cat([past_states, hidden_states], dim=1)\n\n query_key_vectors = self.query_key(key_value_hidden_states)\n value_vectors = self.value(key_value_hidden_states)\n\n else:\n # project hidden_states to query_key and value\n query_vectors = None\n query_key_vectors = self.query_key(hidden_states)\n value_vectors = self.value(hidden_states)\n\n # if query key is not already split\n if not do_cached_attention or past_buckets is None:\n query_key_vectors = self._split_hidden_size_dim(\n query_key_vectors, self.num_attention_heads, self.attention_head_size\n )\n value_vectors = self._split_hidden_size_dim(\n value_vectors, self.num_attention_heads, self.attention_head_size\n )\n\n # cache buckets for next incremental decoding\n if do_cached_attention and past_buckets is None and key_value_hidden_states.shape[1] >= self.chunk_length:\n buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)\n\n # free memory\n del hidden_states\n\n assert (\n query_key_vectors.shape[-1] == self.attention_head_size\n ), \"last dim of query_key_vectors is {} but 
should be {}.\".format(\n query_key_vectors.shape[-1], self.attention_head_size\n )\n assert (\n value_vectors.shape[-1] == self.attention_head_size\n ), \"last dim of value_vectors is {} but should be {}.\".format(\n value_vectors.shape[-1], self.attention_head_size\n )\n\n do_standard_self_attention = (sequence_length <= self.chunk_length) or (\n use_cache and past_buckets_states[1] is not None\n )\n # LSH attention only makes sense if chunked attention should be performed\n if not do_standard_self_attention:\n # set `num_buckets` on the fly, recommended way to do it\n if self.num_buckets is None:\n self._set_num_buckets(sequence_length)\n\n # use cached buckets for backprop only\n if buckets is None:\n # hash query key vectors into buckets\n buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)\n else:\n # make sure buckets has correct shape for LSH attention\n buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes * sequence_length)\n\n assert (\n int(buckets.shape[-1]) == num_hashes * sequence_length\n ), \"last dim of buckets is {}, but should be {}\".format(buckets.shape[-1], num_hashes * sequence_length)\n\n sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx(\n sequence_length, buckets, num_hashes\n )\n\n # make sure bucket idx is not longer then sequence length\n sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length\n\n # cluster query key value vectors according to hashed buckets\n query_key_vectors = self._gather_by_expansion(query_key_vectors, sorted_bucket_idx_per_hash, num_hashes)\n value_vectors = self._gather_by_expansion(value_vectors, sorted_bucket_idx_per_hash, num_hashes)\n query_key_vectors = self._split_seq_length_dim_to(\n query_key_vectors,\n -1,\n self.chunk_length,\n self.num_attention_heads,\n self.attention_head_size,\n )\n value_vectors = self._split_seq_length_dim_to(\n value_vectors,\n -1,\n self.chunk_length,\n self.num_attention_heads,\n self.attention_head_size,\n )\n\n if self.chunk_length is None:\n assert (\n self.num_chunks_before == 0 and self.num_chunks_after == 0\n ), \"If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0.\"\n elif do_cached_attention and past_buckets is not None:\n # use max sequence length\n sorted_bucket_idx_per_hash = sorted_bucket_idx\n else:\n # get sequence length indices\n sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=query_key_vectors.device).repeat(\n batch_size, self.num_attention_heads, 1\n )\n\n # scale key vectors\n key_vectors = self._len_and_dim_norm(query_key_vectors)\n\n # set query_vectors to query key vectors if LSH self attention\n query_vectors = query_vectors if query_vectors is not None else query_key_vectors\n\n # free memory\n del query_key_vectors\n\n # get attention probs\n out_vectors, logits, attention_probs = self._attend(\n query_vectors=query_vectors,\n key_vectors=key_vectors,\n value_vectors=value_vectors,\n sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash,\n attention_mask=attention_mask,\n head_mask=head_mask,\n do_standard_self_attention=do_standard_self_attention,\n do_cached_attention=do_cached_attention,\n )\n\n # free memory\n del key_vectors, value_vectors\n\n # re-order out_vectors and logits\n if not do_standard_self_attention:\n # sort clusters back to correct ordering\n out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx)\n\n if not 
do_standard_self_attention or (do_cached_attention and past_buckets is not None):\n # sum up all hash rounds\n if num_hashes > 1:\n out_vectors = self._split_seq_length_dim_to(\n out_vectors,\n num_hashes,\n sequence_length,\n self.num_attention_heads,\n self.attention_head_size,\n )\n logits = self._split_seq_length_dim_to(\n logits,\n num_hashes,\n sequence_length,\n self.num_attention_heads,\n self.attention_head_size,\n ).unsqueeze(-1)\n\n probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True))\n out_vectors = torch.sum(out_vectors * probs_vectors, dim=2)\n # free memory\n del probs_vectors\n\n # free memory\n del logits\n\n assert out_vectors.shape == (\n batch_size,\n self.num_attention_heads,\n sequence_length,\n self.attention_head_size,\n ), \"out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length, config.attention_head_size]`.\"\n\n out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)\n\n if output_attentions is False:\n attention_probs = ()\n\n if buckets is not None:\n buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes, -1)\n\n return LSHSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs, buckets=buckets)\n\n def _query_per_attn_head(self, hidden_states):\n per_head_query_key = self.query_key.weight.reshape(\n self.num_attention_heads, self.attention_head_size, self.hidden_size\n ).transpose(-2, -1)\n # only relevant for inference and no bias => we can use einsum here\n query_key_vectors = torch.einsum(\"balh,ahr->balr\", hidden_states, per_head_query_key)\n return query_key_vectors\n\n def _value_per_attn_head(self, hidden_states):\n per_head_value = self.value.weight.reshape(\n self.num_attention_heads, self.attention_head_size, self.hidden_size\n ).transpose(-2, -1)\n # only relevant for inference and no bias => we can use einsum here\n value_vectors = torch.einsum(\"balh,ahr->balr\", hidden_states, per_head_value)\n return value_vectors\n\n def _hash_vectors(self, vectors, num_hashes, attention_mask, increase_num_buckets=False):\n batch_size = vectors.shape[0]\n\n # See https://arxiv.org/pdf/1509.02897.pdf\n # We sample a different random rotation for each round of hashing to\n # decrease the probability of hash misses.\n if isinstance(self.num_buckets, int):\n assert (\n self.num_buckets % 2 == 0\n ), \"There should be an even number of bucktes, but `self.num_bucktes`: {}\".format(self.num_buckets)\n rotation_size = self.num_buckets\n num_buckets = self.num_buckets\n else:\n # Factorize the hash if self.num_buckets is a list or tuple\n rotation_size, num_buckets = 0, 1\n for bucket_factor in self.num_buckets:\n assert bucket_factor % 2 == 0, \"The number of buckets should be even, but `num_bucket`: {}\".format(\n bucket_factor\n )\n rotation_size = rotation_size + bucket_factor\n num_buckets = num_buckets * bucket_factor\n\n # remove gradient\n vectors = vectors.detach()\n\n if self.hash_seed is not None:\n # for determinism\n torch.manual_seed(self.hash_seed)\n\n rotations_shape = (self.num_attention_heads, vectors.shape[-1], num_hashes, rotation_size // 2)\n # create a random self.attention_head_size x num_hashes x num_buckets/2\n random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype)\n # Output dim: Batch_Size x Num_Attn_Heads x Num_Hashes x Seq_Len x Num_Buckets/2\n rotated_vectors = torch.einsum(\"bmtd,mdhr->bmhtr\", vectors, random_rotations)\n\n if 
isinstance(self.num_buckets, int) or len(self.num_buckets) == 1:\n rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1)\n buckets = torch.argmax(rotated_vectors, dim=-1)\n else:\n # Get the buckets for them and combine.\n buckets, cur_sum, cur_product = None, 0, 1\n for bucket_factor in self.num_buckets:\n rotated_vectors_factor = rotated_vectors[..., cur_sum : cur_sum + (bucket_factor // 2)]\n cur_sum = cur_sum + bucket_factor // 2\n rotated_vectors_factor = torch.cat([rotated_vectors_factor, -rotated_vectors_factor], dim=-1)\n if buckets is None:\n buckets = torch.argmax(rotated_vectors_factor, dim=-1)\n else:\n buckets = buckets + (cur_product * torch.argmax(rotated_vectors_factor, dim=-1))\n\n cur_product = cur_product * bucket_factor\n\n if attention_mask is not None and (attention_mask.sum().item() < batch_size * attention_mask.shape[-1]):\n # add an extra bucket for padding tokens only\n num_buckets = num_buckets + 1\n # assign padding tokens extra bucket\n buckets_mask = attention_mask.to(torch.uint8)[:, None, None, :].expand(buckets.shape)\n buckets = torch.where(\n buckets_mask, buckets, torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device)\n )\n elif increase_num_buckets:\n num_buckets = num_buckets + 1\n\n # buckets is now (Batch_size x Num_Attn_Heads x Num_Hashes x Seq_Len).\n # Next we add offsets so that bucket numbers from different hashing rounds don't overlap.\n offsets = torch.arange(num_hashes, device=vectors.device)\n offsets = (offsets * num_buckets).view((1, 1, -1, 1))\n\n # expand to batch size and num attention heads\n offsets = offsets.expand((batch_size, self.num_attention_heads) + offsets.shape[-2:])\n offset_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3)\n\n return offset_buckets\n\n def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, sequence_length, buckets, num_hashes):\n # no gradients are needed\n with torch.no_grad():\n # hash-based sort\n sorted_bucket_idx = _stable_argsort(buckets, dim=-1)\n\n # create simple indices to scatter to, to have undo sort\n indices = (\n torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device)\n .view(1, 1, -1)\n .expand(sorted_bucket_idx.shape)\n )\n\n # get undo sort\n undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size())\n undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices)\n\n return sorted_bucket_idx, undo_sorted_bucket_idx\n\n def _set_num_buckets(self, sequence_length):\n # `num_buckets` should be set to 2 * sequence_length // chunk_length as recommended in paper\n num_buckets_pow_2 = (2 * (sequence_length // self.chunk_length)).bit_length() - 1\n # make sure buckets are power of 2\n num_buckets = 2 ** num_buckets_pow_2\n\n # factorize `num_buckets` if `num_buckets` becomes too large\n num_buckets_limit = 2 * max(\n int((self.max_position_embeddings // self.chunk_length) ** (0.5)),\n self.chunk_length,\n )\n if num_buckets > num_buckets_limit:\n num_buckets = [2 ** (num_buckets_pow_2 // 2), 2 ** (num_buckets_pow_2 - num_buckets_pow_2 // 2)]\n\n logger.warning(\"config.num_buckets is not set. 
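# Editorial sketch (not part of the original file): _hash_vectors above implements the
# angular LSH scheme from https://arxiv.org/pdf/1509.02897.pdf; one hashing round with a
# single random rotation looks roughly like this (shapes invented, no batching or heads).
import torch

torch.manual_seed(0)
num_buckets, head_dim, seq_len = 8, 16, 10
vectors = torch.randn(seq_len, head_dim)

rotations = torch.randn(head_dim, num_buckets // 2)            # random projection
rotated = vectors @ rotations                                  # (seq_len, num_buckets // 2)
buckets = torch.argmax(torch.cat([rotated, -rotated], dim=-1), dim=-1)
print(buckets)   # bucket id in [0, num_buckets); similar vectors tend to collide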
Setting config.num_buckets to {}...\".format(num_buckets))\n\n # set num buckets in config to be properly saved\n self.config.num_buckets = num_buckets\n self.num_buckets = num_buckets\n\n def _attend(\n self,\n query_vectors,\n key_vectors,\n value_vectors,\n sorted_bucket_idx_per_hash,\n attention_mask,\n head_mask,\n do_standard_self_attention,\n do_cached_attention,\n ):\n # look at previous and following chunks if chunked attention\n if not do_standard_self_attention:\n key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)\n value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)\n\n # get logits and dots\n # (BS, NumAttn, NumHash x NumChunk, Chunk_L x Hidden),(BS, NumAttn, NumHash x NumChunk, Chunk_L * (Num_bef + Num_aft + 1) x Hidden) -> (BS, NumAttn, NumHash x NumChunk, Chunk_L, Chunk_L * (1 + Num_bef + Num_aft))\n query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))\n\n # free memory\n del query_vectors, key_vectors\n\n # if chunked attention split bucket idxs to query and key\n if not do_standard_self_attention:\n query_bucket_idx = self._split_seq_length_dim_to(\n sorted_bucket_idx_per_hash, -1, self.chunk_length, self.num_attention_heads\n )\n key_value_bucket_idx = self._look_adjacent(query_bucket_idx, self.num_chunks_before, self.num_chunks_after)\n elif do_cached_attention and query_key_dots.ndim > 4:\n key_value_bucket_idx = sorted_bucket_idx_per_hash\n query_bucket_idx = (\n key_value_bucket_idx.new_ones(key_value_bucket_idx.shape[:-1] + (1,)) * key_value_bucket_idx.max()\n )\n elif do_cached_attention and query_key_dots.ndim <= 4:\n query_bucket_idx = (query_key_dots.shape[-1] - 1) * torch.ones_like(query_key_dots)[:, :, :, -1]\n key_value_bucket_idx = torch.arange(\n query_key_dots.shape[-1], dtype=torch.long, device=query_key_dots.device\n )[None, None, :].expand(query_bucket_idx.shape[:2] + (-1,))\n else:\n query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash\n\n # get correct mask values depending on precision\n if query_key_dots.dtype == torch.float16:\n self_mask_value = self.self_mask_value_float16.half()\n mask_value = self.mask_value_float16.half()\n else:\n self_mask_value = self.self_mask_value_float32\n mask_value = self.mask_value_float32\n\n if not do_cached_attention:\n mask = self._compute_attn_mask(\n query_bucket_idx,\n key_value_bucket_idx,\n attention_mask,\n query_key_dots.shape,\n do_standard_self_attention,\n )\n\n if mask is not None:\n query_key_dots = torch.where(mask, query_key_dots, mask_value)\n\n # free memory\n del mask\n\n # Self mask is ALWAYS applied.\n # From the reformer paper (https://arxiv.org/pdf/2001.04451.pdf):\n # \" While attention to the future is not allowed, typical implementations of the\n # Transformer do allow a position to attend to itself.\n # Such behavior is undesirable in a shared-QK formulation because the dot-product\n # of a query vector with itself will almost always be greater than the dot product of a\n # query vector with a vector at another position. We therefore modify the masking\n # to forbid a token from attending to itself, except in situations\n # where a token has no other valid attention targets (e.g. 
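# Editorial sketch (not part of the original file): the masking in _attend above replaces
# forbidden scores with a large negative value before normalization; the shared-QK self
# mask uses a milder value so a token falls back to itself only when nothing else is valid.
import torch

seq_len = 4
dots = torch.randn(seq_len, seq_len)
mask_value = torch.tensor(-1e9)        # causal / padding mask value
self_mask_value = torch.tensor(-1e5)   # self mask value (milder, acts as a fallback)

causal = torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool))
not_self = ~torch.eye(seq_len, dtype=torch.bool)

dots = torch.where(causal, dots, mask_value)
dots = torch.where(not_self, dots, self_mask_value)

probs = torch.softmax(dots, dim=-1)
print(probs[0])   # the first token has no other valid target, so it attends to itself
print(probs[2])   # later tokens effectively ignore themselves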
the first token in a sequence) \"\n\n self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)).to(\n query_bucket_idx.device\n )\n\n # apply self_mask\n query_key_dots = torch.where(self_mask, query_key_dots, self_mask_value)\n\n # free memory\n del self_mask\n\n logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)\n # dots shape is `[batch_size, num_attn_heads, num_hashes * seq_len // chunk_length, chunk_length, chunk_length * (1 + num_chunks_before + num_chunks_after)]`\n attention_probs = torch.exp(query_key_dots - logits)\n\n # free memory\n del query_key_dots\n\n # dropout\n attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n # attend values\n out_vectors = torch.matmul(attention_probs, value_vectors)\n\n # free memory\n del value_vectors\n\n # merge chunk length\n if out_vectors.ndim > 4:\n logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1)\n out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)\n\n return out_vectors, logits, attention_probs\n\n def _compute_attn_mask(\n self, query_indices, key_indices, attention_mask, query_key_dot_shape, do_standard_self_attention\n ):\n # attention mask for LSH\n if attention_mask is not None:\n # if chunked attention, the attention mask has to correspond to LSH order\n attention_mask = attention_mask.to(torch.uint8)[:, None, :]\n if not do_standard_self_attention:\n # expand attn_mask to fit with key_value_bucket_idx shape\n attention_mask = attention_mask[:, None, :]\n attention_mask = attention_mask.expand(query_indices.shape[:-1] + (-1,))\n # extract attention mask from LSH sorted key_indices\n attention_mask = torch.gather(attention_mask, -1, key_indices)\n\n attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dot_shape)\n\n # Causal mask\n if self.is_decoder is True:\n causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device)\n\n # add attention mask if not None\n if attention_mask is not None:\n attention_mask = causal_mask * attention_mask\n else:\n attention_mask = causal_mask\n\n return attention_mask\n\n def _get_relevant_hid_states_and_buckets(\n self, query_vectors, attention_mask, num_hashes, hidden_states, past_states, past_buckets\n ):\n # concat hidden states\n hidden_states = torch.cat([past_states, hidden_states], dim=1)\n\n # batch_size hidden\n batch_size = hidden_states.shape[0]\n sequence_length = hidden_states.shape[1]\n\n # check if cached buckets include pad bucket\n max_bucket = self.num_buckets if isinstance(self.num_buckets, int) else reduce(mul, self.num_buckets)\n\n # if pad bucket was cached => need to increase num buckets for caching\n increase_num_buckets = past_buckets.max() > num_hashes * max_bucket - 1\n\n # retrieve query buckets\n query_buckets = self._hash_vectors(\n query_vectors, num_hashes, attention_mask, increase_num_buckets=increase_num_buckets\n )\n\n # concat buckets\n concat_buckets = torch.cat([past_buckets, query_buckets.unsqueeze(-1)], dim=-1)\n\n # hash-based sort\n bucket_idx = _stable_argsort(concat_buckets, dim=-1)\n\n # bucket_idx has shape: BatchSize x NumAttnHeads x NumHashes x SequenceLength\n assert bucket_idx.shape == (\n batch_size,\n self.num_attention_heads,\n num_hashes,\n sequence_length,\n ), f\"bucket_idx should have shape {(batch_size, self.num_attention_heads, num_hashes, sequence_length)}, but has shape 
{bucket_idx.shape}.\"\n\n # find indices of new bucket indices\n relevant_bucket_idx = (bucket_idx == (bucket_idx.shape[-1] - 1)).nonzero()\n\n # expand relevant bucket indices to its chunks\n relevant_bucket_idx_chunk = self._expand_to_indices_in_relevant_chunk(relevant_bucket_idx, sequence_length)\n relevant_bucket_idx_chunk = bucket_idx[tuple(relevant_bucket_idx_chunk.transpose(0, 1))]\n\n # adapt bucket_idx for batch and hidden states for index select\n bucket_idx_batch_offset = sequence_length * (\n batch_size\n * torch.arange(relevant_bucket_idx_chunk.shape[-1], device=hidden_states.device, dtype=torch.long)\n // relevant_bucket_idx_chunk.shape[-1]\n )\n\n # add batch offset\n relevant_bucket_idx_chunk_all_batch = relevant_bucket_idx_chunk + bucket_idx_batch_offset\n hidden_states = hidden_states.reshape((-1, self.hidden_size))\n\n # select all relevant hidden states\n relevant_hidden_states = hidden_states.index_select(0, relevant_bucket_idx_chunk_all_batch)\n\n # reshape hidden states and bucket_idx to correct output\n relevant_hidden_states = relevant_hidden_states.reshape(\n batch_size, self.num_attention_heads, -1, self.hidden_size\n )\n relevant_bucket_idx_chunk = relevant_bucket_idx_chunk.reshape(\n batch_size, self.num_attention_heads, num_hashes, -1\n )\n\n assert (\n relevant_hidden_states.shape[2]\n == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes\n ), f\"There should be {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes} `hidden_states`, there are {relevant_hidden_states.shape[2]} `hidden_states`.\"\n\n assert (\n relevant_bucket_idx_chunk.shape[-1]\n == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length\n ), f\"There should be {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length} `hidden_states`, there are {relevant_bucket_idx_chunk.shape[-1]} `bucket_idx`.\"\n\n return relevant_hidden_states, relevant_bucket_idx_chunk, query_buckets\n\n def _expand_to_indices_in_relevant_chunk(self, indices, sequence_length):\n # get relevant indices of where chunk starts and its size\n start_indices_chunk = ((indices[:, -1] // self.chunk_length) - self.num_chunks_before) * self.chunk_length\n total_chunk_size = self.chunk_length * (1 + self.num_chunks_before + self.num_chunks_after)\n\n # expand start indices and add correct chunk offset via arange\n expanded_start_indices = start_indices_chunk.unsqueeze(-1).expand(indices.shape[0], total_chunk_size)\n chunk_sequence_indices = expanded_start_indices + torch.arange(\n total_chunk_size, device=indices.device, dtype=torch.long\n ).unsqueeze(0).expand(indices.shape[0], total_chunk_size)\n\n # make sure that circular logic holds via % seq len\n chunk_sequence_indices = chunk_sequence_indices.flatten() % sequence_length\n\n # expand indices and set indices correctly\n indices = indices.unsqueeze(1).expand((indices.shape[0], total_chunk_size, -1)).flatten(0, 1).clone()\n indices[:, -1] = chunk_sequence_indices\n\n return indices\n\n def _len_and_dim_norm(self, vectors):\n \"\"\"\n length and attention head size dim normalization\n \"\"\"\n vectors = self._len_norm(vectors)\n vectors = vectors * torch.rsqrt(\n torch.tensor(self.attention_head_size, device=vectors.device, dtype=vectors.dtype)\n )\n return vectors\n\n def _len_norm(self, x, epsilon=1e-6):\n \"\"\"\n length normalization\n \"\"\"\n variance = torch.mean(x ** 2, -1, keepdim=True)\n norm_x = x * torch.rsqrt(variance + epsilon)\n return norm_x\n\n def 
_gather_by_expansion(self, vectors, idxs, num_hashes):\n \"\"\"\n expand dims of idxs and vectors for all hashes and gather\n \"\"\"\n expanded_idxs = idxs.unsqueeze(-1).expand(-1, -1, -1, self.attention_head_size)\n vectors = vectors.repeat(1, 1, num_hashes, 1)\n return torch.gather(vectors, 2, expanded_idxs)\n\n\nclass ReverseSort(Function):\n \"\"\"\n After chunked attention is applied which sorted clusters, original ordering has to be restored. Since customized\n backward function is used for Reformer, the gradients of the output vectors have to be explicitly sorted here.\n \"\"\"\n\n @staticmethod\n def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx):\n # save sorted_bucket_idx for backprop\n with torch.no_grad():\n ctx.sorted_bucket_idx = sorted_bucket_idx\n\n # undo sort to have correct order for next layer\n expanded_undo_sort_indices = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape)\n out_vectors = torch.gather(out_vectors, 2, expanded_undo_sort_indices)\n logits = torch.gather(logits, 2, undo_sorted_bucket_idx)\n return out_vectors, logits\n\n @staticmethod\n def backward(ctx, grad_out_vectors, grad_logits):\n # get parameters saved in ctx\n sorted_bucket_idx = ctx.sorted_bucket_idx\n\n expanded_sort_indices = sorted_bucket_idx.unsqueeze(-1).expand(grad_out_vectors.shape)\n # reverse sort of forward\n grad_out_vectors = torch.gather(grad_out_vectors, 2, expanded_sort_indices)\n grad_logits = torch.gather(grad_logits, 2, sorted_bucket_idx)\n\n # return grad and `None` fillers for last 2 forward args\n return grad_out_vectors, grad_logits, None, None\n\n\nclass LocalSelfAttention(nn.Module, EfficientAttentionMixin):\n def __init__(self, config):\n super().__init__()\n\n self.num_attention_heads = config.num_attention_heads\n self.chunk_length = config.local_attn_chunk_length\n self.num_chunks_before = config.local_num_chunks_before\n self.num_chunks_after = config.local_num_chunks_after\n self.is_decoder = config.is_decoder\n self.pad_token_id = config.pad_token_id\n\n self.attention_head_size = config.attention_head_size\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n self.hidden_size = config.hidden_size\n\n # projection matrices\n self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=False)\n self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=False)\n self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False)\n\n self.dropout = config.local_attention_probs_dropout_prob\n\n # save mask value here\n self.register_buffer(\"mask_value_float16\", torch.tensor(-1e4))\n self.register_buffer(\"mask_value_float32\", torch.tensor(-1e9))\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n past_buckets_states=None,\n use_cache=False,\n output_attentions=False,\n **kwargs\n ):\n sequence_length = hidden_states.shape[1]\n batch_size = hidden_states.shape[0]\n\n # check if cache shall be used and that hidden states are already cached\n if use_cache and past_buckets_states[1] is not None:\n assert (\n past_buckets_states[0] is None\n ), \"LocalSelfAttention should not make use of `buckets`. 
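# Editorial sketch (not part of the original file): the sorted/undo-sorted index pair used
# by ReverseSort and _get_sorted_bucket_idx_and_undo_sorted_bucket_idx above boils down to
# an argsort plus a scatter that builds the inverse permutation (toy 1-D version).
import torch

buckets = torch.tensor([3, 1, 2, 1, 0])
sorted_idx = torch.argsort(buckets)                            # permutation that sorts buckets

undo_idx = torch.empty_like(sorted_idx)
undo_idx.scatter_(0, sorted_idx, torch.arange(len(buckets)))   # inverse permutation

values = torch.tensor([10.0, 11.0, 12.0, 13.0, 14.0])
clustered = values.gather(0, sorted_idx)                       # group values by bucket
restored = clustered.gather(0, undo_idx)                       # restore original ordering
assert torch.equal(restored, values)
print(sorted_idx, undo_idx)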
There seems to be an error when caching hidden_states_and_buckets.\"\n key_value_hidden_states = self._retrieve_relevant_hidden_states(\n past_buckets_states[1], self.chunk_length, self.num_chunks_before\n )\n key_value_hidden_states = torch.cat([key_value_hidden_states, hidden_states], dim=1)\n\n # only query vector for last token\n query_vectors = self.query(hidden_states)\n # compute key and value for relevant chunk\n key_vectors = self.key(key_value_hidden_states)\n value_vectors = self.value(key_value_hidden_states)\n\n # free memory\n del key_value_hidden_states\n else:\n # project hidden_states to query, key and value\n query_vectors = self.query(hidden_states)\n key_vectors = self.key(hidden_states)\n value_vectors = self.value(hidden_states)\n\n # split last dim into `config.num_attention_heads` and `config.attention_head_size`\n query_vectors = self._split_hidden_size_dim(query_vectors, self.num_attention_heads, self.attention_head_size)\n key_vectors = self._split_hidden_size_dim(key_vectors, self.num_attention_heads, self.attention_head_size)\n value_vectors = self._split_hidden_size_dim(value_vectors, self.num_attention_heads, self.attention_head_size)\n\n assert (\n query_vectors.shape[-1] == self.attention_head_size\n ), \"last dim of query_key_vectors is {} but should be {}.\".format(\n query_vectors.shape[-1], self.attention_head_size\n )\n assert (\n key_vectors.shape[-1] == self.attention_head_size\n ), \"last dim of query_key_vectors is {} but should be {}.\".format(\n key_vectors.shape[-1], self.attention_head_size\n )\n assert (\n value_vectors.shape[-1] == self.attention_head_size\n ), \"last dim of query_key_vectors is {} but should be {}.\".format(\n value_vectors.shape[-1], self.attention_head_size\n )\n\n if self.chunk_length is None:\n assert (\n self.num_chunks_before == 0 and self.num_chunks_after == 0\n ), \"If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0.\"\n\n # normalize key vectors\n key_vectors = key_vectors / torch.sqrt(\n torch.tensor(self.attention_head_size, device=key_vectors.device, dtype=key_vectors.dtype)\n )\n\n # get sequence length indices\n indices = torch.arange(sequence_length, device=query_vectors.device).repeat(\n batch_size, self.num_attention_heads, 1\n )\n\n # if one should do normal n^2 self-attention\n do_standard_self_attention = sequence_length <= self.chunk_length\n\n # if input should be chunked\n if not do_standard_self_attention:\n # chunk vectors\n # B x Num_Attn_Head x Seq_Len // chunk_len x chunk_len x attn_head_size\n query_vectors = self._split_seq_length_dim_to(\n query_vectors,\n -1,\n self.chunk_length,\n self.num_attention_heads,\n self.attention_head_size,\n )\n key_vectors = self._split_seq_length_dim_to(\n key_vectors,\n -1,\n self.chunk_length,\n self.num_attention_heads,\n self.attention_head_size,\n )\n value_vectors = self._split_seq_length_dim_to(\n value_vectors,\n -1,\n self.chunk_length,\n self.num_attention_heads,\n self.attention_head_size,\n )\n\n # chunk indices\n query_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads)\n key_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads)\n\n # append chunks before and after\n key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)\n value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)\n key_indices = 
self._look_adjacent(key_indices, self.num_chunks_before, self.num_chunks_after)\n else:\n query_indices = key_indices = indices\n\n # query-key matmul: QK^T\n query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))\n\n # free memory\n del query_vectors, key_vectors\n\n mask = self._compute_attn_mask(\n query_indices, key_indices, attention_mask, query_key_dots.shape, do_standard_self_attention\n )\n\n if mask is not None:\n # get mask tensor depending on half precision or not\n if query_key_dots.dtype == torch.float16:\n mask_value = self.mask_value_float16.half()\n else:\n mask_value = self.mask_value_float32\n\n query_key_dots = torch.where(mask, query_key_dots, mask_value)\n\n # free memory\n del mask\n\n # softmax\n logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)\n attention_probs = torch.exp(query_key_dots - logits)\n\n # free memory\n del logits\n\n # dropout\n attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n # attend values\n out_vectors = torch.matmul(attention_probs, value_vectors)\n\n # free memory\n del value_vectors\n\n # merge chunk length\n if not do_standard_self_attention:\n out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)\n\n assert out_vectors.shape == (\n batch_size,\n self.num_attention_heads,\n sequence_length,\n self.attention_head_size,\n )\n\n out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)\n\n if output_attentions is False:\n attention_probs = ()\n\n return LocalSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs)\n\n def _compute_attn_mask(\n self, query_indices, key_indices, attention_mask, query_key_dots_shape, do_standard_self_attention\n ):\n\n # chunk attention mask and look before and after\n if attention_mask is not None:\n attention_mask = attention_mask.to(torch.uint8)[:, None, :]\n\n if not do_standard_self_attention:\n attention_mask = self._split_seq_length_dim_to(attention_mask, -1, self.chunk_length, 1)\n attention_mask = self._look_adjacent(attention_mask, self.num_chunks_before, self.num_chunks_after)\n # create attn_mask\n attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dots_shape)\n\n # Causal mask\n if self.is_decoder is True:\n causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device)\n\n # add attention mask if not None\n if attention_mask is not None:\n attention_mask = causal_mask * attention_mask\n else:\n attention_mask = causal_mask\n\n return attention_mask\n\n @staticmethod\n def _retrieve_relevant_hidden_states(previous_hidden_states, chunk_length, num_chunks_before):\n start_position = ((previous_hidden_states.shape[1] // chunk_length) - num_chunks_before) * chunk_length\n return previous_hidden_states[:, start_position:]\n\n\nclass ReformerSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n all_head_size = config.num_attention_heads * config.attention_head_size\n self.dropout = config.hidden_dropout_prob\n\n self.dense = nn.Linear(all_head_size, config.hidden_size, bias=False)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n return hidden_states\n\n\nclass ReformerAttention(nn.Module):\n def __init__(self, config, layer_id=0):\n 
super().__init__()\n self.layer_id = layer_id\n self.attn_layers = config.attn_layers\n\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n if len(set(self.attn_layers)) == 1 and self.attn_layers[0] == \"lsh\":\n self.self_attention = LSHSelfAttention(config)\n elif len(set(self.attn_layers)) == 1 and self.attn_layers[0] == \"local\":\n self.self_attention = LocalSelfAttention(config)\n elif len(set(self.attn_layers)) == 2 and set(self.attn_layers) == set([\"lsh\", \"local\"]):\n # get correct attn layers\n if self.attn_layers[self.layer_id] == \"lsh\":\n self.self_attention = LSHSelfAttention(config)\n else:\n self.self_attention = LocalSelfAttention(config)\n else:\n raise NotImplementedError(\n \"Only attn layer types 'lsh' and 'local' exist, but got `config.attn_layers`: {}. Select attn layer types from ['lsh', 'local'] only.\".format(\n self.attn_layers\n )\n )\n self.output = ReformerSelfOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n num_hashes=None,\n past_buckets_states=None,\n use_cache=False,\n orig_sequence_length=None,\n output_attentions=False,\n buckets=None,\n ):\n hidden_states = self.layer_norm(hidden_states)\n\n # make sure cached hidden states is set to None for backward pass\n if past_buckets_states is not None:\n past_buckets_states_layer = past_buckets_states[self.layer_id]\n else:\n past_buckets_states_layer = None\n\n # use cached buckets for backprob if buckets not None for LSHSelfAttention\n self_attention_outputs = self.self_attention(\n hidden_states=hidden_states,\n head_mask=head_mask,\n attention_mask=attention_mask,\n num_hashes=num_hashes,\n past_buckets_states=past_buckets_states_layer,\n use_cache=use_cache,\n output_attentions=output_attentions,\n buckets=buckets,\n )\n\n # add buckets if necessary\n if hasattr(self_attention_outputs, \"buckets\"):\n buckets = self_attention_outputs.buckets\n else:\n buckets = None\n\n # cache hidden states for future use\n if use_cache:\n if past_buckets_states[self.layer_id][0] is None:\n # padded input should not be cached\n past_buckets = (\n buckets[:, :, :, :orig_sequence_length]\n if (buckets is not None and orig_sequence_length > 1)\n else buckets\n )\n else:\n past_buckets = torch.cat([past_buckets_states[self.layer_id][0], buckets], dim=-1)\n\n if past_buckets_states[self.layer_id][1] is None:\n # padded input should not be cached\n past_states = hidden_states[:, :orig_sequence_length]\n else:\n past_states = torch.cat([past_buckets_states[self.layer_id][1], hidden_states], dim=1)\n\n past_buckets_states[self.layer_id] = (past_buckets, past_states)\n # compute attention feed forward output\n attention_output = self.output(self_attention_outputs.hidden_states)\n\n return AttentionOutput(\n hidden_states=attention_output,\n attention_probs=self_attention_outputs.attention_probs,\n buckets=buckets,\n )\n\n\nclass ReformerFeedForwardDense(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dropout = config.hidden_dropout_prob\n\n if isinstance(config.hidden_act, str):\n self.act_fn = ACT2FN[config.hidden_act]\n else:\n self.act_fn = config.hidden_act\n\n self.dense = nn.Linear(config.hidden_size, config.feed_forward_size)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = self.act_fn(hidden_states)\n return hidden_states\n\n\nclass ReformerFeedForwardOutput(nn.Module):\n 
def __init__(self, config):\n super().__init__()\n self.dropout = config.hidden_dropout_prob\n\n self.dense = nn.Linear(config.feed_forward_size, config.hidden_size)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n return hidden_states\n\n\nclass ChunkReformerFeedForward(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dense = ReformerFeedForwardDense(config)\n self.output = ReformerFeedForwardOutput(config)\n\n def forward(self, attention_output):\n return apply_chunking_to_forward(\n self.forward_chunk,\n self.chunk_size_feed_forward,\n self.seq_len_dim,\n attention_output,\n )\n\n def forward_chunk(self, hidden_states):\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.dense(hidden_states)\n return self.output(hidden_states)\n\n\nclass ReformerLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.attention = ReformerAttention(config, layer_id)\n # dropout requires to have the same\n # seed for forward and backward pass\n self.attention_seed = None\n self.feed_forward_seed = None\n\n self.feed_forward = ChunkReformerFeedForward(config)\n\n def _init_attention_seed(self):\n \"\"\"\n This function sets a new seed for the attention layer to make dropout deterministic for both forward calls: 1\n normal forward call and 1 forward call in backward to recalculate activations.\n \"\"\"\n\n # randomize seeds\n # use cuda generator if available\n if hasattr(torch.cuda, \"default_generators\") and len(torch.cuda.default_generators) > 0:\n # GPU\n device_idx = torch.cuda.current_device()\n self.attention_seed = torch.cuda.default_generators[device_idx].seed()\n else:\n # CPU\n self.attention_seed = int(torch.seed() % sys.maxsize)\n\n torch.manual_seed(self.attention_seed)\n\n def _init_feed_forward_seed(self):\n \"\"\"\n This function sets a new seed for the feed forward layer to make dropout deterministic for both forward calls:\n 1 normal forward call and 1 forward call in backward to recalculate activations.\n \"\"\"\n # randomize seeds\n # use cuda generator if available\n if hasattr(torch.cuda, \"default_generators\") and len(torch.cuda.default_generators) > 0:\n # GPU\n device_idx = torch.cuda.current_device()\n self.feed_forward_seed = torch.cuda.default_generators[device_idx].seed()\n else:\n # CPU\n self.feed_forward_seed = int(torch.seed() % sys.maxsize)\n\n torch.manual_seed(self.feed_forward_seed)\n\n def forward(\n self,\n prev_attn_output,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n num_hashes=None,\n past_buckets_states=None,\n use_cache=False,\n orig_sequence_length=None,\n output_attentions=False,\n ):\n with torch.no_grad():\n # every forward pass we sample a different seed\n # for dropout and save for forward fn in backward pass\n # to have correct dropout\n if self.training:\n self._init_attention_seed()\n\n attn_outputs = self.attention(\n hidden_states=hidden_states,\n head_mask=head_mask,\n attention_mask=attention_mask,\n num_hashes=num_hashes,\n past_buckets_states=past_buckets_states,\n use_cache=use_cache,\n orig_sequence_length=orig_sequence_length,\n output_attentions=output_attentions,\n )\n attn_output = attn_outputs.hidden_states\n\n # Implementation of RevNet (see Fig. 
6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0)\n # Y_1 = X_1 + f(X_2)\n attn_output = prev_attn_output + attn_output\n\n # free memory\n del prev_attn_output\n\n # every forward pass we sample a different seed\n # for dropout and save seed for forward fn in backward\n # to have correct dropout\n if self.training:\n self._init_feed_forward_seed()\n # Y_2 = X_2 + g(Y_1)\n hidden_states = hidden_states + self.feed_forward(attn_output)\n\n return ReformerOutput(\n attn_output=attn_output,\n hidden_states=hidden_states,\n attention_probs=attn_outputs.attention_probs,\n buckets=attn_outputs.buckets,\n )\n\n def backward_pass(\n self,\n next_attn_output,\n hidden_states,\n grad_attn_output,\n grad_hidden_states,\n attention_mask=None,\n head_mask=None,\n buckets=None,\n ):\n # Implements the backward pass for reversible ResNets.\n # A good blog post on how this works can be found here:\n # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0)\n # This code is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py\n\n with torch.enable_grad():\n next_attn_output.requires_grad = True\n\n # set seed to have correct dropout\n torch.manual_seed(self.feed_forward_seed)\n # g(Y_1)\n res_hidden_states = self.feed_forward(next_attn_output)\n res_hidden_states.backward(grad_hidden_states, retain_graph=True)\n\n with torch.no_grad():\n # X_2 = Y_2 - g(Y_1)\n hidden_states = hidden_states - res_hidden_states\n del res_hidden_states\n\n grad_attn_output = grad_attn_output + next_attn_output.grad\n next_attn_output.grad = None\n\n with torch.enable_grad():\n hidden_states.requires_grad = True\n\n # set seed to have correct dropout\n torch.manual_seed(self.attention_seed)\n # f(X_2)\n # use cached buckets for backprob if buckets not None for LSHSelfAttention\n output = self.attention(\n hidden_states=hidden_states,\n head_mask=head_mask,\n attention_mask=attention_mask,\n buckets=buckets,\n ).hidden_states\n output.backward(grad_attn_output, retain_graph=True)\n\n with torch.no_grad():\n # X_1 = Y_1 - f(X_2)\n attn_output = next_attn_output - output\n del output, next_attn_output\n\n grad_hidden_states = grad_hidden_states + hidden_states.grad\n hidden_states.grad = None\n hidden_states = hidden_states.detach()\n\n return ReformerBackwardOutput(\n attn_output=attn_output,\n hidden_states=hidden_states,\n grad_attn_output=grad_attn_output,\n grad_hidden_states=grad_hidden_states,\n )\n\n\nclass _ReversibleFunction(Function):\n \"\"\"\n To prevent PyTorch from performing the usual backpropagation, a customized backward function is implemented here.\n This way it is made sure that no memory expensive activations are saved during the forward pass. 
This function is\n heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py\n \"\"\"\n\n @staticmethod\n def forward(\n ctx,\n hidden_states,\n layers,\n attention_mask,\n head_mask,\n num_hashes,\n all_hidden_states,\n all_attentions,\n past_buckets_states,\n use_cache,\n orig_sequence_length,\n output_hidden_states,\n output_attentions,\n ):\n all_buckets = ()\n\n # split duplicated tensor\n hidden_states, attn_output = torch.chunk(hidden_states, 2, dim=-1)\n\n for layer_id, (layer, layer_head_mask) in enumerate(zip(layers, head_mask)):\n if output_hidden_states is True:\n all_hidden_states.append(hidden_states)\n\n layer_outputs = layer(\n prev_attn_output=attn_output,\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n head_mask=layer_head_mask,\n num_hashes=num_hashes,\n past_buckets_states=past_buckets_states,\n use_cache=use_cache,\n orig_sequence_length=orig_sequence_length,\n output_attentions=output_attentions,\n )\n\n attn_output = layer_outputs.attn_output\n hidden_states = layer_outputs.hidden_states\n all_buckets = all_buckets + (layer_outputs.buckets,)\n\n if output_attentions:\n all_attentions.append(layer_outputs.attention_probs)\n\n # Add last layer\n if output_hidden_states is True:\n all_hidden_states.append(hidden_states)\n\n # attach params to ctx for backward\n ctx.save_for_backward(attn_output.detach(), hidden_states.detach())\n ctx.layers = layers\n ctx.all_buckets = all_buckets\n ctx.head_mask = head_mask\n ctx.attention_mask = attention_mask\n\n # Concatenate 2 RevNet outputs\n return torch.cat([attn_output, hidden_states], dim=-1)\n\n @staticmethod\n def backward(ctx, grad_hidden_states):\n grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)\n\n # retrieve params from ctx for backward\n attn_output, hidden_states = ctx.saved_tensors\n\n # create tuple\n output = ReformerBackwardOutput(\n attn_output=attn_output,\n hidden_states=hidden_states,\n grad_attn_output=grad_attn_output,\n grad_hidden_states=grad_hidden_states,\n )\n\n # free memory\n del grad_attn_output, grad_hidden_states, attn_output, hidden_states\n\n layers = ctx.layers\n all_buckets = ctx.all_buckets\n head_mask = ctx.head_mask\n attention_mask = ctx.attention_mask\n\n for idx, layer in enumerate(layers[::-1]):\n # pop last buckets from stack\n buckets = all_buckets[-1]\n all_buckets = all_buckets[:-1]\n\n # backprop\n output = layer.backward_pass(\n next_attn_output=output.attn_output,\n hidden_states=output.hidden_states,\n grad_attn_output=output.grad_attn_output,\n grad_hidden_states=output.grad_hidden_states,\n head_mask=head_mask[len(layers) - idx - 1],\n attention_mask=attention_mask,\n buckets=buckets,\n )\n\n assert all_buckets == (), \"buckets have to be empty after backpropagation\"\n grad_hidden_states = torch.cat([output.grad_attn_output, output.grad_hidden_states], dim=-1)\n\n # num of return vars has to match num of forward() args\n # return gradient for hidden_states arg and None for other args\n return grad_hidden_states, None, None, None, None, None, None, None, None, None, None, None\n\n\nclass ReformerEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dropout = config.hidden_dropout_prob\n\n self.layers = nn.ModuleList([ReformerLayer(config, i) for i in range(config.num_hidden_layers)])\n # Reformer is using Rev Nets, thus last layer outputs are concatenated and\n # Layer Norm is done over 2 * hidden_size\n self.layer_norm = nn.LayerNorm(2 * 
config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n num_hashes=None,\n past_buckets_states=None,\n use_cache=False,\n orig_sequence_length=None,\n output_hidden_states=False,\n output_attentions=False,\n ):\n # hidden_states and attention lists to be filled if wished\n all_hidden_states = []\n all_attentions = []\n\n # init cached hidden states if necessary\n if past_buckets_states is None:\n past_buckets_states = [((None), (None)) for i in range(len(self.layers))]\n\n # concat same tensor for reversible ResNet\n hidden_states = torch.cat([hidden_states, hidden_states], dim=-1)\n hidden_states = _ReversibleFunction.apply(\n hidden_states,\n self.layers,\n attention_mask,\n head_mask,\n num_hashes,\n all_hidden_states,\n all_attentions,\n past_buckets_states,\n use_cache,\n orig_sequence_length,\n output_hidden_states,\n output_attentions,\n )\n\n # Apply layer norm to concatenated hidden states\n hidden_states = self.layer_norm(hidden_states)\n\n # Apply dropout\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n\n return ReformerEncoderOutput(\n hidden_states=hidden_states,\n all_hidden_states=all_hidden_states,\n all_attentions=all_attentions,\n past_buckets_states=past_buckets_states,\n )\n\n\nclass ReformerOnlyLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n # Reformer is using Rev Nets, thus last layer outputs are concatenated and\n # Layer Norm is done over 2 * hidden_size\n self.seq_len_dim = 1\n self.chunk_size_lm_head = config.chunk_size_lm_head\n self.decoder = nn.Linear(2 * config.hidden_size, config.vocab_size, bias=False)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)\n\n def forward_chunk(self, hidden_states):\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\nclass ReformerPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = ReformerConfig\n base_model_prefix = \"reformer\"\n\n @property\n def dummy_inputs(self):\n input_ids = torch.tensor(DUMMY_INPUTS)\n input_mask = torch.tensor(DUMMY_MASK)\n dummy_inputs = {\n \"input_ids\": input_ids,\n \"attention_mask\": input_mask,\n }\n return dummy_inputs\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, AxialPositionEmbeddings):\n for weight in module.weights:\n torch.nn.init.normal_(weight, std=self.config.axial_norm_std)\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\n@dataclass\nclass ReformerModelOutput(ModelOutput):\n \"\"\"\n Output type of 
:class:`~transformers.ReformerModel`.\n\n Args:\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`):\n Sequence of hidden-states at the last layer of the model.\n\n ``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then\n ``num_predict`` corresponds to ``sequence_length``.\n past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):\n List of :obj:`Tuple(torch.LongTensor, torch.FloatTensor` of length :obj:`config.n_layers`, with the first\n element being the previous `buckets` of shape :obj:`(batch_size, num_heads, num_hashes, sequence_length)`)\n and the second being the previous `hidden_states` of shape :obj:`(batch_size, sequence_length,\n hidden_size)`).\n\n Contains precomputed buckets and hidden-states that can be used (see ``past_buckets_states`` input) to\n speed up sequential decoding.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each\n layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n last_hidden_state: torch.FloatTensor\n past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass ReformerModelWithLMHeadOutput(ModelOutput):\n \"\"\"\n Output type of :class:`~transformers.ReformerModelWithLMHead`.\n\n Args:\n loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)\n Language modeling loss (for next-token prediction).\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n\n ``num_predict`` corresponds to ``target_mapping.shape[1]``. 
If ``target_mapping`` is ``None``, then\n ``num_predict`` corresponds to ``sequence_length``.\n past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):\n List of :obj:`Tuple(torch.LongTensor, torch.FloatTensor` of length :obj:`config.n_layers`, with the first\n element being the previous `buckets` of shape :obj:`(batch_size, num_heads, num_hashes, sequence_length)`)\n and the second being the previous `hidden_states` of shape :obj:`(batch_size, sequence_length,\n hidden_size)`).\n\n Contains precomputed buckets and hidden-states that can be used (see ``past_buckets_states`` input) to\n speed up sequential decoding.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n TTuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each\n layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nREFORMER_START_DOCSTRING = r\"\"\"\n Reformer was proposed in `Reformer: The Efficient Transformer <https://arxiv.org/abs/2001.0445>`__ by Nikita\n Kitaev, Łukasz Kaiser, Anselm Levskaya.\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.ReformerConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nREFORMER_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be\n a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices\n are automatically padded to be a multiple of the chunk length.\n\n Indices can be obtained using :class:`~transformers.ReformerTokenizer`. 
See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`__\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n num_hashes (:obj:`int`, `optional`):\n The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites\n the default defined in :obj:`config.num_hashes`.\n\n For more information, see :obj:`num_hashes` in :class:`~transformers.ReformerConfig`.\n past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`):\n List of :obj:`Tuple(torch.LongTensor, torch.FloatTensor` of length :obj:`config.n_layers`, with the first\n element being the previous `buckets` of shape :obj:`(batch_size, num_heads, num_hashes, sequence_length)`)\n and the second being the previous `hidden_states` of shape :obj:`(batch_size, sequence_length,\n hidden_size)`).\n\n Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed\n up sequential decoding.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare Reformer Model transformer outputting raw hidden-states\" \"without any specific head on top.\",\n REFORMER_START_DOCSTRING,\n)\nclass ReformerModel(ReformerPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n assert (\n self.config.num_hidden_layers > 0\n ), \"`config.attn_layers` is empty. 
Select at least one attn layer form ['lsh', 'local']\"\n\n self.embeddings = ReformerEmbeddings(config)\n self.encoder = ReformerEncoder(config)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/reformer-crime-and-punishment\",\n output_type=ReformerModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n num_hashes=None,\n past_buckets_states=None,\n use_cache=None,\n output_hidden_states=None,\n output_attentions=None,\n return_dict=None,\n ):\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size() # noqa: F841\n device = input_ids.device\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1] # noqa: F841\n device = inputs_embeds.device\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n assert (\n len(input_shape) == 2\n ), \"`input_ids` have be of shape `[batch_size, sequence_length]`, but got shape: {}\".format(input_shape)\n\n if past_buckets_states is not None:\n assert not self.training, \"`past_buckets_states` can only be used for inference, not for training`.\"\n\n # prepare head mask\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers, is_attention_chunked=True)\n\n # original sequence length for padding\n orig_sequence_length = input_shape[-1]\n\n # if needs padding\n least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config)\n min_chunk_length = _get_min_chunk_len(self.config)\n\n must_pad_to_match_chunk_length = (\n input_shape[-1] % least_common_mult_chunk_length != 0\n and input_shape[-1] > min_chunk_length\n and past_buckets_states is None\n )\n\n if must_pad_to_match_chunk_length:\n padding_length = least_common_mult_chunk_length - input_shape[-1] % least_common_mult_chunk_length\n\n if self.training is True:\n raise ValueError(\n \"If training, sequence Length {} has to be a multiple of least common multiple chunk_length {}. 
Please consider padding the input to a length of {}.\".format(\n input_shape[-1], least_common_mult_chunk_length, input_shape[-1] + padding_length\n )\n )\n\n # pad input\n input_ids, inputs_embeds, attention_mask, position_ids, input_shape = self._pad_to_mult_of_chunk_length(\n input_ids,\n inputs_embeds=inputs_embeds,\n attention_mask=attention_mask,\n position_ids=position_ids,\n input_shape=input_shape,\n padding_length=padding_length,\n padded_seq_length=least_common_mult_chunk_length,\n device=device,\n )\n\n # start index for position encoding depends on incremental decoding\n if past_buckets_states is not None:\n start_idx_pos_encodings = past_buckets_states[0][1].shape[1]\n else:\n start_idx_pos_encodings = 0\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n start_idx_pos_encodings=start_idx_pos_encodings,\n )\n\n encoder_outputs = self.encoder(\n hidden_states=embedding_output,\n head_mask=head_mask,\n attention_mask=attention_mask,\n num_hashes=num_hashes,\n past_buckets_states=past_buckets_states,\n use_cache=use_cache,\n orig_sequence_length=orig_sequence_length,\n output_hidden_states=output_hidden_states,\n output_attentions=output_attentions,\n )\n sequence_output = encoder_outputs.hidden_states\n\n # if padding was applied\n if must_pad_to_match_chunk_length:\n sequence_output = sequence_output[:, :orig_sequence_length]\n\n past_buckets_states = encoder_outputs.past_buckets_states if use_cache else None\n hidden_states = encoder_outputs.all_hidden_states if output_hidden_states else None\n attentions = encoder_outputs.all_attentions if output_attentions else None\n\n if not return_dict:\n return tuple(v for v in [sequence_output, past_buckets_states, hidden_states, attentions] if v is not None)\n return ReformerModelOutput(\n last_hidden_state=sequence_output,\n past_buckets_states=past_buckets_states,\n hidden_states=hidden_states,\n attentions=attentions,\n )\n\n def _pad_to_mult_of_chunk_length(\n self,\n input_ids,\n inputs_embeds=None,\n attention_mask=None,\n position_ids=None,\n input_shape=None,\n padding_length=None,\n padded_seq_length=None,\n device=None,\n ):\n logger.info(\n \"Input ids are automatically padded from {} to {} to be a multiple of `config.chunk_length`: {}\".format(\n input_shape[-1], input_shape[-1] + padding_length, padded_seq_length\n )\n )\n\n padded_input_ids = torch.full(\n (input_shape[0], padding_length),\n self.config.pad_token_id,\n device=device,\n dtype=torch.long,\n )\n\n # Extend `attention_mask`\n if attention_mask is not None:\n pad_attention_mask = torch.zeros(input_shape[0], padding_length, device=device, dtype=attention_mask.dtype)\n\n attention_mask = torch.cat([attention_mask, pad_attention_mask], dim=-1)\n else:\n attention_mask = torch.cat(\n [\n torch.ones(input_shape, device=device, dtype=torch.uint8),\n torch.zeros((input_shape[0], padding_length), device=device, dtype=torch.uint8),\n ],\n dim=-1,\n )\n\n # Extend `input_ids` with padding to match least common multiple chunk_length\n if input_ids is not None:\n input_ids = torch.cat([input_ids, padded_input_ids], dim=-1)\n input_shape = input_ids.size()\n\n # Pad position ids if given\n if position_ids is not None:\n padded_position_ids = torch.arange(input_shape[-1], padded_seq_length, dtype=torch.long, device=device)\n padded_position_ids = position_ids.unsqueeze(0).expand(input_shape[0], padding_length)\n position_ids = torch.cat([position_ids, padded_position_ids], dim=-1)\n\n # Extend 
`inputs_embeds` with padding to match least common multiple chunk_length\n if inputs_embeds is not None:\n padded_inputs_embeds = self.embeddings(padded_input_ids, position_ids)\n inputs_embeds = torch.cat([inputs_embeds, padded_inputs_embeds], dim=-2)\n input_shape = inputs_embeds.size()\n return input_ids, inputs_embeds, attention_mask, position_ids, input_shape\n\n\n@add_start_docstrings(\"\"\"Reformer Model with a `language modeling` head on top. \"\"\", REFORMER_START_DOCSTRING)\nclass ReformerModelWithLMHead(ReformerPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n assert config.is_decoder, \"If you want to use `ReformerModelWithLMHead` make sure that `is_decoder=True`.\"\n assert (\n \"local\" not in self.config.attn_layers or config.local_num_chunks_after == 0\n ), f\"If causal mask is enabled, make sure that `config.local_num_chunks_after` is set to 0 and not {config.local_num_chunks_after}.\"\n assert (\n \"lsh\" not in self.config.attn_layers or config.lsh_num_chunks_after == 0\n ), f\"If causal mask is enabled, make sure that `config.lsh_num_chunks_after` is set to 1 and not {config.lsh_num_chunks_after}.\"\n\n self.reformer = ReformerModel(config)\n self.lm_head = ReformerOnlyLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/reformer-crime-and-punishment\",\n output_type=CausalLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n attention_mask=None,\n head_mask=None,\n inputs_embeds=None,\n num_hashes=None,\n past_buckets_states=None,\n use_cache=None,\n output_hidden_states=None,\n output_attentions=None,\n return_dict=None,\n labels=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0,\n ..., config.vocab_size - 1]`. 
All labels set to ``-100`` are ignored (masked), the loss is only\n computed for labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n reformer_outputs = self.reformer(\n input_ids,\n position_ids=position_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n num_hashes=num_hashes,\n past_buckets_states=past_buckets_states,\n use_cache=use_cache,\n output_hidden_states=output_hidden_states,\n output_attentions=output_attentions,\n return_dict=return_dict,\n )\n\n sequence_output = reformer_outputs[0]\n logits = self.lm_head(sequence_output)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1))\n\n if not return_dict:\n output = (logits,) + reformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return ReformerModelWithLMHeadOutput(\n loss=loss,\n logits=logits,\n past_buckets_states=reformer_outputs.past_buckets_states,\n hidden_states=reformer_outputs.hidden_states,\n attentions=reformer_outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, use_cache=None, num_hashes=None, **kwargs):\n # only last token for inputs_ids if past is defined in kwargs\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n inputs_dict = {\n \"input_ids\": input_ids,\n \"past_buckets_states\": past,\n \"use_cache\": use_cache,\n \"num_hashes\": num_hashes,\n }\n\n return inputs_dict\n\n def _reorder_cache(self, past, beam_idx):\n reord_past_buckets_states = []\n for layer_past in past:\n # buckets\n if layer_past[0] is not None:\n reord_buckets = layer_past[0].index_select(0, beam_idx)\n else:\n reord_buckets = None\n\n # hidden states\n reord_hidden_states = layer_past[1].index_select(0, beam_idx)\n reord_past_buckets_states.append((reord_buckets, reord_hidden_states))\n return reord_past_buckets_states\n\n\n@add_start_docstrings(\"\"\"Reformer Model with a `language modeling` head on top. \"\"\", REFORMER_START_DOCSTRING)\nclass ReformerForMaskedLM(ReformerPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n assert (\n not config.is_decoder\n ), \"If you want to use `ReformerForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.\"\n self.reformer = ReformerModel(config)\n self.lm_head = ReformerOnlyLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/reformer-crime-and-punishment\",\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n attention_mask=None,\n head_mask=None,\n inputs_embeds=None,\n num_hashes=None,\n labels=None,\n output_hidden_states=None,\n output_attentions=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n reformer_outputs = self.reformer(\n input_ids,\n position_ids=position_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n num_hashes=num_hashes,\n use_cache=False, # no causal mask\n output_hidden_states=output_hidden_states,\n output_attentions=output_attentions,\n return_dict=return_dict,\n )\n\n sequence_output = reformer_outputs[0]\n logits = self.lm_head(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + reformer_outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=logits,\n hidden_states=reformer_outputs.hidden_states,\n attentions=reformer_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Reformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n \"\"\",\n REFORMER_START_DOCSTRING,\n)\nclass ReformerForSequenceClassification(ReformerPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.reformer = ReformerModel(config)\n self.classifier = ReformerClassificationHead(config)\n if config.is_decoder is True:\n logger.warning(\"You might want to disable causal masking for sequence classification\")\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/reformer-crime-and-punishment\",\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n attention_mask=None,\n head_mask=None,\n inputs_embeds=None,\n num_hashes=None,\n labels=None,\n output_hidden_states=None,\n output_attentions=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.reformer(\n input_ids,\n position_ids=position_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n num_hashes=num_hashes,\n output_hidden_states=output_hidden_states,\n output_attentions=output_attentions,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass ReformerClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(2 * config.hidden_size, config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, hidden_states, **kwargs):\n hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.dense(hidden_states)\n hidden_states = torch.tanh(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.out_proj(hidden_states)\n return hidden_states\n\n\n@add_start_docstrings(\n \"\"\"\n Reformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA\n ( a linear layer on top of hidden-states output to compute `span start logits` and `span end logits`.\n \"\"\",\n REFORMER_START_DOCSTRING,\n)\nclass ReformerForQuestionAnswering(ReformerPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.reformer = ReformerModel(config)\n # 2 * config.hidden_size because we use reversible residual layers\n self.qa_outputs = nn.Linear(2 * config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/reformer-crime-and-punishment\",\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n attention_mask=None,\n head_mask=None,\n inputs_embeds=None,\n num_hashes=None,\n start_positions=None,\n end_positions=None,\n output_hidden_states=None,\n output_attentions=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n reformer_outputs = self.reformer(\n input_ids,\n position_ids=position_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n num_hashes=num_hashes,\n use_cache=False, # no causal mask\n output_hidden_states=output_hidden_states,\n output_attentions=output_attentions,\n return_dict=return_dict,\n )\n\n sequence_output = reformer_outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + reformer_outputs[1:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=reformer_outputs.hidden_states,\n attentions=reformer_outputs.attentions,\n )\n", "# coding=utf-8\n# Copyright 2018 Salesforce and HuggingFace Inc. 
team.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\n\nfrom transformers import is_torch_available\nfrom transformers.testing_utils import require_torch, slow, torch_device\n\nfrom .test_configuration_common import ConfigTester\nfrom .test_generation_utils import GenerationTesterMixin\nfrom .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask\n\n\nif is_torch_available():\n import torch\n\n from transformers import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLConfig, CTRLLMHeadModel, CTRLModel\n\n\nclass CTRLModelTester:\n def __init__(\n self,\n parent,\n ):\n self.parent = parent\n self.batch_size = 14\n self.seq_length = 7\n self.is_training = True\n self.use_token_type_ids = True\n self.use_input_mask = True\n self.use_labels = True\n self.use_mc_token_ids = True\n self.vocab_size = 99\n self.hidden_size = 32\n self.num_hidden_layers = 5\n self.num_attention_heads = 4\n self.intermediate_size = 37\n self.hidden_act = \"gelu\"\n self.hidden_dropout_prob = 0.1\n self.attention_probs_dropout_prob = 0.1\n self.max_position_embeddings = 512\n self.type_vocab_size = 16\n self.type_sequence_label_size = 2\n self.initializer_range = 0.02\n self.num_labels = 3\n self.num_choices = 4\n self.scope = None\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n token_type_ids = None\n if self.use_token_type_ids:\n token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)\n\n mc_token_ids = None\n if self.use_mc_token_ids:\n mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)\n\n sequence_labels = None\n token_labels = None\n choice_labels = None\n if self.use_labels:\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)\n choice_labels = ids_tensor([self.batch_size], self.num_choices)\n\n config = CTRLConfig(\n vocab_size=self.vocab_size,\n n_embd=self.hidden_size,\n n_layer=self.num_hidden_layers,\n n_head=self.num_attention_heads,\n # intermediate_size=self.intermediate_size,\n # hidden_act=self.hidden_act,\n # hidden_dropout_prob=self.hidden_dropout_prob,\n # attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n n_positions=self.max_position_embeddings,\n n_ctx=self.max_position_embeddings,\n # type_vocab_size=self.type_vocab_size,\n # initializer_range=self.initializer_range,\n )\n\n head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)\n\n return (\n config,\n input_ids,\n input_mask,\n head_mask,\n token_type_ids,\n mc_token_ids,\n sequence_labels,\n token_labels,\n choice_labels,\n )\n\n def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):\n model = CTRLModel(config=config)\n model.to(torch_device)\n model.eval()\n\n 
model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)\n model(input_ids, token_type_ids=token_type_ids)\n result = model(input_ids)\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n self.parent.assertEqual(len(result.past_key_values), config.n_layer)\n\n def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):\n model = CTRLLMHeadModel(config)\n model.to(torch_device)\n model.eval()\n\n result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)\n self.parent.assertEqual(result.loss.shape, ())\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n\n (\n config,\n input_ids,\n input_mask,\n head_mask,\n token_type_ids,\n mc_token_ids,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = config_and_inputs\n\n inputs_dict = {\"input_ids\": input_ids, \"token_type_ids\": token_type_ids, \"head_mask\": head_mask}\n\n return config, inputs_dict\n\n\n@require_torch\nclass CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):\n\n all_model_classes = (CTRLModel, CTRLLMHeadModel) if is_torch_available() else ()\n all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()\n test_pruning = True\n test_torchscript = False\n test_resize_embeddings = False\n test_head_masking = False\n\n def setUp(self):\n self.model_tester = CTRLModelTester(self)\n self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_ctrl_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_ctrl_model(*config_and_inputs)\n\n def test_ctrl_lm_head_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_lm_head_model(*config_and_inputs)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = CTRLModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n@require_torch\nclass CTRLModelLanguageGenerationTest(unittest.TestCase):\n @slow\n def test_lm_generate_ctrl(self):\n model = CTRLLMHeadModel.from_pretrained(\"ctrl\")\n model.to(torch_device)\n input_ids = torch.tensor(\n [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device\n ) # Legal the president is\n expected_output_ids = [\n 11859,\n 0,\n 1611,\n 8,\n 5,\n 150,\n 26449,\n 2,\n 19,\n 348,\n 469,\n 3,\n 2595,\n 48,\n 20740,\n 246533,\n 246533,\n 19,\n 30,\n 5,\n ] # Legal the president is a good guy and I don't want to lose my job. \\n \\n I have a\n\n output_ids = model.generate(input_ids, do_sample=False)\n self.assertListEqual(output_ids[0].tolist(), expected_output_ids)\n" ]
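The ReformerLayer and _ReversibleFunction code in the row above describes reversible residual layers: the forward pass computes Y_1 = X_1 + f(X_2) and Y_2 = X_2 + g(Y_1), and backward_pass re-derives the inputs as X_2 = Y_2 - g(Y_1), X_1 = Y_1 - f(X_2) instead of keeping activations in memory. The sketch below only illustrates that arithmetic; it is not the transformers implementation, and f and g are plain linear layers standing in for the attention and feed-forward blocks (the recovery only holds when f and g are deterministic, which the model enforces by re-seeding dropout before both the forward call and the recomputation in backward_pass).

# Illustration only: f and g stand in for the attention and feed-forward blocks.
import torch

def reversible_forward(x1, x2, f, g):
    # Y_1 = X_1 + f(X_2); Y_2 = X_2 + g(Y_1)
    y1 = x1 + f(x2)
    y2 = x2 + g(y1)
    return y1, y2

def reversible_inverse(y1, y2, f, g):
    # Recover the layer inputs without stored activations:
    # X_2 = Y_2 - g(Y_1); X_1 = Y_1 - f(X_2)
    x2 = y2 - g(y1)
    x1 = y1 - f(x2)
    return x1, x2

torch.manual_seed(0)
f = torch.nn.Linear(8, 8)
g = torch.nn.Linear(8, 8)
x1, x2 = torch.randn(2, 8), torch.randn(2, 8)
y1, y2 = reversible_forward(x1, x2, f, g)
r1, r2 = reversible_inverse(y1, y2, f, g)
assert torch.allclose(r1, x1, atol=1e-6) and torch.allclose(r2, x2, atol=1e-6)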
[ [ "torch.nn.Softmax", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.zeros", "torch.einsum", "torch.from_numpy", "torch.nn.Embedding", "torch.nn.LayerNorm", "tensorflow.train.load_variable", "torch.nn.Linear", "torch.matmul", "torch.nn.Tanh", "numpy.transpose", "torch.arange", "tensorflow.train.list_variables", "torch.nn.MSELoss" ], [ "torch.mean", "torch.nn.functional.dropout2d", "torch.cat", "torch.nn.functional.dropout", "torch.zeros", "torch.sum", "torch.nn.Embedding", "torch.tanh", "torch.rsqrt", "torch.no_grad", "torch.where", "torch.logsumexp", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.einsum", "torch.randn", "torch.reshape", "torch.tensor", "torch.arange", "torch.argsort", "torch.index_select", "torch.ones_like", "torch.enable_grad", "torch.full", "torch.cuda.current_device", "numpy.lcm", "torch.exp", "torch.nn.Linear", "torch.nn.init.normal_", "torch.nn.ParameterList", "torch.seed", "torch.manual_seed", "torch.gather", "torch.nn.LayerNorm", "torch.matmul", "torch.chunk", "torch.nn.MSELoss", "torch.argmax" ], [ "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Neoyanghc/Bigscity-LibCity
[ "44c0bff537c20d8a9db8f814ffbe3540c65bdf82" ]
[ "libcity/evaluator/traffic_state_evaluator.py" ]
[ "import os\nimport json\nimport datetime\nimport pandas as pd\nfrom libcity.utils import ensure_dir\nfrom libcity.model import loss\nfrom logging import getLogger\nfrom libcity.evaluator.abstract_evaluator import AbstractEvaluator\n\n\nclass TrafficStateEvaluator(AbstractEvaluator):\n\n def __init__(self, config):\n self.metrics = config.get('metrics', ['MAE']) # 评估指标, 是一个 list\n self.allowed_metrics = ['MAE', 'MSE', 'RMSE', 'MAPE', 'masked_MAE',\n 'masked_MSE', 'masked_RMSE', 'masked_MAPE', 'R2', 'EVAR']\n self.save_modes = config.get('save_modes', ['csv', 'json'])\n self.mode = config.get('evaluator_mode', 'single') # or average\n self.config = config\n self.len_timeslots = 0\n self.result = {} # 每一种指标的结果\n self.intermediate_result = {} # 每一种指标每一个batch的结果\n self._check_config()\n self._logger = getLogger()\n\n def _check_config(self):\n if not isinstance(self.metrics, list):\n raise TypeError('Evaluator type is not list')\n for metric in self.metrics:\n if metric not in self.allowed_metrics:\n raise ValueError('the metric {} is not allowed in TrafficStateEvaluator'.format(str(metric)))\n\n def collect(self, batch):\n \"\"\"\n 收集一 batch 的评估输入\n\n Args:\n batch(dict): 输入数据,字典类型,包含两个Key:(y_true, y_pred):\n batch['y_true']: (num_samples/batch_size, timeslots, ..., feature_dim)\n batch['y_pred']: (num_samples/batch_size, timeslots, ..., feature_dim)\n \"\"\"\n if not isinstance(batch, dict):\n raise TypeError('evaluator.collect input is not a dict of user')\n y_true = batch['y_true'] # tensor\n y_pred = batch['y_pred'] # tensor\n if y_true.shape != y_pred.shape:\n raise ValueError(\"batch['y_true'].shape is not equal to batch['y_pred'].shape\")\n self.len_timeslots = y_true.shape[1]\n for i in range(1, self.len_timeslots+1):\n for metric in self.metrics:\n if metric+'@'+str(i) not in self.intermediate_result:\n self.intermediate_result[metric+'@'+str(i)] = []\n if self.mode.lower() == 'average': # 前i个时间步的平均loss\n for i in range(1, self.len_timeslots+1):\n for metric in self.metrics:\n if metric == 'masked_MAE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_mae_torch(y_pred[:, :i], y_true[:, :i], 0).item())\n elif metric == 'masked_MSE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_mse_torch(y_pred[:, :i], y_true[:, :i], 0).item())\n elif metric == 'masked_RMSE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_rmse_torch(y_pred[:, :i], y_true[:, :i], 0).item())\n elif metric == 'masked_MAPE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_mape_torch(y_pred[:, :i], y_true[:, :i], 0).item())\n elif metric == 'MAE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_mae_torch(y_pred[:, :i], y_true[:, :i]).item())\n elif metric == 'MSE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_mse_torch(y_pred[:, :i], y_true[:, :i]).item())\n elif metric == 'RMSE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_rmse_torch(y_pred[:, :i], y_true[:, :i]).item())\n elif metric == 'MAPE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_mape_torch(y_pred[:, :i], y_true[:, :i]).item())\n elif metric == 'R2':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.r2_score_torch(y_pred[:, :i], y_true[:, :i]).item())\n elif metric == 'EVAR':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.explained_variance_score_torch(y_pred[:, :i], y_true[:, :i]).item())\n elif self.mode.lower() == 'single': # 
第i个时间步的loss\n for i in range(1, self.len_timeslots + 1):\n for metric in self.metrics:\n if metric == 'masked_MAE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_mae_torch(y_pred[:, i-1], y_true[:, i-1], 0).item())\n elif metric == 'masked_MSE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_mse_torch(y_pred[:, i-1], y_true[:, i-1], 0).item())\n elif metric == 'masked_RMSE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_rmse_torch(y_pred[:, i-1], y_true[:, i-1], 0).item())\n elif metric == 'masked_MAPE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_mape_torch(y_pred[:, i-1], y_true[:, i-1], 0).item())\n elif metric == 'MAE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_mae_torch(y_pred[:, i-1], y_true[:, i-1]).item())\n elif metric == 'MSE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_mse_torch(y_pred[:, i-1], y_true[:, i-1]).item())\n elif metric == 'RMSE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_rmse_torch(y_pred[:, i-1], y_true[:, i-1]).item())\n elif metric == 'MAPE':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.masked_mape_torch(y_pred[:, i-1], y_true[:, i-1]).item())\n elif metric == 'R2':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.r2_score_torch(y_pred[:, i-1], y_true[:, i-1]).item())\n elif metric == 'EVAR':\n self.intermediate_result[metric + '@' + str(i)].append(\n loss.explained_variance_score_torch(y_pred[:, i-1], y_true[:, i-1]).item())\n else:\n raise ValueError('Error parameter evaluator_mode={}, please set `single` or `average`.'.format(self.mode))\n\n def evaluate(self):\n \"\"\"\n 返回之前收集到的所有 batch 的评估结果\n \"\"\"\n for i in range(1, self.len_timeslots + 1):\n for metric in self.metrics:\n self.result[metric+'@'+str(i)] = sum(self.intermediate_result[metric+'@'+str(i)]) / \\\n len(self.intermediate_result[metric+'@'+str(i)])\n return self.result\n\n def save_result(self, save_path, filename=None):\n \"\"\"\n 将评估结果保存到 save_path 文件夹下的 filename 文件中\n\n Args:\n save_path: 保存路径\n filename: 保存文件名\n \"\"\"\n self._logger.info('Note that you select the {} mode to evaluate!'.format(self.mode))\n self.evaluate()\n ensure_dir(save_path)\n if filename is None: # 使用时间戳\n filename = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '_' + \\\n self.config['model'] + '_' + self.config['dataset']\n\n if 'json' in self.save_modes:\n self._logger.info('Evaluate result is ' + json.dumps(self.result))\n with open(os.path.join(save_path, '{}.json'.format(filename)), 'w') as f:\n json.dump(self.result, f)\n self._logger.info('Evaluate result is saved at ' +\n os.path.join(save_path, '{}.json'.format(filename)))\n\n dataframe = {}\n if 'csv' in self.save_modes:\n for metric in self.metrics:\n dataframe[metric] = []\n for i in range(1, self.len_timeslots + 1):\n for metric in self.metrics:\n dataframe[metric].append(self.result[metric+'@'+str(i)])\n dataframe = pd.DataFrame(dataframe, index=range(1, self.len_timeslots + 1))\n dataframe.to_csv(os.path.join(save_path, '{}.csv'.format(filename)), index=False)\n # 显示所有列\n pd.set_option('display.max_columns', None)\n # 显示所有行\n pd.set_option('display.max_rows', None)\n self._logger.info('Evaluate result is saved at ' +\n os.path.join(save_path, '{}.csv'.format(filename)))\n self._logger.info(\"\\n\" + str(dataframe))\n return dataframe\n\n def clear(self):\n \"\"\"\n 清除之前收集到的 batch 的评估信息,适用于每次评估开始时进行一次清空,排除之前的评估输入的影响。\n 
\"\"\"\n self.result = {}\n self.intermediate_result = {}\n" ]
[ [ "pandas.set_option" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ishine/DiffSinger-1
[ "9a5baf553f635f088ca110aa22e87b67ece6e947", "9a5baf553f635f088ca110aa22e87b67ece6e947" ]
[ "usr/diffsinger_task.py", "utils/pitch_utils.py" ]
[ "import torch\n\nimport utils\nfrom utils.hparams import hparams\nfrom .diff.net import DiffNet\nfrom .diff.shallow_diffusion_tts import GaussianDiffusion, OfflineGaussianDiffusion\nfrom .diffspeech_task import DiffSpeechTask\nfrom vocoders.base_vocoder import get_vocoder_cls, BaseVocoder\nfrom modules.fastspeech.pe import PitchExtractor\nfrom modules.fastspeech.fs2 import FastSpeech2\nfrom modules.diffsinger_midi.fs2 import FastSpeech2MIDI\n\nfrom usr.diff.candidate_decoder import FFT\nfrom utils.pitch_utils import denorm_f0\nfrom tasks.tts.fs2_utils import FastSpeechDataset\nfrom tasks.tts.fs2 import FastSpeech2Task\n\nimport numpy as np\nimport os\nimport torch.nn.functional as F\n\nDIFF_DECODERS = {\n 'wavenet': lambda hp: DiffNet(hp['audio_num_mel_bins']),\n 'fft': lambda hp: FFT(\n hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']),\n}\n\n\nclass DiffSingerTask(DiffSpeechTask):\n def __init__(self):\n super(DiffSingerTask, self).__init__()\n self.dataset_cls = FastSpeechDataset\n self.vocoder: BaseVocoder = get_vocoder_cls(hparams)()\n if hparams.get('pe_enable') is not None and hparams['pe_enable']:\n self.pe = PitchExtractor().cuda()\n utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True)\n self.pe.eval()\n\n def build_tts_model(self):\n mel_bins = hparams['audio_num_mel_bins']\n self.model = GaussianDiffusion(\n phone_encoder=self.phone_encoder,\n out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),\n timesteps=hparams['timesteps'],\n K_step=hparams['K_step'],\n loss_type=hparams['diff_loss_type'],\n spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],\n )\n if hparams['fs2_ckpt'] != '':\n utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True)\n # self.model.fs2.decoder = None\n for k, v in self.model.fs2.named_parameters():\n v.requires_grad = False\n\n def validation_step(self, sample, batch_idx):\n outputs = {}\n txt_tokens = sample['txt_tokens'] # [B, T_t]\n\n target = sample['mels'] # [B, T_s, 80]\n energy = sample['energy']\n # fs2_mel = sample['fs2_mels']\n spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')\n mel2ph = sample['mel2ph']\n f0 = sample['f0']\n uv = sample['uv']\n\n outputs['losses'] = {}\n\n outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)\n\n\n outputs['total_loss'] = sum(outputs['losses'].values())\n outputs['nsamples'] = sample['nsamples']\n outputs = utils.tensors_to_scalars(outputs)\n if batch_idx < hparams['num_valid_plots']:\n model_out = self.model(\n txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, ref_mels=None, infer=True)\n\n if hparams.get('pe_enable') is not None and hparams['pe_enable']:\n gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel\n pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel\n else:\n gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)\n pred_f0 = model_out.get('f0_denorm')\n self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0)\n self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}')\n self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}')\n return outputs\n\n\nclass ShallowDiffusionOfflineDataset(FastSpeechDataset):\n def __getitem__(self, index):\n sample = super(ShallowDiffusionOfflineDataset, self).__getitem__(index)\n item = 
self._get_item(index)\n\n if self.prefix != 'train' and hparams['fs2_ckpt'] != '':\n fs2_ckpt = os.path.dirname(hparams['fs2_ckpt'])\n item_name = item['item_name']\n fs2_mel = torch.Tensor(np.load(f'{fs2_ckpt}/P_mels_npy/{item_name}.npy')) # ~M generated by FFT-singer.\n sample['fs2_mel'] = fs2_mel\n return sample\n\n def collater(self, samples):\n batch = super(ShallowDiffusionOfflineDataset, self).collater(samples)\n if self.prefix != 'train' and hparams['fs2_ckpt'] != '':\n batch['fs2_mels'] = utils.collate_2d([s['fs2_mel'] for s in samples], 0.0)\n return batch\n\n\nclass DiffSingerOfflineTask(DiffSingerTask):\n def __init__(self):\n super(DiffSingerOfflineTask, self).__init__()\n self.dataset_cls = ShallowDiffusionOfflineDataset\n\n def build_tts_model(self):\n mel_bins = hparams['audio_num_mel_bins']\n self.model = OfflineGaussianDiffusion(\n phone_encoder=self.phone_encoder,\n out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),\n timesteps=hparams['timesteps'],\n K_step=hparams['K_step'],\n loss_type=hparams['diff_loss_type'],\n spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],\n )\n # if hparams['fs2_ckpt'] != '':\n # utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True)\n # self.model.fs2.decoder = None\n\n def run_model(self, model, sample, return_output=False, infer=False):\n txt_tokens = sample['txt_tokens'] # [B, T_t]\n target = sample['mels'] # [B, T_s, 80]\n mel2ph = sample['mel2ph'] # [B, T_s]\n f0 = sample['f0']\n uv = sample['uv']\n energy = sample['energy']\n fs2_mel = None #sample['fs2_mels']\n spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')\n if hparams['pitch_type'] == 'cwt':\n cwt_spec = sample[f'cwt_spec']\n f0_mean = sample['f0_mean']\n f0_std = sample['f0_std']\n sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)\n\n output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,\n ref_mels=[target, fs2_mel], f0=f0, uv=uv, energy=energy, infer=infer)\n\n losses = {}\n if 'diff_loss' in output:\n losses['mel'] = output['diff_loss']\n # self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)\n # if hparams['use_pitch_embed']:\n # self.add_pitch_loss(output, sample, losses)\n if hparams['use_energy_embed']:\n self.add_energy_loss(output['energy_pred'], energy, losses)\n\n if not return_output:\n return losses\n else:\n return losses, output\n\n def validation_step(self, sample, batch_idx):\n outputs = {}\n txt_tokens = sample['txt_tokens'] # [B, T_t]\n\n target = sample['mels'] # [B, T_s, 80]\n energy = sample['energy']\n # fs2_mel = sample['fs2_mels']\n spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')\n mel2ph = sample['mel2ph']\n f0 = sample['f0']\n uv = sample['uv']\n\n outputs['losses'] = {}\n\n outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)\n\n\n outputs['total_loss'] = sum(outputs['losses'].values())\n outputs['nsamples'] = sample['nsamples']\n outputs = utils.tensors_to_scalars(outputs)\n if batch_idx < hparams['num_valid_plots']:\n fs2_mel = sample['fs2_mels']\n model_out = self.model(\n txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy,\n ref_mels=[None, fs2_mel], infer=True)\n if hparams.get('pe_enable') is not None and hparams['pe_enable']:\n gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel\n pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from 
Pred mel\n else:\n gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)\n pred_f0 = model_out.get('f0_denorm')\n self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0)\n self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}')\n self.plot_mel(batch_idx, sample['mels'], fs2_mel, name=f'fs2mel_{batch_idx}')\n return outputs\n\n def test_step(self, sample, batch_idx):\n spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')\n txt_tokens = sample['txt_tokens']\n energy = sample['energy']\n if hparams['profile_infer']:\n pass\n else:\n mel2ph, uv, f0 = None, None, None\n if hparams['use_gt_dur']:\n mel2ph = sample['mel2ph']\n if hparams['use_gt_f0']:\n f0 = sample['f0']\n uv = sample['uv']\n fs2_mel = sample['fs2_mels']\n outputs = self.model(\n txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=[None, fs2_mel], energy=energy,\n infer=True)\n sample['outputs'] = self.model.out2mel(outputs['mel_out'])\n sample['mel2ph_pred'] = outputs['mel2ph']\n\n if hparams.get('pe_enable') is not None and hparams['pe_enable']:\n sample['f0'] = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel\n sample['f0_pred'] = self.pe(sample['outputs'])['f0_denorm_pred'] # pe predict from Pred mel\n else:\n sample['f0'] = denorm_f0(sample['f0'], sample['uv'], hparams)\n sample['f0_pred'] = outputs.get('f0_denorm')\n return self.after_infer(sample)\n\n\nclass MIDIDataset(FastSpeechDataset):\n def __getitem__(self, index):\n sample = super(MIDIDataset, self).__getitem__(index)\n item = self._get_item(index)\n sample['f0_midi'] = torch.FloatTensor(item['f0_midi'])\n sample['pitch_midi'] = torch.LongTensor(item['pitch_midi'])[:hparams['max_frames']]\n\n return sample\n\n def collater(self, samples):\n batch = super(MIDIDataset, self).collater(samples)\n batch['f0_midi'] = utils.collate_1d([s['f0_midi'] for s in samples], 0.0)\n batch['pitch_midi'] = utils.collate_1d([s['pitch_midi'] for s in samples], 0)\n # print((batch['pitch_midi'] == f0_to_coarse(batch['f0_midi'])).all())\n return batch\n\n\nclass OpencpopDataset(FastSpeechDataset):\n def __getitem__(self, index):\n sample = super(OpencpopDataset, self).__getitem__(index)\n item = self._get_item(index)\n sample['pitch_midi'] = torch.LongTensor(item['pitch_midi'])[:hparams['max_frames']]\n sample['midi_dur'] = torch.FloatTensor(item['midi_dur'])[:hparams['max_frames']]\n sample['is_slur'] = torch.LongTensor(item['is_slur'])[:hparams['max_frames']]\n return sample\n\n def collater(self, samples):\n batch = super(OpencpopDataset, self).collater(samples)\n batch['pitch_midi'] = utils.collate_1d([s['pitch_midi'] for s in samples], 0)\n batch['midi_dur'] = utils.collate_1d([s['midi_dur'] for s in samples], 0)\n batch['is_slur'] = utils.collate_1d([s['is_slur'] for s in samples], 0)\n return batch\n\n\nclass DiffSingerMIDITask(DiffSingerTask):\n def __init__(self):\n super(DiffSingerMIDITask, self).__init__()\n # self.dataset_cls = MIDIDataset\n self.dataset_cls = OpencpopDataset\n\n def run_model(self, model, sample, return_output=False, infer=False):\n txt_tokens = sample['txt_tokens'] # [B, T_t]\n target = sample['mels'] # [B, T_s, 80]\n # mel2ph = sample['mel2ph'] if hparams['use_gt_dur'] else None # [B, T_s]\n mel2ph = sample['mel2ph']\n if hparams.get('switch_midi2f0_step') is not None and self.global_step > hparams['switch_midi2f0_step']:\n f0 = None\n uv = None\n else:\n f0 = sample['f0']\n uv = sample['uv']\n energy 
= sample['energy']\n\n spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')\n if hparams['pitch_type'] == 'cwt':\n cwt_spec = sample[f'cwt_spec']\n f0_mean = sample['f0_mean']\n f0_std = sample['f0_std']\n sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)\n\n output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,\n ref_mels=target, f0=f0, uv=uv, energy=energy, infer=infer, pitch_midi=sample['pitch_midi'],\n midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur'))\n\n losses = {}\n if 'diff_loss' in output:\n losses['mel'] = output['diff_loss']\n if hparams['use_pitch_embed']:\n self.add_pitch_loss(output, sample, losses)\n if hparams['use_energy_embed']:\n self.add_energy_loss(output['energy_pred'], energy, losses)\n if not return_output:\n return losses\n else:\n return losses, output\n\n def validation_step(self, sample, batch_idx):\n outputs = {}\n txt_tokens = sample['txt_tokens'] # [B, T_t]\n\n target = sample['mels'] # [B, T_s, 80]\n energy = sample['energy']\n # fs2_mel = sample['fs2_mels']\n spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')\n mel2ph = sample['mel2ph']\n\n outputs['losses'] = {}\n\n outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)\n\n outputs['total_loss'] = sum(outputs['losses'].values())\n outputs['nsamples'] = sample['nsamples']\n outputs = utils.tensors_to_scalars(outputs)\n if batch_idx < hparams['num_valid_plots']:\n model_out = self.model(\n txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=None, uv=None, energy=energy, ref_mels=None, infer=True,\n pitch_midi=sample['pitch_midi'], midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur'))\n\n if hparams.get('pe_enable') is not None and hparams['pe_enable']:\n gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel\n pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel\n else:\n gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)\n pred_f0 = model_out.get('f0_denorm')\n self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0)\n self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}')\n self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}')\n if hparams['use_pitch_embed']:\n self.plot_pitch(batch_idx, sample, model_out)\n return outputs\n\n\nclass AuxDecoderMIDITask(FastSpeech2Task):\n def __init__(self):\n super().__init__()\n # self.dataset_cls = MIDIDataset\n self.dataset_cls = OpencpopDataset\n\n def build_tts_model(self):\n if hparams['use_midi']:\n self.model = FastSpeech2MIDI(self.phone_encoder)\n else:\n self.model = FastSpeech2(self.phone_encoder)\n\n def run_model(self, model, sample, return_output=False):\n txt_tokens = sample['txt_tokens'] # [B, T_t]\n target = sample['mels'] # [B, T_s, 80]\n mel2ph = sample['mel2ph'] # [B, T_s]\n f0 = sample['f0']\n uv = sample['uv']\n energy = sample['energy']\n\n spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')\n if hparams['pitch_type'] == 'cwt':\n cwt_spec = sample[f'cwt_spec']\n f0_mean = sample['f0_mean']\n f0_std = sample['f0_std']\n sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)\n\n output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,\n ref_mels=target, f0=f0, uv=uv, energy=energy, infer=False, pitch_midi=sample['pitch_midi'],\n 
midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur'))\n\n losses = {}\n self.add_mel_loss(output['mel_out'], target, losses)\n # self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)\n if hparams['use_pitch_embed']:\n self.add_pitch_loss(output, sample, losses)\n if hparams['use_energy_embed']:\n self.add_energy_loss(output['energy_pred'], energy, losses)\n if not return_output:\n return losses\n else:\n return losses, output\n\n def validation_step(self, sample, batch_idx):\n outputs = {}\n outputs['losses'] = {}\n outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True)\n outputs['total_loss'] = sum(outputs['losses'].values())\n outputs['nsamples'] = sample['nsamples']\n mel_out = self.model.out2mel(model_out['mel_out'])\n outputs = utils.tensors_to_scalars(outputs)\n # if sample['mels'].shape[0] == 1:\n # self.add_laplace_var(mel_out, sample['mels'], outputs)\n if batch_idx < hparams['num_valid_plots']:\n self.plot_mel(batch_idx, sample['mels'], mel_out)\n self.plot_dur(batch_idx, sample, model_out)\n if hparams['use_pitch_embed']:\n self.plot_pitch(batch_idx, sample, model_out)\n return outputs\n\n\n# class MIDI2PitchTask(DiffSingerTask):\n# def __init__(self):\n# super(DiffSpeechTask, self).__init__()\n# self.dataset_cls = MIDIDataset\n#\n# def build_tts_model(self):\n# mel_bins = hparams['audio_num_mel_bins']\n# self.model = GaussianDiffusion(\n# phone_encoder=self.phone_encoder,\n# out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),\n# timesteps=hparams['timesteps'],\n# K_step=hparams['K_step'],\n# loss_type=hparams['diff_loss_type'],\n# spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],\n# )\n# utils.load_ckpt(self.model, hparams['ds_ckpt'], 'model', strict=True)\n#\n# self.ds_params = [p for name, p in self.model.named_parameters() if\n# ('midi_embed' not in name) and ('pitch_predictor' not in name)]\n#\n# self.midi_params = [p for name, p in self.model.named_parameters() if\n# ('midi_embed' in name) or ('pitch_predictor' in name)]\n#\n# def build_optimizer(self, model):\n# self.optimizer = optimizer = torch.optim.AdamW(\n# self.midi_params,\n# lr=hparams['lr'],\n# betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),\n# weight_decay=hparams['weight_decay'])\n# return optimizer\n#\n# def run_model(self, model, sample, return_output=False, infer=False):\n# txt_tokens = sample['txt_tokens'] # [B, T_t]\n# target = sample['mels'] # [B, T_s, 80]\n# # mel2ph = sample['mel2ph'] if hparams['use_gt_dur'] else None # [B, T_s]\n# mel2ph = sample['mel2ph']\n# f0 = sample['f0']\n# uv = sample['uv']\n# energy = sample['energy']\n#\n# pitch_midi = sample['pitch_midi']\n#\n# spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')\n# if hparams['pitch_type'] == 'cwt':\n# cwt_spec = sample[f'cwt_spec']\n# f0_mean = sample['f0_mean']\n# f0_std = sample['f0_std']\n# sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)\n#\n# output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,\n# ref_mels=target, f0=f0, uv=uv, energy=energy, infer=infer, pitch_midi=pitch_midi)\n#\n# losses = {}\n# # if 'diff_loss' in output:\n# # losses['mel'] = output['diff_loss']\n# if hparams['use_pitch_embed']:\n# self.add_pitch_loss(output, sample, losses)\n# # if hparams['use_energy_embed']:\n# # self.add_energy_loss(output['energy_pred'], energy, losses)\n# if not return_output:\n# return losses\n# else:\n# return losses, output\n#\n# def 
validation_step(self, sample, batch_idx):\n# outputs = {}\n# txt_tokens = sample['txt_tokens'] # [B, T_t]\n#\n# target = sample['mels'] # [B, T_s, 80]\n# energy = sample['energy']\n# # fs2_mel = sample['fs2_mels']\n# spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')\n# mel2ph = sample['mel2ph']\n# f0 = sample['f0']\n# uv = sample['uv']\n#\n# pitch_midi = sample['pitch_midi']\n# # f0_midi = sample['f0_midi']\n#\n# outputs['losses'] = {}\n#\n# outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)\n#\n#\n# outputs['total_loss'] = sum(outputs['losses'].values())\n# outputs['nsamples'] = sample['nsamples']\n# outputs = utils.tensors_to_scalars(outputs)\n# if batch_idx < hparams['num_valid_plots']:\n# model_out = self.model(\n# txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, ref_mels=None, infer=True,\n# pitch_midi=pitch_midi)\n#\n# if hparams['use_pitch_embed']:\n# self.plot_pitch(batch_idx, sample, model_out)\n# return outputs\n", "#########\n# world\n##########\nimport librosa\nimport numpy as np\nimport torch\n\ngamma = 0\nmcepInput = 3 # 0 for dB, 3 for magnitude\nalpha = 0.45\nen_floor = 10 ** (-80 / 20)\nFFT_SIZE = 2048\n\n\nf0_bin = 256\nf0_max = 1100.0\nf0_min = 50.0\nf0_mel_min = 1127 * np.log(1 + f0_min / 700)\nf0_mel_max = 1127 * np.log(1 + f0_max / 700)\n\n\ndef f0_to_coarse(f0):\n is_torch = isinstance(f0, torch.Tensor)\n f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)\n f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1\n\n f0_mel[f0_mel <= 1] = 1\n f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1\n f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(np.int)\n assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())\n return f0_coarse\n\n\ndef norm_f0(f0, uv, hparams):\n is_torch = isinstance(f0, torch.Tensor)\n if hparams['pitch_norm'] == 'standard':\n f0 = (f0 - hparams['f0_mean']) / hparams['f0_std']\n if hparams['pitch_norm'] == 'log':\n f0 = torch.log2(f0) if is_torch else np.log2(f0)\n if uv is not None and hparams['use_uv']:\n f0[uv > 0] = 0\n return f0\n\n\ndef norm_interp_f0(f0, hparams):\n is_torch = isinstance(f0, torch.Tensor)\n if is_torch:\n device = f0.device\n f0 = f0.data.cpu().numpy()\n uv = f0 == 0\n f0 = norm_f0(f0, uv, hparams)\n if sum(uv) == len(f0):\n f0[uv] = 0\n elif sum(uv) > 0:\n f0[uv] = np.interp(np.where(uv)[0], np.where(~uv)[0], f0[~uv])\n uv = torch.FloatTensor(uv)\n f0 = torch.FloatTensor(f0)\n if is_torch:\n f0 = f0.to(device)\n return f0, uv\n\n\ndef denorm_f0(f0, uv, hparams, pitch_padding=None, min=None, max=None):\n if hparams['pitch_norm'] == 'standard':\n f0 = f0 * hparams['f0_std'] + hparams['f0_mean']\n if hparams['pitch_norm'] == 'log':\n f0 = 2 ** f0\n if min is not None:\n f0 = f0.clamp(min=min)\n if max is not None:\n f0 = f0.clamp(max=max)\n if uv is not None and hparams['use_uv']:\n f0[uv > 0] = 0\n if pitch_padding is not None:\n f0[pitch_padding] = 0\n return f0\n" ]
[ [ "numpy.load", "torch.LongTensor", "torch.FloatTensor" ], [ "numpy.log", "numpy.log2", "numpy.rint", "torch.log2", "torch.FloatTensor", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JustinLokHinWu/TextGAN-PyTorch
[ "427f08890056a96fde7e5b67c26c3bb9f5a420a4" ]
[ "models/OurGAN_D.py" ]
[ "# -*- coding: utf-8 -*-\n# @Author : William\n# @Project : TextGAN-william\n# @FileName : RelGAN_D.py\n# @Time : Created at 2019-04-25\n# @Blog : http://zhiweil.ml/\n# @Description : \n# Copyrights (C) 2018. All Rights Reserved.\nimport math\n\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport config as cfg\nfrom utils.helpers import truncated_normal_\n\nn_heads = 4\nn_transformer_layers = 3\nclass OurGAN_D(nn.Module):\n def __init__(self, embed_dim, max_seq_len, vocab_size, padding_idx, gpu=False, dropout=0.25):\n super(OurGAN_D, self).__init__()\n\n self.embed_dim = embed_dim\n self.max_seq_len = max_seq_len\n self.gpu = gpu\n\n self.embeddings = nn.Sequential(\n nn.Dropout(dropout),\n nn.Linear(vocab_size, embed_dim, bias=False),\n nn.Tanh()\n )\n\n # Returns BxTxD\n self.transformer = nn.TransformerEncoder(\n nn.TransformerEncoderLayer(embed_dim, nhead=n_heads),\n n_transformer_layers,\n norm=nn.LayerNorm(self.embed_dim)\n )\n\n self.fc1 = nn.Sequential(\n nn.Linear(self.embed_dim * self.max_seq_len, self.embed_dim),\n nn.LeakyReLU(0.2)\n )\n self.fc2 = nn.Sequential(\n nn.Dropout(dropout),\n nn.Linear(self.embed_dim, 100),\n nn.LeakyReLU(0.2)\n )\n self.fc3 = nn.Sequential(\n nn.Linear(100, 1),\n nn.Sigmoid()\n )\n\n self.init_params()\n\n self.pos_encoding = self.positional_encoding()\n \n def positional_encoding(self):\n # From Assignment 3\n pos_indices = torch.arange(self.max_seq_len)[..., None]\n dim_indices = torch.arange(self.embed_dim//2)[None, ...]\n exponents = (2*dim_indices).float()/(self.embed_dim)\n trig_args = pos_indices / (10000**exponents)\n sin_terms = torch.sin(trig_args)\n cos_terms = torch.cos(trig_args)\n\n pos_encodings = torch.zeros((self.max_seq_len, self.embed_dim))\n pos_encodings[:, 0::2] = sin_terms\n pos_encodings[:, 1::2] = cos_terms\n\n if self.gpu:\n pos_encodings = pos_encodings.cuda()\n\n return pos_encodings\n\n def forward(self, inp):\n \"\"\"\n Get logits of discriminator\n :param inp: batch_size * seq_len * vocab_size\n :return logits: [batch_size * num_rep] (1-D tensor)\n \"\"\"\n emb = self.embeddings(inp) # batch_size * max_seq_len * embed_dim\n\n seqlen = inp.size(1)\n\n emb = emb + self.pos_encoding[:seqlen]\n\n trans = self.transformer(emb) # batch * max_seq_len * embed_dim\n\n x = self.fc1(trans.flatten(start_dim=1))\n x = self.fc2(x)\n x = self.fc3(x)\n \n return x\n\n def init_params(self):\n for param in self.parameters():\n if param.requires_grad and len(param.shape) > 0:\n stddev = 1 / math.sqrt(param.shape[0])\n if cfg.dis_init == 'uniform':\n torch.nn.init.uniform_(param, a=-0.05, b=0.05)\n elif cfg.dis_init == 'normal':\n torch.nn.init.normal_(param, std=stddev)\n elif cfg.dis_init == 'truncated_normal':\n truncated_normal_(param, std=stddev)\n" ]
[ [ "torch.nn.Dropout", "torch.nn.init.uniform_", "torch.zeros", "torch.sin", "torch.nn.Tanh", "torch.nn.Sigmoid", "torch.nn.Linear", "torch.nn.TransformerEncoderLayer", "torch.nn.LayerNorm", "torch.nn.init.normal_", "torch.nn.LeakyReLU", "torch.arange", "torch.cos" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Xraydylan/Arducam-OV2640-Pico-Package
[ "0875dd8b5dcfeb69e1c80a554966818fce8d8cb0" ]
[ "Arducam2640/Arducam2640.py" ]
[ "import os\nfrom serial import Serial\nimport time\nimport numpy as np\nfrom PIL import Image, JpegImagePlugin\nimport io\n\n# Default Serial settings\nPORT = \"COM4\"\nBAUDRATE = 921600\nTIMEOUT = 1\n\n# Image types\nYUV = 0\nJPEG = 1\n\nYUV_SIZE = [96, 96]\n\n# JPEG Compressions\nCompression_Off = 0\nCompression_1 = 1\nCompression_2 = 2\nCompression_3 = 3\nCompression_4 = 4\nCompression_Full = 5\n\n# JPEG resolutions\nOV2640_160x120 = 0\nOV2640_176x144 = 1\nOV2640_320x240 = 2\nOV2640_352x288 = 3\nOV2640_640x480 = 4\nOV2640_800x600 = 5\nOV2640_1024x768 = 6\nOV2640_1280x1024 = 7\nOV2640_1600x1200 = 8\n\n# Light modes\nAuto = 0\nSunny = 1\nCloudy = 2\nOffice = 3\nHome = 4\n\n# Saturation\n# Saturation4 = 0\n# Saturation3 = 1\nSaturation2 = 2\nSaturation1 = 3\nSaturation0 = 4\nSaturation_1 = 5\nSaturation_2 = 6\n# Saturation_3 = 7\n# Saturation_4 = 8\n\n# Brightness\n# Brightness4 = 0\n# Brightness3 = 1\nBrightness2 = 2\nBrightness1 = 3\nBrightness0 = 4\nBrightness_1 = 5\nBrightness_2 = 6\n# Brightness_3 = 7\n# Brightness_4 = 8\n\n# Contrast\n# Contrast4 = 0\n# Contrast3 = 1\nContrast2 = 2\nContrast1 = 3\nContrast0 = 4\nContrast_1 = 5\nContrast_2 = 6\n# Contrast_3 = 7\n# Contrast_4 = 8\n\n# Special Effects\nAntique = 0\nBluish = 1\nGreenish = 2\nReddish = 3\nBW = 4\nNegative = 5\nBWnegative = 6\nNormal = 7\n\n\n# Sepia = 8\n# Overexposure = 9\n# Solarize = 10\n# Blueish = 11\n# Yellowish = 12\n\n\n# Use Arducam Class\nclass Arducam:\n def __init__(self, port=None, baudrate=None, timeout=None, image_type=None, conversion_size=None, save_dir=None):\n '''\n Arducam class\n\n :param port: Serial Port\n :param baudrate: Baudrate for serial connection\n :param timeout: Timeout for serial connection (for pyserial timeout)\n :param image_type: integer for to select type\n 0: YUV\n 1: JPEG\n :param conversion_size: Size for YUV resolution\n :param save_dir: Directory for saving images\n '''\n if port is None:\n port = PORT\n if baudrate is None:\n baudrate = BAUDRATE\n if timeout is None:\n timeout = TIMEOUT\n if image_type is None:\n image_type = YUV\n if conversion_size is None:\n conversion_size = YUV_SIZE\n if save_dir is None:\n save_dir = \"saves/\"\n\n self.image_type = YUV\n self.conversion_size = conversion_size\n self.save_dir = None\n\n self.ser = SerialCommunicator(port=port, baudrate=baudrate, timeout=timeout)\n\n self.change_save_dir(save_dir)\n self.check_connection()\n self.set_image_type(image_type)\n\n def _check_save_dir_exist(self):\n if not os.path.exists(self.save_dir):\n os.makedirs(self.save_dir)\n\n def set_image_type(self, image_type):\n '''\n Set image type of Arducam.\n\n :param image_type: integer for to select type\n 0: YUV\n 1: JPEG\n '''\n\n if image_type == YUV:\n self.ser.sender(b\"\\x11\")\n elif image_type == JPEG:\n self.ser.sender(b\"\\x12\")\n else:\n return\n\n self.image_type = image_type\n time.sleep(0.5)\n\n def set_compression_JPEG(self, compression):\n '''\n Set compression for JPEG images.\n\n :param compression: integer 0-5 (0 no jpeg compression, 5 max jpeg compression)\n '''\n if self.image_type == YUV:\n print(\"Mode is YUV. [set_quality] not possible. 
Please set image_type to JPEG\")\n return\n\n if compression == Compression_Off:\n self.ser.sender(b\"\\x90\")\n elif compression == Compression_1:\n self.ser.sender(b\"\\x91\")\n elif compression == Compression_2:\n self.ser.sender(b\"\\x92\")\n elif compression == Compression_3:\n self.ser.sender(b\"\\x93\")\n elif compression == Compression_3:\n self.ser.sender(b\"\\x94\")\n elif compression == Compression_Full:\n self.ser.sender(b\"\\x95\")\n\n def set_resolution_JPEG(self, resolution):\n '''\n Set compression for JPEG images.\n\n :param resolution: integer 0-8\n '''\n if self.image_type == YUV:\n print(\"Mode is YUV. [set_quality] not possible. Please set image_type to JPEG\")\n return\n\n if resolution == OV2640_160x120:\n self.ser.sender(b\"\\x00\")\n elif resolution == OV2640_176x144:\n self.ser.sender(b\"\\x01\")\n elif resolution == OV2640_320x240:\n self.ser.sender(b\"\\x02\")\n elif resolution == OV2640_352x288:\n self.ser.sender(b\"\\x03\")\n elif resolution == OV2640_640x480:\n self.ser.sender(b\"\\x04\")\n elif resolution == OV2640_800x600:\n self.ser.sender(b\"\\x05\")\n elif resolution == OV2640_1024x768:\n self.ser.sender(b\"\\x06\")\n elif resolution == OV2640_1280x1024:\n self.ser.sender(b\"\\x07\")\n elif resolution == OV2640_1600x1200:\n self.ser.sender(b\"\\x08\")\n\n def set_light_mode(self, light_mode):\n '''\n Set saturation.\n\n :param light_mode: integer 0-4\n '''\n\n if light_mode == Auto:\n self.ser.sender(b\"\\x40\")\n elif light_mode == Sunny:\n self.ser.sender(b\"\\x41\")\n elif light_mode == Cloudy:\n self.ser.sender(b\"\\x42\")\n elif light_mode == Office:\n self.ser.sender(b\"\\x43\")\n elif light_mode == Home:\n self.ser.sender(b\"\\x44\")\n\n def set_saturation(self, saturation):\n '''\n Set light mode.\n\n :param saturation: integer 2-6\n '''\n\n if saturation == Saturation2:\n self.ser.sender(b\"\\x50\")\n elif saturation == Saturation1:\n self.ser.sender(b\"\\x51\")\n elif saturation == Saturation0:\n self.ser.sender(b\"\\x52\")\n elif saturation == Saturation_1:\n self.ser.sender(b\"\\x53\")\n elif saturation == Saturation_2:\n self.ser.sender(b\"\\x54\")\n\n def set_brightness(self, brightness):\n '''\n Set brightness.\n\n :param brightness: integer 2-6\n '''\n\n if brightness == Brightness2:\n self.ser.sender(b\"\\x60\")\n elif brightness == Brightness1:\n self.ser.sender(b\"\\x61\")\n elif brightness == Brightness0:\n self.ser.sender(b\"\\x62\")\n elif brightness == Brightness_1:\n self.ser.sender(b\"\\x63\")\n elif brightness == Brightness_2:\n self.ser.sender(b\"\\x64\")\n\n def set_contrast(self, contrast):\n '''\n Set brightness.\n\n :param contrast: integer 2-6\n '''\n\n if contrast == Contrast2:\n self.ser.sender(b\"\\x70\")\n elif contrast == Contrast1:\n self.ser.sender(b\"\\x71\")\n elif contrast == Contrast0:\n self.ser.sender(b\"\\x72\")\n elif contrast == Contrast_1:\n self.ser.sender(b\"\\x73\")\n elif contrast == Contrast_2:\n self.ser.sender(b\"\\x74\")\n\n def set_special_effect(self, effect):\n '''\n Set brightness.\n\n :param effect: integer 0-7\n '''\n\n if effect == Antique:\n self.ser.sender(b\"\\x80\")\n elif effect == Bluish:\n self.ser.sender(b\"\\x81\")\n elif effect == Greenish:\n self.ser.sender(b\"\\x82\")\n elif effect == Reddish:\n self.ser.sender(b\"\\x83\")\n elif effect == BW:\n self.ser.sender(b\"\\x84\")\n elif effect == Negative:\n self.ser.sender(b\"\\x85\")\n elif effect == BWnegative:\n self.ser.sender(b\"\\x86\")\n elif effect == Normal:\n self.ser.sender(b\"\\x87\")\n\n def capture_frame(self, raw=False, 
resize_YUV=True, YUV_to_RGB=True, save_name=None):\n '''\n Takes an image with Arducam according to settings.\n\n :param raw: if set returns the raw bytearray\n :param resize_YUV: resize YUV according to conversion_size\n :param YUV_to_RGB: if set returns image RGB image from YUV\n :param save_name: if save name is a string then the return value of capture_frame will be saved.\n the defaults are:\n .txt for byte array\n .npy for numpy arrays\n .jpeg for images\n :return: for a JPEG returns image object\n for a YUV returns array of pixels (resized to conversion_size if resize_YUV set True)\n or an RGB image object if YUV_to_RGB set True\n for raw returns bytearray\n '''\n self.ser.sender(b\"\\x10\")\n byte_array = self.ser.get_data()\n if raw:\n if save_name is not None:\n self.save(byte_array, save_name)\n return byte_array\n image = self.convert_to_image(byte_array, resize_YUV=resize_YUV, YUV_to_RGB=YUV_to_RGB)\n if save_name is not None:\n self.save(image, save_name)\n return image\n\n def convert_to_image(self, byte_array, image_type=None, resize_YUV=True, YUV_to_RGB=True):\n '''\n Converts bytearray to image.\n\n :param byte_array: bytearray of image\n :param image_type: image_type the bytearray is converted to. If None the previously set type is used.\n :param resize_YUV: resize YUV according to conversion_size\n :param YUV_to_RGB: if set returns image RGB image from YUV\n :return: for a JPEG returns image object\n for a YUV returns array of pixels (resized to conversion_size if resize_YUV set True)\n or an RGB image object if YUV_to_RGB set True\n '''\n if image_type is None:\n image_type = self.image_type\n\n img = None\n if image_type == YUV:\n img = self.convert_to_YUV(byte_array, resize=resize_YUV, YUV_to_RGB=YUV_to_RGB)\n elif image_type == JPEG:\n img = self.convert_to_JPEG(byte_array)\n return img\n\n def convert_to_JPEG(self, byte_array):\n '''\n Converts byte_array to JPEG image.\n\n :param byte_array: bytearray of the image\n :return: Image object\n '''\n return Image.open(io.BytesIO(byte_array))\n\n def convert_to_YUV(self, byte_array, resize=True, YUV_to_RGB=True):\n '''\n Converts byte_array to YUV pixel array according to conversion_size if resize.\n\n The format is YUV422 with the order 'YUYV'\n\n :param byte_array: bytearray of the image\n :param resize: determine if pixel array needs to be resized\n :param YUV_to_RGB: if set returns image RGB image from YUV\n :return: for a YUV returns array of pixels (resized to conversion_size if resize set True)\n or an RGB image object if YUV_to_RGB set True\n '''\n\n Y = []\n U = []\n V = []\n for index, byte in enumerate(byte_array):\n val = byte_to_int(byte)\n if index % 4 == 1:\n U += [val] * 2\n elif index % 4 in [0, 2]:\n Y.append(val)\n elif index % 4 == 3:\n V += [val] * 2\n\n YUV_simple = [Y, U, V]\n YUV_pix = np.array(list(zip(*YUV_simple)))\n\n if YUV_to_RGB:\n YUV_pix = np.array([self.YUV_pix_to_RGB_pix(pix) for pix in YUV_pix])\n YUV_pix = np.resize(YUV_pix, list(self.conversion_size) + [3])\n return Image.fromarray(np.uint8(YUV_pix)).convert(\"RGB\")\n if resize:\n return np.resize(YUV_pix, list(self.conversion_size) + [3])\n return YUV_pix\n\n def YUV_pix_to_RGB_pix(self, pix):\n '''\n Converts YUV pixel into RGB pixel\n\n :param pix: pixel with YUV values\n :return: pixel with RGB values\n '''\n Y, U, V = pix\n R = limit_value(Y + 1.370705 * (V - 128))\n G = limit_value(Y - 0.698001 * (V - 128) - 0.337633 * (U - 128))\n B = limit_value(Y + 1.732446 * (U - 128))\n return np.array([R, G, B])\n\n def 
check_connection(self):\n '''\n Checks if Pico is operational if not an Exception is raised\n '''\n if not self.ser.test_connection():\n raise Exception(\"No Pico Connected or Pico not working\")\n print(\"Connection Established\")\n\n def change_save_dir(self, directory):\n '''\n Changes name of save dictionary\n\n :param directory: name of dictionary\n '''\n if directory[-1] != \"/\":\n directory += \"/\"\n self.save_dir = directory\n\n def save(self, data, name):\n '''\n Saves data according to type\n\n :param data: data\n :param name: name of file. Extension may be modified.\n :return:\n '''\n self._check_save_dir_exist()\n pil_image_types = [Image.Image, JpegImagePlugin.JpegImageFile]\n\n if type(data) == np.ndarrray:\n if len(elem := name.split(\".\")) != 1:\n name = \".\".join(elem[:-1])\n path = self.save_dir + name\n np.save(path, data)\n\n elif type(data) in pil_image_types:\n if len(name.split(\".\")) == 1:\n name += \".jpg\"\n path = self.save_dir + name\n data.save(path)\n\n elif type(data) == bytes:\n if len(name.split(\".\")) == 1:\n name += \".txt\"\n path = self.save_dir + name\n with open(path, \"wb\") as f:\n f.write(data)\n\n\n\n\n def load(self, name, in_save_dir=True):\n '''\n Loads data according to file extension\n\n :param name: name of file\n :param in_save_dir: flag if file is located in the save_dir (automatically adds path to save_dir)\n :return: loaded data according to file extension\n '''\n\n path = name\n if in_save_dir:\n path = self.save_dir + name\n\n split = name.split(\".\")\n if len(split) == 1:\n extension = \".npy\"\n path += \".npy\"\n split.append(extension)\n\n if split[-1] == \".npy\":\n return np.load(path)\n\n if split[-1] == \".txt\":\n with open(path, \"rb\") as f:\n return f.read()\n\n return Image.open(path)\n\n\nclass SerialCommunicator:\n def __init__(self, port=None, baudrate=None, timeout=None):\n if port is None:\n port = PORT\n if baudrate is None:\n baudrate = BAUDRATE\n if timeout is None:\n timeout = TIMEOUT\n\n self.port = port\n self.baudrate = baudrate\n self.timeout = timeout\n\n self.connection_test_loops = 2\n self.confirmation_byte = b'\\x00'\n\n self.ser = None\n\n self.init_serial()\n\n def init_serial(self):\n self.ser = Serial(self.port, self.baudrate, timeout=self.timeout)\n\n def sender(self, data, wait=True):\n self.ser.write(data)\n if wait:\n time.sleep(0.2)\n\n def get_data(self):\n array = b''\n image_flag = 0\n while 1:\n data = self.ser.readline()\n if data == b\"\":\n continue\n\n if data == b\"STOP\\n\":\n print(\"Image Received\")\n array = array[:-1]\n return array\n\n if image_flag == 1:\n array += data\n\n if data == b\"START\\n\":\n print(\"Start Flag\")\n image_flag = 1\n\n def test_connection(self):\n self.sender(b'\\xA0')\n for i in range(self.connection_test_loops):\n byte = self.ser.read()\n if not byte:\n continue\n if byte == self.confirmation_byte:\n return True\n return False\n\n\ndef byte_to_int(byte):\n if type(byte) == int:\n return byte\n return int.from_bytes(byte, byteorder='big')\n\n\ndef limit_value(value):\n if value > 255:\n return 255\n if value < 0:\n return 0\n return value\n" ]
[ [ "numpy.uint8", "numpy.load", "numpy.array", "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
OscarFlores-IFi/CDINP19
[ "7fb0cb6ff36b9a10bcfa0772b172c5e49996df48", "7fb0cb6ff36b9a10bcfa0772b172c5e49996df48" ]
[ "code/p7.py", "code/p3.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 25 09:37:23 2019\n\n@author: if715029\n\"\"\"\n\n# limpieza de base de datos y manejod de texto\nimport pandas as pd\nimport string\nfrom datetime import datetime\n\n#%% Importar tabla\ndirty = pd.read_csv('../data//dirty_data_v3.csv', encoding='latin-1')\n\n#%% Funcion para retirar signos de puntuación. \ndef remove_punctuation(x):\n try:\n x = ''.join(ch for ch in x if ch not in string.puntuation)\n except:\n pass\n\n return(x)\n\n#%% Remover digitos\ndef remove_digits(x):\n try:\n x = ''.join(ch for ch in x if ch not in string.digits)\n except:\n pass\n \n return(x)\n\n#%% quitar espacios\ndef remove_whitespace(x):\n try:\n x = ''.join(x.split())\n except:\n pass\n \n return(x)\n \n#%% reemplazar texto\ndef replace(x,to_replace,replacement):\n try:\n x = x.replace(to_replace,replacement)\n except:\n pass\n \n return(x)\n \n#%% convertir a mayusculas\ndef uppercase_text(x):\n try:\n x = x.upper() \n except:\n pass\n \n return (x)\n\n#%%\ndef lowercase_text(x):\n try:\n x = x.lower()\n except:\n pass\n \n return(x)\n \n#%%\ndef only_digits(x):\n try:\n x = ''.join(ch for ch in x if ch in string.digits)\n except:\n pass\n \n return(x)\n\n#%% aplicar funciones\ndirty['apellido'] = dirty['apellido'].apply(lowercase_text)\ndirty['apellido'] = dirty['apellido'].apply(replace,args=('0','o'))\ndirty['apellido'] = dirty['apellido'].apply(replace,args=('2','z'))\ndirty['apellido'] = dirty['apellido'].apply(replace,args=('4','a'))\ndirty['apellido'] = dirty['apellido'].apply(replace,args=('1','i'))\ndirty['apellido'] = dirty['apellido'].apply(replace,args=('8','b'))\ndirty['apellido'] = dirty['apellido'].apply(remove_punctuation)\ndirty['apellido'] = dirty['apellido'].apply(remove_digits)\n\n\n\n\n\n\n\n\n\n\n\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 11 09:18:37 2019\n\n@author: if715029\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport sklearn.metrics as skm\nimport scipy.spatial.distance as sc\n\n#%% Leer datos\ndata = pd.read_excel('../data/Test de películas(1-16).xlsx', encoding='latin_1')\n\n#%% Seleccionar datos (a mi estilo)\npel = pd.DataFrame()\nfor i in range((len(data.T)-5)//3):\n pel = pel.append(data.iloc[:,6+i*3])\npel = pel.T\nprint(pel)\n\n#%% Seleccionar datos (estilo Riemann)\ncsel = np.arange(6,243,3)\ncnames = list(data.columns.values[csel])\ndatan = data[cnames]\n\n#%% Promedios\nmovie_prom = datan.mean(axis=0)\nuser_prom = datan.mean(axis=1)\n\n#%% Calificaciones a binarios (>= 3)\ndatan = datan.copy()\ndatan[datan<3] = 0\ndatan[datan>=3] = 1\n\n#%% Calcular distancias de indices de similitud\n#D1 = sc.pdist(datan,'hamming') # hamming == matching\nD1 = sc.pdist(datan,'jaccard')\n\nD1 = sc.squareform(D1)\n\n#D2 = sc.pdist(data_b,'jaccard') # hamming == matching\n#D2 = sc.squareform(D2)\n\nIsim1 = 1-D1\n#%% Seleccionar usuario y determinar sus parecidos \nuser = 1\nIsim_user = Isim1[user]\nIsim_user_sort = np.sort(Isim_user)\nindx_user = np.argsort(Isim_user)\n\n#%% Recomendación de películas p1.\nUSER = datan.loc[user]\nUSER_sim = datan.loc[indx_user[-2]]\n\nindx_recomend1 = (USER_sim==1)&(USER==0)\nrecomend1 = list(USER.index[indx_recomend1])\n\n#%% Recomendación peliculas p2. \nUSER = datan.loc[user]\nUSER_sim = np.mean(datan.loc[indx_user[-6:-1]],axis = 0)\nUSER_sim[USER_sim<=.5]=0\nUSER_sim[USER_sim>.5]=1\n\nindx_recomend2 = (USER_sim==1)&(USER==0)\nrecomend2 = list(USER.index[indx_recomend2])\n\n\n\n\n\n" ]
[ [ "pandas.read_csv" ], [ "pandas.read_excel", "numpy.arange", "pandas.DataFrame", "numpy.sort", "scipy.spatial.distance.pdist", "numpy.mean", "numpy.argsort", "scipy.spatial.distance.squareform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
buildist/VideoEnhancer
[ "0d38c4892c65273a273ed23d3a8cce03f76f92f9" ]
[ "DAIN_extensions/InterpolationCh/setup.py" ]
[ "#!/usr/bin/env python3\nimport os\nimport torch\n\nfrom setuptools import setup, find_packages\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\ncxx_args = ['-std=c++11']\n\nnvcc_args = [\n '-gencode', 'arch=compute_50,code=sm_50',\n '-gencode', 'arch=compute_52,code=sm_52',\n '-gencode', 'arch=compute_60,code=sm_60',\n '-gencode', 'arch=compute_61,code=sm_61',\n # '-gencode', 'arch=compute_70,code=sm_70',\n # '-gencode', 'arch=compute_70,code=compute_70'\n '-D__CUDA_NO_HALF_OPERATORS__' # <-- Just add this line\n]\n\nsetup(\n name='interpolationch_cuda',\n ext_modules=[\n CUDAExtension('interpolationch_cuda', [\n 'interpolationch_cuda.cc',\n 'interpolationch_cuda_kernel.cu'\n ], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args})\n ],\n cmdclass={\n 'build_ext': BuildExtension\n })\n" ]
[ [ "torch.utils.cpp_extension.CUDAExtension" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
christopher-salomon-smith/vip-mab
[ "61a120254673b54770b744b19e18d28ffff15f1f", "61a120254673b54770b744b19e18d28ffff15f1f" ]
[ "code/KL_UCB/UCB1.py", "code/resilient-bandit/malicious_bernoulli.py" ]
[ "import numpy as np\nimport scipy.stats as sps\nimport matplotlib.pyplot as plt\n\n\nclass UCB1:\n ''' Representation of a single agent bandit problem and a method to run the UCB1 algorithm on this problem\n\n Attributes\n ----------\n T: The number of time steps the UCB1 algorithm will run for.\n arm_distributions: A list of scipy.stats probability distributions bounded on [0,1]\n means: A list of arm means. Extracted from arm_distributions\n M: Number of arms. Extracted from length of arm_distributions\n regret: A 1xT numpy ndarray of the expected regret from the most recent algorithm run\n '''\n\n def __init__(self, T, arm_distributions):\n ''' Construct a single agent bandit problem instance \n \n Parameters\n ----------\n T: The number of time steps the UCB1 algorithm will run for.\n arm_distributions: A list of scipy.stats probability distributions bounded on [0,1].\n\n Raises\n ------\n ValueError\n If T is not a positive integer.\n If the support for any arm distribution is not in [0,1].\n '''\n if (T < 1 or type(T) is not int):\n raise ValueError('T must be a positive integer')\n if (any(d.support()[0] < 0 or d.support()[1] > 1 for d in arm_distributions)): \n raise ValueError('distribution support must lie in [0,1]')\n self.T = T\n self.arm_distributions = arm_distributions\n self.means = [d.mean() for d in arm_distributions]\n self.M = len(arm_distributions)\n self.regret = None\n\n def C(self, n, t):\n ''' Calculate confidence width at time t for an arm pulled n times so far '''\n return np.sqrt((2 * np.log(t+1)) / n)\n\n def plot_regret(self):\n ''' Plots regret of last run vs theoretical regret bounds \n\n Note: make sure UCB1.run() was called before calling this method\n '''\n optimal_arm = np.argmax(self.means)\n time_axis = list(range(self.T))\n # See Theorem 1 of Auer 2002\n gaps = [self.means[optimal_arm] - mean for mean in self.means]\n sum_gaps = np.sum(gaps)\n sum_gap_reciprocals = 0\n for gap in gaps:\n if (gap != 0):\n sum_gap_reciprocals += 1 / gap\n theoretical_regret_bounds = [8 * np.log(t+1) * sum_gap_reciprocals + (1 + (np.pi ** 2)/3) * sum_gaps for t in time_axis]\n plt.plot(time_axis, theoretical_regret_bounds, '--')\n plt.plot(time_axis, self.regret)\n plt.show()\n\n def run(self):\n ''' Run the UCB1 algorithm on the bandit problem instance held by self\n\n Return\n ------\n A list of length self.T with expected regret at each time t\n\n '''\n N = np.zeros(self.M) # keeps track of number of times arm k has been chosen\n S = np.zeros(self.M) # keeps track of cumulative sum of rewards for arm k\n # data structures just for plotting regret\n optimal_arm = np.argmax(self.means)\n exp_cum_rwd = [0 for t in range(self.T)] # exp_cum_rwd[t] is expected cumulative reward at time t\n for t in range(self.M):\n N[t] = 1\n S[t] = self.arm_distributions[t].rvs()\n exp_cum_rwd[t] = exp_cum_rwd[t-1] + self.means[t] if t != 0 else self.means[t] # t is index of chosen arm here\n for t in range(self.M, self.T):\n a = np.argmax([(S[arm]/N[arm]) + self.C(N[arm], t) for arm in range(self.M)])\n r = self.arm_distributions[a].rvs()\n N[a] = N[a] + 1\n S[a] = S[a] + r\n exp_cum_rwd[t] = exp_cum_rwd[t-1] + self.means[a]\n optimal_exp_cum_rwd = [(t+1) * self.means[optimal_arm] for t in range(self.T)]\n regret = np.asarray(optimal_exp_cum_rwd) - np.asarray(exp_cum_rwd) # see definition of regret\n self.regret = regret\n return regret\n\n\n# # test run\n# T = 100000\n# rwd_means = [.2, .3, .4, .5, .6]\n# sd = 0.5\n# distributions = [sps.truncnorm(a=(0 - rwd_means[i]) / sd, b=(1 - 
rwd_means[i]) / sd, loc=rwd_means[i], scale=0.2) for i in range(len(rwd_means))]\n# kl = UCB1(T, distributions)\n# kl.run()\n# kl.plot_regret()\n\n\n", "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport networkx as nx\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\nM = 6 # number of arms\n\n# SET GRAPH TYPE BELOW #\n\n#G = nx.DiGraph() # directed_rooted.png from this\n#G.add_nodes_from([0, 1, 2, 3, 4, 5])\n#G.add_edges_from([(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5),\n# (0, 1), (1, 2), (1, 3), (2, 4), (3, 5), (5, 2)])\n\nG = nx.fast_gnp_random_graph(6, 0.6, seed=1,directed=True) # directed_strongly_connected.png from this\nprint(nx.is_strongly_connected(G)) # check directed_strongly_connected.png with this\n\n#G = nx.fast_gnp_random_graph(6, 0.4, seed=1,directed=True) # directed_weakly_connected.png from this\n#print(nx.is_weakly_connected(G)) # check directed_weakly_connected.png with this\n#print(nx.is_strongly_connected(G)) # and this\n\n#G = nx.fast_gnp_random_graph(6, 0.5, seed=40,directed=False) # test.png from this\n#G = nx.fast_gnp_random_graph(5, 0.25, seed=42,directed=False) #undirected_unconnected.png from this\n\n#G = nx.Graph() # graph for single agent\n#G.add_nodes_from([0]) # for single agent\n\n#G = nx.complete_graph(6)\n\n# SET GRAPH TYPE ABOVE #\n\nnodes = list(G.nodes)\nfor i in nodes:\n G.add_edge(i,i)\nA = nx.adjacency_matrix(G)\n\nN = len(G) # number of agents\nT = 250\n\n#random.seed(42)\n\na = A.toarray() # make adjacency matrix an array for ease of use\nneighbors = [] # list of all agents' neighbors (wonder if we could do this with adjacency list)\nfor i in range(len(a)):\n curr_neighbors = [] # neighbors of current agent\n for j in range(len(a)):\n if a[j][i] == 1:\n curr_neighbors.append(j)\n neighbors.append(curr_neighbors)\n\nnum_neighbors = [sum(A.toarray()[:,i]) for i in range(N)] # get cardinality of neighbors for each agent\n\n\n# In[2]:\n\n\nagent0_regret = [] # FIX THIS\nagent1_regret = []\nagent2_regret = []\nagent3_regret = []\nagent4_regret = []\nagent5_regret = []\n\nmalicious = 0 # index of malicious agent\nnum_malicious = 1\n\nfor epoch in range(10):\n # initialize all vectors in a matrix for each time step, names corresponding to paper\n # (wonder if we could do something like np.zeros((N,M,T))?)\n n = [np.zeros((N,M)) for t in range(T+1)] # (num times agent i picked arm k until time t)\n x = [np.zeros((N,M)) for t in range(T+1)] # (avg rwd of arm k at agent i until time t)\n X = [np.zeros((N,M)) for t in range(T+1)] # (rwd of agent i at arm k at time t? 
I think it should be cumulative rwd of agent i at arm k until time t)\n z = [np.zeros((N,M)) for t in range(T+1)] # (some kind of avg reward among all agents)\n\n # create rwds array to hold all rewards each agent picks\n rwds = [np.zeros(N) for t in range(T+1)]\n\n #arm_means = [random.uniform(0, 1) for x in range(0, M)] # means between 0 and 1 for all arms\n arm_means = [0.15,0.1,0.2,0.3,0.4,0.5] # means should equal Bernoulli probability\n max_mean = max(arm_means) # get max mean\n\n sigma = 0.1 # standard deviation\n\n # initialization step\n for agent in range(N):\n for arm in range(M):\n X[1][agent][arm] = np.random.binomial(size=1, n=1, p= arm_means[arm])\n n[1][agent][arm] += 1\n z[1][agent][arm] = X[0][agent][arm]\n x[1][agent][arm] = X[0][agent][arm]\n\n for t in range(1,T): # loop over time\n for agent in range(N): # loop through all agents\n if agent == malicious and num_malicious:\n #X[t+1][agent][candidate] = 0.33 # this and n array shouldn't matter for malicious if always force z=0.33?\n for arm in range(M):\n rwds[t+1][agent] = 0.33 \n z[t+1][agent][arm] = 0.33 # always force malicious t0 estimate 0.33 for every arm every time\n else:\n \n # (DECISION MAKING)\n\n Q = [] # corresponds to Q in paper\n \n for arm in range(M):\n q = z[t][agent][arm] + np.sqrt((2*np.log(t))/(num_neighbors[agent]*n[t][agent][arm]))\n Q.append(q)\n \n if agent == malicious and num_malicious: # so this should never run right? Because if agent == malicious is already up there...\n candidate = np.argmin(arm_means) # set malicious agents' policy\n else:\n candidate = np.argmax(Q)\n\n X[t+1][agent][candidate] = np.random.binomial(size=1, n=1, p=arm_means[candidate]) # calculate reward for current agent's arm\n rwds[t+1][agent] = X[t+1][agent][candidate] # keep track of chosen reward\n\n # (UPDATING)\n\n for arm in range(M): # update all arm estimations for this agent\n if arm == candidate: # if chosen arm\n n[t+1][agent][arm] = n[t][agent][arm] + 1\n xsum = 0\n for time in range(t+1): # sum up all rewards so far (Consider making X running sum?)\n xsum += X[time][agent][arm]\n x[t+1][agent][arm] = (1/n[t+1][agent][arm])*xsum\n else: # if not chosen arm\n n[t+1][agent][arm] = n[t][agent][arm]\n x[t+1][agent][arm] = x[t][agent][arm] # not mentioned in paper but seems necessary (I agree)\n \n #if num_neighbors[agent] - 1 > 2*num_malicious:\n zsum = 0\n zvals = []\n z_agent = z[t][agent][arm]\n for neighbor in neighbors[agent]: # look at current agent's neighbors\n if neighbor != agent:\n zvals.append(z[t][neighbor][arm])\n zvals.sort()\n \n # (filter out potentially malicious high values)\n new_zvals = [] \n counter = 0\n for zval in zvals:\n if zval <= z_agent and counter < num_malicious:\n counter += 1\n else:\n new_zvals.append(zval)\n\n # (filter out potentially malicious low values)\n new_new_zvals = [] \n counter = 0\n for zval in reversed(new_zvals):\n if zval >= z_agent and counter < num_malicious:\n counter += 1\n else:\n new_new_zvals.append(zval)\n \n new_new_zvals.append(z_agent)\n \n for zval in new_new_zvals:\n # (PAPER AMBIGUOUS: is xbar(t+1) - xbar(t) part of the summation???)\n zsum += zval + x[t+1][agent][arm] - x[t][agent][arm] # calculate sum for z update\n z[t+1][agent][arm] = (1/(len(new_new_zvals)))*zsum # update current agent's z\n\n rwds_tnspose = np.transpose(rwds) # transpose rwds to make it easier to plot\n for agent in range(len(rwds_tnspose)):\n regret = []\n for t in range(len(rwds_tnspose[agent])):\n avg = np.sum(rwds_tnspose[agent][0:t+1])/(t+1)\n 
regret.append(max_mean-avg)\n \n regret = np.cumsum(regret)\n if agent == 0: # FIX THIS\n agent0_regret.append(regret)\n elif agent == 1:\n agent1_regret.append(regret)\n elif agent == 2:\n agent2_regret.append(regret)\n elif agent == 3:\n agent3_regret.append(regret)\n elif agent == 4:\n agent4_regret.append(regret)\n else:\n agent5_regret.append(regret)\n\n\n# In[3]:\n\n\n# FIX THIS\narrays0 = [np.array(x) for x in agent0_regret]\navg_regret0 = [np.mean(k) for k in zip(*arrays0)]\narrays1 = [np.array(x) for x in agent1_regret]\navg_regret1 = [np.mean(k) for k in zip(*arrays1)]\narrays2 = [np.array(x) for x in agent2_regret]\navg_regret2 = [np.mean(k) for k in zip(*arrays2)]\narrays3 = [np.array(x) for x in agent3_regret]\navg_regret3 = [np.mean(k) for k in zip(*arrays3)]\narrays4 = [np.array(x) for x in agent4_regret]\navg_regret4 = [np.mean(k) for k in zip(*arrays4)]\narrays5 = [np.array(x) for x in agent5_regret]\navg_regret5 = [np.mean(k) for k in zip(*arrays5)]\n\n\n# In[4]:\n\n\nfig, axes = plt.subplots(nrows=1, ncols=3, figsize=(15,5))\nax = axes.flatten()\n# FIX THIS\n#ax[0].plot(range(T+1),avg_regret0)\nax[0].plot(range(T+1),avg_regret1)\nax[0].plot(range(T+1),avg_regret2)\nax[0].plot(range(T+1),avg_regret3)\nax[0].plot(range(T+1),avg_regret4)\nax[0].plot(range(T+1),avg_regret5)\n\nax[0].set_xlabel(\"Time\")\nax[0].set_ylabel(\"Expected Cumulative Regret\")\nlabels = [\"Agent \" + str(i) for i in range(1,N)]\nax[0].legend(labels)\n\nax[1].plot(range(T+1),avg_regret0)\nax[1].set_xlabel(\"Time\")\nax[1].set_ylabel(\"Expected Cumulative Regret\")\nlabels = [\"Agent 0\"]\nax[1].legend(labels)\n\nnx.draw_networkx(G, ax=ax[2], pos=nx.spring_layout(G))\nax[2].set_axis_off()\n\n#plt.savefig(\"directed_strongly_connected_resilient.eps\")\nplt.show()\n\n\n# In[5]:\n\n\nprint(rwds_tnspose)\n\n\n# In[6]:\n\n\nprint(z[T-1][1])\n\n\n# In[9]:\n\n\nprint(n[T-1][1])\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "numpy.log", "numpy.asarray", "matplotlib.pyplot.plot", "numpy.argmax", "matplotlib.pyplot.show", "numpy.zeros", "numpy.sum" ], [ "numpy.log", "matplotlib.pyplot.subplots", "numpy.cumsum", "numpy.argmax", "numpy.mean", "numpy.argmin", "numpy.transpose", "numpy.random.binomial", "numpy.array", "numpy.zeros", "numpy.sum", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vlad-user/parallel-tempering
[ "42ebecdea2a597e706382462dc90aab7e7ca098f" ]
[ "simulator/simulator.py" ]
[ "\"\"\"A class that performs simulations.\"\"\"\nimport sys\nimport os\nimport gc\nimport json\nfrom time import time\nimport math\n\nimport tensorflow as tf\nimport sklearn\nimport numpy as np\n\nfrom simulator.graph.simulator_graph import SimulatorGraph\nfrom simulator import simulator_utils as s_utils\nfrom simulator.exceptions import IllegalArgumentError\n\nclass Simulator: # pylint: disable=too-many-instance-attributes\n \"\"\"Performs single/multiple simulation for calculating averages.\n\n This class defines the API for performing simulations. This class\n trains models (possibly multiple times), while class SimulatorGraph\n creates dataflow graphs with duplicated replicas. More functions\n can be added to train models in different setups.\n\n ### Usage\n\n ```python\n from tensorflow.examples.tutorials.mnist import input_data\n import numpy as np\n\n from simulator.simulator import Simulator\n from simulator.summary_extractor import SummaryExtractor\n import simulator.simulator_utils as s_utils\n from simulator.models.mnist_models import nn_mnist_model_dropout\n MNIST_DATAPATH = 'simulator/data/mnist/'\n\n mnist = input_data.read_data_sets(MNIST_DATAPATH)\n train_data = mnist.train.images\n train_labels = mnist.train.labels\n test_data = mnist.test.images\n test_labels = mnist.test.labels\n valid_data = mnist.validation.images\n valid_labels = mnist.validation.labels\n\n n_replicas = 8\n separation_ratio = 1.21\n\n # set simulation parameters\n model_func = nn_mnist_model_dropout\n learning_rate = 0.01\n noise_list = [1/separation_ratio**i for i in range(n_replicas)]\n noise_type = 'dropout_rmsprop'\n batch_size = 200\n n_epochs = 50\n name = 'test_simulation' # simulation name\n test_step = 300 # 1 step==batch_size\n swap_step = 300\n burn_in_period = 400\n loss_func_name = 'cross_entropy'\n description = 'RMSProp with dropout.'\n proba_coeff = 250\n rmsprop_decay = 0.9\n rmsprop_momentum = 0.001\n rmsprop_epsilon=1e-6\n\n # create and run simulation\n\n sim = Simulator(\n model=model_func,\n learning_rate=learning_rate,\n noise_list=noise_list,\n noise_type='dropout_rmsprop',\n batch_size=batch_size,\n n_epochs=n_epochs,\n test_step=test_step,\n name=name,\n swap_step=swap_step,\n burn_in_period=burn_in_period,\n loss_func_name='cross_entropy',\n description=description,\n proba_coeff=proba_coeff,\n rmsprop_decay=rmsprop_decay,\n rmsprop_epsilon=rmsprop_epsilon,\n rmsprop_momentum=rmsprop_momentum\n )\n\n sim.train(train_data=train_data, train_labels=train_labels,\n test_data=test_data, test_labels=test_labels,\n validation_data=valid_data, validation_labels=valid_labels)\n\n\n # plot results (possible during training on linux)\n se = SummaryExtractor(name)\n se.show_report()\n ```\n \"\"\"\n\n def __init__(self, # pylint: disable=too-many-arguments, too-many-locals\n model,\n learning_rate,\n noise_list,\n noise_type,\n batch_size,\n n_epochs,\n name,\n burn_in_period,\n swap_step,\n separation_ratio,\n ensembles=None,\n n_simulations=1,\n test_step=500,\n tuning_parameter_name=None,\n loss_func_name='cross_entropy',\n verbose_loss='error',\n proba_coeff=1.0,\n description=None,\n test_batch = None,\n hessian=False,\n scheduled_noise=None,\n scheduled_lr=None,\n mode=None,\n moa_lr=None,\n rmsprop_decay=0.9,\n rmsprop_momentum=0.001,\n rmsprop_epsilon=1e-6,\n flush_every=90):\n \"\"\"Creates a new simulator object.\n\n Args:\n model: A function that creates inference model (e.g.\n see `simulation.models.nn_mnist_model()`)\n learning_rate: Learning rate for optimizer\n 
noise_list: A list (not np.array!) for noise/temperatures/dropout\n values. In case of dropout (dropout_rmsprop, dropout_gd), noise_list\n represents the values of KEEPING the neurons, and NOT the probability\n of excluding the neurons.\n noise_type: A string specifying the noise type and optimizer to apply.\n Possible values could be seen at\n `simulation.simulation_builder.graph_builder.SimulatorGraph.__noise_types`\n batch_size: Batch Size\n n_epochs: Number of epochs for each simulation\n name: The name of the simulation. Specifies the a folder name\n from where a summary files can be later accessed.\n n_simulatins: Number of simulation to run.\n test_step: An integer specifing an interval of steps to perform until\n running a test dataset (1 step equals batch_size)\n swap_step: An integer specifying an interval to perform until\n attempting to swap between replicas based on validation dataset.\n separation_ratio: A separation ratio between two adjacent temperatures.\n This value is not important for simulation because the\n noise_list already contains the separated values. This value is\n (as well as some others) are stored in the simulation\n description file (this file is created by _log_params()\n function).\n tuning_parameter_name: As the separation_ratio value, this argument is\n also not important for simulation. It is stored in the description\n file as well.\n burn_in_period: A number of steps until the swaps start to be\n proposed.\n loss_func_name: A function which we want to optimize. Currently,\n only cross_entropy and STUN (stochastic tunneling) are\n supported.\n verbose_loss: A loss to print during training. Default is 0-1 loss.\n Possible parameters are: 'loss', 'error'.\n proba_coeff: The coeffecient is used in calculation of probability\n of swaps. Specifically, we have\n P(accept_swap) = exp(proba_coeff*(beta_1-beta_2)(E_1-E_2))\n description: A custom string that is stored in the description file.\n `test_batch`: A size of a data that is fed during evaluation of loss,\n error etc. for test/validation dataset. For MNIST or similar sized\n dataset the whole test/validation data can be fed at once.\n hessian: (Boolean) If `True`, computes Hessian and its\n eigenvalues during each swap step. Default is `False` since\n it is computationally intensive and should be used only for\n small networks.\n scheduled_noise: A dictionary specifying with keys corresponding\n steps and values corresponding to `noise_list` at which start\n to apply this values. It could be used for hyper-param annealing,\n or using constant learning rate for every replica and then start\n exchanges instead of annealing.\n scheduled_learning_rate: Same as `scheduled_noise` but only for\n learning rate. This can be used for annealing when `noise_type`\n is not `learning_rate`.\n mode: If `None` (default) will prepare graph for regular parallel\n tempering simulation. If one of `['MOA', 'mixture_of_agents']`,\n in addition to a parallel tempering mode will also train and\n do inference on a weighted outputs of `logits` of all replicas.\n The weight update of such MOA structure is made only on weights\n that `logits` of each replica are multiplied. The updates for\n weights of each replica are made as in usual parallel tempering\n simulation. The learning rate for MOA update is taken from\n `moa_lr` argument.\n moa_lr: A learning rate for `MOA` mode. 
If `None`, the\n learning rate for replicas is used.\n rmsprop_decay: Used in\n `simulation.simulation_builder.optimizers.RMSPropOptimizer`\n for noise type `dropout_rmsprop`. This value is ignored for\n other `noise_types`.\n rmsprop_momentum: Used in\n simulation.simulation_builder.optimizers.RMSPropOptimizer\n for noise type 'dropout_rmsprop'. This value is ignored for\n other noise_types.\n rmsprop_epsilon: Used in\n `simulation.simulation_builder.optimizers.RMSPropOptimizer`\n for noise type `dropout_rmsprop`. This value is ignored for\n other `noise_types`.\n flush_every: An integer that defines an interval in seconds that\n currently accumulated training log will be flushed to disk.\n Default is 60 seconds.\n \"\"\"\n\n self._model = model\n self._learning_rate = learning_rate\n self._noise_type = noise_type\n self._noise_list = noise_list\n self._n_replicas = len(noise_list)\n self._learning_rate = learning_rate\n self._name = name\n self._n_simulations = n_simulations\n self._burn_in_period = burn_in_period\n self._loss_func_name = loss_func_name\n self._verbose_loss = verbose_loss\n self._proba_coeff = proba_coeff\n self._batch_size = batch_size\n self._n_epochs = n_epochs\n self._test_step = test_step\n self._swap_step = swap_step\n self._separation_ratio = separation_ratio\n self._tuning_param_name = tuning_parameter_name\n self._description = description\n self._ensembles = ensembles\n self._hessian = hessian\n self._scheduled_noise = scheduled_noise\n self._scheduled_lr = scheduled_lr\n self._mode = mode\n self._moa_lr = moa_lr\n self.rmsprop_decay = rmsprop_decay\n self.rmsprop_momentum = rmsprop_momentum\n self.rmsprop_epsilon = rmsprop_epsilon\n self._test_batch = max(500, batch_size)\n self._logged = False # if log had been written to disk \n self._flush_every = flush_every\n self._train_step = min(self._test_step, self._swap_step)\n\n def train_n_times(self, train_data_size=None, **kwargs):\n \"\"\"Trains `n_simulations` times using the same setup.\n\n Args:\n `train_data_size`: (Optional) Sets the amount of train data out of\n whole data that is used in simulation. If None, the whole data\n is used. Otherwise, each simulation receives `train_data_size`\n shuffled training data. 
This value is used when it is needed to\n feed only part of the data to the algorithm.\n `kwargs`: Should be following keyword arguments: `train_data`,\n `train_labels`, `test_data`, `test_labels`, `validation_data`,\n `validation_labels`.\n \"\"\"\n\n test_data = kwargs.get('test_data', None)\n test_labels = kwargs.get('test_labels', None)\n valid_data = kwargs.get('validation_data', None)\n valid_labels = kwargs.get('validation_labels', None)\n train_data = kwargs.get('train_data', None)\n train_labels = kwargs.get('train_labels', None)\n sim_names = []\n\n for i in range(self._n_simulations):\n \n train_data, train_labels = sklearn.utils.shuffle(\n train_data, train_labels)\n\n self._graph = SimulatorGraph(self._model,\n self._learning_rate,\n self._noise_list,\n self._name,\n ensembles=self._ensembles,\n noise_type=self._noise_type,\n simulation_num=i,\n loss_func_name=self._loss_func_name,\n proba_coeff=self._proba_coeff,\n hessian=self._hessian,\n mode=self._mode,\n moa_lr=self._moa_lr,\n rmsprop_decay=self.rmsprop_decay,\n rmsprop_momentum=self.rmsprop_momentum,\n rmsprop_epsilon=self.rmsprop_epsilon)\n\n if train_data_size is not None:\n train_data_ = train_data[:train_data_size]\n train_labels_ = train_labels[:train_data_size]\n else:\n train_data_ = train_data\n train_labels_ = train_labels\n\n self._parallel_tempering_train(train_data=train_data_,\n train_labels=train_labels_,\n test_data=test_data,\n test_labels=test_labels,\n validation_data=valid_data,\n validation_labels=valid_labels)\n del self._graph\n gc.collect()\n\n\n def _parallel_tempering_train(self, **kwargs): # pylint: disable=too-many-locals, invalid-name\n \"\"\"Trains and swaps between replicas while storing summaries.\n \n Args:\n `kwargs`: Should be following keyword arguments: `train_data`,\n `train_labels`, `test_data`, `test_labels`, `validation_data`,\n `validation_labels`.\n\n \"\"\"\n # we define step at which we (maybe) want to modify noise values\n scheduled_noise = self._scheduled_noise\n scheduled_lr = self._scheduled_lr\n \n if scheduled_noise is not None:\n scheduled_steps = list(sorted(scheduled_noise.keys())) + [np.inf]\n else:\n scheduled_steps = [np.inf]\n\n # step to modify learning rate (e.g. 
for annealing learning\n # rate with dropout (or something else) as `noise_type`)\n if scheduled_lr is not None:\n if 'learning_rate' is self._noise_type:\n err_msg = (\"If `noise_type` is 'learning_rate' \"\n \"`scheduled_learning_rate` cannot be used.\")\n raise ValueError(err_msg)\n else:\n scheduled_lr = {1: self._learning_rate}\n\n scheduled_lrs = list(sorted(scheduled_lr.keys())) + [np.inf]\n\n\n def get_next_scheduled_step():\n for s in scheduled_steps:\n yield s\n def get_next_scheduled_lr():\n for lr in scheduled_lrs:\n yield lr\n\n next_scheduled_step_iter = get_next_scheduled_step()\n next_scheduled_step = next_scheduled_step_iter.__next__()\n next_scheduled_lr_iter = get_next_scheduled_lr()\n next_scheduled_lr = next_scheduled_lr_iter.__next__()\n\n\n last_flush_time = time()\n \n if not self._logged:\n self._log_params()\n self._logged = True\n try:\n g = self._graph # pylint: disable=invalid-name\n except AttributeError as err:\n if not err.args:\n err.args = ('',)\n\n err.args = (err.args\n + (\"The SimulatorGraph object is not initialized.\",))\n raise\n\n try:\n train_data = kwargs.get('train_data', None)\n train_labels = kwargs.get('train_labels', None)\n test_data = kwargs.get('test_data', None)\n test_labels = kwargs.get('test_labels', None)\n valid_data = kwargs.get('validation_data', None)\n valid_labels = kwargs.get('validation_labels', None)\n if (train_data is None\n or train_labels is None\n or test_data is None\n or test_labels is None\n or valid_data is None\n or valid_labels is None):\n raise IllegalArgumentError(\n 'One of the arguments is None:',\n [x for x in kwargs.keys() if kwargs[x] is None])\n\n # iterators for train/test/validation\n with g.get_tf_graph().as_default(): # pylint: disable=not-context-manager\n data = tf.data.Dataset.from_tensor_slices({\n 'x':train_data,\n 'y':train_labels\n }).shuffle(train_data.shape[0]).batch(self._batch_size)\n iterator = data.make_initializable_iterator()\n \n data = tf.data.Dataset.from_tensor_slices({\n 'x':valid_data,\n 'y':valid_labels\n }).batch(self._test_batch)\n iter_valid = data.make_initializable_iterator()\n\n data = tf.data.Dataset.from_tensor_slices({\n 'x':test_data,\n 'y':test_labels\n }).batch(self._test_batch)\n iter_test = data.make_initializable_iterator()\n\n except: # pylint: disable=try-except-raise\n raise\n\n step = 1\n config = tf.ConfigProto(allow_soft_placement=True)\n\n with tf.Session(graph=g.get_tf_graph(), config=config) as sess:\n _ = sess.run([iterator.initializer,\n iter_valid.initializer,\n iter_test.initializer,\n g.variable_initializer])\n\n next_batch = iterator.get_next()\n next_batch_test = iter_test.get_next()\n next_batch_valid = iter_valid.get_next()\n\n train_batch_loss = {i:[] for i in range(self._n_replicas)}\n train_batch_err = {i:[] for i in range(self._n_replicas)}\n\n for epoch in range(self._n_epochs):\n while True:\n try:\n \n if step > next_scheduled_step:\n g.update_noise_values(scheduled_noise[next_scheduled_step])\n next_scheduled_step = next_scheduled_step_iter.__next__()\n\n if step > next_scheduled_lr:\n g._lr = scheduled_lr[next_scheduled_lr]\n next_scheduled_lr = next_scheduled_lr_iter.__next__()\n\n ### test ###\n if step % self._test_step == 0 or step == 1:\n evaluated = self._train_epoch(sess,\n next_batch_test,\n iter_test,\n dataset_type='test')\n g.add_summary_v2({\n 'test_loss_summary': evaluated[:self._n_replicas],\n 'test_error_summary': evaluated[self._n_replicas:],\n 'test_steps_summary': step\n }, epoch)\n vals2log = 
evaluated[self._n_replicas:]\n\n self.print_log(epoch,\n step,\n vals2log,\n g.get_accept_ratio())\n\n ### validation + swaps ###\n if step % self._swap_step == 0 or step == 1:\n evaluated = self._train_epoch(sess,\n next_batch_valid,\n iter_valid,\n dataset_type='validation')\n g.add_summary_v2({\n 'validation_loss_summary': evaluated[:self._n_replicas],\n 'validation_error_summary': evaluated[self._n_replicas:],\n 'validation_steps_summary': step\n }, epoch)\n if step > self._burn_in_period:\n g.swap_replicas(evaluated[:self._n_replicas])\n\n ### train ###\n batch = sess.run(next_batch)\n step += 1\n # Forward Pass Only to compute error/losses\n feed_dict = g.create_feed_dict(batch['x'], batch['y'], dataset_type='test')\n loss_err_ops = g.get_ops(loss_ops=True, error_ops=True)\n loss_err_vals = sess.run(loss_err_ops, feed_dict=feed_dict)\n for i in range(self._n_replicas):\n train_batch_loss[i].append(loss_err_vals[i])\n train_batch_err[i].append(loss_err_vals[self._n_replicas + i])\n\n # Compute diffusion and append values to summaries\n if step % self._train_step == 0 or step == 1:\n diff_ops = g.get_ops(diffusion_ops=True)\n evaled_diffs = sess.run(diff_ops)\n g.add_summary_v2({\n 'diffusion_summary': evaled_diffs[0],\n 'train_loss_summary': [np.mean(train_batch_loss[i]) for i in range(self._n_replicas)],\n 'train_error_summary':[np.mean(train_batch_err[i]) for i in range(self._n_replicas)],\n 'train_steps_summary': step,\n }, epoch, add_noise_vals=True)\n\n del train_batch_loss\n del train_batch_err\n\n train_batch_loss = {i:[] for i in range(self._n_replicas)}\n train_batch_err = {i:[] for i in range(self._n_replicas)}\n\n # Update weights step\n update_ops = g.get_ops(weights_update_ops=True)\n feed_dict = g.create_feed_dict(batch['x'], batch['y'])\n _ = sess.run(update_ops, feed_dict=feed_dict)\n\n ############ old impl\n '''\n if step % self._train_step == 0 or step == 1:\n special_ops = True\n else:\n special_ops = False\n\n batch = sess.run(next_batch)\n\n #if self._noise_type not in ['dropout', 'dropout_gd', 'dropout_rmsprop']:\n\n if special_ops:\n ops = g.get_train_ops(special_ops=special_ops, loss_err=False)\n diff_ops = ops[:g._n_replicas]\n evaled_diffs = sess.run(diff_ops)\n\n feed_dict = g.create_feed_dict(batch['x'], batch['y'])\n\n loss_err_train_ops = g.get_train_ops()\n loss_err_ops = loss_err_train_ops[:2*self._n_replicas]\n train_ops = loss_err_train_ops[-self._n_replicas:]\n loss_err_vals = sess.run(loss_err_ops, feed_dict=feed_dict)\n train_vals = sess.run(train_ops, feed_dict=feed_dict)\n\n evaluated = (loss_err_vals\n + evaled_diffs\n + train_vals)\n\n loss = g.extract_evaluated_tensors(evaluated, 'loss')\n err = g.extract_evaluated_tensors(evaluated, 'error')\n\n\n for i in range(self._n_replicas):\n train_batch_loss[i].append(loss[i])\n train_batch_err[i].append(err[i])\n\n if step % self._train_step == 0 or step == 1:\n evaled = [np.mean(train_batch_loss[i]) for i in range(self._n_replicas)]\n evaled += [np.mean(train_batch_err[i]) for i in range(self._n_replicas)]\n evaled += g.extract_evaluated_tensors(evaluated, 'special_vals')\n\n g.add_summary(evaled+evaluated[-self._n_replicas:],\n step=step,\n epoch=epoch,\n dataset_type='train')\n\n del train_batch_loss\n del train_batch_err\n\n train_batch_loss = {i:[] for i in range(self._n_replicas)}\n train_batch_err = {i:[] for i in range(self._n_replicas)}\n '''\n if time() - last_flush_time > self._flush_every:\n g.flush_summary()\n last_flush_time = time()\n\n except tf.errors.OutOfRangeError:\n 
sess.run(iterator.initializer)\n break\n '''\n if not train_batch_loss[0]:\n evaled = [np.mean(train_batch_loss[i]) for i in range(self._n_replicas)]\n evaled += [np.mean(train_batch_err[i]) for i in range(self._n_replicas)]\n g.add_summary(evaled, step=step, epoch=self._n_epochs, dataset_type='train')\n '''\n g._summary._latest_epoch = self._n_epochs\n g.flush_summary()\n\n def _parallel_tempering_train_moa(self, **kwargs): # pylint: disable=too-many-locals, invalid-name\n \"\"\"Trains and swaps between replicas while storing summaries.\n \n This function differs from others by the fact that it supports MOA.\n Args:\n `kwargs`: Should be following keyword arguments: `train_data`,\n `train_labels`, `test_data`, `test_labels`, `validation_data`,\n `validation_labels`.\n\n \"\"\"\n\n last_flush_time = time()\n \n if not self._logged:\n self._log_params()\n self._logged = True\n try:\n g = self._graph # pylint: disable=invalid-name\n except AttributeError as err:\n if not err.args:\n err.args = ('',)\n\n err.args = (err.args\n + (\"The SimulatorGraph object is not initialized.\",))\n raise\n\n try:\n train_data = kwargs.get('train_data', None)\n train_labels = kwargs.get('train_labels', None)\n test_data = kwargs.get('test_data', None)\n test_labels = kwargs.get('test_labels', None)\n valid_data = kwargs.get('validation_data', None)\n valid_labels = kwargs.get('validation_labels', None)\n if (train_data is None\n or train_labels is None\n or test_data is None\n or test_labels is None\n or valid_data is None\n or valid_labels is None):\n raise IllegalArgumentError(\n 'One of the arguments is None:',\n [x for x in kwargs.keys() if kwargs[x] is None])\n\n # iterators for train/test/validation\n with g.get_tf_graph().as_default(): # pylint: disable=not-context-manager\n data = tf.data.Dataset.from_tensor_slices({\n 'x':train_data,\n 'y':train_labels\n }).shuffle(train_data.shape[0]).batch(self._batch_size)\n iterator = data.make_initializable_iterator()\n \n data = tf.data.Dataset.from_tensor_slices({\n 'x':valid_data,\n 'y':valid_labels\n }).batch(self._test_batch)\n iter_valid = data.make_initializable_iterator()\n\n data = tf.data.Dataset.from_tensor_slices({\n 'x':test_data,\n 'y':test_labels\n }).batch(self._test_batch)\n iter_test = data.make_initializable_iterator()\n\n except: # pylint: disable=try-except-raise\n raise\n\n step = 1\n\n config = tf.ConfigProto(allow_soft_placement=True)\n\n with tf.Session(graph=g.get_tf_graph(), config=config) as sess:\n _ = sess.run([iterator.initializer,\n iter_valid.initializer,\n iter_test.initializer,\n g.variable_initializer])\n\n next_batch = iterator.get_next()\n next_batch_test = iter_test.get_next()\n next_batch_valid = iter_valid.get_next()\n\n train_batch_loss = {i:[] for i in range(self._n_replicas)}\n train_batch_err = {i:[] for i in range(self._n_replicas)}\n moa_batch_loss = []\n moa_batch_err = []\n\n for epoch in range(self._n_epochs):\n\n while True:\n \n try:\n \n\n ### test ###\n if step % self._test_step == 0 or step == 1:\n evaluated = self._train_epoch(sess,\n next_batch_test,\n iter_test,\n dataset_type='test')\n test_losses = evaluated[:self._n_replicas]\n test_errs = evaluated[self._n_replicas:]\n\n g.add_summary_v2({\n 'test_loss_summary': test_losses,\n 'test_error_summary': test_errs,\n 'test_steps_summary': step\n }, epoch)\n # MOA test metrics\n test_loss, test_err = self._train_epoch(sess,\n next_batch_test,\n iter_test,\n dataset_type='test',\n moa=True)\n g.add_summary_v2({'moa_test_loss_summary': test_loss,\n 
'moa_test_error_summary': test_err,\n 'moa_test_steps_summary': step}, epoch)\n\n if 'loss' in self._verbose_loss:\n verbose_loss_vals = test_losses + [test_loss]\n else:\n verbose_loss_vals = test_errs + [test_err]\n self.print_log(epoch,\n step,\n verbose_loss_vals,\n g.get_accept_ratio())\n\n ### validation + swaps ###\n if step % self._swap_step == 0 or step == 1:\n evaluated = self._train_epoch(sess,\n next_batch_valid,\n iter_valid,\n dataset_type='validation')\n valid_losses = evaluated[:self._n_replicas]\n valid_errs = evaluated[self._n_replicas:]\n g.add_summary_v2({\n 'validation_loss_summary': valid_losses,\n 'validation_error_summary': valid_errs,\n 'validation_steps_summary': step\n }, epoch)\n if step > self._burn_in_period:\n g.swap_replicas(evaluated[:self._n_replicas])\n\n valid_loss, valid_err = self._train_epoch(sess,\n next_batch_test,\n iter_test,\n dataset_type='test',\n moa=True)\n g.add_summary_v2({'moa_validation_loss_summary': valid_loss,\n 'moa_validation_error_summary': valid_err,\n 'moa_validation_steps_summary': step}, epoch)\n\n ### train ###\n batch = sess.run(next_batch)\n step += 1\n rep_train_loss_ops = g.get_ops(loss_ops=True)\n rep_train_err_ops = g.get_ops(error_ops=True)\n moa_train_loss_ops = g.get_ops(moa_loss_ops=True)\n moa_train_err_ops = g.get_ops(moa_error_ops=True)\n\n execute_ops = (rep_train_loss_ops\n + rep_train_err_ops\n + moa_train_loss_ops\n + moa_train_err_ops)\n\n feed_dict = g.create_feed_dict(batch['x'], batch['y'], dataset_type='test')\n\n evaled_ops = sess.run(execute_ops, feed_dict=feed_dict)\n\n rep_train_loss_vals = evaled_ops[:self._n_replicas]\n rep_train_err_vals = evaled_ops[self._n_replicas:2*self._n_replicas]\n moa_train_loss_val = evaled_ops[2*self._n_replicas]\n moa_train_err_val = evaled_ops[2*self._n_replicas + 1]\n\n for i in range(self._n_replicas):\n train_batch_loss[i].append(rep_train_loss_vals[i])\n train_batch_err[i].append(rep_train_err_vals[i])\n\n moa_batch_loss.append(moa_train_loss_val)\n moa_batch_err.append(moa_train_err_val)\n\n if step % self._train_step == 0 or step == 1:\n diff_ops = g.get_ops(diffusion_ops=True)\n feed_dict = g.create_feed_dict(batch['x'], batch['y'])\n evaled_diffs = sess.run(diff_ops, feed_dict=feed_dict)\n moa_weights_ops = g.get_ops(moa_weights_vars=True)\n moa_weights_vals = sess.run(moa_weights_ops)\n g.add_summary_v2({\n 'diffusion_summary': evaled_diffs[0],\n 'moa_train_loss_summary': np.mean(moa_batch_loss),\n 'moa_train_error_summary': np.mean(moa_batch_err),\n 'moa_weights_summary':list(moa_weights_vals[0].squeeze()),\n 'moa_train_steps_summary': step,\n 'train_loss_summary': [np.mean(train_batch_loss[i]) for i in range(self._n_replicas)],\n 'train_error_summary':[np.mean(train_batch_err[i]) for i in range(self._n_replicas)],\n 'train_steps_summary': step,\n }, epoch, add_noise_vals=True)\n\n del train_batch_loss\n del train_batch_err\n del moa_batch_err\n del moa_batch_loss\n\n train_batch_loss = {i:[] for i in range(self._n_replicas)}\n train_batch_err = {i:[] for i in range(self._n_replicas)}\n moa_batch_loss = []\n moa_batch_err = []\n\n rep_updates = g.get_ops(weights_update_ops=True)\n moa_updates = g.get_ops(moa_weights_update_ops=True)\n\n feed_dict = g.create_feed_dict(batch['x'],\n batch['y'],\n dataset_type='train')\n _ = sess.run(rep_updates + moa_updates, feed_dict=feed_dict)\n\n if time() - last_flush_time > self._flush_every:\n g.flush_summary()\n last_flush_time = time()\n\n except tf.errors.OutOfRangeError:\n sess.run(iterator.initializer)\n break\n '''\n 
if not train_batch_loss[0]:\n evaled = [np.mean(train_batch_loss[i]) for i in range(self._n_replicas)]\n evaled += [np.mean(train_batch_err[i]) for i in range(self._n_replicas)]\n g.add_summary(evaled, step=step, epoch=self._n_epochs, dataset_type='train')\n '''\n g._summary._latest_epoch = self._n_epochs\n g.flush_summary()\n\n def _parallel_tempering_train_v2(self, **kwargs): # pylint: disable=too-many-locals, invalid-name\n \"\"\"Trains and swaps between replicas while storing summaries.\n\n This function differs from `_parallel_tempering_train()` by the\n fact that it calculates loss/error on the whole train data set\n and just accumulates loss/error over batches. This gives\n better understanding of the performance on train data especially\n when we're dealing with dropout that applied only during training\n but not during testing, which may result in better perfomance on\n test data than on train data. \n \n Args:\n `kwargs`: Should be following keyword arguments: `train_data`,\n `train_labels`, `test_data`, `test_labels`, `validation_data`,\n `validation_labels`.\n\n \"\"\"\n\n last_flush_time = time()\n \n if not self._logged:\n self._log_params()\n self._logged = True\n try:\n g = self._graph # pylint: disable=invalid-name\n except AttributeError as err:\n if not err.args:\n err.args = ('',)\n\n err.args = (err.args\n + (\"The SimulatorGraph object is not initialized.\",))\n raise\n\n try:\n train_data = kwargs.get('train_data', None)\n train_labels = kwargs.get('train_labels', None)\n test_data = kwargs.get('test_data', None)\n test_labels = kwargs.get('test_labels', None)\n valid_data = kwargs.get('validation_data', None)\n valid_labels = kwargs.get('validation_labels', None)\n if (train_data is None\n or train_labels is None\n or test_data is None\n or test_labels is None\n or valid_data is None\n or valid_labels is None):\n raise IllegalArgumentError(\n 'One of the arguments is None:',\n [x for x in kwargs.keys() if kwargs[x] is None])\n\n # iterators for train/test/validation\n with g.get_tf_graph().as_default(): # pylint: disable=not-context-manager\n data = tf.data.Dataset.from_tensor_slices({\n 'x':train_data,\n 'y':train_labels\n }).batch(self._batch_size)\n iterator = data.make_initializable_iterator()\n \n data = tf.data.Dataset.from_tensor_slices({\n 'x':train_data,\n 'y':train_labels\n }).batch(self._batch_size)\n iter_train = data.make_initializable_iterator()\n\n data = tf.data.Dataset.from_tensor_slices({\n 'x':valid_data,\n 'y':valid_labels\n }).batch(self._test_batch)\n iter_valid = data.make_initializable_iterator()\n\n data = tf.data.Dataset.from_tensor_slices({\n 'x':test_data,\n 'y':test_labels\n }).batch(self._test_batch)\n iter_test = data.make_initializable_iterator()\n\n except: # pylint: disable=try-except-raise\n raise\n\n step = 0\n\n config = tf.ConfigProto(allow_soft_placement=True)\n\n with tf.Session(graph=g.get_tf_graph(), config=config) as sess:\n _ = sess.run([iterator.initializer,\n iter_valid.initializer,\n iter_test.initializer,\n iter_train.initializer,\n g.variable_initializer])\n\n next_batch = iterator.get_next()\n next_batch_train = iterator.get_next()\n next_batch_test = iter_test.get_next()\n next_batch_valid = iter_valid.get_next()\n\n for epoch in range(self._n_epochs):\n\n while True:\n\n try:\n step += 1\n\n ### test ###\n if step % self._test_step == 0 or step == 1:\n evaluated = self._train_epoch(sess,\n next_batch_test,\n iter_test,\n dataset_type='test')\n test_losses = evaluated[:self._n_replicas]\n test_errs = 
evaluated[self._n_replicas:]\n g.add_summary_v2({\n 'test_loss_summary': test_losses,\n 'test_error_summary': test_errs,\n 'test_steps_summary': step\n })\n \n if 'loss' in self._verbose_loss:\n verbose_loss_vals = test_losses\n else:\n verbose_loss_vals = test_errs\n self.print_log(epoch,\n step,\n verbose_loss_vals,\n g.get_accept_ratio())\n\n ### validation + swaps ###\n if step % self._swap_step == 0 or step == 1:\n evaluated = self._train_epoch(sess,\n next_batch_valid,\n iter_valid,\n dataset_type='validation')\n valid_losses = evaluated[:self._n_replicas]\n valid_errs = evaluated[self._n_replicas:]\n g.add_summary_v2({\n 'validation_loss_summary': valid_losses,\n 'validation_error_summary': valid_errs,\n 'validation_steps_summary': step\n })\n if step > self._burn_in_period:\n g.swap_replicas(evaluated[:self._n_replicas])\n\n ### train ###\n if step % self._train_step == 0 or step == 1:\n diff_ops = g.get_ops(diffusion_ops=True)\n evaled_diffs = sess.run(diff_ops)\n loss_err_vals = self._train_epoch(sess,\n next_batch_train,\n iter_train,\n dataset_type='train')\n\n g.add_summary_v2({\n 'train_loss_summary': loss_err_vals[:self._n_replicas],\n 'train_error_summary': loss_err_vals[self._n_replicas:],\n 'train_steps_summary': step,\n 'diffusion_summary': evaled_diffs\n })\n\n batch = sess.run(next_batch)\n feed_dict = g.create_feed_dict(batch['x'], batch['y'])\n update_ops = g.get_ops(weights_update_ops=True)\n\n _ = sess.run(update_ops, feed_dict=feed_dict)\n\n if time() - last_flush_time > self._flush_every:\n g.flush_summary()\n last_flush_time = time()\n\n except tf.errors.OutOfRangeError:\n sess.run(iterator.initializer)\n break\n\n g._summary._latest_epoch = self._n_epochs\n g.flush_summary()\n\n def _train_epoch(self, sess, next_batch, iterator, dataset_type, moa=False):\n \"\"\"Trains the whole data in the iterator and returns average results.\"\"\"\n batch_loss = {i:[] for i in range(self._n_replicas)}\n batch_err = {i:[] for i in range(self._n_replicas)}\n\n g = self._graph\n\n while True:\n try:\n batch = sess.run(next_batch)\n feed_dict = g.create_feed_dict(batch['x'],\n batch['y'],\n dataset_type=dataset_type)\n if moa is True:\n ops = g.get_ops(moa_loss_ops=True,\n moa_error_ops=True)\n evaluated = sess.run(ops, feed_dict=feed_dict)\n loss = evaluated[0]\n err = evaluated[1]\n batch_loss[0].append(loss)\n batch_err[0].append(err)\n else:\n ops = g.get_ops(loss_ops=True,\n error_ops=True)\n\n evaluated = sess.run(ops, feed_dict=feed_dict)\n loss = evaluated[:self._n_replicas]\n err = evaluated[self._n_replicas:]\n for i in range(self._n_replicas):\n batch_loss[i].append(loss[i])\n batch_err[i].append(err[i])\n\n except tf.errors.OutOfRangeError:\n if moa is False:\n loss = [np.mean(batch_loss[i]) for i in range(self._n_replicas)]\n err = [np.mean(batch_err[i]) for i in range(self._n_replicas)]\n res = loss + err\n else:\n loss = np.mean(batch_loss[0])\n err = np.mean(batch_err[0])\n res = [loss, err]\n sess.run(iterator.initializer)\n break\n\n return res\n\n\n def train(self, train_data_size=None, **kwargs):\n \"\"\"Trains model a single time using parallel tempering setup.\n \n Args:\n `train_data_size`: (Optional) The size of the train_data. This\n can be used in case where only part of data should be used\n while training. It is also possible to pass only a fraction\n of the data keeping this argument `None`. 
If `None`, uses\n the whole data.\n `kwargs`: Should be following keyword arguments: `train_data`,\n `train_labels`, `test_data`, `test_labels`, `validation_data`,\n `validation_labels`.\n \"\"\"\n self._graph = SimulatorGraph(self._model,\n self._learning_rate,\n self._noise_list,\n self._name,\n ensembles=self._ensembles,\n noise_type=self._noise_type,\n simulation_num=0,\n loss_func_name=self._loss_func_name,\n proba_coeff=self._proba_coeff,\n hessian=self._hessian,\n mode=self._mode,\n moa_lr=self._moa_lr,\n rmsprop_decay=self.rmsprop_decay,\n rmsprop_momentum=self.rmsprop_momentum,\n rmsprop_epsilon=self.rmsprop_epsilon)\n\n \n\n \n test_data = kwargs.get('test_data', None)\n test_labels = kwargs.get('test_labels', None)\n valid_data = kwargs.get('validation_data', None)\n valid_labels = kwargs.get('validation_labels', None)\n train_data = kwargs.get('train_data', None)\n train_labels = kwargs.get('train_labels', None)\n \n if self._mode in ['MOA', 'mixture_of_experts', 'moa']:\n pt_fn = self._parallel_tempering_train_moa\n else:\n pt_fn = self._parallel_tempering_train\n \n if train_data_size is not None:\n train_data, train_labels = sklearn.utils.shuffle(\n train_data, train_labels)\n train_data_ = train_data[:train_data_size]\n train_labels_ = train_labels[:train_data_size]\n else:\n train_data_ = train_data\n train_labels_ = train_labels\n pt_fn(train_data=train_data_,\n train_labels=train_labels_,\n test_data=test_data,\n test_labels=test_labels,\n validation_data=valid_data,\n validation_labels=valid_labels)\n \n def _initialize_uninitialized(self, sess):\n with self._graph.get_tf_graph().as_default():\n global_vars = tf.global_variables()\n is_not_initialized = sess.run([tf.is_variable_initialized(v)\n for v in global_vars])\n not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized)\n if not f]\n if len(not_initialized_vars):\n sess.run(tf.variables_initializer(not_initialized_vars))\n\n def _log_params(self):\n \"\"\"Creates and stores description file.\"\"\"\n dirpath = self._graph.get_summary_dirname()\n filepath = os.path.join(dirpath, 'description.json')\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n _log = {\n 'name':self._name,\n 'noise_type': self._noise_type,\n 'noise_list': self._noise_list,\n 'n_replicas': len(self._noise_list),\n 'learning_rate':self._learning_rate,\n 'n_epochs':self._n_epochs,\n 'batch_size':self._batch_size,\n 'swap_step': self._swap_step,\n 'separation_ratio': self._separation_ratio,\n 'n_simulations': self._n_simulations,\n 'tuning_parameter_name':self._tuning_param_name,\n 'description':self._description,\n 'burn_in_period':self._burn_in_period,\n 'proba_coeff':self._proba_coeff,\n 'n_params': int(self._graph._n_params),\n 'mode': self._mode,\n 'moa_lr': self._moa_lr\n }\n with open(filepath, 'w') as file:\n json.dump(_log, file, indent=4)\n\n def print_log(self, # pylint: disable=too-many-arguments\n epoch,\n step,\n loss,\n accept_ratio):\n \"\"\"Helper for logs during training.\"\"\"\n\n buff = \"[epoch:{0}]|[step:{1}]|[{2}]|[accept:{3:.3f}]\".format(\n epoch, step, ', '.join(['{0:.3f}'.format(l) for l in loss]), accept_ratio)\n\n\n self.stdout_write(buff)\n\n def stdout_write(self, buff): # pylint: disable=no-self-use\n \"\"\"Writes to stdout buffer with beginning of the line character.\"\"\"\n sys.stdout.write('\\r' + buff)\n sys.stdout.flush()\n" ]
[ [ "sklearn.utils.shuffle", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.global_variables", "tensorflow.variables_initializer", "tensorflow.is_variable_initialized", "tensorflow.ConfigProto", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hebafer/sqlflow
[ "d77416ae1dced412728783abee2d1008720d51f9", "d77416ae1dced412728783abee2d1008720d51f9" ]
[ "python/runtime/xgboost/evaluate.py", "python/runtime/xgboost/predict.py" ]
[ "# Copyright 2020 The SQLFlow Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport sklearn.metrics\nimport xgboost as xgb\nfrom runtime import db\nfrom runtime.dbapi.paiio import PaiIOConnection\nfrom runtime.feature.field_desc import DataType\nfrom runtime.model.metadata import load_metadata\nfrom runtime.xgboost.dataset import DMATRIX_FILE_SEP, xgb_dataset\n\nSKLEARN_METRICS = [\n 'accuracy_score',\n 'average_precision_score',\n 'balanced_accuracy_score',\n 'brier_score_loss',\n 'cohen_kappa_score',\n 'explained_variance_score',\n 'f1_score',\n 'fbeta_score',\n 'hamming_loss',\n 'hinge_loss',\n 'log_loss',\n 'mean_absolute_error',\n 'mean_squared_error',\n 'mean_squared_log_error',\n 'median_absolute_error',\n 'precision_score',\n 'r2_score',\n 'recall_score',\n 'roc_auc_score',\n 'zero_one_loss',\n]\n\nDEFAULT_PREDICT_BATCH_SIZE = 10000\n\n\ndef evaluate(datasource,\n select,\n feature_metas,\n feature_column_names,\n label_meta,\n result_table,\n validation_metrics=[\"accuracy_score\"],\n is_pai=False,\n pai_table=\"\",\n model_params=None,\n transform_fn=None,\n feature_column_code=\"\"):\n if not is_pai:\n conn = db.connect_with_data_source(datasource)\n else:\n conn = PaiIOConnection.from_table(pai_table)\n dpred = xgb_dataset(datasource,\n 'predict.txt',\n select,\n feature_metas,\n feature_column_names,\n label_meta,\n is_pai,\n pai_table,\n True,\n True,\n batch_size=DEFAULT_PREDICT_BATCH_SIZE,\n transform_fn=transform_fn,\n feature_column_code=feature_column_code\n ) # NOTE: default to use external memory\n bst = xgb.Booster({'nthread': 4}) # init model\n bst.load_model(\"my_model\") # load model\n if not model_params:\n model_params = load_metadata(\"model_meta.json\")[\"attributes\"]\n print(\"Start evaluating XGBoost model...\")\n feature_file_id = 0\n for pred_dmatrix in dpred:\n evaluate_and_store_result(bst, pred_dmatrix, feature_file_id,\n validation_metrics, model_params,\n feature_column_names, label_meta, is_pai,\n conn, result_table)\n feature_file_id += 1\n print(\"Done evaluating. 
Result table : %s\" % result_table)\n\n\ndef evaluate_and_store_result(bst, dpred, feature_file_id, validation_metrics,\n model_params, feature_column_names, label_meta,\n is_pai, conn, result_table):\n preds = bst.predict(dpred)\n if model_params:\n obj = model_params[\"objective\"]\n # binary:hinge output class labels\n if obj.startswith(\"binary:logistic\"):\n preds = (preds > 0.5).astype(int)\n # multi:softmax output class labels\n elif obj.startswith(\"multi:softprob\"):\n preds = np.argmax(np.array(preds), axis=1)\n # TODO(typhoonzero): deal with binary:logitraw when needed.\n else:\n # prediction output with multi-class job has two dimensions, this\n # is a temporary way, can remove this else branch when we can load\n # the model meta not only on PAI submitter.\n if len(preds.shape) == 2:\n preds = np.argmax(np.array(preds), axis=1)\n\n if is_pai:\n feature_file_read = open(\"predict.txt\", \"r\")\n else:\n feature_file_read = open(\"predict.txt_%d\" % feature_file_id, \"r\")\n\n y_test_list = []\n for line in feature_file_read:\n row = [i for i in line.strip().split(DMATRIX_FILE_SEP)]\n # DMatrix store label in the first column\n if label_meta[\"dtype\"] == \"float32\" or label_meta[\n \"dtype\"] == DataType.FLOAT32:\n label = float(row[0])\n elif label_meta[\"dtype\"] == \"int64\" or label_meta[\n \"dtype\"] == \"int32\" or label_meta[\"dtype\"] == DataType.INT64:\n label = int(row[0])\n else:\n raise ValueError(\"unsupported label dtype: %s\" %\n label_meta[\"dtype\"])\n y_test_list.append(label)\n y_test = np.array(y_test_list)\n\n evaluate_results = dict()\n for metric_name in validation_metrics:\n if metric_name not in SKLEARN_METRICS:\n raise ValueError(\"unsupported metric: %s\" % metric_name)\n metric_func = getattr(sklearn.metrics, metric_name)\n metric_value = metric_func(y_test, preds)\n evaluate_results[metric_name] = metric_value\n\n # write evaluation result to result table\n result_columns = [\"loss\"] + validation_metrics\n with db.buffered_db_writer(conn, result_table, result_columns, 100) as w:\n row = [\"0.0\"]\n for mn in validation_metrics:\n row.append(str(evaluate_results[mn]))\n w.write(row)\n", "# Copyright 2020 The SQLFlow Authors. 
All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import datetime\n\nimport numpy as np\nimport xgboost as xgb\nfrom runtime import db\nfrom runtime.dbapi.paiio import PaiIOConnection\nfrom runtime.model.metadata import load_metadata\nfrom runtime.xgboost.dataset import DMATRIX_FILE_SEP, xgb_dataset\n\nDEFAULT_PREDICT_BATCH_SIZE = 10000\n\n\ndef pred(datasource,\n select,\n feature_metas,\n feature_column_names,\n train_label_meta,\n pred_label_meta,\n result_table,\n is_pai=False,\n pai_table=\"\",\n model_params=None,\n train_params=None,\n transform_fn=None,\n feature_column_code=\"\",\n flags=None):\n rank = 0\n nworkers = len(flags.worker_hosts.split(\",\")) if flags else 1\n if nworkers > 1:\n if not is_pai:\n raise Exception(\n \"XGBoost distributed predict is only supported on PAI\")\n if flags.job_name != \"worker\":\n return # ignore ps\n rank = flags.task_index\n pred_imp(datasource, select, feature_metas, feature_column_names,\n train_label_meta, pred_label_meta, result_table, is_pai,\n pai_table, model_params, train_params, transform_fn,\n feature_column_code, rank, nworkers)\n\n\ndef pred_imp(datasource,\n select,\n feature_metas,\n feature_column_names,\n train_label_meta,\n pred_label_meta,\n result_table,\n is_pai=False,\n pai_table=\"\",\n model_params=None,\n train_params=None,\n transform_fn=None,\n feature_column_code=\"\",\n rank=0,\n nworkers=1):\n print(\"rank={} nworkers={}\".format(rank, nworkers))\n if not is_pai:\n conn = db.connect_with_data_source(datasource)\n else:\n conn = PaiIOConnection.from_table(pai_table)\n dpred = xgb_dataset(\n datasource=datasource,\n fn='predict.txt',\n dataset_sql=select,\n feature_metas=feature_metas,\n feature_column_names=feature_column_names,\n label_meta=None,\n is_pai=is_pai,\n pai_table=pai_table,\n pai_single_file=True,\n cache=True,\n batch_size=DEFAULT_PREDICT_BATCH_SIZE,\n rank=rank,\n nworkers=nworkers,\n transform_fn=transform_fn,\n feature_column_code=feature_column_code,\n raw_data_dir=\"predict.raw.dir\") # NOTE: default to use external memory\n bst = xgb.Booster({'nthread': 4}) # init model\n bst.load_model(\"my_model\") # load data\n print(\"{} Start predicting XGBoost model...\".format(datetime.now()))\n if not model_params:\n model_params = load_metadata(\"model_meta.json\")[\"attributes\"]\n\n selected_cols = db.selected_cols(conn, select)\n\n feature_file_id = 0\n train_label_name = train_label_meta[\"feature_name\"]\n pred_label_name = pred_label_meta[\"feature_name\"]\n for pred_dmatrix in dpred:\n predict_and_store_result(bst, pred_dmatrix, feature_file_id,\n model_params, selected_cols, train_label_name,\n pred_label_name, feature_column_names,\n feature_metas, is_pai, conn, result_table,\n rank)\n feature_file_id += 1\n print(\"{} Done predicting. 
Predict table: {}\".format(\n datetime.now(), result_table))\n\n\ndef predict_and_store_result(bst,\n dpred,\n feature_file_id,\n model_params,\n selected_cols,\n train_label_name,\n pred_label_name,\n feature_column_names,\n feature_metas,\n is_pai,\n conn,\n result_table,\n slice_id=0):\n preds = bst.predict(dpred)\n if model_params:\n obj = model_params[\"objective\"]\n # binary:hinge output class labels\n if obj == \"binary:logistic\":\n preds = (preds > 0.5).astype(int)\n elif obj == \"multi:softprob\":\n preds = np.argmax(np.array(preds), axis=1)\n elif obj == \"multi:softmax\":\n # multi:softmax output class labels\n # Need to convert to int. Otherwise, the\n # table writer of MaxCompute would cause\n # error because of writing float values.\n preds = np.array(preds).astype(int)\n # TODO(typhoonzero): deal with binary:logitraw when needed.\n else:\n # prediction output with multi-class job has two dimensions, this\n # is a temporary way, can remove this else branch when we can load\n # the model meta not only on PAI submitter.\n if len(preds.shape) == 2:\n preds = np.argmax(np.array(preds), axis=1)\n\n if is_pai:\n feature_file_read = open(\"predict.txt.raw\", \"r\")\n else:\n feature_file_read = open(\n \"predict.raw.dir/predict.txt_%d\" % feature_file_id, \"r\")\n\n result_column_names = selected_cols[:]\n # remove train_label_name from result column, if train_label_name == \"\" or\n # the train_label_name is not selected, the index should be -1\n try:\n train_label_index = selected_cols.index(train_label_name)\n except ValueError:\n train_label_index = -1\n if train_label_index != -1:\n del result_column_names[train_label_index]\n result_column_names.append(pred_label_name)\n\n line_no = 0\n with db.buffered_db_writer(conn, result_table, result_column_names, 100,\n slice_id) as w:\n while True:\n line = feature_file_read.readline()\n if not line:\n break\n # FIXME(typhoonzero): how to output columns that are not used\n # as features, like ids?\n row = [\n item\n for i, item in enumerate(line.strip().split(DMATRIX_FILE_SEP))\n if i != train_label_index\n ]\n row.append(preds[line_no])\n w.write(row)\n line_no += 1\n" ]
[ [ "numpy.array" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gzaraunitn/TA3N
[ "d83ae5d9c8f4452ff69dd9002bb4016a695a4be8" ]
[ "dataset.py" ]
[ "import torch.utils.data as data\n\nimport os\nimport os.path\nimport numpy as np\nfrom numpy.random import randint\nimport torch\n\nfrom colorama import init\nfrom colorama import Fore, Back, Style\n\nimport random\nfrom os import listdir\nfrom os.path import join, splitext\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as TF\nfrom PIL import Image, ImageFilter, ImageFile\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision import transforms\n\ninit(autoreset=True)\n\nclass VideoRecord(object):\n def __init__(self, row):\n self._data = row\n\n @property\n def path(self):\n return self._data[0]\n\n @property\n def num_frames(self):\n return int(self._data[1])\n\n @property\n def label(self):\n return int(self._data[2])\n\n\nclass TSNDataSet(data.Dataset):\n def __init__(self, root_path, list_file, num_dataload,\n num_segments=3, new_length=1, modality='RGB',\n image_tmpl='img_{:05d}.t7', transform=None,\n force_grayscale=False, random_shift=True, test_mode=False):\n\n self.root_path = root_path\n self.list_file = list_file\n self.num_segments = num_segments\n self.new_length = new_length\n self.modality = modality\n self.image_tmpl = image_tmpl\n self.transform = transform\n self.random_shift = random_shift\n self.test_mode = test_mode\n self.num_dataload = num_dataload\n\n if self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':\n self.new_length += 1 # Diff needs one more image to calculate diff\n\n self._parse_list() # read all the video files\n\n def _load_feature(self, directory, idx):\n if self.modality == 'RGB' or self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':\n feat_path = os.path.join(directory, self.image_tmpl.format(idx))\n try:\n feat = [torch.load(feat_path)]\n except:\n print(Back.RED + feat_path)\n return feat\n\n elif self.modality == 'Flow':\n x_feat = torch.load(os.path.join(directory, self.image_tmpl.format('x', idx)))\n y_feat = torch.load(os.path.join(directory, self.image_tmpl.format('y', idx)))\n\n return [x_feat, y_feat]\n\n\n def _parse_list(self):\n self.video_list = [VideoRecord(x.strip().split(' ')) for x in open(self.list_file)]\n # repeat the list if the length is less than num_dataload (especially for target data)\n n_repeat = self.num_dataload//len(self.video_list)\n n_left = self.num_dataload%len(self.video_list)\n self.video_list = self.video_list*n_repeat + self.video_list[:n_left]\n\n def _sample_indices(self, record):\n \"\"\"\n\n :param record: VideoRecord\n :return: list\n \"\"\"\n #np.random.seed(1)\n average_duration = (record.num_frames - self.new_length + 1) // self.num_segments\n if average_duration > 0:\n offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments)\n elif record.num_frames > self.num_segments:\n offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))\n else:\n offsets = np.zeros((self.num_segments,))\n return offsets + 1\n\n def _get_val_indices(self, record):\n num_min = self.num_segments + self.new_length - 1\n num_select = record.num_frames - self.new_length + 1\n\n if record.num_frames >= num_min:\n tick = float(num_select) / float(self.num_segments)\n offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)])\n else:\n offsets = np.zeros((self.num_segments,))\n return offsets + 1\n\n def _get_test_indices(self, record):\n num_min = 
self.num_segments + self.new_length - 1\n num_select = record.num_frames - self.new_length + 1\n\n if record.num_frames >= num_min:\n tick = float(num_select) / float(self.num_segments)\n offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)]) # pick the central frame in each segment\n else: # the video clip is too short --> duplicate the last frame\n id_select = np.array([x for x in range(num_select)])\n # expand to the length of self.num_segments with the last element\n id_expand = np.ones(self.num_segments-num_select,dtype=int)*id_select[id_select[0]-1]\n offsets = np.append(id_select, id_expand)\n\n return offsets + 1\n\n def __getitem__(self, index):\n record = self.video_list[index]\n\n if not self.test_mode:\n segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record)\n else:\n segment_indices = self._get_test_indices(record)\n\n return self.get(record, segment_indices)\n\n def get(self, record, indices):\n\n frames = list()\n\n for seg_ind in indices:\n p = int(seg_ind)\n for i in range(self.new_length):\n seg_feats = self._load_feature(record.path, p)\n frames.extend(seg_feats)\n\n if p < record.num_frames:\n p += 1\n\n # process_data = self.transform(frames)\n process_data = torch.stack(frames)\n\n return process_data, record.label\n\n def __len__(self):\n return len(self.video_list)\n\n\nclass VideoDataset(data.Dataset):\n\n def __init__(\n self,\n folder,\n n_frames,\n frame_size=224,\n separator=\"_\"\n ):\n\n self.folder = folder\n self.num_segments = n_frames\n self.frame_size = frame_size\n\n self.data_transform = transforms.Compose(\n [\n transforms.Resize(self.frame_size),\n transforms.CenterCrop(self.frame_size),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n ),\n ]\n )\n\n self.separator = separator\n self.classes = [c for c in sorted(listdir(folder))]\n self.videos_with_classes = []\n\n for c_index, c in enumerate(self.classes):\n c_path = join(self.folder, c)\n videos = listdir(c_path)\n for v in videos:\n v_path = join(c_path, v)\n num_frames = len(listdir(v_path))\n if num_frames >= self.num_segments:\n pair = (v_path, c_index)\n self.videos_with_classes.append(pair)\n \n\n def _get_test_indices(self, num_frames):\n num_min = self.num_segments\n num_select = num_frames\n\n if num_frames >= num_min:\n tick = float(num_select) / float(self.num_segments)\n offsets = np.array(\n [int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)]\n ) # pick the central frame in each segment\n else: # the video clip is too short --> duplicate the last frame\n id_select = np.array([x for x in range(num_select)])\n # expand to the length of self.num_segments with the last element\n id_expand = (\n np.ones(self.num_segments - num_select, dtype=int)\n * id_select[id_select[0] - 1]\n )\n offsets = np.append(id_select, id_expand)\n\n return offsets\n\n def __getitem__(self, index):\n\n video, label = self.videos_with_classes[index]\n frames_temp = sorted(\n listdir(video),\n key=lambda path: int(path.split(self.separator)[-1].split(\".\")[0]),\n )\n frames = [f for f in frames_temp if f.endswith('jpg') or f.endswith('jpeg')]\n num_frames = len(frames)\n\n data = []\n\n segment_indices = self._get_test_indices(num_frames)\n for index in segment_indices:\n frame = frames[index]\n frame_path = join(video, frame)\n frame_img = Image.open(frame_path)\n frame_feat = self.data_transform(frame_img)\n data.append(frame_feat)\n tensor = 
torch.stack(data)\n\n return tensor, label\n\n def __len__(self):\n return len(self.videos_with_classes)" ]
[ [ "torch.load", "numpy.ones", "numpy.append", "torch.stack", "torch.utils.data.append", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
naotohori/cafysis
[ "9d8534121c01ea75ae965cf39a1e307052ff8523", "9d8534121c01ea75ae965cf39a1e307052ff8523" ]
[ "mtx_coord_transform.py", "20130630_1.py" ]
[ "#!/usr/bin/env python\n# coding: UTF-8\n'''\nCreated on 2013/06/28 (based on matrix_transform_make.py)\n@author: Naoto Hori\n'''\n\nimport numpy as np\nfrom numpy import identity, dot\nfrom math import cos, sin\n\nclass mtx_crd_transform():\n def __init__(self):\n self.mtx = identity(4)\n \n def reset(self):\n self.mtx = identity(4)\n \n def show(self):\n for i in range(4):\n print(tuple(self.mtx[i]))\n \n def do_to_array(self, d):\n '''配列d[x,y,z]を受け取って、mtxを施して返す。\n dの値は変更されない。\n 使い方:\n d = mtx_crd_transform.do_to_array(d)'''\n return dot(self.mtx, d+[1.0,])[0:3]\n \n def do_to_data(self,d):\n for i,v in enumerate(d):\n d[i][0:3] = dot(self.mtx, v+[1.0,])[0:3]\n\n def do_to_ndarray(self,d):\n n, _ = d.shape\n for i in range(n):\n v = np.concatenate( (d[i,0:3], [1.0,]) )\n d[i,0:3] = dot(self.mtx, v)[0:3]\n\n def do_to_1darray(self,d):\n r = np.empty((3,))\n v = np.concatenate( (d[0:3], [1.0,]) )\n r[0:3] = dot(self.mtx, v)[0:3]\n return r\n\n def translation(self,x,y,z):\n '''並進移動'''\n self.mtx[0,3] += x\n self.mtx[1,3] += y\n self.mtx[2,3] += z\n\n def rotate_by_mtx(self, mtx_rot):\n self.mtx = dot(mtx_rot, self.mtx)\n \n def rotate(self, nx, ny, nz, t):\n '''任意の単位ベクトル(nx,ny,nz)を軸として、角度tだけ回転'''\n ope = identity(4) # operation matrix\n \n ope[0,0] = nx*nx*(1.0-cos(t)) + cos(t)\n ope[1,0] = nx*ny*(1.0-cos(t)) + nz*sin(t)\n ope[2,0] = nz*nx*(1.0-cos(t)) - ny*sin(t)\n \n ope[0,1] = nx*ny*(1.0-cos(t)) - nz*sin(t)\n ope[1,1] = ny*ny*(1.0-cos(t)) + cos(t)\n ope[2,1] = ny*nz*(1.0-cos(t)) + nx*sin(t)\n \n ope[0,2] = nz*nx*(1.0-cos(t)) + ny*sin(t)\n ope[1,2] = ny*nz*(1.0-cos(t)) - nx*sin(t)\n ope[2,2] = nz*nz*(1.0-cos(t)) + cos(t)\n \n self.mtx = dot(ope, self.mtx)\n \n def rotate_x(self, t):\n '''X軸まわりに、角度tだけ回転'''\n ope = identity(4) # operation matrix\n \n ope[1,1] = cos(t)\n ope[2,1] = sin(t)\n \n ope[1,2] = -sin(t)\n ope[2,2] = cos(t)\n \n self.mtx = dot(ope, self.mtx)\n \n def rotate_y(self, t):\n '''Y軸まわりに、角度tだけ回転'''\n ope = identity(4) # operation matrix\n \n ope[0,0] = cos(t)\n ope[2,0] = -sin(t)\n \n ope[0,2] = sin(t)\n ope[2,2] = cos(t)\n \n self.mtx = dot(ope, self.mtx)\n \n def rotate_z(self, t):\n '''Z軸まわりに、角度tだけ回転'''\n ope = identity(4) # operation matrix\n \n ope[0,0] = cos(t)\n ope[1,0] = sin(t)\n \n ope[0,1] = -sin(t)\n ope[1,1] = cos(t)\n \n self.mtx = dot(ope, self.mtx)\n \n def euler_zxz(self,a,b,c):\n '''Z-X-Z系のオイラー角で回転'''\n '''これは、rotate_z, rotate_x, rotate_zを連続で呼び出すのと同じ'''\n ope = identity(4) # operation matrix\n \n ope[0,0] = cos(a)*cos(c) - sin(a)*cos(b)*sin(c)\n ope[1,0] = cos(a)*sin(c) + sin(a)*cos(b)*cos(c)\n ope[2,0] = sin(a)*sin(b)\n \n ope[0,1] = - sin(a)*cos(c) - cos(a)*cos(b)*sin(c)\n ope[1,1] = - sin(a)*sin(c) + cos(a)*cos(b)*cos(c)\n ope[2,1] = cos(a)*sin(b)\n \n ope[0,2] = sin(b)*sin(c)\n ope[1,2] = - sin(b)*cos(c)\n ope[2,2] = cos(b)\n \n self.mtx = dot(ope, self.mtx)\n \nif __name__ == \"__main__\" :\n import sys\n \n if not len(sys.argv) in (7,8):\n print('')\n print('This script makes a homogeneous transformation matrix,')\n print('angles of which is defined by Z-X-Z Euler angles.')\n print('')\n print('Usage: % SCRIPT [alpha] [beta] [gamma] [x] [y] [z] [[output]]')\n print('')\n print('When \"output\" is specified, the matrix will be written in the file.')\n print('Otherwise STDOUT is used to display.')\n sys.exit(2)\n\n a = float(sys.argv[1])\n b = float(sys.argv[2])\n c = float(sys.argv[3])\n x = float(sys.argv[4])\n y = float(sys.argv[5])\n z = float(sys.argv[6])\n \n mtx = mtx_crd_transform()\n mtx.translation(x, y, z)\n mtx.euler_zxz(a, b, c)\n \n if 
len(sys.argv) == 8: # Output to a file\n file_mat = file(sys.argv[-1],'w')\n file_mat.write('#matrix\\n')\n for i in range(4):\n file_mat.write('%15.10f %15.10f %15.10f %15.10f\\n' % tuple(mtx.mtx[i]))\n file_mat.write('#a: %f\\n#b: %f\\n#c: %f\\n' % (a,b,c)) \n file_mat.write('#x: %f\\n#y: %f\\n#z: %f\\n' % (x,y,z)) \n file_mat.close()\n else: # Display on STDOUT\n sys.stdout.write('#matrix\\n')\n for i in range(4):\n sys.stdout.write('%15.10f %15.10f %15.10f %15.10f\\n' % tuple(mtx.mtx[i]))\n sys.stdout.write('\\n')\n sys.stdout.write('#a: %f\\n#b: %f\\n#c: %f\\n' % (a,b,c)) \n sys.stdout.write('#x: %f\\n#y: %f\\n#z: %f\\n' % (x,y,z)) \n", "#!/usr/bin/env python\n#vim:fileencoding=UTF-8\n\nimport sys\nfrom numpy import histogram, histogram2d, zeros\nimport math\nimport matplotlib.pyplot as plt\nfrom scipy.constants.constants import pi\n\nif len(sys.argv) != 4:\n print('Usage: SCRIPT [input data] [output prefix] [output file]')\n sys.exit(2)\n \nfile_in = open(sys.argv[1],'r')\nfile_pfx = sys.argv[2]\nfile_out = open(sys.argv[3],'w')\n\nCOL_DIST = 1 - 1\nCOL_THETA = 4 - 1\nCOL_PHI = 5 - 1\n\ndist = []\nphi = []\ntheta = [] \n\nphi1=[]\ntheta1=[]\nphi2=[]\ntheta2=[]\nphi3=[]\ntheta3=[]\n\nfor l in file_in:\n if l.find('#') != -1:\n continue\n lsp = l.split()\n r = float(lsp[COL_DIST])\n t = float(lsp[COL_THETA])\n p = float(lsp[COL_PHI])\n dist.append(r)\n theta.append(t)\n phi.append(p)\n if r < 100.0:\n theta1.append(t)\n phi1.append(p)\n elif r < 150.0:\n theta2.append(t)\n phi2.append(p)\n else:\n theta3.append(t)\n phi3.append(p)\n \ntheta_bins = [x*10.0 for x in range(0,19)]\nphi_bins = [x*10.0 for x in range(-18,19)]\n#theta_bins = [x*15.0 for x in xrange(0,13)]\n#phi_bins = [x*15.0 for x in xrange(-12,13)]\n\nweight = []\nEPSI = 0.001\nfor t in theta:\n radt = math.radians(t)\n# weight.append(1.0/math.sin(radt))\n if radt<EPSI or (pi-radt)<EPSI:\n weight.append(1.0/math.sin(EPSI))\n else:\n weight.append(1.0/math.sin(radt))\n\nH, theta_edge, phi_edge = histogram2d(theta,phi,bins=[theta_bins,phi_bins],\n normed=False)\n\nHn, theta_edge, phi_edge = histogram2d(theta,phi,bins=[theta_bins,phi_bins],\n normed=True)\n\nHw, theta_edge, phi_edge = histogram2d(theta,phi,bins=[theta_bins,phi_bins],\n normed=False, weights=weight)\n\nHnw, theta_edge, phi_edge = histogram2d(theta,phi,bins=[theta_bins,phi_bins],\n normed=True, weights=weight)\n\nHj = zeros((18,36))\nHnj = zeros((18,36))\nfor t in range(18):\n t_deg = t*10.0 + 5.0\n for p in range(36):\n Hj[t,p] = H[t,p] / math.sin(math.radians(t_deg))\n Hnj[t,p] = Hn[t,p] / math.sin(math.radians(t_deg))\n#Hj = zeros((12,24))\n#Hnj = zeros((12,24))\n#for t in xrange(12):\n# t_deg = t*15.0 + 7.5\n# for p in xrange(24):\n# Hj[t,p] = H[t,p] / math.sin(math.radians(t_deg))\n# Hnj[t,p] = Hn[t,p] / math.sin(math.radians(t_deg))\n \n\n\n#H1, theta_edge, phi_edge = histogram2d(theta1,phi1,bins=[theta_bins,phi_bins],\n# normed=True)\n#H2, theta_edge, phi_edge = histogram2d(theta2,phi2,bins=[theta_bins,phi_bins],\n# normed=True)\n#H3, theta_edge, phi_edge = histogram2d(theta3,phi3,bins=[theta_bins,phi_bins],\n# normed=True)\n\nextent=[phi_edge[-1],phi_edge[0],theta_edge[-1],theta_edge[0]]\n\nplt.imshow(H,extent=extent, interpolation='nearest')\nplt.colorbar()\nplt.savefig(file_pfx+'H.png')\n\nplt.clf()\nplt.imshow(Hn,extent=extent, interpolation='nearest')\nplt.colorbar()\nplt.savefig(file_pfx+'Hn.png')\n\nplt.clf()\n#plt.imshow(Hw,extent=extent, interpolation='nearest')\nplt.imshow(Hw,extent=extent, 
interpolation='nearest',vmax=300)\nplt.colorbar()\nplt.savefig(file_pfx+'Hw.png')\n\nplt.clf()\nplt.imshow(Hnw,extent=extent, interpolation='nearest')\nplt.colorbar()\nplt.savefig(file_pfx+'Hnw.png')\n\nplt.clf()\nplt.imshow(Hj,extent=extent, interpolation='nearest')\nplt.colorbar()\nplt.savefig(file_pfx+'Hj.png')\n\nplt.clf()\nplt.imshow(Hnj,extent=extent, interpolation='nearest')\nplt.colorbar()\nplt.savefig(file_pfx+'Hnj.png')\n\n#plt.imshow(H,extent=extent, interpolation='nearest')\n#plt.colorbar()\n#plt.savefig(file_pfx+'hist.png')\n#\n#plt.imshow(H1,extent=extent, interpolation='nearest')\n#plt.savefig(file_pfx+'hist1.png')\n#\n#plt.imshow(H2,extent=extent, interpolation='nearest')\n#plt.savefig(file_pfx+'hist2.png')\n#\n#plt.imshow(H3,extent=extent, interpolation='nearest')\n#plt.savefig(file_pfx+'hist3.png')\n\n\n#dist_bins = [x*10.0 for x in xrange(0,31)]\n#H, dist_edge = histogram(dist,bins=dist_bins,normed=True)\n#for i,x in enumerate(H):\n# file_out.write('%8.3f %8.6f %8.3f %8.3f\\n'\n# % ((dist_edge[i]+dist_edge[i+1])*0.5, x, dist_edge[i], dist_edge[i+1]))\n#\nfile_out.close()" ]
[ [ "numpy.concatenate", "numpy.dot", "numpy.identity", "numpy.empty" ], [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.savefig", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.clf", "numpy.zeros", "numpy.histogram2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jakelishman/qiskit-experiments
[ "f4d23506ac5ea4af22721496d8d5c9bcb4562916" ]
[ "test/test_qubit_spectroscopy.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Spectroscopy tests.\"\"\"\nfrom test.base import QiskitExperimentsTestCase\nfrom typing import Tuple\nimport numpy as np\n\nfrom qiskit import QuantumCircuit\nfrom qiskit.qobj.utils import MeasLevel\n\nfrom qiskit_experiments.library import QubitSpectroscopy, EFSpectroscopy\nfrom qiskit_experiments.test.mock_iq_backend import MockIQBackend\n\n\nclass SpectroscopyBackend(MockIQBackend):\n \"\"\"A simple and primitive backend to test spectroscopy experiments.\"\"\"\n\n def __init__(\n self,\n line_width: float = 2e6,\n freq_offset: float = 0.0,\n iq_cluster_centers: Tuple[float, float, float, float] = (1.0, 1.0, -1.0, -1.0),\n iq_cluster_width: float = 0.2,\n ):\n \"\"\"Initialize the spectroscopy backend.\"\"\"\n\n super().__init__(iq_cluster_centers, iq_cluster_width)\n\n self.configuration().basis_gates = [\"x\"]\n\n self._linewidth = line_width\n self._freq_offset = freq_offset\n\n super().__init__(iq_cluster_centers, iq_cluster_width)\n\n def _compute_probability(self, circuit: QuantumCircuit) -> float:\n \"\"\"Returns the probability based on the frequency.\"\"\"\n freq_shift = next(iter(circuit.calibrations[\"Spec\"]))[1][0]\n delta_freq = freq_shift - self._freq_offset\n return np.exp(-(delta_freq ** 2) / (2 * self._linewidth ** 2))\n\n\nclass TestQubitSpectroscopy(QiskitExperimentsTestCase):\n \"\"\"Test spectroscopy experiment.\"\"\"\n\n def test_spectroscopy_end2end_classified(self):\n \"\"\"End to end test of the spectroscopy experiment.\"\"\"\n\n backend = SpectroscopyBackend(line_width=2e6)\n qubit = 1\n freq01 = backend.defaults().qubit_freq_est[qubit]\n frequencies = np.linspace(freq01 - 10.0e6, freq01 + 10.0e6, 21)\n\n spec = QubitSpectroscopy(qubit, frequencies)\n spec.set_run_options(meas_level=MeasLevel.CLASSIFIED)\n expdata = spec.run(backend)\n result = expdata.analysis_results(1)\n value = result.value.value\n\n self.assertTrue(4.999e9 < value < 5.001e9)\n self.assertEqual(result.quality, \"good\")\n\n # Test if we find still find the peak when it is shifted by 5 MHz.\n backend = SpectroscopyBackend(line_width=2e6, freq_offset=5.0e6)\n\n spec = QubitSpectroscopy(qubit, frequencies)\n spec.set_run_options(meas_level=MeasLevel.CLASSIFIED)\n expdata = spec.run(backend)\n result = expdata.analysis_results(1)\n value = result.value.value\n\n self.assertTrue(5.0049e9 < value < 5.0051e9)\n self.assertEqual(result.quality, \"good\")\n\n def test_spectroscopy_end2end_kerneled(self):\n \"\"\"End to end test of the spectroscopy experiment on IQ data.\"\"\"\n\n backend = SpectroscopyBackend(line_width=2e6)\n qubit = 0\n freq01 = backend.defaults().qubit_freq_est[qubit]\n frequencies = np.linspace(freq01 - 10.0e6, freq01 + 10.0e6, 21)\n\n spec = QubitSpectroscopy(qubit, frequencies)\n expdata = spec.run(backend)\n result = expdata.analysis_results(1)\n value = result.value.value\n\n self.assertTrue(freq01 - 2e6 < value < freq01 + 2e6)\n self.assertEqual(result.quality, \"good\")\n\n # Test if we find still find the peak when it is shifted by 5 MHz.\n backend = 
SpectroscopyBackend(line_width=2e6, freq_offset=5.0e6)\n\n spec = QubitSpectroscopy(qubit, frequencies)\n expdata = spec.run(backend)\n result = expdata.analysis_results(1)\n value = result.value.value\n\n self.assertTrue(freq01 + 3e6 < value < freq01 + 8e6)\n self.assertEqual(result.quality, \"good\")\n\n spec.set_run_options(meas_return=\"avg\")\n expdata = spec.run(backend)\n result = expdata.analysis_results(1)\n value = result.value.value\n\n self.assertTrue(freq01 + 3e6 < value < freq01 + 8e6)\n self.assertEqual(result.quality, \"good\")\n\n def test_spectroscopy12_end2end_classified(self):\n \"\"\"End to end test of the spectroscopy experiment with an x pulse.\"\"\"\n\n backend = SpectroscopyBackend(line_width=2e6)\n qubit = 0\n freq01 = backend.defaults().qubit_freq_est[qubit]\n frequencies = np.linspace(freq01 - 10.0e6, freq01 + 10.0e6, 21)\n\n # Note that the backend is not sophisticated enough to simulate an e-f\n # transition so we run the test with g-e.\n spec = EFSpectroscopy(qubit, frequencies)\n spec.backend = backend\n spec.set_run_options(meas_level=MeasLevel.CLASSIFIED)\n expdata = spec.run(backend)\n result = expdata.analysis_results(1)\n value = result.value.value\n\n self.assertTrue(freq01 - 2e6 < value < freq01 + 2e6)\n self.assertEqual(result.quality, \"good\")\n\n # Test the circuits\n circ = spec.circuits()[0]\n self.assertEqual(circ.data[0][0].name, \"x\")\n self.assertEqual(circ.data[1][0].name, \"Spec\")\n\n def test_experiment_config(self):\n \"\"\"Test converting to and from config works\"\"\"\n exp = QubitSpectroscopy(1, np.linspace(100, 150, 20) * 1e6)\n loaded_exp = QubitSpectroscopy.from_config(exp.config())\n self.assertNotEqual(exp, loaded_exp)\n self.assertTrue(self.experiments_equiv(exp, loaded_exp))\n\n def test_roundtrip_serializable(self):\n \"\"\"Test round trip JSON serialization\"\"\"\n exp = QubitSpectroscopy(1, np.linspace(int(100e6), int(150e6), int(20e6)))\n self.assertRoundTripSerializable(exp, self.experiments_equiv)\n" ]
[ [ "numpy.exp", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Quansight/pandas
[ "511fd46e68b12317eb925d4bf7405c2d33daba6c", "511fd46e68b12317eb925d4bf7405c2d33daba6c", "511fd46e68b12317eb925d4bf7405c2d33daba6c", "511fd46e68b12317eb925d4bf7405c2d33daba6c", "511fd46e68b12317eb925d4bf7405c2d33daba6c", "4071dde86e33434e1bee8304fa62074949f813cc" ]
[ "pandas/tests/io/excel/test_writers.py", "pandas/core/internals/construction.py", "pandas/core/aggregation.py", "asv_bench/benchmarks/indexing.py", "pandas/tests/base/test_ops.py", "pandas/tests/scalar/timestamp/test_arithmetic.py" ]
[ "from datetime import date, datetime, timedelta\nfrom functools import partial\nfrom io import BytesIO\nimport os\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import DataFrame, Index, MultiIndex, get_option, set_option\nimport pandas._testing as tm\n\nfrom pandas.io.excel import (\n ExcelFile,\n ExcelWriter,\n _OpenpyxlWriter,\n _XlsxWriter,\n _XlwtWriter,\n register_writer,\n)\n\n\[email protected]\ndef path(ext):\n \"\"\"\n Fixture to open file for use in each test case.\n \"\"\"\n with tm.ensure_clean(ext) as file_path:\n yield file_path\n\n\[email protected]\ndef set_engine(engine, ext):\n \"\"\"\n Fixture to set engine for use in each test case.\n\n Rather than requiring `engine=...` to be provided explicitly as an\n argument in each test, this fixture sets a global option to dictate\n which engine should be used to write Excel files. After executing\n the test it rolls back said change to the global option.\n \"\"\"\n option_name = \"io.excel.{ext}.writer\".format(ext=ext.strip(\".\"))\n prev_engine = get_option(option_name)\n set_option(option_name, engine)\n yield\n set_option(option_name, prev_engine) # Roll back option change\n\n\[email protected]_if_no(\"xlrd\")\[email protected](\"ext\", [\".xls\", \".xlsx\", \".xlsm\"])\nclass TestRoundTrip:\n @td.skip_if_no(\"xlwt\")\n @td.skip_if_no(\"openpyxl\")\n @pytest.mark.parametrize(\n \"header,expected\",\n [(None, DataFrame([np.nan] * 4)), (0, DataFrame({\"Unnamed: 0\": [np.nan] * 3}))],\n )\n def test_read_one_empty_col_no_header(self, ext, header, expected):\n # xref gh-12292\n filename = \"no_header\"\n df = pd.DataFrame([[\"\", 1, 100], [\"\", 2, 200], [\"\", 3, 300], [\"\", 4, 400]])\n\n with tm.ensure_clean(ext) as path:\n df.to_excel(path, filename, index=False, header=False)\n result = pd.read_excel(path, filename, usecols=[0], header=header)\n\n tm.assert_frame_equal(result, expected)\n\n @td.skip_if_no(\"xlwt\")\n @td.skip_if_no(\"openpyxl\")\n @pytest.mark.parametrize(\n \"header,expected\",\n [(None, DataFrame([0] + [np.nan] * 4)), (0, DataFrame([np.nan] * 4))],\n )\n def test_read_one_empty_col_with_header(self, ext, header, expected):\n filename = \"with_header\"\n df = pd.DataFrame([[\"\", 1, 100], [\"\", 2, 200], [\"\", 3, 300], [\"\", 4, 400]])\n\n with tm.ensure_clean(ext) as path:\n df.to_excel(path, \"with_header\", index=False, header=True)\n result = pd.read_excel(path, filename, usecols=[0], header=header)\n\n tm.assert_frame_equal(result, expected)\n\n @td.skip_if_no(\"openpyxl\")\n @td.skip_if_no(\"xlwt\")\n def test_set_column_names_in_parameter(self, ext):\n # GH 12870 : pass down column names associated with\n # keyword argument names\n refdf = pd.DataFrame([[1, \"foo\"], [2, \"bar\"], [3, \"baz\"]], columns=[\"a\", \"b\"])\n\n with tm.ensure_clean(ext) as pth:\n with ExcelWriter(pth) as writer:\n refdf.to_excel(writer, \"Data_no_head\", header=False, index=False)\n refdf.to_excel(writer, \"Data_with_head\", index=False)\n\n refdf.columns = [\"A\", \"B\"]\n\n with ExcelFile(pth) as reader:\n xlsdf_no_head = pd.read_excel(\n reader, \"Data_no_head\", header=None, names=[\"A\", \"B\"]\n )\n xlsdf_with_head = pd.read_excel(\n reader, \"Data_with_head\", index_col=None, names=[\"A\", \"B\"]\n )\n\n tm.assert_frame_equal(xlsdf_no_head, refdf)\n tm.assert_frame_equal(xlsdf_with_head, refdf)\n\n @td.skip_if_no(\"xlwt\")\n @td.skip_if_no(\"openpyxl\")\n def test_creating_and_reading_multiple_sheets(self, ext):\n # see gh-9450\n #\n # Test 
reading multiple sheets, from a runtime\n # created Excel file with multiple sheets.\n def tdf(col_sheet_name):\n d, i = [11, 22, 33], [1, 2, 3]\n return DataFrame(d, i, columns=[col_sheet_name])\n\n sheets = [\"AAA\", \"BBB\", \"CCC\"]\n\n dfs = [tdf(s) for s in sheets]\n dfs = dict(zip(sheets, dfs))\n\n with tm.ensure_clean(ext) as pth:\n with ExcelWriter(pth) as ew:\n for sheetname, df in dfs.items():\n df.to_excel(ew, sheetname)\n\n dfs_returned = pd.read_excel(pth, sheet_name=sheets, index_col=0)\n\n for s in sheets:\n tm.assert_frame_equal(dfs[s], dfs_returned[s])\n\n @td.skip_if_no(\"xlsxwriter\")\n def test_read_excel_multiindex_empty_level(self, ext):\n # see gh-12453\n with tm.ensure_clean(ext) as path:\n df = DataFrame(\n {\n (\"One\", \"x\"): {0: 1},\n (\"Two\", \"X\"): {0: 3},\n (\"Two\", \"Y\"): {0: 7},\n (\"Zero\", \"\"): {0: 0},\n }\n )\n\n expected = DataFrame(\n {\n (\"One\", \"x\"): {0: 1},\n (\"Two\", \"X\"): {0: 3},\n (\"Two\", \"Y\"): {0: 7},\n (\"Zero\", \"Unnamed: 4_level_1\"): {0: 0},\n }\n )\n\n df.to_excel(path)\n actual = pd.read_excel(path, header=[0, 1], index_col=0)\n tm.assert_frame_equal(actual, expected)\n\n df = pd.DataFrame(\n {\n (\"Beg\", \"\"): {0: 0},\n (\"Middle\", \"x\"): {0: 1},\n (\"Tail\", \"X\"): {0: 3},\n (\"Tail\", \"Y\"): {0: 7},\n }\n )\n\n expected = pd.DataFrame(\n {\n (\"Beg\", \"Unnamed: 1_level_1\"): {0: 0},\n (\"Middle\", \"x\"): {0: 1},\n (\"Tail\", \"X\"): {0: 3},\n (\"Tail\", \"Y\"): {0: 7},\n }\n )\n\n df.to_excel(path)\n actual = pd.read_excel(path, header=[0, 1], index_col=0)\n tm.assert_frame_equal(actual, expected)\n\n @td.skip_if_no(\"xlsxwriter\")\n @pytest.mark.parametrize(\"c_idx_names\", [True, False])\n @pytest.mark.parametrize(\"r_idx_names\", [True, False])\n @pytest.mark.parametrize(\"c_idx_levels\", [1, 3])\n @pytest.mark.parametrize(\"r_idx_levels\", [1, 3])\n def test_excel_multindex_roundtrip(\n self, ext, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels\n ):\n # see gh-4679\n with tm.ensure_clean(ext) as pth:\n if c_idx_levels == 1 and c_idx_names:\n pytest.skip(\n \"Column index name cannot be serialized unless it's a MultiIndex\"\n )\n\n # Empty name case current read in as\n # unnamed levels, not Nones.\n check_names = r_idx_names or r_idx_levels <= 1\n\n df = tm.makeCustomDataframe(\n 5, 5, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels\n )\n df.to_excel(pth)\n\n act = pd.read_excel(\n pth,\n index_col=list(range(r_idx_levels)),\n header=list(range(c_idx_levels)),\n )\n tm.assert_frame_equal(df, act, check_names=check_names)\n\n df.iloc[0, :] = np.nan\n df.to_excel(pth)\n\n act = pd.read_excel(\n pth,\n index_col=list(range(r_idx_levels)),\n header=list(range(c_idx_levels)),\n )\n tm.assert_frame_equal(df, act, check_names=check_names)\n\n df.iloc[-1, :] = np.nan\n df.to_excel(pth)\n act = pd.read_excel(\n pth,\n index_col=list(range(r_idx_levels)),\n header=list(range(c_idx_levels)),\n )\n tm.assert_frame_equal(df, act, check_names=check_names)\n\n @td.skip_if_no(\"xlwt\")\n @td.skip_if_no(\"openpyxl\")\n def test_read_excel_parse_dates(self, ext):\n # see gh-11544, gh-12051\n df = DataFrame(\n {\"col\": [1, 2, 3], \"date_strings\": pd.date_range(\"2012-01-01\", periods=3)}\n )\n df2 = df.copy()\n df2[\"date_strings\"] = df2[\"date_strings\"].dt.strftime(\"%m/%d/%Y\")\n\n with tm.ensure_clean(ext) as pth:\n df2.to_excel(pth)\n\n res = pd.read_excel(pth, index_col=0)\n tm.assert_frame_equal(df2, res)\n\n res = pd.read_excel(pth, parse_dates=[\"date_strings\"], index_col=0)\n 
tm.assert_frame_equal(df, res)\n\n date_parser = lambda x: datetime.strptime(x, \"%m/%d/%Y\")\n res = pd.read_excel(\n pth, parse_dates=[\"date_strings\"], date_parser=date_parser, index_col=0\n )\n tm.assert_frame_equal(df, res)\n\n def test_multiindex_interval_datetimes(self, ext):\n # GH 30986\n midx = pd.MultiIndex.from_arrays(\n [\n range(4),\n pd.interval_range(\n start=pd.Timestamp(\"2020-01-01\"), periods=4, freq=\"6M\"\n ),\n ]\n )\n df = pd.DataFrame(range(4), index=midx)\n with tm.ensure_clean(ext) as pth:\n df.to_excel(pth)\n result = pd.read_excel(pth, index_col=[0, 1])\n expected = pd.DataFrame(\n range(4),\n pd.MultiIndex.from_arrays(\n [\n range(4),\n [\n \"(2020-01-31, 2020-07-31]\",\n \"(2020-07-31, 2021-01-31]\",\n \"(2021-01-31, 2021-07-31]\",\n \"(2021-07-31, 2022-01-31]\",\n ],\n ]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected]_if_no(\"xlrd\")\[email protected](\n \"engine,ext\",\n [\n pytest.param(\"openpyxl\", \".xlsx\", marks=td.skip_if_no(\"openpyxl\")),\n pytest.param(\"openpyxl\", \".xlsm\", marks=td.skip_if_no(\"openpyxl\")),\n pytest.param(\"xlwt\", \".xls\", marks=td.skip_if_no(\"xlwt\")),\n pytest.param(\"xlsxwriter\", \".xlsx\", marks=td.skip_if_no(\"xlsxwriter\")),\n ],\n)\[email protected](\"set_engine\")\nclass TestExcelWriter:\n def test_excel_sheet_size(self, path):\n\n # GH 26080\n breaking_row_count = 2 ** 20 + 1\n breaking_col_count = 2 ** 14 + 1\n # purposely using two arrays to prevent memory issues while testing\n row_arr = np.zeros(shape=(breaking_row_count, 1))\n col_arr = np.zeros(shape=(1, breaking_col_count))\n row_df = pd.DataFrame(row_arr)\n col_df = pd.DataFrame(col_arr)\n\n msg = \"sheet is too large\"\n with pytest.raises(ValueError, match=msg):\n row_df.to_excel(path)\n\n with pytest.raises(ValueError, match=msg):\n col_df.to_excel(path)\n\n def test_excel_sheet_by_name_raise(self, path):\n import xlrd\n\n gt = DataFrame(np.random.randn(10, 2))\n gt.to_excel(path)\n\n xl = ExcelFile(path)\n df = pd.read_excel(xl, 0, index_col=0)\n\n tm.assert_frame_equal(gt, df)\n\n with pytest.raises(xlrd.XLRDError):\n pd.read_excel(xl, \"0\")\n\n def test_excel_writer_context_manager(self, frame, path):\n with ExcelWriter(path) as writer:\n frame.to_excel(writer, \"Data1\")\n frame2 = frame.copy()\n frame2.columns = frame.columns[::-1]\n frame2.to_excel(writer, \"Data2\")\n\n with ExcelFile(path) as reader:\n found_df = pd.read_excel(reader, \"Data1\", index_col=0)\n found_df2 = pd.read_excel(reader, \"Data2\", index_col=0)\n\n tm.assert_frame_equal(found_df, frame)\n tm.assert_frame_equal(found_df2, frame2)\n\n def test_roundtrip(self, frame, path):\n frame = frame.copy()\n frame[\"A\"][:5] = np.nan\n\n frame.to_excel(path, \"test1\")\n frame.to_excel(path, \"test1\", columns=[\"A\", \"B\"])\n frame.to_excel(path, \"test1\", header=False)\n frame.to_excel(path, \"test1\", index=False)\n\n # test roundtrip\n frame.to_excel(path, \"test1\")\n recons = pd.read_excel(path, \"test1\", index_col=0)\n tm.assert_frame_equal(frame, recons)\n\n frame.to_excel(path, \"test1\", index=False)\n recons = pd.read_excel(path, \"test1\", index_col=None)\n recons.index = frame.index\n tm.assert_frame_equal(frame, recons)\n\n frame.to_excel(path, \"test1\", na_rep=\"NA\")\n recons = pd.read_excel(path, \"test1\", index_col=0, na_values=[\"NA\"])\n tm.assert_frame_equal(frame, recons)\n\n # GH 3611\n frame.to_excel(path, \"test1\", na_rep=\"88\")\n recons = pd.read_excel(path, \"test1\", index_col=0, na_values=[\"88\"])\n 
tm.assert_frame_equal(frame, recons)\n\n frame.to_excel(path, \"test1\", na_rep=\"88\")\n recons = pd.read_excel(path, \"test1\", index_col=0, na_values=[88, 88.0])\n tm.assert_frame_equal(frame, recons)\n\n # GH 6573\n frame.to_excel(path, \"Sheet1\")\n recons = pd.read_excel(path, index_col=0)\n tm.assert_frame_equal(frame, recons)\n\n frame.to_excel(path, \"0\")\n recons = pd.read_excel(path, index_col=0)\n tm.assert_frame_equal(frame, recons)\n\n # GH 8825 Pandas Series should provide to_excel method\n s = frame[\"A\"]\n s.to_excel(path)\n recons = pd.read_excel(path, index_col=0)\n tm.assert_frame_equal(s.to_frame(), recons)\n\n def test_mixed(self, frame, path):\n mixed_frame = frame.copy()\n mixed_frame[\"foo\"] = \"bar\"\n\n mixed_frame.to_excel(path, \"test1\")\n reader = ExcelFile(path)\n recons = pd.read_excel(reader, \"test1\", index_col=0)\n tm.assert_frame_equal(mixed_frame, recons)\n\n def test_ts_frame(self, tsframe, path):\n df = tsframe\n\n df.to_excel(path, \"test1\")\n reader = ExcelFile(path)\n\n recons = pd.read_excel(reader, \"test1\", index_col=0)\n tm.assert_frame_equal(df, recons)\n\n def test_basics_with_nan(self, frame, path):\n frame = frame.copy()\n frame[\"A\"][:5] = np.nan\n frame.to_excel(path, \"test1\")\n frame.to_excel(path, \"test1\", columns=[\"A\", \"B\"])\n frame.to_excel(path, \"test1\", header=False)\n frame.to_excel(path, \"test1\", index=False)\n\n @pytest.mark.parametrize(\"np_type\", [np.int8, np.int16, np.int32, np.int64])\n def test_int_types(self, np_type, path):\n # Test np.int values read come back as int\n # (rather than float which is Excel's format).\n df = DataFrame(np.random.randint(-10, 10, size=(10, 2)), dtype=np_type)\n df.to_excel(path, \"test1\")\n\n reader = ExcelFile(path)\n recons = pd.read_excel(reader, \"test1\", index_col=0)\n\n int_frame = df.astype(np.int64)\n tm.assert_frame_equal(int_frame, recons)\n\n recons2 = pd.read_excel(path, \"test1\", index_col=0)\n tm.assert_frame_equal(int_frame, recons2)\n\n # Test with convert_float=False comes back as float.\n float_frame = df.astype(float)\n recons = pd.read_excel(path, \"test1\", convert_float=False, index_col=0)\n tm.assert_frame_equal(\n recons, float_frame, check_index_type=False, check_column_type=False\n )\n\n @pytest.mark.parametrize(\"np_type\", [np.float16, np.float32, np.float64])\n def test_float_types(self, np_type, path):\n # Test np.float values read come back as float.\n df = DataFrame(np.random.random_sample(10), dtype=np_type)\n df.to_excel(path, \"test1\")\n\n reader = ExcelFile(path)\n recons = pd.read_excel(reader, \"test1\", index_col=0).astype(np_type)\n\n tm.assert_frame_equal(df, recons, check_dtype=False)\n\n @pytest.mark.parametrize(\"np_type\", [np.bool8, np.bool_])\n def test_bool_types(self, np_type, path):\n # Test np.bool values read come back as float.\n df = DataFrame([1, 0, True, False], dtype=np_type)\n df.to_excel(path, \"test1\")\n\n reader = ExcelFile(path)\n recons = pd.read_excel(reader, \"test1\", index_col=0).astype(np_type)\n\n tm.assert_frame_equal(df, recons)\n\n def test_inf_roundtrip(self, path):\n df = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])\n df.to_excel(path, \"test1\")\n\n reader = ExcelFile(path)\n recons = pd.read_excel(reader, \"test1\", index_col=0)\n\n tm.assert_frame_equal(df, recons)\n\n def test_sheets(self, frame, tsframe, path):\n frame = frame.copy()\n frame[\"A\"][:5] = np.nan\n\n frame.to_excel(path, \"test1\")\n frame.to_excel(path, \"test1\", columns=[\"A\", \"B\"])\n frame.to_excel(path, \"test1\", 
header=False)\n frame.to_excel(path, \"test1\", index=False)\n\n # Test writing to separate sheets\n writer = ExcelWriter(path)\n frame.to_excel(writer, \"test1\")\n tsframe.to_excel(writer, \"test2\")\n writer.save()\n reader = ExcelFile(path)\n recons = pd.read_excel(reader, \"test1\", index_col=0)\n tm.assert_frame_equal(frame, recons)\n recons = pd.read_excel(reader, \"test2\", index_col=0)\n tm.assert_frame_equal(tsframe, recons)\n assert 2 == len(reader.sheet_names)\n assert \"test1\" == reader.sheet_names[0]\n assert \"test2\" == reader.sheet_names[1]\n\n def test_colaliases(self, frame, path):\n frame = frame.copy()\n frame[\"A\"][:5] = np.nan\n\n frame.to_excel(path, \"test1\")\n frame.to_excel(path, \"test1\", columns=[\"A\", \"B\"])\n frame.to_excel(path, \"test1\", header=False)\n frame.to_excel(path, \"test1\", index=False)\n\n # column aliases\n col_aliases = Index([\"AA\", \"X\", \"Y\", \"Z\"])\n frame.to_excel(path, \"test1\", header=col_aliases)\n reader = ExcelFile(path)\n rs = pd.read_excel(reader, \"test1\", index_col=0)\n xp = frame.copy()\n xp.columns = col_aliases\n tm.assert_frame_equal(xp, rs)\n\n def test_roundtrip_indexlabels(self, merge_cells, frame, path):\n frame = frame.copy()\n frame[\"A\"][:5] = np.nan\n\n frame.to_excel(path, \"test1\")\n frame.to_excel(path, \"test1\", columns=[\"A\", \"B\"])\n frame.to_excel(path, \"test1\", header=False)\n frame.to_excel(path, \"test1\", index=False)\n\n # test index_label\n df = DataFrame(np.random.randn(10, 2)) >= 0\n df.to_excel(path, \"test1\", index_label=[\"test\"], merge_cells=merge_cells)\n reader = ExcelFile(path)\n recons = pd.read_excel(reader, \"test1\", index_col=0).astype(np.int64)\n df.index.names = [\"test\"]\n assert df.index.names == recons.index.names\n\n df = DataFrame(np.random.randn(10, 2)) >= 0\n df.to_excel(\n path,\n \"test1\",\n index_label=[\"test\", \"dummy\", \"dummy2\"],\n merge_cells=merge_cells,\n )\n reader = ExcelFile(path)\n recons = pd.read_excel(reader, \"test1\", index_col=0).astype(np.int64)\n df.index.names = [\"test\"]\n assert df.index.names == recons.index.names\n\n df = DataFrame(np.random.randn(10, 2)) >= 0\n df.to_excel(path, \"test1\", index_label=\"test\", merge_cells=merge_cells)\n reader = ExcelFile(path)\n recons = pd.read_excel(reader, \"test1\", index_col=0).astype(np.int64)\n df.index.names = [\"test\"]\n tm.assert_frame_equal(df, recons.astype(bool))\n\n frame.to_excel(\n path,\n \"test1\",\n columns=[\"A\", \"B\", \"C\", \"D\"],\n index=False,\n merge_cells=merge_cells,\n )\n # take 'A' and 'B' as indexes (same row as cols 'C', 'D')\n df = frame.copy()\n df = df.set_index([\"A\", \"B\"])\n\n reader = ExcelFile(path)\n recons = pd.read_excel(reader, \"test1\", index_col=[0, 1])\n tm.assert_frame_equal(df, recons, check_less_precise=True)\n\n def test_excel_roundtrip_indexname(self, merge_cells, path):\n df = DataFrame(np.random.randn(10, 4))\n df.index.name = \"foo\"\n\n df.to_excel(path, merge_cells=merge_cells)\n\n xf = ExcelFile(path)\n result = pd.read_excel(xf, xf.sheet_names[0], index_col=0)\n\n tm.assert_frame_equal(result, df)\n assert result.index.name == \"foo\"\n\n def test_excel_roundtrip_datetime(self, merge_cells, tsframe, path):\n # datetime.date, not sure what to test here exactly\n tsf = tsframe.copy()\n\n tsf.index = [x.date() for x in tsframe.index]\n tsf.to_excel(path, \"test1\", merge_cells=merge_cells)\n\n reader = ExcelFile(path)\n recons = pd.read_excel(reader, \"test1\", index_col=0)\n\n tm.assert_frame_equal(tsframe, recons)\n\n def 
test_excel_date_datetime_format(self, engine, ext, path):\n # see gh-4133\n #\n # Excel output format strings\n df = DataFrame(\n [\n [date(2014, 1, 31), date(1999, 9, 24)],\n [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],\n ],\n index=[\"DATE\", \"DATETIME\"],\n columns=[\"X\", \"Y\"],\n )\n df_expected = DataFrame(\n [\n [datetime(2014, 1, 31), datetime(1999, 9, 24)],\n [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],\n ],\n index=[\"DATE\", \"DATETIME\"],\n columns=[\"X\", \"Y\"],\n )\n\n with tm.ensure_clean(ext) as filename2:\n writer1 = ExcelWriter(path)\n writer2 = ExcelWriter(\n filename2,\n date_format=\"DD.MM.YYYY\",\n datetime_format=\"DD.MM.YYYY HH-MM-SS\",\n )\n\n df.to_excel(writer1, \"test1\")\n df.to_excel(writer2, \"test1\")\n\n writer1.close()\n writer2.close()\n\n reader1 = ExcelFile(path)\n reader2 = ExcelFile(filename2)\n\n rs1 = pd.read_excel(reader1, \"test1\", index_col=0)\n rs2 = pd.read_excel(reader2, \"test1\", index_col=0)\n\n tm.assert_frame_equal(rs1, rs2)\n\n # Since the reader returns a datetime object for dates,\n # we need to use df_expected to check the result.\n tm.assert_frame_equal(rs2, df_expected)\n\n def test_to_excel_interval_no_labels(self, path):\n # see gh-19242\n #\n # Test writing Interval without labels.\n df = DataFrame(np.random.randint(-10, 10, size=(20, 1)), dtype=np.int64)\n expected = df.copy()\n\n df[\"new\"] = pd.cut(df[0], 10)\n expected[\"new\"] = pd.cut(expected[0], 10).astype(str)\n\n df.to_excel(path, \"test1\")\n reader = ExcelFile(path)\n\n recons = pd.read_excel(reader, \"test1\", index_col=0)\n tm.assert_frame_equal(expected, recons)\n\n def test_to_excel_interval_labels(self, path):\n # see gh-19242\n #\n # Test writing Interval with labels.\n df = DataFrame(np.random.randint(-10, 10, size=(20, 1)), dtype=np.int64)\n expected = df.copy()\n intervals = pd.cut(\n df[0], 10, labels=[\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\"]\n )\n df[\"new\"] = intervals\n expected[\"new\"] = pd.Series(list(intervals))\n\n df.to_excel(path, \"test1\")\n reader = ExcelFile(path)\n\n recons = pd.read_excel(reader, \"test1\", index_col=0)\n tm.assert_frame_equal(expected, recons)\n\n def test_to_excel_timedelta(self, path):\n # see gh-19242, gh-9155\n #\n # Test writing timedelta to xls.\n df = DataFrame(\n np.random.randint(-10, 10, size=(20, 1)), columns=[\"A\"], dtype=np.int64\n )\n expected = df.copy()\n\n df[\"new\"] = df[\"A\"].apply(lambda x: timedelta(seconds=x))\n expected[\"new\"] = expected[\"A\"].apply(\n lambda x: timedelta(seconds=x).total_seconds() / float(86400)\n )\n\n df.to_excel(path, \"test1\")\n reader = ExcelFile(path)\n\n recons = pd.read_excel(reader, \"test1\", index_col=0)\n tm.assert_frame_equal(expected, recons)\n\n def test_to_excel_periodindex(self, tsframe, path):\n xp = tsframe.resample(\"M\", kind=\"period\").mean()\n\n xp.to_excel(path, \"sht1\")\n\n reader = ExcelFile(path)\n rs = pd.read_excel(reader, \"sht1\", index_col=0)\n tm.assert_frame_equal(xp, rs.to_period(\"M\"))\n\n def test_to_excel_multiindex(self, merge_cells, frame, path):\n arrays = np.arange(len(frame.index) * 2).reshape(2, -1)\n new_index = MultiIndex.from_arrays(arrays, names=[\"first\", \"second\"])\n frame.index = new_index\n\n frame.to_excel(path, \"test1\", header=False)\n frame.to_excel(path, \"test1\", columns=[\"A\", \"B\"])\n\n # round trip\n frame.to_excel(path, \"test1\", merge_cells=merge_cells)\n reader = ExcelFile(path)\n df = pd.read_excel(reader, \"test1\", 
index_col=[0, 1])\n tm.assert_frame_equal(frame, df)\n\n # GH13511\n def test_to_excel_multiindex_nan_label(self, merge_cells, path):\n df = pd.DataFrame(\n {\"A\": [None, 2, 3], \"B\": [10, 20, 30], \"C\": np.random.sample(3)}\n )\n df = df.set_index([\"A\", \"B\"])\n\n df.to_excel(path, merge_cells=merge_cells)\n df1 = pd.read_excel(path, index_col=[0, 1])\n tm.assert_frame_equal(df, df1)\n\n # Test for Issue 11328. If column indices are integers, make\n # sure they are handled correctly for either setting of\n # merge_cells\n def test_to_excel_multiindex_cols(self, merge_cells, frame, path):\n arrays = np.arange(len(frame.index) * 2).reshape(2, -1)\n new_index = MultiIndex.from_arrays(arrays, names=[\"first\", \"second\"])\n frame.index = new_index\n\n new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2), (50, 1), (50, 2)])\n frame.columns = new_cols_index\n header = [0, 1]\n if not merge_cells:\n header = 0\n\n # round trip\n frame.to_excel(path, \"test1\", merge_cells=merge_cells)\n reader = ExcelFile(path)\n df = pd.read_excel(reader, \"test1\", header=header, index_col=[0, 1])\n if not merge_cells:\n fm = frame.columns.format(sparsify=False, adjoin=False, names=False)\n frame.columns = [\".\".join(map(str, q)) for q in zip(*fm)]\n tm.assert_frame_equal(frame, df)\n\n def test_to_excel_multiindex_dates(self, merge_cells, tsframe, path):\n # try multiindex with dates\n new_index = [tsframe.index, np.arange(len(tsframe.index))]\n tsframe.index = MultiIndex.from_arrays(new_index)\n\n tsframe.index.names = [\"time\", \"foo\"]\n tsframe.to_excel(path, \"test1\", merge_cells=merge_cells)\n reader = ExcelFile(path)\n recons = pd.read_excel(reader, \"test1\", index_col=[0, 1])\n\n tm.assert_frame_equal(tsframe, recons)\n assert recons.index.names == (\"time\", \"foo\")\n\n def test_to_excel_multiindex_no_write_index(self, path):\n # Test writing and re-reading a MI without the index. 
GH 5616.\n\n # Initial non-MI frame.\n frame1 = DataFrame({\"a\": [10, 20], \"b\": [30, 40], \"c\": [50, 60]})\n\n # Add a MI.\n frame2 = frame1.copy()\n multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])\n frame2.index = multi_index\n\n # Write out to Excel without the index.\n frame2.to_excel(path, \"test1\", index=False)\n\n # Read it back in.\n reader = ExcelFile(path)\n frame3 = pd.read_excel(reader, \"test1\")\n\n # Test that it is the same as the initial frame.\n tm.assert_frame_equal(frame1, frame3)\n\n def test_to_excel_float_format(self, path):\n df = DataFrame(\n [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],\n index=[\"A\", \"B\"],\n columns=[\"X\", \"Y\", \"Z\"],\n )\n df.to_excel(path, \"test1\", float_format=\"%.2f\")\n\n reader = ExcelFile(path)\n result = pd.read_excel(reader, \"test1\", index_col=0)\n\n expected = DataFrame(\n [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],\n index=[\"A\", \"B\"],\n columns=[\"X\", \"Y\", \"Z\"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_to_excel_output_encoding(self, ext):\n # Avoid mixed inferred_type.\n df = DataFrame(\n [[\"\\u0192\", \"\\u0193\", \"\\u0194\"], [\"\\u0195\", \"\\u0196\", \"\\u0197\"]],\n index=[\"A\\u0192\", \"B\"],\n columns=[\"X\\u0193\", \"Y\", \"Z\"],\n )\n\n with tm.ensure_clean(\"__tmp_to_excel_float_format__.\" + ext) as filename:\n df.to_excel(filename, sheet_name=\"TestSheet\", encoding=\"utf8\")\n result = pd.read_excel(filename, \"TestSheet\", encoding=\"utf8\", index_col=0)\n tm.assert_frame_equal(result, df)\n\n def test_to_excel_unicode_filename(self, ext, path):\n with tm.ensure_clean(\"\\u0192u.\" + ext) as filename:\n try:\n f = open(filename, \"wb\")\n except UnicodeEncodeError:\n pytest.skip(\"No unicode file names on this system\")\n else:\n f.close()\n\n df = DataFrame(\n [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],\n index=[\"A\", \"B\"],\n columns=[\"X\", \"Y\", \"Z\"],\n )\n df.to_excel(filename, \"test1\", float_format=\"%.2f\")\n\n reader = ExcelFile(filename)\n result = pd.read_excel(reader, \"test1\", index_col=0)\n\n expected = DataFrame(\n [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],\n index=[\"A\", \"B\"],\n columns=[\"X\", \"Y\", \"Z\"],\n )\n tm.assert_frame_equal(result, expected)\n\n # FIXME: dont leave commented-out\n # def test_to_excel_header_styling_xls(self, engine, ext):\n\n # import StringIO\n # s = StringIO(\n # \"\"\"Date,ticker,type,value\n # 2001-01-01,x,close,12.2\n # 2001-01-01,x,open ,12.1\n # 2001-01-01,y,close,12.2\n # 2001-01-01,y,open ,12.1\n # 2001-02-01,x,close,12.2\n # 2001-02-01,x,open ,12.1\n # 2001-02-01,y,close,12.2\n # 2001-02-01,y,open ,12.1\n # 2001-03-01,x,close,12.2\n # 2001-03-01,x,open ,12.1\n # 2001-03-01,y,close,12.2\n # 2001-03-01,y,open ,12.1\"\"\")\n # df = read_csv(s, parse_dates=[\"Date\"])\n # pdf = df.pivot_table(values=\"value\", rows=[\"ticker\"],\n # cols=[\"Date\", \"type\"])\n\n # try:\n # import xlwt\n # import xlrd\n # except ImportError:\n # pytest.skip\n\n # filename = '__tmp_to_excel_header_styling_xls__.xls'\n # pdf.to_excel(filename, 'test1')\n\n # wbk = xlrd.open_workbook(filename,\n # formatting_info=True)\n # assert [\"test1\"] == wbk.sheet_names()\n # ws = wbk.sheet_by_name('test1')\n # assert [(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)] == ws.merged_cells\n # for i in range(0, 2):\n # for j in range(0, 7):\n # xfx = ws.cell_xf_index(0, 0)\n # cell_xf = wbk.xf_list[xfx]\n # font = wbk.font_list\n # assert 1 == font[cell_xf.font_index].bold\n # assert 1 
== cell_xf.border.top_line_style\n # assert 1 == cell_xf.border.right_line_style\n # assert 1 == cell_xf.border.bottom_line_style\n # assert 1 == cell_xf.border.left_line_style\n # assert 2 == cell_xf.alignment.hor_align\n # os.remove(filename)\n # def test_to_excel_header_styling_xlsx(self, engine, ext):\n # import StringIO\n # s = StringIO(\n # \"\"\"Date,ticker,type,value\n # 2001-01-01,x,close,12.2\n # 2001-01-01,x,open ,12.1\n # 2001-01-01,y,close,12.2\n # 2001-01-01,y,open ,12.1\n # 2001-02-01,x,close,12.2\n # 2001-02-01,x,open ,12.1\n # 2001-02-01,y,close,12.2\n # 2001-02-01,y,open ,12.1\n # 2001-03-01,x,close,12.2\n # 2001-03-01,x,open ,12.1\n # 2001-03-01,y,close,12.2\n # 2001-03-01,y,open ,12.1\"\"\")\n # df = read_csv(s, parse_dates=[\"Date\"])\n # pdf = df.pivot_table(values=\"value\", rows=[\"ticker\"],\n # cols=[\"Date\", \"type\"])\n # try:\n # import openpyxl\n # from openpyxl.cell import get_column_letter\n # except ImportError:\n # pytest.skip\n # if openpyxl.__version__ < '1.6.1':\n # pytest.skip\n # # test xlsx_styling\n # filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'\n # pdf.to_excel(filename, 'test1')\n # wbk = openpyxl.load_workbook(filename)\n # assert [\"test1\"] == wbk.get_sheet_names()\n # ws = wbk.get_sheet_by_name('test1')\n # xlsaddrs = [\"%s2\" % chr(i) for i in range(ord('A'), ord('H'))]\n # xlsaddrs += [\"A%s\" % i for i in range(1, 6)]\n # xlsaddrs += [\"B1\", \"D1\", \"F1\"]\n # for xlsaddr in xlsaddrs:\n # cell = ws.cell(xlsaddr)\n # assert cell.style.font.bold\n # assert (openpyxl.style.Border.BORDER_THIN ==\n # cell.style.borders.top.border_style)\n # assert (openpyxl.style.Border.BORDER_THIN ==\n # cell.style.borders.right.border_style)\n # assert (openpyxl.style.Border.BORDER_THIN ==\n # cell.style.borders.bottom.border_style)\n # assert (openpyxl.style.Border.BORDER_THIN ==\n # cell.style.borders.left.border_style)\n # assert (openpyxl.style.Alignment.HORIZONTAL_CENTER ==\n # cell.style.alignment.horizontal)\n # mergedcells_addrs = [\"C1\", \"E1\", \"G1\"]\n # for maddr in mergedcells_addrs:\n # assert ws.cell(maddr).merged\n # os.remove(filename)\n\n @pytest.mark.parametrize(\"use_headers\", [True, False])\n @pytest.mark.parametrize(\"r_idx_nlevels\", [1, 2, 3])\n @pytest.mark.parametrize(\"c_idx_nlevels\", [1, 2, 3])\n def test_excel_010_hemstring(\n self, merge_cells, c_idx_nlevels, r_idx_nlevels, use_headers, path\n ):\n def roundtrip(data, header=True, parser_hdr=0, index=True):\n data.to_excel(path, header=header, merge_cells=merge_cells, index=index)\n\n xf = ExcelFile(path)\n return pd.read_excel(xf, xf.sheet_names[0], header=parser_hdr)\n\n # Basic test.\n parser_header = 0 if use_headers else None\n res = roundtrip(DataFrame([0]), use_headers, parser_header)\n\n assert res.shape == (1, 2)\n assert res.iloc[0, 0] is not np.nan\n\n # More complex tests with multi-index.\n nrows = 5\n ncols = 3\n\n # ensure limited functionality in 0.10\n # override of gh-2370 until sorted out in 0.11\n\n df = tm.makeCustomDataframe(\n nrows, ncols, r_idx_nlevels=r_idx_nlevels, c_idx_nlevels=c_idx_nlevels\n )\n\n # This if will be removed once multi-column Excel writing\n # is implemented. 
For now fixing gh-9794.\n if c_idx_nlevels > 1:\n with pytest.raises(NotImplementedError):\n roundtrip(df, use_headers, index=False)\n else:\n res = roundtrip(df, use_headers)\n\n if use_headers:\n assert res.shape == (nrows, ncols + r_idx_nlevels)\n else:\n # First row taken as columns.\n assert res.shape == (nrows - 1, ncols + r_idx_nlevels)\n\n # No NaNs.\n for r in range(len(res.index)):\n for c in range(len(res.columns)):\n assert res.iloc[r, c] is not np.nan\n\n def test_duplicated_columns(self, path):\n # see gh-5235\n df = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=[\"A\", \"B\", \"B\"])\n df.to_excel(path, \"test1\")\n expected = DataFrame(\n [[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=[\"A\", \"B\", \"B.1\"]\n )\n\n # By default, we mangle.\n result = pd.read_excel(path, \"test1\", index_col=0)\n tm.assert_frame_equal(result, expected)\n\n # Explicitly, we pass in the parameter.\n result = pd.read_excel(path, \"test1\", index_col=0, mangle_dupe_cols=True)\n tm.assert_frame_equal(result, expected)\n\n # see gh-11007, gh-10970\n df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=[\"A\", \"B\", \"A\", \"B\"])\n df.to_excel(path, \"test1\")\n\n result = pd.read_excel(path, \"test1\", index_col=0)\n expected = DataFrame(\n [[1, 2, 3, 4], [5, 6, 7, 8]], columns=[\"A\", \"B\", \"A.1\", \"B.1\"]\n )\n tm.assert_frame_equal(result, expected)\n\n # see gh-10982\n df.to_excel(path, \"test1\", index=False, header=False)\n result = pd.read_excel(path, \"test1\", header=None)\n\n expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])\n tm.assert_frame_equal(result, expected)\n\n msg = \"Setting mangle_dupe_cols=False is not supported yet\"\n with pytest.raises(ValueError, match=msg):\n pd.read_excel(path, \"test1\", header=None, mangle_dupe_cols=False)\n\n def test_swapped_columns(self, path):\n # Test for issue #5427.\n write_frame = DataFrame({\"A\": [1, 1, 1], \"B\": [2, 2, 2]})\n write_frame.to_excel(path, \"test1\", columns=[\"B\", \"A\"])\n\n read_frame = pd.read_excel(path, \"test1\", header=0)\n\n tm.assert_series_equal(write_frame[\"A\"], read_frame[\"A\"])\n tm.assert_series_equal(write_frame[\"B\"], read_frame[\"B\"])\n\n def test_invalid_columns(self, path):\n # see gh-10982\n write_frame = DataFrame({\"A\": [1, 1, 1], \"B\": [2, 2, 2]})\n\n with pytest.raises(KeyError, match=\"Not all names specified\"):\n write_frame.to_excel(path, \"test1\", columns=[\"B\", \"C\"])\n\n with pytest.raises(\n KeyError, match=\"'passes columns are not ALL present dataframe'\"\n ):\n write_frame.to_excel(path, \"test1\", columns=[\"C\", \"D\"])\n\n def test_comment_arg(self, path):\n # see gh-18735\n #\n # Test the comment argument functionality to pd.read_excel.\n\n # Create file to read in.\n df = DataFrame({\"A\": [\"one\", \"#one\", \"one\"], \"B\": [\"two\", \"two\", \"#two\"]})\n df.to_excel(path, \"test_c\")\n\n # Read file without comment arg.\n result1 = pd.read_excel(path, \"test_c\", index_col=0)\n\n result1.iloc[1, 0] = None\n result1.iloc[1, 1] = None\n result1.iloc[2, 1] = None\n\n result2 = pd.read_excel(path, \"test_c\", comment=\"#\", index_col=0)\n tm.assert_frame_equal(result1, result2)\n\n def test_comment_default(self, path):\n # Re issue #18735\n # Test the comment argument default to pd.read_excel\n\n # Create file to read in\n df = DataFrame({\"A\": [\"one\", \"#one\", \"one\"], \"B\": [\"two\", \"two\", \"#two\"]})\n df.to_excel(path, \"test_c\")\n\n # Read file with default and explicit comment=None\n result1 = pd.read_excel(path, \"test_c\")\n result2 = 
pd.read_excel(path, \"test_c\", comment=None)\n tm.assert_frame_equal(result1, result2)\n\n def test_comment_used(self, path):\n # see gh-18735\n #\n # Test the comment argument is working as expected when used.\n\n # Create file to read in.\n df = DataFrame({\"A\": [\"one\", \"#one\", \"one\"], \"B\": [\"two\", \"two\", \"#two\"]})\n df.to_excel(path, \"test_c\")\n\n # Test read_frame_comment against manually produced expected output.\n expected = DataFrame({\"A\": [\"one\", None, \"one\"], \"B\": [\"two\", None, None]})\n result = pd.read_excel(path, \"test_c\", comment=\"#\", index_col=0)\n tm.assert_frame_equal(result, expected)\n\n def test_comment_empty_line(self, path):\n # Re issue #18735\n # Test that pd.read_excel ignores commented lines at the end of file\n\n df = DataFrame({\"a\": [\"1\", \"#2\"], \"b\": [\"2\", \"3\"]})\n df.to_excel(path, index=False)\n\n # Test that all-comment lines at EoF are ignored\n expected = DataFrame({\"a\": [1], \"b\": [2]})\n result = pd.read_excel(path, comment=\"#\")\n tm.assert_frame_equal(result, expected)\n\n def test_datetimes(self, path):\n\n # Test writing and reading datetimes. For issue #9139. (xref #9185)\n datetimes = [\n datetime(2013, 1, 13, 1, 2, 3),\n datetime(2013, 1, 13, 2, 45, 56),\n datetime(2013, 1, 13, 4, 29, 49),\n datetime(2013, 1, 13, 6, 13, 42),\n datetime(2013, 1, 13, 7, 57, 35),\n datetime(2013, 1, 13, 9, 41, 28),\n datetime(2013, 1, 13, 11, 25, 21),\n datetime(2013, 1, 13, 13, 9, 14),\n datetime(2013, 1, 13, 14, 53, 7),\n datetime(2013, 1, 13, 16, 37, 0),\n datetime(2013, 1, 13, 18, 20, 52),\n ]\n\n write_frame = DataFrame({\"A\": datetimes})\n write_frame.to_excel(path, \"Sheet1\")\n read_frame = pd.read_excel(path, \"Sheet1\", header=0)\n\n tm.assert_series_equal(write_frame[\"A\"], read_frame[\"A\"])\n\n def test_bytes_io(self, engine):\n # see gh-7074\n bio = BytesIO()\n df = DataFrame(np.random.randn(10, 2))\n\n # Pass engine explicitly, as there is no file path to infer from.\n writer = ExcelWriter(bio, engine=engine)\n df.to_excel(writer)\n writer.save()\n\n bio.seek(0)\n reread_df = pd.read_excel(bio, index_col=0)\n tm.assert_frame_equal(df, reread_df)\n\n def test_write_lists_dict(self, path):\n # see gh-8188.\n df = DataFrame(\n {\n \"mixed\": [\"a\", [\"b\", \"c\"], {\"d\": \"e\", \"f\": 2}],\n \"numeric\": [1, 2, 3.0],\n \"str\": [\"apple\", \"banana\", \"cherry\"],\n }\n )\n df.to_excel(path, \"Sheet1\")\n read = pd.read_excel(path, \"Sheet1\", header=0, index_col=0)\n\n expected = df.copy()\n expected.mixed = expected.mixed.apply(str)\n expected.numeric = expected.numeric.astype(\"int64\")\n\n tm.assert_frame_equal(read, expected)\n\n def test_true_and_false_value_options(self, path):\n # see gh-13347\n df = pd.DataFrame([[\"foo\", \"bar\"]], columns=[\"col1\", \"col2\"])\n expected = df.replace({\"foo\": True, \"bar\": False})\n\n df.to_excel(path)\n read_frame = pd.read_excel(\n path, true_values=[\"foo\"], false_values=[\"bar\"], index_col=0\n )\n tm.assert_frame_equal(read_frame, expected)\n\n def test_freeze_panes(self, path):\n # see gh-15160\n expected = DataFrame([[1, 2], [3, 4]], columns=[\"col1\", \"col2\"])\n expected.to_excel(path, \"Sheet1\", freeze_panes=(1, 1))\n\n result = pd.read_excel(path, index_col=0)\n tm.assert_frame_equal(result, expected)\n\n def test_path_path_lib(self, engine, ext):\n df = tm.makeDataFrame()\n writer = partial(df.to_excel, engine=engine)\n\n reader = partial(pd.read_excel, index_col=0)\n result = tm.round_trip_pathlib(writer, reader, 
path=\"foo.{ext}\".format(ext=ext))\n tm.assert_frame_equal(result, df)\n\n def test_path_local_path(self, engine, ext):\n df = tm.makeDataFrame()\n writer = partial(df.to_excel, engine=engine)\n\n reader = partial(pd.read_excel, index_col=0)\n result = tm.round_trip_pathlib(writer, reader, path=\"foo.{ext}\".format(ext=ext))\n tm.assert_frame_equal(result, df)\n\n def test_merged_cell_custom_objects(self, merge_cells, path):\n # see GH-27006\n mi = MultiIndex.from_tuples(\n [\n (pd.Period(\"2018\"), pd.Period(\"2018Q1\")),\n (pd.Period(\"2018\"), pd.Period(\"2018Q2\")),\n ]\n )\n expected = DataFrame(np.ones((2, 2)), columns=mi)\n expected.to_excel(path)\n result = pd.read_excel(path, header=[0, 1], index_col=0, convert_float=False)\n # need to convert PeriodIndexes to standard Indexes for assert equal\n expected.columns.set_levels(\n [[str(i) for i in mi.levels[0]], [str(i) for i in mi.levels[1]]],\n level=[0, 1],\n inplace=True,\n )\n expected.index = expected.index.astype(np.float64)\n tm.assert_frame_equal(expected, result)\n\n @pytest.mark.parametrize(\"dtype\", [None, object])\n def test_raise_when_saving_timezones(self, dtype, tz_aware_fixture, path):\n # GH 27008, GH 7056\n tz = tz_aware_fixture\n data = pd.Timestamp(\"2019\", tz=tz)\n df = DataFrame([data], dtype=dtype)\n with pytest.raises(ValueError, match=\"Excel does not support\"):\n df.to_excel(path)\n\n data = data.to_pydatetime()\n df = DataFrame([data], dtype=dtype)\n with pytest.raises(ValueError, match=\"Excel does not support\"):\n df.to_excel(path)\n\n\nclass TestExcelWriterEngineTests:\n @pytest.mark.parametrize(\n \"klass,ext\",\n [\n pytest.param(_XlsxWriter, \".xlsx\", marks=td.skip_if_no(\"xlsxwriter\")),\n pytest.param(_OpenpyxlWriter, \".xlsx\", marks=td.skip_if_no(\"openpyxl\")),\n pytest.param(_XlwtWriter, \".xls\", marks=td.skip_if_no(\"xlwt\")),\n ],\n )\n def test_ExcelWriter_dispatch(self, klass, ext):\n with tm.ensure_clean(ext) as path:\n writer = ExcelWriter(path)\n if ext == \".xlsx\" and td.safe_import(\"xlsxwriter\"):\n # xlsxwriter has preference over openpyxl if both installed\n assert isinstance(writer, _XlsxWriter)\n else:\n assert isinstance(writer, klass)\n\n def test_ExcelWriter_dispatch_raises(self):\n with pytest.raises(ValueError, match=\"No engine\"):\n ExcelWriter(\"nothing\")\n\n def test_register_writer(self):\n # some awkward mocking to test out dispatch and such actually works\n called_save = []\n called_write_cells = []\n\n class DummyClass(ExcelWriter):\n called_save = False\n called_write_cells = False\n supported_extensions = [\"xlsx\", \"xls\"]\n engine = \"dummy\"\n\n def save(self):\n called_save.append(True)\n\n def write_cells(self, *args, **kwargs):\n called_write_cells.append(True)\n\n def check_called(func):\n func()\n assert len(called_save) >= 1\n assert len(called_write_cells) >= 1\n del called_save[:]\n del called_write_cells[:]\n\n with pd.option_context(\"io.excel.xlsx.writer\", \"dummy\"):\n register_writer(DummyClass)\n writer = ExcelWriter(\"something.xlsx\")\n assert isinstance(writer, DummyClass)\n df = tm.makeCustomDataframe(1, 1)\n check_called(lambda: df.to_excel(\"something.xlsx\"))\n check_called(lambda: df.to_excel(\"something.xls\", engine=\"dummy\"))\n\n\[email protected]_if_no(\"xlrd\")\[email protected]_if_no(\"openpyxl\")\nclass TestFSPath:\n def test_excelfile_fspath(self):\n with tm.ensure_clean(\"foo.xlsx\") as path:\n df = DataFrame({\"A\": [1, 2]})\n df.to_excel(path)\n xl = ExcelFile(path)\n result = os.fspath(xl)\n assert result == path\n\n 
def test_excelwriter_fspath(self):\n with tm.ensure_clean(\"foo.xlsx\") as path:\n writer = ExcelWriter(path)\n assert os.fspath(writer) == str(path)\n", "\"\"\"\nFunctions for preparing various inputs passed to the DataFrame or Series\nconstructors before passing them to a BlockManager.\n\"\"\"\nfrom collections import abc\n\nimport numpy as np\nimport numpy.ma as ma\n\nfrom pandas._libs import lib\n\nfrom pandas.core.dtypes.cast import (\n construct_1d_arraylike_from_scalar,\n maybe_cast_to_datetime,\n maybe_convert_platform,\n maybe_infer_to_datetimelike,\n maybe_upcast,\n)\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype,\n is_datetime64tz_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_integer_dtype,\n is_list_like,\n is_object_dtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCDatetimeIndex,\n ABCIndexClass,\n ABCPeriodIndex,\n ABCSeries,\n ABCTimedeltaIndex,\n)\n\nfrom pandas.core import algorithms, common as com\nfrom pandas.core.arrays import Categorical\nfrom pandas.core.construction import sanitize_array\nfrom pandas.core.indexes import base as ibase\nfrom pandas.core.indexes.api import (\n Index,\n ensure_index,\n get_objs_combined_axis,\n union_indexes,\n)\nfrom pandas.core.internals import (\n create_block_manager_from_arrays,\n create_block_manager_from_blocks,\n)\n\n# ---------------------------------------------------------------------\n# BlockManager Interface\n\n\ndef arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):\n \"\"\"\n Segregate Series based on type and coerce into matrices.\n\n Needs to handle a lot of exceptional cases.\n \"\"\"\n # figure out the index, if necessary\n if index is None:\n index = extract_index(arrays)\n else:\n index = ensure_index(index)\n\n # don't force copy because getting jammed in an ndarray anyway\n arrays = _homogenize(arrays, index, dtype)\n\n # from BlockManager perspective\n axes = [ensure_index(columns), index]\n\n return create_block_manager_from_arrays(arrays, arr_names, axes)\n\n\ndef masked_rec_array_to_mgr(data, index, columns, dtype, copy: bool):\n \"\"\"\n Extract from a masked rec array and create the manager.\n \"\"\"\n\n # essentially process a record array then fill it\n fill_value = data.fill_value\n fdata = ma.getdata(data)\n if index is None:\n index = get_names_from_index(fdata)\n if index is None:\n index = ibase.default_index(len(data))\n index = ensure_index(index)\n\n if columns is not None:\n columns = ensure_index(columns)\n arrays, arr_columns = to_arrays(fdata, columns)\n\n # fill if needed\n new_arrays = []\n for fv, arr, col in zip(fill_value, arrays, arr_columns):\n # TODO: numpy docs suggest fv must be scalar, but could it be\n # non-scalar for object dtype?\n assert lib.is_scalar(fv), fv\n mask = ma.getmaskarray(data[col])\n if mask.any():\n arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)\n arr[mask] = fv\n new_arrays.append(arr)\n\n # create the manager\n arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)\n if columns is None:\n columns = arr_columns\n\n mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)\n\n if copy:\n mgr = mgr.copy()\n return mgr\n\n\n# ---------------------------------------------------------------------\n# DataFrame Constructor Interface\n\n\ndef init_ndarray(values, index, columns, dtype=None, copy=False):\n # input must be a ndarray, list, Series, index\n\n if isinstance(values, ABCSeries):\n if columns is None:\n if values.name is not None:\n columns = [values.name]\n if index is 
None:\n index = values.index\n else:\n values = values.reindex(index)\n\n # zero len case (GH #2234)\n if not len(values) and columns is not None and len(columns):\n values = np.empty((0, 1), dtype=object)\n\n # we could have a categorical type passed or coerced to 'category'\n # recast this to an arrays_to_mgr\n if is_categorical_dtype(getattr(values, \"dtype\", None)) or is_categorical_dtype(\n dtype\n ):\n\n if not hasattr(values, \"dtype\"):\n values = _prep_ndarray(values, copy=copy)\n values = values.ravel()\n elif copy:\n values = values.copy()\n\n index, columns = _get_axes(len(values), 1, index, columns)\n return arrays_to_mgr([values], columns, index, columns, dtype=dtype)\n elif is_extension_array_dtype(values) or is_extension_array_dtype(dtype):\n # GH#19157\n\n if isinstance(values, np.ndarray) and values.ndim > 1:\n # GH#12513 a EA dtype passed with a 2D array, split into\n # multiple EAs that view the values\n values = [values[:, n] for n in range(values.shape[1])]\n else:\n values = [values]\n\n if columns is None:\n columns = list(range(len(values)))\n return arrays_to_mgr(values, columns, index, columns, dtype=dtype)\n\n # by definition an array here\n # the dtypes will be coerced to a single dtype\n values = _prep_ndarray(values, copy=copy)\n\n if dtype is not None:\n if not is_dtype_equal(values.dtype, dtype):\n try:\n values = values.astype(dtype)\n except Exception as orig:\n # e.g. ValueError when trying to cast object dtype to float64\n raise ValueError(\n f\"failed to cast to '{dtype}' (Exception was: {orig})\"\n ) from orig\n\n index, columns = _get_axes(*values.shape, index=index, columns=columns)\n values = values.T\n\n # if we don't have a dtype specified, then try to convert objects\n # on the entire block; this is to convert if we have datetimelike's\n # embedded in an object type\n if dtype is None and is_object_dtype(values):\n\n if values.ndim == 2 and values.shape[0] != 1:\n # transpose and separate blocks\n\n dvals_list = [maybe_infer_to_datetimelike(row) for row in values]\n for n in range(len(dvals_list)):\n if isinstance(dvals_list[n], np.ndarray):\n dvals_list[n] = dvals_list[n].reshape(1, -1)\n\n from pandas.core.internals.blocks import make_block\n\n # TODO: What about re-joining object columns?\n block_values = [\n make_block(dvals_list[n], placement=[n]) for n in range(len(dvals_list))\n ]\n\n else:\n datelike_vals = maybe_infer_to_datetimelike(values)\n block_values = [datelike_vals]\n else:\n block_values = [values]\n\n return create_block_manager_from_blocks(block_values, [columns, index])\n\n\ndef init_dict(data, index, columns, dtype=None):\n \"\"\"\n Segregate Series based on type and coerce into matrices.\n Needs to handle a lot of exceptional cases.\n \"\"\"\n if columns is not None:\n from pandas.core.series import Series\n\n arrays = Series(data, index=columns, dtype=object)\n data_names = arrays.index\n\n missing = arrays.isna()\n if index is None:\n # GH10856\n # raise ValueError if only scalars in dict\n index = extract_index(arrays[~missing])\n else:\n index = ensure_index(index)\n\n # no obvious \"empty\" int column\n if missing.any() and not is_integer_dtype(dtype):\n if dtype is None or np.issubdtype(dtype, np.flexible):\n # GH#1783\n nan_dtype = object\n else:\n nan_dtype = dtype\n val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)\n arrays.loc[missing] = [val] * missing.sum()\n\n else:\n keys = list(data.keys())\n columns = data_names = Index(keys)\n arrays = (com.maybe_iterable_to_list(data[k]) for k in 
keys)\n # GH#24096 need copy to be deep for datetime64tz case\n # TODO: See if we can avoid these copies\n arrays = [\n arr if not isinstance(arr, ABCIndexClass) else arr._data for arr in arrays\n ]\n arrays = [\n arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays\n ]\n return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)\n\n\n# ---------------------------------------------------------------------\n\n\ndef _prep_ndarray(values, copy: bool = True) -> np.ndarray:\n if not isinstance(values, (np.ndarray, ABCSeries, Index)):\n if len(values) == 0:\n return np.empty((0, 0), dtype=object)\n elif isinstance(values, range):\n arr = np.arange(values.start, values.stop, values.step, dtype=\"int64\")\n return arr[..., np.newaxis]\n\n def convert(v):\n return maybe_convert_platform(v)\n\n # we could have a 1-dim or 2-dim list here\n # this is equiv of np.asarray, but does object conversion\n # and platform dtype preservation\n try:\n if is_list_like(values[0]) or hasattr(values[0], \"len\"):\n values = np.array([convert(v) for v in values])\n elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:\n # GH#21861\n values = np.array([convert(v) for v in values])\n else:\n values = convert(values)\n except (ValueError, TypeError):\n values = convert(values)\n\n else:\n\n # drop subclass info, do not copy data\n values = np.asarray(values)\n if copy:\n values = values.copy()\n\n if values.ndim == 1:\n values = values.reshape((values.shape[0], 1))\n elif values.ndim != 2:\n raise ValueError(\"Must pass 2-d input\")\n\n return values\n\n\ndef _homogenize(data, index, dtype=None):\n oindex = None\n homogenized = []\n\n for val in data:\n if isinstance(val, ABCSeries):\n if dtype is not None:\n val = val.astype(dtype)\n if val.index is not index:\n # Forces alignment. 
No need to copy data since we\n # are putting it into an ndarray later\n val = val.reindex(index, copy=False)\n else:\n if isinstance(val, dict):\n if oindex is None:\n oindex = index.astype(\"O\")\n\n if isinstance(index, (ABCDatetimeIndex, ABCTimedeltaIndex)):\n val = com.dict_compat(val)\n else:\n val = dict(val)\n val = lib.fast_multiget(val, oindex.values, default=np.nan)\n val = sanitize_array(\n val, index, dtype=dtype, copy=False, raise_cast_failure=False\n )\n\n homogenized.append(val)\n\n return homogenized\n\n\ndef extract_index(data):\n index = None\n if len(data) == 0:\n index = Index([])\n elif len(data) > 0:\n raw_lengths = []\n indexes = []\n\n have_raw_arrays = False\n have_series = False\n have_dicts = False\n\n for val in data:\n if isinstance(val, ABCSeries):\n have_series = True\n indexes.append(val.index)\n elif isinstance(val, dict):\n have_dicts = True\n indexes.append(list(val.keys()))\n elif is_list_like(val) and getattr(val, \"ndim\", 1) == 1:\n have_raw_arrays = True\n raw_lengths.append(len(val))\n\n if not indexes and not raw_lengths:\n raise ValueError(\"If using all scalar values, you must pass an index\")\n\n if have_series:\n index = union_indexes(indexes)\n elif have_dicts:\n index = union_indexes(indexes, sort=False)\n\n if have_raw_arrays:\n lengths = list(set(raw_lengths))\n if len(lengths) > 1:\n raise ValueError(\"arrays must all be same length\")\n\n if have_dicts:\n raise ValueError(\n \"Mixing dicts with non-Series may lead to ambiguous ordering.\"\n )\n\n if have_series:\n if lengths[0] != len(index):\n msg = (\n f\"array length {lengths[0]} does not match index \"\n f\"length {len(index)}\"\n )\n raise ValueError(msg)\n else:\n index = ibase.default_index(lengths[0])\n\n return ensure_index(index)\n\n\ndef reorder_arrays(arrays, arr_columns, columns):\n # reorder according to the columns\n if (\n columns is not None\n and len(columns)\n and arr_columns is not None\n and len(arr_columns)\n ):\n indexer = ensure_index(arr_columns).get_indexer(columns)\n arr_columns = ensure_index([arr_columns[i] for i in indexer])\n arrays = [arrays[i] for i in indexer]\n return arrays, arr_columns\n\n\ndef get_names_from_index(data):\n has_some_name = any(getattr(s, \"name\", None) is not None for s in data)\n if not has_some_name:\n return ibase.default_index(len(data))\n\n index = list(range(len(data)))\n count = 0\n for i, s in enumerate(data):\n n = getattr(s, \"name\", None)\n if n is not None:\n index[i] = n\n else:\n index[i] = f\"Unnamed {count}\"\n count += 1\n\n return index\n\n\ndef _get_axes(N, K, index, columns):\n # helper to create the axes as indexes\n # return axes or defaults\n\n if index is None:\n index = ibase.default_index(N)\n else:\n index = ensure_index(index)\n\n if columns is None:\n columns = ibase.default_index(K)\n else:\n columns = ensure_index(columns)\n return index, columns\n\n\n# ---------------------------------------------------------------------\n# Conversion of Inputs to Arrays\n\n\ndef to_arrays(data, columns, coerce_float=False, dtype=None):\n \"\"\"\n Return list of arrays, columns.\n \"\"\"\n if isinstance(data, ABCDataFrame):\n if columns is not None:\n arrays = [\n data._ixs(i, axis=1).values\n for i, col in enumerate(data.columns)\n if col in columns\n ]\n else:\n columns = data.columns\n arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]\n\n return arrays, columns\n\n if not len(data):\n if isinstance(data, np.ndarray):\n columns = data.dtype.names\n if columns is not None:\n return [[]] * 
len(columns), columns\n return [], [] # columns if columns is not None else []\n if isinstance(data[0], (list, tuple)):\n return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype)\n elif isinstance(data[0], abc.Mapping):\n return _list_of_dict_to_arrays(\n data, columns, coerce_float=coerce_float, dtype=dtype\n )\n elif isinstance(data[0], ABCSeries):\n return _list_of_series_to_arrays(\n data, columns, coerce_float=coerce_float, dtype=dtype\n )\n elif isinstance(data[0], Categorical):\n if columns is None:\n columns = ibase.default_index(len(data))\n return data, columns\n elif (\n isinstance(data, (np.ndarray, ABCSeries, Index))\n and data.dtype.names is not None\n ):\n\n columns = list(data.dtype.names)\n arrays = [data[k] for k in columns]\n return arrays, columns\n else:\n # last ditch effort\n data = [tuple(x) for x in data]\n return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype)\n\n\ndef _list_to_arrays(data, columns, coerce_float=False, dtype=None):\n if len(data) > 0 and isinstance(data[0], tuple):\n content = list(lib.to_object_array_tuples(data).T)\n else:\n # list of lists\n content = list(lib.to_object_array(data).T)\n # gh-26429 do not raise user-facing AssertionError\n try:\n result = _convert_object_array(\n content, columns, dtype=dtype, coerce_float=coerce_float\n )\n except AssertionError as e:\n raise ValueError(e) from e\n return result\n\n\ndef _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):\n if columns is None:\n # We know pass_data is non-empty because data[0] is a Series\n pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]\n columns = get_objs_combined_axis(pass_data, sort=False)\n\n indexer_cache = {}\n\n aligned_values = []\n for s in data:\n index = getattr(s, \"index\", None)\n if index is None:\n index = ibase.default_index(len(s))\n\n if id(index) in indexer_cache:\n indexer = indexer_cache[id(index)]\n else:\n indexer = indexer_cache[id(index)] = index.get_indexer(columns)\n\n values = com.values_from_object(s)\n aligned_values.append(algorithms.take_1d(values, indexer))\n\n values = np.vstack(aligned_values)\n\n if values.dtype == np.object_:\n content = list(values.T)\n return _convert_object_array(\n content, columns, dtype=dtype, coerce_float=coerce_float\n )\n else:\n return values.T, columns\n\n\ndef _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):\n \"\"\"Convert list of dicts to numpy arrays\n\n if `columns` is not passed, column names are inferred from the records\n - for OrderedDict and dicts, the column names match\n the key insertion-order from the first record to the last.\n - For other kinds of dict-likes, the keys are lexically sorted.\n\n Parameters\n ----------\n data : iterable\n collection of records (OrderedDict, dict)\n columns: iterables or None\n coerce_float : bool\n dtype : np.dtype\n\n Returns\n -------\n tuple\n arrays, columns\n \"\"\"\n\n if columns is None:\n gen = (list(x.keys()) for x in data)\n sort = not any(isinstance(d, dict) for d in data)\n columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)\n\n # assure that they are of the base dict class and not of derived\n # classes\n data = [(type(d) is dict) and d or dict(d) for d in data]\n\n content = list(lib.dicts_to_array(data, list(columns)).T)\n return _convert_object_array(\n content, columns, dtype=dtype, coerce_float=coerce_float\n )\n\n\ndef _convert_object_array(content, columns, coerce_float=False, dtype=None):\n if columns is None:\n columns = 
ibase.default_index(len(content))\n else:\n if len(columns) != len(content): # pragma: no cover\n # caller's responsibility to check for this...\n raise AssertionError(\n f\"{len(columns)} columns passed, passed data had \"\n f\"{len(content)} columns\"\n )\n\n # provide soft conversion of object dtypes\n def convert(arr):\n if dtype != object and dtype != np.object:\n arr = lib.maybe_convert_objects(arr, try_float=coerce_float)\n arr = maybe_cast_to_datetime(arr, dtype)\n return arr\n\n arrays = [convert(arr) for arr in content]\n\n return arrays, columns\n\n\n# ---------------------------------------------------------------------\n# Series-Based\n\n\ndef sanitize_index(data, index: Index):\n \"\"\"\n Sanitize an index type to return an ndarray of the underlying, pass\n through a non-Index.\n \"\"\"\n\n if len(data) != len(index):\n raise ValueError(\"Length of values does not match length of index\")\n\n if isinstance(data, ABCIndexClass):\n pass\n elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)):\n data = data._values\n\n elif isinstance(data, np.ndarray):\n\n # coerce datetimelike types\n if data.dtype.kind in [\"M\", \"m\"]:\n data = sanitize_array(data, index, copy=False)\n\n return data\n", "\"\"\"\naggregation.py contains utility functions to handle multiple named and lambda\nkwarg aggregations in groupby and DataFrame/Series aggregation\n\"\"\"\n\nfrom collections import defaultdict\nfrom functools import partial\nfrom typing import Any, DefaultDict, List, Sequence, Tuple\n\nfrom pandas.core.dtypes.common import is_dict_like, is_list_like\n\nimport pandas.core.common as com\nfrom pandas.core.indexes.api import Index\n\n\ndef is_multi_agg_with_relabel(**kwargs) -> bool:\n \"\"\"\n Check whether kwargs passed to .agg look like multi-agg with relabeling.\n\n Parameters\n ----------\n **kwargs : dict\n\n Returns\n -------\n bool\n\n Examples\n --------\n >>> is_multi_agg_with_relabel(a='max')\n False\n >>> is_multi_agg_with_relabel(a_max=('a', 'max'),\n ... 
a_min=('a', 'min'))\n True\n >>> is_multi_agg_with_relabel()\n False\n \"\"\"\n return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (\n len(kwargs) > 0\n )\n\n\ndef normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[int]]:\n \"\"\"\n Normalize user-provided \"named aggregation\" kwargs.\n Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs\n to the old Dict[str, List[scalar]]].\n\n Parameters\n ----------\n kwargs : dict\n\n Returns\n -------\n aggspec : dict\n The transformed kwargs.\n columns : List[str]\n The user-provided keys.\n col_idx_order : List[int]\n List of columns indices.\n\n Examples\n --------\n >>> normalize_keyword_aggregation({'output': ('input', 'sum')})\n ({'input': ['sum']}, ('output',), [('input', 'sum')])\n \"\"\"\n # Normalize the aggregation functions as Mapping[column, List[func]],\n # process normally, then fixup the names.\n # TODO: aggspec type: typing.Dict[str, List[AggScalar]]\n # May be hitting https://github.com/python/mypy/issues/5958\n # saying it doesn't have an attribute __name__\n aggspec: DefaultDict = defaultdict(list)\n order = []\n columns, pairs = list(zip(*kwargs.items()))\n\n for name, (column, aggfunc) in zip(columns, pairs):\n aggspec[column].append(aggfunc)\n order.append((column, com.get_callable_name(aggfunc) or aggfunc))\n\n # uniquify aggfunc name if duplicated in order list\n uniquified_order = _make_unique_kwarg_list(order)\n\n # GH 25719, due to aggspec will change the order of assigned columns in aggregation\n # uniquified_aggspec will store uniquified order list and will compare it with order\n # based on index\n aggspec_order = [\n (column, com.get_callable_name(aggfunc) or aggfunc)\n for column, aggfuncs in aggspec.items()\n for aggfunc in aggfuncs\n ]\n uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)\n\n # get the new indice of columns by comparison\n col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)\n return aggspec, columns, col_idx_order\n\n\ndef _make_unique_kwarg_list(\n seq: Sequence[Tuple[Any, Any]]\n) -> Sequence[Tuple[Any, Any]]:\n \"\"\"Uniquify aggfunc name of the pairs in the order list\n\n Examples:\n --------\n >>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]\n >>> _make_unique_kwarg_list(kwarg_list)\n [('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]\n \"\"\"\n return [\n (pair[0], \"_\".join([pair[1], str(seq[:i].count(pair))]))\n if seq.count(pair) > 1\n else pair\n for i, pair in enumerate(seq)\n ]\n\n\n# TODO: Can't use, because mypy doesn't like us setting __name__\n# error: \"partial[Any]\" has no attribute \"__name__\"\n# the type is:\n# typing.Sequence[Callable[..., ScalarResult]]\n# -> typing.Sequence[Callable[..., ScalarResult]]:\n\n\ndef _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:\n \"\"\"\n Possibly mangle a list of aggfuncs.\n\n Parameters\n ----------\n aggfuncs : Sequence\n\n Returns\n -------\n mangled: list-like\n A new AggSpec sequence, where lambdas have been converted\n to have unique names.\n\n Notes\n -----\n If just one aggfunc is passed, the name will not be mangled.\n \"\"\"\n if len(aggfuncs) <= 1:\n # don't mangle for .agg([lambda x: .])\n return aggfuncs\n i = 0\n mangled_aggfuncs = []\n for aggfunc in aggfuncs:\n if com.get_callable_name(aggfunc) == \"<lambda>\":\n aggfunc = partial(aggfunc)\n aggfunc.__name__ = f\"<lambda_{i}>\"\n i += 1\n mangled_aggfuncs.append(aggfunc)\n\n return mangled_aggfuncs\n\n\ndef 
maybe_mangle_lambdas(agg_spec: Any) -> Any:\n \"\"\"\n Make new lambdas with unique names.\n\n Parameters\n ----------\n agg_spec : Any\n An argument to GroupBy.agg.\n Non-dict-like `agg_spec` are pass through as is.\n For dict-like `agg_spec` a new spec is returned\n with name-mangled lambdas.\n\n Returns\n -------\n mangled : Any\n Same type as the input.\n\n Examples\n --------\n >>> maybe_mangle_lambdas('sum')\n 'sum'\n >>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP\n [<function __main__.<lambda_0>,\n <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]\n \"\"\"\n is_dict = is_dict_like(agg_spec)\n if not (is_dict or is_list_like(agg_spec)):\n return agg_spec\n mangled_aggspec = type(agg_spec)() # dict or OrderdDict\n\n if is_dict:\n for key, aggfuncs in agg_spec.items():\n if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):\n mangled_aggfuncs = _managle_lambda_list(aggfuncs)\n else:\n mangled_aggfuncs = aggfuncs\n\n mangled_aggspec[key] = mangled_aggfuncs\n else:\n mangled_aggspec = _managle_lambda_list(agg_spec)\n\n return mangled_aggspec\n", "import warnings\n\nimport numpy as np\n\nfrom pandas import (\n CategoricalIndex,\n DataFrame,\n Float64Index,\n IndexSlice,\n Int64Index,\n IntervalIndex,\n MultiIndex,\n Series,\n UInt64Index,\n concat,\n date_range,\n option_context,\n period_range,\n)\n\nfrom .pandas_vb_common import tm\n\n\nclass NumericSeriesIndexing:\n\n params = [\n (Int64Index, UInt64Index, Float64Index),\n (\"unique_monotonic_inc\", \"nonunique_monotonic_inc\"),\n ]\n param_names = [\"index_dtype\", \"index_structure\"]\n\n def setup(self, index, index_structure):\n N = 10 ** 6\n indices = {\n \"unique_monotonic_inc\": index(range(N)),\n \"nonunique_monotonic_inc\": index(\n list(range(55)) + [54] + list(range(55, N - 1))\n ),\n }\n self.data = Series(np.random.rand(N), index=indices[index_structure])\n self.array = np.arange(10000)\n self.array_list = self.array.tolist()\n\n def time_getitem_scalar(self, index, index_structure):\n self.data[800000]\n\n def time_getitem_slice(self, index, index_structure):\n self.data[:800000]\n\n def time_getitem_list_like(self, index, index_structure):\n self.data[[800000]]\n\n def time_getitem_array(self, index, index_structure):\n self.data[self.array]\n\n def time_getitem_lists(self, index, index_structure):\n self.data[self.array_list]\n\n def time_iloc_array(self, index, index_structure):\n self.data.iloc[self.array]\n\n def time_iloc_list_like(self, index, index_structure):\n self.data.iloc[[800000]]\n\n def time_iloc_scalar(self, index, index_structure):\n self.data.iloc[800000]\n\n def time_iloc_slice(self, index, index_structure):\n self.data.iloc[:800000]\n\n def time_loc_array(self, index, index_structure):\n self.data.loc[self.array]\n\n def time_loc_list_like(self, index, index_structure):\n self.data.loc[[800000]]\n\n def time_loc_scalar(self, index, index_structure):\n self.data.loc[800000]\n\n def time_loc_slice(self, index, index_structure):\n self.data.loc[:800000]\n\n\nclass NonNumericSeriesIndexing:\n\n params = [\n (\"string\", \"datetime\", \"period\"),\n (\"unique_monotonic_inc\", \"nonunique_monotonic_inc\", \"non_monotonic\"),\n ]\n param_names = [\"index_dtype\", \"index_structure\"]\n\n def setup(self, index, index_structure):\n N = 10 ** 6\n if index == \"string\":\n index = tm.makeStringIndex(N)\n elif index == \"datetime\":\n index = date_range(\"1900\", periods=N, freq=\"s\")\n elif index == \"period\":\n index = period_range(\"1900\", periods=N, freq=\"s\")\n index = 
index.sort_values()\n assert index.is_unique and index.is_monotonic_increasing\n if index_structure == \"nonunique_monotonic_inc\":\n index = index.insert(item=index[2], loc=2)[:-1]\n elif index_structure == \"non_monotonic\":\n index = index[::2].append(index[1::2])\n assert len(index) == N\n self.s = Series(np.random.rand(N), index=index)\n self.lbl = index[80000]\n # warm up index mapping\n self.s[self.lbl]\n\n def time_getitem_label_slice(self, index, index_structure):\n self.s[: self.lbl]\n\n def time_getitem_pos_slice(self, index, index_structure):\n self.s[:80000]\n\n def time_getitem_scalar(self, index, index_structure):\n self.s[self.lbl]\n\n def time_getitem_list_like(self, index, index_structure):\n self.s[[self.lbl]]\n\n\nclass DataFrameStringIndexing:\n def setup(self):\n index = tm.makeStringIndex(1000)\n columns = tm.makeStringIndex(30)\n with warnings.catch_warnings(record=True):\n self.df = DataFrame(np.random.randn(1000, 30), index=index, columns=columns)\n self.idx_scalar = index[100]\n self.col_scalar = columns[10]\n self.bool_indexer = self.df[self.col_scalar] > 0\n self.bool_obj_indexer = self.bool_indexer.astype(object)\n self.boolean_indexer = (self.df[self.col_scalar] > 0).astype(\"boolean\")\n\n def time_loc(self):\n self.df.loc[self.idx_scalar, self.col_scalar]\n\n def time_getitem_scalar(self):\n self.df[self.col_scalar][self.idx_scalar]\n\n def time_boolean_rows(self):\n self.df[self.bool_indexer]\n\n def time_boolean_rows_object(self):\n self.df[self.bool_obj_indexer]\n\n def time_boolean_rows_boolean(self):\n self.df[self.boolean_indexer]\n\n\nclass DataFrameNumericIndexing:\n def setup(self):\n self.idx_dupe = np.array(range(30)) * 99\n self.df = DataFrame(np.random.randn(10000, 5))\n self.df_dup = concat([self.df, 2 * self.df, 3 * self.df])\n self.bool_indexer = [True] * 5000 + [False] * 5000\n\n def time_iloc_dups(self):\n self.df_dup.iloc[self.idx_dupe]\n\n def time_loc_dups(self):\n self.df_dup.loc[self.idx_dupe]\n\n def time_iloc(self):\n self.df.iloc[:100, 0]\n\n def time_loc(self):\n self.df.loc[:100, 0]\n\n def time_bool_indexer(self):\n self.df[self.bool_indexer]\n\n\nclass Take:\n\n params = [\"int\", \"datetime\"]\n param_names = [\"index\"]\n\n def setup(self, index):\n N = 100000\n indexes = {\n \"int\": Int64Index(np.arange(N)),\n \"datetime\": date_range(\"2011-01-01\", freq=\"S\", periods=N),\n }\n index = indexes[index]\n self.s = Series(np.random.rand(N), index=index)\n self.indexer = [True, False, True, True, False] * 20000\n\n def time_take(self, index):\n self.s.take(self.indexer)\n\n\nclass MultiIndexing:\n def setup(self):\n mi = MultiIndex.from_product([range(1000), range(1000)])\n self.s = Series(np.random.randn(1000000), index=mi)\n self.df = DataFrame(self.s)\n\n n = 100000\n with warnings.catch_warnings(record=True):\n self.mdt = DataFrame(\n {\n \"A\": np.random.choice(range(10000, 45000, 1000), n),\n \"B\": np.random.choice(range(10, 400), n),\n \"C\": np.random.choice(range(1, 150), n),\n \"D\": np.random.choice(range(10000, 45000), n),\n \"x\": np.random.choice(range(400), n),\n \"y\": np.random.choice(range(25), n),\n }\n )\n self.idx = IndexSlice[20000:30000, 20:30, 35:45, 30000:40000]\n self.mdt = self.mdt.set_index([\"A\", \"B\", \"C\", \"D\"]).sort_index()\n\n def time_index_slice(self):\n self.mdt.loc[self.idx, :]\n\n\nclass IntervalIndexing:\n def setup_cache(self):\n idx = IntervalIndex.from_breaks(np.arange(1000001))\n monotonic = Series(np.arange(1000000), index=idx)\n return monotonic\n\n def 
time_getitem_scalar(self, monotonic):\n monotonic[80000]\n\n def time_loc_scalar(self, monotonic):\n monotonic.loc[80000]\n\n def time_getitem_list(self, monotonic):\n monotonic[80000:]\n\n def time_loc_list(self, monotonic):\n monotonic.loc[80000:]\n\n\nclass CategoricalIndexIndexing:\n\n params = [\"monotonic_incr\", \"monotonic_decr\", \"non_monotonic\"]\n param_names = [\"index\"]\n\n def setup(self, index):\n N = 10 ** 5\n values = list(\"a\" * N + \"b\" * N + \"c\" * N)\n indices = {\n \"monotonic_incr\": CategoricalIndex(values),\n \"monotonic_decr\": CategoricalIndex(reversed(values)),\n \"non_monotonic\": CategoricalIndex(list(\"abc\" * N)),\n }\n self.data = indices[index]\n\n self.int_scalar = 10000\n self.int_list = list(range(10000))\n\n self.cat_scalar = \"b\"\n self.cat_list = [\"a\", \"c\"]\n\n def time_getitem_scalar(self, index):\n self.data[self.int_scalar]\n\n def time_getitem_slice(self, index):\n self.data[: self.int_scalar]\n\n def time_getitem_list_like(self, index):\n self.data[[self.int_scalar]]\n\n def time_getitem_list(self, index):\n self.data[self.int_list]\n\n def time_getitem_bool_array(self, index):\n self.data[self.data == self.cat_scalar]\n\n def time_get_loc_scalar(self, index):\n self.data.get_loc(self.cat_scalar)\n\n def time_get_indexer_list(self, index):\n self.data.get_indexer(self.cat_list)\n\n\nclass MethodLookup:\n def setup_cache(self):\n s = Series()\n return s\n\n def time_lookup_iloc(self, s):\n s.iloc\n\n def time_lookup_loc(self, s):\n s.loc\n\n\nclass GetItemSingleColumn:\n def setup(self):\n self.df_string_col = DataFrame(np.random.randn(3000, 1), columns=[\"A\"])\n self.df_int_col = DataFrame(np.random.randn(3000, 1))\n\n def time_frame_getitem_single_column_label(self):\n self.df_string_col[\"A\"]\n\n def time_frame_getitem_single_column_int(self):\n self.df_int_col[0]\n\n\nclass AssignTimeseriesIndex:\n def setup(self):\n N = 100000\n idx = date_range(\"1/1/2000\", periods=N, freq=\"H\")\n self.df = DataFrame(np.random.randn(N, 1), columns=[\"A\"], index=idx)\n\n def time_frame_assign_timeseries_index(self):\n self.df[\"date\"] = self.df.index\n\n\nclass InsertColumns:\n def setup(self):\n self.N = 10 ** 3\n self.df = DataFrame(index=range(self.N))\n\n def time_insert(self):\n np.random.seed(1234)\n for i in range(100):\n self.df.insert(0, i, np.random.randn(self.N), allow_duplicates=True)\n\n def time_assign_with_setitem(self):\n np.random.seed(1234)\n for i in range(100):\n self.df[i] = np.random.randn(self.N)\n\n\nclass ChainIndexing:\n\n params = [None, \"warn\"]\n param_names = [\"mode\"]\n\n def setup(self, mode):\n self.N = 1000000\n\n def time_chained_indexing(self, mode):\n with warnings.catch_warnings(record=True):\n with option_context(\"mode.chained_assignment\", mode):\n df = DataFrame({\"A\": np.arange(self.N), \"B\": \"foo\"})\n df2 = df[df.A > self.N // 2]\n df2[\"C\"] = 1.0\n\n\nfrom .pandas_vb_common import setup # noqa: F401 isort:skip\n", "from datetime import datetime, timedelta\nfrom io import StringIO\nimport sys\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslib import iNaT\nfrom pandas.compat import PYPY\nfrom pandas.compat.numpy import np_array_datetime64_compat\n\nfrom pandas.core.dtypes.common import (\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_object_dtype,\n needs_i8_conversion,\n)\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n Interval,\n IntervalIndex,\n PeriodIndex,\n Series,\n Timedelta,\n TimedeltaIndex,\n Timestamp,\n)\nimport 
pandas._testing as tm\nfrom pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin\n\n\nclass Ops:\n def _allow_na_ops(self, obj):\n \"\"\"Whether to skip test cases including NaN\"\"\"\n if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:\n # don't test boolean / integer dtypes\n return False\n return True\n\n def setup_method(self, method):\n self.bool_index = tm.makeBoolIndex(10, name=\"a\")\n self.int_index = tm.makeIntIndex(10, name=\"a\")\n self.float_index = tm.makeFloatIndex(10, name=\"a\")\n self.dt_index = tm.makeDateIndex(10, name=\"a\")\n self.dt_tz_index = tm.makeDateIndex(10, name=\"a\").tz_localize(tz=\"US/Eastern\")\n self.period_index = tm.makePeriodIndex(10, name=\"a\")\n self.string_index = tm.makeStringIndex(10, name=\"a\")\n self.unicode_index = tm.makeUnicodeIndex(10, name=\"a\")\n\n arr = np.random.randn(10)\n self.bool_series = Series(arr, index=self.bool_index, name=\"a\")\n self.int_series = Series(arr, index=self.int_index, name=\"a\")\n self.float_series = Series(arr, index=self.float_index, name=\"a\")\n self.dt_series = Series(arr, index=self.dt_index, name=\"a\")\n self.dt_tz_series = self.dt_tz_index.to_series()\n self.period_series = Series(arr, index=self.period_index, name=\"a\")\n self.string_series = Series(arr, index=self.string_index, name=\"a\")\n self.unicode_series = Series(arr, index=self.unicode_index, name=\"a\")\n\n types = [\"bool\", \"int\", \"float\", \"dt\", \"dt_tz\", \"period\", \"string\", \"unicode\"]\n self.indexes = [getattr(self, f\"{t}_index\") for t in types]\n self.series = [getattr(self, f\"{t}_series\") for t in types]\n\n # To test narrow dtypes, we use narrower *data* elements, not *index* elements\n index = self.int_index\n self.float32_series = Series(arr.astype(np.float32), index=index, name=\"a\")\n\n arr_int = np.random.choice(10, size=10, replace=False)\n self.int8_series = Series(arr_int.astype(np.int8), index=index, name=\"a\")\n self.int16_series = Series(arr_int.astype(np.int16), index=index, name=\"a\")\n self.int32_series = Series(arr_int.astype(np.int32), index=index, name=\"a\")\n\n self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name=\"a\")\n self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name=\"a\")\n self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name=\"a\")\n\n nrw_types = [\"float32\", \"int8\", \"int16\", \"int32\", \"uint8\", \"uint16\", \"uint32\"]\n self.narrow_series = [getattr(self, f\"{t}_series\") for t in nrw_types]\n\n self.objs = self.indexes + self.series + self.narrow_series\n\n def check_ops_properties(self, props, filter=None, ignore_failures=False):\n for op in props:\n for o in self.is_valid_objs:\n\n # if a filter, skip if it doesn't match\n if filter is not None:\n filt = o.index if isinstance(o, Series) else o\n if not filter(filt):\n continue\n\n try:\n if isinstance(o, Series):\n expected = Series(getattr(o.index, op), index=o.index, name=\"a\")\n else:\n expected = getattr(o, op)\n except (AttributeError):\n if ignore_failures:\n continue\n\n result = getattr(o, op)\n\n # these could be series, arrays or scalars\n if isinstance(result, Series) and isinstance(expected, Series):\n tm.assert_series_equal(result, expected)\n elif isinstance(result, Index) and isinstance(expected, Index):\n tm.assert_index_equal(result, expected)\n elif isinstance(result, np.ndarray) and isinstance(\n expected, np.ndarray\n ):\n tm.assert_numpy_array_equal(result, expected)\n else:\n assert result == expected\n\n # freq 
raises AttributeError on an Int64Index because its not\n # defined we mostly care about Series here anyhow\n if not ignore_failures:\n for o in self.not_valid_objs:\n\n # an object that is datetimelike will raise a TypeError,\n # otherwise an AttributeError\n msg = \"no attribute\"\n err = AttributeError\n if issubclass(type(o), DatetimeIndexOpsMixin):\n err = TypeError\n with pytest.raises(err, match=msg):\n getattr(o, op)\n\n @pytest.mark.parametrize(\"klass\", [Series, DataFrame])\n def test_binary_ops_docs(self, klass):\n op_map = {\n \"add\": \"+\",\n \"sub\": \"-\",\n \"mul\": \"*\",\n \"mod\": \"%\",\n \"pow\": \"**\",\n \"truediv\": \"/\",\n \"floordiv\": \"//\",\n }\n for op_name in op_map:\n operand1 = klass.__name__.lower()\n operand2 = \"other\"\n op = op_map[op_name]\n expected_str = \" \".join([operand1, op, operand2])\n assert expected_str in getattr(klass, op_name).__doc__\n\n # reverse version of the binary ops\n expected_str = \" \".join([operand2, op, operand1])\n assert expected_str in getattr(klass, \"r\" + op_name).__doc__\n\n\nclass TestTranspose(Ops):\n errmsg = \"the 'axes' parameter is not supported\"\n\n def test_transpose(self):\n for obj in self.objs:\n tm.assert_equal(obj.transpose(), obj)\n\n def test_transpose_non_default_axes(self):\n for obj in self.objs:\n with pytest.raises(ValueError, match=self.errmsg):\n obj.transpose(1)\n with pytest.raises(ValueError, match=self.errmsg):\n obj.transpose(axes=1)\n\n def test_numpy_transpose(self):\n for obj in self.objs:\n tm.assert_equal(np.transpose(obj), obj)\n\n with pytest.raises(ValueError, match=self.errmsg):\n np.transpose(obj, axes=1)\n\n\nclass TestIndexOps(Ops):\n def setup_method(self, method):\n super().setup_method(method)\n self.is_valid_objs = self.objs\n self.not_valid_objs = []\n\n def test_none_comparison(self):\n\n # bug brought up by #1079\n # changed from TypeError in 0.17.0\n for o in self.is_valid_objs:\n if isinstance(o, Series):\n\n o[0] = np.nan\n\n # noinspection PyComparisonWithNone\n result = o == None # noqa\n assert not result.iat[0]\n assert not result.iat[1]\n\n # noinspection PyComparisonWithNone\n result = o != None # noqa\n assert result.iat[0]\n assert result.iat[1]\n\n result = None == o # noqa\n assert not result.iat[0]\n assert not result.iat[1]\n\n result = None != o # noqa\n assert result.iat[0]\n assert result.iat[1]\n\n if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):\n # Following DatetimeIndex (and Timestamp) convention,\n # inequality comparisons with Series[datetime64] raise\n msg = \"Invalid comparison\"\n with pytest.raises(TypeError, match=msg):\n None > o\n with pytest.raises(TypeError, match=msg):\n o > None\n else:\n result = None > o\n assert not result.iat[0]\n assert not result.iat[1]\n\n result = o < None\n assert not result.iat[0]\n assert not result.iat[1]\n\n def test_ndarray_compat_properties(self):\n\n for o in self.objs:\n # Check that we work.\n for p in [\"shape\", \"dtype\", \"T\", \"nbytes\"]:\n assert getattr(o, p, None) is not None\n\n # deprecated properties\n for p in [\"flags\", \"strides\", \"itemsize\", \"base\", \"data\"]:\n assert not hasattr(o, p)\n\n msg = \"can only convert an array of size 1 to a Python scalar\"\n with pytest.raises(ValueError, match=msg):\n o.item() # len > 1\n\n assert o.ndim == 1\n assert o.size == len(o)\n\n assert Index([1]).item() == 1\n assert Series([1]).item() == 1\n\n def test_value_counts_unique_nunique(self):\n for orig in self.objs:\n o = orig.copy()\n klass = type(o)\n values = o._values\n\n if 
isinstance(values, Index):\n # reset name not to affect latter process\n values.name = None\n\n # create repeated values, 'n'th element is repeated by n+1 times\n # skip boolean, because it only has 2 values at most\n if isinstance(o, Index) and o.is_boolean():\n continue\n elif isinstance(o, Index):\n expected_index = Index(o[::-1])\n expected_index.name = None\n o = o.repeat(range(1, len(o) + 1))\n o.name = \"a\"\n else:\n expected_index = Index(values[::-1])\n idx = o.index.repeat(range(1, len(o) + 1))\n # take-based repeat\n indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))\n rep = values.take(indices)\n o = klass(rep, index=idx, name=\"a\")\n\n # check values has the same dtype as the original\n assert o.dtype == orig.dtype\n\n expected_s = Series(\n range(10, 0, -1), index=expected_index, dtype=\"int64\", name=\"a\"\n )\n\n result = o.value_counts()\n tm.assert_series_equal(result, expected_s)\n assert result.index.name is None\n assert result.name == \"a\"\n\n result = o.unique()\n if isinstance(o, Index):\n assert isinstance(result, type(o))\n tm.assert_index_equal(result, orig)\n assert result.dtype == orig.dtype\n elif is_datetime64tz_dtype(o):\n # datetimetz Series returns array of Timestamp\n assert result[0] == orig[0]\n for r in result:\n assert isinstance(r, Timestamp)\n\n tm.assert_numpy_array_equal(\n result.astype(object), orig._values.astype(object)\n )\n else:\n tm.assert_numpy_array_equal(result, orig.values)\n assert result.dtype == orig.dtype\n\n assert o.nunique() == len(np.unique(o.values))\n\n @pytest.mark.parametrize(\"null_obj\", [np.nan, None])\n def test_value_counts_unique_nunique_null(self, null_obj):\n\n for orig in self.objs:\n o = orig.copy()\n klass = type(o)\n values = o._ndarray_values\n\n if not self._allow_na_ops(o):\n continue\n\n # special assign to the numpy array\n if is_datetime64tz_dtype(o):\n if isinstance(o, DatetimeIndex):\n v = o.asi8\n v[0:2] = iNaT\n values = o._shallow_copy(v)\n else:\n o = o.copy()\n o[0:2] = pd.NaT\n values = o._values\n\n elif needs_i8_conversion(o):\n values[0:2] = iNaT\n values = o._shallow_copy(values)\n else:\n values[0:2] = null_obj\n # check values has the same dtype as the original\n\n assert values.dtype == o.dtype\n\n # create repeated values, 'n'th element is repeated by n+1\n # times\n if isinstance(o, (DatetimeIndex, PeriodIndex)):\n expected_index = o.copy()\n expected_index.name = None\n\n # attach name to klass\n o = klass(values.repeat(range(1, len(o) + 1)))\n o.name = \"a\"\n else:\n if isinstance(o, DatetimeIndex):\n expected_index = orig._values._shallow_copy(values)\n else:\n expected_index = Index(values)\n expected_index.name = None\n o = o.repeat(range(1, len(o) + 1))\n o.name = \"a\"\n\n # check values has the same dtype as the original\n assert o.dtype == orig.dtype\n # check values correctly have NaN\n nanloc = np.zeros(len(o), dtype=np.bool)\n nanloc[:3] = True\n if isinstance(o, Index):\n tm.assert_numpy_array_equal(pd.isna(o), nanloc)\n else:\n exp = Series(nanloc, o.index, name=\"a\")\n tm.assert_series_equal(pd.isna(o), exp)\n\n expected_s_na = Series(\n list(range(10, 2, -1)) + [3],\n index=expected_index[9:0:-1],\n dtype=\"int64\",\n name=\"a\",\n )\n expected_s = Series(\n list(range(10, 2, -1)),\n index=expected_index[9:1:-1],\n dtype=\"int64\",\n name=\"a\",\n )\n\n result_s_na = o.value_counts(dropna=False)\n tm.assert_series_equal(result_s_na, expected_s_na)\n assert result_s_na.index.name is None\n assert result_s_na.name == \"a\"\n result_s = o.value_counts()\n 
tm.assert_series_equal(o.value_counts(), expected_s)\n assert result_s.index.name is None\n assert result_s.name == \"a\"\n\n result = o.unique()\n if isinstance(o, Index):\n tm.assert_index_equal(result, Index(values[1:], name=\"a\"))\n elif is_datetime64tz_dtype(o):\n # unable to compare NaT / nan\n tm.assert_extension_array_equal(result[1:], values[2:])\n assert result[0] is pd.NaT\n else:\n tm.assert_numpy_array_equal(result[1:], values[2:])\n\n assert pd.isna(result[0])\n assert result.dtype == orig.dtype\n\n assert o.nunique() == 8\n assert o.nunique(dropna=False) == 9\n\n def test_value_counts_inferred(self, index_or_series):\n klass = index_or_series\n s_values = [\"a\", \"b\", \"b\", \"b\", \"b\", \"c\", \"d\", \"d\", \"a\", \"a\"]\n s = klass(s_values)\n expected = Series([4, 3, 2, 1], index=[\"b\", \"a\", \"d\", \"c\"])\n tm.assert_series_equal(s.value_counts(), expected)\n\n if isinstance(s, Index):\n exp = Index(np.unique(np.array(s_values, dtype=np.object_)))\n tm.assert_index_equal(s.unique(), exp)\n else:\n exp = np.unique(np.array(s_values, dtype=np.object_))\n tm.assert_numpy_array_equal(s.unique(), exp)\n\n assert s.nunique() == 4\n # don't sort, have to sort after the fact as not sorting is\n # platform-dep\n hist = s.value_counts(sort=False).sort_values()\n expected = Series([3, 1, 4, 2], index=list(\"acbd\")).sort_values()\n tm.assert_series_equal(hist, expected)\n\n # sort ascending\n hist = s.value_counts(ascending=True)\n expected = Series([1, 2, 3, 4], index=list(\"cdab\"))\n tm.assert_series_equal(hist, expected)\n\n # relative histogram.\n hist = s.value_counts(normalize=True)\n expected = Series([0.4, 0.3, 0.2, 0.1], index=[\"b\", \"a\", \"d\", \"c\"])\n tm.assert_series_equal(hist, expected)\n\n def test_value_counts_bins(self, index_or_series):\n klass = index_or_series\n s_values = [\"a\", \"b\", \"b\", \"b\", \"b\", \"c\", \"d\", \"d\", \"a\", \"a\"]\n s = klass(s_values)\n\n # bins\n msg = \"bins argument only works with numeric data\"\n with pytest.raises(TypeError, match=msg):\n s.value_counts(bins=1)\n\n s1 = Series([1, 1, 2, 3])\n res1 = s1.value_counts(bins=1)\n exp1 = Series({Interval(0.997, 3.0): 4})\n tm.assert_series_equal(res1, exp1)\n res1n = s1.value_counts(bins=1, normalize=True)\n exp1n = Series({Interval(0.997, 3.0): 1.0})\n tm.assert_series_equal(res1n, exp1n)\n\n if isinstance(s1, Index):\n tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))\n else:\n exp = np.array([1, 2, 3], dtype=np.int64)\n tm.assert_numpy_array_equal(s1.unique(), exp)\n\n assert s1.nunique() == 3\n\n # these return the same\n res4 = s1.value_counts(bins=4, dropna=True)\n intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])\n exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))\n tm.assert_series_equal(res4, exp4)\n\n res4 = s1.value_counts(bins=4, dropna=False)\n intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])\n exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))\n tm.assert_series_equal(res4, exp4)\n\n res4n = s1.value_counts(bins=4, normalize=True)\n exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))\n tm.assert_series_equal(res4n, exp4n)\n\n # handle NA's properly\n s_values = [\"a\", \"b\", \"b\", \"b\", np.nan, np.nan, \"d\", \"d\", \"a\", \"a\", \"b\"]\n s = klass(s_values)\n expected = Series([4, 3, 2], index=[\"b\", \"a\", \"d\"])\n tm.assert_series_equal(s.value_counts(), expected)\n\n if isinstance(s, Index):\n exp = Index([\"a\", \"b\", np.nan, \"d\"])\n 
tm.assert_index_equal(s.unique(), exp)\n else:\n exp = np.array([\"a\", \"b\", np.nan, \"d\"], dtype=object)\n tm.assert_numpy_array_equal(s.unique(), exp)\n assert s.nunique() == 3\n\n s = klass({}) if klass is dict else klass({}, dtype=object)\n expected = Series([], dtype=np.int64)\n tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)\n # returned dtype differs depending on original\n if isinstance(s, Index):\n tm.assert_index_equal(s.unique(), Index([]), exact=False)\n else:\n tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)\n\n assert s.nunique() == 0\n\n def test_value_counts_datetime64(self, index_or_series):\n klass = index_or_series\n\n # GH 3002, datetime64[ns]\n # don't test names though\n txt = \"\\n\".join(\n [\n \"xxyyzz20100101PIE\",\n \"xxyyzz20100101GUM\",\n \"xxyyzz20100101EGG\",\n \"xxyyww20090101EGG\",\n \"foofoo20080909PIE\",\n \"foofoo20080909GUM\",\n ]\n )\n f = StringIO(txt)\n df = pd.read_fwf(\n f, widths=[6, 8, 3], names=[\"person_id\", \"dt\", \"food\"], parse_dates=[\"dt\"]\n )\n\n s = klass(df[\"dt\"].copy())\n s.name = None\n idx = pd.to_datetime(\n [\"2010-01-01 00:00:00\", \"2008-09-09 00:00:00\", \"2009-01-01 00:00:00\"]\n )\n expected_s = Series([3, 2, 1], index=idx)\n tm.assert_series_equal(s.value_counts(), expected_s)\n\n expected = np_array_datetime64_compat(\n [\"2010-01-01 00:00:00\", \"2009-01-01 00:00:00\", \"2008-09-09 00:00:00\"],\n dtype=\"datetime64[ns]\",\n )\n if isinstance(s, Index):\n tm.assert_index_equal(s.unique(), DatetimeIndex(expected))\n else:\n tm.assert_numpy_array_equal(s.unique(), expected)\n\n assert s.nunique() == 3\n\n # with NaT\n s = df[\"dt\"].copy()\n s = klass(list(s.values) + [pd.NaT])\n\n result = s.value_counts()\n assert result.index.dtype == \"datetime64[ns]\"\n tm.assert_series_equal(result, expected_s)\n\n result = s.value_counts(dropna=False)\n expected_s[pd.NaT] = 1\n tm.assert_series_equal(result, expected_s)\n\n unique = s.unique()\n assert unique.dtype == \"datetime64[ns]\"\n\n # numpy_array_equal cannot compare pd.NaT\n if isinstance(s, Index):\n exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])\n tm.assert_index_equal(unique, exp_idx)\n else:\n tm.assert_numpy_array_equal(unique[:3], expected)\n assert pd.isna(unique[3])\n\n assert s.nunique() == 3\n assert s.nunique(dropna=False) == 4\n\n # timedelta64[ns]\n td = df.dt - df.dt + timedelta(1)\n td = klass(td, name=\"dt\")\n\n result = td.value_counts()\n expected_s = Series([6], index=[Timedelta(\"1day\")], name=\"dt\")\n tm.assert_series_equal(result, expected_s)\n\n expected = TimedeltaIndex([\"1 days\"], name=\"dt\")\n if isinstance(td, Index):\n tm.assert_index_equal(td.unique(), expected)\n else:\n tm.assert_numpy_array_equal(td.unique(), expected.values)\n\n td2 = timedelta(1) + (df.dt - df.dt)\n td2 = klass(td2, name=\"dt\")\n result2 = td2.value_counts()\n tm.assert_series_equal(result2, expected_s)\n\n def test_factorize(self):\n for orig in self.objs:\n o = orig.copy()\n\n if isinstance(o, Index) and o.is_boolean():\n exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)\n exp_uniques = o\n exp_uniques = Index([False, True])\n else:\n exp_arr = np.array(range(len(o)), dtype=np.intp)\n exp_uniques = o\n codes, uniques = o.factorize()\n\n tm.assert_numpy_array_equal(codes, exp_arr)\n if isinstance(o, Series):\n tm.assert_index_equal(uniques, Index(orig), check_names=False)\n else:\n # factorize explicitly resets name\n tm.assert_index_equal(uniques, exp_uniques, check_names=False)\n\n def 
test_factorize_repeated(self):\n for orig in self.objs:\n o = orig.copy()\n\n # don't test boolean\n if isinstance(o, Index) and o.is_boolean():\n continue\n\n # sort by value, and create duplicates\n if isinstance(o, Series):\n o = o.sort_values()\n n = o.iloc[5:].append(o)\n else:\n indexer = o.argsort()\n o = o.take(indexer)\n n = o[5:].append(o)\n\n exp_arr = np.array(\n [5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp\n )\n codes, uniques = n.factorize(sort=True)\n\n tm.assert_numpy_array_equal(codes, exp_arr)\n if isinstance(o, Series):\n tm.assert_index_equal(\n uniques, Index(orig).sort_values(), check_names=False\n )\n else:\n tm.assert_index_equal(uniques, o, check_names=False)\n\n exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp)\n codes, uniques = n.factorize(sort=False)\n tm.assert_numpy_array_equal(codes, exp_arr)\n\n if isinstance(o, Series):\n expected = Index(o.iloc[5:10].append(o.iloc[:5]))\n tm.assert_index_equal(uniques, expected, check_names=False)\n else:\n expected = o[5:10].append(o[:5])\n tm.assert_index_equal(uniques, expected, check_names=False)\n\n def test_duplicated_drop_duplicates_index(self):\n # GH 4060\n for original in self.objs:\n if isinstance(original, Index):\n\n # special case\n if original.is_boolean():\n result = original.drop_duplicates()\n expected = Index([False, True], name=\"a\")\n tm.assert_index_equal(result, expected)\n continue\n\n # original doesn't have duplicates\n expected = np.array([False] * len(original), dtype=bool)\n duplicated = original.duplicated()\n tm.assert_numpy_array_equal(duplicated, expected)\n assert duplicated.dtype == bool\n result = original.drop_duplicates()\n tm.assert_index_equal(result, original)\n assert result is not original\n\n # has_duplicates\n assert not original.has_duplicates\n\n # create repeated values, 3rd and 5th values are duplicated\n idx = original[list(range(len(original))) + [5, 3]]\n expected = np.array([False] * len(original) + [True, True], dtype=bool)\n duplicated = idx.duplicated()\n tm.assert_numpy_array_equal(duplicated, expected)\n assert duplicated.dtype == bool\n tm.assert_index_equal(idx.drop_duplicates(), original)\n\n base = [False] * len(idx)\n base[3] = True\n base[5] = True\n expected = np.array(base)\n\n duplicated = idx.duplicated(keep=\"last\")\n tm.assert_numpy_array_equal(duplicated, expected)\n assert duplicated.dtype == bool\n result = idx.drop_duplicates(keep=\"last\")\n tm.assert_index_equal(result, idx[~expected])\n\n base = [False] * len(original) + [True, True]\n base[3] = True\n base[5] = True\n expected = np.array(base)\n\n duplicated = idx.duplicated(keep=False)\n tm.assert_numpy_array_equal(duplicated, expected)\n assert duplicated.dtype == bool\n result = idx.drop_duplicates(keep=False)\n tm.assert_index_equal(result, idx[~expected])\n\n with pytest.raises(\n TypeError,\n match=r\"drop_duplicates\\(\\) got an unexpected keyword argument\",\n ):\n idx.drop_duplicates(inplace=True)\n\n else:\n expected = Series(\n [False] * len(original), index=original.index, name=\"a\"\n )\n tm.assert_series_equal(original.duplicated(), expected)\n result = original.drop_duplicates()\n tm.assert_series_equal(result, original)\n assert result is not original\n\n idx = original.index[list(range(len(original))) + [5, 3]]\n values = original._values[list(range(len(original))) + [5, 3]]\n s = Series(values, index=idx, name=\"a\")\n\n expected = Series(\n [False] * len(original) + [True, True], index=idx, name=\"a\"\n )\n 
tm.assert_series_equal(s.duplicated(), expected)\n tm.assert_series_equal(s.drop_duplicates(), original)\n\n base = [False] * len(idx)\n base[3] = True\n base[5] = True\n expected = Series(base, index=idx, name=\"a\")\n\n tm.assert_series_equal(s.duplicated(keep=\"last\"), expected)\n tm.assert_series_equal(\n s.drop_duplicates(keep=\"last\"), s[~np.array(base)]\n )\n\n base = [False] * len(original) + [True, True]\n base[3] = True\n base[5] = True\n expected = Series(base, index=idx, name=\"a\")\n\n tm.assert_series_equal(s.duplicated(keep=False), expected)\n tm.assert_series_equal(\n s.drop_duplicates(keep=False), s[~np.array(base)]\n )\n\n s.drop_duplicates(inplace=True)\n tm.assert_series_equal(s, original)\n\n def test_drop_duplicates_series_vs_dataframe(self):\n # GH 14192\n df = pd.DataFrame(\n {\n \"a\": [1, 1, 1, \"one\", \"one\"],\n \"b\": [2, 2, np.nan, np.nan, np.nan],\n \"c\": [3, 3, np.nan, np.nan, \"three\"],\n \"d\": [1, 2, 3, 4, 4],\n \"e\": [\n datetime(2015, 1, 1),\n datetime(2015, 1, 1),\n datetime(2015, 2, 1),\n pd.NaT,\n pd.NaT,\n ],\n }\n )\n for column in df.columns:\n for keep in [\"first\", \"last\", False]:\n dropped_frame = df[[column]].drop_duplicates(keep=keep)\n dropped_series = df[column].drop_duplicates(keep=keep)\n tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())\n\n def test_fillna(self):\n # # GH 11343\n # though Index.fillna and Series.fillna has separate impl,\n # test here to confirm these works as the same\n\n for orig in self.objs:\n\n o = orig.copy()\n values = o.values\n\n # values will not be changed\n result = o.fillna(o.astype(object).values[0])\n if isinstance(o, Index):\n tm.assert_index_equal(o, result)\n else:\n tm.assert_series_equal(o, result)\n # check shallow_copied\n assert o is not result\n\n for null_obj in [np.nan, None]:\n for orig in self.objs:\n o = orig.copy()\n klass = type(o)\n\n if not self._allow_na_ops(o):\n continue\n\n if needs_i8_conversion(o):\n\n values = o.astype(object).values\n fill_value = values[0]\n values[0:2] = pd.NaT\n else:\n values = o.values.copy()\n fill_value = o.values[0]\n values[0:2] = null_obj\n\n expected = [fill_value] * 2 + list(values[2:])\n\n expected = klass(expected, dtype=orig.dtype)\n o = klass(values)\n\n # check values has the same dtype as the original\n assert o.dtype == orig.dtype\n\n result = o.fillna(fill_value)\n if isinstance(o, Index):\n tm.assert_index_equal(result, expected)\n else:\n tm.assert_series_equal(result, expected)\n # check shallow_copied\n assert o is not result\n\n @pytest.mark.skipif(PYPY, reason=\"not relevant for PyPy\")\n def test_memory_usage(self):\n for o in self.objs:\n res = o.memory_usage()\n res_deep = o.memory_usage(deep=True)\n\n if is_object_dtype(o) or (\n isinstance(o, Series) and is_object_dtype(o.index)\n ):\n # if there are objects, only deep will pick them up\n assert res_deep > res\n else:\n assert res == res_deep\n\n if isinstance(o, Series):\n assert (\n o.memory_usage(index=False) + o.index.memory_usage()\n ) == o.memory_usage(index=True)\n\n # sys.getsizeof will call the .memory_usage with\n # deep=True, and add on some GC overhead\n diff = res_deep - sys.getsizeof(o)\n assert abs(diff) < 100\n\n def test_searchsorted(self):\n # See gh-12238\n for o in self.objs:\n index = np.searchsorted(o, max(o))\n assert 0 <= index <= len(o)\n\n index = np.searchsorted(o, max(o), sorter=range(len(o)))\n assert 0 <= index <= len(o)\n\n def test_validate_bool_args(self):\n invalid_values = [1, \"True\", [1, 2, 3], 5.0]\n\n for value in 
invalid_values:\n msg = \"expected type bool\"\n with pytest.raises(ValueError, match=msg):\n self.int_series.drop_duplicates(inplace=value)\n\n def test_getitem(self):\n for i in self.indexes:\n s = pd.Series(i)\n\n assert i[0] == s.iloc[0]\n assert i[5] == s.iloc[5]\n assert i[-1] == s.iloc[-1]\n\n assert i[-1] == i[9]\n\n msg = \"index 20 is out of bounds for axis 0 with size 10\"\n with pytest.raises(IndexError, match=msg):\n i[20]\n msg = \"single positional indexer is out-of-bounds\"\n with pytest.raises(IndexError, match=msg):\n s.iloc[20]\n\n @pytest.mark.parametrize(\"indexer_klass\", [list, pd.Index])\n @pytest.mark.parametrize(\n \"indexer\",\n [\n [True] * 10,\n [False] * 10,\n [True, False, True, True, False, False, True, True, False, True],\n ],\n )\n def test_bool_indexing(self, indexer_klass, indexer):\n # GH 22533\n for idx in self.indexes:\n exp_idx = [i for i in range(len(indexer)) if indexer[i]]\n tm.assert_index_equal(idx[indexer_klass(indexer)], idx[exp_idx])\n s = pd.Series(idx)\n tm.assert_series_equal(s[indexer_klass(indexer)], s.iloc[exp_idx])\n\n def test_get_indexer_non_unique_dtype_mismatch(self):\n # GH 25459\n indexes, missing = pd.Index([\"A\", \"B\"]).get_indexer_non_unique(pd.Index([0]))\n tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes)\n tm.assert_numpy_array_equal(np.array([0], dtype=np.int64), missing)\n", "from datetime import datetime, timedelta\n\nimport numpy as np\nimport pytest\n\nfrom pandas import Timedelta, Timestamp\n\nfrom pandas.tseries import offsets\nfrom pandas.tseries.frequencies import to_offset\n\n\nclass TestTimestampArithmetic:\n def test_overflow_offset(self):\n # no overflow expected\n\n stamp = Timestamp(\"2000/1/1\")\n offset_no_overflow = to_offset(\"D\") * 100\n\n expected = Timestamp(\"2000/04/10\")\n assert stamp + offset_no_overflow == expected\n\n assert offset_no_overflow + stamp == expected\n\n expected = Timestamp(\"1999/09/23\")\n assert stamp - offset_no_overflow == expected\n\n def test_overflow_offset_raises(self):\n # xref https://github.com/statsmodels/statsmodels/issues/3374\n # ends up multiplying really large numbers which overflow\n\n stamp = Timestamp(\"2017-01-13 00:00:00\", freq=\"D\")\n offset_overflow = 20169940 * offsets.Day(1)\n msg = (\n \"the add operation between \"\n r\"\\<-?\\d+ \\* Days\\> and \\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2} \"\n \"will overflow\"\n )\n\n with pytest.raises(OverflowError, match=msg):\n stamp + offset_overflow\n\n with pytest.raises(OverflowError, match=msg):\n offset_overflow + stamp\n\n with pytest.raises(OverflowError, match=msg):\n stamp - offset_overflow\n\n # xref https://github.com/pandas-dev/pandas/issues/14080\n # used to crash, so check for proper overflow exception\n\n stamp = Timestamp(\"2000/1/1\")\n offset_overflow = to_offset(\"D\") * 100 ** 25\n\n with pytest.raises(OverflowError, match=msg):\n stamp + offset_overflow\n\n with pytest.raises(OverflowError, match=msg):\n offset_overflow + stamp\n\n with pytest.raises(OverflowError, match=msg):\n stamp - offset_overflow\n\n def test_delta_preserve_nanos(self):\n val = Timestamp(1337299200000000123)\n result = val + timedelta(1)\n assert result.nanosecond == val.nanosecond\n\n def test_rsub_dtscalars(self, tz_naive_fixture):\n # In particular, check that datetime64 - Timestamp works GH#28286\n td = Timedelta(1235345642000)\n ts = Timestamp.now(tz_naive_fixture)\n other = ts + td\n\n assert other - ts == td\n assert other.to_pydatetime() - ts == td\n if tz_naive_fixture is None:\n assert 
other.to_datetime64() - ts == td\n else:\n with pytest.raises(TypeError, match=\"subtraction must have\"):\n other.to_datetime64() - ts\n\n def test_timestamp_sub_datetime(self):\n dt = datetime(2013, 10, 12)\n ts = Timestamp(datetime(2013, 10, 13))\n assert (ts - dt).days == 1\n assert (dt - ts).days == -1\n\n def test_addition_subtraction_types(self):\n # Assert on the types resulting from Timestamp +/- various date/time\n # objects\n dt = datetime(2014, 3, 4)\n td = timedelta(seconds=1)\n # build a timestamp with a frequency, since then it supports\n # addition/subtraction of integers\n ts = Timestamp(dt, freq=\"D\")\n\n msg = \"Addition/subtraction of integers\"\n with pytest.raises(TypeError, match=msg):\n # GH#22535 add/sub with integers is deprecated\n ts + 1\n with pytest.raises(TypeError, match=msg):\n ts - 1\n\n # Timestamp + datetime not supported, though subtraction is supported\n # and yields timedelta more tests in tseries/base/tests/test_base.py\n assert type(ts - dt) == Timedelta\n assert type(ts + td) == Timestamp\n assert type(ts - td) == Timestamp\n\n # Timestamp +/- datetime64 not supported, so not tested (could possibly\n # assert error raised?)\n td64 = np.timedelta64(1, \"D\")\n assert type(ts + td64) == Timestamp\n assert type(ts - td64) == Timestamp\n\n @pytest.mark.parametrize(\n \"freq, td, td64\",\n [\n (\"S\", timedelta(seconds=1), np.timedelta64(1, \"s\")),\n (\"min\", timedelta(minutes=1), np.timedelta64(1, \"m\")),\n (\"H\", timedelta(hours=1), np.timedelta64(1, \"h\")),\n (\"D\", timedelta(days=1), np.timedelta64(1, \"D\")),\n (\"W\", timedelta(weeks=1), np.timedelta64(1, \"W\")),\n (\"M\", None, np.timedelta64(1, \"M\")),\n ],\n )\n def test_addition_subtraction_preserve_frequency(self, freq, td, td64):\n ts = Timestamp(\"2014-03-05 00:00:00\", freq=freq)\n original_freq = ts.freq\n\n assert (ts + 1 * original_freq).freq == original_freq\n assert (ts - 1 * original_freq).freq == original_freq\n\n if td is not None:\n # timedelta does not support months as unit\n assert (ts + td).freq == original_freq\n assert (ts - td).freq == original_freq\n\n assert (ts + td64).freq == original_freq\n assert (ts - td64).freq == original_freq\n\n @pytest.mark.parametrize(\n \"td\", [Timedelta(hours=3), np.timedelta64(3, \"h\"), timedelta(hours=3)]\n )\n def test_radd_tdscalar(self, td):\n # GH#24775 timedelta64+Timestamp should not raise\n ts = Timestamp.now()\n assert td + ts == ts + td\n\n @pytest.mark.parametrize(\n \"other,expected_difference\",\n [\n (np.timedelta64(-123, \"ns\"), -123),\n (np.timedelta64(1234567898, \"ns\"), 1234567898),\n (np.timedelta64(-123, \"us\"), -123000),\n (np.timedelta64(-123, \"ms\"), -123000000),\n ],\n )\n def test_timestamp_add_timedelta64_unit(self, other, expected_difference):\n ts = Timestamp(datetime.utcnow())\n result = ts + other\n valdiff = result.value - ts.value\n assert valdiff == expected_difference\n\n @pytest.mark.parametrize(\"ts\", [Timestamp.now(), Timestamp.now(\"utc\")])\n @pytest.mark.parametrize(\n \"other\",\n [\n 1,\n np.int64(1),\n np.array([1, 2], dtype=np.int32),\n np.array([3, 4], dtype=np.uint64),\n ],\n )\n def test_add_int_no_freq_raises(self, ts, other):\n msg = \"Addition/subtraction of integers and integer-arrays\"\n with pytest.raises(TypeError, match=msg):\n ts + other\n with pytest.raises(TypeError, match=msg):\n other + ts\n\n with pytest.raises(TypeError, match=msg):\n ts - other\n with pytest.raises(TypeError):\n other - ts\n\n @pytest.mark.parametrize(\n \"ts\",\n [\n Timestamp(\"1776-07-04\", 
freq=\"D\"),\n Timestamp(\"1776-07-04\", tz=\"UTC\", freq=\"D\"),\n ],\n )\n @pytest.mark.parametrize(\n \"other\",\n [\n 1,\n np.int64(1),\n np.array([1, 2], dtype=np.int32),\n np.array([3, 4], dtype=np.uint64),\n ],\n )\n def test_add_int_with_freq(self, ts, other):\n\n with pytest.raises(TypeError):\n ts + other\n with pytest.raises(TypeError):\n other + ts\n\n with pytest.raises(TypeError):\n ts - other\n\n with pytest.raises(TypeError):\n other - ts\n" ]
[ [ "pandas.read_excel", "pandas.io.excel.ExcelFile", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.random.random_sample", "numpy.random.randn", "numpy.random.sample", "pandas._testing.assert_frame_equal", "numpy.random.randint", "pandas.Index", "pandas._testing.makeCustomDataframe", "pandas.cut", "pandas._testing.makeDataFrame", "pandas._testing.assert_series_equal", "pandas.set_option", "numpy.zeros", "pandas.option_context", "pandas.io.excel.register_writer", "pandas.date_range", "pandas.util._test_decorators.safe_import", "pandas.io.excel.ExcelWriter", "pandas.MultiIndex.from_arrays", "pandas._testing.ensure_clean", "numpy.ones", "pandas.Period", "pandas.get_option", "pandas.Timestamp", "pandas.util._test_decorators.skip_if_no" ], [ "pandas.core.common.maybe_iterable_to_list", "numpy.asarray", "pandas.core.dtypes.common.is_extension_array_dtype", "numpy.ma.getdata", "numpy.vstack", "pandas._libs.lib.is_scalar", "pandas.core.dtypes.common.is_dtype_equal", "pandas.core.dtypes.cast.maybe_cast_to_datetime", "numpy.issubdtype", "pandas.core.dtypes.common.is_datetime64tz_dtype", "pandas.core.internals.create_block_manager_from_blocks", "pandas.core.common.dict_compat", "pandas.core.common.values_from_object", "pandas.core.indexes.api.get_objs_combined_axis", "numpy.ma.getmaskarray", "pandas.core.series.Series", "numpy.arange", "pandas.core.dtypes.cast.maybe_convert_platform", "pandas._libs.lib.fast_unique_multiple_list_gen", "pandas.core.indexes.base.default_index", "pandas.core.dtypes.common.is_categorical_dtype", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.common.is_list_like", "pandas.core.internals.blocks.make_block", "pandas.core.indexes.api.Index", "pandas.core.indexes.api.union_indexes", "pandas._libs.lib.to_object_array", "pandas.core.dtypes.cast.maybe_infer_to_datetimelike", "pandas._libs.lib.fast_multiget", "pandas._libs.lib.to_object_array_tuples", "pandas.core.internals.create_block_manager_from_arrays", "pandas.core.algorithms.take_1d", "pandas.core.construction.sanitize_array", "pandas.core.dtypes.common.is_object_dtype", "pandas.core.dtypes.cast.maybe_upcast", "pandas.core.indexes.api.ensure_index", "pandas._libs.lib.maybe_convert_objects", "numpy.empty" ], [ "pandas.core.indexes.api.Index", "pandas.core.dtypes.common.is_list_like", "pandas.core.common.get_callable_name", "pandas.core.dtypes.common.is_dict_like" ], [ "pandas.CategoricalIndex", "pandas.concat", "pandas.Series", "numpy.random.seed", "pandas.period_range", "numpy.arange", "pandas.option_context", "pandas.DataFrame", "numpy.random.randn", "numpy.random.rand", "pandas.date_range" ], [ "pandas.to_datetime", "pandas.Series", "pandas._testing.makeBoolIndex", "pandas.core.dtypes.common.is_datetime64tz_dtype", "numpy.random.randn", "pandas.core.dtypes.common.is_datetime64_dtype", "pandas._testing.makeDateIndex", "pandas.isna", "pandas._testing.makePeriodIndex", "pandas._testing.assert_numpy_array_equal", "numpy.unique", "pandas._testing.makeFloatIndex", "pandas.Index", "pandas.DatetimeIndex", "pandas._testing.assert_extension_array_equal", "pandas._testing.assert_series_equal", "pandas._testing.assert_index_equal", "pandas.read_fwf", "numpy.random.choice", "pandas.Timedelta", "pandas._testing.makeIntIndex", "pandas.Interval", "numpy.transpose", "numpy.array", "pandas.core.dtypes.common.needs_i8_conversion", "pandas.TimedeltaIndex", "pandas._testing.makeStringIndex", "pandas.compat.numpy.np_array_datetime64_compat", "pandas.IntervalIndex.from_breaks", 
"pandas.core.dtypes.common.is_object_dtype", "pandas._testing.makeUnicodeIndex" ], [ "pandas.tseries.offsets.Day", "pandas.tseries.frequencies.to_offset", "pandas.Timedelta", "numpy.timedelta64", "numpy.int64", "pandas.Timestamp.now", "numpy.array", "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.24" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jainyk/package-outlier
[ "f1f8206f0d01ad7bf1b25f2cf551381ebe0ba1ed" ]
[ "testing methods/testing_angle_based.py" ]
[ "import scipy\nimport scipy.stats as ss\nimport numpy as np\nimport matplotlib\nimport pandas as pd\nimport random\nimport math\n\n\ndef iqr_threshold_method(scores, margin):\n q1 = np.percentile(scores, 25, interpolation='midpoint')\n q3 = np.percentile(scores, 75, interpolation='midpoint')\n iqr = q3-q1\n lower_range = q1 - (1.5 * iqr)\n upper_range = q3 + (1.5 * iqr)\n lower_range = lower_range - margin\n upper_range = upper_range + margin\n return lower_range, upper_range\n\n\ndef angle(point1, point2, point3):\n v21 = np.subtract(point2, point1)\n v31 = np.subtract(point3, point1)\n dot_product = (v21*v31).sum()\n normalization = np.linalg.norm(v21)*np.linalg.norm(v31)\n acute_angle = np.arccos(dot_product/normalization)\n return acute_angle\n\n\ndef eval_angle_point(point, data):\n angles_data = []\n for index_b, b in enumerate(data):\n if (np.array_equal(b, point)):\n continue\n\n for c in data[index_b + 1:]:\n if (np.array_equal(c, point)) or (np.array_equal(c, b)):\n continue\n angles_data.append(angle(point, b, c))\n return angles_data\n\n\n\ndef AngleOutlier(data, margin=0):\n \"\"\"Returns numpy array with data points labelled as outliers\n Parameters\n ----------\n data: numpy 2d array like data points\n\n margin: int, default=0\n Margin of error\n \"\"\"\n no_of_data_point = data.shape[0]\n variance_of_each_datapoint = []\n\n for i in range(0, no_of_data_point):\n point = data[i]\n temp = eval_angle_point(point, data)\n variance_of_each_datapoint.append(np.var(temp))\n\n lower_range, upper_range = iqr_threshold_method(variance_of_each_datapoint, margin)\n\n outlier_points = []\n\n for i in range(0, no_of_data_point):\n if variance_of_each_datapoint[i] < lower_range or variance_of_each_datapoint[i] > upper_range:\n outlier_points.append(data[i])\n\n return outlier_points, lower_range, upper_range, variance_of_each_datapoint\n\n\nif __name__=='__main__':\n np.random.seed(16)\n\n normal_mean = np.array([1.0, 2.0])\n normal_covariance = np.array([[0.2, 0.0], [0.0, 0.1]])\n normal_data = np.random.multivariate_normal(normal_mean, normal_covariance, 10)\n\n anomaly_mean = np.array([6.0, 8.0])\n anomaly_covariance = np.array([[2.0, 0.0], [0.0, 4.0]])\n anomaly_data = np.random.multivariate_normal(anomaly_mean, anomaly_covariance, 10)\n all_data = np.concatenate((normal_data, anomaly_data), axis=0)\n print(all_data)\n print(all_data.shape)\n# point = all_data[0]\n #print(point)\n #res = eval_angle_point(point, all_data)\n res = AngleOutlier(all_data)\n print(res)\n #print(res)\n" ]
[ [ "numpy.random.seed", "numpy.array_equal", "numpy.random.multivariate_normal", "numpy.subtract", "numpy.arccos", "numpy.percentile", "numpy.linalg.norm", "numpy.concatenate", "numpy.var", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ccp4/metrix_ml
[ "61646355d9218d590b99937cf289713763df1d23", "61646355d9218d590b99937cf289713763df1d23" ]
[ "metrix_ml/utils/ColumnTransformation.py", "metrix_ml/pre_processing/feature_pairplot_plotting.py" ]
[ "import pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom datetime import datetime\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nclass ColumnTransformation(BaseEstimator, TransformerMixin):\n '''A class to run various column transformation steps to prepare the\n input until I am able to implement this step in the database code'''\n \n '''This works as long as I don't run it within a scikit learn pipeline'''\n \n def __init__(self):\n pass\n \n def fit(self, df, y=None):\n return self\n\n def transform(self, df, y=None):\n #MW_ASU\n df['MW_ASU'] = df['MW_chain'] * df['No_mol_ASU']\n\n #MW_chain/No_atom_chain\n df['MW_chain/No_atom_chain'] = df['MW_chain'] / df['No_atom_chain']\n\n #wavelength**3\n df['wavelength**3'] = df['wavelength'] ** 3\n\n #wilson\n df['wilson'] = -2 * df['wilsonbfactor']\n\n #bragg\n df['bragg'] = (1 / df['highreslimit'])**2\n\n #MW_ASU/sites_ASU\n df['MW_ASU/sites_ASU'] = df['MW_ASU'] / df['sites_ASU']\n \n #wavelenght**3/Vcell\n df['wavelength**3/Vcell'] = df['wavelength**3'] / df['Vcell']\n \n #Vcell/Vm<Ma>\n df['Vcell/Vm<Ma>'] = df['Vcell'] / (df['Matth_coeff'] * df['MW_chain/No_atom_chain'])\n\n #MW_ASU/sites_ASU/solvent_content\n df['MW_ASU/sites_ASU/solvent_content'] = df['MW_ASU/sites_ASU'] / df['solvent_content']\n\n #use np.exp to work with series object\n df['volume_wilsonB_highres'] = df['Vcell/Vm<Ma>'] * np.exp(df['wilson'] * df['bragg'])\n\n return df.round(decimals=4).to_csv(os.path.join(METRIX_PATH, \"data_transform.csv\"))\n\n#c = ColumnTransformation()\n#c.transform(metrix)\n", "# set up environment\n# define command line parameters\n# define location of input data\n# create output directories\n# start the class FeatureCorrelations\n\nimport argparse\nimport os\nimport csv\nimport pathlib\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom datetime import datetime\nfrom scipy.stats import pearsonr#, betai\nfrom sklearn.model_selection import train_test_split\n\n###############################################################################\n#\n# define command line arguments\n#\n###############################################################################\n\ndef parse_command_line():\n '''defining the command line input to make it runable'''\n parser = argparse.ArgumentParser(description='various plots for feature analysis')\n \n parser.add_argument(\n '--input', \n type=str, \n dest=\"input\",\n default=\"\",\n help='The input CSV file')\n \n parser.add_argument(\n '--outdir',\n type=str,\n dest='outdir',\n default='',\n help='Specify output directory')\n\n args = parser.parse_args()\n if args.input == '':\n parser.print_help()\n exit(0)\n return args\n\n###############################################################################\n#\n# load the data from CSV file and creating output directory\n#\n###############################################################################\n\ndef load_data(csv_path):\n '''load the raw data as stored in CSV file'''\n return pd.read_csv(csv_path, na_filter=False, skipinitialspace=True, thousands=',')\n\ndef make_output_folder(outdir):\n name = os.path.join(outdir, 'feature_pair_plot')\n os.makedirs(name, exist_ok=True)\n return name\n\n###############################################################################\n#\n# class to analyse correlations between features\n#\n###############################################################################\n\n\nclass FeaturePairPlot(object):\n '''A class to help analyse 
the data;\n try to identify linear correlations in the data;\n calculate Pearson Correlation Coefficient with and without\n p-values; create a scatter matrix; inout data must not contain\n any strings or NaN values; also remove any columns with \n categorical data or transform them first; remove any text except column labels'''\n\n def __init__(self, data, feature_pair_plot):\n self.data = data\n self.feature_pair_plot = feature_pair_plot\n self.prepare_metrix_data()\n self.split_data()\n self.plot_pair()\n \n ###############################################################################\n #\n # creating data frame with column transformations\n #\n ###############################################################################\n\n def prepare_metrix_data(self):\n '''Function to create smaller dataframes for directly after dataprocessing, after\n adding some protein information and after carrying out some custom solumn\n transformations.\n ******\n Input: large data frame\n Output: smaller dataframes; database, man_add, transform\n '''\n print('*' *80)\n print('* Preparing input dataframe')\n print('*' *80)\n\n# #database plus manually added data\n# attr_newdata_initial = ['IoverSigma', 'cchalf', 'RmergediffI', 'RmergeI', 'RmeasI',\n# 'RmeasdiffI', 'RpimdiffI', 'RpimI', 'totalobservations',\n# 'totalunique', 'multiplicity', 'completeness', 'lowreslimit',\n# 'highreslimit', 'wilsonbfactor', 'anomalousslope',\n# 'anomalousCC', 'anomalousmulti', 'anomalouscompl', 'diffI',\n# 'diffF', 'f', 'wavelength', 'sg_number', 'cell_a', 'cell_b', 'cell_c',\n# 'cell_alpha', 'cell_beta', 'cell_gamma', 'Vcell', 'solvent_content',\n# 'Matth_coeff', 'No_atom_chain', 'No_mol_ASU',\n# 'MW_chain', 'sites_ASU', 'EP_success']\n\n# attr_newdata_transform = ['IoverSigma', 'cchalf', 'RmergediffI', 'RmergeI', 'RmeasI',\n# 'RmeasdiffI', 'RpimdiffI', 'RpimI', 'totalobservations',\n# 'totalunique', 'multiplicity', 'completeness', 'lowreslimit',\n# 'highreslimit', 'wilsonbfactor', 'anomalousslope',\n# 'anomalousCC', 'anomalousmulti', 'anomalouscompl', 'diffI',\n# 'diffF', 'f', 'wavelength', 'wavelength**3', 'wavelength**3_Vcell',\n# 'sg_number', 'cell_a', 'cell_b', 'cell_c', 'cell_alpha',\n# 'cell_beta', 'cell_gamma','Vcell', 'solvent_content',\n# 'Vcell_Vm<Ma>', 'Matth_coeff', 'MW_ASU_sites_ASU_solvent_content',\n# 'MW_chain', 'No_atom_chain', 'No_mol_ASU', 'MW_ASU', 'sites_ASU',\n# 'MW_ASU_sites_ASU', 'MW_chain_No_atom_chain', 'wilson', 'bragg',\n# 'volume_wilsonB_highres', 'IoverSigma_MW_ASU', 'EP_success']\n \n# attr_newdata_transform = ['IoverSigma', 'cchalf', 'RmergediffI', 'RmergeI', 'RmeasI',\n# 'RmeasdiffI', 'RpimdiffI', 'RpimI', 'totalobservations',\n# 'totalunique', 'multiplicity', 'completeness', 'lowreslimit',\n# 'highreslimit', 'wilsonbfactor', 'anomalousslope',\n# 'anomalousCC', 'anomalousmulti', 'anomalouscompl', 'diffI',\n# 'diffF', 'f',\n# 'sg_number', 'cell_a', 'cell_b', 'cell_c', 'cell_alpha',\n# 'cell_beta', 'cell_gamma','Vcell', 'solvent_content',\n# 'Vcell_Vm<Ma>', 'Matth_coeff', 'MW_ASU_sites_ASU_solvent_content',\n# 'MW_chain', 'No_atom_chain', 'No_mol_ASU', 'MW_ASU', 'sites_ASU',\n# 'MW_ASU_sites_ASU', 'MW_chain_No_atom_chain', 'bragg',\n# 'volume_wilsonB_highres', 'IoverSigma_MW_ASU', 'EP_success']\n\n# data_initial = self.data[attr_newdata_initial]\n# self.X_data_initial = data_initial\n\n# data_transform = data_initial.copy()\n\n# with open(os.path.join(self.feature_pair_plot, 'feature_pair_plot.txt'), 'a') as text_file:\n# text_file.write('Preparing input data as data_initial with following 
attributes %s \\n' %(attr_newdata_initial))\n\n# #column transformation\n# #MW_ASU\n# data_transform['MW_ASU'] = data_transform['MW_chain'] * data_transform['No_mol_ASU']\n#\n# #MW_ASU/sites_ASU\n# data_transform['MW_ASU_sites_ASU'] = data_transform['MW_ASU'] / data_transform['sites_ASU']\n# \n# #IoverSigma/MW_ASU\n# data_transform['IoverSigma_MW_ASU'] = data_transform['IoverSigma'] / data_transform['MW_ASU']\n#\n# #MW_chain/No_atom_chain\n# data_transform['MW_chain_No_atom_chain'] = data_transform['MW_chain'] / data_transform['No_atom_chain']\n#\n# #MW_ASU/sites_ASU/solvent_content\n# data_transform['MW_ASU_sites_ASU_solvent_content'] = data_transform['MW_ASU_sites_ASU'] / data_transform['solvent_content']\n#\n# #wavelength**3\n# data_transform['wavelength**3'] = data_transform['wavelength'] ** 3\n#\n# #wavelenght**3/Vcell\n# data_transform['wavelength**3_Vcell'] = data_transform['wavelength**3'] / data_transform['Vcell']\n#\n# #Vcell/Vm<Ma>\n# data_transform['Vcell_Vm<Ma>'] = data_transform['Vcell'] / (data_transform['Matth_coeff'] * data_transform['MW_chain_No_atom_chain'])\n#\n# #wilson\n# data_transform['wilson'] = -2 * data_transform['wilsonbfactor']\n#\n# #bragg\n# data_transform['bragg'] = (1 / data_transform['highreslimit'])**2\n#\n# #use np.exp to work with series object\n# data_transform['volume_wilsonB_highres'] = data_transform['Vcell_Vm<Ma>'] * np.exp(data_transform['wilson'] * data_transform['bragg'])\n \n# self.X_data_transform = data_transform\n \n# self.X_data_transform = self.X_data_transform.fillna(0)\n \n# self.best_features = self.X_data_transform[['lowreslimit', 'anomalousslope', 'anomalousCC', 'diffI', 'diffF', 'f', 'EP_success']]\n\n self.X_data_transform = self.data[[\"no_res\", \"no_frag\", \"longest_frag\", \"res_frag_ratio\", \"mapCC\", \"EP_success\"]]\n\n self.X_data_transform = self.X_data_transform.fillna(0)\n\n \n with open(os.path.join(self.feature_pair_plot, 'feature_pair_plot.txt'), 'a') as text_file:\n text_file.write('Created the following dataframe: data_transform \\n')\n text_file.write(str(self.X_data_transform.columns)+'\\n') \n\n ###############################################################################\n #\n # creating training and test set for each of the 3 dataframes\n #\n ###############################################################################\n\n def split_data(self):\n '''Function which splits the input data into training set and test set.\n ******\n Input: a dataframe that contains the features and labels in columns and the samples\n in rows\n Output: sets of training and test data with an 80/20 split; X_train, X_test, y_train,\n y_test\n '''\n print('*' *80)\n print('* Splitting data into test and training set with test=20%')\n print('*' *80)\n\n y = self.data['EP_success']\n \n#normal split of samples \n# X_transform_train, X_transform_test, y_train, y_test = train_test_split(self.X_transform, y, test_size=0.2, random_state=42)\n\n#stratified split of samples\n X_data_transform_train, X_data_transform_test, y_train, y_test = train_test_split(self.X_data_transform, y, test_size=0.2, random_state=42)#stratify=y\n \n assert self.X_data_transform.columns.all() == X_data_transform_train.columns.all()\n\n self.X_data_transform_train = X_data_transform_train\n self.X_data_transform_test = X_data_transform_test\n self.y_train = y_train\n self.y_test = y_test\n\n\n################################################################################\n#\n# plotting feature 
pairs\n#\n################################################################################\n\n def plot_pair(self):\n '''Plot a histogram for each feature.'''\n print('*' *80)\n print('* Plotting feature pairs')\n print('*' *80)\n \n def corrfunc(x, y, **kws):\n (r, p) = pearsonr(x, y)\n #print(r, p)\n ax = plt.gca()\n #use commented-out lines below if all features used;\n# ax.annotate(\"r = {:.2f} \".format(r),\n# xy=(.1, .9), xycoords=ax)\n# ax.annotate(\"p = {:.3f}\".format(p),\n# xy=(.1, .8), xycoords=ax) \n datestring = datetime.strftime(datetime.now(), '%Y%m%d_%H%M')\n #graph = sns.pairplot(self.best_features, hue='EP_success')\n graph = sns.pairplot(self.X_data_transform_train, hue='EP_success')\n graph.map(corrfunc)\n handles = graph._legend_data.values()\n labels = graph._legend_data.keys()\n graph.fig.legend(handles=handles, labels=labels, loc='upper center', ncol=1)\n graph.fig.subplots_adjust(top=0.92)\n plt.tight_layout()\n plt.savefig(os.path.join(self.feature_pair_plot, 'pairplot_'+datestring+'.png'))\n \ndef run():\n args = parse_command_line()\n \n \n ###############################################################################\n\n #look at the imported data to get an idea what we are working with\n data = load_data(args.input)\n\n feature_pair_plot = make_output_folder(args.outdir)\n\n ###############################################################################\n\n feature_pair_plot = FeaturePairPlot(data, feature_pair_plot)\n\n" ]
[ [ "numpy.exp" ], [ "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "pandas.read_csv", "scipy.stats.pearsonr", "sklearn.model_selection.train_test_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
agnes-yang/PytorchNetHub
[ "e6bcb57b8ffb61b53ebdb9a09f6b25e516b4efc7", "e6bcb57b8ffb61b53ebdb9a09f6b25e516b4efc7" ]
[ "Yolov1_pytorch/models/net.py", "Yolov1_pytorch/utils/predictUtils.py" ]
[ "#encoding:utf-8\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nimport math\nimport torch.nn.functional as F\n'''\n只用了vgg16方法\n'''\n\n__all__ = [\n 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',\n 'vgg19_bn', 'vgg19',\n]\n\n\nmodel_urls = {\n 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',\n 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',\n 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',\n 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',\n 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',\n 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',\n 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',\n 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',\n}\n\n\nclass VGG(nn.Module):\n\n def __init__(self, features, num_classes=1000):\n super(VGG, self).__init__()\n self.features = features\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, num_classes),\n )\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n # 从vgg16得到输出,经过sigmoid 归一化到0-1之间\n x = F.sigmoid(x)\n # 再改变形状,返回(xxx,7,7,30) xxx代表几张照片,(7,7,30)代表一张照片的信息\n x = x.view(-1,7,7,30)\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\ndef make_layers(cfg, batch_norm=False):\n layers = []\n in_channels = 3\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\n\ncfg = {\n 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\ndef vgg11(pretrained=False, **kwargs):\n \"\"\"VGG 11-layer model (configuration \"A\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['A']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg11']))\n return model\n\n\ndef vgg11_bn(pretrained=False, **kwargs):\n \"\"\"VGG 11-layer model (configuration \"A\") with batch normalization\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))\n return model\n\n\ndef vgg13(pretrained=False, **kwargs):\n \"\"\"VGG 13-layer model (configuration \"B\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n 
\"\"\"\n model = VGG(make_layers(cfg['B']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))\n return model\n\n\ndef vgg13_bn(pretrained=False, **kwargs):\n \"\"\"VGG 13-layer model (configuration \"B\") with batch normalization\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg13_bn']))\n return model\n\n\ndef vgg16(pretrained=False, **kwargs):\n \"\"\"VGG 16-layer model (configuration \"D\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['D']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))\n return model\n\n\ndef vgg16_bn(pretrained=False, **kwargs):\n \"\"\"VGG 16-layer model (configuration \"D\") with batch normalization\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))\n return model\n\n\ndef vgg19(pretrained=False, **kwargs):\n \"\"\"VGG 19-layer model (configuration \"E\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['E']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg19']))\n return model\n\n\ndef vgg19_bn(pretrained=False, **kwargs):\n \"\"\"VGG 19-layer model (configuration 'E') with batch normalization\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn']))\n return model\n\ndef test():\n import torch\n from torch.autograd import Variable\n model = vgg16()\n # 提取特征层不动\n # 修改分类层最后三层全连接层\n # 修改vgg的分类层结构\n model.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n # 最后一层修改为1470 即为1470代表一张图的信息(1470=7x7x30)\n nn.Linear(4096, 1470),\n )\n print(model.classifier[6])\n print('=============================')\n print(model)\n print('=============================')\n img = torch.rand(2,3,224,224)\n img = Variable(img)\n output = model(img)\n print(output.size())\n\nif __name__ == '__main__':\n test()", "import cv2\nimport torch\nimport numpy as np\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nfrom config import opt\ndef predict_result(model,image_name,root_path=''):\n '''\n 预测一张测试照片\n '''\n result = []\n image = cv2.imread(root_path+image_name)\n h,w,_ = image.shape\n # 将图像规范化到(224,224)\n img = cv2.resize(image,(224,224))\n # 转换为RGB\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n mean = (123,117,104)#RGB\n # 减去均值\n img = img - np.array(mean,dtype=np.float32)\n #对图像进行转化\n transform = transforms.Compose([transforms.ToTensor(),])\n img = transform(img)\n # volatile相当于requires_grad=False,不保存中间变量。仅用于纯推断\n img = Variable(img[None,:,:,:],volatile=True)\n if opt.use_gpu:\n img = img.cuda()\n pred = model(img) #1x7x7x30\n pred = pred.cpu()\n # 将网络输出结果转化为 可视化格式\n boxes,cls_indexs,probs = decoder(pred)\n # 遍历一张图像上所有的预测候选框\n for i,box in enumerate(boxes):\n x1 = int(box[0]*w)\n x2 = int(box[2]*w)\n y1 = int(box[1]*h)\n y2 = int(box[3]*h)\n cls_index = 
cls_indexs[i]\n cls_index = int(cls_index) # convert LongTensor to int\n prob = probs[i]\n prob = float(prob)\n result.append([(x1,y1),(x2,y2),opt.VOC_CLASSES[cls_index],image_name,prob])\n return result\ndef decoder(pred):\n '''\n 解码\n pred (tensor) 1x7x7x30\n return (tensor) box[[x1,y1,x2,y2]] label[...]\n '''\n boxes=[]\n cls_indexs=[]\n probs = []\n cell_size = 1./7\n pred = pred.data\n pred = pred.squeeze(0) #7x7x30\n contain1 = pred[:,:,4].unsqueeze(2)\n contain2 = pred[:,:,9].unsqueeze(2)\n contain = torch.cat((contain1,contain2),2)\n mask1 = contain > 0.9 #大于阈值\n mask2 = (contain==contain.max()) #we always select the best contain_prob what ever it>0.9\n mask = (mask1+mask2).gt(0)\n min_score,min_index = torch.min(mask,2) #每个cell只选最大概率的那个预测框\n for i in range(7):\n for j in range(7):\n for b in range(2):\n index = min_index[i,j]\n mask[i,j,index] = 0\n if mask[i,j,b] == 1:\n #print(i,j,b)\n box = pred[i,j,b*5:b*5+4]\n contain_prob = torch.FloatTensor([pred[i,j,b*5+4]])\n xy = torch.FloatTensor([j,i])*cell_size #cell左上角 up left of cell\n box[:2] = box[:2]*cell_size + xy # return cxcy relative to image\n box_xy = torch.FloatTensor(box.size())#转换成xy形式 convert[cx,cy,w,h] to [x1,xy1,x2,y2]\n box_xy[:2] = box[:2] - 0.5*box[2:]\n box_xy[2:] = box[:2] + 0.5*box[2:]\n max_prob,cls_index = torch.max(pred[i,j,10:],0)\n boxes.append(box_xy.view(1,4))\n cls_indexs.append(cls_index)\n probs.append(contain_prob)\n boxes = torch.cat(boxes,0) #(n,4)\n probs = torch.cat(probs,0) #(n,)\n cls_indexs = torch.cat(cls_indexs,0) #(n,)\n keep = nms(boxes,probs)\n return boxes[keep],cls_indexs[keep],probs[keep]\n\ndef nms(bboxes,scores,threshold=0.5):\n '''\n bboxes(tensor) [N,4]\n scores(tensor) [N,]\n '''\n x1 = bboxes[:,0]\n y1 = bboxes[:,1]\n x2 = bboxes[:,2]\n y2 = bboxes[:,3]\n areas = (x2-x1) * (y2-y1)\n\n _,order = scores.sort(0,descending=True)\n keep = []\n while order.numel() > 0:\n i = order[0]\n keep.append(i)\n\n if order.numel() == 1:\n break\n\n xx1 = x1[order[1:]].clamp(min=x1[i])\n yy1 = y1[order[1:]].clamp(min=y1[i])\n xx2 = x2[order[1:]].clamp(max=x2[i])\n yy2 = y2[order[1:]].clamp(max=y2[i])\n\n w = (xx2-xx1).clamp(min=0)\n h = (yy2-yy1).clamp(min=0)\n inter = w*h\n\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n ids = (ovr<=threshold).nonzero().squeeze()\n if ids.numel() == 0:\n break\n order = order[ids+1]\n return torch.LongTensor(keep)\n\n\ndef voc_ap(rec, prec, use_07_metric=False):\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n\n else:\n # correct ap caculation\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n\n return ap\n\n\ndef voc_eval(preds, target, VOC_CLASSES=opt.VOC_CLASSES, threshold=0.5, use_07_metric=False, ):\n '''\n preds {'cat':[[image_id,confidence,x1,y1,x2,y2],...],'dog':[[],...]}\n target {(image_id,class):[[],]}\n\n 举例:\n preds = {\n 'cat': [['image01', 0.9, 20, 20, 40, 40], ['image01', 0.8, 20, 20, 50, 50], ['image02', 0.8, 30, 30, 50, 50]],\n 'dog': [['image01', 0.78, 60, 60, 90, 90]]}\n target = {('image01', 'cat'): [[20, 20, 41, 41]], ('image01', 'dog'): [[60, 60, 91, 91]],\n ('image02', 'cat'): [[30, 30, 51, 51]]}\n '''\n aps = []\n # 遍历所有的类别\n for i, class_ in enumerate(VOC_CLASSES):\n pred = 
preds[class_] # [[image_id,confidence,x1,y1,x2,y2],...]\n if len(pred) == 0: # 如果这个类别一个都没有检测到的异常情况\n ap = -1\n print('---class {} ap {}---'.format(class_, ap))\n aps += [ap]\n break\n # print(pred)\n image_ids = [x[0] for x in pred]\n confidence = np.array([float(x[1]) for x in pred])\n BB = np.array([x[2:] for x in pred])\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n npos = 0.\n for (key1, key2) in target:\n if key2 == class_:\n npos += len(target[(key1, key2)]) # 统计这个类别的正样本,在这里统计才不会遗漏\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d, image_id in enumerate(image_ids):\n bb = BB[d] # 预测框\n if (image_id, class_) in target:\n BBGT = target[(image_id, class_)] # [[],]\n for bbgt in BBGT:\n # compute overlaps\n # intersection\n ixmin = np.maximum(bbgt[0], bb[0])\n iymin = np.maximum(bbgt[1], bb[1])\n ixmax = np.minimum(bbgt[2], bb[2])\n iymax = np.minimum(bbgt[3], bb[3])\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n union = (bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) + (bbgt[2] - bbgt[0] + 1.) * (\n bbgt[3] - bbgt[1] + 1.) - inters\n if union == 0:\n print(bb, bbgt)\n\n overlaps = inters / union\n if overlaps > threshold:\n tp[d] = 1\n BBGT.remove(bbgt) # 这个框已经匹配到了,不能再匹配\n if len(BBGT) == 0:\n del target[(image_id, class_)] # 删除没有box的键值\n break\n fp[d] = 1 - tp[d]\n else:\n fp[d] = 1\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n # print(rec,prec)\n ap = voc_ap(rec, prec, use_07_metric)\n print('---class {} ap {}---'.format(class_, ap))\n aps += [ap]\n print('---map {}---'.format(np.mean(aps)))" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.functional.sigmoid", "torch.nn.MaxPool2d", "torch.rand", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.utils.model_zoo.load_url", "torch.autograd.Variable" ], [ "numpy.minimum", "torch.max", "torch.cat", "numpy.cumsum", "numpy.concatenate", "numpy.max", "numpy.mean", "torch.FloatTensor", "numpy.where", "torch.autograd.Variable", "numpy.arange", "numpy.finfo", "numpy.zeros", "torch.LongTensor", "torch.min", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.maximum", "numpy.sort" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CIDARLAB/genetic-circuit-partitioning
[ "b748e111e100eecc70b5978d6133ba816b8d7f5f", "b748e111e100eecc70b5978d6133ba816b8d7f5f" ]
[ "2021.4/bin/test.py", "2021.4/bin/plot_partition.py" ]
[ "import timeit\nfrom copy import deepcopy\n# import genetic_partition_test as gp \nimport networkx as nx\nimport numpy as np\nimport ujson\n\nm1 = np.zeros(shape=(6,6))\nm2 = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])\n\nfor i in range(m2.shape[0]):\n\tfor j in range(m2.shape[1]):\n\t\tm1[i][j] = m2[i][j]\nprint(m1)", "import numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport genetic_partition_test as gp\n\ndef load_graph (edgelist):\n G = nx.read_edgelist (edgelist, nodetype = str, create_using=nx.DiGraph())\n return G\n\ndef community_layout(g, partition):\n \"\"\"\n Compute the layout for a modular graph.\n\n\n Arguments:\n ----------\n g -- networkx.Graph or networkx.DiGraph instance\n graph to plot\n\n partition -- dict mapping int node -> int community\n graph partitions\n\n\n Returns:\n --------\n pos -- dict mapping int node -> (float x, float y)\n node positions\n\n \"\"\"\n\n pos_communities = _position_communities(g, partition, scale=3.)\n\n pos_nodes = _position_nodes(g, partition, scale=1.)\n\n # combine positions\n pos = dict()\n for node in g.nodes():\n print ('community position', pos_communities[node])\n print ('node position', pos_nodes[node])\n pos[node] = pos_communities[node] + pos_nodes[node]\n print('new node position', pos[node])\n\n return pos\n\ndef _position_communities(g, partition, **kwargs):\n\n # create a weighted graph, in which each node corresponds to a community,\n # and each edge weight to the number of edges between communities\n between_community_edges = _find_between_community_edges(g, partition)\n\n communities = set(partition.values())\n hypergraph = nx.DiGraph()\n hypergraph.add_nodes_from(communities)\n for (ci, cj), edges in between_community_edges.items():\n hypergraph.add_edge(ci, cj, weight=len(edges))\n\n # find layout for communities\n pos_communities = nx.spring_layout(hypergraph, **kwargs)\n\n # set node positions to position of community\n pos = dict()\n for node, community in partition.items():\n pos[node] = pos_communities[community]\n\n return pos\n\ndef _find_between_community_edges(g, partition):\n\n edges = dict()\n\n for (ni, nj) in g.edges():\n ci = partition[ni]\n cj = partition[nj]\n\n if ci != cj:\n try:\n edges[(ci, cj)] += [(ni, nj)]\n except KeyError:\n edges[(ci, cj)] = [(ni, nj)]\n\n return edges\n\ndef _position_nodes(g, partition, **kwargs):\n \"\"\"\n Positions nodes within communities.\n \"\"\"\n\n communities = dict()\n for node, community in partition.items():\n try:\n communities[community] += [node]\n except KeyError:\n communities[community] = [node]\n\n pos = dict()\n for ci, nodes in communities.items():\n subgraph = g.subgraph(nodes)\n pos_subgraph = nx.spring_layout(subgraph, **kwargs)\n pos.update(pos_subgraph)\n\n return pos\n\ndef plot_partition ():\n\n outdir = \"/Users/jgzhang/Work/Densmore_lab/Partition/code_version/v2/genetic-circuit-partitioning/2021.4/\"\n bm_path = outdir + 'runs/benchmark/electronic-circuits/RCA4/'\n sol_path = outdir + 'runs/results/electronic-circuits/RCA4/nparts/14/optimized_lc/'\n part_sol = sol_path + 'part_solns.txt'\n\n edgelist = bm_path + '/DAG.edgelist'\n G = load_graph (edgelist)\n in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes (G)\n G_primitive = gp.get_G_primitive (G, nonprimitives)\n\n solDict = gp.load_opt_part_sol (part_sol)\n iteration = 6\n\n part = solDict[iteration]['part']\n node_to_partDict = {}\n for n in G_primitive.nodes():\n node_to_partDict[n] = gp.get_part(part, n)\n\n pos = 
community_layout(G_primitive, node_to_partDict)\n\n nx.draw(G_primitive, pos, node_color=list(node_to_partDict.values()))\n plt.show()\n return\n\n\nif __name__ == '__main__':\n plot_partition() " ]
[ [ "numpy.array", "numpy.zeros" ], [ "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jarilq/aura-core
[ "7880ed265396bf8c89b783835853328e6d7d1589" ]
[ "tools/auralink/current.py" ]
[ "import math\n\nfrom props import root, getNode\n\nairdata_node = getNode('/sensors/airdata', True)\nfilter_node = getNode('/filters/filter[0]', True)\npilot_node = getNode('/sensors/pilot_input', True)\nstatus_node = getNode('/status', True)\npos_node = getNode(\"/position\", True)\nvel_node = getNode(\"/velocity\", True)\ntargets_node = getNode(\"/autopilot/targets\", True)\ntecs_node = getNode(\"/autopilot/tecs\", True)\npower_node = getNode(\"/sensors/power\", True)\ntecs_config_node = getNode(\"/config/autopilot/TECS\", True)\n\nr2d = 180.0 / math.pi\nmps2kt = 1.9438444924406046432\nkt2mps = 0.5144444444444444444\nft2m = 0.3048\ng = 9.81\n\nlast_time = 0.0\n\n# crude battery % interpolation model\n# 100 - 4.2\n# 83% - 3.8\n# 27% - 3.65\n# 0% - 3.5\nbatv = [ 3.3, 3.50, 3.65, 3.80, 4.20 ]\nbatp = [ 0.0, 0.05, 0.27, 0.83, 1.00 ]\nfrom scipy.interpolate import interp1d\nbatf = interp1d(batv, batp)\nfilt_perc = 1.0\n\ndef compute_tecs():\n if filter_node.getFloat('timestamp') < 0.01:\n # do nothing if filter not inited\n return\n \n mass_kg = tecs_config_node.getFloat(\"mass_kg\")\n if mass_kg < 0.01:\n mass_kg = 3.0\n if tecs_config_node.hasChild(\"weight_bal\"):\n wb = tecs_config_node.getFloat(\"weight_bal\")\n else:\n wb = 1.0\n # fixem:\n wb = 0.0\n alt_m = filter_node.getFloat(\"altitude_m\")\n vel_mps = vel_node.getFloat(\"airspeed_smoothed_kt\") * kt2mps\n target_alt_m = targets_node.getFloat(\"altitude_msl_ft\") * ft2m\n target_vel_mps = targets_node.getFloat(\"airspeed_kt\") * kt2mps\n \n energy_pot = mass_kg * g * alt_m\n energy_kin = 0.5 * mass_kg * vel_mps * vel_mps\n\n target_pot = mass_kg * g * target_alt_m\n target_kin = 0.5 * mass_kg * target_vel_mps * target_vel_mps\n\n error_pot = target_pot - energy_pot\n error_kin = target_kin - energy_kin\n # print(filter_node.getFloat('timestamp'), 'target_alt:', target_alt_m, 'tgt_pot:', target_pot, 'E_pot:', energy_pot, 'Err_kin:', error_kin, 'Err_pot:', error_pot)\n error_total = error_pot + error_kin\n error_bal = (2.0 - wb) * error_kin - wb * error_pot\n\n tecs_node.setFloat(\"energy_total\", energy_pot + energy_kin )\n tecs_node.setFloat(\"target_total\", target_pot + target_kin )\n tecs_node.setFloat(\"error_total\", error_total)\n tecs_node.setFloat(\"error_diff\", error_bal)\n\ndef compute_derived_data():\n global last_time\n \n # compute ground track heading/speed\n vn = filter_node.getFloat(\"vn_ms\")\n ve = filter_node.getFloat(\"ve_ms\")\n vd = filter_node.getFloat(\"vd_ms\")\n hdg = (math.pi * 0.5 - math.atan2(vn, ve)) * r2d\n vel_ms = math.sqrt( vn*vn + ve*ve + vd*vd )\n filter_node.setFloat(\"groundtrack_deg\", hdg)\n filter_node.setFloat(\"groundspeed_ms\", vel_ms)\n filter_node.setFloat(\"groundspeed_kt\", vel_ms * mps2kt)\n\n # compute frame dt\n current_time = filter_node.getFloat('timestamp')\n dt = current_time - last_time\n last_time = current_time\n\n # local 'airborne' helper (not official)\n if vel_node.getFloat('airspeed_smoothed_kt') >= 15:\n in_flight = True\n else:\n in_flight = False\n status_node.setBool(\"in_flight\", in_flight)\n \n # local autopilot timer\n ap_enabled = False\n if pilot_node.getFloatEnum(\"channel\", 0) > 0:\n ap_enabled = True\n \n if in_flight and ap_enabled:\n timer = status_node.getFloat('local_autopilot_timer')\n timer += dt\n status_node.setFloat('local_autopilot_timer', timer)\n\n # estimate distance traveled from filter velocity and dt\n if in_flight:\n if not status_node.getBool('onboard_flight_time'):\n ft = status_node.getFloat('flight_timer')\n ft += dt\n 
status_node.setFloat('flight_timer', ft)\n od = status_node.getFloat('flight_odometer')\n od += vel_ms * dt\n status_node.setFloat('flight_odometer', od)\n\n # autopilot error metrics\n roll_error = targets_node.getFloat('roll_deg') - filter_node.getFloat('roll_deg')\n #print 'error %.4f,%.1f' % (filter_node.getFloat('timestamp'), roll_error)\n \n volts = power_node.getFloat(\"main_vcc\")\n amps = power_node.getFloat(\"main_amps\")\n watts = volts * amps\n power_node.setFloat(\"main_watts\", watts)\n\n cell_volts = power_node.getFloat(\"cell_vcc\")\n if cell_volts < 3.3: cell_volts = 3.3\n if cell_volts > 4.2: cell_volts = 4.2\n batt_perc = batf(cell_volts)\n global filt_perc\n if filt_perc is None:\n filt_perc = batt_perc\n else:\n filt_perc = 0.9995 * filt_perc + 0.0005 * batt_perc\n power_node.setFloat(\"battery_perc\", filt_perc)\n \n # TECS\n compute_tecs()\n" ]
[ [ "scipy.interpolate.interp1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
shashwat9kumar/datasets
[ "99b055408025f8e934fcbb0fc054488aa087ebfb", "99b055408025f8e934fcbb0fc054488aa087ebfb", "99b055408025f8e934fcbb0fc054488aa087ebfb", "99b055408025f8e934fcbb0fc054488aa087ebfb", "99b055408025f8e934fcbb0fc054488aa087ebfb", "99b055408025f8e934fcbb0fc054488aa087ebfb", "99b055408025f8e934fcbb0fc054488aa087ebfb", "99b055408025f8e934fcbb0fc054488aa087ebfb" ]
[ "tensorflow_datasets/scripts/documentation/doc_utils.py", "tensorflow_datasets/image_classification/bigearthnet.py", "tensorflow_datasets/image_classification/cars196.py", "tensorflow_datasets/core/utils/image_utils.py", "tensorflow_datasets/object_detection/waymo_open_dataset.py", "tensorflow_datasets/testing/fake_data_generation/smallnorb.py", "tensorflow_datasets/core/features/dataset_feature_test.py", "tensorflow_datasets/testing/fake_data_generation/arc.py" ]
[ "# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Util to generate the dataset documentation content.\n\nUsed by tensorflow_datasets/scripts/documentation/build_catalog.py\n\n\"\"\"\n\nimport collections\nimport os\nimport textwrap\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport dataclasses\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets as tfds\n\n# Dict of `full_names_dict['dataset']['config']['version']`\nFullNamesDict = Dict[str, Dict[str, Dict[str, Any]]]\n# Same as `FullNamesDict`, but contains `True` for nightly datasets:\n# * New dataset: nightly_dict['dataset'] is True\n# * New config: nightly_dict['dataset']['config'] is True\n# * New version: nightly_dict['dataset']['config']['version'] is True\nNightlyDict = Dict[str, Union[bool, Dict[str, Union[bool, Dict[str, bool]]]]]\n\n\[email protected]\nclass DocUtilPaths:\n \"\"\"Structure containing the utils paths.\"\"\"\n # VisualizationDocUtil\n fig_base_path: Optional[tfds.core.PathLike] = tfds.core.gcs_path(\n 'visualization/fig/')\n fig_base_url: str = 'https://storage.googleapis.com/tfds-data/visualization/fig/'\n # DataframeDocUtil\n df_base_path: Optional[tfds.core.PathLike] = tfds.core.gcs_path(\n 'visualization/dataframe')\n df_base_url: str = 'https://storage.googleapis.com/tfds-data/visualization/dataframe/'\n # NightlyDocUtil\n nightly_path: Optional[tfds.core.PathLike] = tfds.core.utils.tfds_path(\n 'stable_versions.txt')\n\n\nclass VisualizationDocUtil(object):\n \"\"\"Small util which generate the path/urls for the visualizations.\"\"\"\n\n def __init__(self, base_path: tfds.core.PathLike, base_url: str):\n \"\"\"Constructor.\n\n Args:\n base_path: Path where images are stored.\n base_url: Base url where images are displayed.\n \"\"\"\n self._base_path = base_path\n self._base_url = base_url\n\n def _get_name(self, builder):\n return builder.info.full_name.replace('/', '-') + '.png'\n\n def get_url(self, builder):\n return self._base_url + self._get_name(builder)\n\n def get_html_tag(self, builder: tfds.core.DatasetBuilder) -> str:\n \"\"\"Returns the <img> html tag.\"\"\"\n url = self.get_url(builder)\n return f'<img src=\"{url}\" alt=\"Visualization\" width=\"500px\">'\n\n def has_visualization(self, builder):\n filepath = os.path.join(self._base_path, self._get_name(builder))\n return tf.io.gfile.exists(filepath)\n\n\nclass DataframeDocUtil(object):\n \"\"\"Small util which generate the path/urls for the dataframes.\"\"\"\n\n def __init__(self, base_path: tfds.core.PathLike, base_url: str):\n \"\"\"Constructor.\n\n Args:\n base_path: Path where images are stored.\n base_url: Base url where images are displayed.\n \"\"\"\n self._base_path = base_path\n self._base_url = base_url\n\n def _get_name(self, builder):\n return builder.info.full_name.replace('/', '-') + '.html'\n\n def get_url(self, builder):\n return self._base_url + self._get_name(builder)\n\n def get_html_tag(self, builder: 
tfds.core.DatasetBuilder) -> str:\n \"\"\"Returns the html tag.\"\"\"\n url = self.get_url(builder)\n button_id = 'displaydataframe'\n content_id = 'dataframecontent'\n visualization_html = f\"\"\"\n <!-- mdformat off(HTML should not be auto-formatted) -->\n\n {{% framebox %}}\n\n <button id=\"{button_id}\">Display examples...</button>\n <div id=\"{content_id}\" style=\"overflow-x:scroll\"></div>\n <script src=\"https://www.gstatic.com/external_hosted/jquery2.min.js\"></script>\n <script>\n var url = \"{url}\";\n $(document).ready(() => {{\n $(\"#{button_id}\").click((event) => {{\n // Disable the button after clicking (dataframe loaded only once).\n $(\"#{button_id}\").prop(\"disabled\", true);\n\n // Pre-fetch and display the content\n $.get(url, (data) => {{\n $(\"#{content_id}\").html(data);\n }}).fail(() => {{\n $(\"#{content_id}\").html(\n 'Error loading examples. If the error persist, please open '\n + 'a new issue.'\n );\n }});\n }});\n }});\n </script>\n\n {{% endframebox %}}\n\n <!-- mdformat on -->\n \"\"\"\n return textwrap.dedent(visualization_html)\n\n def has_visualization(self, builder):\n filepath = os.path.join(self._base_path, self._get_name(builder))\n return tf.io.gfile.exists(filepath)\n\n\ndef _split_full_name(full_name: str) -> Tuple[str, str, str]:\n \"\"\"Extracts the `(ds name, config, version)` from the full_name.\"\"\"\n if not tfds.core.load.is_full_name(full_name):\n raise ValueError(f'Parsing builder name string {full_name} failed.'\n 'The builder name string must be of the following format:'\n '`dataset_name[/config_name]/version`')\n ds_name, *optional_config, version = full_name.split('/')\n assert len(optional_config) <= 1\n config = next(iter(optional_config)) if optional_config else ''\n return ds_name, config, version\n\n\ndef _full_names_to_dict(full_names: List[str]) -> FullNamesDict:\n \"\"\"Creates the dict `d['dataset']['config']['version']`.\"\"\"\n full_names_dict = collections.defaultdict(lambda: collections.defaultdict( # pylint: disable=g-long-lambda\n lambda: collections.defaultdict(type(None))))\n for full_name in full_names:\n ds_name, config, version = _split_full_name(full_name)\n full_names_dict[ds_name][config][version] # pylint: disable=pointless-statement\n return full_names_dict\n\n\ndef _build_nightly_dict(\n registered_ds: FullNamesDict,\n stable_version_ds: FullNamesDict,\n) -> NightlyDict:\n \"\"\"Computes the nightly dict from the registered and stable dict.\"\"\"\n nightly_ds = collections.defaultdict(lambda: collections.defaultdict( # pylint: disable=g-long-lambda\n lambda: collections.defaultdict(bool)))\n for dataset in registered_ds:\n if dataset in stable_version_ds:\n for config in registered_ds[dataset]:\n if config in stable_version_ds[dataset]:\n for version in registered_ds[dataset][config]:\n if version in stable_version_ds[dataset][config]:\n # (dataset, config, version) already exists\n # We add it to the nightly dict to make sure the\n # key exists\n nightly_ds[dataset][config][version] = False\n else:\n # New version only present in tfds-nightly\n nightly_ds[dataset][config][version] = True\n else:\n # New config only present in tfds-nightly\n nightly_ds[dataset][config] = True\n else:\n # New dataset only present in tfds-nightly\n nightly_ds[dataset] = True\n return nightly_ds\n\n\[email protected]()\ndef _load_nightly_dict(version_path: tfds.core.PathLike) -> NightlyDict:\n \"\"\"Loads (and caches) the nightly dict.\"\"\"\n with tf.io.gfile.GFile(os.fspath(version_path), 'r') as f:\n stable_versions = 
f.read().splitlines()\n\n # Build the `full_names_dict['dataset']['config']['version']` for both\n # nightly and stable version\n registered_ds = _full_names_to_dict(tfds.core.load.list_full_names())\n stable_version_ds = _full_names_to_dict(stable_versions)\n\n # Nightly versions are `registered - stable`\n return _build_nightly_dict(registered_ds, stable_version_ds)\n\n\nclass NightlyDocUtil(object):\n \"\"\"Small util to format the doc.\"\"\"\n\n def __init__(self, path: tfds.core.PathLike):\n \"\"\"Constructor.\n\n Args:\n path: Path containing the nightly versions\n \"\"\"\n self._nightly_dict: NightlyDict = _load_nightly_dict(path)\n\n def is_builder_nightly(\n self,\n builder: Union[tfds.core.DatasetBuilder, str],\n ) -> bool:\n \"\"\"Returns `True` if the builder is new.\"\"\"\n if isinstance(builder, tfds.core.DatasetBuilder):\n builder_name = builder.name\n else:\n builder_name = builder\n return self._nightly_dict[builder_name] is True # pylint: disable=g-bool-id-comparison\n\n def is_config_nightly(self, builder: tfds.core.DatasetBuilder) -> bool:\n \"\"\"Returns `True` if the config is new.\"\"\"\n ds_name, config, _ = _split_full_name(builder.info.full_name)\n if self.is_builder_nightly(builder):\n return False\n return self._nightly_dict[ds_name][config] is True # pylint: disable=g-bool-id-comparison\n\n def is_version_nightly(\n self,\n builder: tfds.core.DatasetBuilder,\n version: str,\n ) -> bool:\n \"\"\"Returns `True` if the version is new.\"\"\"\n ds_name, config, _ = _split_full_name(builder.info.full_name)\n if self.is_builder_nightly(builder) or self.is_config_nightly(builder):\n return False\n return self._nightly_dict[ds_name][config][version] is True # pylint: disable=g-bool-id-comparison\n\n def has_nightly(self, builder: tfds.core.DatasetBuilder) -> bool:\n \"\"\"Returns True if any of the builder/config/version is new.\"\"\"\n\n def reduce(value):\n if isinstance(value, bool):\n return value\n elif isinstance(value, dict):\n return any(reduce(x) for x in value.values())\n else:\n raise AssertionError(f'Invalid nightly_dict value: {value}')\n\n return reduce(self._nightly_dict[builder.name])\n\n icon = (\n '<span class=\"material-icons\" '\n 'title=\"Available only in the tfds-nightly package\">nights_stay</span>')\n", "# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"BigEarthNet remote sensing dataset of Sentinel-2 image patches.\"\"\"\n\nimport io\nimport json\nimport os\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets.public_api as tfds\n\n_CITATION = \"\"\"\\\n@article{Sumbul2019BigEarthNetAL,\n title={BigEarthNet: A Large-Scale Benchmark Archive For Remote Sensing Image Understanding},\n author={Gencer Sumbul and Marcela Charfuelan and Beg{\\\"u}m Demir and Volker Markl},\n journal={CoRR},\n year={2019},\n volume={abs/1902.06148}\n}\"\"\"\n\n_DESCRIPTION = \"\"\"\\\nThe BigEarthNet is a new large-scale Sentinel-2 benchmark archive, consisting 
of\n590,326 Sentinel-2 image patches. The image patch size on the ground is\n1.2 x 1.2 km with variable image size depending on the channel resolution.\nThis is a multi-label dataset with 43 imbalanced labels.\n\nTo construct the BigEarthNet, 125 Sentinel-2\ntiles acquired between June 2017 and May 2018 over the 10 countries (Austria,\nBelgium, Finland, Ireland, Kosovo, Lithuania, Luxembourg, Portugal, Serbia,\nSwitzerland) of Europe were initially selected. All the tiles were\natmospherically corrected by the Sentinel-2 Level 2A product generation and\nformatting tool (sen2cor). Then, they were divided into 590,326 non-overlapping\nimage patches. Each image patch was annotated by the multiple land-cover classes\n(i.e., multi-labels) that were provided from the CORINE Land Cover database of\nthe year 2018 (CLC 2018).\n\nBands and pixel resolution in meters:\n\n* B01: Coastal aerosol; 60m\n* B02: Blue; 10m\n* B03: Green; 10m\n* B04: Red; 10m\n* B05: Vegetation red edge; 20m\n* B06: Vegetation red edge; 20m\n* B07: Vegetation red edge; 20m\n* B08: NIR; 10m\n* B09: Water vapor; 60m\n* B11: SWIR; 20m\n* B12: SWIR; 20m\n* B8A: Narrow NIR; 20m\n\nLicense: Community Data License Agreement - Permissive, Version 1.0.\n\nURL: http://bigearth.net/\n\"\"\"\n\n_LABELS = [\n 'Agro-forestry areas', 'Airports',\n 'Annual crops associated with permanent crops', 'Bare rock',\n 'Beaches, dunes, sands', 'Broad-leaved forest', 'Burnt areas',\n 'Coastal lagoons', 'Complex cultivation patterns', 'Coniferous forest',\n 'Construction sites', 'Continuous urban fabric',\n 'Discontinuous urban fabric', 'Dump sites', 'Estuaries',\n 'Fruit trees and berry plantations', 'Green urban areas',\n 'Industrial or commercial units', 'Inland marshes', 'Intertidal flats',\n 'Land principally occupied by agriculture, with significant areas of '\n 'natural vegetation', 'Mineral extraction sites', 'Mixed forest',\n 'Moors and heathland', 'Natural grassland', 'Non-irrigated arable land',\n 'Olive groves', 'Pastures', 'Peatbogs', 'Permanently irrigated land',\n 'Port areas', 'Rice fields', 'Road and rail networks and associated land',\n 'Salines', 'Salt marshes', 'Sclerophyllous vegetation', 'Sea and ocean',\n 'Sparsely vegetated areas', 'Sport and leisure facilities',\n 'Transitional woodland/shrub', 'Vineyards', 'Water bodies', 'Water courses'\n]\n\n_DATA_OPTIONS = ['rgb', 'all']\n\n_ZIP_FILE = 'http://bigearth.net/downloads/BigEarthNet-S2-v1.0.tar.gz'\n_ZIP_SUBIDR = 'BigEarthNet-v1.0'\n\n# To clip and rescale the RGB channels for the JPEG images visualizatoin.\n# This is not the maximal value.\n# Sample observed max value was about 17800, while the sample observed mean\n# was about 400 with a standard deviation of about 200.\n# Adhoc selection of the upper max value to be mean + 7*std.\n_OPTICAL_MAX_VALUE = 2000.\n\n\nclass BigearthnetConfig(tfds.core.BuilderConfig):\n \"\"\"BuilderConfig for Bigearthnet.\"\"\"\n\n def __init__(self, selection=None, **kwargs):\n \"\"\"Constructs a BigearthnetConfig.\n\n Args:\n selection: `str`, one of `_DATA_OPTIONS`.\n **kwargs: keyword arguments forwarded to super.\n \"\"\"\n if selection not in _DATA_OPTIONS:\n raise ValueError('selection must be one of %s' % _DATA_OPTIONS)\n\n super(BigearthnetConfig, self).__init__(\n version=tfds.core.Version('1.0.0'),\n release_notes={\n '1.0.0': 'New split API (https://tensorflow.org/datasets/splits)',\n },\n **kwargs)\n self.selection = selection\n\n\nclass Bigearthnet(tfds.core.BeamBasedBuilder):\n \"\"\"Bigearthnet remote sensing dataset of Sentinel-2 
image patches.\"\"\"\n\n BUILDER_CONFIGS = [\n BigearthnetConfig(\n selection='rgb', name='rgb', description='Sentinel-2 RGB channels'),\n BigearthnetConfig(\n selection='all', name='all', description='13 Sentinel-2 channels'),\n ]\n\n def _info(self):\n metadata_dict = tfds.features.FeaturesDict({\n 'acquisition_date': tfds.features.Text(),\n 'coordinates': {\n 'lrx': tf.int64,\n 'lry': tf.int64,\n 'ulx': tf.int64,\n 'uly': tf.int64,\n },\n 'projection': tfds.features.Text(),\n 'tile_source': tfds.features.Text(),\n })\n if self.builder_config.selection == 'rgb':\n features = tfds.features.FeaturesDict({\n 'image':\n tfds.features.Image(shape=[120, 120, 3]),\n 'labels':\n tfds.features.Sequence(tfds.features.ClassLabel(names=_LABELS)),\n 'filename':\n tfds.features.Text(),\n 'metadata':\n metadata_dict,\n })\n supervised_keys = ('image', 'labels')\n elif self.builder_config.selection == 'all':\n features = tfds.features.FeaturesDict({\n 'B01':\n tfds.features.Tensor(shape=[20, 20], dtype=tf.float32),\n 'B02':\n tfds.features.Tensor(shape=[120, 120], dtype=tf.float32),\n 'B03':\n tfds.features.Tensor(shape=[120, 120], dtype=tf.float32),\n 'B04':\n tfds.features.Tensor(shape=[120, 120], dtype=tf.float32),\n 'B05':\n tfds.features.Tensor(shape=[60, 60], dtype=tf.float32),\n 'B06':\n tfds.features.Tensor(shape=[60, 60], dtype=tf.float32),\n 'B07':\n tfds.features.Tensor(shape=[60, 60], dtype=tf.float32),\n 'B08':\n tfds.features.Tensor(shape=[120, 120], dtype=tf.float32),\n 'B09':\n tfds.features.Tensor(shape=[20, 20], dtype=tf.float32),\n 'B11':\n tfds.features.Tensor(shape=[60, 60], dtype=tf.float32),\n 'B12':\n tfds.features.Tensor(shape=[60, 60], dtype=tf.float32),\n 'B8A':\n tfds.features.Tensor(shape=[60, 60], dtype=tf.float32),\n 'labels':\n tfds.features.Sequence(tfds.features.ClassLabel(names=_LABELS)),\n 'filename':\n tfds.features.Text(),\n 'metadata':\n metadata_dict,\n })\n supervised_keys = None\n\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=features,\n supervised_keys=supervised_keys,\n homepage='http://bigearth.net',\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n \"\"\"Returns SplitGenerators.\"\"\"\n dl_path = dl_manager.download(_ZIP_FILE)\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n gen_kwargs={\n 'archive_path': dl_path,\n },\n ),\n ]\n\n def _build_pcollection(self, pipeline, archive_path):\n \"\"\"Generates examples as dicts.\"\"\"\n beam = tfds.core.lazy_imports.apache_beam\n selection = self.builder_config.selection\n\n return (pipeline\n | 'ArchivePath' >> beam.Create([archive_path])\n | 'ReadArchive' >> beam.FlatMap(_read_archive, selection)\n | 'Reshuffle' >> beam.transforms.Reshuffle()\n | 'ProcessExamples' >> beam.Map(_process_example, selection))\n\n\ndef _read_archive(archive_path, selection):\n \"\"\"Yields non-processed examples out of archive.\"\"\"\n example = {}\n read_band_files = 0\n for fpath, fobj in tfds.core.download.extractor.iter_tar_stream(archive_path):\n read_band_files += 1\n _, patch_name, fname = fpath.split(os.path.sep)\n if fname.endswith('_labels_metadata.json'):\n example['metadata'] = fobj.read()\n elif fname.endswith('.tif'):\n band = fname[-7:-4]\n if selection != 'rgb' or (selection == 'rgb' and\n band in {'B02', 'B03', 'B04'}):\n example[band] = fobj.read()\n example.setdefault('bands', []).append(band)\n else:\n raise AssertionError('Unexpected file: %s' % fpath)\n if read_band_files == 13:\n example['filename'] = patch_name\n yield example\n 
example = {}\n read_band_files = 0\n\n\ndef _process_example(example, selection):\n example = example.copy()\n example['metadata'] = json.loads(example['metadata'])\n example['labels'] = example['metadata'].pop('labels')\n for band in example.pop('bands') or []:\n example[band] = _load_tif(example[band])\n if selection == 'rgb':\n _create_rgb_image(example)\n return example['filename'], example\n\n\ndef _create_rgb_image(d):\n \"\"\"Creates and rescales RGB image.\"\"\"\n img = np.stack([d.pop('B04'), d.pop('B03'), d.pop('B02')], axis=2)\n img = img / _OPTICAL_MAX_VALUE * 255.0\n d['image'] = np.clip(img, 0, 255).astype(np.uint8)\n\n\ndef _load_tif(data):\n \"\"\"Loads TIF file and returns as float32 numpy array.\"\"\"\n img = tfds.core.lazy_imports.PIL_Image.open(io.BytesIO(data))\n img = np.array(img.getdata()).reshape(img.size).astype(np.float32)\n return img\n", "# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Dataset class for Cars196 Dataset.\"\"\"\nimport os\nimport six.moves.urllib as urllib\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets.public_api as tfds\n\n_URL = 'http://ai.stanford.edu/~jkrause/car196/'\n_EXTRA_URL = 'https://ai.stanford.edu/~jkrause/cars/car_devkit.tgz'\n\n_DESCRIPTION = (\n 'The Cars dataset contains 16,185 images of 196 classes of cars. The data '\n 'is split into 8,144 training images and 8,041 testing images, where each '\n 'class has been split roughly in a 50-50 split. Classes are typically at '\n 'the level of Make, Model, Year, e.g. 2012 Tesla Model S or 2012 BMW M3 '\n 'coupe.')\n\n_NAMES = [\n 'AM General Hummer SUV 2000',\n 'Acura RL Sedan 2012',\n 'Acura TL Sedan 2012',\n 'Acura TL Type-S 2008',\n 'Acura TSX Sedan 2012',\n 'Acura Integra Type R 2001',\n 'Acura ZDX Hatchback 2012',\n 'Aston Martin V8 Vantage Convertible 2012',\n 'Aston Martin V8 Vantage Coupe 2012',\n 'Aston Martin Virage Convertible 2012',\n 'Aston Martin Virage Coupe 2012',\n 'Audi RS 4 Convertible 2008',\n 'Audi A5 Coupe 2012',\n 'Audi TTS Coupe 2012',\n 'Audi R8 Coupe 2012',\n 'Audi V8 Sedan 1994',\n 'Audi 100 Sedan 1994',\n 'Audi 100 Wagon 1994',\n 'Audi TT Hatchback 2011',\n 'Audi S6 Sedan 2011',\n 'Audi S5 Convertible 2012',\n 'Audi S5 Coupe 2012',\n 'Audi S4 Sedan 2012',\n 'Audi S4 Sedan 2007',\n 'Audi TT RS Coupe 2012',\n 'BMW ActiveHybrid 5 Sedan 2012',\n 'BMW 1 Series Convertible 2012',\n 'BMW 1 Series Coupe 2012',\n 'BMW 3 Series Sedan 2012',\n 'BMW 3 Series Wagon 2012',\n 'BMW 6 Series Convertible 2007',\n 'BMW X5 SUV 2007',\n 'BMW X6 SUV 2012',\n 'BMW M3 Coupe 2012',\n 'BMW M5 Sedan 2010',\n 'BMW M6 Convertible 2010',\n 'BMW X3 SUV 2012',\n 'BMW Z4 Convertible 2012',\n 'Bentley Continental Supersports Conv. 
Convertible 2012',\n 'Bentley Arnage Sedan 2009',\n 'Bentley Mulsanne Sedan 2011',\n 'Bentley Continental GT Coupe 2012',\n 'Bentley Continental GT Coupe 2007',\n 'Bentley Continental Flying Spur Sedan 2007',\n 'Bugatti Veyron 16.4 Convertible 2009',\n 'Bugatti Veyron 16.4 Coupe 2009',\n 'Buick Regal GS 2012',\n 'Buick Rainier SUV 2007',\n 'Buick Verano Sedan 2012',\n 'Buick Enclave SUV 2012',\n 'Cadillac CTS-V Sedan 2012',\n 'Cadillac SRX SUV 2012',\n 'Cadillac Escalade EXT Crew Cab 2007',\n 'Chevrolet Silverado 1500 Hybrid Crew Cab 2012',\n 'Chevrolet Corvette Convertible 2012',\n 'Chevrolet Corvette ZR1 2012',\n 'Chevrolet Corvette Ron Fellows Edition Z06 2007',\n 'Chevrolet Traverse SUV 2012',\n 'Chevrolet Camaro Convertible 2012',\n 'Chevrolet HHR SS 2010',\n 'Chevrolet Impala Sedan 2007',\n 'Chevrolet Tahoe Hybrid SUV 2012',\n 'Chevrolet Sonic Sedan 2012',\n 'Chevrolet Express Cargo Van 2007',\n 'Chevrolet Avalanche Crew Cab 2012',\n 'Chevrolet Cobalt SS 2010',\n 'Chevrolet Malibu Hybrid Sedan 2010',\n 'Chevrolet TrailBlazer SS 2009',\n 'Chevrolet Silverado 2500HD Regular Cab 2012',\n 'Chevrolet Silverado 1500 Classic Extended Cab 2007',\n 'Chevrolet Express Van 2007',\n 'Chevrolet Monte Carlo Coupe 2007',\n 'Chevrolet Malibu Sedan 2007',\n 'Chevrolet Silverado 1500 Extended Cab 2012',\n 'Chevrolet Silverado 1500 Regular Cab 2012',\n 'Chrysler Aspen SUV 2009',\n 'Chrysler Sebring Convertible 2010',\n 'Chrysler Town and Country Minivan 2012',\n 'Chrysler 300 SRT-8 2010',\n 'Chrysler Crossfire Convertible 2008',\n 'Chrysler PT Cruiser Convertible 2008',\n 'Daewoo Nubira Wagon 2002',\n 'Dodge Caliber Wagon 2012',\n 'Dodge Caliber Wagon 2007',\n 'Dodge Caravan Minivan 1997',\n 'Dodge Ram Pickup 3500 Crew Cab 2010',\n 'Dodge Ram Pickup 3500 Quad Cab 2009',\n 'Dodge Sprinter Cargo Van 2009',\n 'Dodge Journey SUV 2012',\n 'Dodge Dakota Crew Cab 2010',\n 'Dodge Dakota Club Cab 2007',\n 'Dodge Magnum Wagon 2008',\n 'Dodge Challenger SRT8 2011',\n 'Dodge Durango SUV 2012',\n 'Dodge Durango SUV 2007',\n 'Dodge Charger Sedan 2012',\n 'Dodge Charger SRT-8 2009',\n 'Eagle Talon Hatchback 1998',\n 'FIAT 500 Abarth 2012',\n 'FIAT 500 Convertible 2012',\n 'Ferrari FF Coupe 2012',\n 'Ferrari California Convertible 2012',\n 'Ferrari 458 Italia Convertible 2012',\n 'Ferrari 458 Italia Coupe 2012',\n 'Fisker Karma Sedan 2012',\n 'Ford F-450 Super Duty Crew Cab 2012',\n 'Ford Mustang Convertible 2007',\n 'Ford Freestar Minivan 2007',\n 'Ford Expedition EL SUV 2009',\n 'Ford Edge SUV 2012',\n 'Ford Ranger SuperCab 2011',\n 'Ford GT Coupe 2006',\n 'Ford F-150 Regular Cab 2012',\n 'Ford F-150 Regular Cab 2007',\n 'Ford Focus Sedan 2007',\n 'Ford E-Series Wagon Van 2012',\n 'Ford Fiesta Sedan 2012',\n 'GMC Terrain SUV 2012',\n 'GMC Savana Van 2012',\n 'GMC Yukon Hybrid SUV 2012',\n 'GMC Acadia SUV 2012',\n 'GMC Canyon Extended Cab 2012',\n 'Geo Metro Convertible 1993',\n 'HUMMER H3T Crew Cab 2010',\n 'HUMMER H2 SUT Crew Cab 2009',\n 'Honda Odyssey Minivan 2012',\n 'Honda Odyssey Minivan 2007',\n 'Honda Accord Coupe 2012',\n 'Honda Accord Sedan 2012',\n 'Hyundai Veloster Hatchback 2012',\n 'Hyundai Santa Fe SUV 2012',\n 'Hyundai Tucson SUV 2012',\n 'Hyundai Veracruz SUV 2012',\n 'Hyundai Sonata Hybrid Sedan 2012',\n 'Hyundai Elantra Sedan 2007',\n 'Hyundai Accent Sedan 2012',\n 'Hyundai Genesis Sedan 2012',\n 'Hyundai Sonata Sedan 2012',\n 'Hyundai Elantra Touring Hatchback 2012',\n 'Hyundai Azera Sedan 2012',\n 'Infiniti G Coupe IPL 2012',\n 'Infiniti QX56 SUV 2011',\n 'Isuzu Ascender SUV 2008',\n 'Jaguar XK 
XKR 2012',\n 'Jeep Patriot SUV 2012',\n 'Jeep Wrangler SUV 2012',\n 'Jeep Liberty SUV 2012',\n 'Jeep Grand Cherokee SUV 2012',\n 'Jeep Compass SUV 2012',\n 'Lamborghini Reventon Coupe 2008',\n 'Lamborghini Aventador Coupe 2012',\n 'Lamborghini Gallardo LP 570-4 Superleggera 2012',\n 'Lamborghini Diablo Coupe 2001',\n 'Land Rover Range Rover SUV 2012',\n 'Land Rover LR2 SUV 2012',\n 'Lincoln Town Car Sedan 2011',\n 'MINI Cooper Roadster Convertible 2012',\n 'Maybach Landaulet Convertible 2012',\n 'Mazda Tribute SUV 2011',\n 'McLaren MP4-12C Coupe 2012',\n 'Mercedes-Benz 300-Class Convertible 1993',\n 'Mercedes-Benz C-Class Sedan 2012',\n 'Mercedes-Benz SL-Class Coupe 2009',\n 'Mercedes-Benz E-Class Sedan 2012',\n 'Mercedes-Benz S-Class Sedan 2012',\n 'Mercedes-Benz Sprinter Van 2012',\n 'Mitsubishi Lancer Sedan 2012',\n 'Nissan Leaf Hatchback 2012',\n 'Nissan NV Passenger Van 2012',\n 'Nissan Juke Hatchback 2012',\n 'Nissan 240SX Coupe 1998',\n 'Plymouth Neon Coupe 1999',\n 'Porsche Panamera Sedan 2012',\n 'Ram C/V Cargo Van Minivan 2012',\n 'Rolls-Royce Phantom Drophead Coupe Convertible 2012',\n 'Rolls-Royce Ghost Sedan 2012',\n 'Rolls-Royce Phantom Sedan 2012',\n 'Scion xD Hatchback 2012',\n 'Spyker C8 Convertible 2009',\n 'Spyker C8 Coupe 2009',\n 'Suzuki Aerio Sedan 2007',\n 'Suzuki Kizashi Sedan 2012',\n 'Suzuki SX4 Hatchback 2012',\n 'Suzuki SX4 Sedan 2012',\n 'Tesla Model S Sedan 2012',\n 'Toyota Sequoia SUV 2012',\n 'Toyota Camry Sedan 2012',\n 'Toyota Corolla Sedan 2012',\n 'Toyota 4Runner SUV 2012',\n 'Volkswagen Golf Hatchback 2012',\n 'Volkswagen Golf Hatchback 1991',\n 'Volkswagen Beetle Hatchback 2012',\n 'Volvo C30 Hatchback 2012',\n 'Volvo 240 Sedan 1993',\n 'Volvo XC90 SUV 2007',\n 'smart fortwo Convertible 2012',\n]\n\n_CITATION = \"\"\"\\\n\n @inproceedings{KrauseStarkDengFei-Fei_3DRR2013,\n title = {3D Object Representations for Fine-Grained Categorization},\n booktitle = {4th International IEEE Workshop on 3D Representation and Recognition (3dRR-13)},\n year = {2013},\n address = {Sydney, Australia},\n author = {Jonathan Krause and Michael Stark and Jia Deng and Li Fei-Fei}\n }\n\n\"\"\"\n\n\nclass Cars196(tfds.core.GeneratorBasedBuilder):\n \"\"\"Car Images dataset.\"\"\"\n\n VERSION = tfds.core.Version('2.0.1')\n SUPPORTED_VERSIONS = [\n tfds.core.Version('2.1.0'),\n ]\n\n RELEASE_NOTES = {\n '2.0.0': 'Initial release',\n '2.0.1': 'Website URL update',\n }\n\n def _info(self):\n \"\"\"Define the dataset info.\"\"\"\n features_dict = {\n 'image': tfds.features.Image(),\n 'label': tfds.features.ClassLabel(names=_NAMES),\n 'bbox': tfds.features.BBoxFeature(),\n }\n if self.version > '2.0.0':\n features_dict['id'] = tfds.features.Text()\n return tfds.core.DatasetInfo(\n builder=self,\n description=(_DESCRIPTION),\n features=tfds.features.FeaturesDict(features_dict),\n supervised_keys=('image', 'label'),\n homepage='https://ai.stanford.edu/~jkrause/cars/car_dataset.html',\n citation=_CITATION)\n\n def _split_generators(self, dl_manager):\n \"\"\"Define the train and test split.\"\"\"\n output_files = dl_manager.download_and_extract({\n 'train':\n urllib.parse.urljoin(_URL, 'cars_train.tgz'),\n 'test':\n urllib.parse.urljoin(_URL, 'cars_test.tgz'),\n 'extra':\n _EXTRA_URL,\n 'test_annos':\n urllib.parse.urljoin(_URL, 'cars_test_annos_withlabels.mat'),\n })\n\n return [\n tfds.core.SplitGenerator(\n name='train',\n gen_kwargs={\n 'split_name':\n 'train',\n 'data_dir_path':\n os.path.join(output_files['train'], 'cars_train'),\n 'data_annotations_path':\n 
os.path.join(output_files['extra'],\n os.path.join('devkit',\n 'cars_train_annos.mat')),\n },\n ),\n tfds.core.SplitGenerator(\n name='test',\n gen_kwargs={\n 'split_name':\n 'test',\n 'data_dir_path':\n os.path.join(output_files['test'], 'cars_test'),\n 'data_annotations_path':\n output_files['test_annos'],\n },\n ),\n ]\n\n def _generate_examples(self, split_name, data_dir_path,\n data_annotations_path):\n \"\"\"Generate training and testing samples.\"\"\"\n\n image_dict = self.returnImageDict(data_dir_path)\n bbox_dict = self.returnBbox(data_annotations_path, image_dict)\n with tf.io.gfile.GFile(data_annotations_path, 'rb') as f:\n mat = tfds.core.lazy_imports.scipy.io.loadmat(f)\n for example in mat['annotations'][0]:\n image_name = example[-1].item().split('.')[0]\n label = _NAMES[example[4].item() - 1]\n image = image_dict[image_name]\n bbox = bbox_dict[image_name]\n features = {\n 'label': label,\n 'image': image,\n 'bbox': bbox,\n }\n if self.version > '2.0.0':\n features['id'] = image_name\n yield image_name, features\n\n def returnImageDict(self, path):\n return {\n filename.split('.')[0]: os.path.join(path, filename)\n for filename in tf.io.gfile.listdir(path)\n }\n\n def returnBbox(self, filename, image_dict):\n bbox_dict = {}\n with tf.io.gfile.GFile(filename, 'rb') as f:\n data = tfds.core.lazy_imports.scipy.io.loadmat(f)\n for example in data['annotations'][0]:\n image_name = example[-1].item().split('.')[0]\n ymin = float(example[1].item())\n xmin = float(example[0].item())\n ymax = float(example[3].item())\n xmax = float(example[2].item())\n with tf.io.gfile.GFile(image_dict[image_name], 'rb') as fp:\n img = tfds.core.lazy_imports.PIL_Image.open(fp)\n width, height = img.size\n bbox_dict[image_name] = tfds.features.BBox(ymin / height, xmin / width,\n ymax / height, xmax / width)\n return bbox_dict\n", "# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities to manipulate images.\n\nNote: these functions are not meant to be used inside of a TF graph.\n\"\"\"\nimport csv\n\nimport subprocess\nfrom typing import Any, List, Optional\n\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_datasets.core import lazy_imports_lib\nfrom tensorflow_datasets.core.utils import py_utils\nfrom tensorflow_datasets.core.utils import resource_utils\nfrom tensorflow_datasets.core.utils import tf_utils\n\nPilImage = Any # Require lazy deps.\nTHUMBNAIL_SIZE = 128\n\n\n@py_utils.memoize()\ndef _get_runner():\n return tf_utils.TFGraphRunner()\n\n\ndef decode_image(image_bytes: bytes) -> np.ndarray:\n \"\"\"Returns np.array corresponding to encoded image.\"\"\"\n runner = _get_runner()\n return runner.run(tf.image.decode_image, image_bytes)\n\n\ndef png_to_jpeg(image_bytes: bytes, quality: int = 100) -> np.ndarray:\n \"\"\"Converts PNG image (bytes or str) to JPEG (bytes).\"\"\"\n runner = _get_runner()\n decode_fn = lambda img: tf.image.decode_png(img, channels=3)\n image = runner.run(decode_fn, 
image_bytes)\n fn = lambda img: tf.image.encode_jpeg(img, format='rgb', quality=quality)\n return runner.run(fn, image)\n\n\ndef jpeg_cmyk_to_rgb(image_bytes: bytes, quality: int = 100) -> np.ndarray:\n \"\"\"Converts JPEG CMYK image (bytes) to RGB JPEG (bytes).\"\"\"\n runner = _get_runner()\n image = runner.run(tf.image.decode_jpeg, image_bytes)\n fn = lambda img: tf.image.encode_jpeg(img, format='rgb', quality=quality)\n return runner.run(fn, image)\n\n\ndef ffmpeg_run(\n args: List[str],\n stdin: Optional[bytes] = None,\n) -> None:\n \"\"\"Executes the ffmpeg function.\"\"\"\n ffmpeg_path = 'ffmpeg'\n try:\n cmd_args = [ffmpeg_path] + args\n subprocess.run(\n cmd_args,\n check=True,\n input=stdin,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n except subprocess.CalledProcessError as e:\n raise ValueError(f'Command {e.cmd} returned error code {e.returncode}:\\n'\n f'stdout={e.stdout.decode(\"utf-8\")}\\n'\n f'stderr={e.stderr.decode(\"utf-8\")}\\n')\n except FileNotFoundError as e:\n raise FileNotFoundError(\n 'It seems that ffmpeg is not installed on the system. Please follow '\n 'the instrutions at https://ffmpeg.org/. '\n f'Original exception: {e}')\n\n\n@py_utils.memoize()\ndef get_colormap() -> np.ndarray:\n \"\"\"Loads the colormap.\n\n The colormap was precomputed using Glasbey et al. algorythm (Colour Displays\n for Categorical Images, 2017) to generate maximally distinct colors.\n\n It was generated using https://github.com/taketwo/glasbey:\n\n ```python\n gb = glasbey.Glasbey(\n base_palette=[(0, 0, 0), (228, 26, 28), (55, 126, 184), (77, 175, 74)],\n no_black=True,\n )\n palette = gb.generate_palette(size=256)\n gb.save_palette(palette, 'colormap.csv')\n ```\n\n Returns:\n colormap: A `np.array(shape=(255, 3), dtype=np.uint8)` representing the\n mapping id -> color.\n \"\"\"\n colormap_path = resource_utils.tfds_path() / 'core/utils/colormap.csv'\n with colormap_path.open() as f:\n return np.array(list(csv.reader(f)), dtype=np.uint8)\n\n\ndef apply_colormap(image: np.ndarray) -> np.ndarray:\n \"\"\"Apply colormap from grayscale (h, w, 1) to colored (h, w, 3) image.\"\"\"\n image = image.squeeze(axis=-1) # (h, w, 1) -> (h, w)\n cmap = get_colormap() # Get the (256, 3) colormap\n # Normalize uint16 and convert each value to a unique color\n return cmap[image % len(cmap)]\n\n\n# Visualization single image\n\n\ndef _postprocess_noop(img: PilImage) -> PilImage:\n return img\n\n\ndef _postprocess_convert_rgb(img: PilImage) -> PilImage:\n return img.convert('RGB')\n\n\ndef create_thumbnail(ex: np.ndarray,\n *,\n use_colormap: bool,\n default_dimensions: bool = True) -> PilImage:\n \"\"\"Creates the image from the np.array input.\"\"\"\n PIL_Image = lazy_imports_lib.lazy_imports.PIL_Image # pylint: disable=invalid-name\n\n if use_colormap: # Apply the colormap first as it modify the shape/dtype\n ex = apply_colormap(ex)\n\n _, _, c = ex.shape\n postprocess = _postprocess_noop\n if c == 1:\n ex = ex.squeeze(axis=-1)\n mode = 'L'\n elif ex.dtype == np.uint16:\n mode = 'I;16'\n postprocess = _postprocess_convert_rgb\n else:\n mode = None\n img = PIL_Image.fromarray(ex, mode=mode)\n img = postprocess(img)\n if default_dimensions:\n img.thumbnail((THUMBNAIL_SIZE, THUMBNAIL_SIZE)) # Resize the image in-place\n return img\n", "# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The Waymo Open Dataset. See waymo.com/open.\"\"\"\n\nimport os\nfrom absl import logging\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_datasets.proto import waymo_dataset_pb2 as open_dataset\nimport tensorflow_datasets.public_api as tfds\n\n_CITATION = \"\"\"\n@InProceedings{Sun_2020_CVPR,\nauthor = {Sun, Pei and Kretzschmar, Henrik and Dotiwalla, Xerxes and Chouard, Aurelien and Patnaik, Vijaysai and Tsui, Paul and Guo, James and Zhou, Yin and Chai, Yuning and Caine, Benjamin and Vasudevan, Vijay and Han, Wei and Ngiam, Jiquan and Zhao, Hang and Timofeev, Aleksei and Ettinger, Scott and Krivokon, Maxim and Gao, Amy and Joshi, Aditya and Zhang, Yu and Shlens, Jonathon and Chen, Zhifeng and Anguelov, Dragomir},\ntitle = {Scalability in Perception for Autonomous Driving: Waymo Open Dataset},\nbooktitle = {The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},\nmonth = {June},\nyear = {2020}\n}\n\"\"\"\n\n_DESCRIPTION = \"\"\"\\\nThe Waymo Open Dataset is comprised of high resolution sensor data\ncollected by Waymo self-driving cars in a wide variety of conditions.\nThis data is licensed for non-commercial use.\n\nWARNING: this dataset requires additional authorization and registration.\nPlease look at tfds documentation for accessing GCS, and\nafterwards, please register via https://waymo.com/open/licensing/\n\"\"\"\n\n_GCS_DESCRIPTION = \"\"\"\nThis dataset is also available in pre-processed format, making it faster\nto load, if you select the correct data_dir:\n\n```\ntfds.load('waymo_open_dataset/{}', \\\ndata_dir='gs://waymo_open_dataset_{}_individual_files/tensorflow_datasets')\n```\n\n\"\"\"\n\n_HOMEPAGE_URL = \"http://www.waymo.com/open/\"\n_OBJECT_LABELS = [\n \"TYPE_UNKNOWN\", \"TYPE_VEHICLE\", \"TYPE_PEDESTRIAN\", \"TYPE_SIGN\",\n \"TYPE_CYCLIST\"\n]\n\n\nclass WaymoOpenDatasetConfig(tfds.core.BuilderConfig):\n \"\"\"BuilderConfig for Waymo Open Dataset Config.\"\"\"\n\n def __init__(self,\n *,\n name,\n version_str,\n description,\n is_on_gcs=False,\n **kwargs):\n \"\"\"BuilderConfig for Waymo Open Dataset examples.\n\n Args:\n name: Config name\n version_str: Version string (e.g. 
`v_1_2_0`).\n description: Description\n is_on_gcs: Whether the dataset is availabe preprocessed on GCS\n **kwargs: keyword arguments forwarded to super.\n \"\"\"\n if is_on_gcs:\n description = description + _GCS_DESCRIPTION.format(name, version_str)\n super(WaymoOpenDatasetConfig, self).__init__(\n name=name,\n version=tfds.core.Version(\"0.2.0\"),\n description=description,\n **kwargs)\n self.cloud_bucket = tfds.core.as_path(\n f\"gs://waymo_open_dataset_{version_str}_individual_files/\")\n\n\nclass WaymoOpenDataset(tfds.core.BeamBasedBuilder):\n \"\"\"Waymo Open Dataset.\"\"\"\n\n BUILDER_CONFIGS = [\n WaymoOpenDatasetConfig(\n name=\"v1.2\",\n version_str=\"v_1_2_0\",\n description=\"Waymo Open Dataset v1.2\",\n ),\n WaymoOpenDatasetConfig(\n name=\"v1.1\",\n version_str=\"v_1_1_0\",\n description=\"Waymo Open Dataset v1.1\",\n ),\n WaymoOpenDatasetConfig(\n name=\"v1.0\",\n version_str=\"v_1_0_0\",\n description=\"Waymo Open Dataset v1.0\",\n is_on_gcs=True,\n ),\n ]\n\n def _info(self) -> tfds.core.DatasetInfo:\n # Annotation descriptions are in the object development kit.\n annotations = {\n \"type\": tfds.features.ClassLabel(names=_OBJECT_LABELS),\n \"bbox\": tfds.features.BBoxFeature(),\n }\n\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n \"context\": {\n \"name\": tfds.features.Text()\n },\n \"timestamp_micros\": tf.int64,\n \"camera_FRONT\": {\n \"image\":\n tfds.features.Image(\n shape=(1280, 1920, 3), encoding_format=\"jpeg\"),\n \"labels\":\n tfds.features.Sequence(annotations)\n },\n \"camera_FRONT_LEFT\": {\n \"image\":\n tfds.features.Image(\n shape=(1280, 1920, 3), encoding_format=\"jpeg\"),\n \"labels\":\n tfds.features.Sequence(annotations)\n },\n \"camera_SIDE_LEFT\": {\n \"image\":\n tfds.features.Image(\n shape=(886, 1920, 3), encoding_format=\"jpeg\"),\n \"labels\":\n tfds.features.Sequence(annotations)\n },\n \"camera_FRONT_RIGHT\": {\n \"image\":\n tfds.features.Image(\n shape=(1280, 1920, 3), encoding_format=\"jpeg\"),\n \"labels\":\n tfds.features.Sequence(annotations)\n },\n \"camera_SIDE_RIGHT\": {\n \"image\":\n tfds.features.Image(\n shape=(886, 1920, 3), encoding_format=\"jpeg\"),\n \"labels\":\n tfds.features.Sequence(annotations)\n },\n }),\n homepage=_HOMEPAGE_URL,\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n \"\"\"Returns the SplitGenerators.\n\n Args:\n dl_manager: Download manager object.\n\n Returns:\n SplitGenerators.\n \"\"\"\n\n # Training set\n train_files = tf.io.gfile.glob(\n os.path.join(self.builder_config.cloud_bucket,\n \"training/segment*camera*\"))\n logging.info(\"Train files: %s\", train_files)\n\n # Validation set\n validation_files = tf.io.gfile.glob(\n os.path.join(self.builder_config.cloud_bucket,\n \"validation/segment*camera*\"))\n logging.info(\"Validation files: %s\", validation_files)\n\n split_generators = [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n gen_kwargs={\n \"tf_record_files\": train_files,\n },\n ),\n tfds.core.SplitGenerator(\n name=tfds.Split.VALIDATION,\n gen_kwargs={\n \"tf_record_files\": validation_files,\n },\n ),\n ]\n\n # Testing set (Only available in Waymo Open Dataset v1.2)\n if self.builder_config.name == \"v_1_2\":\n test_files = tf.io.gfile.glob(\n os.path.join(self.builder_config.cloud_bucket,\n \"testing/segment*camera*\"))\n logging.info(\"Testing files: %s\", test_files)\n\n split_generators.append(\n tfds.core.SplitGenerator(\n name=tfds.Split.TEST,\n gen_kwargs={\n \"tf_record_files\": 
test_files,\n },\n ))\n\n return split_generators\n\n def _build_pcollection(self, pipeline, tf_record_files):\n \"\"\"Generate examples as dicts.\n\n Args:\n pipeline: Apache Beam pipeline.\n tf_record_files: .tfrecord files.\n\n Returns:\n Dict of examples.\n \"\"\"\n beam = tfds.core.lazy_imports.apache_beam\n\n def _process_example(tf_record_file):\n for image_and_annotation in _generate_images_and_annotations(\n tf_record_file):\n key = \"%s:%s\" % (image_and_annotation[\"context\"][\"name\"],\n image_and_annotation[\"timestamp_micros\"])\n yield key, image_and_annotation\n\n return (pipeline\n | beam.Create(tf_record_files)\n | beam.FlatMap(_process_example))\n\n\ndef _generate_images_and_annotations(tf_record_file):\n \"\"\"Yields the images and annotations from a given file.\n\n Args:\n tf_record_file: .tfrecord files.\n\n Yields:\n Waymo images and annotations.\n \"\"\"\n # Go through all frames\n dataset = tf.data.TFRecordDataset(tf_record_file, compression_type=\"\")\n for data in tfds.as_numpy(dataset):\n frame = open_dataset.Frame()\n frame.ParseFromString(bytearray(data)) # pytype: disable=wrong-arg-types\n\n image_and_annotation = {\n \"context\": {\n \"name\": frame.context.name\n },\n \"timestamp_micros\": frame.timestamp_micros\n }\n\n camera_calibration = {\n calibration.name: calibration\n for calibration in frame.context.camera_calibrations\n }\n camera_labels = {label.name: label for label in frame.camera_labels}\n\n # Go through all 5 camera images in the frame\n for frame_image in frame.images:\n labels = None\n if frame_image.name in camera_labels:\n image_height = camera_calibration[frame_image.name].height\n image_width = camera_calibration[frame_image.name].width\n labels = _convert_labels(camera_labels[frame_image.name], image_width,\n image_height)\n\n camera_name = open_dataset.CameraName.Name.Name(frame_image.name)\n image_and_annotation[\"camera_\" + camera_name] = {\n \"image\": frame_image.image,\n \"labels\": labels\n }\n\n yield image_and_annotation\n\n\ndef _convert_labels(raw_labels, image_width, image_height):\n \"\"\"Convert labels to bounding boxes.\n\n Args:\n raw_labels: Raw label data.\n image_width: Width of the Waymo images.\n image_height: Height of the Waymo images.\n\n Returns:\n List of dicts with the label type and the corresponding bounding boxes.\n \"\"\"\n return [\n { # pylint: disable=g-complex-comprehension\n \"type\": raw_label.type,\n \"bbox\": _build_bounding_box(raw_label.box, image_width, image_height)\n } for raw_label in raw_labels.labels\n ]\n\n\ndef _build_bounding_box(open_dataset_box, image_width, image_height):\n \"\"\"Builds and returns TFDS bounding box.\n\n Args:\n open_dataset_box: Bounding box center x,y coordinates and its length, width.\n image_width: Width of the Waymo images.\n image_height: Height of the Waymo images.\n\n Returns:\n tfds.features.BBox.\n \"\"\"\n\n center_x = open_dataset_box.center_x\n center_y = open_dataset_box.center_y\n length = open_dataset_box.length\n width = open_dataset_box.width\n\n return tfds.features.BBox(\n ymin=max((center_y - (width / 2)) / image_height, 0.0),\n ymax=min((center_y + (width / 2)) / image_height, 1.0),\n xmin=max((center_x - (length / 2)) / image_width, 0.0),\n xmax=min((center_x + (length / 2)) / image_width, 1.0),\n )\n", "# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the 
License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Generate Smallnorb-like files, smaller and with random data.\n\n\"\"\"\n\nimport os\n\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_datasets.core import utils\nfrom tensorflow_datasets.testing import test_utils\n\nNUM_IMAGES = 5\nFACTOR_VALUES = [\n list(range(10)),\n list(range(9)),\n list(range(0, 36, 2)),\n list(range(6)),\n]\nTRAINING_OUTPUT_NAME = \"smallnorb-5x46789x9x18x6x2x96x96-training\"\nTESTING_OUTPUT_NAME = \"smallnorb-5x01235x9x18x6x2x96x96-testing\"\n\nflags.DEFINE_string(\"tfds_dir\", None, \"Path to tensorflow_datasets directory.\")\nFLAGS = flags.FLAGS\n\n\ndef write_binary_matrix(filename, array):\n \"\"\"Writes array as a binary formatted matrix to the file.\n\n The file format is described on the data set page:\n https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/\n\n Args:\n filename: String with path to the file.\n array: Numpy array that should be written to the file.\n \"\"\"\n with tf.io.gfile.GFile(filename, \"wb\") as f:\n\n # All data is stored in little-endian byte order.\n int32_dtype = np.dtype(\"int32\").newbyteorder(\"<\")\n\n # The first 4 bytes specify the data type.\n if array.dtype.str == \"<i4\":\n # Magic code for little-endian int32.\n f.write(np.asarray(507333716, dtype=int32_dtype).tobytes())\n elif array.dtype.str == \"|u1\":\n # Magic code for uint8.\n f.write(np.asarray(507333717, dtype=int32_dtype).tobytes())\n else:\n raise ValueError(\"Array data type %r not supported.\" % array.dtype.str)\n\n # Next, we specify the number of dimensions of the array to be stored as a\n # 32 bit integer.\n f.write(np.asarray(array.ndim, int32_dtype).tobytes())\n\n # The shape of the array is saved as 32-bit integers. If there are less than\n # 3 dimensions, the shape is padded with ones.\n shape = list(array.shape) + [1] * max(0, 3 - array.ndim)\n f.write(np.asarray(shape, int32_dtype).tobytes())\n\n # Finally, the data is written as a C-contiguous matrix. 
There is no need to\n # check for the byte order as we checked for this when writing the magic\n # code.\n f.write(np.ascontiguousarray(array).tobytes())\n\n\ndef _create_chunk(prefix, random_state):\n \"\"\"Creates fake dat, cat, and info files with the given prefix.\"\"\"\n # Create the images.\n image_shape = (NUM_IMAGES, 2, 96, 96) # Data file contains pairs of images.\n images = random_state.randint(256, size=image_shape).astype(\"uint8\")\n write_binary_matrix(\"%s-dat.mat\" % prefix, images)\n\n # Create the class label file.\n class_labels = random_state.choice(range(5), size=(NUM_IMAGES))\n write_binary_matrix(\"%s-cat.mat\" % prefix, class_labels.astype(\"int32\"))\n\n # Create the auxiliary info file that contains additional labels.\n info = []\n for values in FACTOR_VALUES:\n info.append(random_state.choice(values, size=(NUM_IMAGES)))\n write_binary_matrix(\"%s-info.mat\" % prefix, np.array(info).T.astype(\"int32\"))\n\n\ndef _generate():\n \"\"\"Generates a fake data set and writes it to the fake_examples directory.\"\"\"\n tfds_dir = FLAGS.tfds_dir or str(utils.tfds_write_path())\n output_dir = os.path.join(tfds_dir, \"testing\", \"test_data\", \"fake_examples\",\n \"smallnorb\")\n test_utils.remake_dir(output_dir)\n random_state = np.random.RandomState(0)\n _create_chunk(os.path.join(output_dir, TRAINING_OUTPUT_NAME), random_state)\n _create_chunk(os.path.join(output_dir, TESTING_OUTPUT_NAME), random_state)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError(\"Too many command-line arguments.\")\n _generate()\n\n\nif __name__ == \"__main__\":\n app.run(main)\n", "# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tensorflow_datasets.core.features.dataset_feature.\"\"\"\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_datasets import testing\nfrom tensorflow_datasets.core import dataset_utils\nfrom tensorflow_datasets.core import decode as decode_lib\nfrom tensorflow_datasets.core import features as feature_lib\n\ntf.enable_v2_behavior()\n\n\nclass IncrementDecoder(decode_lib.Decoder):\n \"\"\"Basic decoder that just adds 1 to the encoded example.\"\"\"\n\n def decode_example(self, serialized_example):\n return serialized_example + 1\n\n\nclass DatasetDictFeatureTest(testing.FeatureExpectationsTestCase):\n\n def test_int(self):\n\n self.assertFeatureEagerOnly(\n feature=feature_lib.Dataset({'int': tf.int32}),\n shape={'int': ()}, # shape of each element of the dataset\n dtype={'int': tf.int32},\n serialized_info={\n 'int': feature_lib.TensorInfo(shape=(None,), dtype=tf.int32),\n },\n tests=[\n # Python array\n testing.FeatureExpectationItem(\n value=[{\n 'int': 1\n }, {\n 'int': 2\n }, {\n 'int': 3\n }],\n expected=tf.data.Dataset.from_tensor_slices({'int': [1, 2, 3]}),\n ),\n # Numpy array\n testing.FeatureExpectationItem(\n value=dataset_utils.as_numpy(\n tf.data.Dataset.from_tensor_slices(\n {'int': np.ones(shape=(3,), dtype=np.int32)})),\n 
expected=tf.data.Dataset.from_tensor_slices({'int': [1, 1, 1]}),\n ),\n # Dataset length doesn't matter\n testing.FeatureExpectationItem(\n value=dataset_utils.as_numpy(\n tf.data.Dataset.from_tensor_slices(\n {'int': np.ones(shape=(4,), dtype=np.int32)})),\n expected=tf.data.Dataset.from_tensor_slices(\n {'int': [1, 1, 1, 1]}),\n ),\n ],\n test_attributes=dict(_length=None))\n\n def test_label(self):\n\n self.assertFeatureEagerOnly(\n feature=feature_lib.Dataset(\n {\n 'label': feature_lib.ClassLabel(names=['left', 'right']),\n },\n length=None),\n shape={'label': ()},\n dtype={'label': tf.int64},\n serialized_info={\n 'label': feature_lib.TensorInfo(shape=(None,), dtype=tf.int64),\n },\n tests=[\n testing.FeatureExpectationItem(\n value=[{\n 'label': 'right'\n }, {\n 'label': 'left'\n }, {\n 'label': 'left'\n }],\n expected=tf.data.Dataset.from_tensor_slices(\n {'label': [1, 0, 0]}),\n ),\n # Variable sequence length\n testing.FeatureExpectationItem(\n value=dataset_utils.as_numpy(\n tf.data.Dataset.from_tensor_slices(\n {'label': ['right', 'left', 'right', 'left']})),\n expected=tf.data.Dataset.from_tensor_slices(\n {'label': [1, 0, 1, 0]}),\n ),\n ],\n test_attributes=dict(_length=None))\n\n def test_nested(self):\n\n self.assertFeatureEagerOnly(\n feature=feature_lib.Dataset({\n 'a': tf.string,\n 'b': {\n 'c': feature_lib.Tensor(shape=(4, 2), dtype=tf.int32),\n 'd': tf.uint8,\n }\n }, length=None),\n shape={\n 'a': (),\n 'b': {\n 'c': (4, 2),\n 'd': (),\n }\n },\n dtype={\n 'a': tf.string,\n 'b': {\n 'c': tf.int32,\n 'd': tf.uint8,\n }\n },\n tests=[\n testing.FeatureExpectationItem(\n value=dataset_utils.as_numpy(tf.data.Dataset.from_tensor_slices({\n 'a': ['aa', 'b', 'ccc'],\n 'b': {\n 'c': np.ones(shape=(3, 4, 2), dtype=np.int32),\n 'd': [1, 2, 3],\n }\n })),\n expected=tf.data.Dataset.from_tensor_slices({\n 'a': [\n tf.compat.as_bytes(t) for t in ('aa', 'b', 'ccc')\n ],\n 'b': {\n 'c': np.ones(shape=(3, 4, 2), dtype=np.int32),\n 'd': [1, 2, 3],\n }\n }),\n ),\n testing.FeatureExpectationItem(\n value=dataset_utils.as_numpy(tf.data.Dataset.from_tensor_slices({\n 'a': [str(i) for i in range(100)],\n 'b': { # pylint: disable=g-complex-comprehension\n 'c': [np.ones(shape=(4, 2), dtype=np.int32) for _ in range(100)],\n 'd': [5 for _ in range(100)],\n }\n })),\n expected=tf.data.Dataset.from_tensor_slices({\n 'a': [tf.compat.as_bytes(str(i)) for i in range(100)],\n 'b': {\n 'c': np.ones(shape=(100, 4, 2), dtype=np.int32),\n 'd': [5] * 100,\n }\n }),\n ),\n ],\n )\n\n def test_input_dict(self):\n\n self.assertFeatureEagerOnly(\n feature=feature_lib.Dataset({\n 'a': tf.string,\n 'b': {\n 'c': feature_lib.Tensor(shape=(4, 2), dtype=tf.int32),\n 'd': tf.uint8,\n }\n }, length=None),\n shape={\n 'a': (),\n 'b': {\n 'c': (4, 2),\n 'd': (),\n }\n },\n dtype={\n 'a': tf.string,\n 'b': {\n 'c': tf.int32,\n 'd': tf.uint8,\n }\n },\n tests=[\n testing.FeatureExpectationItem(\n value={\n 'a': ['aa', 'b', 'ccc'],\n 'b': {\n 'c': np.ones(shape=(3, 4, 2), dtype=np.int32),\n 'd': [1, 2, 3],\n }\n },\n expected=tf.data.Dataset.from_tensor_slices({\n 'a': [\n tf.compat.as_bytes(t) for t in ('aa', 'b', 'ccc')\n ],\n 'b': {\n 'c': np.ones(shape=(3, 4, 2), dtype=np.int32),\n 'd': [1, 2, 3],\n }\n }),\n ),\n testing.FeatureExpectationItem(\n value={\n 'a': [str(i) for i in range(100)],\n 'b': { # pylint: disable=g-complex-comprehension\n 'c': [np.ones(shape=(4, 2), dtype=np.int32) for _ in range(100)],\n 'd': [5 for _ in range(100)],\n }\n },\n expected=tf.data.Dataset.from_tensor_slices({\n 'a': 
[tf.compat.as_bytes(str(i)) for i in range(100)],\n 'b': {\n 'c': np.ones(shape=(100, 4, 2), dtype=np.int32),\n 'd': [5] * 100,\n }\n }),\n ),\n # Wrong length in one of the lists.\n testing.FeatureExpectationItem(\n value={\n 'a': ['aa'],\n 'b': {\n 'c': np.ones(shape=(3, 4, 2), dtype=np.int32),\n 'd': [1, 2, 3],\n }\n },\n raise_cls=ValueError,\n raise_msg='The length of all elements of one sequence should be the same.',\n ),\n ],\n )\n\n def test_decoding(self):\n\n self.assertFeatureEagerOnly(\n feature=feature_lib.Dataset({\n 'a': tf.string,\n 'b': {\n 'c': tf.uint8,\n }\n },\n length=None),\n shape={\n 'a': (),\n 'b': {\n 'c': (),\n }\n },\n dtype={\n 'a': tf.string,\n 'b': {\n 'c': tf.uint8,\n }\n },\n tests=[\n testing.FeatureExpectationItem(\n value=dataset_utils.as_numpy(\n tf.data.Dataset.from_tensor_slices({\n 'a': ['aa', 'b', 'ccc'],\n 'b': {\n 'c': [1, 2, 3],\n }\n })),\n decoders={\n 'b': {\n 'c': IncrementDecoder(),\n },\n },\n expected=tf.data.Dataset.from_tensor_slices({\n 'a': [tf.compat.as_bytes(t) for t in ('aa', 'b', 'ccc')],\n 'b': {\n 'c': [2, 3, 4],\n }\n }),\n ),\n ],\n )\n\n\nclass DatasetFeatureTest(testing.FeatureExpectationsTestCase):\n\n def test_int(self):\n\n self.assertFeatureEagerOnly(\n feature=feature_lib.Dataset(tf.int32, length=3),\n shape=(),\n dtype=tf.int32,\n tests=[\n # Python array\n testing.FeatureExpectationItem(\n value=[1, 2, 3],\n expected=tf.data.Dataset.from_tensor_slices([1, 2, 3]),\n ),\n # Numpy array\n testing.FeatureExpectationItem(\n value=np.ones(shape=(3,), dtype=np.int32),\n expected=tf.data.Dataset.from_tensor_slices([1, 1, 1]),\n ),\n # Datasets with a different lenght will fail on encoding.\n testing.FeatureExpectationItem(\n value=np.ones(shape=(4,), dtype=np.int32),\n raise_cls=ValueError,\n raise_msg='Error while serializing feature',\n ),\n ],\n )\n\n def test_label(self):\n\n self.assertFeatureEagerOnly(\n feature=feature_lib.Dataset(\n feature_lib.ClassLabel(names=['left', 'right']),),\n shape=(),\n dtype=tf.int64,\n tests=[\n testing.FeatureExpectationItem(\n value=['right', 'left', 'left'],\n expected=tf.data.Dataset.from_tensor_slices([1, 0, 0]),\n ),\n # Variable sequence length\n testing.FeatureExpectationItem(\n value=['right', 'left', 'right', 'left'],\n expected=tf.data.Dataset.from_tensor_slices([1, 0, 1, 0]),\n ),\n # Empty sequence length\n testing.FeatureExpectationItem(\n value=[],\n expected=[],\n ),\n ],\n )\n\n def test_getattr(self):\n feature = feature_lib.Dataset(\n feature_lib.ClassLabel(names=['left', 'right']),)\n self.assertEqual(feature.names, ['left', 'right'])\n\n feature = feature_lib.Dataset({\n 'label': feature_lib.ClassLabel(names=['left', 'right']),\n })\n self.assertEqual(feature['label'].names, ['left', 'right'])\n\n def test_metadata(self):\n feature = feature_lib.Dataset(feature_lib.ClassLabel(num_classes=2))\n feature.feature.names = ['left', 'right']\n with testing.tmp_dir() as tmp_dir:\n feature.save_metadata(data_dir=tmp_dir, feature_name='test')\n\n feature2 = feature_lib.Dataset(feature_lib.ClassLabel(num_classes=2))\n feature2.load_metadata(data_dir=tmp_dir, feature_name='test')\n self.assertEqual(feature2.feature.names, ['left', 'right'])\n\n\nif __name__ == '__main__':\n testing.test_main()\n", "# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Generate ARC-like files, smaller and with random data.\"\"\"\n\nimport json\nimport os\n\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_datasets.core.utils import py_utils\nfrom tensorflow_datasets.testing import test_utils\n\nflags.DEFINE_string(\n name=\"tfds_dir\",\n default=py_utils.tfds_dir(),\n help=\"Path to tensorflow_datasets directory\")\nFLAGS = flags.FLAGS\n\n_COMMIT = \"0123456789abcdef0123456789abcdef01234567\" # fake commit\n_EXTRACT_SUBDIR = \"fchollet-ARC-{}\".format(_COMMIT[:7])\nNUM_TASKS = {\"training\": 10, \"evaluation\": 5}\n\n\ndef examples_dir():\n return os.path.join(FLAGS.tfds_dir, \"testing\", \"test_data\", \"fake_examples\",\n \"arc\", _EXTRACT_SUBDIR, \"data\")\n\n\ndef arc_dir(name):\n return os.path.join(examples_dir(), name)\n\n\ndef make_grid_data():\n size = np.random.randint(30, size=2) + 1\n grid = np.random.randint(10, size=size[0] * size[1]).reshape(size)\n return grid.tolist()\n\n\ndef make_pair():\n return {\n \"input\": make_grid_data(),\n \"output\": make_grid_data(),\n }\n\n\ndef make_task():\n num_train_pairs = np.random.randint(3) + 2 # 2 to 4\n num_test_pairs = np.random.randint(2) + 1 # 1 or 2\n return {\n \"train\": [make_pair() for _ in range(num_train_pairs)],\n \"test\": [make_pair() for _ in range(num_test_pairs)],\n }\n\n\ndef write_task(output_dir, task_id, task):\n path = os.path.join(output_dir, \"{}.json\".format(task_id))\n with tf.io.gfile.GFile(path, \"w\") as f:\n json.dump(task, f)\n\n\ndef main(_):\n task_index = np.random.randint(2**31)\n for subset in [\"training\", \"evaluation\"]:\n output_dir = arc_dir(subset)\n test_utils.remake_dir(output_dir)\n num_tasks = NUM_TASKS[subset]\n for _ in range(num_tasks):\n task_index += 1\n task_id = \"{:08x}\".format(task_index)\n task = make_task()\n write_task(output_dir, task_id, task)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n" ]
[ [ "tensorflow.compat.v2.io.gfile.exists" ], [ "numpy.clip" ], [ "tensorflow.compat.v2.io.gfile.GFile", "tensorflow.compat.v2.io.gfile.listdir" ], [ "tensorflow.compat.v2.image.encode_jpeg", "tensorflow.compat.v2.image.decode_png" ], [ "tensorflow.compat.v2.data.TFRecordDataset" ], [ "tensorflow.compat.v2.io.gfile.GFile", "numpy.asarray", "numpy.ascontiguousarray", "numpy.dtype", "numpy.array", "numpy.random.RandomState" ], [ "tensorflow.compat.v2.enable_v2_behavior", "tensorflow.compat.v2.data.Dataset.from_tensor_slices", "tensorflow.compat.v2.compat.as_bytes", "numpy.ones" ], [ "tensorflow.compat.v2.io.gfile.GFile", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FedeMPouzols/cngi_prototype
[ "421a99c460f4092b79120f5bec122de7ce9b8b96", "421a99c460f4092b79120f5bec122de7ce9b8b96" ]
[ "cngi/vis/timeaverage.py", "ngcasa/arachne/_imaging_utils/_dask_utils.py" ]
[ "# Copyright 2019 AUI, Inc. Washington DC, USA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nthis module will be included in the api\n\"\"\"\n\n################################################\ndef timeaverage(xds, bin=1, width=None, span='state', maxuvwdistance=None):\n \"\"\"\n Average data across the time axis\n\n Parameters\n ----------\n xds : xarray.core.dataset.Dataset\n input Visibility Dataset\n bin : int\n number of adjacent times to average, used when width is None. Default=1 (no change)\n width : str\n resample to width freq (i.e. '10s') and produce uniform time steps over span. Ignores bin. Default None uses bin value.\n see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html.\n span : str\n span of the binning. Allowed values are 'scan', 'state' or 'both'. Default is 'state' (meaning all states in a scan)\n maxuvwdistance (future) : float\n NOT IMPLEMENTED. maximum separation of start-to-end baselines that can be included in an average. (meters)\n\n Returns\n -------\n xarray.core.dataset.Dataset\n New Visibility Dataset\n \"\"\"\n import xarray\n import numpy as np\n\n intnan = np.full((1), np.nan, dtype=np.int32)[0]\n\n #######\n # mapped out over groups\n def timebin(gxds, stacked=True):\n if stacked: gxds = gxds.unstack('stb')\n \n # mean coarsen/resample everything but data and weight\n dvs = [dv for dv in gxds.data_vars if dv not in ['DATA', 'CORRECTED_DATA', 'WEIGHT']] + list(gxds.coords)\n if width is None:\n nxds = gxds[dvs].coarsen(time=bin, boundary='pad').mean()\n else:\n nxds = gxds[dvs].resample(time=width).mean()\n \n # sum coarsen/resample weight\n if 'WEIGHT' in gxds.data_vars:\n if width is None:\n nxds['WEIGHT'] = gxds.WEIGHT.coarsen(time=bin, boundary='pad').sum()\n else:\n nxds['WEIGHT'] = gxds.WEIGHT.resample(time=width).sum()\n \n # use weight in coarsening/resampling data cols\n for col in ['DATA', 'CORRECTED_DATA']:\n if (col in gxds.data_vars) and ('WEIGHT' in gxds.data_vars):\n if width is None:\n xda = (gxds[col] * gxds.WEIGHT).coarsen(time=bin, boundary='pad').sum()\n else:\n xda = (gxds[col] * gxds.WEIGHT).resample(time=width).sum()\n nxds[col] = xda / nxds['WEIGHT']\n \n if stacked: nxds = nxds.stack({'stb': ('time', 'baseline')})\n return nxds\n\n #############\n # span across state by grouping on scans (keeps scans separate)\n if span == 'state':\n txds = xds.stack({'stb': ('time', 'baseline')})\n txds = txds.groupby('SCAN_NUMBER').map(timebin)\n txds = txds.where(txds.SCAN_NUMBER.notnull() & (txds.SCAN_NUMBER > intnan), drop=True).unstack('stb')\n txds = txds.transpose('time', 'baseline', 'chan', 'pol', 'uvw_index', 'spw_id', 'pol_id')\n\n # span across scans by grouping on states (keeps states separate)\n elif span == 'scan':\n txds = xds.stack({'stb': ('time', 'baseline')})\n txds = txds.groupby('STATE_ID').map(timebin)\n txds = txds.where(txds.STATE_ID.notnull() & (txds.STATE_ID > intnan), drop=True).unstack('stb')\n txds = txds.transpose('time', 'baseline', 'chan', 'pol', 'uvw_index', 'spw_id', 
'pol_id')\n\n # span across both\n else:\n txds = timebin(xds, stacked=False)\n\n # coarsen can change int/bool dtypes to float, so they need to be manually set back\n for dv in txds.data_vars:\n txds[dv] = txds[dv].astype(xds[dv].dtype)\n\n # put the attributes back in\n txds = txds.assign_attrs(xds.attrs)\n\n # verify values\n #cxds1 = xds_state.assign_coords({'time_s': xds_state.time.astype('datetime64[s]')}).swap_dims({'time':'time_s'})\n #cxds2 = txds.assign_coords({'time_s': txds.time.astype('datetime64[s]')}).swap_dims({'time':'time_s'})\n #cxds = cxds1.DATA - cxds2.DATA\n #cxds[51].values\n\n return txds\n", "import pandas as pd\nimport dask\nimport dask.array as da\n\ndef _find_unique_subset(a,b):\n a_pd = pd.DataFrame(a)\n b_pd = pd.DataFrame(b)\n \n a_pd = a_pd.append(b_pd)\n \n a_pd = a_pd.drop_duplicates(a_pd.columns[-1])\n #print(a_pd.columns[-1])\n return a_pd.to_numpy()\n #return da.from_array(a_pd.to_numpy(),chunks=chunks)\n\ndef _tree_combine_list(list_to_sum,func):\n import dask.array as da\n while len(list_to_sum) > 1:\n new_list_to_sum = []\n for i in range(0, len(list_to_sum), 2):\n if i < len(list_to_sum) - 1:\n lazy = dask.delayed(_find_unique_subset)(list_to_sum[i],list_to_sum[i+1])\n else:\n lazy = list_to_sum[i]\n new_list_to_sum.append(lazy)\n list_to_sum = new_list_to_sum\n return list_to_sum[0]\n" ]
[ [ "numpy.full" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
MaayanLab/jupyter-template-catalog
[ "212b455e62d49f04dcee73bb6eeb5312b71ba8ef", "212b455e62d49f04dcee73bb6eeb5312b71ba8ef" ]
[ "appyters/STEAP_post_processing_analysis/scripts/convert_output_to_dataframe.py", "appyters/scRNA_seq/utils.py" ]
[ "# to allow importing to work correctly (in a dirty way)\nimport os\nimport sys\nimport inspect\nfilepath = os.path.abspath(inspect.getfile(inspect.currentframe()))\ncurrentdir = os.path.dirname(filepath)\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\n\nimport constants\nimport pandas as pd\nfrom pathlib import Path\nfrom statsmodels.stats.multitest import multipletests\nfrom typing import Dict\n\n\ndef find_csv_file(directory: str) -> Dict[str, Dict[str, str]]:\n \"\"\"\n Finds the priorirization.csv file in the input directory and uses the\n directory names to determine which method was used. This information is\n saced in a dictionary as:\n {phenotype : {method : path/to/file}}\n \"\"\"\n file_dict = {}\n for path in Path(directory).rglob('prioritization.csv'):\n full_path = str(path)\n (name, method, __, __) = path.parts[-4:]\n name = name[8:] # 'CELLECT-' is 8 long\n method = method[8:]\n if name not in file_dict:\n file_dict[name] = {}\n file_dict[name].update({method: full_path})\n return file_dict\n\n\ndef make_df(directory: str) -> pd.DataFrame:\n \"\"\"\n Converts the prioritization.csv files in the directories to a\n pandas dataframe.\n\n Parameters\n ----------\n directory : str\n The output directory of CELLECT.\n There should be three subdirectories inside this directory.\n These subdirecories start with the name \"CELLECT-\" and\n end with the method used (H-MAGMA, LDSC or MAGMA)\n\n Returns\n -------\n dataframe : pd.DataFrame\n The output pandas dataframe contains information about the\n phenotype, celltypes, method and enrichment (beta) values\n with corresponding p-values.\n \"\"\"\n file_dict = find_csv_file(directory)\n df_list_1 = []\n for name, d in file_dict.items():\n df_list_2 = []\n for method, file_path in d.items():\n df = pd.read_csv(file_path)\n df['method'] = method\n df.sort_values(\n by=['gwas', 'specificity_id', 'annotation'],\n inplace=True)\n df_list_2.append(df)\n df_list_1.extend(df_list_2)\n\n df_all = pd.concat(df_list_1, ignore_index=True)\n # count the number of methods (not used atm)\n df_all = df_all.merge(\n df_all.groupby(\n ['gwas', 'specificity_id', 'annotation']\n ).size().to_frame('n_methods'),\n on=['gwas', 'specificity_id', 'annotation'], how='left')\n # count the number of annotations/celltypes\n df_all.sort_values(by=['gwas', 'method'], inplace=True)\n df_all.reset_index(inplace=True, drop=True)\n return df_all\n\n\ndef pvalue_correction(\n dataframe: pd.DataFrame,\n method: str = 'bonferroni'\n) -> pd.DataFrame:\n '''\n Corrects the pvalues in the input pandas dataframe for the\n multiple testing problem. 
The resulting output dataframe is\n the input dataframe with an additional corrected pvalues column.\n\n Parameters\n ----------\n dataframe : pd.DataFrame\n The input pandas dataframe contains information about the phenotype,\n celltypes, method and enrichment (beta) values with corresponding\n p-values.\n method : str\n The pvalue correction method (default bonferroni).\n Other available methods are documented in\n https://www.statsmodels.org/stable/generated/statsmodels.stats.multitest.multipletests.html\n\n Returns\n -------\n dataframe : pd.DataFrame\n The output pandas dataframe equals the input dataframe with an\n additional column containing the corrected pvalues.\n '''\n df_p = dataframe.pivot_table(\n values='pvalue',\n index=['method', 'gwas', 'specificity_id'],\n columns=['annotation']\n )\n df_p = df_p.apply(\n lambda row: multipletests(row.dropna(), method=method)[1],\n axis=1,\n result_type='reduce'\n ).apply(pd.Series).stack().reset_index().drop('level_3', axis=1)\n df_p.rename(columns={0: f\"pvalue_{method}\"}, inplace=True)\n df_p['annotation'] = dataframe['annotation']\n corrected_df = pd.merge(\n dataframe,\n df_p,\n on=['gwas', 'specificity_id', 'annotation', 'method']\n )\n return corrected_df\n\n\nif __name__ == \"__main__\":\n df_all = make_df(constants.CELLECT_OUTDIR)\n df_all = pvalue_correction(df_all, method=constants.PVAL_CORRECTION)\n df_all.to_hdf('data/data.h5', key='df_all', mode='w')\n", "# Basic libraries\nimport pandas as pd\nimport requests, json\nimport time\nimport numpy as np\nimport warnings\nimport re\nimport random\nfrom collections import defaultdict\n\n# Visualization\nimport scipy.stats as ss\nimport plotly\nfrom plotly import tools\nimport plotly.express as px\nimport plotly.graph_objs as go\nimport matplotlib\nimport matplotlib.pyplot as plt; plt.rcdefaults()\nfrom matplotlib import rcParams\nfrom matplotlib.lines import Line2D\nfrom matplotlib_venn import venn2, venn3\nimport IPython\nfrom IPython.display import HTML, display, Markdown, IFrame, FileLink\nfrom itertools import combinations, permutations\nfrom scipy import stats\nimport seaborn as sns\n# Data analysis\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import quantile_transform\nfrom sklearn import cluster\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.manifold import TSNE\nimport umap\nfrom rpy2 import robjects\nfrom rpy2.robjects import r, pandas2ri\nfrom magic import MAGIC as MG\nimport scanpy as sc\nimport anndata\nfrom maayanlab_bioinformatics.dge.characteristic_direction import characteristic_direction\nfrom maayanlab_bioinformatics.dge.limma_voom import limma_voom_differential_expression\nimport pandas as pd\nimport sys, h5py, time\nimport scanpy as sc\nimport anndata\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport umap.umap_ as umap\nfrom sklearn.decomposition import NMF\nfrom statsmodels.stats.multitest import multipletests\n\nfrom IPython.display import display, HTML\nfrom maayanlab_bioinformatics.enrichment.crisp import enrich_crisp, fisher_overlap\n\n# Bokeh\nfrom bokeh.io import output_notebook\nfrom bokeh.plotting import figure, show\nfrom bokeh.models import HoverTool, CustomJS, ColumnDataSource, Span, Select, Legend, PreText, Paragraph, LinearColorMapper, ColorBar, CategoricalColorMapper\nfrom bokeh.layouts import layout, row, column, gridplot\nfrom bokeh.palettes import all_palettes\nimport colorcet as cc\nfrom bokeh.palettes import Category20\n\nfrom plotly.offline import 
init_notebook_mode\ninit_notebook_mode(connected = False)\noutput_notebook()\n\n\npd.set_option('display.max_columns', 1000) \npd.set_option('display.max_rows', 1000)\n\n\ndef check_files(fname):\n if fname == \"\":\n raise IOError\n if fname.endswith(\".txt\") == False and fname.endswith(\".csv\") ==False and fname.endswith(\".tsv\")==False:\n raise IOError\ndef check_df(df, col):\n if col not in df.columns:\n raise IOError\n\ndef load_seurat_files(mtx_filename, gene_filename, barcodes_filename):\n \n adata = anndata.read_mtx(mtx_filename).T\n with open(barcodes_filename, \"r\") as f:\n cells = f.readlines()\n cells = [x.strip() for x in cells]\n genes = pd.read_csv(\n gene_filename,\n header=None,\n sep='\\t',\n )\n \n adata.var['gene_ids'] = genes.iloc[:, 0].values \n adata.var['gene_symbols'] = genes.iloc[:, 1].values\n adata.var_names = adata.var['gene_symbols']\n adata.var_names_make_unique(join=\"-\")\n \n \n adata.obs['barcode'] = cells\n adata.obs_names = cells\n adata.obs_names_make_unique(join=\"-\")\n return adata\n\ndef load_metadata(adata, meta_data_filename, meta_class_column_name):\n if meta_data_filename != \"\":\n if meta_data_filename.endswith(\".csv\"):\n meta_df = pd.read_csv(meta_data_filename, index_col=0)\n else:\n meta_df = pd.read_csv(meta_data_filename, sep=\"\\t\", index_col=0)\n if meta_class_column_name == \"\":\n raise Exception (\"Run time error: Please provide a proper column name for sample classes in metadata\")\n try:\n check_df(meta_df, meta_class_column_name)\n except:\n raise Exception (f\"Error! Column '{meta_class_column_name}' is not in metadata\")\n adata.obs[meta_class_column_name] = meta_df.loc[:, meta_class_column_name]\n adata.var_names_make_unique()\n\n else:\n meta_class_column_name = \"Class\"\n adata.obs[meta_class_column_name] = [\"Class0\"]*adata.n_obs\n adata.var_names_make_unique()\n \n return adata, meta_class_column_name\n\ndef load_data(dataset_name, rnaseq_data_filename, mtx_data_filename, gene_data_filename, barcode_data_filename, meta_data_filename=None, meta_class_column_name=None, table_counter=1):\n adata = None\n if rnaseq_data_filename != \"\":\n check_files(rnaseq_data_filename)\n try:\n if rnaseq_data_filename.endswith(\".csv\"):\n expr_df = pd.read_csv(rnaseq_data_filename, index_col=0).sort_index()\n else:\n expr_df = pd.read_csv(rnaseq_data_filename, index_col=0, sep=\"\\t\").sort_index()\n\n # convert df into anndata\n # adata matrix: sample x gene\n adata = anndata.AnnData(expr_df.T)\n adata.X = adata.X.astype('float64') \n\n except:\n print(\"Error! Input files are in a wrong format. \\\n Please check if the index of the expression data are genes and the columns are sample IDs. 
\\\n Sample IDs in the expression data and the metadata should be matched\")\n\n del expr_df\n elif mtx_data_filename != \"\":\n adata = load_seurat_files(mtx_data_filename, gene_data_filename, barcode_data_filename)\n if adata is not None:\n # load meta data\n adata, meta_class_column_name = load_metadata(adata, meta_data_filename, meta_class_column_name) \n \n # add batch info\n if meta_class_column_name == \"\":\n meta_class_column_name = \"batch\"\n else:\n adata.obs = adata.obs.rename(columns={meta_class_column_name: 'class'})\n meta_class_column_name = \"class\"\n \n adata.obs[\"batch\"] = dataset_name\n adata.obs.index = adata.obs.index + \"-\" + dataset_name\n \n display_statistics(adata, \"### Statistics of data ###\") \n return adata, table_counter, meta_class_column_name\ndef create_download_link(df, title = \"Download CSV file: {}\", filename = \"data.csv\"): \n if filename.endswith(\".csv\"):\n df.to_csv(filename)\n elif filename.endswith(\".h5ad\"): #anndata\n import os, tempfile, shutil\n tmp = tempfile.mktemp()\n df.write(tmp)\n shutil.copyfile(tmp, filename)\n os.unlink(tmp)\n html = \"<a href=\\\"./{}\\\" target='_blank'>{}</a>\".format(filename, title.format(filename))\n return HTML(html)\n\ndef display_link(url, title=None):\n if title is None:\n title = url\n raw_html = '<a href=\"%s\" target=\"_blank\">%s</a>' % (url, title)\n \n return display(HTML(raw_html))\n\ndef display_object(counter, caption, df=None, istable=True, subcounter=\"\"):\n if df is not None:\n display(df)\n if istable == True:\n display(Markdown(\"*Table {}{}. {}*\".format(counter, subcounter, caption)))\n else:\n display(Markdown(\"*Figure {}{}. {}*\".format(counter, subcounter, caption)))\n counter += 1\n return counter\n\n\ndef display_statistics(data, description=\"\"):\n print(description)\n print(\"Sample size:\", data.n_obs)\n print(\"Feature size:\", data.n_vars)\n \ndef autoselect_color_by(sample_metadata):\n '''Automatically select a column in the sample_metadata df for coloring.\n '''\n color_by = None\n color_type = 'categorical'\n meta_col_nuniques = sample_metadata.nunique()\n # pick a column with the cardinality between 2 and 10\n meta_col_nuniques = meta_col_nuniques.loc[meta_col_nuniques.between(1, 30)]\n if len(meta_col_nuniques) > 0:\n color_by = meta_col_nuniques.index[0]\n else: # pick a numeric column\n is_number = np.vectorize(lambda x: np.issubdtype(x, np.number))\n meta_col_dtypes = sample_metadata.dtypes\n try:\n meta_col_is_number = is_number(meta_col_dtypes)\n if meta_col_is_number.sum() > 0:\n color_by = meta_col_dtypes.loc[meta_col_is_number].index[0]\n color_type = 'continuous'\n except:\n pass \n \n return color_by, color_type\n\n\n\n\n\n#############################################\n########## 2. 
Plot\n#############################################\n\n\ndef normalize(adata, normalization_method, log_normalization):\n tmp_adata = adata.copy()\n if normalization_method == \"Seurat\":\n sc.pp.filter_cells(tmp_adata, min_genes=200)\n sc.pp.filter_genes(tmp_adata, min_cells=3)\n sc.pp.normalize_total(tmp_adata, target_sum=1e4) \n if log_normalization:\n sc.pp.log1p(tmp_adata)\n sc.pp.scale(tmp_adata, max_value=10)\n elif normalization_method == \"Zheng17\":\n sc.pp.recipe_zheng17(adata_merged, log=log_normalization, plot=False)\n elif normalization_method == \"Weinreb17\":\n sc.pp.recipe_weinreb17(adata_merged, log=log_normalization)\n return tmp_adata\n \n\n\ndef run_magic(dataset, solver='exact'):\n # Run imputation\n dataset.uns['magic'] = normalize_magic(dataset.to_df(), solver=solver).T\n return dataset\ndef normalize_magic(dataset, solver='exact'):\n \n magic_op = MG(solver=solver)\n data_magic = magic_op.fit_transform(dataset)\n return data_magic.transpose()\n \n\ndef run_clustergrammer(dataset, meta_class_column_name, magic_normalization=False, nr_genes=800, metadata_cols=None, filter_samples=True,gene_list=None):\n # Subset the expression DataFrame using top 800 genes with largest variance\n if magic_normalization == True:\n data = dataset.uns[\"magic\"]\n else:\n data = dataset.to_df().T\n meta_df = dataset.obs\n variances = np.var(data, axis=1)\n srt_idx = variances.argsort()[::-1]\n if gene_list == None or len(gene_list) == 0:\n expr_df_sub = data.iloc[srt_idx].iloc[:nr_genes]\n else:\n gene_list = gene_list.split(\"\\n\")\n common_gene_list = list(set(gene_list).intersection(set(data.index)))\n expr_df_sub = data.loc[common_gene_list, :]\n assert len(expr_df_sub.index) > 0\n # prettify sample names\n sample_names = ['::'.join([y, x]) for x,y in\n zip(meta_df[meta_class_column_name], expr_df_sub.columns)]\n expr_df_sub.columns = sample_names\n expr_df_sub.index = [\"Gene: \"+str(x) for x in expr_df_sub.index]\n sample_name = [\"Sample: \"+x for x in sample_names]\n expr_df_sub.columns = sample_name\n\n\n treatment_type = [\"Class: \"+ x.split(\"::\")[1] for x in sample_names]\n new_series = pd.DataFrame(treatment_type).T\n new_series.columns = expr_df_sub.columns\n expr_df_sub = pd.concat([new_series, expr_df_sub], axis=0)\n\n index_list = list(expr_df_sub.index)\n index_list = [\"\" if \"Gene\" not in str(x) else x for x in index_list]\n expr_df_sub.index = index_list\n #subset of expr_df_sub\n if len(expr_df_sub.columns) > 50:\n print(\"Input data is too large. Random sampling (n=50) is performed.\")\n expr_df_sub = expr_df_sub.sample(50, axis=1)\n expr_df_sub_file = \"expr_df_sub_file.txt\"\n expr_df_sub.to_csv(\"expr_df_sub_file.txt\", sep='\\t')\n # POST the expression matrix to Clustergrammer and get the URL\n clustergrammer_url = 'https://amp.pharm.mssm.edu/clustergrammer/matrix_upload/'\n r = requests.post(clustergrammer_url, files={'file': open(expr_df_sub_file, 'rb')}).text\n\n return r\n \n#############################################\n########## 2. 
Plot\n#############################################\n\ndef plot_clustergrammar(clustergrammer_url):\n clustergrammer_url = clustergrammer_url.replace(\"http:\", \"https:\")\n display_link(clustergrammer_url, clustergrammer_url)\n # Embed\n display(IPython.display.IFrame(clustergrammer_url, width=\"1000\", height=\"1000\"))\n\n\n\n\n\ndef get_signatures(classes, dataset, method, meta_class_column_name, cluster=True, filter_genes=True):\n \n robjects.r('''edgeR <- function(rawcount_dataframe, g1, g2) {\n # Load packages\n suppressMessages(require(limma))\n suppressMessages(require(edgeR))\n colData <- as.data.frame(c(rep(c(\"Control\"),length(g1)),rep(c(\"Condition\"),length(g2))))\n rownames(colData) <- c(g1,g2)\n colnames(colData) <- c(\"group\")\n colData$group = relevel(as.factor(colData$group), \"Control\")\n y <- DGEList(counts=rawcount_dataframe, group=colData$group)\n y <- calcNormFactors(y)\n y <- estimateCommonDisp(y)\n y <- estimateTagwiseDisp(y)\n et <- exactTest(y)\n res <- topTags(et, n=Inf)\n # Return\n res <- as.data.frame(res)\n results <- list(\"edgeR_dataframe\"= res, \"rownames\"=rownames(res))\n return (results)\n }\n ''')\n\n robjects.r('''deseq2 <- function(rawcount_dataframe, g1, g2) {\n # Load packages\n suppressMessages(require(DESeq2))\n colData <- as.data.frame(c(rep(c(\"Control\"),length(g1)),rep(c(\"Condition\"),length(g2))))\n rownames(colData) <- c(g1,g2)\n colnames(colData) <- c(\"group\")\n colData$group = relevel(as.factor(colData$group), \"Control\")\n dds <- DESeqDataSetFromMatrix(countData = rawcount_dataframe, colData = colData, design=~(group))\n dds <- DESeq(dds)\n res <- results(dds)\n res[which(is.na(res$padj)),] <- 1\n res <- as.data.frame(res)\n results <- list(\"DESeq_dataframe\"= res, \"rownames\"=rownames(res))\n return(results)\n }\n ''')\n \n expr_df = dataset.to_df().T\n raw_expr_df = dataset.raw.to_adata().to_df().T\n meta_df = dataset.obs\n \n signatures = dict()\n\n \n if cluster == True:\n # cluster 0 vs rest\n sc.tl.rank_genes_groups(dataset, meta_class_column_name, method='t-test', use_raw=True)\n \n for cls1 in classes:\n signature_label = f\"{cls1} vs. rest\"\n print(\"Analyzing.. 
{} using {}\".format(signature_label, method))\n cls1_sample_ids = meta_df.loc[meta_df[meta_class_column_name]==cls1, :].index.tolist() #case\n non_cls1_sample_ids = meta_df.loc[meta_df[meta_class_column_name]!=cls1, :].index.tolist() #control\n sample_ids = non_cls1_sample_ids.copy()\n sample_ids.extend(cls1_sample_ids)\n tmp_raw_expr_df = raw_expr_df[sample_ids]\n \n if method == \"limma\":\n signature = limma_voom_differential_expression(tmp_raw_expr_df.loc[:, non_cls1_sample_ids], tmp_raw_expr_df.loc[:, cls1_sample_ids])\n elif method == \"characteristic_direction\":\n signature = characteristic_direction(expr_df.loc[:, non_cls1_sample_ids], expr_df.loc[:, cls1_sample_ids], calculate_sig=False)\n elif method == \"edgeR\":\n edgeR = robjects.r['edgeR']\n edgeR_results = pandas2ri.conversion.rpy2py(edgeR(pandas2ri.conversion.py2rpy(tmp_raw_expr_df), pandas2ri.conversion.py2rpy(non_cls1_sample_ids), pandas2ri.conversion.py2rpy(cls1_sample_ids)))\n\n signature = pd.DataFrame(edgeR_results[0])\n signature.index = edgeR_results[1]\n signature = signature.sort_values(\"logFC\", ascending=False)\n elif method == \"DESeq2\":\n DESeq2 = robjects.r['deseq2']\n DESeq2_results = pandas2ri.conversion.rpy2py(DESeq2(pandas2ri.conversion.py2rpy(tmp_raw_expr_df), pandas2ri.conversion.py2rpy(non_cls1_sample_ids), pandas2ri.conversion.py2rpy(cls1_sample_ids)))\n\n signature = pd.DataFrame(DESeq2_results[0])\n signature.index = DESeq2_results[1]\n signature = signature.sort_values(\"log2FoldChange\", ascending=False)\n elif method == \"wilcoxon\": \n dedf = sc.get.rank_genes_groups_df(dataset, group=cls1).set_index('names').sort_values('pvals', ascending=True)\n dedf = dedf.replace([np.inf, -np.inf], np.nan).dropna() \n dedf = dedf.sort_values(\"logfoldchanges\", ascending=False)\n signature = dedf\n \n signatures[signature_label] = signature\n else:\n sc.tl.rank_genes_groups(dataset, meta_class_column_name, method='wilcoxon', use_raw=True)\n \n for cls1, cls2 in permutations(classes, 2):\n signature_label = \" vs. \".join([cls1, cls2])\n print(\"Analyzing.. 
{} using {}\".format(signature_label, method))\n cls1_sample_ids = meta_df.loc[meta_df[meta_class_column_name]==cls1, :].index.tolist() #control\n cls2_sample_ids = meta_df.loc[meta_df[meta_class_column_name]==cls2, :].index.tolist() #case\n sample_ids = cls1_sample_ids.copy()\n sample_ids.extend(cls2_sample_ids)\n tmp_raw_expr_df = raw_expr_df[sample_ids]\n if method == \"limma\":\n signature = limma_voom_differential_expression(tmp_raw_expr_df.loc[:, cls1_sample_ids], tmp_raw_expr_df.loc[:, cls2_sample_ids])\n \n elif method == \"characteristic_direction\":\n signature = characteristic_direction(expr_df.loc[:, cls1_sample_ids], expr_df.loc[:, cls2_sample_ids], calculate_sig=False)\n elif method == \"edgeR\":\n edgeR = robjects.r['edgeR']\n edgeR_results = pandas2ri.conversion.rpy2py(edgeR(pandas2ri.conversion.py2rpy(tmp_raw_expr_df), pandas2ri.conversion.py2rpy(cls1_sample_ids), pandas2ri.conversion.py2rpy(cls2_sample_ids)))\n\n signature = pd.DataFrame(edgeR_results[0])\n signature.index = edgeR_results[1]\n signature = signature.sort_values(\"logFC\", ascending=False)\n elif method == \"DESeq2\":\n DESeq2 = robjects.r['deseq2']\n DESeq2_results = pandas2ri.conversion.rpy2py(DESeq2(pandas2ri.conversion.py2rpy(tmp_raw_expr_df), pandas2ri.conversion.py2rpy(cls1_sample_ids), pandas2ri.conversion.py2rpy(cls2_sample_ids)))\n\n signature = pd.DataFrame(DESeq2_results[0])\n signature.index = DESeq2_results[1]\n signature = signature.sort_values(\"log2FoldChange\", ascending=False)\n elif method == \"wilcoxon\": \n dedf = sc.get.rank_genes_groups_df(dataset, group=cls2).set_index('names').sort_values('pvals', ascending=True)\n dedf = dedf.replace([np.inf, -np.inf], np.nan).dropna()\n dedf = dedf.sort_values(\"logfoldchanges\", ascending=False)\n signature = dedf\n signatures[signature_label] = signature\n return signatures\n\n\n\n\ndef submit_enrichr_geneset(geneset, label):\n ENRICHR_URL = 'https://amp.pharm.mssm.edu/Enrichr/addList'\n genes_str = '\\n'.join(geneset)\n payload = {\n 'list': (None, genes_str),\n 'description': (None, label)\n }\n response = requests.post(ENRICHR_URL, files=payload)\n if not response.ok:\n raise Exception('Error analyzing gene list')\n time.sleep(0.5)\n data = json.loads(response.text)\n return data\n\n\ndef run_enrichr(signature, signature_label, geneset_size=500, fc_colname = 'logFC'):\n\n # Sort signature\n up_signature = signature[signature[fc_colname] > 0]\n down_signature = signature[signature[fc_colname] < 0]\n \n # Get genesets\n genesets = {\n 'upregulated': up_signature.index[:geneset_size],\n 'downregulated': down_signature.index[:geneset_size:]\n }\n\n # Submit to Enrichr\n enrichr_ids = {geneset_label: submit_enrichr_geneset(geneset=geneset, label=signature_label+', '+geneset_label+', from scRNA-seq Appyter') for geneset_label, geneset in genesets.items()}\n enrichr_ids['signature_label'] = signature_label\n return enrichr_ids\n\ndef get_enrichr_results(user_list_id, gene_set_libraries, overlappingGenes=True, geneset=None):\n ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/enrich'\n query_string = '?userListId=%s&backgroundType=%s'\n results = []\n for gene_set_library, label in gene_set_libraries.items():\n response = requests.get(\n ENRICHR_URL +\n query_string % (user_list_id, gene_set_library)\n )\n if not response.ok:\n raise Exception('Error fetching enrichment results')\n\n data = json.loads(response.text)\n resultDataframe = pd.DataFrame(data[gene_set_library], columns=[\n 'rank', 'term_name', 'pvalue', 'zscore', 'combined_score', 
'overlapping_genes', 'FDR', 'old_pvalue', 'old_FDR'])\n selectedColumns = ['term_name', 'zscore', 'combined_score', 'pvalue', 'FDR'] if not overlappingGenes else [\n 'term_name', 'zscore', 'combined_score', 'FDR', 'pvalue', 'overlapping_genes']\n resultDataframe = resultDataframe.loc[:, selectedColumns]\n resultDataframe['gene_set_library'] = label\n resultDataframe['log10P'] = -np.log10(resultDataframe['pvalue'])\n results.append(resultDataframe)\n concatenatedDataframe = pd.concat(results)\n if geneset:\n concatenatedDataframe['geneset'] = geneset\n return concatenatedDataframe\n\n\n\ndef get_enrichr_results_by_library(enrichr_results, signature_label, plot_type='interactive', library_type='go', version='2018', sort_results_by='pvalue'):\n\n # Libraries\n if library_type == 'go':\n go_version = str(version)\n libraries = {\n 'GO_Biological_Process_'+go_version: 'Gene Ontology Biological Process ('+go_version+' version)',\n 'MGI_Mammalian_Phenotype_Level_4_2019': 'MGI Mammalian Phenotype Level 4 2019'\n }\n elif library_type == \"pathway\":\n # Libraries\n libraries = {\n 'KEGG_2019_Human': 'KEGG Pathways',\n }\n elif library_type == \"celltype\":\n # Libraries\n libraries = {\n 'HuBMAP_ASCT_plus_B_augmented_w_RNAseq_Coexpression': 'HuBMAP ASCT+B Cell Type'\n }\n elif library_type==\"disease\":\n libraries = {\n 'GWAS_Catalog_2019': 'GWAS Catalog',\n }\n # Get Enrichment Results\n enrichment_results = {geneset: get_enrichr_results(enrichr_results[geneset]['userListId'], gene_set_libraries=libraries, geneset=geneset) for geneset in ['upregulated', 'downregulated']}\n enrichment_results['signature_label'] = signature_label\n enrichment_results['plot_type'] = plot_type\n enrichment_results['sort_results_by'] = sort_results_by\n\n # Return\n return enrichment_results\n\n\ndef get_enrichr_result_tables_by_library(enrichr_results, signature_label, library_type='tf'):\n\n # Libraries\n if library_type == 'tf':\n # Libraries\n libraries = {\n 'ChEA_2016': 'ChEA (experimentally validated targets)',\n }\n elif library_type == \"ke\":\n # Libraries\n libraries = {\n 'KEA_2015': 'KEA (experimentally validated targets)',\n 'ARCHS4_Kinases_Coexp': 'ARCHS4 (coexpressed genes)'\n }\n elif library_type == \"mirna\":\n libraries = {\n 'TargetScan_microRNA_2017': 'TargetScan (experimentally validated targets)',\n 'miRTarBase_2017': 'miRTarBase (experimentally validated targets)'\n }\n\n\n # Initialize results\n results = []\n\n # Loop through genesets\n for geneset in ['upregulated', 'downregulated']:\n\n # Append ChEA results\n enrichment_dataframe = get_enrichr_results(enrichr_results[geneset]['userListId'], gene_set_libraries=libraries, geneset=geneset)\n results.append(enrichment_dataframe)\n\n # Concatenate results\n enrichment_dataframe = pd.concat(results)\n\n return {'enrichment_dataframe': enrichment_dataframe, 'signature_label': signature_label}\n\n# enrichment analysis for uploaded gmt\ndef get_library(filename):\n # processes library data\n raw_library_data = []\n library_data = []\n\n \n with open(filename, \"r\") as f:\n for line in f.readlines():\n raw_library_data.append(line.split(\"\\t\\t\"))\n name = []\n gene_list = []\n\n for i in range(len(raw_library_data)):\n name += [raw_library_data[i][0]]\n raw_genes = raw_library_data[i][1].replace('\\t', ' ')\n gene_list += [raw_genes[:-1]]\n\n library_data = [list(a) for a in zip(name, gene_list)]\n \n return library_data\n\ndef library_to_dict(library_data):\n dictionary = dict()\n for i in range(len(library_data)):\n row = library_data[i]\n 
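        # row is [term_name, space-delimited gene string] as built by get_library();
        # the next statement maps each term_name to a list of upper-cased gene symbols.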
dictionary[row[0]] = [x.upper() for x in row[1].split(\" \")]\n return dictionary\n\ndef get_library_iter(library_data):\n for term in library_data.keys():\n single_set = library_data[term]\n yield term, single_set\n\ndef get_enrichment_results(items, library_data):\n return sorted(enrich_crisp(items, get_library_iter(library_data), n_background_entities=20000, preserve_overlap=True), key=lambda r: r[1].pvalue)\n\n# Call enrichment results and return a plot and dataframe for Scatter Plot\ndef get_values(obj_list):\n pvals = []\n odds_ratio = []\n n_overlap = []\n overlap = []\n for i in obj_list:\n pvals.append(i.pvalue)\n odds_ratio.append(i.odds_ratio)\n n_overlap.append(i.n_overlap)\n overlap.append(i.overlap)\n return pvals, odds_ratio, n_overlap, overlap\n\ndef get_qvalue(p_vals):\n r = multipletests(p_vals, method=\"fdr_bh\")\n return r[1]\n\n\ndef enrichment_analysis(items, library_data): \n items = [x.upper() for x in items]\n all_results = get_enrichment_results(items, library_data)\n unzipped_results = list(zip(*all_results))\n if len(unzipped_results)>0:\n pvals, odds_ratio, n_overlap, overlap = get_values(unzipped_results[1])\n df = pd.DataFrame({\"term_name\":unzipped_results[0], \"pvalue\": pvals, \\\n \"odds_ratio\": odds_ratio, \"n_overlap\": n_overlap, \"overlap\": overlap})\n df[\"-log(p value)\"] = -np.log10(df[\"pvalue\"])\n df[\"q value\"] = get_qvalue(df[\"pvalue\"].tolist())\n return [list(unzipped_results[0])], [pvals], df\n else:\n raise IOError()\n\n\nimport hashlib\ndef str_to_int(string, mod):\n string = re.sub(r\"\\([^()]*\\)\", \"\", string).strip()\n byte_string = bytearray(string, \"utf8\")\n return int(hashlib.sha256(byte_string).hexdigest(), base=16)%mod\n\ndef plot_scatter(umap_df, values_dict, option_list, sample_names, caption_text, category_list_dict=None, location='right', category=True, dropdown=False, figure_counter=0, additional_info=None):\n \n # init plot \n if additional_info is not None:\n source = ColumnDataSource(data=dict(x=umap_df[\"x\"], y=umap_df[\"y\"], values=values_dict[option_list[0]], \n names=sample_names, info=additional_info[option_list[0]]))\n else:\n source = ColumnDataSource(data=dict(x=umap_df[\"x\"], y=umap_df[\"y\"], values=values_dict[option_list[0]], \n names=sample_names))\n # node size\n if umap_df.shape[0] > 1000:\n node_size = 2\n else:\n node_size = 6\n \n if location == 'right':\n plot = figure(plot_width=1000, plot_height=800) \n else:\n plot = figure(plot_width=1000, plot_height=1000+20*len(category_list_dict[option_list[0]])) \n if category == True:\n unique_category_dict = dict()\n for option in option_list:\n unique_category_dict[option] = sorted(list(set(values_dict[option])))\n \n # map category to color\n # color is mapped by its category name \n # if a color is used by other categories, use another color\n factors_dict = dict()\n colors_dict = dict()\n for key in values_dict.keys():\n unused_color = list(Category20[20])\n factors_dict[key] = category_list_dict[key]\n colors_dict[key] = list()\n for category_name in factors_dict[key]:\n color_for_category = Category20[20][str_to_int(category_name, 20)]\n \n if color_for_category not in unused_color:\n if len(unused_color) > 0:\n color_for_category = unused_color[0] \n else:\n color_for_category = Category20[20][19]\n \n colors_dict[key].append(color_for_category)\n if color_for_category in unused_color:\n unused_color.remove(color_for_category)\n \n color_mapper = CategoricalColorMapper(factors=factors_dict[option_list[0]], 
palette=colors_dict[option_list[0]])\n legend = Legend()\n \n plot.add_layout(legend, location)\n scatter = plot.scatter('x', 'y', size=node_size, source=source, color={'field': 'values', 'transform': color_mapper}, legend_field=\"values\")\n plot.legend.label_width = 30\n plot.legend.click_policy='hide'\n plot.legend.spacing = 1\n if location == 'below':\n location = 'bottom_left'\n plot.legend.location = location\n plot.legend.label_text_font_size = '10pt'\n else:\n color_mapper = LinearColorMapper(palette=cc.CET_D1A, low=min(values_dict[option_list[0]]), high=max(values_dict[option_list[0]]))\n color_bar = ColorBar(color_mapper=color_mapper, label_standoff=12)\n plot.add_layout(color_bar, 'right')\n plot.scatter('x', 'y', size=node_size, source=source, color={'field': 'values', 'transform': color_mapper})\n \n if additional_info is not None:\n tooltips = [\n (\"Sample\", \"@names\"),\n (\"Value\", \"@values\"),\n (\"p-value\", \"@info\")\n ]\n else:\n tooltips = [\n (\"Sample\", \"@names\"),\n (\"Value\", \"@values\"),\n ]\n plot.add_tools(HoverTool(tooltips=tooltips))\n plot.output_backend = \"webgl\"\n \n plot.xaxis.axis_label = \"UMAP_1\"\n plot.xaxis.axis_label_text_font_size = \"12pt\"\n plot.yaxis.axis_label = \"UMAP_2\"\n plot.yaxis.axis_label_text_font_size = \"12pt\"\n \n plot.xaxis.major_tick_line_color = None # turn off x-axis major ticks\n plot.xaxis.minor_tick_line_color = None # turn off x-axis minor ticks\n plot.yaxis.major_tick_line_color = None # turn off y-axis major ticks\n plot.yaxis.minor_tick_line_color = None # turn off y-axis minor ticks\n plot.xaxis.major_label_text_font_size = '0pt' # preferred method for removing tick labels\n plot.yaxis.major_label_text_font_size = '0pt' # preferred method for removing tick labels\n\n default_text = \"Figure {}. {}{}\"\n pre = Paragraph(text = default_text.format(figure_counter, caption_text, option_list[0]), width=500, height=100, style={\"font-family\":'Helvetica', \"font-style\": \"italic\"})\n figure_counter += 1\n if dropdown == True:\n if category == True:\n callback_adt = CustomJS(args=dict(source=source, \\\n pre=pre, \\\n values_dict=values_dict, \\\n figure_counter=figure_counter, \\\n color_mapper=color_mapper,\\\n unique_category_dict=unique_category_dict,\\\n category_list_dict=category_list_dict,\\\n additional_info=additional_info,\\\n factors_dict=factors_dict,\\\n colors_dict=colors_dict,\\\n plot=plot,\\\n scatter=scatter,\n caption_text=caption_text\n ), code=\"\"\" \n const val = cb_obj.value; \n source.data.values = values_dict[val] \n if (additional_info != null) {\n source.data.info = additional_info[val]\n }\n color_mapper.factors = category_list_dict[val]\n color_mapper.palette = colors_dict[val]\n plot.legend = unique_category_dict[val]\n pre.text = \"Figure \"+figure_counter+\". \"+caption_text+val+\".\"; \n plot.height = 1000+20*(category_list_dict[val].length)\n source.change.emit();\n \"\"\")\n else:\n callback_adt = CustomJS(args=dict(source=source, \\\n pre=pre, \\\n values_dict=values_dict, \\\n additional_info=additional_info,\\\n figure_counter=figure_counter,\n caption_text=caption_text), code=\"\"\" \n const val = cb_obj.value; \n source.data.values = values_dict[val]\n if (additional_info != null) {\n source.data.info = additional_info[val]\n }\n \n pre.text = \"Figure \"+figure_counter+\". 
\"+caption_text+val+\".\"; \n source.change.emit();\n \"\"\")\n\n # init dropdown menu\n select = Select(title=\"Select an option:\", value=option_list[0], options=option_list)\n select.js_on_change('value', callback_adt)\n\n col = column(select, row(column(plot, pre)))\n show(col)\n else:\n col = column(plot, pre)\n show(col)\n return figure_counter\n\n\n\n\ndef clustering(adata, dataset, umap_n_neighbors, umap_min_dist, bool_plot, figure_counter, batch_correction=True):\n # clustering\n \n if batch_correction== True:\n sc.external.pp.bbknn(adata, batch_key=\"batch\")\n else:\n sc.pp.neighbors(adata, n_neighbors=umap_n_neighbors)\n sc.tl.leiden(adata, resolution=1.0)\n sc.tl.umap(adata, min_dist=umap_min_dist)\n \n \n # sort by clusters\n new_order = adata.obs.sort_values(by='leiden').index.tolist()\n adata = adata[new_order, :]\n adata.obs['leiden'] = 'Cluster '+adata.obs['leiden'].astype('object')\n \n if bool_plot == True:\n umap_df = pd.DataFrame(adata.obsm['X_umap'])\n umap_df.columns = ['x', 'y']\n\n values_dict = dict()\n values_dict[\"Cluster\"] = adata.obs[\"leiden\"].values\n category_list_dict = dict()\n category_list_dict[\"Cluster\"] = list(sorted(adata.obs[\"leiden\"].unique()))\n figure_counter = plot_scatter(umap_df, values_dict, [\"Cluster\"], adata.obs.index.tolist(), \"Scatter plot of the samples. Each dot represents a sample and it is colored by \", category_list_dict=category_list_dict, category=True, dropdown=False, figure_counter=figure_counter)\n\n display(create_download_link(adata.obs[\"leiden\"], filename=f\"clustering_{dataset}.csv\"))\n return adata, figure_counter\n\n\ndef differential_gene_expression_analysis(adata, diff_gex_method, enrichment_groupby, meta_class_column_name, table_counter):\n if diff_gex_method == \"characteristic_direction\":\n fc_colname = \"CD-coefficient\"\n sort_genes_by = \"CD-coefficient\"\n ascending = False\n elif diff_gex_method == \"limma\":\n fc_colname = \"logFC\"\n sort_genes_by = \"t\"\n ascending = False\n elif diff_gex_method == \"edgeR\":\n fc_colname = \"logFC\"\n sort_genes_by = \"PValue\"\n ascending = True\n elif diff_gex_method == \"DESeq2\":\n fc_colname = \"log2FoldChange\"\n sort_genes_by = \"padj\"\n ascending = True\n elif diff_gex_method == \"wilcoxon\":\n fc_colname = \"logfoldchanges\"\n sort_genes_by = \"scores\"\n ascending = False\n \n \n if enrichment_groupby == \"user_defined_class\":\n classes = adata.obs[meta_class_column_name].unique().tolist()\n bool_cluster=False\n if len(classes) < 2:\n print(\"Warning: Please provide at least 2 classes in the metadata\")\n \n elif enrichment_groupby == \"Cluster\":\n meta_class_column_name = \"leiden\"\n classes = sorted(adata.obs[\"leiden\"].unique().tolist())\n classes = sorted(classes, key=lambda x: int(x.replace(\"Cluster \", \"\")))\n bool_cluster=True\n else:\n meta_class_column_name = enrichment_groupby\n classes = sorted(adata.obs[meta_class_column_name].unique().tolist())\n classes.sort()\n bool_cluster=True\n \n if len(classes) > 5 and adata.n_obs > 5000:\n if diff_gex_method == \"wilcoxon\":\n print('Warning: There are too many cells/clusters. It cannot execute the analysis code for the data. The appyter randomly select 5000 samples. If you want to execute it with the whole data, please run it locally.')\n else:\n print('Warning: There are too many cells/clusters. It cannot execute the analysis code for the data. The appyter switched to Wilcoxon rank-sum method and randomly select 5000 samples. 
If you want to execute it with the whole data, please run it locally.')\n diff_gex_method = \"wilcoxon\"\n # randomly select 5K samples\n random_selected_samples = random.sample(adata.obs.index.tolist(), 5000)\n adata_random_sampled = adata[random_selected_samples, :]\n signatures = get_signatures(classes, adata_random_sampled, method=diff_gex_method, meta_class_column_name=meta_class_column_name, cluster=bool_cluster)\n else:\n signatures = get_signatures(classes, adata, method=diff_gex_method, meta_class_column_name=meta_class_column_name, cluster=bool_cluster)\n \n if len(classes) > 1:\n for label, signature in signatures.items():\n if bool_cluster == True:\n case_label = label.split(\" vs. \")[0]\n else:\n case_label = label.split(\" vs. \")[1]\n signature = signature.sort_values(by=sort_genes_by, ascending=ascending)\n table_counter = display_object(table_counter, f\"Top 5 Differentially Expressed Genes in {label} (up-regulated in {case_label})\", signature.head(5), istable=True)\n display(create_download_link(signature, filename=\"DEG_{}.csv\".format(label)))\n return signatures, bool_cluster, table_counter\n\n\n\n\n\ndef visualize_enrichment_analysis(adata, signatures, meta_class_column_name, diff_gex_method, enrichr_libraries_filename, enrichr_libraries, enrichment_groupby, libraries_tab, gene_topk, bool_cluster, bool_plot, figure_counter, table_counter):\n\n topk_enrichment_terms = 3\n if diff_gex_method == \"characteristic_direction\":\n fc_colname = \"CD-coefficient\"\n elif diff_gex_method == \"limma\":\n fc_colname = \"logFC\"\n elif diff_gex_method == \"edgeR\":\n fc_colname = \"logFC\"\n elif diff_gex_method == \"DESeq2\":\n fc_colname = \"log2FoldChange\"\n elif diff_gex_method == \"wilcoxon\":\n fc_colname = \"logfoldchanges\"\n \n results = {} \n if libraries_tab == 'Yes' or libraries_tab == 'All':\n results['enrichr'] = {}\n for label, signature in signatures.items():\n # Run analysis\n if enrichment_groupby == \"user_defined_class\":\n case_name = label.split(\" vs. \")[1]\n col_name = meta_class_column_name\n elif enrichment_groupby == \"Cluster\":\n case_name = label.split(\" vs. \")[0]\n col_name = \"leiden\"\n else:\n case_name = label.split(\" vs. \")[0]\n col_name = \"batch\"\n\n results['enrichr'][label] = run_enrichr(signature=signature, signature_label=label, fc_colname=fc_colname, geneset_size=gene_topk)\n display(Markdown(\"*Enrichment Analysis Result: {} (Up-regulated in {})*\".format(label, case_name)))\n display_link(\"https://amp.pharm.mssm.edu/Enrichr/enrich?dataset={}\".format(results['enrichr'][label][\"upregulated\"][\"shortId\"]))\n display(Markdown(\"*Enrichment Analysis Result: {} (Down-regulated in {})*\".format(label, case_name)))\n display_link(\"https://amp.pharm.mssm.edu/Enrichr/enrich?dataset={}\".format(results['enrichr'][label][\"downregulated\"][\"shortId\"]))\n table_counter = display_object(table_counter, \"The table displays links to Enrichr containing the results of enrichment analyses generated by analyzing the up-regulated and down-regulated genes from a differential expression analysis. By clicking on these links, users can interactively explore and download the enrichment results from the Enrichr website.\", istable=True)\n if libraries_tab == 'No' or libraries_tab == 'All':\n results['user_defined_enrichment'] = {}\n for label, signature in signatures.items():\n\n # Run analysis\n if enrichment_groupby == \"user_defined_class\":\n case_name = label.split(\" vs. 
\")[1]\n col_name = meta_class_column_name\n elif enrichment_groupby == \"Cluster\":\n case_name = label.split(\" vs. \")[0]\n col_name = \"leiden\"\n else:\n case_name = label.split(\" vs. \")[1]\n col_name = \"batch\"\n user_library = library_to_dict(get_library(enrichr_libraries_filename))\n\n # Sort signature\n up_signature = signature[signature[fc_colname] > 0]\n up_genes = [x.upper() for x in up_signature.index[:gene_topk].tolist()]\n\n results['user_defined_enrichment'][label] = dict()\n try:\n _, _, results['user_defined_enrichment'][label]['enrichment_dataframe'] = enrichment_analysis(up_genes, user_library)\n results['user_defined_enrichment'][label]['enrichment_dataframe']['gene_set_library'] = enrichr_libraries_filename\n except:\n pass\n if \"Gene Ontology\" in enrichr_libraries:\n results['go_enrichment'] = {}\n for label, signature in signatures.items():\n # Run analysis\n results['go_enrichment'][label] = get_enrichr_results_by_library(results['enrichr'][label], label, library_type='go', version='2018')\n \n if \"Pathway\" in enrichr_libraries:\n # Initialize results\n results['pathway_enrichment'] = {}\n\n # Loop through results\n for label, signature in signatures.items():\n # Run analysis\n results['pathway_enrichment'][label] = get_enrichr_results_by_library(results['enrichr'][label], label, library_type='pathway')\n if \"Transcription Factor\" in enrichr_libraries:\n # Initialize results\n results['tf_enrichment'] = {}\n # Loop through results\n for label, signature in signatures.items():\n # Run analysis\n results['tf_enrichment'][label] = get_enrichr_result_tables_by_library(enrichr_results=results['enrichr'][label], signature_label=label, library_type='tf')\n if \"Kinase\" in enrichr_libraries:\n # Initialize results\n results['kinase_enrichment'] = {}\n\n # Loop through results\n for label, enrichr_results in results['enrichr'].items():\n # Run analysis\n results['kinase_enrichment'][label] = get_enrichr_result_tables_by_library(enrichr_results=enrichr_results, signature_label=label, library_type=\"ke\")\n\n if \"miRNA\" in enrichr_libraries:\n results['mirna_enrichment'] = {}\n\n # Loop through results\n for label, enrichr_results in results['enrichr'].items():\n # Run analysis\n results['mirna_enrichment'][label] = get_enrichr_result_tables_by_library(enrichr_results=enrichr_results, signature_label=label, library_type=\"mirna\")\n if \"Cell Type\" in enrichr_libraries:\n results['celltype_enrichment'] = {}\n for label, signature in signatures.items():\n # Run analysis\n results['celltype_enrichment'][label] = get_enrichr_results_by_library(results['enrichr'][label], label, library_type='celltype')\n if \"Disease\" in enrichr_libraries:\n results['disease_enrichment'] = {}\n for label, signature in signatures.items():\n # Run analysis\n results['disease_enrichment'][label] = get_enrichr_results_by_library(results['enrichr'][label], label, library_type='disease', version='2018')\n \n library_option_list = set()\n topk_enriched_terms_dict = defaultdict(list)\n for label, signature in signatures.items():\n if bool_cluster == True:\n cluster_names = [label.split(\" vs. \")[0]]\n else:\n cluster_names = [label.split(\" vs. 
\")[1]]\n \n for key in results.keys():\n if key.endswith(\"enrichment\") == False:\n continue\n enrichment_results = results[key][label]\n meta_df = adata.obs\n for cluster_name in cluster_names:\n for direction in ['upregulated']:\n if direction in enrichment_results:\n enrichment_dataframe = enrichment_results[direction]\n elif \"enrichment_dataframe\" in enrichment_results:\n enrichment_dataframe = enrichment_results[\"enrichment_dataframe\"]\n else:\n continue\n if enrichment_dataframe.empty == True:\n raise Exception(\"Enrichment analysis returns empty results. Please check if your data contains proper gene names.\")\n libraries = enrichment_dataframe['gene_set_library'].unique() \n \n for library in libraries:\n enrichment_dataframe_library = enrichment_dataframe[enrichment_dataframe['gene_set_library']==library]\n \n topk_enriched_terms = enrichment_dataframe_library.iloc[:topk_enrichment_terms]\n selected_columns = ['term_name', 'pvalue']\n topk_enriched_terms = topk_enriched_terms[selected_columns]\n topk_enriched_terms.columns = [\"Enriched Term\", \"pvalue\"]\n topk_enriched_terms.insert(0, 'Rank', [*range(1, len(topk_enriched_terms)+1)])\n topk_enriched_terms.insert(0, 'Class', cluster_name)\n \n topk_enriched_terms_dict[library].append(topk_enriched_terms)\n \n top_term = enrichment_dataframe_library.iloc[0]['term_name']\n if library not in meta_df.columns:\n meta_df.insert(0, library, np.nan)\n meta_df[library] = meta_df[library].astype(\"object\")\n if bool_cluster == True:\n meta_df.loc[meta_df[col_name]==cluster_name, library] = top_term\n else:\n meta_df.loc[meta_df[meta_class_column_name]==cluster_name, library] = top_term\n library_option_list.add(library)\n \n \n for topk_i in range(topk_enrichment_terms):\n if topk_i < topk_enriched_terms.shape[0]: # in case # of enrichment terms is less than topk_enrichment_terms\n new_col_name = f\"{library}_{topk_i}\"\n top_term = topk_enriched_terms.iloc[topk_i]['Enriched Term']\n\n if new_col_name not in meta_df.columns:\n meta_df.insert(0, new_col_name, np.nan)\n meta_df[new_col_name] = meta_df[new_col_name].astype(\"object\")\n meta_df.loc[meta_df[col_name]==cluster_name, new_col_name] = top_term\n\n \n library_option_list = list(library_option_list)\n\n for library in library_option_list: \n top_enriched_term_df = pd.concat(topk_enriched_terms_dict[library])\n top_enriched_term_df = top_enriched_term_df.set_index([\"Class\", \"Rank\"])\n \n if libraries_tab == 'Yes':\n table_counter = display_object(table_counter, f\"Top 3 Enriched Terms for each cluster/class in library {library}. 
To see more results, please use Enrichr links above.\", df=top_enriched_term_df, istable=True)\n else:\n table_counter = display_object(table_counter, f\"Top 3 Enriched Terms for each cluster/class in library {library}.\", df=top_enriched_term_df, istable=True)\n display(create_download_link(top_enriched_term_df, filename=f\"Top{topk_enrichment_terms}_Enriched_Terms_{library}.csv\"))\n \n # umap info into dataframe \n umap_df = pd.DataFrame(adata.obsm['X_umap'])\n umap_df.columns = ['x', 'y']\n\n option_list = library_option_list \n if enrichment_groupby != \"batch\":\n option_list.append(\"leiden\")\n else:\n option_list.append(\"batch\")\n adata_norm_selected = adata.obs[option_list].astype(\"object\").fillna(\"NaN\")\n \n values_dict = dict(zip(adata_norm_selected.T.index.tolist(), adata_norm_selected.T.values))\n category_list_dict = dict()\n for option in option_list:\n category_list_dict[option] = list(sorted(adata_norm_selected[option].unique()))\n if bool_plot == True:\n figure_counter = plot_scatter(umap_df, values_dict, option_list, adata.obs.index.tolist(), \"Scatter plot of the samples. Each dot represents a sample and it is colored by enriched terms in library \", location='below', category_list_dict=category_list_dict, category=True, dropdown=True, figure_counter=figure_counter)\n return adata, option_list, figure_counter, table_counter\n\n\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objects as go\nimport plotly.figure_factory as ff\nfrom scipy.spatial.distance import pdist, squareform\n\ndef extract_library_name(path):\n return path.replace(\"libraries/\",\"\").replace(\".txt\",\"\").replace(\".gmt\", \"\").replace(\"_0\",\"\")\n\ndef plot_protrack(libraries, tmp_adata, bool_plot_rug=True, bool_plot_dendrogram=True, figure_counter=0, fig_size=(1800, 800)):\n \n frequent_genes = get_frequent_genes(tmp_adata.uns['signature'])\n frequent_genes = frequent_genes.intersection(tmp_adata.var.index)\n \n # init figure\n trace_idx = 0\n row_length = len(libraries)+2\n \n row_heights = [0.1]\n row_heights.extend([(1-row_heights[0])/(row_length-1)]*(row_length-1))\n fig = make_subplots(rows=row_length, cols=1, shared_xaxes=True, vertical_spacing=0.00, row_heights=row_heights,\n )\n\n # plot dendrogram\n if bool_plot_dendrogram == True:\n fig, dendrogram_fig, trace_idx, dendrogram_sample_index_list = plot_dendrogram(fig, tmp_adata, frequent_genes, trace_idx)\n \n # get color mapping dictionary\n color_map_dict = get_color_map(libraries, tmp_adata)\n \n i = 2\n if bool_plot_rug == True:\n for library_filename in libraries:\n # init rug plots\n adata_heatmap = tmp_adata.copy()\n adata_heatmap.obs = adata_heatmap.obs.T.drop_duplicates().T\n new_library_filename = extract_library_name(library_filename)\n library_name = library_filename+\"_0\"\n clean_library_filename = {library_name: new_library_filename}\n new_cols = list()\n for col in adata_heatmap.obs.columns:\n if col in clean_library_filename:\n new_cols.append(clean_library_filename[col])\n else:\n new_cols.append(col)\n adata_heatmap.obs.columns = new_cols\n\n ####### rug plots to show predicted cell types/enriched terms ########\n fig, trace_idx, i = plot_rug(fig, dendrogram_fig, dendrogram_sample_index_list, adata_heatmap, new_library_filename, color_map_dict, trace_idx, i)\n \n \n # Edit Layout\n fig.update_yaxes(showgrid=False, showline=False, zeroline=False, showticklabels=False, ticks=\"\", row=1, col=1)\n fig.update_layout({'width':fig_size[0], 'height':fig_size[1],\n 'legend_traceorder': \"grouped\",\n 
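                       # 'grouped' is intended to keep each rug track's legend entries together,
                       # grouped by the legendgroup (library name) assigned in plot_rug below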
})\n fig.update_layout(paper_bgcolor='#FFFFFF', plot_bgcolor='#FFFFFF') \n\n \n # Plot!\n fig.show() \n figure_counter = display_object(figure_counter, \"Protrack Plot. The plot shows the top predicted cell types for each cluster for each library. The colors are based on Cell Ontology IDs.\", istable=False) \n return figure_counter\n \ndef plot_rug(fig, dendrogram_fig, dendrogram_sample_index_list, adata_heatmap, new_library_filename, color_map_dict, trace_idx, i):\n rug_df = pd.DataFrame(dendrogram_fig['layout']['xaxis']['tickvals'], index=dendrogram_sample_index_list)\n rug_df.columns = ['x']\n rug_df['y'] = new_library_filename\n rug_df['sample'] = rug_df.index\n rug_df['Prediction'] = adata_heatmap.obs.loc[dendrogram_sample_index_list, new_library_filename].astype(str)#.map(str)\n cell_types = list(map(str, rug_df['Prediction'].tolist()))\n\n subfig = px.scatter(rug_df, x='x', y='y', color='Prediction', hover_name='sample',\n color_discrete_map=color_map_dict, hover_data=['Prediction','y'])\n subfig.update_layout({'width':800, 'height':30, 'showlegend': False})\n subfig.update_traces(marker=dict(size=12, symbol='line-ns-open',\n line=dict(width=2,\n color='DarkSlateGrey')),\n selector=dict(mode='markers'))\n for sub in subfig['data']:\n fig.add_trace(sub, row=i, col=1)\n fig['data'][trace_idx]['showlegend']=True\n fig['data'][trace_idx]['legendgroup']=new_library_filename\n fig['data'][trace_idx]['legendgrouptitle_text']=new_library_filename\n\n trace_idx += 1\n # Update yaxis properties\n fig.update_yaxes(showgrid=False, showline=False, zeroline=False, showticklabels=True, ticks=\"\", row=i, col=1)\n fig.update_xaxes(showgrid=False, showline=False, zeroline=False, showticklabels=False, ticks=\"\", row=i, col=1)\n i += 1\n \n return fig, trace_idx, i\ndef plot_dendrogram(fig, tmp_adata, frequent_genes, trace_idx=0):\n labels = tmp_adata.obs.index # ordered in cluster\n data_array = tmp_adata.to_df().loc[:,frequent_genes]\n genes = data_array.columns\n\n # Initialize figure by creating upper dendrogram for samples\n dendrogram_fig = ff.create_dendrogram(data_array, orientation='bottom', labels=list(labels), colorscale=[\"#000000\"], color_threshold=0.0) # fig ordered in dendrogram\n for i in range(len(fig['data'])):\n dendrogram_fig['data'][i]['yaxis'] = 'y2'\n dendrogram_fig.update_layout({'width':1000, 'height':800, 'showlegend':False, 'hovermode': 'closest',})\n dendrogram_fig.update_yaxes(mirror=False, showgrid=False, showline=False, zeroline=False, showticklabels=False, ticks=\"\")\n dendrogram_fig.update_xaxes(mirror=False, showgrid=False, showline=False, zeroline=False, showticklabels=False, ticks=\"\")\n dendrogram_sample_index_list = dendrogram_fig['layout']['xaxis']['ticktext'] # sample names\n for subfig in dendrogram_fig['data']:\n fig.add_trace(subfig, row=1, col=1)\n fig['data'][trace_idx]['showlegend']=False\n trace_idx += 1\n return fig, dendrogram_fig, trace_idx, dendrogram_sample_index_list\n\ndef get_frequent_genes(signatures, topk_genes_heatmap=1000):\n # frequent genes for heatmap\n frequent_upregulated_genes = dict()\n \n rank_list = list()\n top1000_rank_list = list()\n for label, signature in signatures.items():\n rank_list.append(signature.rank(ascending=False)[\"scores\"])\n top1000_rank_list.append(signature.rank(ascending=False)[\"scores\"]<topk_genes_heatmap)\n\n rank_df = pd.concat(rank_list, axis=1)\n rank_df[\"median\"] = rank_df.median(axis=1)\n\n top1000_rank_df = pd.concat(top1000_rank_list, axis=1)\n top1000_rank_df[\"sum\"] = 
top1000_rank_df.sum(axis=1)\\\n\n top1000_rank_df[\"median\"] = rank_df[\"median\"]\n top1000_rank_df = top1000_rank_df.sort_values([\"sum\", \"median\"], ascending=[False, True])\n\n sorted_dict = set(top1000_rank_df.iloc[:topk_genes_heatmap,:].index)\n \n return sorted_dict\n\ndef get_color_map(library_filenames, tmp_adata):\n # legend color map\n color_list = list()\n color_list.extend(px.colors.qualitative.Plotly)\n color_list.extend(px.colors.qualitative.Alphabet)\n\n color_map_dict = dict() # pred cell ontology name+ID :color\n color_map_id_dict = dict() # pred cell ontology ID : color\n\n i = 0\n for library_filename in library_filenames:\n library_name = library_filename+\"_0\" # top1 prediction\n if library_name in tmp_adata.obs.columns:\n prediction = tmp_adata.obs[library_name].unique()\n else:\n prediction = tmp_adata.obs[library_filename].unique()\n for pred in prediction:\n pred = str(pred)\n cl_id_in_pred = pred.split(\":\")[-1]\n if cl_id_in_pred == \"\": #if CL ID is None, use cell ontology name\n cl_id_in_pred = pred\n\n if cl_id_in_pred not in color_map_id_dict:\n color_map_id_dict[cl_id_in_pred] = color_list[i]\n color_map_dict[pred] = color_list[i]\n i += 1\n else:\n color_map_dict[pred] = color_map_id_dict[cl_id_in_pred]\n\n\n if i == len(color_list):\n i = i-len(color_list)\n return color_map_dict\n\ndef summary(adata, option_list, table_counter):\n for col in option_list:\n counts = adata.obs[[col]].reset_index().groupby(col).count()\n counts.columns = ['# of Samples']\n counts[\"Percentage (%)\"] = counts['# of Samples']/counts['# of Samples'].sum() * 100\n counts = counts.sort_values(\"Percentage (%)\", ascending=False)\n table_counter = display_object(table_counter, \"The number of samples for each category in {}\".format(col), counts, istable=True)\n return table_counter\ndef trajectory_inference(adata, trajectory_method, figure_counter=0):\n node_size = min(100, 120000 / len(adata.obs.index))\n if trajectory_method == \"palantir\":\n # Run analysis\n pr_res, umap = run_palantir(dataset=adata)\n \n # Display results\n sc.pl.umap(adata, color=['leiden'], size=node_size)\n plot_palantir(pr_res, umap)\n\n elif trajectory_method == \"dpt\":\n adata.uns['iroot'] = 0\n sc.pl.umap(adata, color=['leiden'], size=node_size)\n sc.tl.dpt(adata)\n sc.pl.umap(adata, color=['dpt_pseudotime'], size=node_size)\n display_link(\"draw_graph_fa.pdf\", \"Download figure\")\n \n figure_counter = display_object(figure_counter, \"Trajectory inference result using {}. Each point represents an RNA-seq sample. 
Sample colors are based on pseudotime.\".format(trajectory_method), istable=False)\n return adata, figure_counter\n\ndef run_palantir(dataset):\n sc.external.tl.phenograph(dataset, clustering_algo=\"leiden\", k=30)\n sc.external.tl.palantir(dataset)\n umap = pd.DataFrame(dataset.obsm['X_umap'], index=dataset.obs_names, columns=['x', 'y'])\n \n # early cell is set based on CD34 expression\n df = dataset.to_df()\n df.columns = df.columns.map(str.upper)\n start_cell = df['CD34'].sort_values(ascending=False).index[0]\n pr_res = sc.external.tl.palantir_results(\n dataset,\n early_cell=start_cell,\n ms_data='X_palantir_multiscale',\n num_waypoints=500,\n )\n return pr_res, umap\ndef plot_palantir(pr_res, umap):\n # Set up figure\n n_branches = pr_res.branch_probs.shape[1]\n n_cols = 6\n n_rows = int(np.ceil(n_branches / n_cols))\n fig = plt.figure(figsize=[2 * n_cols, 2 * (n_rows + 2)])\n gs = plt.GridSpec(\n n_rows + 2, n_cols, height_ratios=np.append([0.75, 0.75], np.repeat(1, n_rows))\n )\n cmap = plt.cm.plasma\n # Pseudotime\n ax = plt.subplot(gs[0:2, 1:3])\n c = pr_res.pseudotime[umap.index]\n ax.scatter(umap.loc[:, \"x\"], umap.loc[:, \"y\"], s=3, cmap=plt.cm.plasma, c=c)\n normalize = matplotlib.colors.Normalize(vmin=np.min(c), vmax=np.max(c))\n cax, _ = matplotlib.colorbar.make_axes(ax)\n cbar = matplotlib.colorbar.ColorbarBase(cax, norm=normalize, cmap=cmap)\n ax.set_axis_off()\n ax.set_title(\"Pseudotime\")\n\n # Entropy\n ax = plt.subplot(gs[0:2, 3:5])\n c = pr_res.entropy[umap.index]\n ax.scatter(umap.loc[:, \"x\"], umap.loc[:, \"y\"], s=3, cmap=plt.cm.plasma, c=c)\n normalize = matplotlib.colors.Normalize(vmin=np.min(c), vmax=np.max(c))\n cax, _ = matplotlib.colorbar.make_axes(ax)\n cbar = matplotlib.colorbar.ColorbarBase(cax, norm=normalize, cmap=cmap)\n ax.set_axis_off()\n ax.set_title(\"Differentiation potential\")\n\ndef time_series_trajectory_inference(adata, timepoint_labels_column_name, timepoint_labels, figure_counter):\n run_tempora(adata, timepoint_labels_column_name, timepoint_labels)\n display(Image.open(\"Tempora_plot.jpg\"))\n figure_counter = display_object(figure_counter, \"Time-series trajectory inference result using Tempora. 
Tempora visualizes the result as a network, with the piechart at each node representing the composition of cells collected at different time points in the experiment and the arrow connecting each pair of nodes representing lineage relationship between them.\", istable=False)\n display(FileLink(\"Tempora_plot.jpg\", result_html_prefix=\"Download figure\"))\n return figure_counter\n\ndef results_table(enrichment_dataframe, source_label, target_label, label, table_counter):\n\n # Get libraries\n for gene_set_library in enrichment_dataframe['gene_set_library'].unique():\n\n # Get subset\n enrichment_dataframe_subset = enrichment_dataframe[enrichment_dataframe['gene_set_library'] == gene_set_library].copy()\n\n # Get unique values from source column\n enrichment_dataframe_subset[source_label] = [x.split('_')[0] for x in enrichment_dataframe_subset['term_name']]\n enrichment_dataframe_subset = enrichment_dataframe_subset.sort_values(['FDR', 'pvalue']).rename(columns={'pvalue': 'P-value'}).drop_duplicates(source_label)\n\n # Add links and bold for significant results\n enrichment_dataframe_subset[source_label] = ['<a href=\"http://www.mirbase.org/cgi-bin/query.pl?terms={}\" target=\"_blank\">{}</a>'.format(x.split(\" \")[0], x) if '-miR-' in x else '<a href=\"http://amp.pharm.mssm.edu/Harmonizome/gene/{}\" target=\"_blank\">{}</a>'.format(x.split(\" \")[0], x)for x in enrichment_dataframe_subset[source_label]]\n \n # else:\n enrichment_dataframe_subset[source_label] = [rowData[source_label].replace('target=\"_blank\">', 'target=\"_blank\"><b>').replace('</a>', '*</b></a>') if rowData['FDR'] < 0.05 else rowData[source_label] for index, rowData in enrichment_dataframe_subset.iterrows()]\n\n # Add rank\n enrichment_dataframe_subset['Rank'] = ['<b>'+str(x+1)+'</b>' for x in range(len(enrichment_dataframe_subset.index))]\n\n # Add overlapping genes with tooltip\n enrichment_dataframe_subset['nr_overlapping_genes'] = [len(x) for x in enrichment_dataframe_subset['overlapping_genes']]\n enrichment_dataframe_subset['overlapping_genes'] = [', '.join(x) for x in enrichment_dataframe_subset['overlapping_genes']]\n enrichment_dataframe_subset[target_label.title()] = ['{nr_overlapping_genes} {geneset} '.format(**rowData)+target_label+'s' for index, rowData in enrichment_dataframe_subset.iterrows()]\n \n # Convert to HTML\n pd.set_option('max.colwidth', -1)\n html_table = enrichment_dataframe_subset.head(50)[['Rank', source_label, 'P-value', 'FDR', target_label.title()]].to_html(escape=False, index=False, classes='w-100')\n html_results = '<div style=\"max-height: 200px; overflow-y: scroll;\">{}</div>'.format(html_table)\n\n # Add CSS\n display(HTML('<style>.w-100{width: 100%;} .text-left th{text-align: left !important;}</style>'))\n display(HTML('<style>.slick-cell{overflow: visible;}.gene-tooltip{text-decoration: underline; text-decoration-style: dotted;}.gene-tooltip .gene-tooltip-text{visibility: hidden; position: absolute; left: 60%; width: 250px; z-index: 1000; text-align: center; background-color: black; color: white; padding: 5px 10px; border-radius: 5px;} .gene-tooltip:hover .gene-tooltip-text{visibility: visible;} .gene-tooltip .gene-tooltip-text::after {content: \" \";position: absolute;bottom: 100%;left: 50%;margin-left: -5px;border-width: 5px;border-style: solid;border-color: transparent transparent black transparent;}</style>'))\n\n # Display table\n display(HTML(html_results))\n # Display gene set\n \n if source_label == \"Transcription Factor\":\n additional_description = \"Enrichment Analysis 
Results for {} in {}. The table contains scrollable tables displaying the results of the Transcription Factor (TF) enrichment analysis generated using Enrichr. Every row represents a TF; significant TFs are highlighted in bold.\"\n elif source_label == \"Kinase\":\n additional_description = \"Enrichment Analysis Results for {} in {}. The table contains browsable tables displaying the results of the Protein Kinase (PK) enrichment analysis generated using Enrichr. Every row represents a PK; significant PKs are highlighted in bold.\" \n elif source_label == \"miRNA\":\n additional_description = \"Enrichment Analysis Results for {} in {}. The figure contains browsable tables displaying the results of the miRNA enrichment analysis generated using Enrichr. Every row represents a miRNA; significant miRNAs are highlighted in bold.\"\n display_object(table_counter, additional_description.format(label, gene_set_library), istable=True)\n display(create_download_link(enrichment_dataframe_subset, filename=\"Enrichment_analysis_{}_{}.csv\".format(source_label, gene_set_library)))\n table_counter += 1\n \n return table_counter\n\ndef display_table(analysis_results, source_label, label, table_counter):\n \n # Plot Table\n return results_table(analysis_results['enrichment_dataframe'].copy(), source_label=source_label, target_label='target', label=label, table_counter=table_counter)\ndef CPM(data):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n data = (data/data.sum())*10**6\n data = data.fillna(0)\n\n return data\n\n\ndef run_monocle(dataset, color_by='Pseudotime', ordering='de', plot_type='interactive'):\n robjects.r('''\n suppressMessages(library(dplyr))\n suppressMessages(library(monocle))\n suppressMessages(library(tibble))\n suppressMessages(require(Matrix))\n suppressMessages(require(VGAM))\n suppressMessages(require(igraph))\n\n # Make a CellDataSet object\n # @expr_df: CPM expression data.frame (genes by samples) \n makeCellData <- function(expr_df) {\n genes <- rownames(expr_df)\n expr_mat = data.matrix(expr_df)\n num_cells_expressed <- (expr_mat > 0.1) + 0\n num_cells_expressed <- Matrix::rowSums(num_cells_expressed)\n fd <- data.frame(num_cells_expressed=num_cells_expressed, row.names = genes)\n fd <- new(\"AnnotatedDataFrame\", data = fd)\n pd <- new(\"AnnotatedDataFrame\", data = data.frame(row.names=colnames(expr_mat)))\n \n newCellDataSet(expr_mat,\n phenoData = pd,\n featureData = fd,\n lowerDetectionLimit = 0.1,\n expressionFamily = VGAM::tobit(0.1))\n }\n\n makeCellData3 <- function(expr_df) {\n genes <- rownames(expr_df)\n expr_mat = data.matrix(expr_df)\n num_cells_expressed <- (expr_mat > 0.1) + 0\n num_cells_expressed <- Matrix::rowSums(num_cells_expressed)\n fd <- data.frame(num_cells_expressed=num_cells_expressed, row.names = genes)\n fd <- new(\"AnnotatedDataFrame\", data = fd)\n pd <- data.frame(row.names=colnames(expr_mat))\n # a hack to avoid error when running `partitionCells`\n pd['foo'] = 'bar'\n pd <- new(\"AnnotatedDataFrame\", data = pd)\n \n newCellDataSet(expr_mat,\n phenoData = pd,\n featureData = fd,\n lowerDetectionLimit = 0.1)\n }\n\n getDEGsAsOrderingGenes <- function(cds){\n # get DEGs among clusters\n cds_expressed_genes <- row.names(subset(fData(cds), num_cells_expressed >= 10))\n clustering_DEG_genes <- differentialGeneTest(cds[cds_expressed_genes,], \n fullModelFormulaStr = '~Cluster',\n cores = 8)\n # order cells with top 1000 DEGs\n cds_ordering_genes <- row.names(clustering_DEG_genes)[order(clustering_DEG_genes$qval)][1:1000]\n 
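        # note: the bare variable on the next line is this function's return value
        # (R returns the last evaluated expression when return() is not called explicitly)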
cds_ordering_genes\n }\n\n getHighVarGenesAsOrderingGenes <- function(cds){\n # Use genes with highest variance as ordering genes\n RowVar <- function(x, ...) {\n # from https://stackoverflow.com/questions/25099825/row-wise-variance-of-a-matrix-in-r\n rowSums((x - rowMeans(x, ...))^2, ...)/(dim(x)[2] - 1)\n }\n # use genes with high variances for ordering cell\n gene_variances <- RowVar(exprs(cds))\n cds_ordering_genes <- names(gene_variances[order(gene_variances, decreasing = T)])[1:1000]\n cds_ordering_genes\n }\n\n # Run the entire Monocle-DDRTree pipeline to \n # 1) clustering\n # 2) identify DEGs across clusters\n # 3) ordering cells/psudotime estimation\n runMonocleDDRTree <- function(cds, ordering = \"de\") {\n # tSNE and clustering cells\n cds <- reduceDimension(cds, \n max_components = 2,\n norm_method = 'log',\n reduction_method = 'tSNE',\n perplexity = 5,\n verbose = T)\n\n n_cells <- as.numeric(dim(cds)[2])\n k <- 50 # default k for louvain clustering\n if (n_cells < 52){\n k <- n_cells - 2\n }\n cds <- clusterCells(cds, method=\"louvain\", k=k, verbose = T)\n n_clusters <- length(unique(cds$Cluster))\n if (n_clusters > 1 && ordering == \"de\"){\n cds_ordering_genes <- tryCatch(\n {\n message(\"Attempting to compute DEGs across clusters for ordering cells...\")\n getDEGsAsOrderingGenes(cds)\n },\n error=function(cond) {\n message(\"Error encountered while computing DEGs using monocle:\")\n message(cond)\n message(\"Fall back to using most variable genes for ordering cells\")\n getHighVarGenesAsOrderingGenes(cds)\n }\n )\n } else { # only 1 cluster\n message(\"Using most variable genes for ordering cells...\")\n cds_ordering_genes <- getHighVarGenesAsOrderingGenes(cds)\n }\n cds <- setOrderingFilter(cds, ordering_genes = cds_ordering_genes)\n cds <- reduceDimension(cds, method = 'DDRTree', norm_method = 'log', \n ncenter = NULL)\n cds <- orderCells(cds)\n return(cds)\n }\n\n\n runMonocleUMAPsimplePPT <- function(cds) {\n # 1. Noramlize and pre-process the data\n cds <- estimateSizeFactors(cds)\n cds <- preprocessCDS(cds, \n num_dim = 50,\n norm_method = 'log',\n method = 'PCA'\n )\n # 2. Reduce the dimensionality of the data\n cds <- reduceDimension(cds, max_components = 3,\n reduction_method = 'UMAP',\n metric=\"cosine\",\n verbose = F)\n # 3. Partition the cells into supergroups\n cds <- partitionCells(cds)\n # 4. Learn the principal graph\n cds <- learnGraph(cds,\n max_components = 3,\n RGE_method = 'SimplePPT',\n partition_component = T,\n verbose = F)\n ## Not Implemented: Monocle 3 doesn't seem to support ordering by \n # the expression of a list of genes. Will need know how to automatically\n # find the root cell instead.\n\n # # 5. 
order cells\n # cds <- orderCells(cds, \n # root_pr_nodes = get_correct_root_state(cds,\n # cell_phenotype = 'cell_type2',\n # \"Multipotent progenitors\"))\n return(cds)\n }\n\n # Convert cds object to edge_df and data_df for making plot\n # @ref: https://github.com/cole-trapnell-lab/monocle-release/blob/ea83577c511564222bd08a35a9f944b07ccd1a42/R/plotting.R#L53\n convertToDataFrames <- function(cds) {\n sample_name <- NA\n sample_state <- pData(cds)$State\n # data_dim_1 <- NA\n # data_dim_2 <- NA\n theta <- 0\n x <- 1\n y <- 2\n\n lib_info_with_pseudo <- pData(cds)\n\n reduced_dim_coords <- reducedDimK(cds)\n\n ica_space_df <- Matrix::t(reduced_dim_coords) %>%\n as.data.frame() %>%\n select_(prin_graph_dim_1 = x, prin_graph_dim_2 = y) %>%\n mutate(sample_name = rownames(.), sample_state = rownames(.))\n\n dp_mst <- minSpanningTree(cds)\n\n edge_df <- dp_mst %>%\n igraph::as_data_frame() %>%\n select_(source = \"from\", target = \"to\") %>%\n left_join(ica_space_df %>% select_(source=\"sample_name\", source_prin_graph_dim_1=\"prin_graph_dim_1\", source_prin_graph_dim_2=\"prin_graph_dim_2\"), by = \"source\") %>%\n left_join(ica_space_df %>% select_(target=\"sample_name\", target_prin_graph_dim_1=\"prin_graph_dim_1\", target_prin_graph_dim_2=\"prin_graph_dim_2\"), by = \"target\")\n\n data_df <- t(monocle::reducedDimS(cds)) %>%\n as.data.frame() %>%\n select_(x = x, y = y) %>%\n rownames_to_column(\"sample_name\") %>%\n mutate(sample_state) %>%\n left_join(lib_info_with_pseudo %>% rownames_to_column(\"sample_name\"), by = \"sample_name\")\n\n return_rotation_mat <- function(theta) {\n theta <- theta / 180 * pi\n matrix(c(cos(theta), sin(theta), -sin(theta), cos(theta)), nrow = 2)\n }\n rot_mat <- return_rotation_mat(theta)\n\n cn1 <- c(\"x\", \"y\")\n cn2 <- c(\"source_prin_graph_dim_1\", \"source_prin_graph_dim_2\")\n cn3 <- c(\"target_prin_graph_dim_1\", \"target_prin_graph_dim_2\")\n data_df[, cn1] <- as.matrix(data_df[, cn1]) %*% t(rot_mat)\n edge_df[, cn2] <- as.matrix(edge_df[, cn2]) %*% t(rot_mat)\n edge_df[, cn3] <- as.matrix(edge_df[, cn3]) %*% t(rot_mat)\n # Drop the redundant sample_state column\n data_df[,\"sample_state\"] = NULL\n return(list(edge_df=edge_df, data_df=data_df))\n }\n\n\n convertToDataFrames3 <- function(cds) {\n sample_name <- NA\n sample_state <- pData(cds)$louvain_component\n\n x <- 1\n y <- 2\n z <- 3\n\n lib_info_with_pseudo <- pData(cds)\n\n reduced_dim_coords <- reducedDimK(cds)\n\n ica_space_df <- Matrix::t(reduced_dim_coords) %>%\n as.data.frame() %>%\n select_(prin_graph_dim_1 = x, prin_graph_dim_2 = y, prin_graph_dim_3 = z) %>%\n mutate(sample_name = rownames(.), sample_state = rownames(.))\n\n dp_mst <- minSpanningTree(cds)\n\n edge_df <- dp_mst %>%\n igraph::as_data_frame() %>%\n select_(source = \"from\", target = \"to\") %>%\n left_join(ica_space_df %>% select_(source=\"sample_name\", source_prin_graph_dim_1=\"prin_graph_dim_1\", \n source_prin_graph_dim_2=\"prin_graph_dim_2\", \n source_prin_graph_dim_3=\"prin_graph_dim_3\"), by = \"source\") %>%\n left_join(ica_space_df %>% select_(target=\"sample_name\", target_prin_graph_dim_1=\"prin_graph_dim_1\", \n target_prin_graph_dim_2=\"prin_graph_dim_2\", \n target_prin_graph_dim_3=\"prin_graph_dim_3\"), by = \"target\")\n\n data_df <- t(monocle::reducedDimS(cds)) %>%\n as.data.frame() %>%\n select_(x = x, y = y, z = z) %>%\n rownames_to_column(\"sample_name\") %>%\n mutate(sample_state) %>%\n left_join(lib_info_with_pseudo %>% rownames_to_column(\"sample_name\"), by = \"sample_name\")\n\n # Drop the 
redundant sample_state column\n data_df[,\"sample_state\"] = NULL\n return(list(edge_df=edge_df, data_df=data_df))\n }\n\n # Run the entire Monocle pipeline\n runMonoclePipeline <- function(expr_df, ordering = \"de\") {\n cds <- makeCellData(expr_df)\n cds <- runMonocleDDRTree(cds, ordering = ordering)\n convertToDataFrames(cds) \n }\n\n runMonocle3Pipeline <- function(expr_df){\n cds <- makeCellData3(expr_df)\n cds <- runMonocleUMAPsimplePPT(cds)\n convertToDataFrames3(cds)\n }\n ''')\n runMonoclePipeline = robjects.globalenv['runMonoclePipeline']\n # Compute CPM\n rawdata = dataset.raw.to_adata().to_df().T\n data = CPM(rawdata)\n # Run Monocle\n results_monocle = runMonoclePipeline(pandas2ri.conversion.py2rpy(data), ordering=ordering)\n monocle_results = {}\n for key_idx in range(len(list(results_monocle.names))):\n key = list(results_monocle.names)[key_idx]\n df = pandas2ri.conversion.rpy2py(results_monocle[key_idx])\n monocle_results[key] = df\n\n monocle_results['data_df'].set_index('sample_name', inplace=True)\n monocle_results['sample_metadata'] = dataset.obs[[\"leiden\"]].merge(\n monocle_results['data_df'],\n left_index=True,\n right_index=True\n )\n # Return\n monocle_results.update(\n {'color_by': color_by, 'plot_type': plot_type}\n )\n return monocle_results\n\n\n\ndef plot_monocle(monocle_results, debug=False):\n # Get results\n sample_metadata = monocle_results['sample_metadata']\n color_by = monocle_results.get('color_by')\n\n color_type = 'continuous'\n if color_by == 'State':\n color_type = 'categorical'\n \n color_column = monocle_results['sample_metadata'][color_by] if color_by else None\n sample_titles = ['<b>{}</b><br>'.format(index)+'<br>'.join('<i>{key}</i>: {value}'.format(**locals()) for key, value in rowData.items()) for index, rowData in sample_metadata.iterrows()]\n\n # Make a trace for the trajectory\n edge_trace = go.Scatter(\n x=[],\n y=[],\n line=dict(width=1,color='#888'),\n hoverinfo='none',\n name='trajectory',\n mode='lines')\n\n for _, row in monocle_results['edge_df'].iterrows():\n x0, y0 = row['source_prin_graph_dim_1'], row['source_prin_graph_dim_2']\n x1, y1 = row['target_prin_graph_dim_1'], row['target_prin_graph_dim_2']\n edge_trace['x'] += (x0, x1, None)\n edge_trace['y'] += (y0, y1, None)\n \n if color_by and color_type == 'continuous':\n marker = dict(size=5, color=color_column, colorscale='Viridis', showscale=True)\n trace = go.Scatter(x=monocle_results['data_df']['x'],\n y=monocle_results['data_df']['y'],\n mode='markers',\n hoverinfo='text',\n text=sample_titles,\n marker=marker,\n name='Cells'\n )\n data = [trace, edge_trace]\n\n elif color_by and color_type == 'categorical' and len(color_column.unique()) <= len(s.colors):\n\n # Get unique categories\n unique_categories = color_column.unique()\n\n # Define empty list\n data = []\n \n # Loop through the unique categories\n for i, category in enumerate(unique_categories):\n\n # Get the color corresponding to the category\n category_color = s.colors[i]\n\n # Get the indices of the samples corresponding to the category\n category_indices = [i for i, sample_category in enumerate(color_column) if sample_category == category]\n \n # Create new trace\n trace = go.Scatter(x=monocle_results['data_df']['x'].values[category_indices],\n y=monocle_results['data_df']['y'].values[category_indices],\n mode='markers',\n hoverinfo='text',\n text=[sample_titles[x] for x in category_indices],\n name = category,\n marker=dict(size=5, color=category_color))\n \n # Append trace to data list\n data.append(trace)\n 
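        # after the per-category cell traces, append the trajectory (minimum spanning tree)
        # edge trace so the lineage edges are drawn in the same figure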
data.append(edge_trace)\n else:\n marker = dict(size=5)\n trace = go.Scatter(x=monocle_results['data_df']['x'],\n y=monocle_results['data_df']['y'],\n mode='markers',\n hoverinfo='text',\n text=sample_titles,\n marker=marker)\n data = [trace, edge_trace]\n \n colored = '' if str(color_by) == 'None' else 'Colored by {}'.format(color_by)\n layout = go.Layout(title='<b>Monocle Analysis | Cell Trajectory Plot</b><br><i>{}</i>'.format(colored), hovermode='closest', margin=go.Margin(l=0,r=0,b=0,t=50), width=900,\n scene=dict(xaxis=dict(title='Component 1'), yaxis=dict(title='Component 2')))\n fig = go.Figure(data=data, layout=layout)\n fig.show()\n plt.savefig(\"monocle.pdf\")\n\ndef run_tempora(dataset, timepoint_labels_column_name, timepoint_labels):\n robjects.r('''\n library(Seurat)\n library(Tempora)\n library(snow)\n library(RColorBrewer)\n library(igraph)\n\n #overwrite a function of Tempora\n BuildTrajectory <- function(object, n_pcs, difference_threshold=0.01){\n\n if (class(object)[1] != \"Tempora\"){\n stop(\"Not a valid Tempora object\")\n }\n\n\n if (n_pcs > ncol([email protected]$rotation)){\n stop(\"Number of PCs selected exceeds number of PCs calculated\")\n }\n\n significant_pathways_list <- gsva_pca <- list()\n for (i in 1:n_pcs){\n genes_scaled <- scale([email protected]$rotation[,i])\n significant_pathways_list[[i]] <- [email protected][which(rownames([email protected]) %in% names(which(genes_scaled[,1] > 1.0 | genes_scaled[,1] < -1.0))), ]\n gsva_pca[[i]] <- colMeans(significant_pathways_list[[i]])\n }\n\n gsva_pca <- Reduce(rbind, gsva_pca)\n rownames(gsva_pca) <- paste0(\"PC\", seq(1:nrow(gsva_pca)))\n\n mi_network <- bnlearn::aracne(as.data.frame(gsva_pca))\n edges_df <- as.data.frame(mi_network$arcs)\n edges_df$to <- as.numeric(as.character(edges_df$to))\n edges_df$from <- as.numeric(as.character(edges_df$from))\n edges_df$from_clusterscore <- unlist(sapply(edges_df$from, function(x) [email protected]$Cluster_time_score[[email protected]$Id == x]))\n edges_df$to_clusterscore <- unlist(sapply(edges_df$to, function(x) [email protected]$Cluster_time_score[[email protected]$Id == x]))\n\n\n edges_df$direction <- ifelse((abs(edges_df$to_clusterscore - edges_df$from_clusterscore)/(0.5*(edges_df$to_clusterscore + edges_df$from_clusterscore))) < difference_threshold, \"bidirectional\", \"unidirectional\")\n edges_df <- edges_df[-which(edges_df$from_clusterscore > edges_df$to_clusterscore), ]\n edges_df$id <- ifelse(as.numeric(edges_df$from) > as.numeric(edges_df$to), paste0(edges_df$from, edges_df$to), paste0(edges_df$to, edges_df$from))\n edges_df <- edges_df[!duplicated(edges_df$id), ]\n edges_df <- edges_df[, -6]\n edges_df$type <- ifelse(edges_df$direction == \"bidirectional\", 3, 1)\n\n object@trajectory <- edges_df\n [email protected] <- n_pcs\n return(object)\n }\n\n #overwrite a function of Tempora\n PlotTrajectory <- function(object, plotname, layout=NULL, ...){\n\n if (class(object)[1] != \"Tempora\"){\n stop(\"Not a valid Tempora object\")\n }\n\n if (is.null(object@trajectory)){\n stop(\"BuildTrajectory has not been run. 
See ?Tempora::BuildTrajectory for details\")\n }\n edge_graph <- igraph::graph_from_data_frame(d=object@trajectory, vertices = [email protected], directed = T)\n if (is.null(layout)){\n l <- igraph::layout_with_sugiyama(edge_graph, layers = [email protected]$Cluster_time_score, maxiter = 1000)\n #l$layout[,2] <- 3-(rescale([email protected]$Cluster_time_score, to=c(0,3)))\n if (length(levels([email protected]$Timepoints)) > 9){\n colours <- colorRampPalette(RColorBrewer::brewer.pal(7, \"YlOrRd\"))\n plot.igraph(edge_graph, ylim=c(-1,1), layout = l$layout, ylab = \"Inferred time\", vertex.shape = \"pie\", vertex.pie = lapply(1:nrow([email protected]), function(x) as.numeric([email protected][x,2:((length(levels([email protected]$Timepoints)))+1)])),\n vertex.pie.color=list(colours(length(levels([email protected]$Timepoints)))), pie.border=list(rep(\"white\", 4)), vertex.frame.color=\"white\", edge.arrow.size = 0.5, edge.width = 1.5, vertex.label.family=\"Arial\",\n vertex.label.color=\"black\", edge.lty = E(edge_graph)$type, ...)\n axis(side=2, at=c(-1,1), labels=c(\"Late\",\"Early\"), las=1)\n legend(1,1, legend = levels([email protected]$Timepoints), fill=colours, bty = \"n\", border=\"black\")\n } else {\n colours <- brewer.pal(length(levels([email protected]$Timepoints)), \"YlOrRd\")\n plot.igraph(edge_graph, ylim=c(-1,1), ylab = \"Inferred time\", layout = l$layout, vertex.shape = \"pie\", vertex.pie = lapply(1:nrow([email protected]), function(x) as.numeric([email protected][x,2:((length(levels([email protected]$Timepoints)))+1)])),\n vertex.pie.color=list(colours), pie.border=list(rep(\"white\", length(levels([email protected]$Timepoints)))), vertex.frame.color=\"white\",\n vertex.label.family=\"Arial\", vertex.label.color=\"black\", edge.lty = E(edge_graph)$type,...)\n legend(1,1, legend = levels([email protected]$Timepoints), fill=colours, bty = \"n\", border = \"black\")\n axis(side=2, at=c(-1,1), labels=c(\"Late\",\"Early\"), las=1)\n }\n object@layouts <- l$layout\n\n } else {\n if (length(levels([email protected]$Timepoints)) > 9){\n colours <- colorRampPalette(RColorBrewer::brewer.pal(7, \"YlOrRd\"))\n \n plot.igraph(edge_graph, ylim=c(-1,1), layout = layout, ylab = \"Inferred time\", vertex.shape = \"pie\", vertex.pie = lapply(1:nrow([email protected]), function(x) as.numeric([email protected][x,2:((length(levels([email protected]$Timepoints)))+1)])),\n vertex.pie.color=list(colours(length(levels([email protected]$Timepoints)))), pie.border=list(rep(\"white\", 4)), vertex.frame.color=\"white\", edge.arrow.size = 0.5, edge.width = 1.5, vertex.label.family=\"Arial\",\n vertex.label.color=\"black\", edge.lty = E(edge_graph)$type, ...)\n axis(side=2, at=c(-1,1), labels=c(\"Late\",\"Early\"), las=1)\n legend(1,1, legend = levels([email protected]$Timepoints), fill=colours, bty = \"n\", border=\"black\")\n \n } else {\n colours <- brewer.pal(length(levels([email protected]$Timepoints)), \"YlOrRd\")\n \n plot.igraph(edge_graph, ylim=c(-1,1), ylab = \"Inferred time\", layout = layout, vertex.shape = \"pie\", vertex.pie = lapply(1:nrow([email protected]), function(x) as.numeric([email protected][x,2:((length(levels([email protected]$Timepoints)))+1)])),\n vertex.pie.color=list(colours), pie.border=list(rep(\"white\", length(levels([email protected]$Timepoints)))), vertex.frame.color=\"white\",\n vertex.label.family=\"Arial\", vertex.label.color=\"black\", edge.lty = E(edge_graph)$type,...)\n legend(1,1, legend = levels([email protected]$Timepoints), fill=colours, bty = \"n\", border = 
\"black\")\n axis(side=2, at=c(-1,1), labels=c(\"Late\",\"Early\"), las=1)\n \n }\n }\n # save\n jpeg(plotname, width=600, height=600)\n\n plot(edge_graph, ylim=c(-1,1), ylab = \"Inferred time\", layout = layout, vertex.shape = \"pie\", vertex.pie = lapply(1:nrow([email protected]), function(x) as.numeric([email protected][x,2:((length(levels([email protected]$Timepoints)))+1)])),\n vertex.pie.color=list(colours), pie.border=list(rep(\"white\", length(levels([email protected]$Timepoints)))), vertex.frame.color=\"white\",\n vertex.label.family=\"Arial\", vertex.label.color=\"black\", edge.lty = E(edge_graph)$type,...)\n legend(1,1, legend = levels([email protected]$Timepoints), fill=colours, bty = \"n\", border = \"black\")\n axis(side=2, at=c(-1,1), labels=c(\"Late\",\"Early\"), las=1)\n dev.off()\n \n validObject(object)\n return(object)\n }\n\n loadData <- function(rnaseq_data_filename, meta_data_filename, cluster_column_name, timepoint_column_name, timpoint_order_list, plotname){\n raw_counts = read.csv(rnaseq_data_filename, row.names = 1)\n meta = read.csv(meta_data_filename, row.names = 1)\n \n #create Seurat object\n # seu <- CreateSeuratObject(raw.data=raw_counts,meta.data=meta,names.delim=\"?\",names.field=1,normalization.method=\"LogNormalize\")\n seu <- CreateSeuratObject(counts=raw_counts,meta.data=meta,names.delim=\"?\",names.field=1,normalization.method=\"LogNormalize\")\n \n #convert Seurat object to Tempora object\n seu_tempora <- ImportSeuratObject(seu, assayType=\"RNA\", clusters = cluster_column_name,\n timepoints = timepoint_column_name,\n timepoint_order = timpoint_order_list)\n \n seu_tempora <- CalculatePWProfiles(seu_tempora, \n gmt_path = \"Human_GOBP_AllPathways_no_GO_iea_June_01_2020_symbol.gmt\",\n method=\"gsva\", min.sz = 5, max.sz = 200, parallel.sz = 1)\n \n \n #Build trajectory with 2 PCs \n seu_tempora <- BuildTrajectory(seu_tempora, n_pcs = 2, difference_threshold = 0.01)\n\n #Visualize the trajectory\n seu_tempora <- PlotTrajectory(seu_tempora, plotname)\n return (seu_tempora)\n }\n ''')\n #download Human_GOBP_AllPathways_no_GO_iea_June_01_2020_symbol.gmt \n url = \"http://download.baderlab.org/EM_Genesets/June_01_2020/Human/symbol/Human_GOBP_AllPathways_no_GO_iea_June_01_2020_symbol.gmt\"\n r = requests.get(url, allow_redirects=True)\n with open('Human_GOBP_AllPathways_no_GO_iea_June_01_2020_symbol.gmt', \"wb\") as f:\n f.write(r.content)\n \n tmp_meta_df = dataset.obs.copy()\n tmp_meta_df[\"Timepoints\"] = tmp_meta_df[timepoint_labels_column_name]\n\n tmp_meta_df[\"leiden\"] = tmp_meta_df[\"leiden\"].astype('int')\n tmp_meta_df[\"leiden\"] = tmp_meta_df[\"leiden\"] + 1\n if len(tmp_meta_df[\"Timepoints\"].unique()) > 1:\n #save preprocessed data\n tmp_meta_df.to_csv(\"metadata_with_clusters.csv\")\n dataset.raw.to_adata().to_df().T.to_csv(\"expressiondata_after_preprocessing.csv\")\n\n loadData = robjects.r['loadData']\n tempora_result = loadData(\"expressiondata_after_preprocessing.csv\", \"metadata_with_clusters.csv\", \"leiden\", \"Timepoints\", timepoint_labels.split(\"\\n\"), \"Tempora_plot.jpg\")\n" ]
[ [ "pandas.concat", "pandas.merge", "pandas.read_csv" ], [ "pandas.concat", "pandas.read_csv", "numpy.min", "numpy.issubdtype", "numpy.repeat", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.colorbar.ColorbarBase", "numpy.ceil", "matplotlib.pyplot.subplot", "matplotlib.colorbar.make_axes", "numpy.log10", "numpy.max", "numpy.var", "pandas.set_option", "matplotlib.pyplot.rcdefaults", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lhmtriet/PUMiner_MSR
[ "cbaa126bc56b0968f4b709374c59b87f16b6811d" ]
[ "Code/analysis/psf_baseline/predict.py" ]
[ "import pandas as pd\nimport sanalytics.estimators.pu_estimators as pu\nfrom gensim.models.doc2vec import Doc2Vec\nimport sanalytics.evaluation.evaluation_metric as see\nfrom progressbar import progressbar\nimport sanalytics.algorithms.utils as sau\nfrom time import time\nimport numpy as np\n\n## Read threshold\narg = sys.argv[1].split(\"|\")\nt = float(arg[0])\nname = arg[1]\n\n## Import Data\nX_test = pd.read_parquet(\"datasets/rq3_data/sec1.0_test.parquet\")\n\n## Import D2V\nd2v = Doc2Vec.load(\"datasets/rq3_d2v/{}.model\".format(name))\n\n## In pos set\ndef pos_set(str):\n if \"|\" in str: return False\n if \"sse\" in str: return True\n if \"set1\" in str: return True\n if \"set2\" in str: return True\n\n## Predict functions\ndef predict(post, thresh, d2v):\n vec = d2v.infer_vector(\"{} {} {}\".format(post.title, post.question, post.answers).split())\n sims = d2v.docvecs.most_similar([vec], topn=1000)\n return min(len([i for i in sims if pos_set(i[0]) and i[1] > thresh]), 1)\n\n## Columns\nc_90 = [\"variation\", \"classifier\", \"test_set\", \"recall\", \"prec_lower\", \"prec_opt\", \"f1_lower\", \"f1_opt\", \"f_measure\", \"eval_time\"]\n\n## Test set\nresults_90 = []\nstart_pred = time()\nX_test[\"preds\"] = [predict(i, t, d2v) for i in progressbar(X_test.itertuples())]\nend_pred = time()\nresults = see.evaluate_metrics(X_test[X_test.label==\"security\"].preds, X_test.preds)\nresults_90.append([\"sec1.0\", \"d2v_baseline_{}_{}\".format(t, name), \"test\"] + list(results) + [end_pred - start_pred])\ndf_90 = pd.DataFrame(results_90, columns=c_90)\ndf_90.to_csv(\"analysis/test_models/d2v_baseline/preds/d2v_baseline_{}_{}_90.csv\".format(t, name), index=False)\n" ]
[ [ "pandas.read_parquet", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "1.1", "1.5", "0.24", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
yoheikikuta/robust_physical_perturbations
[ "9c3d4da7fad727a531da7437bf3823bd56ac91c3" ]
[ "lisa-cnn-attack/cleverhans/attacks.py" ]
[ "from abc import ABCMeta\nimport numpy as np\nfrom six.moves import xrange\nimport warnings\n\n\nclass Attack(object):\n\n \"\"\"\n Abstract base class for all attack classes.\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n :param model: A function that takes a symbolic input and returns the\n symbolic output for the model's predictions.\n :param back: The backend to use. Either 'tf' (default) or 'th'.\n :param sess: The tf session to run graphs in (use None for Theano)\n \"\"\"\n if not(back == 'tf' or back == 'th'):\n raise ValueError(\"Backend argument must either be 'tf' or 'th'.\")\n if back == 'th' and sess is not None:\n raise Exception(\"A session should not be provided when using th.\")\n if not hasattr(model, '__call__'):\n raise ValueError(\"model argument must be a function that returns \"\n \"the symbolic output when given an input tensor.\")\n if back == 'th':\n warnings.warn(\"CleverHans support for Theano is deprecated and \"\n \"will be dropped on 2017-11-08.\")\n\n # Prepare attributes\n self.model = model\n self.back = back\n self.sess = sess\n self.inf_loop = False\n\n def generate(self, x, **kwargs):\n \"\"\"\n Generate the attack's symbolic graph for adversarial examples. This\n method should be overriden in any child class that implements an\n attack that is expressable symbolically. Otherwise, it will wrap the\n numerical implementation as a symbolic operator.\n :param x: The model's symbolic inputs.\n :param **kwargs: optional parameters used by child classes.\n :return: A symbolic representation of the adversarial examples.\n \"\"\"\n if self.back == 'th':\n raise NotImplementedError('Theano version not implemented.')\n\n if not self.inf_loop:\n self.inf_loop = True\n assert self.parse_params(**kwargs)\n import tensorflow as tf\n graph = tf.py_func(self.generate_np, [x], tf.float32)\n self.inf_loop = False\n return graph\n else:\n error = \"No symbolic or numeric implementation of attack.\"\n raise NotImplementedError(error)\n\n def generate_np(self, x_val, **kwargs):\n \"\"\"\n Generate adversarial examples and return them as a Numpy array. 
This\n method should be overriden in any child class that implements an attack\n that is not fully expressed symbolically.\n :param x_val: A Numpy array with the original inputs.\n :param **kwargs: optional parameters used by child classes.\n :return: A Numpy array holding the adversarial examples.\n \"\"\"\n if self.back == 'th':\n raise NotImplementedError('Theano version not implemented.')\n\n if not self.inf_loop:\n self.inf_loop = True\n import tensorflow as tf\n\n # Generate this attack's graph if not done previously\n if not hasattr(self, \"_x\") and not hasattr(self, \"_x_adv\"):\n input_shape = list(x_val.shape)\n input_shape[0] = None\n self._x = tf.placeholder(tf.float32, shape=input_shape)\n self._x_adv = self.generate(self._x, **kwargs)\n self.inf_loop = False\n else:\n error = \"No symbolic or numeric implementation of attack.\"\n raise NotImplementedError(error)\n\n if self.sess is None:\n raise ValueError(\"Cannot use `generate_np` when no `sess` was\"\n \" provided\")\n return self.sess.run(self._x_adv, feed_dict={self._x: x_val})\n\n def parse_params(self, params=None):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n :param params: a dictionary of attack-specific parameters\n :return: True when parsing was successful\n \"\"\"\n return True\n\n\nclass FastGradientMethod(Attack):\n\n \"\"\"\n This attack was originally implemented by Goodfellow et al. (2015) with the\n infinity norm (and is known as the \"Fast Gradient Sign Method\"). This\n implementation extends the attack to other norms, and is therefore called\n the Fast Gradient Method.\n Paper link: https://arxiv.org/abs/1412.6572\n \"\"\"\n\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Create a FastGradientMethod instance.\n \"\"\"\n super(FastGradientMethod, self).__init__(model, back, sess)\n\n def generate(self, x, **kwargs):\n \"\"\"\n Generate symbolic graph for adversarial examples and return.\n :param x: The model's symbolic inputs.\n :param eps: (optional float) attack step size (input variation)\n :param ord: (optional) Order of the norm (mimics Numpy).\n Possible values: np.inf, 1 or 2.\n :param y: (optional) A placeholder for the model labels. Only provide\n this parameter if you'd like to use true labels when crafting\n adversarial samples. Otherwise, model predictions are used as\n labels to avoid the \"label leaking\" effect (explained in this\n paper: https://arxiv.org/abs/1611.01236). Default is None.\n Labels should be one-hot-encoded.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n # Parse and save attack-specific parameters\n assert self.parse_params(**kwargs)\n\n if self.back == 'tf':\n from .attacks_tf import fgm\n else:\n from .attacks_th import fgm\n\n return fgm(x, self.model(x), y=self.y, eps=self.eps, ord=self.ord,\n clip_min=self.clip_min, clip_max=self.clip_max)\n\n def generate_np(self, x_val, **kwargs):\n \"\"\"\n Generate adversarial samples and return them in a Numpy array.\n :param x_val: (required) A Numpy array with the original inputs.\n :param eps: (required float) attack step size (input variation)\n :param ord: (optional) Order of the norm (mimics Numpy).\n Possible values: np.inf, 1 or 2.\n :param y: (optional) A placeholder for the model labels. Only provide\n this parameter if you'd like to use true labels when crafting\n adversarial samples. 
Otherwise, model predictions are used as\n labels to avoid the \"label leaking\" effect (explained in this\n paper: https://arxiv.org/abs/1611.01236). Default is None.\n Labels should be one-hot-encoded.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n if self.back == 'th':\n raise NotImplementedError('Theano version not implemented.')\n if self.sess is None:\n raise ValueError(\"Cannot use `generate_np` when no `sess` was\"\n \" provided\")\n\n import tensorflow as tf\n\n # Generate this attack's graph if it hasn't been done previously\n if not hasattr(self, \"_x\"):\n input_shape = list(x_val.shape)\n input_shape[0] = None\n self._x = tf.placeholder(tf.float32, shape=input_shape)\n self._x_adv = self.generate(self._x, **kwargs)\n\n # Run symbolic graph without or with true labels\n if 'y_val' not in kwargs or kwargs['y_val'] is None:\n feed_dict = {self._x: x_val}\n else:\n # Verify label placeholder was given in params if using true labels\n if self.y is None:\n error = \"True labels given but label placeholder not given.\"\n raise Exception(error)\n feed_dict = {self._x: x_val, self.y: kwargs['y_val']}\n return self.sess.run(self._x_adv, feed_dict=feed_dict)\n\n def parse_params(self, eps=0.3, ord=np.inf, y=None, clip_min=None,\n clip_max=None, **kwargs):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n Attack-specific parameters:\n :param eps: (optional float) attack step size (input variation)\n :param ord: (optional) Order of the norm (mimics Numpy).\n Possible values: np.inf, 1 or 2.\n :param y: (optional) A placeholder for the model labels. Only provide\n this parameter if you'd like to use true labels when crafting\n adversarial samples. Otherwise, model predictions are used as\n labels to avoid the \"label leaking\" effect (explained in this\n paper: https://arxiv.org/abs/1611.01236). Default is None.\n Labels should be one-hot-encoded.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n # Save attack-specific parameters\n self.eps = eps\n self.ord = ord\n self.y = y\n self.clip_min = clip_min\n self.clip_max = clip_max\n\n # Check if order of the norm is acceptable given current implementation\n if self.ord not in [np.inf, int(1), int(2)]:\n raise ValueError(\"Norm order must be either np.inf, 1, or 2.\")\n if self.back == 'th' and self.ord != np.inf:\n raise NotImplementedError(\"The only FastGradientMethod norm \"\n \"implemented for Theano is np.inf.\")\n return True\n\n\nclass BasicIterativeMethod(Attack):\n\n \"\"\"\n The Basic Iterative Method (Kurakin et al. 2016). 
The original paper used\n hard labels for this attack; no label smoothing.\n Paper link: https://arxiv.org/pdf/1607.02533.pdf\n \"\"\"\n\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Create a BasicIterativeMethod instance.\n \"\"\"\n super(BasicIterativeMethod, self).__init__(model, back, sess)\n\n def generate(self, x, **kwargs):\n import tensorflow as tf\n\n # Parse and save attack-specific parameters\n assert self.parse_params(**kwargs)\n\n # Initialize loop variables\n eta = 0\n\n # Fix labels to the first model predictions for loss computation\n model_preds = self.model(x)\n preds_max = tf.reduce_max(model_preds, 1, keep_dims=True)\n y = tf.to_float(tf.equal(model_preds, preds_max))\n fgsm_params = {'eps': self.eps_iter, 'y': y, 'ord': self.ord}\n\n for i in range(self.nb_iter):\n FGSM = FastGradientMethod(self.model, back=self.back,\n sess=self.sess)\n # Compute this step's perturbation\n eta = FGSM.generate(x + eta, **fgsm_params) - x\n\n # Clipping perturbation eta to self.ord norm ball\n if self.ord == np.inf:\n eta = tf.clip_by_value(eta, -self.eps, self.eps)\n elif self.ord in [1, 2]:\n reduc_ind = list(xrange(1, len(eta.get_shape())))\n if self.ord == 1:\n norm = tf.reduce_sum(tf.abs(eta),\n reduction_indices=reduc_ind,\n keep_dims=True)\n elif self.ord == 2:\n norm = tf.sqrt(tf.reduce_sum(tf.square(eta),\n reduction_indices=reduc_ind,\n keep_dims=True))\n eta = eta * self.eps / norm\n\n # Define adversarial example (and clip if necessary)\n adv_x = x + eta\n if self.clip_min is not None and self.clip_max is not None:\n adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)\n\n return adv_x\n\n def parse_params(self, eps=0.3, eps_iter=0.05, nb_iter=10, y=None,\n ord=np.inf, clip_min=None, clip_max=None, **kwargs):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n Attack-specific parameters:\n :param eps: (required float) maximum distortion of adversarial example\n compared to original input\n :param eps_iter: (required float) step size for each attack iteration\n :param nb_iter: (required int) Number of attack iterations.\n :param y: (required) A placeholder for the model labels.\n :param ord: (optional) Order of the norm (mimics Numpy).\n Possible values: np.inf, 1 or 2.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n\n # Save attack-specific parameters\n self.eps = eps\n self.eps_iter = eps_iter\n self.nb_iter = nb_iter\n self.y = y\n self.ord = ord\n self.clip_min = clip_min\n self.clip_max = clip_max\n\n # Check if order of the norm is acceptable given current implementation\n if self.ord not in [np.inf, 1, 2]:\n raise ValueError(\"Norm order must be either np.inf, 1, or 2.\")\n if self.back == 'th':\n error_string = \"BasicIterativeMethod is not implemented in Theano\"\n raise NotImplementedError(error_string)\n\n return True\n\n\nclass SaliencyMapMethod(Attack):\n\n \"\"\"\n The Jacobian-based Saliency Map Method (Papernot et al. 
2016).\n Paper link: https://arxiv.org/pdf/1511.07528.pdf\n \"\"\"\n\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Create a SaliencyMapMethod instance.\n \"\"\"\n super(SaliencyMapMethod, self).__init__(model, back, sess)\n\n if self.back == 'th':\n error = \"Theano version of SaliencyMapMethod not implemented.\"\n raise NotImplementedError(error)\n\n def generate(self, x, **kwargs):\n \"\"\"\n Attack-specific parameters:\n \"\"\"\n import tensorflow as tf\n from .attacks_tf import jacobian_graph, jsma_batch\n\n # Parse and save attack-specific parameters\n assert self.parse_params(**kwargs)\n\n # Define Jacobian graph wrt to this input placeholder\n preds = self.model(x)\n grads = jacobian_graph(preds, x, self.nb_classes)\n\n # Define appropriate graph (targeted / random target labels)\n if self.targets is not None:\n def jsma_wrap(x_val, targets):\n return jsma_batch(self.sess, x, preds, grads, x_val,\n self.theta, self.gamma, self.clip_min,\n self.clip_max, self.nb_classes,\n targets=targets)\n\n # Attack is targeted, target placeholder will need to be fed\n wrap = tf.py_func(jsma_wrap, [x, self.targets], tf.float32)\n else:\n def jsma_wrap(x_val):\n return jsma_batch(self.sess, x, preds, grads, x_val,\n self.theta, self.gamma, self.clip_min,\n self.clip_max, self.nb_classes,\n targets=None)\n\n # Attack is untargeted, target values will be chosen at random\n wrap = tf.py_func(jsma_wrap, [x], tf.float32)\n\n return wrap\n\n def generate_np(self, x_val, **kwargs):\n \"\"\"\n Attack-specific parameters:\n :param batch_size: (optional) Batch size when running the graph\n :param targets: (optional) Target values if the attack is targeted\n \"\"\"\n if self.sess is None:\n raise ValueError(\"Cannot use `generate_np` when no `sess` was\"\n \" provided\")\n\n import tensorflow as tf\n\n # Generate this attack's graph if it hasn't been done previously\n if not hasattr(self, \"_x\"):\n input_shape = list(x_val.shape)\n input_shape[0] = None\n self._x = tf.placeholder(tf.float32, shape=input_shape)\n self._x_adv = self.generate(self._x, **kwargs)\n\n # Run symbolic graph without or with true labels\n if 'y_val' not in kwargs or kwargs['y_val'] is None:\n feed_dict = {self._x: x_val}\n else:\n if self.targets is None:\n raise Exception(\"This attack was instantiated untargeted.\")\n else:\n if len(kwargs['y_val'].shape) > 1:\n nb_targets = len(kwargs['y_val'])\n else:\n nb_targets = 1\n if nb_targets != len(x_val):\n raise Exception(\"Specify exactly one target per input.\")\n feed_dict = {self._x: x_val, self.targets: kwargs['y_val']}\n return self.sess.run(self._x_adv, feed_dict=feed_dict)\n\n def parse_params(self, theta=1., gamma=np.inf, nb_classes=10, clip_min=0.,\n clip_max=1., targets=None, **kwargs):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n Attack-specific parameters:\n :param theta: (optional float) Perturbation introduced to modified\n components (can be positive or negative)\n :param gamma: (optional float) Maximum percentage of perturbed features\n :param nb_classes: (optional int) Number of model output classes\n :param clip_min: (optional float) Minimum component value for clipping\n :param clip_max: (optional float) Maximum component value for clipping\n :param targets: (optional) Target placeholder if the attack is targeted\n \"\"\"\n\n self.theta = theta\n self.gamma = gamma\n self.nb_classes = nb_classes\n self.clip_min = clip_min\n self.clip_max = clip_max\n self.targets = targets\n\n 
return True\n\n\nclass VirtualAdversarialMethod(Attack):\n\n \"\"\"\n This attack was originally proposed by Miyato et al. (2016) and was used\n for virtual adversarial training.\n Paper link: https://arxiv.org/abs/1507.00677\n\n \"\"\"\n\n def __init__(self, model, back='tf', sess=None):\n super(VirtualAdversarialMethod, self).__init__(model, back, sess)\n\n def generate(self, x, **kwargs):\n \"\"\"\n Generate symbolic graph for adversarial examples and return.\n :param x: The model's symbolic inputs.\n :param eps: (optional float ) the epsilon (input variation parameter)\n :param num_iterations: (optional) the number of iterations\n :param xi: (optional float) the finite difference parameter\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n # Parse and save attack-specific parameters\n assert self.parse_params(**kwargs)\n\n return vatm(self.model, x, self.model(x), eps=self.eps,\n num_iterations=self.num_iterations, xi=self.xi,\n clip_min=self.clip_min, clip_max=self.clip_max)\n\n def generate_np(self, x_val, **kwargs):\n \"\"\"\n Generate adversarial samples and return them in a Numpy array.\n :param x_val: (required) A Numpy array with the original inputs.\n :param eps: (optional float )the epsilon (input variation parameter)\n :param num_iterations: (optional) the number of iterations\n :param xi: (optional float) the finite difference parameter\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n if self.back == 'th':\n raise NotImplementedError('Theano version not implemented.')\n\n import tensorflow as tf\n\n # Generate this attack's graph if it hasn't been done previously\n if not hasattr(self, \"_x\"):\n input_shape = list(x_val.shape)\n input_shape[0] = None\n self._x = tf.placeholder(tf.float32, shape=input_shape)\n self._x_adv = self.generate(self._x, **kwargs)\n\n return self.sess.run(self._x_adv, feed_dict={self._x: x_val})\n\n def parse_params(self, eps=2.0, num_iterations=1, xi=1e-6, clip_min=None,\n clip_max=None, **kwargs):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n Attack-specific parameters:\n :param eps: (optional float )the epsilon (input variation parameter)\n :param num_iterations: (optional) the number of iterations\n :param xi: (optional float) the finite difference parameter\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n # Save attack-specific parameters\n self.eps = eps\n self.num_iterations = num_iterations\n self.xi = xi\n self.clip_min = clip_min\n self.clip_max = clip_max\n return True\n\n\ndef fgsm(x, predictions, eps, back='tf', clip_min=None, clip_max=None):\n \"\"\"\n A wrapper for the Fast Gradient Sign Method.\n It calls the right function, depending on the\n user's backend.\n :param x: the input\n :param predictions: the model's output\n (Note: in the original paper that introduced this\n attack, the loss was computed by comparing the\n model predictions with the hard labels (from the\n dataset). Instead, this version implements the loss\n by comparing the model predictions with the most\n likely class. 
This tweak is recommended since the\n discovery of label leaking in the following paper:\n https://arxiv.org/abs/1611.01236)\n :param eps: the epsilon (input variation parameter)\n :param back: switch between TensorFlow ('tf') and\n Theano ('th') implementation\n :param clip_min: optional parameter that can be used to set a minimum\n value for components of the example returned\n :param clip_max: optional parameter that can be used to set a maximum\n value for components of the example returned\n :return: a tensor for the adversarial example\n \"\"\"\n warnings.warn(\"attacks.fgsm is deprecated and will be removed on \"\n \"2017-09-27. Instantiate an object from FastGradientMethod.\")\n if back == 'tf':\n # Compute FGSM using TensorFlow\n from .attacks_tf import fgm\n return fgm(x, predictions, y=None, eps=eps, ord=np.inf,\n clip_min=clip_min, clip_max=clip_max)\n elif back == 'th':\n # Compute FGSM using Theano\n from .attacks_th import fgm\n return fgm(x, predictions, eps, clip_min=clip_min, clip_max=clip_max)\n\n\ndef vatm(model, x, logits, eps, back='tf', num_iterations=1, xi=1e-6,\n clip_min=None, clip_max=None):\n \"\"\"\n A wrapper for the perturbation methods used for virtual adversarial\n training : https://arxiv.org/abs/1507.00677\n It calls the right function, depending on the\n user's backend.\n :param model: the model which returns the network unnormalized logits\n :param x: the input placeholder\n :param logits: the model's unnormalized output tensor\n :param eps: the epsilon (input variation parameter)\n :param num_iterations: the number of iterations\n :param xi: the finite difference parameter\n :param clip_min: optional parameter that can be used to set a minimum\n value for components of the example returned\n :param clip_max: optional parameter that can be used to set a maximum\n value for components of the example returned\n :return: a tensor for the adversarial example\n\n \"\"\"\n if back == 'tf':\n # Compute VATM using TensorFlow\n from .attacks_tf import vatm as vatm_tf\n return vatm_tf(model, x, logits, eps, num_iterations=num_iterations,\n xi=xi, clip_min=clip_min, clip_max=clip_max)\n elif back == 'th':\n # Compute VATM using Theano\n from .attacks_th import vatm as vatm_th\n return vatm_th(model, x, logits, eps, num_iterations=num_iterations,\n xi=xi, clip_min=clip_min, clip_max=clip_max)\n\n\ndef jsma(sess, x, predictions, grads, sample, target, theta, gamma=np.inf,\n increase=True, back='tf', clip_min=None, clip_max=None):\n \"\"\"\n A wrapper for the Jacobian-based saliency map approach.\n It calls the right function, depending on the\n user's backend.\n :param sess: TF session\n :param x: the input\n :param predictions: the model's symbolic output (linear output,\n pre-softmax)\n :param sample: (1 x 1 x img_rows x img_cols) numpy array with sample input\n :param target: target class for input sample\n :param theta: delta for each feature adjustment\n :param gamma: a float between 0 - 1 indicating the maximum distortion\n percentage\n :param increase: boolean; true if we are increasing pixels, false otherwise\n :param back: switch between TensorFlow ('tf') and\n Theano ('th') implementation\n :param clip_min: optional parameter that can be used to set a minimum\n value for components of the example returned\n :param clip_max: optional parameter that can be used to set a maximum\n value for components of the example returned\n :return: an adversarial sample\n \"\"\"\n warnings.warn(\"attacks.jsma is deprecated and will be removed on \"\n \"2017-09-27. 
Instantiate an object from SaliencyMapMethod.\")\n if back == 'tf':\n # Compute Jacobian-based saliency map attack using TensorFlow\n from .attacks_tf import jsma\n return jsma(sess, x, predictions, grads, sample, target, theta, gamma,\n clip_min, clip_max)\n elif back == 'th':\n raise NotImplementedError(\"Theano jsma not implemented.\")\n" ]
[ [ "tensorflow.clip_by_value", "tensorflow.reduce_max", "tensorflow.equal", "tensorflow.placeholder", "tensorflow.abs", "tensorflow.square", "tensorflow.py_func" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
Maluuba/bokeh
[ "1e6695b7001dd4440a035754d4e085c317ae1122" ]
[ "bokeh/models/tests/test_sources.py" ]
[ "from __future__ import absolute_import\n\nimport unittest\nfrom unittest import skipIf\nimport warnings\n\nimport numpy as np\ntry:\n import pandas as pd\n is_pandas = True\nexcept ImportError as e:\n is_pandas = False\n\nfrom bokeh.models.sources import DataSource, ColumnDataSource\nfrom bokeh.util.serialization import transform_column_source_data\n\nclass TestColumnDataSource(unittest.TestCase):\n\n def test_basic(self):\n ds = ColumnDataSource()\n self.assertTrue(isinstance(ds, DataSource))\n\n def test_init_dict_arg(self):\n data = dict(a=[1], b=[2])\n ds = ColumnDataSource(data)\n self.assertEquals(ds.data, data)\n self.assertEquals(set(ds.column_names), set(data.keys()))\n\n def test_init_dict_data_kwarg(self):\n data = dict(a=[1], b=[2])\n ds = ColumnDataSource(data=data)\n self.assertEquals(ds.data, data)\n self.assertEquals(set(ds.column_names), set(data.keys()))\n\n @skipIf(not is_pandas, \"pandas not installed\")\n def test_init_pandas_arg(self):\n data = dict(a=[1, 2], b=[2, 3])\n df = pd.DataFrame(data)\n ds = ColumnDataSource(df)\n self.assertTrue(set(df.columns).issubset(set(ds.column_names)))\n for key in data.keys():\n self.assertIsInstance(ds.data[key], pd.Series)\n self.assertEquals(list(df[key]), list(ds.data[key]))\n self.assertIsInstance(ds.data['index'], np.ndarray)\n self.assertEquals([0, 1], list(ds.data['index']))\n self.assertEqual(set(ds.column_names) - set(df.columns), set([\"index\"]))\n\n @skipIf(not is_pandas, \"pandas not installed\")\n def test_init_pandas_data_kwarg(self):\n data = dict(a=[1, 2], b=[2, 3])\n df = pd.DataFrame(data)\n ds = ColumnDataSource(data=df)\n self.assertTrue(set(df.columns).issubset(set(ds.column_names)))\n for key in data.keys():\n self.assertIsInstance(ds.data[key], pd.Series)\n self.assertEquals(list(df[key]), list(ds.data[key]))\n self.assertIsInstance(ds.data['index'], np.ndarray)\n self.assertEquals([0, 1], list(ds.data['index']))\n self.assertEqual(set(ds.column_names) - set(df.columns), set([\"index\"]))\n\n def test_add_with_name(self):\n ds = ColumnDataSource()\n name = ds.add([1,2,3], name=\"foo\")\n self.assertEquals(name, \"foo\")\n name = ds.add([4,5,6], name=\"bar\")\n self.assertEquals(name, \"bar\")\n\n def test_add_without_name(self):\n ds = ColumnDataSource()\n name = ds.add([1,2,3])\n self.assertEquals(name, \"Series 0\")\n name = ds.add([4,5,6])\n self.assertEquals(name, \"Series 1\")\n\n def test_add_with_and_without_name(self):\n ds = ColumnDataSource()\n name = ds.add([1,2,3], \"foo\")\n self.assertEquals(name, \"foo\")\n name = ds.add([4,5,6])\n self.assertEquals(name, \"Series 1\")\n\n def test_remove_exists(self):\n ds = ColumnDataSource()\n name = ds.add([1,2,3], \"foo\")\n assert name\n ds.remove(\"foo\")\n self.assertEquals(ds.column_names, [])\n\n def test_remove_exists2(self):\n with warnings.catch_warnings(record=True) as w:\n ds = ColumnDataSource()\n ds.remove(\"foo\")\n self.assertEquals(ds.column_names, [])\n self.assertEquals(len(w), 1)\n self.assertEquals(w[0].category, UserWarning)\n self.assertEquals(str(w[0].message), \"Unable to find column 'foo' in data source\")\n\n def test_stream_bad_data(self):\n ds = ColumnDataSource(data=dict(a=[10], b=[20]))\n with self.assertRaises(ValueError) as cm:\n ds.stream(dict())\n self.assertEqual(str(cm.exception), \"Must stream updates to all existing columns (missing: a, b)\")\n with self.assertRaises(ValueError) as cm:\n ds.stream(dict(a=[10]))\n self.assertEqual(str(cm.exception), \"Must stream updates to all existing columns (missing: b)\")\n with 
self.assertRaises(ValueError) as cm:\n ds.stream(dict(a=[10], b=[10], x=[10]))\n self.assertEqual(str(cm.exception), \"Must stream updates to all existing columns (extra: x)\")\n with self.assertRaises(ValueError) as cm:\n ds.stream(dict(a=[10], x=[10]))\n self.assertEqual(str(cm.exception), \"Must stream updates to all existing columns (missing: b, extra: x)\")\n with self.assertRaises(ValueError) as cm:\n ds.stream(dict(a=[10], b=[10, 20]))\n self.assertEqual(str(cm.exception), \"All streaming column updates must be the same length\")\n\n with self.assertRaises(ValueError) as cm:\n ds.stream(dict(a=[10], b=np.ones((1,1))))\n self.assertTrue(\n str(cm.exception).startswith(\"stream(...) only supports 1d sequences, got ndarray with size (\")\n )\n\n def test_stream_good_data(self):\n ds = ColumnDataSource(data=dict(a=[10], b=[20]))\n ds._document = \"doc\"\n stuff = {}\n mock_setter = object()\n def mock(*args, **kw):\n stuff['args'] = args\n stuff['kw'] = kw\n ds.data._stream = mock\n ds.stream(dict(a=[11, 12], b=[21, 22]), \"foo\", mock_setter)\n self.assertEqual(stuff['args'], (\"doc\", ds, dict(a=[11, 12], b=[21, 22]), \"foo\", mock_setter))\n self.assertEqual(stuff['kw'], {})\n\n def test_patch_bad_columns(self):\n ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))\n with self.assertRaises(ValueError) as cm:\n ds.patch(dict(c=[(0, 100)]))\n self.assertEqual(str(cm.exception), \"Can only patch existing columns (extra: c)\")\n with self.assertRaises(ValueError) as cm:\n ds.patch(dict(a=[(0,100)], c=[(0, 100)], d=[(0, 100)]))\n self.assertEqual(str(cm.exception), \"Can only patch existing columns (extra: c, d)\")\n\n\n def test_patch_bad_simple_indices(self):\n ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))\n with self.assertRaises(ValueError) as cm:\n ds.patch(dict(a=[(3, 100)]))\n self.assertEqual(str(cm.exception), \"Out-of bounds index (3) in patch for column: a\")\n\n def test_patch_good_simple_indices(self):\n ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))\n ds._document = \"doc\"\n stuff = {}\n mock_setter = object()\n def mock(*args, **kw):\n stuff['args'] = args\n stuff['kw'] = kw\n ds.data._patch = mock\n ds.patch(dict(a=[(0,100), (1,101)], b=[(0,200)]), mock_setter)\n self.assertEqual(stuff['args'], (\"doc\", ds, dict(a=[(0,100), (1,101)], b=[(0,200)]), mock_setter))\n self.assertEqual(stuff['kw'], {})\n\n def test_patch_bad_slice_indices(self):\n ds = ColumnDataSource(data=dict(a=[10, 11, 12, 13, 14, 15], b=[20, 21, 22, 23, 24, 25]))\n with self.assertRaises(ValueError) as cm:\n ds.patch(dict(a=[(slice(10), list(range(10)))]))\n self.assertEqual(str(cm.exception), \"Out-of bounds slice index stop (10) in patch for column: a\")\n with self.assertRaises(ValueError) as cm:\n ds.patch(dict(a=[(slice(10, 1), list(range(10)))]))\n self.assertEqual(str(cm.exception), \"Patch slices must have start < end, got slice(10, 1, None)\")\n with self.assertRaises(ValueError) as cm:\n ds.patch(dict(a=[(slice(None, 10, -1), list(range(10)))]))\n self.assertEqual(str(cm.exception), \"Patch slices must have positive (start, stop, step) values, got slice(None, 10, -1)\")\n with self.assertRaises(ValueError) as cm:\n ds.patch(dict(a=[(slice(10, 1, 1), list(range(10)))]))\n self.assertEqual(str(cm.exception), \"Patch slices must have start < end, got slice(10, 1, 1)\")\n with self.assertRaises(ValueError) as cm:\n ds.patch(dict(a=[(slice(10, 1, -1), list(range(10)))]))\n self.assertEqual(str(cm.exception), \"Patch slices must have start < end, got slice(10, 1, -1)\")\n 
with self.assertRaises(ValueError) as cm:\n ds.patch(dict(a=[(slice(1, 10, -1), list(range(10)))]))\n self.assertEqual(str(cm.exception), \"Patch slices must have positive (start, stop, step) values, got slice(1, 10, -1)\")\n\n\n def test_patch_good_slice_indices(self):\n ds = ColumnDataSource(data=dict(a=[10, 11, 12, 13, 14, 15], b=[20, 21, 22, 23, 24, 25]))\n ds._document = \"doc\"\n stuff = {}\n mock_setter = object()\n def mock(*args, **kw):\n stuff['args'] = args\n stuff['kw'] = kw\n ds.data._patch = mock\n ds.patch(dict(a=[(slice(2), [100, 101]), (slice(3, 5), [100, 101])], b=[(slice(None, None, 2), [100, 101, 102])]), mock_setter)\n self.assertEqual(stuff['args'],\n (\"doc\", ds, dict(a=[(slice(2), [100, 101]), (slice(3, 5), [100, 101])], b=[(slice(None, None, 2), [100, 101, 102])]), mock_setter)\n )\n self.assertEqual(stuff['kw'], {})\n\n def test_data_column_lengths(self):\n # TODO: use this when soft=False\n #\n #with self.assertRaises(ValueError):\n # ColumnDataSource(data=dict(a=[10, 11], b=[20, 21, 22]))\n #\n #ds = ColumnDataSource()\n #with self.assertRaises(ValueError):\n # ds.data = dict(a=[10, 11], b=[20, 21, 22])\n #\n #ds = ColumnDataSource(data=dict(a=[10, 11]))\n #with self.assertRaises(ValueError):\n # ds.data[\"b\"] = [20, 21, 22]\n #\n #ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))\n #with self.assertRaises(ValueError):\n # ds.data.update(dict(a=[10, 11, 12]))\n\n with warnings.catch_warnings(record=True) as warns:\n ColumnDataSource(data=dict(a=[10, 11], b=[20, 21, 22]))\n self.assertEquals(len(warns), 1)\n self.assertEquals(str(warns[0].message), \"ColumnDataSource's columns must be of the same length. Current lengths: ('a', 2), ('b', 3)\")\n\n ds = ColumnDataSource()\n with warnings.catch_warnings(record=True) as warns:\n ds.data = dict(a=[10, 11], b=[20, 21, 22])\n self.assertEquals(len(warns), 1)\n self.assertEquals(str(warns[0].message), \"ColumnDataSource's columns must be of the same length. Current lengths: ('a', 2), ('b', 3)\")\n\n ds = ColumnDataSource(data=dict(a=[10, 11]))\n with warnings.catch_warnings(record=True) as warns:\n ds.data[\"b\"] = [20, 21, 22]\n self.assertEquals(len(warns), 1)\n self.assertEquals(str(warns[0].message), \"ColumnDataSource's columns must be of the same length. Current lengths: ('a', 2), ('b', 3)\")\n\n ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))\n with warnings.catch_warnings(record=True) as warns:\n ds.data.update(dict(a=[10, 11, 12]))\n self.assertEquals(len(warns), 1)\n self.assertEquals(str(warns[0].message), \"ColumnDataSource's columns must be of the same length. 
Current lengths: ('a', 3), ('b', 2)\")\n\n def test_set_data_from_json_list(self):\n ds = ColumnDataSource()\n data = {\"foo\": [1, 2, 3]}\n ds.set_from_json('data', data)\n self.assertEquals(ds.data, data)\n\n def test_set_data_from_json_base64(self):\n ds = ColumnDataSource()\n data = {\"foo\": np.arange(3)}\n json = transform_column_source_data(data)\n ds.set_from_json('data', json)\n self.assertTrue(np.array_equal(ds.data[\"foo\"], data[\"foo\"]))\n\n def test_set_data_from_json_nested_base64(self):\n ds = ColumnDataSource()\n data = {\"foo\": [[np.arange(3)]]}\n json = transform_column_source_data(data)\n ds.set_from_json('data', json)\n self.assertTrue(np.array_equal(ds.data[\"foo\"], data[\"foo\"]))\n\n def test_set_data_from_json_nested_base64_and_list(self):\n ds = ColumnDataSource()\n data = {\"foo\": [np.arange(3), [1, 2, 3]]}\n json = transform_column_source_data(data)\n ds.set_from_json('data', json)\n self.assertTrue(np.array_equal(ds.data[\"foo\"], data[\"foo\"]))\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.arange", "numpy.array_equal", "pandas.DataFrame", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
SURGroup/UncertaintyQuantification
[ "a94c8db47d07134ea2b3b0a3ca53ca818532c3e6", "a94c8db47d07134ea2b3b0a3ca53ca818532c3e6", "a94c8db47d07134ea2b3b0a3ca53ca818532c3e6", "a94c8db47d07134ea2b3b0a3ca53ca818532c3e6" ]
[ "docs/code/sampling/latin_hypercube/plot_latin_hypercube_simple.py", "src/UQpy/sampling/mcmc/DRAM.py", "docs/code/sampling/adaptive_kriging/adaptive_kriging_normal.py", "docs/code/inference/mle/plot_learn_distribution_model.py" ]
[ "\"\"\"\n\nLatin Hypercube Sampling\n==================================\n\nThis example shows the use of the Latin Hypercube sampling class. In particular:\n\"\"\"\n\n# %% md\n#\n# - How to define the Latin Hypercube sampling method supported by UQpy\n# - How to use different sampling criteria\n# - How to plot the 2D samples\n\n# %%\n\n# %% md\n#\n# Initially we have to import the necessary modules.\n\n# %%\n\nfrom UQpy.sampling import LatinHypercubeSampling\nimport matplotlib.pyplot as plt\nfrom UQpy.distributions import Uniform\nfrom UQpy.sampling.stratified_sampling.latin_hypercube_criteria import *\n\n# %% md\n#\n# Define Latin Hypercube sampling class\n# ----------------------------------------------\n# In order to initialize the LatinHypercube sampling class, the user needs to define a list of distributions\n# for each one of the parameters that need to be sampled.\n#\n# Apart from the distributions list, the number of samples :code:`nsamples` to be drawn is required.\n# The :code:`random_state` parameter defines the seed of the random generator.\n#\n# Finally, the design criterion can be defined by the user. The default case is the :class:`.Random`.\n# For more details on the various criteria you can refer to the documentation of the criteria\n# :class:`.Random`, :class:`.Centered`, :class:`.Maximin`, :class:`.MinCorrelation`\n\n# %%\n\ndist1 = Uniform(loc=0., scale=1.)\ndist2 = Uniform(loc=0., scale=1.)\n\nlhs_random = LatinHypercubeSampling(distributions=[dist1, dist2], nsamples=5,\n random_state=np.random.RandomState(789),\n criterion=Random())\n\nlhs_centered = LatinHypercubeSampling(distributions=[dist1, dist2], criterion=Centered(),\n random_state=np.random.RandomState(789),\n nsamples=5)\n\nlhs_maximin = LatinHypercubeSampling(distributions=[dist1, dist2],\n random_state=np.random.RandomState(789),\n criterion=MaxiMin(metric=DistanceMetric.CHEBYSHEV),\n nsamples=5)\n\nlhs_mincorrelate = LatinHypercubeSampling(distributions=[dist1, dist2],\n random_state=np.random.RandomState(789),\n criterion=MinCorrelation(iterations=100),\n nsamples=5)\n\n# %% md\n#\n# Plot the generated samples\n# ------------------------------------\n# The :code:`samples` attribute of the latin hypercube class is a numpy array of with shape\n# :code:`(nsamples, len(distributions))`\n# Both :code:`samples` and :code:`samplesU01` are populated at the same time since the Latin Hypercube samples are\n# initially drawn in the unit hypercube, thus in contrast to Monte Carlo sampling no transformation is required.\n# Using the :py:meth:`run` method to generate samples replaces the previously created ones.\n\n# %%\n\n# plot the samples\nfig, axs = plt.subplots(2, 2)\nfig.subplots_adjust(hspace=0.5)\naxs[0, 0].set_title('Random-LHS design')\naxs[0, 0].scatter(lhs_random._samples[:, 0], lhs_random._samples[:, 1])\naxs[0, 0].set_yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\naxs[0, 0].set_xticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\naxs[0, 0].yaxis.grid(True)\naxs[0, 0].xaxis.grid(True)\n\n\naxs[0, 1].set_title('Centered-LHS design')\naxs[0, 1].scatter(lhs_centered._samples[:, 0], lhs_centered._samples[:, 1])\naxs[0, 1].set_yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\naxs[0, 1].set_xticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\naxs[0, 1].yaxis.grid(True)\naxs[0, 1].xaxis.grid(True)\n\n\naxs[1, 0].set_title('Maximin-LHS design')\naxs[1, 0].scatter(lhs_maximin._samples[:, 0], lhs_maximin._samples[:, 1])\naxs[1, 0].set_yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\naxs[1, 0].set_xticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\naxs[1, 
0].yaxis.grid(True)\naxs[1, 0].xaxis.grid(True)\n\n\naxs[1, 1].set_title('MinCorrelation-LHS design')\naxs[1, 1].scatter(lhs_random._samples[:, 0], lhs_random._samples[:, 1])\naxs[1, 1].set_yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\naxs[1, 1].set_xticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\naxs[1, 1].yaxis.grid(True)\naxs[1, 1].xaxis.grid(True)\n\nplt.ylim(0, 1)\nplt.xlim(0, 1)\nplt.show()\n", "import logging\nfrom typing import Callable\nimport warnings\nwarnings.filterwarnings('ignore')\nimport numpy as np\n\nfrom beartype import beartype\nfrom UQpy.sampling.mcmc.baseclass.MCMC import MCMC\nfrom UQpy.distributions import *\nfrom UQpy.utilities.ValidationTypes import *\n\n\nclass DRAM(MCMC):\n\n @beartype\n def __init__(\n self,\n pdf_target: Union[Callable, list[Callable]] = None,\n log_pdf_target: Union[Callable, list[Callable]] = None,\n args_target: tuple = None,\n burn_length: Annotated[int, Is[lambda x: x >= 0]] = 0,\n jump: int = 1,\n dimension: int = None,\n seed: list = None,\n save_log_pdf: bool = False,\n concatenate_chains: bool = True,\n initial_covariance: float = None,\n covariance_update_rate: float = 100,\n scale_parameter: float = None,\n delayed_rejection_scale: float = 1 / 5,\n save_covariance: bool = False,\n random_state: RandomStateType = None,\n n_chains: int = None,\n nsamples: int = None,\n nsamples_per_chain: int = None,\n ):\n \"\"\"\n Delayed Rejection Adaptive Metropolis algorithm :cite:`Dram1` :cite:`MCMC2`\n\n In this algorithm, the proposal density is Gaussian and its covariance C is being updated from samples as\n :code:`C = scale_parameter * C_sample` where :code:`C_sample` is the sample covariance. Also, the delayed\n rejection scheme is applied, i.e, if a candidate is not accepted another one is generated from the proposal with\n covariance :code:`delayed_rejection_scale ** 2 * C`.\n\n :param pdf_target: Target density function from which to draw random samples. Either `pdf_target` or\n `log_pdf_target` must be provided (the latter should be preferred for better numerical stability).\n\n If `pdf_target` is a callable, it refers to the joint pdf to sample from, it must take at least one input\n **x**, which are the point(s) at which to evaluate the pdf. Within :class:`.MCMC` the `pdf_target` is evaluated\n as:\n :code:`p(x) = pdf_target(x, \\*args_target)`\n\n where **x** is a :class:`numpy.ndarray of shape :code:`(nsamples, dimension)` and `args_target` are additional\n positional arguments that are provided to :class:`.MCMC` via its `args_target` input.\n\n If `pdf_target` is a list of callables, it refers to independent marginals to sample from. The marginal in\n dimension :code:`j` is evaluated as: :code:`p_j(xj) = pdf_target[j](xj, \\*args_target[j])` where **x** is a\n :class:`numpy.ndarray` of shape :code:`(nsamples, dimension)`\n :param log_pdf_target: Logarithm of the target density function from which to draw random samples.\n Either pdf_target or log_pdf_target must be provided (the latter should be preferred for better numerical\n stability).\n\n Same comments as for input `pdf_target`.\n :param args_target: Positional arguments of the pdf / log-pdf target function. See `pdf_target`\n :param burn_length: Length of burn-in - i.e., number of samples at the beginning of the chain to discard (note:\n no thinning during burn-in). Default is :math:`0`, no burn-in.\n :param jump: Thinning parameter, used to reduce correlation between samples. Setting :code:`jump=n` corresponds\n to skipping n-1 states between accepted states of the chain. 
Default is :math:`1` (no thinning).\n :param dimension: A scalar value defining the dimension of target density function. Either `dimension` and\n `n_chains` or `seed` must be provided.\n :param seed: Seed of the Markov chain(s), shape :code:`(n_chains, dimension)`.\n Default: :code:`zeros(n_chains x dimension)`.\n\n If `seed` is not provided, both `n_chains` and `dimension` must be provided.\n :param save_log_pdf: Boolean that indicates whether to save log-pdf values along with the samples.\n Default: :any:`False`\n :param concatenate_chains: Boolean that indicates whether to concatenate the chains after a run, i.e., samples\n are stored as a :class:`numpy.ndarray` of shape :code:`(nsamples * n_chains, dimension)` if :any:`True`,\n :code:`(nsamples, n_chains, dimension)` if :any:`False`.\n Default: :any:`True`\n :param n_chains: The number of Markov chains to generate. Either `dimension` and `n_chains` or `seed` must be\n provided.\n :param initial_covariance: Initial covariance for the gaussian proposal distribution. Default: I(dim)\n :param covariance_update_rate: Rate at which covariance is being updated, i.e., every k0 iterations.\n Default: :math:`100`\n :param scale_parameter: Scale parameter for covariance updating. Default: :math:`2.38^2/dim`\n :param delayed_rejection_scale: Scale parameter for delayed rejection. Default: :math:`1/5`\n :param save_covariance: If :any:`True`, updated covariance is saved in attribute :py:attr:`adaptive_covariance`.\n Default: :any:`False`\n :param random_state: Random seed used to initialize the pseudo-random number generator. Default is\n :any:`None`.\n :param nsamples: Number of samples to generate.\n :param nsamples_per_chain: Number of samples to generate per chain.\n \"\"\"\n self.nsamples = nsamples\n self.nsamples_per_chain = nsamples_per_chain\n super().__init__(\n pdf_target=pdf_target,\n log_pdf_target=log_pdf_target,\n args_target=args_target,\n dimension=dimension,\n seed=seed,\n burn_length=burn_length,\n jump=jump,\n save_log_pdf=save_log_pdf,\n concatenate_chains=concatenate_chains,\n random_state=random_state,\n n_chains=n_chains,\n )\n\n self.logger = logging.getLogger(__name__)\n # Check the initial covariance\n self.initial_covariance = initial_covariance\n if self.initial_covariance is None:\n self.initial_covariance = np.eye(self.dimension)\n elif not (isinstance(self.initial_covariance, np.ndarray)\n and self.initial_covariance == (self.dimension, self.dimension)):\n raise TypeError(\n \"UQpy: Input initial_covariance should be a 2D ndarray of shape (dimension, dimension)\")\n\n self.covariance_update_rate = covariance_update_rate\n self.scale_parameter = scale_parameter\n if self.scale_parameter is None:\n self.scale_parameter = 2.38 ** 2 / self.dimension\n self.delayed_rejection_scale = delayed_rejection_scale\n self.save_covariance = save_covariance\n for key, typ in zip(\n [\n \"covariance_update_rate\",\n \"scale_parameter\",\n \"delayed_rejection_scale\",\n \"save_covariance\",\n ],\n [int, float, float, bool],\n ):\n if not isinstance(getattr(self, key), typ):\n raise TypeError(\"Input \" + key + \" must be of type \" + typ.__name__)\n\n # initialize the sample mean and sample covariance that you need\n self.current_covariance = np.tile(\n self.initial_covariance[np.newaxis, ...], (self.n_chains, 1, 1))\n self.sample_mean = np.zeros((self.n_chains, self.dimension,))\n self.sample_covariance = np.zeros((self.n_chains, self.dimension, self.dimension))\n if self.save_covariance:\n self.adaptive_covariance = 
[self.current_covariance.copy(), ]\n\n self.logger.info(\"\\nUQpy: Initialization of \" + self.__class__.__name__ + \" algorithm complete.\")\n\n if (nsamples is not None) or (nsamples_per_chain is not None):\n self.run(nsamples=nsamples, nsamples_per_chain=nsamples_per_chain)\n\n def run_one_iteration(self, current_state: np.ndarray, current_log_pdf: np.ndarray):\n \"\"\"\n Run one iteration of the mcmc chain for DRAM algorithm, starting at current state -\n see :class:`MCMC` class.\n \"\"\"\n from UQpy.distributions import MultivariateNormal\n\n multivariate_normal = MultivariateNormal(mean=np.zeros(self.dimension, ), cov=1.0)\n\n # Sample candidate\n candidate = np.zeros_like(current_state)\n for nc, current_cov in enumerate(self.current_covariance):\n multivariate_normal.update_parameters(cov=current_cov)\n candidate[nc, :] = current_state[nc, :] + \\\n multivariate_normal.rvs(nsamples=1, random_state=self.random_state) \\\n .reshape((self.dimension,))\n\n # Compute log_pdf_target of candidate sample\n log_p_candidate = self.evaluate_log_target(candidate)\n\n # Compare candidate with current sample and decide or not to keep the candidate (loop over nc chains)\n accept_vec = np.zeros((self.n_chains,))\n delayed_chains_indices = ([]) # indices of chains that will undergo delayed rejection\n unif_rvs = (Uniform().rvs(nsamples=self.n_chains, random_state=self.random_state)\n .reshape((-1,)))\n for nc, (cand, log_p_cand, log_p_curr) in enumerate(\n zip(candidate, log_p_candidate, current_log_pdf)):\n accept = np.log(unif_rvs[nc]) < log_p_cand - log_p_curr\n if accept:\n current_state[nc, :] = cand\n current_log_pdf[nc] = log_p_cand\n accept_vec[nc] += 1.0\n else: # enter delayed rejection\n delayed_chains_indices.append(nc) # these indices will enter the delayed rejection part\n\n # Delayed rejection\n if delayed_chains_indices: # performed delayed rejection for some chains\n current_states_delayed = np.zeros(\n (len(delayed_chains_indices), self.dimension))\n candidates_delayed = np.zeros((len(delayed_chains_indices), self.dimension))\n candidate2 = np.zeros((len(delayed_chains_indices), self.dimension))\n # Sample other candidates closer to the current one\n for i, nc in enumerate(delayed_chains_indices):\n current_states_delayed[i, :] = current_state[nc, :]\n candidates_delayed[i, :] = candidate[nc, :]\n multivariate_normal.update_parameters(\n cov=self.delayed_rejection_scale ** 2 * self.current_covariance[nc])\n candidate2[i, :] = current_states_delayed[i, :] + \\\n multivariate_normal.rvs(nsamples=1, random_state=self.random_state) \\\n .reshape((self.dimension,))\n # Evaluate their log_target\n log_p_candidate2 = self.evaluate_log_target(candidate2)\n log_prop_cand_cand2 = multivariate_normal.log_pdf(candidates_delayed - candidate2)\n log_prop_cand_curr = multivariate_normal.log_pdf(candidates_delayed - current_states_delayed)\n # Accept or reject\n unif_rvs = (Uniform().rvs(nsamples=len(delayed_chains_indices),\n random_state=self.random_state).reshape((-1,)))\n for (nc, cand2, log_p_cand2, j1, j2, u_rv) in zip(\n delayed_chains_indices,\n candidate2,\n log_p_candidate2,\n log_prop_cand_cand2,\n log_prop_cand_curr,\n unif_rvs,\n ):\n alpha_cand_cand2 = min(1.0, np.exp(log_p_candidate[nc] - log_p_cand2))\n alpha_cand_curr = min(1.0, np.exp(log_p_candidate[nc] - current_log_pdf[nc]))\n log_alpha2 = (log_p_cand2 - current_log_pdf[nc] + j1 - j2\n + np.log(max(1.0 - alpha_cand_cand2, 10 ** (-320)))\n - np.log(max(1.0 - alpha_cand_curr, 10 ** (-320))))\n accept = np.log(u_rv) < min(0.0, 
log_alpha2)\n if accept:\n current_state[nc, :] = cand2\n current_log_pdf[nc] = log_p_cand2\n accept_vec[nc] += 1.0\n\n # Adaptive part: update the covariance\n for nc in range(self.n_chains):\n # update covariance\n self.sample_mean[nc], self.sample_covariance[nc], = self._recursive_update_mean_covariance(\n nsamples=self.iterations_number,\n new_sample=current_state[nc, :],\n previous_mean=self.sample_mean[nc],\n previous_covariance=self.sample_covariance[nc], )\n if (self.iterations_number > 1) and (self.iterations_number % self.covariance_update_rate == 0):\n self.current_covariance[nc] = self.scale_parameter * self.sample_covariance[nc] + \\\n 1e-6 * np.eye(self.dimension)\n if self.save_covariance and \\\n ((self.iterations_number > 1) and (self.iterations_number % self.covariance_update_rate == 0)):\n self.adaptive_covariance.append(self.current_covariance.copy())\n\n # Update the acceptance rate\n self._update_acceptance_rate(accept_vec)\n return current_state, current_log_pdf\n\n @staticmethod\n def _recursive_update_mean_covariance(\n nsamples, new_sample, previous_mean, previous_covariance=None\n ):\n \"\"\"\n Iterative formula to compute a new sample mean and covariance based on previous ones and new sample.\n\n New covariance is computed only of previous_covariance is provided.\n\n **Inputs:**\n\n * n (int): Number of samples used to compute the new mean\n * new_sample (ndarray (dim, )): new sample\n * previous_mean (ndarray (dim, )): Previous sample mean, to be updated with new sample value\n * previous_covariance (ndarray (dim, dim)): Previous sample covariance, to be updated with new sample value\n\n **Output/Returns:**\n\n * new_mean (ndarray (dim, )): Updated sample mean\n * new_covariance (ndarray (dim, dim)): Updated sample covariance\n\n \"\"\"\n new_mean = (nsamples - 1) / nsamples * previous_mean + 1 / nsamples * new_sample\n if previous_covariance is None:\n return new_mean\n dimensions = new_sample.size\n if nsamples == 1:\n new_covariance = np.zeros((dimensions, dimensions))\n else:\n delta_n = (new_sample - previous_mean).reshape((dimensions, 1))\n new_covariance = (nsamples - 2) / (nsamples - 1) \\\n * previous_covariance + 1 / nsamples * np.matmul(delta_n, delta_n.T)\n return new_mean, new_covariance\n", "\"\"\"\n\nU-Function & User-defined learning function\n============================================\n\nIn this example, Monte Carlo Sampling is used to generate samples from Normal distribution and new samples are generated\nadaptively, using U-function as the learning criteria .\n\"\"\"\n\n# %% md\n#\n# Import the necessary libraries. 
Here we import standard libraries such as numpy, matplotlib and other necessary\n# library for plots, but also need to import the :class:`.MonteCarloSampling`,\n# :class:`.AdaptiveKriging`, :class:`.Kriging` and :class:`.RunModel` class from UQpy.\n\n# %%\nimport shutil\n\nfrom UQpy import PythonModel\nfrom UQpy.surrogates.gaussian_process import GaussianProcessRegression\nfrom UQpy.sampling import MonteCarloSampling, AdaptiveKriging\nfrom UQpy.run_model.RunModel import RunModel\nfrom UQpy.distributions import Normal\nfrom local_series import series\nimport matplotlib.pyplot as plt\nimport time\nfrom UQpy.utilities.MinimizeOptimizer import MinimizeOptimizer\n\n\n# %% md\n#\n# Using UQpy :class:`.MonteCarloSampling` class to generate samples for two random variables, which are normally\n# distributed with mean :math:`0` and variance :math:`1`.\n\n# %%\n\nmarginals = [Normal(loc=0., scale=4.), Normal(loc=0., scale=4.)]\nx = MonteCarloSampling(distributions=marginals, nsamples=20, random_state=1)\n\n# %% md\n#\n# RunModel class is used to define an object to evaluate the model at sample points.\n\n# %%\n\nmodel = PythonModel(model_script='local_series.py', model_object_name='series')\nrmodel = RunModel(model=model)\n\n\n# %% md\n#\n# :class:`.Kriging` class defines an object to generate a surrogate model for a given set of data.\n\n# %%\n\nfrom UQpy.surrogates.gaussian_process.regression_models import LinearRegression\nfrom UQpy.surrogates.gaussian_process.kernels import RBF\nbounds = [[10**(-3), 10**3], [10**(-3), 10**2], [10**(-3), 10**2]]\noptimizer = MinimizeOptimizer(method=\"L-BFGS-B\", bounds=bounds)\nK = GaussianProcessRegression(regression_model=LinearRegression(), kernel=RBF(), optimizer=optimizer,\n hyperparameters=[1, 1, 0.1], optimizations_number=10, noise=False)\n\n# %% md\n#\n# This example works for all three learning function based on reliability analysis.\n#\n# :class:`.AdaptiveKriging` class is used to generate new sample using :class:`.UFunction` as active learning function.\n\n# %%\n\nfrom UQpy.sampling.adaptive_kriging_functions import *\nstart_time = time.time()\nlearning_function = WeightedUFunction(weighted_u_stop=2)\na = AdaptiveKriging(runmodel_object=rmodel, surrogate=K, learning_nsamples=10 ** 3, n_add=1,\n learning_function=learning_function, distributions=marginals, random_state=2)\na.run(nsamples=100, samples=x.samples)\n\nelapsed_time = time.time() - start_time\n\n\ntime.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time))\ng = a.surrogate.predict(a.learning_set, False)\nn_ = a.learning_set.shape[0] + len(a.qoi)\npf = (sum(g < 0) + sum(np.array(a.qoi) < 0)) / n_\nprint('Time: ', elapsed_time)\nprint('Function evaluation: ', a.samples.shape[0])\nprint('Probability of failure: ', pf)\n\n# %% md\n#\n# This figure shows the location of new samples generated using active learning function.\n\n# %%\n\nnum = 50\nx1 = np.linspace(-7, 7, num)\nx2 = np.linspace(-7, 7, num)\nx1v, x2v = np.meshgrid(x1, x2)\ny = np.zeros([num, num])\ny_act = np.zeros([num, num])\nmse = np.zeros([num, num])\nfor i in range(num):\n for j in range(num):\n xa = marginals[0].cdf(np.atleast_2d(x1v[i, j]))\n ya = marginals[1].cdf(np.atleast_2d(x2v[i, j]))\n y[i, j] = a.surrogate.predict(np.hstack([xa, ya]))\n y_act[i, j] = series(np.array([[x1v[i, j], x2v[i, j]]]))\n\nfig, ax = plt.subplots()\nkr_a = ax.contour(x1v, x2v, y_act, levels=[0], colors='Black')\n\n# Plot for scattered data\nnd = x.nsamples\nID1 = ax.scatter(a.samples[nd:, 0], a.samples[nd:, 1], color='Grey', label='New samples')\nID = 
ax.scatter(x.samples[:nd, 0], x.samples[:nd, 1], color='Red', label='Initial samples')\nplt.legend(handles=[ID1, ID])\nplt.show()\n\n# %% md\n#\n# User-defined Learning function\n# ------------------------------\n\n# %%\n\nclass UserLearningFunction(LearningFunction):\n\n def __init__(self, u_stop: int = 2):\n self.u_stop = u_stop\n\n def evaluate_function(self, distributions, n_add, surrogate, population, qoi=None, samples=None):\n # AKMS class use these inputs to compute the learning function\n\n g, sig = surrogate.predict(population, True)\n\n # Remove the inconsistency in the shape of 'g' and 'sig' array\n g = g.reshape([population.shape[0], 1])\n sig = sig.reshape([population.shape[0], 1])\n\n u = abs(g) / sig\n rows = u[:, 0].argsort()[:n_add]\n\n indicator = False\n if min(u[:, 0]) >= self.u_stop:\n indicator = True\n\n return population[rows, :], u[rows, 0], indicator\n\n# %% md\n#\n# Creating new instances of :class:`.Kriging` and :class:`.RunModel` class.\n\n# %%\nbounds = [[10**(-3), 10**3], [10**(-3), 10**2], [10**(-3), 10**2]]\noptimizer = MinimizeOptimizer(method=\"L-BFGS-B\", bounds=bounds)\nK1 = GaussianProcessRegression(regression_model=LinearRegression(), kernel=RBF(), optimizer=optimizer,\n hyperparameters=[1, 1, 0.1], optimizations_number=1)\nmodel = PythonModel(model_script='local_series.py', model_object_name='series')\nrmodel1 = RunModel(model=model)\n\n# %% md\n#\n# Executing :class:`Adaptivekriging` with the user-defined learning function.\n\n# %%\n\nstart_time = time.time()\nak = AdaptiveKriging(runmodel_object=rmodel1, samples=x.samples, surrogate=K1, learning_nsamples=10 ** 3,\n n_add=1, learning_function=UserLearningFunction(), distributions=marginals, random_state=3)\nak.run(nsamples=100)\n\n\nelapsed_time = time.time() - start_time\n\ntime.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time))\ng = ak.surrogate.predict(ak.learning_set, False)\nn_ = ak.learning_set.shape[0] + len(ak.qoi)\npf = (sum(g < 0) + sum(np.array(ak.qoi) < 0)) / n_\nprint('Time: ', elapsed_time)\nprint('Function evaluation: ', ak.samples.shape[0])\nprint('Probability of failure: ', pf)\n\n# %% md\n#\n# This figure shows the location of new samples generated using active learning function.\n\n# %%\n\nfig1, ax1 = plt.subplots()\nkr_a = ax1.contour(x1v, x2v, y_act, levels=[0], colors='Black')\n\n# Plot for scattered data\nID1 = ax1.scatter(ak.samples[nd:, 0], ak.samples[nd:, 1], color='Grey', label='New samples')\nID = ax1.scatter(x.samples[:nd, 0], x.samples[:nd, 1], color='Red', label='Initial samples')\nplt.legend(handles=[ID1, ID])\nplt.show()\n\n# %% md\n#\n# Monte Carlo Simulation\n# -----------------------\n# Probability of failure and covariance is estimated using Monte Carlo Simulation. 
10,000 samples are generated\n# randomly using :class:`.MonteCarloSampling` class and model is evaluated at all samples.\n\n# %%\n\nstart_time = time.time()\n\n# Code\nb = MonteCarloSampling(distributions=marginals, nsamples=10 ** 4, random_state=4)\nmodel = PythonModel(model_script='local_series.py', model_object_name='series')\nr1model = RunModel(model=model)\nr1model.run(samples=b.samples)\n\n\ngx = np.array(r1model.qoi_list)\npf_mcs = np.sum(np.array(gx) < 0) / b.nsamples\ncov_pf_mcs = np.sqrt((1 - pf_mcs) / (pf_mcs * b.nsamples))\nelapsed_time = time.time() - start_time\ntime.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time))\n\n# %% md\n#\n# Results from Monte Carlo Simulation.\n\n# %%\n\nprint('Time: ', elapsed_time)\nprint('Function evaluation: ', b.nsamples)\nprint('Probability of failure: ', pf_mcs)\n\n", "\"\"\"\n\nSimple probability distribution model\n==============================================\n\nIn the following we learn the mean and covariance of a univariate gaussian distribution from data.\n\"\"\"\n\n#%% md\n#\n# Initially we have to import the necessary modules.\n\n#%%\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom UQpy.inference import DistributionModel, MLE\nfrom UQpy.distributions import Normal\nfrom UQpy.inference import MinimizeOptimizer\n\n#%% md\n#\n# First, for the sake of this example, we generate fake data from a gaussian distribution with mean 0 and\n# standard deviation 1.\n\n#%%\n\nmu, sigma = 0, 0.1 # true mean and standard deviation\ndata_1 = np.random.normal(mu, sigma, 1000).reshape((-1, 1))\nprint('Shape of data vector: {}'.format(data_1.shape))\n\ncount, bins, ignored = plt.hist(data_1, 30, density=True)\nplt.plot(bins, 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(- (bins - mu) ** 2 / (2 * sigma ** 2)),\n linewidth=2, color='r')\nplt.title('Histogram of the data')\nplt.show()\n\n#%% md\n#\n# Create an instance of the class Model. The user must define the number of parameters to be estimated, in this case 2\n# (mean and standard deviation), and set those parameters to be learnt as None when instantiating the Distribution\n# object. For maximum likelihood estimation, no prior pdf is required.\n\n#%%\n\n# set parameters to be learnt as None\ndist = Normal(loc=None, scale=None)\ncandidate_model = DistributionModel(n_parameters=2, distributions=dist)\n\nml_estimator = MLE(inference_model=candidate_model, data=data_1, n_optimizations=3)\nprint('ML estimates of the mean={0:.3f} (true=0.) and std. dev={1:.3f} (true=0.1)'.format(\n ml_estimator.mle[0], ml_estimator.mle[1]))\n\n#%% md\n#\n# We can also fix one of the parameters and learn the remaining one\n\n#%%\n\nd = Normal(loc=0., scale=None)\ncandidate_model = DistributionModel(n_parameters=1, distributions=d)\n\noptimizer = MinimizeOptimizer(bounds=[[0.0001, 2.]])\nml_estimator = MLE(inference_model=candidate_model, data=data_1,\n n_optimizations=1)\nprint('ML estimates of the std. dev={0:.3f} (true=0.1)'.format(ml_estimator.mle[0]))" ]
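The `_recursive_update_mean_covariance` helper in the DRAM sampler above uses a Welford-style recursion for the unbiased sample mean and covariance. The following is a minimal standalone sketch (re-implemented here, not imported from UQpy; the data stream and dimension are illustrative) that checks the same recursion against NumPy's batch estimators.

import numpy as np

def recursive_update(n, new_sample, prev_mean, prev_cov):
    # n is the 1-based index of the newly observed sample
    new_mean = (n - 1) / n * prev_mean + new_sample / n
    if n == 1:
        return new_mean, np.zeros((new_sample.size, new_sample.size))
    delta = (new_sample - prev_mean).reshape(-1, 1)
    new_cov = (n - 2) / (n - 1) * prev_cov + (delta @ delta.T) / n
    return new_mean, new_cov

rng = np.random.default_rng(0)
samples = rng.normal(size=(500, 3))
mean, cov = np.zeros(3), np.zeros((3, 3))
for i, x in enumerate(samples, start=1):
    mean, cov = recursive_update(i, x, mean, cov)

# The streaming estimates must match the batch estimators (ddof=1 covariance)
assert np.allclose(mean, samples.mean(axis=0))
assert np.allclose(cov, np.cov(samples, rowvar=False))
print("recursive mean/covariance update verified")

If the recursion is right, both assertions hold for any input stream, which is what lets DRAM adapt its proposal covariance in O(d^2) work per iteration instead of recomputing np.cov over the full chain history.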
[ [ "matplotlib.pyplot.ylim", "matplotlib.pyplot.xlim", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ], [ "numpy.log", "numpy.eye", "numpy.matmul", "numpy.tile", "numpy.zeros_like", "numpy.exp", "numpy.zeros" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ], [ "numpy.sqrt", "matplotlib.pyplot.title", "numpy.random.normal", "numpy.exp", "matplotlib.pyplot.hist", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
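The Monte Carlo block at the end of the adaptive-Kriging example above estimates the failure probability as the fraction of negative limit-state values and reports its coefficient of variation as sqrt((1 - pf) / (pf * N)). A self-contained sketch with toy limit-state values follows; the distribution, seed and resulting numbers are assumptions for illustration, not the example's actual model output.

import numpy as np

N = 10_000
# toy limit-state values: failure when g(x) < 0
gx = np.random.default_rng(4).normal(loc=2.0, scale=1.0, size=N)

pf_mcs = np.sum(gx < 0) / N
cov_pf_mcs = np.sqrt((1 - pf_mcs) / (pf_mcs * N))
print('Probability of failure:', pf_mcs)
print('Coefficient of variation:', cov_pf_mcs)

With pf around 0.02 and N = 10^4 the coefficient of variation comes out near 0.065, i.e. roughly 6-7% relative error, which is the kind of crude Monte Carlo baseline the surrogate-based estimates above are compared against.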
marineLM/Impute_then_Regress
[ "d59f61dab8008b852c6c613806797614cf6184dd" ]
[ "python/launch_all.py" ]
[ "'''\nDefines:\n - the paramaters of data simulations, \n - the list of methods to compare and their hyperparameters,\nAnd launches all experiments.\n'''\n\nimport pandas as pd\nimport argparse\nfrom run_all import run\n\nparser = argparse.ArgumentParser()\nparser.add_argument('mdm', help='missing data mechanism',\n choices=['MCAR', 'MAR', 'gaussian_sm'])\nparser.add_argument('--link', help='type of link function for the outcome',\n choices=['linear', 'square', 'stairs',\n 'discontinuous_linear'])\nargs = parser.parse_args()\n\nn_iter = 10\nn_jobs = 40\nn_sizes = [2e4, 1e5]\nn_sizes = [int(i) for i in n_sizes]\nn_test = int(1e4)\nn_val = int(1e4)\n\nif args.link == 'square':\n curvature = 1\nelif args.link == 'stairs':\n curvature = 20\nelse:\n curvature = None\n\n# First fill in data_desc with all default values.\nif args.mdm == 'MCAR':\n if args.link:\n filename = 'MCAR_' + args.link\n else:\n filename = 'MCAR'\n\n default_values = {'n_features': 50, 'missing_rate': 0.5,\n 'prop_latent': 0.3, 'snr': 10, 'masking': 'MCAR',\n 'prop_for_masking': None, 'link': args.link,\n 'curvature': curvature}\n\n # Define the list of parameters that should be tested and their range of\n # values\n other_values = {'prop_latent': [0.7]}\n\nelif args.mdm == 'gaussian_sm':\n if args.link:\n filename = 'gaussian_sm_' + args.link\n else:\n filename = 'gaussian_sm'\n\n default_values = {'n_features': 50, 'missing_rate': 0.5,\n 'prop_latent': 0.3, 'sm_type': 'gaussian',\n 'sm_param': 2, 'snr': 10, 'perm': False,\n 'link': args.link, 'curvature': curvature}\n\n # Define the list of parameters that should be tested and their range of\n # values\n other_values = {'prop_latent': [0.7]}\n\n# Then vary parameters one by one while the other parameters remain constant,\n# and equal to their default values.\ndata_descs = [pd.DataFrame([default_values])]\nfor param, vals in other_values.items():\n n = len(vals)\n data = pd.DataFrame([default_values]*n)\n data.loc[:, param] = vals\n data_descs.append(data)\ndata_descs = pd.concat(data_descs, axis=0)\n\n\n# Define the methods that will be compared\nmethods_params = []\n\nmethods_params.append({'method': 'BayesPredictor', 'order0': False})\nmethods_params.append({'method': 'BayesPredictor_order0', 'order0': True})\n\nfor max_leaf_nodes in [50, 100, 200, 400, 600]:\n for max_iter in [100, 200, 300]:\n for min_samples_leaf in [10, 20, 50]:\n methods_params.append({'method': 'GBRT',\n 'n_iter_no_change': 10,\n 'max_leaf_nodes': max_leaf_nodes,\n 'max_iter': max_iter,\n 'min_samples_leaf': min_samples_leaf\n })\n\nmlp_depths = [1, 2, 5]\nwidth_factors = [1, 5, 10]\nweight_decays = [1e-6, 1e-5, 1e-4, 1e-3]\nlearning_rates = [1e-2, 5e-3, 1e-3, 5e-4]\nneumann_depths = [20]\n\nfor add_mask in [True, False]:\n for mlp_d in mlp_depths:\n for wf in width_factors:\n for wd in weight_decays:\n for lr in learning_rates:\n if add_mask:\n name = 'oracleMLPPytorch_mask'\n else:\n name = 'oracleMLPPytorch'\n methods_params.append({'method': name,\n 'add_mask': add_mask,\n 'mdm': args.mdm,\n 'n_epochs': 1000,\n 'batch_size': 100,\n 'lr': lr,\n 'weight_decay': wd,\n 'early_stopping': True,\n 'optimizer': 'adam',\n 'width_factor': wf,\n 'mlp_depth': mlp_d,\n 'init_type': 'uniform',\n 'verbose': False})\n\n\nfor add_mask in [True, False]:\n for imp_type in ['mean', 'MICE']:\n for mlp_d in mlp_depths:\n for wf in width_factors:\n for wd in weight_decays:\n for lr in learning_rates:\n if add_mask:\n name = imp_type + 'MLPPytorch_mask'\n else:\n name = imp_type + 'MLPPytorch'\n 
methods_params.append({'method': name,\n 'add_mask': add_mask,\n 'imputation_type': imp_type,\n 'n_epochs': 1000,\n 'batch_size': 100,\n 'lr': lr,\n 'weight_decay': wd,\n 'early_stopping': True,\n 'optimizer': 'adam',\n 'mlp_depth': mlp_d,\n 'width_factor': wf,\n 'init_type': 'uniform',\n 'verbose': False})\n\n\nfor init in ['uniform']:\n name = 'NeuMiss_' + init + '_'\n for mlp_d in mlp_depths:\n for wf in width_factors:\n for d in neumann_depths:\n for wd in weight_decays:\n for lr in learning_rates:\n methods_params.append({'method': name,\n 'mode': 'shared',\n 'depth': d,\n 'n_epochs': 1000,\n 'batch_size': 100,\n 'lr': lr,\n 'weight_decay': wd,\n 'early_stopping': True,\n 'optimizer': 'adam',\n 'residual_connection': True,\n 'mlp_depth': mlp_d,\n 'width_factor': wf,\n 'init_type': init,\n 'add_mask': False,\n 'verbose': False})\n\n\nrun_params = {\n 'n_iter': n_iter,\n 'n_sizes': n_sizes,\n 'n_test': n_test,\n 'n_val': n_val,\n 'mdm': args.mdm,\n 'data_descs': data_descs,\n 'methods_params': methods_params,\n 'filename': filename,\n 'n_jobs': n_jobs}\n\nrun(**run_params)\n" ]
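The launch script above builds its hyper-parameter grids with deeply nested for-loops. Below is a sketch of the same construction using itertools.product, shown only for the oracleMLPPytorch family; the parameter value lists are copied from the script, while the reduced dictionary keys are illustrative.

from itertools import product

mlp_depths = [1, 2, 5]
width_factors = [1, 5, 10]
weight_decays = [1e-6, 1e-5, 1e-4, 1e-3]
learning_rates = [1e-2, 5e-3, 1e-3, 5e-4]

methods_params = []
for add_mask, mlp_d, wf, wd, lr in product([True, False], mlp_depths,
                                           width_factors, weight_decays,
                                           learning_rates):
    name = 'oracleMLPPytorch_mask' if add_mask else 'oracleMLPPytorch'
    # only a few keys shown here; the full dictionaries from the script apply as-is
    methods_params.append({'method': name, 'add_mask': add_mask,
                           'mlp_depth': mlp_d, 'width_factor': wf,
                           'weight_decay': wd, 'lr': lr})

print(len(methods_params))  # 2 * 3 * 3 * 4 * 4 = 288 configurations

product keeps the Cartesian structure in one place and makes the 288-configuration count easy to audit, while the appended dictionaries stay exactly as in the original script.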
[ [ "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
WingsBrokenAngel/MSR-VTT-DataCleaning
[ "4eb2f4736e9b606e9296487c3b70351b2cde1dfa" ]
[ "msrvtt_model/train_model.py" ]
[ "# -*- coding: utf-8 -*-\n# Author: Haoran Chen\n# Date: 2019-10-07\nimport tensorflow as tf\nimport pickle\nimport numpy as np\nimport sys\nfrom pprint import pprint\nfrom collections import defaultdict\nimport time\nsys.path.append('..')\nfrom utils import *\n\n\nnp.random.seed(42)\ndata_dict = None\nmodel = None\noptions = None\n\n# METRICS = {'Bleu_4': 0., 'CIDEr': 0., \n# 'METEOR': 0., 'ROUGE_L': 0.}\nMETRICS = {'CIDEr': 0.}\nMAX = {key: 0. for key in METRICS}\nmin_xe = 1000.\n\ndef cal_metrics(sess, phase):\n sent_dict, sent_list = defaultdict(list), []\n loss_list = []\n if phase == \"train\":\n ref = data_dict[\"ref\"][0]\n idx2cap = {idx: elem for idx, elem in enumerate(ref)}\n idx_start, idx_end = 0, 6513\n elif phase == \"val\":\n ref = data_dict['ref'][1]\n idx2cap = {idx+6513: elem for idx, elem in enumerate(ref)}\n idx_start, idx_end = 6513, 7010\n elif phase == \"test\":\n ref = data_dict['ref'][2]\n idx2cap = {idx+7010: elem for idx, elem in enumerate(ref)}\n idx_start, idx_end = 7010, 10000\n else:\n raise ValueError(\"The phase should be val or test\")\n tag_feat = data_dict['tag_feat']\n eco_res_feat = data_dict['eco_res_feat']\n idx2gts = data_dict['idx2gts']\n for idx in range(idx_start, idx_end):\n tag, ervid = tag_feat[idx], eco_res_feat[idx]\n tag, ervid = np.expand_dims(tag, 0), np.expand_dims(ervid, 0)\n gts = idx2gts[idx]\n maxlen = max([len(gt) for gt in gts])\n gts_mat = np.zeros((maxlen, len(gts)), dtype=np.int32)\n for idx2, gt in enumerate(gts):\n gts_mat[:len(gt), idx2] = gt\n # print('tag shape:', tag.shape, 'evid:', evid.shape, 'rvid:', rvid.shape)\n wanted_ops = {\n 'generated_words': model.generated_words, \"test_loss\": model.test_loss}\n feed_dict = {\n model.word_idx: gts_mat, model.vid_inputs: ervid, model.se_inputs: tag}\n # sel_word_idx shape: (batch_size, beam_width, n_steps)\n res = sess.run(wanted_ops, feed_dict)\n generated_words = res['generated_words']\n loss_list.append(res['test_loss'])\n for x in np.squeeze(generated_words):\n if x == 0:\n break\n sent_dict[idx].append(data_dict['idx2word'][x])\n sent_dict[idx] = [' '.join(sent_dict[idx])]\n sent_list.append(sent_dict[idx][0])\n scores = score(idx2cap, sent_dict)\n print(phase)\n pprint(scores)\n mean_loss = np.mean(loss_list)\n # print('average loss:', mean_loss, flush=True)\n\n if \"test\" == phase or \"train\" == phase:\n with open(flags.name+'_%s_output.log'%phase, 'w') as fo:\n for sent in sent_list:\n fo.write(sent+'\\n')\n\n return scores, mean_loss\n\n\ndef main():\n global data_dict, model, options\n data_dict = get_data(flags)\n options = get_options(data_dict)\n model = get_model(options)\n # model = get_gru(options)\n best_score, save_path = 0., None\n\n with model.graph.as_default():\n global_step = tf.train.get_or_create_global_step()\n train_op = get_train_op(model, options, global_step)\n saver = tf.train.Saver()\n config = get_config()\n sess = tf.Session(config=config, graph=model.graph)\n trainable_variables = tf.trainable_variables()\n # print(*trainable_variables, sep='\\n', flush=True)\n if flags.test is None:\n sess.run(tf.global_variables_initializer())\n train_idx1 = np.arange(options.train_size, dtype=np.int32)\n train_idx2 = np.arange(options.train_size2, dtype=np.int32)\n\n for idx in range(options.epoch):\n start_time = time.perf_counter()\n train_loss = []\n if idx < options.threshold:\n np.random.shuffle(train_idx1)\n train_part1(train_idx1, train_op, train_loss, \n sess, options, data_dict, model)\n else:\n np.random.shuffle(train_idx2)\n 
train_part2(train_idx2, train_op, train_loss, sess, \n idx, options, data_dict, model)\n mean_train_loss = np.mean(train_loss)\n print('epoch %d: loss %f' % (idx, mean_train_loss))\n scores, val_loss = cal_metrics(sess, 'val')\n global METRICS, MAX, min_xe\n METRICS = {key: max(METRICS[key], scores[key]) for key in METRICS}\n overall_score1 = np.mean([scores[key] / METRICS[key] for key in METRICS])\n overall_score2 = np.mean([MAX[key] / METRICS[key] for key in METRICS])\n if overall_score1 > overall_score2:\n MAX = scores\n # if val_loss < min_xe:\n # min_xe = val_loss\n save_path = saver.save(sess, './saves/%s-best.ckpt'%flags.name)\n print('Epoch %d: the best model has been saved as %s.'\n % (idx, save_path))\n end_time = time.perf_counter()\n print('%d epoch: %.2fs.' % (idx, end_time - start_time), flush=True)\n saver.restore(sess, save_path)\n cal_metrics(sess, 'train')\n cal_metrics(sess, 'test')\n else:\n saver.restore(sess, flags.test)\n # cal_metrics(sess, 'train')\n # cal_metrics(sess, 'val')\n cal_metrics(sess, 'test')\n sess.close()\n\n\nif __name__ == \"__main__\":\n tf.app.flags.DEFINE_string('name', '1', 'name of model')\n tf.app.flags.DEFINE_string('corpus', None, 'Path to corpus file')\n tf.app.flags.DEFINE_string('ecores', None, 'Path to ECO-RES feature files')\n tf.app.flags.DEFINE_string('tag', None, 'Path to Tag feature files')\n tf.app.flags.DEFINE_string('ref', None, 'Path to reference files')\n tf.app.flags.DEFINE_string('test', None, 'Path to the saved parameters')\n\n flags = tf.app.flags.FLAGS\n\n start_time = time.perf_counter()\n main()\n end_time = time.perf_counter()\n\n print('Total time: %.2fs' % (end_time - start_time))\n" ]
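The training loop above saves a checkpoint only when the mean of the validation metrics, each normalized by the best value seen so far, beats the previously saved checkpoint. A minimal sketch of that selection rule with hypothetical metric values (the script as written tracks only CIDEr by default):

import numpy as np

METRICS = {'CIDEr': 0.0, 'Bleu_4': 0.0}   # running per-metric maxima
MAX = {key: 0.0 for key in METRICS}       # metrics of the best checkpoint so far

def is_new_best(scores):
    """Return True if `scores` beats the saved checkpoint after normalization."""
    global METRICS, MAX
    METRICS = {key: max(METRICS[key], scores[key]) for key in METRICS}
    current = np.mean([scores[key] / METRICS[key] for key in METRICS])
    best = np.mean([MAX[key] / METRICS[key] for key in METRICS])
    if current > best:
        MAX = dict(scores)
        return True
    return False

print(is_new_best({'CIDEr': 0.40, 'Bleu_4': 0.30}))  # True: first scores always win
print(is_new_best({'CIDEr': 0.38, 'Bleu_4': 0.35}))  # True: 0.975 > 0.929 after normalization

Normalizing by the running maxima lets metrics on different scales contribute equally before they are averaged, which is why a small drop in one metric can be outweighed by a gain in another.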
[ [ "numpy.expand_dims", "numpy.random.seed", "numpy.arange", "numpy.squeeze", "tensorflow.train.get_or_create_global_step", "tensorflow.trainable_variables", "numpy.random.shuffle", "tensorflow.global_variables_initializer", "numpy.mean", "tensorflow.app.flags.DEFINE_string", "tensorflow.Session", "tensorflow.train.Saver" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
CurryYuan/X-Trans2Cap
[ "c78a27209f14fcbbec74fe8b5edc06faea2e7d44", "c78a27209f14fcbbec74fe8b5edc06faea2e7d44" ]
[ "lib/reference_dataset.py", "lib/pointnet2/pytorch_utils.py" ]
[ "import os\nimport json\nimport pickle\nimport numpy as np\nfrom itertools import chain\nfrom collections import Counter\nfrom torch.utils.data import Dataset\n\nfrom lib.config import CONF\nfrom data.scannet.model_util_scannet import ScannetDatasetConfig\n\n# data setting\nDC = ScannetDatasetConfig()\n\n\nclass ReferenceDataset(Dataset):\n def __init__(self):\n pass\n\n def __len__(self):\n raise NotImplementedError\n\n def __getitem__(self, idx):\n raise NotImplementedError\n\n def _get_raw2label(self):\n # mapping\n scannet_labels = DC.type2class.keys()\n scannet2label = {label: i for i, label in enumerate(scannet_labels)}\n\n lines = [line.rstrip() for line in open(CONF.SCANNET_V2_TSV)]\n lines = lines[1:]\n raw2label = {}\n for i in range(len(lines)):\n label_classes_set = set(scannet_labels)\n elements = lines[i].split('\\t')\n raw_name = elements[1]\n nyu40_name = elements[7]\n if nyu40_name not in label_classes_set:\n raw2label[raw_name] = scannet2label['others']\n else:\n raw2label[raw_name] = scannet2label[nyu40_name]\n\n return raw2label\n\n def _get_unique_multiple_lookup(self):\n all_sem_labels = {}\n cache = {}\n for data in self.scanrefer:\n scene_id = data[\"scene_id\"]\n object_id = data[\"object_id\"]\n object_name = \" \".join(data[\"object_name\"].split(\"_\"))\n ann_id = data[\"ann_id\"]\n\n if scene_id not in all_sem_labels:\n all_sem_labels[scene_id] = []\n\n if scene_id not in cache:\n cache[scene_id] = {}\n\n if object_id not in cache[scene_id]:\n cache[scene_id][object_id] = {}\n try:\n all_sem_labels[scene_id].append(self.raw2label[object_name])\n except KeyError:\n all_sem_labels[scene_id].append(17)\n\n # convert to numpy array\n all_sem_labels = {scene_id: np.array(all_sem_labels[scene_id]) for scene_id in all_sem_labels.keys()}\n\n unique_multiple_lookup = {}\n for data in self.scanrefer:\n scene_id = data[\"scene_id\"]\n object_id = data[\"object_id\"]\n object_name = \" \".join(data[\"object_name\"].split(\"_\"))\n ann_id = data[\"ann_id\"]\n\n try:\n sem_label = self.raw2label[object_name]\n except KeyError:\n sem_label = 17\n\n unique_multiple = 0 if (all_sem_labels[scene_id] == sem_label).sum() == 1 else 1\n\n # store\n if scene_id not in unique_multiple_lookup:\n unique_multiple_lookup[scene_id] = {}\n\n if object_id not in unique_multiple_lookup[scene_id]:\n unique_multiple_lookup[scene_id][object_id] = {}\n\n if ann_id not in unique_multiple_lookup[scene_id][object_id]:\n unique_multiple_lookup[scene_id][object_id][ann_id] = None\n\n unique_multiple_lookup[scene_id][object_id][ann_id] = unique_multiple\n\n return unique_multiple_lookup\n\n def _tranform_des(self):\n lang = {}\n label = {}\n for data in self.scanrefer:\n scene_id = data[\"scene_id\"]\n object_id = data[\"object_id\"]\n ann_id = data[\"ann_id\"]\n\n if scene_id not in lang:\n lang[scene_id] = {}\n label[scene_id] = {}\n\n if object_id not in lang[scene_id]:\n lang[scene_id][object_id] = {}\n label[scene_id][object_id] = {}\n\n if ann_id not in lang[scene_id][object_id]:\n lang[scene_id][object_id][ann_id] = {}\n label[scene_id][object_id][ann_id] = {}\n\n # trim long descriptions\n tokens = data[\"token\"][:CONF.TRAIN.MAX_DES_LEN]\n\n # tokenize the description\n tokens = [\"sos\"] + tokens + [\"eos\"]\n embeddings = np.zeros((CONF.TRAIN.MAX_DES_LEN + 2, 300))\n labels = np.zeros((CONF.TRAIN.MAX_DES_LEN + 2)) # start and end\n\n # load\n for token_id in range(len(tokens)):\n token = tokens[token_id]\n try:\n embeddings[token_id] = self.glove[token]\n labels[token_id] = 
self.vocabulary[\"word2idx\"][token]\n except KeyError:\n embeddings[token_id] = self.glove[\"unk\"]\n labels[token_id] = self.vocabulary[\"word2idx\"][\"unk\"]\n\n # store\n lang[scene_id][object_id][ann_id] = embeddings\n label[scene_id][object_id][ann_id] = labels\n\n return lang, label\n\n def _build_vocabulary(self, dataset_name):\n vocab_path = CONF.VOCAB.format(dataset_name)\n if os.path.exists(vocab_path):\n self.vocabulary = json.load(open(vocab_path))\n else:\n if self.split == \"train\":\n all_words = chain(*[data[\"token\"][:CONF.TRAIN.MAX_DES_LEN] for data in self.scanrefer])\n word_counter = Counter(all_words)\n word_counter = sorted([(k, v) for k, v in word_counter.items() if k in self.glove], key=lambda x: x[1],\n reverse=True)\n word_list = [k for k, _ in word_counter]\n\n # build vocabulary\n word2idx, idx2word = {}, {}\n spw = [\"pad_\", \"unk\", \"sos\", \"eos\"] # NOTE distinguish padding token \"pad_\" and the actual word \"pad\"\n for i, w in enumerate(word_list):\n shifted_i = i + len(spw)\n word2idx[w] = shifted_i\n idx2word[shifted_i] = w\n\n # add special words into vocabulary\n for i, w in enumerate(spw):\n word2idx[w] = i\n idx2word[i] = w\n\n vocab = {\n \"word2idx\": word2idx,\n \"idx2word\": idx2word\n }\n json.dump(vocab, open(vocab_path, \"w\"), indent=4)\n\n self.vocabulary = vocab\n\n def _build_frequency(self, dataset_name):\n vocab_weights_path = CONF.VOCAB_WEIGHTS.format(dataset_name)\n if os.path.exists(vocab_weights_path):\n with open(vocab_weights_path) as f:\n weights = json.load(f)\n self.weights = np.array([v for _, v in weights.items()])\n else:\n all_tokens = []\n for scene_id in self.lang_ids.keys():\n for object_id in self.lang_ids[scene_id].keys():\n for ann_id in self.lang_ids[scene_id][object_id].keys():\n all_tokens += self.lang_ids[scene_id][object_id][ann_id].astype(int).tolist()\n\n word_count = Counter(all_tokens)\n word_count = sorted([(k, v) for k, v in word_count.items()], key=lambda x: x[0])\n\n # frequencies = [c for _, c in word_count]\n # weights = np.array(frequencies).astype(float)\n # weights = weights / np.sum(weights)\n # weights = 1 / np.log(1.05 + weights)\n\n weights = np.ones((len(word_count)))\n\n self.weights = weights\n\n with open(vocab_weights_path, \"w\") as f:\n weights = {k: v for k, v in enumerate(weights)}\n json.dump(weights, f, indent=4)\n\n def _load_data(self, dataset_name):\n print(\"loading data...\")\n # load language features\n self.glove = pickle.load(open(CONF.GLOVE_PICKLE, \"rb\"))\n self._build_vocabulary(dataset_name)\n self.num_vocabs = len(self.vocabulary[\"word2idx\"].keys())\n self.lang, self.lang_ids = self._tranform_des()\n self._build_frequency(dataset_name)\n\n # add scannet data\n self.scene_list = sorted(list(set([data[\"scene_id\"] for data in self.scanrefer])))\n\n # load scene data\n self.scene_data = {}\n for scene_id in self.scene_list:\n self.scene_data[scene_id] = {}\n self.scene_data[scene_id][\"mesh_vertices\"] = np.load(\n os.path.join(CONF.PATH.SCANNET_DATA, scene_id) + \"_aligned_vert.npy\") # axis-aligned\n self.scene_data[scene_id][\"instance_labels\"] = np.load(\n os.path.join(CONF.PATH.SCANNET_DATA, scene_id) + \"_ins_label.npy\")\n self.scene_data[scene_id][\"semantic_labels\"] = np.load(\n os.path.join(CONF.PATH.SCANNET_DATA, scene_id) + \"_sem_label.npy\")\n self.scene_data[scene_id][\"instance_bboxes\"] = np.load(\n os.path.join(CONF.PATH.SCANNET_DATA, scene_id) + \"_aligned_bbox.npy\")\n\n # prepare class mapping\n lines = [line.rstrip() for line in 
open(CONF.SCANNET_V2_TSV)]\n lines = lines[1:]\n raw2nyuid = {}\n for i in range(len(lines)):\n elements = lines[i].split('\\t')\n raw_name = elements[1]\n nyu40_name = int(elements[4])\n raw2nyuid[raw_name] = nyu40_name\n\n # store\n self.raw2nyuid = raw2nyuid\n self.raw2label = self._get_raw2label()\n self.unique_multiple_lookup = self._get_unique_multiple_lookup()\n\n def _translate(self, point_set, bbox):\n # unpack\n coords = point_set[:, :3]\n\n # translation factors\n x_factor = np.random.choice(np.arange(-0.5, 0.501, 0.001), size=1)[0]\n y_factor = np.random.choice(np.arange(-0.5, 0.501, 0.001), size=1)[0]\n z_factor = np.random.choice(np.arange(-0.5, 0.501, 0.001), size=1)[0]\n factor = [x_factor, y_factor, z_factor]\n\n # dump\n coords += factor\n point_set[:, :3] = coords\n bbox[:, :3] += factor\n\n return point_set, bbox", "# Copyright (c) Facebook, Inc. and its affiliates.\r\n# \r\n# This source code is licensed under the MIT license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\r\n''' Modified based on Ref: https://github.com/erikwijmans/Pointnet2_PyTorch '''\r\nimport torch\r\nimport torch.nn as nn\r\nfrom typing import List, Tuple\r\n\r\nclass SharedMLP(nn.Sequential):\r\n\r\n def __init__(\r\n self,\r\n args: List[int],\r\n *,\r\n bn: bool = False,\r\n activation=nn.ReLU(inplace=True),\r\n preact: bool = False,\r\n first: bool = False,\r\n name: str = \"\"\r\n ):\r\n super().__init__()\r\n\r\n for i in range(len(args) - 1):\r\n self.add_module(\r\n name + 'layer{}'.format(i),\r\n Conv2d(\r\n args[i],\r\n args[i + 1],\r\n bn=(not first or not preact or (i != 0)) and bn,\r\n activation=activation\r\n if (not first or not preact or (i != 0)) else None,\r\n preact=preact\r\n )\r\n )\r\n\r\n\r\nclass _BNBase(nn.Sequential):\r\n\r\n def __init__(self, in_size, batch_norm=None, name=\"\"):\r\n super().__init__()\r\n self.add_module(name + \"bn\", batch_norm(in_size))\r\n\r\n nn.init.constant_(self[0].weight, 1.0)\r\n nn.init.constant_(self[0].bias, 0)\r\n\r\n\r\nclass BatchNorm1d(_BNBase):\r\n\r\n def __init__(self, in_size: int, *, name: str = \"\"):\r\n super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)\r\n\r\n\r\nclass BatchNorm2d(_BNBase):\r\n\r\n def __init__(self, in_size: int, name: str = \"\"):\r\n super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)\r\n\r\n\r\nclass BatchNorm3d(_BNBase):\r\n\r\n def __init__(self, in_size: int, name: str = \"\"):\r\n super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name)\r\n\r\n\r\nclass _ConvBase(nn.Sequential):\r\n\r\n def __init__(\r\n self,\r\n in_size,\r\n out_size,\r\n kernel_size,\r\n stride,\r\n padding,\r\n activation,\r\n bn,\r\n init,\r\n conv=None,\r\n batch_norm=None,\r\n bias=True,\r\n preact=False,\r\n name=\"\"\r\n ):\r\n super().__init__()\r\n\r\n bias = bias and (not bn)\r\n conv_unit = conv(\r\n in_size,\r\n out_size,\r\n kernel_size=kernel_size,\r\n stride=stride,\r\n padding=padding,\r\n bias=bias\r\n )\r\n init(conv_unit.weight)\r\n if bias:\r\n nn.init.constant_(conv_unit.bias, 0)\r\n\r\n if bn:\r\n if not preact:\r\n bn_unit = batch_norm(out_size)\r\n else:\r\n bn_unit = batch_norm(in_size)\r\n\r\n if preact:\r\n if bn:\r\n self.add_module(name + 'bn', bn_unit)\r\n\r\n if activation is not None:\r\n self.add_module(name + 'activation', activation)\r\n\r\n self.add_module(name + 'conv', conv_unit)\r\n\r\n if not preact:\r\n if bn:\r\n self.add_module(name + 'bn', bn_unit)\r\n\r\n if activation is not None:\r\n self.add_module(name + 'activation', 
activation)\r\n\r\n\r\nclass Conv1d(_ConvBase):\r\n\r\n def __init__(\r\n self,\r\n in_size: int,\r\n out_size: int,\r\n *,\r\n kernel_size: int = 1,\r\n stride: int = 1,\r\n padding: int = 0,\r\n activation=nn.ReLU(inplace=True),\r\n bn: bool = False,\r\n init=nn.init.kaiming_normal_,\r\n bias: bool = True,\r\n preact: bool = False,\r\n name: str = \"\"\r\n ):\r\n super().__init__(\r\n in_size,\r\n out_size,\r\n kernel_size,\r\n stride,\r\n padding,\r\n activation,\r\n bn,\r\n init,\r\n conv=nn.Conv1d,\r\n batch_norm=BatchNorm1d,\r\n bias=bias,\r\n preact=preact,\r\n name=name\r\n )\r\n\r\n\r\nclass Conv2d(_ConvBase):\r\n\r\n def __init__(\r\n self,\r\n in_size: int,\r\n out_size: int,\r\n *,\r\n kernel_size: Tuple[int, int] = (1, 1),\r\n stride: Tuple[int, int] = (1, 1),\r\n padding: Tuple[int, int] = (0, 0),\r\n activation=nn.ReLU(inplace=True),\r\n bn: bool = False,\r\n init=nn.init.kaiming_normal_,\r\n bias: bool = True,\r\n preact: bool = False,\r\n name: str = \"\"\r\n ):\r\n super().__init__(\r\n in_size,\r\n out_size,\r\n kernel_size,\r\n stride,\r\n padding,\r\n activation,\r\n bn,\r\n init,\r\n conv=nn.Conv2d,\r\n batch_norm=BatchNorm2d,\r\n bias=bias,\r\n preact=preact,\r\n name=name\r\n )\r\n\r\n\r\nclass Conv3d(_ConvBase):\r\n\r\n def __init__(\r\n self,\r\n in_size: int,\r\n out_size: int,\r\n *,\r\n kernel_size: Tuple[int, int, int] = (1, 1, 1),\r\n stride: Tuple[int, int, int] = (1, 1, 1),\r\n padding: Tuple[int, int, int] = (0, 0, 0),\r\n activation=nn.ReLU(inplace=True),\r\n bn: bool = False,\r\n init=nn.init.kaiming_normal_,\r\n bias: bool = True,\r\n preact: bool = False,\r\n name: str = \"\"\r\n ):\r\n super().__init__(\r\n in_size,\r\n out_size,\r\n kernel_size,\r\n stride,\r\n padding,\r\n activation,\r\n bn,\r\n init,\r\n conv=nn.Conv3d,\r\n batch_norm=BatchNorm3d,\r\n bias=bias,\r\n preact=preact,\r\n name=name\r\n )\r\n\r\n\r\nclass FC(nn.Sequential):\r\n\r\n def __init__(\r\n self,\r\n in_size: int,\r\n out_size: int,\r\n *,\r\n activation=nn.ReLU(inplace=True),\r\n bn: bool = False,\r\n init=None,\r\n preact: bool = False,\r\n name: str = \"\"\r\n ):\r\n super().__init__()\r\n\r\n fc = nn.Linear(in_size, out_size, bias=not bn)\r\n if init is not None:\r\n init(fc.weight)\r\n if not bn:\r\n nn.init.constant_(fc.bias, 0)\r\n\r\n if preact:\r\n if bn:\r\n self.add_module(name + 'bn', BatchNorm1d(in_size))\r\n\r\n if activation is not None:\r\n self.add_module(name + 'activation', activation)\r\n\r\n self.add_module(name + 'fc', fc)\r\n\r\n if not preact:\r\n if bn:\r\n self.add_module(name + 'bn', BatchNorm1d(out_size))\r\n\r\n if activation is not None:\r\n self.add_module(name + 'activation', activation)\r\n\r\ndef set_bn_momentum_default(bn_momentum):\r\n\r\n def fn(m):\r\n if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\r\n m.momentum = bn_momentum\r\n\r\n return fn\r\n\r\n\r\nclass BNMomentumScheduler(object):\r\n\r\n def __init__(\r\n self, model, bn_lambda, last_epoch=-1,\r\n setter=set_bn_momentum_default\r\n ):\r\n if not isinstance(model, nn.Module):\r\n raise RuntimeError(\r\n \"Class '{}' is not a PyTorch nn Module\".format(\r\n type(model).__name__\r\n )\r\n )\r\n\r\n self.model = model\r\n self.setter = setter\r\n self.lmbd = bn_lambda\r\n\r\n self.step(last_epoch + 1)\r\n self.last_epoch = last_epoch\r\n\r\n def step(self, epoch=None):\r\n if epoch is None:\r\n epoch = self.last_epoch + 1\r\n\r\n self.last_epoch = epoch\r\n self.model.apply(self.setter(self.lmbd(epoch)))\r\n\r\n\r\n" ]
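The `_build_vocabulary` method in reference_dataset.py above sorts corpus tokens by frequency and prepends four special tokens before assigning indices. A self-contained sketch of that construction on toy captions (no GloVe filtering, unlike the original):

from collections import Counter
from itertools import chain

captions = [["a", "brown", "chair"], ["a", "wooden", "table"], ["a", "chair"]]

word_counter = Counter(chain(*captions))
word_list = [w for w, _ in sorted(word_counter.items(), key=lambda x: x[1], reverse=True)]

# special tokens come first: "pad_" is the padding token, distinct from the word "pad"
spw = ["pad_", "unk", "sos", "eos"]
word2idx = {w: i for i, w in enumerate(spw)}
word2idx.update({w: i + len(spw) for i, w in enumerate(word_list)})
idx2word = {i: w for w, i in word2idx.items()}

print(word2idx["sos"], word2idx["a"])  # 2 4

As in the original, the padding token "pad_" is deliberately spelled differently from the ordinary word "pad", so padding positions can never collide with a real vocabulary entry.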
[ [ "numpy.arange", "numpy.array", "numpy.zeros" ], [ "torch.nn.init.constant_", "torch.nn.Linear", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ssalonen/financedatahoarder
[ "c24c856837b0114dce7ecd7a1d1ffef66e2e6c62" ]
[ "financedatahoarder/scraper/scrapers/parsers/morningstar_overview.py" ]
[ "# -*- coding: utf-8 -*-\nimport scrapy\nimport pandas as pd\nfrom lxml import html\nfrom lxml.html.clean import Cleaner\nfrom pytz import UTC, timezone, FixedOffset\nimport logging\nfrom financedatahoarder.scraper.scrapers.items import OverviewKeyStats\n\nOSUUDEN_ARVO = 'Osuuden arvo' # Fund\nLOPETUSHINTA = 'Lopetushinta' # ETF\nMYYNTIKURSSI = 'Myyntikurssi' # ETF\n\n_HTML_CLEANER = Cleaner(allow_tags=[''], remove_unknown_tags=False)\n\n\ndef parse_overview_key_stats(selector):\n \"\"\"Prase overview key stats from Morningstar.fi ETF, Fund or stocks page\"\"\"\n tables = selector.css('table.overviewKeyStatsTable').extract()\n if tables:\n table = tables[0]\n df = pd.read_html(table, encoding='utf-8')[0].transpose()\n df.columns = df.iloc[0]\n df = df.drop(0)\n for col, val in df.iteritems():\n if OSUUDEN_ARVO in col or MYYNTIKURSSI in col or LOPETUSHINTA in col:\n # Osuuden arvo<br>dd.mm.yyyy\n # Or\n # Myyntikurssi (dd.mm.yyyy)\n value_date = pd.to_datetime(col.replace(')', '')[-10:], dayfirst=True, utc=True)\n value = float(val.dropna().iloc[0].replace(',', '.').replace('EUR', '').strip())\n break\n else:\n raise RuntimeError('Could not find date')\n return [OverviewKeyStats(value=value, value_date=value_date)]\n else:\n return [parse_stock_price(selector)]\n\n\ndef parse_stock_price(selector):\n logger = logging.getLogger('parse_stock_price')\n # <span class=\"price\" id=\"Col0Price\">42,41</span>\n price_item = selector.css('span#Col0Price.price::text').extract()\n value = float(price_item[0].replace(',', '.'))\n # <p class=\"priceInformation\" id=\"Col0PriceTime\">Päivitetty 20.03.2015<br />18:29:38\n # <abbr title=\"TimeZone_EET\">EET</abbr>\n datetime_text = selector.css('p#Col0PriceTime.priceInformation').extract()[0]\n datetime_text = html.fromstring(_HTML_CLEANER.clean_html(datetime_text)).text\n # datetime_text ~= u'Päivitetty 20.03.201518:29:38 EET | EUR \\t\\t ....\n date_text = datetime_text[10:21]\n time_text = datetime_text[21:29]\n tz_text = datetime_text[30:].partition('|')[0].strip()\n # UGLY: support for EEST (pytz does not support it), more proper support provided by this\n # https://github.com/mithro/python-datetime-tz/blob/master/datetime_tz/pytz_abbr.py\n if tz_text in ('EEST', 'EEDT'):\n tz = FixedOffset(3 * 60)\n else:\n tz = timezone(tz_text)\n value_date = pd.to_datetime(date_text + ' ' + time_text, dayfirst=True).tz_localize(tz)\n value_date = value_date.tz_convert(UTC)\n return OverviewKeyStats(value=value, value_date=value_date)\n\n" ]
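The parser above extracts a dd.mm.yyyy date from the end of a Finnish column header and converts a comma-decimal EUR price to a float. Here is a minimal sketch of just that normalization step, with a hard-coded header and price string standing in for the scraped Morningstar table:

import pandas as pd

col = "Osuuden arvo 21.03.2015"   # or "Myyntikurssi (21.03.2015)" for ETFs
raw_value = "12,34 EUR"

value_date = pd.to_datetime(col.replace(')', '')[-10:], dayfirst=True, utc=True)
value = float(raw_value.replace(',', '.').replace('EUR', '').strip())

print(value_date.date(), value)  # 2015-03-21 12.34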
[ [ "pandas.to_datetime", "pandas.read_html" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
alexarnimueller/MolGAN
[ "cfc9902c49c47995eab40381082a8b6acf5f0525" ]
[ "utils/utils.py" ]
[ "import numpy as np\n\nfrom sklearn.metrics import classification_report as sk_classification_report\nfrom sklearn.metrics import confusion_matrix\n\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem import Draw\n\nfrom .molecular_metrics import MolecularMetrics\n\n\ndef strip_salt(mols, stereo=False):\n out = list()\n for mol in mols:\n if mol:\n s = Chem.MolToSmiles(mol, isomericSmiles=stereo)\n if '.' in s:\n f = s.split('.')\n lengths = [len(m) for m in f]\n n = np.argmax(lengths)\n out.append(f[n])\n else:\n out.append(s)\n else:\n out.append(None)\n return [Chem.MolFromSmiles(mol) if mol else None for mol in out]\n\n\ndef mols2grid_image(mols, molsPerRow):\n mols = [e if e is not None else Chem.RWMol() for e in mols]\n\n for mol in mols:\n AllChem.Compute2DCoords(mol)\n\n return Draw.MolsToGridImage(mols, molsPerRow=molsPerRow, subImgSize=(150, 150))\n\n\ndef classification_report(data, model, session, sample=False):\n _, _, _, a, x, _, f, _, _ = data.next_validation_batch()\n\n n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [\n model.nodes_argmax, model.edges_argmax], feed_dict={model.edges_labels: a, model.nodes_labels: x,\n model.node_features: f, model.training: False,\n model.variational: False})\n n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)\n\n y_true = e.flatten()\n y_pred = a.flatten()\n target_names = [str(Chem.rdchem.BondType.values[int(e)]) for e in data.bond_decoder_m.values()]\n\n print('######## Classification Report ########\\n')\n print(sk_classification_report(y_true, y_pred, labels=list(range(len(target_names))),\n target_names=target_names))\n\n print('######## Confusion Matrix ########\\n')\n print(confusion_matrix(y_true, y_pred, labels=list(range(len(target_names)))))\n\n y_true = n.flatten()\n y_pred = x.flatten()\n target_names = [Chem.Atom(e).GetSymbol() for e in data.atom_decoder_m.values()]\n\n print('######## Classification Report ########\\n')\n print(sk_classification_report(y_true, y_pred, labels=list(range(len(target_names))),\n target_names=target_names))\n\n print('\\n######## Confusion Matrix ########\\n')\n print(confusion_matrix(y_true, y_pred, labels=list(range(len(target_names)))))\n\n\ndef reconstructions(data, model, session, batch_dim=10, sample=False):\n m0, _, _, a, x, _, f, _, _ = data.next_train_batch(batch_dim)\n\n n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [\n model.nodes_argmax, model.edges_argmax], feed_dict={model.edges_labels: a, model.nodes_labels: x,\n model.node_features: f, model.training: False,\n model.variational: False})\n n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)\n\n m1 = np.array([e if e is not None else Chem.RWMol() for e in [data.matrices2mol(n_, e_, strict=True)\n for n_, e_ in zip(n, e)]])\n\n mols = np.vstack((m0, m1)).T.flatten()\n\n return mols\n\n\ndef samples(data, model, session, embeddings, sample=False, smiles=False):\n n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [\n model.nodes_argmax, model.edges_argmax], feed_dict={\n model.embeddings: embeddings, model.training: False})\n n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)\n\n mols = [data.matrices2mol(n_, e_, strict=True) for n_, e_ in zip(n, e)]\n\n if smiles:\n return [Chem.MolToSmiles(m, isomericSmiles=True, kekuleSmiles=True) for m in mols if m]\n else:\n return mols\n\n\ndef all_scores(mols, data, norm=False, reconstruction=False):\n m0 = {k: list(filter(lambda e: e is not None, 
v)) for k, v in {\n 'NP score': MolecularMetrics.natural_product_scores(mols, norm=norm),\n 'QED score': MolecularMetrics.quantitative_estimation_druglikeness_scores(mols),\n 'logP score': MolecularMetrics.water_octanol_partition_coefficient_scores(mols, norm=norm),\n 'SA score': MolecularMetrics.synthetic_accessibility_score_scores(mols, norm=norm),\n 'diversity score': MolecularMetrics.diversity_scores(mols, data),\n 'drugcandidate score': MolecularMetrics.drugcandidate_scores(mols, data)}.items()}\n\n m1 = {'valid score': MolecularMetrics.valid_total_score(mols) * 100,\n 'unique score': MolecularMetrics.unique_total_score(mols) * 100,\n 'novel score': MolecularMetrics.novel_total_score(mols, data) * 100}\n\n return m0, m1\n" ]
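strip_salt in the MolGAN utils above keeps the longest fragment of a dotted SMILES as a proxy for removing salts and counter-ions. A pure-string sketch of that selection rule, without RDKit (the real function round-trips through Chem.MolToSmiles / Chem.MolFromSmiles):

smiles = ["CC(=O)Oc1ccccc1C(=O)O.[Na+]", "c1ccccc1", None]

def largest_fragment(s):
    if s is None:
        return None
    return max(s.split('.'), key=len) if '.' in s else s

print([largest_fragment(s) for s in smiles])
# ['CC(=O)Oc1ccccc1C(=O)O', 'c1ccccc1', None]

Like np.argmax over fragment lengths in the original, max(..., key=len) returns the first fragment when lengths tie, and string length is only a heuristic for molecular size.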
[ [ "numpy.argmax", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
luanft/lstm-cnn-model
[ "dde8121f8fc0f9c4745759e07af301c3b60214da" ]
[ "charts.py" ]
[ "import os\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.axes import Axes\nfrom matplotlib.figure import Figure\nfrom mplfinance.original_flavor import candlestick2_ohlc\nfrom util import convert_to_list, decode_img\nfrom setting import DPI, IMG_H, IMG_W, CHART_DIR, ChartType\n\ndef init_chart():\n \"\"\"Init the directory folder for charts\"\"\"\n dataset_type = ['train', 'validation', 'test']\n for ds_type in dataset_type:\n for ctype in ChartType:\n path_to_chart = os.path.join(CHART_DIR, ds_type, ctype.value)\n if not os.path.exists(path_to_chart):\n os.makedirs(path_to_chart, exist_ok=True)\n\ndef format_and_save_chart(path_to_image: str, fig_obj: Figure, *ax_objs: list):\n \"\"\"Format chart for CNN input\"\"\"\n fig_obj.set_frameon(False)\n fig_obj.set_dpi(DPI)\n fig_obj.set_size_inches((IMG_W/DPI, IMG_H/DPI))\n for ax in ax_objs:\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n fig_obj.tight_layout(pad=0.001)\n fig_obj.savefig(path_to_image)\n plt.close('all')\n plt.cla()\n plt.clf()\n\ndef create_bar_chart(fname, frame_data, chart_dir=''):\n \"\"\"Create the bar chart from window data\"\"\"\n path_to_image = os.path.join(chart_dir, ChartType.BAR.value, \"%s.png\" % fname)\n if not os.path.exists(path_to_image):\n fig_obj, ax_obj = plt.subplots()\n time_series = convert_to_list(frame_data['Time'])\n closed_prices = convert_to_list(frame_data['Trade Volume'])\n ax_obj.bar(x=time_series, height=closed_prices, width=0.8, align='center')\n format_and_save_chart(path_to_image, fig_obj, ax_obj)\n return decode_img(path_to_image)\n\ndef create_line_chart(fname, frame_data, chart_dir=''):\n \"\"\"Create the bar chart from window data\"\"\"\n path_to_image = os.path.join(chart_dir, ChartType.LINE.value, \"%s.png\" % fname)\n if not os.path.exists(path_to_image):\n fig_obj, ax_obj = plt.subplots(nrows=1)\n time_series = convert_to_list(frame_data['Time'])\n high_prices = convert_to_list(frame_data['Trade High'])\n low_prices = convert_to_list(frame_data['Trade Low'])\n ax_obj.plot(time_series, high_prices, color='green')\n ax_obj.plot(time_series, low_prices, color='red')\n format_and_save_chart(path_to_image, fig_obj, ax_obj)\n return decode_img(path_to_image)\n\n\ndef create_filled_line_chart(fname, frame_data, chart_dir=''):\n \"\"\"Create the F line chart from window data\"\"\"\n path_to_image = os.path.join(chart_dir, ChartType.FLINE.value, \"%s.png\" % fname)\n if not os.path.exists(path_to_image):\n fig_obj, ax_obj = plt.subplots(nrows=1)\n time_series = convert_to_list(frame_data['Time'])\n high_prices = convert_to_list(frame_data['Trade High'])\n low_prices = convert_to_list(frame_data['Trade Low'])\n mean_prices = ((np.array(high_prices) + np.array(low_prices)) / 2).tolist()\n ax_obj.plot(time_series, high_prices, color='green', linewidth=0.1)\n ax_obj.plot(time_series, low_prices, color='red', linewidth=0.1)\n ax_obj.fill_between(time_series, high_prices, mean_prices, color='green')\n ax_obj.fill_between(time_series, mean_prices, low_prices, color='red')\n format_and_save_chart(path_to_image, fig_obj, ax_obj)\n return decode_img(path_to_image)\n\ndef create_candlestick_chart(fname, frame_data, chart_dir=''):\n \"\"\"Create the candlestick chart from window data\"\"\"\n path_to_image = os.path.join(chart_dir, ChartType.CANDLESTICK.value, \"%s.png\" % fname)\n if not os.path.exists(path_to_image):\n fig_obj, ax_obj = plt.subplots()\n high_prices = convert_to_list(frame_data['Trade High'])\n 
low_prices = convert_to_list(frame_data['Trade Low'])\n open_prices = convert_to_list(frame_data['Trade Open'])\n closed_prices = convert_to_list(frame_data['Trade Close'])\n candlestick2_ohlc(\n ax_obj, opens=open_prices, highs=high_prices, lows=low_prices,\n closes=closed_prices, width=0.5, colorup='green', colordown='red', alpha=0.8\n )\n format_and_save_chart(path_to_image, fig_obj, ax_obj)\n\n return decode_img(path_to_image)\n\ndef draw_fusion_bar_chart(ax_obj, time_series, trading_volume):\n \"\"\"Draw the bar char\"\"\"\n ax_bar_obj: Axes = ax_obj.twinx()\n ax_bar_obj.autoscale_view()\n ax_bar_obj.set_ylim((0, max(trading_volume) * 2))\n ax_bar_obj.bar(x=time_series, height=trading_volume, width=0.8, align='center', color=\"blue\")\n return ax_bar_obj\n\ndef create_bar_candlestick_fusion_chart(fname, frame_data, chart_dir=''):\n \"\"\"Create the bar candlestick fusion chart from window data\"\"\"\n path_to_image = os.path.join(chart_dir, ChartType.BAR_CANDLESTICK_FUSION.value, \"%s.png\" % fname)\n if not os.path.exists(path_to_image):\n fig_obj, ax_candle_obj = plt.subplots()\n time_series = convert_to_list(frame_data['Time'])\n trading_volume = convert_to_list(frame_data['Trade Volume'])\n high_prices = convert_to_list(frame_data['Trade High'])\n low_prices = convert_to_list(frame_data['Trade Low'])\n open_prices = convert_to_list(frame_data['Trade Open'])\n closed_prices = convert_to_list(frame_data['Trade Close'])\n ax_bar_obj = draw_fusion_bar_chart(ax_candle_obj, list(range(len(time_series))), trading_volume)\n candlestick2_ohlc(\n ax_candle_obj, opens=open_prices, highs=high_prices, lows=low_prices,\n closes=closed_prices, width=0.5, colorup='black', colordown='red', alpha=0.8\n )\n format_and_save_chart(path_to_image, fig_obj, ax_candle_obj, ax_bar_obj)\n return decode_img(path_to_image)\n\ndef create_bar_line_fusion_chart(fname, frame_data, chart_dir=''):\n \"\"\"Create the bar line fusion chart from window data\"\"\"\n path_to_image = os.path.join(chart_dir, ChartType.BAR_LINE_FUSION.value, \"%s.png\" % fname)\n if not os.path.exists(path_to_image):\n fig_obj, ax_line_obj = plt.subplots()\n time_series = convert_to_list(frame_data['Time'])\n trading_volume = convert_to_list(frame_data['Trade Volume'])\n high_prices = convert_to_list(frame_data['Trade High'])\n low_prices = convert_to_list(frame_data['Trade Low'])\n transformed_time_series = list(range(len(time_series)))\n ax_bar_obj = draw_fusion_bar_chart(ax_line_obj, transformed_time_series, trading_volume)\n ax_line_obj.plot(transformed_time_series, high_prices, color='green')\n ax_line_obj.plot(transformed_time_series, low_prices, color='red')\n format_and_save_chart(path_to_image, fig_obj, ax_line_obj, ax_bar_obj)\n return decode_img(path_to_image)\n\ndef create_bar_filled_line_fusion_chart(fname, frame_data, chart_dir=''):\n \"\"\"Create the bar filled line fusion chart from window data\"\"\"\n path_to_image = os.path.join(chart_dir, ChartType.BAR_FLINE_FUSION.value, \"%s.png\" % fname)\n if not os.path.exists(path_to_image):\n fig_obj, ax_fline_obj = plt.subplots()\n time_series = convert_to_list(frame_data['Time'])\n trading_volume = convert_to_list(frame_data['Trade Volume'])\n high_prices = convert_to_list(frame_data['Trade High'])\n low_prices = convert_to_list(frame_data['Trade Low'])\n mean_prices = ((np.array(high_prices) + np.array(low_prices)) / 2).tolist()\n transformed_time_series = list(range(len(time_series)))\n ax_bar_obj = draw_fusion_bar_chart(ax_fline_obj, transformed_time_series, trading_volume)\n 
ax_fline_obj.plot(transformed_time_series, high_prices, color='green', linewidth=0.1)\n ax_fline_obj.plot(transformed_time_series, low_prices, color='red', linewidth=0.1)\n ax_fline_obj.fill_between(transformed_time_series, high_prices, mean_prices, color='green')\n ax_fline_obj.fill_between(transformed_time_series, mean_prices, low_prices, color='red')\n format_and_save_chart(path_to_image, fig_obj, ax_fline_obj, ax_bar_obj)\n return decode_img(path_to_image)\n\n\ninit_chart()" ]
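format_and_save_chart above forces every figure to an exact pixel size so the saved charts can be fed to a CNN. A minimal standalone sketch of the same export recipe follows; the DPI and image dimensions are placeholders, not the values from the project's settings module:

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

DPI, IMG_W, IMG_H = 100, 112, 112   # placeholder values

fig, ax = plt.subplots()
ax.plot([1, 2, 3], [2, 1, 3], color='green')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.set_frameon(False)
fig.set_dpi(DPI)
fig.set_size_inches((IMG_W / DPI, IMG_H / DPI))
fig.tight_layout(pad=0.001)
fig.savefig('sample_chart.png')   # saved image is IMG_W x IMG_H pixels
plt.close(fig)

Fixing the DPI first and then setting the size in inches to pixels divided by DPI is what pins the output to IMG_W x IMG_H regardless of matplotlib's default figure size.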
[ [ "matplotlib.use", "matplotlib.pyplot.cla", "matplotlib.pyplot.subplots", "matplotlib.pyplot.clf", "matplotlib.pyplot.close", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mark-koren/AdaptiveStressTestingToolbox
[ "60ad5d590c949a53ab0fb004fbf42717e970f6a1" ]
[ "examples/hifi/EnvironmentPrediction/Predictions/SensorMeasurements/GroundSeg.py" ]
[ "'''Implementation of Random Markov Field ground segmentation described in:\n\nG. Postica, A. Romanoni, and M. Matteucci. Robust moving objects detection in LiDAR\ndata exploiting visual cues. In IEEE/RSJ International Conference on Intelligent Robots and\nSystems (IROS), pages 1093-1098, 2016.\n\nINPUT: point_cloud [num_points, 3] LiDAR data (number of LiDAR points, (x,y,z))\nOUTPUT: point_cloud_seg [seg_points, 3] LiDAR points that are not 'ground'.\n\n'''\n\nimport numpy as np\nimport math\nimport itertools\nimport copy\n\ndef ground_seg(point_cloud, res=1./3., s=0.09):\n\n num_points = point_cloud.shape[0]\n\n # generate 2-D grid of the LiDAR cloud\n max_index = math.sqrt(2.)*(128/3./2.+1.)\n\n # a 2D array that contains lists of 3D points in point_cloud that map to\n # a particular grid cell (according to the place of the 3D point in point_cloud)\n filler = np.frompyfunc(lambda x: list(), 1, 1)\n grid = np.empty((int(2 * math.ceil(max_index/res) + 1), int(2 * math.ceil(max_index/res) + 1)), dtype=np.object)\n filler(grid, grid);\n\n # determine the center coordinate of the 2D grid\n center_x = int(math.ceil(max_index/res))\n center_y = int(math.ceil(max_index/res))\n\n for i in range(num_points):\n point = point_cloud[i,:]\n x = point[0]\n y = point[1]\n z = point[2]\n\n if ((math.fabs(x) <= max_index) and (math.fabs(y) <= max_index) and (z <= 3.5)):\n\n grid[int(center_x + round(x/res)), int(center_y + round(y/res))].append(i)\n\n h_G = np.nan*np.empty((grid.shape))\n\n # iterate radially outwards to compute if a point belongs to the ground (1) on mask grid\n grid_seg = np.zeros(grid.shape)\n\n # initialize the center coordinate of the 2D grid to ground\n points_z = np.ndarray.tolist(point_cloud[grid[center_x, center_y],2])\n H = max(points_z or [np.nan])\n\n if not math.isnan(H):\n h_G[center_x, center_y] = H\n else:\n # initialize to the z-height of the LiDAR accroding to the KITTI set-up\n h_G[center_x, center_y] = -1.73\n\n # initialize the coordinates of inner circle\n circle_inner = [[center_x, center_y]]\n\n # identify all the points that were labeled as not ground\n point_cloud_seg = np.empty((0,3))\n\n for i in range(1,int(math.ceil(max_index/res))+1):\n\n # generate indices at the ith inner circle level\n circle_curr = generate_circle(i, center_x, center_y)\n\n for indices in circle_curr:\n x = indices[0]\n y = indices[1]\n\n # compute h_hat_G: find max h_G of neighbors\n neigh_indeces = np.array(get_neighbors(x,y,circle_inner))\n\n # compute the min and max z coordinates of each grid cell\n points_z = np.ndarray.tolist(point_cloud[grid[x,y],2])\n H = max(points_z or [np.nan])\n h = min(points_z or [np.nan])\n\n h_hat_G = np.nanmax(h_G[neigh_indeces])\n\n if ((not np.isnan(H)) and (not np.isnan(h)) and \\\n (H - h < s) and (H - h_hat_G < s)):\n grid_seg[x,y] = 1\n h_G[x,y] = copy.deepcopy(H)\n\n else:\n\n h_G[x,y] = copy.deepcopy(h_hat_G)\n\n # add to not ground points\n point_locations = grid[x,y]\n\n if point_locations != []:\n point_cloud_seg = np.vstack((point_cloud_seg,point_cloud[point_locations,:]))\n\n # update the inner circle indices\n circle_inner = copy.deepcopy(circle_curr)\n\n return point_cloud_seg\n\n# return the indices of a circle at level i from the center of the grid\ndef generate_circle(i, center_x, center_y):\n\n circle_range = range(-1*i,i+1)\n circle = [list(x) for x in itertools.product(circle_range, circle_range)]\n circle = [[item[0]+center_x, item[1]+center_y] for item in circle if ((abs(item[0]) == i) or (abs(item[1]) == i))]\n\n return 
circle\n\n# get the inner circle neighbors of a point\ndef get_neighbors(x,y,circle_inner):\n neigh_indices = []\n for indices in circle_inner:\n if ((abs(x-indices[0]) < 2) and (abs(y-indices[1]) < 2)):\n neigh_indices.append(indices)\n\n return neigh_indices\n\n" ]
[ [ "numpy.nanmax", "numpy.isnan", "numpy.vstack", "numpy.ndarray.tolist", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sberbank-ai/ru-prompts
[ "4eeedae92cb5234c70adc787ace7cfceb76b0be0" ]
[ "ruprompts/callbacks.py" ]
[ "import os\n\nimport torch\nfrom transformers import TrainerControl, TrainerState\nfrom transformers.file_utils import WEIGHTS_NAME\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.trainer_callback import TrainerCallback\nfrom transformers.trainer_utils import PREFIX_CHECKPOINT_DIR\nfrom transformers.training_args import TrainingArguments\n\nfrom ruprompts.prompt import Prompt\nfrom ruprompts.prompt_embedding import PROMPT_PROVIDER_KEY_NAME\n\ntry:\n import omegaconf\n\n IS_OMEGACONF_AVAILABLE = True\nexcept ImportError:\n omegaconf = None\n IS_OMEGACONF_AVAILABLE = False\n\ntry:\n import wandb\n\n IS_WANDB_AVAILABLE = True\nexcept ImportError:\n wandb = None\n IS_WANDB_AVAILABLE = False\n\n\nclass FreezeTransformerUnfreezePrompt(TrainerCallback):\n \"\"\"Freezes all parameters but those of prompt provider.\"\"\"\n\n def on_train_begin(\n self,\n args: TrainingArguments,\n state: TrainerState,\n control: TrainerControl,\n model: PreTrainedModel,\n **kwargs,\n ):\n for name, param in model.transformer.named_parameters():\n if PROMPT_PROVIDER_KEY_NAME in name:\n param.requires_grad = True\n else:\n param.requires_grad = False\n\n\nclass ReduceCheckpoint(TrainerCallback):\n \"\"\"Reduces the checkpoint size by keeping only the weights of prompt provider.\"\"\"\n\n def on_save(\n self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs\n ):\n checkpoint_folder = f\"{PREFIX_CHECKPOINT_DIR}-{state.global_step}\"\n output_dir = os.path.join(args.output_dir, checkpoint_folder)\n weights_path = os.path.join(output_dir, WEIGHTS_NAME)\n weights = torch.load(weights_path)\n\n keys_to_remove = []\n for weight_key in weights:\n if PROMPT_PROVIDER_KEY_NAME not in weight_key:\n keys_to_remove.append(weight_key)\n\n for key in keys_to_remove:\n weights.pop(key)\n torch.save(weights, weights_path)\n\n\nclass SavePretrainedPrompt(TrainerCallback):\n \"\"\"Saves the prompt as pretrained on checkpoint.\n\n Args:\n prompt (# !s!`ruprompts.prompt.Prompt`): Prompt instance to be saved.\n \"\"\"\n\n def __init__(self, prompt: Prompt):\n self.prompt = prompt\n\n def on_save(\n self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs\n ):\n checkpoint_folder = f\"{PREFIX_CHECKPOINT_DIR}-{state.global_step}\"\n output_dir = os.path.join(args.output_dir, checkpoint_folder)\n self.prompt.save_pretrained(output_dir)\n\n\nclass WBLogHydraConfig(TrainerCallback):\n \"\"\"Logs Hydra config to Weights and Biases on training start.\n\n Args:\n cfg (omegaconf.DictConfig): Config to be logged.\n \"\"\"\n\n def __init__(self, cfg):\n if not (IS_OMEGACONF_AVAILABLE and IS_WANDB_AVAILABLE):\n raise UserWarning(\n \"WBLogHydraConfig is not available. Install `hydra` and `wandb` \"\n \"with `pip install hydra-core wandb`.\"\n )\n\n self.cfg = cfg\n\n def on_train_begin(\n self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs\n ):\n wandb.config.update({\"hydra\": omegaconf.OmegaConf.to_container(self.cfg)})\n" ]
[ [ "torch.save", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
onepanelio/ensembleObjectDetection
[ "ddc742b553ba7e18d5dcdcf30f61f5858f369d3a" ]
[ "TestTimeAugmentation/predict_batch_retinanet.py" ]
[ "# USAGE\n# python predict_batch.py --model output.h5 --labels logos/retinanet_classes.csv\n#\t--input logos/images --output output\n\n# import the necessary packages\nfrom keras_retinanet.utils.image import preprocess_image\nfrom keras_retinanet.utils.image import read_image_bgr\nfrom keras_retinanet.utils.image import resize_image\nfrom keras_retinanet import models\nimport xml.etree.ElementTree as ET\nfrom xml.dom import minidom\nfrom imutils import paths\nimport numpy as np\nimport argparse\nimport cv2\nimport os\n\n#confidence = 0.25\n\ndef prettify(elem):\n \"\"\"Return a pretty-printed XML string for the Element.\n \"\"\"\n rough_string = ET.tostring(elem, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\" \")\n\ndef generateXML(filename,outputPath,w,h,d,boxes):\n top = ET.Element('annotation')\n childFolder = ET.SubElement(top, 'folder')\n childFolder.text = 'images'\n childFilename = ET.SubElement(top, 'filename')\n childFilename.text = filename[0:filename.rfind(\".\")]\n childPath = ET.SubElement(top, 'path')\n childPath.text = outputPath + \"/\" + filename\n childSource = ET.SubElement(top, 'source')\n childDatabase = ET.SubElement(childSource, 'database')\n childDatabase.text = 'Unknown'\n childSize = ET.SubElement(top, 'size')\n childWidth = ET.SubElement(childSize, 'width')\n childWidth.text = str(w)\n childHeight = ET.SubElement(childSize, 'height')\n childHeight.text = str(h)\n childDepth = ET.SubElement(childSize, 'depth')\n childDepth.text = str(d)\n childSegmented = ET.SubElement(top, 'segmented')\n childSegmented.text = str(0)\n for (box,score) in boxes:\n category = box[0]\n box = box[1].astype(\"int\")\n (x,y,xmax,ymax) = box\n childObject = ET.SubElement(top, 'object')\n childName = ET.SubElement(childObject, 'name')\n childName.text = category\n childScore = ET.SubElement(childObject, 'confidence')\n childScore.text = str(score)\n childPose = ET.SubElement(childObject, 'pose')\n childPose.text = 'Unspecified'\n childTruncated = ET.SubElement(childObject, 'truncated')\n childTruncated.text = '0'\n childDifficult = ET.SubElement(childObject, 'difficult')\n childDifficult.text = '0'\n childBndBox = ET.SubElement(childObject, 'bndbox')\n childXmin = ET.SubElement(childBndBox, 'xmin')\n childXmin.text = str(x)\n childYmin = ET.SubElement(childBndBox, 'ymin')\n childYmin.text = str(y)\n childXmax = ET.SubElement(childBndBox, 'xmax')\n childXmax.text = str(xmax)\n childYmax = ET.SubElement(childBndBox, 'ymax')\n childYmax.text = str(ymax)\n return prettify(top)\n\n\n\n# TODO:\n# Allow option for --input to be a .txt file OR a directory. 
Check if\n# file, and if so, presume keras-retinanet set of images + labels\ndef mainDataset(dataset,output, confidence, name, weights,fichClass):\n # load the class label mappings\n LABELS = open(fichClass).read().strip().split(\"\\n\")\n LABELS = {int(L.split(\",\")[1]): L.split(\",\")[0] for L in LABELS}\n \n # load the model from disk and grab all input image paths\n model = models.load_model(weights, backbone_name=name)\n imagePaths = list(paths.list_images(dataset))\n # loop over the input image paths\n for (i, imagePath) in enumerate(imagePaths):\n \t# load the input image (in BGR order), clone it, and preprocess it\n \tprint(\"[INFO] predicting on image {} of {}\".format(i + 1,\n \t\tlen(imagePaths)))\n \n \t# load the input image (in BGR order), clone it, and preprocess it\n \timage = read_image_bgr(imagePath)\n \twI, hI, d = image.shape\n \toutput = image.copy()\n \timage = preprocess_image(image)\n \t(image, scale) = resize_image(image)\n \timage = np.expand_dims(image, axis=0)\n \n \t# detect objects in the input image and correct for the image scale\n \t(boxes, scores, labels) = model.predict_on_batch(image)\n \tboxes /= scale\n \tboxes1 = []\n \tfor (box, score, label) in zip(boxes[0], scores[0], labels[0]):\n \t\tif score < confidence:\n \t\t\tcontinue\n \t\tboxes1.append(([LABELS[label],box],score))\n \n \t# parse the filename from the input image path, construct the\n \t# path to the output image, and write the image to disk\n \tfilename = imagePath.split(os.path.sep)[-1]\n \t#outputPath = os.path.sep.join([args[\"output\"], filename])\n \t\n \tfile = open(imagePath[0:imagePath.rfind(\".\")]+\".xml\", \"w\")\n \tfile.write(generateXML(imagePath[0:imagePath.rfind(\".\")],imagePath,hI, wI, d, boxes1))\n \tfile.close()\n \n \t\n \t#cv2.imwrite(outputPath, output)\n" ]
[ [ "numpy.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
flylo/spotify-tensorflow
[ "0514eba60148278c2093c50eb4801575710f2988" ]
[ "tests/tf_schema_utils_test.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright 2017-2019 Spotify AB.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nfrom tempfile import NamedTemporaryFile\n\nimport tensorflow as tf\nfrom tensorflow.python.platform import test\nfrom spotify_tensorflow.tf_schema_utils import feature_spec_to_schema, \\\n schema_to_feature_spec, schema_txt_file_to_feature_spec # noqa: E501\n\n\nclass TfSchemaUtilsTest(test.TestCase):\n def test_round_trip(self):\n feature_spec = {\n \"scalar_feature_1\": tf.FixedLenFeature(shape=[], dtype=tf.int64),\n \"scalar_feature_2\": tf.FixedLenFeature(shape=[], dtype=tf.int64),\n \"scalar_feature_3\": tf.FixedLenFeature(shape=[], dtype=tf.float32),\n \"varlen_feature_1\": tf.VarLenFeature(dtype=tf.float32),\n \"varlen_feature_2\": tf.VarLenFeature(dtype=tf.string),\n \"1d_vector_feature\": tf.FixedLenFeature(shape=[1], dtype=tf.string),\n \"2d_vector_feature\": tf.FixedLenFeature(shape=[2, 2], dtype=tf.float32),\n \"sparse_feature\": tf.SparseFeature(\"idx\", \"value\", tf.float32, 10),\n }\n inferred_schema = feature_spec_to_schema(feature_spec)\n inferred_feature_spec = schema_to_feature_spec(inferred_schema)\n self.assertEqual(inferred_feature_spec, feature_spec)\n\n def test_schema_txt_to_feature_spec(self):\n schema_txt = \"\"\"\n feature {\n name: \"test_feature\"\n value_count {\n min: 1\n max: 1\n }\n type: FLOAT\n presence {\n min_count: 1\n }\n }\n \"\"\".encode(\"utf-8\")\n\n with NamedTemporaryFile() as f:\n f.write(schema_txt)\n f.flush()\n os.fsync(f)\n feature_spec = schema_txt_file_to_feature_spec(f.name)\n self.assertEqual(feature_spec, {\"test_feature\": tf.VarLenFeature(dtype=tf.float32)})\n" ]
[ [ "tensorflow.SparseFeature", "tensorflow.FixedLenFeature", "tensorflow.VarLenFeature" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.0", "1.2" ] } ]
momenator/spine_uda
[ "3d6c9cd2431bcdb084d7603d0cc3101163b0902c", "3d6c9cd2431bcdb084d7603d0cc3101163b0902c" ]
[ "train_refine.py", "train_shape_aware.py" ]
[ "import glob\nimport random\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '2'\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.utils.data.dataset import random_split\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport tqdm\nfrom data.dataset import UnpairedDataset\nfrom models.unet import UNet, UNetWavelet\n\n# loss functions here\nfrom utils.losses import DiceLoss, WassersteinLoss, entropy_loss\nfrom utils.lovasz_losses import lovasz_softmax\n\n# metrics here\nfrom utils.metrics import compute_dice_metric\n\n# scheduler\n# from utils.schedulers import LambdaLR\n\n# init seed\nseed = 21\ntorch.manual_seed(seed)\nnp.random.seed(seed)\n\nproject_name = 'test'\nresult_path = './results'\nbatch_size = 20\n\nnum_train = 100\nnum_val = 250\nnum_test = 250\n\nlearning_rate = 0.0002\niteration = 0\nepochs_init = 5\nepochs_decay = 0\nepochs = epochs_init + epochs_decay\n\ndiscrepancy_loss = WassersteinLoss()\n# loss_fn = nn.BCEWithLogitsLoss()\nloss_fn = nn.L1Loss()\n\n\ndef criterion(pred, label):\n return nn.BCELoss()(pred, label) + DiceLoss()(pred, label)\n\n\ndef criterion_reg(pred, label):\n return nn.BCELoss()(pred, label) + lovasz_softmax(pred, label, classes=[1])\n\n\n# should we use \ndef validate_net(net, loader, name='Test', epoch=None, is_save=True, limit=None, display=False, refiner=None):\n net.eval()\n \n if refiner != None:\n refiner.eval()\n \n dices_a = []\n dices_b = []\n losses_a = []\n losses_b = []\n \n count = 0\n \n for j, data in enumerate(loader):\n \n if limit != None and count >= limit:\n break\n \n image_a = data['A'][2].cuda()\n target_a = data['A'][1].cuda()\n \n image_b = data['B'][2].cuda()\n target_b = data['B'][1].cuda()\n \n # do validation on test set here!\n with torch.no_grad():\n if torch.max(target_a) != 0:\n pred = net(image_a)\n loss = criterion(pred, target_a).item()\n pred = torch.round(pred)\n dice_score = compute_dice_metric(pred, target_a).item()\n dices_a.append(dice_score)\n losses_a.append(loss)\n \n if torch.max(target_b) != 0:\n pred = net(image_b)\n \n if refiner != None:\n pred = refiner(pred)\n \n loss = criterion(pred, target_b).item()\n pred = torch.round(pred)\n dice_score = compute_dice_metric(pred, target_b).item()\n dices_b.append(dice_score)\n losses_b.append(loss)\n \n count += 1\n \n # print dice scores here!\n mean_dice_a = np.mean(dices_a)\n mean_dice_b = np.mean(dices_b)\n \n mean_loss_a = np.mean(losses_a)\n mean_loss_b = np.mean(losses_b)\n \n print('{} - Avg dice A: {}, Avg dice B: {}, Avg loss A: {}, Avg loss B: {}'.format(name, \n mean_dice_a, \n mean_dice_b, \n mean_loss_a, \n mean_loss_b))\n \n # return none for the time being\n return mean_dice_a, mean_dice_b, mean_loss_a, mean_loss_b\n\n\n# loader from siegen dataset for validation\ntrain_scan_dataset = UnpairedDataset('../', 'ct_sag_kr/train', 'mr_sag_kr/train', corrupt=True)\ntest_scan_dataset = UnpairedDataset('../', 'ct_sag_kr/test', 'mr_sag_kr/test')\n\nscan_dataset, _ = random_split(train_scan_dataset, [num_train, len(train_scan_dataset) - num_train])\nscan_dataset_test, scan_dataset_val, _ = random_split(test_scan_dataset, \n [num_val, num_test, len(test_scan_dataset) - (num_val + num_test)])\n\ntrain_loader = DataLoader(scan_dataset, batch_size=batch_size, num_workers=5)\nval_loader = DataLoader(scan_dataset_val, batch_size=1, shuffle=False, num_workers=5)\ntest_loader = DataLoader(scan_dataset_test, 
batch_size=1, shuffle=False, num_workers=5)\n\nnet = UNet(1, 1)\nnet.cuda()\n\nrefine_net = UNet(1, 1)\nrefine_net.cuda()\n\n\n# list optimisers here...\n# single optimiser variant 1\noptimiser = optim.Adam(net.parameters(), lr=learning_rate)\noptimiser_refine = optim.Adam(refine_net.parameters(), lr=learning_rate * 10)\n\nprint('Project name ', project_name)\nprint('Learning rate ', learning_rate)\nprint('Epochs ', epochs)\n\ntrain_loss = []\ntrain_loss_rec = []\ntrain_loss_seg = []\n\ntrain_loss_seg_a = []\ntrain_loss_seg_b = []\n\ntrain_dice = []\nval_loss_a = []\nval_dice_a = []\nval_loss_b = []\nval_dice_b = []\n\n\nfor e in range(epochs):\n epoch_train_loss_rec = []\n epoch_train_loss_seg = []\n dice_scores = []\n \n net.train()\n \n print('Epoch ', e)\n \n for i, data in enumerate(tqdm.tqdm(train_loader)):\n iteration += batch_size\n image_a = data['A'][2].cuda()\n target_a = data['A'][1].cuda()\n target_a_corrupted = data['A'][3].cuda()\n \n optimiser.zero_grad()\n pred_seg_a = net(image_a)\n \n loss_seg_a = criterion(pred_seg_a, target_a)\n loss_seg_a.backward() \n optimiser.step()\n \n copy_pred_seg_a = pred_seg_a.detach().clone()\n \n optimiser_refine.zero_grad()\n refine_pred = refine_net(copy_pred_seg_a)\n loss_refine_a = criterion(refine_pred, target_a)\n loss_refine_a.backward()\n optimiser_refine.step()\n \n # dice_score = dice_coeff(torch.round(pred), l).item()\n epoch_train_loss_rec.append(loss_refine_a.item())\n epoch_train_loss_seg.append(loss_seg_a.item())\n \n mean_loss_rec = np.mean(epoch_train_loss_rec)\n mean_loss_seg = np.mean(epoch_train_loss_seg)\n \n # print('Train A - avg seg:{}'.format(np.mean(epoch_train_loss_seg)))\n \n print('Train A - avg seg: {}, avg rec: {}'.format(mean_loss_seg, mean_loss_rec))\n\n dice_a, dice_b, loss_a, loss_b = validate_net(net=net, loader=val_loader, \n name='Validation ', epoch=str(e), refiner=refine_net)\n \n val_loss_a.append(loss_a)\n val_dice_a.append(dice_a)\n val_loss_b.append(loss_b)\n val_dice_b.append(dice_b)\n \n # update learning rate\n # scheduler.step()\n \ndice_a, dice_b, loss_a, loss_b = validate_net(net=net, loader=test_loader,\n name='Test ', epoch='final', refiner=refine_net)\n\n\n# save results here\nsave_path = os.path.join(result_path, project_name)\n\nif os.path.isdir(save_path) is False:\n os.makedirs(save_path)\n\nnp.savez(os.path.join(save_path, 'params'), \n num_train=num_train,\n num_val=num_val,\n num_test=num_test,\n epochs=epochs,\n learning_rate=learning_rate,\n batch_size=batch_size,\n val_dice_a=val_dice_a, \n val_dice_b=val_dice_b,\n val_loss_a=val_loss_a, \n val_loss_b=val_loss_b,\n test_dice_a=dice_a, \n test_dice_b=dice_b,\n test_loss_a=loss_a, \n test_loss_b=loss_b)\n\ntorch.save(net.state_dict(), os.path.join(save_path, 'net'))\n\n", "import glob\nimport random\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '4'\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.utils.data.dataset import random_split\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport tqdm\nfrom data.dataset import UnpairedDataset\nfrom models.unet import UNet, UNetWavelet\n\n# loss functions here\nfrom utils.losses import DiceLoss, WassersteinLoss, entropy_loss\n\n# metrics here\nfrom utils.metrics import compute_dice_metric\n\n# scheduler\n# from utils.schedulers import LambdaLR\n\n# init seed\nseed = 20\ntorch.manual_seed(seed)\nnp.random.seed(seed)\n\nproject_name = 
'unet_shape_aware'\nresult_path = './results'\nbatch_size = 20\n\nnum_train = 7000\nnum_val = 250\nnum_test = 250\n\nlearning_rate = 0.0002\niteration = 0\nepochs_init = 100\nepochs_decay = 0\nepochs = epochs_init + epochs_decay\n\n\ndef criterion(pred, label):\n return nn.BCELoss()(pred, label) + DiceLoss()(pred, label)\n\n\n# should we use \ndef validate_net(net, loader, name='Test', epoch=None, is_save=True, limit=None, display=False):\n net.eval()\n dices_a = []\n dices_b = []\n losses_a = []\n losses_b = []\n \n count = 0\n \n for j, data in enumerate(loader):\n \n if limit != None and count >= limit:\n break\n \n image_a = data['A'][2].cuda()\n target_a = data['A'][1].cuda()\n \n image_b = data['B'][2].cuda()\n target_b = data['B'][1].cuda()\n \n # do validation on test set here!\n with torch.no_grad():\n if torch.max(target_a) != 0:\n a1, a2, a3, a4, a5 = net.downsample(image_a) \n pred, _ ,_ , _ = net.upsample(a1, a2, a3, a4, a5)\n# pred = net(image_a)\n loss = criterion(pred, target_a).item()\n pred = torch.round(pred)\n dice_score = compute_dice_metric(pred, target_a).item()\n dices_a.append(dice_score)\n losses_a.append(loss)\n \n if torch.max(target_b) != 0:\n a1, a2, a3, a4, a5 = net.downsample(image_b) \n pred, _, _, _ = net.upsample(a1, a2, a3, a4, a5)\n# pred = net(image_b)\n loss = criterion(pred, target_b).item()\n pred = torch.round(pred)\n dice_score = compute_dice_metric(pred, target_b).item()\n dices_b.append(dice_score)\n losses_b.append(loss)\n \n count += 1\n \n # print dice scores here!\n mean_dice_a = np.mean(dices_a)\n mean_dice_b = np.mean(dices_b)\n \n mean_loss_a = np.mean(losses_a)\n mean_loss_b = np.mean(losses_b)\n \n print('{} - Avg dice A: {}, Avg dice B: {}, Avg loss A: {}, Avg loss B: {}'.format(name, \n mean_dice_a, \n mean_dice_b, \n mean_loss_a, \n mean_loss_b))\n \n # return none for the time being\n return mean_dice_a, mean_dice_b, mean_loss_a, mean_loss_b\n\n\n# loader from siegen dataset for validation\ntrain_scan_dataset = UnpairedDataset('../', 'ct_sag_kr/train', 'mr_sag_kr/train', contours=True)\ntest_scan_dataset = UnpairedDataset('../', 'ct_sag_kr/test', 'mr_sag_kr/test', contours=True)\n\nscan_dataset, _ = random_split(train_scan_dataset, [num_train, len(train_scan_dataset) - num_train])\nscan_dataset_test, scan_dataset_val, _ = random_split(test_scan_dataset, \n [num_val, num_test, len(test_scan_dataset) - (num_val + num_test)])\n\ntrain_loader = DataLoader(scan_dataset, batch_size=batch_size, num_workers=5)\nval_loader = DataLoader(scan_dataset_val, batch_size=1, shuffle=False, num_workers=5)\ntest_loader = DataLoader(scan_dataset_test, batch_size=1, shuffle=False, num_workers=5)\n\nnet = UNet(1, 1, multiple_output=True)\nnet.cuda()\n\n\n# list optimisers here...\n# single optimiser variant 1\noptimiser = optim.Adam(net.parameters(), lr=learning_rate)\n\n# scheduler = lr_scheduler.LambdaLR(optimiser, \n# lr_lambda=LambdaLR(learning_rate, epochs_init, 0, epochs_decay).step,\n# last_epoch=-1,\n# verbose=True)\n\n# torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1, verbose=False)\n\nprint('Project name ', project_name)\nprint('Learning rate ', learning_rate)\nprint('Epochs ', epochs)\n\ntrain_loss = []\ntrain_loss_rec = []\ntrain_loss_seg = []\n\ntrain_loss_seg_a = []\ntrain_loss_seg_b = []\n\ntrain_dice = []\nval_loss_a = []\nval_dice_a = []\nval_loss_b = []\nval_dice_b = []\n\n\nfor e in range(epochs):\n epoch_train_loss_rec = []\n epoch_train_loss_seg = []\n \n dice_scores = []\n net.train() \n \n print('Epoch ', e)\n 
\n for i, data in enumerate(tqdm.tqdm(train_loader)):\n iteration += batch_size\n \n # data A\n image_a = data['A'][2].cuda()\n target_a = data['A'][1].cuda()\n ctr_a = data['A'][3].cuda()\n edt_a = data['A'][4].cuda()\n \n # data B\n # image_b = data['B'][2].cuda()\n\n optimiser.zero_grad()\n \n a1, a2, a3, a4, a5 = net.downsample(image_a) \n pred_seg_a, pred_ctr_a, pred_edt_a, _ = net.upsample(a1, a2, a3, a4, a5)\n \n loss_seg_a = criterion(pred_seg_a, target_a)\n loss_ctr_a = DiceLoss()(pred_ctr_a, ctr_a)\n loss_edt_a = nn.L1Loss()(pred_edt_a, edt_a)\n \n loss = loss_seg_a + loss_ctr_a + loss_edt_a\n \n loss.backward()\n # loss_seg_a.backward() \n optimiser.step()\n \n # dice_score = dice_coeff(torch.round(pred), l).item()\n # epoch_train_loss_rec.append(loss_recon.item())\n epoch_train_loss_seg.append(loss_seg_a.item())\n \n # mean_loss_rec = np.mean(epoch_train_loss_rec)\n mean_loss_seg = np.mean(epoch_train_loss_seg)\n \n # print('Train A - avg seg:{}'.format(np.mean(epoch_train_loss_seg)))\n \n print('Train A - avg seg: {}'.format(mean_loss_seg))\n\n dice_a, dice_b, loss_a, loss_b = validate_net(net=net, loader=val_loader, name='Validation ', epoch=str(e))\n \n val_loss_a.append(loss_a)\n val_dice_a.append(dice_a)\n val_loss_b.append(loss_b)\n val_dice_b.append(dice_b)\n \n # update learning rate\n # scheduler.step()\n \ndice_a, dice_b, loss_a, loss_b = validate_net(net=net, loader=test_loader, name='Test ', epoch='final')\n\n\n# save results here\nsave_path = os.path.join(result_path, project_name)\n\nif os.path.isdir(save_path) is False:\n os.makedirs(save_path)\n\nnp.savez(os.path.join(save_path, 'params'), \n num_train=num_train,\n num_val=num_val,\n num_test=num_test,\n epochs=epochs,\n learning_rate=learning_rate,\n batch_size=batch_size,\n val_dice_a=val_dice_a, \n val_dice_b=val_dice_b,\n val_loss_a=val_loss_a, \n val_loss_b=val_loss_b,\n test_dice_a=dice_a, \n test_dice_b=dice_b,\n test_loss_a=loss_a, \n test_loss_b=loss_b)\n\ntorch.save(net.state_dict(), os.path.join(save_path, 'net'))\n\n" ]
[ [ "torch.max", "numpy.random.seed", "torch.manual_seed", "torch.round", "torch.utils.data.DataLoader", "torch.nn.BCELoss", "numpy.mean", "torch.no_grad", "torch.nn.L1Loss" ], [ "torch.max", "numpy.random.seed", "torch.manual_seed", "torch.round", "torch.utils.data.DataLoader", "torch.nn.BCELoss", "numpy.mean", "torch.no_grad", "torch.nn.L1Loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aptsunny/SinglePathOneShot
[ "07ae9d6d4ecc2ef4e1f4a8b4aaff0768ab54f57a" ]
[ "src/Search_cifar/flops.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass Shufflenet(nn.Module):\n\n def __init__(self, inp, oup, mid_channels, *, ksize, stride):\n super(Shufflenet, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n assert ksize in [3, 5, 7]\n\n self.base_mid_channel = mid_channels\n self.ksize = ksize\n pad = ksize // 2\n self.pad = pad\n self.inp = inp\n\n outputs = oup - inp\n\n branch_main = [\n # pw\n nn.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),\n nn.BatchNorm2d(mid_channels),\n nn.ReLU(inplace=True),\n # dw\n nn.Conv2d(mid_channels, mid_channels, ksize,\n stride, pad, groups=mid_channels, bias=False),\n nn.BatchNorm2d(mid_channels),\n # pw-linear\n nn.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),\n nn.BatchNorm2d(outputs),\n nn.ReLU(inplace=True),\n ]\n self.branch_main = nn.Sequential(*branch_main)\n\n if stride == 2:\n branch_proj = [\n # dw\n nn.Conv2d(\n inp, inp, ksize, stride, pad, groups=inp, bias=False),\n nn.BatchNorm2d(inp),\n # pw-linear\n nn.Conv2d(inp, inp, 1, 1, 0, bias=False),\n nn.BatchNorm2d(inp),\n nn.ReLU(inplace=True),\n ]\n self.branch_proj = nn.Sequential(*branch_proj)\n\n def forward(self, old_x):\n if self.stride == 1:\n x_proj, x = channel_shuffle(old_x)\n return torch.cat((x_proj, self.branch_main(x)), 1)\n elif self.stride == 2:\n x_proj = old_x\n x = old_x\n return torch.cat((self.branch_proj(x_proj), self.branch_main(x)), 1)\n\n\nclass Shuffle_Xception(nn.Module):\n\n def __init__(self, inp, oup, mid_channels, *, stride):\n super(Shuffle_Xception, self).__init__()\n\n assert stride in [1, 2]\n\n self.base_mid_channel = mid_channels\n self.stride = stride\n self.ksize = 3\n self.pad = 1\n self.inp = inp\n outputs = oup - inp\n\n branch_main = [\n # dw\n nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),\n nn.BatchNorm2d(inp),\n # pw\n nn.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),\n nn.BatchNorm2d(mid_channels),\n nn.ReLU(inplace=True),\n # dw\n nn.Conv2d(mid_channels, mid_channels, 3,\n 1, 1, groups=mid_channels, bias=False),\n nn.BatchNorm2d(mid_channels),\n # pw\n nn.Conv2d(mid_channels, mid_channels, 1, 1, 0, bias=False),\n nn.BatchNorm2d(mid_channels),\n nn.ReLU(inplace=True),\n # dw\n nn.Conv2d(mid_channels, mid_channels, 3,\n 1, 1, groups=mid_channels, bias=False),\n nn.BatchNorm2d(mid_channels),\n # pw\n nn.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),\n nn.BatchNorm2d(outputs),\n nn.ReLU(inplace=True),\n ]\n\n self.branch_main = nn.Sequential(*branch_main)\n\n if self.stride == 2:\n branch_proj = [\n # dw\n nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),\n nn.BatchNorm2d(inp),\n # pw-linear\n nn.Conv2d(inp, inp, 1, 1, 0, bias=False),\n nn.BatchNorm2d(inp),\n nn.ReLU(inplace=True),\n ]\n self.branch_proj = nn.Sequential(*branch_proj)\n\n def forward(self, old_x):\n if self.stride == 1:\n x_proj, x = channel_shuffle(old_x)\n return torch.cat((x_proj, self.branch_main(x)), 1)\n elif self.stride == 2:\n x_proj = old_x\n x = old_x\n return torch.cat((self.branch_proj(x_proj), self.branch_main(x)), 1)\n\n\ndef channel_shuffle(x):\n batchsize, num_channels, height, width = x.data.size()\n assert (num_channels % 4 == 0)\n x = x.reshape(batchsize * num_channels // 2, 2, height * width)\n x = x.permute(1, 0, 2)\n x = x.reshape(2, -1, num_channels // 2, height, width)\n return x[0], x[1]\n\n\nclass ShuffleNetV2_OneShot(nn.Module):\n\n def __init__(self, input_size=224, n_class=1000, architecture=None, channels_scales=None):\n super(ShuffleNetV2_OneShot, self).__init__()\n\n assert input_size % 32 == 0\n 
assert architecture is not None and channels_scales is not None\n\n # self.stage_repeats = [4, 4, 8, 4]\n self.stage_repeats = [1, 1, 2, 1]\n self.stage_out_channels = [-1, 16, 64, 160, 320, 640, 1024]\n\n # building first layer\n input_channel = self.stage_out_channels[1]\n self.first_conv = nn.Sequential(\n nn.Conv2d(3, input_channel, 3, 2, 1, bias=False),\n nn.BatchNorm2d(input_channel),\n nn.ReLU(inplace=True),\n )\n\n self.features = []\n archIndex = 0\n for idxstage in range(len(self.stage_repeats)):\n numrepeat = self.stage_repeats[idxstage]\n output_channel = self.stage_out_channels[idxstage + 2]\n\n for i in range(numrepeat):\n if i == 0:\n inp, outp, stride = input_channel, output_channel, 2\n else:\n inp, outp, stride = input_channel // 2, output_channel, 1\n\n blockIndex = architecture[archIndex]\n base_mid_channels = outp // 2\n mid_channels = int(\n base_mid_channels * channels_scales[archIndex])\n archIndex += 1\n if blockIndex == 0:\n self.features.append(\n Shufflenet(inp, outp, mid_channels=mid_channels, ksize=3, stride=stride))\n elif blockIndex == 1:\n self.features.append(\n Shufflenet(inp, outp, mid_channels=mid_channels, ksize=5, stride=stride))\n elif blockIndex == 2:\n self.features.append(\n Shufflenet(inp, outp, mid_channels=mid_channels, ksize=7, stride=stride))\n elif blockIndex == 3:\n self.features.append(\n Shuffle_Xception(inp, outp, mid_channels=mid_channels, stride=stride))\n else:\n raise NotImplementedError\n input_channel = output_channel\n\n assert archIndex == len(architecture)\n self.features = nn.Sequential(*self.features)\n\n self.conv_last = nn.Sequential(\n nn.Conv2d(\n input_channel, self.stage_out_channels[\n -1], 1, 1, 0, bias=False),\n nn.BatchNorm2d(self.stage_out_channels[-1]),\n nn.ReLU(inplace=True),\n )\n self.globalpool = nn.AvgPool2d(7)\n self.dropout = nn.Dropout(0.1)\n self.classifier = nn.Sequential(\n nn.Linear(self.stage_out_channels[-1], n_class, bias=False))\n self._initialize_weights()\n\n def forward(self, x):\n x = self.first_conv(x)\n x = self.features(x)\n x = self.conv_last(x)\n\n x = self.globalpool(x)\n\n x = self.dropout(x)\n x = x.contiguous().view(-1, self.stage_out_channels[-1])\n x = self.classifier(x)\n return x\n\n def _initialize_weights(self):\n for name, m in self.named_modules():\n if isinstance(m, nn.Conv2d):\n if 'first' in name:\n nn.init.normal_(m.weight, 0, 0.01)\n else:\n nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0.0001)\n nn.init.constant_(m.running_mean, 0)\n elif isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0.0001)\n nn.init.constant_(m.running_mean, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n\ndef get_flops(model, input_shape=(3, 224, 224)):\n list_conv = []\n\n def conv_hook(self, input, output):\n batch_size, input_channels, input_height, input_width = input[0].size()\n output_channels, output_height, output_width = output[0].size()\n\n assert self.in_channels % self.groups == 0\n\n kernel_ops = self.kernel_size[0] * self.kernel_size[\n 1] * (self.in_channels // self.groups)\n params = output_channels * kernel_ops\n flops = batch_size * params * output_height * output_width\n\n list_conv.append(flops)\n\n list_linear = []\n\n def linear_hook(self, 
input, output):\n batch_size = input[0].size(0) if input[0].dim() == 2 else 1\n\n weight_ops = self.weight.nelement()\n\n flops = batch_size * weight_ops\n list_linear.append(flops)\n\n def foo(net):\n childrens = list(net.children())\n if not childrens:\n if isinstance(net, torch.nn.Conv2d):\n net.register_forward_hook(conv_hook)\n if isinstance(net, torch.nn.Linear):\n net.register_forward_hook(linear_hook)\n return\n for c in childrens:\n foo(c)\n\n foo(model)\n input = torch.autograd.Variable(\n torch.rand(*input_shape).unsqueeze(0), requires_grad=True)\n out = model(input)\n\n total_flops = sum(sum(i) for i in [list_conv, list_linear])\n return total_flops\n\n\ndef get_cand_flops(cand):\n model = ShuffleNetV2_OneShot(\n architecture=tuple(cand), channels_scales=(1.0,) * 5) # 20\n return get_flops(model)\n\n\ndef main():\n for i in range(1): # 4\n print(i, get_cand_flops((i,) * 5)) # 20\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.init.normal_", "torch.rand", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JackLidge/FPLTransfers
[ "d458770b658a5dedfe7379871afc424949427cb5" ]
[ "FPLTransfers/FPLTransfers.py" ]
[ "import asyncio\nimport aiohttp\nimport pandas\nfrom concurrent.futures import ThreadPoolExecutor\nfrom fpl import FPL\nfrom fpl.constants import API_URLS\nfrom fpl.utils import fetch, logged_in\n\nclass FPLTransfers():\n '''\n '''\n \n def __init__(self, email=None, password=None):\n '''\n Placeholder - the email and password will be sent to an authentication function in the future\n once it is enabled.\n '''\n self._email = email\n self._password = password\n \n self._aio_pool = ThreadPoolExecutor(1)\n self._aio_loop = asyncio.new_event_loop()\n self._aio_pool.submit(asyncio.set_event_loop, self._aio_loop).result()\n \n async def _call_api_async(self, func, login=False):\n \"\"\" Calls the given FPL API function asynchronously.\n Args:\n func: The API function to execute.\n requires_login: Whether the call requires authentication.\n Returns:\n The Future of the passed function.\n \"\"\"\n\n if login and self._email is None:\n raise ValueError('Email is not provided, the functionality which has been called requires an email')\n elif login and self._password is None:\n raise ValueError('Password is not provided, the functionality which has been called requires a password')\n \n async with aiohttp.ClientSession() as session:\n fpl = FPL(session) #if self.__fpl is None else self.__fpl\n\n if login:\n await fpl.login(self._email, self._password)\n\n return await func(fpl)\n \n def _call_api(self, func, login=False):\n \"\"\" Calls the given FPL API function synchronously.\n Args:\n func: The API function to execute.\n Returns:\n The result of the passed function.\n \"\"\"\n return self._aio_pool.submit(self._aio_loop.run_until_complete, self._call_api_async(func, login)).result()\n \n def _get_team_dict(self, team_data):\n '''\n '''\n team_dict = {}\n for i, val in enumerate(team_data.iterrows()):\n team_dict[team_data.loc[i+1, 'code']] = team_data.loc[i+1, 'name']\n return team_dict \n \n def _ensure_costs_work(self, money_left, it):\n '''\n Returns\n -------\n result: boolean\n Returns True if the difference between actual money spent and total average money spent is\n less than the tolerance defined by the exponential decay equation which reaches approximately\n 0 at x=15.\n '''\n def func(x):\n return -1.35 * (0.8 ** ((1/15) + x)) + 1.05\n tol = func(it)\n \n if it == 13:\n mean_money_left = 10\n elif it == 14:\n mean_money_left = 4.5\n elif it == 15:\n mean_money_left = 0.1\n else:\n mean_money_left = 100 - ((100 / 15) * it)\n #print(money_left, mean_money_left, (mean_money_left/money_left), (1 - tol) + 1)\n result = 0 < mean_money_left / money_left < (1 - tol) + 1\n return result\n \n def normalise(self, df=None):\n '''\n '''\n max_vals = df.max()\n df[['points', 'dreamteam', 'bps', 'form', 'ppg', 'av_difficulty']] = \\\n df[['points', 'dreamteam', 'bps', 'form', 'ppg', 'av_difficulty']] / \\\n max_vals[['points', 'dreamteam', 'bps', 'form', 'ppg', 'av_difficulty']]\n \n for i, val in enumerate(df.iterrows()):\n overall = (df.loc[val[0], 'points'] * 0.2) + (df.loc[val[0], 'form'] * 0.3) + \\\n (df.loc[val[0], 'ppg'] * 0.15) + (df.loc[val[0], 'dreamteam'] * 0.1) + \\\n (df.loc[val[0], 'bps'] * 0.1) + (df.loc[val[0], 'av_difficulty'] * 0.15)\n \n df.loc[val[0], 'overall'] = overall\n return df\n \n def _get_players(self, no_weeks):\n '''\n '''\n json_data = self._call_api(lambda fpl: fpl.get_players(include_summary=True, return_json=True))\n teams_data = self._call_api(lambda fpl: fpl.get_teams(return_json=True))\n # Creates pandas dataframe from player attributes and cuts to only 
include required columns\n data = pandas.DataFrame.from_records(json_data, index=['id'])\n data = data[['first_name', 'second_name', 'element_type', 'team_code', \n 'total_points', 'form', 'points_per_game', 'dreamteam_count',\n 'now_cost', 'chance_of_playing_next_round', 'bps', \n 'selected_by_percent']]\n \n self.original_attributes = list(data.columns)\n new_cols = ['first_name', 'second_name', 'pos', 'team', 'points', 'form', 'ppg', 'dreamteam',\n 'cost', 'chance', 'bps', '%']\n data.columns = new_cols\n \n # Get all the future fixtures for players and sorts by player and \"event\"\n fixtures_df = pandas.DataFrame()\n for player in json_data:\n player_df = pandas.DataFrame.from_records(player['fixtures'])\n player_df['player_id'] = player['id']\n fixtures_df = pandas.concat([fixtures_df, player_df], sort=False)\n fixtures_df = fixtures_df.set_index(['player_id', 'event'])\n\n # Creates teams pandas dataframe and finds the conversion for team IDs and their names\n teams = pandas.DataFrame.from_records(teams_data, index=['id'])\n team_dict = self._get_team_dict(teams)\n \n # Changes team code to team name and puts in average difficulty for the next selected number of weeks\n cur_week = fixtures_df.loc[1].index[0]\n for i, val in enumerate(data.iterrows()):\n data.loc[val[0], 'team'] = team_dict[data.loc[val[0], 'team']]\n # Is there a better way to do this nested try except bracket?\n try:\n fixtures = fixtures_df.loc[val[0]][cur_week:cur_week+no_weeks]['difficulty']\n except KeyError:\n try:\n fixtures = fixtures_df.loc[val[0]][cur_week:cur_week+no_weeks-1]['difficulty']\n except KeyError:\n try:\n fixtures = fixtures_df.loc[val[0]][cur_week+1:cur_week+no_weeks]['difficulty']\n except KeyError:\n av_difficulty = 0\n av_difficulty = 5 - (sum(fixtures) / (len(fixtures)))\n data.loc[val[0], 'av_difficulty'] = av_difficulty\n\n # This implementation is ugly and hard coded. 
Find a way to improve it if you can\n data[['pos']] = data[['pos']].astype(str)\n data[['points', 'dreamteam']] = data[['points', 'dreamteam']].astype(int)\n data[['form', 'ppg', 'cost', 'bps', '%']] = data[['form', 'ppg', 'cost', \n 'bps', '%']].astype(float)\n data['chance'] = data['chance'].fillna(100)\n data[['cost']] = data[['cost']] / 10\n self.data = data\n \n def _get_user_team(self):\n '''\n '''\n async def get_user_team_async(self, fpl=FPL, user_id=None):\n response = await fetch(\n self.session, API_URLS[\"user_team\"].format(user_id))\n return response\n \n json_data = self._call_api(lambda fpl: fpl.get_user(return_json=True), login=True)\n self._user_id = json_data['id']\n self._bank = json_data['last_deadline_bank'] / 10\n\n team = self._call_api(lambda fpl: get_user_team_async(fpl, user_id=self._user_id), login=True)\n current_team = pandas.DataFrame.from_records(team['picks'], index=['element'])\n return current_team\n \n def _transfer_finder(self, cur_team, all_players):\n '''\n '''\n idx = cur_team.index\n pos = ['1', '2', '3', '4']\n full_set = []\n for i in pos:\n pos_team = cur_team.loc[cur_team['pos'] == i]\n player = pos_team.iloc[-1]\n for j, val in all_players.loc[all_players['pos'] == i].iterrows():\n if j not in idx and val.cost < (player.cost + self._bank) and player.overall < val.overall and val.chance > 51:\n temp = {'existing player': player.first_name + ' ' + player.second_name,\n 'new player': val.first_name + ' ' + val.second_name,\n 'diff': val.overall - player.overall,\n 'ex_pl_id': player.name,\n 'n_pl_id': j}\n full_set.append(temp)\n break\n return full_set\n \n \n def single_transfer(self, no_weeks=5):\n '''\n Finds the best transfer to do based on the following methodology:\n - Finds the lowest ranked player by position (based on \"overall\" metric)\n - Finds the player with the highest \"overall\" rank in that position that is within cost\n - Finds the difference in \"overall\" rank between existing player and new player\n - Recommends the transfer with the highest difference\n '''\n user_team = self._get_user_team()\n self._get_players(no_weeks=no_weeks)\n data = self.normalise(df=self.data)\n \n cur_team = data.loc[user_team.index].sort_values('overall', ascending=False)\n data = data.sort_values('overall', ascending=False)\n self.current_team = cur_team\n full_set = self._transfer_finder(cur_team, data)\n\n try: \n diff_val = max([i['diff'] for i in full_set])\n idx= [i['diff'] for i in full_set].index(diff_val)\n print(f\"Replace {full_set[idx]['existing player']} with {full_set[idx]['new player']}\")\n except ValueError:\n print('No transfers to do this week')\n\n def double_transfer(self, no_weeks=5):\n '''\n - Check first to see if any players are \"chance\" < 51 and find highest differential for those players.\n - Otherwise do same as single transfer for first player.\n - Remove first player transfer from current team and replacement from data\n - Perform the same operation again\n '''\n user_team = self._get_user_team()\n self._get_players(no_weeks=no_weeks)\n data = self.normalise(df=self.data)\n \n cur_team = data.loc[user_team.index].sort_values('overall', ascending=False)\n data = data.sort_values('overall', ascending=False)\n self.current_team = cur_team\n \n # Run the _transfer_finder code twice, removing found player each time.\n for i in range(2):\n full_set = self._transfer_finder(cur_team, data)\n try:\n diff_val = max([i['diff'] for i in full_set])\n idx = [i['diff'] for i in full_set].index(diff_val)\n print(f\"Replace 
{full_set[idx]['existing player']} with {full_set[idx]['new player']}\")\n data = data.drop(index=full_set[idx]['n_pl_id'])\n cur_team = cur_team.drop(index=full_set[idx]['ex_pl_id'])\n except ValueError:\n print('No transfers to do this week')\n \n def wildcard(self, no_weeks=5):\n '''\n Finds the best team to create using a wildcard based on the following methodology:\n - Filters player list by those with >51% chance of playing next round and sorts by \"overall\" column\n - Iterates through the list fifteen times to fill squad\n - Checks the player is in a position which still needs to be filled and hasn't already been picked\n - Checks whether the selected player is within a tolerance price\n - If the player is within the tolerance price, moves on to find the next place in the squad\n - If not, moves to the next player in the list to see if they fulfill all the criteria\n '''\n self._get_players(no_weeks)\n data = self.data\n data = self.normalise(df=data)\n element_type = {'1': 2, '2': 5, '3': 5, '4': 3}\n player_list = []\n \n # This implementation is also messy - I have filtered both lists for players likely to play\n # and sorted but \"overall\" rank. Need to figure out a way to do this without creating a separate list\n eval_data = data.loc[data['chance'] > 51]\n eval_data = eval_data.sort_values(by='overall', ascending=False).reset_index(drop=True)\n data = data.loc[data['chance'] > 51]\n data = data.sort_values(by='overall', ascending=False).reset_index(drop=True)\n\n for i in range(16):\n for j, val in enumerate(data.iterrows()):\n if element_type[val[1]['pos']] >= 1 and not val[0] in player_list:\n player_list.append(val[0])\n players_temp = data.iloc[player_list]\n try:\n money_left = 100 - sum(players_temp['cost'])\n except (AttributeError, NameError) as e:\n money_left = 100\n result = self._ensure_costs_work(money_left, i) \n if result == True:\n element_type[val[1]['pos']] -= 1\n eval_data = eval_data.drop(val[0])\n break\n else:\n player_list = player_list[:-1] \n return data.iloc[player_list]\n " ]
[ [ "pandas.DataFrame.from_records", "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
inspur-hsslab/iMIX
[ "99898de97ef8b45462ca1d6bf2542e423a73d769", "99898de97ef8b45462ca1d6bf2542e423a73d769", "99898de97ef8b45462ca1d6bf2542e423a73d769", "99898de97ef8b45462ca1d6bf2542e423a73d769" ]
[ "imix/models/vqa_models/oscar/modeling/oscarplus_pretrain.py", "imix/data/infocomp/visual_dialog_infocpler.py", "imix/data/vqadata/answerprocessor.py", "imix/solver/optimization.py" ]
[ "from imix.models.builder import VQA_MODELS\nfrom imix.models.vqa_models.base_model import BaseModel\nfrom copy import deepcopy\nfrom pytorch_transformers import BertConfig\nfrom .modeling_bert import BertImgForPreTraining\n\nfrom imix.utils.registry import Registry\nfrom collections import OrderedDict\nimport torch\n\nBERT = Registry('bert')\nBERT.register_module('BertConfig', BertConfig)\nBERT.register_module('BertImgForPreTraining', BertImgForPreTraining)\n\n\n@VQA_MODELS.register_module()\nclass OscarPreTraining(BaseModel):\n\n def __init__(self, bert_cfg, pretrained_cfg):\n super().__init__()\n self.bert_cfg = bert_cfg\n self.pretrained_cfg = pretrained_cfg\n\n config = self.build_bert_cfg()\n self.pretrain_model = self.build_pretrain_model(config)\n\n def build_bert_cfg(self):\n\n def num_contrast_classes(texta_false_prob, use_b):\n if texta_false_prob < 0.5 and (texta_false_prob > 0 or not use_b):\n return 3\n else:\n return 2\n\n bert_cfg = deepcopy(self.bert_cfg)\n obj_type = bert_cfg.pop('type')\n obj_class = BERT.get(obj_type)\n\n method_cfg = bert_cfg.pop('run_method')\n run_fun = method_cfg.pop('type')\n cfg = getattr(obj_class, run_fun)(**method_cfg)\n\n texta_false_prob = bert_cfg.pop('texta_false_prob')\n use_b = bert_cfg.pop('use_b')\n\n cfg.num_contrast_classes = num_contrast_classes(texta_false_prob, use_b)\n\n for k, v in bert_cfg.items():\n setattr(cfg, k, v)\n\n return cfg\n\n def build_pretrain_model(self, config):\n pretrained_cfg = deepcopy(self.pretrained_cfg)\n obj_type = pretrained_cfg.pop('type')\n obj_class = BERT.get(obj_type)\n\n method_cfg = pretrained_cfg.pop('run_method')\n run_fun = method_cfg.pop('type')\n\n method_cfg.from_tf = bool('.ckpt' in method_cfg.pretrained_model_name_or_path)\n method_cfg.config = config\n\n method_params = OrderedDict()\n order_keys = ['pretrained_model_name_or_path', 'from_tf', 'config', 'cache_dir']\n for k in order_keys:\n method_params[k] = method_cfg[k]\n\n return getattr(obj_class, run_fun)(**method_params)\n\n def forward_train(self, mini_batch, **kwargs):\n images, input_ids, input_mask, segment_ids, lm_label_ids, is_next, _, _ = self.data_process(mini_batch)\n image_features = torch.stack(images).to('cuda', non_blocking=True)\n outputs = self.pretrain_model(\n input_ids=input_ids,\n token_type_ids=segment_ids,\n attention_mask=input_mask,\n masked_lm_labels=lm_label_ids,\n next_sentence_label=is_next,\n img_feats=image_features)\n return outputs\n\n def forward_test(self, data, **kwargs):\n pass\n\n def data_process(self, mini_batch):\n device = 'cuda'\n images, targets, qa_inds = mini_batch[0], mini_batch[1], mini_batch[2]\n targets_transposed = list(zip(*targets))\n input_ids = torch.stack(targets_transposed[0]).to(device, non_blocking=True)\n input_mask = torch.stack(targets_transposed[1]).to(device, non_blocking=True)\n segment_ids = torch.stack(targets_transposed[2]).to(device, non_blocking=True)\n lm_label_ids = torch.stack(targets_transposed[3]).to(device, non_blocking=True)\n is_next = torch.stack(targets_transposed[4]).to(device, non_blocking=True)\n is_img_match = torch.stack(targets_transposed[5]).to(device, non_blocking=True)\n\n return images, input_ids, input_mask, segment_ids, lm_label_ids, is_next, qa_inds, is_img_match\n", "# from ..utils.tokenization import BertTokenizer\nfrom transformers.tokenization_bert import BertTokenizer\nfrom ..utils.data_utils import encode_input\nimport random\nimport torch\nfrom ..vqadata.stream import ItemFeature\nfrom imix.utils.config import imixEasyDict\n\n\nclass 
VisDiaInfoCpler:\n RUN_FUNC_BERT = {\n 'train': 'train_dataset_bert_info',\n 'val': 'val_dataset_bert_info',\n 'test': 'test_dataset_bert_info',\n }\n\n def __init__(self, cfg):\n self.has_bert = cfg.get('has_bert', False)\n self.tokenizer_path = cfg.tokenizer.path\n self.num_options = cfg.num_options\n self.num_negative_samples = cfg.get('num_negative_samples', 8)\n self.visual_dialog_tot_rounds = cfg.visual_dialog_tot_rounds\n self.max_sequence_len = cfg.get('max_sequence_len', 256)\n self.mask_probability = cfg.get('mask_probability', 0.15)\n self.visdial_tot_rounds = cfg.get('visdial_tot_rounds', 11)\n self.tokenizer_path = cfg.get('tokenizer', 'bert-base-uncased')\n if isinstance(self.tokenizer_path, imixEasyDict):\n self.tokenizer_path = self.tokenizer_path.path\n\n self.init_tokens()\n\n def init_tokens(self):\n # self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n self.tokenizer = BertTokenizer.from_pretrained(pretrained_model_name_or_path=self.tokenizer_path)\n tokens = ['[CLS]', '[MASK]', '[SEP]']\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokens)\n self.CLS = indexed_tokens[0]\n self.MASK = indexed_tokens[1]\n self.SEP = indexed_tokens[2]\n\n def complete_info(self, item_feature, split='train'):\n return self.complete_bert_info(item_feature,\n split) if self.has_bert else self.complete_normal_info(item_feature)\n\n def complete_normal_info(self, item_feature):\n pass\n\n def complete_bert_info(self, item_feature, split):\n func = getattr(self, self.RUN_FUNC_BERT[split])\n return func(item_feature)\n\n def generate_train_language_info(self, utterances, utterances_random):\n tokens_all_list = []\n mask_all_list = []\n segments_all_list = []\n sep_indices_all_list = []\n next_labels_all_list = []\n hist_len_all_list = []\n\n for idx, ut in enumerate(utterances):\n tokens_all = []\n mask_all = []\n segments_all = []\n sep_indices_all = []\n next_labels_all = []\n hist_len_all = []\n\n ut, start_segment = self.prune_rounds(ut, self.visual_dialog_tot_rounds)\n tokens, segments, sep_indices, mask = encode_input(\n ut,\n start_segment,\n self.CLS,\n self.SEP,\n self.MASK,\n max_seq_len=self.max_sequence_len,\n mask_prob=self.mask_probability)\n tokens_all.append(tokens)\n mask_all.append(mask)\n sep_indices_all.append(sep_indices)\n next_labels_all.append(torch.LongTensor([0]))\n segments_all.append(segments)\n hist_len_all.append(torch.LongTensor([len(ut) - 1]))\n negative_samples = utterances_random[idx]\n\n for context_random in negative_samples:\n context_random, start_segment = self.prune_rounds(context_random, self.visual_dialog_tot_rounds)\n # print(\"{}: {}\".format(j, tokens2str(context_random)))\n tokens_random, segments_random, sep_indices_random, mask_random = encode_input(\n context_random,\n start_segment,\n self.CLS,\n self.SEP,\n self.MASK,\n max_seq_len=self.max_sequence_len,\n mask_prob=self.mask_probability)\n tokens_all.append(tokens_random)\n mask_all.append(mask_random)\n sep_indices_all.append(sep_indices_random)\n next_labels_all.append(torch.LongTensor([1]))\n segments_all.append(segments_random)\n hist_len_all.append(torch.LongTensor([len(context_random) - 1]))\n\n tokens_all_list.append(torch.cat(tokens_all, 0).unsqueeze(0))\n mask_all_list.append(torch.cat(mask_all, 0).unsqueeze(0))\n segments_all_list.append(torch.cat(segments_all, 0).unsqueeze(0))\n sep_indices_all_list.append(torch.cat(sep_indices_all, 0).unsqueeze(0))\n next_labels_all_list.append(torch.cat(next_labels_all, 0).unsqueeze(0))\n 
hist_len_all_list.append(torch.cat(hist_len_all, 0).unsqueeze(0))\n\n tokens_all_list = torch.cat(tokens_all_list, 0)\n mask_all_list = torch.cat(mask_all_list, 0)\n segments_all_list = torch.cat(segments_all_list, 0)\n sep_indices_all_list = torch.cat(sep_indices_all_list, 0)\n next_labels_all_list = torch.cat(next_labels_all_list, 0)\n hist_len_all_list = torch.cat(hist_len_all_list, 0)\n\n language_info = {}\n language_info['tokens'] = tokens_all_list\n language_info['segments'] = segments_all_list\n language_info['sep_indices'] = sep_indices_all_list\n language_info['mask'] = mask_all_list\n language_info['next_sentence_labels'] = next_labels_all_list\n language_info['hist_len'] = hist_len_all_list\n\n return language_info\n\n def generate_train_samples(self, item_feature): # build positvie utterances and negative utterances\n utterances = []\n utterances_random = []\n caption_token = self.tokenizer.encode(item_feature['caption'])\n utterances.append([caption_token])\n utterances_random.append([caption_token])\n token_len = len(caption_token) + 2\n\n for dl in item_feature['dialog']:\n curr_utterance = utterances[-1].copy()\n curr_utterance_random = utterances[-1].copy()\n\n q_token = self.tokenizer.encode(dl['question'])\n a_token = self.tokenizer.encode(dl['answer'])\n\n curr_utterance.append(q_token)\n curr_utterance.append(a_token)\n\n q_token_len = len(q_token)\n a_token_len = len(a_token)\n\n token_len += q_token_len + a_token_len + 2 # the question sep token and answer sep token\n\n curr_utterance_random.append(q_token)\n utterances.append(curr_utterance)\n\n gt_idx = dl['gt_index']\n negative_samples = []\n answer_options_num = len(dl['answer_options'])\n\n for _ in range(self.num_negative_samples):\n all_options_idx = list(range(answer_options_num))\n all_options_idx.remove(gt_idx)\n all_options_idx = all_options_idx[:self.num_options - 1]\n random_utterance_token = None\n\n random_idx = None\n while len(all_options_idx):\n random_idx = random.choice(all_options_idx)\n random_utterance_token = self.tokenizer.encode(dl['answer_options'][random_idx])\n if self.max_sequence_len >= (token_len + len(random_utterance_token) + 1):\n break\n else:\n all_options_idx.remove(random_idx)\n\n if len(all_options_idx) == 0:\n random_utterance_token = random_utterance_token[:a_token_len]\n\n tmp = curr_utterance_random.copy()\n tmp.append(random_utterance_token)\n negative_samples.append(tmp)\n\n utterances_random.append(negative_samples)\n\n return utterances, utterances_random\n\n def generate_val_samples(self, item_feature): # build positvie utterances and negative utterances\n utterances = []\n gt_relevance = None\n gt_option_inds = []\n options_all = []\n caption_token = self.tokenizer.encode(item_feature['caption'])\n utterances.append([caption_token])\n num_options = self.num_options\n\n for idx, utterance in enumerate(item_feature['dialog']):\n cur_rnd_utterance = utterances[-1].copy()\n cur_rnd_utterance.append(self.tokenizer.encode(utterance['question']))\n # current round\n gt_option_ind = utterance['gt_index']\n option_inds = []\n option_inds.append(gt_option_ind)\n all_inds = list(range(100))\n all_inds.remove(gt_option_ind)\n all_inds = all_inds[:(num_options - 1)]\n option_inds.extend(all_inds)\n gt_option_inds.append(0)\n cur_rnd_options = []\n answer_options = [utterance['answer_options'][k] for k in option_inds]\n assert len(answer_options) == len(option_inds) == num_options\n assert answer_options[0] == utterance['answer']\n\n if idx == item_feature['round_id'] - 1:\n 
gt_relevance = torch.Tensor(item_feature['gt_relevance'])\n # shuffle based on new indices\n gt_relevance = gt_relevance[torch.LongTensor(option_inds)]\n for a_op in answer_options:\n cur_rnd_cur_option = cur_rnd_utterance.copy()\n cur_rnd_cur_option.append(self.tokenizer.encode(a_op))\n cur_rnd_options.append(cur_rnd_cur_option)\n cur_rnd_utterance.append(self.tokenizer.encode(utterance['answer']))\n utterances.append(cur_rnd_utterance)\n options_all.append(cur_rnd_options)\n\n return options_all, gt_relevance, gt_option_inds\n\n def generate_test_samples(self, item_feature):\n options_all = []\n caption_token = self.tokenizer.encode(item_feature['caption'])\n cur_rnd_utterance = [caption_token]\n dialog_len = len(item_feature['dialog'])\n for idx, dl in enumerate(item_feature['dialog']):\n q_token = self.tokenizer.encode(dl['question'])\n cur_rnd_utterance.append(q_token)\n if idx != dialog_len - 1:\n cur_rnd_utterance.append(self.tokenizer.encode(dl['answer']))\n\n for answer in item_feature['dialog'][-1]['answer_options']:\n cur_option = cur_rnd_utterance.copy()\n cur_option.append(self.tokenizer.encode(answer))\n options_all.append(cur_option)\n\n return options_all\n\n def generate_val_language_info(self, options_all):\n tokens_all = []\n mask_all = []\n segments_all = []\n sep_indices_all = []\n hist_len_all = []\n for rnd, cur_rnd_options in enumerate(options_all):\n\n tokens_all_rnd = []\n mask_all_rnd = []\n segments_all_rnd = []\n sep_indices_all_rnd = []\n hist_len_all_rnd = []\n\n for cur_rnd_option in cur_rnd_options:\n cur_rnd_option, start_segment = self.prune_rounds(cur_rnd_option, self.visdial_tot_rounds)\n tokens, segments, sep_indices, mask = encode_input(\n cur_rnd_option,\n start_segment,\n self.CLS,\n self.SEP,\n self.MASK,\n max_seq_len=self.max_sequence_len,\n mask_prob=self.mask_probability)\n\n tokens_all_rnd.append(tokens)\n mask_all_rnd.append(mask)\n segments_all_rnd.append(segments)\n sep_indices_all_rnd.append(sep_indices)\n hist_len_all_rnd.append(torch.LongTensor([len(cur_rnd_option) - 1]))\n\n tokens_all.append(torch.cat(tokens_all_rnd, 0).unsqueeze(0))\n mask_all.append(torch.cat(mask_all_rnd, 0).unsqueeze(0))\n segments_all.append(torch.cat(segments_all_rnd, 0).unsqueeze(0))\n sep_indices_all.append(torch.cat(sep_indices_all_rnd, 0).unsqueeze(0))\n hist_len_all.append(torch.cat(hist_len_all_rnd, 0).unsqueeze(0))\n\n tokens_all = torch.cat(tokens_all, 0)\n mask_all = torch.cat(mask_all, 0)\n segments_all = torch.cat(segments_all, 0)\n sep_indices_all = torch.cat(sep_indices_all, 0)\n hist_len_all = torch.cat(hist_len_all, 0)\n\n item = {}\n item['tokens'] = tokens_all\n item['segments'] = segments_all\n item['sep_indices'] = sep_indices_all\n item['mask'] = mask_all\n item['hist_len'] = hist_len_all\n\n return item\n\n def generate_test_language_info(self, options_all):\n tokens_all = []\n mask_all = []\n segments_all = []\n sep_indices_all = []\n hist_len_all = []\n\n for option in options_all:\n option, start_segment = self.pruneRounds(option, self.visdial_tot_rounds)\n # print(\"option: {} {}\".format(j, tokens2str(option)))\n tokens, segments, sep_indices, mask = encode_input(\n option, start_segment, self.CLS, self.SEP, self.MASK, max_seq_len=self.max_sequence_len, mask_prob=0)\n\n tokens_all.append(tokens)\n mask_all.append(mask)\n segments_all.append(segments)\n sep_indices_all.append(sep_indices)\n hist_len_all.append(torch.LongTensor([len(option) - 1]))\n\n tokens_all = torch.cat(tokens_all, 0)\n mask_all = torch.cat(mask_all, 0)\n segments_all 
= torch.cat(segments_all, 0)\n sep_indices_all = torch.cat(sep_indices_all, 0)\n hist_len_all = torch.cat(hist_len_all, 0)\n\n language_info = {}\n language_info['tokens'] = tokens_all.unsqueeze(0)\n language_info['segments'] = segments_all.unsqueeze(0)\n language_info['sep_indices'] = sep_indices_all.unsqueeze(0)\n language_info['mask'] = mask_all.unsqueeze(0)\n language_info['hist_len'] = hist_len_all.unsqueeze(0)\n\n return language_info\n\n def tokens2str(self, seq):\n dialog_sequence = ''\n for sentence in seq:\n for word in sentence:\n dialog_sequence += self.tokenizer._convert_id_to_token(word) + ' '\n dialog_sequence += ' </end> '\n dialog_sequence = dialog_sequence.encode('utf8')\n return dialog_sequence\n\n @staticmethod\n def prune_rounds(context, num_rounds):\n start_segment = 1\n len_context = len(context)\n cur_rounds = (len(context) // 2) + 1\n l_index = 0\n if cur_rounds > num_rounds:\n # caption is not part of the final input\n l_index = len_context - (2 * num_rounds)\n start_segment = 0\n return context[l_index:], start_segment\n\n def val_dataset_bert_info(self, item_feature):\n samples, gt_relevance, gt_option_inds = self.generate_val_samples(item_feature)\n language = self.generate_val_language_info(samples)\n\n item_feature.update(language)\n item_feature['gt_relevance'] = gt_relevance\n item_feature['gt_option_inds'] = torch.LongTensor(gt_option_inds)\n item_feature['round_id'] = torch.LongTensor([item_feature['round_id']])\n item_feature['dialog'] = [item_feature['dialog']]\n\n return item_feature\n\n def train_dataset_bert_info(self, item_feature):\n utterances, utterances_random = self.generate_train_samples(item_feature)\n utterances, utterances_random = utterances[1:], utterances_random[1:] # remove the caption in the beginning\n assert len(utterances) == len(utterances_random) == 10\n\n language = self.generate_train_language_info(utterances, utterances_random)\n item_feature.update(language)\n return item_feature\n\n def test_dataset_bert_info(self, item_feature):\n test_samples = self.generate_test_samples(item_feature)\n language = self.generate_test_language_info(test_samples)\n item_feature.update(language)\n\n return item_feature\n\n\nclass VisualDialogDenseInfoCpler(VisDiaInfoCpler):\n\n def __init__(self, cfg):\n super().__init__(cfg)\n\n def complete_info(self, item_feature, split='train'):\n cur_rnd_utterance = [self.tokenizer.encode(item_feature.caption)]\n cur_rounds = item_feature.round_id\n\n for rnd, utterance in enumerate(item_feature.dialog[:cur_rounds]):\n cur_rnd_utterance.append(self.tokenizer.encode(utterance['question']))\n if rnd != cur_rounds - 1:\n cur_rnd_utterance.append(self.tokenizer.encode(utterance['answer']))\n\n options_all = []\n for answer in item_feature.dialog[cur_rounds - 1]['answer_options']:\n cur_option = cur_rnd_utterance.copy()\n cur_option.append(self.tokenizer.encode(answer))\n options_all.append(cur_option)\n assert len(cur_option) == 2 * cur_rounds + 1\n\n gt_option = item_feature.dialog[cur_rounds - 1]['gt_index']\n\n tokens_all = []\n mask_all = []\n segments_all = []\n sep_indices_all = []\n hist_len_all = []\n\n for _, option in enumerate(options_all):\n option, start_segment = super().prune_rounds(option, self.visual_dialog_tot_rounds)\n tokens, segments, sep_indices, mask = encode_input(\n option,\n start_segment,\n self.CLS,\n self.SEP,\n self.MASK,\n max_seq_len=self.max_sequence_len,\n mask_prob=self.mask_probability)\n\n tokens_all.append(tokens)\n mask_all.append(mask)\n segments_all.append(segments)\n 
sep_indices_all.append(sep_indices)\n hist_len_all.append(torch.LongTensor([len(option) - 1]))\n\n tokens_all = torch.cat(tokens_all, 0)\n mask_all = torch.cat(mask_all, 0)\n segments_all = torch.cat(segments_all, 0)\n sep_indices_all = torch.cat(sep_indices_all, 0)\n hist_len_all = torch.cat(hist_len_all, 0)\n\n item = ItemFeature()\n item['tokens'] = tokens_all.unsqueeze(0)\n item['segments'] = segments_all.unsqueeze(0)\n item['sep_indices'] = sep_indices_all.unsqueeze(0)\n item['mask'] = mask_all.unsqueeze(0)\n item['hist_len'] = hist_len_all.unsqueeze(0)\n item['image_id'] = torch.LongTensor([item_feature.image_id])\n\n item['image_feat'] = item_feature.image_feat\n item['image_loc'] = item_feature.image_loc\n item['image_mask'] = item_feature.image_mask\n item['image_target'] = item_feature.image_target\n item['image_label'] = item_feature.image_label\n\n # add dense annotation fields\n item['gt_relevance_round_id'] = torch.LongTensor([cur_rounds])\n item['gt_relevance'] = torch.Tensor(item_feature['relevance'])\n item['gt_option'] = torch.LongTensor([gt_option])\n\n # add next sentence labels for training with the nsp loss as well\n nsp_labels = torch.ones(*tokens_all.unsqueeze(0).shape[:-1])\n nsp_labels[:, gt_option] = 0\n item['next_sentence_labels'] = nsp_labels.long()\n\n return item\n", "import torch\nfrom .baseprocessor import BaseProcessor\nfrom imix.utils.third_party_libs import PathManager\nimport re\nfrom ..builder import build_vocab, build_preprocessor, PROCESSOR\n\nSENTENCE_SPLIT_REGEX = re.compile(r'(\\W+)')\n\n\ndef tokenize(sentence, regex=SENTENCE_SPLIT_REGEX, keep=None, remove=None):\n if keep is None:\n keep = [\"'s\"]\n if remove is None:\n remove = [',', '?']\n sentence = sentence.lower()\n\n for token in keep:\n sentence = sentence.replace(token, ' ' + token)\n\n for token in remove:\n sentence = sentence.replace(token, '')\n\n tokens = regex.split(sentence)\n tokens = [t.strip() for t in tokens if len(t.strip()) > 0]\n return tokens\n\n\ndef load_str_list(fname):\n with PathManager.open(fname) as f:\n lines = f.readlines()\n lines = [l.strip() for l in lines]\n return lines\n\n\[email protected]_module()\nclass SimpleWordProcessor(BaseProcessor):\n \"\"\"Tokenizes a word and processes it.\n\n Attributes:\n tokenizer (function): Type of tokenizer to be used.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n from imix.utils.third_party_libs import word_tokenize\n\n self.tokenizer = word_tokenize\n\n def __call__(self, item, *args, **kwargs):\n return {'text': self.tokenizer(item['text'], *args, **kwargs)}\n\n\[email protected]_module()\nclass VQAAnswerProcessor(BaseProcessor):\n \"\"\"Processor for generating answer scores for answers passed using VQA\n accuracy formula. Using VocabDict class to represent answer vocabulary, so\n parameters must specify \"vocab_file\". \"num_answers\" in parameter config\n specify the max number of answers possible. Takes in dict containing\n \"answers\" or \"answers_tokens\". 
\"answers\" are preprocessed to generate\n \"answers_tokens\" if passed.\n\n Args:\n config (DictConfig): Configuration for the processor\n\n Attributes:\n answer_vocab (VocabDict): Class representing answer vocabulary\n \"\"\"\n\n DEFAULT_NUM_ANSWERS = 10\n\n def __init__(self, answer_vocab, preprocessor, num_answers, *args, **kwargs):\n # self.writer = registry.get(\"writer\")\n # if not hasattr(config, \"vocab_file\"):\n # raise AttributeError(\n # \"'vocab_file' argument required, but not \"\n # \"present in AnswerProcessor's config\"\n # )\n\n # self.answer_vocab = VocabDict(config.vocab_file, *args, **kwargs)\n self.answer_vocab = build_vocab(answer_vocab)\n\n self.preprocessor = build_preprocessor(preprocessor)\n\n # self.preprocessor = None\n\n # if hasattr(config, \"preprocessor\"):\n # self.preprocessor = Processor(config.preprocessor)\n #\n # if self.preprocessor is None:\n # raise ValueError(\n # f\"No processor named {config.preprocessor} is defined.\"\n # )\n self.num_answers = num_answers\n # if hasattr(config, \"num_answers\"):\n # self.num_answers = config.num_answers\n # else:\n # self.num_answers = self.DEFAULT_NUM_ANSWERS\n # warnings.warn(\n # \"'num_answers' not defined in the config. \"\n # \"Setting to default of {}\".format(self.DEFAULT_NUM_ANSWERS)\n # )\n\n def __call__(self, item):\n \"\"\"Takes in dict with answers or answers_tokens, and returns back a\n dict with answers (processed), \"answers_indices\" which point to indices\n of the answers if present and \"answers_scores\" which represent VQA\n style scores for the answers.\n\n Args:\n item (Dict): Dict containing answers or answers_tokens\n\n Returns:\n Dict: Processed answers, indices and scores.\n \"\"\"\n tokens = None\n\n if not isinstance(item, dict):\n raise TypeError(\"'item' passed to processor must be a dict\")\n\n if 'answer_tokens' in item:\n tokens = item['answer_tokens']\n elif 'answers' in item:\n if self.preprocessor is None:\n raise AssertionError(\"'preprocessor' must be defined if you \" \"don't pass 'answer_tokens'\")\n\n tokens = [self.preprocessor({'text': answer})['text'] for answer in item['answers']]\n else:\n raise AssertionError(\"'answers' or 'answer_tokens' must be passed\" ' to answer processor in a dict')\n\n tokens = self._increase_to_ten(tokens)\n answers_indices = torch.zeros(self.DEFAULT_NUM_ANSWERS, dtype=torch.long)\n answers_indices.fill_(self.answer_vocab.get_unk_index())\n\n for idx, token in enumerate(tokens):\n answers_indices[idx] = self.answer_vocab.word2idx(token)\n\n answers_scores = self.compute_answers_scores(answers_indices)\n\n return {\n 'answers': tokens,\n 'answers_indices': answers_indices,\n 'answers_scores': answers_scores,\n }\n\n def get_vocab_size(self):\n \"\"\"Get vocab size of the answer vocabulary. 
Can also include soft copy\n dynamic answer space size.\n\n Returns:\n int: size of the answer vocabulary\n \"\"\"\n return self.answer_vocab.num_vocab\n\n def get_true_vocab_size(self):\n \"\"\"True vocab size can be different from normal vocab size in some\n cases such as soft copy where dynamic answer space is added.\n\n Returns:\n int: True vocab size.\n \"\"\"\n return self.answer_vocab.num_vocab\n\n def word2idx(self, word):\n \"\"\"Convert a word to its index according to vocabulary.\n\n Args:\n word (str): Word to be converted to index.\n\n Returns:\n int: Index of the word.\n \"\"\"\n return self.answer_vocab.word2idx(word)\n\n def idx2word(self, idx):\n \"\"\"Index to word according to the vocabulary.\n\n Args:\n idx (int): Index to be converted to the word.\n\n Returns:\n str: Word corresponding to the index.\n \"\"\"\n return self.answer_vocab.idx2word(idx)\n\n def compute_answers_scores(self, answers_indices):\n \"\"\"Generate VQA based answer scores for answers_indices.\n\n Args:\n answers_indices (torch.LongTensor): tensor containing indices of the answers\n\n Returns:\n torch.FloatTensor: tensor containing scores.\n \"\"\"\n scores = torch.zeros(self.get_vocab_size(), dtype=torch.float)\n gt_answers = list(enumerate(answers_indices))\n unique_answers = set(answers_indices.tolist())\n\n for answer in unique_answers:\n accs = []\n for gt_answer in gt_answers:\n other_answers = [item for item in gt_answers if item != gt_answer]\n\n matching_answers = [item for item in other_answers if item[1] == answer]\n acc = min(1, float(len(matching_answers)) / 3)\n accs.append(acc)\n avg_acc = sum(accs) / len(accs)\n\n if answer != self.answer_vocab.UNK_INDEX:\n scores[answer] = avg_acc\n\n return scores\n\n def _increase_to_ten(self, tokens):\n while len(tokens) < self.DEFAULT_NUM_ANSWERS:\n tokens += tokens[:self.DEFAULT_NUM_ANSWERS - len(tokens)]\n\n return tokens\n", "import torch\nfrom .builder import OPTIMIZERS\nfrom torch.optim.optimizer import required\nfrom typing import Tuple\nfrom torch.optim import Optimizer\nimport math\nfrom torch.nn.utils import clip_grad_norm_\nimport logging\n'''\nbelow optimizer for lxmert\n'''\n\nlogger = logging.getLogger(__name__)\n\n\ndef warmup_cosine(x, warmup=0.002):\n if x < warmup:\n return x / warmup\n return 0.5 * (1.0 + torch.cos(math.pi * x))\n\n\ndef warmup_constant(x, warmup=0.002):\n \"\"\"Linearly increases learning rate over `warmup`*`t_total` (as provided to\n BertAdam) training steps.\n\n Learning rate is 1. afterwards.\n \"\"\"\n if x < warmup:\n return x / warmup\n return 1.0\n\n\ndef warmup_linear(x, warmup=0.002):\n \"\"\"Specifies a triangular learning rate schedule where peak is reached at\n `warmup`*`t_total`-th (as provided to BertAdam) training step.\n\n After `t_total`-th training step, learning rate is zero.\n \"\"\"\n if x < warmup:\n return x / warmup\n return max((x - 1.) / (warmup - 1.), 0)\n\n\nSCHEDULES = {\n 'warmup_cosine': warmup_cosine,\n 'warmup_constant': warmup_constant,\n 'warmup_linear': warmup_linear,\n}\n\n\[email protected]_module()\nclass BertAdam(Optimizer):\n \"\"\"Implements BERT version of Adam algorithm with weight decay fix.\n\n Params:\n lr: learning rate\n warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1\n t_total: total number of training steps for the learning\n rate schedule, -1 means constant learning rate. Default: -1\n schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'\n b1: Adams b1. Default: 0.9\n b2: Adams b2. 
Default: 0.999\n e: Adams epsilon. Default: 1e-6\n weight_decay: Weight decay. Default: 0.01\n max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0\n \"\"\"\n\n def __init__(self,\n params,\n lr=required,\n warmup=-1,\n t_total=-1,\n schedule='warmup_linear',\n b1=0.9,\n b2=0.999,\n e=1e-6,\n weight_decay=0.01,\n max_grad_norm=1.0):\n if lr is not required and lr < 0.0:\n raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))\n if schedule not in SCHEDULES:\n raise ValueError('Invalid schedule parameter: {}'.format(schedule))\n if not 0.0 <= warmup < 1.0 and not warmup == -1:\n raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup))\n if not 0.0 <= b1 < 1.0:\n raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1))\n if not 0.0 <= b2 < 1.0:\n raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2))\n if not e >= 0.0:\n raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))\n defaults = dict(\n lr=lr,\n schedule=schedule,\n warmup=warmup,\n t_total=t_total,\n b1=b1,\n b2=b2,\n e=e,\n weight_decay=weight_decay,\n max_grad_norm=max_grad_norm)\n super(BertAdam, self).__init__(params, defaults)\n\n def get_lr(self):\n lr = []\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n if len(state) == 0:\n return [0]\n if group['t_total'] != -1:\n schedule_fct = SCHEDULES[group['schedule']]\n lr_scheduled = group['lr'] * schedule_fct(state['step'] / group['t_total'], group['warmup'])\n else:\n lr_scheduled = group['lr']\n lr.append(lr_scheduled)\n return lr\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n warned_for_t_total = False\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['next_m'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['next_v'] = torch.zeros_like(p.data)\n\n next_m, next_v = state['next_m'], state['next_v']\n beta1, beta2 = group['b1'], group['b2']\n\n # LXRT: grad is clipped outside.\n # Add grad clipping\n # if group['max_grad_norm'] > 0:\n # clip_grad_norm_(p, group['max_grad_norm'])\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n next_m.mul_(beta1).add_(1 - beta1, grad)\n next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n update = next_m / (next_v.sqrt() + group['e'])\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if group['weight_decay'] > 0.0:\n update += group['weight_decay'] * p.data\n\n if group['t_total'] != -1:\n schedule_fct = SCHEDULES[group['schedule']]\n progress = state['step'] / group['t_total']\n lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup'])\n # warning for exceeding t_total (only active with warmup_linear\n if group['schedule'] == 'warmup_linear' and progress > 1. and not warned_for_t_total:\n logger.warning(\n \"Training beyond specified 't_total' steps with schedule '{}'. Learning rate set to {}. \"\n \"Please set 't_total' of {} correctly.\".format(group['schedule'], lr_scheduled,\n self.__class__.__name__))\n warned_for_t_total = True\n # end warning\n else:\n lr_scheduled = group['lr']\n\n update_with_lr = lr_scheduled * update\n p.data.add_(-update_with_lr)\n\n state['step'] += 1\n\n # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1\n # No bias correction\n # bias_correction1 = 1 - beta1 ** state['step']\n # bias_correction2 = 1 - beta2 ** state['step']\n\n return loss\n\n\[email protected]_module()\nclass BertAdam1(Optimizer):\n \"\"\"Implements BERT version of Adam algorithm with weight decay fix.\n\n Params:\n betas: Adams b1. Default: 0.9\n Adams b2. Default: 0.999\n e: Adams epsilon. Default: 1e-6\n weight_decay: Weight decay. Default: 0.01\n max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0\n \"\"\"\n\n def __init__(self,\n params,\n lr=required,\n betas: Tuple[float, float] = (0.9, 0.999),\n eps=1e-6,\n weight_decay=0.01,\n max_grad_norm=1.0):\n b1 = betas[0]\n b2 = betas[1]\n if lr is not required and lr < 0.0:\n raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))\n if not 0.0 <= b1 < 1.0:\n raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1))\n if not 0.0 <= b2 < 1.0:\n raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2))\n if not eps >= 0.0:\n raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(eps))\n defaults = dict(lr=lr, b1=b1, b2=b2, e=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm)\n super(BertAdam1, self).__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['next_m'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['next_v'] = torch.zeros_like(p.data)\n\n next_m, next_v = state['next_m'], state['next_v']\n beta1, beta2 = group['b1'], group['b2']\n\n # Add grad clipping\n if group['max_grad_norm'] > 0:\n clip_grad_norm_(p, group['max_grad_norm'])\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n next_m.mul_(beta1).add_(1 - beta1, grad)\n next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n update = next_m / (next_v.sqrt() + 
group['e'])\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if group['weight_decay'] > 0.0:\n update += group['weight_decay'] * p.data\n\n update_with_lr = group['lr'] * update\n p.data.add_(-update_with_lr)\n\n state['step'] += 1\n\n # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1\n # No bias correction\n # bias_correction1 = 1 - beta1 ** state['step']\n # bias_correction2 = 1 - beta2 ** state['step']\n\n return loss\n\n\[email protected]_module()\nclass TansformerAdamW(Optimizer):\n \"\"\"Implements Adam algorithm with weight decay fix.\n\n Parameters:\n lr (float): learning rate. Default 1e-3.\n betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)\n eps (float): Adams epsilon. Default: 1e-6\n weight_decay (float): Weight decay. Default: 0.0\n correct_bias (bool): can be set to False to avoid correcting bias\n in Adam (e.g. like in Bert TF repository). Default True.\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):\n if lr < 0.0:\n raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError('Invalid beta parameter: {} - should be in [0.0, 1.0['.format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError('Invalid beta parameter: {} - should be in [0.0, 1.0['.format(betas[1]))\n if not 0.0 <= eps:\n raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(eps))\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)\n super(TansformerAdamW, self).__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n exp_avg.mul_(beta1).add_(1.0 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n step_size = group['lr']\n if group['correct_bias']: # No bias correction for Bert\n bias_correction1 = 1.0 - beta1**state['step']\n bias_correction2 = 1.0 - beta2**state['step']\n step_size = step_size * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n # Just adding the 
square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n # Add weight decay at the end (fixed version)\n if group['weight_decay'] > 0.0:\n p.data.add_(-group['lr'] * group['weight_decay'], p.data)\n\n return loss\n" ]
[ [ "torch.stack" ], [ "torch.LongTensor", "torch.Tensor", "torch.cat" ], [ "torch.zeros" ], [ "torch.nn.utils.clip_grad_norm_", "torch.zeros_like", "torch.cos" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
edouardoyallon/kymatio
[ "eeed6ac9e59bc6645b90fc4e7ff8ce4f693887bc", "eeed6ac9e59bc6645b90fc4e7ff8ce4f693887bc", "eeed6ac9e59bc6645b90fc4e7ff8ce4f693887bc" ]
[ "kymatio/scattering2d/backend/torch_backend.py", "kymatio/scattering3d/backend/numpy_backend.py", "kymatio/scattering2d/frontend/torch_frontend.py" ]
[ "# Authors: Edouard Oyallon, Sergey Zagoruyko\n\nimport torch\nfrom torch.nn import ReflectionPad2d\nfrom collections import namedtuple\n\nBACKEND_NAME = 'torch'\n\ndef iscomplex(x):\n return x.size(-1) == 2\n\ndef isreal(x):\n return x.size(-1) == 1\n\nclass Pad(object):\n def __init__(self, pad_size, input_size, pre_pad=False):\n \"\"\"Padding which allows to simultaneously pad in a reflection fashion\n and map to complex.\n\n Parameters\n ----------\n pad_size : list of 4 integers\n Size of padding to apply [top, bottom, left, right].\n input_size : list of 2 integers\n size of the original signal [height, width].\n pre_pad : boolean, optional\n If set to true, then there is no padding, one simply adds the imaginary part.\n\n Attributes\n ----------\n pad_size : list of 4 integers \n Size of padding to apply [top, bottom, left, right].\n input_size : list of 2 integers\n Size of the original signal [height, width].\n pre_pad : boolean\n If set to true, then there is no padding, one simply adds the imaginary part.\n \n \"\"\"\n self.pre_pad = pre_pad\n self.pad_size = pad_size\n self.input_size = input_size\n\n self.build()\n\n def build(self):\n \"\"\"Builds the padding module.\n\n Attributes\n ----------\n padding_module : ReflectionPad2d\n Pads the input tensor using the reflection of the input\n boundary. \n\n \"\"\"\n pad_size_tmp = list(self.pad_size)\n\n # This allow to handle the case where the padding is equal to the image size\n if pad_size_tmp[0] == self.input_size[0]:\n pad_size_tmp[0] -= 1\n pad_size_tmp[1] -= 1\n if pad_size_tmp[2] == self.input_size[1]:\n pad_size_tmp[2] -= 1\n pad_size_tmp[3] -= 1\n # Pytorch expects its padding as [left, right, top, bottom]\n self.padding_module = ReflectionPad2d([pad_size_tmp[2], pad_size_tmp[3],\n pad_size_tmp[0], pad_size_tmp[1]])\n\n def __call__(self, x):\n \"\"\"Applies padding and maps to complex.\n \n Parameters\n ----------\n x : tensor\n Real tensor input to be padded and sent to complex domain.\n\n Returns\n -------\n output : tensor\n Complex torch tensor that has been padded.\n\n \"\"\"\n batch_shape = x.shape[:-2]\n signal_shape = x.shape[-2:]\n x = x.reshape((-1, 1) + signal_shape)\n if not self.pre_pad:\n x = self.padding_module(x)\n if self.pad_size[0] == self.input_size[0]:\n x = torch.cat([x[:, :, 1, :].unsqueeze(2), x, x[:, :, x.size(2) - 2, :].unsqueeze(2)], 2)\n if self.pad_size[2] == self.input_size[1]:\n x = torch.cat([x[:, :, :, 1].unsqueeze(3), x, x[:, :, :, x.size(3) - 2].unsqueeze(3)], 3)\n\n output = x.new_zeros(x.shape + (2,))\n output[..., 0] = x\n output = output.reshape(batch_shape + output.shape[-3:])\n return output\n\ndef unpad(in_):\n \"\"\"Unpads input.\n \n Slices the input tensor at indices between 1::-1.\n\n Parameters\n ----------\n in_ : tensor_like\n Input tensor.\n\n Returns\n -------\n in_[..., 1:-1, 1:-1] : tensor_like\n Output tensor. Unpadded input.\n\n \"\"\"\n return torch.unsqueeze(in_[..., 1:-1, 1:-1], -3)\n\nclass SubsampleFourier(object):\n \"\"\"Subsampling of a 2D image performed in the Fourier domain\n \n Subsampling in the spatial domain amounts to periodization\n in the Fourier domain, hence the formula.\n\n Parameters\n ----------\n x : tensor_like\n Input tensor with at least 5 dimensions, the last being the real\n and imaginary parts. 
Ideally, the last dimension should be a power\n of 2 to avoid errors.\n k : int\n Integer such that x is subsampled by 2**k along the spatial variables.\n\n Returns\n -------\n out : tensor_like\n Tensor such that its Fourier transform is the Fourier\n transform of a subsampled version of x, i.e. in\n FFT^{-1}(res)[u1, u2] = FFT^{-1}(x)[u1 * (2**k), u2 * (2**k)].\n\n \"\"\"\n def __call__(self, x, k):\n batch_shape = x.shape[:-3]\n signal_shape = x.shape[-3:]\n x = x.view((-1,) + signal_shape)\n y = x.view(-1,\n k, x.size(1) // k,\n k, x.size(2) // k,\n 2)\n\n out = y.mean(3, keepdim=False).mean(1, keepdim=False)\n out = out.reshape(batch_shape + out.shape[-3:])\n return out\n\nclass Modulus(object):\n \"\"\"This class implements a modulus transform for complex numbers.\n\n Usage\n -----\n modulus = Modulus()\n x_mod = modulus(x)\n\n Parameters\n ---------\n x : input tensor\n Complex torch tensor.\n\n Returns\n -------\n output : output tensor \n A tensor with the same dimensions as x, such that output[..., 0]\n contains the complex modulus of x, while output[..., 1] = 0.\n\n \"\"\"\n def __call__(self, x):\n norm = torch.zeros_like(x)\n norm[...,0] = (x[...,0]*x[...,0] +\n x[...,1]*x[...,1]).sqrt()\n return norm\n\ndef fft(x, direction='C2C', inverse=False):\n \"\"\"Interface with torch FFT routines for 2D signals.\n\n Example\n -------\n x = torch.randn(128, 32, 32, 2)\n x_fft = fft(x)\n x_ifft = fft(x, inverse=True)\n\n Parameters\n ----------\n x : tensor\n Complex input for the FFT.\n direction : string\n 'C2R' for complex to real, 'C2C' for complex to complex.\n inverse : bool\n True for computing the inverse FFT.\n NB : If direction is equal to 'C2R', then an error is raised.\n \n Raises\n ------\n RuntimeError\n In the event that we are going from complex to real and not doing\n the inverse fft or in the event x is not contiguous.\n TypeError\n In the event that x does not have a final dimension 2 i.e. not\n complex. \n \n Returns\n -------\n output : tensor\n Result of FFT or IFFT.\n \n \"\"\"\n if direction == 'C2R':\n if not inverse:\n raise RuntimeError('C2R mode can only be done with an inverse FFT.')\n\n if not iscomplex(x):\n raise TypeError('The input should be complex (e.g. 
last dimension is 2).')\n\n if not x.is_contiguous():\n raise RuntimeError('Tensors must be contiguous!')\n\n if direction == 'C2R':\n output = torch.irfft(x, 2, normalized=False, onesided=False) * x.size(-2) * x.size(-3)\n elif direction == 'C2C':\n if inverse:\n output = torch.ifft(x, 2, normalized=False) * x.size(-2) * x.size(-3)\n else:\n output = torch.fft(x, 2, normalized=False)\n\n return output\n\ndef cdgmm(A, B, inplace=False):\n \"\"\"Complex pointwise multiplication.\n \n Complex pointwise multiplication between (batched) tensor A and tensor B.\n\n Parameters\n ----------\n A : tensor\n A is a complex tensor of size (B, C, M, N, 2).\n B : tensor\n B is a complex tensor of size (M, N, 2) or real tensor of (M, N, 1).\n inplace : boolean, optional\n If set to True, all the operations are performed inplace.\n\n Returns\n -------\n C : tensor\n Output tensor of size (B, C, M, N, 2) such that:\n C[b, c, m, n, :] = A[b, c, m, n, :] * B[m, n, :].\n\n \"\"\"\n if not iscomplex(A):\n raise TypeError('The input must be complex, indicated by a last '\n 'dimension of size 2.')\n\n if B.ndimension() != 3:\n raise RuntimeError('The filter must be a 3-tensor, with a last '\n 'dimension of size 1 or 2 to indicate it is real '\n 'or complex, respectively.')\n\n if not iscomplex(B) and not isreal(B):\n raise TypeError('The filter must be complex or real, indicated by a '\n 'last dimension of size 2 or 1, respectively.')\n\n if A.size()[-3:-1] != B.size()[-3:-1]:\n raise RuntimeError('The filters are not compatible for multiplication!')\n\n if A.dtype is not B.dtype:\n raise TypeError('A and B must be of the same dtype.')\n\n if A.device.type != B.device.type:\n raise TypeError('A and B must be both on GPU or both on CPU.')\n\n if A.device.type == 'cuda':\n if A.device.index != B.device.index:\n raise TypeError('A and B must be on the same GPU!')\n\n if isreal(B):\n if inplace:\n return A.mul_(B)\n else:\n return A * B\n else:\n C = A.new(A.size())\n\n A_r = A[..., 0].contiguous().view(-1, A.size(-2)*A.size(-3))\n A_i = A[..., 1].contiguous().view(-1, A.size(-2)*A.size(-3))\n\n B_r = B[...,0].contiguous().view(B.size(-2)*B.size(-3)).unsqueeze(0).expand_as(A_i)\n B_i = B[..., 1].contiguous().view(B.size(-2)*B.size(-3)).unsqueeze(0).expand_as(A_r)\n\n C[..., 0].view(-1, C.size(-2)*C.size(-3))[:] = A_r * B_r - A_i * B_i\n C[..., 1].view(-1, C.size(-2)*C.size(-3))[:] = A_r * B_i + A_i * B_r\n\n return C if not inplace else A.copy_(C)\n\ndef finalize(s0, s1, s2):\n \"\"\"Concatenate scattering of different orders.\n\n Parameters\n ----------\n s0 : tensor\n Tensor which contains the zeroth order scattering coefficents.\n s1 : tensor\n Tensor which contains the first order scattering coefficents.\n s2 : tensor\n Tensor which contains the second order scattering coefficents.\n \n Returns\n -------\n s : tensor\n Final output. 
Scattering transform.\n\n \"\"\"\n if len(s2)>0:\n return torch.cat([torch.cat(s0, -3), torch.cat(s1, -3), torch.cat(s2, -3)], -3)\n else:\n return torch.cat([torch.cat(s0, -3), torch.cat(s1, -3)], -3)\n\nbackend = namedtuple('backend', ['name', 'cdgmm', 'modulus', 'subsample_fourier', 'fft', 'Pad', 'unpad', 'finalize'])\nbackend.name = 'torch'\nbackend.cdgmm = cdgmm\nbackend.modulus = Modulus()\nbackend.subsample_fourier = SubsampleFourier()\nbackend.fft = fft\nbackend.Pad = Pad\nbackend.unpad = unpad\nbackend.finalize = finalize\n", "import numpy as np\nimport warnings\n\nBACKEND_NAME = 'numpy'\nfrom collections import namedtuple\n\ndef complex_modulus(x):\n \"\"\"Compute the complex modulus.\n\n Computes the modulus of x and stores the result in a real numpy array.\n \n Parameters\n ----------\n x : numpy array\n A complex numpy array.\n \n Returns\n -------\n norm : numpy array\n A real numpy array with the same dimensions as x. Real part\n contains complex modulus of x.\n \n \"\"\"\n return np.abs(x)\n\n\n\ndef modulus_rotation(x, module):\n \"\"\"Used for computing rotation invariant scattering transform coefficents.\n \n Parameters\n ----------\n x : tensor\n Size (batchsize, M, N, O).\n module : tensor\n Tensor that holds the overall sum.\n \n Returns\n -------\n output : numpy array\n Numpy array of the same size as input_array. It holds the output of\n the operation::\n \n $\\\\sqrt{\\\\sum_m (\\\\text{input}_\\\\text{array} \\\\star \\\\psi_{j,l,m})^2)}$\n \n which is covariant to 3D translations and rotations.\n \n \"\"\"\n if module is None:\n module = np.zeros_like(x)\n else:\n module = module **2\n module += np.abs(x)**2\n return np.sqrt(module)\n\n\n\ndef _compute_standard_scattering_coefs(input_array, filter, J, subsample):\n \"\"\"Computes convolution and downsamples.\n \n Computes the convolution of input_array with a lowpass filter phi_J\n and downsamples by a factor J.\n \n Parameters\n ----------\n input_array : numpy array \n Size (batchsize, M, N, O).\n filter : numpy array\n Size (M, N, O).\n J : int\n Low pass scale of phi_J.\n subsample : function\n Subsampling function.\n \n Returns\n -------\n output : numpy array \n The result of input_array \\\\star phi_J downsampled by a factor J.\n \n \"\"\"\n low_pass = filter[J]\n convolved_input = cdgmm3d(input_array, low_pass)\n convolved_input = fft(convolved_input, inverse=True)\n return subsample(convolved_input, J)\n\n\ndef _compute_local_scattering_coefs(input_array, filter, j, points):\n \"\"\"Compute convolution and returns particular points.\n\n Computes the convolution of input_array with a lowpass filter phi_j and\n and returns the value of the output at particular points.\n\n Parameters\n ----------\n input_array : numpy array\n Size (batchsize, M, N, O, 2).\n filter : numpy array\n Size (M, N, O, 2)\n j : int\n The lowpass scale j of phi_j\n points : numpy array\n Size (batchsize, number of points, 3)\n\n Returns\n -------\n output : numpy array\n Numpy array of size (batchsize, number of points, 1) with the values\n of the lowpass filtered moduli at the points given.\n \n \"\"\"\n local_coefs = np.zeros((input_array.shape[0], points.shape[1]), dtype=np.complex64)\n low_pass = filter[j+1]\n convolved_input = cdgmm3d(input_array, low_pass)\n convolved_input = fft(convolved_input, inverse=True)\n for i in range(input_array.shape[0]):\n for j in range(points[i].shape[0]):\n x, y, z = points[i, j, 0], points[i, j, 1], points[i, j, 2]\n local_coefs[i, j, 0] = convolved_input[\n i, int(x), int(y), int(z), 0]\n 
return local_coefs\n\n\n\ndef subsample(input_array, j):\n \"\"\"Downsamples.\n\n Parameters\n ----------\n input_array : numpy array\n Input numpy array.\n j : int\n Downsampling factor.\n\n Returns\n -------\n out : numpy array\n Downsampled numpy array. \n \n \"\"\"\n return np.ascontiguousarray(input_array[..., ::2 ** j, ::2 ** j, ::2 ** j])\n\n\ndef compute_integrals(input_array, integral_powers):\n \"\"\"Computes integrals.\n\n Computes integrals of the input_array to the given powers.\n\n Parameters\n ----------\n input_array: numpy array\n Size (B, M, N, O), B is batch_size, M, N, O are spatial dims.\n\n integral_powers: list\n List of P positive floats containing the p values used to\n compute the integrals of the input_array to the power p (l_p\n norms).\n\n Returns\n -------\n integrals: numpy array\n Numpy array of size (B, P) containing the integrals of the input_array\n to the powers p (l_p norms).\n\n \"\"\"\n integrals = np.zeros((input_array.shape[0], len(integral_powers)),dtype=np.complex64)\n for i_q, q in enumerate(integral_powers):\n integrals[:, i_q] = (input_array ** q).reshape((\n input_array.shape[0], -1)).sum(axis=1)\n return integrals\n\n\ndef fft(x, direction='C2C', inverse=False):\n \"\"\"FFT of a 3d signal.\n\n Example\n -------\n x = numpy.random.randn(128, 32, 32, 32, 2).view(numpy.complex64)\n x_fft = fft(x)\n x_ifft = fft(x, inverse=True)\n\n Parameters\n ----------\n input : numpy array\n Complex input for the FFT.\n inverse : bool\n True for computing the inverse FFT.\n\n Raises\n ------\n RuntimeError\n Raised in event we attempt to map from complex to real without\n inverse FFT.\n\n Returns\n -------\n output : numpy array\n Result of FFT or IFFT.\n\n \"\"\"\n if direction == 'C2R':\n if not inverse:\n raise RuntimeError('C2R mode can only be done with an inverse FFT.')\n\n if direction == 'C2R':\n output = np.real(np.fft.ifftn(x, axes=(-3,-2,-1)))\n elif direction == 'C2C':\n if inverse:\n output = np.fft.ifftn(x, axes=(-3,-2,-1))\n else:\n output = np.fft.fftn(x, axes=(-3,-2,-1))\n return output\n\n\ndef cdgmm3d(A, B, inplace=False):\n \"\"\"Complex pointwise multiplication.\n\n Complex pointwise multiplication between (batched) numpy array A and\n numpy array B.\n\n Parameters\n ----------\n A : numpy array\n A is a complex numpy array of size (B, C, M, N).\n B : numpy array\n B is a complex or real numpy array of size (M, N).\n inplace : boolean, optional\n If set to True, all the operations are performed inplace.\n\n Raises\n ------\n RuntimeError\n Raised in event B is not three dimensional.\n \n Returns\n -------\n C : numpy array\n Output numpy array of size (B, C, M, N) such that:\n C[b, c, m, n, :] = A[b, c, m, n, :] * B[m, n, :].\n\n \"\"\"\n if B.ndim != 3:\n raise RuntimeError('The dimension of the second input must be 3.')\n\n if inplace:\n return np.multiply(A, B, out=A)\n else:\n return A * B\n\n\ndef finalize(s_order_1, s_order_2, max_order):\n \"\"\"Concatenate scattering of different orders.\n \n Parameters\n ----------\n s0 : numpy array\n numpy array which contains the zeroth order scattering coefficents.\n s1 : numpy array\n numpy array which contains the first order scattering coefficents.\n s2 : numpy array\n numpy array which contains the second order scattering coefficents.\n \n Returns\n -------\n s : numpy array\n Final output. 
Scattering transform.\n \n \"\"\"\n s_order_1 = np.concatenate([np.expand_dims(arr, 2) for arr in s_order_1], axis=2)\n if max_order == 2:\n s_order_2 = np.concatenate([np.expand_dims(arr, 2) for arr in s_order_2], axis=2)\n return np.concatenate([s_order_1, s_order_2], axis=1)\n else:\n return s_order_1\n\n\ndef aggregate(x):\n \"\"\"Aggregation of scattering coefficents.\n\n Parameters\n ----------\n x : list \n List of numpy arrays. \n\n Returns\n -------\n out : numpy array\n Stacked scattering coefficents.\n\n \"\"\"\n return np.concatenate([np.expand_dims(arr, 1) for arr in x], axis=1)\n\nbackend = namedtuple('backend', ['name', 'cdgmm3d', 'fft', 'finalize', 'modulus', 'modulus_rotation', 'subsample',\n 'compute_integrals', 'aggregate'])\n\nbackend.name = 'numpy'\nbackend.cdgmm3d = cdgmm3d\nbackend.fft = fft\nbackend.aggregate = aggregate\nbackend.finalize = finalize\nbackend.modulus = complex_modulus\nbackend.modulus_rotation = modulus_rotation\nbackend.subsample = subsample\nbackend.compute_integrals = compute_integrals\nbackend._compute_standard_scattering_coefs = _compute_standard_scattering_coefs\nbackend._compute_local_scattering_coefs = _compute_local_scattering_coefs\n\n", "__all__ = ['Scattering2DTorch']\n\nimport torch\nimport torch.nn as nn\n\nfrom kymatio.scattering2d.core.scattering2d import scattering2d\nfrom ..filter_bank import filter_bank\nfrom ..utils import compute_padding\nfrom ...frontend.torch_frontend import ScatteringTorch\n\n\nclass Scattering2DTorch(ScatteringTorch):\n \"\"\" Main module implementing the scattering transform in 2D.\n The scattering transform computes two wavelet transform followed\n by modulus non-linearity.\n It can be summarized as::\n\n S_J x = [S_J^0 x, S_J^1 x, S_J^2 x]\n\n for::\n\n S_J^0 x = x * phi_J\n S_J^1 x = [|x * psi^1_lambda| * phi_J]_lambda\n S_J^2 x = [||x * psi^1_lambda| * psi^2_mu| * phi_J]_{lambda, mu}\n\n where * denotes the convolution (in space), phi_J is a lowpass\n filter, psi^1_lambda is a family of bandpass\n filters and psi^2_mu is another family of bandpass filters.\n Only Morlet filters are used in this implementation.\n Convolutions are efficiently performed in the Fourier domain.\n\n Example\n -------\n # 1) Define a Scattering2D object as:\n s = Scattering2D_torch(J, shape=(M, N))\n # where (M, N) is the image size and 2**J the scale of the scattering\n # 2) Forward on an input Tensor x of shape B x M x N,\n # where B is the batch size.\n result_s = s(x)\n\n Parameters\n ----------\n J : int\n Log-2 of the scattering scale.\n shape : tuple of ints\n Spatial support (M, N) of the input.\n L : int, optional\n Number of angles used for the wavelet transform. Defaults to `8`.\n max_order : int, optional\n The maximum order of scattering coefficients to compute. Must be either\n `1` or `2`. Defaults to `2`.\n pre_pad : boolean, optional\n Controls the padding: if set to False, a symmetric padding is applied\n on the signal. If set to True, the software will assume the signal was\n padded externally. 
Defaults to `False`.\n backend : object, optional\n Controls the backend which is combined with the frontend.\n\n Attributes\n ----------\n J : int\n Log-2 of the scattering scale.\n shape : tuple of int\n Spatial support (M, N) of the input.\n L : int, optional\n Number of angles used for the wavelet transform.\n max_order : int, optional\n The maximum order of scattering coefficients to compute.\n Must be either equal to `1` or `2`.\n pre_pad : boolean\n Controls the padding: if set to False, a symmetric padding is applied\n on the signal. If set to True, the software will assume the signal was\n padded externally.\n Psi : dictionary\n Contains the wavelets filters at all resolutions. See\n filter_bank.filter_bank for an exact description.\n Phi : dictionary\n Contains the low-pass filters at all resolutions. See\n filter_bank.filter_bank for an exact description.\n M_padded, N_padded : int\n Spatial support of the padded input.\n\n Notes\n -----\n The design of the filters is optimized for the value L = 8.\n\n pre_pad is particularly useful when cropping bigger images because\n this does not introduce border effects inherent to padding.\n\n \"\"\"\n def __init__(self, J, shape, L=8, max_order=2, pre_pad=False, backend=None):\n super(Scattering2DTorch, self).__init__()\n self.pre_pad, self.L, self.backend, self.J, self.shape, self.max_order = pre_pad, L, backend, J, shape,\\\n max_order\n self.build()\n\n def build(self):\n self.M, self.N = self.shape\n # use the default backend if no backend is provided\n if not self.backend:\n from ..backend.torch_backend import backend\n self.backend = backend\n elif not self.backend.name.startswith('torch'):\n raise RuntimeError('This backend is not supported.')\n\n if 2 ** self.J > self.shape[0] or 2 ** self.J > self.shape[1]:\n raise RuntimeError('The smallest dimension should be larger than 2^J.')\n self.M_padded, self.N_padded = compute_padding(self.M, self.N, self.J)\n # pads equally on a given side if the amount of padding to add is an even number of pixels, otherwise it adds an extra pixel\n self.pad = self.backend.Pad([(self.M_padded - self.M) // 2, (self.M_padded - self.M+1) // 2, (self.N_padded - self.N) // 2,\n (self.N_padded - self.N + 1) // 2], [self.M, self.N], pre_pad=self.pre_pad)\n self.unpad = self.backend.unpad\n self.create_and_register_filters()\n\n def create_and_register_filters(self):\n \"\"\" This function run the filterbank function that\n will create the filters as numpy array, and then, it\n saves those arrays as module's buffers.\"\"\"\n\n # Create the filters\n filters = filter_bank(self.M_padded, self.N_padded, self.J, self.L)\n n = 0\n self.phi, self.psi = filters['phi'], filters['psi']\n for c, phi in self.phi.items():\n if isinstance(c, int):\n self.phi[c] = torch.from_numpy(self.phi[c]).unsqueeze(-1) # add a trailing singleton dimension to mark\n # it as non-complex\n self.register_buffer('tensor' + str(n), self.phi[c])\n n += 1\n\n for j in range(len(self.psi)):\n for k, v in self.psi[j].items():\n if isinstance(k, int):\n self.psi[j][k] = torch.from_numpy(v).unsqueeze(-1) # add a trailing singleton dimension to mark it\n # as non-complex\n self.register_buffer('tensor' + str(n), self.psi[j][k])\n n += 1\n\n def scattering(self, input):\n # each time scattering is run, one needs to make sure self.psi and self.phi point to\n # the correct buffers\n n = 0\n buffer_dict = dict(self.named_buffers())\n for c, phi in self.phi.items():\n if isinstance(c, int):\n self.phi[c] = buffer_dict['tensor' + str(n)]\n n += 1\n\n 
for j in range(len(self.psi)):\n for k, v in self.psi[j].items():\n if isinstance(k, int):\n self.psi[j][k] = buffer_dict['tensor' + str(n)]\n n += 1\n\n return scattering2d(input, self.pad, self.unpad, self.backend, self.J, self.L, self.phi, self.psi, self.max_order)\n\n def forward(self, input):\n \"\"\"Forward pass of the scattering.\n\n Parameters\n ----------\n input : tensor\n Tensor with k+2 dimensions :math:`(n_1, ..., n_k, M, N)` where :math:`(n_1, ...,n_k)` is\n arbitrary. Currently, k=2 is hardcoded. :math:`n_1` typically is the batch size, whereas\n :math:`n_2` is the number of\n input channels.\n\n Returns\n -------\n S : tensor\n Scattering of the input, a tensor with k+3 dimensions :math:`(n_1, ...,n_k, D, Md, Nd)`\n where :math:`D` corresponds to a new channel dimension and :math:`(Md, Nd)` are\n downsampled sizes by a factor :math:`2^J`. Currently, k=2 is hardcoded.\n\n \"\"\"\n if not torch.is_tensor(input):\n raise TypeError('The input should be a PyTorch Tensor.')\n\n if len(input.shape) < 2:\n raise RuntimeError('Input tensor must have at least two dimensions.')\n\n if not input.is_contiguous():\n raise RuntimeError('Tensor must be contiguous!')\n\n if (input.size(-1) != self.N or input.size(-2) != self.M) and not self.pre_pad:\n raise RuntimeError('Tensor must be of spatial size (%i,%i)!' % (self.M, self.N))\n\n if (input.size(-1) != self.N_padded or input.size(-2) != self.M_padded) and self.pre_pad:\n raise RuntimeError('Padded tensor must be of spatial size (%i,%i)!' % (self.M_padded, self.N_padded))\n\n return self.scattering(input)\n\n def loginfo(self):\n return 'Torch frontend is used.'\n\n" ]
[ [ "torch.nn.ReflectionPad2d", "torch.cat", "torch.zeros_like", "torch.unsqueeze", "torch.ifft", "torch.fft", "torch.irfft" ], [ "numpy.expand_dims", "numpy.sqrt", "numpy.abs", "numpy.multiply", "numpy.ascontiguousarray", "numpy.fft.fftn", "numpy.concatenate", "numpy.fft.ifftn", "numpy.zeros_like", "numpy.zeros" ], [ "torch.from_numpy", "torch.is_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
andreamusso96/RSE-Distance
[ "4f1ec9d25f9de71d258de15c55491c30a3e76d05" ]
[ "RandomWalkSimulatorCUDA.py" ]
[ "import cupy as cp # noqa\nimport cupyx.scipy.sparse as sparse # noqa\nimport numpy as np\nfrom graph_tool.spectral import adjacency\nfrom tqdm import tqdm\nimport torch\nfrom torch.utils.dlpack import to_dlpack\n\n\nclass RandomWalkSimulator:\n \"\"\"\n\n The class RandomWalkSimulator is designed to run a fast simulations of a random walk on a graph\n and compute the meeting times of two walks\n\n \"\"\"\n\n def __init__(self, g):\n \"\"\"\n\n Initialises a RandomWalkSimulator\n\n Args:\n g (graph_tool.Graph): the graph on which you want to simulate the random walk\n\n \"\"\"\n\n # Device name\n self.n_nodes = g.num_vertices()\n\n # Random walk matrix\n self.P = self.random_walk_matrix(g=g)\n\n ###################################################################### PUBLIC METHODS ###############################################################################\n\n def get_meeting_times_delta_g(self, max_time_steps, n_samples):\n \"\"\"\n\n Gets the meeting times necessary to compute delta_g, i.e., it compute n_samples of the meeting time of two randomly started walks.\n\n Args:\n max_time_steps (int): the number of time steps for which you want to simulate the random walks at most\n n_samples (int): the number of samples of the meeting time that you want\n\n Returns:\n (1D np.ndarray): a 1D np.ndarray in which each entry is one sample of the meeting time of two randomly started walks. If the walks met after max_time_steps the entry is equal to -1 by default.\n\n \"\"\"\n\n start_position = cp.random.randint(low=0, high=self.n_nodes, size=[n_samples])\n meeting_times = self.get_meeting_times(max_time_steps=max_time_steps, start_position=start_position)\n\n meeting_times_flat = np.ndarray.flatten(meeting_times)\n meeting_times_flat_without_diagonal = np.delete(meeting_times_flat, range(0, len(meeting_times_flat), len(meeting_times_flat) + 1), 0)\n \n return meeting_times_flat_without_diagonal\n\n\n def get_meeting_times_rse_dist(self, max_time_steps, vertices, n_samples_per_vertex):\n \"\"\"\n\n Gets the meeting times necessary to compute the RSE distance between the vertices passed as parameters. \n If a list of vertices [v1, v2, v3, v4] is passed, the method returns n samples of the meeting time of walk started at vi with the walk started at vj,\n for each pair (vi,vj) in [v1, v2, v3, v4].\n\n Args:\n max_time_steps (int): the number of time steps for which you want to simulate the random walks at most.\n vertices (list[int]): the vertices for which we want to compute the meeting time\n n_samples_per_vertex (int): the number of samples of the meeting time per pair of vertex \n\n Returns:\n (dict[tuple(int,int): 1D np.ndarray]): A dictionary where the key is a tuple (i,j) of vertices and the value is an array containing samples of the meeting\n time of the walks started at those two vertices. 
\n \n \"\"\"\n\n start_position = self.start_pos_with_focal_vertices(focal_vertices=vertices, n_samples_per_focal_vertex=n_samples_per_vertex)\n meeting_times = self.get_meeting_times(max_time_steps=max_time_steps, start_position=start_position)\n\n meeting_times_vw = {}\n for i, v in enumerate(vertices):\n mts_v = meeting_times[i*n_samples_per_vertex: (i + 1)*n_samples_per_vertex, :]\n for j, w in enumerate(vertices):\n if v != w:\n mts_vw = mts_v[: , j*n_samples_per_vertex:(j+1)*n_samples_per_vertex]\n meeting_times_vw[(v,w)] = np.ndarray.flatten(mts_vw)\n\n return meeting_times_vw\n\n ###################################################################### PRIVATE METHODS ###############################################################################\n\n @staticmethod\n def random_walk_matrix(g):\n \"\"\"\n\n Returns the transition matrix of the random walk\n\n Args:\n g (graph_tool.Graph): the graph for which we want to build the transition matrix\n\n Returns:\n (torch.sparse_coo_tensor): the transition matrix of the random walk. This is defined as P(i,j) = 1/deg(i)\n\n \"\"\"\n\n # Build the transition matrix\n A = sparse.csr_matrix(adjacency(g))\n\n degrees = cp.array(1 / g.get_out_degrees(g.get_vertices()))\n ind = cp.arange(degrees.shape[0])\n D = sparse.csr_matrix((degrees, (ind, ind)), shape=(ind.shape[0], ind.shape[0]))\n\n P = D * A\n\n return P\n \n @staticmethod\n def start_pos_with_focal_vertices(focal_vertices, n_samples_per_focal_vertex):\n \"\"\"\n\n Creates a starting configuration where we have n_samples_per_focal_vertex random walks starting at each focal vertex \n\n Args:\n focal_vertices (list): a list of integers, which are the vertices at which we want our walks to start out\n n_samples_per_focal_vertex (int): the number of walks we want to start per focal vertex\n\n Returns:\n (1D cp.ndarray): An array of the type [1,1,1,2,2,2,3,3,3] with the starting position of the walks. Every focal vertex is repeated n_samples_per_focal_vertex times.\n \n \"\"\"\n\n start_pos = cp.zeros(len(focal_vertices)*n_samples_per_focal_vertex, dtype=cp.int64)\n \n counter = 0\n for v in focal_vertices:\n for _ in range(n_samples_per_focal_vertex):\n start_pos[counter] = v\n counter += 1\n \n return start_pos\n \n\n def get_meeting_times(self, max_time_steps, start_position):\n \"\"\"\n\n This method simulates random walks started at start_position and keeps track of their meeting times. \n Then it returns these meeting times, once all the walks have met or max_time_steps has expired.\n\n Args:\n max_time_steps (int): the number of time steps for which you want to simulate the random walks at most\n start_position (1D cp.ndarray, optional): a 1D cp.ndarray in which each entry is the starting positions of one sample of the random walk. \n\n Returns:\n (2D cp.ndarray): a 2D cp.ndarray with entry m,n being the meeting time between sample walk m and sample walk n. If two walks never meet the value of the m,n entry is -1\n\n \"\"\"\n\n n_samples = len(start_position)\n\n meeting_times = -1 * cp.ones([n_samples, n_samples])\n\n # Fix the starting position and check if walks meet at the starting position\n start_pos, array_start = self.start_position(n_samples=n_samples, start_position=start_position)\n meeting_bool = self.check_meetings(current_pos=start_pos, meeting_times=meeting_times)\n meeting_times = meeting_times + meeting_bool\n\n # Run the walks and at each time step check if some walks have met. 
Also, end the loop if all walks have met.\n # Here a while condition would look better but we could not use tqdm to time it\n array_current = array_start\n for t in tqdm(range(max_time_steps)):\n # Find next position of the walks\n next_pos, array_next = self.next_step(array_current=array_current)\n array_current = array_next\n\n # Check meetings at this round\n meeting_bool = self.check_meetings(current_pos=next_pos, meeting_times=meeting_times)\n meeting_times = meeting_times + (t + 1) * meeting_bool\n\n # Verify if all walks have met\n complete = self.check_complete(meeting_times=meeting_times)\n if complete:\n return cp.asnumpy(meeting_times)\n\n return cp.asnumpy(meeting_times)\n\n @staticmethod\n def check_meetings(current_pos, meeting_times):\n \"\"\"\n\n\n This method checks if the walks meet for the first time at the current step.\n\n Args:\n current_pos (1D cp.ndarray): the current position of the walks (i.e. the names of the vertices at which the walks are)\n meeting_times (2D cp.ndarray): the matrix of meeting times. Entry i, j is the meeting time of walk i with walk j.\n The default value of -1 is set when the walks have not met yet.\n\n Returns:\n (2D cp.ndarray of boolean values): A matrix with entry (i,j) being true if walk i has met walk j for the first time at current_position\n and false otherwise (i.e. either the walks are not at the same position, or they have met before).\n\n \"\"\"\n\n # Check if the walks are at the same position\n auxiliary_1 = current_pos.reshape(-1, 1) * cp.ones((current_pos.shape[0], current_pos.shape[0]))\n same_pos_bool = (current_pos == auxiliary_1)\n\n # Check if the walks have met before\n auxiliary_2 = -1 * cp.ones((meeting_times.shape[0], meeting_times.shape[1]))\n not_already_met_bool = (meeting_times == auxiliary_2)\n\n # If walk i,j are at the same place and have never met before set entry i,j = True, else set entry i,j = False.\n first_meeting_bool = same_pos_bool * not_already_met_bool\n\n return first_meeting_bool\n\n @staticmethod\n def check_complete(meeting_times):\n \"\"\"\n\n This method check if all the walks have met\n\n Args:\n meeting_times (2D cp.ndarray): the matrix of meeting times. Entry i, j is the meeting time of walk i with walk j.\n The default value of -1 is set when the walks have not met yet.\n\n Returns:\n (bool): True if all the walks have met and False otherwise\n\n \"\"\"\n\n # Compares an auxiliary matrix of all -1 with the meeting time matrix \n auxiliary = -1 * cp.ones((meeting_times.shape[0], meeting_times.shape[1]))\n not_already_met_bool = (meeting_times == auxiliary)\n\n # Counts how many non zero entries are in the matrix not_already_met_bool\n nb_not_met = int(cp.count_nonzero(not_already_met_bool))\n\n if nb_not_met == 0:\n return True\n else:\n return False\n\n def next_step(self, array_current):\n \"\"\"\n\n Given the current step of all the samples of the random walk, computes the next step for all the samples of the random walk\n\n Args:\n array_current (2D cp.ndarray): A 2D cp.ndarray with rows indicating the current position of the random walk. \\\n Each row is of the form [0,0, ..., 0, 1, 0, ..., 0]. The entry 1 indicates the current position of the random walk.\n\n Returns:\n (cp.ndarray, cupyx.scipy.sparse.csr_matrix): A tuple consisting of: \\\n * A 1D cp.ndarray with for each sample of the random walk the index of the vertex where the rw will jump next \\\n * A 2D cupyx.scipy.sparse.csr_matrix with rows indicates the next position of the random walk of a sample. 
\\\n Each row is of the form [0,0, ..., 0, 1, 0, ..., 0]. The entry 1 indicates the next position of the random walk.\n\n \"\"\"\n\n # Compute the probabilities of moving to given neighbors by multiplying sparse matrices current_step and P\n proba_next_pos = array_current * self.P\n\n # Sample index of next step of the random walk.\n next_pos = self.compute_next_position_from_proba(proba_next_pos)\n\n # Create indices for sparse matrix\n ind_ptr = cp.arange(array_current.shape[0] + 1, dtype=cp.int64)\n\n # Create values for sparse matrix\n values = cp.ones(array_current.shape[0], dtype=cp.float64)\n\n # Construct a 2D sparse tensor with rows of the form [0,0, ..., 0, 1, 0, ..., 0], where the entry 1 indicates the position of the rw\n array_next = sparse.csr_matrix((values, next_pos, ind_ptr), shape=(array_current.shape[0], array_current.shape[1]))\n\n # One could initialise the sparse matrix as follows (more intuitive)\n # array_next = sparse.csr_matrix((values, (ind_row, next_pos)), shape=(array_current.shape[0],array_current.shape[1]))\n # However, this slows down the code remarkably\n\n return next_pos, array_next\n\n @staticmethod\n def compute_next_position_from_proba(proba_next_pos):\n \"\"\"\n\n For each row i of proba_next_pos, corresponding to one random walk, it samples the column index j with the probability specified in the entry proba_next_pos[i][j].\n The index j corresponds to the next position of the walk\n\n Args:\n proba_next_pos (2D cupyx.scipy.sparse.csr_matrix): a matrix with entry i,j being the probability of moving from i to j for the random walk. \\\n The matrix has axis 0 of length n_samples (i.e. number of walks) and axis 1 of length n_nodes (i.e. the probability \\\n for the walk to move at each one of those nodes)\n\n Returns:\n (1D cp.ndarray): the next position of the walks, randomly sampled from proba_next_pos\n\n \"\"\"\n\n # This method is actually 3xfaster if only we could write the cuda code for it!! \n # Indeed, instead of transforming proba_next_pos into an array with a bunch of zeros, we could only\n # consider the entries in which we are interested. \n\n # Changes a cupy array into a torch tensor\n proba_next_pos_torch = torch.as_tensor(proba_next_pos.toarray(), device='cuda')\n\n # Samples stuff using torch\n next_pos_torch = torch.multinomial(proba_next_pos_torch, 1).flatten()\n\n # Converts back to cupy format\n next_pos = cp.fromDlpack(to_dlpack(next_pos_torch))\n\n return next_pos\n\n def start_position(self, n_samples, start_position):\n \"\"\"\n\n Randomly chooses starting vertices for each sample of the random walks\n\n Args:\n n_samples (int): the number of random walks you want to sample\n sp (1D cp.ndarray): starting positions for each sample of the walk (if None random starting positions are selected)\n\n Returns:\n [(cp.ndarray, cupyx.scipy.sparse.csr_matrix)]: A tuple consisting of: \\\n * A 1D cp.ndarray with for each sample of the random walks the index of the starting vertex of the random walks \\\n * A 2D cupyx.scipy.sparse.csr_matrix with rows indicates the starting position of the random walk of a sample. \\\n Each row is of the form [0,0, ..., 0, 1, 0, ..., 0]. 
The entry 1 indicates the position of the random walk.\n\n \"\"\"\n \n # Set the start position\n start_pos = start_position\n\n # Create indices for sparse matrix\n ind_ptr = cp.arange(n_samples + 1, dtype=cp.int64)\n\n # Create values for sparse matrix\n values = cp.ones(n_samples, dtype=cp.float64)\n\n # Construct a 2D sparse tensor with rows of the form [0,0, ..., 0, 1, 0, ..., 0], where the entry 1 indicates the position of the rw\n array_start = sparse.csr_matrix((values, start_pos, ind_ptr), shape=(n_samples, self.n_nodes))\n\n # One could initialise the sparse matrix as follows (more intuitive)\n # array_next = sparse.csr_matrix((values, (ind_row, next_pos)), shape=(array_current.shape[0],array_current.shape[1]))\n # However, this slows down the code remarkably\n\n return start_pos, array_start" ]
[ [ "numpy.ndarray.flatten", "torch.utils.dlpack.to_dlpack", "torch.multinomial" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
EfficientAI/efficient_cv
[ "e308f229e4d99da86ad56f87f3a78b2c81f27ca5" ]
[ "model_prototype/utils/data_transformation_utils.py" ]
[ "import numpy as np\nfrom scipy.signal import resample\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import RobustScaler\n\n\ndef upsample_signal(data, sampling_factor, sampler=None):\n \"\"\"\n data is a time series sequence(nd_array) numpy data\n upsampling uses fourier interpolation.\n \"\"\"\n return resample(data, sampling_factor*data.shape[0])\n\n\ndef downsample_signal(data, sampling_factor, sampler=None):\n \"\"\"\n data is time series sequenced (nd_array) numpy data\n downsample just takes the average sampling_factor points.\n nr_data points should be divisible by sampling_factor\n \"\"\"\n reshaped = data.reshape(data.shape[0]//sampling_factor, sampling_factor,\n -1)\n return reshaped.mean(axis=1)\n\n\ndef generic_sampler(data, sampling_rate, sampler):\n \"\"\"\n apply sampler on numpy data with sampling rate\n \"\"\"\n data = data.reshape((int(data.shape[0]/sampling_rate)), sampling_rate)\n data = sampler(data, axis=1)\n return data\n\n\ndef standardizer_z_score(data, verbose=False):\n \"\"\"\n data is a time seriese sequence (nd_array) numpy data\n normalize the data across time series by calculating mean and variance\n print the mean and variance if verbose is true.\n Here we standardize the data with z-score normalization.\n This is supposedly work only if data has gaussian distribution.\n\n Other normalization procedure to explore:\n * Median normalization\n * Sigmoid normalization\n * Tanh normalization\n \"\"\"\n scaler = StandardScaler()\n scaler.fit(data)\n scaled_data = scaler.transform(data)\n if verbose:\n print(\"mean: \", scaler.mean_, \" var: \", scaler.var_)\n return scaled_data\n\n\ndef normalizer_min_max(data, verbose=False):\n \"\"\"\n Normalize the data in range 0 to 1. Supresses the scaler variations,\n but do not assume any gaussian distribution (this assumption is with\n standardization)\n \"\"\"\n scaler = MinMaxScaler()\n scaler.fit(data)\n scaled_data = scaler.transform(data)\n if verbose:\n print(\"min: \", scaler.data_min_, \" max: \", scaler.data_max_)\n return scaled_data\n\n\ndef normalizer_median_quantiles(data, verbose=False):\n \"\"\"\n Normalize the data in range 75th and 25th percentile\n This normalization is robust to outliers\n \"\"\"\n scaler = RobustScaler()\n scaler.fit(data)\n scaled_data = scaler.transform(data)\n if verbose:\n print(\"center: \", scaler.center_, \" scale: \", scaler.scale_)\n return scaled_data\n\n# #################################### Test ###################################\n\n\ndef test_upsample_signal():\n a = np.array([[1, 6], [2, 7], [3, 8], [4, 9]])\n b = downsample_signal(a, 2)\n print(b)\n\n\ndef test_downsample_signal():\n a = np.array([[1, 6], [2, 7], [3, 8], [4, 9]])\n b = downsample_signal(a, 2)\n print(b)\n\n\ndef test_standardizer_z_score():\n a = np.random.multivariate_normal(mean=[12, 3], cov=[[2, 0], [0, 5]],\n size=100)\n b = standardizer_z_score(a, True)\n print(b)\n\n\ndef test_normalizer_min_max():\n a = np.random.multivariate_normal(mean=[12, 3], cov=[[2, 0], [0, 5]],\n size=100)\n b = normalizer_min_max(a, True)\n print(b)\n\n\ndef test_normalizer_median_quantiles():\n a = np.random.multivariate_normal(mean=[12, 3], cov=[[2, 0], [0, 5]],\n size=100)\n b = normalizer_median_quantiles(a, True)\n print(b)\n\n\nif __name__ == \"__main__\":\n pass\n" ]
[ [ "sklearn.preprocessing.RobustScaler", "numpy.random.multivariate_normal", "scipy.signal.resample", "sklearn.preprocessing.StandardScaler", "numpy.array", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
bossauh/vocale
[ "5d6c515a8ab82076e37039846fc3d2e676c3a0e4" ]
[ "vocale/recognizer.py" ]
[ "import asyncio\nimport json\nimport os\nimport time\nimport warnings\nimport wave\nfrom queue import Queue\nfrom typing import Callable\n\nimport numpy as np\nimport pvporcupine\nimport sounddevice as sd\nimport vosk\nfrom fluxhelper import osInterface\nfrom speech_recognition import AudioFile, Recognizer, UnknownValueError\nfrom tensorflow.keras.models import load_model\nfrom vosk import SetLogLevel\n\nRATE = 16000\nDURATION = 0.5\nCHANNELS = 1\nCHUNK = 512\nMAX_FREQ = 18\n\n\n# Disable logging\nwarnings.filterwarnings(\"ignore\")\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\nSetLogLevel(-1)\n\n\nclass VAD:\n\n \"\"\"\n Main Voice activity detection class.\n This uses deep learning to predict whether a piece of audio is considered a speech or not a speech.\n\n Parameters\n ----------\n `modelPath` : str\n path to the model.h5 file.\n `sensitivity : float\n how sensitive the detection is.\n\n Methods\n -------\n `isSpeech(stream: bytes)` :\n returns True if the classified stream is a voice and False if not.\n\n \"\"\"\n\n def __init__(self, modelPath: str, sensitivity: float = 0.90):\n self.model = load_model(modelPath)\n\n self.buffer = []\n self.sensitivity = sensitivity\n\n async def _formatPredictions(self, predictions) -> list:\n\n \"\"\"\n Format the predictions into a more readable and easy to traverse format.\n \"\"\"\n\n predictions = [[i, float(r)] for i, r in enumerate(predictions)]\n predictions.sort(key=lambda x: x[1], reverse=True)\n return predictions\n\n async def isSpeech(self, stream: bytes) -> bool:\n\n \"\"\"\n Makes a prediction from the given stream bytes.\n\n Parameters\n ----------\n `stream` : bytes\n raw bytes stream (usually retrieved from pyaudio's .read function or sounddevice)\n\n Returns True if the classified stream is a voice and False if not.\n \"\"\"\n\n # Convert the raw streams into a numpy array and get the decibels\n arr = np.frombuffer(stream, dtype=np.int16)\n db = 20 * np.log10(np.abs(np.fft.rfft(arr[:2048])))\n\n # Collect decibel values from relevent frequencies (MAX_FREQ)\n features = list(np.round(db[3:MAX_FREQ], 2))\n self.buffer.append(features)\n\n if len(self.buffer) == int(RATE / CHUNK * DURATION):\n total = np.array([x for y in self.buffer for x in y])\n self.buffer.clear()\n\n # Make the prediction\n predictions = self.model(np.array([total]))[0]\n predictions = await self._formatPredictions(predictions)\n\n index, probability = predictions[0]\n if index == 1 and probability >= self.sensitivity:\n # 1 is the index of speech and 0 is non speech\n return True\n return False\n\n\nclass SpeechRecognizer:\n def __init__(\n self,\n wakewords: list,\n wakewordSensitivities: list,\n vadPath: str,\n vadThreshold: float,\n voskPath: str,\n savePath: str,\n callback: Callable,\n loop: asyncio.BaseEventLoop,\n offline: bool = False,\n device: int = None,\n **kwargs,\n ) -> None:\n\n # Class parameters\n self.wakewords = wakewords\n self.offline = offline\n self.savePath = savePath\n self.voskPath = voskPath\n self.device = device\n self.loop = loop\n self.sensitivities = wakewordSensitivities\n self._callback = callback\n\n # Class kwarg parameters\n self.speechLengths = kwargs.get(\"speechLengths\", (6.0, 0.9))\n self.speechLengthMultiplier = kwargs.get(\"speechLengthMultiplier\", 0.15)\n self.beforeWokeBufferLimit = kwargs.get(\"beforeWokeBufferLimit\", 200)\n self.googleRecognizerKey = kwargs.get(\"googleRecognizerKey\", None)\n self.disableVosk = kwargs.get(\"disableVosk\", False)\n\n # Empty string convert to None\n if 
self.googleRecognizerKey == \"\":\n self.googleRecognizerKey = None\n\n # Initialize vosk recognizer\n\n if not self.disableVosk:\n self.voskModel = vosk.Model(self.voskPath)\n self.vosk = None\n self.restartVosk()\n\n # Initialize speechrecognition module\n self.srRecognizer = Recognizer()\n\n # Initialize other libraries\n w = [x for x in self.wakewords if x in pvporcupine.KEYWORDS]\n self.porcupine = None\n\n if w:\n self.porcupine = pvporcupine.create(\n keywords=w, sensitivities=self.sensitivities\n )\n\n self.vad = VAD(vadPath, vadThreshold)\n\n self.done = False\n self.listen = True\n self.woke = False\n\n self._speechLength = self.speechLengths[0]\n self._frames = {\"beforeWoke\": [], \"afterWoke\": []}\n self._followup = False\n self._q = Queue()\n self._ready = False\n self._speech = True\n self._startSpeechLength = self.speechLengths[0]\n self._realSpeechLength = self.speechLengths[1]\n self._lastRecognizedTime = time.time()\n\n self.__count = 0\n self.__prevSpeaking = None\n self.__length = 0\n\n # User callback parameters\n self.callbackParams = {}\n\n def __callback(self, data, frames, time_, status) -> None:\n self._q.put(bytes(data))\n\n def _reset(self) -> None:\n self._frames = {\"beforeWoke\": [], \"afterWoke\": []}\n \n if not self.disableVosk:\n self.vosk.FinalResult()\n \n self.woke = False\n self._speech = True\n self._lastRecognizedTime = time.time()\n self.__count = 0\n self.__prevSpeaking = None\n self.__length = 0\n self._speechLength = self.speechLengths[0]\n\n def multiplySpeechLength(self, multiplier: float) -> float:\n\n \"\"\"\n Dynamically update the speech length by multiplying it by a certain value.\n \"\"\"\n\n self._realSpeechLength = self.speechLengths[1] * multiplier\n return self._realSpeechLength\n\n def recognizeDone(self) -> None:\n\n \"\"\"\n Tells the recognizer that we are done recognizing.\n \"\"\"\n\n self._speech = False\n\n def restartVosk(self) -> None:\n \"\"\"\n Restart just the Vosk recognizer.\n \"\"\"\n\n if not self.disableVosk:\n self.vosk = vosk.KaldiRecognizer(self.voskModel, RATE)\n\n async def recognize(self) -> dict:\n\n if not self._speech:\n\n if self.offline:\n if not self.disableVosk:\n text = json.loads(self.vosk.FinalResult())[\"text\"]\n return {\"status\": \"recognized\", \"msg\": text}\n return {\"status\": \"error\", \"msg\": f\"both disableVosk and offline is True. Can't recognize with nothing to recognize with.\", \"exception\": None}\n\n frames = self._frames[\"beforeWoke\"][-10:] + self._frames[\"afterWoke\"]\n\n # First save the data gathered into a .wav file\n wf = wave.open(self.savePath, \"wb\")\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(2)\n wf.setframerate(RATE)\n wf.writeframes(b\"\".join(frames))\n wf.close()\n\n # Convert it into a AudioData object\n try:\n with AudioFile(self.savePath) as src:\n audio = self.srRecognizer.record(src)\n except Exception as e:\n return {\n \"status\": \"error\",\n \"msg\": f\"Failed to convert cache file to AudioData. ({e})\",\n \"exception\": e\n }\n\n # Finally attempt to recognize using google's recognizer from speechrecognition module\n try:\n content = self.srRecognizer.recognize_google(\n audio, key=self.googleRecognizerKey\n )\n callback = {\"status\": \"recognized\", \"msg\": content}\n except UnknownValueError:\n callback = {\"status\": \"unknown\", \"msg\": \"Unknown value.\"}\n except Exception as e:\n callback = {\n \"status\": \"error\",\n \"msg\": f\"Failed to recognize audio. 
({e})\",\n \"exception\": e\n }\n finally:\n return callback\n\n return {\"status\": \"listening\", \"msg\": \"Appending frames.\"}\n\n async def callback(self, *args, **kwargs) -> None:\n await self._callback(*args, **kwargs, **self.callbackParams)\n\n async def wakeUp(\n self, followup: bool = False, emitCallback: bool = True, **kwargs\n ) -> None:\n\n \"\"\"\n Wake up the speech recognizer,\n\n Parameters\n ----------\n `followup` : bool\n\n \"\"\"\n\n self.woke = True\n self._followup = followup\n self.__prevSpeaking = time.time()\n\n self.callbackParams = {\"followup\": followup, **kwargs}\n if emitCallback:\n await self.callback({\"status\": \"woke\", \"msg\": \"woke\"})\n\n async def start(self, blocking: bool = False) -> None:\n\n \"\"\"\n Start the speech recognizer.\n\n Parameters\n ----------\n `blocking` : bool\n if True, speech recognizer will block the program.\n \"\"\"\n\n if blocking:\n return await self._start()\n\n def f():\n asyncio.set_event_loop(self.loop)\n self.loop.run_until_complete(self._start())\n\n osInterface.thread(f)\n while not self._ready:\n await asyncio.sleep(0.05)\n\n async def wokeListen(self, data) -> bool:\n\n \"\"\"\n Starts listening for the provided wake words both using pvporcupine and vosk.\n Vosk will not be used if self.disableVosk is True\n \"\"\"\n\n if not self.disableVosk:\n # Get vosk information\n self.vosk.AcceptWaveform(data)\n partial = json.loads(self.vosk.PartialResult())\n else:\n partial = {\"partial\": \"\"}\n\n # Get pvporcupine wake word information\n p = -1\n if self.porcupine:\n p = self.porcupine.process(np.frombuffer(data, dtype=np.int16))\n\n # Check if a wake word is recognized using both vosk and porcupine if porcupine is successfully initialized\n if any(k in partial[\"partial\"] for k in self.wakewords) or p >= 0:\n\n if not self.disableVosk:\n self.vosk.FinalResult()\n\n return True\n \n # Constantly collect before wake word frames\n if len(self._frames[\"beforeWoke\"]) > self.beforeWokeBufferLimit:\n self._frames[\"beforeWoke\"].pop(0)\n self._frames[\"beforeWoke\"].append(data)\n \n\n if not self.disableVosk:\n # Prevent active listening from getting way too big, will cause a memory leak if not implemented\n if len(partial[\"partial\"].split()) > 25:\n self.vosk.FinalResult()\n self.restartVosk()\n\n vad = await self.vad.isSpeech(data)\n if vad:\n self.__prevSpeaking = time.time()\n\n if not self.__prevSpeaking:\n self.__prevSpeaking = time.time()\n length = time.time() - self.__prevSpeaking\n\n if length > 20.0:\n\n if not self.disableVosk:\n self.vosk.FinalResult()\n self.restartVosk()\n self.__prevSpeaking = time.time()\n\n # Emit what the vosk recognizer is currently hearing\n await self.callback(\n {\"status\": \"activeListeningPartial\", \"msg\": partial[\"partial\"]}\n )\n\n return False\n\n async def _start(self) -> None:\n with sd.RawInputStream(\n samplerate=RATE,\n blocksize=CHUNK,\n device=self.device,\n dtype=\"int16\",\n channels=CHANNELS,\n callback=self.__callback,\n ):\n self._ready = True\n while not self.done:\n data = self._q.get()\n\n if self.listen:\n\n # Wait for one of the wake words to be triggered\n if not self.woke:\n\n # There seems to be a bug wherein woke becomes True right after the speech is recognized, so we do a time check to prevent that. 
(pls fix) FIXME\n woke = await self.wokeListen(data)\n if (time.time() - self._lastRecognizedTime) < 1.8:\n woke = False\n\n # Now wake up the processor/recognizer\n if woke and not self.woke:\n await self.wakeUp()\n\n if self.woke:\n \n partial = None\n if not self.disableVosk:\n # Give vosk the speech data\n self.vosk.AcceptWaveform(data)\n\n # Realtime Partial data\n partial = list(json.loads(self.vosk.PartialResult()).items())[\n 0\n ][1].strip()\n if partial:\n await self.callback(\n {\"status\": \"recognizedPartial\", \"msg\": partial}\n )\n\n # Perform voice activity detection\n vad = await self.vad.isSpeech(data)\n if vad:\n self.__count += 1\n self.__prevSpeaking = time.time()\n\n await self.callback(\n {\"status\": \"voiceActivity\", \"msg\": \"voiceActivity\"}\n )\n\n # Perform previous voice activity checking.\n if self.__prevSpeaking:\n self.__length = time.time() - self.__prevSpeaking\n\n comparator = self.__count == 0 or not partial\n if self.disableVosk:\n comparator = self.__count == 0\n\n if comparator:\n self._speechLength = self._startSpeechLength\n else:\n self._speechLength = self._realSpeechLength\n\n # Current speech length has exceeded the provided speech length meaning we're done listening.\n if self.__length > self._speechLength:\n self.recognizeDone()\n\n self._frames[\"afterWoke\"].append(data)\n recognized = await self.recognize()\n await self.callback(recognized)\n\n # Finally reset all the variables back to their default so that it can be ready for the next time the listener gets woke.\n if not self._speech:\n self._reset()\n\n\nasync def callback(data, *args, **kwargs) -> None:\n status = data.get(\"status\", \"listening\")\n if status == \"recognizedPartial\":\n print(f\"> {data['msg']} {recognizer._realSpeechLength}\", end=\"\\r\")\n\n if data[\"msg\"].startswith(\"turn the lights off\"):\n recognizer.recognizeDone()\n\n if data[\"msg\"].endswith((\"to\", \"two\", \"of\", \"and\", \"for\")):\n recognizer.multiplySpeechLength(2.8)\n else:\n recognizer.multiplySpeechLength(1)\n\n if status == \"recognized\":\n print(f\"You: {data['msg']}\")\n\n if status == \"woke\":\n print(f\"\\nI'm listening...\")\n\n if status == \"activeListeningPartial\":\n print(f\"Active: {data['msg']}\", end=\"\\r\")\n\n\nasync def main(loop: asyncio.BaseEventLoop) -> None:\n\n global recognizer\n recognizer = SpeechRecognizer(\n [\"jarvis\"],\n [1.0],\n osInterface.joinPath(\"models/vad.h5\"),\n 0.9,\n osInterface.joinPath(\"models/vosk\"),\n osInterface.joinPath(\".tmp/cache.wav\"),\n callback,\n loop,\n speechLengths=(5.0, 1.2),\n offline=False,\n disableVosk=True\n )\n\n await recognizer.start(blocking=True)\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n\n try:\n loop.run_until_complete(main(loop))\n except KeyboardInterrupt:\n loop.stop()\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.fft.rfft", "numpy.round", "numpy.frombuffer", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
juliuskunze/eve
[ "628ad445397a9872849c9c25b4e841ba8130ea39" ]
[ "wmt/main.py" ]
[ "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Main file for running the WMT example.\n\nThis file is intentionally kept short. The majority for logic is in libraries\nthat can be easily tested and imported in Colab.\n\"\"\"\nimport wandb\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nfrom clu import platform\nimport train\nimport jax\nfrom ml_collections import config_flags\nimport tensorflow as tf\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('workdir', None, 'Directory to store model data.')\nconfig_flags.DEFINE_config_file(\n 'config',\n 'configs/default.py',\n 'File path to the training hyperparameter configuration.',\n lock_config=True)\nflags.mark_flags_as_required(['config', 'workdir'])\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n # Hide any GPUs from TensorFlow. Otherwise TF might reserve memory and make\n # it unavailable to JAX.\n tf.config.experimental.set_visible_devices([], 'GPU')\n\n logging.info('JAX process: %d / %d', jax.process_index(), jax.process_count())\n logging.info('JAX local devices: %r', jax.local_devices())\n\n # Add a note so that we can tell which task is which JAX host.\n # (Depending on the platform task 0 is not guaranteed to be host 0)\n platform.work_unit().set_task_status(f'process_index: {jax.process_index()}, '\n f'process_count: {jax.process_count()}')\n platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,\n FLAGS.workdir, 'workdir')\n\n with wandb.init(config=dict(FLAGS.config), sync_tensorboard=True):\n train.train_and_evaluate(FLAGS.config, FLAGS.workdir)\n\n\nif __name__ == '__main__':\n jax.config.config_with_absl()\n app.run(main)\n" ]
[ [ "tensorflow.config.experimental.set_visible_devices" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mfkiwl/PW_from_GPS
[ "fa0b0b9e1325a055ce884f79c14d24148348886b" ]
[ "aux_gps.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 10 14:33:19 2019\n\n@author: ziskin\n\"\"\"\nfrom PW_paths import work_yuval\nfrom pathlib import Path\ncwd = Path().cwd()\n# TODO: build curve fit tool with various function model: power, sum of sin ...\n# TODO: no need to build it, use lmfit instead:\n# TODO: check if lmfit accepts- datetimeindex, xarrays and NaNs.\n# TODO: if not, build func to replace datetimeindex to numbers and vise versa\n\n# def high_sample_and_smooth_dataframe_time_series(df):\n# import pandas as pd\n# dfs = df.copy()\n# dfs.index = pd.to_timedelta(dfs.index, unit='d')\n# dfs = dfs.resample('15S').interpolate(method='cubic').T.mean().resample('5T').mean()\n# better = better.reset_index(drop=True)\n# better.index = np.arange(-days_prior, days_after, 1/pts_per_day)\n\nmonth_to_doy_dict = {1: 1, 2: 32, 3: 61, 4: 92, 5: 122,\n 6: 153, 7: 183, 8: 214, 9: 245, 10: 275, 11: 306, 12: 336}\n\n\ndef replace_char_at_string_position(string, char='s', pos=3):\n if pos != -1:\n string = string[:pos] + char + string[pos+1:]\n else:\n string = string[:pos] + char\n return string\n\n\ndef read_converted_G0_stations(path=cwd):\n import pandas as pd\n df = pd.read_excel(path/'G0-converted.xlsx', skiprows=11)\n return df\n\n\ndef fill_na_xarray_time_series_with_its_group(xarray, grp='month', time_dim='time',\n smooth=True, window=11, order=3,\n plot=False):\n \"\"\" fill the NaNs of a Dataset or DataArray with mean grp cycle\n (hourly, monthly, etc) and smooth using savgol filter\"\"\"\n from scipy.signal import savgol_filter\n import xarray as xr\n def fill_na_dataarray(da, grp=grp, time_dim=time_dim, smooth=smooth,\n window=window, order=order, plot=plot):\n print('selected {}ly NaN filling for {}.'.format(grp, da.name))\n da_old = da.copy()\n mean_signal = da.groupby('{}.{}'.format(time_dim, grp)).mean()\n da = da.groupby('{}.{}'.format(time_dim, grp)).fillna(mean_signal)\n da = da.reset_coords(drop=True)\n if smooth:\n print('smoothing.')\n da = da.copy(data=savgol_filter(da, window, order))\n da.attrs['smoothing'] = 'savgol filter, window {}, order {}.'.format(window, order)\n da.attrs['NaN filling'] = 'mean {}ly values'.format(grp)\n if plot:\n da.plot()\n da_old.plot()\n return da\n\n if isinstance(xarray, xr.DataArray):\n xarray = fill_na_dataarray(xarray)\n elif isinstance(xarray, xr.Dataset):\n dal = []\n attrs = xarray.attrs\n for da in xarray:\n dal.append(fill_na_dataarray(xarray[da]))\n xarray = xr.merge(dal)\n xarray.attrs = attrs\n return xarray\n\n\ndef replace_xarray_time_series_with_its_group(da, grp='month', time_dim='time'):\n \"\"\"run the same func on each dim in da\"\"\"\n import xarray as xr\n dims = [x for x in da.dims if time_dim not in x]\n if len(dims) == 0:\n # no other dim except time:\n da = replace_time_series_with_its_group(da, grp=grp)\n return da\n dims_attrs = [da[x].attrs for x in dims]\n dims_attrs_dict = dict(zip(dims, dims_attrs))\n if len(dims) == 1:\n dim0_list = []\n for dim0 in da[dims[0]]:\n da0 = da.sel({dims[0]: dim0})\n da0 = replace_time_series_with_its_group(da0, grp=grp)\n dim0_list.append(da0)\n da_transformed = xr.concat(dim0_list, dims[0])\n da_transformed[dims[0]] = da[dims[0]]\n da_transformed.attrs[dims[0]] = dims_attrs_dict.get(dims[0])\n elif len(dims) == 2:\n dim0_list = []\n for dim0 in da[dims[0]]:\n dim1_list = []\n for dim1 in da[dims[1]]:\n da0 = da.sel({dims[0]: dim0, dims[1]: dim1})\n da0 = replace_time_series_with_its_group(da0, grp=grp)\n dim1_list.append(da0)\n 
dim0_list.append(xr.concat(dim1_list, dims[1]))\n da_transformed = xr.concat(dim0_list, dims[0])\n da_transformed[dims[0]] = da[dims[0]]\n da_transformed[dims[1]] = da[dims[1]]\n da_transformed.attrs[dims[0]] = dims_attrs_dict.get(dims[0])\n da_transformed.attrs[dims[1]] = dims_attrs_dict.get(dims[1])\n elif len(dims) == 3:\n dim0_list = []\n for dim0 in da[dims[0]]:\n dim1_list = []\n for dim1 in da[dims[1]]:\n dim2_list = []\n for dim2 in da[dims[2]]:\n da0 = da.sel({dims[0]: dim0, dims[1]: dim1, dims[2]: dim2})\n da0 = replace_time_series_with_its_group(da0, grp=grp)\n dim2_list.append(da0)\n dim1_list.append(xr.concat(dim2_list, dims[2]))\n dim0_list.append(xr.concat(dim1_list, dims[1]))\n da_transformed = xr.concat(dim0_list, dims[0])\n da_transformed[dims[0]] = da[dims[0]]\n da_transformed[dims[1]] = da[dims[1]]\n da_transformed[dims[2]] = da[dims[2]]\n da_transformed.attrs[dims[0]] = dims_attrs_dict.get(dims[0])\n da_transformed.attrs[dims[1]] = dims_attrs_dict.get(dims[1])\n da_transformed.attrs[dims[2]] = dims_attrs_dict.get(dims[2])\n return da_transformed\n\n\ndef replace_time_series_with_its_group(da_ts, grp='month'):\n \"\"\" replace an xarray time series with its mean grouping e.g., time.month,\n time.dayofyear, time.hour etc.., basiaclly implement .transform method\n on 1D dataarray, index must be datetime\"\"\"\n import xarray as xr\n import pandas as pd\n da_ts = da_ts.reset_coords(drop=True)\n attrs = da_ts.attrs\n df = da_ts.to_dataframe(da_ts.name)\n if grp == 'month':\n grp_ind = df.index.month\n elif grp == 'hour':\n grp_ind = df.index.hour\n df = df.groupby(grp_ind).transform('mean')\n ds = df.to_xarray()\n da = ds[da_ts.name]\n da.attrs = attrs\n return da\n\n\ndef read_ims_api_token():\n from PW_paths import home_path\n with open(home_path / '.imsapi') as fp:\n token = fp.readlines()[0].strip('\\n')\n return token\n\n\ndef calculate_gradient(f, lat_dim='latitude', lon_dim='longitude',\n level_dim='level', time_dim='time', savepath=None):\n from metpy.calc import lat_lon_grid_deltas\n from metpy.calc import gradient\n from aux_gps import save_ncfile\n import xarray as xr\n name = f.name\n dx, dy = lat_lon_grid_deltas(f[lon_dim], f[lat_dim])\n# f = f.transpose(..., lat_dim, lon_dim)\n# fy, fx = gradient(f, deltas=(dy, dx))\n if level_dim in f.dims and time_dim in f.dims:\n min_year = f[time_dim].dt.year.min().item()\n max_year = f[time_dim].dt.year.max().item()\n level_cnt = f[level_dim].size\n label = '{}_{}-{}.nc'.format(level_cnt, min_year, max_year)\n times = []\n for time in f[time_dim]:\n print('{}-{}'.format(time[time_dim].dt.month.item(), time[time_dim].dt.year.item()))\n levels = []\n for level in f[level_dim]:\n ftl = f.sel({time_dim: time, level_dim: level})\n fy, fx = gradient(ftl, deltas=(dy, dx))\n fx_da = xr.DataArray(fx.magnitude, dims=[lat_dim, lon_dim])\n fx_da.name = '{}x'.format(name)\n fy_da = xr.DataArray(fy.magnitude, dims=[lat_dim, lon_dim])\n fy_da.name = '{}y'.format(name)\n fx_da.attrs['units'] = fx.units.format_babel()\n fy_da.attrs['units'] = fy.units.format_babel()\n grad = xr.merge([fx_da, fy_da])\n levels.append(grad)\n times.append(xr.concat(levels, level_dim))\n ds = xr.concat(times, time_dim)\n ds[level_dim] = f[level_dim]\n ds[time_dim] = f[time_dim]\n ds[lat_dim] = f[lat_dim]\n ds[lon_dim] = f[lon_dim]\n else:\n if level_dim in f.dims:\n level_cnt = f[level_dim].size\n label = '{}.nc'.format(level_cnt)\n levels = []\n for level in f[level_dim]:\n fl = f.sel({level_dim: level})\n fy, fx = gradient(fl, deltas=(dy, dx))\n fx_da = 
xr.DataArray(fx.magnitude, dims=[lat_dim, lon_dim])\n fx_da.name = '{}x'.format(name)\n fy_da = xr.DataArray(fy.magnitude, dims=[lat_dim, lon_dim])\n fy_da.name = '{}y'.format(name)\n fx_da.attrs['units'] = fx.units.format_babel()\n fy_da.attrs['units'] = fy.units.format_babel()\n grad = xr.merge([fx_da, fy_da])\n levels.append(grad)\n da = xr.concat(levels, level_dim)\n da[level_dim] = f[level_dim]\n elif time_dim in f.dims:\n min_year = f[time_dim].dt.year.min().item()\n max_year = f[time_dim].dt.year.max().item()\n min_year = f[time_dim].dt.year.min().item()\n max_year = f[time_dim].dt.year.max().item()\n times = []\n for time in f[time_dim]:\n ft = f.sel({time_dim: time})\n fy, fx = gradient(ft, deltas=(dy, dx))\n fx_da = xr.DataArray(fx.magnitude, dims=[lat_dim, lon_dim])\n fx_da.name = '{}x'.format(name)\n fy_da = xr.DataArray(fy.magnitude, dims=[lat_dim, lon_dim])\n fy_da.name = '{}y'.format(name)\n fx_da.attrs['units'] = fx.units.format_babel()\n fy_da.attrs['units'] = fy.units.format_babel()\n grad = xr.merge([fx_da, fy_da])\n times.append(grad)\n ds = xr.concat(times, time_dim)\n ds[time_dim] = f[time_dim]\n ds[lat_dim] = f[lat_dim]\n ds[lon_dim] = f[lon_dim]\n if savepath is not None:\n filename = '{}_grad_{}'.format(f.name, label)\n save_ncfile(ds, savepath, filename)\n return ds\n\n\ndef calculate_divergence(u, v, lat_dim='latitude', lon_dim='longitude',\n level_dim='level', time_dim='time', savepath=None):\n from metpy.calc import divergence\n from metpy.calc import lat_lon_grid_deltas\n from aux_gps import save_ncfile\n import xarray as xr\n dx, dy = lat_lon_grid_deltas(u[lon_dim], u[lat_dim])\n u = u.transpose(..., lat_dim, lon_dim)\n v = v.transpose(..., lat_dim, lon_dim)\n if level_dim in u.dims and time_dim in u.dims:\n min_year = u[time_dim].dt.year.min().item()\n max_year = u[time_dim].dt.year.max().item()\n level_cnt = u[level_dim].size\n label = '{}_{}-{}.nc'.format(level_cnt, min_year, max_year)\n times = []\n for time in u[time_dim]:\n print('{}-{}'.format(time[time_dim].dt.month.item(), time[time_dim].dt.year.item()))\n levels = []\n for level in u[level_dim]:\n utl = u.sel({time_dim: time, level_dim: level})\n vtl = v.sel({time_dim: time, level_dim: level})\n div = divergence(utl, vtl, dx=dx, dy=dy)\n div_da = xr.DataArray(div.magnitude, dims=[lat_dim, lon_dim])\n div_da.attrs['units'] = div.units.format_babel()\n levels.append(div_da)\n times.append(xr.concat(levels, level_dim))\n da = xr.concat(times, time_dim)\n da[level_dim] = u[level_dim]\n da[time_dim] = u[time_dim]\n da[lat_dim] = u[lat_dim]\n da[lon_dim] = u[lon_dim]\n da.name = '{}{}_div'.format(u.name, v.name)\n else:\n if level_dim in u.dims:\n level_cnt = u[level_dim].size\n label = '{}.nc'.format(level_cnt)\n levels = []\n for level in u[level_dim]:\n ul = u.sel({level_dim: level})\n vl = v.sel({level_dim: level})\n div = divergence(ul, vl, dx=dx, dy=dy)\n div_da = xr.DataArray(div.magnitude, dims=[lat_dim, lon_dim])\n div_da.attrs['units'] = div.units.format_babel()\n levels.append(div_da)\n da = xr.concat(levels, level_dim)\n da[level_dim] = u[level_dim]\n elif time_dim in u.dims:\n min_year = u[time_dim].dt.year.min().item()\n max_year = u[time_dim].dt.year.max().item()\n min_year = u[time_dim].dt.year.min().item()\n max_year = u[time_dim].dt.year.max().item()\n times = []\n for time in u[time_dim]:\n ut = u.sel({time_dim: time})\n vt = v.sel({time_dim: time})\n div = divergence(ut, vt, dx=dx, dy=dy)\n div_da = xr.DataArray(div.magnitude, dims=[lat_dim, lon_dim])\n div_da.attrs['units'] = 
div.units.format_babel()\n times.append(div_da)\n da = xr.concat(times, time_dim)\n da[time_dim] = u[time_dim]\n da[lat_dim] = u[lat_dim]\n da[lon_dim] = u[lon_dim]\n da.name = '{}{}_div'.format(u.name, v.name)\n if savepath is not None:\n filename = '{}{}_div_{}'.format(u.name, v.name, label)\n save_ncfile(da, savepath, filename)\n return da\n\n\ndef calculate_pressure_integral(da, pdim='level'):\n import numpy as np\n # first sort to decending levels:\n da = da.sortby(pdim, ascending=False)\n try:\n units = da[pdim].attrs['units']\n except KeyError:\n print('no units attrs found, assuming units are hPa')\n units = 'hPa'\n # transform to Pa:\n if units != 'Pa':\n print('{} units detected, converting to Pa!'.format(units))\n da[pdim] = da[pdim] * 100\n # P_{i+1} - P_i:\n plevel_diff = np.abs(da[pdim].diff(pdim, label='lower'))\n # var_i + var_{i+1}:\n da_sum = da.shift(level=-1) + da\n p_int = ((da_sum * plevel_diff) / 2.0).sum(pdim)\n return p_int\n\n\ndef linear_fit_using_scipy_da_ts(da_ts, model='TSEN', slope_factor=3650.25,\n plot=False, ax=None, units=None,\n method='simple', weights=None, not_time=False):\n \"\"\"linear fit using scipy for dataarray time series,\n support for theilslopes(TSEN) and lingress(LR), produce 95% CI\"\"\"\n import xarray as xr\n from scipy.stats.mstats import theilslopes\n from scipy.stats import linregress\n import matplotlib.pyplot as plt\n from scipy.optimize import curve_fit\n import numpy as np\n time_dim = list(set(da_ts.dims))[0]\n y = da_ts.dropna(time_dim).values\n if not_time:\n X = da_ts[time_dim].values.reshape(-1, 1)\n jul_no_nans = da_ts.dropna(time_dim)[time_dim].values\n # jul_no_nans -= np.median(jul_no_nans)\n jul = da_ts[time_dim].values\n # jul -= np.median(jul)\n else:\n jul, jul_no_nans = get_julian_dates_from_da(da_ts, subtract='median')\n X = jul_no_nans.reshape(-1, 1)\n if model == 'LR':\n if method == 'simple':\n coef, intercept, r_value, p_value, std_err = linregress(jul_no_nans, y)\n confidence_interval = 1.96 * std_err\n coef_lo = coef - confidence_interval\n coef_hi = coef + confidence_interval\n elif method == 'curve_fit':\n func = lambda x, a, b: a * x + b\n if weights is not None:\n sigma = weights.dropna(time_dim).values\n else:\n sigma = None\n best_fit_ab, covar = curve_fit(func, jul_no_nans, y,\n sigma=sigma, p0=[0, 0],\n absolute_sigma = False)\n sigma_ab = np.sqrt(np.diagonal(covar))\n coef = best_fit_ab[0]\n intercept = best_fit_ab[1]\n coef_lo = coef - sigma_ab[0]\n coef_hi = coef + sigma_ab[0]\n elif model == 'TSEN':\n coef, intercept, coef_lo, coef_hi = theilslopes(y, X)\n predict = jul * coef + intercept\n predict_lo = jul * coef_lo + intercept\n predict_hi = jul * coef_hi + intercept\n trend_hi = xr.DataArray(predict_hi, dims=[time_dim])\n trend_hi.name = 'trend_hi'\n trend_lo = xr.DataArray(predict_lo, dims=[time_dim])\n trend_lo.name = 'trend_lo'\n trend_hi[time_dim] = da_ts[time_dim]\n trend_lo[time_dim] = da_ts[time_dim]\n slope_in_factor_scale_lo = coef_lo * slope_factor\n slope_in_factor_scale_hi = coef_hi * slope_factor\n trend = xr.DataArray(predict, dims=[time_dim])\n trend.name = 'trend'\n trend[time_dim] = da_ts[time_dim]\n slope_in_factor_scale = coef * slope_factor\n if plot:\n labels = ['{}'.format(da_ts.name)]\n if ax is None:\n fig, ax = plt.subplots()\n origln = da_ts.plot.line('k-', marker='o', ax=ax, linewidth=1.5, markersize=2.5)\n trendln = trend.plot(ax=ax, color='r', linewidth=2)\n trend_hi.plot.line('r--', ax=ax, linewidth=1.5)\n trend_lo.plot.line('r--', ax=ax, linewidth=1.5)\n 
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) {}'.format(model, slope_in_factor_scale, slope_in_factor_scale_lo, slope_in_factor_scale_hi, units)\n handles = origln\n handles += trendln\n labels.append(trend_label)\n ax.legend(handles=handles, labels=labels, loc='upper left')\n ax.grid()\n trend_ds = xr.merge([trend, trend_hi, trend_lo])\n results_dict = {'slope_hi': slope_in_factor_scale_hi, 'slope_lo': slope_in_factor_scale_lo, 'slope': slope_in_factor_scale}\n results_dict['intercept'] = intercept\n return trend_ds, results_dict\n\n\ndef scatter_plot_and_fit(df, x, y, color='b', ax=None):\n import matplotlib.pyplot as plt\n import seaborn as sns\n if ax is None:\n fig, ax = plt.subplots()\n sns.scatterplot(x=x, y=y, data=df, ax=ax, color=color, s=10)\n\n\ndef linear_regression_scikit_learn(da1, da2, same_dim='time'):\n from sklearn.metrics import mean_squared_error\n from sklearn.linear_model import LinearRegression\n import numpy as np\n from aux_gps import dim_intersection\n shared = dim_intersection([da1, da2])\n da1 = da1.sel({same_dim: shared})\n da2 = da2.sel({same_dim: shared})\n X = da1.dropna(same_dim).values.reshape(-1, 1)\n y = da2.dropna(same_dim).values\n lr = LinearRegression()\n lr.fit(X, y)\n slope = lr.coef_[0]\n inter = lr.intercept_\n pred = lr.predict(X)\n rmse = mean_squared_error(y, pred, squared=False)\n resid = pred - y\n mean = np.sum(resid)\n return slope, inter, mean, rmse\n\n\ndef split_equal_da_ts_around_datetime(da_ts, dt='2014-05-01'):\n time_dim = list(set(da_ts.dims))[0]\n x1 = da_ts.dropna(time_dim).sel({time_dim: slice(None, dt)})\n x2 = da_ts.dropna(time_dim).sel({time_dim: slice(dt, None)})\n if x1.size == 0 or x2.size == 0:\n raise ValueError('one or two of the sub-series is 0 size.')\n if x1.size > x2.size:\n x1 = x1.isel({time_dim: slice(-x2.size , None)})\n elif x1.size < x2.size:\n x2 = x2.isel({time_dim: slice(0, x1.size)})\n return x1, x2\n\n\ndef wilcoxon_rank_test_xr(\n da_ts, alpha=0.05,\n cp_dt='2014-05-01',\n zero_method='wilcox',\n correction=False,\n alternative='two-sided',\n mode='auto'):\n import xarray as xr\n from scipy.stats import wilcoxon\n x, y = split_equal_da_ts_around_datetime(da_ts, dt=cp_dt)\n stat, pvalue = wilcoxon(x, y, zero_method=zero_method,\n correction=correction, alternative=alternative\n )\n if pvalue < alpha:\n # the two parts of the time series come from different distributions\n print('Two distributions!')\n normal = False\n else:\n # same distribution\n print('Same distribution')\n normal = True\n da = xr.DataArray([stat, pvalue, normal], dims=['result'])\n da['result'] = ['stat', 'pvalue', 'h']\n return da\n\n\ndef normality_test_xr(da_ts, sample=None, alpha=0.05, test='lili',\n dropna=True, verbose=True):\n \"\"\"normality tests on da_ts\"\"\"\n from statsmodels.stats.diagnostic import lilliefors\n from scipy.stats import shapiro\n from scipy.stats import normaltest\n import xarray as xr\n time_dim = list(set(da_ts.dims))[0]\n if sample is not None:\n da_ts = da_ts.resample({time_dim: sample}).mean()\n if dropna:\n da_ts = da_ts.dropna(time_dim)\n if test == 'shapiro':\n stat, pvalue = shapiro(da_ts)\n elif test == 'lili':\n stat, pvalue = lilliefors(da_ts, dist='norm', pvalmethod='table')\n elif test == 'normaltest':\n stat, pvalue = normaltest(da_ts)\n if pvalue < alpha:\n Not = 'NOT'\n normal = False\n else:\n Not = ''\n normal = True\n if verbose:\n print('Mean: {:.4f}, pvalue: {:.4f}'.format(stat, pvalue))\n print('Thus, the data is {} Normally distributed with alpha {}'.format(Not, alpha))\n 
da = xr.DataArray([stat, pvalue, normal], dims=['result'])\n da['result'] = ['stat', 'pvalue', 'h']\n return da\n\n\ndef homogeneity_test_xr(da_ts, hg_test_func, dropna=True, alpha=0.05,\n sim=None, verbose=True):\n \"\"\"False means data is homogenous, True means non-homogenous with significance alpha\"\"\"\n import xarray as xr\n import pandas as pd\n time_dim = list(set(da_ts.dims))[0]\n if dropna:\n da_ts = da_ts.dropna(time_dim)\n h, cp, p, U, mu = hg_test_func(da_ts, alpha=alpha, sim=sim)\n result = hg_test_func(da_ts, alpha=alpha, sim=sim)\n name = type(result).__name__\n if verbose:\n print('running homogeneity {} with alpha {} and sim {}'.format(name, alpha, sim))\n\n cpl = pd.to_datetime(da_ts.isel({time_dim: result.cp})[time_dim].values)\n if 'U' in result._fields:\n stat = result.U\n elif 'T' in result._fields:\n stat = result.T\n elif 'Q' in result._fields:\n stat = result.Q\n elif 'R' in result._fields:\n stat = result.R\n elif 'V' in result._fields:\n stat = result.V\n da = xr.DataArray([name, result.h, cpl, result.p, stat, result.avg], dims=['results'])\n da['results'] = ['name', 'h', 'cp_dt', 'pvalue', 'stat', 'means']\n return da\n\n\ndef VN_ratio_trend_test_xr(da_ts, dropna=True, alpha=0.05, loadpath=work_yuval,\n verbose=True, return_just_trend=False):\n \"\"\"calculate the Von Nuemann ratio test statistic and test for trend.\"\"\"\n import xarray as xr\n time_dim = list(set(da_ts.dims))[0]\n if dropna:\n da_ts = da_ts.dropna(time_dim)\n n = da_ts.dropna(time_dim).size\n d2 = (da_ts.diff(time_dim)**2.0).sum() / (n - 1)\n # s**2 is the variance:\n s2 = da_ts.var()\n eta = (d2 / s2).item()\n cv_da = xr.load_dataarray(loadpath / 'VN_critical_values.nc')\n cv = cv_da.sel(sample_size=n, pvalue=alpha, method='nearest').item()\n if eta < cv:\n if verbose:\n print('the hypothesis of stationary cannot be rejected at the level {}'.format(alpha))\n trend = True\n else:\n trend = False\n if return_just_trend:\n return trend\n else:\n da = xr.DataArray([eta, cv, trend, n], dims=['results'])\n da['results'] = ['eta', 'cv', 'trend', 'n']\n return da\n\n\ndef reduce_tail_xr(xarray, reduce='mean', time_dim='time', records=120,\n return_df=False):\n import xarray as xr\n\n def reduce_tail_da(da, reduce=reduce, time_dim=time_dim, records=records):\n if reduce == 'mean':\n da = da.dropna(time_dim).tail(records).mean(time_dim)\n return da\n if isinstance(xarray, xr.DataArray):\n xarray = reduce_tail_da(xarray, reduce, time_dim, records)\n elif isinstance(xarray, xr.Dataset):\n xarray = xarray.map(reduce_tail_da, args=(reduce, time_dim, records))\n if return_df:\n df = xarray.to_array('dum').to_dataframe(reduce)\n df.index.name = ''\n return df\n return xarray\n\n\ndef decimal_year_to_datetime(decimalyear):\n from datetime import datetime, timedelta\n import pandas as pd\n year = int(decimalyear)\n rem = decimalyear - year\n base = datetime(year, 1, 1)\n result = base + timedelta(seconds=(base.replace(year=base.year + 1) - base).total_seconds() * rem)\n return pd.to_datetime(result)\n\n\ndef select_months(da_ts, months, remove=False, reindex=True):\n import xarray as xr\n from aux_gps import xr_reindex_with_date_range\n import pandas as pd\n import numpy as np\n time_dim = list(set(da_ts.dims))[0]\n attrs = da_ts.attrs\n try:\n name = da_ts.name\n except AttributeError:\n name = ''\n if remove:\n all_months = np.arange(1, 13)\n months = list(set(all_months).difference(set(months)))\n print('selecting months #{} from {}'.format(', #'.join([str(x) for x in months]), name))\n to_add = []\n for 
month in months:\n sliced = da_ts.sel({time_dim: da_ts['{}.month'.format(time_dim)] == int(month)})\n to_add.append(sliced)\n da = xr.concat(to_add, time_dim)\n da.attrs = attrs\n if reindex:\n freq = pd.infer_freq(da_ts[time_dim].values)\n da = xr_reindex_with_date_range(da, freq=freq)\n return da\n\n\ndef run_MLR_harmonics(harmonic_dss, season=None, n_max=4,\n plot=True, cunits='cpd',\n ax=None, legend_loc=None, ncol=1,\n legsize=8, lw=1, legend_S_only=False):\n \"\"\" change cunits to 'cpy' to process annual harmonics\"\"\"\n from sklearn.linear_model import LinearRegression\n from sklearn.metrics import explained_variance_score\n import matplotlib.pyplot as plt\n import numpy as np\n if n_max > harmonic_dss[cunits].max().values.item():\n n_max = harmonic_dss[cunits].max().values.item()\n try:\n field = harmonic_dss.attrs['field']\n if field == 'PW':\n field = 'PWV'\n except KeyError:\n field = 'no name'\n name = [x for x in harmonic_dss][0].split('_')[0]\n if season is None and 'season' not in harmonic_dss.dims:\n harmonic = harmonic_dss # .sel(season='ALL')\n elif season is None and 'season' in harmonic_dss.dims:\n harmonic = harmonic_dss.sel(season='ALL')\n elif season is not None:\n harmonic = harmonic_dss.sel(season=season)\n # pre-proccess:\n if 'month' in harmonic.dims:\n harmonic = harmonic.transpose('month', cunits, ...)\n elif 'hour' in harmonic.dims:\n harmonic = harmonic.transpose('hour', cunits, ...)\n harmonic = harmonic.sel({cunits: slice(1, n_max)})\n # X = harmonic[name + '_mean'].values\n y = harmonic[name].values.reshape(-1, 1)\n exp_list = []\n for cycle in harmonic[cunits].values:\n X = harmonic[name + '_mean'].sel({cunits: cycle}).values.reshape(-1, 1)\n lr = LinearRegression(fit_intercept=False)\n lr.fit(X, y)\n y_pred = lr.predict(X)\n ex_var = explained_variance_score(y, y_pred)\n exp_list.append(ex_var)\n explained = np.array(exp_list) * 100.0\n exp_dict = dict(zip([x for x in harmonic[cunits].values], explained))\n exp_dict['total'] = np.cumsum(explained)\n exp_dict['season'] = season\n exp_dict['name'] = name\n if plot:\n if ax is None:\n fig, ax = plt.subplots(figsize=(8, 6))\n markers = ['s', 'x', '^', '>', '<', 'X']\n colors = ['tab:cyan', 'tab:brown', 'tab:pink', 'tab:orange',\n 'tab:purple', 'tab:yellow']\n styles = ['--', '-.', ':', ' ', 'None', ' ']\n S = ['S{}'.format(x) for x in harmonic[cunits].values]\n S_total = ['+'.join(S)]\n S = ['S{} ({:.0f}%)'.format(x, exp_dict[int(x)]) for x in harmonic[cunits].values]\n for i, cycle in enumerate(harmonic[cunits].values):\n harmonic[name + '_mean'].sel({cunits: cycle}).plot(ax=ax,\n linestyle=styles[i],\n color=colors[i],\n linewidth=lw,\n label=S[i]) # marker=markers[i])\n harmonic[name + '_mean'].sum(cunits).plot(ax=ax, marker=None, color='k',\n alpha=0.7, linewidth=lw, label=S_total)\n harmonic[name].plot(ax=ax, marker='o', linewidth=0., color='k', alpha=0.7, label=field)\n handles, labels = ax.get_legend_handles_labels()\n if legend_S_only:\n handles1 = handles[:-2]\n labels1 = labels[:-2]\n ax.legend(\n handles=handles1, labels=labels1,\n prop={'size': legsize},\n framealpha=0.5,\n fancybox=True,\n loc=legend_loc, ncol=ncol, columnspacing=0.75, handlelength=1.0)\n else:\n ax.legend(\n S + S_total + [field],\n prop={'size': legsize},\n framealpha=0.5,\n fancybox=True,\n loc=legend_loc, ncol=ncol, columnspacing=0.75, handlelength=1.0)\n# ax.grid()\n ax.set_xlabel('Time of day [UTC]')\n # ax.set_ylabel('{} anomalies [mm]'.format(field))\n if season is None:\n ax.set_title('Annual {} diurnal cycle for {} 
station'.format(field, name.upper()))\n else:\n ax.set_title('{} diurnal cycle for {} station in {}'.format(field, name.upper(), season))\n if legend_S_only:\n return ax, handles, labels\n else:\n return ax\n else:\n return exp_dict\n\n\ndef harmonic_analysis_xr(da, n=6, normalize=False, anomalize=False, freq='D',\n user_field_name=None):\n import xarray as xr\n from aux_gps import fit_da_to_model\n from aux_gps import normalize_xr\n from aux_gps import anomalize_xr\n try:\n field = da.attrs['channel_name']\n except KeyError:\n field = user_field_name\n if field is None:\n field = ''\n if normalize:\n da = normalize_xr(da, norm=1)\n time_dim = list(set(da.dims))[0]\n if anomalize:\n da = anomalize_xr(da, freq=freq)\n seasons = ['JJA', 'SON', 'DJF', 'MAM', 'ALL']\n print('station name: {}'.format(da.name))\n print('performing harmonic analysis with 1 to {} cycles per day.'.format(n))\n season_list = []\n for season in seasons:\n if season != 'ALL':\n print('analysing season {}.'.format(season))\n das = da.sel({time_dim: da['{}.season'.format(time_dim)] == season})\n else:\n print('analysing ALL seasons.')\n das = da\n ds = harmonic_da(das, n=n)\n season_list.append(ds)\n dss = xr.concat(season_list, 'season')\n dss['season'] = seasons\n dss.attrs['field'] = field\n return dss\n\n\ndef harmonic_da(da_ts, n=3, field=None, init=None):\n from aux_gps import fit_da_to_model\n import xarray as xr\n time_dim = list(set(da_ts.dims))[0]\n harmonics = [x + 1 for x in range(n)]\n if init is not None:\n init_amp = da_ts.groupby('{}.hour'.format(time_dim)).mean().mean('hour').values\n else:\n init_amp = 1.0\n init_values = [init_amp/float(x) for x in harmonics]\n params_list = []\n di_mean_list = []\n di_std_list = []\n for cpd, init_val in zip(harmonics, init_values):\n print('fitting harmonic #{}'.format(cpd))\n params = dict(\n sin_freq={\n 'value': cpd}, sin_amp={\n 'value': init_val}, sin_phase={\n 'value': 0})\n res = fit_da_to_model(\n da_ts,\n modelname='sin',\n params=params,\n plot=False,\n verbose=False)\n name = da_ts.name.split('_')[0]\n params_da = xr.DataArray([x for x in res.attrs.values()],\n dims=['params', 'val_err'])\n params_da['params'] = [x for x in res.attrs.keys()]\n params_da['val_err'] = ['value', 'stderr']\n params_da.name = name + '_params'\n name = res.name.split('_')[0]\n diurnal_mean = res.groupby('{}.hour'.format(time_dim)).mean()\n diurnal_std = res.groupby('{}.hour'.format(time_dim)).std()\n # diurnal_mean.attrs.update(attrs)\n # diurnal_std.attrs.update(attrs)\n diurnal_mean.name = name + '_mean'\n diurnal_std.name = name + '_std'\n params_list.append(params_da)\n di_mean_list.append(diurnal_mean)\n di_std_list.append(diurnal_std)\n da_mean = xr.concat(di_mean_list, 'cpd')\n da_std = xr.concat(di_std_list, 'cpd')\n da_params = xr.concat(params_list, 'cpd')\n ds = da_mean.to_dataset(name=da_mean.name)\n ds[da_std.name] = da_std\n ds['cpd'] = harmonics\n ds[da_params.name] = da_params\n ds[da_ts.name] = da_ts.groupby('{}.hour'.format(time_dim)).mean()\n if field is not None:\n ds.attrs['field'] = field\n return ds\n\n\ndef harmonic_da_ts(da_ts, n=3, grp='month', return_ts_fit=False,\n verbose=True):\n from aux_gps import fit_da_ts_to_sine_model\n import xarray as xr\n time_dim = list(set(da_ts.dims))[0]\n harmonics = [x + 1 for x in range(n)]\n if grp == 'month':\n init_freqs = [x / 366 for x in harmonics]\n cunits = 'cpy'\n cu_name = 'cycles per year'\n elif grp == 'hour':\n init_freqs = harmonics\n cunits = 'cpd'\n cu_name = 'cycles per day'\n params_list = []\n 
di_mean_list = []\n di_std_list = []\n tss = []\n params_dicts = []\n for cycle, init_freq in zip(harmonics, init_freqs):\n if verbose:\n print('fitting harmonic #{}'.format(cycle))\n res = fit_da_ts_to_sine_model(\n da_ts, init_freq=init_freq, verbose=False, plot=False)\n name = da_ts.name.split('_')[0]\n params_da = xr.DataArray([x for x in res.attrs.values()],\n dims=['params', 'val_err'])\n params_da['params'] = [x for x in res.attrs.keys()]\n params_da['val_err'] = ['value', 'stderr']\n params_da.name = name + '_params'\n name = res.name.split('_')[0]\n diurnal_mean = res.groupby('{}.{}'.format(time_dim, grp)).mean()\n diurnal_std = res.groupby('{}.{}'.format(time_dim, grp)).std()\n # diurnal_mean.attrs.update(attrs)\n # diurnal_std.attrs.update(attrs)\n diurnal_mean.name = name + '_mean'\n diurnal_std.name = name + '_std'\n params_list.append(params_da)\n di_mean_list.append(diurnal_mean)\n di_std_list.append(diurnal_std)\n tss.append(res)\n params_dicts.append(res.attrs)\n da_mean = xr.concat(di_mean_list, cunits)\n da_std = xr.concat(di_std_list, cunits)\n da_params = xr.concat(params_list, cunits)\n ds = da_mean.to_dataset(name=da_mean.name)\n ds[da_std.name] = da_std\n ds[cunits] = harmonics\n ds[cunits].attrs['long_name'] = cu_name\n ds[da_params.name] = da_params\n ds[da_ts.name] = da_ts.groupby('{}.{}'.format(time_dim, grp)).mean(keep_attrs=True)\n if return_ts_fit:\n ds = xr.concat(tss, cunits)\n ds[cunits] = harmonics\n di = {}\n for i, harm in enumerate(harmonics):\n keys = [x + '_{}'.format(harm) for x in params_dicts[i].keys()]\n di.update(dict(zip(keys, [x for x in params_dicts[i].values()])))\n ds.attrs = di\n return ds\n\n\ndef convert_da_to_long_form_df(da, var_name=None, value_name=None):\n \"\"\" convert xarray dataarray to long form pandas df\n to use with seaborn\"\"\"\n import xarray as xr\n if var_name is None:\n var_name = 'var'\n if value_name is None:\n value_name = 'value'\n dims = [x for x in da.dims]\n if isinstance(da, xr.Dataset):\n value_vars = [x for x in da]\n elif isinstance(da, xr.DataArray):\n value_vars = [da.name]\n df = da.to_dataframe()\n for i, dim in enumerate(da.dims):\n df[dim] = df.index.get_level_values(i)\n df = df.melt(value_vars=value_vars, value_name=value_name,\n id_vars=dims, var_name=var_name)\n return df\n\n\ndef get_season_for_pandas_dtindex(df):\n import pandas as pd\n if not isinstance(df.index, pd.DatetimeIndex):\n raise ValueError('index needs to be datetimeindex!')\n season = []\n months = [x.month for x in df.index]\n for month in months:\n if month <= 8 and month >=6:\n season.append('JJA')\n elif month <=5 and month >= 3:\n season.append('MAM')\n elif month >=9 and month<=11:\n season.append('SON')\n elif month == 12 or month == 1 or month ==2:\n season.append('DJF')\n return pd.Series(season, index=df.index)\n\n\ndef anomalize_xr(da_ts, freq='D', time_dim=None, units=None, verbose=True): # i.e., like deseason\n import xarray as xr\n if time_dim is None:\n time_dim = list(set(da_ts.dims))[0]\n attrs = da_ts.attrs\n if isinstance(da_ts, xr.Dataset):\n da_attrs = dict(zip([x for x in da_ts],[da_ts[x].attrs for x in da_ts]))\n try:\n name = da_ts.name\n except AttributeError:\n name = ''\n if isinstance(da_ts, xr.Dataset):\n name = [x for x in da_ts]\n if freq == 'D':\n if verbose:\n print('removing daily means from {}'.format(name))\n frq = 'daily'\n date = groupby_date_xr(da_ts)\n grp = date\n elif freq == 'H':\n if verbose:\n print('removing hourly means from {}'.format(name))\n frq = 'hourly'\n grp = 
'{}.hour'.format(time_dim)\n elif freq == 'MS':\n if verbose:\n print('removing monthly means from {}'.format(name))\n frq = 'monthly'\n grp = '{}.month'.format(time_dim)\n elif freq == 'AS':\n if verbose:\n print('removing yearly means from {}'.format(name))\n frq = 'yearly'\n grp = '{}.year'.format(time_dim)\n elif freq == 'DOY':\n if verbose:\n print('removing day of year means from {}'.format(name))\n frq = 'dayofyear'\n grp = '{}.dayofyear'.format(time_dim)\n elif freq == 'WOY':\n if verbose:\n print('removing week of year means from {}'.format(name))\n frq = 'weekofyear'\n grp = '{}.weekofyear'.format(time_dim)\n # calculate climatology:\n climatology = da_ts.groupby(grp).mean()\n climatology_std = da_ts.groupby(grp).std()\n da_anoms = da_ts.groupby(grp) - climatology\n if units == '%':\n da_anoms = 100.0 * (da_anoms.groupby(grp) / climatology)\n # da_anoms = 100.0 * (da_anoms / da_ts.mean())\n # da_anoms = 100.0 * (da_ts.groupby(grp)/climatology - 1)\n # da_anoms = 100.0 * (da_ts.groupby(grp)-climatology) / da_ts\n if verbose:\n print('Using % as units.')\n elif units == 'std':\n da_anoms = (da_anoms.groupby(grp) / climatology_std)\n if verbose:\n print('Using std as units.')\n da_anoms = da_anoms.reset_coords(drop=True)\n da_anoms.attrs.update(attrs)\n da_anoms.attrs.update(action='removed {} means'.format(frq))\n # if dataset, update attrs for each dataarray and add action='removed x means'\n if isinstance(da_ts, xr.Dataset):\n for x in da_ts:\n da_anoms[x].attrs.update(da_attrs.get(x))\n da_anoms[x].attrs.update(action='removed {} means'.format(frq))\n if units == '%':\n da_anoms[x].attrs.update(units='%')\n return da_anoms\n\n\ndef line_and_num_for_phrase_in_file(phrase='the dog barked', filename='file.txt'):\n with open(filename, 'r') as f:\n for (i, line) in enumerate(f):\n if phrase in line:\n return i, line\n return None, None\n\n\ndef grab_n_consecutive_epochs_from_ts(da_ts, sep='nan', n=10, time_dim=None,\n return_largest=False):\n \"\"\"grabs n consecutive epochs from time series (xarray dataarrays)\n and return list of either dataarrays\"\"\"\n if time_dim is None:\n time_dim = list(set(da_ts.dims))[0]\n df = da_ts.to_dataframe()\n A = consecutive_runs(df, num='nan')\n A = A.sort_values('total_not-nan', ascending=False)\n max_n = len(A)\n if return_largest:\n start = A.iloc[0, 0]\n end = A.iloc[0, 1]\n da = da_ts.isel({time_dim:slice(start, end)})\n return da\n if n > max_n:\n print('{} epoches requested but only {} available'.format(n, max_n))\n n = max_n\n da_list = []\n for i in range(n):\n start = A.iloc[i, 0]\n end = A.iloc[i, 1]\n da = da_ts.isel({time_dim: slice(start, end)})\n da_list.append(da)\n return da_list\n\n\ndef keep_full_years_of_monthly_mean_data(da_ts, verbose=False):\n name = da_ts.name\n time_dim = list(set(da_ts.dims))[0]\n df = da_ts.dropna(time_dim).to_dataframe()\n # calculate yearly data to drop (if points less than threshold):\n df['year'] = df.index.year\n points_in_year = df.groupby(['year']).count()[name].to_frame()\n # calculate total years with any data:\n tot_years = points_in_year[points_in_year >0].dropna().count().values.item()\n # calculate yealy data percentage (from maximum available):\n points_in_year['percent'] = (points_in_year[name] / 12) * 100.0\n # get the number of years to drop and the years themselves:\n number_of_years_to_drop = points_in_year[name][points_in_year['percent'] <= 99].count()\n percent_of_years_to_drop = 100.0 * \\\n number_of_years_to_drop / len(points_in_year)\n years_to_drop = 
points_in_year.index[points_in_year['percent'] <= 99]\n if verbose:\n print('for {}: found {} ({:.2f} %) bad years with {:.0f} % drop thresh.'.format(\n name, number_of_years_to_drop, percent_of_years_to_drop, 99))\n # now drop the days:\n for year_to_drop in years_to_drop:\n df = df[df['year'] != year_to_drop]\n if verbose:\n print('for {}: kept {} years.'.format(name, df['year'].unique().size))\n da = df[name].to_xarray()\n # add some more metadata:\n da.attrs['years_kept'] = sorted(df['year'].unique().tolist())\n da.attrs['years_total'] = tot_years\n da.attrs['years_dropped'] = number_of_years_to_drop\n da.attrs['years_dropped_percent'] = '{:.1f}'.format(percent_of_years_to_drop)\n return da\n\n#def assemble_semi_period(reduced_da_ts):\n# import numpy as np\n# import xarray as xr\n# period = [x for x in reduced_da_ts.dims][0]\n# if period == 'month':\n# plength = reduced_da_ts[period].size\n# mnth_arr = np.arange(1, 13)\n# mnth_splt = np.array_split(mnth_arr, int(12/plength))\n# vals = reduced_da_ts.values\n# vals_list = []\n# vals_list.append(vals)\n# for i in range(len(mnth_splt)-1):\n# vals_list.append(vals)\n# modified_reduced = xr.DataArray(np.concatenate(vals_list), dims=['month'])\n# modified_reduced['month'] = mnth_arr\n# return modified_reduced\n# elif period == 'hour':\n# plength = reduced_da_ts[period].size\n# hr_arr = np.arange(0, 24)\n# hr_splt = np.array_split(hr_arr, int(24/plength))\n# vals = reduced_da_ts.values\n# vals_list = []\n# vals_list.append(vals)\n# for i in range(len(hr_splt)-1):\n# vals_list.append(vals)\n# modified_reduced = xr.DataArray(np.concatenate(vals_list), dims=['hour'])\n# modified_reduced['hour'] = hr_arr\n# return modified_reduced\n#\n#\n#def groupby_semi_period(da_ts, period='6M'):\n# \"\"\"return an xarray DataArray with the semi period of 1 to 11 months or\n# 1 to 23 hours.\n# Input: period : string, first char is period length, second is frequency.\n# for now support is M for month and H for hour.\"\"\"\n# import numpy as np\n# df = da_ts.to_dataframe()\n# plength = [x for x in period if x.isdigit()]\n# if len(plength) == 1:\n# plength = int(plength[0])\n# elif len(plength) == 2:\n# plength = int(''.join(plength))\n# freq = [x for x in period if x.isalpha()][0]\n# print(plength, freq)\n# if freq == 'M':\n# if np.mod(12, plength) != 0:\n# raise('pls choose integer amounts, e.g., 3M, 4M, 6M...')\n# mnth_arr = np.arange(1, 13)\n# mnth_splt = np.array_split(mnth_arr, int(12 / plength))\n# rpld = {}\n# for i in range(len(mnth_splt) - 1):\n# rpld.update(dict(zip(mnth_splt[i + 1], mnth_splt[0])))\n# df['month'] = df.index.month\n# df['month'] = df['month'].replace(rpld)\n# month = df['month'].to_xarray()\n# return month\n# if freq == 'H':\n# if np.mod(24, plength) != 0:\n# raise('pls choose integer amounts, e.g., 6H, 8H, 12H...')\n# hr_arr = np.arange(0, 24)\n# hr_splt = np.array_split(hr_arr, int(24 / plength))\n# rpld = {}\n# for i in range(len(hr_splt) - 1):\n# rpld.update(dict(zip(hr_splt[i + 1], hr_splt[0])))\n# df['hour'] = df.index.hour\n# df['hour'] = df['hour'].replace(rpld)\n# hour = df['hour'].to_xarray()\n# return hour\n\n\ndef groupby_half_hour_xr(da_ts, reduce='mean'):\n import pandas as pd\n import numpy as np\n df = da_ts.to_dataframe()\n native_freq = pd.infer_freq(df.index)\n if not native_freq:\n raise('Cannot infer frequency...')\n if reduce == 'mean':\n df = df.groupby([df.index.hour, df.index.minute]).mean()\n elif reduce == 'std':\n df = df.groupby([df.index.hour, df.index.minute]).std()\n time = 
pd.date_range(start='1900-01-01', periods=df.index.size,\n freq=native_freq)\n df = df.set_index(time)\n df = df.resample('30T').mean()\n half_hours = np.arange(0, 24, 0.5)\n df.index = half_hours\n df.index.name = 'half_hour'\n ds = df.to_xarray()\n return ds\n\n\ndef groupby_date_xr(da_ts, time_dim='time'):\n df = da_ts[time_dim].to_dataframe()\n df['date'] = df.index.date\n date = df['date'].to_xarray()\n return date\n\n\ndef loess_curve(da_ts, time_dim='time', season=None, plot=True):\n from skmisc.loess import loess\n import matplotlib.pyplot as plt\n import xarray as xr\n import numpy as np\n if season is not None:\n da_ts = da_ts.sel({time_dim: da_ts[time_dim + '.season'] == season})\n x = da_ts.dropna(time_dim)[time_dim].values\n y = da_ts.dropna(time_dim).values\n l_obj = loess(x, y)\n l_obj.fit()\n pred = l_obj.predict(x, stderror=True)\n conf = pred.confidence()\n lowess = np.copy(pred.values)\n ll = np.copy(conf.lower)\n ul = np.copy(conf.upper)\n da_lowess = xr.Dataset()\n da_lowess['mean'] = xr.DataArray(lowess, dims=[time_dim])\n da_lowess['upper'] = xr.DataArray(ul, dims=[time_dim])\n da_lowess['lower'] = xr.DataArray(ll, dims=[time_dim])\n da_lowess[time_dim] = x\n if plot:\n plt.plot(x, y, '+')\n plt.plot(x, lowess)\n plt.fill_between(x, ll, ul, alpha=.33)\n plt.show()\n return da_lowess\n\n\ndef detrend_ts(da_ts, method='scipy', verbose=False,\n time_dim='time'):\n from scipy.signal import detrend\n import xarray as xr\n import pandas as pd\n if verbose:\n print('detrending using {}.'.format(method))\n if method == 'loess':\n trend = loess_curve(da_ts, plot=False)\n detrended = da_ts - trend['mean']\n detrended.name = da_ts.name\n elif method == 'scipy':\n freq = xr.infer_freq(da_ts[time_dim])\n y = da_ts.dropna(time_dim)\n y_detrended = y.copy(data=detrend(y))\n detrended = y_detrended\n start = pd.to_datetime(y_detrended[time_dim].isel({time_dim: 0}).item())\n end = pd.to_datetime(y_detrended[time_dim].isel({time_dim: -1}).item())\n new_time = pd.date_range(start, end, freq=freq)\n detrended = detrended.reindex({time_dim:new_time})\n return detrended\n\n\ndef autocorr_plot(da_ts, max_lag=40):\n import pandas as pd\n ser = pd.Series(da_ts)\n corrs = [ser.autocorr(lag=x) for x in range(0, max_lag)]\n lags = [x for x in range(0, max_lag)]\n lags_ser = pd.Series(corrs, index=lags)\n ax = lags_ser.plot(kind='bar', rot=0, figsize=(10, 5))\n return ax\n\n\ndef error_mean_rmse(y, y_pred):\n from sklearn.metrics import mean_squared_error\n import numpy as np\n mse = mean_squared_error(y.values, y_pred.values)\n rmse = np.sqrt(mse)\n mean = np.mean(y.values-y_pred.values)\n print('mean : {:.2f}, rmse : {:.2f}'.format(mean, rmse))\n return mean, rmse\n\n\ndef remove_suffix_from_ds(ds, sep='_'):\n import xarray as xr\n if not isinstance(ds, xr.Dataset):\n raise ValueError('input must be an xarray dataset object!')\n vnames = [x for x in ds.data_vars]\n new_names = [x.split(sep)[0] for x in ds.data_vars]\n name_dict = dict(zip(vnames, new_names))\n ds = ds.rename_vars(name_dict)\n return ds\n\n\ndef rename_data_vars(ds, suffix='_error', prefix=None,\n verbose=False):\n import xarray as xr\n if not isinstance(ds, xr.Dataset):\n raise ValueError('input must be an xarray dataset object!')\n vnames = [x for x in ds.data_vars]\n # if remove_suffix:\n # new_names = [x.replace(suffix, '') for x in ds.data_vars]\n if suffix is not None:\n new_names = [str(x) + suffix for x in ds.data_vars]\n if prefix is not None:\n new_names = [prefix + str(x) for x in ds.data_vars]\n name_dict = 
dict(zip(vnames, new_names))\n ds = ds.rename_vars(name_dict)\n if verbose:\n print('var names were added the suffix {}.'.format(suffix))\n return ds\n\n\ndef remove_duplicate_spaces_in_string(line):\n import re\n line_removed = \" \".join(re.split(\"\\s+\", line, flags=re.UNICODE))\n return line_removed\n\n\ndef save_ncfile(xarray, savepath, filename='temp.nc', engine=None, dtype=None,\n fillvalue=None):\n import xarray as xr\n print('saving {} to {}'.format(filename, savepath))\n if dtype is None:\n comp = dict(zlib=True, complevel=9, _FillValue=fillvalue) # best compression\n else:\n comp = dict(zlib=True, complevel=9, dtype=dtype, _FillValue=fillvalue) # best compression\n if isinstance(xarray, xr.Dataset):\n encoding = {var: comp for var in xarray}\n elif isinstance(xarray, xr.DataArray):\n encoding = {var: comp for var in xarray.to_dataset()}\n xarray.to_netcdf(savepath / filename, 'w', encoding=encoding, engine=engine)\n print('File saved!')\n return\n\n\ndef weighted_long_term_monthly_means_da(da_ts, plot=True):\n \"\"\"create a long term monthly means(climatology) from a dataarray time\n series with weights of items(mins,days etc..) per each month\n apperently, DataArray.groupby('time.month').mean('time') does exactely\n this... so this function is redundant\"\"\"\n import pandas as pd\n name = da_ts.name\n # first save attrs:\n attrs = da_ts.attrs\n try:\n df = da_ts.to_dataframe()\n except ValueError:\n name = 'name'\n df = da_ts.to_dataframe(name=name)\n df = df.dropna()\n df['month'] = df.index.month\n df['year'] = df.index.year\n cnt = df.groupby(['month', 'year']).count()[name].to_frame()\n cnt /= cnt.max()\n weights = pd.pivot_table(cnt, index='year', columns='month')\n dfmm = df.groupby(['month', 'year']).mean()[name].to_frame()\n dfmm = pd.pivot_table(dfmm, index='year', columns='month')\n # wrong:\n# weighted_monthly_means = dfmm * weights\n # normalize weights:\n wtag = weights / weights.sum(axis=0)\n weighted_clim = (dfmm*wtag).sum(axis=0).unstack().squeeze()\n # convert back to time-series:\n# df_ts = weighted_monthly_means.stack().reset_index()\n# df_ts['dt'] = df_ts.year.astype(str) + '-' + df_ts.month.astype(str)\n# df_ts['dt'] = pd.to_datetime(df_ts['dt'])\n# df_ts = df_ts.set_index('dt')\n# df_ts = df_ts.drop(['year', 'month'], axis=1)\n# df_ts.index.name = 'time'\n# da = df_ts[name].to_xarray()\n da = weighted_clim.to_xarray()\n da.attrs = attrs\n# da = xr_reindex_with_date_range(da, drop=True, freq='MS')\n if plot:\n da.plot()\n return da\n\n\ndef create_monthly_index(dt_da, period=6, unit='month'):\n import numpy as np\n pdict = {6: 'H', 4: 'T', 3: 'Q'}\n dt = dt_da.to_dataframe()\n if unit == 'month':\n dt[unit] = getattr(dt.index, unit)\n months = np.arange(1, 13)\n month_groups = np.array_split(months, len(months) / period)\n for i, month_grp in enumerate(month_groups):\n dt.loc[(dt['month'] >=month_grp[0]) & (dt['month'] <=month_grp[-1]), 'grp_months'] = '{}{}'.format(pdict.get(period), i+1)\n da = dt['grp_months'].to_xarray()\n return da\n\n\ndef compute_consecutive_events_datetimes(da_ts, time_dim='time',\n minimum_epochs=10):\n \"\"\"WARNING : for large xarrays it takes alot of time and memory!\"\"\"\n import pandas as pd\n import xarray as xr\n df = da_ts.notnull().to_dataframe()\n A = consecutive_runs(df, num=False)\n # filter out minimum consecutive epochs:\n if minimum_epochs is not None:\n A = A[A['total_True'] > minimum_epochs]\n dt_min = df.iloc[A['{}_True_start'.format(da_ts.name)]].index\n try:\n dt_max = 
df.iloc[A['{}_True_end'.format(da_ts.name)]].index\n except IndexError:\n dt_max = df.iloc[A['{}_True_end'.format(da_ts.name)][:-1]]\n end = pd.DataFrame(index=[df.index[-1]], data=[False],\n columns=[da_ts.name])\n dt_max = dt_max.append(end)\n dt_max = dt_max.index\n events = []\n print('done part1')\n for i_min, i_max in zip(dt_min, dt_max):\n events.append(da_ts.sel({time_dim: slice(i_min, i_max)}))\n events_da = xr.concat(events, 'event')\n events_da['event'] = range(len(events))\n return events_da\n\n\ndef multi_time_coord_slice(min_time, max_time, freq='5T', time_dim='time',\n name='general_group'):\n \"\"\"returns a datetimeindex array of the multi-time-coords slice defined by\n min_time, max_time vectors and freq.\"\"\"\n import pandas as pd\n import numpy as np\n assert len(min_time) == len(max_time)\n dates = [\n pd.date_range(\n start=min_time[i],\n end=max_time[i],\n freq=freq) for i in range(\n len(min_time))]\n dates = [pd.Series(np.ones(dates[i].shape, dtype=int) * i, index=dates[i]) for i in range(len(dates))]\n dates = pd.concat(dates)\n da = dates.to_xarray()\n da = da.rename({'index': time_dim})\n da.name = name\n return da\n\n\ndef calculate_g(lat):\n \"\"\"calculate the gravitational acceleration with lat in degrees\"\"\"\n import numpy as np\n g0 = 9.780325\n nom = 1.0 + 0.00193185 * np.sin(np.deg2rad(lat)) ** 2.0\n denom = 1.0 - 0.00669435 * np.sin(np.deg2rad(lat)) ** 2.0\n g = g0 * (nom / denom)**0.5\n return g\n\n\ndef find_consecutive_vals_df(df, col='class', val=7):\n import numpy as np\n bool_vals = np.where(df[col] == val, 1, 0)\n con_df = consecutive_runs(bool_vals, num=0)\n return con_df\n\n\ndef lat_mean(xarray, method='cos', dim='lat', copy_attrs=True):\n import numpy as np\n import xarray as xr\n\n def mean_single_da(da, dim=dim, method=method):\n if dim not in da.dims:\n return da\n if method == 'cos':\n weights = np.cos(np.deg2rad(da[dim].values))\n da_mean = (weights * da).sum(dim) / sum(weights)\n if copy_attrs:\n da_mean.attrs = da.attrs\n return da_mean\n\n xarray = xarray.transpose(..., 'lat')\n if isinstance(xarray, xr.DataArray):\n xarray = mean_single_da(xarray)\n elif isinstance(xarray, xr.Dataset):\n xarray = xarray.map(mean_single_da, keep_attrs=copy_attrs)\n return xarray\n\n\ndef consecutive_runs(arr, num=False):\n import numpy as np\n import pandas as pd\n \"\"\"get the index ranges (min, max) of the ~num condition.\n num can be 1 or 0 or True or False\"\"\"\n # Create an array that is 1 where a is num, and pad each end with an extra\n # 1.\n if isinstance(arr, pd.DataFrame):\n a = arr.squeeze().values\n name = arr.columns[0]\n elif isinstance(arr, np.ndarray):\n a = arr\n elif isinstance(arr, list):\n a = np.array(arr)\n if num == 'nan':\n isone = np.concatenate(([1], np.isnan(a).view(np.int8), [1]))\n else:\n isone = np.concatenate(([1], np.equal(a, num).view(np.int8), [1]))\n absdiff = np.abs(np.diff(isone))\n # Runs start and end where absdiff is 1.\n ranges = np.where(absdiff == 1)[0].reshape(-1, 2)\n A = pd.DataFrame(ranges)\n A['2'] = A.iloc[:, 1] - A.iloc[:, 0]\n if isinstance(arr, pd.DataFrame):\n if isinstance(num, bool):\n notnum = not num\n elif isinstance(num, int):\n notnum = 'not-{}'.format(num)\n elif num == 'nan':\n notnum = 'not-nan'\n A.columns = [\n '{}_{}_start'.format(\n name, notnum), '{}_{}_end'.format(\n name, notnum), 'total_{}'.format(notnum)]\n return A\n\n\ndef get_all_possible_combinations_from_list(li, reduce_single_list=True, combine_by_sep='+'):\n from itertools import combinations\n output = 
sum([list(map(list, combinations(li, i)))\n for i in range(len(li) + 1)], [])\n output = output[1:]\n if reduce_single_list:\n output = [x[0] if len(x) == 1 else x for x in output]\n if combine_by_sep is not None:\n for out in output:\n if isinstance(out, list):\n ind = output.index(out)\n output[ind] = '+'.join(out)\n return output\n\n\ndef gantt_chart(ds, fw='bold', ax=None, pe_dict=None, fontsize=14, linewidth=10,\n title='RINEX files availability for the Israeli GNSS stations',\n time_dim='time', antialiased=False, colors=None, grid=False,\n marker='x', marker_suffix='_tide'):\n import pandas as pd\n import matplotlib.pyplot as plt\n import numpy as np\n import seaborn as sns\n import matplotlib.dates as mdates\n from matplotlib.ticker import AutoMinorLocator\n import matplotlib.patheffects as pe\n # TODO: fix the ticks/ticks labels\n # sns.set_palette(sns.color_palette(\"tab10\", len(ds)))\n sns.set_palette(sns.color_palette(\"Dark2\", len(ds)))\n if ax is None:\n fig, ax = plt.subplots(figsize=(20, 6))\n names = []\n for x in ds:\n if marker_suffix in x:\n names.append('')\n else:\n names.append(x)\n # names = [x for x in ds]\n vals = range(1, len(ds) + 1)\n xmin = pd.to_datetime(ds[time_dim].min().values) - pd.Timedelta(1, unit='W')\n xmax = pd.to_datetime(ds[time_dim].max().values) + pd.Timedelta(1, unit='W')\n if colors is None:\n colors = plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"]\n# dt_min_list = []\n# dt_max_list = []\n for i, da in enumerate(ds):\n print(da)\n df = ds[da].notnull().to_dataframe()\n A = consecutive_runs(df, num=False)\n dt_min = df.iloc[A['{}_True_start'.format(da)]].index\n try:\n dt_max = df.iloc[A['{}_True_end'.format(da)]].index\n except IndexError:\n dt_max = df.iloc[A['{}_True_end'.format(da)][:-1]]\n end = pd.DataFrame(index=[df.index[-1]], data=[False], columns=[da])\n dt_max = dt_max.append(end)\n dt_max = dt_max.index\n y = len(ds) + 1 - np.ones(dt_min.shape) * (i + 1)\n y1 = len(ds) + 1 - np.ones(dt_min.shape) * (i + 0.5)\n# y_list.append(y)\n# dt_min_list.append(dt_min)\n# dt_max_list.append(dt_max)\n # v = int(calc(i, max = len(ds)))\n if marker_suffix in da:\n x = pd.to_datetime(ds[da].dropna('time')['time'].values)\n # print(x)\n ax.scatter(x, y1, color=colors[i], marker=marker, s=150)\n # ax.vlines(y, dt_min, dt_max, linewidth=1000, color=colors[i])\n else:\n if pe_dict is not None:\n ax.hlines(y, dt_min, dt_max, linewidth=linewidth, color=colors[i], path_effects=[pe.Stroke(linewidth=15, foreground='k'), pe.Normal()])\n else:\n ax.hlines(y, dt_min, dt_max, linewidth=linewidth, color=colors[i], antialiased=antialiased)\n #plt.show()\n # ds[da][~ds[da].isnull()] = i + 1\n # ds[da] = ds[da].fillna(0)\n if grid:\n ax.grid(True, axis='x')\n # yticks and their labels:\n ax.set_yticks(vals)\n ax.set_yticklabels(names[::-1], fontweight=fw, fontsize=fontsize)\n [ax.get_yticklabels()[i].set_color(colors[::-1][i]) for i in range(len(colors))]\n ax.set_xlim(xmin, xmax)\n # handle x-axis (time):\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.tick_params(which='major',\n direction='out',\n labeltop=False,\n labelbottom=True,\n top=False,\n bottom=True, left=True, labelsize=fontsize)\n ax.minorticks_on()\n ax.tick_params(which='minor',\n direction='out',\n labeltop=False,\n labelbottom=True,\n top=False,\n bottom=True, left=False)\n# ax.xaxis.set_minor_locator(mdates.YearLocator())\n# ax.xaxis.set_minor_formatter(mdates.DateFormatter(\"\\n%Y\"))\n plt.setp(ax.xaxis.get_majorticklabels(), rotation=30, ha='center',\n fontweight=fw, 
fontsize=fontsize)\n plt.setp(ax.xaxis.get_minorticklabels(), rotation=30, ha='center',\n fontweight=fw, fontsize=fontsize)\n # grid lines:\n# ax.grid(which='major', axis='x', linestyle='-', color='k')\n# ax.grid(which='minor', axis='x', linestyle='-', color='k')\n if title is not None:\n ax.set_title(title, fontsize=14, fontweight=fw)\n # fig.tight_layout()\n return ax\n\n\ndef time_series_stack_with_window(ts_da, time_dim='time',\n window='1D'):\n \"\"\"make it faster, much faster using isel and then cocant to dataset\n save also the datetimes\"\"\"\n import pandas as pd\n import xarray as xr\n\n window_dt = pd.Timedelta(window)\n freq = pd.infer_freq(ts_da[time_dim].values)\n if not any(i.isdigit() for i in freq):\n freq = '1' + freq\n freq_td = pd.Timedelta(freq)\n window_points = int(window_dt / freq_td)\n inds = []\n end_index = ts_da[time_dim].size - window_points\n index_to_run_over = range(0, end_index)\n for i in range(end_index):\n inds.append([i, i + window_points])\n arr_list = []\n arr_time_list = []\n ts_arr = ts_da.values\n ts_time_arr = ts_da[time_dim].values\n for ind in inds:\n arr_list.append(ts_arr[ind[0]: ind[1]])\n arr_time_list.append(ts_time_arr[ind[0]: ind[1]])\n ds = xr.Dataset()\n ds[ts_da.name] = xr.DataArray(arr_list, dims=['start_date', 'points'])\n ds[ts_da.name].attrs = ts_da.attrs\n ds[time_dim] = xr.DataArray(arr_time_list, dims=['start_date', 'points'])\n ds['start_date'] = ts_da.isel({time_dim: index_to_run_over})[time_dim].values\n ds['points'] = range(window_points)\n ds.attrs['freq'] = freq\n return ds\n\n\ndef get_RI_reg_combinations(dataset):\n \"\"\"return n+1 sized dataset of full regressors and median value regressors\"\"\"\n import xarray as xr\n\n def replace_dta_with_median(dataset, dta):\n ds = dataset.copy()\n ds[dta] = dataset[dta] - dataset[dta] + dataset[dta].median('time')\n ds.attrs['median'] = dta\n return ds\n if type(dataset) != xr.Dataset:\n return print('Input is xarray dataset only')\n ds_list = []\n ds_list.append(dataset)\n dataset.attrs['median'] = 'full_set'\n for da in dataset.data_vars:\n ds_list.append(replace_dta_with_median(dataset, da))\n return ds_list\n\n\ndef annual_standertize(data, time_dim='time', std_nan=1.0):\n \"\"\"just divide by the time.month std()\"\"\"\n attrs = data.attrs\n std_longterm = data.groupby('{}.month'.format(time_dim)).std(keep_attrs=True)\n if std_nan is not None:\n std_longterm = std_longterm.fillna(std_nan)\n data = data.groupby('{}.month'.format(time_dim)) / std_longterm\n data = data.reset_coords(drop=True)\n data.attrs.update(attrs)\n return data\n\n\ndef normalize_xr(data, time_dim='time', norm=1, down_bound=-1.,\n upper_bound=1., verbose=True):\n attrs = data.attrs\n avg = data.mean(time_dim, keep_attrs=True)\n sd = data.std(time_dim, keep_attrs=True)\n if norm == 0:\n data = data\n norm_str = 'No'\n elif norm == 1:\n data = (data-avg)/sd\n norm_str = '(data-avg)/std'\n elif norm == 2:\n data = (data-avg)/avg\n norm_str = '(data-avg)/avg'\n elif norm == 3:\n data = data/avg\n norm_str = '(data/avg)'\n elif norm == 4:\n data = data/sd\n norm_str = '(data)/std'\n elif norm == 5:\n dh = data.max()\n dl = data.min()\n # print dl\n data = (((data-dl)*(upper_bound-down_bound))/(dh-dl))+down_bound\n norm_str = 'mapped between ' + str(down_bound) + ' and ' + str(upper_bound)\n # print data\n if verbose:\n print('Data is ' + norm_str)\n elif norm == 6:\n data = data-avg\n norm_str = 'data-avg'\n if verbose and norm != 5:\n print('Preforming ' + norm_str + ' Normalization')\n data.attrs = attrs\n 
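# record which transformation was applied so downstream code can trace it\n    # (for example, norm=1 stores '(data-avg)/std').\n    # Minimal usage sketch, assuming da is an xr.DataArray with a 'time' dim:\n    #     standardized = normalize_xr(da, norm=1)  # z-scores\n    #     fractional = normalize_xr(da, norm=2)    # anomaly relative to the mean\n    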
data.attrs['Normalize'] = norm_str\n return data\n\n\ndef slice_task_date_range(files, date_range, task='non-specific'):\n from aux_gps import get_timedate_and_station_code_from_rinex\n import pandas as pd\n from pathlib import Path\n import logging\n \"\"\" return a slice files object (list of rfn Paths) with the correct\n within the desired date range\"\"\"\n logger = logging.getLogger('gipsyx')\n date_range = pd.to_datetime(date_range)\n logger.info(\n 'performing {} task within the dates: {} to {}'.format(task,\n date_range[0].strftime(\n '%Y-%m-%d'),\n date_range[1].strftime('%Y-%m-%d')))\n if not files:\n return files\n path = Path(files[0].as_posix().split('/')[0])\n rfns = [x.as_posix().split('/')[-1][0:12] for x in files]\n dts = get_timedate_and_station_code_from_rinex(rfns)\n rfn_series = pd.Series(rfns, index=dts)\n rfn_series = rfn_series.sort_index()\n mask = (rfn_series.index >= date_range[0]) & (\n rfn_series.index <= date_range[1])\n files = [path / x for x in rfn_series.loc[mask].values]\n return files\n\n\ndef geo_annotate(ax, lons, lats, labels, xytext=(3, 3), fmt=None, c='k',\n fw='normal', fs=None, colorupdown=False):\n for x, y, label in zip(lons, lats, labels):\n if colorupdown:\n if float(label) >= 0.0:\n c = 'r'\n elif float(label) < 0.0:\n c = 'b'\n if fmt is not None:\n annot = ax.annotate(fmt.format(label), xy=(x, y), xytext=xytext,\n textcoords=\"offset points\", color=c,\n fontweight=fw, fontsize=fs)\n else:\n annot = ax.annotate(label, xy=(x, y), xytext=xytext,\n textcoords=\"offset points\", color=c,\n fontweight=fw, fontsize=fs)\n return annot\n\n\ndef piecewise_linear_fit(da, k=1, plot=True):\n \"\"\"return dataarray with coords k \"piece\" indexing to k parts of\n datetime. k=None means get all datetime index\"\"\"\n import numpy as np\n import xarray as xr\n time_dim = list(set(da.dims))[0]\n time_no_nans = da.dropna(time_dim)[time_dim]\n time_pieces = np.array_split(time_no_nans.values, k)\n params = lmfit_params('line')\n best_values = []\n best_fits = []\n for piece in time_pieces:\n dap = da.sel({time_dim: piece})\n result = fit_da_to_model(dap, params, model_dict={'model_name': 'line'},\n method='leastsq', plot=False, verbose=False)\n best_values.append(result.best_values)\n best_fits.append(result.best_fit)\n bfs = np.concatenate(best_fits)\n tps = np.concatenate(time_pieces)\n da_final = xr.DataArray(bfs, dims=[time_dim])\n da_final[time_dim] = tps\n if plot:\n ax = plot_tmseries_xarray(da, points=True)\n for piece in time_pieces:\n da_final.sel({time_dim: piece}).plot(color='r', ax=ax)\n return da_final\n\n\ndef convert_wind_direction(u=None, v=None, ws=None, wd=None, verbose=False):\n \"\"\"\n\n\n Parameters\n ----------\n u : TYPE, optional\n zonal direction. The default is None.\n v : TYPE, optional\n meridional direction. The default is None.\n ws : TYPE, optional\n magnitude. The default is None.\n wd : TYPE, optional\n meteorological direction. The default is None.\n verbose : TYPE, optional\n DESCRIPTION. 
The default is False.\n\n Raises\n ------\n ValueError\n DESCRIPTION.\n\n Returns\n -------\n None.\n\n \"\"\"\n import numpy as np\n if (u is None and v is None) and (ws is not None and wd is not None):\n if verbose:\n print('converting from WS, WD to U, V')\n u = -ws*np.sin(np.deg2rad(wd))\n v = -ws*np.cos(np.deg2rad(wd))\n return u, v\n elif (u is not None and v is not None) and (ws is None and wd is None):\n if verbose:\n print('converting from U, V to WS, WD')\n wd = 180 + np.rad2deg(np.arctan2(u, v))\n ws = np.sqrt(u**2+v**2)\n return ws, wd\n else:\n raise ValueError('choose either ws and wd or u and v!')\n\n\ndef lmfit_params(model_name, k=None):\n from lmfit.parameter import Parameters\n sin_params = Parameters()\n # add with tuples: (NAME VALUE VARY MIN MAX EXPR BRUTE_STEP)\n amp = ['sin_amp', 50, True, None, None, None, None]\n phase = ['sin_phase', 0, True, None, None, None, None]\n # freq = ['sin_freq', 1/365.0, True, None, None, None]\n freq = ['sin_freq', 4, True, None, None, None]\n sin_params.add(*amp)\n sin_params.add(*phase)\n sin_params.add(*freq)\n line_params = Parameters()\n slope = ['line_slope', 1e-6, True, None, None, None, None]\n intercept = ['line_intercept', 58.6, True, None, None, None, None]\n line_params.add(*slope)\n line_params.add(*intercept)\n constant = Parameters()\n constant.add(*['constant', 40.0, True,None, None, None, None])\n if k is not None:\n sum_sin_params = Parameters()\n for mode in range(k):\n amp[0] = 'sin{}_amp'.format(mode)\n phase[0] = 'sin{}_phase'.format(mode)\n freq[0] = 'sin{}_freq'.format(mode)\n sum_sin_params.add(*amp)\n sum_sin_params.add(*phase)\n sum_sin_params.add(*freq)\n if model_name == 'sin_linear':\n return line_params + sin_params\n elif model_name == 'sin':\n return sin_params\n elif model_name == 'sin_constant':\n return sin_params + constant\n elif model_name == 'line':\n return line_params\n elif model_name == 'sum_sin' and k is not None:\n return sum_sin_params\n elif model_name == 'sum_sin_linear' and k is not None:\n return sum_sin_params + line_params\n\n\ndef fit_da_to_model(da, params=None, modelname='sin', method='leastsq', times=None, plot=True, verbose=True):\n \"\"\"options for modelname:'sin', 'sin_linear', 'line', 'sin_constant', and\n 'sum_sin'\"\"\"\n # for sum_sin or sum_sin_linear use model_dict={'model_name': 'sum_sin', k:3}\n # usage for params: you need to know the parameter names first:\n # modelname='sin', params=dict(sin_freq={'value':3},sin_amp={'value':0.3},sin_phase={'value':0})\n # fit_da_to_model(alon, modelname='sin', params=dict(sin_freq={'value':3},sin_amp={'value':0.3},sin_phase={'value':0}))\n import matplotlib.pyplot as plt\n import pandas as pd\n import xarray as xr\n time_dim = list(set(da.dims))[0]\n if times is not None:\n da = da.sel({time_dim: slice(*times)})\n lm = lmfit_model_switcher()\n lm.pick_model(modelname)\n lm.generate_params(**params)\n params = lm.params\n model = lm.model\n if verbose:\n print(model)\n print(params)\n jul, jul_no_nans = get_julian_dates_from_da(da)\n y = da.dropna(time_dim).values\n result = model.fit(**params, data=y, time=jul_no_nans, method=method)\n if not result.success:\n raise ValueError('model not fitted properly...')\n fit_y = result.eval(**result.best_values, time=jul)\n fit = xr.DataArray(fit_y, dims=time_dim)\n fit[time_dim] = da[time_dim]\n fit.name = da.name + '_fit'\n p = {}\n for name, param in result.params.items():\n p[name] = [param.value, param.stderr]\n fit.attrs.update(**p)\n # return fit\n if verbose:\n 
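# result.best_values maps each lmfit parameter name (e.g. sin_amp, sin_freq,\n        # sin_phase for the 'sin' model) to its fitted value; shown for quick inspection.\n        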
print(result.best_values)\n if plot:\n fig, ax = plt.subplots(figsize=(8, 6))\n da.plot.line(marker='.', linewidth=0., color='b', ax=ax)\n dt = pd.to_datetime(da[time_dim].values)\n ax.plot(dt, fit_y, c='r')\n plt.legend(['data', 'fit'])\n return fit\n\n\ndef fit_da_ts_to_sine_model(da_ts, init_freq=1/366, verbose=False, plot=True):\n \"\"\"\n Use lmfit MySineModel class to fit time series in DataArray\n\n Parameters\n ----------\n da_ts : TYPE\n DESCRIPTION.\n plot : TYPE, optional\n DESCRIPTION. The default is True.\n\n Returns\n -------\n fitted_ds : Xarray Dataset\n DESCRIPTION.\n\n \"\"\"\n import xarray as xr\n import matplotlib.pyplot as plt\n import pandas as pd\n model = pick_lmfit_model(name='sine')\n time_dim = list(set(da_ts.dims))[0]\n jul, jul_no_nans = get_julian_dates_from_da(da_ts)\n y = da_ts.dropna(time_dim).values\n params = model.guess(data=y, freq=init_freq)\n if verbose:\n print(model)\n print(params)\n result = model.fit(data=y, params=params, x=jul_no_nans)\n if not result.success:\n raise ValueError('model not fitted properly...')\n fit_y = result.eval(**result.best_values, x=jul)\n fit = xr.DataArray(fit_y, dims=[time_dim])\n fit[time_dim] = da_ts[time_dim]\n fit.name = da_ts.name + '_fit'\n p = {}\n for name, param in result.params.items():\n p[name] = [param.value, param.stderr]\n fit.attrs.update(**p)\n # return fit\n if verbose:\n print(result.best_values)\n if plot:\n fig, ax = plt.subplots(figsize=(8, 6))\n da_ts.plot.line(marker='.', linewidth=0., color='b', ax=ax)\n dt = pd.to_datetime(da_ts[time_dim].values)\n ax.plot(dt, fit_y, c='r')\n plt.legend(['data', 'fit'])\n return fit\n\n\ndef get_julian_dates_from_da(da, subtract='first'):\n \"\"\"transform the time dim of a dataarray to julian dates(days since)\"\"\"\n import pandas as pd\n import numpy as np\n # get time dim:\n time_dim = list(set(da.dims))[0]\n # convert to days since 2000 (julian_date):\n jul = pd.to_datetime(da[time_dim].values).to_julian_date()\n # normalize all days to first entry:\n if subtract == 'first':\n first_day = jul[0]\n jul -= first_day\n elif subtract == 'median':\n med = np.median(jul)\n jul -= med\n # do the same but without nans:\n jul_no_nans = pd.to_datetime(\n da.dropna(time_dim)[time_dim].values).to_julian_date()\n if subtract == 'first':\n jul_no_nans -= first_day\n elif subtract == 'median':\n jul_no_nans -= med\n return jul.values, jul_no_nans.values\n\n\ndef lomb_scargle_xr(da_ts, units='cpy', user_freq='MS', plot=True, kwargs=None):\n from astropy.timeseries import LombScargle\n import pandas as pd\n import xarray as xr\n time_dim = list(set(da_ts.dims))[0]\n sp_str = pd.infer_freq(da_ts[time_dim].values)\n if not sp_str:\n print('using user-defined freq: {}'.format(user_freq))\n sp_str = user_freq\n if units == 'cpy':\n # cycles per year:\n freq_dict = {'MS': 12, 'D': 365.25, 'H': 8766}\n long_name = 'Cycles per Year'\n elif units == 'cpd':\n # cycles per day:\n freq_dict = {'H': 24}\n long_name = 'Cycles per Day'\n t = [x for x in range(da_ts[time_dim].size)]\n y = da_ts.values\n lomb_kwargs = {'samples_per_peak': 10, 'nyquist_factor': 2}\n if kwargs is not None:\n lomb_kwargs.update(kwargs)\n freq, power = LombScargle(t, y).autopower(**lomb_kwargs)\n unit_freq = freq_dict.get(sp_str)\n da = xr.DataArray(power, dims=['freq'])\n da['freq'] = freq * unit_freq\n da.attrs['long_name'] = 'Power from LombScargle'\n da.name = '{}_power'.format(da_ts.name)\n da['freq'].attrs['long_name'] = long_name\n if plot:\n da.plot()\n return da\n\n\ndef fft_xr(xarray, 
method='fft', units='cpy', nan_fill='mean', user_freq='MS',\n plot=True):\n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n import xarray as xr\n from scipy import signal\n# import matplotlib\n# matplotlib.rcParams['text.usetex'] = True\n\n def fft_da(da, units, nan_fill, periods):\n time_dim = list(set(da.dims))[0]\n try:\n p_units = da.attrs['units']\n except KeyError:\n p_units = 'amp'\n if nan_fill == 'mean':\n x = da.fillna(da.mean(time_dim))\n elif nan_fill == 'zero':\n x = da.fillna(0)\n # infer freq of time series:\n sp_str = pd.infer_freq(x[time_dim].values)\n if user_freq is None:\n if not sp_str:\n raise Exception('Didnt find a frequency for {}, check for nans!'.format(da.name))\n if len(sp_str) > 1:\n mul = [char for char in sp_str if char.isdigit()]\n sp_str = ''.join([char for char in sp_str if char.isalpha()])\n if not mul:\n mul = 1\n else:\n if len(mul) > 1:\n mul = int(''.join(mul))\n else:\n mul = int(mul[0])\n period = sp_str\n elif len(sp_str) == 1:\n mul = 1\n period = sp_str[0]\n p_name = periods[period][0]\n p_val = mul * periods[period][1]\n print('found {} {} frequency in {} time-series'.format(mul, p_name, da.name))\n else:\n p_name = periods[user_freq][0]\n # number of seconds in freq units in time-series:\n p_val = periods[user_freq][1]\n print('using user freq of {}'.format(user_freq))\n print('sample rate in seconds: {}'.format(p_val))\n if method == 'fft':\n # run fft:\n p = 20 * np.log10(np.abs(np.fft.rfft(x, n=None)))\n f = np.linspace(0, (1 / p_val) / 2, len(p))\n elif method == 'welch':\n f, p = signal.welch(x, 1e-6, 'hann', 1024, scaling='spectrum')\n if units == 'cpy':\n unit_freq = 1.0 / periods['Y'][1] # in Hz\n print('unit_freq: cycles per year ({} seconds)'.format(periods['Y'][1]))\n elif units == 'cpd':\n unit_freq = 1.0 / periods['D'][1] # in Hz\n print('unit_freq: cycles per day ({} seconds)'.format(periods['D'][1]))\n # unit_freq_in_time_series = unit_freq * p_val # in Hz\n # f = np.linspace(0, unit_freq_in_time_series / 2, len(p))\n f_in_unit_freq = f / unit_freq\n p_units = '{}^2/{}'.format(p_units, units)\n power = xr.DataArray(p, dims=['freq'])\n power.name = da.name\n power['freq'] = f_in_unit_freq\n power['freq'].attrs['long_name'] = 'Frequency'\n power['freq'].attrs['units'] = units\n power.attrs['long_name'] = 'Power'\n power.attrs['units'] = p_units\n return power\n\n periods = {'N': ['nanoseconds', 1e-9],\n 'U': ['microseconds', 1e-6],\n 'us': ['microseconds', 1e-6],\n 'L': ['milliseconds', 1e-3],\n 'ms': ['milliseconds', 1e-3],\n 'T': ['minutes', 60.0],\n '5T': ['minutes', 300.0],\n 'min': ['minutes', 60.0],\n 'H': ['hours', 3600.0],\n 'D': ['days', 86400.0],\n 'W': ['weeks', 604800.0],\n 'MS': ['months', 86400.0 * 30],\n 'Y': ['years', 86400.0 * 365.25]\n }\n if isinstance(xarray, xr.DataArray):\n power = fft_da(xarray, units, nan_fill, periods)\n if plot:\n fig, ax = plt.subplots(figsize=(6, 8))\n power.plot.line(ax=ax, xscale='log', yscale='log')\n ax.grid()\n return power\n elif isinstance(xarray, xr.Dataset):\n p_list = []\n for da in xarray:\n p_list.append(fft_da(xarray[da], units, nan_fill, periods))\n ds = xr.merge(p_list)\n da_from_ds = ds.to_array(dim='station')\n try:\n ds.attrs['full_name'] = 'Power spectra for {}'.format(xarray.attrs['full_name'])\n except KeyError:\n pass\n elif isinstance(xarray, list):\n p_list = []\n for da in xarray:\n p_list.append(fft_da(da, units, nan_fill, periods))\n ds = xr.merge(p_list, compat='override')\n da_from_ds = ds.to_array(dim='epochs')\n try:\n 
ds.attrs['full_name'] = 'Power spectra for {}'.format(da.attrs['full_name'])\n except KeyError:\n pass\n if plot:\n da_mean = da_from_ds.mean('epochs')\n da_mean.attrs = da_from_ds.attrs\n # da_from_ds.plot.line(xscale='log', yscale='log', hue='station')\n fig, ax = plt.subplots(figsize=(8, 6))\n da_mean.plot.line(ax=ax, xscale='log', yscale='log')\n ax.grid()\n return ds\n return\n\n\ndef standard_error_slope(X, y):\n \"\"\" get the standard error of the slope of the linear regression,\n works in the case that X is a vector only\"\"\"\n import numpy as np\n ssxm, ssxym, ssyxm, ssym = np.cov(X, y, bias=1).flat\n r_num = ssxym\n r_den = np.sqrt(ssxm * ssym)\n if r_den == 0.0:\n r = 0.0\n else:\n r = r_num / r_den\n n = len(X)\n df = n - 2\n sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)\n return sterrest\n\n\ndef tar_dir(files_with_path_to_tar, filename, savepath, compresslevel=9,\n with_dir_struct=False, verbose=False):\n import tarfile as tr\n \"\"\" compresses all glob_str_to_tar files (e.g., *.txt) in path_to_tar,\n and save it all to savepath with filename as filename. by default adds .tar\n suffix if not supplied by user. control compression level with\n compresslevel (i.e., None means no compression).\"\"\"\n def aname(file, arcname):\n if arcname is None:\n return None\n else:\n return file.as_posix().split('/')[-1]\n\n path_to_tar = files_with_path_to_tar[0].as_posix().split('/')[0]\n if len(filename.split('.')) < 2:\n filename += '.tar'\n if verbose:\n print('added .tar suffix to {}'.format(filename.split('.'[0])))\n else:\n filename = filename.split('.')[0]\n filename += '.tar'\n if verbose:\n print('changed suffix to tar')\n tarfile = savepath / filename\n if compresslevel is None:\n tar = tr.open(tarfile, \"w\")\n else:\n tar = tr.open(tarfile, \"w:gz\", compresslevel=compresslevel)\n if not with_dir_struct:\n arcname = True\n if verbose:\n print('files were archived without directory structure')\n else:\n arcname = None\n if verbose:\n print('files were archived with {} dir structure'.format(path_to_tar))\n total = len(files_with_path_to_tar)\n print('Found {} files to tar in dir {}'.format(total, path_to_tar))\n cnt = 0\n for file in files_with_path_to_tar:\n tar.add(file, arcname=aname(file, arcname=arcname))\n cnt += 1\n# if np.mod(cnt, 10) == 0:\n# print('.', end=\" \")\n tar.close()\n print('Compressed all files in {} to {}'.format(\n path_to_tar, savepath / filename))\n return\n\n\ndef query_yes_no(question, default=\"no\"):\n \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is True for \"yes\" or False for \"no\".\n \"\"\"\n import sys\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n\ndef get_var(varname):\n \"\"\"get a linux shell var (without the $)\"\"\"\n 
import subprocess\n CMD = 'echo $%s' % varname\n p = subprocess.Popen(\n CMD,\n stdout=subprocess.PIPE,\n shell=True,\n executable='/bin/bash')\n out = p.stdout.readlines()[0].strip().decode(\"utf-8\")\n if len(out) == 0:\n return None\n else:\n return out\n\n\ndef plot_tmseries_xarray(ds, fields=None, points=False, error_suffix='_error',\n errorbar_alpha=0.5, trend_suffix='_trend'):\n \"\"\"plot time-series plot w/o errorbars of a xarray dataset\"\"\"\n import numpy as np\n import matplotlib.pyplot as plt\n import xarray as xr\n if points:\n ma = '.' # marker\n lw = 0. # linewidth\n else:\n ma = None # marker\n lw = 1.0 # linewidth\n if isinstance(ds, xr.DataArray):\n ds = ds.to_dataset()\n# if len(ds.dims) > 1:\n# raise ValueError('Number of dimensions in Dataset exceeds 1!')\n if isinstance(fields, str):\n fields = [fields]\n error_fields = [x for x in ds.data_vars if error_suffix in x]\n trend_fields = [x for x in ds.data_vars if trend_suffix in x]\n if fields is None and error_fields:\n all_fields = [x for x in ds.data_vars if error_suffix not in x]\n elif fields is None and trend_fields:\n all_fields = [x for x in ds.data_vars if trend_suffix not in x]\n elif fields is None and not error_fields:\n all_fields = [x for x in ds.data_vars]\n elif fields is None and not trend_fields:\n all_fields = [x for x in ds.data_vars]\n elif fields is not None and isinstance(fields, list):\n all_fields = sorted(fields)\n time_dim = list(set(ds[all_fields].dims))[0]\n if len(all_fields) == 1:\n da = ds[all_fields[0]]\n ax = da.plot(figsize=(20, 4), color='b', marker=ma, linewidth=lw)[0].axes\n ax.grid()\n if error_fields:\n print('adding errorbars fillbetween...')\n error = da.name + error_suffix\n ax.fill_between(da[time_dim].values, da.values - ds[error].values,\n da.values + ds[error].values,\n where=np.isfinite(da.values),\n alpha=errorbar_alpha)\n if trend_fields:\n print('adding trends...')\n trend = da.name + trend_suffix\n da[trend].plot(ax=ax, color='r')\n trend_attr = [x for x in da[trend].attrs.keys()\n if 'trend' in x][0]\n if trend_attr:\n trend_str = trend_attr.split('>')[-1]\n trend_val = da[trend].attrs[trend_attr]\n ax.text(0.1, 0.9, '{}: {:.2f}'.format(trend_str, trend_val),\n horizontalalignment='center',\n verticalalignment='top', color='green', fontsize=15,\n transform=ax.transAxes)\n ax.grid(True)\n ax.set_title(da.name)\n plt.tight_layout()\n plt.subplots_adjust(top=0.93)\n return ax\n else:\n da = ds[all_fields].to_array('var')\n fg = da.plot(row='var', sharex=True, sharey=False, figsize=(20, 15),\n hue='var', color='k', marker=ma, linewidth=lw)\n for i, (ax, field) in enumerate(zip(fg.axes.flatten(), all_fields)):\n ax.grid(True)\n if error_fields:\n print('adding errorbars fillbetween...')\n ax.fill_between(da[time_dim].values,\n da.sel(var=field).values - ds[field + error_suffix].values,\n da.sel(var=field).values + ds[field + error_suffix].values,\n where=np.isfinite(da.sel(var=field).values),\n alpha=errorbar_alpha)\n if trend_fields:\n print('adding trends...')\n ds[field + trend_suffix].plot(ax=ax, color='r')\n trend_attr = [x for x in ds[field + trend_suffix].attrs.keys()\n if 'trend' in x][0]\n if trend_attr:\n trend_str = trend_attr.split('>')[-1]\n trend_val = ds[field + trend_suffix].attrs[trend_attr]\n ax.text(0.1, 0.9, '{}: {:.2f}'.format(trend_str, trend_val),\n horizontalalignment='center',\n verticalalignment='top', color='green', fontsize=15,\n transform=ax.transAxes)\n try:\n ax.set_ylabel('[' + ds[field].attrs['units'] + ']')\n except KeyError:\n pass\n 
ax.lines[0].set_color('C{}'.format(i))\n ax.grid(True)\n # fg.fig.suptitle()\n fg.fig.subplots_adjust(left=0.1, top=0.93)\n return fg\n\n\ndef flip_xy_axes(ax, ylim=None):\n if ylim is None:\n new_y_lim = ax.get_xlim()\n else:\n new_y_lim = ylim\n new_x_lim = ax.get_ylim()\n ylabel = ax.get_xlabel()\n xlabel = ax.get_ylabel()\n newx = ax.lines[0].get_ydata()\n newy = ax.lines[0].get_xdata()\n # set new x- and y- data for the line\n # ax.margins(y=0)\n ax.lines[0].set_xdata(newx)\n ax.lines[0].set_ydata(newy)\n ax.set_xlim(new_x_lim)\n ax.set_ylim(new_y_lim)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.invert_xaxis()\n ax.invert_yaxis()\n ax.invert_yaxis()\n return ax\n\n\ndef choose_time_groupby_arg(da_ts, time_dim='time', grp='hour'):\n if grp != 'date':\n grp_arg = '{}.{}'.format(time_dim, grp)\n else:\n grp_arg = groupby_date_xr(da_ts)\n return grp_arg\n\n\ndef time_series_stack(time_da, time_dim='time', grp1='hour', grp2='month',\n return_just_stacked_da=False):\n \"\"\"Takes a time-series xr.DataArray objects and reshapes it using\n grp1 and grp2. output is a xr.Dataset that includes the reshaped DataArray\n , its datetime-series and the grps.\"\"\"\n import xarray as xr\n import pandas as pd\n # try to infer the freq and put it into attrs for later reconstruction:\n freq = pd.infer_freq(time_da[time_dim].values)\n name = time_da.name\n time_da.attrs['freq'] = freq\n attrs = time_da.attrs\n # drop all NaNs:\n time_da = time_da.dropna(time_dim)\n # first grouping:\n grp1_arg = choose_time_groupby_arg(time_da, time_dim=time_dim, grp=grp1)\n grp_obj1 = time_da.groupby(grp1_arg)\n da_list = []\n t_list = []\n for grp1_name, grp1_inds in grp_obj1.groups.items():\n da = time_da.isel({time_dim: grp1_inds})\n if grp2 is not None:\n # second grouping:\n grp2_arg = choose_time_groupby_arg(time_da, time_dim=time_dim, grp=grp2)\n grp_obj2 = da.groupby(grp2_arg)\n for grp2_name, grp2_inds in grp_obj2.groups.items():\n da2 = da.isel({time_dim: grp2_inds})\n # extract datetimes and rewrite time coord to 'rest':\n times = da2[time_dim]\n times = times.rename({time_dim: 'rest'})\n times.coords['rest'] = range(len(times))\n t_list.append(times)\n da2 = da2.rename({time_dim: 'rest'})\n da2.coords['rest'] = range(len(da2))\n da_list.append(da2)\n else:\n times = da[time_dim]\n times = times.rename({time_dim: 'rest'})\n times.coords['rest'] = range(len(times))\n t_list.append(times)\n da = da.rename({time_dim: 'rest'})\n da.coords['rest'] = range(len(da))\n da_list.append(da)\n # get group keys:\n grps1 = [x for x in grp_obj1.groups.keys()]\n if grp2 is not None:\n grps2 = [x for x in grp_obj2.groups.keys()]\n # concat and convert to dataset:\n stacked_ds = xr.concat(da_list, dim='all').to_dataset(name=name)\n stacked_ds[time_dim] = xr.concat(t_list, 'all')\n if grp2 is not None:\n # create a multiindex for the groups:\n mindex = pd.MultiIndex.from_product([grps1, grps2], names=[grp1, grp2])\n stacked_ds.coords['all'] = mindex\n else:\n # create a multiindex for first group only:\n mindex = pd.MultiIndex.from_product([grps1], names=[grp1])\n stacked_ds.coords['all'] = mindex\n # unstack:\n # ds = stacked_ds.unstack('all')[time_da.name]\n ds = stacked_ds.unstack('all')\n if return_just_stacked_da:\n ds = ds[time_da.name]\n ds.attrs = attrs\n# if plot:\n# plot_stacked_time_series(ds[name].mean('rest', keep_attrs=True))\n return ds\n\n\ndef plot_stacked_time_series(stacked_da):\n import matplotlib.pyplot as plt\n import matplotlib.ticker as tck\n import numpy as np\n try:\n units = 
stacked_da.attrs['units']\n except KeyError:\n units = ''\n try:\n station = stacked_da.attrs['station']\n except KeyError:\n station = ''\n try:\n name = stacked_da.name\n except KeyError:\n name = ''\n SMALL_SIZE = 12\n MEDIUM_SIZE = 16\n BIGGER_SIZE = 18\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n # plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n # plt.rc('text', usetex=True)\n grp1_mean = stacked_da.mean(stacked_da.dims[0])\n grp2_mean = stacked_da.mean(stacked_da.dims[1])\n fig = plt.figure(figsize=(16, 10), dpi=80)\n grid = plt.GridSpec(\n 2, 2, width_ratios=[\n 1, 4], height_ratios=[\n 5, 1], wspace=0, hspace=0)\n # grid = plt.GridSpec(2, 2, hspace=0.5, wspace=0.2)\n# ax_main = fig.add_subplot(grid[:-1, :-1])\n# ax_left = fig.add_subplot(grid[:-1, 0], xticklabels=[], yticklabels=[])\n# ax_bottom = fig.add_subplot(grid[-1, 0:-1], xticklabels=[], yticklabels=[])\n ax_main = fig.add_subplot(grid[0, 1])\n ax_left = fig.add_subplot(grid[0, 0])\n ax_left.grid()\n ax_bottom = fig.add_subplot(grid[1, 1])\n ax_bottom.grid()\n pcl = stacked_da.T.plot.contourf(\n ax=ax_main, add_colorbar=False, cmap=plt.cm.get_cmap(\n 'viridis', 41), levels=41)\n ax_main.xaxis.set_minor_locator(tck.AutoMinorLocator())\n ax_main.tick_params(\n direction='out',\n top='on',\n bottom='off',\n left='off',\n right='on',\n labelleft='off',\n labelbottom='off',\n labeltop='on',\n labelright='on',\n which='major')\n ax_main.tick_params(\n direction='out',\n top='on',\n bottom='off',\n left='off',\n right='on',\n which='minor')\n ax_main.grid(\n True,\n which='major',\n axis='both',\n linestyle='-',\n color='k',\n alpha=0.2)\n ax_main.grid(\n True,\n which='minor',\n axis='both',\n linestyle='--',\n color='k',\n alpha=0.2)\n ax_main.tick_params(\n top='on',\n bottom='off',\n left='off',\n right='on',\n labelleft='off',\n labelbottom='off',\n labeltop='on',\n labelright='on')\n bottom_limit = ax_main.get_xlim()\n left_limit = ax_main.get_ylim()\n grp1_mean.plot(ax=ax_left)\n grp2_mean.plot(ax=ax_bottom)\n ax_bottom.set_xlim(bottom_limit)\n ax_left = flip_xy_axes(ax_left, left_limit)\n ax_bottom.set_ylabel(r'${}$'.format(units), fontsize=12)\n ax_left.set_xlabel(r'${}$'.format(units), fontsize=12)\n fig.subplots_adjust(right=0.8)\n # divider = make_axes_locatable(ax_main)\n # cax1 = divider.append_axes(\"right\", size=\"5%\", pad=0.2)\n # [left, bottom, width, height] of figure:\n cbar_ax = fig.add_axes([0.85, 0.15, 0.02, 0.75])\n # fig.colorbar(pcl, orientation=\"vertical\", pad=0.2, label=units)\n pcl_ticks = np.linspace(\n stacked_da.min().item(),\n stacked_da.max().item(),\n 11)\n cbar = fig.colorbar(\n pcl,\n cax=cbar_ax,\n label=r'${}$'.format(units),\n ticks=pcl_ticks)\n cbar.set_ticklabels(['{:.1f}'.format(x) for x in pcl_ticks])\n title = ' '.join([name, station])\n fig.suptitle(title, fontweight='bold', fontsize=15)\n return fig\n\n\ndef time_series_stack_decrapeted(time_da, time_dim='time', grp1='hour', grp2='month'):\n \"\"\"Takes a time-series xr.DataArray objects and reshapes it using\n grp1 and grp2. 
outout is a xr.Dataset that includes the reshaped DataArray\n , its datetime-series and the grps.\"\"\"\n import xarray as xr\n import numpy as np\n import pandas as pd\n # try to infer the freq and put it into attrs for later reconstruction:\n freq = pd.infer_freq(time_da[time_dim].values)\n name = time_da.name\n time_da.attrs['freq'] = freq\n attrs = time_da.attrs\n # drop all NaNs:\n time_da = time_da.dropna(time_dim)\n # group grp1 and concat:\n grp_obj1 = time_da.groupby(time_dim + '.' + grp1)\n s_list = []\n for grp_name, grp_inds in grp_obj1.groups.items():\n da = time_da.isel({time_dim: grp_inds})\n s_list.append(da)\n grps1 = [x for x in grp_obj1.groups.keys()]\n stacked_da = xr.concat(s_list, dim=grp1)\n stacked_da[grp1] = grps1\n # group over the concatenated da and concat again:\n grp_obj2 = stacked_da.groupby(time_dim + '.' + grp2)\n s_list = []\n for grp_name, grp_inds in grp_obj2.groups.items():\n da = stacked_da.isel({time_dim: grp_inds})\n s_list.append(da)\n grps2 = [x for x in grp_obj2.groups.keys()]\n stacked_da = xr.concat(s_list, dim=grp2)\n stacked_da[grp2] = grps2\n # numpy part:\n # first, loop over both dims and drop NaNs, append values and datetimes:\n vals = []\n dts = []\n for grp1_val in stacked_da[grp1]:\n da = stacked_da.sel({grp1: grp1_val})\n for grp2_val in da[grp2]:\n val = da.sel({grp2: grp2_val}).dropna(time_dim)\n vals.append(val.values)\n dts.append(val[time_dim].values)\n # second, we get the max of the vals after the second groupby:\n max_size = max([len(x) for x in vals])\n # we fill NaNs and NaT for the remainder of them:\n concat_sizes = [max_size - len(x) for x in vals]\n concat_arrys = [np.empty((x)) * np.nan for x in concat_sizes]\n concat_vals = [np.concatenate(x) for x in list(zip(vals, concat_arrys))]\n # 1970-01-01 is the NaT for this time-series:\n concat_arrys = [np.zeros((x), dtype='datetime64[ns]')\n for x in concat_sizes]\n concat_dts = [np.concatenate(x) for x in list(zip(dts, concat_arrys))]\n concat_vals = np.array(concat_vals)\n concat_dts = np.array(concat_dts)\n # finally , we reshape them:\n concat_vals = concat_vals.reshape((stacked_da[grp1].shape[0],\n stacked_da[grp2].shape[0],\n max_size))\n concat_dts = concat_dts.reshape((stacked_da[grp1].shape[0],\n stacked_da[grp2].shape[0],\n max_size))\n # create a Dataset and DataArrays for them:\n sda = xr.Dataset()\n sda.attrs = attrs\n sda[name] = xr.DataArray(concat_vals, dims=[grp1, grp2, 'rest'])\n sda[time_dim] = xr.DataArray(concat_dts, dims=[grp1, grp2, 'rest'])\n sda[grp1] = grps1\n sda[grp2] = grps2\n sda['rest'] = range(max_size)\n return sda\n\n\n#def time_series_stack2(time_da, time_dim='time', grp1='hour', grp2='month',\n# plot=True):\n# \"\"\"produces a stacked plot with two groupings for a time-series\"\"\"\n# import xarray as xr\n# import matplotlib.pyplot as plt\n# import numpy as np\n# import matplotlib.ticker as tck\n# grp_obj1 = time_da.groupby(time_dim + '.' + grp1)\n# s_list = []\n# for grp_name, grp_inds in grp_obj1.groups.items():\n# da = time_da.isel({time_dim: grp_inds})\n# # da = da.rename({time_dim: grp + '_' + str(grp_name)})\n# # da.name += '_' + grp + '_' + str(grp_name)\n# s_list.append(da)\n# grps1 = [x for x in grp_obj1.groups.keys()]\n# stacked_da = xr.concat(s_list, dim=grp1)\n# stacked_da[grp1] = grps1\n# s_list = []\n# for grp_val in grps1:\n# da = stacked_da.sel({grp1: grp_val}).groupby(time_dim + '.' 
+ grp2).mean()\n# s_list.append(da)\n# stacked_da2 = xr.concat(s_list, dim=grp1)\n# if plot:\n# try:\n# units = time_da.attrs['units']\n# except KeyError:\n# units = ''\n# try:\n# station = time_da.attrs['station']\n# except KeyError:\n# station = ''\n# try:\n# name = time_da.name\n# except KeyError:\n# name = ''\n# SMALL_SIZE = 12\n# MEDIUM_SIZE = 16\n# BIGGER_SIZE = 18\n# plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n# plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n# plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n# plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n# plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n# plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n# plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n# grp1_mean = stacked_da2.mean(grp1)\n# grp2_mean = stacked_da2.mean(grp2)\n# fig = plt.figure(figsize=(16, 10), dpi=80)\n# grid = plt.GridSpec(2, 2, width_ratios=[1, 4], height_ratios=[5, 1], wspace=0, hspace=0)\n# # grid = plt.GridSpec(2, 2, hspace=0.5, wspace=0.2)\n## ax_main = fig.add_subplot(grid[:-1, :-1])\n## ax_left = fig.add_subplot(grid[:-1, 0], xticklabels=[], yticklabels=[])\n## ax_bottom = fig.add_subplot(grid[-1, 0:-1], xticklabels=[], yticklabels=[])\n# ax_main = fig.add_subplot(grid[0, 1])\n# ax_left = fig.add_subplot(grid[0, 0])\n# ax_left.grid()\n# ax_bottom = fig.add_subplot(grid[1, 1])\n# ax_bottom.grid()\n# pcl = stacked_da2.T.plot.pcolormesh(ax = ax_main, add_colorbar=False, cmap=plt.cm.get_cmap('viridis', 19), snap=True)\n# ax_main.xaxis.set_minor_locator(tck.AutoMinorLocator())\n# ax_main.tick_params(direction='out', top='on', bottom='off', left='off', right='on', labelleft='off', labelbottom='off', labeltop='on', labelright='on', which='major')\n# ax_main.tick_params(direction='out', top='on', bottom='off', left='off', right='on', which='minor')\n# ax_main.grid(True, which='major', axis='both', linestyle='-', color='k', alpha=0.2)\n# ax_main.grid(True, which='minor', axis='both', linestyle='--', color='k', alpha=0.2)\n# ax_main.tick_params(top='on', bottom='off', left='off', right='on', labelleft='off', labelbottom='off', labeltop='on', labelright='on')\n# bottom_limit = ax_main.get_xlim()\n# left_limit = ax_main.get_ylim()\n# grp1_mean.plot(ax=ax_left)\n# grp2_mean.plot(ax=ax_bottom)\n# ax_bottom.set_xlim(bottom_limit)\n# ax_left = flip_xy_axes(ax_left, left_limit)\n# ax_bottom.set_ylabel(units)\n# ax_left.set_xlabel(units)\n# fig.subplots_adjust(right=0.8)\n# # divider = make_axes_locatable(ax_main)\n# # cax1 = divider.append_axes(\"right\", size=\"5%\", pad=0.2)\n# # [left, bottom, width, height] of figure:\n# cbar_ax = fig.add_axes([0.85, 0.15, 0.02, 0.75])\n# # fig.colorbar(pcl, orientation=\"vertical\", pad=0.2, label=units)\n# pcl_ticks = np.linspace(stacked_da2.min().item(), stacked_da2.max().item(), 11)\n# cbar = fig.colorbar(pcl, cax=cbar_ax, label=units, ticks=pcl_ticks)\n# cbar.set_ticklabels(['{:.1f}'.format(x) for x in pcl_ticks])\n# title = ' '.join([name, station])\n# fig.suptitle(title, fontweight='bold', fontsize=15)\n# # fig.colorbar(pcl, ax=ax_main)\n# # plt.colorbar(pcl, cax=ax_main)\n# return stacked_da2\n\n\n#def time_series_stack_decraped(time_da, time_dim='time', grp='hour', plot=True):\n# import xarray as xr\n# grp_obj = time_da.groupby(time_dim + '.' 
+ grp)\n# s_list = []\n# for grp_name, grp_inds in grp_obj.groups.items():\n# da = time_da.isel({time_dim: grp_inds})\n# # da = da.rename({time_dim: grp + '_' + str(grp_name)})\n# # da.name += '_' + grp + '_' + str(grp_name)\n# s_list.append(da)\n# grps = [x for x in grp_obj.groups.keys()]\n# stacked_da = xr.concat(s_list, dim=grp)\n# stacked_da[grp] = grps\n# if 'year' in grp:\n# resample_span = '1Y'\n# elif grp == 'month':\n# resample_span = '1Y'\n# elif grp == 'day':\n# resample_span = '1MS'\n# elif grp == 'hour':\n# resample_span = '1D'\n# elif grp == 'minute':\n# resample_span = '1H'\n# stacked_da = stacked_da.resample({time_dim: resample_span}).mean(time_dim)\n# if plot:\n# stacked_da.T.plot.pcolormesh(figsize=(6, 8))\n# return stacked_da\n\n\ndef dt_to_np64(time_coord, unit='m', convert_back=False):\n \"\"\"accepts time_coord and a required time unit and returns a dataarray\n of time_coord and unix POSIX continous float index\"\"\"\n import numpy as np\n import xarray as xr\n unix_epoch = np.datetime64(0, unit)\n one_time_unit = np.timedelta64(1, unit)\n time_unit_since_epoch = (time_coord.values - unix_epoch) / one_time_unit\n units = {'Y': 'years', 'M': 'months', 'W': 'weeks', 'D': 'days',\n 'h': 'hours', 'm': 'minutes', 's': 'seconds'}\n new_time = xr.DataArray(time_unit_since_epoch, coords=[time_coord],\n dims=[time_coord.name])\n new_time.attrs['units'] = units[unit] + ' since 1970-01-01 00:00:00'\n return new_time\n\n\ndef xr_reindex_with_date_range(ds, drop=True, time_dim=None, freq='5min',\n dt_min=None, dt_max=None):\n \"\"\"be careful when drop=True in datasets that have various nans in dataarrays\"\"\"\n import pandas as pd\n if time_dim is None:\n time_dim = list(set(ds.dims))[0]\n if drop:\n ds = ds.dropna(time_dim)\n if dt_min is not None:\n dt_min = pd.to_datetime(dt_min)\n start = pd.to_datetime(dt_min)\n else:\n start = pd.to_datetime(ds[time_dim].min().item())\n if dt_max is not None:\n dt_max = pd.to_datetime(dt_max)\n end = pd.to_datetime(dt_max)\n else:\n end = pd.to_datetime(ds[time_dim].max().item())\n new_time = pd.date_range(start, end, freq=freq)\n ds = ds.reindex({time_dim: new_time})\n return ds\n\n\ndef add_attr_to_xr(da, key, value, append=False):\n \"\"\"add attr to da, if append=True, appends it, if key exists\"\"\"\n import xarray as xr\n if isinstance(da, xr.Dataset):\n raise TypeError('only xr.DataArray allowd!')\n if key in da.attrs and not append:\n raise ValueError('{} already exists in {}, use append=True'.format(key, da.name))\n elif key in da.attrs and append:\n da.attrs[key] += value\n else:\n da.attrs[key] = value\n return da\n\n\ndef filter_nan_errors(ds, error_str='_error', dim='time', meta='action'):\n \"\"\"return the data in a dataarray only if its error is not NaN,\n assumes that ds is a xr.dataset and includes fields and their error\n like this: field, field+error_str\"\"\"\n import xarray as xr\n import numpy as np\n from aux_gps import add_attr_to_xr\n if isinstance(ds, xr.DataArray):\n raise TypeError('only xr.Dataset allowd!')\n fields = [x for x in ds.data_vars if error_str not in x]\n for field in fields:\n ds[field] = ds[field].where(np.isfinite(\n ds[field + error_str])).dropna(dim)\n if meta in ds[field].attrs:\n append = True\n add_attr_to_xr(\n ds[field],\n meta,\n ', filtered values with NaN errors',\n append)\n return ds\n\n\ndef smooth_xr(da, dim='time', weights=[0.25, 0.5, 0.25]):\n # fix to accept wither da or ds:\n import xarray as xr\n weight = xr.DataArray(weights, dims=['window'])\n if isinstance(da, 
xr.Dataset):\n attrs = dict(zip(da.data_vars, [da[x].attrs for x in da]))\n da_roll = da.to_array('dummy').rolling(\n {dim: len(weights)}, center=True).construct('window').dot(weight)\n da_roll = da_roll.to_dataset('dummy')\n for das, attr in attrs.items():\n da_roll[das].attrs = attr\n da_roll[das].attrs['action'] = 'weighted rolling mean with {} on {}'.format(\n weights, dim)\n else:\n da_roll = da.rolling({dim: len(weights)},\n center=True).construct('window').dot(weight)\n da_roll.attrs['action'] = 'weighted rolling mean with {} on {}'.format(\n weights, dim)\n return da_roll\n\n\ndef keep_iqr(da, dim='time', qlow=0.25, qhigh=0.75, k=1.5, drop_with_freq=None,\n verbose=False):\n \"\"\"return the data in a dataarray only in the k times the\n Interquartile Range (low, high), drop\"\"\"\n from aux_gps import add_attr_to_xr\n from aux_gps import xr_reindex_with_date_range\n try:\n quan = da.quantile([qlow, qhigh], dim).values\n except TypeError:\n # support for datetime64 dtypes:\n if da.dtype == '<M8[ns]':\n quan = da.astype(int).quantile(\n [qlow, qhigh], dim).astype('datetime64[ns]').values\n # support for timedelta64 dtypes:\n elif da.dtype == '<m8[ns]':\n quan = da.astype(int).quantile(\n [qlow, qhigh], dim).astype('timedelta64[ns]').values\n low = quan[0]\n high = quan[1]\n iqr = high - low\n lower = low - (iqr * k)\n higher = high + (iqr * k)\n before = da.size\n da = da.where((da < higher) & (da > lower)).dropna(dim)\n after = da.size\n if verbose:\n print('dropped {} outliers from {}.'.format(before-after, da.name))\n if 'action' in da.attrs:\n append = True\n else:\n append = False\n add_attr_to_xr(\n da, 'action', ', kept IQR ({}, {}, {})'.format(\n qlow, qhigh, k), append)\n if drop_with_freq is not None:\n da = xr_reindex_with_date_range(da, freq=drop_with_freq)\n return da\n\n\ndef transform_ds_to_lat_lon_alt(ds, coords_name=['X', 'Y', 'Z'],\n error_str='_error', time_dim='time'):\n \"\"\"appends to the data vars of ds(xr.dataset) the lat, lon, alt fields\n and their error where the geocent fields are X, Y, Z\"\"\"\n import xarray as xr\n from aux_gps import get_latlonalt_error_from_geocent_error\n geo_fields = [ds[x].values for x in coords_name]\n geo_errors = [ds[x + error_str].values for x in coords_name]\n latlong = get_latlonalt_error_from_geocent_error(*geo_fields, *geo_errors)\n new_fields = ['lon', 'lat', 'alt', 'lon_error', 'lat_error', 'alt_error']\n new_names = ['Longitude', 'Latitude', 'Altitude']\n new_units = ['Degrees', 'Degrees', 'm']\n for name, data in zip(new_fields, latlong):\n ds[name] = xr.DataArray(data, dims=[time_dim])\n for name, unit, full_name in zip(new_fields[0:3], new_units[0:3],\n new_names[0:3]):\n ds[name].attrs['full_name'] = full_name\n ds[name].attrs['units'] = unit\n return ds\n\n\ndef get_latlonalt_error_from_geocent_error(X, Y, Z, xe=None, ye=None, ze=None):\n \"\"\"returns the value and error in lat(decimal degree), lon(decimal degree)\n and alt(meters) for X, Y, Z in geocent coords (in meters), all input is\n lists or np.arrays\"\"\"\n import pyproj\n ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')\n lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')\n lon, lat, alt = pyproj.transform(ecef, lla, X, Y, Z, radians=False)\n if (xe is not None) and (ye is not None) and (ze is not None):\n lon_pe, lat_pe, alt_pe = pyproj.transform(ecef, lla, X + xe, Y + ye,\n Z + ze, radians=False)\n lon_me, lat_me, alt_me = pyproj.transform(ecef, lla, X - xe, Y - ye,\n Z - ze, radians=False)\n lon_e = (lon_pe - lon_me) / 
2.0\n lat_e = (lat_pe - lat_me) / 2.0\n alt_e = (alt_pe - alt_me) / 2.0\n return lon, lat, alt, lon_e, lat_e, alt_e\n else:\n return lon, lat, alt\n\n\ndef path_glob(path, glob_str='*.Z', return_empty_list=False):\n \"\"\"returns all the files with full path(pathlib3 objs) if files exist in\n path, if not, returns FilenotFoundErro\"\"\"\n from pathlib import Path\n# if not isinstance(path, Path):\n# raise Exception('{} must be a pathlib object'.format(path))\n path = Path(path)\n files_with_path = [file for file in path.glob(glob_str) if file.is_file]\n if not files_with_path and not return_empty_list:\n raise FileNotFoundError('{} search in {} found no files.'.format(glob_str,\n path))\n elif not files_with_path and return_empty_list:\n return files_with_path\n else:\n return files_with_path\n\n\ndef find_cross_points(df, cols=None):\n \"\"\"find if col A is crossing col B in df and is higher (Up) or lower (Down)\n than col B (after crossing). cols=None means that the first two cols of\n df are used.\"\"\"\n import numpy as np\n if cols is None:\n cols = df.columns.values[0:2]\n df['Diff'] = df[cols[0]] - df[cols[1]]\n df['Cross'] = np.select([((df.Diff < 0) & (df.Diff.shift() > 0)), ((\n df.Diff > 0) & (df.Diff.shift() < 0))], ['Up', 'Down'], None)\n return df\n\n\ndef get_rinex_filename_from_datetime(station, dt='2012-05-07', st_lower=True):\n \"\"\"return rinex filename from datetime string\"\"\"\n import pandas as pd\n\n def filename_from_single_date(station, date):\n day = pd.to_datetime(date, format='%Y-%m-%d').dayofyear\n year = pd.to_datetime(date, format='%Y-%m-%d').year\n if 'T' in date:\n hour = pd.to_datetime(date, format='%Y-%m-%d').hour\n hour = letters_to_hours_and_vice_verse(hour)\n else:\n hour = '0'\n if len(str(day)) == 1:\n str_day = '00' + str(day) + hour\n elif len(str(day)) == 2:\n str_day = '0' + str(day) + hour\n elif len(str(day)) == 3:\n str_day = str(day) + hour\n if st_lower:\n st = station.lower()\n else:\n st = station\n filename = st + str_day + '.' 
+ str(year)[2:4] + 'd'\n return filename\n\n if isinstance(dt, list):\n filenames = []\n for date in dt:\n filename = filename_from_single_date(station, date)\n filenames.append(filename)\n return filenames\n else:\n filename = filename_from_single_date(station, dt)\n return filename\n\n\ndef letters_to_hours_and_vice_verse(symbol):\n \"\"\"A - 0 hours, B- 1 hours, until X = 23 hours\"\"\"\n import string\n import numpy as np\n import pandas as pd\n hour_letters = [x.upper() for x in string.ascii_letters][:24]\n hour_numbers = np.arange(0, 24)\n hour_string_dict = dict(zip(hour_letters, hour_numbers))\n reverse_dict = dict(zip(hour_numbers, hour_letters))\n if isinstance(symbol, int):\n return reverse_dict.get(symbol, 'NaN')\n elif isinstance(symbol, str):\n return pd.Timedelta('{} hour'.format(hour_string_dict.get(symbol), 'NaN'))\n\n\ndef get_timedate_and_station_code_from_rinex(rinex_str='tela0010.05d',\n just_dt=False, st_upper=True):\n \"\"\"return datetime from rinex2 format\"\"\"\n import pandas as pd\n import datetime\n\n def get_dt_from_single_rinex(rinex_str):\n station = rinex_str[0:4]\n days = int(rinex_str[4:7])\n hour = rinex_str[7]\n year = rinex_str[-3:-1]\n Year = datetime.datetime.strptime(year, '%y').strftime('%Y')\n dt = datetime.datetime(int(Year), 1, 1) + datetime.timedelta(days - 1)\n dt = pd.to_datetime(dt)\n if hour != '0':\n hours_to_add = letters_to_hours_and_vice_verse(hour)\n # print(hours_to_add)\n dt += hours_to_add\n if st_upper:\n st = station.upper()\n else:\n st = station\n return dt, st\n\n if isinstance(rinex_str, list):\n dt_list = []\n for rstr in rinex_str:\n dt, station = get_dt_from_single_rinex(rstr)\n dt_list.append(dt)\n return dt_list\n else:\n dt, station = get_dt_from_single_rinex(rinex_str)\n if just_dt:\n return dt\n else:\n return dt, station\n\n\ndef configure_logger(name='general', filename=None):\n import logging\n import sys\n stdout_handler = logging.StreamHandler(sys.stdout)\n if filename is not None:\n file_handler = logging.FileHandler(filename=filename, mode='a')\n handlers = [file_handler, stdout_handler]\n else:\n handlers = [stdout_handler]\n\n logging.basicConfig(\n level=logging.INFO,\n format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n handlers=handlers\n )\n logger = logging.getLogger(name=name)\n return logger\n\n\ndef process_gridsearch_results(GridSearchCV):\n import xarray as xr\n import pandas as pd\n import numpy as np\n \"\"\"takes GridSreachCV object with cv_results and xarray it into dataarray\"\"\"\n params = GridSearchCV.param_grid\n scoring = GridSearchCV.scoring\n names = [x for x in params.keys()]\n if len(params) > 1:\n # unpack param_grid vals to list of lists:\n pro = [[y for y in x] for x in params.values()]\n ind = pd.MultiIndex.from_product((pro), names=names)\n result_names = [x for x in GridSearchCV.cv_results_.keys() if\n 'time' not in x and 'param' not in x and\n 'rank' not in x]\n ds = xr.Dataset()\n for da_name in result_names:\n da = xr.DataArray(GridSearchCV.cv_results_[da_name])\n ds[da_name] = da\n ds = ds.assign(dim_0=ind).unstack('dim_0')\n elif len(params) == 1:\n result_names = [x for x in GridSearchCV.cv_results_.keys() if\n 'time' not in x and 'param' not in x and\n 'rank' not in x]\n ds = xr.Dataset()\n for da_name in result_names:\n da = xr.DataArray(GridSearchCV.cv_results_[da_name], dims={**params})\n ds[da_name] = da\n for k, v in params.items():\n ds[k] = v\n name = [x for x in ds.data_vars.keys() if 'split' in x and 'test' in x]\n split_test = 
xr.concat(ds[name].data_vars.values(), dim='kfolds')\n split_test.name = 'split_test'\n kfolds_num = len(name)\n name = [x for x in ds.data_vars.keys() if 'split' in x and 'train' in x]\n split_train = xr.concat(ds[name].data_vars.values(), dim='kfolds')\n split_train.name = 'split_train'\n name = [x for x in ds.data_vars.keys() if 'mean_test' in x]\n mean_test = xr.concat(ds[name].data_vars.values(), dim='scoring')\n mean_test.name = 'mean_test'\n name = [x for x in ds.data_vars.keys() if 'mean_train' in x]\n mean_train = xr.concat(ds[name].data_vars.values(), dim='scoring')\n mean_train.name = 'mean_train'\n name = [x for x in ds.data_vars.keys() if 'std_test' in x]\n std_test = xr.concat(ds[name].data_vars.values(), dim='scoring')\n std_test.name = 'std_test'\n name = [x for x in ds.data_vars.keys() if 'std_train' in x]\n std_train = xr.concat(ds[name].data_vars.values(), dim='scoring')\n std_train.name = 'std_train'\n ds = ds.drop(ds.data_vars.keys())\n ds['mean_test'] = mean_test\n ds['mean_train'] = mean_train\n ds['std_test'] = std_test\n ds['std_train'] = std_train\n ds['split_test'] = split_test\n ds['split_train'] = split_train\n mean_test_train = xr.concat(ds[['mean_train', 'mean_test']].data_vars.\n values(), dim='train_test')\n std_test_train = xr.concat(ds[['std_train', 'std_test']].data_vars.\n values(), dim='train_test')\n split_test_train = xr.concat(ds[['split_train', 'split_test']].data_vars.\n values(), dim='train_test')\n ds['train_test'] = ['train', 'test']\n ds = ds.drop(ds.data_vars.keys())\n ds['MEAN'] = mean_test_train\n ds['STD'] = std_test_train\n # CV = xr.Dataset(coords=GridSearchCV.param_grid)\n ds = xr.concat(ds[['MEAN', 'STD']].data_vars.values(), dim='MEAN_STD')\n ds['MEAN_STD'] = ['MEAN', 'STD']\n ds.name = 'CV_mean_results'\n ds.attrs['param_names'] = names\n if isinstance(scoring, str):\n ds.attrs['scoring'] = scoring\n ds = ds.squeeze(drop=True)\n else:\n ds['scoring'] = scoring\n ds = ds.to_dataset()\n ds['CV_full_results'] = split_test_train\n ds['kfolds'] = np.arange(kfolds_num)\n return ds\n\n\ndef calculate_std_error(arr, statistic='std'):\n from scipy.stats import moment\n import numpy as np\n # remove nans:\n arr = arr[np.logical_not(np.isnan(arr))]\n n = len(arr)\n if statistic == 'std':\n mu4 = moment(arr, moment=4)\n sig4 = np.var(arr)**2.0\n se = mu4 - sig4 * (n - 3) / (n - 1)\n se = (se / n)**0.25\n elif statistic == 'mean':\n std = np.std(arr)\n se = std / np.sqrt(n)\n return se\n\n\ndef calculate_distance_between_two_lat_lon_points(\n lat1,\n lon1,\n lat2,\n lon2,\n orig_epsg='4326',\n meter_epsg='2039',\n verbose=False):\n \"\"\"calculate the distance between two points (lat,lon) with epsg of\n WGS84 and convert to meters with a local epsg. 
if lat1 is array then\n calculates the distance of many points.\"\"\"\n import geopandas as gpd\n import pandas as pd\n try:\n df1 = pd.DataFrame(index=lat1.index)\n except AttributeError:\n try:\n len(lat1)\n except TypeError:\n lat1 = [lat1]\n df1 = pd.DataFrame(index=[x for x in range(len(lat1))])\n df1['lat'] = lat1\n df1['lon'] = lon1\n first_gdf = gpd.GeoDataFrame(\n df1, geometry=gpd.points_from_xy(\n df1['lon'], df1['lat']))\n first_gdf.crs = {'init': 'epsg:{}'.format(orig_epsg)}\n first_gdf.to_crs(epsg=int(meter_epsg), inplace=True)\n try:\n df2 = pd.DataFrame(index=lat2.index)\n except AttributeError:\n try:\n len(lat2)\n except TypeError:\n lat2 = [lat2]\n df2 = pd.DataFrame(index=[x for x in range(len(lat2))])\n df2['lat'] = lat2\n df2['lon'] = lon2\n second_gdf = gpd.GeoDataFrame(\n df2, geometry=gpd.points_from_xy(\n df2['lon'], df2['lat']))\n second_gdf.crs = {'init': 'epsg:{}'.format(orig_epsg)}\n second_gdf.to_crs(epsg=int(meter_epsg), inplace=True)\n ddf = first_gdf.geometry.distance(second_gdf.geometry)\n return ddf\n\n\ndef get_nearest_lat_lon_for_xy(lat_da, lon_da, points):\n \"\"\"used to access UERRA reanalysis, where the variable has x,y as coords\"\"\"\n import numpy as np\n from scipy.spatial import cKDTree\n if isinstance(points, np.ndarray):\n points = list(points)\n combined_x_y_arrays = np.dstack(\n [lat_da.values.ravel(), lon_da.values.ravel()])[0]\n mytree = cKDTree(combined_x_y_arrays)\n points = np.atleast_2d(points)\n dist, inds = mytree.query(points)\n yx = []\n for ind in inds:\n y, x = np.unravel_index(ind, lat_da.shape)\n yx.append([y, x])\n return yx\n\n\ndef get_altitude_of_point_using_dem(lat, lon, dem_path=work_yuval / 'AW3D30'):\n import xarray as xr\n file = sorted(path_glob(dem_path, 'israel_dem*.nc'))[0]\n awd = xr.load_dataarray(file)\n alt = awd.sel(lon=float(lon), lat=float(lat),\n method='nearest').values.item()\n return alt\n\n\ndef coarse_dem(data, dem_path=work_yuval / 'AW3D30'):\n \"\"\"coarsen to data coords\"\"\"\n # data is lower resolution than awd\n import salem\n import xarray as xr\n # determine resulotion:\n try:\n lat_size = data.lat.size\n lon_size = data.lon.size\n except AttributeError:\n print('data needs to have lat and lon coords..')\n return\n # check for file exist:\n filename = 'israel_dem_' + str(lon_size) + '_' + str(lat_size) + '.nc'\n my_file = dem_path / filename\n if my_file.is_file():\n awds = xr.open_dataarray(my_file)\n print('{} is found and loaded...'.format(filename))\n else:\n awd = salem.open_xr_dataset(dem_path / 'israel_dem.tif')\n awds = data.salem.lookup_transform(awd)\n awds = awds['data']\n awds.to_netcdf(dem_path / filename)\n print('{} is saved to {}'.format(filename, dem_path))\n return awds\n\n\ndef invert_dict(d):\n \"\"\"unvert dict\"\"\"\n inverse = dict()\n for key in d:\n # Go through the list that is saved in the dict:\n for item in d[key]:\n # Check if in the inverted dict the key exists\n if item not in inverse:\n # If not create a new list\n inverse[item] = key\n else:\n inverse[item].append(key)\n return inverse\n\n\ndef concat_shp(path, shp_file_list, saved_filename):\n import geopandas as gpd\n import pandas as pd\n shapefiles = [path / x for x in shp_file_list]\n gdf = pd.concat([gpd.read_file(shp)\n for shp in shapefiles]).pipe(gpd.GeoDataFrame)\n gdf.to_file(path / saved_filename)\n print('saved {} to {}'.format(saved_filename, path))\n return\n\n\ndef scale_xr(da, upper=1.0, lower=0.0, unscale=False):\n if not unscale:\n dh = da.max()\n dl = da.min()\n da_scaled = 
(((da-dl)*(upper-lower))/(dh-dl)) + lower\n da_scaled.attrs = da.attrs\n da_scaled.attrs['scaled'] = True\n da_scaled.attrs['lower'] = dl.item()\n da_scaled.attrs['upper'] = dh.item()\n if unscale and da.attrs['scaled']:\n dh = da.max()\n dl = da.min()\n upper = da.attrs['upper']\n lower = da.attrs['lower']\n da_scaled = (((da-dl)*(upper-lower))/(dh-dl)) + lower\n return da_scaled\n\n\ndef print_saved_file(name, path):\n print(name + ' was saved to ' + str(path))\n return\n\n\ndef dim_union(da_list, dim='time'):\n import pandas as pd\n setlist = [set(x[dim].values) for x in da_list]\n empty_list = [x for x in setlist if not x]\n if empty_list:\n print('NaN dim drop detected, check da...')\n return\n u = list(set.union(*setlist))\n # new_dim = list(set(a.dropna(dim)[dim].values).intersection(\n # set(b.dropna(dim)[dim].values)))\n if dim == 'time':\n new_dim = sorted(pd.to_datetime(u))\n else:\n new_dim = sorted(u)\n return new_dim\n\n\ndef dim_intersection(da_list, dim='time', dropna=True, verbose=None):\n import pandas as pd\n if dropna:\n setlist = [set(x.dropna(dim)[dim].values) for x in da_list]\n else:\n setlist = [set(x[dim].values) for x in da_list]\n empty_list = [x for x in setlist if not x]\n if empty_list:\n if verbose == 0:\n print('NaN dim drop detected, check da...')\n return None\n u = list(set.intersection(*setlist))\n # new_dim = list(set(a.dropna(dim)[dim].values).intersection(\n # set(b.dropna(dim)[dim].values)))\n if dim == 'time':\n new_dim = sorted(pd.to_datetime(u))\n else:\n new_dim = sorted(u)\n return new_dim\n\n\ndef get_unique_index(da, dim='time', verbose=False):\n import numpy as np\n before = da[dim].size\n _, index = np.unique(da[dim], return_index=True)\n da = da.isel({dim: index})\n after = da[dim].size\n if verbose:\n print('dropped {} duplicate coord entries.'.format(before-after))\n return da\n\n\ndef Zscore_xr(da, dim='time'):\n \"\"\"input is a dattarray of data and output is a dattarray of Zscore\n for the dim\"\"\"\n attrs = da.attrs\n z = (da - da.mean(dim=dim)) / da.std(dim=dim)\n z.attrs = attrs\n if 'units' in attrs.keys():\n z.attrs['old_units'] = attrs['units']\n z.attrs['action'] = 'converted to Z-score'\n z.attrs['units'] = 'std'\n return z\n\n\ndef desc_nan(data, verbose=True):\n \"\"\"count only NaNs in data and returns the thier amount and the non-NaNs\"\"\"\n import numpy as np\n import xarray as xr\n\n def nan_da(data):\n nans = np.count_nonzero(np.isnan(data.values))\n non_nans = np.count_nonzero(~np.isnan(data.values))\n if verbose:\n print(str(type(data)))\n print(data.name + ': non-NaN entries: ' + str(non_nans) + ' of total ' +\n str(data.size) + ', shape:' + str(data.shape) + ', type:' +\n str(data.dtype))\n print('Dimensions:')\n dim_nn_list = []\n for dim in data.dims:\n dim_len = data[dim].size\n dim_non_nans = np.int(data.dropna(dim)[dim].count())\n dim_nn_list.append(dim_non_nans)\n if verbose:\n print(dim + ': non-NaN labels: ' +\n str(dim_non_nans) + ' of total ' + str(dim_len))\n return non_nans\n if isinstance(data, xr.DataArray):\n nn_dict = nan_da(data)\n return nn_dict\n elif isinstance(data, np.ndarray):\n nans = np.count_nonzero(np.isnan(data))\n non_nans = np.count_nonzero(~np.isnan(data))\n if verbose:\n print(str(type(data)))\n print('non-NaN entries: ' + str(non_nans) + ' of total ' +\n str(data.size) + ', shape:' + str(data.shape) + ', type:' +\n str(data.dtype))\n elif isinstance(data, xr.Dataset):\n for varname in data.data_vars.keys():\n non_nans = nan_da(data[varname])\n return non_nans\n\n\nclass 
lmfit_model_switcher(object):\n def pick_model(self, model_name, *args, **kwargs):\n \"\"\"Dispatch method\"\"\"\n method_name = str(model_name)\n # Get the method from 'self'. Default to a lambda.\n method = getattr(self, method_name, lambda: \"Invalid ML Model\")\n # Call the method as we return it\n self.model = method(*args, **kwargs)\n return self\n\n def pick_param(self, name, **kwargs):\n # **kwargs.keys() = value, vary, min, max, expr\n if not hasattr(self, 'model'):\n raise('pls pick model first!')\n return\n else:\n self.model.set_param_hint(name, **kwargs)\n return\n\n def generate_params(self, **kwargs):\n if not hasattr(self, 'model'):\n raise('pls pick model first!')\n return\n else:\n if kwargs is not None:\n for key, val in kwargs.items():\n self.model.set_param_hint(key, **val)\n self.params = self.model.make_params()\n else:\n self.params = self.model.make_params()\n return\n\n def line(self, line_pre='line_'):\n from lmfit import Model\n\n def func(time, slope, intercept):\n f = slope * time + intercept\n return f\n return Model(func, independent_vars=['time'], prefix=line_pre)\n\n def sin(self, sin_pre='sin_'):\n from lmfit import Model\n\n def func(time, amp, freq, phase):\n import numpy as np\n f = amp * np.sin(2 * np.pi * freq * (time - phase))\n return f\n return Model(func, independent_vars=['time'], prefix=sin_pre)\n\n def sin_constant(self, sin_pre='sin_', con_pre='constant_'):\n from lmfit.models import ConstantModel\n\n constant = ConstantModel(prefix=con_pre)\n lmfit = lmfit_model_switcher()\n lmfit.pick_model('sin', sin_pre)\n return lmfit.model + constant\n\n def sin_linear(self, sin_pre='sin_', line_pre='line_'):\n lmfit = lmfit_model_switcher()\n sin = lmfit.pick_model('sin', sin_pre)\n line = lmfit.pick_model('line', line_pre)\n return sin + line\n\n def sum_sin(self, k):\n lmfit = lmfit_model_switcher()\n sin = lmfit.pick_model('sin', 'sin0_')\n for k in range(k-1):\n sin += lmfit.pick_model('sin', 'sin{}_'.format(k+1))\n return sin\n\n def sum_sin_constant(self, k, con_pre='constant_'):\n from lmfit.models import ConstantModel\n constant = ConstantModel(prefix=con_pre)\n lmfit = lmfit_model_switcher()\n sum_sin = lmfit.pick_model('sum_sin', k)\n return sum_sin + constant\n\n def sum_sin_linear(self, k, line_pre='line_'):\n lmfit = lmfit_model_switcher()\n sum_sin = lmfit.pick_model('sum_sin', k)\n line = lmfit.pick_model('line', line_pre)\n return sum_sin + line\n\n\ndef pick_lmfit_model(name='sine'):\n import numpy as np\n import lmfit\n\n class MySineModel(lmfit.Model):\n def __init__(self, *args, **kwargs):\n def sine(x, ampl, offset, freq, x0):\n return ampl * np.sin((x - x0)*2*np.pi*freq) + offset\n super(MySineModel, self).__init__(sine, *args, **kwargs)\n\n def guess(self, data, freq=None, **kwargs):\n params = self.make_params()\n\n def pset(param, value):\n params['{}{}'.format(self.prefix, param)].set(value=value)\n pset(\"ampl\", np.max(data) - np.min(data))\n pset(\"offset\", np.mean(data) + 0.01)\n if freq is None:\n pset(\"freq\", 1)\n else:\n pset(\"freq\", freq)\n pset(\"x0\", 0)\n return lmfit.models.update_param_vals(params, self.prefix, **kwargs)\n name_dict = {'sine': MySineModel()}\n return name_dict.get(name)\n\n\ndef move_or_copy_files_from_doy_dir_structure_to_single_path(yearly_path=work_yuval/'SST',\n movepath=work_yuval/'SST',\n filetype='*.nc',\n opr='copy'):\n \"\"\"move files from year-doy directory structure to another path.\"\"\"\n from aux_gps import path_glob\n import shutil\n year_dirs = sorted([x for x in 
path_glob(yearly_path, '*/') if x.is_dir()])\n years = []\n for year_dir in year_dirs:\n print('year {} is being processed.'.format(year_dir))\n years.append(year_dir.as_posix().split('/')[-1])\n doy_dirs = sorted([x for x in path_glob(year_dir, '*/') if x.is_dir()])\n for doy_dir in doy_dirs:\n file = path_glob(doy_dir, filetype)[0]\n same_file = file.as_posix().split('/')[-1]\n orig = file\n dest = movepath / same_file\n if opr == 'copy':\n shutil.copy(orig.resolve(), dest.resolve())\n elif opr == 'move':\n shutil.move(orig.resolve(), dest.resolve())\n print('{} has been {}ed {}'.format(same_file, opr, movepath))\n return years\n\n" ]
[ [ "sklearn.metrics.explained_variance_score", "matplotlib.pyplot.legend", "matplotlib.patheffects.Normal", "pandas.read_excel", "pandas.to_datetime", "pandas.Series", "numpy.sqrt", "matplotlib.ticker.AutoMinorLocator", "matplotlib.pyplot.rc", "numpy.cumsum", "pandas.DataFrame", "sklearn.metrics.mean_squared_error", "numpy.concatenate", "matplotlib.pyplot.plot", "numpy.arctan2", "numpy.max", "numpy.mean", "numpy.var", "numpy.where", "scipy.spatial.cKDTree", "scipy.optimize.curve_fit", "scipy.signal.welch", "scipy.signal.savgol_filter", "matplotlib.pyplot.tight_layout", "numpy.unique", "numpy.arange", "numpy.sin", "numpy.copy", "numpy.std", "numpy.diff", "matplotlib.pyplot.subplots_adjust", "numpy.zeros", "numpy.unravel_index", "matplotlib.pyplot.figure", "pandas.concat", "scipy.stats.moment", "matplotlib.pyplot.cm.get_cmap", "numpy.min", "numpy.isnan", "numpy.median", "pandas.Timedelta", "numpy.timedelta64", "numpy.atleast_2d", "scipy.stats.linregress", "scipy.stats.wilcoxon", "numpy.cov", "matplotlib.pyplot.fill_between", "pandas.MultiIndex.from_product", "pandas.date_range", "numpy.deg2rad", "matplotlib.pyplot.GridSpec", "numpy.equal", "numpy.array", "matplotlib.pyplot.show", "numpy.sum", "numpy.diagonal", "pandas.pivot_table", "pandas.infer_freq", "scipy.signal.detrend", "scipy.stats.normaltest", "numpy.isfinite", "numpy.fft.rfft", "matplotlib.pyplot.subplots", "numpy.datetime64", "numpy.ones", "scipy.stats.shapiro", "sklearn.linear_model.LinearRegression", "matplotlib.patheffects.Stroke", "scipy.stats.mstats.theilslopes", "numpy.array_split", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
microsoft/goodpoints
[ "d1b1e30a49bfe24feeca73421bc47f5f45572908", "d1b1e30a49bfe24feeca73421bc47f5f45572908", "d1b1e30a49bfe24feeca73421bc47f5f45572908" ]
[ "examples/compress/util_sample.py", "examples/gkt/submit_gkt_jobs.py", "examples/compress/construct_st_coresets.py" ]
[ "\r\nimport numpy as np\r\nimport numpy.random as npr\r\nimport numpy.linalg as npl\r\nimport os\r\nimport pickle as pkl\r\n\r\n'''\r\nFile containing helper functions for details about target P and\r\ndrawing samples / loading mcmc samples from file\r\n'''\r\n######## functions related to setting P and sampling from it ########\r\n\r\n\r\ndef sample(n, params_p, seed=None):\r\n \"\"\"Returns n sample points drawn iid from a specified distribution\r\n \r\n Args:\r\n n: Number of sample points to generate\r\n params_p: Dictionary of distribution parameters including\r\n name: Distribution name in {\"gauss\"}\r\n var: Variance parameter\r\n d: Dimension of generated vectors\r\n seed: (Optional) Random seed to set prior to generation; if None,\r\n no seed will be set\r\n \"\"\"\r\n name = params_p[\"name\"]\r\n if name == \"gauss\":\r\n sig = np.sqrt(params_p[\"var\"])\r\n return(sig * npr.default_rng(seed).standard_normal(size=(n, params_p[\"d\"])))\r\n elif name == \"unif\":\r\n return(npr.default_rng(seed).random(size=(n, params_p[\"d\"])))\r\n elif name == \"mog\":\r\n rng = npr.default_rng(seed)\r\n w = params_p[\"weights\"]\r\n n_mix = rng.multinomial(n, w)\r\n for i, ni in enumerate(n_mix):\r\n mean = params_p[\"means\"][i, :]\r\n cov = params_p[\"covs\"][i, :, :]\r\n temp = rng.multivariate_normal(mean=mean, cov=cov, size=ni)\r\n if i == 0:\r\n x = temp\r\n else:\r\n x = np.vstack((x, temp))\r\n rng.shuffle(x)\r\n return(x)\r\n elif params_p[\"name\"] == \"diag_mog\":\r\n rng = npr.default_rng(seed)\r\n w = params_p[\"weights\"]\r\n d = params_p[\"d\"]\r\n n_mix = rng.multinomial(n, w)\r\n for i, ni in enumerate(n_mix):\r\n mean = params_p[\"means\"][i, :]\r\n cov = params_p[\"covs\"][i] * np.eye(d)\r\n temp = rng.multivariate_normal(mean=mean, cov=cov, size=ni)\r\n if i == 0:\r\n x = temp\r\n else:\r\n x = np.vstack((x, temp))\r\n rng.shuffle(x)\r\n return(x)\r\n elif params_p[\"saved_samples\"] == True:\r\n if 'Hinch' in params_p[\"name\"]: # for this case, samples are all preloaded\r\n filename = os.path.join(params_p[\"data_dir\"], \"{}_samples_n_{}.pkl\".format(params_p[\"name\"], n))\r\n with open(filename, 'rb') as file:\r\n return(pkl.load(file))\r\n else:\r\n end = params_p[\"X\"].shape[0]\r\n sample_idx = np.linspace(0, end-1, n, dtype=int, endpoint=True)\r\n return(params_p[\"X\"][sample_idx])\r\n\r\n raise ValueError(\"Unrecognized distribution name {}\".format(params_p[\"name\"]))\r\n\r\ndef compute_diag_mog_params(M=int(4), snr=3.):\r\n \"\"\"Returns diagonal mixture of Gaussian target distribution settings for d=2\r\n \r\n Args:\r\n M: (Optional) Integer, number of components\r\n snr: (Optional) Scaling of the means \r\n \"\"\"\r\n d = int(2)\r\n weights = np.ones(M)\r\n weights /= np.sum(weights)\r\n\r\n # change this to set the means apart\r\n means = np.zeros((M, d))\r\n if M == 3:\r\n means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.]])\r\n if M == 4: \r\n means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.], [1., -1.]])\r\n if M == 6:\r\n means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.], [1., -1.], [0, 2.], [-2, 0.]])\r\n if M == 8:\r\n means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.], [1., -1.], [0, 2.], [-2, 0.], [2, 0.], [0, -2.]])\r\n if M == 32:\r\n means = np.zeros((M, 2))\r\n for i in range(M):\r\n \r\n snr = 10 if i<M//2 else 20\r\n means[i] = snr*np.array([np.sin(2*np.pi/(M/2.)*i), np.cos(2*np.pi/(M/2.)*i)])\r\n covs = np.ones(M)\r\n\r\n params_p = {\"name\": \"diag_mog\", \r\n \"weights\": weights,\r\n \"means\": means,\r\n \"covs\": covs,\r\n 
\"d\": int(d),\r\n \"saved_samples\": False}\r\n return(params_p) \r\n\r\ndef compute_mcmc_params_p(filename, data_folder, nmax=int(2**15)): #, include_last=True, profiling=False):\r\n \"\"\"Returns a dictionary for a distribution associated with samples saved in filename\r\n \r\n Args:\r\n filename: string, denoting a prefix to be used for the file fron which the samples are loaded\r\n nmax:(Optional) Integer, to define Pnmax as an approximation to P of samples\r\n include_last:(Optional) If True, always includes the last point from the loaded coreset, \r\n otherwise always includes the first point\r\n profiling:(Optional) If True, debugging mode, returns some indices \r\n \"\"\"\r\n # burn_in_parameters for the 8 settings of Lotka, and Goodwin, taken as the max value in Table S4 and S6 for the respective setting in \r\n # https://arxiv.org/pdf/2005.03952.pdf\r\n \r\n burn_in_params = {\r\n 'Goodwin_RW': int(820000),\r\n 'Goodwin_ADA-RW': int(824000),\r\n 'Goodwin_MALA': int(1615000),\r\n 'Goodwin_PRECOND-MALA': int(1475000),\r\n 'Lotka_RW': int(1512000),\r\n 'Lotka_ADA-RW': int(1797000),\r\n 'Lotka_MALA': int(1573000),\r\n 'Lotka_PRECOND-MALA': int(1251000),\r\n }\r\n \r\n gl_filenames = list(burn_in_params.keys()) # goodwin lotka filenames \r\n \r\n # The Hinch sample files are already preprocessed\r\n Hinch_filenames = ['Hinch_P_seed_1_temp_1', 'Hinch_P_seed_2_temp_1', 'Hinch_TP_seed_1_temp_8', 'Hinch_TP_seed_2_temp_8']\r\n Hinch_scaled_filenames = [f + '_scaled' for f in Hinch_filenames] # for samples after scaling\r\n Hinch_scaled_nosplit_filenames = [f + '_scaled_nosplit' for f in Hinch_filenames] # for samples after scaling and no split of data\r\n \r\n for f in Hinch_filenames:\r\n burn_in_params[f] = int(0)\r\n for f in Hinch_scaled_filenames:\r\n burn_in_params[f] = int(0)\r\n for f in Hinch_scaled_nosplit_filenames:\r\n burn_in_params[f] = int(0)\r\n \r\n med_dist_params = {'Goodwin_RW': 0.02,\r\n 'Goodwin_ADA-RW': 0.0201,\r\n 'Goodwin_MALA': 0.0171,\r\n 'Goodwin_PRECOND-MALA': 0.0205,\r\n 'Lotka_RW': 0.0274,\r\n 'Lotka_ADA-RW': 0.0283,\r\n 'Lotka_MALA': 0.023,\r\n 'Lotka_PRECOND-MALA': 0.0288, \r\n 'Hinch_P_seed_1_temp_1': 2.3748,\r\n 'Hinch_P_seed_2_temp_1': 2.3311,\r\n 'Hinch_TP_seed_1_temp_8': 7.3232, \r\n 'Hinch_TP_seed_2_temp_8': 7.2557,\r\n 'Hinch_P_seed_1_temp_1_scaled': 8.0676,\r\n 'Hinch_P_seed_2_temp_1_scaled': 8.3189,\r\n 'Hinch_TP_seed_1_temp_8_scaled': 8.621, \r\n 'Hinch_TP_seed_2_temp_8_scaled': 8.6649,\r\n \r\n 'Hinch_P_seed_1_temp_1_scaled_nosplit': 8.0676,\r\n 'Hinch_P_seed_2_temp_1_scaled_nosplit': 8.3189,\r\n 'Hinch_TP_seed_1_temp_8_scaled_nosplit': 8.621, \r\n 'Hinch_TP_seed_2_temp_8_scaled_nosplit': 8.6649,\r\n }\r\n \r\n assert(filename in med_dist_params.keys())\r\n \r\n params_p = {\"saved_samples\": True, \r\n \"name\": filename, \r\n \"data_dir\": data_folder,\r\n \"nmax\": nmax,\r\n \"burn_in\": burn_in_params[filename],\r\n \"med_dist\" : med_dist_params[filename]\r\n }\r\n params_p[\"d\"] = int(38) if 'Hinch' in filename else int(4)\r\n \r\n # specific filename to load the coresets\r\n if 'Hinch' in filename: # samples are pre-loaded\r\n filename = os.path.join(params_p[\"data_dir\"], \"{}_pnmax_15.pkl\".format(params_p[\"name\"].replace(\"_nosplit\",\"\"))) # ignore nosplit case\r\n assert(nmax == int(2**15))\r\n with open(filename, \"rb\") as file:\r\n params_p[\"Pnmax\"] = pkl.load(file)\r\n else:\r\n pkl_name = os.path.join(params_p[\"data_dir\"], \"{}_theta.pkl\".format(params_p[\"name\"]))\r\n with open(pkl_name, \"rb\") as file:\r\n X = 
pkl.load(file)\r\n \r\n # ignore burn_in samples\r\n burn_in = params_p[\"burn_in\"]\r\n X = X[burn_in:]\r\n \r\n # separate the odd/even indices\r\n idx_even = np.arange(X.shape[0]-1, 1, -2)[::-1]\r\n idx_odd = np.arange(X.shape[0]-2, 0, -2)[::-1]\r\n idx_Pnmax = np.linspace(0, len(idx_odd)-1, nmax, dtype=int, endpoint=True)\r\n \r\n # define Pnmax, and X\r\n params_p[\"Pnmax\"] = X[idx_odd][idx_Pnmax]\r\n params_p[\"X\"] = X #[idx_even] remove subindexing\r\n \r\n return(params_p)\r\n# else:\r\n# return(params_p, idx_Pnmax, idx_even, idx_odd)\r\n \r\n\r\ndef sample_string(params_p, sample_seed):\r\n \"\"\"Returns string summarizing the parameters of the target distribution P \r\n for appending to pkl file names\r\n \r\n Args:\r\n params_p : Dictionary of distribution parameters recognized by sample()\r\n sample_seed: random seed used for generating data from P\r\n \"\"\"\r\n if params_p[\"saved_samples\"] == True:\r\n temp = params_p[\"name\"]\r\n if \"med_dist\" in params_p.keys():\r\n temp += \"_nmax_\" + str(int(np.log2(params_p[\"nmax\"])))\r\n return(temp)\r\n else:\r\n # older file names\r\n return(params_p[\"name\"])\r\n if params_p[\"name\"] == \"gauss\":\r\n return(\"{}_var{}_seed{}\".format(\r\n params_p[\"name\"], params_p[\"var\"], sample_seed))\r\n if params_p[\"name\"] == \"diag_mog\":\r\n return(\"{}_comp{}_seed{}\".format(\r\n params_p[\"name\"], len(params_p[\"weights\"]), sample_seed))\r\n if params_p[\"name\"] == \"mog\":\r\n return(\"{}_comp{}_seed{}\".format(\r\n params_p[\"name\"], len(params_p[\"weights\"]), sample_seed))\r\n \r\n \r\n raise ValueError(\"Unrecognized distribution name {}\".format(params_p[\"name\"]))\r\n\r\ndef compute_params_p(args):\r\n ''' \r\n Return dimensionality, params_p, and var_k, for the experiment using the\r\n following parameters of args. Note that returned params_p should be\r\n supported by the following functions: sample function in util_sample.py; \r\n p_kernel, ppn_kernel and pp_kernel functions in util_k_mmd.py.\r\n\r\n args.setting: name of target\r\n args.d: dimension of points (gauss/mog)\r\n args.M, (mog)\r\n args.filename (mcmc)\r\n args.mcmcfolder (to load mcmc data)\r\n '''\r\n ## P and kernel parameters ####\r\n if args.setting == \"gauss\": \r\n d = args.d\r\n var_p = 1. 
# Variance of P\r\n var_k = float(2*d) # Variance of k\r\n params_p = {\"name\": \"gauss\", \"var\": var_p, \"d\": int(d), \"saved_samples\": False}\r\n \r\n if args.setting == \"mog\":\r\n # d will be set to 2\r\n assert(args.M in [3, 4, 6, 8, 32])\r\n params_p = compute_diag_mog_params(args.M)\r\n d = params_p[\"d\"]\r\n var_k = float(2*d)\r\n \r\n if args.setting == \"mcmc\":\r\n # d will be set automatically; nmax needs to be changed\r\n assert(args.filename is not None)\r\n params_p = compute_mcmc_params_p(args.filename, args.mcmcfolder)\r\n d = params_p[\"d\"]\r\n var_k = (params_p[\"med_dist\"])**2\r\n return(d, params_p, var_k)", "'''Reproduces the vignettes of generalized KT on a Slurm cluster\n by executing run_generalized_kt_experiment.ipynb with appropriate parameters\n'''\n\nimport itertools\nfrom slurmpy import Slurm\nimport numpy as np\n\n\n\ndef singlejob_slurm_command(prefix, temp_ids, new_fix_param_str, m_max, d, rep0, repn, computemmd, s_id,\n M=0, filename='temp',\n power=0.5,\n compute_power=0,\n target_kt=1,\n standard_thin=0,\n power_kt=0,\n kt_plus=0,\n rerun=0,\n nu=0.5):\n '''\n Deploys a slurm job that runs thinning experiments based on the parameters, and appends the slurm id\n to temp_ids\n \n prefix: (str) prefix for slurm id description\n new_fix_param_str: basic description of the code to be run\n temp_ids: ids of slurm jobs\n m_max : (int) max size of input\n d: (int) dimension\n rep0: (int) starting index of rep\n repn: (int) number of reps (rep_ids will be rep0, rep0+1, ..., rep0+repn-1)\n computemmd: (int) whether mmd needs to be computed (anything but 0) else 0\n s_id: (int) wait for the slurm run with id s_id\n M: (int) number of components in MOG\n filename: (str) the mcmc filename\n power:(float) power of the kernel\n target_kt: (int) if target kt needs to be run (anything but 0) else 0\n compute_power: (int) if power kernel needs to be computed (anything but 0) else 0\n standard_thin: (int) if standard thinning needs to be run (anything but 0) else 0\n power_kt: (int) if power kT needs to be run (anything but 0) else 0\n kt_plus: (int) if kT+ needs to be run (anything but 0) else 0 [compute power must not be 0]\n rerun: (int) if experiments should be rerun (anything but 0) else 0\n nu: (float/int) a parameter for some kernels\n \n '''\n param_str = new_fix_param_str + ' -m ' + str(m_max+1) + ' -d ' + str(d)\n param_str += ' -r0 ' + str(rep0) + ' -rn ' + str(repn)\n param_str += ' -cm ' + str(computemmd) # whether to compute mmd\n param_str += ' -M ' + str(M) # mog number of components\n param_str += ' -f ' + filename # mcmc filename\n param_str += ' -cp ' + str(compute_power) # whether to compute power kernel\n param_str += ' -pow ' + str(power) # power for power kernel\n param_str += ' -tkt ' + str(target_kt) # whether to run target KT\n param_str += ' -st ' + str(standard_thin) # whether to run standard thin\n param_str += ' -pkt ' + str(power_kt) # whether to run power KT\n param_str += ' -ktp ' + str(kt_plus) # whether to run KT+\n param_str += ' -rr ' + str(rerun) # whether to rerun\n param_str += ' -nu ' + str(nu) # nu param for IMQ/Matern or beta param for Bspline\n\n s = Slurm(f\"{prefix}d{d}m{m_max}r{rep0}\", {\"partition\": partitions[idx], \n \"c\": 1\n })\n temp_ids.append(s.run(param_str, depends_on=[s_id])) # wait for the compilation of the run_ktplus_notebook\n return\n\ndef combinemmd_slurm_command(prefix, fix_param_str, m_max, d, total_reps, computemmd, temp_ids,\n M=0, filename='temp',\n power=0.5,\n compute_power=0,\n 
target_kt=1,\n standard_thin=0,\n power_kt=0,\n kt_plus=0,\n rerun=0, \n nu=0.5):\n '''\n Deploys a slurm job that combines all thinning experiment results based on the parameters\n prefix: (str) prefix for slurm id description\n fix_param_str: basic description of the code to be run\n m_max : (int) max size of input\n d: (int) dimension\n total_reps: (int) range of reps to be comibined, code will combine 0, ..., total_reps-1\n computemmd: (int) whether mmd needs to be computed (anything but 0) else 0\n temp_ids: ids of slurm jobs\n M: (int) number of components in MOG\n filename: (str) the mcmc filename\n power:(float) power of the kernel\n target_kt: (int) if target kt needs to be run (anything but 0) else 0\n compute_power: (int) if power kernel needs to be computed (anything but 0) else 0\n standard_thin: (int) if standard thinning needs to be run (anything but 0) else 0\n power_kt: (int) if power kT needs to be run (anything but 0) else 0\n kt_plus: (int) if kT+ needs to be run (anything but 0) else 0 [compute power must not be 0]\n rerun: (int) if experiments should be rerun (anything but 0) else 0\n nu: (float/int) a parameter for some kernels\n '''\n # combine the results once all runs done; wait for temp_ids to finish\n param_str = fix_param_str + ' -m ' + str(m_max+1) + ' -d ' + str(d)\n param_str += ' -r0 ' + str(0) + ' -rn ' + str(total_reps)\n param_str += ' -cm ' + str(computemmd) # whether to compute mmd\n param_str += ' -scr ' + str(1) # this activates combinining\n param_str += ' -M ' + str(M) # mog number of components\n param_str += ' -f ' + filename # mcmc filename\n param_str += ' -cp ' + str(compute_power) # whether to compute power kernel\n param_str += ' -pow ' + str(power) # power for power kernel\n param_str += ' -tkt ' + str(target_kt) # whether to run target KT\n param_str += ' -st ' + str(standard_thin) # whether to run standard thin\n param_str += ' -pkt ' + str(power_kt) # whether to run power KT\n param_str += ' -ktp ' + str(kt_plus) # whether to run KT+\n param_str += ' -rr ' + str(rerun) # whether to rerun\n param_str += ' -nu ' + str(nu) # nu param for IMQ/Matern or beta param for Bspline\n \n s = Slurm(f\"C{prefix}d{d}m{m_max}r{total_reps}\", {\"partition\": partitions[idx], \n \"c\": 1\n })\n s.run(param_str, depends_on=temp_ids)\n return\n\n# define the slurm object\npartitions = [\"high\", \"yugroup\", \"jsteinhardt\", \"low\"]\nidx = 2 # which partition to pick\n\ns = Slurm(\"convert\", {\"partition\": partitions[idx], \n \"c\": 1\n })\n\n# convert the run_kt_experiment ipython notebook into a python file\ns_id = s.run('module load python; python compile_notebook.py run_generalized_kt_experiment.ipynb')\n\n# define repetition and m parameters\ntotal_reps = 10 # set this to the max number of repetitions\nreps_per_job = 1\ncombine = True # whether to combine all mmd results or not\nonly_combine = False # whether we only run experiments to combine mmd results\n\nm_max = 7 ## max sample size processed is 2**(2*m_max); and the output size is 2**(m_max)\ncomputemmd = 1 #\n\n\n### All experiments are run with Gauss(sigma) as k and Gauss(sigma/sqrt(2)) as krt ###\ngauss_target = False # Gauss P\nmog_target = False # MoG P\nmcmc_target = True # MCMC P\nmcmc_file_idx = range(16, 24) # range of MCMC files that need to be run; RUN 12--16, and 16--24 separately\nrerun = 0 # BUT STILL DOESN\"T RERUN IF DURING COMBINING ; SO DON\"T EXPECT RERUN IF TOTAL REPS = REPS_PER_JOB\n\nall_mcmc_filenames = ['Goodwin_RW','Goodwin_ADA-RW', 'Goodwin_MALA', 
'Goodwin_PRECOND-MALA', 'Lotka_RW', 'Lotka_ADA-RW', 'Lotka_MALA', 'Lotka_PRECOND-MALA','Hinch_P_seed_1_temp_1', 'Hinch_P_seed_2_temp_1', 'Hinch_TP_seed_1_temp_8', 'Hinch_TP_seed_2_temp_8', 'Hinch_P_seed_1_temp_1_scaled', 'Hinch_P_seed_2_temp_1_scaled', 'Hinch_TP_seed_1_temp_8_scaled', 'Hinch_TP_seed_2_temp_8_scaled', \n'Goodwin_RW_float_step', 'Goodwin_ADA-RW_float_step', 'Goodwin_MALA_float_step', 'Goodwin_PRECOND-MALA_float_step', 'Lotka_RW_float_step', 'Lotka_ADA-RW_float_step', 'Lotka_MALA_float_step',\n 'Lotka_PRECOND-MALA_float_step']\n# files to run for MCMC experiments; \n# 0-4 for Goodwin, 4-8 for Lotka, 8-12 for Hinch, \n# 12-16 for Hinch Scaled, where the entire chain was standardized coordinate wise (centered, and scaled)\n# 16-24 for Goodwin/Lotka_float_step experiments with sampling indices computed using np.linspace, rather than np.arange\n\nif gauss_target:\n ds = [2, 10, 20, 50, 100] #, 10, 20, 50, 100] # for Gauss P\n# ds = [2, 4, 10, 100] # for Gauss P\nif mog_target:\n Ms = [4, 6, 8] #, 8] # M = number of mixtures for 2 dim MOG P\nif mcmc_target:\n ## NOTE for Hinch /Hinch_scale MCMC experiments m_max <=8 is permitted\n mcmc_files = np.array(all_mcmc_filenames)[mcmc_file_idx] # filename denotes the MCMC setting to be loaded;\n # filename denotes the MCMC setting to be loaded; kernel k is Gauss(sigma^2), where sigma = med dist(Phat)\n # and Pstart is 2^15 sized point set obtained from standard thinning of post-burn in samples from the end\n # burn_in and sigma params are pre-loaded in the sample functions\n # samples are loaded from pkl files for Hinch dataset\n\n# MCMC P : 4 experiments each for Goodwin/Lotka-Volterra/Hinch\n# - kernels : (1) Goodwin/Lotka:\n# - Laplace k with KT+ (power = 0.81)\n# - NOTE: the power kernel is matern with parameter nu_eff \n# - where nu_eff = power * (d+1)/2 - d/2 or power = 2*nu_eff/(d+1) + d/(d+1)\n# - Since d = 4, we have nu_eff = 0.025\n# - (2) Hinch:\n# - IMQ k with KT+ with nu = 0.5 (power = 0.5)\n\n# # check sizes of list\n# for t in [power_list, target_kt_flags, power_kt_flags, kt_plus_flags, nu_list]:\n# assert(len(t) == len(kernel_list))\n\n\nfix_param_str = 'module load python; python3 run_generalized_kt_experiment.py ' \n\n### experiment combinations ###\n# in all cases, var/gamma parameter is set automatically; \n# sigma = 1/gamma = sqrt(2d) for Gauss/MoG, and median distance in MCMC)\n# for non Gauss/Laplace cases we have to specify nu parameter\n \nif gauss_target:\n # Gauss P : d = 2, 10, 20, 50, 100\n # - kernels : (1) Gauss k with t-KT and KT(rt)\n # list of kernels to be run\n kernel_list = [\"gauss\"] \n # list of powers for the kernels (should be same size as kernel_list)\n power_list = [0.5]\n # whether power kernel needs to be computed (should be same size as kernel_list)\n compute_power_list = [1]\n # whether standard thinning needs to be computed (should be same size as kernel_list)\n standard_thin_flags = [1]\n # whether target KT needs to be computed (should be same size as kernel_list)\n target_kt_flags = [1]\n # whether KT+ needs to be computed (should be same size as kernel_list)\n kt_plus_flags = [0]\n # whether power KT needs to be computed (should be same size as kernel_list)\n power_kt_flags = [1]\n \n \n # run gaussian experiments\n for kk, kernel in enumerate(kernel_list):\n new_fix_param_str = fix_param_str + ' -P gauss' + ' -kernel ' + kernel\n prefix =f\"G{kernel[0]}\"\n for d in ds:\n temp_ids = []\n # if reps_per_job == total_reps the goal is to generally combine\n if not only_combine:\n for i in 
range(0, total_reps, reps_per_job):\n singlejob_slurm_command(prefix, temp_ids, new_fix_param_str, m_max, d, i, \n reps_per_job, computemmd, s_id=s_id,\n compute_power=compute_power_list[kk],\n power=power_list[kk],\n target_kt=target_kt_flags[kk],\n standard_thin=standard_thin_flags[kk],\n power_kt=power_kt_flags[kk],\n kt_plus=kt_plus_flags[kk],\n rerun=rerun,\n )\n \n # combine the results once all runs done; wait for temp_ids to finish\n if combine:\n if computemmd==1: combinemmd_slurm_command(prefix, new_fix_param_str, m_max, \n d, total_reps, computemmd, temp_ids=temp_ids,\n compute_power=compute_power_list[kk],\n power=power_list[kk],\n target_kt=target_kt_flags[kk],\n standard_thin=standard_thin_flags[kk],\n power_kt=power_kt_flags[kk],\n kt_plus=kt_plus_flags[kk],\n rerun=0,)\n\n\nif mog_target:\n # MoG P : M = 4, 6, 8 in R^2\n # - kernels : (1) Gauss k with t-KT, KT(rt) (power = 0.5)\n # - (2) Laplace k with KT+ (power = 0.7)\n # - NOTE: the power kernel is matern with parameter nu_eff \n # - where nu_eff = power * (d+1)/2 - d/2 or power = 2*nu_eff/(d+1) + d/(d+1)\n # - Since d = 2, we have nu_eff = 0.05\n # - (3) IMQ k with KT+ with nu = 0.5 (power = 0.5)\n # - (4) B-spline with KT+ with nu = 2 (power = 2/3.) --nu is same as beta for b-spine--(power = (nu+2) / (2*nu + 2))\n\n # list of kernels to be run\n kernel_list = [\"gauss\", \"laplace\", \"imq\", \"bspline\"] #[\"gauss\", \"sinc\", \"laplace\", \"imq\", \"matern\", \"bspline\"]\n # list of powers for the kernels (should be same size as kernel_list)\n power_list = [0.5, 0.7, 0.5, 2./3]\n # whether power kernel needs to be computed (should be same size as kernel_list)\n compute_power_list = [1, 1, 1, 1]\n # whether standard thinning needs to be computed (should be same size as kernel_list)\n standard_thin_flags = [1, 0, 0, 0] \n # whether target KT needs to be computed (should be same size as kernel_list)\n target_kt_flags = [1, 0, 0, 0]\n # whether KT+ needs to be computed (should be same size as kernel_list)\n kt_plus_flags = [0, 1, 1, 1]\n # whether power KT needs to be computed (should be same size as kernel_list)\n power_kt_flags = [1, 0, 0, 0] # same as root kt when root_power = 0.5\n # the list of nu parameteter list (irrelevant for Gauss/Laplacae)\n nu_list = [0., 0., 0.5, 2.] 
# nu is same as beta for bspline\n \n # run MOG experiments\n d = 2 # doesn't matter; will be set internally automatically; just specify some int\n for kk, kernel in enumerate(kernel_list):\n new_fix_param_str = fix_param_str + ' -P mog' + ' -kernel ' + kernel\n for M in Ms:\n prefix =f\"M{M}{kernel[0]}\"\n temp_ids = []\n if not only_combine:\n for i in range(0, total_reps, reps_per_job):\n singlejob_slurm_command(prefix, temp_ids, new_fix_param_str, m_max, \n d, i, reps_per_job, computemmd, \n s_id=s_id, M=M, \n compute_power=compute_power_list[kk],\n power=power_list[kk],\n target_kt=target_kt_flags[kk],\n standard_thin=standard_thin_flags[kk],\n power_kt=power_kt_flags[kk],\n kt_plus=kt_plus_flags[kk],\n rerun=rerun,\n nu=nu_list[kk])\n if combine:\n if computemmd==1: combinemmd_slurm_command(prefix, new_fix_param_str, m_max, \n d, total_reps, computemmd, \n temp_ids=temp_ids, M=M,\n compute_power=compute_power_list[kk],\n power=power_list[kk],\n target_kt=target_kt_flags[kk],\n standard_thin=standard_thin_flags[kk],\n power_kt=power_kt_flags[kk],\n kt_plus=kt_plus_flags[kk],\n rerun=0,\n nu=nu_list[kk])\n\n# laplace kernel for Goodwin/Lotka-Volterra\nif mcmc_target and mcmc_file_idx==range(16, 24):\n # list of kernels to be run\n kernel_list = [\"laplace\"] #[\"gauss\", \"sinc\", \"laplace\", \"imq\", \"matern\", \"bspline\"]\n # list of powers for the kernels (should be same size as kernel_list)\n power_list = [0.81]\n # whether power kernel needs to be computed (should be same size as kernel_list)\n compute_power_list = [1]\n # whether standard thinning needs to be computed (should be same size as kernel_list)\n standard_thin_flags = [1] \n # whether target KT needs to be computed (should be same size as kernel_list)\n target_kt_flags = [0]\n # whether KT+ needs to be computed (should be same size as kernel_list)\n kt_plus_flags = [1]\n # whether power KT needs to be computed (should be same size as kernel_list)\n power_kt_flags = [0] # same as root kt when root_power = 0.5\n # the list of nu parameteter list (irrelevant for Gauss/Laplacae)\n nu_list = [0.] 
# nu is same as beta for bspline\n\n# IMQ kernel for Hinch\nif mcmc_target and mcmc_file_idx==range(12, 16):\n # list of kernels to be run\n kernel_list = [\"imq\"] #[\"gauss\", \"sinc\", \"laplace\", \"imq\", \"matern\", \"bspline\"]\n # list of powers for the kernels (should be same size as kernel_list)\n power_list = [0.5]\n # whether power kernel needs to be computed (should be same size as kernel_list)\n compute_power_list = [1]\n # whether standard thinning needs to be computed (should be same size as kernel_list)\n standard_thin_flags = [1] \n # whether target KT needs to be computed (should be same size as kernel_list)\n target_kt_flags = [0]\n # whether KT+ needs to be computed (should be same size as kernel_list)\n kt_plus_flags = [1]\n # whether power KT needs to be computed (should be same size as kernel_list)\n power_kt_flags = [0] # same as root kt when root_power = 0.5\n # the list of nu parameteter list (irrelevant for Gauss/Laplacae)\n nu_list = [0.5] # nu is same as beta for bspline\n \nif mcmc_target:\n # run MCMC experiments\n d = 4 # doesn't matter; will be set internally automatically; just specify some int\n for kk, kernel in enumerate(kernel_list):\n new_fix_param_str = fix_param_str + ' -P mcmc' + ' -kernel ' + kernel\n \n for filename in mcmc_files:\n prefix =f\"m{filename[0]}{kernel[0]}\"\n temp_ids = []\n if reps_per_job != total_reps:\n for i in range(0, total_reps, reps_per_job):\n singlejob_slurm_command(prefix, temp_ids, new_fix_param_str, m_max, d, i, \n reps_per_job, computemmd, s_id=s_id, \n filename=filename, power=power_list[kk],\n compute_power=compute_power_list[kk],\n target_kt=target_kt_flags[kk],\n standard_thin=standard_thin_flags[kk],\n power_kt=power_kt_flags[kk],\n kt_plus=kt_plus_flags[kk],\n rerun=rerun)\n if combine:\n if computemmd==1: combinemmd_slurm_command(prefix, new_fix_param_str, m_max, d, total_reps, \n computemmd, temp_ids=temp_ids, \n filename=filename, power=power_list[kk],\n compute_power=compute_power_list[kk],\n target_kt=target_kt_flags[kk],\n standard_thin=standard_thin_flags[kk],\n power_kt=power_kt_flags[kk],\n kt_plus=kt_plus_flags[kk],\n rerun=0) \n\n\n", "import numpy as np\nfrom argparse import ArgumentParser\n\nimport pathlib\nimport os\nimport os.path\nimport pickle as pkl\n\nfrom goodpoints.tictoc import tic, toc # for timing blocks of code\nfrom goodpoints.util import fprint # for printing while flushing buffer\n\n# utils for getting arguments, generating samples, evaluating kernels, and mmds, getting filenames\nfrom util_parse import get_args_from_terminal\nfrom util_sample import compute_params_p, sample, sample_string\nfrom util_k_mmd import compute_params_k, squared_mmd\nfrom util_filenames import get_file_template\n\n# When called as a script, call construct_st_coresets\ndef main():\n return(construct_st_coresets(get_args_from_terminal()))\n\ndef construct_st_coresets(args):\n '''\n\n Generate standard thinning coresets of size 4**args.size/2**args.m from inputs of \n size 4**args.size and load/save its mmd based on multiple arguments. \n It does the following tasks:\n\n 1. Generate ST coreset if args.rerun == True OR args.computemmd == True\n 2. Compute mmd and save to disk if args.computemmd == True AND (mmd not on disk OR args.recomputemmd OR args.rerun), else load from disk\n 3. Return the coreset if args.returncoreset == True\n 4. 
Return the mmd if args.returncoreset == False and args.computemmd == True\n\n The function takes multiple input arguments as a dictionary that isnormally processed by \n the parser function. One can directly specify / access the arguments as args.argument where \n all the arguments are listed below:\n (all arguments are optional, and the default value is specified below)\n\n resultsfolder: (str; default coresets_folder) folder to save results (relative to where the script is run)\n seed : (int; default 123456789) seed for experiment\n rerun : (bool; default False) whether to rerun the experiments\n size : (int; default 2) sample set of size in log base 4, i.e., input size = 4**size\n rep0 : (int; default 0) starting experiment id (useful for multiple repetitions)\n repn : (int; default 1) number of experiment replication (useful for multiple repetitions)\n m : (int; default 2) number of thinning rounds; output size = 4**size / 2**m\n d : (int; default 2) dimension of the points\n returncoreset: (bool; default False) whether to return coresets\n verbose : (bool; default False) whether to print coresets and mmds\n computemmd : (bool; default False) whether to compute mmd results; if exist on disk load from it, \n else compte and save to disk; return them if returncoreset is False\n recomputemmd : (bool; default False) whether to re-compute mmd results and save on disk (refreshes disk results)\n setting : (str; default gauss) name of the target distribution P for running the experiments \n (needs to be supported by functions: compute_params_p and sample function in util_sample.py; \n and p_kernel, ppn_kernel and pp_kernel functions in util_k_mmd.py)\n M : (int; default None) number of mixture for diag mog in d=2, used only when setting = mog\n by compute_params_p (and in turn by compute_mog_params_p) in util_sample.py\n filename : (str; default None) name for MCMC target, used only when setting = mcmc\n by compute_params_p (and in turn by compute_mcmc_params_p) in util_sample.py\n this setting would require samples to be preloaded in mcmcfolder\n mcmcfolder : (str; default data) folder to load MCMC data from, and save some \n PPk like objects to save time while computing mmd\n '''\n pathlib.Path(args.resultsfolder).mkdir(parents=True, exist_ok=True)\n \n ####### seeds ####### \n\n seed_sequence = np.random.SeedSequence(entropy = args.seed)\n seed_sequence_children = seed_sequence.spawn(3)\n\n sample_seeds_set = seed_sequence_children[0].generate_state(1000)\n\n # compute d, params_p and var_k for the setting\n d, params_p, var_k = compute_params_p(args)\n \n # define the kernels\n params_k_split, params_k_swap, split_kernel, swap_kernel = compute_params_k(d=d, var_k=var_k, \n use_krt_split=args.krt, name=\"gauss\") \n \n # probability threshold\n delta = 0.5\n\n # mmd \n mmds = np.zeros(args.repn)\n \n for i, rep in enumerate(np.arange(args.rep0, args.rep0+args.repn)):\n sample_seed = sample_seeds_set[rep]\n\n prefix = \"ST\"\n file_template = get_file_template(args.resultsfolder, prefix, d, args.size, args.m, params_p, params_k_split, \n params_k_swap, delta=delta, \n sample_seed=sample_seed, thin_seed=None, \n compress_seed=None,\n compressalg=None, \n g=None,\n )\n \n tic()\n # Include replication number in mmd filenames\n filename = file_template.format('mmd', rep)\n input_size = 4**(args.size)\n coreset = np.linspace(0, input_size-1, int(input_size/2**args.m), dtype=int, endpoint=True)\n if args.rerun or args.computemmd:\n fprint(f\"Running ST experiment with template 
{filename}.....\")\n if not args.recomputemmd and os.path.exists(filename): \n print(f\"Loading mmd from {filename} (already present)\")\n with open(filename, 'rb') as file:\n mmd = pkl.load(file)\n else:\n print('(re) Generating ST coreset')\n X = sample(4**(args.size),params_p, seed = sample_seed)\n print(\"computing mmd\")\n if 'X' not in locals(): X = sample(4**(args.size),params_p, seed = sample_seed)\n if params_p[\"saved_samples\"]:# if MCMC data compute MMD(Sin), i.e., MMD from the input data\n params_p_eval = dict()\n params_p_eval[\"data_dir\"] = params_p[\"data_dir\"]\n params_p_eval[\"d\"] = d\n params_p_eval[\"name\"] = params_p[\"name\"]+ \"_sin\"\n params_p_eval[\"Pnmax\"] = X\n params_p_eval[\"saved_samples\"] = False\n else:\n params_p_eval = params_p\n mmd = np.sqrt(squared_mmd(params_k=params_k_swap, params_p=params_p_eval, xn=X[coreset]))\n with open(filename, 'wb') as file:\n pkl.dump(mmd, file, protocol=pkl.HIGHEST_PROTOCOL)\n mmds[i] = mmd\n toc()\n if args.verbose:\n print(f\"CORESET: {coreset}\")\n print(f\"mmds: {mmds}\")\n if args.returncoreset:\n if 'X' not in globals(): X = sample(4**(args.size),params_p, seed = sample_seed)\n return(X, X[coreset])\n else:\n if args.computemmd:\n return(mmds)\n \nif __name__ == \"__main__\":\n main()\n \n" ]
[ [ "numpy.log2", "numpy.sqrt", "numpy.linspace", "numpy.arange", "numpy.vstack", "numpy.eye", "numpy.cos", "numpy.ones", "numpy.sin", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.random.default_rng" ], [ "numpy.array" ], [ "numpy.arange", "numpy.zeros", "numpy.random.SeedSequence" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vivid-k/Global-Encoding
[ "9105925f371cf427083625100497a98ad8fcdbdf" ]
[ "models/SubLayers.py" ]
[ "''' Define the sublayers in encoder/decoder layer '''\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom models.Modules import ScaledDotProductAttention\n\n__author__ = \"Yu-Hsiang Huang\"\n\nclass MultiHeadAttention(nn.Module):\n ''' Multi-Head Attention module '''\n\n def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):\n super().__init__()\n\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n self.w_qs = nn.Linear(d_model, n_head * d_k)\n self.w_ks = nn.Linear(d_model, n_head * d_k)\n self.w_vs = nn.Linear(d_model, n_head * d_v)\n nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))\n\n self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))\n self.layer_norm = nn.LayerNorm(d_model)\n\n self.fc = nn.Linear(n_head * d_v, d_model)\n nn.init.xavier_normal_(self.fc.weight)\n\n self.dropout = nn.Dropout(dropout)\n\n\n def forward(self, q, k, v, mask=None):\n\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n\n sz_b, len_q, _ = q.size()\n sz_b, len_k, _ = k.size()\n sz_b, len_v, _ = v.size()\n\n residual = q\n\n q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)\n\n q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk\n k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk\n v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv\n\n mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..\n output, attn = self.attention(q, k, v, mask=mask)\n\n output = output.view(n_head, sz_b, len_q, d_v)\n output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)\n\n output = self.dropout(self.fc(output))\n output = self.layer_norm(output + residual)\n\n return output, attn\n\nclass PositionwiseFeedForward(nn.Module):\n ''' A two-feed-forward-layer module '''\n\n def __init__(self, d_in, d_hid, dropout=0.1):\n super().__init__()\n self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise\n self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise\n self.layer_norm = nn.LayerNorm(d_in)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n residual = x\n output = x.transpose(1, 2)\n output = self.w_2(F.relu(self.w_1(output)))\n output = output.transpose(1, 2)\n output = self.dropout(output)\n output = self.layer_norm(output + residual)\n return output\n" ]
[ [ "torch.nn.Dropout", "numpy.sqrt", "numpy.power", "torch.nn.init.xavier_normal_", "torch.nn.LayerNorm", "torch.nn.Linear", "torch.nn.Conv1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
epiviz/parser
[ "bba301db40458b6666653c77a446db8778ee07f8" ]
[ "src/epivizFileParser/GtfParsedFile.py" ]
[ "import pysam\nfrom .utils import toDataFrame\nfrom .Helper import get_range_helper\nimport pandas as pd\nfrom aiocache import cached, Cache\nfrom aiocache.serializers import JsonSerializer, PickleSerializer\n\n__author__ = \"Jayaram Kancherla\"\n__copyright__ = \"jkanche\"\n__license__ = \"mit\"\n\n\nclass GtfParsedFile(object):\n \"\"\"\n GTF File Class to parse gtf/gff files \n\n Args:\n file (str): file location can be local (full path) or hosted publicly\n columns ([str]) : column names for various columns in file\n\n Attributes:\n file: a pysam file object\n fileSrc: location of the file\n cacheData: cache of accessed data in memory\n columns: column names to use\n \"\"\"\n\n def __init__(self, file, columns=[\"chr\", \"start\", \"end\", \"width\", \"strand\", \"geneid\", \"exon_starts\", \"exon_ends\", \"gene\"]):\n self.fileSrc = file\n self.columns = columns\n\n print(\"Loading annotations\", file)\n self.file = pd.read_csv(file, sep=\"\\t\", names=columns)\n self.file[\"gene_idx\"] = self.file[\"gene\"]\n self.file = self.file.set_index(\"gene_idx\")\n\n print(\"Parsing chromsomes and their lengths\")\n chromosomes = []\n groupByChr = self.file.groupby(\"chr\")\n\n for name, gdf in groupByChr:\n chromosomes.append([name, 1, int(gdf[\"end\"].values.max())])\n\n self.chromosomes = chromosomes\n\n def parse_attribute(self, item, key):\n if key in item:\n tstr = item.split(key, 1)\n tstrval = tstr[1].split(\";\", 1)\n return tstrval[0][1:]\n else:\n return None\n\n def search_gene(self, query, maxResults=5):\n result = []\n err = None\n\n try:\n if len(query) > 1:\n matched = self.file[self.file[\"gene\"].str.contains(\n query, na=False, case=False)]\n\n counter = 0\n for index, row in matched.iterrows():\n rec = {\n \"chr\": row[\"chr\"],\n \"start\": int(row[\"start\"]),\n \"end\": int(row[\"end\"]),\n \"gene\": row[\"gene\"],\n \"strand\": row[\"strand\"]\n }\n result.append(rec)\n counter += 1\n if counter >= int(maxResults):\n break\n\n return result, err\n except Exception as e:\n return {}, str(e)\n\n def get_col_names(self):\n return self.columns\n\n def getRange(self, chr, start, end, bins=2000, zoomlvl=-1, metric=\"AVG\", respType=\"DataFrame\"):\n \"\"\"Get data for a given genomic location\n\n Args:\n chr (str): chromosome \n start (int): genomic start\n end (int): genomic end\n respType (str): result format type, default is \"DataFrame\n\n Returns:\n result\n a DataFrame with matched regions from the input genomic location if respType is DataFrame else result is an array\n error \n if there was any error during the process\n \"\"\"\n result = pd.DataFrame(columns=[\n \"chr\", \"start\", \"end\", \"width\", \"strand\", \"geneid\", \"exon_starts\", \"exon_ends\", \"gene\"])\n\n try:\n result = self.file[(self.file[\"start\"] <= end) & (\n self.file[\"end\"] >= start) & (self.file[\"chr\"] == chr)]\n result = result.sort_values(by=[\"chr\", \"start\", \"end\"])\n return result, None\n except Exception as e:\n return result, str(e)\n\n @cached(ttl=None, cache=Cache.MEMORY, serializer=PickleSerializer(), namespace=\"gtfsearchgene\")\n async def searchGene(self, query, maxResults=5):\n return self.search_gene(query, maxResults)\n\n @cached(ttl=None, cache=Cache.MEMORY, serializer=PickleSerializer(), namespace=\"gtfgetdata\")\n async def get_data(self, chr, start, end, bins=2000, zoomlvl=-1, metric=\"AVG\", respType=\"DataFrame\"):\n return self.getRange(chr, start, end, bins=bins, zoomlvl=zoomlvl, metric=metric, respType=respType)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
empyriumz/TAPER-EHR
[ "fc89a27730a6eb6d4b321832e017c7e9662fa2e3" ]
[ "model/seq_model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom base import BaseModel\nfrom model.mem_transformer import MemTransformerLM\nfrom model.gru_ae import *\nimport numpy as np\nclass Seq_Attention(BaseModel):\n def __init__(\n self,\n transformer_state_path,\n num_classes,\n codes=True,\n demographics=True,\n demographics_size=0,\n div_factor=2,\n dropout=0.5,\n ):\n super(Seq_Attention, self).__init__()\n\n self.num_classes = num_classes\n self.demographics = demographics\n self.demographics_size = demographics_size\n self.codes = codes\n\n state_dict = torch.load(transformer_state_path)\n transformer_config = state_dict[\"config\"]\n state_dict = state_dict[\"state_dict\"]\n\n transformer_args = transformer_config[\"model\"][\"args\"]\n self.transformer = MemTransformerLM(**transformer_args)\n\n self.transformer.load_state_dict(state_dict)\n self.transformer.eval()\n\n self.patient_rep_size = +self.transformer.d_embed * int(\n self.codes\n ) + self.demographics_size * int(self.demographics)\n self.predictor = nn.Sequential(\n nn.Dropout(p=dropout),\n nn.Linear(\n self.patient_rep_size,\n self.patient_rep_size // div_factor,\n ),\n nn.ReLU(inplace=True),\n nn.Linear(self.patient_rep_size // div_factor, self.num_classes),\n )\n\n def forward(self, x, device=\"cuda\"):\n x_codes, x_cl, b_is, demo = x\n\n x_codes = x_codes.to(device)\n x_cl = x_cl.to(device)\n demo = demo.to(device)\n b_is = b_is.to(device)\n #patient_rep = torch.Tensor([]).to(device)\n patient_rep = None\n with torch.no_grad():\n mem_out = self.transformer._forward(x_codes)\n mem_out = mem_out[x_cl, b_is, :]\n if self.codes and self.demographics: \n patient_rep = torch.cat((mem_out, demo), dim=1)\n elif self.codes and not self.demographics:\n patient_rep = mem_out\n elif not self.codes and self.demographics:\n patient_rep = demo\n else:\n raise ValueError(\"codes and demographics can be false at the same time\")\n # if self.demographics:\n # if len(patient_rep.shape) == 0:\n # patient_rep = demo\n # else:\n # if len(patient_rep.shape) == 1:\n # patient_rep = patient_rep.unsqueeze(dim=0)\n # patient_rep = torch.cat((patient_rep, demo), dim=1)\n\n logits = self.predictor(patient_rep)\n if self.num_classes > 1:\n log_probs = F.log_softmax(logits, dim=1).squeeze()\n else: \n log_probs = torch.sigmoid(logits)\n \n if len(logits) == 1:\n logits = logits.squeeze(dim=0)\n log_probs = log_probs.squeeze(dim=0)\n else:\n logits = logits.squeeze()\n log_probs = log_probs.squeeze()\n \n return log_probs, logits\n\n def __str__(self):\n \"\"\"\n Model prints with number of trainable parameters\n \"\"\"\n model_parameters = filter(lambda p: p.requires_grad, self.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters if p is not None])\n return \"\\nTrainable parameters: {}\".format(params)" ]
[ [ "torch.nn.Dropout", "torch.sigmoid", "torch.nn.functional.log_softmax", "torch.load", "torch.cat", "torch.nn.Linear", "torch.no_grad", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NEUdeep/TileDetection
[ "77b5ef4bb4db29f5ffe6a6fa9f87b4bfe8516e4c", "f453ac868de195a7859b9bf07c813e46eb35d2d0" ]
[ "mmdet/core/evaluation/mean_ap.py", "utils/util.py" ]
[ "from multiprocessing import Pool\n\nimport mmcv\nimport numpy as np\nfrom mmcv.utils import print_log\nfrom terminaltables import AsciiTable\n\nfrom .bbox_overlaps import bbox_overlaps\nfrom .class_names import get_classes\n\n\ndef average_precision(recalls, precisions, mode='area'):\n \"\"\"Calculate average precision (for single or multiple scales).\n\n Args:\n recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )\n precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )\n mode (str): 'area' or '11points', 'area' means calculating the area\n under precision-recall curve, '11points' means calculating\n the average precision of recalls at [0, 0.1, ..., 1]\n\n Returns:\n float or ndarray: calculated average precision\n \"\"\"\n no_scale = False\n if recalls.ndim == 1:\n no_scale = True\n recalls = recalls[np.newaxis, :]\n precisions = precisions[np.newaxis, :]\n assert recalls.shape == precisions.shape and recalls.ndim == 2\n num_scales = recalls.shape[0]\n ap = np.zeros(num_scales, dtype=np.float32)\n if mode == 'area':\n zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)\n ones = np.ones((num_scales, 1), dtype=recalls.dtype)\n mrec = np.hstack((zeros, recalls, ones))\n mpre = np.hstack((zeros, precisions, zeros))\n for i in range(mpre.shape[1] - 1, 0, -1):\n mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])\n for i in range(num_scales):\n ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]\n ap[i] = np.sum(\n (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])\n elif mode == '11points':\n for i in range(num_scales):\n for thr in np.arange(0, 1 + 1e-3, 0.1):\n precs = precisions[i, recalls[i, :] >= thr]\n prec = precs.max() if precs.size > 0 else 0\n ap[i] += prec\n ap /= 11\n else:\n raise ValueError(\n 'Unrecognized mode, only \"area\" and \"11points\" are supported')\n if no_scale:\n ap = ap[0]\n return ap\n\n\ndef tpfp_imagenet(det_bboxes,\n gt_bboxes,\n gt_bboxes_ignore=None,\n default_iou_thr=0.5,\n area_ranges=None):\n \"\"\"Check if detected bboxes are true positive or false positive.\n\n Args:\n det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5).\n gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).\n gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,\n of shape (k, 4). Default: None\n default_iou_thr (float): IoU threshold to be considered as matched for\n medium and large bboxes (small ones have special rules).\n Default: 0.5.\n area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,\n in the format [(min1, max1), (min2, max2), ...]. Default: None.\n\n Returns:\n tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of\n each array is (num_scales, m).\n \"\"\"\n # an indicator of ignored gts\n gt_ignore_inds = np.concatenate(\n (np.zeros(gt_bboxes.shape[0], dtype=np.bool),\n np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool)))\n # stack gt_bboxes and gt_bboxes_ignore for convenience\n gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))\n\n num_dets = det_bboxes.shape[0]\n num_gts = gt_bboxes.shape[0]\n if area_ranges is None:\n area_ranges = [(None, None)]\n num_scales = len(area_ranges)\n # tp and fp are of shape (num_scales, num_gts), each row is tp or fp\n # of a certain scale.\n tp = np.zeros((num_scales, num_dets), dtype=np.float32)\n fp = np.zeros((num_scales, num_dets), dtype=np.float32)\n if gt_bboxes.shape[0] == 0:\n if area_ranges == [(None, None)]:\n fp[...] 
= 1\n else:\n det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (\n det_bboxes[:, 3] - det_bboxes[:, 1])\n for i, (min_area, max_area) in enumerate(area_ranges):\n fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1\n return tp, fp\n ious = bbox_overlaps(det_bboxes, gt_bboxes - 1)\n gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0]\n gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1]\n iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),\n default_iou_thr)\n # sort all detections by scores in descending order\n sort_inds = np.argsort(-det_bboxes[:, -1])\n for k, (min_area, max_area) in enumerate(area_ranges):\n gt_covered = np.zeros(num_gts, dtype=bool)\n # if no area range is specified, gt_area_ignore is all False\n if min_area is None:\n gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)\n else:\n gt_areas = gt_w * gt_h\n gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)\n for i in sort_inds:\n max_iou = -1\n matched_gt = -1\n # find best overlapped available gt\n for j in range(num_gts):\n # different from PASCAL VOC: allow finding other gts if the\n # best overlaped ones are already matched by other det bboxes\n if gt_covered[j]:\n continue\n elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:\n max_iou = ious[i, j]\n matched_gt = j\n # there are 4 cases for a det bbox:\n # 1. it matches a gt, tp = 1, fp = 0\n # 2. it matches an ignored gt, tp = 0, fp = 0\n # 3. it matches no gt and within area range, tp = 0, fp = 1\n # 4. it matches no gt but is beyond area range, tp = 0, fp = 0\n if matched_gt >= 0:\n gt_covered[matched_gt] = 1\n if not (gt_ignore_inds[matched_gt]\n or gt_area_ignore[matched_gt]):\n tp[k, i] = 1\n elif min_area is None:\n fp[k, i] = 1\n else:\n bbox = det_bboxes[i, :4]\n area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])\n if area >= min_area and area < max_area:\n fp[k, i] = 1\n return tp, fp\n\n\ndef tpfp_default(det_bboxes,\n gt_bboxes,\n gt_bboxes_ignore=None,\n iou_thr=0.5,\n area_ranges=None):\n \"\"\"Check if detected bboxes are true positive or false positive.\n\n Args:\n det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5).\n gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).\n gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,\n of shape (k, 4). Default: None\n iou_thr (float): IoU threshold to be considered as matched.\n Default: 0.5.\n area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,\n in the format [(min1, max1), (min2, max2), ...]. Default: None.\n\n Returns:\n tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of\n each array is (num_scales, m).\n \"\"\"\n # an indicator of ignored gts\n gt_ignore_inds = np.concatenate(\n (np.zeros(gt_bboxes.shape[0], dtype=np.bool),\n np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool)))\n # stack gt_bboxes and gt_bboxes_ignore for convenience\n gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))\n\n num_dets = det_bboxes.shape[0]\n num_gts = gt_bboxes.shape[0]\n if area_ranges is None:\n area_ranges = [(None, None)]\n num_scales = len(area_ranges)\n # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of\n # a certain scale\n tp = np.zeros((num_scales, num_dets), dtype=np.float32)\n fp = np.zeros((num_scales, num_dets), dtype=np.float32)\n\n # if there is no gt bboxes in this image, then all det bboxes\n # within area range are false positives\n if gt_bboxes.shape[0] == 0:\n if area_ranges == [(None, None)]:\n fp[...] 
= 1\n else:\n det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (\n det_bboxes[:, 3] - det_bboxes[:, 1])\n for i, (min_area, max_area) in enumerate(area_ranges):\n fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1\n return tp, fp\n\n ious = bbox_overlaps(det_bboxes, gt_bboxes)\n # for each det, the max iou with all gts\n ious_max = ious.max(axis=1)\n # for each det, which gt overlaps most with it\n ious_argmax = ious.argmax(axis=1)\n # sort all dets in descending order by scores\n sort_inds = np.argsort(-det_bboxes[:, -1])\n for k, (min_area, max_area) in enumerate(area_ranges):\n gt_covered = np.zeros(num_gts, dtype=bool)\n # if no area range is specified, gt_area_ignore is all False\n if min_area is None:\n gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)\n else:\n gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (\n gt_bboxes[:, 3] - gt_bboxes[:, 1])\n gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)\n for i in sort_inds:\n if ious_max[i] >= iou_thr:\n matched_gt = ious_argmax[i]\n if not (gt_ignore_inds[matched_gt]\n or gt_area_ignore[matched_gt]):\n if not gt_covered[matched_gt]:\n gt_covered[matched_gt] = True\n tp[k, i] = 1\n else:\n fp[k, i] = 1\n # otherwise ignore this detected bbox, tp = 0, fp = 0\n elif min_area is None:\n fp[k, i] = 1\n else:\n bbox = det_bboxes[i, :4]\n area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])\n if area >= min_area and area < max_area:\n fp[k, i] = 1\n return tp, fp\n\n\ndef get_cls_results(det_results, annotations, class_id):\n \"\"\"Get det results and gt information of a certain class.\n\n Args:\n det_results (list[list]): Same as `eval_map()`.\n annotations (list[dict]): Same as `eval_map()`.\n class_id (int): ID of a specific class.\n\n Returns:\n tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes\n \"\"\"\n cls_dets = [img_res[class_id] for img_res in det_results]\n cls_gts = []\n cls_gts_ignore = []\n for ann in annotations:\n gt_inds = ann['labels'] == class_id\n cls_gts.append(ann['bboxes'][gt_inds, :])\n\n if ann.get('labels_ignore', None) is not None:\n ignore_inds = ann['labels_ignore'] == class_id\n cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])\n else:\n cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))\n\n return cls_dets, cls_gts, cls_gts_ignore\n\n\ndef eval_map(det_results,\n annotations,\n scale_ranges=None,\n iou_thr=0.5,\n dataset=None,\n logger=None,\n tpfp_fn=None,\n nproc=4):\n \"\"\"Evaluate mAP of a dataset.\n\n Args:\n det_results (list[list]): [[cls1_det, cls2_det, ...], ...].\n The outer list indicates images, and the inner list indicates\n per-class detected bboxes.\n annotations (list[dict]): Ground truth annotations where each item of\n the list indicates an image. Keys of annotations are:\n\n - `bboxes`: numpy array of shape (n, 4)\n - `labels`: numpy array of shape (n, )\n - `bboxes_ignore` (optional): numpy array of shape (k, 4)\n - `labels_ignore` (optional): numpy array of shape (k, )\n scale_ranges (list[tuple] | None): Range of scales to be evaluated,\n in the format [(min1, max1), (min2, max2), ...]. A range of\n (32, 64) means the area range between (32**2, 64**2).\n Default: None.\n iou_thr (float): IoU threshold to be considered as matched.\n Default: 0.5.\n dataset (list[str] | str | None): Dataset name or dataset classes,\n there are minor differences in metrics for different datsets, e.g.\n \"voc07\", \"imagenet_det\", etc. Default: None.\n logger (logging.Logger | str | None): The way to print the mAP\n summary. 
See `mmcv.utils.print_log()` for details. Default: None.\n tpfp_fn (callable | None): The function used to determine true/\n false positives. If None, :func:`tpfp_default` is used as default\n unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this\n case). If it is given as a function, then this function is used\n to evaluate tp & fp. Default None.\n nproc (int): Processes used for computing TP and FP.\n Default: 4.\n\n Returns:\n tuple: (mAP, [dict, dict, ...])\n \"\"\"\n assert len(det_results) == len(annotations)\n\n num_imgs = len(det_results)\n num_scales = len(scale_ranges) if scale_ranges is not None else 1\n num_classes = len(det_results[0]) # positive class num\n area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]\n if scale_ranges is not None else None)\n\n pool = Pool(nproc)\n eval_results = []\n for i in range(num_classes):\n # get gt and det bboxes of this class\n cls_dets, cls_gts, cls_gts_ignore = get_cls_results(\n det_results, annotations, i)\n # choose proper function according to datasets to compute tp and fp\n if tpfp_fn is None:\n if dataset in ['det', 'vid']:\n tpfp_fn = tpfp_imagenet\n else:\n tpfp_fn = tpfp_default\n if not callable(tpfp_fn):\n raise ValueError(\n f'tpfp_fn has to be a function or None, but got {tpfp_fn}')\n\n # compute tp and fp for each image with multiple processes\n tpfp = pool.starmap(\n tpfp_fn,\n zip(cls_dets, cls_gts, cls_gts_ignore,\n [iou_thr for _ in range(num_imgs)],\n [area_ranges for _ in range(num_imgs)]))\n tp, fp = tuple(zip(*tpfp))\n # calculate gt number of each scale\n # ignored gts or gts beyond the specific scale are not counted\n num_gts = np.zeros(num_scales, dtype=int)\n for j, bbox in enumerate(cls_gts):\n if area_ranges is None:\n num_gts[0] += bbox.shape[0]\n else:\n gt_areas = (bbox[:, 2] - bbox[:, 0]) * (\n bbox[:, 3] - bbox[:, 1])\n for k, (min_area, max_area) in enumerate(area_ranges):\n num_gts[k] += np.sum((gt_areas >= min_area)\n & (gt_areas < max_area))\n # sort all det bboxes by score, also sort tp and fp\n cls_dets = np.vstack(cls_dets)\n num_dets = cls_dets.shape[0]\n sort_inds = np.argsort(-cls_dets[:, -1])\n tp = np.hstack(tp)[:, sort_inds]\n fp = np.hstack(fp)[:, sort_inds]\n # calculate recall and precision with tp and fp\n tp = np.cumsum(tp, axis=1)\n fp = np.cumsum(fp, axis=1)\n eps = np.finfo(np.float32).eps\n recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)\n precisions = tp / np.maximum((tp + fp), eps)\n # calculate AP\n if scale_ranges is None:\n recalls = recalls[0, :]\n precisions = precisions[0, :]\n num_gts = num_gts.item()\n mode = 'area' if dataset != 'voc07' else '11points'\n ap = average_precision(recalls, precisions, mode)\n eval_results.append({\n 'num_gts': num_gts,\n 'num_dets': num_dets,\n 'recall': recalls,\n 'precision': precisions,\n 'ap': ap\n })\n pool.close()\n if scale_ranges is not None:\n # shape (num_classes, num_scales)\n all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])\n all_num_gts = np.vstack(\n [cls_result['num_gts'] for cls_result in eval_results])\n mean_ap = []\n for i in range(num_scales):\n if np.any(all_num_gts[:, i] > 0):\n mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())\n else:\n mean_ap.append(0.0)\n else:\n aps = []\n for cls_result in eval_results:\n if cls_result['num_gts'] > 0:\n aps.append(cls_result['ap'])\n mean_ap = np.array(aps).mean().item() if aps else 0.0\n\n print_map_summary(\n mean_ap,\n eval_results,\n dataset,\n area_ranges,\n logger=logger,\n iou_thr=iou_thr)\n\n return mean_ap, 
eval_results\n\n\ndef print_map_summary(mean_ap,\n results,\n dataset=None,\n scale_ranges=None,\n logger=None,\n iou_thr=None):\n \"\"\"Print mAP and results of each class.\n\n A table will be printed to show the gts/dets/recall/AP of each class and\n the mAP.\n\n Args:\n mean_ap (float): Calculated from `eval_map()`.\n results (list[dict]): Calculated from `eval_map()`.\n dataset (list[str] | str | None): Dataset name or dataset classes.\n scale_ranges (list[tuple] | None): Range of scales to be evaluated.\n logger (logging.Logger | str | None): The way to print the mAP\n summary. See `mmcv.utils.print_log()` for details. Default: None.\n \"\"\"\n\n if logger == 'silent':\n return\n\n if isinstance(results[0]['ap'], np.ndarray):\n num_scales = len(results[0]['ap'])\n else:\n num_scales = 1\n\n if scale_ranges is not None:\n assert len(scale_ranges) == num_scales\n\n num_classes = len(results)\n\n recalls = np.zeros((num_scales, num_classes), dtype=np.float32)\n precisions = np.zeros((num_scales, num_classes), dtype=np.float32)\n aps = np.zeros((num_scales, num_classes), dtype=np.float32)\n num_gts = np.zeros((num_scales, num_classes), dtype=int)\n for i, cls_result in enumerate(results):\n if cls_result['recall'].size > 0:\n recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]\n precisions[:, i] = np.array(\n cls_result['precision'], ndmin=2)[:, -1]\n aps[:, i] = cls_result['ap']\n num_gts[:, i] = cls_result['num_gts']\n\n if dataset is None:\n label_names = [str(i) for i in range(num_classes)]\n elif mmcv.is_str(dataset):\n label_names = get_classes(dataset)\n else:\n label_names = dataset\n\n if not isinstance(mean_ap, list):\n mean_ap = [mean_ap]\n\n header = ['class', 'gts', 'dets', 'precision', 'recall', 'ap']\n for i in range(num_scales):\n if scale_ranges is not None:\n print_log(f'Scale range {scale_ranges[i]}', logger=logger)\n table_data = [header]\n for j in range(num_classes):\n row_data = [\n label_names[j], num_gts[i, j], results[j]['num_dets'],\n f'{precisions[i, j]:.3f}', f'{recalls[i, j]:.3f}',\n f'{aps[i, j]:.3f}'\n ]\n table_data.append(row_data)\n table_data.append(\n ['mAP@{}'.format(iou_thr), '', '', '', '', f'{mean_ap[i]:.3f}'])\n table = AsciiTable(table_data)\n table.inner_footing_row_border = True\n print_log('\\n' + table.table, logger=logger)\n", "import cv2\nimport concurrent.futures\nimport os\nimport numpy as np\nimport json\nimport copy\nimport random\nimport pickle\n\n\ndef get_root_path():\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n\n\ndef sliding_crop_canny_imgs(img_dir,\n save_crop_dir,\n sliding_win_xsize=1650,\n sliding_win_ysize=1650,\n overlap=200):\n\n def crop(img_dir, img_name, save_crop_dir, sliding_win_xsize,\n sliding_win_ysize, overlap):\n full_path = os.path.join(img_dir, img_name)\n image = cv2.imread(full_path)\n resized_image = cv2.resize(\n image, (int(0.1 * image.shape[1]), int(0.1 * image.shape[0])))\n imgray = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)\n ret, bin_pic = cv2.threshold(imgray, 0, 255,\n cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n median = cv2.medianBlur(bin_pic, 5)\n cannyPic = cv2.Canny(median, 10, 200)\n\n contours, hierarchy = cv2.findContours(cannyPic, cv2.RETR_CCOMP,\n cv2.CHAIN_APPROX_SIMPLE)\n\n maxArea = 0\n for i in range(len(contours)):\n if cv2.contourArea(contours[i]) > cv2.contourArea(\n contours[maxArea]):\n maxArea = i\n\n boader = contours[maxArea]\n xmin, xmax, ymin, ymax = np.min(boader[:, :, 0]), np.max(\n boader[:, :, 0]), np.min(boader[:, :, 1]), 
np.max(boader[:, :, 1])\n\n xmin = xmin * 10 - 150\n xmax = xmax * 10 + 150\n ymin = ymin * 10 - 150\n ymax = ymax * 10 + 150\n\n if xmax - xmin > 1500 and ymax - ymin > 1500 and xmin >= 0 and xmax <= image.shape[\n 1] and ymin >= 0 and ymax <= image.shape[0]:\n croped_img = image[ymin:ymax + 1, xmin:xmax + 1]\n else:\n # if the canny croped is invalid, change to center crop\n center = [image.shape[1] // 2, image.shape[0] // 2]\n if 'CAM3' in img_name:\n center[1] = int(image.shape[0] * 0.55)\n length = int(image.shape[1] * 0.36)\n croped_img = image[center[1] - length:center[1] + length + 1,\n center[0] - length:center[0] + length + 1, :]\n xmin, xmax, ymin, ymax = center[0] - length, center[\n 0] + length, center[1] - length, center[1] + length\n\n height = ymax - ymin + 1\n width = xmax - xmin + 1\n x_slides = []\n y_slides = []\n x_slide = 0\n y_slide = 0\n while True:\n if x_slide >= width - sliding_win_xsize:\n x_slide = width - sliding_win_xsize\n x_slides.append(x_slide)\n break\n else:\n x_slides.append(x_slide)\n x_slide += (sliding_win_xsize - overlap)\n\n while True:\n if y_slide >= height - sliding_win_ysize:\n y_slide = height - sliding_win_ysize\n y_slides.append(y_slide)\n break\n else:\n y_slides.append(y_slide)\n y_slide += (sliding_win_ysize - overlap)\n\n slides_pos = [(x + xmin, y + ymin) for x in x_slides for y in y_slides]\n for slide_pos in slides_pos:\n x, y = slide_pos\n croped_img = image[y:y + sliding_win_ysize,\n x:x + sliding_win_xsize]\n croped_img_name = '{}_{}_{}_{}_{}.jpg'.format(\n img_name.split('.')[0], x, y, x + sliding_win_xsize - 1,\n y + sliding_win_ysize - 1)\n cv2.imwrite(\n os.path.join(save_crop_dir, croped_img_name), croped_img)\n\n print('Finish {}'.format(img_name))\n\n img_names = os.listdir(img_dir)\n if not os.path.exists(save_crop_dir):\n os.mkdir(save_crop_dir)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=40) as executor:\n for img_name in img_names:\n executor.submit(crop, img_dir, img_name, save_crop_dir,\n sliding_win_xsize, sliding_win_ysize, overlap)\n\n\ndef generate_canny_slide_annotations_file(anno_file, img_dir, save_anno_folder,\n train_anno_json_name,\n bk_anno_json_name,\n all_anno_json_name):\n '''\n params:\n anno_file: original annotation json file path\n img_dir: the processed canny slide img patches folder\n save_anno_folder: the folder for saving the generated annotations\n train_anno_json_name: name for generated train annotations (with defects, positive samples)\n bk_anno_json_name: name for all background annotations\n all_anno_json_name: name for all the samples(all positive and negative) annotations\n '''\n\n with open(anno_file, 'r') as f:\n annotations = json.load(f)\n\n name_anno_map = {}\n for anno in annotations:\n name = anno['name']\n if name not in name_anno_map.keys():\n name_anno_map[name] = [anno]\n else:\n name_anno_map[name].append(anno)\n\n all_annotations = []\n new_annotations = []\n background_annotations = []\n\n img_names = os.listdir(img_dir)\n\n for img_name in img_names:\n has_box = False\n splits = (img_name.split('.')[0]).split('_')\n origin_img_name = '{}_{}_{}_{}.jpg'.format(splits[0], splits[1],\n splits[2], splits[3])\n xmin, ymin, xmax, ymax = int(splits[-4]), int(splits[-3]), int(\n splits[-2]), int(splits[-1])\n annos = name_anno_map[origin_img_name]\n image_width = xmax - xmin + 1\n image_height = ymax - ymin + 1\n\n for ann in annos:\n anno = copy.deepcopy(ann)\n\n anno['image_width'] = image_width\n anno['image_height'] = image_height\n anno['name'] = img_name\n 
left_up_position = [xmin, ymin]\n right_down_position = [xmax, ymax]\n box = anno['bbox']\n\n if box[0] >= left_up_position[0] and box[0] <= right_down_position[\n 0] and box[1] >= left_up_position[1] and box[\n 1] <= right_down_position[1] and box[\n 2] >= left_up_position[0] and box[\n 2] <= right_down_position[0] and box[\n 3] >= left_up_position[1] and box[\n 3] <= right_down_position[1]:\n has_box = True\n anno['bbox'][0] = round(\n max(0, anno['bbox'][0] - left_up_position[0]), 2) # x1\n anno['bbox'][1] = round(\n max(0, anno['bbox'][1] - left_up_position[1]), 2) # y1\n anno['bbox'][2] = round(\n max(0, anno['bbox'][2] - left_up_position[0]), 2) # x2\n anno['bbox'][3] = round(\n max(0, anno['bbox'][3] - left_up_position[1]), 2) # y2\n new_annotations.append(anno)\n\n if not has_box:\n anno = dict(\n name=img_name,\n image_height=image_height,\n image_width=image_width,\n category=0,\n bbox=[])\n background_annotations.append(anno)\n\n select_indices = random.sample(\n range(len(background_annotations)), len(new_annotations))\n bks = [background_annotations[idx] for idx in select_indices]\n all_annotations = bks + new_annotations\n with open(os.path.join(save_anno_folder, train_anno_json_name), 'w') as f:\n json.dump(new_annotations, f, indent=4, separators=(',', ': '))\n with open(os.path.join(save_anno_folder, bk_anno_json_name), 'w') as f:\n json.dump(background_annotations, f, indent=4, separators=(',', ': '))\n with open(os.path.join(save_anno_folder, all_anno_json_name), 'w') as f:\n json.dump(all_annotations, f, indent=4, separators=(',', ': '))\n\n\ndef test_predictions(predict_json_file, image_dir, result_img_save_dir):\n '''\n Descriptions:\n function to test the generated submit json file\n '''\n with open(predict_json_file, 'r') as f:\n context = json.load(f)\n\n image_names = os.listdir(image_dir)\n # select_indices = random.sample(range(len(image_names)), 10)\n select_indices = range(10)\n select_img_names = [image_names[i] for i in select_indices]\n\n if not os.path.exists(result_img_save_dir):\n os.mkdir(result_img_save_dir)\n\n for index, image_name in enumerate(select_img_names):\n image = cv2.imread(os.path.join(image_dir, image_name))\n for con in context:\n if con['name'] == image_name:\n cv2.rectangle(image,\n (int(con['bbox'][0]), int(con['bbox'][1])),\n (int(con['bbox'][2]), int(con['bbox'][3])),\n (0, 0, 255))\n cv2.putText(\n image,\n str(con['category']) + '_' + str(round(con['score'], 2)),\n (int(con['bbox'][0]), int(con['bbox'][1]) - 10),\n cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 1)\n cv2.imwrite(\n os.path.join(result_img_save_dir, 'test{}.jpg'.format(index)),\n image)\n\n\ndef convert_tile_annotations_toinfos(ann_file, val_num_rate, infos_save_dir,\n train_info_name, val_info_name,\n all_info_name):\n '''\n Description:\n function to convert from the anno json to the mmdetection dataset annotation infos, \n saved in pickle format\n '''\n with open(ann_file, 'r') as f:\n text = json.load(f)\n\n if not os.path.exists(infos_save_dir):\n os.mkdir(infos_save_dir)\n\n total_annotations = {}\n total_infos = []\n \n for annotation in text:\n if annotation == \"info\" or annotation == \"licenses\":\n continue\n# import pdb;pdb.set_trace()\n if annotation['file_name'] not in total_annotations.keys():\n total_annotations[annotation['file_name']] = [\n annotation['height'], annotation['width'],\n [annotation['category_id']], [annotation['bbox']]\n ]\n else:\n total_annotations[annotation['file_name']][2].append(\n annotation['category_id'])\n 
total_annotations[annotation['file_name']][3].append(annotation['bbox'])\n\n for file_name in total_annotations.keys():\n file_info = total_annotations[file_name]\n bboxes = file_info[-1]\n labels = file_info[-2]\n if len(bboxes[0]) == 0:\n bboxes = np.zeros((0, 4), dtype=np.float32)\n labels = np.array([], dtype=np.int64)\n else:\n bboxes = np.array(bboxes).astype(np.float32)\n labels = np.array(labels).astype(np.int64)\n total_infos.append(\n dict(\n filename=file_name,\n height=file_info[0],\n width=file_info[1],\n ann=dict(bboxes=bboxes, labels=labels)))\n\n val_num = int(len(total_infos) * val_num_rate)\n val_infos_indices = random.sample(range(len(total_infos)), val_num)\n train_info_indices = list(\n set(range(len(total_infos))).difference(set(val_infos_indices)))\n\n train_infos = [total_infos[idx] for idx in train_info_indices]\n val_infos = [total_infos[idx] for idx in val_infos_indices]\n\n with open(os.path.join(infos_save_dir, train_info_name), 'wb') as f:\n pickle.dump(train_infos, f)\n with open(os.path.join(infos_save_dir, val_info_name), 'wb') as f:\n pickle.dump(val_infos, f)\n with open(os.path.join(infos_save_dir, all_info_name), 'wb') as f:\n pickle.dump(total_infos, f)\n\n test_recover_infos(\n val_info_file=os.path.join(infos_save_dir, val_info_name),\n train_info_file=os.path.join(infos_save_dir, train_info_name),\n all_info_file=os.path.join(infos_save_dir, all_info_name))\n\n\ndef test_recover_infos(val_info_file, train_info_file, all_info_file):\n with open(val_info_file, 'rb') as f:\n val_infos = pickle.load(f)\n with open(train_info_file, 'rb') as f:\n train_infos = pickle.load(f)\n with open(all_info_file, 'rb') as f:\n all_infos = pickle.load(f)\n assert (type(val_infos[0]) == dict)\n print(val_infos[0], len(val_infos))\n print(train_infos[0], len(train_infos))\n print(len(all_infos))\n\n\ndef check_annotations(anno_json_path):\n with open(anno_json_path, 'r') as f:\n context = json.load(f)\n\n for anno in context:\n img_name = anno['name']\n box = anno['bbox']\n height = anno['image_height']\n width = anno['image_width']\n if len(box) > 0:\n if box[0] < 0 or box[0] >= width or box[2] < 0 or box[\n 2] >= width or box[1] < 0 or box[1] >= height or box[\n 3] < 0 or box[3] >= height or box[0] >= box[2] or box[\n 1] >= box[3]:\n print(anno)" ]
[ [ "numpy.hstack", "numpy.minimum", "numpy.maximum", "numpy.arange", "numpy.cumsum", "numpy.empty", "numpy.ones", "numpy.finfo", "numpy.zeros_like", "numpy.any", "numpy.where", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.vstack" ], [ "numpy.max", "numpy.array", "numpy.zeros", "numpy.min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TUDelft-DataDrivenControl/FRED
[ "f837f4a126e693519fa5ab7c913cb26570ca5278" ]
[ "tools/mwe.py" ]
[ "from fenics import *\nfrom fenics_adjoint import *\nimport numpy as np\nfrom pyadjoint import Block\nfrom pyadjoint.overloaded_function import overload_function\n\n\ndef get_coefficient(func, coord1, coord2, gradient=False, grad_idx=None):\n return func(coord1, coord2)\n\n\nbackend_get_coefficient = get_coefficient\n\n\nclass CoefficientBlock(Block):\n def __init__(self, func, coord1, coord2, **kwargs):\n super(CoefficientBlock, self).__init__()\n self.kwargs = kwargs\n self.func = func\n self.add_dependency(coord1)\n self.add_dependency(coord2)\n degree = func.function_space().ufl_element().degree()\n family = func.function_space().ufl_element().family()\n if np.isin(family, [\"CG\", \"Lagrange\"]):\n self.V = FunctionSpace(mesh, \"DG\", degree - 1)\n else:\n raise NotImplementedError(\n \"Not implemented for other elements than Lagrange\")\n\n def __str__(self):\n return \"CoefficientBlock\"\n\n def evaluate_adj_component(self, inputs, adj_inputs, block_variable, idx, prepared=None):\n grad_idx = project(self.func.dx(idx), self.V)\n return grad_idx(inputs[0], inputs[1]) * adj_inputs[0]\n\n def recompute_component(self, inputs, block_variable, idx, prepared):\n return backend_get_coefficient(self.func, inputs[0], inputs[1])\n\n\nget_coefficient = overload_function(get_coefficient, CoefficientBlock)\n\n\nmesh = UnitSquareMesh(10, 10)\nV0 = FunctionSpace(mesh, \"DG\", 0)\nV1 = FunctionSpace(mesh, \"Lagrange\", 1)\n\nu = Function(V1)\nx = SpatialCoordinate(u)\nz = project(x[0]*x[1], V1)\n\nx1 = [Constant(r) for r in np.random.rand(1)]\nx2 = [Constant(r) for r in np.random.rand(1)]\n\n# functional_list =\n# for idx in range(len(x1)):\nidx = 0\ny = Constant(0.)\n\ndz = Function(V1)\n\nz.assign(project(z+dz,V1))\nct = get_coefficient(z, x1[idx], x2[idx])\n# a = AdjFloat(sqrt(1-ct))\n\nJ = (ct) ** 2\n# controls = x1 + x2\ncontrols = [dz]\nm = [Control(c) for c in controls]\nh = [Constant(0.01*np.random.rand()) for c in controls]\n\nJhat = ReducedFunctional(J, m)\nprint(5*\"\\n\")\nJhat.derivative()\n\nprint(5*\"\\n\")\ntaylor_test(Jhat, controls, h)\n" ]
[ [ "numpy.random.rand", "numpy.isin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
larrybradley/numpy
[ "4e11da37bf94a0f496f236e9706205ac81683058", "4e11da37bf94a0f496f236e9706205ac81683058" ]
[ "benchmarks/benchmarks/bench_avx.py", "benchmarks/benchmarks/bench_core.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nfrom .common import Benchmark\n\nimport numpy as np\n\navx_ufuncs = ['sqrt',\n 'absolute',\n 'reciprocal',\n 'square',\n 'rint',\n 'floor',\n 'ceil' ,\n 'trunc']\nstride = [1, 2, 4]\ndtype = ['f', 'd']\n\nclass AVX_UFunc(Benchmark):\n params = [avx_ufuncs, stride, dtype]\n param_names = ['avx_based_ufunc', 'stride', 'dtype']\n timeout = 10\n\n def setup(self, ufuncname, stride, dtype):\n np.seterr(all='ignore')\n try:\n self.f = getattr(np, ufuncname)\n except AttributeError:\n raise NotImplementedError()\n N = 10000\n self.arr = np.ones(stride*N, dtype)\n\n def time_ufunc(self, ufuncname, stride, dtype):\n self.f(self.arr[::stride])\n\n", "from __future__ import absolute_import, division, print_function\n\nfrom .common import Benchmark\n\nimport numpy as np\n\n\nclass Core(Benchmark):\n def setup(self):\n self.l100 = range(100)\n self.l50 = range(50)\n self.l = [np.arange(1000), np.arange(1000)]\n self.l_view = [memoryview(a) for a in self.l]\n self.l10x10 = np.ones((10, 10))\n\n def time_array_1(self):\n np.array(1)\n\n def time_array_empty(self):\n np.array([])\n\n def time_array_l1(self):\n np.array([1])\n\n def time_array_l100(self):\n np.array(self.l100)\n\n def time_array_l(self):\n np.array(self.l)\n\n def time_array_l_view(self):\n np.array(self.l_view)\n\n def time_vstack_l(self):\n np.vstack(self.l)\n\n def time_hstack_l(self):\n np.hstack(self.l)\n\n def time_dstack_l(self):\n np.dstack(self.l)\n\n def time_arange_100(self):\n np.arange(100)\n\n def time_zeros_100(self):\n np.zeros(100)\n\n def time_ones_100(self):\n np.ones(100)\n\n def time_empty_100(self):\n np.empty(100)\n\n def time_eye_100(self):\n np.eye(100)\n\n def time_identity_100(self):\n np.identity(100)\n\n def time_eye_3000(self):\n np.eye(3000)\n\n def time_identity_3000(self):\n np.identity(3000)\n\n def time_diag_l100(self):\n np.diag(self.l100)\n\n def time_diagflat_l100(self):\n np.diagflat(self.l100)\n\n def time_diagflat_l50_l50(self):\n np.diagflat([self.l50, self.l50])\n\n def time_triu_l10x10(self):\n np.triu(self.l10x10)\n\n def time_tril_l10x10(self):\n np.tril(self.l10x10)\n\n\nclass Temporaries(Benchmark):\n def setup(self):\n self.amid = np.ones(50000)\n self.bmid = np.ones(50000)\n self.alarge = np.ones(1000000)\n self.blarge = np.ones(1000000)\n\n def time_mid(self):\n (self.amid * 2) + self.bmid\n\n def time_mid2(self):\n (self.amid + self.bmid) - 2\n\n def time_large(self):\n (self.alarge * 2) + self.blarge\n\n def time_large2(self):\n (self.alarge + self.blarge) - 2\n\n\nclass CorrConv(Benchmark):\n params = [[50, 1000, int(1e5)],\n [10, 100, 1000, int(1e4)],\n ['valid', 'same', 'full']]\n param_names = ['size1', 'size2', 'mode']\n\n def setup(self, size1, size2, mode):\n self.x1 = np.linspace(0, 1, num=size1)\n self.x2 = np.cos(np.linspace(0, 2*np.pi, num=size2))\n\n def time_correlate(self, size1, size2, mode):\n np.correlate(self.x1, self.x2, mode=mode)\n\n def time_convolve(self, size1, size2, mode):\n np.convolve(self.x1, self.x2, mode=mode)\n\n\nclass CountNonzero(Benchmark):\n param_names = ['numaxes', 'size', 'dtype']\n params = [\n [1, 2, 3],\n [100, 10000, 1000000],\n [bool, int, str, object]\n ]\n\n def setup(self, numaxes, size, dtype):\n self.x = np.arange(numaxes * size).reshape(numaxes, size)\n self.x = (self.x % 3).astype(dtype)\n\n def time_count_nonzero(self, numaxes, size, dtype):\n np.count_nonzero(self.x)\n\n def time_count_nonzero_axis(self, numaxes, size, dtype):\n np.count_nonzero(self.x, 
axis=self.x.ndim - 1)\n\n def time_count_nonzero_multi_axis(self, numaxes, size, dtype):\n if self.x.ndim >= 2:\n np.count_nonzero(self.x, axis=(\n self.x.ndim - 1, self.x.ndim - 2))\n\n\nclass PackBits(Benchmark):\n param_names = ['dtype']\n params = [[bool, np.uintp]]\n def setup(self, dtype):\n self.d = np.ones(10000, dtype=dtype)\n self.d2 = np.ones((200, 1000), dtype=dtype)\n\n def time_packbits(self, dtype):\n np.packbits(self.d)\n\n def time_packbits_axis0(self, dtype):\n np.packbits(self.d2, axis=0)\n\n def time_packbits_axis1(self, dtype):\n np.packbits(self.d2, axis=1)\n\n\nclass UnpackBits(Benchmark):\n def setup(self):\n self.d = np.ones(10000, dtype=np.uint8)\n self.d2 = np.ones((200, 1000), dtype=np.uint8)\n\n def time_unpackbits(self):\n np.unpackbits(self.d)\n\n def time_unpackbits_little(self):\n np.unpackbits(self.d, bitorder=\"little\")\n\n def time_unpackbits_axis0(self):\n np.unpackbits(self.d2, axis=0)\n\n def time_unpackbits_axis1(self):\n np.unpackbits(self.d2, axis=1)\n\n def time_unpackbits_axis1_little(self):\n np.unpackbits(self.d2, bitorder=\"little\", axis=1)\n\n\nclass Indices(Benchmark):\n def time_indices(self):\n np.indices((1000, 500))\n" ]
[ [ "numpy.seterr", "numpy.ones" ], [ "numpy.diag", "numpy.linspace", "numpy.vstack", "numpy.tril", "numpy.hstack", "numpy.arange", "numpy.eye", "numpy.packbits", "numpy.count_nonzero", "numpy.triu", "numpy.zeros", "numpy.diagflat", "numpy.identity", "numpy.correlate", "numpy.array", "numpy.convolve", "numpy.indices", "numpy.dstack", "numpy.ones", "numpy.empty", "numpy.unpackbits" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sarvex/TFace
[ "490cf90a1f042b86d7d03042f26d0a7cf6b1f0c0", "490cf90a1f042b86d7d03042f26d0a7cf6b1f0c0" ]
[ "torchkit/head/localfc/cosface.py", "torchkit/backbone/model_mobilefacenet.py" ]
[ "from __future__ import print_function\nfrom __future__ import division\nimport math\nimport torch\nimport torch.nn as nn\nfrom torch.nn import Parameter\nfrom torchkit.head.localfc.common import calc_logits\n\n\nclass CosFace(nn.Module):\n \"\"\" Implement of CosFace (https://arxiv.org/abs/1801.09414)\n\n \"\"\"\n def __init__(self,\n in_features,\n out_features,\n scale=64.0,\n margin=0.40):\n \"\"\" Args:\n in_features: size of each input features\n out_features: size of each output features\n scale: norm of input feature\n margin: margin\n \"\"\"\n super(CosFace, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n\n self.scale = scale\n self.margin = margin\n\n self.kernel = Parameter(torch.FloatTensor(in_features, out_features))\n # nn.init.xavier_uniform_(self.kernel)\n nn.init.normal_(self.kernel, std=0.01)\n # init.kaiming_uniform_(self.kernel, a=math.sqrt(5))\n\n def forward(self, embeddings, labels):\n cos_theta, origin_cos = calc_logits(embeddings, self.kernel)\n target_logit = cos_theta[torch.arange(0, embeddings.size(0)), labels].view(-1, 1)\n\n final_target_logit = target_logit - self.margin\n\n cos_theta.scatter_(1, labels.view(-1, 1).long(), final_target_logit)\n output = cos_theta * self.scale\n\n return output, origin_cos * self.scale\n", "\n# based on:\n# https://github.com/cavalleria/cavaface.pytorch/blob/master/backbone/mobilefacenet.py\nfrom torch.nn import Conv2d\nfrom torch.nn import BatchNorm2d\nfrom torch.nn import PReLU\nfrom torch.nn import Sequential\nfrom torch.nn import Module\nfrom torchkit.backbone.common import initialize_weights\nfrom torchkit.backbone.common import LinearBlock, GNAP, GDC\n\n\nclass Conv_block(Module):\n \"\"\" Convolution block with no-linear activation layer\n \"\"\"\n def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):\n super(Conv_block, self).__init__()\n self.conv = Conv2d(in_c, out_c, kernel, stride, padding, groups=groups, bias=False)\n self.bn = BatchNorm2d(out_c)\n self.prelu = PReLU(out_c)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.prelu(x)\n return x\n\n\nclass Depth_Wise(Module):\n \"\"\" Depthwise block\n \"\"\"\n def __init__(self, in_c, out_c, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1, residual=False):\n super(Depth_Wise, self).__init__()\n self.conv = Conv_block(in_c, groups, (1, 1), (1, 1), (0, 0))\n self.conv_dw = Conv_block(groups, groups, kernel, stride, padding, groups=groups)\n self.project = LinearBlock(groups, out_c, (1, 1), (1, 1), (0, 0))\n self.residual = residual\n\n def forward(self, x):\n if self.residual:\n short_cut = x\n x = self.conv(x)\n x = self.conv_dw(x)\n x = self.project(x)\n if self.residual:\n output = short_cut + x\n else:\n output = x\n return output\n\n\nclass Residual(Module):\n \"\"\" Residual block\n \"\"\"\n def __init__(self, channel, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1)):\n super(Residual, self).__init__()\n modules = []\n for _ in range(num_block):\n modules.append(Depth_Wise(channel, channel,\n kernel=kernel,\n stride=stride,\n padding=padding,\n groups=groups,\n residual=True))\n self.model = Sequential(*modules)\n\n def forward(self, x):\n return self.model(x)\n\n\nclass MobileFaceNet(Module):\n \"\"\" MobileFaceNet backbone\n \"\"\"\n def __init__(self, input_size, embedding_size=512, output_name=\"GDC\"):\n \"\"\" Args:\n input_size: input_size of backbone\n embedding_size: embedding_size of last feature\n output_name: support GDC or GNAP\n 
\"\"\"\n super(MobileFaceNet, self).__init__()\n assert output_name in [\"GNAP\", 'GDC']\n assert input_size[0] in [112]\n self.conv1 = Conv_block(3, 64, (3, 3), (2, 2), (1, 1))\n self.conv2_dw = Conv_block(64, 64, (3, 3), (1, 1), (1, 1), groups=64)\n self.conv_23 = Depth_Wise(64, 64, (3, 3), (2, 2), (1, 1), groups=128)\n self.conv_3 = Residual(64,\n num_block=4,\n groups=128,\n kernel=(3, 3),\n stride=(1, 1),\n padding=(1, 1))\n self.conv_34 = Depth_Wise(64, 128, (3, 3), (2, 2), (1, 1), groups=256)\n self.conv_4 = Residual(128,\n num_block=6,\n groups=256,\n kernel=(3, 3),\n stride=(1, 1),\n padding=(1, 1))\n self.conv_45 = Depth_Wise(128, 128, (3, 3), (2, 2), (1, 1), groups=512)\n self.conv_5 = Residual(128,\n num_block=2,\n groups=256,\n kernel=(3, 3),\n stride=(1, 1),\n padding=(1, 1))\n self.conv_6_sep = Conv_block(128, 512, (1, 1), (1, 1), (0, 0))\n if output_name == \"GNAP\":\n self.output_layer = GNAP(512)\n else:\n self.output_layer = GDC(512, embedding_size)\n\n initialize_weights(self.modules())\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2_dw(out)\n out = self.conv_23(out)\n out = self.conv_3(out)\n out = self.conv_34(out)\n out = self.conv_4(out)\n out = self.conv_45(out)\n out = self.conv_5(out)\n conv_features = self.conv_6_sep(out)\n out = self.output_layer(conv_features)\n return out\n" ]
[ [ "torch.FloatTensor", "torch.nn.init.normal_" ], [ "torch.nn.PReLU", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.BatchNorm2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Durabun/QWell
[ "746c6e53441556d28bdbdeb6d07561e9dd75842a" ]
[ "index_plot.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nevenR = np.array([1.212,3.368])\noddR = np.array([2.381])\nS = evenR.size+oddR.size\nprop = np.zeros(S)\ntunn = np.zeros(S)\ni=0\nj=1\na=0.469\n\ndef rad(x):\n\treturn np.sqrt((1.1*np.pi)**2-x**2)\n\t\nprint (S)\n\nprint (prop)\nprint (tunn)\n\nwhile i< evenR.size:\n\tprop[2*i] = evenR[i]/a\n\ttunn[2*i] = rad(evenR[i])/a\n\ti=i+1\n\tprint (i)\nprint(\"odd\")\nwhile j-1 < oddR.size:\n\tprop[j] = oddR[j-1]/a\n\ttunn[j] = rad(oddR[j-1])/a\n\tj=j+2\n\tprint (j)\n\nprint (prop)\nprint (tunn)\n\nBcoeff = np.array([0.6318,0.6171,0.4823])\n\n#def Bfn(k,K):\n#\treturn k+1.2*K\n\t\nl = 0\n\n#while l < S:\n#\tBcoeff[l] = Bfn(prop[l],tunn[l])\n#\tl=l+1\n\nprint (Bcoeff)\n\nz = 0\ndef ef1(B,K,k,a):\n\treturn lambda x: 2*B*np.exp((a+x)*K)*np.cos(a*k)\n\t\ndef ef2(B,k):\n\treturn lambda x: 2*B*np.cos(k*x)\t\n\t\ndef ef3(B,K,k,a):\n\treturn lambda x: 2*B*np.exp((a-x)*K)*np.cos(a*k)\t\n\t\ndef of1(B,K,k,a):\n\treturn lambda x: -2*B*np.exp((a+x)*K)*np.sin(a*k)\n\t\ndef of2(B,k):\n\treturn lambda x: 2*B*np.sin(k*x)\t\n\t\ndef of3(B,K,k,a):\n\treturn lambda x: 2*B*np.exp((a-x)*K)*np.sin(a*k)\n\n\t\nr1 = np.arange(-5,-a,0.001)\nr2 = np.arange(-a,a,0.001)\nr3 = np.arange(a,5,0.001)\ncolor = [\"r\",\"b\",\"g\"]\nwhile z <S:\n#\tplt.figure\n\tif z%2 == 1:\n\t\tplt1 = of1(Bcoeff[z],tunn[z],prop[z],a)\n\t\tplt2 = of2(Bcoeff[z],prop[z])\n\t\tplt3 = of3(Bcoeff[z],tunn[z],prop[z],a)\n\t\tplt.plot(r1,plt1(r1),color[z],r2,plt2(r2),color[z],r3,plt3(r3),color[z])\n#\t\tplt.plot(r2,plt2(r2))\n#\t\tplt.plot(r3,plt3(r3))\n\telse:\n\t\tplt1 = ef1(Bcoeff[z],tunn[z],prop[z],a)\n\t\tplt2 = ef2(Bcoeff[z],prop[z])\n\t\tplt3 = ef3(Bcoeff[z],tunn[z],prop[z],a)\n\t\tplt.plot(r1,plt1(r1),color[z],r2,plt2(r2),color[z],r3,plt3(r3),color[z])\n#\t\tplt.plot(r2,plt2(r2))\n#\t\tplt.plot(r3,plt3(r3))\n\tz = z+1\n\t\t\nplt.show()" ]
[ [ "numpy.sqrt", "numpy.arange", "numpy.cos", "numpy.sin", "numpy.exp", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sevagh/Music-Separation-TF
[ "1e21d3802b7df8f6c25778bca6a6c576f805fc4a" ]
[ "algorithms/umx.py" ]
[ "import torch\nimport numpy as np\nimport argparse\nimport soundfile as sf\nimport norbert\nimport json\nfrom pathlib import Path\nimport scipy.signal\nimport resampy\nimport warnings\nimport tqdm\nfrom contextlib import redirect_stderr\nimport io\n\nimport os\nimport sys\n\nvendor_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../vendor/open-unmix-pytorch\"\n)\nsys.path.append(vendor_path)\n\nimport model\nimport utils\n\n\ndef load_model(target, model_name=\"umxhq\", device=\"cpu\"):\n \"\"\"\n target model path can be either <target>.pth, or <target>-sha256.pth\n (as used on torchub)\n \"\"\"\n model_path = Path(model_name).expanduser()\n if not model_path.exists():\n # model path does not exist, use hubconf model\n try:\n # disable progress bar\n err = io.StringIO()\n with redirect_stderr(err):\n return torch.hub.load(\n \"sigsep/open-unmix-pytorch\",\n model_name,\n target=target,\n device=device,\n pretrained=True,\n )\n print(err.getvalue())\n except AttributeError:\n raise NameError(\"Model does not exist on torchhub\")\n # assume model is a path to a local model_name direcotry\n else:\n # load model from disk\n with open(Path(model_path, target + \".json\"), \"r\") as stream:\n results = json.load(stream)\n\n target_model_path = next(Path(model_path).glob(\"%s*.pth\" % target))\n state = torch.load(target_model_path, map_location=device)\n\n max_bin = utils.bandwidth_to_max_bin(\n state[\"sample_rate\"], results[\"args\"][\"nfft\"], results[\"args\"][\"bandwidth\"]\n )\n\n unmix = model.OpenUnmix(\n n_fft=results[\"args\"][\"nfft\"],\n n_hop=results[\"args\"][\"nhop\"],\n nb_channels=results[\"args\"][\"nb_channels\"],\n hidden_size=results[\"args\"][\"hidden_size\"],\n max_bin=max_bin,\n )\n\n unmix.load_state_dict(state)\n unmix.stft.center = True\n unmix.eval()\n unmix.to(device)\n return unmix\n\n\ndef istft(X, rate=44100, n_fft=4096, n_hopsize=1024):\n t, audio = scipy.signal.istft(\n X / (n_fft / 2), rate, nperseg=n_fft, noverlap=n_fft - n_hopsize, boundary=True\n )\n return audio\n\n\ndef separate(\n audio,\n targets,\n model_name=\"umxhq\",\n niter=1,\n softmask=False,\n alpha=1.0,\n residual_model=False,\n device=\"cpu\",\n):\n \"\"\"\n Performing the separation on audio input\n\n Parameters\n ----------\n audio: np.ndarray [shape=(nb_timesteps, nb_channels)]\n mixture audio\n\n targets: list of str\n a list of the separation targets.\n Note that for each target a separate model is expected\n to be loaded.\n One of `vocals`, `drums`, `bass`, `other`.\n\n model_name: str\n name of torchhub model or path to model folder, defaults to `umxhq`\n\n niter: int\n Number of EM steps for refining initial estimates in a\n post-processing stage, defaults to 1.\n\n softmask: boolean\n if activated, then the initial estimates for the sources will\n be obtained through a ratio mask of the mixture STFT, and not\n by using the default behavior of reconstructing waveforms\n by using the mixture phase, defaults to False\n\n alpha: float\n changes the exponent to use for building ratio masks, defaults to 1.0\n\n residual_model: boolean\n computes a residual target, for custom separation scenarios\n when not all targets are available, defaults to False\n\n device: str\n set torch device. 
Defaults to `cpu`.\n\n Returns\n -------\n estimates: `dict` [`str`, `np.ndarray`]\n dictionary of all restimates as performed by the separation model.\n\n \"\"\"\n # convert numpy audio to torch\n audio_torch = torch.tensor(audio.T[None, ...]).float().to(device)\n\n source_names = []\n V = []\n\n for j, target in enumerate(tqdm.tqdm(targets)):\n unmix_target = load_model(target=target, model_name=model_name, device=device)\n Vj = unmix_target(audio_torch).cpu().detach().numpy()\n if softmask:\n # only exponentiate the model if we use softmask\n Vj = Vj ** alpha\n # output is nb_frames, nb_samples, nb_channels, nb_bins\n V.append(Vj[:, 0, ...]) # remove sample dim\n source_names += [target]\n\n V = np.transpose(np.array(V), (1, 3, 2, 0))\n\n X = unmix_target.stft(audio_torch).detach().cpu().numpy()\n # convert to complex numpy type\n X = X[..., 0] + X[..., 1] * 1j\n X = X[0].transpose(2, 1, 0)\n\n if residual_model or len(targets) == 1:\n V = norbert.residual_model(V, X, alpha if softmask else 1)\n source_names += [\"residual\"] if len(targets) > 1 else [\"accompaniment\"]\n\n Y = norbert.wiener(V, X.astype(np.complex128), niter, use_softmask=softmask)\n\n estimates = {}\n for j, name in enumerate(source_names):\n audio_hat = istft(\n Y[..., j].T,\n n_fft=unmix_target.stft.n_fft,\n n_hopsize=unmix_target.stft.n_hop,\n )\n estimates[name] = audio_hat.T\n\n return estimates\n\n\ndef inference_args(parser, remaining_args):\n inf_parser = argparse.ArgumentParser(\n description=__doc__,\n parents=[parser],\n add_help=True,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n inf_parser.add_argument(\n \"--softmask\",\n dest=\"softmask\",\n action=\"store_true\",\n help=(\n \"if enabled, will initialize separation with softmask.\"\n \"otherwise, will use mixture phase with spectrogram\"\n ),\n )\n\n inf_parser.add_argument(\n \"--niter\",\n type=int,\n default=1,\n help=\"number of iterations for refining results.\",\n )\n\n inf_parser.add_argument(\n \"--alpha\",\n type=float,\n default=1.0,\n help=\"exponent in case of softmask separation\",\n )\n\n inf_parser.add_argument(\n \"--samplerate\", type=int, default=44100, help=\"model samplerate\"\n )\n\n inf_parser.add_argument(\n \"--residual-model\", action=\"store_true\", help=\"create a model for the residual\"\n )\n return inf_parser.parse_args()\n\n\ndef test_main(\n input_files=None,\n samplerate=44100,\n niter=1,\n alpha=1.0,\n softmask=False,\n residual_model=False,\n model=\"umxhq\",\n targets=(\"vocals\", \"drums\", \"bass\", \"other\"),\n outdir=None,\n start=0.0,\n duration=-1.0,\n no_cuda=False,\n):\n\n use_cuda = not no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n for input_file in input_files:\n # handling an input audio path\n info = sf.info(input_file)\n start = int(start * info.samplerate)\n # check if dur is none\n if duration > 0:\n # stop in soundfile is calc in samples, not seconds\n stop = start + int(duration * info.samplerate)\n else:\n # set to None for reading complete file\n stop = None\n\n audio, rate = sf.read(input_file, always_2d=True, start=start, stop=stop)\n\n if audio.shape[1] > 2:\n warnings.warn(\n \"Channel count > 2! 
\" \"Only the first two channels will be processed!\"\n )\n audio = audio[:, :2]\n\n if rate != samplerate:\n # resample to model samplerate if needed\n audio = resampy.resample(audio, rate, samplerate, axis=0)\n\n if audio.shape[1] == 1:\n # if we have mono, let's duplicate it\n # as the input of OpenUnmix is always stereo\n audio = np.repeat(audio, 2, axis=1)\n\n estimates = separate(\n audio,\n targets=targets,\n model_name=model,\n niter=niter,\n alpha=alpha,\n softmask=softmask,\n residual_model=residual_model,\n device=device,\n )\n if not outdir:\n model_path = Path(model)\n if not model_path.exists():\n output_path = Path(Path(input_file).stem + \"_\" + model)\n else:\n output_path = Path(Path(input_file).stem + \"_\" + model_path.stem)\n else:\n if len(input_files) > 1:\n output_path = Path(outdir) / Path(input_file).stem\n else:\n output_path = Path(outdir)\n\n output_path.mkdir(exist_ok=True, parents=True)\n\n drum_signal = estimates[\"drums\"].sum(axis=1) / 2\n vocal_signal = estimates[\"vocals\"].sum(axis=1) / 2\n\n # sum of remaining as harmonic\n harmonic_signal = (\n estimates[\"other\"].sum(axis=1) / 2 + estimates[\"bass\"].sum(axis=1) / 2\n )\n\n size_mismatch = audio.shape[0] - drum_signal.shape[0]\n\n if size_mismatch > 0: # needs padding\n drum_signal = np.concatenate((drum_signal, np.zeros(size_mismatch)))\n vocal_signal = np.concatenate((vocal_signal, np.zeros(size_mismatch)))\n harmonic_signal = np.concatenate((harmonic_signal, np.zeros(size_mismatch)))\n\n prefix = input_files[0].split(\"/\")[-1].split(\"_\")[0]\n sf.write(\n str(output_path / Path(prefix + \"_percussive\").with_suffix(\".wav\")),\n drum_signal,\n samplerate,\n )\n sf.write(\n str(output_path / Path(prefix + \"_vocal\").with_suffix(\".wav\")),\n vocal_signal,\n samplerate,\n )\n sf.write(\n str(output_path / Path(prefix + \"_harmonic\").with_suffix(\".wav\")),\n harmonic_signal,\n samplerate,\n )\n\n\nif __name__ == \"__main__\":\n # Training settings\n parser = argparse.ArgumentParser(description=\"OSU Inference\", add_help=False)\n\n parser.add_argument(\n \"input\", type=str, nargs=\"+\", help=\"List of paths to wav/flac files.\"\n )\n\n parser.add_argument(\n \"--targets\",\n nargs=\"+\",\n default=[\"vocals\", \"drums\", \"bass\", \"other\"],\n type=str,\n help=\"provide targets to be processed. \\\n If none, all available targets will be computed\",\n )\n\n parser.add_argument(\n \"--outdir\",\n type=str,\n help=\"Results path where audio evaluation results are stored\",\n )\n\n parser.add_argument(\n \"--start\", type=float, default=0.0, help=\"Audio chunk start in seconds\"\n )\n\n parser.add_argument(\n \"--duration\",\n type=float,\n default=-1.0,\n help=\"Audio chunk duration in seconds, negative values load full track\",\n )\n\n parser.add_argument(\n \"--model\",\n default=\"umxhq\",\n type=str,\n help=\"path to mode base directory of pretrained models\",\n )\n\n parser.add_argument(\n \"--no-cuda\", action=\"store_true\", default=False, help=\"disables CUDA inference\"\n )\n\n args, _ = parser.parse_known_args()\n args = inference_args(parser, args)\n\n test_main(\n input_files=args.input,\n samplerate=args.samplerate,\n alpha=args.alpha,\n softmask=args.softmask,\n niter=args.niter,\n residual_model=args.residual_model,\n model=args.model,\n targets=args.targets,\n outdir=args.outdir,\n start=args.start,\n duration=args.duration,\n no_cuda=args.no_cuda,\n )\n" ]
[ [ "torch.hub.load", "torch.load", "torch.tensor", "torch.cuda.is_available", "torch.device", "numpy.repeat", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cartertroy/datarobot-user-models
[ "d2c2b47e0d46a0ce8d07f1baa8d57155a829d2fc" ]
[ "model_templates/inference/python3_keras_vizai_joblib/model_utils.py" ]
[ "# keras imports\nfrom keras.models import load_model\nfrom keras.applications.vgg16 import preprocess_input\n\n# scikit-learn imports\nfrom sklearn.pipeline import Pipeline\n\n# pandas/numpy imports\nimport pandas as pd\nimport numpy as np\n\nimport joblib\nimport io\nimport base64\nimport h5py\nfrom PIL import Image\nfrom pathlib import Path\n\n# define constants\n\nIMG_SIZE = 150\nIMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)\n\n\ndef get_imputation_img() -> str:\n \"\"\" black image in base64 str for data imputation filling \"\"\"\n black_PIL_img = Image.fromarray(np.zeros(IMG_SHAPE, dtype=\"float32\"), \"RGB\")\n return get_base64_str_from_PIL_img(black_PIL_img)\n\n\ndef get_img_obj_from_base64_str(b64_img_str: str) -> Image:\n \"\"\" given a base64 encoded image str get the PIL.Image object \"\"\"\n b64_img = base64.b64decode(b64_img_str)\n b64_img = io.BytesIO(b64_img)\n return Image.open(b64_img)\n\n\ndef get_base64_str_from_PIL_img(pillowed_img: Image) -> str:\n \"\"\" given a PIL.Image object return base64 encoded str of the image object \"\"\"\n buffer = io.BytesIO()\n pillowed_img.save(buffer, format=\"JPEG\")\n return base64.b64encode(buffer.getvalue())\n\n\ndef img_preprocessing(pillowed_img: Image) -> np.ndarray:\n \"\"\" given a PIL.Image object resize, convert to RGB and return as np.array \"\"\"\n img = pillowed_img.resize((IMG_SHAPE[:-1]), Image.LANCZOS)\n img = img.convert(\"RGB\")\n img_arr = np.asarray(img, dtype=\"float32\")\n img_arr = preprocess_input(img_arr) # pixel scaling/color normalization\n return img_arr\n\n\ndef preprocessing_X_transform(data_df: pd.DataFrame, image_feature_name: str,) -> pd.DataFrame:\n \"\"\" Apply the preprocessing methods on the data before prediction for the model to work on \"\"\"\n\n data_df = data_df.copy()\n if image_feature_name in data_df:\n data_df[image_feature_name] = data_df[image_feature_name].astype(bytes)\n data_df[image_feature_name] = data_df[image_feature_name].apply(get_img_obj_from_base64_str)\n data_df[image_feature_name] = data_df[image_feature_name].apply(img_preprocessing)\n return data_df\n\n\ndef pretrained_preprocess_input(img_arr: np.ndarray) -> np.ndarray:\n return preprocess_input(img_arr)\n\n\ndef reshape_numpy_array(data_series: pd.Series) -> np.ndarray:\n \"\"\" Convert pd.Series to numpy array and reshape it too \"\"\"\n return np.asarray(data_series.to_list()).reshape(-1, *IMG_SHAPE)\n\n\ndef apply_image_data_preprocessing(x_data_df: pd.DataFrame, image_feature_name: str) -> np.ndarray:\n \"\"\" Image data preprocessing before fit \"\"\"\n X_data_df = preprocessing_X_transform(x_data_df, image_feature_name)\n X_data = reshape_numpy_array(X_data_df[image_feature_name])\n return X_data\n\n\ndef convert_np_to_df(np_array, img_col) -> pd.DataFrame:\n \"\"\" simple utility to convert numpy array to dataframe \"\"\"\n return pd.DataFrame(data=np_array, columns=[img_col])\n\n\ndef deserialize_estimator_pipeline(input_dir: str) -> Pipeline:\n \"\"\"\n Load estimator pipeline from the given joblib file.\n\n Parameters\n ----------\n joblib_file_path: str\n The joblib file path to load from.\n\n Returns\n -------\n pipeline: Pipeline\n Constructed pipeline with necessary preprocessor steps and estimator to predict/score.\n \"\"\"\n # load the dictionary obj from the joblib file\n joblib_file_path = Path(input_dir) / \"artifact.joblib\"\n estimator_dict = joblib.load(joblib_file_path)\n model = estimator_dict[\"model\"]\n prep_pipeline = estimator_dict[\"preprocessor_pipeline\"]\n with h5py.File(model, mode=\"r\") as 
fp:\n keras_model = load_model(fp)\n\n pipeline = Pipeline([(\"preprocessor\", prep_pipeline), (\"estimator\", keras_model)], verbose=True)\n return pipeline\n" ]
[ [ "numpy.asarray", "numpy.zeros", "sklearn.pipeline.Pipeline", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ericwang0701/AlphaPose
[ "1f17dbf4b41ad7452430b69f72d58a0585ed09af", "1f17dbf4b41ad7452430b69f72d58a0585ed09af" ]
[ "alphapose/models/fastpose.py", "alphapose/utils/metrics.py" ]
[ "# -----------------------------------------------------\n# Copyright (c) Shanghai Jiao Tong University. All rights reserved.\n# Written by Jiefeng Li ([email protected])\n# -----------------------------------------------------\n\nimport torch.nn as nn\n\nfrom .builder import SPPE\nfrom .layers.DUC import DUC\nfrom .layers.SE_Resnet import SEResnet\n\n\[email protected]_module\nclass FastPose(nn.Module):\n conv_dim = 128\n\n def __init__(self, norm_layer=nn.BatchNorm2d, **cfg):\n super(FastPose, self).__init__()\n self._preset_cfg = cfg['PRESET']\n\n if 'DCN' in cfg.keys():\n stage_with_dcn = cfg['STAGE_WITH_DCN']\n dcn = cfg['DCN']\n self.preact = SEResnet(\n f\"resnet{cfg['NUM_LAYERS']}\", dcn=dcn, stage_with_dcn=stage_with_dcn)\n else:\n self.preact = SEResnet(f\"resnet{cfg['NUM_LAYERS']}\")\n\n # Imagenet pretrain model\n import torchvision.models as tm # noqa: F401,F403\n assert cfg['NUM_LAYERS'] in [18, 34, 50, 101, 152]\n x = eval(f\"tm.resnet{cfg['NUM_LAYERS']}(pretrained=True)\")\n\n model_state = self.preact.state_dict()\n state = {k: v for k, v in x.state_dict().items()\n if k in self.preact.state_dict() and v.size() == self.preact.state_dict()[k].size()}\n model_state.update(state)\n self.preact.load_state_dict(model_state)\n\n self.suffle1 = nn.PixelShuffle(2)\n self.duc1 = DUC(512, 1024, upscale_factor=2, norm_layer=norm_layer)\n self.duc2 = DUC(256, 512, upscale_factor=2, norm_layer=norm_layer)\n\n self.conv_out = nn.Conv2d(\n self.conv_dim, self._preset_cfg['NUM_JOINTS'], kernel_size=3, stride=1, padding=1)\n\n def forward(self, x):\n out = self.preact(x)\n out = self.suffle1(out)\n out = self.duc1(out)\n out = self.duc2(out)\n\n out = self.conv_out(out)\n return out\n\n def _initialize(self):\n for m in self.conv_out.modules():\n if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n # logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))\n # logger.info('=> init {}.bias as 0'.format(name))\n nn.init.normal_(m.weight, std=0.001)\n nn.init.constant_(m.bias, 0)\n", "# -----------------------------------------------------\n# Copyright (c) Shanghai Jiao Tong University. 
All rights reserved.\n# Written by Jiefeng Li ([email protected])\n# -----------------------------------------------------\n\nimport os\nimport sys\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom .transforms import get_max_pred_batch\n\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\n\n\nclass DataLogger(object):\n \"\"\"Average data logger.\"\"\"\n def __init__(self):\n self.clear()\n\n def clear(self):\n self.value = 0\n self.sum = 0\n self.cnt = 0\n self.avg = 0\n\n def update(self, value, n=1):\n self.value = value\n self.sum += value * n\n self.cnt += n\n self._cal_avg()\n\n def _cal_avg(self):\n self.avg = self.sum / self.cnt\n\n\ndef calc_iou(pred, target):\n \"\"\"Calculate mask iou\"\"\"\n if isinstance(pred, torch.Tensor):\n pred = pred.cpu().data.numpy()\n if isinstance(target, torch.Tensor):\n target = target.cpu().data.numpy()\n\n pred = pred >= 0.5\n target = target >= 0.5\n\n intersect = (pred == target) * pred * target\n union = np.maximum(pred, target)\n\n if pred.ndim == 2:\n iou = np.sum(intersect) / np.sum(union)\n elif pred.ndim == 3 or pred.ndim == 4:\n n_samples = pred.shape[0]\n intersect = intersect.reshape(n_samples, -1)\n union = union.reshape(n_samples, -1)\n\n iou = np.mean(np.sum(intersect, axis=1) / np.sum(union, axis=1))\n\n return iou\n\n\ndef mask_cross_entropy(pred, target):\n return F.binary_cross_entropy_with_logits(\n pred, target, reduction='mean')[None]\n\n\ndef evaluate_mAP(res_file, ann_type='bbox', ann_file='person_keypoints_val2017.json', silence=True):\n \"\"\"Evaluate mAP result for coco dataset.\n\n Parameters\n ----------\n res_file: str\n Path to result json file.\n ann_type: str\n annotation type, including: `bbox`, `segm`, `keypoints`.\n ann_file: str\n Path to groundtruth file.\n silence: bool\n True: disable running log.\n\n \"\"\"\n class NullWriter(object):\n def write(self, arg):\n pass\n\n ann_file = os.path.join('./data/coco/annotations/', ann_file)\n\n if silence:\n nullwrite = NullWriter()\n oldstdout = sys.stdout\n sys.stdout = nullwrite # disable output\n\n cocoGt = COCO(ann_file)\n cocoDt = cocoGt.loadRes(res_file)\n\n cocoEval = COCOeval(cocoGt, cocoDt, ann_type)\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n\n if silence:\n sys.stdout = oldstdout # enable output\n\n stats_names = ['AP', 'Ap .5', 'AP .75', 'AP (M)', 'AP (L)',\n 'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)']\n info_str = {}\n for ind, name in enumerate(stats_names):\n info_str[name] = cocoEval.stats[ind]\n\n return info_str\n\n\ndef calc_accuracy(preds, labels):\n \"\"\"Calculate heatmap accuracy.\"\"\"\n preds = preds.cpu().data.numpy()\n labels = labels.cpu().data.numpy()\n\n num_joints = preds.shape[1]\n\n norm = 1.0\n hm_h = preds.shape[2]\n hm_w = preds.shape[3]\n\n preds, _ = get_max_pred_batch(preds)\n labels, _ = get_max_pred_batch(labels)\n norm = np.ones((preds.shape[0], 2)) * np.array([hm_w, hm_h]) / 10\n\n dists = calc_dist(preds, labels, norm)\n\n acc = 0\n sum_acc = 0\n cnt = 0\n for i in range(num_joints):\n acc = dist_acc(dists[i])\n if acc >= 0:\n sum_acc += acc\n cnt += 1\n\n if cnt > 0:\n return sum_acc / cnt\n else:\n return 0\n\n\ndef calc_dist(preds, target, normalize):\n \"\"\"Calculate normalized distances\"\"\"\n preds = preds.astype(np.float32)\n target = target.astype(np.float32)\n dists = np.zeros((preds.shape[1], preds.shape[0]))\n\n for n in range(preds.shape[0]):\n for c in range(preds.shape[1]):\n if target[n, c, 0] > 1 and target[n, c, 1] > 1:\n 
normed_preds = preds[n, c, :] / normalize[n]\n normed_targets = target[n, c, :] / normalize[n]\n dists[c, n] = np.linalg.norm(normed_preds - normed_targets)\n else:\n dists[c, n] = -1\n\n return dists\n\n\ndef dist_acc(dists, thr=0.5):\n \"\"\"Calculate accuracy with given input distance.\"\"\"\n dist_cal = np.not_equal(dists, -1)\n num_dist_cal = dist_cal.sum()\n if num_dist_cal > 0:\n return np.less(dists[dist_cal], thr).sum() * 1.0 / num_dist_cal\n else:\n return -1\n" ]
[ [ "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.PixelShuffle", "torch.nn.init.normal_" ], [ "numpy.maximum", "numpy.less", "torch.nn.functional.binary_cross_entropy_with_logits", "numpy.linalg.norm", "numpy.ones", "numpy.not_equal", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FrankLeeeee/ColossalAI-Examples
[ "a7eae54278f3e5bcaa4d2a54552ff4a18ead9cc5", "a7eae54278f3e5bcaa4d2a54552ff4a18ead9cc5" ]
[ "language/bert/zero/finetuning/glue/main.py", "image/vision_transformer/colo_vit/test_vit.py" ]
[ "import colossalai\nimport transformers\nimport torch\nfrom argparse import ArgumentError\nfrom pathlib import Path\nfrom colossalai.core import global_context as gpc\nfrom colossalai.logging import get_dist_logger\nfrom arguments import parse_args\nfrom processors import PROCESSORS\nfrom utils import (get_model, get_optimizer, get_lr_scheduler, get_eval_dataloader, get_train_dataloader, run_eval,\n run_train)\n\nfrom colossalai.engine.gradient_accumulation import GradAccumLrSchedulerByStep\nfrom colossalai.zero.init_ctx import ZeroInitContext\nfrom colossalai.zero.shard_utils import TensorShardStrategy\n\n\ndef main():\n args = parse_args()\n\n # init distributed environment\n colossalai.launch_from_torch(config='./configs/colossalai_zero.py')\n\n use_zero = hasattr(gpc.config, 'zero')\n\n # get logger\n logger = get_dist_logger()\n\n if not any([args.train, args.eval, args.predict]):\n raise ArgumentError(\"At least one of train, eval and predict should be set\")\n\n # exit if the output directory is not empty to avoid overwritting\n output_dir = Path(args.output_dir).absolute()\n args.output_dir = output_dir\n\n if args.train and output_dir.exists and next(output_dir.iterdir(), None):\n raise FileExistsError(f\"Output directory ({output_dir}) already exists and is not empty.\")\n\n output_dir.mkdir(exist_ok=True)\n\n # get data processor\n processor = PROCESSORS[args.task_name]()\n num_labels = len(processor.get_labels())\n\n # get tokenizer\n tokenizer = transformers.BertTokenizer.from_pretrained(args.vocab_file,\n do_lower_case=args.do_lower_case,\n max_len=512)\n\n # check if checkpoint file is given\n if args.init_checkpoint:\n use_hf_pretrain = False\n else:\n use_hf_pretrain = True\n\n # Prepare model\n if use_zero:\n shard_strategy = TensorShardStrategy()\n with ZeroInitContext(target_device=torch.cuda.current_device(), shard_strategy=shard_strategy,\n shard_param=True):\n model = get_model(args.bert_config, num_labels, use_hf_pretrain)\n else:\n model = get_model(args.bert_config, num_labels, use_hf_pretrain)\n\n if use_hf_pretrain:\n logger.info(\"Loading model checkpoint from HuggingFace pretrained weights\", ranks=[0])\n else:\n logger.info(f\"Loading model checkpoint from {args.init_checkpoint}\", ranks=[0])\n checkpoint = torch.load(args.init_checkpoint, map_location='cpu')\n checkpoint = checkpoint[\"model\"] if \"model\" in checkpoint.keys() else checkpoint\n model.load_state_dict(checkpoint, strict=False)\n logger.info(\"Model checkpoint is loaded\", ranks=[0])\n\n # Prepare optimizer\n optimizer = get_optimizer(model, args.learning_rate)\n\n # prepare loss function\n criterion = torch.nn.CrossEntropyLoss()\n\n # train\n if args.train:\n # get dataloader\n train_dataloader = get_train_dataloader(args, tokenizer, processor, logger)\n\n # prepare lr scheduler\n steps_per_epoch = GradAccumLrSchedulerByStep.compute_effective_steps_per_epoch(\n train_dataloader, gpc.config.get('gradient_accumulation', 1))\n total_steps = args.num_train_epochs * steps_per_epoch\n lr_scheduler = get_lr_scheduler(optimizer, total_steps=total_steps, warmup_ratio=args.warmup_proportion)\n else:\n train_dataloader = None\n lr_scheduler = None\n\n if args.eval:\n eval_dataloader, eval_examples, label_map = get_eval_dataloader(args, tokenizer, processor, logger)\n else:\n eval_dataloader = None\n\n engine, train_dataloader, eval_dataloader, lr_scheduler = colossalai.initialize(model=model,\n optimizer=optimizer,\n criterion=criterion,\n train_dataloader=train_dataloader,\n 
test_dataloader=eval_dataloader,\n lr_scheduler=lr_scheduler)\n\n if args.train:\n # train\n run_train(args, engine, train_dataloader, lr_scheduler, logger)\n\n if args.eval:\n run_eval(args, engine, eval_dataloader, eval_examples, num_labels, label_map, logger)\n\n gpc.destroy()\n\n\nif __name__ == '__main__':\n main()\n", "import torch\nimport pytest\nimport colossalai\nfrom colossalai.context.parallel_mode import ParallelMode\nimport torch.multiprocessing as mp\nfrom colossalai.testing import rerun_if_address_is_in_use\nfrom colossalai.utils.cuda import get_current_device\nfrom colossalai.utils import free_port\nfrom colossalai.utils.model.colo_init_context import ColoInitContext\nfrom colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, distspec\nfrom colossalai.core import global_context as gpc\nfrom functools import partial\nfrom utils.util import tensor_equal, tensor_shard_equal, set_seed\nfrom vit import get_training_components\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom colossalai.nn.parallel.data_parallel import ColoDDP\n\n\n# Only for all Linear, it's 1d_row split because Linear will be transposed when calculating.\n# But for other layers, it's 1d_col split.\n# Layernorm is not supported for now.\n# patch_embeddings.projection has nn.Conv2d\n# https://github.com/huggingface/transformers/blob/dcb08b99f44919425f8ba9be9ddcc041af8ec25e/src/transformers/models/vit/modeling_vit.py#L182\ndef init_1d_row_for_linear_weight_spec(model):\n spec = TensorSpec(\n distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [-1], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),\n ParallelAction(ComputePattern.TP1D))\n with DistSpecManager.no_grad():\n for n, p in model.named_parameters():\n if 'weight' in n and 'layernorm' not in n and 'embeddings.patch_embeddings.projection.weight' not in n:\n p.set_spec(spec)\n\n\n# Similarly, it's col split for Linear but row split for others.\ndef init_1d_col_for_linear_weight_bias_spec(model):\n spec = TensorSpec(\n distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [0], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),\n ParallelAction(ComputePattern.TP1D))\n with DistSpecManager.no_grad():\n for n, p in model.named_parameters():\n if ('weight' in n\n or 'bias' in n) and 'layernorm' not in n and 'embeddings.patch_embeddings.projection' not in n:\n p.set_spec(spec)\n\n\ndef check_param_equal(model, torch_model):\n for p, torch_p in zip(model.parameters(), torch_model.parameters()):\n assert tensor_shard_equal(torch_p, p)\n\n\ndef check_grad_equal(model, torch_model):\n for (np, p), (ntp, torch_p) in zip(model.named_parameters(), torch_model.named_parameters()):\n if (torch_p.grad.shape == p.grad.shape):\n assert torch.allclose(torch_p.grad, p.grad, rtol=1e-3, atol=2.0) == True\n else:\n dims_not_eq = torch.nonzero(torch.tensor(torch_p.grad.shape) != torch.tensor(p.grad.shape))\n dim = dims_not_eq.item()\n world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)\n rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)\n assert torch.allclose(torch_p.grad.chunk(world_size, dim)[rank], p.grad, rtol=1e-3, atol=2.0) == True\n\n\ndef run_vit(init_spec_func, use_ddp):\n model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_training_components()\n\n with ColoInitContext(device=get_current_device()):\n model = model_builder()\n model = model.cuda()\n torch_model = model_builder().cuda()\n if use_ddp:\n model = ColoDDP(model)\n torch_model = DDP(torch_model,\n 
device_ids=[gpc.get_global_rank()],\n process_group=gpc.get_group(ParallelMode.DATA))\n for torch_p, p in zip(torch_model.parameters(), model.parameters()):\n torch_p.data.copy_(p)\n init_spec_func(model)\n\n check_param_equal(model, torch_model)\n model.train()\n torch_model.train()\n set_seed(gpc.get_local_rank(ParallelMode.DATA))\n\n optimizer = optimizer_class(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)\n torch_optimizer = optimizer_class(torch_model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)\n\n for i, image_dict in enumerate(train_dataloader):\n if use_ddp:\n model.zero_grad()\n else:\n optimizer.zero_grad()\n logits = model(image_dict['pixel_values'])\n torch_logits = torch_model(image_dict['pixel_values'])\n assert tensor_equal(torch_logits.logits, logits.logits)\n loss = criterion(logits.logits, image_dict['label'])\n torch_loss = criterion(torch_logits.logits, image_dict['label'])\n if use_ddp:\n model.backward(loss)\n else:\n loss.backward()\n torch_loss.backward()\n check_grad_equal(model, torch_model)\n optimizer.step()\n torch_optimizer.step()\n check_param_equal(model, torch_model)\n break\n\n\ndef run_dist(rank, world_size, port, use_ddp):\n if use_ddp and world_size == 1:\n return\n tp_world_size = world_size // 2 if use_ddp else world_size\n config = dict(parallel=dict(tensor=dict(mode=\"1d\", size=tp_world_size),))\n colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')\n run_vit(init_1d_row_for_linear_weight_spec, use_ddp)\n run_vit(init_1d_col_for_linear_weight_bias_spec, use_ddp)\n\n\[email protected]\[email protected]('world_size', [1, 4])\[email protected]('use_ddp', [False, True])\n@rerun_if_address_is_in_use()\ndef test_vit(world_size, use_ddp):\n run_func = partial(run_dist, world_size=world_size, port=free_port(), use_ddp=use_ddp)\n mp.spawn(run_func, nprocs=world_size)\n\n\nif __name__ == '__main__':\n test_vit(4, False)\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.cuda.current_device", "torch.load" ], [ "torch.allclose", "torch.multiprocessing.spawn", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
marcorossignolo/QuOCS
[ "5ed631e2aebc42b226f5992daf27e2da75a89af9", "5ed631e2aebc42b226f5992daf27e2da75a89af9", "5ed631e2aebc42b226f5992daf27e2da75a89af9" ]
[ "src/quocslib/tools/linearalgebra.py", "src/quocslib/timeevolution/piecewise_integrator.py", "src/quocslib/optimalcontrolproblems/OneQubitProblem.py" ]
[ "# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n# Copyright 2021- QuOCS Team\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\nimport numpy as np\nfrom quocslib.tools.randomgenerator import RandomNumberGenerator\n\n\ndef ptrace(rho, dimensions):\n \"\"\"\n Useful to have this implementation of the partial trace which uses einsums\n\n TODO implement this in Python again\n \"\"\"\n\n return rho\n\n\ndef commutator(A, B):\n return A @ B - B @ A\n\n\ndef gram_schmidt(A):\n \"\"\"\n Orthonormalize a set of linear independent vectors\n\n :param A: Square matrix with linear independent vectors\n :return A: Square matrix with orthonormalize vectors\n \"\"\"\n # Get the number of vectors.\n n = A.shape[1]\n for j in range(n):\n # To orthogonalize the vector in column j with respect to the\n # previous vectors, subtract from it its projection onto\n # each of the previous vectors.\n for k in range(j):\n u_k = A[:, k]\n A[:, j] -= np.dot(u_k, A[:, j]) * u_k / np.linalg.norm(u_k) ** 2\n A[:, j] = A[:, j] / np.linalg.norm(A[:, j])\n return A\n\n\ndef simplex_creation(\n mean_value: np.array, sigma_variation: np.array, rng: RandomNumberGenerator = None\n) -> np.array:\n \"\"\"\n Creation of the simplex\n\n @return:\n \"\"\"\n ctrl_par_number = mean_value.shape[0]\n ##################\n # Scale matrix:\n # Explain what the scale matrix means here\n ##################\n # First row\n x0_scale = np.zeros((1, ctrl_par_number))\n # Simplex matrix ( without first row )\n simplex_matrix = np.diag(np.ones_like(sigma_variation))\n # Add random number in the first column\n if rng is None:\n random_array = np.random.rand(ctrl_par_number)\n else:\n random_array = rng.get_random_numbers(ctrl_par_number)\n random_array = random_array.reshape(\n ctrl_par_number,\n )\n simplex_matrix[0, :] += np.sqrt(3) * (random_array - 0.5) * 2\n # Orthogonalize set of vectors with gram_schmidt, and rescale with the normalization length\n simplex_matrix_orthonormal = gram_schmidt(simplex_matrix.T)\n # Rescale the vector with the sigma variation\n simplex_matrix_orthogonal_rescaled = simplex_matrix_orthonormal @ np.diag(\n sigma_variation\n )\n # Add the first row containing only zeros\n x_t_norm = np.append(x0_scale, simplex_matrix_orthogonal_rescaled, axis=0)\n # Offset matrix\n x_offset = np.outer(np.ones((1, ctrl_par_number + 1)), mean_value)\n # Start simplex matrix\n StartSimplex = x_t_norm + x_offset\n return StartSimplex\n\n\nif __name__ == \"__main__\":\n # TODO Move this main script to a test script\n Nc = 4\n ampl_var_1 = 2.0\n ampl_var_2 = 0.7\n f_norm = 1 / np.sqrt(2)\n p_1 = (ampl_var_1 * f_norm) * np.ones(\n 2,\n )\n p_2 = (ampl_var_2 * f_norm) * np.ones(\n 2,\n )\n sc_vec = np.append(p_1, p_2)\n\n x0_scale = np.zeros((1, Nc))\n # Simplex matrix ( without first row )\n simplex_m = np.diag(sc_vec)\n # Add random number\n simplex_m[0, :] += (\n (sc_vec[0] / 10.0)\n * (\n 
np.random.rand(\n Nc,\n )\n - 0.5\n )\n * 2\n )\n\n simplex_m_r = gram_schmidt(simplex_m.T, sc_vec).T\n # Rescale accordingly to amplitude variation\n # x_norm = A_norm.dot(np.diag(sc_vec))\n # Add first row\n x_t_norm = np.append(x0_scale, simplex_m_r, axis=0)\n\n print(x_t_norm)\n\n\ndef to_sup_op(H):\n \"\"\"\n Function to convert a Hamiltonian into a Liouvillian\n \"\"\"\n dim = np.size(H, 1)\n idm = np.eye(dim)\n return np.kron(idm, H) - np.kron(H.T.conj(), idm)\n\n\ndef to_vec(rho):\n \"\"\"\n Take an input rho vector and flatten it into a column\n \"\"\"\n return rho.flatten()\n", "import numpy as np\nfrom scipy.linalg import expm\n\n# can we do conditional import?\ntry:\n import jax.scipy as jsp\nexcept:\n raise ImportError\n\n\ndef pw_evolution(U_store, drive, A, B, n_slices, dt):\n \"\"\"Compute the piecewise evolution of a system defined by the\n Hamiltonian H = A + drive * B and store the result in U_store\n\n :param List[np.matrix] U_store: the storage for all of the computed propagators\n :param np.array drive: an array of dimension n_controls x n_slices that contains the amplitudes of the pulse\n :param np.matrix A: the drift Hamiltonian\n :param List[np.matrix] B: the control Hamiltonians\n :param int n_slices: number of slices\n :param float dt: the duration of each time slice\n :return None: Stores the new propagators so this doesn't return\n \"\"\"\n K = len(B)\n for i in range(n_slices):\n H = A\n for k in range(K):\n H = H + drive[k, i] * B[k]\n U_store[i] = expm(-1j * dt * H)\n return None\n\n\ndef pw_final_evolution(drive, A, B, n_slices, dt, u0):\n \"\"\"Compute the piecewise evolution of a system defined by the\n Hamiltonian H = A + drive * B and concatenate all the propagators\n\n :param List[np.matrix] U_store: the storage for all of the computed propagators\n :param np.array drive: an array of dimension n_controls x n_slices that contains the amplitudes of the pulse\n :param np.matrix A: the drift Hamiltonian\n :param List[np.matrix] B: the control Hamiltonians\n :param int n_slices: number of slices\n :param np.matrix u0: the initial density matrix to start from\n :return np.matrix: the final propagator\n \"\"\"\n K = len(B)\n U = u0\n for i in range(n_slices):\n H = A\n for k in range(K):\n H = H + drive[k, i] * B[k]\n U = expm(-1j * dt * H) @ U\n return U\n", "# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n# Copyright 2021- QuOCS Team\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\nfrom quocslib.optimalcontrolproblems.su2 import hamiltonian_d1_d2\nimport numpy as np\nfrom scipy.linalg import expm, norm\nfrom quocslib.utils.AbstractFoM import AbstractFoM\nimport os\n\n\nclass OneQubit(AbstractFoM):\n def __init__(self, args_dict: dict = None):\n if args_dict is None:\n args_dict = {}\n\n self.psi_target = np.asarray(eval(args_dict.setdefault(\"target_state\", \"[1.0/np.sqrt(2), -1j/np.sqrt(2)]\")),\n dtype=\"complex\")\n 
self.psi_0 = np.asarray(eval(args_dict.setdefault(\"initial_state\", \"[1.0, 0.0]\")), dtype=\"complex\")\n self.delta1 = args_dict.setdefault(\"delta1\", 0.1)\n self.delta2 = args_dict.setdefault(\"delta2\", 0.1)\n # Noise in the figure of merit\n self.is_noisy = args_dict.setdefault(\"is_noisy\", False)\n self.noise_factor = args_dict.setdefault(\"noise_factor\", 0.05)\n self.std_factor = args_dict.setdefault(\"std_factor\", 0.01)\n\n # Drifting FoM\n self.include_drift = args_dict.setdefault(\"include_drift\", False)\n self.linear_drift_val_over_iterartion = args_dict.setdefault(\"linear_drift_val_over_iterartion\", 0.002)\n\n self.FoM_list = []\n self.save_path = \"\"\n self.FoM_save_name = \"FoM.txt\"\n\n self.FoM_eval_number = 0\n\n # def __del__(self):\n # np.savetxt(os.path.join(self.save_path, self.FoM_save_name), self.FoM_list)\n\n def save_FoM(self):\n np.savetxt(os.path.join(self.save_path, self.FoM_save_name), self.FoM_list)\n\n def set_save_path(self, save_path: str = \"\"):\n self.save_path = save_path\n\n def get_FoM(self, pulses: list = [], parameters: list = [], timegrids: list = []) -> dict:\n f = np.asarray(pulses[0])\n timegrid = np.asarray(timegrids[0])\n dt = timegrid[1] - timegrid[0]\n U = self._time_evolution(f, dt)\n psi_f = np.matmul(U, self.psi_0)\n infidelity = 1.0 - self._get_fidelity(self.psi_target, psi_f)\n std = 1e-4\n if self.is_noisy:\n noise = (self.noise_factor * 2 * (0.5 - np.random.rand(1)[0]))\n infidelity += noise\n std = (self.std_factor * np.random.rand(1)[0])\n\n if self.include_drift:\n infidelity += self.linear_drift_val_over_iterartion * self.FoM_eval_number\n\n self.FoM_list.append(np.abs(infidelity))\n self.FoM_eval_number += 1\n\n return {\"FoM\": np.abs(infidelity), \"std\": std}\n\n @staticmethod\n def _time_evolution(fc, dt):\n U = np.identity(2)\n for ii in range(fc.size - 1):\n ham_t = hamiltonian_d1_d2((fc[ii + 1] + fc[ii]) / 2)\n U_temp = U\n U = np.matmul(expm(-1j * ham_t * dt), U_temp)\n return U\n\n @staticmethod\n def _get_fidelity(psi1, psi2):\n return np.abs(np.dot(psi1.conj().T, psi2)) ** 2 / (norm(psi1) * norm(psi2))\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.ones_like", "numpy.sqrt", "numpy.eye", "numpy.kron", "numpy.linalg.norm", "numpy.ones", "numpy.append", "numpy.size", "numpy.random.rand", "numpy.zeros" ], [ "scipy.linalg.expm" ], [ "numpy.abs", "numpy.asarray", "numpy.matmul", "scipy.linalg.expm", "numpy.identity", "numpy.random.rand", "scipy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] } ]
bpinsard/nipy
[ "d49e8292adad6619e3dac710752131b567efe90e", "d49e8292adad6619e3dac710752131b567efe90e", "d49e8292adad6619e3dac710752131b567efe90e", "d49e8292adad6619e3dac710752131b567efe90e", "d49e8292adad6619e3dac710752131b567efe90e" ]
[ "nipy/labs/datasets/volumes/volume_img.py", "examples/formula/fir.py", "nipy/modalities/fmri/fmristat/setup.py", "examples/labs/need_data/one_sample_t_test.py", "nipy/algorithms/statistics/onesample.py" ]
[ "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nAn image that stores the data as an (x, y, z, ...) array, with an\naffine mapping to the world space\n\"\"\"\nfrom __future__ import absolute_import\nimport copy\n\nimport numpy as np\nfrom scipy import ndimage\n\n# Local imports\nfrom ..transforms.affine_utils import to_matrix_vector, \\\n from_matrix_vector, get_bounds\nfrom ..transforms.affine_transform import AffineTransform\nfrom ..transforms.transform import CompositionError\n\nfrom .volume_grid import VolumeGrid\n\n################################################################################\n# class `VolumeImg`\n################################################################################\n\nclass VolumeImg(VolumeGrid):\n \"\"\" A regularly-spaced image for embedding data in an x, y, z 3D\n world, for neuroimaging.\n\n This object is an ndarray representing a volume, with the first 3\n dimensions being spatial, and mapped to a named world space using\n an affine (4x4 matrix).\n\n Attributes\n ----------\n\n affine : 4x4 ndarray\n Affine mapping from indices to world coordinates.\n world_space : string\n Name of the world space the data is embedded in. For\n instance `mni152`.\n metadata : dictionnary\n Optional, user-defined, dictionnary used to carry around\n extra information about the data as it goes through\n transformations. The consistency of this information may not\n be maintained as the data is modified.\n interpolation : 'continuous' or 'nearest'\n String giving the interpolation logic used when calculating\n values in different world spaces\n _data : \n Private pointer to the data.\n\n Notes\n ------\n\n The data is stored in an undefined way: prescalings might need to\n be applied to it before using it, or the data might be loaded on\n demand. 
The best practice to access the data is not to access the\n _data attribute, but to use the `get_data` method.\n \"\"\"\n\n # most attributes are given by the VolumeField interface \n\n #---------------------------------------------------------------------------\n # Attributes, VolumeImg interface\n #---------------------------------------------------------------------------\n\n # The affine (4x4 ndarray)\n affine = np.eye(4)\n\n #---------------------------------------------------------------------------\n # VolumeField interface\n #---------------------------------------------------------------------------\n\n def __init__(self, data, affine, world_space, metadata=None, \n interpolation='continuous'):\n \"\"\" Creates a new neuroimaging image with an affine mapping.\n\n Parameters\n ----------\n\n data : ndarray\n ndarray representing the data.\n affine : 4x4 ndarray\n affine transformation to the reference world space\n world_space : string\n name of the reference world space.\n metadata : dictionnary\n dictionnary of user-specified information to store with\n the image.\n \"\"\"\n if not interpolation in ('continuous', 'nearest'):\n raise ValueError('interpolation must be either continuous '\n 'or nearest')\n self._data = data\n if not affine.shape == (4, 4):\n raise ValueError('The affine should be a 4x4 array')\n self.affine = affine\n self.world_space = world_space\n if metadata is None:\n metadata = dict()\n self.metadata = metadata\n self.interpolation = interpolation\n\n \n def like_from_data(self, data):\n # Use self.__class__ for subclassing.\n assert len(data.shape) >= 3, \\\n 'The data passed must be an array of at least 3 dimensions'\n return self.__class__(data=data, \n affine=copy.copy(self.affine),\n world_space=self.world_space,\n metadata=copy.copy(self.metadata),\n interpolation=self.interpolation,\n )\n\n # Inherit docstring\n like_from_data.__doc__ = VolumeGrid.like_from_data.__doc__\n\n\n def get_transform(self):\n return AffineTransform('voxel_space', self.world_space, self.affine)\n\n # Inherit docstring\n get_transform.__doc__ = VolumeGrid.get_transform.__doc__\n\n def get_affine(self):\n return self.affine\n\n def resampled_to_img(self, target_image, interpolation=None):\n if not hasattr(target_image, 'world_space'):\n from ..converters import as_volume_img\n target_image = as_volume_img(target_image)\n if not target_image.world_space == self.world_space:\n raise CompositionError(\n 'The two images are not embedded in the same world space')\n if isinstance(target_image, VolumeImg):\n return self.as_volume_img(affine=target_image.affine,\n shape=target_image.get_data().shape[:3],\n interpolation=interpolation)\n else:\n # IMPORTANT: Polymorphism can be implemented by walking the \n # MRO and finding a method that does not raise\n # NotImplementedError. 
\n return super(VolumeImg, self).resampled_to_img(target_image,\n interpolation=interpolation)\n\n\n # Inherit docstring\n resampled_to_img.__doc__ = VolumeGrid.resampled_to_img.__doc__\n\n\n def as_volume_img(self, affine=None, shape=None,\n interpolation=None, copy=True):\n if affine is None and shape is None:\n if copy:\n import copy\n return copy.copy(self)\n else:\n return self\n if affine is None:\n affine = self.affine\n data = self.get_data()\n if shape is None:\n shape = data.shape[:3]\n shape = list(shape)\n if affine.shape[0] == 3:\n # We have a 3D affine, we need to find out the offset and\n # shape to keep the same bounding box in the new space\n affine4d = np.eye(4)\n affine4d[:3, :3] = affine\n transform_affine = np.dot(np.linalg.inv(affine4d),\n self.affine, \n )\n # The bounding box in the new world, if no offset is given\n (xmin, xmax), (ymin, ymax), (zmin, zmax) = get_bounds(\n data.shape[:3], \n transform_affine,\n )\n\n offset = np.array((xmin, ymin, zmin))\n offset = np.dot(affine, offset)\n affine = from_matrix_vector(affine, offset[:3])\n shape = (np.ceil(xmax - xmin)+1,\n np.ceil(ymax - ymin)+1,\n np.ceil(zmax - zmin)+1, )\n shape = [int(s) for s in shape]\n if not len(shape) == 3:\n raise ValueError('The shape specified should be the shape '\n 'the 3D grid, and thus of length 3. %s was specified'\n % shape )\n interpolation_order = self._get_interpolation_order(interpolation)\n if np.all(affine == self.affine):\n # Small trick to be more numericaly stable\n transform_affine = np.eye(4)\n else:\n transform_affine = np.dot(np.linalg.inv(self.affine), affine)\n A, b = to_matrix_vector(transform_affine)\n A_inv = np.linalg.inv(A)\n # If A is diagonal, ndimage.affine_transform is clever-enough \n # to use a better algorithm\n if np.all(np.diag(np.diag(A)) == A):\n A = np.diag(A)\n else:\n b = np.dot(A, b)\n # For images with dimensions larger than 3D:\n data_shape = list(data.shape)\n if len(data_shape) > 3:\n # Iter in a set of 3D volumes, as the interpolation problem is \n # separable in the extra dimensions. This reduces the\n # computational cost\n data = np.reshape(data, data_shape[:3] + [-1])\n data = np.rollaxis(data, 3)\n resampled_data = [ ndimage.affine_transform(slice, A,\n offset=np.dot(A_inv, b),\n output_shape=shape,\n order=interpolation_order)\n for slice in data]\n resampled_data = np.concatenate([d[..., np.newaxis]\n for d in resampled_data], \n axis=3)\n resampled_data = np.reshape(resampled_data, list(shape) +\n list(data_shape[3:]))\n else:\n resampled_data = ndimage.affine_transform(data, A,\n offset=np.dot(A_inv, b),\n output_shape=shape,\n order=interpolation_order)\n return self.__class__(resampled_data, affine, \n self.world_space, metadata=self.metadata,\n interpolation=self.interpolation)\n\n\n # Inherit docstring\n as_volume_img.__doc__ = VolumeGrid.as_volume_img.__doc__\n\n\n #---------------------------------------------------------------------------\n # VolumeImg interface\n #---------------------------------------------------------------------------\n\n def xyz_ordered(self, resample=False, copy=True):\n \"\"\" Returns an image with the affine diagonal and positive\n in the world space it is embedded in. \n\n Parameters\n -----------\n resample: boolean, optional\n If resample is False, no resampling is performed, the\n axis are only permuted. 
If it is impossible\n to get xyz ordering by permuting the axis, a\n 'CompositionError' is raised.\n copy: boolean, optional\n If copy is True, a deep copy of the image (including the\n data) is made.\n \"\"\"\n A, b = to_matrix_vector(self.affine.copy())\n if not np.all((np.abs(A) > 0.001).sum(axis=0) == 1):\n if not resample:\n raise CompositionError(\n 'Cannot reorder the axis: the image affine contains rotations'\n )\n else:\n # Identify the voxel size using a QR decomposition of the\n # affine\n R, Q = np.linalg.qr(self.affine[:3, :3])\n target_affine = np.diag(np.abs(np.diag(Q))[\n np.abs(R).argmax(axis=1)])\n return self.as_volume_img(affine=target_affine)\n # Copy the image, we don't want to modify in place.\n if copy:\n img = self.__copy__()\n else:\n img = self\n axis_numbers = np.argmax(np.abs(A), axis=0)\n while not np.all(np.sort(axis_numbers) == axis_numbers):\n first_inversion = np.argmax(np.diff(axis_numbers)<0)\n img = img._swapaxes(first_inversion+1, first_inversion)\n A, b = to_matrix_vector(img.affine)\n axis_numbers = np.argmax(np.abs(A), axis=0)\n\n # Now make sure the affine is positive\n pixdim = np.diag(A).copy()\n data = img.get_data()\n if pixdim[0] < 0:\n b[0] = b[0] + pixdim[0]*(data.shape[0] - 1)\n pixdim[0] = -pixdim[0]\n slice1 = slice(None, None, -1)\n else:\n slice1 = slice(None, None, None)\n if pixdim[1] < 0:\n b[1] = b[1] + 1 + pixdim[1]*(data.shape[1] - 1)\n pixdim[1] = -pixdim[1]\n slice2 = slice(None, None, -1)\n else:\n slice2 = slice(None, None, None)\n if pixdim[2] < 0:\n b[2] = b[2] + 1 + pixdim[2]*(data.shape[2] - 1)\n pixdim[2] = -pixdim[2]\n slice3 = slice(None, None, -1)\n else:\n slice3 = slice(None, None, None)\n data = data[slice1, slice2, slice3]\n img._data = data\n img.affine = from_matrix_vector(np.diag(pixdim), b)\n return img\n \n\n def _swapaxes(self, axis1, axis2):\n \"\"\" Swap the axis axis1 and axis2 of the data array and reorder the \n affine matrix to stay consistent with the data\n\n See also\n --------\n self.xyz_ordered\n \"\"\"\n if (axis1 > 2) or (axis2 > 2):\n raise ValueError('Can swap axis only on spatial axis. '\n 'Use np.swapaxes of the data array.')\n reordered_data = np.swapaxes(self.get_data(), axis1, axis2)\n new_affine = self.affine\n order = np.array((0, 1, 2, 3))\n order[axis1] = axis2\n order[axis2] = axis1\n new_affine = new_affine.T[order].T\n return VolumeImg(reordered_data, new_affine, self.world_space, \n metadata=self.metadata)\n\n #---------------------------------------------------------------------------\n # Private methods\n #---------------------------------------------------------------------------\n\n def _apply_transform(self, w2w_transform):\n \"\"\" Used for subclassing only. 
Do not call\n \"\"\"\n new_v2w_transform = \\\n self.get_transform().composed_with(w2w_transform)\n if hasattr(new_v2w_transform, 'affine'):\n new_img = self.__class__(self.get_data(),\n new_v2w_transform.affine,\n new_v2w_transform.output_space,\n metadata=self.metadata,\n interpolation=self.interpolation)\n else:\n new_img = VolumeGrid(self.get_data(),\n transform=new_v2w_transform,\n metadata=self.metadata,\n interpolation=self.interpolation)\n return new_img \n\n\n def __repr__(self):\n options = np.get_printoptions()\n np.set_printoptions(precision=5, threshold=64, edgeitems=2)\n representation = \\\n '%s(\\n data=%s,\\n affine=%s,\\n world_space=%s,\\n interpolation=%s)' % (\n self.__class__.__name__,\n '\\n '.join(repr(self._data).split('\\n')),\n '\\n '.join(repr(self.affine).split('\\n')),\n repr(self.world_space),\n repr(self.interpolation))\n np.set_printoptions(**options)\n return representation\n\n\n def __eq__(self, other):\n return ( isinstance(other, self.__class__)\n and np.all(self.get_data() == other.get_data())\n and np.all(self.affine == other.affine)\n and (self.world_space == other.world_space)\n and (self.interpolation == other.interpolation)\n )\n\n\n", "#!/usr/bin/env python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\" Example of FIR model using formula framework\n\nShows how to use B splines as basis functions for the FIR instead of simple\nboxcars.\n\nRequires matplotlib\n\"\"\"\n\nimport numpy as np\n\ntry:\n import matplotlib.pyplot as plt\nexcept ImportError:\n raise RuntimeError(\"This script needs the matplotlib library\")\n\nfrom sympy.utilities.lambdify import implemented_function\n\nfrom nipy.algorithms.statistics.api import Formula\nfrom nipy.modalities.fmri import utils\n\ndef linBspline(knots):\n \"\"\" Create linear B spline that is zero outside [knots[0], knots[-1]]\n\n (knots is assumed to be sorted).\n \"\"\"\n fns = []\n knots = np.array(knots)\n for i in range(knots.shape[0]-2):\n name = 'bs_%s' % i\n k1, k2, k3 = knots[i:i+3]\n d1 = k2-k1\n def anon(x,k1=k1,k2=k2,k3=k3):\n return ((x-k1) / d1 * np.greater(x, k1) * np.less_equal(x, k2) +\n (k3-x) / d1 * np.greater(x, k2) * np.less(x, k3))\n fns.append(implemented_function(name, anon))\n return fns\n\n\n# The splines are functions of t (time)\nbsp_fns = linBspline(np.arange(0,10,2))\n\n# We're going to evaluate at these specific values of time\ntt = np.linspace(0,50,101)\ntvals= tt.view(np.dtype([('t', np.float)]))\n\n# Some inter-stimulus intervals\nisis = np.random.uniform(low=0, high=3, size=(4,)) + 10.\n\n# Made into event onset times\ne = np.cumsum(isis)\n\n# Make event onsets into functions of time convolved with the spline functions.\nevent_funcs = [utils.events(e, f=fn) for fn in bsp_fns]\n\n# Put into a formula.\nf = Formula(event_funcs)\n\n# The design matrix\nX = f.design(tvals, return_float=True)\n\n# Show the design matrix as line plots\nplt.plot(X[:,0])\nplt.plot(X[:,1])\nplt.plot(X[:,2])\nplt.xlabel('time (s)')\nplt.title('B spline used as bases for an FIR response model')\nplt.show()\n", "from __future__ import absolute_import\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration('fmristat', parent_package, top_path)\n\n config.add_subpackage('tests')\n\n return config\n\nif __name__ == '__main__':\n from 
numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n", "#!/usr/bin/env python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\nfrom __future__ import print_function\n__doc__ = \"\"\"\nExample of a one-sample t-test using the GLM formalism.\nThis script takes individual contrast images and masks and runs a simple GLM.\nThis can be readily generalized to any design matrix.\n\nThis particular example shows the statical map of a contrast\nrelated to a computation task\n(subtraction of computation task minus sentence reading/listening).\n\nNeeds matplotlib.\n\nAuthor : Bertrand Thirion, 2012\n\"\"\"\nprint(__doc__)\n\n#autoindent\nfrom os import mkdir, getcwd, path\n\nimport numpy as np\n\ntry:\n import matplotlib.pyplot as plt\nexcept ImportError:\n raise RuntimeError(\"This script needs the matplotlib library\")\n\nfrom nibabel import load, concat_images, save, Nifti1Image\n\nfrom nipy.labs.mask import intersect_masks\nfrom nipy.modalities.fmri.glm import FMRILinearModel\nfrom nipy.labs.viz import plot_map, cm\n\n# Local import\nfrom get_data_light import DATA_DIR, get_second_level_dataset\n\n# Get the data\nn_subjects = 12\nn_beta = 29\ndata_dir = path.join(DATA_DIR, 'group_t_images')\nmask_images = [path.join(data_dir, 'mask_subj%02d.nii' % n)\n for n in range(n_subjects)]\n\nbetas = [path.join(data_dir, 'spmT_%04d_subj_%02d.nii' % (n_beta, n))\n for n in range(n_subjects)]\n\nmissing_files = np.array([not path.exists(m) for m in mask_images + betas])\nif missing_files.any():\n get_second_level_dataset()\n\nwrite_dir = path.join(getcwd(), 'results')\nif not path.exists(write_dir):\n mkdir(write_dir)\n\n# Compute a population-level mask as the intersection of individual masks\ngrp_mask = Nifti1Image(intersect_masks(mask_images).astype(np.int8),\n load(mask_images[0]).get_affine())\n\n# concatenate the individual images\nfirst_level_image = concat_images(betas)\n\n# set the model\ndesign_matrix = np.ones(len(betas))[:, np.newaxis] # only the intercept\ngrp_model = FMRILinearModel(first_level_image, design_matrix, grp_mask)\n\n# GLM fitting using ordinary least_squares\ngrp_model.fit(do_scaling=False, model='ols')\n\n# specify and estimate the contrast\ncontrast_val = np.array(([[1]])) # the only possible contrast !\nz_map, = grp_model.contrast(contrast_val, con_id='one_sample', output_z=True)\n\n# write the results\nsave(z_map, path.join(write_dir, 'one_sample_z_map.nii'))\n\n# look at the result\nvmax = max(- z_map.get_data().min(), z_map.get_data().max())\nvmin = - vmax\nplot_map(z_map.get_data(), z_map.get_affine(),\n cmap=cm.cold_hot,\n vmin=vmin,\n vmax=vmax,\n threshold=3.,\n black_bg=True)\nplt.savefig(path.join(write_dir, '%s_z_map.png' % 'one_sample'))\nplt.show()\nprint(\"Wrote all the results in directory %s\" % write_dir)\n", "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nUtilities for one sample t-tests\n\"\"\"\nfrom __future__ import absolute_import\n\n__docformat__ = 'restructuredtext'\n\nimport numpy as np\n\nfrom ..utils.matrices import pos_recipr\n\ndef estimate_mean(Y, sd):\n \"\"\" Estimate the mean of a sample given information about\n the standard deviations of each entry.\n\n Parameters\n ----------\n Y : ndarray\n Data for which mean is to be estimated. 
Should have shape[0] ==\n number of subjects.\n sd : ndarray\n Standard deviation (subject specific) of the data for which the\n mean is to be estimated. Should have shape[0] == number of\n subjects.\n\n Returns\n -------\n value : dict\n This dictionary has keys ['effect', 'scale', 't', 'resid', 'sd']\n \"\"\"\n nsubject = Y.shape[0]\n squeeze = False\n if Y.ndim == 1:\n Y = Y.reshape(Y.shape[0], 1)\n squeeze = True\n\n _stretch = lambda x: np.multiply.outer(np.ones(nsubject), x)\n\n W = pos_recipr(sd**2)\n if W.shape in [(), (1,)]:\n W = np.ones(Y.shape) * W\n W.shape = Y.shape\n\n # Compute the mean using the optimal weights\n effect = (Y * W).sum(0) / W.sum(0)\n resid = (Y - _stretch(effect)) * np.sqrt(W)\n\n scale = np.add.reduce(np.power(resid, 2), 0) / (nsubject - 1)\n var_total = scale * pos_recipr(W.sum(0))\n\n value = {}\n value['resid'] = resid\n value['effect'] = effect\n value['sd'] = np.sqrt(var_total)\n value['t'] = value['effect'] * pos_recipr(value['sd'])\n value['scale'] = np.sqrt(scale)\n\n if squeeze:\n for key, val in value.items():\n value[key] = np.squeeze(val)\n return value\n\ndef estimate_varatio(Y, sd, df=None, niter=10):\n \"\"\" Estimate variance fixed/random effects variance ratio\n\n In a one-sample random effects problem, estimate\n the ratio between the fixed effects variance and\n the random effects variance.\n\n Parameters\n ----------\n\n Y : np.ndarray\n Data for which mean is to be estimated.\n Should have shape[0] == number of subjects.\n sd : array\n Standard deviation (subject specific) \n of the data for which the mean is to be estimated.\n Should have shape[0] == number of subjects.\n df : int or None, optional\n If supplied, these are used as weights when\n deriving the fixed effects variance. Should have\n length == number of subjects.\n niter : int, optional\n Number of EM iterations to perform (default 10)\n\n Returns\n -------\n value : dict\n This dictionary has keys ['fixed', 'ratio', 'random'], where\n 'fixed' is the fixed effects variance implied by the input\n parameter 'sd'; 'random' is the random effects variance and\n 'ratio' is the estimated ratio of variances: 'random'/'fixed'.\n \"\"\"\n nsubject = Y.shape[0]\n squeeze = False\n if Y.ndim == 1:\n Y = Y.reshape(Y.shape[0], 1)\n squeeze = True\n _stretch = lambda x: np.multiply.outer(np.ones(nsubject), x)\n\n W = pos_recipr(sd**2)\n if W.shape in [(), (1,)]:\n W = np.ones(Y.shape) * W\n W.shape = Y.shape\n\n S = 1. / W\n R = Y - np.multiply.outer(np.ones(Y.shape[0]), Y.mean(0))\n sigma2 = np.squeeze((R**2).sum(0)) / (nsubject - 1)\n\n Sreduction = 0.99\n minS = S.min(0) * Sreduction\n Sm = S - _stretch(minS)\n\n for _ in range(niter):\n Sms = Sm + _stretch(sigma2)\n W = pos_recipr(Sms)\n Winv = pos_recipr(W.sum(0))\n mu = Winv * (W*Y).sum(0)\n R = W * (Y - _stretch(mu))\n ptrS = 1 + (Sm * W).sum(0) - (Sm * W**2).sum(0) * Winv\n sigma2 = np.squeeze((sigma2 * ptrS + (sigma2**2) * (R**2).sum(0)) / nsubject)\n sigma2 = sigma2 - minS\n if df is None:\n df = np.ones(nsubject)\n df.shape = (1, nsubject)\n _Sshape = S.shape\n S.shape = (S.shape[0], np.product(S.shape[1:]))\n\n value = {}\n value['fixed'] = (np.dot(df, S) / df.sum()).reshape(_Sshape[1:])\n value['ratio'] = np.nan_to_num(sigma2 / value['fixed'])\n value['random'] = sigma2\n\n if squeeze:\n for key in list(value):\n value[key] = np.squeeze(value[key])\n return value\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.rollaxis", "numpy.abs", "numpy.get_printoptions", "numpy.linalg.inv", "numpy.reshape", "numpy.eye", "numpy.set_printoptions", "numpy.sort", "numpy.all", "numpy.concatenate", "numpy.ceil", "numpy.diff", "numpy.linalg.qr", "numpy.array" ], [ "numpy.greater", "numpy.linspace", "matplotlib.pyplot.title", "numpy.arange", "numpy.less", "numpy.cumsum", "numpy.dtype", "matplotlib.pyplot.plot", "numpy.random.uniform", "numpy.less_equal", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show" ], [ "numpy.distutils.misc_util.Configuration" ], [ "numpy.array", "matplotlib.pyplot.show" ], [ "numpy.dot", "numpy.product", "numpy.sqrt", "numpy.power", "numpy.squeeze", "numpy.nan_to_num", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jmitz/daymetDataExtraction
[ "40b393a8ba31d1969e197cf97dd7096147bdc2de" ]
[ "daymetExtractor.py" ]
[ "# -------------------------------------------------------------------------------\n# Name: daymetFileDownload.py\n# Purpose:\n#\n# Author: jmitzelfelt\n#\n# Created: 7/2/2017\n# Copyright: (c) jmitzelfelt 2017\n# Licence: Unlicense\n# -------------------------------------------------------------------------------\n\nimport numpy as np\nfrom osgeo import gdal\nfrom osgeo import gdalconst\nfrom osgeo import osr\nimport os\nimport re\nimport fnmatch\nimport datetime\n\n\ndef fileRetrieve(inDir, inFilters):\n returnFileInfos = []\n includes = r'|'.join([fnmatch.translate('*.' + x.upper()) for x in inFilters])\n for root, dirs, files in os.walk(inDir):\n files = [(root, f) for f in files]\n files = [f for f in files if re.match(includes, f[1].upper())]\n returnFileInfos.extend(files)\n return returnFileInfos\n\n\ndef filterFileInfoList(inFileInfos, inDirFilters, inFileFilter):\n dirIncludes = r'|'.join([fnmatch.translate('*' + x.upper() + '*') for x in inDirFilters])\n fileInclude = fnmatch.translate('*' + inFileFilter.upper() + '*')\n returnFileInfos = [f for f in inFileInfos if re.match(dirIncludes, f[0].upper()) and re.match(fileInclude, f[1].upper())]\n return returnFileInfos\n\n\ndef getFileInfoYear(inFileInfo):\n return re.search(r'\\d{4}', inFileInfo[1]).group()\n\n\ndef sortFileInfoList(inFileInfo, inDataTypes):\n year = getFileInfoYear(inFileInfo)\n testType = '0'\n if len(inDataTypes) > 1:\n for DataType in inDataTypes:\n if inFileInfo[0].find(DataType) >= 0:\n testType = str(inDataTypes.index(DataType))\n break\n return year + testType\n\n\n# Adapted from http://stackoverflow.com/questions/10454316/how-to-project-and-resample-a-grid-to-match-another-grid-with-gdal-python\ndef projectFile(\n inDataLayer,\n inMaskLayer,\n inOutLayer='',\n inDriverType=\"MEM\"\n ):\n dataLayer = gdal.Open(inDataLayer, gdalconst.GA_ReadOnly)\n dataProj = osr.SpatialReference()\n dataProj.ImportFromWkt(dataLayer.GetProjection())\n maskLayer = gdal.Open(inMaskLayer, gdalconst.GA_ReadOnly)\n maskProj = osr.SpatialReference()\n maskProj.ImportFromWkt(maskLayer.GetProjection())\n maskGeoTrans = maskLayer.GetGeoTransform()\n xSize = maskLayer.RasterXSize\n ySize = maskLayer.RasterYSize\n bandCount = dataLayer.RasterCount\n destLayer = gdal.GetDriverByName(inDriverType).Create(inOutLayer, xSize, ySize, bandCount, gdalconst.GDT_Float32)\n destLayer.SetGeoTransform(maskGeoTrans)\n destLayer.SetProjection(maskProj.ExportToWkt())\n gdal.ReprojectImage(dataLayer, destLayer, dataProj.ExportToWkt(), maskProj.ExportToWkt(), gdalconst.GRA_NearestNeighbour)\n return destLayer\n\n\ndef main(\n inDataDir,\n inMaskLayer,\n inOutDataDir,\n inDataParms=['tmax', 'tmin', 'prcp'],\n inDataTypes={'daily': ['Daily'], 'monthly': ['Monthly', 'Annual']},\n inFileExts=['nc4']\n ):\n\n fileInfos = fileRetrieve(inDataDir, inFileExts)\n maskData = gdal.Open(inMaskLayer)\n maskArray = maskData.GetRasterBand(1).ReadAsArray().ravel()\n for DataType in inDataTypes:\n for DataParm in inDataParms:\n outDataFile = '{}{}_DAYMET_GYE_{}.csv'.format(inOutDataDir, DataType.upper(), DataParm.upper())\n with open(outDataFile, 'w+') as outputFile:\n print('Writing {}'.format(outDataFile))\n # Write dataFile Header\n outputFile.write('point number ->,')\n if DataType == 'daily':\n outputFile.write('month,year,')\n maskArray.tofile(outputFile, sep=',')\n outputFile.write('\\n')\n filteredFileInfos = sorted(filterFileInfoList(fileInfos, inDataTypes[DataType], DataParm), key=lambda fileInfo: sortFileInfoList(fileInfo, inDataTypes[DataType]))\n for 
FileInfo in filteredFileInfos:\n dataLayer = '/'.join(FileInfo)\n print(dataLayer)\n rasterData = projectFile(dataLayer, inMaskLayer)\n rasterCount = rasterData.RasterCount\n for band in range(1, rasterCount + 1):\n bandArray = rasterData.GetRasterBand(band).ReadAsArray().ravel().tolist()\n if DataParm == 'prcp':\n roundedArray = np.round(np.multiply(bandArray, 100), 1)\n else:\n roundedArray = np.round(bandArray, 2)\n\n year = getFileInfoYear(FileInfo)\n\n if DataType == 'daily':\n\n dataLine = (datetime.datetime(int(year), 1, 1) + datetime.timedelta(band - 1)).strftime('%m/%d/%Y,%m,%Y,')\n\n else:\n\n month = band if (FileInfo[0].find('Monthly') > -1) else 14\n dataLine = '{}_{}{},'.format(DataParm, year, str(month).zfill(2))\n\n print(dataLine)\n outputFile.write(dataLine)\n roundedArray.tofile(outputFile, sep=',')\n outputFile.write('\\n')\n outputFile.close()\n del outputFile\n\n\nmain('F:/daymet/', 'F:/daymetDataExtraction/daymetmask', 'F:/daymetData/')\n" ]
[ [ "numpy.round", "numpy.multiply" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
adgaudio/ietk-ret
[ "a93328f0a787fadf20817c75b6c5e0e33d39a720", "a93328f0a787fadf20817c75b6c5e0e33d39a720" ]
[ "ietk/methods/sharpen_img.py", "ietk/methods/dehaze.py" ]
[ "import cv2.ximgproc\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport scipy.ndimage as ndi\nimport logging\n\nfrom ietk.data import IDRiD\nfrom ietk import util\n\nlog = logging.getLogger(__name__)\n\n\ndef check_and_fix_nan(A, replacement_img):\n nanmask = np.isnan(A)\n if nanmask.any():\n log.warn(\"sharpen: guided filter blurring operation or laplace filter returned nans. your input image has extreme values\")\n A[nanmask] = replacement_img[nanmask]\n return A\n\n\ndef sharpen(img, bg=None, t='laplace', blur_radius=30, blur_guided_eps=1e-8,\n use_guidedfilter='if_large_img'):\n \"\"\"Use distortion model to deblur image. Equivalent to usharp mask:\n\n 1/t * img - (1-1/t) * blurry(img)\n\n Then, apply guided filter to smooth result but preserve edges.\n\n img - image to sharpen, assume normalized in [0,1]\n bg - image background\n t - the transmission map (inverse amount of sharpening)\n can be scalar, matrix of same (h, w) as img, or 3 channel image.\n By default, use a multi-channel sharpened laplace filter on a smoothed\n image with 10x10 kernel. For enhancing fine details in large images.\n use_guidedfilter - a bool or the string 'if_large_img' determining whether\n to clean up the resulting sharpened image. If the min image dimension is\n less that 1500, this cleanup operation may blur the\n image, ruining its quality.\n \"\"\"\n if bg is None:\n bg = np.zeros(img.shape[:2], dtype='bool')\n else:\n img = img.copy()\n img[bg] = 0\n # assert np.isnan(img).sum() == 0\n # assert np.isnan(t).sum() == 0\n\n # blurring (faster than ndi.gaussian_filter(I)\n A = cv2.ximgproc.guidedFilter(\n # radiance.astype('float32'),\n img.astype('float32'),\n img.astype('float32'),\n blur_radius, blur_guided_eps)\n\n if t == 'laplace':\n t = 1-util.norm01(sharpen(ndi.morphological_laplace(\n img, (2,2,1), mode='wrap'), bg, 0.15), bg)\n # t = 1-util.norm01(ndi.morphological_laplace(\n # img, (2,2,1), mode='wrap'), bg)\n\n # todo note: laplace t is 01 normalized. should we keep the max\n # and just normalize the lower range (or vice versa or something)?\n\n # note2: if laplace is all zeros (due to bad input img), t will be all nan.\n\n\n if len(np.shape(t)) + 1 == len(img.shape):\n t_refined = np.expand_dims(t, -1).astype('float')\n else:\n t_refined = t\n if np.shape(t):\n t_refined[bg] = 1 # ignore background, but fix division by zero\n J = (\n img.astype('float')-A) / np.maximum(1e-8, np.maximum(t_refined, np.min(t_refined)/2)) + A\n # assert np.isnan(J).sum() == 0\n if bg is not None:\n J[bg] = 0\n\n # applying a guided filter for smoothing image at this point can be\n # problematic to the image quality, significantly blurring it.\n if use_guidedfilter == 'if_large_img':\n # note: at some point, find a better threshold? 
This works.\n use_guidedfilter = min(J.shape[0], J.shape[1]) >= 1500\n if not use_guidedfilter:\n J = check_and_fix_nan(J, img)\n return J\n\n r2 = cv2.ximgproc.guidedFilter(\n img.astype('float32'),\n J.astype('float32'),\n 2, 1e-8)\n r2 = check_and_fix_nan(r2, img)\n if bg is not None:\n r2[bg] = 0\n return r2\n\n\nif __name__ == \"__main__\":\n import os\n os.makedirs('./data/plots', exist_ok=True)\n dset = IDRiD('./data/IDRiD_segmentation')\n img, labels = dset['IDRiD_26']\n # img_id, img, labels = dset.sample()\n # print(img_id)\n # he = labels['HE']\n # ma = labels['MA']\n # ex = labels['EX']\n # se = labels['SE']\n # od = labels['OD']\n # set background pure black.\n bg = util.get_background(img)\n img[bg] = 0\n\n J = sharpen(img, bg, .15)\n J_nogf = sharpen(img, bg, .15, use_guidedfilter=False)\n J_laplace = sharpen(img, bg)\n\n # f, axs = plt.subplots(1, 2, figsize=(15, 5))\n f, axs = plt.subplots(1, 3, figsize=(15, 5))\n axs[0].imshow(img)\n axs[0].set_title('Unmodified image', fontsize=22)\n axs[1].imshow(J)\n axs[1].set_title('Sharpened, Algo. 1', fontsize=22)\n # axs[2].imshow(J_nogf)\n # axs[2].set_title('Sharpened without guided filter')\n axs[2].imshow(J_laplace)\n axs[2].set_title('Sharpened, Algo. 2', fontsize=22)\n # axs[2].imshow(J_nogf)\n # axs[2].set_title('Sharpened without guided filter')\n [ax.axis('off') for ax in axs]\n f.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9, bottom=0.1)\n f.savefig('./data/plots/sharpen_fundus.png', bbox_inches='tight')\n\n # plt.figure(num=4) ; plt.imshow(util.norm01(r2, bg))\n # plt.figure(num=5) ; plt.imshow(r2.clip(0, 1))\n\n plt.show()\n # import evaluation as EV\n # import metric\n\n # EV.empirical_evaluation(\n # {'img': img,\n # 'radiance': J,\n # 'r2': r2,\n # 'norm01': util.norm01(J, bg),\n # 'clip': J.clip(0, 1),\n # 'r2norm01': util.norm01(r2, bg),\n # 'r2clip': r2.clip(0, 1),\n # },\n # labels, metric.eval_methods, bg, num_procs=8)\n\n\n# y = util.norm01(sharpen(ndi.gaussian_laplace((A/2+X/2), (10,10,1)).mean(-1, keepdims=0), bg[:,:,0]), bg[:,:,0])\n# sh(sharpen(Z, bg[:,:,0], (1-y[:,:,np.newaxis])))\n", "#!/usr/bin/env python\nimport scipy.ndimage as ndi\nimport scipy.stats as stats\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport PIL\nimport glob\nimport cv2\n\nfrom ietk.util import get_background\n\n\ndef get_dark_channel(\n img: np.ndarray, filter_size: int):\n \"\"\"Compute the dark channel of given image.\n This is the pixel-wise min along rgb channels for\n a neighborhood of fixed size around the pixel. 
I use a circular footprint\n rather than a rectangle.\n\n reference: http://kaiminghe.com/publications/pami10dehaze.pdf\n\n img: np.ndarray of size (h, x, 3)\n filter_size: integer\n \"\"\"\n _tmp = stats.norm.pdf(np.linspace(0, 1, filter_size), .5, .25/2)\n dark_channel = ndi.minimum_filter(\n img.min(-1), footprint=np.log(np.outer(_tmp, _tmp)) > -6)\n return dark_channel\n\n\ndef get_atmosphere(img: np.ndarray, dark: np.ndarray):\n \"\"\"Given an image of shape (h, w, 3) and a dark channel of shape (h, w),\n compute and return the atmosphere, a vector of shape (3, )\n\n Consider the 10% brightest pixels in dark channel, look up their\n intensities in original image and use the brightest intensity found from\n that set.\n \"\"\"\n # top 10\\% of brightest pixels in dark channel\n q = np.quantile(dark.ravel(), 0.999) - 1e-6\n mask = dark >= q\n rv = np.array([img[:, :, ch][mask].max() for ch in range(3)])\n assert img.shape[2] == 3 # sanity check\n rv += 1 - rv.max() # seems to make img brighter\n return rv\n\n\ndef dehaze(img, dark_channel_filter_size=15, guided_filter_radius=50,\n guided_eps=1e-2):\n img = img / img.max()\n darkch_unnorm = get_dark_channel(img, dark_channel_filter_size)\n A = get_atmosphere(img, darkch_unnorm).reshape(1, 1, 3)\n # atmosphere_upper_thresh = 220\n # A = np.maximum(A, atmosphere_upper_thresh)\n\n t_unrefined = 1 - get_dark_channel(img / A, dark_channel_filter_size)\n # t_unrefined = np.maximum(t_unrefined, 0.4)\n\n # refine dark channel using guided filter (ie like a bilateral filter or\n # anisotropic diffusion but faster)\n t_refined = cv2.ximgproc.guidedFilter(\n img.astype('float32'),\n t_unrefined.astype('float32'), guided_filter_radius, guided_eps)\n t_refined = t_refined.clip(0.0001, 1) # guided filter can make slightly >1\n\n radiance = ( # Eq. 22 of paper\n img.astype('float')-A) \\\n / np.expand_dims(t_refined, -1).astype('float') \\\n + A\n # radiance = norm01(radiance)\n # radiance = radiance / radiance.max()\n radiance = radiance.clip(0, 1)\n return locals()\n\n\ndef illumination_correction(img, dark_channel_filter_size=25,\n guided_filter_radius=80, guided_eps=1e-2, A=1):\n \"\"\"Illumination correction is basically just inverted dehazing\"\"\"\n img = img / img.max()\n\n # notice there is no \"1 - get_dark...\" in the equation here.\n t_unrefined = get_dark_channel((1-img) / A, dark_channel_filter_size)\n # invert image after guided filtering\n t_refined = 1-cv2.ximgproc.guidedFilter(\n 1-img.astype('float32'),\n t_unrefined.astype('float32'), guided_filter_radius, guided_eps)\n t_refined = t_refined.clip(0.00001, 1) # guided filter can make slightly >1\n # invert the inverted image when recovering radiance\n radiance = 1 - (((1-img.astype('float')) - A)/np.expand_dims(t_refined, -1) + A)\n # radiance = norm01(radiance)\n # radiance = radiance / radiance.max()\n # radiance = radiance.clip(0, 1)\n return locals()\n\n\ndef dehaze_from_fp(fp):\n with PIL.Image.open(fp) as img:\n img.load()\n img = np.array(img)/255\n # remove background, assuming retinal fundus image\n background = get_background(img)\n img[background] = 1\n return dehaze(img)\n\n\ndef illuminate_from_fp(fp):\n with PIL.Image.open(fp) as img:\n img.load()\n img = np.array(img)/255\n return illuminate_dehaze(img)\n\n\ndef illuminate_dehaze(img):\n \"\"\"\n Perform illumination correction to remove shadows followed by dehazing.\n Correctly remove background\n Return a tuple of dicts. The first dict is output of illumination\n correction. 
Second dict is output from dehazing.\n \"\"\"\n # compute a background mask to clean up noise from the guided filter\n background = get_background(img)\n img[background] = 1\n\n d = illumination_correction(img)\n # reset the background\n d['radiance'][background] = 1/255\n\n d2 = dehaze(d['radiance'])\n\n d['background'] = background\n return d, d2\n\n\nif __name__ == \"__main__\":\n # fp = '../../data/tiananmen1.png'\n # img = plt.imread(fp)\n # dct = dehaze(img)\n # img2 = dct['radiance']\n # print(img2.min(), img2.max())\n # img2 = (img2 - img2.min()) / (img2.max() - img2.min())\n # plt.imshow(np.clip(img2, 0, 1))\n # plt.imshow(np.clip(img2, 0, 1))\n # import sys ; sys.exit()\n\n fps_healthy = glob.glob('./data/messidor_grade1/*/*')\n fps_grade3 = glob.glob('./data/messidor_grade3/*/*')\n\n # for fp in fps_healthy[:10]:\n # illuminate_from_fp(fp)\n # for fp in fps_grade3[:10]:\n # illuminate_from_fp(fp)\n\n # testing: check that the methods work\n fp = fps_grade3[0]\n # # fp = '../../data/tiananmen1.png'\n # # fp = '../../data/forest.jpg'\n with PIL.Image.open(fp) as img:\n img.load()\n img = np.array(img)/255\n d, d2 = illuminate_from_fp(fp)\n f, axs = plt.subplots(1, 3)\n axs[0].imshow(img)\n axs[0].set_title(\"Original Image\")\n axs[1].imshow(d['t_refined'], cmap='Greys')\n axs[1].set_title(\"Illumination Depth Map\")\n axs[2].imshow(d2['t_refined'], cmap='Greys')\n axs[2].set_title(\"Dehaze Depth Map\")\n f.suptitle('Depth maps for given input image')\n\n illuminated = d['radiance']\n dehazed = d2['radiance']\n f, axs = plt.subplots(1, 3)\n axs[0].imshow(img)\n axs[0].set_title('Original Image')\n axs[1].imshow(illuminated)\n axs[1].set_title(\"Illuminated\")\n axs[2].imshow(dehazed)\n axs[2].set_title(\"Dehazed\")\n f.suptitle('Illumination Correction Pipeline')\n\n\n # dehaze\n d = dehaze_from_fp(fp)\n f, axs = plt.subplots(1, 3)\n axs[0].imshow(img)\n axs[0].set_title(\"Original Image\")\n axs[1].imshow(d['t_refined'], cmap='Greys')\n axs[1].set_title(\"Dehaze Depth Map\")\n axs[2].imshow(d['radiance'])\n axs[2].set_title(\"Dehazed\")\n f.suptitle('Dehazing the image')\n # import sys ; sys.exit()\n plt.show()\n" ]
[ [ "numpy.expand_dims", "numpy.min", "numpy.isnan", "matplotlib.pyplot.subplots", "numpy.shape", "scipy.ndimage.morphological_laplace", "matplotlib.pyplot.show", "numpy.zeros" ], [ "numpy.expand_dims", "numpy.linspace", "matplotlib.pyplot.subplots", "numpy.outer", "numpy.array", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dk25021999/mmf
[ "218057265a3fc175f656b5ebe8fb44ef5ccca2e9", "218057265a3fc175f656b5ebe8fb44ef5ccca2e9", "c8f47a23b85a87d14616c2f53e81693a25ea929a" ]
[ "mmf/utils/env.py", "tests/datasets/test_iteration_strategies.py", "tools/scripts/features/lmdb_conversion.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport glob\nimport importlib\nimport logging\nimport os\nimport random\nimport sys\nfrom datetime import datetime\n\nimport numpy as np\nimport torch\nfrom omegaconf import OmegaConf, open_dict\n\n\ndef set_seed(seed):\n if seed:\n if seed == -1:\n # From detectron2\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)\n\n return seed\n\n\ndef import_user_module(user_dir: str):\n \"\"\"Given a user dir, this function imports it as a module.\n\n This user_module is expected to have an __init__.py at its root.\n You can use import_files to import your python files easily in\n __init__.py\n\n Args:\n user_dir (str): directory which has to be imported\n \"\"\"\n from mmf.common.registry import registry\n from mmf.utils.general import get_absolute_path # noqa\n\n logger = logging.getLogger(__name__)\n if user_dir:\n if registry.get(\"__mmf_user_dir_imported__\", no_warning=True):\n logger.info(f\"User dir {user_dir} already imported. Skipping.\")\n return\n\n # Allow loading of files as user source\n if user_dir.endswith(\".py\"):\n user_dir = user_dir[:-3]\n\n dot_path = \".\".join(user_dir.split(os.path.sep))\n # In case of abspath which start from \"/\" the first char\n # will be \".\" which turns it into relative module which\n # find_spec doesn't like\n if os.path.isabs(user_dir):\n dot_path = dot_path[1:]\n\n try:\n dot_spec = importlib.util.find_spec(dot_path)\n except ModuleNotFoundError:\n dot_spec = None\n abs_user_dir = get_absolute_path(user_dir)\n module_parent, module_name = os.path.split(abs_user_dir)\n\n # If dot path is found in sys.modules, or path can be directly\n # be imported, we don't need to play jugglery with actual path\n if dot_path in sys.modules or dot_spec is not None:\n module_name = dot_path\n else:\n user_dir = abs_user_dir\n\n logger.info(f\"Importing from {user_dir}\")\n if module_name != dot_path:\n # Since dot path hasn't been found or can't be imported,\n # we can try importing the module by changing sys path\n # to the parent\n sys.path.insert(0, module_parent)\n\n importlib.import_module(module_name)\n sys.modules[\"mmf_user_dir\"] = sys.modules[module_name]\n\n # Register config for user's model and dataset config\n # relative path resolution\n config = registry.get(\"config\")\n if config is None:\n registry.register(\n \"config\", OmegaConf.create({\"env\": {\"user_dir\": user_dir}})\n )\n else:\n with open_dict(config):\n config.env.user_dir = user_dir\n\n registry.register(\"__mmf_user_dir_imported__\", True)\n\n\ndef import_files(file_path: str, module_name: str = None):\n \"\"\"The function imports all of the files present in file_path's directory.\n This is useful for end user in case they want to easily import files without\n mentioning each of them in their __init__.py. 
module_name if specified\n is the full path to module under which all modules will be imported.\n\n my_project/\n my_models/\n my_model.py\n __init__.py\n\n Contents of __init__.py\n\n ```\n from mmf.utils.env import import_files\n\n import_files(__file__, \"my_project.my_models\")\n ```\n\n This will then allow you to import `my_project.my_models.my_model` anywhere.\n\n Args:\n file_path (str): Path to file in whose directory everything will be imported\n module_name (str): Module name if this file under some specified structure\n \"\"\"\n for file in os.listdir(os.path.dirname(file_path)):\n if file.endswith(\".py\") and not file.startswith(\"_\"):\n import_name = file[: file.find(\".py\")]\n if module_name:\n importlib.import_module(f\"{module_name}.{import_name}\")\n else:\n importlib.import_module(f\"{import_name}\")\n\n\ndef setup_imports():\n from mmf.common.registry import registry\n\n # First, check if imports are already setup\n has_already_setup = registry.get(\"imports_setup\", no_warning=True)\n if has_already_setup:\n return\n # Automatically load all of the modules, so that\n # they register with registry\n root_folder = registry.get(\"mmf_root\", no_warning=True)\n\n if root_folder is None:\n root_folder = os.path.dirname(os.path.abspath(__file__))\n root_folder = os.path.join(root_folder, \"..\")\n\n environment_mmf_path = os.environ.get(\"MMF_PATH\", os.environ.get(\"PYTHIA_PATH\"))\n\n if environment_mmf_path is not None:\n root_folder = environment_mmf_path\n\n registry.register(\"pythia_path\", root_folder)\n registry.register(\"mmf_path\", root_folder)\n\n trainer_folder = os.path.join(root_folder, \"trainers\")\n trainer_pattern = os.path.join(trainer_folder, \"**\", \"*.py\")\n datasets_folder = os.path.join(root_folder, \"datasets\")\n datasets_pattern = os.path.join(datasets_folder, \"**\", \"*.py\")\n model_folder = os.path.join(root_folder, \"models\")\n common_folder = os.path.join(root_folder, \"common\")\n modules_folder = os.path.join(root_folder, \"modules\")\n model_pattern = os.path.join(model_folder, \"**\", \"*.py\")\n common_pattern = os.path.join(common_folder, \"**\", \"*.py\")\n modules_pattern = os.path.join(modules_folder, \"**\", \"*.py\")\n\n importlib.import_module(\"mmf.common.meter\")\n\n files = (\n glob.glob(datasets_pattern, recursive=True)\n + glob.glob(model_pattern, recursive=True)\n + glob.glob(trainer_pattern, recursive=True)\n + glob.glob(common_pattern, recursive=True)\n + glob.glob(modules_pattern, recursive=True)\n )\n\n for f in files:\n f = os.path.realpath(f)\n if f.endswith(\".py\") and not f.endswith(\"__init__.py\"):\n splits = f.split(os.sep)\n import_prefix_index = 0\n for idx, split in enumerate(splits):\n if split == \"mmf\":\n import_prefix_index = idx + 1\n file_name = splits[-1]\n module_name = file_name[: file_name.find(\".py\")]\n module = \".\".join([\"mmf\"] + splits[import_prefix_index:-1] + [module_name])\n importlib.import_module(module)\n\n registry.register(\"imports_setup\", True)\n\n\ndef setup_torchaudio():\n # required for soundfile\n try:\n import libfb.py.ctypesmonkeypatch\n\n libfb.py.ctypesmonkeypatch.install()\n except ImportError:\n pass\n\n\ndef teardown_imports():\n from mmf.common.registry import registry\n\n registry.unregister(\"pythia_path\")\n registry.unregister(\"mmf_path\")\n registry.unregister(\"imports_setup\")\n", "# Copyright (c) Facebook, Inc. 
and its affiliates.\n\nimport unittest\nfrom collections import Counter\n\nimport numpy as np\nimport torch\nfrom mmf.datasets import iteration_strategies\nfrom tests.test_utils import NumbersDataset\n\n\nclass TestIterationStrategies(unittest.TestCase):\n NUM_DATALOADERS = 5\n\n def setUp(self):\n np.random.seed(1234)\n\n def _build_dataloaders(self):\n dataloaders = {}\n for idx in range(self.NUM_DATALOADERS):\n dataloaders[f\"numbers_{idx}\"] = torch.utils.data.DataLoader(\n dataset=NumbersDataset((idx + 1) * (10 ** idx)), num_workers=0\n )\n return dataloaders\n\n def test_constant_iteration_strategy(self):\n dataloaders = self._build_dataloaders()\n strategy = iteration_strategies.ConstantIterationStrategy.from_params(\n dataloaders=dataloaders\n )\n\n counter = Counter()\n count = 100\n for _ in range(count):\n counter[strategy()] += 1\n\n self.assertEqual(counter[0], count)\n for idx in range(1, self.NUM_DATALOADERS):\n self.assertEqual(counter[idx], 0)\n\n strategy = iteration_strategies.ConstantIterationStrategy.from_params(\n dataloaders=dataloaders, idx=1\n )\n\n counter = Counter()\n count = 100\n for _ in range(count):\n counter[strategy()] += 1\n\n self.assertEqual(counter[1], count)\n for idx in range(0, self.NUM_DATALOADERS):\n if idx != 1:\n self.assertEqual(counter[idx], 0)\n\n def test_round_robin_strategy(self):\n dataloaders = self._build_dataloaders()\n strategy = iteration_strategies.RoundRobinIterationStrategy.from_params(\n dataloaders=dataloaders\n )\n\n counter = Counter()\n count = 100\n for _ in range(count):\n counter[strategy()] += 1\n\n for idx in range(0, self.NUM_DATALOADERS):\n self.assertEqual(counter[idx], count // self.NUM_DATALOADERS)\n\n strategy = iteration_strategies.RoundRobinIterationStrategy.from_params(\n dataloaders=dataloaders, start_idx=2\n )\n counter = Counter()\n count = 100\n for _ in range(count):\n counter[strategy()] += 1\n\n for idx in range(0, self.NUM_DATALOADERS):\n self.assertEqual(counter[idx], count // self.NUM_DATALOADERS)\n\n def test_random_strategy(self):\n dataloaders = self._build_dataloaders()\n strategy = iteration_strategies.RandomIterationStrategy.from_params(\n dataloaders=dataloaders\n )\n\n counter = Counter()\n count = 10000\n for _ in range(count):\n counter[strategy()] += 1\n\n for idx in range(0, self.NUM_DATALOADERS):\n self.assertTrue(counter[idx] <= 2100)\n self.assertTrue(counter[idx] >= 1900)\n\n def test_size_proportional_strategy(self):\n dataloaders = self._build_dataloaders()\n strategy = iteration_strategies.SizeProportionalIterationStrategy.from_params(\n dataloaders=dataloaders\n )\n\n counter = Counter()\n count = 10000\n for _ in range(count):\n counter[strategy()] += 1\n\n for idx in range(0, self.NUM_DATALOADERS):\n self.assertTrue(counter[idx] <= 10 ** idx)\n lower_limit = 10 ** (idx - 1)\n if idx == 0:\n lower_limit = 0\n self.assertTrue(counter[idx] >= lower_limit)\n\n def test_ratios_strategy(self):\n dataloaders = self._build_dataloaders()\n sampling_ratios = {}\n\n # Constant\n for idx in range(self.NUM_DATALOADERS):\n sampling_ratios[f\"numbers_{idx}\"] = 0\n sampling_ratios[\"numbers_0\"] = 1\n strategy = iteration_strategies.RatiosIterationStrategy.from_params(\n dataloaders, sampling_ratios=sampling_ratios\n )\n\n counter = Counter()\n count = 10000\n for _ in range(count):\n counter[strategy()] += 1\n\n self.assertEqual(counter[0], count)\n for idx in range(1, self.NUM_DATALOADERS):\n self.assertEqual(counter[idx], 0)\n\n for idx in range(self.NUM_DATALOADERS):\n 
sampling_ratios[f\"numbers_{idx}\"] = 1.0 / self.NUM_DATALOADERS\n\n strategy = iteration_strategies.RatiosIterationStrategy.from_params(\n dataloaders, sampling_ratios=sampling_ratios\n )\n\n count = 10000\n counter = Counter()\n for _ in range(count):\n counter[strategy()] += 1\n\n for idx in range(0, self.NUM_DATALOADERS):\n self.assertTrue(counter[idx] <= 2100)\n self.assertTrue(counter[idx] >= 1900)\n\n lens = sum(len(loader.dataset) for loader in dataloaders.values())\n for idx in range(self.NUM_DATALOADERS):\n sampling_ratios[f\"numbers_{idx}\"] = (\n len(dataloaders[f\"numbers_{idx}\"].dataset) / lens\n )\n\n strategy = iteration_strategies.RatiosIterationStrategy.from_params(\n dataloaders, sampling_ratios=sampling_ratios\n )\n\n count = 10000\n counter = Counter()\n for _ in range(count):\n counter[strategy()] += 1\n\n for idx in range(0, self.NUM_DATALOADERS):\n self.assertTrue(counter[idx] <= 10 ** idx)\n lower_limit = 10 ** (idx - 1)\n if idx == 0:\n lower_limit = 0\n self.assertTrue(counter[idx] >= lower_limit)\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport argparse\nimport glob\nimport os\nimport pickle\n\nimport lmdb\nimport numpy as np\nimport tqdm\nfrom mmf.utils.file_io import PathManager\n\n\nclass LMDBConversion:\n def __init__(self):\n self.args = self.get_parser().parse_args()\n\n def get_parser(self):\n parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument(\n \"--mode\",\n required=True,\n type=str,\n help=\"Mode can either be `convert` (for conversion of \\n\"\n + \"features to an LMDB file) or `extract` (extract \\n\"\n + \"raw features from a LMDB file)\",\n )\n parser.add_argument(\n \"--lmdb_path\", required=True, type=str, help=\"LMDB file path\"\n )\n parser.add_argument(\n \"--features_folder\", required=True, type=str, help=\"Features folder\"\n )\n return parser\n\n def convert(self):\n env = lmdb.open(self.args.lmdb_path, map_size=1099511627776)\n id_list = []\n all_features = glob.glob(\n os.path.join(self.args.features_folder, \"**\", \"*.npy\"), recursive=True\n )\n\n features = []\n for feature in all_features:\n if not feature.endswith(\"_info.npy\"):\n features.append(feature)\n\n with env.begin(write=True) as txn:\n for infile in tqdm.tqdm(features):\n reader = np.load(infile, allow_pickle=True)\n item = {}\n split = os.path.relpath(infile, self.args.features_folder).split(\n \".npy\"\n )[0]\n item[\"feature_path\"] = split\n key = split.encode()\n id_list.append(key)\n item[\"features\"] = reader\n info_file = infile.split(\".npy\")[0] + \"_info.npy\"\n if not os.path.isfile(info_file):\n txn.put(key, pickle.dumps(item))\n continue\n\n reader = np.load(info_file, allow_pickle=True)\n item[\"image_height\"] = reader.item().get(\"image_height\")\n item[\"image_width\"] = reader.item().get(\"image_width\")\n item[\"num_boxes\"] = reader.item().get(\"num_boxes\")\n item[\"objects\"] = reader.item().get(\"objects\")\n item[\"cls_prob\"] = reader.item().get(\"cls_prob\", None)\n item[\"bbox\"] = reader.item().get(\"bbox\")\n\n txn.put(key, pickle.dumps(item))\n\n txn.put(b\"keys\", pickle.dumps(id_list))\n\n def extract(self):\n os.makedirs(self.args.features_folder, exist_ok=True)\n env = lmdb.open(\n self.args.lmdb_path,\n max_readers=1,\n readonly=True,\n lock=False,\n readahead=False,\n meminit=False,\n )\n with env.begin(write=False) as txn:\n _image_ids = pickle.loads(txn.get(b\"keys\"))\n for img_id in tqdm.tqdm(_image_ids):\n item = pickle.loads(txn.get(img_id))\n img_id = 
img_id.decode(\"utf-8\")\n tmp_dict = {}\n tmp_dict[\"image_id\"] = img_id\n tmp_dict[\"bbox\"] = item[\"bbox\"]\n tmp_dict[\"num_boxes\"] = item[\"num_boxes\"]\n tmp_dict[\"image_height\"] = item[\"image_height\"]\n tmp_dict[\"image_width\"] = item[\"image_width\"]\n tmp_dict[\"objects\"] = item[\"objects\"]\n tmp_dict[\"cls_prob\"] = item[\"cls_prob\"]\n\n info_file_base_name = str(img_id) + \"_info.npy\"\n file_base_name = str(img_id) + \".npy\"\n\n path = os.path.join(self.args.features_folder, file_base_name)\n if PathManager.exists(path):\n continue\n info_path = os.path.join(self.args.features_folder, info_file_base_name)\n base_path = \"/\".join(path.split(\"/\")[:-1])\n PathManager.mkdirs(base_path)\n np.save(PathManager.open(path, \"wb\"), item[\"features\"])\n np.save(PathManager.open(info_path, \"wb\"), tmp_dict)\n\n def execute(self):\n if self.args.mode == \"convert\":\n self.convert()\n elif self.args.mode == \"extract\":\n self.extract()\n else:\n raise ValueError(\"mode must be either `convert` or `extract` \")\n\n\nif __name__ == \"__main__\":\n lmdb_converter = LMDBConversion()\n lmdb_converter.execute()\n" ]
[ [ "torch.manual_seed", "numpy.random.seed" ], [ "numpy.random.seed" ], [ "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
luogan1234/prerequisite-prediction-co-training
[ "28e3f241ada5afe75a73525375087be230735c2a", "28e3f241ada5afe75a73525375087be230735c2a" ]
[ "config.py", "model/text_model.py" ]
[ "import numpy as np\nimport torch\nfrom sklearn.decomposition import PCA\n\nclass Config:\n def __init__(self, dataset, text_encoder, graph_layer, init_num, max_change_num, seed, cpu):\n self.dataset = dataset\n self.text_encoder = text_encoder\n self.graph_layer = graph_layer\n self.init_num = init_num\n self.max_change_num = max_change_num if max_change_num >= 0 else 10000000\n self.seed = seed\n self.device = 'cpu' if cpu else 'cuda'\n if dataset in ['moocen']:\n self.language = 'en'\n self.vocab_num = 30522 # bert-base-uncased\n if dataset in ['mooczh', 'cs', 'psy', 'math', 'phy', 'chem']:\n self.language = 'zh'\n self.vocab_num = 21128 # bert-base-chinese\n assert self.language, 'Need to provide the language information for new datasets'\n \n self.max_term_length = 20\n self.word_embedding_dim = 32\n self.attention_dim = 32\n self.text_embedding_dim = 768 if text_encoder in ['bert', 'bert-freeze'] else 128\n self.graph_embedding_dim = 128\n self.encoding_dim = 64\n self.max_cotraining_iterations = 6\n self.max_epochs = 64\n self.early_stop_time = 8\n self.num_classes = 2\n self.threshold = 0.8\n \n def ensemble_num(self, mode):\n if mode == 'text' and self.text_encoder == 'bert':\n num = 2\n else:\n num = 4\n return num\n \n def lr(self, mode):\n if mode == 'text' and self.text_encoder == 'bert':\n lr = 4e-5\n else:\n lr = 1e-3\n return lr\n \n def batch_size(self, mode):\n if mode == 'train':\n batch_size = 16\n else:\n batch_size = 64\n if self.text_encoder in ['lstm', 'bert-freeze']:\n batch_size *= 4\n return batch_size\n \n def set_concepts_parameters(self, concepts):\n self.concept_num = len(concepts)\n \n def set_gcn_parameters(self, graph):\n self.laplacian1 = self.to_laplacian_matrix(graph.T)\n self.laplacian2 = self.to_laplacian_matrix(graph)\n \n def set_embeddings(self, embeds):\n pca = PCA()\n X = embeds.detach().cpu().numpy()\n X = pca.fit_transform(X)\n embeds = torch.from_numpy(X)\n embeds = embeds[:, :self.graph_embedding_dim]\n size = embeds.size()\n padding = torch.zeros(size[0], self.graph_embedding_dim-size[1])\n self.embeddings = torch.cat([embeds, padding], 1).to(self.device)\n \n def to_laplacian_matrix(self, graph):\n a = np.eye(graph.shape[0]) + graph\n d = np.power(np.sum(np.abs(a), 1), -1)\n d[np.isinf(d)] = 0\n d = np.diag(d)\n laplacian = np.array(np.matmul(d, a), dtype=np.float32)\n laplacian = torch.from_numpy(laplacian).to(self.device)\n return laplacian\n \n def store_name(self):\n return '{}_{}_{}_{}_{}_{}'.format(self.dataset, self.text_encoder, self.graph_layer, self.init_num, self.max_change_num, self.seed)\n \n def parameter_info(self):\n obj = {'dataset': self.dataset, 'text_encoder': self.text_encoder, 'graph_layer': self.graph_layer, 'init_num': self.init_num, 'max_change_num': self.max_change_num, 'seed': self.seed}\n return obj", "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nfrom model.base_module import BaseModule\r\nfrom model.lstm_encoder import LSTMEncoder\r\nfrom model.bert_encoder import BERTEncoder\r\nfrom transformers import BertModel\r\nfrom model.mlp_layer import MLPLayer\r\n\r\nclass TextModel(BaseModule):\r\n def __init__(self, config):\r\n super().__init__(config)\r\n if config.text_encoder == 'lstm':\r\n self.encoder = LSTMEncoder(config)\r\n if config.text_encoder in ['bert', 'bert-freeze']:\r\n self.encoder = BERTEncoder(config)\r\n self.fc = MLPLayer(self.config.text_embedding_dim*2, self.config.num_classes)\r\n self.cached_embeds = torch.zeros(config.concept_num, 
config.text_embedding_dim).to(config.device)\r\n \r\n def get_embeddings(self, batch):\r\n x = batch['t']\r\n outs = self.encoder(x)\r\n return outs\r\n \r\n def forward(self, batch):\r\n t1, t2 = batch['t1'], batch['t2']\r\n e1, e2 = self.encoder(t1), self.encoder(t2)\r\n o = torch.cat([e1, e2], 1)\r\n outs = self.fc(o)\r\n return outs\r\n \r\n def set_cached_embeds(self, batch):\r\n i, t = batch['i'], batch['t']\r\n e = self.encoder(t)\r\n pos = [i]\r\n self.cached_embeds.index_put_(pos, e.detach())\r\n \r\n def predict(self, batch):\r\n i1, i2 = batch['i1'], batch['i2']\r\n e1 = self.cached_embeds.index_select(0, i1)\r\n e2 = self.cached_embeds.index_select(0, i2)\r\n o = torch.cat([e1, e2], -1)\r\n outs = self.fc(o)\r\n return outs" ]
[ [ "numpy.diag", "numpy.abs", "numpy.isinf", "torch.zeros", "torch.cat", "numpy.eye", "numpy.matmul", "torch.from_numpy", "sklearn.decomposition.PCA" ], [ "torch.zeros", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tw-yshuang/NTU_2020S_ML_HW3
[ "59d155bc3dbcb0bcd85fc10278ad00501770f738" ]
[ "src/food_img_transform.py" ]
[ "import cv2\nimport skimage\nimport random\nimport numpy as np\nfrom torchvision import transforms\ntry:\n from src.Model.find_file_name import get_filenames\n from src.Model.Img_DIP import get_filter_img\nexcept ModuleNotFoundError:\n from Model.find_file_name import get_filenames\n from Model.Img_DIP import get_filter_img\n\ntrain_transforms_arg = transforms.Compose([\n transforms.ColorJitter(brightness=(0.75, 1.25), contrast=(\n 0.75, 1.25), saturation=(0.75, 1.25), hue=(0, 0.05)),\n\n transforms.RandomChoice([\n transforms.ColorJitter(brightness=(0.25, 1.75)),\n transforms.ColorJitter(contrast=(0.25, 1.75)),\n transforms.ColorJitter(saturation=(0.25, 1.75)),\n transforms.ColorJitter(hue=(0, 0.1))\n ]),\n])\n\ntrain_transforms = transforms.Compose([\n transforms.ToPILImage(),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.Pad(padding=(64, 64), padding_mode='symmetric'),\n transforms.RandomRotation(45, expand=True),\n transforms.Lambda(lambda x: cv2_transforms(x)),\n transforms.ToPILImage(),\n transforms.Resize((128, 128)),\n transforms.RandomApply([train_transforms_arg], p=0.85),\n transforms.ToTensor(), # data normalization\n transforms.RandomErasing(value='random'),\n])\n# testing dosen't use agumentation\ntest_transforms = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize((128, 128)),\n transforms.ToTensor(), # data normalization\n])\n\n\ndef cv2_transforms(img, inSize=256, isShow=False):\n img = np.asarray(img)\n img_center = img.shape[0] // 2\n half_size = inSize // 2\n crop_img = img[img_center-half_size: img_center +\n half_size, img_center-half_size: img_center+half_size]\n filter_img = random.choice(\n [crop_img, get_filter_img(crop_img, kernel=random.choice(['laplace', 'mean']))])\n output_img = random.choice(\n [filter_img, get_pepper_salt_noised(filter_img, amount=random.uniform(0.005, 0))])\n\n if isShow:\n cv2.imshow('img', img)\n cv2.imshow('crop', crop_img)\n cv2.imshow('output', output_img)\n cv2.waitKey(0)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n pass\n return output_img\n\n\ndef get_pepper_salt_noised(img, amount=0.05, isShow=False):\n # img = cv2.cvtColor(cvImg, cv2.COLOR_BGR2RGB)\n img = img/255.0 # floating point image\n img_noised = skimage.util.random_noise(img, 's&p', amount=amount)\n img_noised = np.uint8(img_noised*256)\n # cvImg_noised = cv2.cvtColor(img_noised, cv2.COLOR_BGR2RGB)\n if isShow:\n cv2.imshow(\"Pepper_salt_noise: \" + str(amount), img_noised)\n cv2.waitKey(0)\n return img_noised\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n\n filenames = get_filenames('Data/food-11/training', '.jpg')\n for filename in filenames:\n img = cv2.imread(filename)\n # img = get_pepper_salt_noised(img, 0.05, True)\n img = cv2.resize(img, (256, 256))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n toPILimg_transforms = transforms.Compose([transforms.ToPILImage()])\n\n process_tensor = train_transforms(img)\n process_img = toPILimg_transforms(process_tensor)\n\n # process_img = cv2.imread(process_img)\n # cv2.imshow('raw', img)\n # cv2.imshow('process_img', process_img)\n plt.subplot(1, 2, 1)\n plt.imshow(img)\n plt.subplot(1, 2, 2)\n plt.imshow(process_img)\n plt.show(block=False)\n plt.pause(0.05)\n plt.cla()\n # plt.waitforbuttonpress(0)\n # plt.close()\n # if 0xFF == ord('q'):\n # print('q')\n # plt.close('all')\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.asarray", "numpy.uint8", "matplotlib.pyplot.cla", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.pause" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
phamhe/mmdetection3d
[ "bc97d76ada58872eb84b08bb316f6a5a0526d706", "bc97d76ada58872eb84b08bb316f6a5a0526d706" ]
[ "mmdet3d/models/roi_heads/bbox_heads/h3d_bbox_head.py", "mmdet3d/datasets/kitti_dataset.py" ]
[ "import torch\nfrom mmcv.cnn import ConvModule\nfrom torch import nn as nn\nfrom torch.nn import functional as F\n\nfrom mmdet3d.core.bbox import DepthInstance3DBoxes\nfrom mmdet3d.core.post_processing import aligned_3d_nms\nfrom mmdet3d.models.builder import build_loss\nfrom mmdet3d.models.losses import chamfer_distance\nfrom mmdet3d.ops import build_sa_module\nfrom mmdet.core import build_bbox_coder, multi_apply\nfrom mmdet.models import HEADS\n\n\[email protected]_module()\nclass H3DBboxHead(nn.Module):\n r\"\"\"Bbox head of `H3DNet <https://arxiv.org/abs/2006.05682>`_.\n\n Args:\n num_classes (int): The number of classes.\n suface_matching_cfg (dict): Config for suface primitive matching.\n line_matching_cfg (dict): Config for line primitive matching.\n bbox_coder (:obj:`BaseBBoxCoder`): Bbox coder for encoding and\n decoding boxes.\n train_cfg (dict): Config for training.\n test_cfg (dict): Config for testing.\n gt_per_seed (int): Number of ground truth votes generated\n from each seed point.\n num_proposal (int): Number of proposal votes generated.\n feat_channels (tuple[int]): Convolution channels of\n prediction layer.\n primitive_feat_refine_streams (int): The number of mlps to\n refine primitive feature.\n primitive_refine_channels (tuple[int]): Convolution channels of\n prediction layer.\n upper_thresh (float): Threshold for line matching.\n surface_thresh (float): Threshold for suface matching.\n line_thresh (float): Threshold for line matching.\n conv_cfg (dict): Config of convolution in prediction layer.\n norm_cfg (dict): Config of BN in prediction layer.\n objectness_loss (dict): Config of objectness loss.\n center_loss (dict): Config of center loss.\n dir_class_loss (dict): Config of direction classification loss.\n dir_res_loss (dict): Config of direction residual regression loss.\n size_class_loss (dict): Config of size classification loss.\n size_res_loss (dict): Config of size residual regression loss.\n semantic_loss (dict): Config of point-wise semantic segmentation loss.\n cues_objectness_loss (dict): Config of cues objectness loss.\n cues_semantic_loss (dict): Config of cues semantic loss.\n proposal_objectness_loss (dict): Config of proposal objectness\n loss.\n primitive_center_loss (dict): Config of primitive center regression\n loss.\n \"\"\"\n\n def __init__(self,\n num_classes,\n suface_matching_cfg,\n line_matching_cfg,\n bbox_coder,\n train_cfg=None,\n test_cfg=None,\n gt_per_seed=1,\n num_proposal=256,\n feat_channels=(128, 128),\n primitive_feat_refine_streams=2,\n primitive_refine_channels=[128, 128, 128],\n upper_thresh=100.0,\n surface_thresh=0.5,\n line_thresh=0.5,\n conv_cfg=dict(type='Conv1d'),\n norm_cfg=dict(type='BN1d'),\n objectness_loss=None,\n center_loss=None,\n dir_class_loss=None,\n dir_res_loss=None,\n size_class_loss=None,\n size_res_loss=None,\n semantic_loss=None,\n cues_objectness_loss=None,\n cues_semantic_loss=None,\n proposal_objectness_loss=None,\n primitive_center_loss=None):\n super(H3DBboxHead, self).__init__()\n self.num_classes = num_classes\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n self.gt_per_seed = gt_per_seed\n self.num_proposal = num_proposal\n self.with_angle = bbox_coder['with_rot']\n self.upper_thresh = upper_thresh\n self.surface_thresh = surface_thresh\n self.line_thresh = line_thresh\n\n self.objectness_loss = build_loss(objectness_loss)\n self.center_loss = build_loss(center_loss)\n self.dir_class_loss = build_loss(dir_class_loss)\n self.dir_res_loss = build_loss(dir_res_loss)\n 
self.size_class_loss = build_loss(size_class_loss)\n self.size_res_loss = build_loss(size_res_loss)\n self.semantic_loss = build_loss(semantic_loss)\n\n self.bbox_coder = build_bbox_coder(bbox_coder)\n self.num_sizes = self.bbox_coder.num_sizes\n self.num_dir_bins = self.bbox_coder.num_dir_bins\n\n self.cues_objectness_loss = build_loss(cues_objectness_loss)\n self.cues_semantic_loss = build_loss(cues_semantic_loss)\n self.proposal_objectness_loss = build_loss(proposal_objectness_loss)\n self.primitive_center_loss = build_loss(primitive_center_loss)\n\n assert suface_matching_cfg['mlp_channels'][-1] == \\\n line_matching_cfg['mlp_channels'][-1]\n\n # surface center matching\n self.surface_center_matcher = build_sa_module(suface_matching_cfg)\n # line center matching\n self.line_center_matcher = build_sa_module(line_matching_cfg)\n\n # Compute the matching scores\n matching_feat_dims = suface_matching_cfg['mlp_channels'][-1]\n self.matching_conv = ConvModule(\n matching_feat_dims,\n matching_feat_dims,\n 1,\n padding=0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n bias=True,\n inplace=True)\n self.matching_pred = nn.Conv1d(matching_feat_dims, 2, 1)\n\n # Compute the semantic matching scores\n self.semantic_matching_conv = ConvModule(\n matching_feat_dims,\n matching_feat_dims,\n 1,\n padding=0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n bias=True,\n inplace=True)\n self.semantic_matching_pred = nn.Conv1d(matching_feat_dims, 2, 1)\n\n # Surface feature aggregation\n self.surface_feats_aggregation = list()\n for k in range(primitive_feat_refine_streams):\n self.surface_feats_aggregation.append(\n ConvModule(\n matching_feat_dims,\n matching_feat_dims,\n 1,\n padding=0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n bias=True,\n inplace=True))\n self.surface_feats_aggregation = nn.Sequential(\n *self.surface_feats_aggregation)\n\n # Line feature aggregation\n self.line_feats_aggregation = list()\n for k in range(primitive_feat_refine_streams):\n self.line_feats_aggregation.append(\n ConvModule(\n matching_feat_dims,\n matching_feat_dims,\n 1,\n padding=0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n bias=True,\n inplace=True))\n self.line_feats_aggregation = nn.Sequential(\n *self.line_feats_aggregation)\n\n # surface center(6) + line center(12)\n prev_channel = 18 * matching_feat_dims\n self.bbox_pred = nn.ModuleList()\n for k in range(len(primitive_refine_channels)):\n self.bbox_pred.append(\n ConvModule(\n prev_channel,\n primitive_refine_channels[k],\n 1,\n padding=0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n bias=True,\n inplace=False))\n prev_channel = primitive_refine_channels[k]\n\n # Final object detection\n # Objectness scores (2), center residual (3),\n # heading class+residual (num_heading_bin*2), size class +\n # residual(num_size_cluster*4)\n conv_out_channel = (2 + 3 + bbox_coder['num_dir_bins'] * 2 +\n bbox_coder['num_sizes'] * 4 + self.num_classes)\n self.bbox_pred.append(nn.Conv1d(prev_channel, conv_out_channel, 1))\n\n def init_weights(self, pretrained=None):\n \"\"\"Initialize the weights in detector.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n \"\"\"\n pass\n\n def forward(self, feats_dict, sample_mod):\n \"\"\"Forward pass.\n\n Args:\n feats_dict (dict): Feature dict from backbone.\n sample_mod (str): Sample mode for vote aggregation layer.\n valid modes are \"vote\", \"seed\" and \"random\".\n\n Returns:\n dict: Predictions of vote head.\n \"\"\"\n ret_dict = {}\n aggregated_points = feats_dict['aggregated_points']\n 
original_feature = feats_dict['aggregated_features']\n batch_size = original_feature.shape[0]\n object_proposal = original_feature.shape[2]\n\n # Extract surface center, features and semantic predictions\n z_center = feats_dict['pred_z_center']\n xy_center = feats_dict['pred_xy_center']\n z_semantic = feats_dict['sem_cls_scores_z']\n xy_semantic = feats_dict['sem_cls_scores_xy']\n z_feature = feats_dict['aggregated_features_z']\n xy_feature = feats_dict['aggregated_features_xy']\n # Extract line points and features\n line_center = feats_dict['pred_line_center']\n line_feature = feats_dict['aggregated_features_line']\n\n surface_center_pred = torch.cat((z_center, xy_center), dim=1)\n ret_dict['surface_center_pred'] = surface_center_pred\n ret_dict['surface_sem_pred'] = torch.cat((z_semantic, xy_semantic),\n dim=1)\n\n # Extract the surface and line centers of rpn proposals\n rpn_proposals = feats_dict['proposal_list']\n rpn_proposals_bbox = DepthInstance3DBoxes(\n rpn_proposals.reshape(-1, 7).clone(),\n box_dim=rpn_proposals.shape[-1],\n with_yaw=self.with_angle,\n origin=(0.5, 0.5, 0.5))\n\n obj_surface_center, obj_line_center = \\\n rpn_proposals_bbox.get_surface_line_center()\n obj_surface_center = obj_surface_center.reshape(\n batch_size, -1, 6, 3).transpose(1, 2).reshape(batch_size, -1, 3)\n obj_line_center = obj_line_center.reshape(batch_size, -1, 12,\n 3).transpose(1, 2).reshape(\n batch_size, -1, 3)\n ret_dict['surface_center_object'] = obj_surface_center\n ret_dict['line_center_object'] = obj_line_center\n\n # aggregate primitive z and xy features to rpn proposals\n surface_center_feature_pred = torch.cat((z_feature, xy_feature), dim=2)\n surface_center_feature_pred = torch.cat(\n (surface_center_feature_pred.new_zeros(\n (batch_size, 6, surface_center_feature_pred.shape[2])),\n surface_center_feature_pred),\n dim=1)\n\n surface_xyz, surface_features, _ = self.surface_center_matcher(\n surface_center_pred,\n surface_center_feature_pred,\n target_xyz=obj_surface_center)\n\n # aggregate primitive line features to rpn proposals\n line_feature = torch.cat((line_feature.new_zeros(\n (batch_size, 12, line_feature.shape[2])), line_feature),\n dim=1)\n line_xyz, line_features, _ = self.line_center_matcher(\n line_center, line_feature, target_xyz=obj_line_center)\n\n # combine the surface and line features\n combine_features = torch.cat((surface_features, line_features), dim=2)\n\n matching_features = self.matching_conv(combine_features)\n matching_score = self.matching_pred(matching_features)\n ret_dict['matching_score'] = matching_score.transpose(2, 1)\n\n semantic_matching_features = self.semantic_matching_conv(\n combine_features)\n semantic_matching_score = self.semantic_matching_pred(\n semantic_matching_features)\n ret_dict['semantic_matching_score'] = \\\n semantic_matching_score.transpose(2, 1)\n\n surface_features = self.surface_feats_aggregation(surface_features)\n line_features = self.line_feats_aggregation(line_features)\n\n # Combine all surface and line features\n surface_features = surface_features.view(batch_size, -1,\n object_proposal)\n line_features = line_features.view(batch_size, -1, object_proposal)\n\n combine_feature = torch.cat((surface_features, line_features), dim=1)\n\n # Final bbox predictions\n bbox_predictions = self.bbox_pred[0](combine_feature)\n bbox_predictions += original_feature\n for conv_module in self.bbox_pred[1:]:\n bbox_predictions = conv_module(bbox_predictions)\n\n refine_decode_res = self.bbox_coder.split_pred(\n bbox_predictions[:, 
:self.num_classes + 2],\n bbox_predictions[:, self.num_classes + 2:], aggregated_points)\n for key in refine_decode_res.keys():\n ret_dict[key + '_optimized'] = refine_decode_res[key]\n return ret_dict\n\n def loss(self,\n bbox_preds,\n points,\n gt_bboxes_3d,\n gt_labels_3d,\n pts_semantic_mask=None,\n pts_instance_mask=None,\n img_metas=None,\n rpn_targets=None,\n gt_bboxes_ignore=None):\n \"\"\"Compute loss.\n\n Args:\n bbox_preds (dict): Predictions from forward of h3d bbox head.\n points (list[torch.Tensor]): Input points.\n gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth \\\n bboxes of each sample.\n gt_labels_3d (list[torch.Tensor]): Labels of each sample.\n pts_semantic_mask (None | list[torch.Tensor]): Point-wise\n semantic mask.\n pts_instance_mask (None | list[torch.Tensor]): Point-wise\n instance mask.\n img_metas (list[dict]): Contain pcd and img's meta info.\n rpn_targets (Tuple) : Targets generated by rpn head.\n gt_bboxes_ignore (None | list[torch.Tensor]): Specify\n which bounding.\n\n Returns:\n dict: Losses of H3dnet.\n \"\"\"\n (vote_targets, vote_target_masks, size_class_targets, size_res_targets,\n dir_class_targets, dir_res_targets, center_targets, mask_targets,\n valid_gt_masks, objectness_targets, objectness_weights,\n box_loss_weights, valid_gt_weights) = rpn_targets\n\n losses = {}\n\n # calculate refined proposal loss\n refined_proposal_loss = self.get_proposal_stage_loss(\n bbox_preds,\n size_class_targets,\n size_res_targets,\n dir_class_targets,\n dir_res_targets,\n center_targets,\n mask_targets,\n objectness_targets,\n objectness_weights,\n box_loss_weights,\n valid_gt_weights,\n suffix='_optimized')\n for key in refined_proposal_loss.keys():\n losses[key + '_optimized'] = refined_proposal_loss[key]\n\n bbox3d_optimized = self.bbox_coder.decode(\n bbox_preds, suffix='_optimized')\n\n targets = self.get_targets(points, gt_bboxes_3d, gt_labels_3d,\n pts_semantic_mask, pts_instance_mask,\n bbox_preds)\n\n (cues_objectness_label, cues_sem_label, proposal_objectness_label,\n cues_mask, cues_match_mask, proposal_objectness_mask,\n cues_matching_label, obj_surface_line_center) = targets\n\n # match scores for each geometric primitive\n objectness_scores = bbox_preds['matching_score']\n # match scores for the semantics of primitives\n objectness_scores_sem = bbox_preds['semantic_matching_score']\n\n primitive_objectness_loss = self.cues_objectness_loss(\n objectness_scores.transpose(2, 1),\n cues_objectness_label,\n weight=cues_mask,\n avg_factor=cues_mask.sum() + 1e-6)\n\n primitive_sem_loss = self.cues_semantic_loss(\n objectness_scores_sem.transpose(2, 1),\n cues_sem_label,\n weight=cues_mask,\n avg_factor=cues_mask.sum() + 1e-6)\n\n objectness_scores = bbox_preds['obj_scores_optimized']\n objectness_loss_refine = self.proposal_objectness_loss(\n objectness_scores.transpose(2, 1), proposal_objectness_label)\n primitive_matching_loss = (objectness_loss_refine *\n cues_match_mask).sum() / (\n cues_match_mask.sum() + 1e-6) * 0.5\n primitive_sem_matching_loss = (\n objectness_loss_refine * proposal_objectness_mask).sum() / (\n proposal_objectness_mask.sum() + 1e-6) * 0.5\n\n # Get the object surface center here\n batch_size, object_proposal = bbox3d_optimized.shape[:2]\n refined_bbox = DepthInstance3DBoxes(\n bbox3d_optimized.reshape(-1, 7).clone(),\n box_dim=bbox3d_optimized.shape[-1],\n with_yaw=self.with_angle,\n origin=(0.5, 0.5, 0.5))\n\n pred_obj_surface_center, pred_obj_line_center = \\\n refined_bbox.get_surface_line_center()\n 
pred_obj_surface_center = pred_obj_surface_center.reshape(\n batch_size, -1, 6, 3).transpose(1, 2).reshape(batch_size, -1, 3)\n pred_obj_line_center = pred_obj_line_center.reshape(\n batch_size, -1, 12, 3).transpose(1, 2).reshape(batch_size, -1, 3)\n pred_surface_line_center = torch.cat(\n (pred_obj_surface_center, pred_obj_line_center), 1)\n\n square_dist = self.primitive_center_loss(pred_surface_line_center,\n obj_surface_line_center)\n\n match_dist = torch.sqrt(square_dist.sum(dim=-1) + 1e-6)\n primitive_centroid_reg_loss = torch.sum(\n match_dist * cues_matching_label) / (\n cues_matching_label.sum() + 1e-6)\n\n refined_loss = dict(\n primitive_objectness_loss=primitive_objectness_loss,\n primitive_sem_loss=primitive_sem_loss,\n primitive_matching_loss=primitive_matching_loss,\n primitive_sem_matching_loss=primitive_sem_matching_loss,\n primitive_centroid_reg_loss=primitive_centroid_reg_loss)\n\n losses.update(refined_loss)\n\n return losses\n\n def get_bboxes(self,\n points,\n bbox_preds,\n input_metas,\n rescale=False,\n suffix=''):\n \"\"\"Generate bboxes from vote head predictions.\n\n Args:\n points (torch.Tensor): Input points.\n bbox_preds (dict): Predictions from vote head.\n input_metas (list[dict]): Point cloud and image's meta info.\n rescale (bool): Whether to rescale bboxes.\n\n Returns:\n list[tuple[torch.Tensor]]: Bounding boxes, scores and labels.\n \"\"\"\n # decode boxes\n obj_scores = F.softmax(\n bbox_preds['obj_scores' + suffix], dim=-1)[..., -1]\n\n sem_scores = F.softmax(bbox_preds['sem_scores'], dim=-1)\n\n prediction_collection = {}\n prediction_collection['center'] = bbox_preds['center' + suffix]\n prediction_collection['dir_class'] = bbox_preds['dir_class']\n prediction_collection['dir_res'] = bbox_preds['dir_res' + suffix]\n prediction_collection['size_class'] = bbox_preds['size_class']\n prediction_collection['size_res'] = bbox_preds['size_res' + suffix]\n\n bbox3d = self.bbox_coder.decode(prediction_collection)\n\n batch_size = bbox3d.shape[0]\n results = list()\n for b in range(batch_size):\n bbox_selected, score_selected, labels = self.multiclass_nms_single(\n obj_scores[b], sem_scores[b], bbox3d[b], points[b, ..., :3],\n input_metas[b])\n bbox = input_metas[b]['box_type_3d'](\n bbox_selected,\n box_dim=bbox_selected.shape[-1],\n with_yaw=self.bbox_coder.with_rot)\n results.append((bbox, score_selected, labels))\n\n return results\n\n def multiclass_nms_single(self, obj_scores, sem_scores, bbox, points,\n input_meta):\n \"\"\"Multi-class nms in single batch.\n\n Args:\n obj_scores (torch.Tensor): Objectness score of bounding boxes.\n sem_scores (torch.Tensor): semantic class score of bounding boxes.\n bbox (torch.Tensor): Predicted bounding boxes.\n points (torch.Tensor): Input points.\n input_meta (dict): Point cloud and image's meta info.\n\n Returns:\n tuple[torch.Tensor]: Bounding boxes, scores and labels.\n \"\"\"\n bbox = input_meta['box_type_3d'](\n bbox,\n box_dim=bbox.shape[-1],\n with_yaw=self.bbox_coder.with_rot,\n origin=(0.5, 0.5, 0.5))\n box_indices = bbox.points_in_boxes(points)\n\n corner3d = bbox.corners\n minmax_box3d = corner3d.new(torch.Size((corner3d.shape[0], 6)))\n minmax_box3d[:, :3] = torch.min(corner3d, dim=1)[0]\n minmax_box3d[:, 3:] = torch.max(corner3d, dim=1)[0]\n\n nonempty_box_mask = box_indices.T.sum(1) > 5\n\n bbox_classes = torch.argmax(sem_scores, -1)\n nms_selected = aligned_3d_nms(minmax_box3d[nonempty_box_mask],\n obj_scores[nonempty_box_mask],\n bbox_classes[nonempty_box_mask],\n self.test_cfg.nms_thr)\n\n # filter 
empty boxes and boxes with low score\n scores_mask = (obj_scores > self.test_cfg.score_thr)\n nonempty_box_inds = torch.nonzero(\n nonempty_box_mask, as_tuple=False).flatten()\n nonempty_mask = torch.zeros_like(bbox_classes).scatter(\n 0, nonempty_box_inds[nms_selected], 1)\n selected = (nonempty_mask.bool() & scores_mask.bool())\n\n if self.test_cfg.per_class_proposal:\n bbox_selected, score_selected, labels = [], [], []\n for k in range(sem_scores.shape[-1]):\n bbox_selected.append(bbox[selected].tensor)\n score_selected.append(obj_scores[selected] *\n sem_scores[selected][:, k])\n labels.append(\n torch.zeros_like(bbox_classes[selected]).fill_(k))\n bbox_selected = torch.cat(bbox_selected, 0)\n score_selected = torch.cat(score_selected, 0)\n labels = torch.cat(labels, 0)\n else:\n bbox_selected = bbox[selected].tensor\n score_selected = obj_scores[selected]\n labels = bbox_classes[selected]\n\n return bbox_selected, score_selected, labels\n\n def get_proposal_stage_loss(self,\n bbox_preds,\n size_class_targets,\n size_res_targets,\n dir_class_targets,\n dir_res_targets,\n center_targets,\n mask_targets,\n objectness_targets,\n objectness_weights,\n box_loss_weights,\n valid_gt_weights,\n suffix=''):\n \"\"\"Compute loss for the aggregation module.\n\n Args:\n bbox_preds (dict): Predictions from forward of vote head.\n size_class_targets (torch.Tensor): Ground truth \\\n size class of each prediction bounding box.\n size_res_targets (torch.Tensor): Ground truth \\\n size residual of each prediction bounding box.\n dir_class_targets (torch.Tensor): Ground truth \\\n direction class of each prediction bounding box.\n dir_res_targets (torch.Tensor): Ground truth \\\n direction residual of each prediction bounding box.\n center_targets (torch.Tensor): Ground truth center \\\n of each prediction bounding box.\n mask_targets (torch.Tensor): Validation of each \\\n prediction bounding box.\n objectness_targets (torch.Tensor): Ground truth \\\n objectness label of each prediction bounding box.\n objectness_weights (torch.Tensor): Weights of objectness \\\n loss for each prediction bounding box.\n box_loss_weights (torch.Tensor): Weights of regression \\\n loss for each prediction bounding box.\n valid_gt_weights (torch.Tensor): Validation of each \\\n ground truth bounding box.\n\n Returns:\n dict: Losses of aggregation module.\n \"\"\"\n # calculate objectness loss\n objectness_loss = self.objectness_loss(\n bbox_preds['obj_scores' + suffix].transpose(2, 1),\n objectness_targets,\n weight=objectness_weights)\n\n # calculate center loss\n source2target_loss, target2source_loss = self.center_loss(\n bbox_preds['center' + suffix],\n center_targets,\n src_weight=box_loss_weights,\n dst_weight=valid_gt_weights)\n center_loss = source2target_loss + target2source_loss\n\n # calculate direction class loss\n dir_class_loss = self.dir_class_loss(\n bbox_preds['dir_class' + suffix].transpose(2, 1),\n dir_class_targets,\n weight=box_loss_weights)\n\n # calculate direction residual loss\n batch_size, proposal_num = size_class_targets.shape[:2]\n heading_label_one_hot = dir_class_targets.new_zeros(\n (batch_size, proposal_num, self.num_dir_bins))\n heading_label_one_hot.scatter_(2, dir_class_targets.unsqueeze(-1), 1)\n dir_res_norm = (bbox_preds['dir_res_norm' + suffix] *\n heading_label_one_hot).sum(dim=-1)\n dir_res_loss = self.dir_res_loss(\n dir_res_norm, dir_res_targets, weight=box_loss_weights)\n\n # calculate size class loss\n size_class_loss = self.size_class_loss(\n bbox_preds['size_class' + 
suffix].transpose(2, 1),\n size_class_targets,\n weight=box_loss_weights)\n\n # calculate size residual loss\n one_hot_size_targets = box_loss_weights.new_zeros(\n (batch_size, proposal_num, self.num_sizes))\n one_hot_size_targets.scatter_(2, size_class_targets.unsqueeze(-1), 1)\n one_hot_size_targets_expand = one_hot_size_targets.unsqueeze(\n -1).repeat(1, 1, 1, 3)\n size_residual_norm = (bbox_preds['size_res_norm' + suffix] *\n one_hot_size_targets_expand).sum(dim=2)\n box_loss_weights_expand = box_loss_weights.unsqueeze(-1).repeat(\n 1, 1, 3)\n size_res_loss = self.size_res_loss(\n size_residual_norm,\n size_res_targets,\n weight=box_loss_weights_expand)\n\n # calculate semantic loss\n semantic_loss = self.semantic_loss(\n bbox_preds['sem_scores' + suffix].transpose(2, 1),\n mask_targets,\n weight=box_loss_weights)\n\n losses = dict(\n objectness_loss=objectness_loss,\n semantic_loss=semantic_loss,\n center_loss=center_loss,\n dir_class_loss=dir_class_loss,\n dir_res_loss=dir_res_loss,\n size_class_loss=size_class_loss,\n size_res_loss=size_res_loss)\n\n return losses\n\n def get_targets(self,\n points,\n gt_bboxes_3d,\n gt_labels_3d,\n pts_semantic_mask=None,\n pts_instance_mask=None,\n bbox_preds=None):\n \"\"\"Generate targets of proposal module.\n\n Args:\n points (list[torch.Tensor]): Points of each batch.\n gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth \\\n bboxes of each batch.\n gt_labels_3d (list[torch.Tensor]): Labels of each batch.\n pts_semantic_mask (None | list[torch.Tensor]): Point-wise semantic\n label of each batch.\n pts_instance_mask (None | list[torch.Tensor]): Point-wise instance\n label of each batch.\n bbox_preds (torch.Tensor): Bounding box predictions of vote head.\n\n Returns:\n tuple[torch.Tensor]: Targets of proposal module.\n \"\"\"\n # find empty example\n valid_gt_masks = list()\n gt_num = list()\n for index in range(len(gt_labels_3d)):\n if len(gt_labels_3d[index]) == 0:\n fake_box = gt_bboxes_3d[index].tensor.new_zeros(\n 1, gt_bboxes_3d[index].tensor.shape[-1])\n gt_bboxes_3d[index] = gt_bboxes_3d[index].new_box(fake_box)\n gt_labels_3d[index] = gt_labels_3d[index].new_zeros(1)\n valid_gt_masks.append(gt_labels_3d[index].new_zeros(1))\n gt_num.append(1)\n else:\n valid_gt_masks.append(gt_labels_3d[index].new_ones(\n gt_labels_3d[index].shape))\n gt_num.append(gt_labels_3d[index].shape[0])\n\n if pts_semantic_mask is None:\n pts_semantic_mask = [None for i in range(len(gt_labels_3d))]\n pts_instance_mask = [None for i in range(len(gt_labels_3d))]\n\n aggregated_points = [\n bbox_preds['aggregated_points'][i]\n for i in range(len(gt_labels_3d))\n ]\n\n surface_center_pred = [\n bbox_preds['surface_center_pred'][i]\n for i in range(len(gt_labels_3d))\n ]\n\n line_center_pred = [\n bbox_preds['pred_line_center'][i]\n for i in range(len(gt_labels_3d))\n ]\n\n surface_center_object = [\n bbox_preds['surface_center_object'][i]\n for i in range(len(gt_labels_3d))\n ]\n\n line_center_object = [\n bbox_preds['line_center_object'][i]\n for i in range(len(gt_labels_3d))\n ]\n\n surface_sem_pred = [\n bbox_preds['surface_sem_pred'][i]\n for i in range(len(gt_labels_3d))\n ]\n\n line_sem_pred = [\n bbox_preds['sem_cls_scores_line'][i]\n for i in range(len(gt_labels_3d))\n ]\n\n (cues_objectness_label, cues_sem_label, proposal_objectness_label,\n cues_mask, cues_match_mask, proposal_objectness_mask,\n cues_matching_label, obj_surface_line_center) = multi_apply(\n self.get_targets_single, points, gt_bboxes_3d, gt_labels_3d,\n pts_semantic_mask, 
pts_instance_mask, aggregated_points,\n surface_center_pred, line_center_pred, surface_center_object,\n line_center_object, surface_sem_pred, line_sem_pred)\n\n cues_objectness_label = torch.stack(cues_objectness_label)\n cues_sem_label = torch.stack(cues_sem_label)\n proposal_objectness_label = torch.stack(proposal_objectness_label)\n cues_mask = torch.stack(cues_mask)\n cues_match_mask = torch.stack(cues_match_mask)\n proposal_objectness_mask = torch.stack(proposal_objectness_mask)\n cues_matching_label = torch.stack(cues_matching_label)\n obj_surface_line_center = torch.stack(obj_surface_line_center)\n\n return (cues_objectness_label, cues_sem_label,\n proposal_objectness_label, cues_mask, cues_match_mask,\n proposal_objectness_mask, cues_matching_label,\n obj_surface_line_center)\n\n def get_targets_single(self,\n points,\n gt_bboxes_3d,\n gt_labels_3d,\n pts_semantic_mask=None,\n pts_instance_mask=None,\n aggregated_points=None,\n pred_surface_center=None,\n pred_line_center=None,\n pred_obj_surface_center=None,\n pred_obj_line_center=None,\n pred_surface_sem=None,\n pred_line_sem=None):\n \"\"\"Generate targets for primitive cues for single batch.\n\n Args:\n points (torch.Tensor): Points of each batch.\n gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): Ground truth \\\n boxes of each batch.\n gt_labels_3d (torch.Tensor): Labels of each batch.\n pts_semantic_mask (None | torch.Tensor): Point-wise semantic\n label of each batch.\n pts_instance_mask (None | torch.Tensor): Point-wise instance\n label of each batch.\n aggregated_points (torch.Tensor): Aggregated points from\n vote aggregation layer.\n pred_surface_center (torch.Tensor): Prediction of surface center.\n pred_line_center (torch.Tensor): Prediction of line center.\n pred_obj_surface_center (torch.Tensor): Objectness prediction \\\n of surface center.\n pred_obj_line_center (torch.Tensor): Objectness prediction of \\\n line center.\n pred_surface_sem (torch.Tensor): Semantic prediction of \\\n surface center.\n pred_line_sem (torch.Tensor): Semantic prediction of line center.\n Returns:\n tuple[torch.Tensor]: Targets for primitive cues.\n \"\"\"\n device = points.device\n gt_bboxes_3d = gt_bboxes_3d.to(device)\n num_proposals = aggregated_points.shape[0]\n gt_center = gt_bboxes_3d.gravity_center\n\n dist1, dist2, ind1, _ = chamfer_distance(\n aggregated_points.unsqueeze(0),\n gt_center.unsqueeze(0),\n reduction='none')\n # Set assignment\n object_assignment = ind1.squeeze(0)\n\n # Generate objectness label and mask\n # objectness_label: 1 if pred object center is within\n # self.train_cfg['near_threshold'] of any GT object\n # objectness_mask: 0 if pred object center is in gray\n # zone (DONOTCARE), 1 otherwise\n euclidean_dist1 = torch.sqrt(dist1.squeeze(0) + 1e-6)\n proposal_objectness_label = euclidean_dist1.new_zeros(\n num_proposals, dtype=torch.long)\n proposal_objectness_mask = euclidean_dist1.new_zeros(num_proposals)\n\n gt_sem = gt_labels_3d[object_assignment]\n\n obj_surface_center, obj_line_center = \\\n gt_bboxes_3d.get_surface_line_center()\n obj_surface_center = obj_surface_center.reshape(-1, 6,\n 3).transpose(0, 1)\n obj_line_center = obj_line_center.reshape(-1, 12, 3).transpose(0, 1)\n obj_surface_center = obj_surface_center[:, object_assignment].reshape(\n 1, -1, 3)\n obj_line_center = obj_line_center[:,\n object_assignment].reshape(1, -1, 3)\n\n surface_sem = torch.argmax(pred_surface_sem, dim=1).float()\n line_sem = torch.argmax(pred_line_sem, dim=1).float()\n\n dist_surface, _, surface_ind, _ = 
chamfer_distance(\n obj_surface_center,\n pred_surface_center.unsqueeze(0),\n reduction='none')\n dist_line, _, line_ind, _ = chamfer_distance(\n obj_line_center, pred_line_center.unsqueeze(0), reduction='none')\n\n surface_sel = pred_surface_center[surface_ind.squeeze(0)]\n line_sel = pred_line_center[line_ind.squeeze(0)]\n surface_sel_sem = surface_sem[surface_ind.squeeze(0)]\n line_sel_sem = line_sem[line_ind.squeeze(0)]\n\n surface_sel_sem_gt = gt_sem.repeat(6).float()\n line_sel_sem_gt = gt_sem.repeat(12).float()\n\n euclidean_dist_surface = torch.sqrt(dist_surface.squeeze(0) + 1e-6)\n euclidean_dist_line = torch.sqrt(dist_line.squeeze(0) + 1e-6)\n objectness_label_surface = euclidean_dist_line.new_zeros(\n num_proposals * 6, dtype=torch.long)\n objectness_mask_surface = euclidean_dist_line.new_zeros(num_proposals *\n 6)\n objectness_label_line = euclidean_dist_line.new_zeros(\n num_proposals * 12, dtype=torch.long)\n objectness_mask_line = euclidean_dist_line.new_zeros(num_proposals *\n 12)\n objectness_label_surface_sem = euclidean_dist_line.new_zeros(\n num_proposals * 6, dtype=torch.long)\n objectness_label_line_sem = euclidean_dist_line.new_zeros(\n num_proposals * 12, dtype=torch.long)\n\n euclidean_dist_obj_surface = torch.sqrt((\n (pred_obj_surface_center - surface_sel)**2).sum(dim=-1) + 1e-6)\n euclidean_dist_obj_line = torch.sqrt(\n torch.sum((pred_obj_line_center - line_sel)**2, dim=-1) + 1e-6)\n\n # Objectness score just with centers\n proposal_objectness_label[\n euclidean_dist1 < self.train_cfg['near_threshold']] = 1\n proposal_objectness_mask[\n euclidean_dist1 < self.train_cfg['near_threshold']] = 1\n proposal_objectness_mask[\n euclidean_dist1 > self.train_cfg['far_threshold']] = 1\n\n objectness_label_surface[\n (euclidean_dist_obj_surface <\n self.train_cfg['label_surface_threshold']) *\n (euclidean_dist_surface <\n self.train_cfg['mask_surface_threshold'])] = 1\n objectness_label_surface_sem[\n (euclidean_dist_obj_surface <\n self.train_cfg['label_surface_threshold']) *\n (euclidean_dist_surface < self.train_cfg['mask_surface_threshold'])\n * (surface_sel_sem == surface_sel_sem_gt)] = 1\n\n objectness_label_line[\n (euclidean_dist_obj_line < self.train_cfg['label_line_threshold'])\n *\n (euclidean_dist_line < self.train_cfg['mask_line_threshold'])] = 1\n objectness_label_line_sem[\n (euclidean_dist_obj_line < self.train_cfg['label_line_threshold'])\n * (euclidean_dist_line < self.train_cfg['mask_line_threshold']) *\n (line_sel_sem == line_sel_sem_gt)] = 1\n\n objectness_label_surface_obj = proposal_objectness_label.repeat(6)\n objectness_mask_surface_obj = proposal_objectness_mask.repeat(6)\n objectness_label_line_obj = proposal_objectness_label.repeat(12)\n objectness_mask_line_obj = proposal_objectness_mask.repeat(12)\n\n objectness_mask_surface = objectness_mask_surface_obj\n objectness_mask_line = objectness_mask_line_obj\n\n cues_objectness_label = torch.cat(\n (objectness_label_surface, objectness_label_line), 0)\n cues_sem_label = torch.cat(\n (objectness_label_surface_sem, objectness_label_line_sem), 0)\n cues_mask = torch.cat((objectness_mask_surface, objectness_mask_line),\n 0)\n\n objectness_label_surface *= objectness_label_surface_obj\n objectness_label_line *= objectness_label_line_obj\n cues_matching_label = torch.cat(\n (objectness_label_surface, objectness_label_line), 0)\n\n objectness_label_surface_sem *= objectness_label_surface_obj\n objectness_label_line_sem *= objectness_label_line_obj\n\n cues_match_mask = (torch.sum(\n 
cues_objectness_label.view(18, num_proposals), dim=0) >=\n 1).float()\n\n obj_surface_line_center = torch.cat(\n (obj_surface_center, obj_line_center), 1).squeeze(0)\n\n return (cues_objectness_label, cues_sem_label,\n proposal_objectness_label, cues_mask, cues_match_mask,\n proposal_objectness_mask, cues_matching_label,\n obj_surface_line_center)\n", "import copy\nimport mmcv\nimport numpy as np\nimport os\nimport tempfile\nimport torch\nfrom mmcv.utils import print_log\nfrom os import path as osp\n\nfrom mmdet.datasets import DATASETS\nfrom ..core import show_result\nfrom ..core.bbox import (Box3DMode, CameraInstance3DBoxes, Coord3DMode,\n points_cam2img)\nfrom .custom_3d import Custom3DDataset\n\n\[email protected]_module()\nclass KittiDataset(Custom3DDataset):\n r\"\"\"KITTI Dataset.\n\n This class serves as the API for experiments on the `KITTI Dataset\n <http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d>`_.\n\n Args:\n data_root (str): Path of dataset root.\n ann_file (str): Path of annotation file.\n split (str): Split of input data.\n pts_prefix (str, optional): Prefix of points files.\n Defaults to 'velodyne'.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n box_type_3d (str, optional): Type of 3D box of this dataset.\n Based on the `box_type_3d`, the dataset will encapsulate the box\n to its original format then converted them to `box_type_3d`.\n Defaults to 'LiDAR' in this dataset. Available options includes\n\n - 'LiDAR': Box in LiDAR coordinates.\n - 'Depth': Box in depth coordinates, usually for indoor dataset.\n - 'Camera': Box in camera coordinates.\n filter_empty_gt (bool, optional): Whether to filter empty GT.\n Defaults to True.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n pcd_limit_range (list): The range of point cloud used to filter\n invalid predicted boxes. Default: [0, -40, -3, 70.4, 40, 0.0].\n \"\"\"\n CLASSES = ('car', 'pedestrian', 'cyclist')\n\n def __init__(self,\n data_root,\n ann_file,\n split,\n pts_prefix='velodyne',\n pipeline=None,\n classes=None,\n modality=None,\n box_type_3d='LiDAR',\n filter_empty_gt=True,\n test_mode=False,\n pcd_limit_range=[0, -40, -3, 70.4, 40, 0.0]):\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n pipeline=pipeline,\n classes=classes,\n modality=modality,\n box_type_3d=box_type_3d,\n filter_empty_gt=filter_empty_gt,\n test_mode=test_mode)\n\n self.split = split\n self.root_split = os.path.join(self.data_root, split)\n assert self.modality is not None\n self.pcd_limit_range = pcd_limit_range\n self.pts_prefix = pts_prefix\n\n def _get_pts_filename(self, idx):\n \"\"\"Get point cloud filename according to the given index.\n\n Args:\n index (int): Index of the point cloud file to get.\n\n Returns:\n str: Name of the point cloud file.\n \"\"\"\n pts_filename = osp.join(self.root_split, self.pts_prefix,\n f'{idx:06d}.bin')\n return pts_filename\n\n def get_data_info(self, index):\n \"\"\"Get data info according to the given index.\n\n Args:\n index (int): Index of the sample data to get.\n\n Returns:\n dict: Data information that will be passed to the data \\\n preprocessing pipelines. 
It includes the following keys:\n\n - sample_idx (str): Sample index.\n - pts_filename (str): Filename of point clouds.\n - img_prefix (str | None): Prefix of image files.\n - img_info (dict): Image info.\n - lidar2img (list[np.ndarray], optional): Transformations \\\n from lidar to different cameras.\n - ann_info (dict): Annotation info.\n \"\"\"\n info = self.data_infos[index]\n sample_idx = info['image']['image_idx']\n img_filename = os.path.join(self.data_root,\n info['image']['image_path'])\n\n # TODO: consider use torch.Tensor only\n rect = info['calib']['R0_rect'].astype(np.float32)\n Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)\n P2 = info['calib']['P2'].astype(np.float32)\n lidar2img = P2 @ rect @ Trv2c\n\n pts_filename = self._get_pts_filename(sample_idx)\n input_dict = dict(\n sample_idx=sample_idx,\n pts_filename=pts_filename,\n img_prefix=None,\n img_info=dict(filename=img_filename),\n lidar2img=lidar2img)\n\n if not self.test_mode:\n annos = self.get_ann_info(index)\n input_dict['ann_info'] = annos\n\n return input_dict\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n\n - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): \\\n 3D ground truth bboxes.\n - gt_labels_3d (np.ndarray): Labels of ground truths.\n - gt_bboxes (np.ndarray): 2D ground truth bboxes.\n - gt_labels (np.ndarray): Labels of ground truths.\n - gt_names (list[str]): Class names of ground truths.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n rect = info['calib']['R0_rect'].astype(np.float32)\n Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)\n\n annos = info['annos']\n # we need other objects to avoid collision when sample\n annos = self.remove_dontcare(annos)\n loc = annos['location']\n dims = annos['dimensions']\n rots = annos['rotation_y']\n gt_names = annos['name']\n gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]],\n axis=1).astype(np.float32)\n\n # convert gt_bboxes_3d to velodyne coordinates\n gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d).convert_to(\n self.box_mode_3d, np.linalg.inv(rect @ Trv2c))\n gt_bboxes = annos['bbox']\n\n selected = self.drop_arrays_by_name(gt_names, ['DontCare'])\n gt_bboxes = gt_bboxes[selected].astype('float32')\n gt_names = gt_names[selected]\n\n gt_labels = []\n for cat in gt_names:\n if cat in self.CLASSES:\n gt_labels.append(self.CLASSES.index(cat))\n else:\n gt_labels.append(-1)\n gt_labels = np.array(gt_labels).astype(np.int64)\n gt_labels_3d = copy.deepcopy(gt_labels)\n\n anns_results = dict(\n gt_bboxes_3d=gt_bboxes_3d,\n gt_labels_3d=gt_labels_3d,\n bboxes=gt_bboxes,\n labels=gt_labels,\n gt_names=gt_names)\n return anns_results\n\n def drop_arrays_by_name(self, gt_names, used_classes):\n \"\"\"Drop irrelevant ground truths by name.\n\n Args:\n gt_names (list[str]): Names of ground truths.\n used_classes (list[str]): Classes of interest.\n\n Returns:\n np.ndarray: Indices of ground truths that will be dropped.\n \"\"\"\n inds = [i for i, x in enumerate(gt_names) if x not in used_classes]\n inds = np.array(inds, dtype=np.int64)\n return inds\n\n def keep_arrays_by_name(self, gt_names, used_classes):\n \"\"\"Keep useful ground truths by name.\n\n Args:\n gt_names (list[str]): Names of ground truths.\n used_classes (list[str]): Classes of interest.\n\n Returns:\n np.ndarray: 
Indices of ground truths that will be keeped.\n \"\"\"\n inds = [i for i, x in enumerate(gt_names) if x in used_classes]\n inds = np.array(inds, dtype=np.int64)\n return inds\n\n def remove_dontcare(self, ann_info):\n \"\"\"Remove annotations that do not need to be cared.\n\n Args:\n ann_info (dict): Dict of annotation infos. The ``'DontCare'``\n annotations will be removed according to ann_file['name'].\n\n Returns:\n dict: Annotations after filtering.\n \"\"\"\n img_filtered_annotations = {}\n relevant_annotation_indices = [\n i for i, x in enumerate(ann_info['name']) if x != 'DontCare'\n ]\n for key in ann_info.keys():\n img_filtered_annotations[key] = (\n ann_info[key][relevant_annotation_indices])\n return img_filtered_annotations\n\n def format_results(self,\n outputs,\n pklfile_prefix=None,\n submission_prefix=None):\n \"\"\"Format the results to pkl file.\n\n Args:\n outputs (list[dict]): Testing results of the dataset.\n pklfile_prefix (str | None): The prefix of pkl files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n submission_prefix (str | None): The prefix of submitted files. It\n includes the file path and the prefix of filename, e.g.,\n \"a/b/prefix\". If not specified, a temp file will be created.\n Default: None.\n\n Returns:\n tuple: (result_files, tmp_dir), result_files is a dict containing \\\n the json filepaths, tmp_dir is the temporal directory created \\\n for saving json files when jsonfile_prefix is not specified.\n \"\"\"\n if pklfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n pklfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n\n if not isinstance(outputs[0], dict):\n result_files = self.bbox2result_kitti2d(outputs, self.CLASSES,\n pklfile_prefix,\n submission_prefix)\n elif 'pts_bbox' in outputs[0] or 'img_bbox' in outputs[0]:\n result_files = dict()\n for name in outputs[0]:\n results_ = [out[name] for out in outputs]\n pklfile_prefix_ = pklfile_prefix + name\n if submission_prefix is not None:\n submission_prefix_ = submission_prefix + name\n else:\n submission_prefix_ = None\n if 'img' in name:\n result_files = self.bbox2result_kitti2d(\n results_, self.CLASSES, pklfile_prefix_,\n submission_prefix_)\n else:\n result_files_ = self.bbox2result_kitti(\n results_, self.CLASSES, pklfile_prefix_,\n submission_prefix_)\n result_files[name] = result_files_\n else:\n result_files = self.bbox2result_kitti(outputs, self.CLASSES,\n pklfile_prefix,\n submission_prefix)\n return result_files, tmp_dir\n\n def evaluate(self,\n results,\n metric=None,\n logger=None,\n pklfile_prefix=None,\n submission_prefix=None,\n show=False,\n out_dir=None):\n \"\"\"Evaluation in KITTI protocol.\n\n Args:\n results (list[dict]): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n pklfile_prefix (str | None): The prefix of pkl files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. 
Default: None.\n submission_prefix (str | None): The prefix of submission datas.\n If not specified, the submission data will not be generated.\n show (bool): Whether to visualize.\n Default: False.\n out_dir (str): Path to save the visualization results.\n Default: None.\n\n Returns:\n dict[str, float]: Results of each evaluation metric.\n \"\"\"\n result_files, tmp_dir = self.format_results(results, pklfile_prefix)\n from mmdet3d.core.evaluation import kitti_eval\n gt_annos = [info['annos'] for info in self.data_infos]\n\n if isinstance(result_files, dict):\n ap_dict = dict()\n for name, result_files_ in result_files.items():\n eval_types = ['bbox', 'bev', '3d']\n if 'img' in name:\n eval_types = ['bbox']\n ap_result_str, ap_dict_ = kitti_eval(\n gt_annos,\n result_files_,\n self.CLASSES,\n eval_types=eval_types)\n for ap_type, ap in ap_dict_.items():\n ap_dict[f'{name}/{ap_type}'] = float('{:.4f}'.format(ap))\n\n print_log(\n f'Results of {name}:\\n' + ap_result_str, logger=logger)\n\n else:\n if metric == 'img_bbox':\n ap_result_str, ap_dict = kitti_eval(\n gt_annos, result_files, self.CLASSES, eval_types=['bbox'])\n else:\n ap_result_str, ap_dict = kitti_eval(gt_annos, result_files,\n self.CLASSES)\n print_log('\\n' + ap_result_str, logger=logger)\n\n if tmp_dir is not None:\n tmp_dir.cleanup()\n if show:\n self.show(results, out_dir)\n return ap_dict\n\n def bbox2result_kitti(self,\n net_outputs,\n class_names,\n pklfile_prefix=None,\n submission_prefix=None):\n \"\"\"Convert 3D detection results to kitti format for evaluation and test\n submission.\n\n Args:\n net_outputs (list[np.ndarray]): List of array storing the \\\n inferenced bounding boxes and scores.\n class_names (list[String]): A list of class names.\n pklfile_prefix (str | None): The prefix of pkl file.\n submission_prefix (str | None): The prefix of submission file.\n\n Returns:\n list[dict]: A list of dictionaries with the kitti format.\n \"\"\"\n assert len(net_outputs) == len(self.data_infos), \\\n 'invalid list length of network outputs'\n if submission_prefix is not None:\n mmcv.mkdir_or_exist(submission_prefix)\n\n det_annos = []\n print('\\nConverting prediction to KITTI format')\n for idx, pred_dicts in enumerate(\n mmcv.track_iter_progress(net_outputs)):\n annos = []\n info = self.data_infos[idx]\n sample_idx = info['image']['image_idx']\n image_shape = info['image']['image_shape'][:2]\n box_dict = self.convert_valid_bboxes(pred_dicts, info)\n anno = {\n 'name': [],\n 'truncated': [],\n 'occluded': [],\n 'alpha': [],\n 'bbox': [],\n 'dimensions': [],\n 'location': [],\n 'rotation_y': [],\n 'score': []\n }\n if len(box_dict['bbox']) > 0:\n box_2d_preds = box_dict['bbox']\n box_preds = box_dict['box3d_camera']\n scores = box_dict['scores']\n box_preds_lidar = box_dict['box3d_lidar']\n label_preds = box_dict['label_preds']\n\n for box, box_lidar, bbox, score, label in zip(\n box_preds, box_preds_lidar, box_2d_preds, scores,\n label_preds):\n bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])\n bbox[:2] = np.maximum(bbox[:2], [0, 0])\n anno['name'].append(class_names[int(label)])\n anno['truncated'].append(0.0)\n anno['occluded'].append(0)\n anno['alpha'].append(\n -np.arctan2(-box_lidar[1], box_lidar[0]) + box[6])\n anno['bbox'].append(bbox)\n anno['dimensions'].append(box[3:6])\n anno['location'].append(box[:3])\n anno['rotation_y'].append(box[6])\n anno['score'].append(score)\n\n anno = {k: np.stack(v) for k, v in anno.items()}\n annos.append(anno)\n else:\n anno = {\n 'name': np.array([]),\n 'truncated': 
np.array([]),\n 'occluded': np.array([]),\n 'alpha': np.array([]),\n 'bbox': np.zeros([0, 4]),\n 'dimensions': np.zeros([0, 3]),\n 'location': np.zeros([0, 3]),\n 'rotation_y': np.array([]),\n 'score': np.array([]),\n }\n annos.append(anno)\n\n if submission_prefix is not None:\n curr_file = f'{submission_prefix}/{sample_idx:06d}.txt'\n with open(curr_file, 'w') as f:\n bbox = anno['bbox']\n loc = anno['location']\n dims = anno['dimensions'] # lhw -> hwl\n\n for idx in range(len(bbox)):\n print(\n '{} -1 -1 {:.4f} {:.4f} {:.4f} {:.4f} '\n '{:.4f} {:.4f} {:.4f} '\n '{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'.format(\n anno['name'][idx], anno['alpha'][idx],\n bbox[idx][0], bbox[idx][1], bbox[idx][2],\n bbox[idx][3], dims[idx][1], dims[idx][2],\n dims[idx][0], loc[idx][0], loc[idx][1],\n loc[idx][2], anno['rotation_y'][idx],\n anno['score'][idx]),\n file=f)\n\n annos[-1]['sample_idx'] = np.array(\n [sample_idx] * len(annos[-1]['score']), dtype=np.int64)\n\n det_annos += annos\n\n if pklfile_prefix is not None:\n if not pklfile_prefix.endswith(('.pkl', '.pickle')):\n out = f'{pklfile_prefix}.pkl'\n mmcv.dump(det_annos, out)\n print(f'Result is saved to {out}.')\n\n return det_annos\n\n def bbox2result_kitti2d(self,\n net_outputs,\n class_names,\n pklfile_prefix=None,\n submission_prefix=None):\n \"\"\"Convert 2D detection results to kitti format for evaluation and test\n submission.\n\n Args:\n net_outputs (list[np.ndarray]): List of array storing the \\\n inferenced bounding boxes and scores.\n class_names (list[String]): A list of class names.\n pklfile_prefix (str | None): The prefix of pkl file.\n submission_prefix (str | None): The prefix of submission file.\n\n Returns:\n list[dict]: A list of dictionaries have the kitti format\n \"\"\"\n assert len(net_outputs) == len(self.data_infos), \\\n 'invalid list length of network outputs'\n det_annos = []\n print('\\nConverting prediction to KITTI format')\n for i, bboxes_per_sample in enumerate(\n mmcv.track_iter_progress(net_outputs)):\n annos = []\n anno = dict(\n name=[],\n truncated=[],\n occluded=[],\n alpha=[],\n bbox=[],\n dimensions=[],\n location=[],\n rotation_y=[],\n score=[])\n sample_idx = self.data_infos[i]['image']['image_idx']\n\n num_example = 0\n for label in range(len(bboxes_per_sample)):\n bbox = bboxes_per_sample[label]\n for i in range(bbox.shape[0]):\n anno['name'].append(class_names[int(label)])\n anno['truncated'].append(0.0)\n anno['occluded'].append(0)\n anno['alpha'].append(0.0)\n anno['bbox'].append(bbox[i, :4])\n # set dimensions (height, width, length) to zero\n anno['dimensions'].append(\n np.zeros(shape=[3], dtype=np.float32))\n # set the 3D translation to (-1000, -1000, -1000)\n anno['location'].append(\n np.ones(shape=[3], dtype=np.float32) * (-1000.0))\n anno['rotation_y'].append(0.0)\n anno['score'].append(bbox[i, 4])\n num_example += 1\n\n if num_example == 0:\n annos.append(\n dict(\n name=np.array([]),\n truncated=np.array([]),\n occluded=np.array([]),\n alpha=np.array([]),\n bbox=np.zeros([0, 4]),\n dimensions=np.zeros([0, 3]),\n location=np.zeros([0, 3]),\n rotation_y=np.array([]),\n score=np.array([]),\n ))\n else:\n anno = {k: np.stack(v) for k, v in anno.items()}\n annos.append(anno)\n\n annos[-1]['sample_idx'] = np.array(\n [sample_idx] * num_example, dtype=np.int64)\n det_annos += annos\n\n if pklfile_prefix is not None:\n # save file in pkl format\n pklfile_path = (\n pklfile_prefix[:-4] if pklfile_prefix.endswith(\n ('.pkl', '.pickle')) else pklfile_prefix)\n mmcv.dump(det_annos, 
pklfile_path)\n\n if submission_prefix is not None:\n # save file in submission format\n mmcv.mkdir_or_exist(submission_prefix)\n print(f'Saving KITTI submission to {submission_prefix}')\n for i, anno in enumerate(det_annos):\n sample_idx = self.data_infos[i]['image']['image_idx']\n cur_det_file = f'{submission_prefix}/{sample_idx:06d}.txt'\n with open(cur_det_file, 'w') as f:\n bbox = anno['bbox']\n loc = anno['location']\n dims = anno['dimensions'][::-1] # lhw -> hwl\n for idx in range(len(bbox)):\n print(\n '{} -1 -1 {:4f} {:4f} {:4f} {:4f} {:4f} {:4f} '\n '{:4f} {:4f} {:4f} {:4f} {:4f} {:4f} {:4f}'.format(\n anno['name'][idx],\n anno['alpha'][idx],\n *bbox[idx], # 4 float\n *dims[idx], # 3 float\n *loc[idx], # 3 float\n anno['rotation_y'][idx],\n anno['score'][idx]),\n file=f,\n )\n print('Result is saved to {}'.format(submission_prefix))\n\n return det_annos\n\n def convert_valid_bboxes(self, box_dict, info):\n \"\"\"Convert the predicted boxes into valid ones.\n\n Args:\n box_dict (dict): Box dictionaries to be converted.\n\n - boxes_3d (:obj:`LiDARInstance3DBoxes`): 3D bounding boxes.\n - scores_3d (torch.Tensor): Scores of boxes.\n - labels_3d (torch.Tensor): Class labels of boxes.\n info (dict): Data info.\n\n Returns:\n dict: Valid predicted boxes.\n\n - bbox (np.ndarray): 2D bounding boxes.\n - box3d_camera (np.ndarray): 3D bounding boxes in \\\n camera coordinate.\n - box3d_lidar (np.ndarray): 3D bounding boxes in \\\n LiDAR coordinate.\n - scores (np.ndarray): Scores of boxes.\n - label_preds (np.ndarray): Class label predictions.\n - sample_idx (int): Sample index.\n \"\"\"\n # TODO: refactor this function\n box_preds = box_dict['boxes_3d']\n scores = box_dict['scores_3d']\n labels = box_dict['labels_3d']\n sample_idx = info['image']['image_idx']\n # TODO: remove the hack of yaw\n box_preds.tensor[:, -1] = box_preds.tensor[:, -1] - np.pi\n box_preds.limit_yaw(offset=0.5, period=np.pi * 2)\n\n if len(box_preds) == 0:\n return dict(\n bbox=np.zeros([0, 4]),\n box3d_camera=np.zeros([0, 7]),\n box3d_lidar=np.zeros([0, 7]),\n scores=np.zeros([0]),\n label_preds=np.zeros([0, 4]),\n sample_idx=sample_idx)\n\n rect = info['calib']['R0_rect'].astype(np.float32)\n Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)\n P2 = info['calib']['P2'].astype(np.float32)\n img_shape = info['image']['image_shape']\n P2 = box_preds.tensor.new_tensor(P2)\n\n box_preds_camera = box_preds.convert_to(Box3DMode.CAM, rect @ Trv2c)\n\n box_corners = box_preds_camera.corners\n box_corners_in_image = points_cam2img(box_corners, P2)\n # box_corners_in_image: [N, 8, 2]\n minxy = torch.min(box_corners_in_image, dim=1)[0]\n maxxy = torch.max(box_corners_in_image, dim=1)[0]\n box_2d_preds = torch.cat([minxy, maxxy], dim=1)\n # Post-processing\n # check box_preds_camera\n image_shape = box_preds.tensor.new_tensor(img_shape)\n valid_cam_inds = ((box_2d_preds[:, 0] < image_shape[1]) &\n (box_2d_preds[:, 1] < image_shape[0]) &\n (box_2d_preds[:, 2] > 0) & (box_2d_preds[:, 3] > 0))\n # check box_preds\n limit_range = box_preds.tensor.new_tensor(self.pcd_limit_range)\n valid_pcd_inds = ((box_preds.center > limit_range[:3]) &\n (box_preds.center < limit_range[3:]))\n valid_inds = valid_cam_inds & valid_pcd_inds.all(-1)\n\n if valid_inds.sum() > 0:\n return dict(\n bbox=box_2d_preds[valid_inds, :].numpy(),\n box3d_camera=box_preds_camera[valid_inds].tensor.numpy(),\n box3d_lidar=box_preds[valid_inds].tensor.numpy(),\n scores=scores[valid_inds].numpy(),\n label_preds=labels[valid_inds].numpy(),\n 
sample_idx=sample_idx,\n )\n else:\n return dict(\n bbox=np.zeros([0, 4]),\n box3d_camera=np.zeros([0, 7]),\n box3d_lidar=np.zeros([0, 7]),\n scores=np.zeros([0]),\n label_preds=np.zeros([0, 4]),\n sample_idx=sample_idx,\n )\n\n def show(self, results, out_dir, show=True):\n \"\"\"Results visualization.\n\n Args:\n results (list[dict]): List of bounding boxes results.\n out_dir (str): Output directory of visualization result.\n show (bool): Visualize the results online.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n for i, result in enumerate(results):\n example = self.prepare_test_data(i)\n data_info = self.data_infos[i]\n pts_path = data_info['point_cloud']['velodyne_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n # for now we convert points into depth mode\n points = example['points'][0]._data.numpy()\n points = Coord3DMode.convert_point(points, Coord3DMode.LIDAR,\n Coord3DMode.DEPTH)\n gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor\n gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR,\n Box3DMode.DEPTH)\n pred_bboxes = result['boxes_3d'].tensor.numpy()\n pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR,\n Box3DMode.DEPTH)\n show_result(points, gt_bboxes, pred_bboxes, out_dir, file_name,\n show)\n" ]
[ [ "torch.nn.Sequential", "torch.nn.functional.softmax", "torch.Size", "torch.max", "torch.cat", "torch.nn.ModuleList", "torch.min", "torch.sum", "torch.zeros_like", "torch.nonzero", "torch.nn.Conv1d", "torch.stack", "torch.argmax" ], [ "numpy.minimum", "torch.max", "numpy.maximum", "torch.cat", "numpy.linalg.inv", "torch.min", "numpy.stack", "numpy.ones", "numpy.concatenate", "numpy.arctan2", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ferdinandwp/MyPython
[ "46a5b41b1914b3bfab2d18e72cc866acb3842119", "46a5b41b1914b3bfab2d18e72cc866acb3842119" ]
[ "practice_array_manipulation/pd_create_df.py", "practice_charts/8-pair_plot.py" ]
[ "import pandas as pd\n\ndef pretty_print(name, to_print):\n print(f'{name}:')\n print(f'{to_print}\\n\\n')\n\n# Creating Dataframe from Lists\norders = pd.DataFrame(data=[['XB4Z34', 11, 25.50],\n ['SZA1123', 34, 60],\n ['P4FF2S', 2, 123.40],\n ['PL34DS', 10, 1254.23],\n ['PL34DS', 4, 12.4]],\n columns=['Product', 'Qty', 'Price'])\n\n#some basic info - how to get basic information of dataframe\n#change column name if required\norders.columns = ['Product','Qty','Price']\npretty_print('orders',orders)\n\n#convert to string\npretty_print('orders', orders.to_string())\n\n#find total rows of dataframes\npretty_print(\"length of orders\", len(orders))\n\n#get first 3 rows\npretty_print(\"first_three\", orders.head(3))\n\n#columns\npretty_print(\"orders_columm\",orders.columns)\n\n#index\npretty_print(\"orders_index\",orders.index)\n\n#datatype\npretty_print(\"orders_dtype\", orders.dtypes)\n\n#shape\npretty_print(\"orders_shape\", orders.shape)\n\n#summarize info\npretty_print(\"orders_summary\", orders.info())\n\n#describe dataframe\npretty_print(\"orders_desc\", orders.describe())\n\n\n#extract values from dataframes\n#sort by criteria\npretty_print('orders',orders)\npretty_print('by_price', orders.sort_values(by='Qty'))\npretty_print('by_price', orders.sort_values(by='Price'))\n\n#count rows by criteria (grouping data)\npretty_print('by_Product', orders['Product'].value_counts())\npretty_print('by_Qty', orders['Qty'].value_counts())\npretty_print('by_Price', orders['Price'].value_counts())\n\n#find null values\npretty_print('orders_null', orders.isnull())\n\n#unique value count for each criteria\npretty_print('orders_unique', orders.nunique())\npretty_print('Orders_unique_Qty', orders['Qty'].unique())\n", "import pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport seaborn as sns\n\n#Iris Pairplot\ndf = pd.read_csv('data/iris.data',sep=',',header=None)\ndf.columns = ['sep_len', 'sep_wid', 'pet_len', 'pet_wid','class']\n\nsns.pairplot(df, hue='class', diag_kind='hist')\n# plt.savefig('plots/15-seaborn_pairplot/iris_pairplot.png')\nplt.show()\nplt.close()\n\n" ]
[ [ "pandas.DataFrame" ], [ "matplotlib.pyplot.close", "pandas.read_csv", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
armavox/rls-med
[ "d653c1277b4d8be0585a7128faf8f71f9f13ec60" ]
[ "src/models/auxiliary.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\n\nclass ConvGRUCell(nn.Module):\n \"\"\"\n Generate a convolutional GRU cell\n \"\"\"\n\n def __init__(self, input_size, hidden_size, kernel_size):\n super().__init__()\n padding = kernel_size // 2\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.reset_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)\n self.update_gate = nn.Conv2d(\n input_size + hidden_size, hidden_size, kernel_size, padding=padding\n )\n self.out_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)\n\n init.orthogonal(self.reset_gate.weight)\n init.orthogonal(self.update_gate.weight)\n init.orthogonal(self.out_gate.weight)\n init.constant(self.reset_gate.bias, 0.0)\n init.constant(self.update_gate.bias, 0.0)\n init.constant(self.out_gate.bias, 0.0)\n\n def forward(self, input_, prev_state):\n\n # get batch and spatial sizes\n batch_size = input_.data.size()[0]\n spatial_size = input_.data.size()[2:]\n\n # generate empty prev_state, if None is provided\n if prev_state is None:\n state_size = [batch_size, self.hidden_size] + list(spatial_size)\n prev_state = torch.zeros(state_size).cuda()\n\n # data size is [batch, channel, height, width]\n stacked_inputs = torch.cat([input_, prev_state], dim=1)\n update = F.sigmoid(self.update_gate(stacked_inputs))\n reset = F.sigmoid(self.reset_gate(stacked_inputs))\n out_inputs = F.tanh(self.out_gate(torch.cat([input_, prev_state * reset], dim=1)))\n new_state = prev_state * (1 - update) + out_inputs * update\n\n return new_state\n\n\ndef gru_rls_cell(input, hidden, uzr, wzr, bz, uo, wo, bo):\n\n gates = F.linear(input, uzr) + F.linear(hidden, wzr, bz)\n resetgate, updategate = gates.chunk(2, 1)\n resetgate = torch.sigmoid(resetgate)\n updategate = torch.sigmoid(updategate)\n\n outgate = F.linear(input, uo) + F.linear(hidden * resetgate, wo, bo)\n outgate = torch.tanh(outgate)\n\n return updategate * hidden + (1 - updategate) * outgate\n\n\ndef lstm_cell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n\n hx, cx = hidden # w_ih: (256, 4), b_ih: (256); w_hh: (256, 64), b_hh: (256)\n gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)\n\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n\n ingate = torch.sigmoid(ingate)\n forgetgate = torch.sigmoid(forgetgate)\n cellgate = torch.tanh(cellgate)\n outgate = torch.sigmoid(outgate)\n\n cy = (forgetgate * cx) + (ingate * cellgate)\n hy = outgate * torch.tanh(cy)\n\n return hy, cy\n" ]
[ [ "torch.sigmoid", "torch.cat", "torch.zeros", "torch.nn.Conv2d", "torch.tanh", "torch.nn.init.orthogonal", "torch.nn.init.constant", "torch.nn.functional.linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Takahiro-Funahashi/o-reilly-deep-learning-clone
[ "043bfe69a4bec7c6063fd8b71d337f99f13ad26e" ]
[ "ch05/gradient_check.py" ]
[ "# coding: utf-8\nimport numpy as np\nimport sys\n\nsys.path.append('./ch05/')\nsys.path.append('./dataset/')\n\nif __name__ == '__main__':\n from two_layer_net import TwoLayerNet\n from mnist import load_mnist\n\n # データの読み込み\n (x_train, t_train), (x_test, t_test) = load_mnist(\n normalize=True, one_hot_label=True)\n\n network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)\n\n x_batch = x_train[:3]\n t_batch = t_train[:3]\n\n grad_numerical = network.numerical_gradient(x_batch, t_batch)\n grad_backprop = network.gradient(x_batch, t_batch)\n\n for key in grad_numerical.keys():\n diff = np.average(np.abs(grad_backprop[key] - grad_numerical[key]))\n print(key + \":\" + str(diff))\n" ]
[ [ "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mrtzh/folktables
[ "5dd61a1b650adbf5afdb9a2cdae76903e1017c66" ]
[ "folktables/load_acs.py" ]
[ "\"\"\"Load ACS PUMS data from Census CSV files.\"\"\"\nimport os\nimport random\nimport io\nimport requests\nimport zipfile\n\nimport numpy as np\nimport pandas as pd\n\n\nstate_list = ['AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA', 'HI',\n 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD', 'MA', 'MI',\n 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM', 'NY', 'NC',\n 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT',\n 'VT', 'VA', 'WA', 'WV', 'WI', 'WY', 'PR']\n\n\n_STATE_CODES = {'AL': '01', 'AK': '02', 'AZ': '04', 'AR': '05', 'CA': '06',\n 'CO': '08', 'CT': '09', 'DE': '10', 'FL': '12', 'GA': '13',\n 'HI': '15', 'ID': '16', 'IL': '17', 'IN': '18', 'IA': '19',\n 'KS': '20', 'KY': '21', 'LA': '22', 'ME': '23', 'MD': '24',\n 'MA': '25', 'MI': '26', 'MN': '27', 'MS': '28', 'MO': '29',\n 'MT': '30', 'NE': '31', 'NV': '32', 'NH': '33', 'NJ': '34',\n 'NM': '35', 'NY': '36', 'NC': '37', 'ND': '38', 'OH': '39',\n 'OK': '40', 'OR': '41', 'PA': '42', 'RI': '44', 'SC': '45',\n 'SD': '46', 'TN': '47', 'TX': '48', 'UT': '49', 'VT': '50',\n 'VA': '51', 'WA': '53', 'WV': '54', 'WI': '55', 'WY': '56',\n 'PR': '72'}\n\n\ndef download_and_extract(url, datadir, remote_fname, file_name, delete_download=False):\n \"\"\"Helper function to download and unzip files.\"\"\"\n download_path = os.path.join(datadir, remote_fname)\n response = requests.get(url)\n with open(download_path, 'wb') as handle:\n handle.write(response.content)\n \n with zipfile.ZipFile(download_path, 'r') as zip_ref:\n zip_ref.extract(file_name, path=datadir)\n \n if delete_download and download_path != os.path.join(datadir, file_name):\n os.remove(download_path)\n\n\ndef initialize_and_download(datadir, state, year, horizon, survey, download=False):\n \"\"\"Download the dataset (if required).\"\"\"\n assert horizon in ['1-Year', '5-Year']\n assert int(year) >= 2014\n assert state in state_list\n assert survey in ['person', 'household']\n\n state_code = _STATE_CODES[state]\n survey_code = 'p' if survey == 'person' else 'h'\n if int(year) >= 2017:\n file_name = f'psam_{survey_code}{state_code}.csv'\n else:\n # 2016 and earlier use different file names\n file_name = f'ss{str(year)[-2:]}{survey_code}{state.lower()}.csv'\n \n # Assume is the path exists and is a file, then it has been downloaded\n # correctly\n file_path = os.path.join(datadir, file_name)\n if os.path.isfile(file_path):\n return file_path\n if not download:\n raise FileNotFoundError(f'Could not find {year} {horizon} {survey} survey data for {state} in {datadir}. Call get_data with download=True to download the dataset.')\n \n print(f'Downloading data for {year} {horizon} {survey} survey for {state}...')\n # Download and extract file\n base_url= f'https://www2.census.gov/programs-surveys/acs/data/pums/{year}/{horizon}'\n remote_fname = f'csv_{survey_code}{state.lower()}.zip'\n url = os.path.join(base_url, remote_fname)\n try:\n download_and_extract(url, datadir, remote_fname, file_name, delete_download=True)\n except Exception as e:\n print(f'\\n{os.path.join(datadir, remote_fname)} may be corrupted. 
Please try deleting it and rerunning this command.\\n')\n print(f'Exception: ', e)\n\n return file_path\n\n\ndef load_acs(root_dir, states=None, year=2018, horizon='1-Year',\n survey='person', density=1, random_seed=1,\n serial_filter_list=None,\n download=False):\n \"\"\"\n Load sample of ACS PUMS data from Census csv files into DataFrame.\n\n If a serial filter list is passed in, density and random_seed are ignored\n and the output is instead filtered with the provided list (only entries with\n a serial number in the list are kept).\n \"\"\"\n if int(year) < 2014:\n raise ValueError('Year must be >= 2014')\n\n if serial_filter_list is not None:\n serial_filter_list = set(serial_filter_list) # set for faster membership check\n\n if states is None:\n states = state_list\n \n random.seed(random_seed)\n \n base_datadir = os.path.join(root_dir, str(year), horizon)\n os.makedirs(base_datadir, exist_ok=True)\n \n file_names = []\n for state in states:\n file_names.append(\n initialize_and_download(base_datadir, state, year, horizon, survey, download=download)\n )\n\n sample = io.StringIO()\n\n first = True\n \n for file_name in file_names:\n \n with open(file_name, 'r') as f:\n \n if first:\n sample.write(next(f))\n first = False\n else:\n next(f)\n\n if serial_filter_list is None:\n for line in f:\n if random.uniform(0, 1) < density:\n # strip whitespace found in some early files\n sample.write(line.replace(' ',''))\n else:\n for line in f:\n serialno = line.split(',')[1]\n if serialno in serial_filter_list:\n # strip whitespace found in some early files\n sample.write(line.replace(' ',''))\n\n \n sample.seek(0)\n \n dtypes = {'PINCP' : np.float64, 'RT' : str, 'SOCP' : str, 'SERIALNO' : str, 'NAICSP' : str}\n \n return pd.read_csv(sample, dtype=dtypes)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
ibara1454/pyss
[ "69b47d89f88f04876bdabd504d202a1ced7bb5e4", "69b47d89f88f04876bdabd504d202a1ced7bb5e4" ]
[ "pyss/util/coefficient.py", "mpi_test/test_pyss.py" ]
[ "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport numpy.linalg\n\n\ndef newton_cotes_coeff(h, n, tr=None):\n \"\"\"Return array of weights of Newton-Cotes method.\n\n Parameters\n ----------\n h : float\n Length of each two nearest sampling points.\n n : int\n Degree of interpolate polynomial using in Newton-Cotes method.\n tr : int, optional\n Degree of interpolate polynomial using in low rank approaximation.\n `tr` should lower or equal to `n`.\n\n Returns\n -------\n ws : ndarray\n Array of weights of Newton-Cotes method with (`n`) sampling points\n \"\"\"\n if tr is None:\n tr = n\n a_i = np.arange(n + 1) * h\n b_i = lambda i: (n * h) ** i / i\n A = np.vstack(a_i ** i for i in range(tr + 1))\n b = np.array([b_i(i + 1) for i in range(tr + 1)]).T\n # using least square solver instead of multiplying pseudo inverse\n x, res, rank, sigv = np.linalg.lstsq(A, b)\n return x\n\n\ndef composite_newton_cotes_coeff(h, n, cmpstn, tr=None, contour=False):\n \"\"\"Return array of weights of composite Newton-Cotes method.\n\n Parameters\n ----------\n h : float\n Length of each two nearest sampling points.\n n : int\n Number of sampling points of non-composite Newton-Cotes method in this\n composite method. Counted from 0.\n `n` should be a factor of `cmpstn`.\n cmpstn : int\n Number of all sampling points in this composite method. Counted from 0.\n `cmpstn` should be a multiple of `n`.\n\n Returns\n -------\n ws : ndarray\n Array of weights of Newton-Cotes method with `cmpstn` sampling points\n \"\"\"\n if contour is False and (cmpstn - 1) % n != 0:\n raise ValueError\n if contour is True and cmpstn % n != 0:\n raise ValueError\n ws = np.zeros(cmpstn).T\n basews = newton_cotes_coeff(h, n, tr)\n loops = int((cmpstn if contour else cmpstn - 1) / n)\n\n begin = 0\n for l in range(loops):\n ws = __add_base_coeff_to_composite_coeff(ws, basews, begin)\n begin = begin + n\n return ws\n\n\ndef __add_base_coeff_to_composite_coeff(cmpstws, basews, begin):\n n = cmpstws.size\n for i in range(basews.size):\n cmpstws[(begin + i) % n] = cmpstws[(begin + i) % n] + basews[i]\n return cmpstws\n\n\ndef stable_newton_cotes_coeff(h, n, tr):\n \"\"\"Return array of weights of Stable Newton-Cotes method.\n\n Parameters\n ----------\n h : float\n Length of each two nearest sampling points.\n n : int\n Number of sampling points. Counted from 0.\n tr : int\n Degree of interpolate polynomial using in Newton-Cotes method.\n `tr` should lower or equal to `n`.\n\n Returns\n -------\n ws : ndarray\n Array of weights of Newton-Cotes method with (`n`+1) sampling points\n \"\"\"\n a_1 = np.linspace(start=0, stop=n * h, num=n + 1, endpoint=True)\n b_i = lambda i: (n * h) ** i / i\n A = np.vstack(a_1 ** i for i in range(tr + 1))\n b = np.array([b_i(i + 1) for i in range(tr + 1)]).T\n return np.linalg.pinv(A).dot(b)\n\n\ndef composite_stable_newton_cotes_coeff(h, n, tr, cmpstn, contour=False):\n \"\"\"Return array of weights of composite stable Newton-Cotes method.\n\n Parameters\n ----------\n h : float\n Length of each two nearest sampling points.\n n : int\n Number of sampling points of non-composite Newton-Cotes method in this\n composite method. Counted from 0.\n `n` should be a factor of `cmpstn`.\n tr : int\n Degree of interpolate polynomial using in Newton-Cotes method.\n `tr` should lower or equal to `n`.\n cmpstn : int\n Number of all sampling points in this composite method. 
Counted from 0.\n `cmpstn` should be a multiple of `n`.\n\n Returns\n -------\n ws : ndarray\n Array of weights of Newton-Cotes method with (`cmpstn`+1) sampling\n points.\n \"\"\"\n xs = np.zeros(cmpstn + 1).T\n basexs = stable_newton_cotes_coeff(h, n, tr) # vector of length n+1\n for i in range(0, cmpstn, n):\n xs[i:i + n + 1] = xs[i:i + n + 1] + basexs\n return xs\n\n", "import scipy.io\nimport numpy\nfrom pyss.util.contour import Circle, Ellipse\n\nA = scipy.io.mmread(\"matrix/cage4.mtx\")\nB = scipy.sparse.eye(9)\n\ncontour = Ellipse(real=200, imag=0.3, shift=900)\n\n\ndef create_source(x, y):\n return numpy.eye(x, y)\n\noption = {'l': 2, 'm': 1, 'n': 12, 'source': create_source}\n" ]
[ [ "numpy.linspace", "numpy.arange", "numpy.linalg.lstsq", "numpy.linalg.pinv", "numpy.zeros" ], [ "numpy.eye" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HeegonJin/ssd_face_mask_detection
[ "0e5c91f7d286cf1e03f7d718958d7ff957b0062d" ]
[ "train.py" ]
[ "from data import *\nfrom utils.augmentations import SSDAugmentation\nfrom layers.modules import MultiBoxLoss\nfrom ssd import build_ssd\nimport os\nimport os.path as osp\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data as data\n\nimport argparse\n\nfrom tensorboardX import SummaryWriter\n\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nparser = argparse.ArgumentParser(\n description='Single Shot MultiBox Detector Training With Pytorch')\ntrain_set = parser.add_mutually_exclusive_group()\nparser.add_argument('--dataset', default='VOC', choices=['VOC', 'COCO'],\n type=str, help='VOC or COCO')\nparser.add_argument('--dataset_root', default=VOC_ROOT,\n help='Dataset root directory path')\nparser.add_argument('--basenet', default='vgg16_reducedfc.pth',\n help='Pretrained base model')\nparser.add_argument('--batch_size', default=32, type=int,\n help='Batch size for training')\nparser.add_argument('--resume', default=osp.join(osp.dirname(osp.abspath(__file__)), 'weights', 'VOC_weighted.pth'), type=str,\n help='Checkpoint state_dict file to resume training from')\nparser.add_argument('--start_iter', default=0, type=int,\n help='Resume training at this iter')\nparser.add_argument('--num_workers', default=4, type=int,\n help='Number of workers used in dataloading')\nparser.add_argument('--cuda', default=True, type=str2bool,\n help='Use CUDA to train model')\nparser.add_argument('--lr', '--learning-rate', default=1e-5, type=float,\n help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float,\n help='Momentum value for optim')\nparser.add_argument('--weight_decay', default=5e-4, type=float,\n help='Weight decay for SGD')\nparser.add_argument('--gamma', default=0.1, type=float,\n help='Gamma update for SGD')\nparser.add_argument('--visdom', default=False, type=str2bool,\n help='Use visdom for loss visualization')\nparser.add_argument('--save_folder', default='weights/',\n help='Directory for saving checkpoint models')\nargs = parser.parse_args()\n\nif torch.cuda.is_available():\n\n if args.cuda:\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n if not args.cuda:\n print(\"WARNING: It looks like you have a CUDA device, but aren't \" +\n \"using CUDA.\\nRun with --cuda for optimal training speed.\")\n torch.set_default_tensor_type('torch.FloatTensor')\nelse:\n torch.set_default_tensor_type('torch.FloatTensor')\n\nif not os.path.exists(args.save_folder):\n os.mkdir(args.save_folder)\n\n\ndef train():\n writer = SummaryWriter()\n\n cfg = voc\n dataset = VOCDetection(root=args.dataset_root,\n transform=SSDAugmentation(cfg['min_dim'],\n MEANS))\n\n ssd_net = build_ssd('train', cfg['min_dim'], cfg['num_classes'])\n net = ssd_net\n\n if args.visdom:\n import visdom\n viz = visdom.Visdom()\n\n if args.cuda:\n net = torch.nn.DataParallel(ssd_net)\n cudnn.benchmark = True\n\n if args.resume:\n print('Resuming training, loading {}...'.format(args.resume))\n ssd_net.load_weights(args.resume)\n else:\n vgg_weights = torch.load(args.save_folder + args.basenet)\n print('Loading base network...')\n ssd_net.vgg.load_state_dict(vgg_weights)\n\n if args.cuda:\n net = net.cuda()\n\n if not args.resume:\n print('Initializing weights...')\n # initialize newly added layers' weights with xavier method\n ssd_net.extras.apply(weights_init)\n ssd_net.loc.apply(weights_init)\n ssd_net.conf.apply(weights_init)\n\n optimizer = 
optim.AdamW(net.parameters(), lr=args.lr)\n # optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,\n # weight_decay=args.weight_decay)\n criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,\n False, args.cuda)\n\n net.train()\n # loss counters\n loc_loss = 0\n conf_loss = 0\n epoch = 0\n print('Loading the dataset...')\n\n epoch_size = len(dataset) // args.batch_size\n print('Training SSD on:', dataset.name)\n print('Using the specified args:')\n print(args)\n\n step_index = 0\n\n data_loader = torch.utils.data.DataLoader(dataset, args.batch_size,\n num_workers=args.num_workers,\n shuffle=True, collate_fn=detection_collate,\n pin_memory=True)\n # create batch iterator\n for epoch in range(10000):\n\n for iteration, data_ in enumerate(data_loader):\n\n # if iteration in cfg['lr_steps']:\n # step_index += 1\n # adjust_learning_rate(optimizer, args.gamma, step_index)\n\n # load train data\n images, targets = data_\n\n if args.cuda:\n images = images.cuda()\n targets = [ann.cuda().detach() for ann in targets]\n else:\n images = Variable(images)\n targets = [ann.detach() for ann in targets]\n # forward\n out = net(images)\n # backprop\n optimizer.zero_grad()\n loss_l, loss_c = criterion(out, targets)\n loss = loss_l + loss_c\n loss.backward()\n optimizer.step()\n\n loc_loss += loss_l.item()\n conf_loss += loss_c.item()\n\n # torch.save(ssd_net.state_dict(),\n # args.save_folder + '' + args.dataset + '.pth')\n torch.save(ssd_net.state_dict(),\n args.save_folder + '' + 'VOC_weighted' + '.pth')\n writer.add_scalar(\"epoch location loss\", conf_loss, epoch)\n writer.add_scalar(\"epoch classification loss\", loc_loss, epoch)\n writer.add_scalar(\"epoch total loss\", loss.item(), epoch)\n writer.flush()\n loc_loss = 0\n conf_loss = 0\n writer.close()\n\ndef adjust_learning_rate(optimizer, gamma, step):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 at every\n specified step\n # Adapted from PyTorch Imagenet example:\n # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n \"\"\"\n lr = args.lr * (gamma ** (step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef xavier(param):\n nn.init.xavier_uniform_(param)\n\n\ndef weights_init(m):\n if isinstance(m, nn.Conv2d):\n xavier(m.weight.data)\n m.bias.data.zero_()\n\n\ndef create_vis_plot(_xlabel, _ylabel, _title, _legend):\n return viz.line(\n X=torch.zeros((1,)).cpu(),\n Y=torch.zeros((1, 3)).cpu(),\n opts=dict(\n xlabel=_xlabel,\n ylabel=_ylabel,\n title=_title,\n legend=_legend\n )\n )\n\n\ndef update_vis_plot(iteration, loc, conf, window1, window2, update_type,\n epoch_size=1):\n viz.line(\n X=torch.ones((1, 3)).cpu() * iteration,\n Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu() / epoch_size,\n win=window1,\n update=update_type\n )\n # initialize epoch plot on first iteration\n if iteration == 0:\n viz.line(\n X=torch.zeros((1, 3)).cpu(),\n Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu(),\n win=window2,\n update=True\n )\n\n\nif __name__ == '__main__':\n train()\n" ]
[ [ "torch.set_default_tensor_type", "torch.ones", "torch.Tensor", "torch.load", "torch.zeros", "torch.utils.data.DataLoader", "torch.cuda.is_available", "torch.nn.init.xavier_uniform_", "torch.nn.DataParallel", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
piotrjurkiewicz/flow_stats
[ "cc97a8381275cb9dd23ed0c3432abffaf4198431" ]
[ "flow_models/lib/kde.py" ]
[ "# Taken from SciPy\n# scipy/stats/kde.py at 79ed161bf603dc3af3986efe7064df79212c4dd4\n\n# Weighted KDE computation based on:\n# https://stackoverflow.com/a/27623920/2178047\n# https://gist.github.com/tillahoffmann/f844bce2ec264c1c8cb5\n\n# Weighted KDE computation using FFT based on:\n# https://github.com/scipy/scipy/issues/6176\n# https://github.com/michaelhb/superplot/blob/master/superplot/statslib/kde.py\n\nfrom __future__ import division, print_function, absolute_import\n\n# Standard library imports.\nimport warnings\n\n# Scipy imports.\nfrom scipy import linalg, special\nfrom scipy.special import logsumexp\nfrom scipy.stats import norm, multivariate_normal\nfrom scipy.signal import fftconvolve\nfrom scipy.interpolate import interp1d, RegularGridInterpolator\n\nfrom numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \\\n ravel, power, atleast_1d, squeeze, sum, transpose\nimport numpy as np\n\n\n__all__ = ['gaussian_kde']\n\n\nclass gaussian_kde(object):\n def __init__(self, dataset, bw_method=None, weights=None, fft=True, extend=True, interp_method='nearest'):\n self.dataset = atleast_2d(dataset)\n if not self.dataset.size > 1:\n raise ValueError(\"`dataset` input should have multiple elements.\")\n\n self.d, self.n = self.dataset.shape\n\n if weights is not None:\n self.weights = weights / np.sum(weights)\n else:\n self.weights = np.ones(self.n) / self.n\n\n # Compute the effective sample size\n # http://surveyanalysis.org/wiki/Design_Effects_and_Effective_Sample_Size#Kish.27s_approximate_formula_for_computing_effective_sample_size\n self.neff = 1.0 / np.sum(self.weights ** 2)\n self.fft = fft\n self.extend = extend\n self.interp_method = interp_method\n\n self.set_bandwidth(bw_method=bw_method)\n\n def evaluate(self, points):\n if self.fft:\n return self.evaluate_fft(points)\n else:\n return self.evaluate_not_fft(points)\n\n def evaluate_not_fft(self, points):\n points = atleast_2d(points)\n\n d, m = points.shape\n if d != self.d:\n if d == 1 and m == self.d:\n # points was passed in as a row vector\n points = reshape(points, (self.d, 1))\n m = 1\n else:\n msg = \"points have dimension %s, dataset has dimension %s\" % (d,\n self.d)\n raise ValueError(msg)\n\n result = zeros((m,), dtype=float)\n\n whitening = linalg.cholesky(self.inv_cov)\n scaled_dataset = dot(whitening, self.dataset)\n scaled_points = dot(whitening, points)\n\n if m >= self.n:\n # there are more points than data, so loop over data\n for i in range(self.n):\n diff = scaled_dataset[:, i, newaxis] - scaled_points\n energy = sum(diff * diff, axis=0) / 2.0\n result += exp(-energy) * self.weights[i]\n else:\n # loop over points\n for i in range(m):\n diff = scaled_dataset - scaled_points[:, i, newaxis]\n energy = sum(diff * diff, axis=0) / 2.0\n result[i] = sum(exp(-energy) * self.weights, axis=0)\n\n result = result / self._norm_factor\n\n return result\n\n def evaluate_fft(self, points):\n\n if self.d == 1:\n\n binned_pdf, bin_centers = self._bin_dataset(points)\n mean_bin = np.mean(bin_centers)\n\n def gauss_kernel(x):\n \"\"\" 1D Gaussian kernel. 
\"\"\"\n return norm.pdf(x, loc=mean_bin, scale=self.det_cov**0.5)\n\n gauss_bin_centers = gauss_kernel(bin_centers)\n\n pdf = fftconvolve(binned_pdf, gauss_bin_centers, mode='same')\n pdf = np.real(pdf)\n\n bin_width = bin_centers[1] - bin_centers[0]\n pdf /= pdf.sum() * bin_width\n\n kde = interp1d(bin_centers,\n pdf,\n bounds_error=False,\n fill_value=None)\n\n return np.array([kde(x) for x in points]) # max?\n\n elif self.d == 2:\n\n binned_pdf, (bin_centers_x, bin_centers_y) = self._bin_dataset(points)\n mean_bin = [np.mean(bin_centers_x), np.mean(bin_centers_y)]\n\n def gauss_kernel(x):\n \"\"\" 2D Gaussian kernel. \"\"\"\n return multivariate_normal.pdf(x, mean=mean_bin, cov=self.covariance)\n\n grid_x, grid_y = np.meshgrid(bin_centers_x, bin_centers_y)\n grid = np.column_stack([grid_x.flatten(), grid_y.flatten()])\n\n gauss_bin_centers = gauss_kernel(grid)\n gauss_bin_centers = np.reshape(gauss_bin_centers, binned_pdf.shape, order='F')\n\n pdf = fftconvolve(binned_pdf, gauss_bin_centers, mode='same')\n pdf = np.real(pdf)\n\n bin_width_x = bin_centers_x[1] - bin_centers_x[0]\n bin_width_y = bin_centers_y[1] - bin_centers_y[0]\n bin_vol = bin_width_x * bin_width_y\n pdf /= pdf.sum() * bin_vol\n\n kde = RegularGridInterpolator((bin_centers_x, bin_centers_y),\n pdf,\n method=self.interp_method,\n bounds_error=False,\n fill_value=None)\n\n return kde(points.T) # max?\n\n else:\n raise ValueError(\"FFT only implemented in 1 or 2 dimesions\")\n\n __call__ = evaluate\n\n def integrate_gaussian(self, mean, cov):\n raise NotImplementedError\n\n def integrate_box_1d(self, low, high):\n raise NotImplementedError\n\n def integrate_box(self, low_bounds, high_bounds, maxpts=None):\n raise NotImplementedError\n\n def integrate_kde(self, other):\n raise NotImplementedError\n\n def resample(self, size=None):\n raise NotImplementedError\n\n def scotts_factor(self):\n return power(self.neff, -1./(self.d+4))\n\n def silverman_factor(self):\n return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))\n\n # Default method to calculate bandwidth, can be overwritten by subclass\n covariance_factor = scotts_factor\n\n def set_bandwidth(self, bw_method=None):\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n # Compute the mean and residuals\n _mean = sum(self.weights * self.dataset, axis=1)\n _residual = (self.dataset - _mean[:, None])\n # Compute the biased covariance\n self._data_covariance = np.atleast_2d(np.dot(_residual * self.weights, _residual.T))\n # Correct for bias (http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_covariance)\n self._data_covariance /= (1 - sum(self.weights ** 2))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n # Scale by bandwidth\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / 
self.factor**2\n self._norm_factor = sqrt(linalg.det(2 * pi * self.covariance))\n\n # Determinant of covariance matrix\n self.det_cov = np.linalg.det(self.covariance)\n\n def _bin_dataset(self, points):\n\n if self.d == 1:\n\n nbins = self.n\n xmin, xmax = np.min(points[0]), np.max(points[0])\n binned_pdf, bin_edges = np.histogram(self.dataset[0],\n bins=nbins,\n range=(xmin, xmax) if self.extend else None,\n normed=True,\n weights=self.weights)\n bin_centers = np.array((bin_edges[:-1] + bin_edges[1:]) * 0.5)\n\n elif self.d == 2:\n\n nbins = int(self.n**0.5)\n xmin, xmax = np.min(points[0]), np.max(points[0])\n ymin, ymax = np.min(points[1]), np.max(points[1])\n binned_pdf, bin_edges_x, bin_edges_y = np.histogram2d(self.dataset[0],\n self.dataset[1],\n bins=nbins,\n range=((xmin, xmax), (ymin, ymax)) if self.extend else None,\n normed=True,\n weights=self.weights)\n bin_centers_x = 0.5 * (bin_edges_x[:-1] + bin_edges_x[1:])\n bin_centers_y = 0.5 * (bin_edges_y[:-1] + bin_edges_y[1:])\n bin_centers = [np.array(bin_centers_x), np.array(bin_centers_y)]\n\n else:\n raise ValueError(\"Bining only implemented in 1 or 2 dimesions\")\n\n return binned_pdf, bin_centers\n\n def pdf(self, x):\n return self.evaluate(x)\n\n def logpdf(self, x):\n raise NotImplementedError\n" ]
[ [ "numpy.dot", "numpy.max", "numpy.mean", "numpy.exp", "numpy.histogram", "numpy.reshape", "scipy.interpolate.RegularGridInterpolator", "numpy.linalg.det", "numpy.real", "scipy.interpolate.interp1d", "scipy.linalg.inv", "numpy.zeros", "scipy.signal.fftconvolve", "numpy.power", "scipy.linalg.det", "numpy.min", "numpy.atleast_2d", "numpy.meshgrid", "numpy.array", "numpy.sum", "numpy.histogram2d", "scipy.stats.norm.pdf", "numpy.ones", "scipy.linalg.cholesky", "numpy.isscalar", "scipy.stats.multivariate_normal.pdf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AndreasZS/pychop3d
[ "5261e0b491d25f1824b0a541981dfd3887faf9e4" ]
[ "pychop3d/configuration.py" ]
[ "import numpy as np\nimport yaml\nimport os\n\n\nclass Configuration:\n \"\"\"\n This class will hold all of the configuration settings for a run. It will not hold any BSPTree data or mesh data.\n The settings will be saved and loaded from YAML files. Maybe later implement this as a singleton if necessary. For\n now the only object of this class will be stored in the class itself.\n \"\"\"\n\n config = None\n\n def __init__(self, config_path=None):\n self.restore_defaults()\n\n # open config YAML\n if config_path is not None:\n self.load(config_path)\n\n def load(self, path):\n self.directory = os.path.dirname(path)\n with open(path) as f:\n config_file = yaml.safe_load(f)\n for key, value in config_file.items():\n setattr(self, key, value)\n self.printer_extents = np.array(self.printer_extents, dtype=float)\n\n @property\n def n_theta(self):\n return self._n_theta\n\n @n_theta.setter\n def n_theta(self, value):\n self._n_theta = value\n self.normals = self.uniform_normals()\n\n @property\n def n_phi(self):\n return self._n_phi\n\n @n_phi.setter\n def n_phi(self, value):\n self._n_phi = value\n self.normals = self.uniform_normals()\n\n def restore_defaults(self):\n self.name = \"chopped\"\n self.do_not_save = ['normals']\n # printer parameters\n self.printer_extents = np.array([200, 200, 200], dtype=float)\n # plane selection parameters\n self.plane_spacing = 20\n self._n_theta = 5\n self._n_phi = 5\n self.normals = self.uniform_normals()\n self.add_middle_plane = True\n # plane uniqueness parameters\n self.different_origin_th = float(.1 * np.sqrt(np.sum(self.printer_extents ** 2)))\n self.different_angle_th = np.pi / 10\n # objective parameters\n self.objective_weights = {\n 'part': 1,\n 'utilization': .25,\n 'connector': 1,\n 'fragility': 1,\n 'seam': 0, # set to zero until implemented\n 'symmetry': 0 # set to zero until implemented\n }\n self.fragility_objective_th = .95\n self.connector_objective_th = 10\n self.obb_utilization = False\n # connector placement parameters\n self.connector_collision_penalty = 10 ** 10\n self.empty_cc_penalty = 10**-5\n self.sa_initial_connector_ratio = .1\n self.sa_initialization_iterations = 10_000\n self.sa_iterations = 300_000\n # connector settings\n self.connector_diameter_min = 5\n self.connector_diameter_max = 30\n self.connector_diameter = 5\n self.connector_tolerance = 1\n self.connector_spacing = 10\n # run settings\n self.mesh = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'test', 'test_meshes', 'Bunny-LowPoly.stl'))\n self._directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'debug'))\n self.save_path = os.path.abspath(os.path.join(self.directory, 'config.yml'))\n self.scale_factor = -1\n self.beam_width = 5\n self.part_separation = False\n\n @property\n def directory(self):\n return self._directory\n\n @directory.setter\n def directory(self, value):\n self._directory = value\n save_name = os.path.basename(self.save_path)\n self.save_path = os.path.abspath(os.path.join(self.directory, save_name))\n\n def uniform_normals(self):\n \"\"\"http://corysimon.github.io/articles/uniformdistn-on-sphere/\n \"\"\"\n theta = np.arange(0, np.pi, np.pi / self.n_theta)\n phi = np.arccos(np.arange(0, 1, 1 / self.n_phi))\n theta, phi = np.meshgrid(theta, phi)\n theta = theta.ravel()\n phi = phi.ravel()\n return np.stack((np.sin(phi) * np.cos(theta), np.sin(phi) * np.sin(theta), np.cos(phi)), axis=1)\n\n def save(self, filename=None):\n \"\"\"saves the config file\n\n from pyyaml docs 
https://pyyaml.org/wiki/PyYAMLDocumentation:\n safe_dump_all produces only standard YAML tags and cannot represent an arbitrary Python object.\n\n therefore the save data will convert any numpy array to list first\n \"\"\"\n if filename is not None:\n self.save_path = os.path.abspath(os.path.join(self.directory, filename))\n\n save_data = {}\n for key, value in self.__dict__.items():\n if key in self.do_not_save:\n continue\n if isinstance(value, np.ndarray):\n save_data[key] = [float(v) for v in value]\n else:\n save_data[key] = value\n\n with open(self.save_path, 'w') as f:\n yaml.dump(save_data, f)\n\n return self.save_path\n\n\nconfig = Configuration()\nConfiguration.config = config\n" ]
[ [ "numpy.meshgrid", "numpy.arange", "numpy.cos", "numpy.sin", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ChrisBNEU/RMG-Py
[ "5fd68810cfb45a666dc223df0720e6dd8314139f" ]
[ "rmgpy/rmg/model.py" ]
[ "#!/usr/bin/env python3\n\n###############################################################################\n# #\n# RMG - Reaction Mechanism Generator #\n# #\n# Copyright (c) 2002-2021 Prof. William H. Green ([email protected]), #\n# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a #\n# copy of this software and associated documentation files (the 'Software'), #\n# to deal in the Software without restriction, including without limitation #\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, #\n# and/or sell copies of the Software, and to permit persons to whom the #\n# Software is furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in #\n# all copies or substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #\n# DEALINGS IN THE SOFTWARE. #\n# #\n###############################################################################\n\n\"\"\"\nContains classes for working with the reaction model generated by RMG.\n\"\"\"\n\nimport gc\nimport itertools\nimport logging\nimport os\n\nimport numpy as np\n\nimport rmgpy.data.rmg\nfrom rmgpy import settings\nfrom rmgpy.constraints import fails_species_constraints\nfrom rmgpy.data.kinetics.depository import DepositoryReaction\nfrom rmgpy.data.kinetics.family import KineticsFamily, TemplateReaction\nfrom rmgpy.data.kinetics.library import KineticsLibrary, LibraryReaction\n\nfrom rmgpy.molecule.group import Group\nfrom rmgpy.kinetics import KineticsData, Arrhenius\n\nfrom rmgpy.data.rmg import get_db\nfrom rmgpy.display import display\nfrom rmgpy.exceptions import ForbiddenStructureException\nfrom rmgpy.kinetics import KineticsData, Arrhenius\nfrom rmgpy.quantity import Quantity\nfrom rmgpy.reaction import Reaction\nfrom rmgpy.rmg.pdep import PDepReaction, PDepNetwork\nfrom rmgpy.rmg.react import react_all\nfrom rmgpy.species import Species\nfrom rmgpy.thermo.thermoengine import submit\nfrom rmgpy.rmg.decay import decay_species\n\n\n################################################################################\n\nclass ReactionModel:\n \"\"\"\n Represent a generic reaction model. 
A reaction model consists of `species`,\n a list of species, and `reactions`, a list of reactions.\n \"\"\"\n\n def __init__(self, species=None, reactions=None):\n self.species = species or []\n self.reactions = reactions or []\n\n def __reduce__(self):\n \"\"\"\n A helper function used when pickling an object.\n \"\"\"\n return ReactionModel, (self.species, self.reactions)\n\n def merge(self, other):\n \"\"\"\n Return a new :class:`ReactionModel` object that is the union of this\n model and `other`.\n \"\"\"\n if not isinstance(other, ReactionModel):\n raise ValueError('Expected type ReactionModel for other parameter, got {0}'.format(other.__class__))\n\n # Initialize the merged model\n final_model = ReactionModel()\n\n # Put the current model into the merged model as-is\n final_model.species.extend(self.species)\n final_model.reactions.extend(self.reactions)\n\n # Determine which species in other are already in self\n common_species = {}\n unique_species = []\n for spec in other.species:\n for spec0 in final_model.species:\n if spec.is_isomorphic(spec0):\n common_species[spec] = spec0\n if spec0.label not in ['Ar', 'N2', 'Ne', 'He']:\n if not spec0.thermo.is_identical_to(spec.thermo):\n print('Species {0} thermo from model 1 did not match that of model 2.'.format(spec.label))\n\n break\n else:\n unique_species.append(spec)\n\n # Determine which reactions in other are already in self\n common_reactions = {}\n unique_reactions = []\n for rxn in other.reactions:\n for rxn0 in final_model.reactions:\n if rxn.is_isomorphic(rxn0, either_direction=True):\n common_reactions[rxn] = rxn0\n if not rxn0.kinetics.is_identical_to(rxn.kinetics):\n print('Reaction {0} kinetics from model 1 did not match that of model 2.'.format(str(rxn0)))\n break\n else:\n unique_reactions.append(rxn)\n\n # Add the unique species from other to the final model\n final_model.species.extend(unique_species)\n\n # Make sure unique reactions only refer to species in the final model\n for rxn in unique_reactions:\n for i, reactant in enumerate(rxn.reactants):\n try:\n rxn.reactants[i] = common_species[reactant]\n if rxn.pairs:\n for j, pair in enumerate(rxn.pairs):\n if reactant in pair:\n rxn.pairs[j] = (rxn.reactants[i], pair[1])\n except KeyError:\n pass\n for i, product in enumerate(rxn.products):\n try:\n rxn.products[i] = common_species[product]\n if rxn.pairs:\n for j, pair in enumerate(rxn.pairs):\n if product in pair:\n rxn.pairs[j] = (pair[0], rxn.products[i])\n except KeyError:\n pass\n\n # Add the unique reactions from other to the final model\n final_model.reactions.extend(unique_reactions)\n\n # Return the merged model\n return final_model\n\n\n################################################################################\n\nclass CoreEdgeReactionModel:\n \"\"\"\n Represent a reaction model constructed using a rate-based screening\n algorithm. The species and reactions in the model itself are called the\n *core*; the species and reactions identified as candidates for inclusion in\n the model are called the *edge*. 
The attributes are:\n\n ========================= ==============================================================\n Attribute Description\n ========================= ==============================================================\n `core` The species and reactions of the current model core\n `edge` The species and reactions of the current model edge\n `network_dict` A dictionary of pressure-dependent reaction networks (:class:`Network` objects) indexed by source.\n `network_list` A list of pressure-dependent reaction networks (:class:`Network` objects)\n `network_count` A counter for the number of pressure-dependent networks created\n `index_species_dict` A dictionary with a unique index pointing to the species objects\n `solvent_name` String describing solvent name for liquid reactions. Empty for non-liquid estimation\n `surface_site_density` The surface site density (a SurfaceConcentration quantity) or None if no heterogeneous catalyst.\n ========================= ==============================================================\n\n\n \"\"\"\n\n def __init__(self, core=None, edge=None, surface=None):\n if core is None:\n self.core = ReactionModel()\n else:\n self.core = core\n if edge is None:\n self.edge = ReactionModel()\n else:\n self.edge = edge\n if surface is None:\n self.surface = ReactionModel()\n else:\n self.surface = surface\n\n # The default tolerances mimic the original RMG behavior; no edge\n # pruning takes place, and the simulation is interrupted as soon as\n # a species flux higher than the validity\n self.network_dict = {}\n self.network_list = []\n self.network_count = 0\n self.species_dict = {}\n self.reaction_dict = {}\n self.species_cache = [None for i in range(4)]\n self.species_counter = 0\n self.reaction_counter = 0\n self.new_species_list = []\n self.new_reaction_list = []\n self.output_species_list = []\n self.output_reaction_list = []\n self.pressure_dependence = None\n self.quantum_mechanics = None\n self.verbose_comments = False\n self.kinetics_estimator = 'rate rules'\n self.index_species_dict = {}\n self.save_edge_species = False\n self.iteration_num = 0\n self.thermo_tol_keep_spc_in_edge = np.inf\n self.Gfmax = np.inf\n self.Gmax = np.inf\n self.Gmin = -np.inf\n self.min_core_size_for_prune = 50\n self.maximum_edge_species = 100000\n self.Tmax = 0\n self.reaction_systems = []\n self.new_surface_spcs_add = set()\n self.new_surface_rxns_add = set()\n self.new_surface_spcs_loss = set()\n self.new_surface_rxns_loss = set()\n self.solvent_name = ''\n self.surface_site_density = None\n self.unrealgroups = [Group().from_adjacency_list(\"\"\"\n 1 O u0 p2 c0 {2,S} {4,S}\n 2 O u0 p2 c0 {1,S} {3,S}\n 3 R!H u1 px c0 {2,S}\n 4 H u0 p0 c0 {1,S}\n \"\"\")]\n\n def check_for_existing_species(self, molecule):\n \"\"\"\n Check to see if an existing species contains the same\n :class:`molecule.Molecule` as `molecule`. Comparison is done using\n isomorphism without consideration of electrons. 
Therefore, resonance\n structures of a species will all match each other.\n\n Returns the matched species if found and `None` otherwise.\n \"\"\"\n\n # First check cache and return if species is found\n for i, spec in enumerate(self.species_cache):\n if spec is not None and spec.is_isomorphic(molecule, strict=False):\n self.species_cache.pop(i)\n self.species_cache.insert(0, spec)\n return spec\n\n # If not found in cache, check all species with matching formula\n formula = molecule.get_formula()\n try:\n species_list = self.species_dict[formula]\n except KeyError:\n pass\n else:\n for spec in species_list:\n if spec.is_isomorphic(molecule, strict=False):\n self.species_cache.pop()\n self.species_cache.insert(0, spec)\n return spec\n\n # At this point we can conclude that the species is new\n return None\n\n def make_new_species(self, object, label='', reactive=True, check_existing=True, generate_thermo=True, check_decay=False):\n \"\"\"\n Formally create a new species from the specified `object`, which can be\n either a :class:`Molecule` object or an :class:`rmgpy.species.Species`\n object. It is emphasized that `reactive` relates to the :Class:`Species` attribute, while `reactive_structure`\n relates to the :Class:`Molecule` attribute.\n \"\"\"\n\n if isinstance(object, rmgpy.species.Species):\n molecule = object.molecule[0]\n label = label if label != '' else object.label\n reactive = object.reactive\n else:\n molecule = object\n\n molecule.clear_labeled_atoms()\n\n # If desired, check to ensure that the species is new; return the\n # existing species if not new\n if check_existing:\n spec = self.check_for_existing_species(molecule)\n if spec is not None:\n return spec, False\n\n # If we're here then we're ready to make the new species\n try:\n spec = Species(label=label,molecule=[molecule],reactive=reactive,thermo=object.thermo, transport_data=object.transport_data)\n except AttributeError:\n spec = Species(label=label, molecule=[molecule], reactive=reactive)\n \n spec.generate_resonance_structures()\n \n if check_decay:\n spcs = decay_species(spec)\n if len(spcs) == 1:\n spec = spcs[0]\n else:\n return [self.make_new_species(spc) for spc in spcs]\n \n if reactive:\n self.species_counter += 1 # count only reactive species\n spec.index = self.species_counter\n else:\n spec.index = -1\n\n spec.creation_iteration = self.iteration_num\n spec.molecular_weight = Quantity(spec.molecule[0].get_molecular_weight() * 1000., \"amu\")\n\n if generate_thermo:\n self.generate_thermo(spec)\n\n # If the species still does not have a label, set initial label as the SMILES\n # This may change later after getting thermo in self.generate_thermo()\n if not spec.label:\n spec.label = spec.smiles\n logging.debug('Creating new species %s', spec.label)\n\n formula = molecule.get_formula()\n if formula in self.species_dict:\n self.species_dict[formula].append(spec)\n else:\n self.species_dict[formula] = [spec]\n\n # Since the species is new, add it to the list of new species\n self.new_species_list.append(spec)\n\n if spec.reactive:\n self.index_species_dict[spec.index] = spec\n\n return spec, True\n\n def check_for_existing_reaction(self, rxn):\n \"\"\"\n Check to see if an existing reaction has the same reactants, products, and\n family as `rxn`. Returns :data:`True` or :data:`False` and the matched\n reaction (if found).\n\n First, a shortlist of reaction is retrieved that have the same reaction keys\n as the parameter reaction.\n\n Next, the reaction ID containing an identifier (e.g. 
label) of the reactants\n and products is compared between the parameter reaction and the each of the\n reactions in the shortlist. If a match is found, the discovered reaction is \n returned.\n\n If a match is not yet found, the Library (seed mechs, reaction libs)\n in the reaction database are iterated over to check if a reaction was overlooked\n (a reaction with a different \"family\" key as the parameter reaction).\n\n \"\"\"\n\n # Make sure the reactant and product lists are sorted before performing the check\n rxn.reactants.sort()\n rxn.products.sort()\n\n # If reactants and products are identical, then something weird happened along\n # the way and we got a symmetrical reaction.\n if rxn.reactants == rxn.products:\n logging.debug(\"Symmetrical reaction found. Returning no reaction\")\n return True, None\n\n family_obj = get_family_library_object(rxn.family)\n shortlist = self.search_retrieve_reactions(rxn)\n\n # Now use short-list to check for matches. All should be in same forward direction.\n\n # Make sure the reactant and product lists are sorted before performing the check\n rxn_id = generate_reaction_id(rxn)\n\n for rxn0 in shortlist:\n rxn_id0 = generate_reaction_id(rxn0)\n\n if rxn_id == rxn_id0 and are_identical_species_references(rxn, rxn0):\n if isinstance(family_obj, KineticsLibrary) or isinstance(family_obj, KineticsFamily):\n if not rxn.duplicate:\n return True, rxn0\n else:\n return True, rxn0\n elif (isinstance(family_obj, KineticsFamily)\n and rxn_id == rxn_id0[::-1]\n and are_identical_species_references(rxn, rxn0)):\n if not rxn.duplicate:\n return True, rxn0\n\n # Now check seed mechanisms\n # We want to check for duplicates in *other* seed mechanisms, but allow\n # duplicated *within* the same seed mechanism\n _, r1_fwd, r2_fwd = generate_reaction_key(rxn)\n _, r1_rev, r2_rev = generate_reaction_key(rxn, useProducts=True)\n\n for library in self.reaction_dict:\n lib_obj = get_family_library_object(library)\n if isinstance(lib_obj, KineticsLibrary) and library != rxn.family:\n\n # First check seed short-list in forward direction \n shortlist = self.retrieve(library, r1_fwd, r2_fwd)\n\n for rxn0 in shortlist:\n rxn_id0 = generate_reaction_id(rxn0)\n if (rxn_id == rxn_id0) or (rxn_id == rxn_id0[::-1]):\n if are_identical_species_references(rxn, rxn0):\n return True, rxn0\n\n # Now get the seed short-list of the reverse reaction\n\n shortlist = self.retrieve(library, r1_rev, r2_rev)\n\n for rxn0 in shortlist:\n if are_identical_species_references(rxn, rxn0):\n return True, rxn0\n\n return False, None\n\n def make_new_reaction(self, forward, check_existing=True, generate_thermo=True):\n \"\"\"\n Make a new reaction given a :class:`Reaction` object `forward`. \n The reaction is added to the global list of reactions.\n Returns the reaction in the direction that corresponds to the\n estimated kinetics, along with whether or not the reaction is new to the\n global reaction list.\n\n The forward direction is determined using the \"is_reverse\" attribute of the\n reaction's family. 
If the reaction family is its own reverse, then it is\n made such that the forward reaction is exothermic at 298K.\n \n The forward reaction is appended to self.new_reaction_list if it is new.\n \"\"\"\n\n # Determine the proper species objects for all reactants and products\n if forward.family and forward.is_forward:\n reactants = [self.make_new_species(reactant, generate_thermo=generate_thermo)[0] for reactant in forward.reactants]\n products = []\n for product in forward.products:\n spcs = self.make_new_species(product, generate_thermo=generate_thermo,check_decay=True)\n if type(spcs) == tuple:\n products.append(spcs[0])\n elif type(spcs) == list:\n products.extend([spc[0] for spc in spcs])\n else:\n try:\n reactants = [self.make_new_species(reactant, generate_thermo=generate_thermo)[0] for reactant in forward.reactants]\n products = [self.make_new_species(product, generate_thermo=generate_thermo)[0] for product in forward.products]\n except:\n logging.error(f\"Error when making species in reaction {forward:s} from {forward.family:s}\")\n raise\n \n if forward.specific_collider is not None:\n forward.specific_collider = self.make_new_species(forward.specific_collider)[0]\n\n if forward.pairs is not None:\n for pairIndex in range(len(forward.pairs)):\n reactant_index = forward.reactants.index(forward.pairs[pairIndex][0])\n product_index = forward.products.index(forward.pairs[pairIndex][1])\n forward.pairs[pairIndex] = (reactants[reactant_index], products[product_index])\n if hasattr(forward, 'reverse'):\n if forward.reverse:\n forward.reverse.pairs[pairIndex] = (products[product_index], reactants[reactant_index])\n forward.reactants = reactants\n forward.products = products\n\n if check_existing:\n found, rxn = self.check_for_existing_reaction(forward)\n if found:\n return rxn, False\n\n # Generate the reaction pairs if not yet defined\n if forward.pairs is None or len(forward.pairs) != max(len(forward.reactants), len(forward.products)):\n forward.generate_pairs()\n if hasattr(forward, 'reverse'):\n if forward.reverse:\n forward.reverse.generate_pairs()\n\n # Note in the log\n if isinstance(forward, TemplateReaction):\n logging.debug('Creating new %s template reaction %s', forward.family, forward)\n elif isinstance(forward, DepositoryReaction):\n logging.debug('Creating new %s reaction %s', forward.get_source(), forward)\n elif isinstance(forward, LibraryReaction):\n logging.debug('Creating new library reaction %s', forward)\n else:\n raise Exception(\"Unrecognized reaction type {0!s}\".format(forward.__class__))\n\n self.register_reaction(forward)\n\n forward.index = self.reaction_counter + 1\n self.reaction_counter += 1\n\n # Since the reaction is new, add it to the list of new reactions\n self.new_reaction_list.append(forward)\n\n # Return newly created reaction\n return forward, True\n\n def make_new_pdep_reaction(self, forward):\n \"\"\"\n Make a new pressure-dependent reaction based on a list of `reactants` and a\n list of `products`. The reaction belongs to the specified `network` and\n has pressure-dependent kinetics given by `kinetics`.\n\n No checking for existing reactions is made here. The returned PDepReaction\n object is not added to the global list of reactions, as that is intended\n to represent only the high-pressure-limit set. 
The reaction_counter is\n incremented, however, since the returned reaction can and will exist in\n the model edge and/or core.\n \"\"\"\n\n # Don't create reverse reaction: all such reactions are treated as irreversible\n # The reverse direction will come from a different partial network\n # Note that this isn't guaranteed to satisfy thermodynamics (but will probably be close)\n forward.reverse = None\n forward.reversible = False\n\n # Generate the reaction pairs if not yet defined\n if forward.pairs is None:\n forward.generate_pairs()\n\n # Set reaction index and increment the counter\n forward.index = self.reaction_counter + 1\n self.reaction_counter += 1\n\n return forward\n\n def enlarge(self, new_object=None, react_edge=False,\n unimolecular_react=None, bimolecular_react=None, trimolecular_react=None):\n \"\"\"\n Enlarge a reaction model by processing the objects in the list `new_object`. \n If `new_object` is a\n :class:`rmg.species.Species` object, then the species is moved from\n the edge to the core and reactions generated for that species, reacting\n with itself and with all other species in the model core. If `new_object`\n is a :class:`rmg.unirxn.network.Network` object, then reactions are\n generated for the species in the network with the largest leak flux.\n\n If the `react_edge` flag is `True`, then no new_object is needed,\n and instead the algorithm proceeds to react the core species together\n to form edge reactions.\n \"\"\"\n\n num_old_core_species = len(self.core.species)\n num_old_core_reactions = len(self.core.reactions)\n num_old_edge_species = len(self.edge.species)\n num_old_edge_reactions = len(self.edge.reactions)\n reactions_moved_from_edge = []\n self.new_reaction_list = []\n self.new_species_list = []\n\n # Determine number of parallel processes.\n from rmgpy.rmg.main import determine_procnum_from_ram\n procnum = determine_procnum_from_ram()\n\n if react_edge is False:\n # We are adding core species \n new_reactions = []\n pdep_network = None\n object_was_in_edge = False\n\n if isinstance(new_object, Species):\n\n new_species = new_object\n\n object_was_in_edge = new_species in self.edge.species\n\n if not new_species.reactive:\n logging.info('NOT generating reactions for unreactive species {0}'.format(new_species))\n else:\n logging.info('Adding species {0} to model core'.format(new_species))\n display(new_species) # if running in IPython --pylab mode, draws the picture!\n\n # Add new species\n reactions_moved_from_edge = self.add_species_to_core(new_species)\n\n elif isinstance(new_object, tuple) and isinstance(new_object[0], PDepNetwork) and self.pressure_dependence:\n\n pdep_network, new_species = new_object\n new_reactions.extend(pdep_network.explore_isomer(new_species))\n\n self.process_new_reactions(new_reactions, new_species, pdep_network, generate_thermo=False)\n\n else:\n raise TypeError('Unable to use object {0} to enlarge reaction model; expecting an object of class '\n 'rmg.model.Species or rmg.model.PDepNetwork, not {1}'.format(new_object,\n new_object.__class__))\n\n # If there are any core species among the unimolecular product channels\n # of any existing network, they need to be made included\n for network in self.network_list:\n network.update_configurations(self)\n index = 0\n isomers = [isomer.species[0] for isomer in network.isomers]\n while index < len(self.core.species):\n species = self.core.species[index]\n if species in isomers and species not in network.explored:\n network.explored.append(species)\n continue\n for products in 
network.products:\n products = products.species\n if len(products) == 1 and products[0] == species:\n new_reactions = network.explore_isomer(species)\n\n self.process_new_reactions(new_reactions, species, network, generate_thermo=False)\n network.update_configurations(self)\n index = 0\n break\n else:\n index += 1\n\n if isinstance(new_object, Species) and object_was_in_edge:\n # moved one species from edge to core\n num_old_edge_species -= 1\n # moved these reactions from edge to core\n num_old_edge_reactions -= len(reactions_moved_from_edge)\n\n else:\n # Generate reactions between all core species which have not been\n # reacted yet and exceed the reaction filter thresholds\n rxn_lists, spcs_tuples = react_all(self.core.species, num_old_core_species,\n unimolecular_react, bimolecular_react,\n trimolecular_react=trimolecular_react,\n procnum=procnum)\n\n for rxnList, spcTuple in zip(rxn_lists, spcs_tuples):\n if rxnList:\n # Identify a core species which was used to generate the reaction\n # This is only used to determine the reaction direction for processing\n spc = spcTuple[0]\n self.process_new_reactions(rxnList, spc, generate_thermo=False)\n\n ################################################################\n # Begin processing the new species and reactions\n\n # Generate thermo for new species\n if self.new_species_list:\n logging.info('Generating thermo for new species...')\n self.apply_thermo_to_species(procnum)\n\n # Do thermodynamic filtering\n if not np.isinf(self.thermo_tol_keep_spc_in_edge) and self.new_species_list != []:\n self.thermo_filter_species(self.new_species_list)\n\n # Generate kinetics of new reactions\n if self.new_reaction_list:\n logging.info('Generating kinetics for new reactions...')\n for reaction in self.new_reaction_list:\n # If the reaction already has kinetics (e.g. from a library),\n # assume the kinetics are satisfactory\n if reaction.kinetics is None:\n self.apply_kinetics_to_reaction(reaction)\n\n # For new reactions, convert ArrheniusEP to Arrhenius, and fix barrier heights.\n # self.new_reaction_list only contains *actually* new reactions, all in the forward direction.\n for reaction in self.new_reaction_list:\n # convert KineticsData to Arrhenius forms\n if isinstance(reaction.kinetics, KineticsData):\n reaction.kinetics = reaction.kinetics.to_arrhenius()\n # correct barrier heights of estimated kinetics\n if isinstance(reaction, TemplateReaction) or isinstance(reaction,\n DepositoryReaction): # i.e. 
not LibraryReaction\n reaction.fix_barrier_height() # also converts ArrheniusEP to Arrhenius.\n\n if self.pressure_dependence and reaction.is_unimolecular():\n # If this is going to be run through pressure dependence code,\n # we need to make sure the barrier is positive.\n reaction.fix_barrier_height(force_positive=True)\n\n # Update unimolecular (pressure dependent) reaction networks\n if self.pressure_dependence:\n # Recalculate k(T,P) values for modified networks\n self.update_unimolecular_reaction_networks()\n logging.info('')\n\n # Check new core and edge reactions for Chemkin duplicates\n # The same duplicate reaction gets brought into the core\n # at the same time, so there is no danger in checking all of the edge.\n new_core_reactions = self.core.reactions[num_old_core_reactions:]\n new_edge_reactions = self.edge.reactions[num_old_edge_reactions:]\n checked_reactions = self.core.reactions[:num_old_core_reactions] + self.edge.reactions[:num_old_edge_reactions]\n from rmgpy.chemkin import mark_duplicate_reaction\n for rxn in new_core_reactions:\n mark_duplicate_reaction(rxn, checked_reactions)\n checked_reactions.append(rxn)\n if self.save_edge_species:\n for rxn in new_edge_reactions:\n mark_duplicate_reaction(rxn, checked_reactions)\n checked_reactions.append(rxn)\n self.log_enlarge_summary(\n new_core_species=self.core.species[num_old_core_species:],\n new_core_reactions=self.core.reactions[num_old_core_reactions:],\n reactions_moved_from_edge=reactions_moved_from_edge,\n new_edge_species=self.edge.species[num_old_edge_species:],\n new_edge_reactions=self.edge.reactions[num_old_edge_reactions:],\n react_edge=react_edge,\n )\n\n logging.info('')\n\n def add_new_surface_objects(self, obj, new_surface_species, new_surface_reactions, reaction_system):\n \"\"\"\n obj is the list of objects for enlargement coming from simulate\n new_surface_species and new_surface_reactions are the current lists of surface species and surface reactions\n following simulation\n reaction_system is the current reactor\n manages surface species and reactions being moved to and from the surface\n moves them to appropriate newSurfaceSpc/RxnsAdd/loss sets\n returns false if the surface has changed\n \"\"\"\n surf_spcs = set(self.surface.species)\n surf_rxns = set(self.surface.reactions)\n\n new_surface_species = set(new_surface_species)\n new_surface_reactions = set(new_surface_reactions)\n\n added_rxns = {k for k in obj if isinstance(k, Reaction)}\n added_surface_rxns = new_surface_reactions - surf_rxns\n\n added_bulk_rxns = added_rxns - added_surface_rxns\n lost_surface_rxns = (surf_rxns - new_surface_reactions) | added_bulk_rxns\n\n added_spcs = {k for k in obj if isinstance(k, Species)} | {\n k.get_maximum_leak_species(reaction_system.T.value_si, reaction_system.P.value_si) for k in obj if\n isinstance(k, PDepNetwork)}\n lost_surface_spcs = (surf_spcs - new_surface_species) | added_spcs\n added_surface_spcs = new_surface_species - surf_spcs\n\n self.new_surface_spcs_add = self.new_surface_spcs_add | added_surface_spcs\n self.new_surface_rxns_add = self.new_surface_rxns_add | added_surface_rxns\n self.new_surface_spcs_loss = self.new_surface_spcs_loss | lost_surface_spcs\n self.new_surface_rxns_loss = self.new_surface_rxns_loss | lost_surface_rxns\n\n return not (self.new_surface_rxns_add != set() or\n self.new_surface_rxns_loss != set() or\n self.new_surface_spcs_loss != set() or\n self.new_surface_spcs_add != set())\n\n def adjust_surface(self):\n \"\"\"\n Here we add species intended to be added and 
remove any species that need to be moved out of the core. \n For now we remove reactions from the surface that have become part of a PDepNetwork by \n intersecting the set of surface reactions with the core so that all surface reactions are in the core\n thus the surface algorithm currently (June 2017) is not implemented for pdep networks\n (however it will function fine for non-pdep reactions on a pdep run)\n \"\"\"\n self.surface.species = list(\n ((set(self.surface.species) | self.new_surface_spcs_add) - self.new_surface_spcs_loss) & set(self.core.species))\n self.surface.reactions = list(\n ((set(self.surface.reactions) | self.new_surface_rxns_add) - self.new_surface_rxns_loss) & set(\n self.core.reactions))\n self.clear_surface_adjustments()\n\n def clear_surface_adjustments(self):\n \"\"\"\n empties surface tracking varaibles\n \"\"\"\n self.new_surface_spcs_add = set()\n self.new_surface_rxns_add = set()\n self.new_surface_spcs_loss = set()\n self.new_surface_rxns_loss = set()\n\n def process_new_reactions(self, new_reactions, new_species, pdep_network=None, generate_thermo=True):\n \"\"\"\n Process a list of newly-generated reactions involving the new core\n species or explored isomer `new_species` in network `pdep_network`.\n \n Makes a reaction and decides where to put it: core, edge, or PDepNetwork.\n \"\"\"\n for rxn in new_reactions:\n rxn, is_new = self.make_new_reaction(rxn, generate_thermo=generate_thermo)\n if rxn is None:\n # Skip this reaction because there was something wrong with it\n continue\n if is_new:\n # We've made a new reaction, so make sure the species involved\n # are in the core or edge\n all_species_in_core = True\n # Add the reactant and product species to the edge if necessary\n # At the same time, check if all reactants and products are in the core\n for spec in rxn.reactants:\n if spec not in self.core.species:\n all_species_in_core = False\n if spec not in self.edge.species:\n self.add_species_to_edge(spec)\n for spec in rxn.products:\n if spec not in self.core.species:\n all_species_in_core = False\n if spec not in self.edge.species:\n self.add_species_to_edge(spec)\n\n isomer_atoms = sum([len(spec.molecule[0].atoms) for spec in rxn.reactants])\n\n # Decide whether or not to handle the reaction as a pressure-dependent reaction\n pdep = True\n if not self.pressure_dependence:\n # The pressure dependence option is turned off entirely\n pdep = False\n elif self.pressure_dependence.maximum_atoms is not None \\\n and self.pressure_dependence.maximum_atoms < isomer_atoms:\n # The reaction involves so many atoms that pressure-dependent effects are assumed to be negligible\n pdep = False\n elif not (rxn.is_isomerization() or rxn.is_dissociation() or rxn.is_association()):\n # The reaction is not unimolecular in either direction, so it cannot be pressure-dependent\n pdep = False\n elif isinstance(rxn, LibraryReaction):\n # Try generating the high pressure limit kinetics. 
If successful, set pdep to ``True``, and vice versa.\n pdep = rxn.generate_high_p_limit_kinetics()\n elif any([any([x.is_subgraph_isomorphic(q) for q in self.unrealgroups]) for y in rxn.reactants+rxn.products for x in y.molecule]):\n pdep = False\n\n # If pressure dependence is on, we only add reactions that are not unimolecular;\n # unimolecular reactions will be added after processing the associated networks\n if not pdep:\n if not is_new:\n # The reaction is not new, so it should already be in the core or edge\n continue\n if all_species_in_core:\n self.add_reaction_to_core(rxn)\n else:\n self.add_reaction_to_edge(rxn)\n else:\n # Add the reaction to the appropriate unimolecular reaction network\n # If pdep_network is not None then that will be the network the\n # (path) reactions are added to\n # Note that this must be done even with reactions that are not new\n # because of the way partial networks are explored\n # Since PDepReactions are created as irreversible, not doing so\n # would cause you to miss the reverse reactions!\n self.add_reaction_to_unimolecular_networks(rxn, new_species=new_species, network=pdep_network)\n if isinstance(rxn, LibraryReaction):\n # If reaction came from a reaction library, omit it from the core and edge so that it does \n # not get double-counted with the pdep network\n if rxn in self.core.reactions:\n self.core.reactions.remove(rxn)\n if rxn in self.edge.reactions:\n self.edge.reactions.remove(rxn)\n\n def apply_thermo_to_species(self, procnum):\n \"\"\"\n Generate thermo for species. QM calculations are parallelized if requested.\n \"\"\"\n from rmgpy.rmg.input import get_input\n quantum_mechanics = get_input('quantum_mechanics')\n\n if quantum_mechanics:\n quantum_mechanics.run_jobs(self.new_species_list, procnum=procnum)\n\n # Serial thermo calculation for other methods\n for spc in self.new_species_list:\n self.generate_thermo(spc, rename=True)\n\n def generate_thermo(self, spc, rename=False):\n \"\"\"\n Generate thermo for species.\n \"\"\"\n if not spc.thermo:\n submit(spc, self.solvent_name)\n\n if rename and spc.thermo and spc.thermo.label != '': # check if thermo libraries have a name for it\n logging.info('Species {0} renamed {1} based on thermo library name'.format(spc.label, spc.thermo.label))\n spc.label = spc.thermo.label\n\n spc.generate_energy_transfer_model()\n\n def apply_kinetics_to_reaction(self, reaction):\n \"\"\"\n retrieve the best kinetics for the reaction and apply it towards the forward \n or reverse direction (if reverse, flip the direaction).\n \"\"\"\n from rmgpy.data.rmg import get_db\n # Find the reaction kinetics\n kinetics, source, entry, is_forward = self.generate_kinetics(reaction)\n # Flip the reaction direction if the kinetics are defined in the reverse direction\n if not is_forward:\n family = get_db('kinetics').families[reaction.family]\n reaction.reactants, reaction.products = reaction.products, reaction.reactants\n reaction.pairs = [(p, r) for r, p in reaction.pairs]\n if family.own_reverse and hasattr(reaction, 'reverse'):\n if reaction.reverse:\n reaction.template = reaction.reverse.template\n # replace degeneracy\n reaction.degeneracy = reaction.reverse.degeneracy\n # We're done with the \"reverse\" attribute, so delete it to save a bit of memory\n reaction.reverse = None\n reaction.kinetics = kinetics\n\n def generate_kinetics(self, reaction):\n \"\"\"\n Generate best possible kinetics for the given `reaction` using the kinetics database.\n \"\"\"\n # Only reactions from families should be missing 
kinetics\n assert isinstance(reaction, TemplateReaction)\n\n family = get_family_library_object(reaction.family)\n\n # Get the kinetics for the reaction\n kinetics, source, entry, is_forward = family.get_kinetics(reaction, template_labels=reaction.template,\n degeneracy=reaction.degeneracy,\n estimator=self.kinetics_estimator,\n return_all_kinetics=False)\n # Get the gibbs free energy of reaction at 298 K\n G298 = reaction.get_free_energy_of_reaction(298)\n gibbs_is_positive = G298 > -1e-8\n\n if family.own_reverse and len(reaction.products)==len(reaction.reactants) and hasattr(reaction, 'reverse'):\n if reaction.reverse:\n # The kinetics family is its own reverse, so we could estimate kinetics in either direction\n\n # First get the kinetics for the other direction\n rev_kinetics, rev_source, rev_entry, rev_is_forward = family.get_kinetics(reaction.reverse,\n template_labels=reaction.reverse.template,\n degeneracy=reaction.reverse.degeneracy,\n estimator=self.kinetics_estimator,\n return_all_kinetics=False)\n # Now decide which direction's kinetics to keep\n keep_reverse = False\n if entry is not None and rev_entry is None:\n # Only the forward has an entry, meaning an exact match in a depository or template\n # the reverse must have used an averaged estimated node - so use forward.\n reason = \"This direction matched an entry in {0}, the other was just an estimate.\".format(reaction.family)\n elif entry is None and rev_entry is not None:\n # Only the reverse has an entry (see above) - use reverse.\n keep_reverse = True\n reason = \"This direction matched an entry in {0}, the other was just an estimate.\".format(reaction.family)\n elif entry is not None and rev_entry is not None and entry is rev_entry:\n # Both forward and reverse have the same source and entry\n # Use the one for which the kinetics is the forward kinetics\n keep_reverse = gibbs_is_positive and is_forward and rev_is_forward\n reason = \"Both directions matched the same entry in {0}, but this direction is exergonic.\".format(reaction.family)\n elif self.kinetics_estimator == 'group additivity' and (kinetics.comment.find(\"Fitted to 1 rate\") > 0\n and not rev_kinetics.comment.find(\"Fitted to 1 rate\") > 0):\n # forward kinetics were fitted to only 1 rate, but reverse are hopefully better\n keep_reverse = True\n reason = \"Other direction matched a group only fitted to 1 rate.\"\n elif self.kinetics_estimator == 'group additivity' and (not kinetics.comment.find(\"Fitted to 1 rate\") > 0\n and rev_kinetics.comment.find(\"Fitted to 1 rate\") > 0):\n # reverse kinetics were fitted to only 1 rate, but forward are hopefully better\n keep_reverse = False\n reason = \"Other direction matched a group only fitted to 1 rate.\"\n elif entry is not None and rev_entry is not None:\n # Both directions matched explicit rate rules\n # Keep the direction with the lower (but nonzero) rank\n if entry.rank < rev_entry.rank and entry.rank != 0:\n keep_reverse = False\n reason = \"Both directions matched explicit rate rules, but this direction has a rule with a lower rank ({0} vs {1}).\".format(\n entry.rank, rev_entry.rank)\n elif rev_entry.rank < entry.rank and rev_entry.rank != 0:\n keep_reverse = True\n reason = \"Both directions matched explicit rate rules, but this direction has a rule with a lower rank ({0} vs {1}).\".format(\n rev_entry.rank, entry.rank)\n # Otherwise keep the direction that is exergonic at 298 K\n else:\n keep_reverse = gibbs_is_positive and is_forward and rev_is_forward\n reason = \"Both directions matched explicit 
rate rules, but this direction is exergonic.\"\n else:\n # Keep the direction that is exergonic at 298 K\n # This must be done after the thermo generation step\n keep_reverse = gibbs_is_positive and is_forward and rev_is_forward\n reason = \"Both directions are estimates, but this direction is exergonic.\"\n\n if keep_reverse:\n kinetics = rev_kinetics\n source = rev_source\n entry = rev_entry\n is_forward = not rev_is_forward\n G298 = -G298\n\n if self.verbose_comments:\n kinetics.comment += \"\\nKinetics were estimated in this direction instead of the reverse because:\\n{0}\".format(reason)\n kinetics.comment += \"\\ndGrxn(298 K) = {0:.2f} kJ/mol\".format(G298 / 1000.)\n\n # The comments generated by the database for estimated kinetics can\n # be quite long, and therefore not very useful\n # We don't want to waste lots of memory storing these long, \n # uninformative strings, so here we replace them with much shorter ones\n if not self.verbose_comments:\n # Only keep a short comment (to save memory)\n if 'Exact' in kinetics.comment:\n # Exact match of rate rule\n pass\n elif 'Matched reaction' in kinetics.comment:\n # Stems from matching a reaction from a depository\n pass\n else:\n # Estimated (averaged) rate rule\n kinetics.comment = kinetics.comment[kinetics.comment.find('Estimated'):]\n\n return kinetics, source, entry, is_forward\n\n def log_enlarge_summary(self, new_core_species, new_core_reactions, new_edge_species, new_edge_reactions,\n reactions_moved_from_edge=None, react_edge=False):\n \"\"\"\n Output a summary of a model enlargement step to the log. The details of\n the enlargement are passed in the `new_core_species`, `new_core_reactions`,\n `new_edge_species`, and `new_edge_reactions` objects. \n \"\"\"\n\n logging.info('')\n if react_edge:\n logging.info('Summary of Secondary Model Edge Enlargement')\n else:\n logging.info('Summary of Model Enlargement')\n logging.info('---------------------------------')\n\n logging.info('Added {0:d} new core species'.format(len(new_core_species)))\n for spec in new_core_species:\n display(spec)\n logging.info(' {0}'.format(spec))\n\n logging.info('Created {0:d} new edge species'.format(len(new_edge_species)))\n for spec in new_edge_species:\n display(spec)\n logging.info(' {0}'.format(spec))\n\n if reactions_moved_from_edge:\n logging.info('Moved {0:d} reactions from edge to core'.format(len(reactions_moved_from_edge)))\n for rxn in reactions_moved_from_edge:\n for r in new_core_reactions:\n if ((r.reactants == rxn.reactants and r.products == rxn.products) or\n (r.products == rxn.reactants and r.reactants == rxn.products)):\n logging.info(' {0}'.format(r))\n new_core_reactions.remove(r)\n break\n logging.info('Added {0:d} new core reactions'.format(len(new_core_reactions)))\n for rxn in new_core_reactions:\n logging.info(' {0}'.format(rxn))\n\n logging.info('Created {0:d} new edge reactions'.format(len(new_edge_reactions)))\n for rxn in new_edge_reactions:\n logging.info(' {0}'.format(rxn))\n\n core_species_count, core_reaction_count, edge_species_count, edge_reaction_count = self.get_model_size()\n\n # Output current model size information after enlargement\n logging.info('')\n logging.info('After model enlargement:')\n logging.info(' The model core has {0:d} species and {1:d} reactions'.format(core_species_count,\n core_reaction_count))\n logging.info(' The model edge has {0:d} species and {1:d} reactions'.format(edge_species_count,\n edge_reaction_count))\n logging.info('')\n\n def add_species_to_core(self, spec):\n \"\"\"\n Add a 
species `spec` to the reaction model core (and remove from edge if\n necessary). This function also moves any reactions in the edge that gain\n core status as a result of this change in status to the core.\n If this are any such reactions, they are returned in a list.\n \"\"\"\n\n assert spec not in self.core.species, \"Tried to add species {0} to core, but it's already there\".format(spec.label)\n\n forbidden_structures = get_db('forbidden')\n\n # check RMG globally forbidden structures\n if not spec.explicitly_allowed and forbidden_structures.is_molecule_forbidden(spec.molecule[0]):\n\n rxn_list = []\n if spec in self.edge.species:\n # remove forbidden species from edge\n logging.info(\"Species {0} was Forbidden and not added to Core...Removing from Edge.\".format(spec))\n self.remove_species_from_edge(self.reaction_systems, spec)\n\n return []\n\n # Add the species to the core\n self.core.species.append(spec)\n\n rxn_list = []\n if spec in self.edge.species:\n\n # If species was in edge, remove it\n logging.debug(\"Removing species %s from edge.\", spec)\n self.edge.species.remove(spec)\n\n # Search edge for reactions that now contain only core species;\n # these belong in the model core and will be moved there\n for rxn in self.edge.reactions:\n all_core = True\n for reactant in rxn.reactants:\n if reactant not in self.core.species:\n all_core = False\n for product in rxn.products:\n if product not in self.core.species:\n all_core = False\n if all_core:\n rxn_list.append(rxn)\n\n # Move any identified reactions to the core\n for rxn in rxn_list:\n self.add_reaction_to_core(rxn)\n logging.debug(\"Moving reaction from edge to core: %s\", rxn)\n return rxn_list\n\n def add_species_to_edge(self, spec):\n \"\"\"\n Add a species `spec` to the reaction model edge.\n \"\"\"\n self.edge.species.append(spec)\n\n def set_thermodynamic_filtering_parameters(self, Tmax, thermo_tol_keep_spc_in_edge,\n min_core_size_for_prune, maximum_edge_species, reaction_systems):\n \"\"\"\n sets parameters for thermodynamic filtering based on the current core\n Tmax is the maximum reactor temperature in K\n thermo_tol_keep_spc_in_edge is the Gibbs number above which species will be filtered\n min_core_size_for_prune is the core size at which thermodynamic filtering will start\n maximum_edge_species is the maximum allowed number of edge species\n reaction_systems is a list of reaction_system objects\n \"\"\"\n self.Tmax = Tmax\n Gs = [spc.thermo.get_free_energy(Tmax) for spc in self.core.species]\n self.Gmax = max(Gs)\n self.Gmin = min(Gs)\n\n self.Gfmax = thermo_tol_keep_spc_in_edge * (self.Gmax - self.Gmin) + self.Gmax\n self.thermo_tol_keep_spc_in_edge = thermo_tol_keep_spc_in_edge\n self.min_core_size_for_prune = min_core_size_for_prune\n self.reaction_systems = reaction_systems\n self.maximum_edge_species = maximum_edge_species\n\n def thermo_filter_species(self, spcs):\n \"\"\"\n checks Gibbs energy of the species in species against the\n maximum allowed Gibbs energy\n \"\"\"\n Tmax = self.Tmax\n for spc in spcs:\n G = spc.thermo.get_free_energy(Tmax)\n if G > self.Gfmax:\n Gn = (G - self.Gmax) / (self.Gmax - self.Gmin)\n logging.info('Removing species {0} with Gibbs energy {1} from edge because it\\'s Gibbs number {2} is '\n 'greater than the thermo_tol_keep_spc_in_edge of '\n '{3} '.format(spc, G, Gn, self.thermo_tol_keep_spc_in_edge))\n self.remove_species_from_edge(self.reaction_systems, spc)\n\n # Delete any networks that became empty as a result of pruning\n if self.pressure_dependence:\n 
self.remove_empty_pdep_networks()\n\n def thermo_filter_down(self, maximum_edge_species, min_species_exist_iterations_for_prune=0):\n \"\"\"\n removes species from the edge based on their Gibbs energy until maximum_edge_species\n is reached under the constraint that all removed species are older than\n min_species_exist_iterations_for_prune iterations\n maximum_edge_species is the maximum allowed number of edge species\n min_species_exist_iterations_for_prune is the number of iterations a species must be in the edge\n before it is eligible for thermo filtering\n \"\"\"\n Tmax = self.Tmax\n num_to_remove = len(self.edge.species) - maximum_edge_species\n logging.debug('Planning to remove %d species', num_to_remove)\n iteration = self.iteration_num\n\n if num_to_remove > 0: # implies flux pruning is off or did not trigger\n logging.info('Reached maximum number of edge species')\n logging.info('Attempting to remove excess edge species with Thermodynamic filtering')\n spcs = self.edge.species\n Gfs = np.array([spc.thermo.get_free_energy(Tmax) for spc in spcs])\n Gns = (Gfs - self.Gmax) / (self.Gmax - self.Gmin)\n inds = np.argsort(Gns) # could actually do this with the Gfs, but want to print the Gn value later\n inds = inds[::-1] # get in order of increasing Gf\n\n ind = 0\n remove_spcs = []\n\n rInds = []\n while ind < len(\n inds) and num_to_remove > 0: # find the species we can remove and collect indices for removal\n i = inds[ind]\n spc = spcs[i]\n if iteration - spc.creation_iteration >= min_species_exist_iterations_for_prune:\n remove_spcs.append(spc)\n rInds.append(i)\n num_to_remove -= 1\n ind += 1\n\n logging.debug('Found %d eligible species for filtering', len(remove_spcs))\n\n for i, spc in enumerate(remove_spcs):\n logging.info('Removing species {0} from edge to meet maximum number of edge species, Gibbs '\n 'number is {1}'.format(spc, Gns[rInds[i]]))\n self.remove_species_from_edge(self.reaction_systems, spc)\n\n # Delete any networks that became empty as a result of pruning\n if self.pressure_dependence:\n self.remove_empty_pdep_networks()\n\n # call garbage collection\n collected = gc.collect()\n logging.info('Garbage collector: collected %d objects.' 
% (collected))\n\n def remove_empty_pdep_networks(self):\n \"\"\"\n searches for and deletes any empty pdep networks\n \"\"\"\n networks_to_delete = []\n for network in self.network_list:\n if len(network.path_reactions) == 0 and len(network.net_reactions) == 0:\n networks_to_delete.append(network)\n\n if len(networks_to_delete) > 0:\n logging.info('Deleting {0:d} empty pressure-dependent reaction networks'.format(len(networks_to_delete)))\n for network in networks_to_delete:\n logging.debug(' Deleting empty pressure dependent reaction network #%d', network.index)\n source = tuple(network.source)\n nets_with_this_source = self.network_dict[source]\n nets_with_this_source.remove(network)\n if not nets_with_this_source:\n del (self.network_dict[source])\n self.network_list.remove(network)\n\n def prune(self, reaction_systems, tol_keep_in_edge, tol_move_to_core, maximum_edge_species,\n min_species_exist_iterations_for_prune):\n \"\"\"\n Remove species from the model edge based on the simulation results from\n the list of `reaction_systems`.\n \"\"\"\n\n ineligible_species = [] # A list of the species which are not eligible for pruning, for any reason\n prunable_species = reaction_systems[0].prunable_species\n prunable_networks = reaction_systems[0].prunable_networks\n\n num_prunable_species = len(prunable_species)\n iteration = self.iteration_num\n # All edge species that have not existed for more than two enlarge\n # iterations are ineligible for pruning\n for spec in prunable_species:\n if iteration - spec.creation_iteration <= min_species_exist_iterations_for_prune:\n ineligible_species.append(spec)\n\n # Get the maximum species rates (and network leak rates)\n # across all reaction systems\n max_edge_species_rate_ratios = np.zeros((num_prunable_species), np.float64)\n for reaction_system in reaction_systems:\n for i in range(num_prunable_species):\n rate_ratio = reaction_system.max_edge_species_rate_ratios[i]\n if max_edge_species_rate_ratios[i] < rate_ratio:\n max_edge_species_rate_ratios[i] = rate_ratio\n\n for i, network in enumerate(prunable_networks):\n rate_ratio = reaction_system.max_network_leak_rate_ratios[i]\n # Add the fraction of the network leak rate contributed by\n # each unexplored species to that species' rate\n # This is to ensure we have an overestimate of that species flux\n ratios = network.get_leak_branching_ratios(reaction_system.T.value_si, reaction_system.P.value_si)\n for spec, frac in ratios.items():\n if spec in prunable_species:\n index = prunable_species.index(spec)\n max_edge_species_rate_ratios[index] += frac * rate_ratio\n # Mark any species that is explored in any partial network as ineligible for pruning\n for spec in network.explored:\n if spec not in ineligible_species:\n ineligible_species.append(spec)\n\n # Sort the edge species rates by index\n indices = np.argsort(max_edge_species_rate_ratios)\n # Determine which species to prune\n species_to_prune = []\n prune_due_to_rate_counter = 0\n for index in indices:\n spec = prunable_species[index]\n if spec in ineligible_species or not spec in self.edge.species:\n continue\n # Remove the species with rates below the pruning tolerance from the model edge\n if max_edge_species_rate_ratios[index] < tol_keep_in_edge:\n species_to_prune.append((index, spec))\n prune_due_to_rate_counter += 1\n # Keep removing species with the lowest rates until we are below the maximum edge species size\n elif num_prunable_species - len(species_to_prune) > maximum_edge_species:\n if max_edge_species_rate_ratios[index] < 
tol_move_to_core:\n logging.info('Pruning species {0} to make num_edge_species smaller than maximum_edge_species'.format(spec))\n species_to_prune.append((index, spec))\n else:\n logging.warning('Attempted to prune a species that exceeded tol_move_to_core, pruning settings '\n 'for this run are likely bad, either maximum_edge_species needs to be set higher '\n '(~100000) or min_species_exist_iterations_for_prune should be reduced (~2)')\n break\n else:\n break\n\n # Actually do the pruning\n if prune_due_to_rate_counter > 0:\n logging.info('Pruning %d species whose rate ratios against characteristic rate did not exceed the '\n 'minimum threshold of %g', prune_due_to_rate_counter, tol_keep_in_edge)\n for index, spec in species_to_prune[0:prune_due_to_rate_counter]:\n logging.info('Pruning species %s', spec)\n logging.debug(' %-56s %10.4e', spec, max_edge_species_rate_ratios[index])\n self.remove_species_from_edge(reaction_systems, spec)\n if len(species_to_prune) - prune_due_to_rate_counter > 0:\n logging.info('Pruning %d species to obtain an edge size of %d species',\n len(species_to_prune) - prune_due_to_rate_counter, maximum_edge_species)\n for index, spec in species_to_prune[prune_due_to_rate_counter:]:\n logging.info('Pruning species %s', spec)\n logging.debug(' %-56s %10.4e', spec, max_edge_species_rate_ratios[index])\n self.remove_species_from_edge(reaction_systems, spec)\n\n # Delete any networks that became empty as a result of pruning\n if self.pressure_dependence:\n self.remove_empty_pdep_networks()\n\n logging.info('')\n\n def remove_species_from_edge(self, reaction_systems, spec):\n \"\"\"\n Remove species `spec` from the reaction model edge.\n \"\"\"\n\n # remove the species\n self.edge.species.remove(spec)\n self.index_species_dict.pop(spec.index)\n\n # clean up species references in reaction_systems\n for reaction_system in reaction_systems:\n try:\n reaction_system.species_index.pop(spec)\n except KeyError:\n pass\n\n # identify any reactions it's involved in\n rxn_list = []\n for rxn in reaction_system.reaction_index:\n if spec in rxn.reactants or spec in rxn.products:\n rxn_list.append(rxn)\n\n for rxn in rxn_list:\n reaction_system.reaction_index.pop(rxn)\n\n # identify any reactions it's involved in\n rxn_list = []\n for rxn in self.edge.reactions:\n if spec in rxn.reactants or spec in rxn.products:\n rxn_list.append(rxn)\n # remove those reactions\n for rxn in rxn_list:\n self.edge.reactions.remove(rxn)\n\n # Remove the species from any unirxn networks it is in\n if self.pressure_dependence:\n for network in self.network_list:\n # Delete all path reactions involving the species\n rxn_list = []\n for rxn in network.path_reactions:\n if spec in rxn.reactants or spec in rxn.products:\n rxn_list.append(rxn)\n if len(rxn_list) > 0:\n for rxn in rxn_list:\n network.path_reactions.remove(rxn)\n # Delete all net reactions involving the species\n rxn_list = []\n for rxn in network.net_reactions:\n if spec in rxn.reactants or spec in rxn.products:\n rxn_list.append(rxn)\n for rxn in rxn_list:\n network.net_reactions.remove(rxn)\n\n # Recompute the isomers, reactants, and products for this network\n network.update_configurations(self)\n\n # Remove from the global list of reactions\n # also remove it from the global list of reactions\n for family in self.reaction_dict:\n if spec in self.reaction_dict[family]:\n del self.reaction_dict[family][spec]\n for reactant1 in self.reaction_dict[family]:\n if spec in self.reaction_dict[family][reactant1]:\n del 
self.reaction_dict[family][reactant1][spec]\n for reactant1 in self.reaction_dict[family]:\n for reactant2 in self.reaction_dict[family][reactant1]:\n temp_rxn_delete_list = []\n for templateReaction in self.reaction_dict[family][reactant1][reactant2]:\n if spec in templateReaction.reactants or spec in templateReaction.products:\n temp_rxn_delete_list.append(templateReaction)\n for tempRxnToBeDeleted in temp_rxn_delete_list:\n self.reaction_dict[family][reactant1][reactant2].remove(tempRxnToBeDeleted)\n\n # remove from the global list of species, to free memory\n formula = spec.molecule[0].get_formula()\n self.species_dict[formula].remove(spec)\n if spec in self.species_cache:\n self.species_cache.remove(spec)\n self.species_cache.append(None)\n\n def add_reaction_to_core(self, rxn):\n \"\"\"\n Add a reaction `rxn` to the reaction model core (and remove from edge if\n necessary). This function assumes `rxn` has already been checked to\n ensure it is supposed to be a core reaction (i.e. all of its reactants\n AND all of its products are in the list of core species).\n \"\"\"\n if rxn not in self.core.reactions:\n self.core.reactions.append(rxn)\n if rxn in self.edge.reactions:\n self.edge.reactions.remove(rxn)\n\n def add_reaction_to_edge(self, rxn):\n \"\"\"\n Add a reaction `rxn` to the reaction model edge. This function assumes\n `rxn` has already been checked to ensure it is supposed to be an edge\n reaction (i.e. all of its reactants OR all of its products are in the\n list of core species, and the others are in either the core or the\n edge).\n \"\"\"\n self.edge.reactions.append(rxn)\n\n def get_model_size(self):\n \"\"\"\n Return the numbers of species and reactions in the model core and edge.\n Note that this is not necessarily equal to the lengths of the\n corresponding species and reaction lists.\n \"\"\"\n core_species_count = len(self.core.species)\n core_reactions_count = len(self.core.reactions)\n edge_species_count = len(self.edge.species)\n edge_reactions_count = len(self.edge.reactions)\n return core_species_count, core_reactions_count, edge_species_count, edge_reactions_count\n\n def get_species_reaction_lists(self):\n \"\"\"\n Return lists of all of the species and reactions in the core and the\n edge.\n \"\"\"\n species_list = []\n species_list.extend(self.core.species)\n species_list.extend(self.edge.species)\n reaction_list = []\n reaction_list.extend(self.core.reactions)\n reaction_list.extend(self.edge.reactions)\n return species_list, reaction_list\n\n def get_stoichiometry_matrix(self):\n \"\"\"\n Return the stoichiometry matrix for all generated species and reactions.\n The id of each species and reaction is the corresponding row and column,\n respectively, in the matrix.\n \"\"\"\n species_list, reaction_list = self.get_species_reaction_lists()\n from scipy import sparse\n stoichiometry = sparse.dok_matrix((self.species_counter, self.reaction_counter), float)\n for rxn in reaction_list:\n j = rxn.index - 1\n spec_list = rxn.reactants[:]\n spec_list.extend(rxn.products)\n for spec in spec_list:\n i = spec.index - 1\n nu = rxn.get_stoichiometric_coefficient(spec)\n if nu != 0:\n stoichiometry[i, j] = nu\n return stoichiometry.tocsr()\n\n def add_seed_mechanism_to_core(self, seed_mechanism, react=False):\n \"\"\"\n Add all species and reactions from `seed_mechanism`, a \n :class:`KineticsPrimaryDatabase` object, to the model core. If `react`\n is ``True``, then reactions will also be generated between the seed\n species. 
For large seed mechanisms this can be prohibitively expensive,\n so it is not done by default.\n \"\"\"\n\n if react:\n raise NotImplementedError(\"react=True doesn't work yet\")\n database = rmgpy.data.rmg.database\n\n library_names = list(database.kinetics.libraries.keys())\n family_names = list(database.kinetics.families.keys())\n\n path = os.path.join(settings['database.directory'], 'kinetics', 'libraries')\n from rmgpy.rmg.input import rmg\n\n self.new_reaction_list = []\n self.new_species_list = []\n\n num_old_core_species = len(self.core.species)\n num_old_core_reactions = len(self.core.reactions)\n\n logging.info('Adding seed mechanism {0} to model core...'.format(seed_mechanism))\n\n seed_mechanism = database.kinetics.libraries[seed_mechanism]\n\n rxns = seed_mechanism.get_library_reactions()\n\n for rxn in rxns:\n if isinstance(rxn, LibraryReaction) and not (rxn.library in library_names) and not (rxn.library == 'kineticsjobs'): # if one of the reactions in the library is from another library load that library\n database.kinetics.library_order.append((rxn.library, 'Internal'))\n database.kinetics.load_libraries(path=path, libraries=[rxn.library])\n library_names = list(database.kinetics.libraries.keys())\n if isinstance(rxn, TemplateReaction) and not (rxn.family in family_names):\n logging.warning('loading reaction {0} originally from family {1} as a library reaction'.format(str(rxn),\n rxn.family))\n rxn = LibraryReaction(reactants=rxn.reactants[:], products=rxn.products[:],\n library=seed_mechanism.name, specific_collider=rxn.specific_collider,\n kinetics=rxn.kinetics, duplicate=rxn.duplicate,\n reversible=rxn.reversible\n )\n r, isNew = self.make_new_reaction(rxn) # updates self.new_species_list and self.newReactionlist\n if not isNew:\n logging.info(\"This library reaction was not new: {0}\".format(rxn))\n elif self.pressure_dependence and rxn.elementary_high_p and rxn.is_unimolecular() \\\n and isinstance(rxn, LibraryReaction) and isinstance(rxn.kinetics, Arrhenius) and \\\n (self.pressure_dependence.maximum_atoms is None or self.pressure_dependence.maximum_atoms >= \\\n sum([len(spec.molecule[0].atoms) for spec in r.reactants])):\n # This unimolecular library reaction is flagged as `elementary_high_p` and has Arrhenius type kinetics.\n # We should calculate a pressure-dependent rate for it\n if len(rxn.reactants) == 1:\n self.process_new_reactions(new_reactions=[rxn], new_species=rxn.reactants[0])\n else:\n self.process_new_reactions(new_reactions=[rxn], new_species=rxn.products[0])\n\n # Perform species constraints and forbidden species checks\n\n for spec in self.new_species_list:\n if database.forbidden_structures.is_molecule_forbidden(spec.molecule[0]):\n if 'allowed' in rmg.species_constraints and 'seed mechanisms' in rmg.species_constraints['allowed']:\n spec.explicitly_allowed = True\n logging.warning(\"Species {0} from seed mechanism {1} is globally forbidden. \"\n \"It will behave as an inert unless found in a seed mechanism \"\n \"or reaction library.\".format(spec.label, seed_mechanism.label))\n else:\n raise ForbiddenStructureException(\"Species {0} from seed mechanism {1} is globally forbidden. 
\"\n \"You may explicitly allow it, but it will remain inert unless \"\n \"found in a seed mechanism or reaction \"\n \"library.\".format(spec.label, seed_mechanism.label))\n if fails_species_constraints(spec):\n if 'allowed' in rmg.species_constraints and 'seed mechanisms' in rmg.species_constraints['allowed']:\n rmg.species_constraints['explicitlyAllowedMolecules'].extend(spec.molecule)\n else:\n raise ForbiddenStructureException(\"Species constraints forbids species {0} from seed mechanism {1}.\"\n \" Please reformulate constraints, remove the species, or\"\n \" explicitly allow it.\".format(spec.label, seed_mechanism.label))\n\n for spec in self.new_species_list:\n if spec.reactive:\n submit(spec, self.solvent_name)\n\n self.add_species_to_core(spec)\n\n for rxn in self.new_reaction_list:\n if self.pressure_dependence and rxn.is_unimolecular():\n # If this is going to be run through pressure dependence code,\n # we need to make sure the barrier is positive.\n # ...but are Seed Mechanisms run through PDep? Perhaps not.\n for spec in itertools.chain(rxn.reactants, rxn.products):\n submit(spec, self.solvent_name)\n\n rxn.fix_barrier_height(force_positive=True)\n self.add_reaction_to_core(rxn)\n\n # Check we didn't introduce unmarked duplicates\n self.mark_chemkin_duplicates()\n\n self.log_enlarge_summary(\n new_core_species=self.core.species[num_old_core_species:],\n new_core_reactions=self.core.reactions[num_old_core_reactions:],\n new_edge_species=[],\n new_edge_reactions=[],\n )\n\n def add_reaction_library_to_edge(self, reaction_library):\n \"\"\"\n Add all species and reactions from `reaction_library`, a\n :class:`KineticsPrimaryDatabase` object, to the model edge.\n \"\"\"\n\n database = rmgpy.data.rmg.database\n library_names = list(database.kinetics.libraries.keys())\n family_names = list(database.kinetics.families.keys())\n path = os.path.join(settings['database.directory'], 'kinetics', 'libraries')\n\n from rmgpy.rmg.input import rmg\n\n self.new_reaction_list = []\n self.new_species_list = []\n\n num_old_edge_species = len(self.edge.species)\n num_old_edge_reactions = len(self.edge.reactions)\n\n logging.info('Adding reaction library {0} to model edge...'.format(reaction_library))\n reaction_library = database.kinetics.libraries[reaction_library]\n\n rxns = reaction_library.get_library_reactions()\n for rxn in rxns:\n if isinstance(rxn, LibraryReaction) and not (rxn.library in library_names): # if one of the reactions in the library is from another library load that library\n database.kinetics.library_order.append((rxn.library, 'Internal'))\n database.kinetics.load_libraries(path=path, libraries=[rxn.library])\n library_names = list(database.kinetics.libraries.keys())\n if isinstance(rxn, TemplateReaction) and not (rxn.family in family_names):\n logging.warning('loading reaction {0} originally from family {1} as a library reaction'.format(str(rxn),\n rxn.family))\n rxn = LibraryReaction(reactants=rxn.reactants[:], products=rxn.products[:],\n library=reaction_library.name, specific_collider=rxn.specific_collider,\n kinetics=rxn.kinetics, duplicate=rxn.duplicate,\n reversible=rxn.reversible\n )\n r, isNew = self.make_new_reaction(rxn) # updates self.new_species_list and self.newReactionlist\n if not isNew:\n logging.info(\"This library reaction was not new: {0}\".format(rxn))\n elif self.pressure_dependence and rxn.elementary_high_p and rxn.is_unimolecular() \\\n and isinstance(rxn, LibraryReaction) and isinstance(rxn.kinetics, Arrhenius) and \\\n 
(self.pressure_dependence.maximum_atoms is None or self.pressure_dependence.maximum_atoms >= \\\n sum([len(spec.molecule[0].atoms) for spec in r.reactants])):\n # This unimolecular library reaction is flagged as `elementary_high_p` and has Arrhenius type kinetics.\n # We should calculate a pressure-dependent rate for it\n if len(rxn.reactants) == 1:\n self.process_new_reactions(new_reactions=[rxn], new_species=rxn.reactants[0])\n else:\n self.process_new_reactions(new_reactions=[rxn], new_species=rxn.products[0])\n\n # Perform species constraints and forbidden species checks\n for spec in self.new_species_list:\n if not reaction_library.auto_generated: # No need to check for forbidden species otherwise\n if database.forbidden_structures.is_molecule_forbidden(spec.molecule[0]):\n if 'allowed' in rmg.species_constraints and 'reaction libraries' in rmg.species_constraints['allowed']:\n spec.explicitly_allowed = True\n logging.warning(\"Species {0} from reaction library {1} is globally forbidden. It will behave \"\n \"as an inert unless found in a seed mechanism or reaction \"\n \"library.\".format(spec.label, reaction_library.label))\n else:\n raise ForbiddenStructureException(\"Species {0} from reaction library {1} is globally \"\n \"forbidden. You may explicitly allow it, but it will remain \"\n \"inert unless found in a seed mechanism or reaction \"\n \"library.\".format(spec.label, reaction_library.label))\n if fails_species_constraints(spec):\n if 'allowed' in rmg.species_constraints and 'reaction libraries' in rmg.species_constraints['allowed']:\n rmg.species_constraints['explicitlyAllowedMolecules'].extend(spec.molecule)\n else:\n raise ForbiddenStructureException(\"Species constraints forbids species {0} from reaction library \"\n \"{1}. Please reformulate constraints, remove the species, or \"\n \"explicitly allow it.\".format(spec.label, reaction_library.label))\n\n for spec in self.new_species_list:\n if spec.reactive:\n submit(spec, self.solvent_name)\n\n self.add_species_to_edge(spec)\n\n for rxn in self.new_reaction_list:\n # Note that we haven't actually evaluated any fluxes at this point\n # Instead, we remove the comment below if the reaction is moved to\n # the core later in the mechanism generation\n if not (self.pressure_dependence and rxn.elementary_high_p and rxn.is_unimolecular()\n and isinstance(rxn, LibraryReaction) and isinstance(rxn.kinetics, Arrhenius) and \\\n (self.pressure_dependence.maximum_atoms is None or self.pressure_dependence.maximum_atoms >= \\\n sum([len(spec.molecule[0].atoms) for spec in r.reactants]))):\n # Don't add to the edge library reactions that were already processed\n self.add_reaction_to_edge(rxn)\n\n if self.save_edge_species:\n from rmgpy.chemkin import mark_duplicate_reaction\n new_edge_reactions = self.edge.reactions[num_old_edge_reactions:]\n checked_reactions = self.core.reactions + self.edge.reactions[:num_old_edge_reactions]\n for rxn in new_edge_reactions:\n mark_duplicate_reaction(rxn, checked_reactions)\n checked_reactions.append(rxn)\n\n self.log_enlarge_summary(\n new_core_species=[],\n new_core_reactions=[],\n new_edge_species=self.edge.species[num_old_edge_species:],\n new_edge_reactions=self.edge.reactions[num_old_edge_reactions:],\n )\n\n def add_reaction_library_to_output(self, reaction_library):\n \"\"\"\n Add all species and reactions from `reaction_library`, a\n :class:`KineticsPrimaryDatabase` object, to the output.\n This does not bring any of the reactions or species into the core itself.\n \"\"\"\n\n 
logging.info('Adding reaction library {0} to output file...'.format(reaction_library))\n\n # Append the edge reactions that are from the selected reaction library to an output species and output reactions list\n for rxn in self.edge.reactions:\n if isinstance(rxn, LibraryReaction):\n if rxn.library == reaction_library:\n self.output_reaction_list.append(rxn)\n\n for species in rxn.reactants + rxn.products:\n if species not in self.core.species and species not in self.output_species_list:\n self.output_species_list.append(species)\n\n def add_reaction_to_unimolecular_networks(self, newReaction, new_species, network=None):\n \"\"\"\n Given a newly-created :class:`Reaction` object `newReaction`, update the\n corresponding unimolecular reaction network. If no network exists, a new\n one is created. If the new reaction is an isomerization that connects two\n existing networks, the two networks are merged. This function is called\n whenever a new high-pressure limit edge reaction is created. Returns the\n network containing the new reaction.\n \"\"\"\n\n assert isinstance(new_species, Species)\n\n # Put the reaction in the direction in which the new species is in the reactants\n if new_species in newReaction.reactants:\n reactants = newReaction.reactants[:]\n products = newReaction.products[:]\n else:\n reactants = newReaction.products[:]\n products = newReaction.reactants[:]\n reactants.sort()\n products.sort()\n\n source = tuple(reactants)\n\n # Only search for a network if we don't specify it as a parameter\n if network is None:\n if len(reactants) == 1:\n # Find the network containing the reactant as the source\n try:\n networks = self.network_dict[source]\n assert len(networks) == 1\n network = networks[0]\n except KeyError:\n pass\n elif len(reactants) > 1:\n # Find the network containing the reactants as the source AND the\n # product as an explored isomer\n try:\n networks = self.network_dict[source]\n for n in networks:\n if products[0] in n.explored:\n assert network is None\n network = n\n except KeyError:\n pass\n else:\n return\n\n # If no suitable network exists, create a new one\n if network is None:\n self.network_count += 1\n network = PDepNetwork(index=self.network_count, source=reactants[:])\n # should the source passed to PDepNetwork constuctor be a tuple not a list? that's what is used in network_dict\n try:\n self.network_dict[source].append(network)\n except KeyError:\n self.network_dict[source] = [network]\n self.network_list.append(network)\n\n # Add the path reaction to that network\n network.add_path_reaction(newReaction)\n\n def update_unimolecular_reaction_networks(self):\n \"\"\"\n Iterate through all of the currently-existing unimolecular reaction\n networks, updating those that have been marked as invalid. 
In each update,\n the phenomonological rate coefficients :math:`k(T,P)` are computed for\n each net reaction in the network, and the resulting reactions added or\n updated.\n \"\"\"\n\n # Merge networks if necessary\n # Two partial networks having the same source and containing one or\n # more explored isomers in common must be merged together to avoid\n # double-counting of rates\n for networks in self.network_dict.values():\n network_count = len(networks)\n for index0, network0 in enumerate(networks):\n index = index0 + 1\n while index < network_count:\n found = False\n network = networks[index]\n if network0.source == network.source:\n # The networks contain the same source, but do they contain any common included isomers (other than the source)?\n for isomer in network0.explored:\n if isomer != network.source and isomer in network.explored:\n # The networks contain an included isomer in common, so we need to merge them\n found = True\n break\n if found:\n # The networks contain the same source and one or more common included isomers\n # Therefore they need to be merged together\n logging.info(\n 'Merging PDepNetwork #{0:d} and PDepNetwork #{1:d}'.format(network0.index, network.index))\n network0.merge(network)\n networks.remove(network)\n self.network_list.remove(network)\n network_count -= 1\n else:\n index += 1\n\n count = sum([1 for network in self.network_list if\n not network.valid and not (len(network.explored) == 0 and len(network.source) > 1)])\n logging.info('Updating {0:d} modified unimolecular reaction networks (out of {1:d})...'.format(count, len(\n self.network_list)))\n\n # Iterate over all the networks, updating the invalid ones as necessary\n # self = reaction_model object\n updated_networks = []\n for network in self.network_list:\n if not network.valid:\n network.update(self, self.pressure_dependence)\n updated_networks.append(network)\n\n # PDepReaction objects generated from partial networks are irreversible\n # However, it makes more sense to have reversible reactions in the core\n # Thus we mark PDepReaction objects as reversible and remove the reverse\n # direction from the list of core reactions\n # Note that well-skipping reactions may not have a reverse if the well\n # that they skip over is not itself in the core\n index = 0\n core_reaction_count = len(self.core.reactions)\n while index < core_reaction_count:\n reaction = self.core.reactions[index]\n if isinstance(reaction, PDepReaction):\n for reaction2 in self.core.reactions[index + 1:]:\n if isinstance(reaction2,\n PDepReaction) and reaction.reactants == reaction2.products and reaction.products == reaction2.reactants:\n # We've found the PDepReaction for the reverse direction\n dGrxn = reaction.get_free_energy_of_reaction(300.)\n kf = reaction.get_rate_coefficient(1000, 1e5)\n kr = reaction.get_rate_coefficient(1000, 1e5) / reaction.get_equilibrium_constant(1000)\n kf2 = reaction2.get_rate_coefficient(1000, 1e5) / reaction2.get_equilibrium_constant(1000)\n kr2 = reaction2.get_rate_coefficient(1000, 1e5)\n if kf / kf2 < 0.5 or kf / kf2 > 2.0:\n # Most pairs of reactions should satisfy thermodynamic consistency (or at least be \"close\")\n # Warn about the ones that aren't close (but don't abort)\n logging.warning('Forward and reverse PDepReactions for reaction {0!s} generated from '\n 'networks {1:d} and {2:d} do not satisfy thermodynamic '\n 'consistency.'.format(reaction,\n reaction.network.index,\n reaction2.network.index))\n logging.warning('{0!s}:'.format(reaction))\n logging.warning('{0:.2e} 
{1:.2e}:'.format(kf, kf2))\n logging.warning('{0!s}:'.format(reaction2))\n logging.warning('{0:.2e} {1:.2e}:'.format(kr, kr2))\n # Keep the exergonic direction\n keep_first = dGrxn < 0\n # Delete the PDepReaction that we aren't keeping\n if keep_first:\n self.core.reactions.remove(reaction2)\n reaction.reversible = True\n else:\n self.core.reactions.remove(reaction)\n self.core.reactions.remove(reaction2)\n self.core.reactions.insert(index, reaction2)\n reaction2.reversible = True\n core_reaction_count -= 1\n # There should be only one reverse, so we can stop searching once we've found it\n break\n else:\n reaction.reversible = True\n # Move to the next core reaction\n index += 1\n\n def mark_chemkin_duplicates(self):\n \"\"\"\n Check that all reactions that will appear the chemkin output have been checked as duplicates.\n \n Call this if you've done something that may have introduced undetected duplicate reactions,\n like add a reaction library or seed mechanism.\n Anything added via the :meth:`expand` method should already be detected.\n \"\"\"\n from rmgpy.chemkin import mark_duplicate_reactions\n\n rxn_list = self.core.reactions + self.output_reaction_list\n mark_duplicate_reactions(rxn_list)\n\n def register_reaction(self, rxn):\n \"\"\"\n Adds the reaction to the reaction database.\n\n The reaction database is structured as a multi-level\n dictionary, for efficient search and retrieval of\n existing reactions.\n\n The database has two types of dictionary keys:\n - reaction family\n - reactant(s) keys\n\n First, the keys are generated for the parameter reaction.\n \n Next, it is checked whether the reaction database already \n contains similar keys. If not, a new container is created,\n either a dictionary for the family key and first reactant key,\n or a list for the second reactant key.\n\n Finally, the reaction is inserted as the first element in the \n list.\n \"\"\"\n\n key_family, key1, key2 = generate_reaction_key(rxn)\n\n # make dictionary entries if necessary\n if key_family not in self.reaction_dict:\n self.reaction_dict[key_family] = {}\n\n if key1 not in self.reaction_dict[key_family]:\n self.reaction_dict[key_family][key1] = {}\n\n if key2 not in self.reaction_dict[key_family][key1]:\n self.reaction_dict[key_family][key1][key2] = []\n\n # store this reaction at the top of the relevant short-list\n self.reaction_dict[key_family][key1][key2].insert(0, rxn)\n\n def search_retrieve_reactions(self, rxn):\n \"\"\"\n Searches through the reaction database for \n reactions with an identical reaction key as the key of the \n parameter reaction.\n\n Both the reaction key based on the reactants as well as on the products\n is used to search for possible candidate reactions.\n \"\"\"\n\n # Get the short-list of reactions with the same family, reactant1 and reactant2\n family_label, r1_fwd, r2_fwd = generate_reaction_key(rxn)\n\n my_reaction_list = []\n\n rxns = self.retrieve(family_label, r1_fwd, r2_fwd)\n my_reaction_list.extend(rxns)\n\n family = get_family_library_object(family_label)\n # if the family is its own reverse (H-Abstraction) then check the other direction\n if isinstance(family, KineticsFamily):\n # Get the short-list of reactions with the same family, product1 and product2\n family_label, r1_rev, r2_rev = generate_reaction_key(rxn, useProducts=True)\n\n rxns = self.retrieve(family_label, r1_rev, r2_rev)\n my_reaction_list.extend(rxns)\n\n return my_reaction_list\n\n def initialize_index_species_dict(self):\n \"\"\"\n Populates the core species dictionary\n \n integer 
-> core Species\n\n with the species that are currently in the core.\n \"\"\"\n\n for spc in itertools.chain(self.core.species, self.edge.species):\n if spc.reactive:\n self.index_species_dict[spc.index] = spc\n\n def retrieve(self, family_label, key1, key2):\n \"\"\"\n Returns a list of reactions from the reaction database with the \n same keys as the parameters.\n\n Returns an empty list when one of the keys could not be found.\n \"\"\"\n try:\n return self.reaction_dict[family_label][key1][key2][:]\n except KeyError: # no such short-list: must be new, unless in seed.\n return []\n\n\ndef generate_reaction_key(rxn, useProducts=False):\n \"\"\"\n Returns a tuple with 3 keys:\n - the reaction family (or library) the reaction belongs to\n - the keys of the reactants.\n\n None for the third element in the tuple if there is \n only 1 reactant.\n\n The keys are sorted alphabetically.\n \"\"\"\n\n key_family = rxn.family\n\n spc_list = rxn.products if useProducts else rxn.reactants\n\n if len(spc_list) == 1:\n key1, key2 = get_key(spc_list[0]), None\n else:\n # Use 2 keys, even for trimolecular reactions?\n key1, key2 = sorted([get_key(spc_list[0]), get_key(spc_list[1])])\n\n return key_family, key1, key2\n\n\ndef generate_reaction_id(rxn):\n \"\"\"\n Returns a tuple of the reactions reactant and product\n keys.\n\n Both lists are sorted.\n\n The first element in the tuple is the reactants list.\n \"\"\"\n\n reactants = sorted([get_key(reactant) for reactant in rxn.reactants])\n products = sorted([get_key(product) for product in rxn.products])\n\n return reactants, products\n\n\ndef get_family_library_object(label):\n \"\"\"\n Returns the KineticsFamily or KineticsLibrary object associated with the\n parameter string.\n\n First search through the reaction families, then \n through the libraries.\n \"\"\"\n\n kinetics = rmgpy.data.rmg.database.kinetics\n\n try:\n fam = kinetics.families[label]\n return fam\n except KeyError:\n pass\n\n try:\n lib = kinetics.libraries[label]\n return lib\n except KeyError:\n pass\n\n raise Exception('Could not retrieve the family/library: {}'.format(label))\n\n\ndef get_key(spc):\n \"\"\"\n Returns a string of the species that can serve as a key in a dictionary.\n \"\"\"\n\n return spc.label\n\n\ndef are_identical_species_references(rxn1, rxn2):\n \"\"\"\n Checks if the references of the reactants and products of the two reactions\n are identical, in either direction.\n \"\"\"\n identical_same_direction = rxn1.reactants == rxn2.reactants and rxn1.products == rxn2.products\n identical_opposite_directions = rxn1.reactants == rxn2.products and rxn1.products == rxn2.reactants\n identical_collider = rxn1.specific_collider == rxn2.specific_collider\n\n return (identical_same_direction or identical_opposite_directions) and identical_collider\n" ]
[ [ "numpy.argsort", "scipy.sparse.dok_matrix", "numpy.zeros", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
maxiwelian/deepqmc
[ "0d243e2c5f5964a79929294e62e46819653b137b" ]
[ "src/deepqmc/wf/ferminet/pretrainer.py" ]
[ "from torch.distributions import Normal\nimport numpy as np\nfrom torch import nn\nfrom typing import Tuple\nfrom tqdm.auto import trange\nimport torch\n\nfrom deepqmc import Molecule\nfrom deepqmc.pyscfext import eval_ao_normed, pyscf_from_mol\nfrom deepqmc.wf import WaveFunction\nfrom deepqmc.wf.paulinet.molorb import MolecularOrbital\nfrom deepqmc.wf.paulinet.gto import GTOBasis\nfrom deepqmc.physics import pairwise_diffs, local_energy\n\n\n__all__ = ['Pretrainer']\n\nclass Pretrainer(nn.Module):\n r\"\"\" Implements the FermiNet wave function Ansatz pretraining based on [pfau2020ab]\n\n Provides tools for pretraining the Ansatz.\n\n .. math:\n\n Usage:\n wf = FermiNet(mol, n_layers, nf_hidden_single, nf_hidden_pairwise, n_determinants).cuda()\n pretrainer = Pretrainer(mol).cuda()\n pretrainer.pretrain(wf)\n\n Args:\n mol (:class:`~deepqmc.Molecule`): molecule whose wave function is represented\n basis (str): basis for the molecular orbitals\n\n \"\"\"\n\n def __init__(self,\n mol,\n basis: str = '6-311g',\n device: str = 'cuda',\n dtype: torch.dtype = torch.float32):\n super(Pretrainer, self).__init__()\n self.device = device\n self.dtype = dtype\n\n self.atom_positions = [x.cpu().numpy() for x in mol.coords.split(1, dim=0)]\n self.ne_atoms = [int(i) for i in mol.charges]\n\n self.mol = mol\n self.n_elec = int(mol.charges)\n self.n_up = (self.n_elec + mol.spin) // 2\n self.n_down = (self.n_elec - mol.spin) // 2\n self.n_atoms = len(self.mol)\n self.n_orbitals = max(self.n_up, self.n_down) # if n_orbital is none return max of up or down\n # cas and workdir set to None\n\n self.mf, _ = pyscf_from_mol(mol, basis)\n\n def compute_orbital_probability(self, samples: torch.Tensor) -> torch.Tensor:\n up_dets, down_dets = self.hf_orbitals(samples)\n\n spin_ups = up_dets ** 2\n spin_downs = down_dets ** 2\n\n p_up = torch.diagonal(spin_ups, dim1=-2, dim2=-1).prod(-1)\n p_down = torch.diagonal(spin_downs, dim1=-2, dim2=-1).prod(-1)\n\n probabilities = p_up * p_down\n\n return probabilities.detach()\n\n def hf_orbitals(self, samples: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n n_samples = samples.shape[0]\n # mol = Molecule.from_name('H2O')\n # mf, _ = pyscf_from_mol(mol, '6-31g')\n # rs = torch.randn(100, 10, 3).double()\n # mo = (eval_ao_normed(mf.mol, rs.flatten(end_dim=1).numpy()).reshape(100, 10, -1) @ mf.mo_coeff[:, :5])\n #\n samples = samples.flatten(end_dim=1).cpu().numpy()\n determinants = (eval_ao_normed(self.mf.mol, samples).reshape(n_samples, self.n_elec, -1)\n @ self.mf.mo_coeff[:, :self.n_orbitals])\n determinants = torch.from_numpy(determinants).to(device=self.device, dtype=self.dtype)\n up_dets, down_dets = determinants.split([self.n_up, self.n_down], dim=1)\n up_dets, down_dets = up_dets[:, :, :up_dets.shape[1]], down_dets[:, :, :down_dets.shape[1]]\n\n return up_dets, down_dets\n\n def pretrain(self,\n wf: WaveFunction,\n n_samples: int = 1024,\n n_steps: int = 1000,\n lr: float = 1e-4):\n\n sampler = MetropolisHastingsPretrain()\n opt = torch.optim.Adam(list(wf.parameters())[:-1], lr=lr)\n steps = trange(\n 0, # init_step = 0\n n_steps,\n initial=0,\n total=n_steps,\n desc='pretraining',\n disable=None,\n )\n\n samples = initialize_samples(self.ne_atoms, self.atom_positions, n_samples).to(device=self.device, dtype=self.dtype)\n\n for step in steps:\n Es_loc, _, _ = local_energy(\n samples,\n wf.sample(False),\n create_graph=False,\n keep_graph=False,\n )\n\n samples = sampler(wf, self, samples)\n\n up_dets, down_dets = self.hf_orbitals(samples)\n up_dets = 
tile_labels(up_dets, wf.n_determinants)\n down_dets = tile_labels(down_dets, wf.n_determinants)\n\n wf.pretraining = True\n model_up_dets, model_down_dets = wf(samples)\n wf.pretraining = False\n\n loss = mse_error(up_dets, model_up_dets)\n loss += mse_error(down_dets, model_down_dets)\n opt.zero_grad()\n loss.backward() # in order for hook to work must call backward\n opt.step()\n\n steps.set_postfix(E=f'{Es_loc.mean():.6f}')\n\n # print('iteration: ', step, ' energy: ', Es_loc.mean())\n\n\ndef mse_error(targets: torch.Tensor, outputs: torch.Tensor) -> torch.Tensor:\n return ((targets - outputs)**2).mean(0).sum()\n\n\ndef tile_labels(label: torch.Tensor, n_k: int) -> torch.Tensor:\n x = label.unsqueeze(dim=1).repeat((1, n_k, 1, 1))\n return x\n\n\nclass RandomWalker():\n r\"\"\" Creates normal sampler with std of sigma\n\n Used to suggest new updates to the positions of the walkers\n\n Usage:\n distr = RandomWalker(sigma.to(device=device, dtype=dtype))\n\n Args:\n sigma (float): step size of the walkers\n\n \"\"\"\n def __init__(self, sigma):\n self.step_gaussian = Normal(0.0, sigma)\n\n def resample(self, prev_sample) -> torch.Tensor:\n return prev_sample + self.step_gaussian.sample(prev_sample.shape)\n\n\nclass Uniform(nn.Module):\n r\"\"\" Creates a uniform sampler between 0 and 1\n\n Used to determine whether moves accepted or rejected\n\n Usage:\n alpha_distr = Uniform(torch.tensor(0.).to(device=device, dtype=dtype), torch.tensor(1.).to(device=device, dtype=dtype))\n\n \"\"\"\n def __init__(self, low=0, high=1):\n super(Uniform, self).__init__()\n self.low = torch.tensor(low) if type(low) != torch.Tensor else low\n self.high = torch.tensor(high) if type(high) != torch.Tensor else high\n\n def forward(self, batch_size: int = 1):\n return self.low + torch.rand(batch_size, device=self.low.device) * (self.high - self.low)\n\n def sample(self, batch_size: int = 1):\n return self(batch_size)\n\n\nclass ToProb(nn.Module):\n def forward(self, amps: torch.Tensor) -> torch.Tensor:\n return torch.exp(amps) ** 2\n\n\ndef initialize_samples(ne_atoms, atom_positions, n_samples):\n r\"\"\" Initialises samples for pretraining\n\n Usage:\n samples = initialize_samples(ne_atoms, atom_positions, n_samples).to(device=self.device, dtype=self.dtype)\n\n Args:\n ne_atoms (list int): number of electrons assigned to each nucleus\n atom_positions (list np.array): positions of the nuclei\n n_samples (int): number of walkers\n\n Returns:\n samples (np.array): walker positions (n_samples, n_elec, 3)\n\n \"\"\"\n ups = []\n downs = []\n for ne_atom, atom_position in zip(ne_atoms, atom_positions):\n for e in range(ne_atom):\n if e % 2 == 0: # fill up the orbitals alternating up down\n curr_sample_up = np.random.normal(loc=atom_position, scale=1., size=(n_samples, 1, 3))\n ups.append(curr_sample_up)\n else:\n curr_sample_down = np.random.normal(loc=atom_position, scale=1., size=(n_samples, 1, 3))\n downs.append(curr_sample_down)\n ups = np.concatenate(ups, axis=1)\n downs = np.concatenate(downs, axis=1)\n curr_sample = np.concatenate([ups, downs], axis=1) # stack the ups first to be consistent with model\n return torch.from_numpy(curr_sample)\n\n\nclass MetropolisHastingsPretrain(nn.Module):\n r\"\"\" Implements MetropolisHastings sampling based on [pfau2020ab]\n\n Samples congigurations based on the amplitudes of both the Hartree Fock orbitals and the wave function Ansatz\n\n .. 
math:\n\n Usage:\n sampler = MetropolisHastingsPretrain()\n\n Args:\n sigma (float): step size for the walkers (std of the proposed moves)\n correlation_length (int): number of steps between sampling each update of the walker positions\n target_acceptance (float): the target acceptance of the steps\n\n Returns:\n curr_sample (torch.Tensor): walker configurations (n_samples, n_elec, 3)\n\n \"\"\"\n def __init__(self,\n sigma: float = 0.02,\n correlation_length: int = 10,\n target_acceptance: float = 0.5,\n device: str = 'cuda',\n dtype: torch.dtype = torch.float32):\n super(MetropolisHastingsPretrain, self).__init__()\n self.device = device\n self.dtype = dtype\n\n self.sigma = sigma\n self.correlation_length = correlation_length\n\n self.distr = RandomWalker(sigma)\n self.alpha_distr = Uniform(torch.tensor(0.).to(device=device, dtype=dtype), torch.tensor(1.).to(device=device, dtype=dtype))\n self.to_prob = ToProb()\n\n self.acceptance = 0.0\n self.target_acceptance = target_acceptance\n\n print('initialized pretraining sampler')\n\n def forward(self, model, pretrainer, curr_sample):\n n_samples = curr_sample.shape[0]\n\n # --- split the walkers and sample half from the hf_orbitals and half from the wave function\n sams = curr_sample.split([n_samples // 2, n_samples // 2])\n curr_sample_model, curr_sample_hf = sams[0].squeeze(), sams[1].squeeze()\n shape = curr_sample_model.shape\n\n curr_log_amp = model(curr_sample_model)[0]\n curr_prob_model = self.to_prob(curr_log_amp)\n curr_prob_hf = pretrainer.compute_orbital_probability(curr_sample_hf)\n\n acceptance_total_mod = 0.\n acceptance_total_hf = 0.\n for _ in range(self.correlation_length):\n # --- next sample\n new_sample_model = curr_sample_model + torch.normal(0.0, self.sigma, size=shape, device=self.device, dtype=self.dtype)\n new_log_amp = model(new_sample_model)[0]\n new_prob_model = self.to_prob(new_log_amp)\n\n new_sample_hf = curr_sample_hf + torch.normal(0.0, self.sigma, size=shape, device=self.device)\n new_prob_hf = pretrainer.compute_orbital_probability(new_sample_hf).to(self.device)\n\n # --- update sample\n alpha_model = new_prob_model / curr_prob_model\n alpha_hf = new_prob_hf / curr_prob_hf\n\n # --- generate masks\n mask_model = alpha_model > torch.rand(shape[0], device=self.device, dtype=self.dtype)\n mask_hf = alpha_hf > torch.rand(shape[0], device=self.device, dtype=self.dtype)\n\n curr_sample_model = torch.where(mask_model.unsqueeze(-1).unsqueeze(-1), new_sample_model, curr_sample_model)\n curr_prob_model = torch.where(mask_model, new_prob_model, curr_prob_model)\n\n curr_sample_hf = torch.where(mask_hf.unsqueeze(-1).unsqueeze(-1), new_sample_hf, curr_sample_hf)\n curr_prob_hf = torch.where(mask_hf, new_prob_hf, curr_prob_hf)\n\n acceptance_total_mod += mask_model.type(self.dtype).mean()\n acceptance_total_hf += mask_hf.type(self.dtype).mean()\n\n curr_sample = torch.cat([curr_sample_model, curr_sample_hf], dim=0)\n # --- randomly permute so some walkers in the next run sample from different distribution than in this run\n idxs = torch.randperm(len(curr_sample))\n curr_sample = curr_sample[idxs]\n return curr_sample\n\n def adjust_sampling_steps(self, acceptance):\n if acceptance < 0.5:\n self.sigma += 0.001\n else:\n self.sigma -= 0.001\n\n\n\n\"\"\"\nBelow is a depreciated version of the pretrainer which works the same\n\"\"\"\nclass Pretrainer_dep(nn.Module):\n r\"\"\" Implements the FermiNet wave function Ansatz pretraining based on [pfau2020ab]\n\n Provides tools for pretraining the Ansatz.\n\n .. 
math:\n\n Usage:\n wf = FermiNet(mol, n_layers, nf_hidden_single, nf_hidden_pairwise, n_determinants).cuda()\n pretrainer = Pretrainer(mol).cuda()\n pretrainer.pretrain(wf)\n\n Args:\n mol (:class:`~deepqmc.Molecule`): molecule whose wave function is represented\n basis (str): basis for the molecular orbitals\n\n \"\"\"\n\n def __init__(self,\n mol,\n basis: str = '6-311g',\n device: str = 'cuda',\n dtype: torch.dtype = torch.float32):\n super(Pretrainer_dep, self).__init__()\n self.device = device\n self.dtype = dtype\n\n self.atom_positions = [x.cpu().numpy() for x in mol.coords.split(1, dim=0)]\n self.ne_atoms = [int(i) for i in mol.charges]\n\n self.mol = mol\n self.n_elec = int(mol.charges)\n self.n_up = (self.n_elec + mol.spin) // 2\n self.n_down = (self.n_elec - mol.spin) // 2\n self.n_atoms = len(self.mol)\n self.n_orbitals = max(self.n_up, self.n_down) # if n_orbital is none return max of up or down\n # cas and workdir set to None\n mf, mc = pyscf_from_mol(mol, basis, None, None)\n basis = GTOBasis.from_pyscf(mf.mol) # basis from molecule from name\n mol = Molecule(\n mf.mol.atom_coords(),\n mf.mol.atom_charges(),\n mf.mol.charge,\n mf.mol.spin,\n )\n self.mo = MolecularOrbital( # create the molecular orbital\n mol,\n basis,\n self.n_orbitals,\n cusp_correction=False)\n self.mo.init_from_pyscf(mf, freeze_mos=True)\n self.coords = mol.coords.unsqueeze(0).to(device=device, dtype=dtype)\n\n def compute_orbital_probability(self, samples: torch.Tensor) -> torch.Tensor:\n up_dets, down_dets = self.hf_orbitals(samples)\n\n spin_ups = up_dets ** 2\n spin_downs = down_dets ** 2\n\n p_up = torch.diagonal(spin_ups, dim1=-2, dim2=-1).prod(-1)\n p_down = torch.diagonal(spin_downs, dim1=-2, dim2=-1).prod(-1)\n\n probabilities = p_up * p_down\n\n return probabilities.detach()\n\n def hf_orbitals(self, samples: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n samples_hf = samples.view(-1, 1, 3).repeat(1, self.n_atoms, 1)\n diffs_nuc = pairwise_diffs(torch.cat([self.coords, samples_hf]), self.coords).squeeze(1)\n determinants = self.mo(diffs_nuc).unsqueeze(1).view(-1, self.n_elec, self.n_orbitals)\n up_dets, down_dets = determinants.split([self.n_up, self.n_down], dim=1)\n up_dets, down_dets = up_dets[:, :, :up_dets.shape[1]], down_dets[:, :, :down_dets.shape[1]]\n return up_dets, down_dets\n\n def pretrain(self,\n wf: WaveFunction,\n n_samples: int = 1024,\n n_steps: int = 1000):\n\n sampler = MetropolisHastingsPretrain()\n opt = torch.optim.Adam(list(wf.parameters())[:-3], lr=0.0001)\n steps = trange(\n 0, # init_step = 0\n n_steps,\n initial=0,\n total=n_steps,\n desc='pretraining',\n disable=None,\n )\n\n samples = initialize_samples(self.ne_atoms, self.atom_positions, n_samples).to(device=self.device, dtype=self.dtype)\n\n for step in steps:\n Es_loc, _, _ = local_energy(\n samples,\n wf.sample(False),\n create_graph=False,\n keep_graph=False,\n )\n\n samples = sampler(wf, self, samples)\n\n up_dets, down_dets = self.hf_orbitals(samples)\n up_dets = tile_labels(up_dets, wf.n_determinants)\n down_dets = tile_labels(down_dets, wf.n_determinants)\n\n wf.pretraining = True\n model_up_dets, model_down_dets = wf(samples)\n wf.pretraining = False\n\n loss = mse_error(up_dets, model_up_dets)\n loss += mse_error(down_dets, model_down_dets)\n wf.zero_grad()\n loss.backward() # in order for hook to work must call backward\n opt.step()\n print('iteration: ', step, ' energy: ', Es_loc.mean())\n\n" ]
[ [ "torch.normal", "torch.diagonal", "torch.cat", "torch.from_numpy", "torch.tensor", "numpy.concatenate", "torch.exp", "numpy.random.normal", "torch.rand", "torch.where", "torch.distributions.Normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
data301-2021-winter1/project-group33-project
[ "52ea0628521e12f98ef2d3d977350a526bd10600" ]
[ "notebooks/ungraded/project_functions1.py" ]
[ "# Imports\nimport os\nfrom os import listdir\nimport pandas as pd\nimport datetime\n\nmonths = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\n\ndef addMissingMonths(df):\n count = 0\n for year in df[\"Year\"].unique():\n for province in df[\"Jurisdiction\"].unique():\n if len(df[(df[\"Year\"] == year) & (df[\"Jurisdiction\"] == province)].index) != 12:\n for month in months:\n if df[(df[\"Year\"] == year) & (df[\"Jurisdiction\"] == province) & (df[\"Month\"] == month)].empty:\n df = df.append({\"Year\" : year, \"Jurisdiction\" : province, \"Month\" : month, \"Area (hectares)\" : 0, \"Number\" : 0}, ignore_index=True)\n count = count + 1\n return df\n\n# Import and process functions\n\ndef loadAndProcessFireData(pathToDataDir):\n fireData = {}\n for f in listdir(pathToDataDir):\n if(f.endswith(\".csv\")):\n fireData[f] = (\n pd.read_csv(pathToDataDir + \"/\" + f)\n .pipe(lambda x : x.drop(columns = x.columns[[1,2,4,6,8,9,10]]))\n .loc[lambda x : x[\"Year\"] != 2020]\n .loc[lambda x : x[\"Jurisdiction\"] != \"Parks Canada\"]\n )\n return fireData\n\ndef loadAndProcessONIData(pathToONIDataFile):\n ONIData = (\n pd.read_csv(pathToONIDataFile, sep='\\s+', header=8)\n .pipe(lambda x : x.drop(columns = x.columns[[2, 3, 4]]))\n .rename(columns = {\"YEAR\" : \"Year\", \"MON/MMM\" : \"MonthNum\", \"PHASE\" : \"Phase\"})\n .loc[lambda x : x[\"Phase\"] != \"M\"]\n .assign(Month = lambda x: x[\"MonthNum\"].map({\n 1 : \"January\",\n 2 : \"February\",\n 3 : \"March\",\n 4 : \"April\",\n 5 : \"May\",\n 6 : \"June\",\n 7 : \"July\",\n 8 : \"August\",\n 9 : \"September\",\n 10 : \"October\",\n 11 : \"November\",\n 12 : \"December\"\n }))\n .loc[:, [\"Year\", \"Month\", \"ONI\", \"Phase\", \"MonthNum\"]]\n )\n return ONIData\n\ndef createONIByYear(ONIData):\n ONIByYear = pd.DataFrame(columns = {\"Year\", \"AvgONI\", \"Phase\"})\n for year in range(ONIData[\"Year\"].min(), ONIData[\"Year\"].max()):\n avgONI = ONIData[ONIData[\"Year\"] == year][\"ONI\"].sum()/12\n phase = \"N\"\n if avgONI <= -0.5:\n phase = \"L\"\n if avgONI >= 0.5:\n phase = \"E\"\n ONIByYear = ONIByYear.append({\"Year\" : year, \"AvgONI\" : avgONI, \"Phase\" : phase}, ignore_index=True)\n return ONIByYear\n\n# Data Merging functions\n\ndef createDataByMonth(fireData, ONIData):\n df = addMissingMonths(\n pd.merge(\n fireData[\"NFD-Area_burned_by_month-ENFR.csv\"], \n fireData[\"NFD-Number_of_fires_by_month-ENFR.csv\"]\n )\n ).merge(\n ONIData,\n how = \"left\",\n on = [\"Year\", \"Month\"]\n )\n df[\"Month\"] = pd.Categorical(df['Month'], categories=months, ordered=True)\n return (\n df.sort_values([\"Year\", \"Jurisdiction\", \"Month\"])\n .assign(DateTime = lambda x : pd.to_datetime(dict(year=x[\"Year\"], month=x[\"MonthNum\"], day=1)))\n .pipe(lambda x : x.drop(columns = [\"MonthNum\"]))\n )\n \n\ndef createDataByCause(fireData, ONIByYear):\n return (\n pd.merge(\n fireData[\"NFD-Area_burned_by_cause_class-ENFR.csv\"],\n fireData[\"NFD-Number_of_fires_by_cause_class-ENFR.csv\"]\n ).merge(\n ONIByYear,\n how = \"left\",\n on = [\"Year\"]\n )\n )\n\ndef createDataBySize(fireData, ONIByYear):\n return ( \n pd.merge(\n fireData[\"NFD-Area_burned_by_fire_size_class-ENFR.csv\"],\n fireData[\"NFD-Number_of_fires_by_fire_size_class-ENFR.csv\"]\n ).merge(\n ONIByYear,\n how = \"left\",\n on = [\"Year\"]\n )\n )" ]
[ [ "pandas.Categorical", "pandas.merge", "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
ollyparryevans/mrcnn_deployment
[ "184d6475ecbc8c15db38701a1f0b9e5e060686a5" ]
[ "mrcnn/model.py" ]
[ "\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implementation.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport random\nimport datetime\nimport re\nimport math\nimport logging\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport skimage.transform\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.engine as KE\nimport keras.models as KM\n\nfrom mrcnn import utils\n\n# Requires TensorFlow 1.3+ and Keras 2.0.8+.\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.3\")\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')\n\n############################################################\n# Utility Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} min: {:10.5f} max: {:10.5f} {}\".format(\n str(array.shape),\n array.min() if array.size else \"\",\n array.max() if array.size else \"\",\n array.dtype))\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\n to make changes if needed.\n\n Batch normalization has a negative effect on training if batches are small\n so this layer is often frozen (via setting in Config class) and functions\n as linear layer.\n \"\"\"\n def call(self, inputs, training=None):\n \"\"\"\n Note about training values:\n None: Train BN layers. This is the normal mode\n False: Freeze BN layers. Good when batch size is small\n True: (don't use). Set layer in training mode even when making inferences\n \"\"\"\n return super(self.__class__, self).call(inputs, training=training)\n\n\ndef compute_backbone_shapes(config, image_shape):\n \"\"\"Computes the width and height of each stage of the backbone network.\n\n Returns:\n [N, (height, width)]. Where N is the number of stages\n \"\"\"\n if callable(config.BACKBONE):\n return config.COMPUTE_BACKBONE_SHAPE(image_shape)\n\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])\n\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True, train_bn=True):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layers\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. 
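# Quick illustrative check (not from the original file) of how the bottleneck
# blocks defined above (one conv_block plus several identity_blocks per stage)
# add up to the usual ResNet-50/101 layer counts; the per-stage block lists
# are the standard ones and are assumptions here.
blocks_per_stage = {"resnet50": [3, 4, 6, 3], "resnet101": [3, 4, 23, 3]}
for arch, blocks in blocks_per_stage.items():
    stage4_identity_blocks = blocks[2] - 1   # the conv_block takes one slot
    named_layers = sum(blocks) * 3 + 2       # 3 convs/block + conv1 + the classification fc (not built by this backbone)
    print(arch, stage4_identity_blocks, named_layers)
# resnet50 5 50
# resnet101 22 101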
Train or freeze Batch Norm layers\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)] boxes to update\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, (y1, x1, y2, x2)]\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KE.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. 
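# Pure-NumPy sketch (illustrative only) of the box-delta math implemented by
# apply_box_deltas_graph() and clip_boxes_graph() above: shift the box center
# by (dy, dx) fractions of its size, rescale by exp(log(dh)) / exp(log(dw)),
# then clip to the normalized 0..1 window. The example boxes/deltas are invented.
import numpy as np

def apply_deltas_np(boxes, deltas):
    # boxes: [N, (y1, x1, y2, x2)], deltas: [N, (dy, dx, log(dh), log(dw))]
    h = boxes[:, 2] - boxes[:, 0]
    w = boxes[:, 3] - boxes[:, 1]
    cy = boxes[:, 0] + 0.5 * h + deltas[:, 0] * h
    cx = boxes[:, 1] + 0.5 * w + deltas[:, 1] * w
    h = h * np.exp(deltas[:, 2])
    w = w * np.exp(deltas[:, 3])
    return np.stack([cy - 0.5 * h, cx - 0.5 * w, cy + 0.5 * h, cx + 0.5 * w], axis=1)

boxes = np.array([[0.2, 0.2, 0.6, 0.6]])
deltas = np.array([[0.1, 0.0, np.log(1.5), 0.0]])
print(np.clip(apply_deltas_np(boxes, deltas), 0.0, 1.0))  # [[0.14 0.2  0.74 0.6 ]]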
It also applies bounding\n box refinement deltas to anchors.\n\n Inputs:\n rpn_probs: [batch, anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n anchors: [batch, (y1, x1, y2, x2)] anchors in normalized coordinates\n\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\n super(ProposalLayer, self).__init__(**kwargs)\n self.config = config\n self.proposal_count = proposal_count\n self.nms_threshold = nms_threshold\n\n def call(self, inputs):\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, :, 1]\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n # Anchors\n anchors = inputs[2]\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = tf.minimum(6000, tf.shape(anchors)[1])\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n name=\"top_anchors\").indices\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\n self.config.IMAGES_PER_GPU,\n names=[\"pre_nms_anchors\"])\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = utils.batch_slice([pre_nms_anchors, deltas],\n lambda x, y: apply_box_deltas_graph(x, y),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors\"])\n\n # Clip to image boundaries. Since we're in normalized coordinates,\n # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\n window = np.array([0, 0, 1, 1], dtype=np.float32)\n boxes = utils.batch_slice(boxes,\n lambda x: clip_boxes_graph(x, window),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors_clipped\"])\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Non-max suppression\n def nms(boxes, scores):\n indices = tf.image.non_max_suppression(\n boxes, scores, self.proposal_count,\n self.nms_threshold, name=\"rpn_non_max_suppression\")\n proposals = tf.gather(boxes, indices)\n # Pad if needed\n padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)\n proposals = tf.pad(proposals, [(0, padding), (0, 0)])\n return proposals\n proposals = utils.batch_slice([boxes, scores], nms,\n self.config.IMAGES_PER_GPU)\n return proposals\n\n def compute_output_shape(self, input_shape):\n return (None, self.proposal_count, 4)\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n \"\"\"Implementation of Log2. TF doesn't have a native implementation.\"\"\"\n return tf.log(x) / tf.log(2.0)\n\n\nclass PyramidROIAlign(KE.Layer):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_shape: [height, width] of the output pooled regions. Usually [7, 7]\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - image_meta: [batch, (meta data)] Image details. 
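# Illustrative pure-NumPy greedy NMS, the operation ProposalLayer delegates to
# tf.image.non_max_suppression(); the boxes, scores and threshold are made up.
import numpy as np

def iou_np(box, boxes):
    y1 = np.maximum(box[0], boxes[:, 0]); x1 = np.maximum(box[1], boxes[:, 1])
    y2 = np.minimum(box[2], boxes[:, 2]); x2 = np.minimum(box[3], boxes[:, 3])
    inter = np.maximum(y2 - y1, 0) * np.maximum(x2 - x1, 0)
    area = lambda b: (b[..., 2] - b[..., 0]) * (b[..., 3] - b[..., 1])
    return inter / (area(box) + area(boxes) - inter)

def nms_np(boxes, scores, max_output, iou_threshold):
    order = np.argsort(scores)[::-1]
    keep = []
    while order.size and len(keep) < max_output:
        i = order[0]
        keep.append(i)
        order = order[1:][iou_np(boxes[i], boxes[order[1:]]) < iou_threshold]
    return np.array(keep, dtype=np.int64)

boxes = np.array([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.9, 0.9], [0.5, 0.5, 1.0, 1.0]])
scores = np.array([0.9, 0.8, 0.7])
print(nms_np(boxes, scores, max_output=2, iou_threshold=0.7))  # [0 2]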
See compose_image_meta()\n - Feature maps: List of feature maps from different levels of the pyramid.\n Each is [batch, height, width, channels]\n\n Output:\n Pooled regions in the shape: [batch, num_boxes, height, width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n\n def __init__(self, pool_shape, **kwargs):\n super(PyramidROIAlign, self).__init__(**kwargs)\n self.pool_shape = tuple(pool_shape)\n\n def call(self, inputs):\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n\n # Image meta\n # Holds details about the image. See compose_image_meta()\n image_meta = inputs[1]\n\n # Feature Maps. List of feature maps from different level of the\n # feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[2:]\n\n # Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n h = y2 - y1\n w = x2 - x1\n # Use shape of first image. Images in a batch must have the same size.\n image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\n # Equation 1 in the Feature Pyramid Networks paper. Account for\n # the fact that our coordinates are normalized here.\n # e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\n roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n roi_level = tf.minimum(5, tf.maximum(\n 2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n roi_level = tf.squeeze(roi_level, 2)\n\n # Loop through levels and apply ROI pooling to each. P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = tf.where(tf.equal(roi_level, level))\n level_boxes = tf.gather_nd(boxes, ix)\n\n # Box indices for crop_and_resize.\n box_indices = tf.cast(ix[:, 0], tf.int32)\n\n # Keep track of which box is mapped to which level\n box_to_level.append(ix)\n\n # Stop gradient propogation to ROI proposals\n level_boxes = tf.stop_gradient(level_boxes)\n box_indices = tf.stop_gradient(box_indices)\n\n # Crop and Resize\n # From Mask R-CNN paper: \"We sample four regular locations, so\n # that we can evaluate either max or average pooling. 
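# Illustrative NumPy version of the pyramid-level assignment computed just
# above in PyramidROIAlign.call() (Equation 1 of the FPN paper, adapted to
# normalized coordinates); the 1024x1024 image area and the ROI sizes are
# assumptions chosen for the example.
import numpy as np

image_area = 1024.0 * 1024.0
h = np.array([0.20, 0.05])   # normalized ROI heights
w = np.array([0.20, 0.05])   # normalized ROI widths
roi_level = np.log2(np.sqrt(h * w) / (224.0 / np.sqrt(image_area)))
roi_level = np.minimum(5, np.maximum(2, 4 + np.round(roi_level).astype(np.int32)))
print(roi_level)  # [4 2] -> larger ROIs pool from coarser pyramid levels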
In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n pooled.append(tf.image.crop_and_resize(\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\n method=\"bilinear\"))\n\n # Pack pooled features into one tensor\n pooled = tf.concat(pooled, axis=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = tf.concat(box_to_level, axis=0)\n box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n axis=1)\n\n # Rearrange pooled features to match the order of the original boxes\n # Sort box_to_level by batch then box index\n # TF doesn't have a way to sort by two columns, so merge them and sort.\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n box_to_level)[0]).indices[::-1]\n ix = tf.gather(box_to_level[:, 2], ix)\n pooled = tf.gather(pooled, ix)\n\n # Re-add the batch dimension\n pooled = tf.expand_dims(pooled, 0)\n return pooled\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeat boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeat() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n\n Inputs:\n proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. 
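# Pure-NumPy counterpart (illustrative, not the original implementation) of
# overlaps_graph(): a pairwise IoU matrix between two box sets, using
# broadcasting instead of the tile/reshape trick; the coordinates are invented.
import numpy as np

def overlaps_np(boxes1, boxes2):
    y1 = np.maximum(boxes1[:, None, 0], boxes2[None, :, 0])
    x1 = np.maximum(boxes1[:, None, 1], boxes2[None, :, 1])
    y2 = np.minimum(boxes1[:, None, 2], boxes2[None, :, 2])
    x2 = np.minimum(boxes1[:, None, 3], boxes2[None, :, 3])
    inter = np.maximum(y2 - y1, 0) * np.maximum(x2 - x1, 0)
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    return inter / (area1[:, None] + area2[None, :] - inter)

proposals = np.array([[0.0, 0.0, 0.5, 0.5]])
gt_boxes = np.array([[0.0, 0.0, 0.5, 0.5], [0.25, 0.25, 0.75, 0.75]])
print(overlaps_np(proposals, gt_boxes))  # [[1.0  ~0.143]]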
Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n Class-specific bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width). Masks cropped to bbox\n boundaries and resized to neural network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine positive and negative ROIs\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random_shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(positive_indices)[0]\n # Negative ROIs. 
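# Tiny arithmetic sketch (assumed config values, not taken from this file) of
# the 33% positive subsampling performed above: with TRAIN_ROIS_PER_IMAGE = 200
# and ROI_POSITIVE_RATIO = 0.33, at most 66 positives and 134 negatives are
# drawn per image.
train_rois_per_image = 200
roi_positive_ratio = 0.33
positive_count = int(train_rois_per_image * roi_positive_ratio)
negative_count = int(1.0 / roi_positive_ratio * positive_count) - positive_count
print(positive_count, negative_count)  # 66 134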
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random_shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.cond(\n tf.greater(tf.shape(positive_overlaps)[1], 0),\n true_fn = lambda: tf.argmax(positive_overlaps, axis=1),\n false_fn = lambda: tf.cast(tf.constant([]),tf.int64)\n )\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI coordinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\n rois = tf.pad(rois, [(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KE.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
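# Illustrative NumPy box refinement, i.e. the inverse of applying deltas,
# mirroring what utils.box_refinement_graph() computes above for each positive
# ROI against its assigned GT box (the example boxes are invented).
import numpy as np

def box_refinement_np(box, gt_box):
    h, w = box[2] - box[0], box[3] - box[1]
    cy, cx = box[0] + 0.5 * h, box[1] + 0.5 * w
    gt_h, gt_w = gt_box[2] - gt_box[0], gt_box[3] - gt_box[1]
    gt_cy, gt_cx = gt_box[0] + 0.5 * gt_h, gt_box[1] + 0.5 * gt_w
    return np.array([(gt_cy - cy) / h, (gt_cx - cx) / w,
                     np.log(gt_h / h), np.log(gt_w / w)])

roi = np.array([0.2, 0.2, 0.6, 0.6])
gt = np.array([0.25, 0.20, 0.65, 0.70])
print(box_refinement_np(roi, gt))  # [0.125 0.125 0. 0.223] = (dy, dx, log(dh), log(dw))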
Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,\n (dy, dx, log(dh), log(dw), class_id)]\n Class-specific bbox refinements.\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(DetectionTargetLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n proposals = inputs[0]\n gt_class_ids = inputs[1]\n gt_boxes = inputs[2]\n gt_masks = inputs[3]\n\n # Slice the batch and run a graph for each slice\n # TODO: Rename target_bbox to target_deltas for clarity\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n outputs = utils.batch_slice(\n [proposals, gt_class_ids, gt_boxes, gt_masks],\n lambda w, x, y, z: detection_targets_graph(\n w, x, y, z, self.config),\n self.config.IMAGES_PER_GPU, names=names)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return [\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\n (None, 1), # class_ids\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n self.config.MASK_SHAPE[1]) # masks\n ]\n\n def compute_mask(self, inputs, mask=None):\n return [None, None, None, None]\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in image coordinates. The part of the image\n that contains the image excluding the padding.\n\n Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)] where\n coordinates are normalized.\n \"\"\"\n # Class IDs per ROI\n class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\n # Class probability of the top class of each ROI\n indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\n class_scores = tf.gather_nd(probs, indices)\n # Class-specific bounding box deltas\n deltas_specific = tf.gather_nd(deltas, indices)\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n refined_rois = apply_box_deltas_graph(\n rois, deltas_specific * config.BBOX_STD_DEV)\n # Clip boxes to image window\n refined_rois = clip_boxes_graph(refined_rois, window)\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep = tf.where(class_ids > 0)[:, 0]\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(conf_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n\n # Apply per-class NMS\n # 1. 
Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indices\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\n # 4. Compute intersection between keep and nms_keep\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are normalized.\n detections = tf.concat([\n tf.gather(refined_rois, keep),\n tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KE.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are normalized.\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Get windows of images in normalized coordinates. 
Windows are the area\n # in the image that excludes the padding.\n # Use the shape of the first image in the batch to normalize the window\n # because we know that all images get resized to the same size.\n m = parse_image_meta_graph(image_meta)\n image_shape = m['image_shape'][0]\n window = norm_boxes_graph(m['window'], image_shape[:2])\n\n # Run detection refinement graph on each item in the batch\n detections_batch = utils.batch_slice(\n [rois, mrcnn_class, mrcnn_bbox, window],\n lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\n self.config.IMAGES_PER_GPU)\n\n # Reshape output\n # [batch, num_detections, (y1, x1, y2, x2, class_score)] in\n # normalized coordinates\n return tf.reshape(\n detections_batch,\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n def compute_output_shape(self, input_shape):\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\n############################################################\n# Region Proposal Network (RPN)\n############################################################\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n \"\"\"Builds the computation graph of Region Proposal Network.\n\n feature_map: backbone features [batch, height, width, depth]\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n\n Returns:\n rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n # TODO: check if stride of 2 causes alignment issues if the feature map\n # is not even.\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n strides=anchor_stride,\n name='rpn_conv_shared')(feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n activation='linear', name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(\n lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. [batch, H, W, anchors per location, depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n\n Returns a Keras Model object. 
The model outputs, when called, are:\n rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, W, W, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True,\n fc_layers_size=1024):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n - image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n fc_layers_size: Size of the 2 FC layers\n\n Returns:\n logits: [N, NUM_CLASSES] classifier logits (before softmax)\n probs: [N, NUM_CLASSES] classifier probabilities\n bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to\n proposal boxes\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_boxes, pool_height, pool_width, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\n # Two 1024 FC layers (implemented with Conv2D for consistency)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),\n name=\"mrcnn_class_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n name=\"pool_squeeze\")(x)\n\n # Classifier head\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n name='mrcnn_class_logits')(shared)\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n name=\"mrcnn_class\")(mrcnn_class_logits)\n\n # BBox head\n # [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]\n x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n name='mrcnn_bbox_fc')(shared)\n # Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]\n s = K.int_shape(x)\n mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\ndef build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. 
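# Shape arithmetic (illustrative) behind the two "FC" layers built above in
# fpn_classifier_graph(): a valid Conv2D whose kernel equals the pooled size
# collapses each ROI to 1x1, which is what makes it behave like a dense layer.
# pool_size = 7 is an assumption, not read from this file.
pool_size, kernel, stride = 7, 7, 1
out_size = (pool_size - kernel) // stride + 1
print(out_size)  # 1 -> each ROI becomes a 1x1xfc_layers_size feature vector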
See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n\n Returns: Masks [batch, roi_count, height, width, num_classes]\n \"\"\"\n # ROI Pooling\n # Shape: [batch, boxes, pool_height, pool_width, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n \"\"\"Implements Smooth-L1 loss.\n y_true and y_pred are typically: [N, 4], but could be any shape.\n \"\"\"\n diff = K.abs(y_true - y_pred)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.\n \"\"\"\n # Squeeze last dim to simplify\n rpn_match = tf.squeeze(rpn_match, -1)\n # Get anchor classes. Convert the -1/+1 match to 0/1 values.\n anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)\n # Positive and Negative anchors contribute to the loss,\n # but neutral anchors (match value = 0) don't.\n indices = tf.where(K.not_equal(rpn_match, 0))\n # Pick rows that contribute to the loss and filter out the rest.\n rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\n anchor_class = tf.gather_nd(anchor_class, indices)\n # Cross entropy loss\n loss = K.sparse_categorical_crossentropy(target=anchor_class,\n output=rpn_class_logits,\n from_logits=True)\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):\n \"\"\"Return the RPN bounding box loss graph.\n\n config: the model config object.\n target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\n Uses 0 padding to fill in unsed bbox deltas.\n rpn_match: [batch, anchors, 1]. Anchor match type. 
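# Standalone NumPy version (illustrative) of smooth_l1_loss() defined above:
# quadratic for |diff| < 1 and linear beyond, evaluated on made-up values.
import numpy as np

def smooth_l1_np(y_true, y_pred):
    diff = np.abs(y_true - y_pred)
    less_than_one = (diff < 1.0).astype(np.float32)
    return less_than_one * 0.5 * diff ** 2 + (1.0 - less_than_one) * (diff - 0.5)

print(smooth_l1_np(np.array([0.0, 0.0]), np.array([0.5, 2.0])))  # [0.125 1.5]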
1=positive,\n -1=negative, 0=neutral anchor.\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Positive anchors contribute to the loss, but negative and\n # neutral anchors (match value of 0 or -1) don't.\n rpn_match = K.squeeze(rpn_match, -1)\n indices = tf.where(K.equal(rpn_match, 1))\n\n # Pick bbox deltas that contribute to the loss\n rpn_bbox = tf.gather_nd(rpn_bbox, indices)\n\n # Trim target bounding box deltas to the same length as rpn_bbox.\n batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)\n target_bbox = batch_pack_graph(target_bbox, batch_counts,\n config.IMAGES_PER_GPU)\n\n # TODO: use smooth_l1_loss() rather than reimplementing here\n # to reduce code duplication\n diff = K.abs(target_bbox - rpn_bbox)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef mrcnn_class_loss_graph(target_class_ids, pred_class_logits,\n active_class_ids):\n \"\"\"Loss for the classifier head of Mask RCNN.\n\n target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero\n padding to fill in the array.\n pred_class_logits: [batch, num_rois, num_classes]\n active_class_ids: [batch, num_classes]. Has a value of 1 for\n classes that are in the dataset of the image, and 0\n for classes that are not in the dataset.\n \"\"\"\n # During model building, Keras calls this function with\n # target_class_ids of type float32. Unclear why. Cast it\n # to int to get around it.\n target_class_ids = tf.cast(target_class_ids, 'int64')\n\n # Find predictions of classes that are not in the dataset.\n pred_class_ids = tf.argmax(pred_class_logits, axis=2)\n # TODO: Update this line to work with batch > 1. Right now it assumes all\n # images in a batch have the same active_class_ids\n pred_active = tf.gather(active_class_ids[0], pred_class_ids)\n\n # Loss\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=target_class_ids, logits=pred_class_logits)\n\n # Erase losses of predictions of classes that are not in the active\n # classes of the image.\n loss = loss * pred_active\n\n # Computer loss mean. Use only predictions that contribute\n # to the loss to get a correct mean.\n loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)\n return loss\n\n\ndef mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):\n \"\"\"Loss for Mask R-CNN bounding box refinement.\n\n target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n target_class_ids: [batch, num_rois]. Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n target_bbox = K.reshape(target_bbox, (-1, 4))\n pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. 
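# Illustrative NumPy version of the "active class" masking done above in
# mrcnn_class_loss_graph(): losses for predicted classes that are not part of
# the image's source dataset are zeroed before averaging (numbers invented).
import numpy as np

per_roi_loss = np.array([0.2, 1.5, 0.7])   # sparse softmax cross-entropy per ROI
pred_active = np.array([1.0, 0.0, 1.0])    # 1 if the predicted class is active
masked_mean = np.sum(per_roi_loss * pred_active) / np.sum(pred_active)
print(masked_mean)  # 0.45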
Get their indices.\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n\n # Smooth-L1 Loss\n loss = K.switch(tf.size(target_bbox) > 0,\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\ndef mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n # Reshape for simplicity. Merge first two dimensions into one.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n mask_shape = tf.shape(target_masks)\n target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\n pred_shape = tf.shape(pred_masks)\n pred_masks = K.reshape(pred_masks,\n (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\n # Permute predicted masks to [N, num_classes, height, width]\n pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])\n\n # Only positive ROIs contribute to the loss. And only\n # the class specific mask of each ROI.\n positive_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_ix), tf.int64)\n indices = tf.stack([positive_ix, positive_class_ids], axis=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = tf.gather(target_masks, positive_ix)\n y_pred = tf.gather_nd(pred_masks, indices)\n\n # Compute binary cross entropy. If no positive ROIs, then return 0.\n # shape: [batch, roi, num_classes]\n loss = K.switch(tf.size(y_true) > 0,\n K.binary_crossentropy(target=y_true, output=y_pred),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augment=False, augmentation=None,\n use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). Mini masks are smaller, typically,\n 224x224 and are generated by extracting the bounding box of the\n object and resizing it to MINI_MASK_SHAPE.\n\n Returns:\n image: [height, width, 3]\n shape: the original shape of the image before resizing and cropping.\n class_ids: [instance_count] Integer class IDs\n bbox: [instance_count, (y1, x1, y2, x2)]\n mask: [height, width, instance_count]. 
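# Illustrative NumPy binary cross-entropy for one ROI mask, the per-pixel
# quantity mrcnn_mask_loss_graph() averages over positive ROIs above
# (target and predicted values are invented).
import numpy as np

y_true = np.array([[1.0, 0.0], [1.0, 1.0]])   # GT mask pixels, thresholded to 0/1
y_pred = np.array([[0.9, 0.2], [0.7, 0.6]])   # sigmoid outputs of the mask head
eps = 1e-7
bce = -(y_true * np.log(y_pred + eps) + (1.0 - y_true) * np.log(1.0 - y_pred + eps))
print(bce.mean())  # about 0.30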
The height and width are those\n of the image unless use_mini_mask is True, in which case they are\n defined in MINI_MASK_SHAPE.\n \"\"\"\n # Load image and mask\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Random horizontal flips.\n # TODO: will be removed in a future update in favor of augmentation\n if augment:\n logging.warning(\"'augment' is deprecated. Use 'augmentation' instead.\")\n if random.randint(0, 1):\n image = np.fliplr(image)\n mask = np.fliplr(mask)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n import imgaug\n\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n mask = mask.astype(np.bool)\n\n # Note that some boxes might be all zeros if the corresponding mask got cropped out.\n # and here is to filter them out\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n class_ids = class_ids[_idx]\n # Bounding boxes. Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox = utils.extract_bboxes(mask)\n\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n\n # Resize masks to smaller size to reduce memory usage\n if use_mini_mask:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale, active_class_ids)\n\n return image, image_meta, class_ids, bbox, mask\n\n\ndef build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generate targets for training Stage 2 classifier and mask heads.\n This is not used in normal training. 
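# Small NumPy illustration (not the original implementation) of what
# utils.extract_bboxes() does above: derive a (y1, x1, y2, x2) box from the
# nonzero extent of a boolean instance mask.
import numpy as np

mask = np.zeros((8, 8), dtype=bool)
mask[2:5, 3:7] = True
ys, xs = np.where(mask)
bbox = np.array([ys.min(), xs.min(), ys.max() + 1, xs.max() + 1])
print(bbox)  # [2 3 5 7]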
It's useful for debugging or to train\n the Mask RCNN heads without using the RPN head.\n\n Inputs:\n rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.\n gt_class_ids: [instance count] Integer class IDs\n gt_boxes: [instance count, (y1, x1, y2, x2)]\n gt_masks: [height, width, instance count] Ground truth masks. Can be full\n size or mini-masks.\n\n Returns:\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.\n bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific\n bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped\n to bbox boundaries and resized to neural network output size.\n \"\"\"\n assert rpn_rois.shape[0] > 0\n assert gt_class_ids.dtype == np.int32, \"Expected int but got {}\".format(\n gt_class_ids.dtype)\n assert gt_boxes.dtype == np.int32, \"Expected int but got {}\".format(\n gt_boxes.dtype)\n assert gt_masks.dtype == np.bool_, \"Expected bool but got {}\".format(\n gt_masks.dtype)\n\n # It's common to add GT Boxes to ROIs but we don't do that here because\n # according to XinLei Chen's paper, it doesn't help.\n\n # Trim empty padding in gt_boxes and gt_masks parts\n instance_ids = np.where(gt_class_ids > 0)[0]\n assert instance_ids.shape[0] > 0, \"Image must contain instances.\"\n gt_class_ids = gt_class_ids[instance_ids]\n gt_boxes = gt_boxes[instance_ids]\n gt_masks = gt_masks[:, :, instance_ids]\n\n # Compute areas of ROIs and ground truth boxes.\n rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \\\n (rpn_rois[:, 3] - rpn_rois[:, 1])\n gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \\\n (gt_boxes[:, 3] - gt_boxes[:, 1])\n\n # Compute overlaps [rpn_rois, gt_boxes]\n overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))\n for i in range(overlaps.shape[1]):\n gt = gt_boxes[i]\n overlaps[:, i] = utils.compute_iou(\n gt, rpn_rois, gt_box_area[i], rpn_roi_area)\n\n # Assign ROIs to GT boxes\n rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)\n rpn_roi_iou_max = overlaps[np.arange(\n overlaps.shape[0]), rpn_roi_iou_argmax]\n # GT box assigned to each ROI\n rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]\n rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]\n\n # Positive ROIs are those with >= 0.5 IoU with a GT box.\n fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]\n\n # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)\n # TODO: To hard example mine or not to hard example mine, that's the question\n # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indices of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. 
This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\n \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. [y, x, log(h), log(w)]\n bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,\n config.NUM_CLASSES, 4), dtype=np.float32)\n pos_ids = np.where(roi_gt_class_ids > 0)[0]\n bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(\n rois[pos_ids], roi_gt_boxes[pos_ids, :4])\n # Normalize bbox refinements\n bboxes /= config.BBOX_STD_DEV\n\n # Generate class-specific target masks\n masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),\n dtype=np.float32)\n for i in pos_ids:\n class_id = roi_gt_class_ids[i]\n assert class_id > 0, \"class id must be greater than 0\"\n gt_id = roi_gt_assignment[i]\n class_mask = gt_masks[:, :, gt_id]\n\n if config.USE_MINI_MASK:\n # Create a mask placeholder, the size of the image\n placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)\n # GT box\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]\n gt_w = gt_x2 - gt_x1\n gt_h = gt_y2 - gt_y1\n # Resize mini mask to size of GT box\n placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \\\n np.round(skimage.transform.resize(\n class_mask, (gt_h, gt_w), order=1, mode=\"constant\")).astype(bool)\n # Place the mini batch in the placeholder\n class_mask = placeholder\n\n # Pick part of the mask and resize it\n y1, x1, y2, x2 = rois[i].astype(np.int32)\n m = class_mask[y1:y2, x1:x2]\n mask = skimage.transform.resize(m, config.MASK_SHAPE, order=1, mode=\"constant\")\n masks[i, :, :, class_id] = mask\n\n return rois, roi_gt_class_ids, bboxes, masks\n\n\ndef build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is 
a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # TODO: If multiple anchors have the same IoU match all of them\n gt_iou_argmax = np.argmax(overlaps, axis=0)\n rpn_match[gt_iou_argmax] = 1\n # 3. 
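# Minimal NumPy sketch (made-up IoU values) of the anchor labelling rule used
# here in build_rpn_targets(): IoU < 0.3 -> negative (-1), IoU >= 0.7 ->
# positive (1), otherwise neutral (0), with the best-matching anchor of each
# GT box forced positive (simplified below to a single GT box).
import numpy as np

anchor_iou_max = np.array([0.05, 0.45, 0.72, 0.10])   # best IoU per anchor
rpn_match = np.zeros(anchor_iou_max.shape, dtype=np.int32)
rpn_match[anchor_iou_max < 0.3] = -1
rpn_match[np.argmax(anchor_iou_max)] = 1              # keep the GT box matched
rpn_match[anchor_iou_max >= 0.7] = 1
print(rpn_match)  # [-1  0  1 -1]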
Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = np.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n # Same for negative proposals\n ids = np.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n np.sum(rpn_match == 1))\n if extra > 0:\n # Rest the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = np.where(rpn_match == 1)[0]\n ix = 0 # index into rpn_bbox\n # TODO: use box_refinement() rather than duplicating the code here\n for i, a in zip(ids, anchors[ids]):\n # Closest gt box (it might have IoU < 0.7)\n gt = gt_boxes[anchor_iou_argmax[i]]\n\n # Convert coordinates to center plus width/height.\n # GT Box\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n\n # Compute the bbox refinement that the RPN should predict.\n rpn_bbox[ix] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n ]\n # Normalize\n rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n ix += 1\n\n return rpn_match, rpn_bbox\n\n\ndef generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\n \"\"\"Generates ROI proposals similar to what a region proposal network\n would generate.\n\n image_shape: [Height, Width, Depth]\n count: Number of ROIs to generate\n gt_class_ids: [N] Integer ground truth class IDs\n gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.\n\n Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.\n \"\"\"\n # placeholder\n rois = np.zeros((count, 4), dtype=np.int32)\n\n # Generate random ROIs around GT boxes (90% of count)\n rois_per_box = int(0.9 * count / gt_boxes.shape[0])\n for i in range(gt_boxes.shape[0]):\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\n h = gt_y2 - gt_y1\n w = gt_x2 - gt_x1\n # random boundaries\n r_y1 = max(gt_y1 - h, 0)\n r_y2 = min(gt_y2 + h, image_shape[0])\n r_x1 = max(gt_x1 - w, 0)\n r_x2 = min(gt_x2 + w, image_shape[1])\n\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. 
If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))\n x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:rois_per_box]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:rois_per_box]\n if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n box_rois = np.hstack([y1, x1, y2, x2])\n rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois\n\n # Generate random ROIs anywhere in the image (10% of count)\n remaining_count = count - (rois_per_box * gt_boxes.shape[0])\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))\n x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:remaining_count]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:remaining_count]\n if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n global_rois = np.hstack([y1, x1, y2, x2])\n rois[-remaining_count:] = global_rois\n return rois\n\n\ndef data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,\n random_rois=0, batch_size=1, detection_targets=False,\n no_augmentation_sources=None):\n \"\"\"A generator that returns images and corresponding target class ids,\n bounding box deltas, and masks.\n\n dataset: The Dataset object to pick data from\n config: The model config object\n shuffle: If True, shuffles the samples before every epoch\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n random_rois: If > 0 then generate proposals to be used to train the\n network classifier and mask heads. Useful if training\n the Mask RCNN part without the RPN.\n batch_size: How many images to return in each call\n detection_targets: If True, generate detection targets (class IDs, bbox\n deltas, and masks). Typically for debugging or visualizations because\n in trainig detection targets are generated by DetectionTargetLayer.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is string that identifies a dataset and is\n defined in the Dataset class.\n\n Returns a Python generator. Upon calling next() on it, the\n generator returns two lists, inputs and outputs. The contents\n of the lists differs depending on the received arguments:\n inputs list:\n - images: [batch, H, W, C]\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n no_augmentation_sources = no_augmentation_sources or []\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n\n # If the image source is not to be augmented pass None as augmentation\n if dataset.image_info[image_id]['source'] in no_augmentation_sources:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=None,\n use_mini_mask=config.USE_MINI_MASK)\n else:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation,\n use_mini_mask=config.USE_MINI_MASK)\n\n # Skip images that have no instances. 
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\n gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(\n image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\\\n build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros(\n [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros(\n [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_masks = np.zeros(\n (batch_size, gt_masks.shape[0], gt_masks.shape[1],\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n if random_rois:\n batch_rpn_rois = np.zeros(\n (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros(\n (batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros(\n (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros(\n (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros(\n (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(\n batch_mrcnn_class_ids, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n 
raise\n\n\n############################################################\n# MaskRCNN Class\n############################################################\n\nclass MaskRCNN():\n \"\"\"Encapsulates the Mask RCNN model functionality.\n\n The actual Keras model is in the keras_model property.\n \"\"\"\n\n def __init__(self, mode, config, model_dir):\n \"\"\"\n mode: Either \"training\" or \"inference\"\n config: A Sub-class of the Config class\n model_dir: Directory to save training logs and trained weights\n \"\"\"\n assert mode in ['training', 'inference']\n self.mode = mode\n self.config = config\n self.model_dir = model_dir\n self.set_log_dir()\n self.keras_model = self.build(mode=mode, config=config)\n\n def build(self, mode, config):\n \"\"\"Build Mask R-CNN architecture.\n input_shape: The shape of the input image.\n mode: Either \"training\" or \"inference\". The inputs and\n outputs of the model differ accordingly.\n \"\"\"\n assert mode in ['training', 'inference']\n\n # Image size must be dividable by 2 multiple times\n h, w = config.IMAGE_SHAPE[:2]\n if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n raise Exception(\"Image size must be dividable by 2 at least 6 times \"\n \"to avoid fractions when downscaling and upscaling.\"\n \"For example, use 256, 320, 384, 448, 512, ... etc. \")\n\n # Inputs\n input_image = KL.Input(\n shape=[None, None, 3], name=\"input_image\")\n input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],\n name=\"input_image_meta\")\n if mode == \"training\":\n # RPN GT\n input_rpn_match = KL.Input(\n shape=[None, 1], name=\"input_rpn_match\", dtype=tf.int32)\n input_rpn_bbox = KL.Input(\n shape=[None, 4], name=\"input_rpn_bbox\", dtype=tf.float32)\n\n # Detection GT (class IDs, bounding boxes, and masks)\n # 1. GT Class IDs (zero padded)\n input_gt_class_ids = KL.Input(\n shape=[None], name=\"input_gt_class_ids\", dtype=tf.int32)\n # 2. GT Boxes in pixels (zero padded)\n # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates\n input_gt_boxes = KL.Input(\n shape=[None, 4], name=\"input_gt_boxes\", dtype=tf.float32)\n # Normalize coordinates\n gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_gt_boxes)\n # 3. 
GT Masks (zero padded)\n # [batch, height, width, MAX_GT_INSTANCES]\n if config.USE_MINI_MASK:\n input_gt_masks = KL.Input(\n shape=[config.MINI_MASK_SHAPE[0],\n config.MINI_MASK_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n else:\n input_gt_masks = KL.Input(\n shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n elif mode == \"inference\":\n # Anchors in normalized coordinates\n input_anchors = KL.Input(shape=[None, 4], name=\"input_anchors\")\n\n # Build the shared convolutional layers.\n # Bottom-up Layers\n # Returns a list of the last layers of each stage, 5 in total.\n # Don't create the thead (stage 5), so we pick the 4th item in the list.\n if callable(config.BACKBONE):\n _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,\n train_bn=config.TRAIN_BN)\n else:\n _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,\n stage5=True, train_bn=config.TRAIN_BN)\n # Top-down Layers\n # TODO: add assert to varify feature map sizes match what's in config\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)\n P4 = KL.Add(name=\"fpn_p4add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])\n P3 = KL.Add(name=\"fpn_p3add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])\n P2 = KL.Add(name=\"fpn_p2add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])\n # Attach 3x3 conv to all P layers to get the final feature maps.\n P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p2\")(P2)\n P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\n P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\n # P6 is used for the 5th anchor scale in RPN. Generated by\n # subsampling from P5 with stride of 2.\n P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\n\n # Note that P6 is used in RPN, but not in the classifier heads.\n rpn_feature_maps = [P2, P3, P4, P5, P6]\n mrcnn_feature_maps = [P2, P3, P4, P5]\n\n # Anchors\n if mode == \"training\":\n anchors = self.get_anchors(config.IMAGE_SHAPE)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)\n # A hack to get around Keras's bad support for constants\n anchors = KL.Lambda(lambda x: tf.Variable(anchors), name=\"anchors\")(input_image)\n else:\n anchors = input_anchors\n\n # RPN Model\n rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\n len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)\n # Loop through pyramid layers\n layer_outputs = [] # list of lists\n for p in rpn_feature_maps:\n layer_outputs.append(rpn([p]))\n # Concatenate layer outputs\n # Convert from list of lists of level outputs to list of lists\n # of outputs across levels.\n # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\n else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n if mode == \"training\":\n # Class ID mask to mark class IDs supported by the dataset the image\n # came from.\n active_class_ids = KL.Lambda(\n lambda x: parse_image_meta_graph(x)[\"active_class_ids\"]\n )(input_image_meta)\n\n if not config.USE_RPN_ROIS:\n # Ignore predicted ROIs and use ROIs provided as an input.\n input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\n name=\"input_roi\", dtype=np.int32)\n # Normalize coordinates\n target_rois = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_rois)\n else:\n target_rois = rpn_rois\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_bbox, target_mask =\\\n DetectionTargetLayer(config, name=\"proposal_targets\")([\n target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\n\n # Network Heads\n # TODO: verify that this handles zero padded ROIs\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # TODO: clean up (use tf.identify if necessary)\n output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\n\n # Losses\n rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\n class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\n [target_class_ids, mrcnn_class_logits, active_class_ids])\n bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\n [target_bbox, target_class_ids, mrcnn_bbox])\n mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\n [target_mask, target_class_ids, mrcnn_mask])\n\n # Model\n inputs = [input_image, input_image_meta,\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\n if not config.USE_RPN_ROIS:\n inputs.append(input_rois)\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\n rpn_rois, output_rois,\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]\n model = KM.Model(inputs, outputs, name='mask_rcnn')\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n 
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from mrcnn.parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n The path of the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the corresponding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exclude: list of layer names to exclude\n \"\"\"\n import h5py\n # Conditional import to support versions of Keras before 2.2\n # TODO: remove in about 6 months (end of 2018)\n try:\n from keras.engine import saving\n except ImportError:\n # Keras before 2.2 used the 'topology' namespace.\n from keras.engine import topology as saving\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\n 'releases/download/v0.2/'\\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n # optimizer = keras.optimizers.SGD(\n # lr=learning_rate, momentum=momentum,\n # clipnorm=self.config.GRADIENT_CLIP_NORM)\n\n\n # Optimizer object\n if self.config.OPTIMIZER == 'ADAM':\n optimizer = keras.optimizers.Adam(\n learning_rate, amsgrad=True, clipnorm=self.config.GRADIENT_CLIP_NORM)\n else:\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum, clipnorm=self.config.GRADIENT_CLIP_NORM)\n\n\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.metrics_tensors.append(loss)\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
Get layers\n # of the inner model because they have the weights.\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n for layer in layers:\n # Is the layer a model?\n if layer.__class__.__name__ == 'Model':\n print(\"In model: \", layer.name)\n self.set_trainable(\n layer_regex, keras_model=layer, indent=indent + 4)\n continue\n\n if not layer.weights:\n continue\n # Is it trainable?\n trainable = bool(re.fullmatch(layer_regex, layer.name))\n # Update layer. If layer is a container, update inner layer.\n if layer.__class__.__name__ == 'TimeDistributed':\n layer.layer.trainable = trainable\n else:\n layer.trainable = trainable\n # Print trainable layer names\n if trainable and verbose > 0:\n log(\"{}{:20} ({})\".format(\" \" * indent, layer.name,\n layer.__class__.__name__))\n\n def set_log_dir(self, model_path=None):\n \"\"\"Sets the model log directory and epoch counter.\n\n model_path: If None, or a format different from what this code uses\n then set a new log directory and start epochs from 0. Otherwise,\n extract the log directory and the epoch counter from the file\n name.\n \"\"\"\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n now = datetime.datetime.now()\n\n # If we have a model path with date and epochs use them\n if model_path:\n # Continue from we left of. Get epoch and date from the file name\n # A sample model path might look like:\n # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5\n regex = r\".*/[\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})/mask\\_rcnn\\_[\\w-]+(\\d{4})\\.h5\"\n m = re.match(regex, model_path)\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n # Epoch number in file is 1-based, and in Keras code it's 0-based.\n # So, adjust for that then increment by one to start from the next epoch\n self.epoch = int(m.group(6)) - 1 + 1\n print('Re-starting from epoch %d' % self.epoch)\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.h5\".format(\n self.config.NAME.lower()))\n self.checkpoint_path = self.checkpoint_path.replace(\n \"*epoch*\", \"{epoch:04d}\")\n\n def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,\n augmentation=None, custom_callbacks=None, no_augmentation_sources=None):\n \"\"\"Train the model.\n train_dataset, val_dataset: Training and validation Dataset objects.\n learning_rate: The learning rate to train with\n epochs: Number of training epochs. Note that previous training epochs\n are considered to be done alreay, so this actually determines\n the epochs to train in total rather than in this particaular\n call.\n layers: Allows selecting wich layers to train. It can be:\n - A regular expression to match layer names to train\n - One of these predefined values:\n heads: The RPN, classifier and mask heads of the network\n all: All the layers\n 3+: Train Resnet stage 3 and up\n 4+: Train Resnet stage 4 and up\n 5+: Train Resnet stage 5 and up\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)\n augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)\n flips images right/left 50% of the time. You can pass complex\n augmentations as well. 
This augmentation applies 50% of the\n time, and when it does it flips images right/left half the time\n and adds a Gaussian blur with a random sigma in range 0 to 5.\n\n augmentation = imgaug.augmenters.Sometimes(0.5, [\n imgaug.augmenters.Fliplr(0.5),\n imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\n ])\n\t custom_callbacks: Optional. Add custom callbacks to be called\n\t with the keras fit_generator method. Must be list of type keras.callbacks.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is string that identifies a dataset and is\n defined in the Dataset class.\n \"\"\"\n assert self.mode == \"training\", \"Create model in training mode.\"\n\n # Pre-defined layer regular expressions\n layer_regex = {\n # all layers but the backbone\n \"heads\": r\"(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # From a specific Resnet stage and up\n \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"5+\": r\"(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # All layers\n \"all\": \".*\",\n }\n if layers in layer_regex.keys():\n layers = layer_regex[layers]\n\n # Data generators\n train_generator = data_generator(train_dataset, self.config, shuffle=True,\n augmentation=augmentation,\n batch_size=self.config.BATCH_SIZE,\n no_augmentation_sources=no_augmentation_sources)\n val_generator = data_generator(val_dataset, self.config, shuffle=True,\n batch_size=self.config.BATCH_SIZE)\n\n # Create log_dir if not exists\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n # Callbacks\n callbacks = [\n keras.callbacks.TensorBoard(log_dir=self.log_dir,\n histogram_freq=0, write_graph=True, write_images=False),\n keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n verbose=0, save_weights_only=True),\n ]\n\n # Add custom callbacks to the list\n if custom_callbacks:\n callbacks += custom_callbacks\n\n # Train\n log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch, learning_rate))\n log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n self.set_trainable(layers)\n self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n # Work-around for Windows: Keras fails on Windows when using\n # multiprocessing workers. See discussion here:\n # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n if os.name is 'nt':\n workers = 0\n else:\n workers = multiprocessing.cpu_count()\n\n self.keras_model.fit_generator(\n train_generator,\n initial_epoch=self.epoch,\n epochs=epochs,\n steps_per_epoch=self.config.STEPS_PER_EPOCH,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=self.config.VALIDATION_STEPS,\n max_queue_size=100,\n workers=workers,\n use_multiprocessing=True,\n )\n self.epoch = max(self.epoch, epochs)\n\n def mold_inputs(self, images):\n \"\"\"Takes a list of images and modifies them to the format expected\n as an input to the neural network.\n images: List of image matrices [height,width,depth]. Images can have\n different sizes.\n\n Returns 3 Numpy matrices:\n molded_images: [N, h, w, 3]. Images resized and normalized.\n image_metas: [N, length of meta data]. Details about each image.\n windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\n image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\n mrcnn_mask: [N, height, width, num_classes]\n original_image_shape: [H, W, C] Original image shape before resizing\n image_shape: [H, W, C] Shape of the image after resizing and padding\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\n image is excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Translate normalized coordinates in the resized image to pixel\n # coordinates in the original image before resizing\n window = utils.norm_boxes(window, image_shape[:2])\n wy1, wx1, wy2, wx2 = window\n shift = np.array([wy1, wx1, wy1, wx1])\n wh = wy2 - wy1 # window height\n ww = wx2 - wx1 # window width\n scale = np.array([wh, ww, wh, ww])\n # Convert boxes to normalized coordinates on the window\n boxes = np.divide(boxes - shift, scale)\n # Convert boxes to pixel coordinates on the original image\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\n\n # Filter out detections with zero area. 
Happens in early training when\n # network weights are still random\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty(original_image_shape[:2] + (0,))\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detect_molded(self, molded_images, image_metas, verbose=0):\n \"\"\"Runs the detection pipeline, but expect inputs that are\n molded already. Used mostly for debugging and inspecting\n the model.\n\n molded_images: List of images loaded using load_image_gt()\n image_metas: image meta data, also returned by load_image_gt()\n\n Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(molded_images) == self.config.BATCH_SIZE,\\\n \"Number of images must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(molded_images)))\n for image in molded_images:\n log(\"image\", image)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"Images must have the same size\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(molded_images):\n window = [0, 0, image.shape[0], image.shape[1]]\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n window)\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def get_anchors(self, image_shape):\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebook are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. 
A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n def run_graph(self, images, outputs, image_metas=None):\n \"\"\"Runs a sub-set of the computation graph that computes the given\n outputs.\n\n image_metas: If provided, the images are assumed to be already\n molded (i.e. resized, padded, and normalized)\n\n outputs: List of tuples (name, tensor) to compute. The tensors are\n symbolic TensorFlow tensors and the names are for easy tracking.\n\n Returns an ordered dict of results. Keys are the names received in the\n input and values are Numpy arrays.\n \"\"\"\n model = self.keras_model\n\n # Organize desired outputs into an ordered dict\n outputs = OrderedDict(outputs)\n for o in outputs.values():\n assert o is not None\n\n # Build a Keras function to run parts of the computation graph\n inputs = model.inputs\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n kf = K.function(model.inputs, list(outputs.values()))\n\n # Prepare inputs\n if image_metas is None:\n molded_images, image_metas, _ = self.mold_inputs(images)\n else:\n molded_images = images\n image_shape = molded_images[0].shape\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n model_in = [molded_images, image_metas, anchors]\n\n # Run inference\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n model_in.append(0.)\n outputs_np = kf(model_in)\n\n # Pack the generated Numpy arrays into a a dict and log the results.\n outputs_np = OrderedDict([(k, v)\n for k, v in zip(outputs.keys(), outputs_np)])\n for k, v in outputs_np.items():\n log(k, v)\n return outputs_np\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n image_id: An int ID of the image. 
Useful for debugging.\n original_image_shape: [H, W, C] before resizing or padding.\n image_shape: [H, W, C] after resizing and padding\n window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n image is (excluding the padding)\n scale: The scaling factor applied to the original image (float32)\n active_class_ids: List of class_ids available in the dataset from which\n the image came. Useful if training on images from multiple datasets\n where not all classes are present in all datasets.\n \"\"\"\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta\n\n\ndef parse_image_meta(meta):\n \"\"\"Parses an array that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed values.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id.astype(np.int32),\n \"original_image_shape\": original_image_shape.astype(np.int32),\n \"image_shape\": image_shape.astype(np.int32),\n \"window\": window.astype(np.int32),\n \"scale\": scale.astype(np.float32),\n \"active_class_ids\": active_class_ids.astype(np.int32),\n }\n\n\ndef parse_image_meta_graph(meta):\n \"\"\"Parses a tensor that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed tensors.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n\n\ndef mold_image(images, config):\n \"\"\"Expects an RGB image (or array of images) and subtracts\n the mean pixel and converts it to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n \"\"\"Takes a image normalized with mold() and returns the original.\"\"\"\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellenous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name=None):\n \"\"\"Often boxes are represented with matrices of shape [N, 4] and\n are padded with zeros. 
This removes zero boxes.\n\n boxes: [N, 4] matrix of boxes.\n non_zeros: [N] a 1D boolean mask identifying the rows to keep\n \"\"\"\n non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\n boxes = tf.boolean_mask(boxes, non_zeros, name=name)\n return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n \"\"\"Picks different number of values from each row\n in x depending on the values in counts.\n \"\"\"\n outputs = []\n for i in range(num_rows):\n outputs.append(x[i, :counts[i]])\n return tf.concat(outputs, axis=0)\n\n\ndef norm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - shift, scale)\n\n\ndef denorm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\n" ]
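The following is a minimal NumPy sketch, added here for illustration only (it is not part of the Mask_RCNN record above), of the coordinate convention implemented by norm_boxes_graph and denorm_boxes_graph: in pixel coordinates (y2, x2) lies just outside the box, while in normalized coordinates it lies inside, which is why the code shifts by [0, 0, 1, 1] and scales by (height - 1, width - 1).

import numpy as np

def norm_boxes(boxes, shape):
    """Pixel [y1, x1, y2, x2] -> normalized coordinates.
    NumPy mirror of norm_boxes_graph above."""
    h, w = shape
    scale = np.array([h - 1, w - 1, h - 1, w - 1], dtype=np.float32)
    shift = np.array([0, 0, 1, 1], dtype=np.float32)
    return (boxes.astype(np.float32) - shift) / scale

def denorm_boxes(boxes, shape):
    """Normalized [y1, x1, y2, x2] -> pixel coordinates.
    NumPy mirror of denorm_boxes_graph above."""
    h, w = shape
    scale = np.array([h - 1, w - 1, h - 1, w - 1], dtype=np.float32)
    shift = np.array([0, 0, 1, 1], dtype=np.float32)
    return np.round(boxes * scale + shift).astype(np.int32)

# A box covering a whole 128x128 image round-trips through exactly [0, 0, 1, 1]:
box = np.array([[0, 0, 128, 128]])
normed = norm_boxes(box, (128, 128))           # [[0., 0., 1., 1.]]
assert (denorm_boxes(normed, (128, 128)) == box).all()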
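Likewise, a small self-contained sketch of the (dy, dx, log(dh), log(dw)) refinement that build_rpn_targets computes for each positive anchor earlier in this file. The bbox_std_dev default below is only an assumed stand-in for config.RPN_BBOX_STD_DEV, not a value taken from the record.

import numpy as np

def rpn_box_delta(anchor, gt, bbox_std_dev=np.array([0.1, 0.1, 0.2, 0.2])):
    """anchor, gt: [y1, x1, y2, x2] in pixels. Returns the normalized
    (dy, dx, log(dh), log(dw)) target the RPN is trained to predict.
    bbox_std_dev is a hypothetical default standing in for
    config.RPN_BBOX_STD_DEV."""
    a_h, a_w = anchor[2] - anchor[0], anchor[3] - anchor[1]
    g_h, g_w = gt[2] - gt[0], gt[3] - gt[1]
    a_cy, a_cx = anchor[0] + 0.5 * a_h, anchor[1] + 0.5 * a_w
    g_cy, g_cx = gt[0] + 0.5 * g_h, gt[1] + 0.5 * g_w
    delta = np.array([(g_cy - a_cy) / a_h,
                      (g_cx - a_cx) / a_w,
                      np.log(g_h / a_h),
                      np.log(g_w / a_w)])
    return delta / bbox_std_dev

# An anchor that has to shift down/right and grow a little to reach its GT box:
print(rpn_box_delta(np.array([10., 10., 50., 50.]),
                    np.array([12., 14., 56., 58.])))
# -> roughly [1.0, 1.5, 0.48, 0.48]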
[ [ "numpy.amax", "numpy.expand_dims", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.stack", "tensorflow.reduce_sum", "tensorflow.minimum", "tensorflow.cast", "tensorflow.image.non_max_suppression", "tensorflow.equal", "tensorflow.image.crop_and_resize", "numpy.concatenate", "tensorflow.abs", "tensorflow.map_fn", "numpy.any", "tensorflow.pad", "tensorflow.where", "tensorflow.random_shuffle", "numpy.where", "tensorflow.add_n", "numpy.divide", "numpy.random.randint", "tensorflow.boolean_mask", "numpy.hstack", "tensorflow.Variable", "numpy.reshape", "numpy.fliplr", "numpy.arange", "tensorflow.squeeze", "numpy.stack", "tensorflow.divide", "tensorflow.stop_gradient", "tensorflow.gather", "numpy.copy", "numpy.argmax", "tensorflow.nn.top_k", "tensorflow.argmax", "numpy.zeros", "numpy.log", "tensorflow.gather_nd", "tensorflow.unique", "tensorflow.shape", "numpy.random.choice", "tensorflow.identity", "tensorflow.exp", "tensorflow.sparse_tensor_to_dense", "numpy.delete", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.split", "tensorflow.round", "numpy.array", "numpy.sum", "tensorflow.size", "tensorflow.reduce_max", "tensorflow.multiply", "tensorflow.transpose", "tensorflow.constant", "tensorflow.range", "tensorflow.reduce_mean", "numpy.abs", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.expand_dims", "numpy.sort", "numpy.ones", "numpy.random.shuffle", "tensorflow.log", "numpy.broadcast_to", "tensorflow.sqrt", "numpy.empty", "tensorflow.logical_and" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
matthew-brett/regreg
[ "8a10a79cbaf771c2a6d70e8094ab753ec075aab7", "8a10a79cbaf771c2a6d70e8094ab753ec075aab7" ]
[ "regreg/affine/tests/test_normalize.py", "regreg/affine/tests/test_fused_lasso.py" ]
[ "from itertools import product\nimport numpy as np\nimport scipy.sparse\n\nimport regreg.api as rr\nfrom regreg.identity_quadratic import identity_quadratic as sq\nimport nose.tools as nt\n\n\ndef test_centering():\n \"\"\"\n This test verifies that the normalized transform\n of affine correctly implements the linear\n transform that multiplies first by X, then centers.\n \"\"\"\n # N - number of data points\n # P - number of columns in design == number of betas\n N, P = 40, 30\n # an arbitrary positive offset for data and design\n offset = 50\n\n # design - with ones as last column\n X = np.ones((N,P))\n X[:,:-1] = np.random.normal(size=(N,P-1)) + offset\n X2 = X - X.mean(axis=0)[np.newaxis,:]\n L = rr.normalize(X, center=True, scale=False)\n # coef for loss\n\n for _ in range(10):\n beta = np.random.normal(size=(P,))\n v = L.linear_map(beta)\n v2 = np.dot(X, beta)\n v2 -= v2.mean()\n v3 = np.dot(X2, beta)\n v4 = L.affine_map(beta)\n np.testing.assert_almost_equal(v, v3)\n np.testing.assert_almost_equal(v, v2)\n np.testing.assert_almost_equal(v, v4)\n\n y = np.random.standard_normal(N)\n u1 = L.adjoint_map(y)\n y2 = y - y.mean()\n u2 = np.dot(X.T, y2)\n np.testing.assert_almost_equal(u1, u2)\n\ndef test_scaling():\n \"\"\"\n This test verifies that the normalized transform\n of affine correctly implements the linear\n transform that multiplies first by X, then centers.\n \"\"\"\n # N - number of data points\n # P - number of columns in design == number of betas\n N, P = 40, 30\n # an arbitrary positive offset for data and design\n offset = 50\n\n # design - with ones as last column\n X = np.ones((N,P))\n X[:,:-1] = np.random.normal(size=(N,P-1)) + offset\n\n L = rr.normalize(X, center=False, scale=True)\n # coef for loss\n\n scalings = np.sqrt((X**2).sum(0) / N)\n scaling_matrix = np.diag(1./scalings)\n \n for _ in range(10):\n\n beta = np.random.normal(size=(P,))\n v = L.linear_map(beta)\n v2 = np.dot(X, np.dot(scaling_matrix, beta))\n v3 = L.affine_map(beta)\n np.testing.assert_almost_equal(v, v2)\n np.testing.assert_almost_equal(v, v3)\n\n y = np.random.standard_normal(N)\n u1 = L.adjoint_map(y)\n u2 = np.dot(scaling_matrix, np.dot(X.T, y))\n np.testing.assert_almost_equal(u1, u2)\n\ndef test_scaling_and_centering():\n \"\"\"\n This test verifies that the normalized transform\n of affine correctly implements the linear\n transform that multiplies first by X, then centers.\n \"\"\"\n # N - number of data points\n # P - number of columns in design == number of betas\n N, P = 40, 30\n # an arbitrary positive offset for data and design\n offset = 50\n\n # design - with no colum of ones!\n X = np.random.normal(size=(N,P)) + offset\n\n L = rr.normalize(X, center=True, scale=True) # the default\n # coef for loss\n\n scalings = np.std(X, 0)\n scaling_matrix = np.diag(1./scalings)\n\n for _ in range(10):\n beta = np.random.normal(size=(P,))\n v = L.linear_map(beta)\n v2 = np.dot(X, np.dot(scaling_matrix, beta))\n v2 -= v2.mean()\n np.testing.assert_almost_equal(v, v2)\n\n y = np.random.standard_normal(N)\n u1 = L.adjoint_map(y)\n y2 = y - y.mean()\n u2 = np.dot(scaling_matrix, np.dot(X.T, y2))\n np.testing.assert_almost_equal(u1, u2)\n\ndef test_centering_fit(debug=False):\n\n # N - number of data points\n # P - number of columns in design == number of betas\n N, P = 40, 30\n # an arbitrary positive offset for data and design\n offset = 50\n\n # design - with ones as last column\n X = np.ones((N,P))\n X = np.random.normal(size=(N,P)) + offset\n X2 = X - X.mean(axis=0)[np.newaxis,:]\n\n # the 
normalizer\n L = rr.normalize(X, center=True, scale=False)\n\n # data\n Y = np.random.normal(size=(N,)) + offset\n\n # coef for loss\n coef = 0.5\n # lagrange for penalty\n lagrange = .1\n\n # Loss function (squared difference between fitted and actual data)\n loss = rr.quadratic.affine(L, -Y, coef=coef)\n\n penalties = [rr.constrained_positive_part(25, lagrange=lagrange),\n rr.nonnegative(5)]\n groups = [slice(0,25), slice(25,30)]\n penalty = rr.separable((P,), penalties,\n groups)\n initial = np.random.standard_normal(P)\n\n composite_form = rr.separable_problem.fromatom(penalty, loss)\n solver = rr.FISTA(composite_form)\n solver.debug = debug\n solver.fit(tol=1.0e-12, min_its=200)\n coefs = solver.composite.coefs\n\n # Solve the problem with X2\n loss2 = rr.quadratic.affine(X2, -Y, coef=coef)\n\n initial2 = np.random.standard_normal(P)\n composite_form2 = rr.separable_problem.fromatom(penalty, loss2)\n\n for _ in range(10):\n beta = np.random.standard_normal(P)\n g1 = loss.smooth_objective(beta, mode='grad')\n g2 = loss2.smooth_objective(beta, mode='grad')\n np.testing.assert_almost_equal(g1, g2)\n b1 = penalty.proximal(sq(1, beta, g1, 0))\n b2 = penalty.proximal(sq(1, beta, g1, 0))\n np.testing.assert_almost_equal(b1, b2)\n\n f1 = composite_form.objective(beta)\n f2 = composite_form2.objective(beta)\n np.testing.assert_almost_equal(f1, f2)\n\n solver2 = rr.FISTA(composite_form2)\n solver2.debug = debug\n solver2.fit(tol=1.0e-12, min_its=200)\n coefs2 = solver2.composite.coefs\n\n np.testing.assert_almost_equal(composite_form.objective(coefs), composite_form.objective(coefs2))\n np.testing.assert_almost_equal(composite_form2.objective(coefs), composite_form2.objective(coefs2))\n\n nt.assert_true(np.linalg.norm(coefs - coefs2) / max(np.linalg.norm(coefs),1) < 1.0e-04)\n\ndef test_scaling_fit(debug=False):\n\n # N - number of data points\n # P - number of columns in design == number of betas\n N, P = 40, 30\n # an arbitrary positive offset for data and design\n offset = 2\n\n # design - with ones as last column\n X = np.ones((N,P))\n X[:,:-1] = np.random.normal(size=(N,P-1)) + offset\n X2 = X / (np.sqrt((X**2).sum(0) / N))[np.newaxis,:]\n L = rr.normalize(X, center=False, scale=True)\n\n # data\n Y = np.random.normal(size=(N,)) + offset\n\n # coef for loss\n coef = 0.5\n # lagrange for penalty\n lagrange = .1\n\n # Loss function (squared difference between fitted and actual data)\n loss = rr.quadratic.affine(L, -Y, coef=coef)\n\n penalties = [rr.constrained_positive_part(25, lagrange=lagrange),\n rr.nonnegative(5)]\n groups = [slice(0,25), slice(25,30)]\n penalty = rr.separable((P,), penalties,\n groups)\n initial = np.random.standard_normal(P)\n composite_form = rr.separable_problem.fromatom(penalty, loss)\n\n solver = rr.FISTA(composite_form)\n solver.debug = debug\n solver.fit(tol=1.0e-12, min_its=200)\n coefs = solver.composite.coefs\n\n # Solve the problem with X2\n loss2 = rr.quadratic.affine(X2, -Y, coef=coef)\n\n initial2 = np.random.standard_normal(P)\n composite_form2 = rr.separable_problem.fromatom(penalty, loss2)\n\n solver2 = rr.FISTA(composite_form2)\n solver2.debug = debug\n solver2.fit(tol=1.0e-12, min_its=200)\n coefs2 = solver2.composite.coefs\n\n for _ in range(10):\n beta = np.random.standard_normal(P)\n g1 = loss.smooth_objective(beta, mode='grad')\n g2 = loss2.smooth_objective(beta, mode='grad')\n np.testing.assert_almost_equal(g1, g2)\n b1 = penalty.proximal(sq(1, beta, g1, 0))\n b2 = penalty.proximal(sq(1, beta, g2, 0))\n np.testing.assert_almost_equal(b1, b2)\n\n 
f1 = composite_form.objective(beta)\n f2 = composite_form2.objective(beta)\n np.testing.assert_almost_equal(f1, f2)\n\n\n np.testing.assert_almost_equal(composite_form.objective(coefs), composite_form.objective(coefs2))\n np.testing.assert_almost_equal(composite_form2.objective(coefs), composite_form2.objective(coefs2))\n\n nt.assert_true(np.linalg.norm(coefs - coefs2) / max(np.linalg.norm(coefs),1) < 1.0e-04)\n\ndef test_scaling_and_centering_fit(debug=False):\n\n # N - number of data points\n # P - number of columns in design == number of betas\n N, P = 40, 30\n # an arbitrary positive offset for data and design\n offset = 2\n\n # design - with ones as last column\n X = np.random.normal(size=(N,P)) + offset\n X2 = X - X.mean(0)[np.newaxis,:]\n X2 = X2 / np.std(X2,0)[np.newaxis,:]\n\n L = rr.normalize(X, center=True, scale=True)\n # data\n Y = np.random.normal(size=(N,)) + offset\n\n # coef for loss\n coef = 0.5\n # lagrange for penalty\n lagrange = .1\n\n # Loss function (squared difference between fitted and actual data)\n loss = rr.quadratic.affine(L, -Y, coef=coef)\n\n penalties = [rr.constrained_positive_part(25, lagrange=lagrange),\n rr.nonnegative(5)]\n groups = [slice(0,25), slice(25,30)]\n penalty = rr.separable((P,), penalties,\n groups)\n\n initial = np.random.standard_normal(P)\n composite_form = rr.separable_problem.fromatom(penalty, loss)\n solver = rr.FISTA(composite_form)\n solver.debug = debug\n solver.fit(tol=1.0e-12, min_its=200)\n coefs = solver.composite.coefs\n\n # Solve the problem with X2\n loss2 = rr.quadratic.affine(X2, -Y, coef=coef)\n\n initial2 = np.random.standard_normal(P)\n composite_form2 = rr.separable_problem.fromatom(penalty, loss2)\n\n solver2 = rr.FISTA(composite_form2)\n solver2.debug = debug\n solver2.fit(tol=1.0e-12, min_its=200)\n coefs2 = solver2.composite.coefs\n\n for _ in range(10):\n beta = np.random.standard_normal(P)\n g1 = loss.smooth_objective(beta, mode='grad')\n g2 = loss2.smooth_objective(beta, mode='grad')\n np.testing.assert_almost_equal(g1, g2)\n b1 = penalty.proximal(sq(1, beta, g1, 0))\n b2 = penalty.proximal(sq(1, beta, g2, 0))\n np.testing.assert_almost_equal(b1, b2)\n\n f1 = composite_form.objective(beta)\n f2 = composite_form2.objective(beta)\n np.testing.assert_almost_equal(f1, f2)\n\n np.testing.assert_almost_equal(composite_form.objective(coefs), composite_form.objective(coefs2))\n np.testing.assert_almost_equal(composite_form2.objective(coefs), composite_form2.objective(coefs2))\n\n nt.assert_true(np.linalg.norm(coefs - coefs2) / max(np.linalg.norm(coefs),1) < 1.0e-04)\n\ndef test_scaling_and_centering_fit_inplace(debug=False):\n\n # N - number of data points\n # P - number of columns in design == number of betas\n N, P = 40, 30\n # an arbitrary positive offset for data and design\n offset = 2\n\n # design\n X = np.random.normal(size=(N,P)) + offset\n L = rr.normalize(X, center=True, scale=True, inplace=True)\n\n # X should have been normalized in place\n np.testing.assert_almost_equal(np.sum(X**2, 0), N)\n np.testing.assert_almost_equal(np.sum(X, 0), 0)\n\n # data\n Y = np.random.normal(size=(N,)) + offset\n\n # coef for loss\n coef = 0.5\n # lagrange for penalty\n lagrange = .1\n\n # Loss function (squared difference between fitted and actual data)\n loss = rr.quadratic.affine(L, -Y, coef=coef)\n\n penalties = [rr.constrained_positive_part(25, lagrange=lagrange),\n rr.nonnegative(5)]\n groups = [slice(0,25), slice(25,30)]\n penalty = rr.separable((P,), penalties,\n groups)\n\n initial = np.random.standard_normal(P)\n 
composite_form = rr.separable_problem.fromatom(penalty, loss)\n\n solver = rr.FISTA(composite_form)\n solver.debug = debug\n solver.fit(tol=1.0e-12, min_its=200)\n coefs = solver.composite.coefs\n\n # Solve the problem with X, which has been normalized in place\n loss2 = rr.quadratic.affine(X, -Y, coef=coef)\n\n initial2 = np.random.standard_normal(P)\n composite_form2 = rr.separable_problem.fromatom(penalty, loss2)\n\n solver2 = rr.FISTA(composite_form2)\n solver2.debug = debug\n solver2.fit(tol=1.0e-12, min_its=200)\n coefs2 = solver2.composite.coefs\n\n for _ in range(10):\n beta = np.random.standard_normal(P)\n g1 = loss.smooth_objective(beta, mode='grad')\n g2 = loss2.smooth_objective(beta, mode='grad')\n np.testing.assert_almost_equal(g1, g2)\n b1 = penalty.proximal(sq(1, beta, g1, 0))\n b2 = penalty.proximal(sq(1, beta, g2, 0))\n np.testing.assert_almost_equal(b1, b2)\n\n f1 = composite_form.objective(beta)\n f2 = composite_form2.objective(beta)\n np.testing.assert_almost_equal(f1, f2)\n\n np.testing.assert_almost_equal(composite_form.objective(coefs), composite_form.objective(coefs2))\n np.testing.assert_almost_equal(composite_form2.objective(coefs), composite_form2.objective(coefs2))\n\n nt.assert_true(np.linalg.norm(coefs - coefs2) / max(np.linalg.norm(coefs),1) < 1.0e-04)\n\ndef test_scaling_fit_inplace(debug=False):\n\n # N - number of data points\n # P - number of columns in design == number of betas\n N, P = 40, 30\n # an arbitrary positive offset for data and design\n offset = 2\n\n # design - with ones as last column\n X = np.ones((N,P))\n X[:,:-1] = np.random.normal(size=(N,P-1)) + offset\n L = rr.normalize(X, center=False, scale=True, inplace=True)\n\n # X should have been normalized in place\n np.testing.assert_almost_equal(np.sum(X**2, 0), N)\n\n # data\n Y = np.random.normal(size=(N,)) + offset\n\n # coef for loss\n coef = 0.5\n # lagrange for penalty\n lagrange = .1\n\n # Loss function (squared difference between fitted and actual data)\n loss = rr.quadratic.affine(L, -Y, coef=coef)\n\n penalties = [rr.constrained_positive_part(25, lagrange=lagrange),\n rr.nonnegative(5)]\n groups = [slice(0,25), slice(25,30)]\n penalty = rr.separable((P,), penalties,\n groups)\n initial = np.random.standard_normal(P)\n composite_form = rr.separable_problem.fromatom(penalty, loss)\n\n solver = rr.FISTA(composite_form)\n solver.debug = debug\n solver.fit(tol=1.0e-12, min_its=200)\n coefs = solver.composite.coefs\n\n # Solve the problem with X, which has been normalized in place\n loss2 = rr.quadratic.affine(X, -Y, coef=coef)\n\n initial2 = np.random.standard_normal(P)\n composite_form2 = rr.separable_problem.fromatom(penalty, loss2)\n\n solver2 = rr.FISTA(composite_form2)\n solver2.debug = debug\n solver2.fit(tol=1.0e-12, min_its=200)\n coefs2 = solver2.composite.coefs\n\n for _ in range(10):\n beta = np.random.standard_normal(P)\n g1 = loss.smooth_objective(beta, mode='grad')\n g2 = loss2.smooth_objective(beta, mode='grad')\n np.testing.assert_almost_equal(g1, g2)\n b1 = penalty.proximal(sq(1, beta, g1,0))\n b2 = penalty.proximal(sq(1, beta, g2,0))\n np.testing.assert_almost_equal(b1, b2)\n\n f1 = composite_form.objective(beta)\n f2 = composite_form2.objective(beta)\n np.testing.assert_almost_equal(f1, f2)\n\n\n np.testing.assert_almost_equal(composite_form.objective(coefs), composite_form.objective(coefs2))\n np.testing.assert_almost_equal(composite_form2.objective(coefs), composite_form2.objective(coefs2))\n\n nt.assert_true(np.linalg.norm(coefs - coefs2) / 
max(np.linalg.norm(coefs),1) < 1.0e-04)\n\ndef test_centering_fit_inplace(debug=False):\n\n # N - number of data points\n # P - number of columns in design == number of betas\n N, P = 40, 30\n # an arbitrary positive offset for data and design\n offset = 2\n\n # design - with ones as last column\n X = np.random.normal(size=(N,P)) + offset\n L = rr.normalize(X, center=True, scale=False, inplace=True)\n\n # X should have been normalized in place\n np.testing.assert_almost_equal(np.sum(X, 0), 0)\n\n # data\n Y = np.random.normal(size=(N,)) + offset\n\n # coef for loss\n coef = 0.5\n # lagrange for penalty\n lagrange = .1\n\n # Loss function (squared difference between fitted and actual data)\n loss = rr.quadratic.affine(L, -Y, coef=coef)\n\n penalties = [rr.constrained_positive_part(25, lagrange=lagrange),\n rr.nonnegative(5)]\n groups = [slice(0,25), slice(25,30)]\n penalty = rr.separable((P,), penalties,\n groups)\n initial = np.random.standard_normal(P)\n\n composite_form = rr.separable_problem.fromatom(penalty, loss)\n\n solver = rr.FISTA(composite_form)\n solver.debug = debug\n solver.fit(tol=1.0e-12, min_its=200)\n coefs = solver.composite.coefs\n\n # Solve the problem with X, which has been normalized in place\n loss2 = rr.quadratic.affine(X, -Y, coef=coef)\n\n initial2 = np.random.standard_normal(P)\n composite_form2 = rr.separable_problem.fromatom(penalty, loss2)\n\n solver2 = rr.FISTA(composite_form2)\n solver2.debug = debug\n solver2.fit(tol=1.0e-12, min_its=200)\n coefs2 = solver2.composite.coefs\n\n for _ in range(10):\n beta = np.random.standard_normal(P)\n g1 = loss.smooth_objective(beta, mode='grad')\n g2 = loss2.smooth_objective(beta, mode='grad')\n np.testing.assert_almost_equal(g1, g2)\n b1 = penalty.proximal(sq(1, beta, g1,0))\n b2 = penalty.proximal(sq(1, beta, g2,0))\n np.testing.assert_almost_equal(b1, b2)\n\n f1 = composite_form.objective(beta)\n f2 = composite_form2.objective(beta)\n np.testing.assert_almost_equal(f1, f2)\n\n\n np.testing.assert_almost_equal(composite_form.objective(coefs), composite_form.objective(coefs2))\n np.testing.assert_almost_equal(composite_form2.objective(coefs), composite_form2.objective(coefs2))\n\n nt.assert_true(np.linalg.norm(coefs - coefs2) / max(np.linalg.norm(coefs),1) < 1.0e-04)\n\ndef test_normalize_intercept():\n\n for issparse, value, inplace, intercept_column, scale, center in product([False, True], \n [1,3], \n [False, True], \n [None, 2],\n [True, False],\n [True, False]):\n \n print (issparse, value, inplace, intercept_column, scale, center)\n if not (issparse and inplace):\n\n X = np.random.standard_normal((20,6))\n if intercept_column is not None:\n X[:,intercept_column] = 1\n Y = X.copy()\n\n if issparse:\n X = scipy.sparse.csr_matrix(X)\n\n Xn = rr.normalize(X, \n value=value, \n inplace=inplace, \n intercept_column=intercept_column,\n scale=scale, \n center=center)\n\n if intercept_column is not None:\n v = np.zeros(Y.shape[1])\n v[intercept_column] = 4\n yield np.testing.assert_allclose, Xn.linear_map(v), 4 * np.ones(Y.shape[0])\n\n if scale and center:\n\n Y -= Y.mean(0)[None,:]\n Y /= Y.std(0)[None,:]\n Y *= np.sqrt(value)\n if intercept_column is not None:\n Y[:,intercept_column] = 1\n \n elif scale and not center:\n\n Y /= (np.sqrt((Y**2).sum(0))[None,:] / np.sqrt(Y.shape[0]))\n Y *= np.sqrt(value)\n if intercept_column is not None:\n Y[:,intercept_column] = 1\n\n elif center and not scale:\n\n Y -= Y.mean(0)[None,:]\n if intercept_column is not None:\n Y[:,intercept_column] = 1\n\n V = 
np.random.standard_normal((20, 3))\n U = np.random.standard_normal((6,4))\n\n Xn.adjoint_map(V)\n yield np.testing.assert_allclose, np.dot(Y, U), Xn.linear_map(np.array(U))\n yield np.testing.assert_allclose, np.dot(Y, U), Xn.affine_map(np.array(U))\n yield np.testing.assert_allclose, np.dot(Y, U[:,0]), Xn.linear_map(np.array(U[:,0]))\n yield np.testing.assert_allclose, np.dot(Y, U[:,0]), Xn.affine_map(np.array(U[:,0]))\n yield np.testing.assert_allclose, np.dot(Y.T, V), Xn.adjoint_map(V)\n yield nt.assert_raises, ValueError, Xn.linear_map, np.zeros((6,4,3))\n\n X2 = Xn.slice_columns(list(range(3)))\n Y2 = Y[:,:3]\n U2 = np.random.standard_normal((3,4))\n V2 = np.random.standard_normal(20)\n\n yield np.testing.assert_allclose, np.dot(Y2, U2), X2.linear_map(np.array(U2))\n yield np.testing.assert_allclose, np.dot(Y2, U2), X2.affine_map(np.array(U2))\n yield np.testing.assert_allclose, np.dot(Y2, U2[:,0]), X2.linear_map(np.array(U2[:,0]))\n yield np.testing.assert_allclose, np.dot(Y2, U2[:,0]), X2.affine_map(np.array(U2[:,0]))\n yield np.testing.assert_allclose, np.dot(Y2.T, V2), X2.adjoint_map(V2)\n\n X2 = Xn.slice_columns(list(range(3,6)))\n Y2 = Y[:,3:]\n U2 = np.random.standard_normal((3,4))\n V2 = np.random.standard_normal(20)\n\n yield np.testing.assert_allclose, np.dot(Y2, U2), X2.linear_map(np.array(U2))\n yield np.testing.assert_allclose, np.dot(Y2, U2), X2.affine_map(np.array(U2))\n yield np.testing.assert_allclose, np.dot(Y2, U2[:,0]), X2.linear_map(np.array(U2[:,0]))\n yield np.testing.assert_allclose, np.dot(Y2, U2[:,0]), X2.affine_map(np.array(U2[:,0]))\n yield np.testing.assert_allclose, np.dot(Y2.T, V2), X2.adjoint_map(V2)\n\n keep = np.zeros(6, np.bool)\n keep[:3] = 1\n X2 = Xn.slice_columns(keep)\n Y2 = Y[:,:3]\n U2 = np.random.standard_normal((3,4))\n V2 = np.random.standard_normal(20)\n\n yield np.testing.assert_allclose, np.dot(Y2, U2), X2.linear_map(np.array(U2))\n yield np.testing.assert_allclose, np.dot(Y2, U2), X2.affine_map(np.array(U2))\n yield np.testing.assert_allclose, np.dot(Y2, U2[:,0]), X2.linear_map(np.array(U2[:,0]))\n yield np.testing.assert_allclose, np.dot(Y2, U2[:,0]), X2.affine_map(np.array(U2[:,0]))\n yield np.testing.assert_allclose, np.dot(Y2.T, V2), X2.adjoint_map(V2)\n\n yield nt.assert_raises, ValueError, rr.normalize, scipy.sparse.csr_matrix(Y), True, True, 1, True\n\n", "from itertools import product\nimport numpy as np\nimport scipy.sparse\n\nimport regreg.api as rr\nimport regreg.affine.fused_lasso as FL\nfrom regreg.identity_quadratic import identity_quadratic as sq\nimport nose.tools as nt\n\ndef test_class():\n p = 50\n for order in range(1,3):\n fused = FL.trend_filter.grid(p, order=order)\n fused2 = FL.trend_filter(np.arange(p), order=order)\n V = np.random.standard_normal(p)\n U = np.random.standard_normal(p - order)\n np.testing.assert_allclose(fused.linear_map(V), fused2.linear_map(V))\n np.testing.assert_allclose(fused.affine_map(V), fused2.affine_map(V))\n np.testing.assert_allclose(fused.adjoint_map(U), fused2.adjoint_map(U))\n\n V2 = np.random.standard_normal((p, 3))\n U2 = np.random.standard_normal((p - order, 3))\n\n np.testing.assert_allclose(fused.linear_map(V2), fused2.linear_map(V2))\n np.testing.assert_allclose(fused.affine_map(V2), fused2.affine_map(V2))\n np.testing.assert_allclose(fused.adjoint_map(U2), fused2.adjoint_map(U2))\n\n if order == 1:\n fusedI = FL.trend_filter_inverse.grid(p, order=order)\n fusedI2 = FL.trend_filter_inverse(np.arange(p), order=order)\n\n np.testing.assert_allclose(fusedI.linear_map(U), 
fusedI2.linear_map(U))\n np.testing.assert_allclose(fusedI.affine_map(U), fusedI2.affine_map(U))\n np.testing.assert_allclose(fusedI.adjoint_map(V), fusedI2.adjoint_map(V))\n\n np.testing.assert_allclose(fusedI.linear_map(U2), fusedI2.linear_map(U2))\n np.testing.assert_allclose(fusedI.affine_map(U2), fusedI2.affine_map(U2))\n np.testing.assert_allclose(fusedI.adjoint_map(V2), fusedI2.adjoint_map(V2))\n\n\ndef test_difference_transform():\n p = 50\n for order in range(1,3):\n FL.difference_transform(np.arange(p), order=order, sorted=False)\n FL.difference_transform(np.arange(p), order=order, transform=False)\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.sqrt", "numpy.random.standard_normal", "numpy.linalg.norm", "numpy.ones", "numpy.testing.assert_almost_equal", "numpy.std", "numpy.random.normal", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.random.standard_normal", "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rdelfin/learning_robots_simulation
[ "5a212eac66683cc8a8d43f9a0346cb3a135586c7" ]
[ "test.py" ]
[ "from unittest import main, TestCase\nfrom nn.neuralnet import NeuralNet\n\nimport numpy as np\n\ndelta = 0.00000001\n\nclass NeuralNetTest(TestCase):\n def test_cost(self):\n training_x = np.mat([[1, 2], [3, 4]], dtype=np.float64)\n training_y = np.mat([[2], [2]], dtype=np.float64)\n training_examples = (training_x, training_y)\n weights = [\n # W^(0)\n 0.3, 0.01,\n -0.2, 0.2,\n 0.1, -0.5,\n # W^(1)\n 0.3,\n 0.2,\n 0.05\n ]\n test_nn = NeuralNet([2, 2, 1], 0.1)\n test_nn.set_weights(weights)\n\n #hidden_1 = [0.5, 0.549833997] # Mid-point calculations\n #hidden_2 = [0.477515175, 0.581759377] # Mid-point calculations\n #output = np.mat([[0.295503035], [0.331302075]]) #Mid-point calculations\n expected_cost = 1.422465667\n\n self.assertAlmostEqual(expected_cost, test_nn.cost(training_examples), places=5)\n \n\n def test_gradient(self):\n training_x = np.mat([[1, 2, 5, 4], [5, 2, 7, 9]], dtype=np.float64)\n training_y = np.mat([[1, 2, 1], [5, 4, 3]], dtype=np.float64)\n training_examples = (training_x, training_y)\n test_nn = NeuralNet([4, 7, 5, 3], 0.1)\n weights = test_nn.get_weights()\n\n # Get cost according to different weights\n estimate_grad = np.zeros_like(weights)\n\n for i in range(len(estimate_grad)):\n del_vector = np.eye(1, len(weights), i) * delta / 2\n weights_lower = np.array(weights - del_vector)\n weights_upper = np.array(weights + del_vector)\n test_nn.set_weights(weights_lower)\n lower_cost = test_nn.cost(training_examples)\n test_nn.set_weights(weights_upper)\n upper_cost = test_nn.cost(training_examples)\n\n estimate_grad[i] = (upper_cost - lower_cost) / delta\n\n test_nn.set_weights(weights)\n backprop_grad = test_nn.gradient(training_examples)\n\n self.assertLessEqual(np.linalg.norm(backprop_grad - estimate_grad),\n np.linalg.norm(estimate_grad) * 0.005)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n main()\n" ]
[ [ "numpy.array", "numpy.mat", "numpy.zeros_like", "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
JiaZhou-PU/raven
[ "a1fd82facb0f02f20770ea4df39d55a999c49017", "a1fd82facb0f02f20770ea4df39d55a999c49017", "a1fd82facb0f02f20770ea4df39d55a999c49017" ]
[ "framework/CodeInterfaces/Utilities/csvUtilities.py", "framework/SupervisedLearning/GaussPolynomialRom.py", "framework/Samplers/AdaptiveDynamicEventTree.py" ]
[ "# Copyright 2017 Battelle Energy Alliance, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nCreated on Jun 5, 2015\n\n@author: alfoa\n\"\"\"\n#for future compatibility with Python 3--------------------------------------------------------------\nfrom __future__ import division, print_function, absolute_import\n# WARNING if you import unicode_literals here, we fail tests (e.g. framework.testFactorials). This may be a future-proofing problem. 2015-04.\n#End compatibility block for Python 3----------------------------------------------------------------\n\n#External Modules------------------------------------------------------------------------------------\nimport os\nfrom glob import glob\nimport numpy as np\n#External Modules End--------------------------------------------------------------------------------\n\n#Internal Modules------------------------------------------------------------------------------------\n#Internal Modules End--------------------------------------------------------------------------------\n\nclass csvUtilityClass(object):\n \"\"\"\n This utility class is aimed to provide utilities for CSV handling.\n \"\"\"\n def __init__(self, listOfFiles, linesToSkipAfterHeader=0, delimeter=\",\", mergeSameVariables=False):\n \"\"\"\n Constructor\n @ In, listOfFiles, list, list of CSV files that need to be merged. If in one or more \"filenames\" the special symbol $*$ is present, the class will use the filename as root name and look for all the files with that root. For example:\n if listOfFiles[1] == \"aPath/outputChannel$*$\":\n the code will inquire the directory \"aPath\" to look for all the files starting with the name \"outputChannel\" => at end we will have a list of files like \"outputChannel_1.csv,outputChannel_ab.csv, etc\"\n @ In, linesToSkipAfterHeader, int, optional, the number of lines that need to be skipped after the header\n @ In, delimeter, string, optional, the delimiter of the csv\n @ In, mergeSameVariables, bool, optional, do variables with the same name need to be merged together ? (aka, take only the values of the first occurence)?\n @ Out, None\n \"\"\"\n if len(listOfFiles) == 0:\n raise IOError(\"MergeCSV class ERROR: the number of CSV files provided is equal to 0!! 
it can not merge anything!\")\n self.listOfFiles = [] # list of files\n self.dataContainer = {} # dictionary that is going to contain all the data from the multiple CSVs\n self.allHeaders = [] # it containes all the headers\n filePathToExpand = []\n self.mergeSameVariables = mergeSameVariables\n for filename in listOfFiles:\n if \"$*$\" in filename:\n filePathToExpand.append(filename)\n else:\n self.listOfFiles.append(filename)\n if len(filePathToExpand) > 0:\n # we need to look for this files\n for fileToExpand in filePathToExpand:\n self.listOfFiles.extend(glob(os.path.join(os.path.split(fileToExpand)[0],os.path.split(fileToExpand)[1].replace(\"$*$\",\"*\") + \".csv\")))\n\n for filename in self.listOfFiles:\n # open file\n myFile = open (filename,'rb')\n # read the field names\n head = myFile.readline().decode()\n for _ in range(linesToSkipAfterHeader):\n myFile.readline()\n all_field_names = head.split(delimeter)\n for index in range(len(all_field_names)):\n all_field_names[index] = all_field_names[index].strip()\n if all_field_names[-1] == \"\":\n all_field_names.pop(-1) # it means there is a trailing \"'\" at the end of the file\n isAlreadyIn = False\n\n # load the table data (from the csv file) into a numpy nd array\n data = np.loadtxt(myFile, delimiter=delimeter, usecols=tuple([i for i in range(len(all_field_names))]))\n # close file\n myFile.close()\n self.allHeaders.extend(all_field_names)\n # store the data\n self.dataContainer[filename] = {\"headers\":all_field_names,\"data\":data}\n\n def mergeCSV(self,outputFileName, options = {}):\n \"\"\"\n Method that is going to merge multiple csvs in a single one.\n @ In, outputFileName, string, full path of the resulting merged CSV (output file name, eg. /users/userName/output.csv)\n @ In, options, dict, optional, dictionary of options: { \"variablesToExpandFrom\":\"aVariable\" (a variable through which the \"shorter\" CSVs need to be expanded)\n \"sameKeySuffix\":\"integerCounter or filename (default)\" (if in the CSVs that need to be merged there are\n multiple occurrences of the same key, the code will append either a letter (A,B,C,D,etc) or an integer counter (1,2,3,etc)\n\n }\n @ Out, None\n \"\"\"\n if len(outputFileName.strip()) == 0:\n raise IOError(\"MergeCSV class ERROR: the outputFileName string is empty!\")\n options['returnAsDict'] = False\n self.allHeaders, dataFinal = self.mergeCsvAndReturnOutput(options)\n np.savetxt(outputFileName,dataFinal,delimiter=\",\",header=\",\".join(self.allHeaders))\n\n def mergeCsvAndReturnOutput(self, options = {}):\n \"\"\"\n Method that is going to read multiple csvs and return the merged results\n @ In, options, dict, optional, dictionary of options: { \"variablesToExpandFrom\":\"aVariable\" (a variable through which the \"shorter\" CSVs need to be expanded)\n \"sameKeySuffix\":\"integerCounter or filename (default)\" (if in the CSVs that need to be merged there are\n multiple occurrences of the same key, the code will append either a letter (A,B,C,D,etc) or an integer counter (1,2,3,etc)\n \"returnAsDict\":True/False, True if the merged values need to be returned as a dictionary, otherwise it returns a tuple\n }\n @ Out, mergedReturn, dict or tuple, merged csvs values (see \"returnAsDict\" option above to understand what you get)\n \"\"\"\n # set some default\n sameKeySuffix = \"filename\"\n variablesToExpandFrom = []\n returnAsDict = False\n variablesToExpandFrom.append('time')\n if options:\n if \"sameKeySuffix\" in options.keys():\n sameKeySuffix = options[\"sameKeySuffix\"]\n if 
\"variablesToExpandFrom\" in options.keys():\n variablesToExpandFrom = options[\"variablesToExpandFrom\"]\n if \"returnAsDict\" in options.keys():\n returnAsDict = bool(options[\"returnAsDict\"])\n setHeaders = list(set(self.allHeaders))\n headerCounts = {}\n headerAppender = {}\n for variable in setHeaders:\n headerCounts[variable] = self.allHeaders.count(variable)\n headerAppender[variable] = 0\n self.allHeaders = []\n variablesToExpandFromValues = {}\n variablesToExpandFromValuesSet = []\n for filename, data in self.dataContainer.items():\n for varToExpandFrom in variablesToExpandFrom:\n if varToExpandFrom in data[\"headers\"]:\n variablesToExpandFromValues[filename] = data[\"data\"][:,data[\"headers\"].index(varToExpandFrom)]\n variablesToExpandFromValuesSet.extend(variablesToExpandFromValues[filename].tolist())\n else:\n print(\"in file \" + filename + \"the variable \"+ varToExpandFrom + \" has not been found\")\n for cnt, head in enumerate(data[\"headers\"]):\n if headerCounts[head] > 1 and head not in variablesToExpandFrom:\n if not self.mergeSameVariables:\n #append a suffix\n if sameKeySuffix == \"filename\":\n self.dataContainer[filename][\"headers\"][cnt] = head + \"_\" + os.path.split(filename)[-1].split(\".\")[0]\n else:\n headerAppender[variable] += 1\n self.dataContainer[filename][\"headers\"][cnt] = head + \"_\" + str(headerAppender[variable])\n self.allHeaders.extend(data[\"headers\"])\n # at this point all the headers are unique\n variablesToExpandFromValuesSet = list(set(variablesToExpandFromValuesSet))\n variablesToExpandFromValuesSet = sorted(variablesToExpandFromValuesSet, key=float)\n variablesToExpandFromValuesSet = np.array(variablesToExpandFromValuesSet)\n variablesToExpandFromValuesSet.shape = (len(variablesToExpandFromValuesSet),1)\n if len(variablesToExpandFromValues.keys()) != len(self.dataContainer.keys()):\n raise Exception (\"the variables \"+str(variablesToExpandFrom) + \" have not been found in all files!!!!\")\n dataFinal = np.zeros((len(variablesToExpandFromValuesSet),len(self.allHeaders)))\n # we use a neighbors.KNeighborsRegressor to merge the csvs\n from sklearn import neighbors\n nearest = neighbors.KNeighborsRegressor(n_neighbors=1)\n for filename, data in self.dataContainer.items():\n for _, varToExpandFrom in enumerate(variablesToExpandFrom):\n if varToExpandFrom in data[\"headers\"]:\n index = data[\"headers\"].index(varToExpandFrom)\n dataFinal[:,index] = variablesToExpandFromValuesSet[:,index]\n break\n for headIndex, head in enumerate(data[\"headers\"]):\n if head not in variablesToExpandFrom:\n nearest.fit(np.atleast_2d(data[\"data\"][:,index]).T,data[\"data\"][:,headIndex]) #[nsamples,nfeatures]\n dataFinal[:,self.allHeaders.index(head)] = nearest.predict(variablesToExpandFromValuesSet)[:]\n if returnAsDict:\n mergedReturn = {}\n for variableToAdd in self.allHeaders:\n if self.mergeSameVariables:\n if variableToAdd not in mergedReturn.keys():\n mergedReturn[variableToAdd] = dataFinal[:,self.allHeaders.index(variableToAdd)]\n else:\n mergedReturn[variableToAdd] = dataFinal[:,self.allHeaders.index(variableToAdd)] # dataFinal[:,cnt]\n else:\n mergedReturn = (self.allHeaders,dataFinal)\n return mergedReturn\n", "# Copyright 2017 Battelle Energy Alliance, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in 
writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n Created on May 8, 2018\n\n @author: talbpaul\n Originally from SupervisedLearning.py, split in PR #650 in July 2018\n Class implementation for the GaussPolynomialRom\n\"\"\"\n#for future compatibility with Python 3--------------------------------------------------------------\nfrom __future__ import division, print_function, unicode_literals, absolute_import\n#End compatibility block for Python 3----------------------------------------------------------------\n\nfrom numpy import average\nimport sys\n\n#External Modules------------------------------------------------------------------------------------\nimport numpy as np\nfrom collections import OrderedDict\nfrom scipy import spatial\n#External Modules End--------------------------------------------------------------------------------\n\n#Internal Modules------------------------------------------------------------------------------------\nfrom .SupervisedLearning import supervisedLearning\n#Internal Modules End--------------------------------------------------------------------------------\n\n\n\nclass GaussPolynomialRom(supervisedLearning):\n \"\"\"\n Gauss Polynomial Rom Class\n \"\"\"\n def __confidenceLocal__(self,featureVals):\n \"\"\"\n This should return an estimation of the quality of the prediction.\n @ In, featureVals, 2-D numpy array, [n_samples,n_features]\n @ Out, confidence, float, the confidence\n \"\"\"\n pass\n\n def __resetLocal__(self):\n \"\"\"\n Reset ROM. After this method the ROM should be described only by the initial parameter settings\n @ In, None\n @ Out, None\n \"\"\"\n pass\n\n def __returnCurrentSettingLocal__(self):\n \"\"\"\n Returns a dictionary with the parameters and their current values\n @ In, None\n @ Out, params, dict, dictionary of parameter names and current values\n \"\"\"\n pass\n\n def __initLocal__(self):\n \"\"\"\n Method used to add additional initialization features used by pickling\n @ In, None\n @ Out, None\n \"\"\"\n pass\n\n def __init__(self,messageHandler,**kwargs):\n \"\"\"\n A constructor that will appropriately intialize a supervised learning object\n @ In, messageHandler, MessageHandler object, it is in charge of raising errors, and printing messages\n @ In, kwargs, dict, an arbitrary list of kwargs\n @ Out, None\n \"\"\"\n supervisedLearning.__init__(self,messageHandler,**kwargs)\n self.initialized = False #only True once self.initialize has been called\n self.interpolator = None #FIXME what's this?\n self.printTag = 'GAUSSgpcROM('+'-'.join(self.target)+')'\n self.indexSetType = None #string of index set type, TensorProduct or TotalDegree or HyperbolicCross\n self.indexSetVals = [] #list of tuples, custom index set to use if CustomSet is the index set type\n self.maxPolyOrder = None #integer of relative maximum polynomial order to use in any one dimension\n self.itpDict = {} #dict of quad,poly,weight choices keyed on varName\n self.norm = None #combined distribution normalization factors (product)\n self.sparseGrid = None #Quadratures.SparseGrid object, has points and weights\n self.distDict = None #dict{varName: Distribution object}, has point conversion methods based on quadrature\n self.quads = None #dict{varName: Quadrature object}, has keys for distribution's point conversion methods\n self.polys = 
None #dict{varName: OrthoPolynomial object}, has polynomials for evaluation\n self.indexSet = None #array of tuples, polynomial order combinations\n self.polyCoeffDict = None #dict{index set point, float}, polynomial combination coefficients for each combination\n self.numRuns = None #number of runs to generate ROM; default is len(self.sparseGrid)\n self.itpDict = {} #dict{varName: dict{attribName:value} }\n self.featv = None # list of feature variables\n self.targv = None # list of target variables\n self.mean = None\n self.variance = None\n self.sdx = None\n self.partialVariances = None\n self.sparseGridType = 'smolyak' #type of sparse quadrature to use,default smolyak\n self.sparseQuadOptions = ['smolyak','tensor'] # choice of sparse quadrature construction methods\n\n for key,val in kwargs.items():\n if key=='IndexSet':\n self.indexSetType = val\n elif key=='IndexPoints':\n self.indexSetVals=[]\n strIndexPoints = val.strip()\n strIndexPoints = strIndexPoints.replace(' ','').replace('\\n','').strip('()')\n strIndexPoints = strIndexPoints.split('),(')\n self.raiseADebug(strIndexPoints)\n for s in strIndexPoints:\n self.indexSetVals.append(tuple(int(i) for i in s.split(',')))\n self.raiseADebug('points',self.indexSetVals)\n elif key=='PolynomialOrder':\n self.maxPolyOrder = int(val)\n elif key=='Interpolation':\n for var,value in val.items():\n self.itpDict[var]={'poly' :'DEFAULT',\n 'quad' :'DEFAULT',\n 'weight':'1'}\n for atrName,atrVal in value.items():\n if atrName in ['poly','quad','weight']:\n self.itpDict[var][atrName]=atrVal\n else:\n self.raiseAnError(IOError,'Unrecognized option: '+atrName)\n elif key == 'SparseGrid':\n if val.lower() not in self.sparseQuadOptions:\n self.raiseAnError(IOError,'No such sparse quadrature implemented: %s. Options are %s.' 
%(val,str(self.sparseQuadOptions)))\n self.sparseGridType = val\n\n if not self.indexSetType:\n self.raiseAnError(IOError,'No IndexSet specified!')\n if self.indexSetType=='Custom':\n if len(self.indexSetVals)<1:\n self.raiseAnError(IOError,'If using CustomSet, must specify points in <IndexPoints> node!')\n else:\n for i in self.indexSetVals:\n if len(i)<len(self.features):\n self.raiseAnError(IOError,'CustomSet points',i,'is too small!')\n if not self.maxPolyOrder:\n self.raiseAnError(IOError,'No maxPolyOrder specified!')\n if self.maxPolyOrder < 1:\n self.raiseAnError(IOError,'Polynomial order cannot be less than 1 currently.')\n\n def writeXML(self, writeTo, requests = None, skip = None):\n \"\"\"\n Adds requested entries to XML node.\n @ In, writeTo, xmlUtils.StaticXmlElement, StaticXmlElement to write to\n @ In, requests, list, optional, list of requests for whom to write\n @ In, skip, list, optional, list of targets to skip (often a pivot parameter)\n @ Out, None\n \"\"\"\n if not self.amITrained:\n self.raiseAnError(RuntimeError,'ROM is not yet trained!')\n if skip is None:\n skip = []\n #establish what we can handle, and how\n scalars = ['mean','expectedValue','variance','samples']\n vectors = ['polyCoeffs','partialVariance','sobolIndices','sobolTotalIndices']\n canDo = scalars + vectors\n #lowercase for convenience\n scalars = list(s.lower() for s in scalars)\n vectors = list(v.lower() for v in vectors)\n for target in self.target:\n if target in skip:\n continue\n if requests is None:\n requests = canDo\n # loop over the requested items\n for request in requests:\n request=request.strip()\n if request.lower() in scalars:\n if request.lower() in ['mean','expectedvalue']:\n val = self.__mean__(target)\n elif request.lower() == 'variance':\n val = self.__variance__(target)\n elif request.lower() == 'samples':\n if self.numRuns != None:\n val = self.numRuns\n else:\n val = len(self.sparseGrid)\n writeTo.addScalar(target,request,val)\n elif request.lower() in vectors:\n if request.lower() == 'polycoeffs':\n valueDict = OrderedDict()\n valueDict['inputVariables'] = ','.join(self.features)\n keys = list(self.polyCoeffDict[target].keys())\n keys.sort()\n for key in keys:\n valueDict['_'+'_'.join(str(k) for k in key)+'_'] = self.polyCoeffDict[target][key]\n elif request.lower() in ['partialvariance', 'sobolindices', 'soboltotalindices']:\n sobolIndices, partialVars = self.getSensitivities(target)\n sobolTotals = self.getTotalSensitivities(sobolIndices)\n #sort by value\n entries = []\n if request.lower() in ['partialvariance','sobolindices']:\n #these both will have same sort\n for key in sobolIndices.keys():\n entries.append( ('.'.join(key),partialVars[key],key) )\n elif request.lower() in ['soboltotalindices']:\n for key in sobolTotals.keys():\n entries.append( ('.'.join(key),sobolTotals[key],key) )\n entries.sort(key=lambda x: abs(x[1]),reverse=True)\n #add entries to results list\n valueDict=OrderedDict()\n for entry in entries:\n name,_,key = entry\n if request.lower() == 'partialvariance':\n valueDict[name] = partialVars[key]\n elif request.lower() == 'sobolindices':\n valueDict[name] = sobolIndices[key]\n elif request.lower() == 'soboltotalindices':\n valueDict[name] = sobolTotals[key]\n writeTo.addVector(target,request,valueDict)\n else:\n self.raiseAWarning('ROM does not know how to return \"'+request+'\". 
Skipping...')\n\n def _localNormalizeData(self,values,names,feat):\n \"\"\"\n Overwrites default normalization procedure.\n @ In, values, list(float), unused\n @ In, names, list(string), unused\n @ In, feat, string, feature to (not) normalize\n @ Out, None\n \"\"\"\n self.muAndSigmaFeatures[feat] = (0.0,1.0)\n\n def interpolationInfo(self):\n \"\"\"\n Returns the interpolation information\n @ In, None\n @ Out, interpValues, dict, dictionary of interpolation information\n \"\"\"\n interpValues = dict(self.itpDict)\n return interpValues\n\n def initialize(self,idict):\n \"\"\"\n Initializes the instance.\n @ In, idict, dict, objects needed to initalize\n @ Out, None\n \"\"\"\n self.sparseGrid = idict.get('SG' ,None)\n self.distDict = idict.get('dists' ,None)\n self.quads = idict.get('quads' ,None)\n self.polys = idict.get('polys' ,None)\n self.indexSet = idict.get('iSet' ,None)\n self.numRuns = idict.get('numRuns' ,None)\n #make sure requireds are not None\n if self.sparseGrid is None:\n self.raiseAnError(RuntimeError,'Tried to initialize without key object \"SG\" ')\n if self.distDict is None:\n self.raiseAnError(RuntimeError,'Tried to initialize without key object \"dists\"')\n if self.quads is None:\n self.raiseAnError(RuntimeError,'Tried to initialize without key object \"quads\"')\n if self.polys is None:\n self.raiseAnError(RuntimeError,'Tried to initialize without key object \"polys\"')\n if self.indexSet is None:\n self.raiseAnError(RuntimeError,'Tried to initialize without key object \"iSet\" ')\n self.initialized = True\n\n def _multiDPolyBasisEval(self,orders,pts):\n \"\"\"\n Evaluates each polynomial set at given orders and points, returns product.\n @ In orders, tuple(int), polynomial orders to evaluate\n @ In pts, tuple(float), values at which to evaluate polynomials\n @ Out, tot, float, product of polynomial evaluations\n \"\"\"\n tot=1\n for i,(o,p) in enumerate(zip(orders,pts)):\n varName = self.sparseGrid.varNames[i]\n tot*=self.polys[varName](o,p)\n return tot\n\n def __trainLocal__(self,featureVals,targetVals):\n \"\"\"\n Trains ROM.\n @ In, featureVals, np.ndarray, feature values\n @ In, targetVals, np.ndarray, target values\n \"\"\"\n #check to make sure ROM was initialized\n if not self.initialized:\n self.raiseAnError(RuntimeError,'ROM has not yet been initialized! 
Has the Sampler associated with this ROM been used?')\n self.raiseADebug('training',self.features,'->',self.target)\n self.featv, self.targv = featureVals,targetVals\n self.polyCoeffDict = {key: dict({}) for key in self.target}\n #check equality of point space\n self.raiseADebug('...checking required points are available...')\n fvs = []\n tvs = {key: list({}) for key in self.target}\n sgs = list(self.sparseGrid.points())\n missing=[]\n kdTree = spatial.KDTree(featureVals)\n #TODO this is slowest loop in this algorithm, by quite a bit.\n for pt in sgs:\n #KDtree way\n distances,idx = kdTree.query(pt,k=1,distance_upper_bound=1e-9) #FIXME how to set the tolerance generically?\n #KDTree repots a \"not found\" as at infinite distance with index len(data)\n if idx >= len(featureVals):\n found = False\n else:\n found = True\n point = tuple(featureVals[idx])\n #end KDTree way\n if found:\n fvs.append(point)\n for cnt, target in enumerate(self.target):\n tvs[target].append(targetVals[idx,cnt])\n else:\n missing.append(pt)\n if len(missing)>0:\n msg='\\n'\n msg+='DEBUG missing feature vals:\\n'\n for i in missing:\n msg+=' '+str(i)+'\\n'\n self.raiseADebug(msg)\n self.raiseADebug('sparse:',sgs)\n self.raiseADebug('solns :',fvs)\n self.raiseAnError(IOError,'input values do not match required values!')\n #make translation matrix between lists, also actual-to-standardized point map\n self.raiseADebug('...constructing translation matrices...')\n translate={}\n for i in range(len(fvs)):\n translate[tuple(fvs[i])]=sgs[i]\n standardPoints = {}\n for pt in fvs:\n stdPt = []\n for i,p in enumerate(pt):\n varName = self.sparseGrid.varNames[i]\n stdPt.append( self.distDict[varName].convertToQuad(self.quads[varName].type,p) )\n standardPoints[tuple(pt)] = stdPt[:]\n #make polynomials\n self.raiseADebug('...constructing polynomials...')\n self.norm = np.prod(list(self.distDict[v].measureNorm(self.quads[v].type) for v in self.distDict.keys()))\n for i,idx in enumerate(self.indexSet):\n idx=tuple(idx)\n for target in self.target:\n self.polyCoeffDict[target][idx]=0\n wtsum=0\n for pt,soln in zip(fvs,tvs[target]):\n tupPt = tuple(pt)\n stdPt = standardPoints[tupPt]\n wt = self.sparseGrid.weights(translate[tupPt])\n self.polyCoeffDict[target][idx]+=soln*self._multiDPolyBasisEval(idx,stdPt)*wt\n self.polyCoeffDict[target][idx]*=self.norm\n self.amITrained=True\n self.raiseADebug('...training complete!')\n\n def printPolyDict(self,printZeros=False):\n \"\"\"\n Human-readable version of the polynomial chaos expansion.\n @ In, printZeros, bool, optional, optional flag for printing even zero coefficients\n @ Out, None\n \"\"\"\n for target in self.target:\n data=[]\n for idx,val in self.polyCoeffDict[target].items():\n if abs(val) > 1e-12 or printZeros:\n data.append([idx,val])\n data.sort()\n self.raiseADebug('polyDict for ['+target+'] with inputs '+str(self.features)+':')\n for idx,val in data:\n self.raiseADebug(' '+str(idx)+' '+str(val))\n\n def checkForNonzeros(self,tol=1e-12):\n \"\"\"\n Checks poly coefficient dictionary for nonzero entries.\n @ In, tol, float, optional, the tolerance under which is zero (default 1e-12)\n @ Out, data, dict, {'target1':list(tuple),'target2':list(tuple)}: the indices and values of the nonzero coefficients for each target\n \"\"\"\n data = dict.fromkeys(self.target,[])\n for target in self.target:\n for idx,val in self.polyCoeffDict[target].items():\n if round(val,11) !=0:\n data[target].append([idx,val])\n return data\n\n def __mean__(self, targ=None):\n \"\"\"\n Returns the mean of 
the ROM.\n @ In, None\n @ In, targ, str, optional, the target for which the __mean__ needs to be computed\n @ Out, __mean__, float, the mean\n \"\"\"\n return self.__evaluateMoment__(1,targ)\n\n def __variance__(self, targ=None):\n \"\"\"\n returns the variance of the ROM.\n @ In, None\n @ In, targ, str, optional, the target for which the __variance__ needs to be computed\n @ Out, __variance__, float, variance\n \"\"\"\n mean = self.__evaluateMoment__(1,targ)\n return self.__evaluateMoment__(2,targ) - mean*mean\n\n def __evaluateMoment__(self,r, targ=None):\n \"\"\"\n Use the ROM's built-in method to calculate moments.\n @ In, r, int, moment to calculate\n @ In, targ, str, optional, the target for which the moment needs to be computed\n @ Out, tot, float, evaluation of moment\n \"\"\"\n target = self.target[0] if targ is None else targ\n #TODO is there a faster way still to do this?\n if r==1:\n return self.polyCoeffDict[target][tuple([0]*len(self.features))]\n elif r==2:\n return sum(s**2 for s in self.polyCoeffDict[target].values())\n tot=0\n for pt,wt in self.sparseGrid:\n tot+=self.__evaluateLocal__([pt])[target]**r*wt\n tot*=self.norm\n return tot\n\n def __evaluateLocal__(self,featureVals):\n \"\"\"\n Evaluates a point.\n @ In, featureVals, list, of values at which to evaluate the ROM\n @ Out, returnDict, dict, the evaluated point for each target\n \"\"\"\n featureVals=featureVals[0]\n returnDict={}\n stdPt = np.zeros(len(featureVals))\n for p,pt in enumerate(featureVals):\n varName = self.sparseGrid.varNames[p]\n stdPt[p] = self.distDict[varName].convertToQuad(self.quads[varName].type,pt)\n for target in self.target:\n tot=0\n for idx,coeff in self.polyCoeffDict[target].items():\n tot+=coeff*self._multiDPolyBasisEval(idx,stdPt)\n returnDict[target] = tot\n return returnDict\n\n def _printPolynomial(self):\n \"\"\"\n Prints each polynomial for each coefficient.\n @ In, None\n @ Out, None\n \"\"\"\n for target in self.target:\n self.raiseADebug('Target:'+target+'.Coeff Idx:')\n for idx,coeff in self.polyCoeffDict[target].items():\n if abs(coeff)<1e-12:\n continue\n self.raiseADebug(str(idx))\n for i,ix in enumerate(idx):\n var = self.features[i]\n self.raiseADebug(self.polys[var][ix]*coeff,'|',var)\n\n def __returnInitialParametersLocal__(self):\n \"\"\"\n Returns a dictionary with the parameters and their initial values\n @ In, None\n @ Out, params, dict, dictionary of parameter names and initial values\n \"\"\"\n params = {}\n return params\n\n def getSensitivities(self,targ=None):\n \"\"\"\n Calculates the Sobol indices (percent partial variances) of the terms in this expansion.\n @ In, targ, str, optional, the target for which the moment needs to be computed\n @ Out, getSensitivities, tuple(dict), Sobol indices and partial variances keyed by subset\n \"\"\"\n target = self.target[0] if targ is None else targ\n totVar = self.__variance__(target)\n partials = {}\n #calculate partial variances\n self.raiseADebug('Calculating partial variances...')\n for poly,coeff in self.polyCoeffDict[target].items():\n #use poly to determine subset\n subset = self._polyToSubset(poly)\n # skip mean\n if len(subset) < 1:\n continue\n subset = tuple(subset)\n if subset not in partials.keys():\n partials[subset] = 0\n partials[subset] += coeff*coeff\n #calculate Sobol indices\n indices = {}\n for subset,partial in partials.items():\n indices[subset] = partial / totVar\n return (indices,partials)\n\n def getTotalSensitivities(self,indices):\n \"\"\"\n Given the Sobol global sensitivity indices, 
calculates the total indices for each subset.\n @ In, indices, dict, tuple(subset):float(index)\n @ Out, totals, dict, tuple(subset):float(index)\n \"\"\"\n #total index is the sum of all Sobol indices in which a subset belongs\n totals={}\n for subset in indices.keys():\n setSub = set(subset)\n totals[subset] = 0\n for checkSubset in indices.keys():\n setCheck = set(checkSubset)\n if setSub.issubset(setCheck):\n totals[subset] += indices[checkSubset]\n return totals\n\n def _polyToSubset(self,poly):\n \"\"\"\n Given a tuple with polynomial orders, returns the subset it belongs exclusively to\n @ In, poly, tuple(int), polynomial index set entry\n @ Out, subset, tuple(str), subset\n \"\"\"\n boolRep = tuple(False if poly[i]==0 else True for i in range(len(poly)))\n subset = []\n for i,p in enumerate(boolRep):\n if p:\n subset.append(self.features[i])\n return tuple(subset)\n", "# Copyright 2017 Battelle Energy Alliance, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n This module contains the Adaptive Dynamic Event Tree and\n the Adaptive Hybrid Dynamic Event Tree sampling strategies\n\n Created on May 21, 2016\n @author: alfoa\n supercedes Samplers.py from alfoa\n\"\"\"\n#for future compatibility with Python 3--------------------------------------------------------------\nfrom __future__ import division, print_function, unicode_literals, absolute_import\n#End compatibility block for Python 3----------------------------------------------------------------\n\n#External Modules------------------------------------------------------------------------------------\nimport sys\nimport copy\nimport numpy as np\nfrom operator import mul\nfrom functools import reduce\nimport xml.etree.ElementTree as ET\nimport itertools\n#External Modules End--------------------------------------------------------------------------------\n\n#Internal Modules------------------------------------------------------------------------------------\nfrom .DynamicEventTree import DynamicEventTree\nfrom .LimitSurfaceSearch import LimitSurfaceSearch\nfrom utils import utils\nimport utils.TreeStructure as ETS\nimport MessageHandler\n#Internal Modules End--------------------------------------------------------------------------------\n\nclass AdaptiveDynamicEventTree(DynamicEventTree, LimitSurfaceSearch):\n \"\"\"\n This class is aimed to perform a supervised Adaptive Dynamic Event Tree sampling strategy\n \"\"\"\n\n @classmethod\n def getInputSpecification(cls):\n \"\"\"\n Method to get a reference to a class that specifies the input data for\n class cls.\n @ In, cls, the class for which we are retrieving the specification\n @ Out, inputSpecification, InputData.ParameterInput, class to use for\n specifying input of cls.\n \"\"\"\n inputSpecification = super(AdaptiveDynamicEventTree, cls).getInputSpecification()\n\n return inputSpecification\n\n def __init__(self):\n \"\"\"\n Default Constructor that will initialize member variables with reasonable\n defaults or empty lists/dictionaries where applicable.\n @ In, 
None\n @ Out, None\n \"\"\"\n DynamicEventTree.__init__(self) # init DET\n LimitSurfaceSearch.__init__(self) # init Adaptive\n self.detAdaptMode = 1 # Adaptive Dynamic Event Tree method (=1 -> DynamicEventTree as hybridsampler and subsequent LimitSurfaceSearch,=2 -> DynamicEventTree online adaptive)\n self.noTransitionStrategy = 1 # Strategy in case no transitions have been found by DET (1 = 'Probability MC', 2 = Increase the grid exploration)\n self.insertAdaptBPb = True # Add Probabability THs requested by adaptive in the initial grid (default = False)\n self.startAdaptive = False # Flag to trigger the begin of the adaptive limit surface search\n self.adaptiveReady = False # Flag to store the response of the LimitSurfaceSearch.localStillReady method\n self.investigatedPoints = [] # List containing the points that have been already investigated\n self.completedHistCnt = 1 # Counter of the completed histories\n self.hybridDETstrategy = None # Integer flag to turn the hybrid strategy on:\n # None -> No hybrid approach,\n # 1 -> the epistemic variables are going to be part of the limit surface search\n # 2 -> the epistemic variables are going to be treated by a normal hybrid DET approach and the LimitSurface search\n # will be performed on each epistemic tree (n LimitSurfaces)\n self.foundEpistemicTree = False # flag that testifies if an epistemic tree has been found (Adaptive Hybrid DET)\n self.actualHybridTree = '' # name of the root tree used in self.hybridDETstrategy=2 to check which Tree needs to be used for the current LS search\n self.sortedListOfHists = [] # sorted list of histories\n\n @staticmethod\n def _checkIfRunning(treeValues):\n \"\"\"\n Static method (no self) that checks if a job is running\n @ In, treeValues, TreeStructure.Node, the node in which the running info are stored\n @ Out, _checkIfRunning, bool, is it running?\n \"\"\"\n return not treeValues['runEnded']\n\n @staticmethod\n def _checkEnded(treeValues):\n \"\"\"\n Static method (no self) that checks if a job finished to run\n @ In, treeValues, TreeStructure.Node, the node in which the running info are stored\n @ Out, _checkEnded, bool, is it finished?\n \"\"\"\n return treeValues['runEnded']\n\n @staticmethod\n def _checkCompleteHistory(treeValues):\n \"\"\"\n Static method (no self) that checks if a 'branch' represents a completed history\n @ In, treeValues, TreeStructure.Node, the node in which the running info are stored\n @ Out, _checkCompleteHistory, bool, is it a completed history (hit the last thershold?)\n \"\"\"\n return treeValues['completedHistory']\n\n def _localWhatDoINeed(self):\n \"\"\"\n This method is a local mirror of the general whatDoINeed method.\n It is implmented by the samplers that need to request special objects\n @ In, None\n @ Out, needDict, dict, dictionary listing needed objects\n \"\"\"\n #adaptNeedInst = self.limitSurfaceInstances.values()[-1]._localWhatDoINeed()\n needDict = dict(itertools.chain(LimitSurfaceSearch._localWhatDoINeed(self).items(),DynamicEventTree._localWhatDoINeed(self).items()))\n return needDict\n\n def _checkIfStartAdaptive(self):\n \"\"\"\n Function that checks if the adaptive needs to be started (mode 1)\n @ In, None\n @ Out, None\n \"\"\"\n if not self.startAdaptive:\n self.startAdaptive = True\n for treer in self.TreeInfo.values():\n for _ in treer.iterProvidedFunction(self._checkIfRunning):\n self.startAdaptive = False\n break\n if not self.startAdaptive:\n break\n\n def _checkClosestBranch(self):\n \"\"\"\n Function that checks the closest branch already 
evaluated\n @ In, None\n @ Out, returnTuple, tuple, closest branch info:\n - if self.hybridDETstrategy and branch found -> returnTuple = (valBranch,cdfValues,treer)\n - if self.hybridDETstrategy and branch not found -> returnTuple = (None,cdfValues,treer)\n - if not self.hybridDETstrategy and branch found -> returnTuple = (valBranch,cdfValues)\n - if not self.hybridDETstrategy and branch not found -> returnTuple = (None,cdfValues)\n \"\"\"\n from sklearn import neighbors\n\n # compute cdf of sampled vars\n lowerCdfValues = {}\n cdfValues = {}\n self.raiseADebug(\"Check for closest branch:\")\n self.raiseADebug(\"_\"*50)\n for key,value in self.values.items():\n self.raiseADebug(\"Variable name : \"+str(key))\n self.raiseADebug(\"Distrbution name: \"+str(self.toBeSampled[key]))\n if key not in self.epistemicVariables.keys():\n cdfValues[key] = self.distDict[key].cdf(value)\n try:\n index = utils.first(np.atleast_1d(np.asarray(self.branchProbabilities[key]) <= cdfValues[key]).nonzero())[-1]\n val = self.branchProbabilities[key][index]\n except (ValueError, IndexError):\n val = None\n lowerCdfValues[key] = val\n self.raiseADebug(\"CDF value : \"+str(cdfValues[key]))\n self.raiseADebug(\"Lower CDF found : \"+str(lowerCdfValues[key]))\n self.raiseADebug(\"_\"*50)\n #if hybrid DET, we need to find the correct tree that matches the values of the epistemic\n if self.hybridDETstrategy is not None:\n self.foundEpistemicTree, treer, compareDict = False, None, dict.fromkeys(self.epistemicVariables.keys(),False)\n for tree in self.TreeInfo.values():\n epistemicVars = tree.getrootnode().get(\"hybridsamplerCoordinate\")[0]['SampledVars']\n for key in self.epistemicVariables.keys():\n compareDict[key] = utils.compare(epistemicVars[key],self.values[key])\n if all(compareDict.values()):\n # we found the right epistemic tree\n self.foundEpistemicTree, treer = True, tree\n break\n else:\n treer = utils.first(self.TreeInfo.values())\n\n # check if in the adaptive points already explored (if not push into the grid)\n if not self.insertAdaptBPb:\n candidatesBranch = []\n # check if adaptive point is better choice -> TODO: improve efficiency\n for invPoint in self.investigatedPoints:\n pbth = [invPoint[self.toBeSampled[key]] for key in cdfValues.keys()]\n if all(i <= pbth[cnt] for cnt,i in enumerate(cdfValues.values())):\n candidatesBranch.append(invPoint)\n if len(candidatesBranch) > 0:\n if None in lowerCdfValues.values():\n lowerCdfValues = candidatesBranch[0]\n for invPoint in candidatesBranch:\n pbth = [invPoint[self.toBeSampled[key]] for key in cdfValues.keys()]\n if all(i >= pbth[cnt] for cnt,i in enumerate(lowerCdfValues.values())):\n lowerCdfValues = invPoint\n # Check if The adaptive point requested is outside the so far run grid; in case return None\n # In addition, if Adaptive Hybrid DET, if treer is None, we did not find any tree\n # in the epistemic space => we need to create another one\n if None in lowerCdfValues.values() or treer is None:\n if self.hybridDETstrategy is not None:\n returnTuple = None, cdfValues, treer\n else:\n returnTuple = None, cdfValues\n return returnTuple\n\n nntrain, mapping = None, {}\n for ending in treer.iterProvidedFunction(self._checkEnded):\n #already ended branches, create training set for nearest algorithm (take coordinates <= of cdfValues) -> TODO: improve efficiency\n pbth = [ending.get('SampledVarsPb')[key] for key in lowerCdfValues.keys()]\n if all(pbth[cnt] <= i for cnt,i in enumerate(lowerCdfValues.values())):\n if nntrain is None:\n nntrain = 
np.zeros((1,len(cdfValues.keys())))\n nntrain[0,:] = np.array(copy.copy(pbth))\n else:\n nntrain = np.concatenate((nntrain,np.atleast_2d(np.array(copy.copy(pbth)))),axis=0)\n mapping[nntrain.shape[0]] = ending\n if nntrain is not None:\n neigh = neighbors.NearestNeighbors(n_neighbors=len(mapping.keys()))\n neigh.fit(nntrain)\n valBranch = self._checkValidityOfBranch(neigh.kneighbors([list(lowerCdfValues.values())]),mapping)\n if self.hybridDETstrategy is not None:\n returnTuple = valBranch,cdfValues,treer\n else:\n returnTuple = valBranch,cdfValues\n return returnTuple\n else:\n returnTuple = (None,cdfValues,treer) if self.hybridDETstrategy is not None else (None,cdfValues)\n return returnTuple\n\n def _checkValidityOfBranch(self,branchSet,mapping):\n \"\"\"\n Function that checks if the nearest branches found by method _checkClosestBranch are valid\n @ In, branchSet, tuple, tuple of branches\n @ In, mapping, dict, dictionary of candidated branches\n @ Out, validBranch, TreeStructure.Node, most valid branch (if not found, return None)\n \"\"\"\n validBranch = None\n idOfBranches = branchSet[1][-1]\n for closestBranch in idOfBranches:\n if not mapping[closestBranch+1].get('completedHistory') and not mapping[closestBranch+1].get('happenedEvent'):\n validBranch = mapping[closestBranch+1]\n break\n return validBranch\n\n def _retrieveBranchInfo(self,branch):\n \"\"\"\n Function that retrieves the key information from a branch to start a newer calculation\n @ In, branch, TreeStructure.Node, the branch to inquire\n @ Out, info, dict, the dictionary with information on the inputted branch\n \"\"\"\n info = branch.getValues()\n info['actualBranchOnLevel'] = branch.numberBranches()\n info['parentNode'] = branch\n return info\n\n def _constructEndInfoFromBranch(self,model, myInput, info, cdfValues):\n \"\"\"\n Method to construct the end information from the 'info' inputted\n @ In, model, Models object, the model that is used to explore the input space (e.g. 
a code, like RELAP-7)\n @ In, myInput, list, list of inputs for the Models object (passed through the Steps XML block)\n @ In, info, dict, dictionary of information at the end of a branch (information collected by the method _retrieveBranchInfo)\n @ In, cdfValues, dict, dictionary of CDF thresholds reached by the branch that just ended.\n @ Out, None\n \"\"\"\n endInfo = info['parentNode'].get('endInfo')\n del self.inputInfo\n self.counter += 1\n self.branchCountOnLevel = info['actualBranchOnLevel']+1\n # Get Parent node name => the branch name is creating appending to this name a comma and self.branchCountOnLevel counter\n rname = info['parentNode'].get('name') + '-' + str(self.branchCountOnLevel)\n info['parentNode'].add('completedHistory', False)\n self.raiseADebug(str(rname))\n bcnt = self.branchCountOnLevel\n while info['parentNode'].isAnActualBranch(rname):\n bcnt += 1\n rname = info['parentNode'].get('name') + '-' + str(bcnt)\n # create a subgroup that will be appended to the parent element in the xml tree structure\n subGroup = ETS.HierarchicalNode(self.messageHandler,rname)\n subGroup.add('parent', info['parentNode'].get('name'))\n subGroup.add('name', rname)\n self.raiseADebug('cond pb = '+str(info['parentNode'].get('conditionalPbr')))\n condPbC = float(info['parentNode'].get('conditionalPbr'))\n\n # Loop over branchChangedParams (events) and start storing information,\n # such as conditional pb, variable values, into the xml tree object\n branchChangedParamValue = []\n branchChangedParamPb = []\n branchParams = []\n if endInfo:\n for key in endInfo['branchChangedParams'].keys():\n branchParams.append(key)\n branchChangedParamPb.append(endInfo['branchChangedParams'][key]['associatedProbability'][0])\n branchChangedParamValue.append(endInfo['branchChangedParams'][key]['oldValue'][0])\n subGroup.add('branchChangedParam',branchParams)\n subGroup.add('branchChangedParamValue',branchChangedParamValue)\n subGroup.add('branchChangedParamPb',branchChangedParamPb)\n else:\n pass\n # add conditional probability\n subGroup.add('conditionalPbr',condPbC)\n # add initiator distribution info, start time, etc.\n subGroup.add('startTime', info['parentNode'].get('endTime'))\n # initialize the endTime to be equal to the start one... It will modified at the end of this branch\n subGroup.add('endTime', info['parentNode'].get('endTime'))\n # add the branchedLevel dictionary to the subgroup\n # branch calculation info... 
running, queue, etc are set here\n subGroup.add('runEnded',False)\n subGroup.add('running',False)\n subGroup.add('queue',True)\n subGroup.add('completedHistory', False)\n # Append the new branch (subgroup) info to the parentNode in the tree object\n info['parentNode'].appendBranch(subGroup)\n # Fill the values dictionary that will be passed into the model in order to create an input\n # In this dictionary the info for changing the original input is stored\n self.inputInfo = {'prefix':rname,'endTimeStep':info['parentNode'].get('actualEndTimeStep'),\n 'branchChangedParam':subGroup.get('branchChangedParam'),\n 'branchChangedParamValue':subGroup.get('branchChangedParamValue'),\n 'conditionalPb':subGroup.get('conditionalPbr'),\n 'startTime':info['parentNode'].get('endTime'),\n 'RAVEN_parentID':subGroup.get('parent'),\n 'RAVEN_isEnding':True}\n # add the newer branch name to the map\n self.rootToJob[rname] = self.rootToJob[subGroup.get('parent')]\n # check if it is a preconditioned DET sampling, if so add the relative information\n # it exists only in case an hybridDET strategy is activated\n precSampled = info['parentNode'].get('hybridsamplerCoordinate')\n if precSampled:\n self.inputInfo['hybridsamplerCoordinate' ] = copy.deepcopy(precSampled)\n subGroup.add('hybridsamplerCoordinate', copy.copy(precSampled))\n # The probability Thresholds are stored here in the cdfValues dictionary... We are sure that they are whitin the ones defined in the grid\n # check is not needed\n self.inputInfo['initiatorDistribution' ] = [self.toBeSampled[key] for key in cdfValues.keys()]\n self.inputInfo['PbThreshold' ] = list(cdfValues.values())\n self.inputInfo['ValueThreshold' ] = [self.distDict[key].ppf(value) for key,value in cdfValues.items()]\n self.inputInfo['SampledVars' ] = {}\n self.inputInfo['SampledVarsPb' ] = {}\n for varname in self.standardDETvariables:\n self.inputInfo['SampledVars' ][varname] = self.distDict[varname].ppf(cdfValues[varname])\n self.inputInfo['SampledVarsPb'][varname] = cdfValues[varname]\n # constant variables\n self._constantVariables()\n if precSampled:\n for precSample in precSampled:\n self.inputInfo['SampledVars' ].update(precSample['SampledVars'])\n self.inputInfo['SampledVarsPb'].update(precSample['SampledVarsPb'])\n self.inputInfo['PointProbability' ] = reduce(mul, self.inputInfo['SampledVarsPb'].values())*subGroup.get('conditionalPbr')\n self.inputInfo['ProbabilityWeight'] = self.inputInfo['PointProbability' ]\n self.inputInfo.update({'ProbabilityWeight-'+key.strip():value for key,value in self.inputInfo['SampledVarsPb'].items()})\n # add additional edits if needed\n model.getAdditionalInputEdits(self.inputInfo)\n # Add the new input path into the RunQueue system\n newInputs = {'args':[str(self.type)], 'kwargs': dict(self.inputInfo)}\n self.RunQueue['queue'].append(newInputs)\n self.RunQueue['identifiers'].append(self.inputInfo['prefix'])\n for key,value in self.inputInfo.items():\n subGroup.add(key,copy.copy(value))\n if endInfo:\n subGroup.add('endInfo',copy.deepcopy(endInfo))\n\n def localStillReady(self,ready):\n \"\"\"\n first perform some check to understand what it needs to be done possibly perform an early return\n ready is returned\n @ In, ready, bool, a boolean representing whether the caller is prepared for another input.\n @ Out, ready, bool, a boolean representing whether the caller is prepared for another input.\n \"\"\"\n if self.counter == 0:\n return True\n if len(self.RunQueue['queue']) != 0:\n detReady = True\n else:\n detReady = False\n # since the 
RunQueue is empty, let's check if there are still branches running => if not => start the adaptive search\n self._checkIfStartAdaptive()\n if self.startAdaptive:\n data = self.lastOutput.asDataset()\n endingData = data.where(data['RAVEN_isEnding']==True,drop=True)\n numCompletedHistories = len(endingData['RAVEN_isEnding'])\n if numCompletedHistories > self.completedHistCnt:\n lastOutDict = {key:endingData[key].values for key in endingData.keys()}\n if numCompletedHistories > self.completedHistCnt:\n actualLastOutput = self.lastOutput\n self.lastOutput = copy.deepcopy(lastOutDict)\n ready = LimitSurfaceSearch.localStillReady(self,ready)\n self.lastOutput = actualLastOutput\n self.completedHistCnt = numCompletedHistories\n self.raiseAMessage(\"Completed full histories are \"+str(self.completedHistCnt))\n else:\n ready = False\n self.adaptiveReady = ready\n if ready or detReady:\n return True\n else:\n return False\n return detReady\n\n def localGenerateInput(self,model,myInput):\n \"\"\"\n Function to select the next most informative point for refining the limit\n surface search.\n After this method is called, the self.inputInfo should be ready to be sent\n to the model\n @ In, model, model instance, an instance of a model\n @ In, myInput, list, a list of the original needed inputs for the model (e.g. list of files, etc.)\n @ Out, None\n \"\"\"\n if self.startAdaptive == True and self.adaptiveReady == True:\n LimitSurfaceSearch.localGenerateInput(self,model,myInput)\n #the adaptive sampler created the next point sampled vars\n #find the closest branch\n if self.hybridDETstrategy is not None:\n closestBranch, cdfValues, treer = self._checkClosestBranch()\n else:\n closestBranch, cdfValues = self._checkClosestBranch()\n if closestBranch is None:\n self.raiseADebug('An usable branch for next candidate has not been found => create a parallel branch!')\n # add pbthresholds in the grid\n investigatedPoint = {}\n for key,value in cdfValues.items():\n try:\n ind = utils.first(np.atleast_1d(np.asarray(self.branchProbabilities[key]) <= value).nonzero())[-1]\n except (IndexError, ValueError):\n ind = 0\n if value not in self.branchProbabilities[key]:\n self.branchProbabilities[key].insert(ind,value)\n self.branchValues[key].insert(ind,self.distDict[key].ppf(value))\n investigatedPoint[key] = value\n # collect investigated point\n self.investigatedPoints.append(investigatedPoint)\n if closestBranch:\n info = self._retrieveBranchInfo(closestBranch)\n self._constructEndInfoFromBranch(model, myInput, info, cdfValues)\n else:\n # create a new tree, since there are no branches that are close enough to the adaptive request\n elm = ETS.HierarchicalNode(self.messageHandler,self.name + '_' + str(len(self.TreeInfo.keys())+1))\n elm.add('name', self.name + '_'+ str(len(self.TreeInfo.keys())+1))\n elm.add('startTime', 0.0)\n # Initialize the endTime to be equal to the start one...\n # It will modified at the end of each branch\n elm.add('endTime', 0.0)\n elm.add('runEnded',False)\n elm.add('running',True)\n elm.add('queue',False)\n elm.add('completedHistory', False)\n branchedLevel = {}\n for key,value in cdfValues.items():\n branchedLevel[key] = utils.first(np.atleast_1d(np.asarray(self.branchProbabilities[key]) == value).nonzero())[-1]\n # The dictionary branchedLevel is stored in the xml tree too. 
That's because\n # the advancement of the thresholds must follow the tree structure\n elm.add('branchedLevel', branchedLevel)\n if self.hybridDETstrategy is not None and not self.foundEpistemicTree:\n # adaptive hybrid DET and not found a tree in the epistemic space\n # take the first tree and modify the hybridsamplerCoordinate\n hybridSampled = copy.deepcopy(utils.first(self.TreeInfo.values()).getrootnode().get('hybridsamplerCoordinate'))\n for hybridStrategy in hybridSampled:\n for key in self.epistemicVariables.keys():\n if key in hybridStrategy['SampledVars'].keys():\n self.raiseADebug(\"epistemic var \" + str(key)+\" value = \"+str(self.values[key]))\n hybridStrategy['SampledVars'][key] = copy.copy(self.values[key])\n hybridStrategy['SampledVarsPb'][key] = self.distDict[key].pdf(self.values[key])\n hybridStrategy['prefix'] = len(self.TreeInfo.values())+1\n # TODO: find a strategy to recompute the probability weight here (for now == PointProbability)\n hybridStrategy['PointProbability'] = reduce(mul, self.inputInfo['SampledVarsPb'].values())\n hybridStrategy['ProbabilityWeight'] = reduce(mul, self.inputInfo['SampledVarsPb'].values())\n elm.add('hybridsamplerCoordinate', hybridSampled)\n self.inputInfo.update({'ProbabilityWeight-'+key.strip():value for key,value in self.inputInfo['SampledVarsPb'].items()})\n # Here it is stored all the info regarding the DET => we create the info for all the branchings and we store them\n self.TreeInfo[self.name + '_' + str(len(self.TreeInfo.keys())+1)] = ETS.HierarchicalTree(self.messageHandler,elm)\n self._createRunningQueueBeginOne(self.TreeInfo[self.name + '_' + str(len(self.TreeInfo.keys()))],branchedLevel, model,myInput)\n return DynamicEventTree.localGenerateInput(self,model,myInput)\n\n def localInputAndChecks(self,xmlNode, paramInput):\n \"\"\"\n Class specific xml inputs will be read here and checked for validity.\n @ In, xmlNode, xml.etree.ElementTree.Element, The xml element node that will be checked against the available options specific to this Sampler.\n @ In, paramInput, InputData.ParameterInput, the parsed parameters\n @ Out, None\n \"\"\"\n #TODO remove using xmlNode\n #check if the hybrid DET has been activated, in case remove the nodes and treat them separaterly\n hybridNodes = xmlNode.findall(\"HybridSampler\")\n if len(hybridNodes) != 0:\n # check the type of hybrid that needs to be performed\n limitSurfaceHybrid = False\n for elm in hybridNodes:\n samplType = elm.attrib['type'] if 'type' in elm.attrib.keys() else None\n if samplType == 'LimitSurface':\n if len(hybridNodes) != 1:\n self.raiseAnError(IOError,'if one of the HybridSampler is of type \"LimitSurface\", it can not be combined with other strategies. Only one HybridSampler node can be inputted!')\n limitSurfaceHybrid = True\n if limitSurfaceHybrid == True:\n #remove the elements from original xmlNode and check if the types are compatible\n for elm in hybridNodes:\n xmlNode.remove(elm)\n self.hybridDETstrategy = 1\n else:\n self.hybridDETstrategy = 2\n if self.hybridDETstrategy == 2:\n self.raiseAnError(IOError, 'The sheaf of LSs for the Adaptive Hybrid DET is not yet available. 
Use type \"LimitSurface\"!')\n\n DynamicEventTree.localInputAndChecks(self,xmlNode, paramInput)\n # now we put back the nodes into the xmlNode to initialize the LimitSurfaceSearch with those variables as well\n for elm in hybridNodes:\n for child in elm:\n if limitSurfaceHybrid == True:\n xmlNode.append(child)\n if child.tag in ['variable','Distribution']:\n self.epistemicVariables[child.attrib['name']] = None\n LimitSurfaceSearch._readMoreXMLbase(self,xmlNode)\n LimitSurfaceSearch.localInputAndChecks(self,xmlNode, paramInput)\n if 'mode' in xmlNode.attrib.keys():\n if xmlNode.attrib['mode'].lower() == 'online':\n self.detAdaptMode = 2\n elif xmlNode.attrib['mode'].lower() == 'post':\n self.detAdaptMode = 1\n else:\n self.raiseAnError(IOError,'unknown mode ' + xmlNode.attrib['mode'] + '. Available are \"online\" and \"post\"!')\n if 'noTransitionStrategy' in xmlNode.attrib.keys():\n if xmlNode.attrib['noTransitionStrategy'].lower() == 'mc':\n self.noTransitionStrategy = 1\n elif xmlNode.attrib['noTransitionStrategy'].lower() == 'grid':\n self.noTransitionStrategy = 2\n else:\n self.raiseAnError(IOError,'unknown noTransitionStrategy '+xmlNode.attrib['noTransitionStrategy']+'. Available are \"mc\" and \"grid\"!')\n if 'updateGrid' in xmlNode.attrib.keys():\n if utils.stringIsTrue(xmlNode.attrib['updateGrid']):\n self.insertAdaptBPb = True\n # we add an artificial threshold because I need to find a way to prepend a rootbranch into a Tree object\n for val in self.branchProbabilities.values():\n if min(val) != 1e-3:\n val.insert(0, 1e-3)\n\n def _generateDistributions(self,availableDist,availableFunc):\n \"\"\"\n Generates the distrbutions and functions.\n @ In, availDist, dict, dict of distributions\n @ In, availableFunc, dict, dict of functions\n @ Out, None\n \"\"\"\n DynamicEventTree._generateDistributions(self,availableDist,availableFunc)\n\n def localInitialize(self,solutionExport = None):\n \"\"\"\n Will perform all initialization specific to this Sampler. 
For instance,\n creating an empty container to hold the identified surface points, error\n checking the optionally provided solution export and other preset values,\n and initializing the limit surface Post-Processor used by this sampler.\n @ In, solutionExport, DataObjects, optional, a PointSet to hold the solution (a list of limit surface points)\n @ Out, None\n \"\"\"\n if self.detAdaptMode == 2:\n self.startAdaptive = True\n # we first initialize the LimitSurfaceSearch sampler\n LimitSurfaceSearch.localInitialize(self,solutionExport=solutionExport)\n if self.hybridDETstrategy is not None:\n # we are running an adaptive hybrid DET and not only an adaptive DET\n if self.hybridDETstrategy == 1:\n gridVector = self.limitSurfacePP.gridEntity.returnParameter(\"gridVectors\")\n # construct an hybrid DET through an XML node\n distDict, xmlNode = {}, ET.fromstring('<InitNode> <HybridSampler type=\"Grid\" name=\"none\"/> </InitNode>')\n for varName, dist in self.distDict.items():\n if varName.replace('<distribution>','') in self.epistemicVariables.keys():\n # found an epistemic\n varNode = ET.Element('Distribution' if varName.startswith('<distribution>') else 'variable',{'name':varName.replace('<distribution>','')})\n varNode.append(ET.fromstring(\"<distribution>\"+dist.name.strip()+\"</distribution>\"))\n distDict[dist.name.strip()] = self.distDict[varName]\n varNode.append(ET.fromstring('<grid construction=\"custom\" type=\"value\">'+' '.join([str(elm) for elm in utils.first(gridVector.values())[varName.replace('<distribution>','')]])+'</grid>'))\n xmlNode.find(\"HybridSampler\").append(varNode)\n #TODO, need to pass real paramInput\n self._localInputAndChecksHybrid(xmlNode, paramInput=None)\n for hybridsampler in self.hybridStrategyToApply.values():\n hybridsampler._generateDistributions(distDict, {})\n DynamicEventTree.localInitialize(self)\n if self.hybridDETstrategy == 2:\n self.actualHybridTree = utils.first(self.TreeInfo.keys())\n self._endJobRunnable = sys.maxsize\n\n def generateInput(self,model,oldInput):\n \"\"\"\n This method has to be overwritten to provide the specialization for the specific sampler\n The model instance in might be needed since, especially for external codes,\n only the code interface possesses the dictionary for reading the variable definition syntax\n @ In, model, model instance, it is the instance of a RAVEN model\n @ In, oldInput, list, a list of the original needed inputs for the model (e.g. list of files, etc. etc)\n @ Out, generateInput, tuple(0,list), list containing the new inputs -in reality it is the model that returns this; the Sampler generates the value to be placed in the input of the model.\n \"\"\"\n return DynamicEventTree.generateInput(self, model, oldInput)\n\n def localFinalizeActualSampling(self,jobObject,model,myInput):\n \"\"\"\n General function (available to all samplers) that finalize the sampling\n calculation just ended. In this case (DET), The function reads the\n information from the ended calculation, updates the working variables, and\n creates the new inputs for the next branches\n @ In, jobObject, instance, an instance of a JobHandler\n @ In, model, model instance, it is the instance of a RAVEN model\n @ In, myInput, list, the generating input\n @ Out, None\n \"\"\"\n returncode = DynamicEventTree.localFinalizeActualSampling(self,jobObject,model,myInput,genRunQueue=False)\n forceEvent = True if self.startAdaptive else False\n if returncode:\n self._createRunningQueue(model,myInput, forceEvent)\n" ]
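The adaptive branch lookup in the code above (`_checkClosestBranch` feeding `_checkValidityOfBranch`) boils down to: collect the CDF coordinates of already-ended branches into a matrix, fit a scikit-learn NearestNeighbors model on it, query it with the requested point, and walk the returned neighbor indices (closest first) until one passes a validity check. The following is a minimal, self-contained sketch of that pattern only; the function name, the `is_valid` callback, and the toy coordinates are illustrative assumptions, not part of the RAVEN code shown above.

import numpy as np
from sklearn.neighbors import NearestNeighbors

def find_closest_valid_branch(ended_branch_coords, requested_point, is_valid):
    # ended_branch_coords: (n_branches, n_vars) CDF coordinates of ended branches
    # requested_point:     (n_vars,) CDF coordinates of the adaptive request
    # is_valid:            callable(index) -> bool, e.g. "branch not completed
    #                      and no event happened on it"
    coords = np.atleast_2d(np.asarray(ended_branch_coords, dtype=float))
    neigh = NearestNeighbors(n_neighbors=coords.shape[0])
    neigh.fit(coords)
    # kneighbors returns (distances, indices); take the neighbor indices of the
    # single query point, ordered from closest to farthest.
    _, indices = neigh.kneighbors(np.atleast_2d(requested_point))
    for idx in indices[0]:
        if is_valid(int(idx)):
            return int(idx)
    return None

# Toy usage: three ended branches in a 2-variable CDF space; the nearest one
# (index 1) is marked invalid, so index 0 is returned instead.
coords = [[0.1, 0.2], [0.4, 0.5], [0.8, 0.9]]
print(find_closest_valid_branch(coords, [0.35, 0.45], lambda i: i != 1))

Because the neighbor indices come back ordered by distance, taking the first index that passes the check mirrors the loop over `idOfBranches` in `_checkValidityOfBranch`.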
[ [ "sklearn.neighbors.KNeighborsRegressor", "numpy.atleast_2d", "numpy.array" ], [ "scipy.spatial.KDTree" ], [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
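For orientation, the two metadata fields above are parallel per-file lists: each sub-list in the apis field enumerates library calls detected in one source file, and the dictionary at the same position in possible_versions appears to record, per library, the versions that file is thought to work with (an empty list presumably meaning no constraint recorded). A hedged sketch of pairing the two fields, using an abbreviated illustrative record rather than the full row (the scipy version list is truncated here):

# Field names follow this dump; values below are abbreviated illustrative data.
record = {
    "apis": [
        ["sklearn.neighbors.KNeighborsRegressor", "numpy.atleast_2d", "numpy.array"],
        ["scipy.spatial.KDTree"],
        ["numpy.asarray"],
    ],
    "possible_versions": [
        {"matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": []},
        {"matplotlib": [], "numpy": [], "pandas": [], "scipy": ["1.7", "1.0"], "tensorflow": []},
        {"matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": []},
    ],
}
for calls, versions in zip(record["apis"], record["possible_versions"]):
    # Keep only the libraries that actually carry a version constraint.
    constrained = {lib: v for lib, v in versions.items() if v}
    print(calls, "->", constrained or "no version constraints recorded")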
tensorflow/lingvo
[ "f72c353b76b345fe6d9d02c20466b7e60492fadd", "f72c353b76b345fe6d9d02c20466b7e60492fadd" ]
[ "lingvo/core/py_utils.py", "lingvo/jax/train.py" ]
[ "# Lint as: python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common utilities.\"\"\"\n\n# ==============================================================================\n# Note: Avoid adding dependencies to py_utils beyond standard python packages\n# and tensorflow.\n# ==============================================================================\n\nimport collections as py_collections\nimport contextlib\nimport functools\nimport hashlib\nimport inspect\nimport math\nimport numbers\nimport os\nimport pkgutil\nimport re\nimport threading\nimport traceback\nimport typing\nfrom typing import Optional, Union\n\nimport lingvo.compat as tf\nfrom lingvo.core import cluster_factory\nfrom lingvo.core import gshard_utils\nfrom lingvo.core import hyperparams\nfrom lingvo.core import nested_map\nfrom lingvo.core import ops\nfrom lingvo.core import py_utils_flags\nfrom lingvo.core import retry\nfrom lingvo.core import symbolic\nfrom lingvo.core import thread_local_utils\nfrom lingvo.core import tshape\n\nimport numpy as np\nimport six\n\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import node_def_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python.framework import func_graph\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import stateless_random_ops\nfrom tensorflow.python.tf2 import enabled as tf2_enabled\nfrom tensorflow.python.tpu import topology as tf_topology\nfrom tensorflow.python.tpu import tpu_function\nfrom tensorflow.python.util import deprecation\n# pylint: enable=g-direct-tensorflow-import\n\nFLAGS = tf.flags.FLAGS\n\n# pylint: disable=protected-access\n_FromGlobal = py_utils_flags._FromGlobal\n# pylint: enable=protected-access\nuse_xla = py_utils_flags.use_xla\nuse_tpu = py_utils_flags.use_tpu\ntestonly_skip_norm_layers = py_utils_flags.testonly_skip_norm_layers\ntpu_compat = py_utils_flags.tpu_compat\nuse_stateless_vars_init = py_utils_flags.use_stateless_vars_init\n\nENQUEUE_OPS = '__lingvo_enqueue_ops'\n\n# pylint: disable=protected-access\ndeprecation._PRINT_DEPRECATION_WARNINGS = False\n\n# pylint: enable=protected-access\n\nThreadLocalStack = thread_local_utils.ThreadLocalStack\nThreadLocalDict = thread_local_utils.ThreadLocalDict\nNestedMap = nested_map.NestedMap\n\n\ndef Assert(condition, data, *args, **kwargs):\n if py_utils_flags.enable_asserts():\n return tf.Assert(condition, data, *args, **kwargs)\n else:\n return tf.no_op()\n\n\ndef assert_equal(*args, **kwargs): # pylint: disable=invalid-name\n if py_utils_flags.enable_asserts():\n return tf.assert_equal(*args, **kwargs)\n else:\n return tf.no_op()\n\n\ndef assert_greater_equal(*args, **kwargs): # pylint: disable=invalid-name\n if py_utils_flags.enable_asserts():\n return 
tf.debugging.assert_greater_equal(*args, **kwargs)\n else:\n return tf.no_op()\n\n\ndef assert_greater(*args, **kwargs): # pylint: disable=invalid-name\n if py_utils_flags.enable_asserts():\n return tf.assert_greater(*args, **kwargs)\n else:\n return tf.no_op()\n\n\ndef assert_less_equal(*args, **kwargs): # pylint: disable=invalid-name\n if py_utils_flags.enable_asserts():\n return tf.debugging.assert_less_equal(*args, **kwargs)\n else:\n return tf.no_op()\n\n\ndef assert_less(*args, **kwargs): # pylint: disable=invalid-name\n if py_utils_flags.enable_asserts():\n return tf.assert_less(*args, **kwargs)\n else:\n return tf.no_op()\n\n\ndef assert_between(x, l, r, *args, **kwargs): # pylint: disable=invalid-name\n x = tf.convert_to_tensor(x)\n l = tf.cast(tf.convert_to_tensor(l), x.dtype)\n r = tf.cast(tf.convert_to_tensor(r), x.dtype)\n return tf.group([\n assert_greater_equal(x, l, *args, **kwargs),\n assert_less(x, r, *args, **kwargs)\n ])\n\n\ndef assert_shape_match(*args, **kwargs): # pylint: disable=invalid-name\n if py_utils_flags.enable_asserts():\n filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]\n kwargs['msg'] = 'LINGVO ASSERT %s:%s(%s)' % (re.sub(\n r'.*/', '', filepath), line, func)\n return ops.assert_shape_match(*args, **kwargs)\n else:\n return tf.no_op()\n\n\ndef assert_same_dim0(xs, *args, **kwargs): # pylint: disable=invalid-name\n if py_utils_flags.enable_asserts():\n return ops.assert_same_dim0(xs, *args, **kwargs)\n else:\n return tf.no_op()\n\n\ndef assert_even_divide(denorm, num): # pylint: disable=invalid-name\n \"\"\"Asserts that denorm is evenly divided by num.\"\"\"\n denorm = tf.convert_to_tensor(denorm)\n num = tf.convert_to_tensor(num)\n\n if denorm.dtype not in (tf.int32, tf.int64):\n raise ValueError('denorminator.dtype is not tf.int32 or tf.int64.')\n if num.dtype not in (tf.int32, tf.int64):\n raise ValueError('numerator.dtype is not tf.int32 or tf.int64.')\n\n num = HasShape(num, GetShape(denorm))\n\n quo = denorm // num\n return assert_equal(quo * num, denorm)\n\n\ndef AssertIdShape(expected_ids_shape_pattern, ids_shape, *args):\n \"\"\"Asserts shape expected_ids_shape_pattern matches all other input shapes.\"\"\"\n\n def AssertFn(inputs):\n dependencies = [\n assert_shape_match(inputs.ids_shape, inputs.expected_ids_shape_pattern)\n ] + [\n assert_shape_match(inputs.ids_shape, x_shape) for x_shape in inputs.args\n ]\n return with_dependencies(dependencies, inputs.ids_shape)\n\n inputs = NestedMap(\n expected_ids_shape_pattern=expected_ids_shape_pattern,\n ids_shape=ids_shape,\n args=args)\n return CallDefun(AssertFn, Transform(tf.convert_to_tensor, inputs))\n\n\ndef _CheckNumerics(x, message=None, *args, **kwargs):\n if x.dtype.is_floating:\n x_name = x.name if not tf.executing_eagerly() else '[eager]'\n if 'name' not in kwargs:\n kwargs['name'] = re.sub(r':\\d+', '', x_name) + '_CheckNumerics'\n return tf.debugging.check_numerics(x, message if message else x_name, *args,\n **kwargs)\n else:\n return x\n\n\ndef CheckNumerics(inp, message=None, *args, **kwargs):\n \"\"\"Check numerics for tensors in inp.\"\"\"\n if not py_utils_flags.enable_check_numerics():\n return inp\n if isinstance(inp, list):\n return [_CheckNumerics(x, message, *args, **kwargs) for x in inp]\n if isinstance(inp, tuple):\n return tuple(_CheckNumerics(x, message, *args, **kwargs) for x in inp)\n return _CheckNumerics(inp, message, *args, **kwargs)\n\n\ndef with_dependencies(dependencies, output_tensor): # pylint: disable=invalid-name\n with 
tf.control_dependencies(dependencies):\n return tf.identity(output_tensor)\n\n\ndef _VarInCollection(var, collection):\n \"\"\"Return whether a variable `var` is in the given variable collection.\"\"\"\n # We use variable reference for comparison, since variable is not hashable in\n # eager mode.\n return var.ref() in [v.ref() for v in collection]\n\n\[email protected]\ndef _PrintOptions(*args, **kwargs):\n original = np.get_printoptions()\n np.set_printoptions(*args, **kwargs)\n try:\n yield\n finally:\n np.set_printoptions(**original)\n\n\ndef _Print(name, x):\n with _PrintOptions(linewidth=1000):\n tf.logging.info('%s = %s', name, np.array_repr(x))\n\n\ndef Log(value, prefix, **kwargs):\n \"\"\"Prints out values of tensors.\n\n Useful for debugging. E.g.,\n x = ... a tf.Tensor ...\n y = ... a tf.Tensor ...\n z = compute(x, y)\n z = Log(z, 'debug compute()', x=x, y=y)\n\n Args:\n value: A Tensor. Log happens after this tensor's computed.\n prefix: Every tensor is logged with this prefix.\n **kwargs: keywords and tensors. Tensors are logged in the sort order of\n these keywards.\n\n Returns:\n value is returned.\n \"\"\"\n\n # Ensures tensors are printed in order.\n last = value\n for k in sorted(kwargs):\n with tf.control_dependencies([last]):\n last = tf.py_func(_Print, [prefix + ' : ' + k, kwargs[k]], [])\n with tf.control_dependencies([last]):\n return tf.identity(value)\n\n\ndef Debug(tensor, message='', enabled=True, summarize=100, more=None):\n \"\"\"Wrapper around tf.Print() and tf.logging.info() to simplify debug printing.\n\n x = py_utils.Debug(x)\n\n When the graph is built a regular log info line will be printed:\n -DBG- py_utils_test.py:429 x=Tensor(...\n\n Then when the tensor node is evaluated it will print lines like:\n -DBG- py_utils_test.py:429 x Const:0[x.shape=][2 2][x=][[1 2][3 4]]\n\n WARNING: The code that parses local variable names can fail. E.g. don't write\n two Debug() calls on one line or a Debug() call that spans more than one line.\n\n Args:\n tensor: A tensor to print.\n message: A message to print.\n enabled: To enable the debugging.\n summarize: Integer with number of tensor values to print.\n more: An optional list of additional tensors.\n\n Returns:\n The tensor.\n \"\"\"\n if not enabled or _FromGlobal('disable_py_utils_debug'):\n return tensor\n\n if more is None:\n more = []\n\n stack = inspect.stack()[1][0]\n caller = inspect.getframeinfo(stack)\n\n caller_var = ''\n caller_more_vars = []\n if caller.code_context:\n # Rough and likely to fail. 
But better than nothing.\n match = re.compile(r'Debug\\((.*?)(\\)|,).*$').search(caller.code_context[0])\n if match:\n caller_var = match.groups()[0]\n if more:\n more_vars = re.compile(r'more=\\[(.*?)\\].*$').search(\n caller.code_context[0]).groups()[0]\n if more_vars:\n caller_more_vars = more_vars.split(',')\n\n the_class = ''\n if 'self' in stack.f_locals:\n the_class = stack.f_locals['self'].__class__.__name__\n header = '-DBG- {}:{}:{}:{} {} '.format(\n os.path.basename(caller.filename), the_class, caller.function,\n caller.lineno, message)\n\n info = '{}{}={}'.format(header, caller_var, tensor)\n for name, val in zip(caller_more_vars, more):\n info += ' {}={}'.format(name.strip(), val)\n tf.logging.info(info)\n\n if isinstance(tensor, tf.Tensor):\n tensors = []\n tensors += [tf.constant('{}.shape='.format(caller_var)), tf.shape(tensor)]\n for name, val in zip(caller_more_vars, more):\n tensors += [tf.constant('{}.shape='.format(name.strip())), tf.shape(val)]\n\n tensors += [tf.constant('{}='.format(caller_var)), tensor]\n for name, val in zip(caller_more_vars, more):\n tensors += [tf.constant('{}='.format(name.strip())), val]\n\n name = tensor.name if not tf.executing_eagerly() else '[eager]'\n info = '{}{} {}'.format(header, caller_var, name)\n return tf.identity(\n tf.Print(tensor, tensors, info, summarize=summarize),\n re.sub(':.*$', '', name))\n\n return tensor\n\n\ndef _Save(steps, prefix, key, val):\n filename = '%s.%08d.%s.npy' % (six.ensure_text(prefix), steps,\n six.ensure_text(key))\n with tf.io.gfile.GFile(filename, 'w') as outfile:\n np.save(outfile, val)\n\n\ndef Save(value, filename_prefix, **kwargs):\n \"\"\"Saves values of tensors into files.\n\n Useful for debugging. E.g.,\n x = ... a tf.Tensor ...\n y = ... a tf.Tensor ...\n z = compute(x, y)\n z = Save(z, '/path/tmp', x=x, y=y, z=z)\n\n Args:\n value: A Tensor. Saving happens after this tensor is computed.\n filename_prefix: Every tensor is saved with this filename prefix.\n **kwargs: keywords and tensors. 
Tensors are logged in the sort order of\n these keywards.\n\n Returns:\n value is returned.\n \"\"\"\n last = value\n steps = GetGlobalStep()\n for k in sorted(kwargs):\n with tf.control_dependencies([last]):\n last = tf.py_func(_Save, [steps, filename_prefix, k, kwargs[k]], [])\n with tf.control_dependencies([last]):\n return tf.identity(value)\n\n\ndef HasRank(tensor, expected_rank):\n \"\"\"Syntactic sugar for asserting that tensor has the expected rank.\"\"\"\n if tensor.shape.ndims is not None and isinstance(expected_rank, int):\n assert tensor.shape.ndims == expected_rank, (\n 'Ranks did not match, got %d, '\n 'expected %d') % (tensor.shape.ndims, expected_rank)\n return tensor\n if py_utils_flags.enable_asserts():\n return with_dependencies([tf.assert_equal(tf.rank(tensor), expected_rank)],\n tensor)\n else:\n return tensor\n\n\ndef HasAtLeastRank(tensor, expected_rank):\n \"\"\"Syntactic sugar for asserting that tensor has rank >= expected_rank.\"\"\"\n if tensor.shape.ndims is not None and isinstance(expected_rank, int):\n assert tensor.shape.ndims >= expected_rank, (\n 'Rank of tensor %d did not exceed the expected value %d.') % (\n tensor.shape.ndims, expected_rank)\n return tensor\n if py_utils_flags.enable_asserts():\n return with_dependencies(\n [tf.debugging.assert_greater_equal(tf.rank(tensor), expected_rank)],\n tensor)\n else:\n return tensor\n\n\ndef GetRank(tensor):\n \"\"\"Returns tensor's rank as an int if it's available, otherwise a Tensor.\n\n Args:\n tensor: The input tensor.\n\n Returns:\n Either an int or a Tensor for the rank of the input tensor.\n \"\"\"\n if tensor.shape.ndims is not None:\n return tensor.shape.ndims # int\n else:\n return tf.rank(tensor) # Tensor\n\n\ndef GetShape(tensor, ndims=None):\n \"\"\"Returns tensor's shape as a list which can be unpacked, unlike tf.shape.\n\n Tries to return static shape if it's available. Note that this means\n some of the outputs will be ints while the rest will be Tensors.\n\n Args:\n tensor: The input tensor.\n ndims: If not None, returns the shapes for the first `ndims` dimensions.\n \"\"\"\n tensor = tf.convert_to_tensor(tensor)\n dynamic_shape = tf.shape(tensor)\n\n # Early exit for unranked tensor.\n if tensor.shape.ndims is None:\n if ndims is None:\n return dynamic_shape\n else:\n return [dynamic_shape[x] for x in range(ndims)]\n\n # Ranked tensor.\n if ndims is None:\n ndims = tensor.shape.ndims\n else:\n ndims = min(ndims, tensor.shape.ndims)\n\n # Return mixture of static and dynamic dims.\n static_shape = tensor.shape.as_list()\n shapes = [\n static_shape[x] if static_shape[x] is not None else dynamic_shape[x]\n for x in range(ndims)\n ]\n return shapes\n\n\ndef HasShape(tensor, expected_shape, ndims=None):\n \"\"\"Syntactic sugar for asserting that tensor has the expected shape.\n\n Args:\n tensor: A Tensor.\n expected_shape: A Python list or a 1D tensor. 
Elements of expected_shape can\n be -1 which indicate that any size is valid for that dimension.\n ndims: If not None, check only the first `ndims` dimensions of `tensor`.\n Must be equal to the length of `expected_shape` if not None.\n\n Returns:\n The input `tensor` with control dependencies that will raise a runtime\n error if dynamic shape checks fail.\n\n Raises:\n ValueError: A value error if the assertion fails at static shape checks.\n \"\"\"\n if not py_utils_flags.enable_asserts():\n return tensor\n\n filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]\n msg = 'LINGVO ASSERT %s:%s(%s)' % (re.sub(r'.*/', '',\n filepath), line, func)\n\n tensor_shape = GetShape(tensor)\n if ndims is not None:\n tensor_shape = tensor_shape[:ndims]\n\n # TODO(jngiam): Attempt to switch back to tf.Assert after it has better\n # support on GPUs.\n assert_op = ops.assert_shape_match(tensor_shape, expected_shape, msg=msg)\n\n # If expected_shape is a Tensor, then we are unable to perform static checks.\n # In this case, we can do a dynamic check and return.\n if isinstance(expected_shape, tf.Tensor):\n return with_dependencies([assert_op], tensor)\n\n # Infer ranks from the inputs.\n expected_rank = len(expected_shape)\n if isinstance(tensor_shape, tf.Tensor):\n tensor_rank = tensor.shape.ndims\n else:\n tensor_rank = len(tensor_shape)\n\n # If ndims is None, then either one of the ranks should not be None, or they\n # should both match. If both ranks are None, then they are both tensors and\n # should be caught by the earlier short-circuit.\n if ndims is None:\n if (tensor_rank is not None) and (expected_rank != tensor_rank):\n raise ValueError('Tensor does not match rank of expected shape.\\n'\n 'Tensor shape: {} Expected shape: {}'.format(\n tensor_shape, expected_shape))\n # Both tensors can be assumed to be of same rank.\n ndims = expected_rank\n else:\n if (tensor_rank is not None) and (tensor_rank < ndims):\n raise ValueError('Tensor has fewer dimensions than ndims.\\n'\n 'Tensor shape: {} ndims: {}'.format(tensor_shape, ndims))\n if expected_rank != ndims:\n raise ValueError(\n 'Expected shape must have number of dimensions equal to ndims.\\n'\n 'Expected shape: {} ndims: {}'.format(expected_shape, ndims))\n\n # Ensure that both tensor_shape and expected_shape are both lists.\n tensor_shape = tensor_shape[:ndims]\n if isinstance(tensor_shape, tf.Tensor):\n tensor_shape = tf.unstack(tensor_shape, num=ndims)\n\n # Map tf.Dimension values to their held values.\n tensor_shape = [\n v.value if isinstance(v, tf.Dimension) else v for v in tensor_shape\n ]\n expected_shape = [\n v.value if isinstance(v, tf.Dimension) else v for v in expected_shape\n ]\n\n all_static_checks = True\n for idx, (dim, expected_dim) in enumerate(zip(tensor_shape, expected_shape)):\n if isinstance(expected_dim, tf.Tensor):\n all_static_checks = False\n elif expected_dim == -1:\n continue\n elif isinstance(dim, tf.Tensor):\n all_static_checks = False\n elif dim != expected_dim:\n raise ValueError('Tensor does not match expected shape on dimension {}.\\n'\n 'Tensor shape: {} Expected shape: {}'.format(\n idx, tensor_shape, expected_shape))\n\n if all_static_checks:\n return tf.convert_to_tensor(tensor)\n else:\n return with_dependencies([assert_op], tensor)\n\n\ndef HasSameShape(x, ref):\n return HasShape(x, GetShape(ref))\n\n\ndef GetSize(tensor):\n shape = GetShape(tensor)\n if (isinstance(shape, tf.Tensor) or\n any([isinstance(x, tf.Tensor) for x in shape])):\n return tf.size(tensor)\n return np.prod(shape)\n\n\ndef 
CausalSelfAttenPadding(seqlen, dtype):\n \"\"\"Wraps tf.linalg.band_part() for tflite compatibility.\"\"\"\n if FLAGS.tflite_compatible:\n # [N, 1]\n rows = tf.expand_dims(tf.range(seqlen), -1)\n # [1, N]\n cols = tf.expand_dims(tf.range(seqlen), 0)\n row_cols = rows - cols\n return tf.where(row_cols < 0, tf.ones([seqlen, seqlen], dtype),\n tf.zeros([seqlen, seqlen], tf.float32))\n else:\n return 1.0 - tf.linalg.band_part(\n tf.ones([seqlen, seqlen], dtype=dtype), -1, 0)\n\n\ndef outside_all_rewrites(): # pylint: disable=invalid-name\n return tf.control_dependencies(None)\n\n\n# TODO(jamesqin): remove once b/147439702 is fixed.\n_OUTSIDE_COMPILATION = threading.local()\n\n\ndef RunOnTpuHost(func, *args, **kwargs):\n r\"\"\"Runs the given function call on TPU host.\n\n Invokes func(\\*args, \\*\\*kwargs) directly if not running on tpu.\n\n Args:\n func: the function to invoke.\n *args: args of func\n **kwargs: kwargs of func\n\n Returns:\n The function return value.\n \"\"\"\n if use_tpu() and not getattr(_OUTSIDE_COMPILATION, 'on', False):\n _OUTSIDE_COMPILATION.on = True\n res = tf.tpu.outside_compilation(func, *args, **kwargs)\n _OUTSIDE_COMPILATION.on = False\n else:\n res = func(*args, **kwargs)\n return res\n\n\ndef tpu_host(func): # pylint: disable=invalid-name\n r\"\"\"Decorates a python function to only run on TPU hosts.\n\n This function has no effect when running on CPU/GPU.\n\n Example::\n\n @py_utils.tpu_host()\n def ComputeWER(self):\n # Call a custom op computing WER.\n\n Args:\n func: the function to invoke\n\n Returns:\n A TPU-host only function\n \"\"\"\n\n def Wrapped(*args, **kwargs):\n return RunOnTpuHost(func, *args, **kwargs)\n\n return Wrapped\n\n\n# Maps a TPU job name ('/job:xxx') to the job's DeviceAssignment object.\n# When there is only a single TPU job, the key could be None.\n_tpu_device_assignment_dict = dict()\n\n\ndef SetTpuDeviceAssignment(tpu_device_assignment, job=None):\n if job in _tpu_device_assignment_dict:\n tf.logging.warning('tpu_device_assignment was already set, '\n 'overwriting with new assignment.')\n _tpu_device_assignment_dict[job] = tpu_device_assignment\n\n\n# This function should called in unittest only.\ndef ClearTpuDevice():\n global _tpu_device_assignment_dict\n _tpu_device_assignment_dict = dict()\n\n\ndef GetTpuDeviceAssignment(job=None):\n return _tpu_device_assignment_dict[job]\n\n\n# Whether it's running in eager mode. 
This is different than\n# tf.executing_eagerly(), which will return False inside a tf.function.\n_IS_EAGER_MODE = False\n\n\ndef SetEagerMode(eager_mode=True):\n global _IS_EAGER_MODE\n _IS_EAGER_MODE = eager_mode\n if eager_mode:\n tf.enable_eager_execution()\n tf.config.set_soft_device_placement(True)\n else:\n tf.disable_eager_execution()\n\n\ndef IsEagerMode():\n return _IS_EAGER_MODE\n\n\n# Maintains a tf.GradientTape stack.\n_GRADIENT_TAPE_STACK = ThreadLocalStack()\n\n\[email protected]\ndef GradientTape(*args, **kwargs):\n \"\"\"Creates a tf.GradientTape and use it for automatic differentiation.\"\"\"\n tape = tf.GradientTape(*args, **kwargs)\n _GRADIENT_TAPE_STACK.stack.append(tape)\n try:\n with tape:\n yield\n finally:\n _GRADIENT_TAPE_STACK.stack.pop()\n\n\n# The tf.train.ExponentialMovingAverage singleton used by all subtasks in\n# multi-task training with ExecutorTpu.\n_EXECUTOR_EMA = None\n\n\ndef SetExponentialMovingAverage(ema):\n global _EXECUTOR_EMA\n assert ema\n assert not _EXECUTOR_EMA, 'EMA was set before.'\n _EXECUTOR_EMA = ema\n\n\ndef ExponentialMovingAverage():\n return _EXECUTOR_EMA\n\n\ndef SessionConfig(soft_placement=True,\n inline=True,\n cluster_def=None,\n disable_meta_optimizer=False):\n \"\"\"Returns a session config proto.\n\n Args:\n soft_placement: Turns allow_soft_placement on iff True.\n inline: Turns do_function_inlining on iff True.\n cluster_def: A tf.train.ClusterDef describing the cluster.\n disable_meta_optimizer: Turns off grappler/metagraph optimizer.\n\n Returns:\n A TF session config proto.\n \"\"\"\n session_config = tf.config_pb2.ConfigProto(\n allow_soft_placement=soft_placement,\n graph_options=tf.GraphOptions(\n optimizer_options=tf.OptimizerOptions(\n opt_level=tf.OptimizerOptions.L1, do_function_inlining=inline)),\n cluster_def=cluster_def)\n session_config.share_cluster_devices_in_session = True\n\n if disable_meta_optimizer:\n # Useful if start-up time is critical.\n session_config.graph_options.rewrite_options.disable_meta_optimizer = True\n # Disable layout optimizer which increases GPU memory usage.\n session_config.graph_options.rewrite_options.layout_optimizer = (\n rewriter_config_pb2.RewriterConfig.OFF)\n return session_config\n\n\ndef AssertIsCompatible(a, b):\n assert a.IsCompatible(b), ('%s vs %s' % (a, b))\n\n\ndef SetShapes(dst_nmap, src_nmap):\n \"\"\"Set shapes in dst_nmap using those in src_nmap.\"\"\"\n AssertIsCompatible(src_nmap, dst_nmap)\n for src, dst in zip(src_nmap.Flatten(), dst_nmap.Flatten()):\n dst.set_shape(src.shape)\n\n\ndef Dtypes(nmap_list):\n \"\"\"Returns all tensors' data types in a list.\"\"\"\n return [v.dtype for v in Flatten(nmap_list)]\n\n\ndef Flatten(x):\n \"\"\"Flattens 'x' by extracting tensors from nested structures to a list.\"\"\"\n return tf.nest.flatten(x)\n\n\ndef Pack(tmpl, values):\n \"\"\"Packs 'values' according to 'tmpl'.\"\"\"\n return tf.nest.pack_sequence_as(tmpl, values)\n\n\ndef Transform(fn, *v):\n \"\"\"Replaces every nested value x in 'v' with fn(x) and returns the result.\"\"\"\n return tf.nest.map_structure(fn, *v)\n\n\ndef ConvertNoneGradientToZeros(xs, dxs):\n \"\"\"Sanitize dxs so that None becomes zeros appropriately.\n\n Args:\n xs: A list of tensors.\n dxs: A list of tensors. 
dxs[i] corresponds to xs[i]'s gradient.\n\n Returns:\n A `.NestedMap` same as dxs with None replaced by a zero tensor.\n \"\"\"\n fn = lambda x, dx: tf.zeros_like(x) if dx is None else dx\n return Transform(fn, xs, dxs)\n\n\ndef IsCompatible(lhs, rhs):\n \"\"\"Returns true if lhs and rhs are compatible.\"\"\"\n try:\n tf.nest.assert_same_structure(lhs, rhs)\n return True\n except (ValueError, TypeError):\n return False\n\n\nclass _Unique:\n \"\"\"A helper to uniqify variables in a NestedMap.\"\"\"\n\n def __init__(self):\n self._vset = set()\n\n def __call__(self, v):\n if (v is None) or (id(v) in self._vset):\n return False\n else:\n self._vset.add(id(v))\n return True\n\n\ndef ToUniqueList(nmap):\n \"\"\"Returns the flattened `nmap` with duplicates removed.\"\"\"\n return nmap.Filter(_Unique()).Flatten()\n\n\ndef ReadOnlyAttrDictView(backing):\n \"\"\"Wraps a dict to provide a read-only view of its contents.\n\n Dict keys can also be accessed by attribute.\n\n Args:\n backing: Dict-like object to wrap.\n\n Returns:\n Read-only Mapping that can be accessed by index (['foo']) or attr (d.foo).\n \"\"\"\n\n class Wrapper:\n \"\"\"Wrapper object.\"\"\"\n\n # Disable pytype attribute checking.\n _HAS_DYNAMIC_ATTRIBUTES = True\n\n def __getitem__(self, key):\n return backing[key]\n\n def __len__(self):\n return len(backing)\n\n def __iter__(self):\n return iter(backing)\n\n def __getattr__(self, key):\n return backing[key]\n\n def __hasattr__(self, key):\n return key in backing\n\n def __setattr__(self, key, value):\n raise AttributeError('Dictionary is read-only.')\n\n def __setitem__(self, key, value):\n raise AttributeError('Dictionary is read-only.')\n\n return Wrapper()\n\n\ndef ToStaticShape(shape):\n \"\"\"Converts 'shape' to a static shape.\"\"\"\n if isinstance(shape, (list, tuple)):\n shape = [\n dim.value if isinstance(dim, tf.Dimension) else dim for dim in shape\n ]\n static_shape = []\n for dim in shape:\n if symbolic.IsExpr(dim):\n static_shape.append(symbolic.ToStatic(dim))\n else:\n static_shape.append(dim)\n return static_shape\n else:\n return shape.value if isinstance(shape, tf.Dimension) else shape\n\n\ndef Zeros(shape, *args, **kwargs):\n return tf.zeros(ToStaticShape(shape), *args, **kwargs)\n\n\nclass UniformSampler:\n \"\"\"A reservoir sampler.\n\n This class implements reservoir sampling: Given a limit of `num_samples` total\n samples, this class maintains a uniform probability (1 / `num_samples`) of\n keeping any item dynamically added to the sampler.\n\n See https://en.wikipedia.org/wiki/Reservoir_sampling for details.\n \"\"\"\n\n def __init__(self, num_samples):\n assert num_samples > 0\n self._num_samples = num_samples\n self._num_seen_items = 0\n self._samples = []\n\n def Add(self, item):\n \"\"\"Add item to sampler.\"\"\"\n self._num_seen_items += 1\n\n if len(self._samples) < self._num_samples:\n self._samples.append(item)\n return\n\n index = np.random.randint(0, self._num_seen_items)\n if index < self._num_samples:\n self._samples[index] = item\n\n @property\n def samples(self):\n \"\"\"Fetch the current samples from the sampler.\"\"\"\n return self._samples\n\n\nclass RNNCellStateInit:\n \"\"\"State initialization functions for RNN cell init state.\"\"\"\n\n @staticmethod\n def _Params(method, seed):\n p = hyperparams.Params()\n p.Define('method', method,\n 'Initialization method. 
Should be one of zeros, random_normal.')\n p.Define('seed', seed, 'Random seed used to generate initial values.')\n p.Freeze()\n return p\n\n @staticmethod\n def Zeros():\n \"\"\"tf.zeros().\"\"\"\n return RNNCellStateInit._Params('zeros', seed=None)\n\n @staticmethod\n def RandomNormal(seed=None):\n \"\"\"tf.random.normal().\"\"\"\n return RNNCellStateInit._Params('random_normal', seed)\n\n\ndef DefaultRNNCellStateInit():\n return RNNCellStateInit.Zeros()\n\n\ndef InitRNNCellState(shape, init=None, dtype=None, name=None, is_eval=False):\n \"\"\"Initial state definitions for RNN cell implementations.\n\n Args:\n shape: A array of ints/symbols for specifying the shape of the state.\n init: Hyperparameters as returned by one of the static implemetaitons in\n RNNCellStateInit.\n dtype: The dype of the states. Defaults to tf.float32.\n name: A name for the operation. If --stateless_vars_init is set, this name\n is used to generate a seed on a per-variable basis. Otherwise, this name\n is optional.\n is_eval: Bool, set to True if we need special behavior in eval mode.\n\n Returns:\n A Tensor of the specified shape, and sampled from the distribution as\n defined by the init parameters.\n \"\"\"\n shape = ToStaticShape(shape)\n\n if init is None:\n init = DefaultRNNCellStateInit()\n if dtype is None:\n dtype = tf.float32\n\n method = init.method\n if ((method in ['zeros']) or (method in ['random_normal'] and is_eval)):\n init_state = tf.zeros(shape=shape, dtype=dtype, name=name)\n elif method in ['random_normal']:\n if use_stateless_vars_init():\n if name is None:\n raise ValueError('InitRNNCellState() requires a `name` argument when '\n '--stateless_vars_init is enabled.')\n seed = _GenerateStatelessRngSeed(name, init.seed)\n init_state = stateless_random_ops.stateless_random_normal(\n shape=shape, dtype=dtype, name=name, seed=seed)\n else:\n init_state = tf.random.normal(\n shape=shape, dtype=dtype, name=name, seed=init.seed)\n else:\n raise ValueError('Initialization method (%s) not supported.' % method)\n\n return init_state\n\n\nclass WeightInit:\n \"\"\"Static class providing weight initialization config params.\"\"\"\n\n @staticmethod\n def _Params(method, scale, seed, custom_v_init=None):\n \"\"\"Parameters of this class.\"\"\"\n p = hyperparams.Params()\n p.Define('method', method, 'Initialization method.')\n p.Define('scale', scale, 'Initialization scale.')\n p.Define('seed', seed, 'Random seed used to generate initial values.')\n p.Define('custom_v_init', custom_v_init,\n 'A custom tf.init_ops.Initializer instance.')\n p.Freeze()\n return p\n\n @staticmethod\n def Gaussian(scale=1.0, seed=None):\n \"\"\"scale * tf.random.normal(0, 1.0).\"\"\"\n return WeightInit._Params('gaussian', scale, seed)\n\n @staticmethod\n def Uniform(scale=1.0, seed=None):\n \"\"\"scale * tf.random.uniform(-1.0, 1.0).\"\"\"\n return WeightInit._Params('uniform', scale, seed)\n\n @staticmethod\n def UniformPositive(scale=1.0, seed=None):\n \"\"\"scale * tf.random.uniform(0., 1.0).\"\"\"\n return WeightInit._Params('uniform_positive', scale, seed)\n\n @staticmethod\n def Category(scale=2, seed=None):\n \"\"\"tf.floor(scale * tf.random.uniform(0., 1.0)).\"\"\"\n return WeightInit._Params('category', scale, seed)\n\n @staticmethod\n def Xavier(scale=1.0, seed=None):\n \"\"\"Xavier initialization (x = sqrt(6. 
/ (in + out)); [-x, x]).\"\"\"\n return WeightInit._Params('xavier', scale, seed)\n\n @staticmethod\n def XavierWithFixupParams(scale=1.0,\n depth=1.0,\n layers_per_residual_block=1.0,\n seed=None):\n \"\"\"Xavier initialization with Fixup.\"\"\"\n scale = scale * math.pow(depth, (-1.0 / (2 * layers_per_residual_block)))\n return WeightInit._Params('xavier', scale, seed)\n\n @staticmethod\n def GeoMeanXavier(scale=1.0, seed=None):\n \"\"\"A variant of Xavier (x = sqrt(3. / sqrt(in * out)); [-x, x]).\"\"\"\n return WeightInit._Params('geo_mean_xavier', scale, seed)\n\n @staticmethod\n def Constant(scale=1.0):\n \"\"\"scale.\"\"\"\n return WeightInit._Params('constant', scale, 0)\n\n @staticmethod\n def TruncatedGaussian(scale=1.0, seed=None):\n \"\"\"scale * tf.random.truncated_normal(0, 1.0).\"\"\"\n return WeightInit._Params('truncated_gaussian', scale, seed)\n\n @staticmethod\n def GaussianSqrtDim(scale=1.0, seed=None):\n \"\"\"scale * tf.random.normal(0, 1 / sqrt(dim0)).\"\"\"\n return WeightInit._Params('gaussian_sqrt_dim', scale, seed)\n\n @staticmethod\n def GaussianSqrtFanIn(scale=1.0, seed=None):\n \"\"\"scale * tf.random.normal(0, 1 / sqrt(fan_in)).\"\"\"\n return WeightInit._Params('gaussian_sqrt_fanin', scale, seed)\n\n @staticmethod\n def GaussianSqrtFanOut(scale=1.0, seed=None):\n \"\"\"scale * tf.random.normal(0, 1 / sqrt(fan_out)).\"\"\"\n return WeightInit._Params('gaussian_sqrt_fanout', scale, seed)\n\n @staticmethod\n def GaussianSqrtFanAvg(scale=1.0, seed=None):\n \"\"\"tf.random.normal(0, sqrt(2.0 / (in + out))).\"\"\"\n return WeightInit._Params('gaussian_sqrt_fanavg', scale, seed)\n\n @staticmethod\n def UniformSqrtDim(scale=1.0, seed=None):\n \"\"\"scale * tf.uniform(-1 / sqrt(dim0), 1 / sqrt(dim0)).\"\"\"\n return WeightInit._Params('uniform_sqrt_dim', scale, seed)\n\n @staticmethod\n def UniformUnitScaling(scale=1.0, seed=None):\n \"\"\"scale * sqrt(3) / sqrt(dim0) * tf.uniform(-1, 1).\"\"\"\n return WeightInit._Params('uniform_unit_scaling', scale, seed)\n\n @staticmethod\n def UniformUnitScalingFanAvg(scale=1.0, seed=None):\n \"\"\"Same as tf.variance_scaling_initializer() ...\n\n Samples are drawn from a uniform distribution within [-limit, limit], with\n limit = sqrt(3 * scale / n)\n\n where\n n = max(1., (fan_in + fan_out) / 2).\n See tf.keras.initializers.VarianceScaling for details.\n\n Args:\n scale: A Python float.\n seed: A Python int or None.\n\n Returns:\n A WeightInit param.\n \"\"\"\n return WeightInit._Params('uniform_unit_scaling_fan_avg', scale, seed)\n\n @staticmethod\n def TruncatedGaussianSqrtDim(scale=1.0, seed=None):\n \"\"\"scale * tf.random.truncated_normal(0, 1 / sqrt(dim0)).\"\"\"\n return WeightInit._Params('truncated_gaussian_sqrt_dim', scale, seed)\n\n @staticmethod\n def TruncatedGaussianSqrtFanIn(scale=1.0, seed=None):\n \"\"\"scale * tf.random.truncated_normal(0, 1 / sqrt(fan_in)).\"\"\"\n return WeightInit._Params('truncated_gaussian_sqrt_fanin', scale, seed)\n\n @staticmethod\n def TruncatedGaussianSqrtFanOut(scale=1.0, seed=None):\n \"\"\"scale * tf.random.truncated_normal(0, 1 / sqrt(fan_out)).\"\"\"\n return WeightInit._Params('truncated_gaussian_sqrt_fanout', scale, seed)\n\n @staticmethod\n def KaimingUniformFanInRelu(scale=1.0, seed=None):\n return WeightInit._Params('kaiming_uniform_fanin_relu', scale, seed)\n\n @staticmethod\n def KaimingUniformFanInLeakyRelu(scale=np.sqrt(5.), seed=None):\n return WeightInit._Params('kaiming_uniform_fanin_leakyrelu', scale, seed)\n\n @staticmethod\n def CustomVarInit(custom_v_init):\n 
return WeightInit._Params('custom', 1.0, None, custom_v_init)\n\n @staticmethod\n def CustomConstantVarInit(custom_v_init):\n return WeightInit._Params('custom_constant', 1.0, None, custom_v_init)\n\n\n_DEFAULT_XAVIER_INIT = 1.000001\n\n\ndef DefaultParamInit():\n # Here we use 1.000001 as a signature for user picking up the\n # default param initializer.\n return WeightInit.Xavier(_DEFAULT_XAVIER_INIT)\n\n\n# TODO(rpang, jonathanasdf): explore adding _is_default to hyperparams.Param.\ndef IsDefaultParamInit(p):\n return (p.method == 'xavier' and\n abs(p.scale - _DEFAULT_XAVIER_INIT) < 1e-7 and p.seed is None)\n\n\ndef WeightParams(shape,\n init=None,\n dtype=None,\n collections=None,\n device_mesh=None,\n tensor_split_dims_mapping=None):\n \"\"\"Returns a hyperparams for a weight variable given the shape/init/dtype.\"\"\"\n if init is None:\n init = WeightInit.Xavier(_DEFAULT_XAVIER_INIT)\n if dtype is None:\n dtype = tf.float32\n if collections is None:\n collections = []\n if device_mesh is not None:\n assert tensor_split_dims_mapping is not None\n assert len(tensor_split_dims_mapping) == len(shape)\n\n p = hyperparams.Params()\n p.Define('dtype', dtype, 'The weight data type.')\n p.Define('shape', shape, 'The weight shape.')\n p.Define('init', init, 'Initialization method.')\n p.Define('collections', collections,\n 'Variable collections this weight belongs to.')\n p.Define(\n 'device_mesh', device_mesh,\n 'A numpy.ndarray describing the topology of a device mesh to partition'\n ' this variable onto. Each element in the np.ndarray is the ID of a'\n ' device in the topology. device_mesh and tensor_split_dims_mapping below'\n ' together specifies how this weight tensor should be sharded across'\n ' different tpu cores. If None, this variable is not sharded.'\n ' Here are examples: np.array([0, 1, 2, 3, 4, 5, 6, 7]) which is a 1d'\n ' mesh with 8 devices, np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) which is'\n ' 2d matrix of 8 devices.')\n p.Define(\n 'tensor_split_dims_mapping', tensor_split_dims_mapping,\n 'A list of integers that map each tensor axis to the device mesh axis'\n ' along which it is sharded. Its length is the tensor rank, and'\n ' split_dims_mapping[i] is device mesh axis for tensor dimension i. Use'\n ' -1 for tensor dimensions that are not sharded. If the list is set to'\n ' None and a device_mesh is specified, the sharding will be treated as'\n ' replicated. Here is a concrete examples: '\n ' device_mesh=np.array([[0, 1, 2, 3] [4, 5, 6, 7]]), of shape [2, 4]'\n ' shape=[x, y, z], so this is a 3d variable.'\n ' tensor_split_dims_mapping=[-1, -1, 1], in this case, the third dim'\n ' of the variable is split along the second dim of the mesh. Each '\n ' split of the variable is of the shape [x, y, z/4].')\n\n # The following two flags are used in Jax only.\n p.Define(\n 'repeat_prefix', None,\n 'If not None, the full shape of this var is repeat_prefix+shape. '\n 'For example, if repeat_prefix=[16, 2], and shape=[512, 1024], then '\n 'real shape of variable is [16, 2, 512, 1024]. 
\"repeat_prefix\" is '\n 'often used if a layer is to be used in a recurrent loop, where '\n 'logically there are n sub-layers, but for performance/hbm usage '\n 'reasons we stack all the variables in creating those n-layers.')\n p.Define('repeat_prefix_split_dims_mapping', None,\n 'Tensor split dims mapping for the repeat_prefix dims.')\n return p\n\n\ndef FindNeeded(endpoints):\n \"\"\"List names of tensors and operations required to compute endpoints.\"\"\"\n names_seen = set()\n queue = []\n for e in Flatten(endpoints):\n if isinstance(e, tf.Operation):\n queue.append(e)\n else:\n queue.append(e.op)\n while queue:\n op = queue.pop()\n name = op.name\n if name not in names_seen:\n names_seen.add(name)\n names_seen.update((o.name for o in op.outputs))\n queue.extend(i.op for i in op.inputs)\n queue.extend(op.control_inputs)\n return names_seen\n\n\nclass _CollectionGetter:\n \"\"\"Get graph local value from a defined collection.\"\"\"\n\n def __init__(self, key, default_factory):\n self._key = key\n self._default_factory = default_factory\n\n def __call__(self):\n collection = tf.get_collection(self._key)\n if collection:\n assert len(collection) == 1\n return collection[0]\n value = self._default_factory()\n tf.add_to_collection(self._key, value)\n return value\n\n\ndef SanitizeScopeKey(key):\n \"\"\"Removes invalid symbols from name_scope keys.\"\"\"\n if key.startswith('_'):\n key = key[1:]\n return key.replace('[', '_').replace(']', '')\n\n\n# Maintain a session for unit tests (initialized in test_utils.py).\n_SESSION_SCOPE = ThreadLocalStack()\n\n\[email protected]\ndef UnitTestSessionScope(sess):\n _SESSION_SCOPE.stack.append(sess)\n try:\n yield\n finally:\n _SESSION_SCOPE.stack.pop()\n\n\ndef GetUnitTestSession():\n \"\"\"Get the current variable reuse setting.\"\"\"\n return _SESSION_SCOPE.stack[-1] if _SESSION_SCOPE.stack else None\n\n\n# Global variable to control multitask variable reuse\n# If False (default) the default tf.get_variable is used, that is:\n# - Reusing scopes only allow getting existing variables\n# - Non-reusing scopes only allow getting new variables\n# With GetOpportunisticVariableReuse() == True:\n# - Reusing scopes only allow getting existing variables, as usual\n# - Non-reusing scopes reuse new variables or get new ones\n_OPPORTUNISTIC_VARIABLE_REUSE = ThreadLocalStack()\n\n\[email protected]\ndef OpportunisticVariableReuseScope(enable_opportunistic_reuse=True):\n _OPPORTUNISTIC_VARIABLE_REUSE.stack.append(enable_opportunistic_reuse)\n try:\n yield\n finally:\n _OPPORTUNISTIC_VARIABLE_REUSE.stack.pop()\n\n\ndef GetOpportunisticVariableReuse():\n \"\"\"Get the current variable reuse setting.\"\"\"\n return (_OPPORTUNISTIC_VARIABLE_REUSE.stack[-1]\n if _OPPORTUNISTIC_VARIABLE_REUSE.stack else False)\n\n\n_VARIABLE_RENAME_RULES = ThreadLocalStack()\n\n# Global variable to track task calling scope.\n# Currently only used for TPU Embedding purposes as a TPUEmbeddingLayer\n# may be shared across tasks and the calling task needs to be known\n# for tracking embedding activations for backprop.\n_TASK_CALL_SCOPE = ThreadLocalStack()\n\n\ndef TaskCallScopeName(task):\n \"\"\"Get a unique string identifying a task.\"\"\"\n return f'{task.params.name}_{id(task)}'\n\n\[email protected]\ndef TaskCallScope(task):\n _TASK_CALL_SCOPE.stack.append(TaskCallScopeName(task))\n try:\n yield\n finally:\n _TASK_CALL_SCOPE.stack.pop()\n\n\ndef GetTaskCallScope():\n \"\"\"Get the current task call scope.\"\"\"\n return _TASK_CALL_SCOPE.stack[-1] if _TASK_CALL_SCOPE.stack else 
None\n\n\[email protected]\ndef VariableRenameScope(renames):\n \"\"\"Append the renaming rules to the stack of renames.\n\n Args:\n renames: pairs of (regexp, new_name_format). If the regexp matches, the\n new_name_format will be interpolated using the matched groups.\n\n Yields:\n scope in which the renaming rules are applied\n \"\"\"\n _VARIABLE_RENAME_RULES.stack.append(renames)\n try:\n yield\n finally:\n _VARIABLE_RENAME_RULES.stack.pop()\n\n\ndef GetVariableName(name):\n \"\"\"Get variable name after application of all renaming rules.\n\n Args:\n name: untransformed variable name with scope_name prepended\n\n Returns:\n name possibly modified using renaming rules\n \"\"\"\n matched = False\n new_name = name\n for renames in _VARIABLE_RENAME_RULES.stack:\n tf.logging.log_first_n(\n tf.logging.WARN,\n ('Renaming variables is not supported in eager mode. '\n 'Please look into migrating away from variable renaming.'), 1)\n for regexp, name_format in renames:\n match = re.match(regexp, name)\n if match:\n if matched:\n tf.logging.warning('Multiple matches for: %s', name)\n matched = True\n new_name = name_format % match.groups()\n if new_name != name:\n tf.logging.info(\"WARNING!!! Renaming variable '%s' to '%s'\", name, new_name)\n return new_name\n\n\n_LIST_REGEX_DTYPE = ThreadLocalStack()\n\n\[email protected]\ndef VariableListDtypeRegexScope(list_regex_dtypes):\n \"\"\"Append the list of (regex, dtype) to override the dtype.\n\n Args:\n list_regex_dtypes: pairs of (regexp, dtype). If the regexp matches, the data\n type of the variable will be changed by the corresponding dtype.\n\n Yields:\n scope in which the list of (regex, dtype) is applied.\n \"\"\"\n _LIST_REGEX_DTYPE.stack.append(list_regex_dtypes)\n try:\n yield\n finally:\n _LIST_REGEX_DTYPE.stack.pop()\n\n\ndef FindDataType(var_name):\n \"\"\"Find the data type for var_name.\n\n Args:\n var_name: A string, name of the variable.\n\n Returns:\n The dtype of the first matched regex with var_name, or None if no matching\n found.\n \"\"\"\n for regex_dtypes in _LIST_REGEX_DTYPE.stack:\n for regex, data_type in regex_dtypes:\n if re.match(regex, var_name):\n return data_type\n return None\n\n\ndef GenerateSeedFromName(name):\n \"\"\"Generate a random seed from a name string.\n\n Args:\n name: A string.\n\n Returns:\n An integer seed in the range [0, 2**31 - 1).\n \"\"\"\n md5 = hashlib.md5()\n md5.update(six.ensure_binary(name))\n return np.int64(int(md5.hexdigest(), 16) % (2**31 - 1))\n\n\ndef MaybeGenerateSeedFromScope():\n \"\"\"Generate a random seed from the current name of the scope.\n\n If running in eager mode, this returns 0.\n\n Returns:\n An integer seed in the range [0, 2**31 - 1).\n \"\"\"\n if not tf.executing_eagerly():\n return GenerateSeedFromName(tf.no_op(name='new_step_seed').name)\n return 0\n\n\ndef GenerateSeedFromId(obj_id):\n \"\"\"Generate a random seed from the id of an object.\n\n If deterministic execution (i.e. 
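# --- Illustrative example (added for clarity; not part of the original source).
# A small sketch showing how the rename-rule and dtype-override scopes above
# compose. The rule patterns and variable names ('tower_1/...', 'shared/...')
# are made up; the behavior follows GetVariableName / FindDataType as defined
# in this module.
def _ExampleRenameAndDtypeOverride():
  # Map anything created under 'tower_1/...' onto 'shared/...'.
  with VariableRenameScope([(r'tower_1/(.*)', 'shared/%s')]):
    renamed = GetVariableName('tower_1/conv/w')  # -> 'shared/conv/w'
  # Request float16 storage for variables whose (renamed) name matches.
  with VariableListDtypeRegexScope([(r'shared/conv/.*', tf.float16)]):
    dtype = FindDataType(renamed)  # -> tf.float16
  return renamed, dtype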
unit test), generate the seed from a fixed\n unique name instead.\n\n Args:\n obj_id: id(object).\n\n Returns:\n An integer seed in the range [0, 2**31 - 1).\n \"\"\"\n if tf.get_default_graph().seed is not None:\n # We are in a program/test which need determistic randomization.\n with tf.name_scope(''):\n return GenerateSeedFromName(tf.no_op(name='new_step_seed').name)\n\n md5 = hashlib.md5()\n md5.update(np.int64(obj_id))\n return np.int64(int(md5.hexdigest(), 16) % (2**31 - 1))\n\n\n_VARIABLE_SHAPE_PREFIXES = ThreadLocalStack()\n\n\ndef GetVarLeadingDimsAsCombinedLayers(var):\n \"\"\"Gets the number of leading dimensions of `var` marked as combined layers.\n\n Such dimensions represent variables from different layers stacked together,\n e.g., in RepeatLayer, and optimizers (which have shape-dependant behaviors)\n can adjust its behavior based on this information to match the behavior for\n separate layer variables.\n\n Args:\n var: A variable.\n\n Returns:\n An integer representing the number of leading dimensions.\n \"\"\"\n try:\n return var.op.get_attr('_num_leading_dims_for_combined_layers')\n except ValueError:\n return 0\n except AttributeError:\n # AttributeError: 'DistributedVarOp' object has no attribute 'get_attr'.\n return 0\n\n\[email protected]\ndef VariableShapePrefixContext(shape_prefix):\n \"\"\"Add a shape prefix to variable created by CreateVariable().\n\n This new dimension will be marked as combined-layers. See also comments for\n GetVarLeadingDimsAsCombinedLayers().\n\n Args:\n shape_prefix: a positive integer of shape prefix.\n\n Yields:\n None.\n \"\"\"\n assert shape_prefix > 0, ('%s' % shape_prefix)\n _VARIABLE_SHAPE_PREFIXES.stack.append(shape_prefix)\n try:\n yield\n finally:\n _VARIABLE_SHAPE_PREFIXES.stack.pop()\n\n\ndef GetVariableShapePrefixes():\n \"\"\"Return the list of shape prefixes for CreateVariable().\"\"\"\n return _VARIABLE_SHAPE_PREFIXES.stack\n\n\ndef GetVariableNumLeadingDimsForCombinedLayersContext():\n \"\"\"Return the number of leading combined-layers dims for CreateVariable().\"\"\"\n return len(_VARIABLE_SHAPE_PREFIXES.stack)\n\n\ndef GetFanInFanOut(shape, prefix_dims_to_skip):\n \"\"\"Returns (fan_in, fan_out) of a weight variable of the give shape.\"\"\"\n if not shape:\n return None, None\n if len(shape) < prefix_dims_to_skip:\n raise ValueError(f'Variable shape is {shape} but prefix_dims_to_skip is '\n f'{prefix_dims_to_skip}, larger than the shape rank.')\n adjusted_shape = shape[prefix_dims_to_skip:]\n if len(adjusted_shape) < 1:\n return 1, 1\n elif len(adjusted_shape) == 1:\n # Following _compute_fans() from TF's init_ops.py.\n return adjusted_shape[0], adjusted_shape[0]\n else:\n receptive_field_size = 1\n for s in adjusted_shape[:-2]:\n receptive_field_size *= s\n fan_in = adjusted_shape[-2] * receptive_field_size\n fan_out = adjusted_shape[-1] * receptive_field_size\n return fan_in, fan_out\n\n\n_VARIABLE_STORE_STACK = ThreadLocalStack()\n\n\[email protected]\ndef VariableStore():\n \"\"\"Keeps track of {variable_name: (variable, var_params)}.\n\n When CreateVariable would result in a variable name that exists in the store,\n the existing variable is returned, or an error is raised, depending on whether\n the variable scope supports reuse.\n\n This mimics the behavior of tf.compat.v1.get_variable() with regards to\n variable reuse, while functioning correctly in TF2 eager context. 
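# --- Illustrative example (added for clarity; not part of the original source).
# A sketch of how GetFanInFanOut treats leading "combined layers" dims: for a
# variable stacked under VariableShapePrefixContext (e.g. shape
# [repeat, in_dim, out_dim]), the repeat dim is skipped when computing fans so
# initializers behave as they would for a single layer. Shapes are made up.
def _ExampleFanInFanOut():
  # Plain 2-D weight: the fans are just the two dims.
  assert GetFanInFanOut([512, 1024], prefix_dims_to_skip=0) == (512, 1024)
  # The same weight stacked 16x by a repeated layer: skip the leading dim.
  assert GetFanInFanOut([16, 512, 1024], prefix_dims_to_skip=1) == (512, 1024)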
However, it\n only applies to variables created via CreateVariable.\n\n When there are nested VariableStore contexts, they all provide the same\n variable store object. That is, the scope of the variable store is the\n outermost context.\n\n Yields:\n A dictionary representing the variable store.\n \"\"\"\n store = _VARIABLE_STORE_STACK.stack[-1] if _VARIABLE_STORE_STACK.stack else {}\n _VARIABLE_STORE_STACK.stack.append(store)\n try:\n yield store\n finally:\n _VARIABLE_STORE_STACK.stack.pop()\n\n\ndef _GetVariableStore():\n return (_VARIABLE_STORE_STACK.stack[-1]\n if _VARIABLE_STORE_STACK.stack else None)\n\n\ndef _DefaultVariableCreator(**kwargs):\n kwargs.pop('var_name')\n kwargs.pop('var_params')\n return tf.get_variable(**kwargs)\n\n\n_VARIABLE_CREATOR_STACK = ThreadLocalStack()\n\n\ndef _GetVariableCreator():\n fn = _DefaultVariableCreator\n for wrapper in reversed(_VARIABLE_CREATOR_STACK.stack):\n fn = functools.partial(wrapper, fn)\n return fn\n\n\[email protected]\ndef VariableCreatorScope(variable_creator):\n \"\"\"Yields a context around a variable_creator, used by `CreateVariable()`.\n\n The function must have the following signature::\n\n def variable_creator(next_creator, **kwargs)\n\n The function may delegate variable creation to the next variable creator, or\n return its own tf.Variable.\n\n This differs from tf.variable_creator_scope in that tf.variable_creator_scope\n modifies a tf.Variable() call while this modifies a tf.get_variable() call. As\n the code is migrated to TF2 and tf.get_variable() is deprecated, this may be\n upgraded to using tf.variable_creator_scope instead.\n\n This differs from tf.variable_scope(custom_getter=variable_creator) in that\n the kwargs passed can be manipulated.\n\n Variable creators are resolved from the outermost towards the innermost.\n\n The innermost variable creator function is tf.get_variable.\n\n The passed in kwargs must conform to what tf.get_variable accepts, with the\n addition of `var_name` and `var_params`.\n\n Args:\n variable_creator: A variable creator function.\n \"\"\"\n _VARIABLE_CREATOR_STACK.stack.append(variable_creator)\n try:\n yield\n finally:\n _VARIABLE_CREATOR_STACK.stack.pop()\n\n\ndef PlaceOnTpuCore(core_id):\n \"\"\"Returns a VariableCreatorScope that places variables on a given tpu core.\n\n Only applies when running with TPUs.\n\n Does not yet properly support model parallelism.\n\n Args:\n core_id: The tpu core id.\n \"\"\"\n\n def Creator(next_creator, **kwargs):\n cluster = cluster_factory.Current()\n if use_tpu():\n device = cluster.WorkerDeviceInModelSplit(core_id)\n elif (\n tpu_compat() and\n cluster.params.job in ('controller', 'trainer_client', 'executor_tpu')):\n # The job is running in a fleet that uses tpu, but does not itself have\n # access to the tpu, e.g. controller job. 
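# --- Illustrative example (added for clarity; not part of the original source).
# A sketch of a custom variable creator for VariableCreatorScope: it logs each
# request made by CreateVariable (which passes the extra var_name / var_params
# kwargs described above) and then delegates to the next creator in the stack.
# The helper name _ExampleLoggingVariableCreator is made up.
def _ExampleLoggingVariableCreator():

  def _LogAndCreate(next_creator, **kwargs):
    tf.logging.info('Creating %s with shape %s', kwargs['var_name'],
                    kwargs.get('shape'))
    return next_creator(**kwargs)

  # Use as: `with _ExampleLoggingVariableCreator(): ... CreateVariable(...)`.
  return VariableCreatorScope(_LogAndCreate)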
In this case, the returned\n # device needs to be the cpu device on the tpu host for the given core.\n # FIXME: the current implementation is wrong for large values of core_id.\n device = cluster.ListDevices(cluster.params.worker)[0, 0]\n else:\n device = ''\n\n with tf.device(device):\n return next_creator(**kwargs)\n\n return VariableCreatorScope(Creator)\n\n\n# Variable creators.\ndef MaybeReuseFromVariableStore(next_creator, **kwargs):\n \"\"\"Variable creator that attempts to reuse variables from variable store.\"\"\"\n var_name = kwargs['var_name']\n p = kwargs['var_params']\n store = _GetVariableStore()\n if store is not None and var_name in store:\n if tf.get_variable_scope().reuse:\n var, cached_p = store[var_name]\n tf.logging.info('Reusing var %s', var.name)\n assert cached_p == p.ToText(), (\n 'Cached config:\\n %s vs new config:\\n %s' % (cached_p, p.ToText()))\n return var\n\n var = next_creator(**kwargs)\n tf.logging.info('Creating var %s shape=%s on device %s', var.name, var.shape,\n var.device)\n for col in p.collections:\n tf.add_to_collection(col, var)\n if store is not None:\n store[var_name] = (var, p.ToText())\n return var\n\n\ndef MaybePinVarsToCpu(next_creator, **kwargs):\n if _FromGlobal('pin_vars_to_cpu'):\n with tf.device('/cpu:0'):\n return next_creator(**kwargs)\n return next_creator(**kwargs)\n\n\ndef MaybeOpportunisticVariableReuse(next_creator, **kwargs):\n if GetOpportunisticVariableReuse():\n with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n return next_creator(**kwargs)\n return next_creator(**kwargs)\n\n\n# TODO(yonghui): Add support for partitioned Variables.\ndef CreateVariable(name,\n params,\n reuse=None,\n trainable=True,\n collections=None,\n default_seed=None,\n synchronization=tf.VariableSynchronization.AUTO,\n aggregation=tf.VariableAggregation.NONE):\n \"\"\"Creates tf.Variable according to param_config.\n\n Args:\n name: A string, name of the variable.\n params: A WeightParams specifying the details of how this variable should be\n constructed and initialized.\n reuse: Whether or not to reuse an existing variable. It has the same\n semantics as the reuse arg in tf.variable_scope.\n trainable: Whether or not the variable is trainable.\n collections: Override the default variable collection (\n tf.GraphKeys.GLOBAL_VARIABLES). Note that specifying a collections\n argument in `params` does not override this collection; the caller must\n set this field explicitly in the call to CreateVariable().\n default_seed: Seed to use for initialization if not specified in params.\n Used for deterministic initialization in tests.\n synchronization: Indicates when a distributed a variable will be aggregated.\n Accepted values are constants defined in the class\n tf.VariableSynchronization. 
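# --- Illustrative example (added for clarity; not part of the original source).
# A minimal end-to-end sketch of CreateVariable: build a WeightParams spec and
# create a trainable weight under a VariableStore, so that a later call with a
# reusing scope returns the same variable instead of creating a new one. The
# name 'example_w' and the shape are made up.
def _ExampleCreateVariable():
  wp = WeightParams(
      shape=[128, 256], init=WeightInit.Xavier(1.0), dtype=tf.float32)
  with VariableStore():
    w = CreateVariable('example_w', wp)
  return w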
By default the synchronization is set to AUTO\n and the current DistributionStrategy chooses when to synchronize.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class tf.VariableAggregation.\n\n Returns:\n The created variable.\n \"\"\"\n if use_stateless_vars_init():\n return _CreateVariableStateless(name, params, reuse, trainable, collections,\n default_seed, synchronization, aggregation)\n else:\n return _CreateVariableStateful(name, params, reuse, trainable, collections,\n default_seed, synchronization, aggregation)\n\n\ndef _CreateVariableStateful(name,\n params,\n reuse=None,\n trainable=True,\n collections=None,\n default_seed=None,\n synchronization=tf.VariableSynchronization.AUTO,\n aggregation=tf.VariableAggregation.NONE):\n \"\"\"Creates tf.Variable using TF stateful RNGs according to param_config.\n\n Args:\n name: A string, name of the variable.\n params: A WeightParams specifying the details of how this variable should be\n constructed and initialized.\n reuse: Whether or not to reuse an existing variable. It has the same\n semantics as the reuse arg in tf.variable_scope.\n trainable: Whether or not the variable is trainable.\n collections: Override the default variable collection (\n tf.GraphKeys.GLOBAL_VARIABLES).\n default_seed: Seed to use for initialization if not specified in params.\n Used for deterministic initialization in tests.\n synchronization: Indicates when a distributed a variable will be aggregated.\n Accepted values are constants defined in the class\n tf.VariableSynchronization. By default the synchronization is set to AUTO\n and the current DistributionStrategy chooses when to synchronize.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class tf.VariableAggregation.\n\n Returns:\n The created variable.\n \"\"\"\n p = params.Copy()\n shape = tf.TensorShape(ToStaticShape(p.shape)).as_list()\n if shape:\n assert all([dim_size > 0 for dim_size in shape]), shape\n dim0 = shape[0]\n else:\n dim0 = 1\n assert p.init.method == 'constant' or np.all(np.asarray(p.init.scale) >= 0)\n method = p.init.method\n scale = p.init.scale\n seed = p.init.seed\n\n if IsDefaultParamInit(p.init):\n tf.logging.warning(\n 'WARNING!!! var %s is using the default xavier initializer.'\n ' Make sure this is intended.', name)\n\n with tf.variable_scope(name) as scope:\n var_name = GetVariableName(scope.name)\n\n if tf.get_default_graph().seed is not None:\n # We are in a program/test which need determistic randomization.\n if seed is None:\n if default_seed is not None:\n seed = default_seed\n else:\n # We are not given a per-variable random seed. 
We use hash of\n # variable name as a stable random seed.\n seed = GenerateSeedFromName(var_name)\n\n # If var_name matches a regex, then set the var_dtype; else use p.dtype.\n var_dtype = FindDataType(var_name)\n if var_dtype is None:\n var_dtype = p.dtype\n init_dtype = var_dtype.real_dtype\n\n # TODO(b/172827074): we do not natively support var initialization for\n # int8 type except for constant initialization.\n # NOTE: For int8, we initialize by scaling float32 random values to integer.\n if init_dtype == tf.int8:\n init_dtype = tf.float32\n\n v_init = _CreateVarInitStateful(name, method, shape, dim0, seed, scale,\n init_dtype, p.init.custom_v_init)\n\n if var_dtype == tf.complex64:\n\n def ComplexWrapper(init):\n\n def _Wrapper(shape, dtype, partition_info):\n del dtype\n # A more complex alternative may be to use the init function for\n # magnitudes and uniform random for phases instead.\n shape = [2] + shape\n value = init(shape, init_dtype, partition_info)\n return tf.complex(value[0], value[1])\n\n return _Wrapper\n\n v_init = ComplexWrapper(v_init)\n\n if var_dtype == tf.int8:\n\n def FloatToInt8Wrapper(init):\n\n def _Wrapper(shape, dtype, partition_info):\n del dtype\n value = init(shape, init_dtype, partition_info)\n scale = tf.math.maximum(\n tf.math.reduce_min(value) / -127,\n tf.math.reduce_max(value) / 127)\n value = tf.divide(value, scale)\n return tf.cast(value, tf.int8)\n\n return _Wrapper\n\n v_init = FloatToInt8Wrapper(v_init)\n\n def LingvoVariableCreator(next_creator, **kwargs):\n \"\"\"Lingvo variable creator.\"\"\"\n # TODO(yonghui): Possibly get away from variable_scope and implement our own\n # variable sharing mechanism.\n with tf.variable_scope(name) as scope:\n var_scope = tf.VariableScope(\n scope.reuse,\n custom_getter=scope.custom_getter,\n caching_device=scope.caching_device,\n use_resource=True)\n with tf.variable_scope(var_scope), tf.variable_scope(var_name, reuse=reuse):\n return next_creator(**kwargs)\n\n with contextlib.ExitStack() as context_stack:\n for variable_creator_fn in (LingvoVariableCreator,\n MaybeOpportunisticVariableReuse,\n MaybePinVarsToCpu, MaybeReuseFromVariableStore):\n context_stack.enter_context(VariableCreatorScope(variable_creator_fn))\n if method == 'custom_constant':\n call_shape = None\n else:\n call_shape = GetVariableShapePrefixes() + list(shape)\n var = _GetVariableCreator()(\n var_name=var_name,\n var_params=p,\n name='var',\n shape=call_shape,\n dtype=var_dtype,\n initializer=v_init,\n collections=collections,\n trainable=trainable,\n validate_shape=True,\n synchronization=synchronization,\n aggregation=aggregation)\n\n combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()\n if combined_layers_dims > 0:\n # pylint: disable=protected-access\n var.op._set_attr('_num_leading_dims_for_combined_layers',\n attr_value_pb2.AttrValue(i=combined_layers_dims))\n\n # Shard the variable according to the sharding spec.\n tensor_split_dims_mapping = p.tensor_split_dims_mapping\n if tensor_split_dims_mapping is not None:\n count = (\n len(GetVariableShapePrefixes()) + len(shape) -\n len(tensor_split_dims_mapping) -\n len(gshard_utils.GetMeshSplitDimPrefixContext()))\n tensor_split_dims_mapping = [-1] * count + tensor_split_dims_mapping\n var = gshard_utils.MeshSplit(\n var, p.device_mesh, tensor_split_dims_mapping, use_sharding_op=False)\n\n return var\n\n\ndef _CreateVariableStateless(name,\n params,\n reuse=None,\n trainable=True,\n collections=None,\n default_seed=None,\n 
synchronization=tf.VariableSynchronization.AUTO,\n aggregation=tf.VariableAggregation.NONE):\n \"\"\"Creates tf.Variable using TF stateless RNGs according to `params`.\n\n Args:\n name: A string, name of the variable.\n params: A WeightParams specifying the details of how this variable should be\n constructed and initialized.\n reuse: Whether or not to reuse an existing variable. It has the same\n semantics as the reuse arg in tf.variable_scope.\n trainable: Whether or not the variable is trainable.\n collections: Override the default variable collection (\n tf.GraphKeys.GLOBAL_VARIABLES).\n default_seed: Seed to use for initialization if not specified in params.\n Used for deterministic initialization in tests.\n synchronization: Indicates when a distributed a variable will be aggregated.\n Accepted values are constants defined in the class\n tf.VariableSynchronization. By default the synchronization is set to AUTO\n and the current DistributionStrategy chooses when to synchronize.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class tf.VariableAggregation.\n\n Returns:\n The created variable.\n \"\"\"\n p = params.Copy()\n shape = tf.TensorShape(ToStaticShape(p.shape)).as_list()\n if shape:\n assert all([dim_size > 0 for dim_size in shape]), shape\n dim0 = shape[0]\n else:\n dim0 = 1\n assert p.init.method == 'constant' or np.all(np.asarray(p.init.scale) >= 0)\n method = p.init.method\n scale = p.init.scale\n seed = p.init.seed\n\n if IsDefaultParamInit(p.init):\n tf.logging.warning(\n 'WARNING!!! var %s is using the default xavier initializer.'\n ' Make sure this is intended.', name)\n\n with tf.variable_scope(name) as scope:\n var_name = GetVariableName(scope.name)\n\n user_seed = seed if seed is not None else default_seed\n seed = _GenerateStatelessRngSeed(var_name, user_seed)\n\n # If var_name matches a regex, then set the var_dtype; else use p.dtype.\n var_dtype = FindDataType(var_name)\n if var_dtype is None:\n var_dtype = p.dtype\n init_dtype = var_dtype.real_dtype\n v_init = _CreateVarInitStateless(name, method, shape, dim0, seed, scale,\n init_dtype, p.init.custom_v_init)\n\n if var_dtype == tf.complex64:\n raise TypeError(\n 'Stateless variable initialization does not support tf.complex64.')\n\n def LingvoVariableCreator(next_creator, **kwargs):\n \"\"\"Lingvo variable creator.\"\"\"\n # TODO(yonghui): Possibly get away from variable_scope and implement our own\n # variable sharing mechanism.\n with tf.variable_scope(name) as scope:\n var_scope = tf.VariableScope(\n scope.reuse,\n custom_getter=scope.custom_getter,\n caching_device=scope.caching_device,\n use_resource=True)\n with tf.variable_scope(var_scope), tf.variable_scope(var_name, reuse=reuse):\n return next_creator(**kwargs)\n\n with contextlib.ExitStack() as context_stack:\n for variable_creator_fn in (LingvoVariableCreator,\n MaybeOpportunisticVariableReuse,\n MaybeReuseFromVariableStore):\n context_stack.enter_context(VariableCreatorScope(variable_creator_fn))\n var = _GetVariableCreator()(\n var_name=var_name,\n var_params=p,\n name='var',\n shape=GetVariableShapePrefixes() + list(shape),\n dtype=var_dtype,\n initializer=v_init,\n collections=collections,\n trainable=trainable,\n validate_shape=True,\n synchronization=synchronization,\n aggregation=aggregation)\n\n combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()\n if combined_layers_dims > 0:\n # pylint: disable=protected-access\n 
var.op._set_attr('_num_leading_dims_for_combined_layers',\n attr_value_pb2.AttrValue(i=combined_layers_dims))\n\n # Shard the variable according to the sharding spec.\n tensor_split_dims_mapping = p.tensor_split_dims_mapping\n if tensor_split_dims_mapping is not None:\n count = (\n len(GetVariableShapePrefixes()) + len(shape) -\n len(tensor_split_dims_mapping) -\n len(gshard_utils.GetMeshSplitDimPrefixContext()))\n tensor_split_dims_mapping = [-1] * count + tensor_split_dims_mapping\n var = gshard_utils.MeshSplit(\n var, p.device_mesh, tensor_split_dims_mapping, use_sharding_op=False)\n\n return var\n\n\ndef _RandomXavierUniformInitializer(method, scale, seed):\n \"\"\"Creates a random Xavier uniform initializer.\"\"\"\n combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()\n\n def XavierUniform(shape, dtype, partition_info):\n \"\"\"Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x]).\"\"\"\n del partition_info # Unused.\n if not shape:\n raise ValueError('\\'shape\\' must not be \\'None\\' or 0 for XavierUniform')\n fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)\n if method == 'xavier':\n limit = math.sqrt(6. / (fan_in + fan_out))\n elif method == 'geo_mean_xavier':\n limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))\n return scale * tf.random.uniform(shape, -limit, limit, dtype, seed)\n\n return XavierUniform\n\n\ndef _CreateVarInitStateful(name,\n method,\n shape,\n dim0,\n seed,\n scale,\n init_dtype,\n custom_v_init=None):\n \"\"\"Creates variable initialization function for a stateful RNG.\"\"\"\n if (method in [\n 'gaussian_sqrt_dim', 'uniform_sqrt_dim', 'truncated_gaussian_sqrt_dim'\n ]):\n if len(shape) > 2:\n # This is probably not the right method to use when len(shape) > 2,\n # e.g. dim0 will be 3 with a 3x3 conv2d kernel.\n tf.logging.warning(\n 'Initializing %s of shape %s with method %s: dim0=%s. 
'\n 'Make sure that it is intended.', name, shape, method, dim0)\n scale *= 1.0 / math.sqrt(dim0)\n\n combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()\n\n if method in ['gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanin']:\n fan_in, _ = GetFanInFanOut(shape, combined_layers_dims)\n if fan_in is not None:\n scale *= 1.0 / math.sqrt(fan_in)\n if method in ['gaussian_sqrt_fanout', 'truncated_gaussian_sqrt_fanout']:\n _, fan_out = GetFanInFanOut(shape, combined_layers_dims)\n if fan_out is not None:\n scale *= 1.0 / math.sqrt(fan_out)\n if method in ['gaussian_sqrt_fanavg']:\n fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)\n if fan_in is not None and fan_out is not None:\n scale *= math.sqrt(2.0 / (fan_in + fan_out))\n\n if method in [\n 'gaussian', 'gaussian_sqrt_dim', 'gaussian_sqrt_fanin',\n 'gaussian_sqrt_fanout', 'gaussian_sqrt_fanavg'\n ]:\n v_init = init_ops.random_normal_initializer(\n mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)\n elif method in ['uniform', 'uniform_sqrt_dim']:\n v_init = init_ops.random_uniform_initializer(\n minval=-scale, maxval=scale, seed=seed, dtype=init_dtype)\n elif method in ['uniform_positive']:\n v_init = init_ops.random_uniform_initializer(\n minval=0.0, maxval=scale, seed=seed, dtype=init_dtype)\n elif method == 'category':\n uniform_init = init_ops.random_uniform_initializer(\n minval=0.0, maxval=scale, seed=seed, dtype=init_dtype)\n v_init = lambda *args, **kwargs: tf.floor(uniform_init(*args, **kwargs))\n elif method in ['uniform_unit_scaling']:\n v_init = init_ops.uniform_unit_scaling_initializer(\n factor=scale, seed=seed, dtype=init_dtype)\n elif method in ['uniform_unit_scaling_fan_avg']:\n v_init = tf.variance_scaling_initializer(\n scale=scale,\n mode='fan_avg',\n distribution='uniform',\n seed=seed,\n dtype=init_dtype)\n elif method in [\n 'truncated_gaussian', 'truncated_gaussian_sqrt_dim',\n 'truncated_gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanout'\n ]:\n v_init = init_ops.truncated_normal_initializer(\n mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)\n elif method in ['constant']:\n v_init = init_ops.constant_initializer(value=scale, dtype=init_dtype)\n elif method in ['xavier', 'geo_mean_xavier']:\n\n def XavierUniform(shape, dtype, partition_info):\n \"\"\"Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x]).\"\"\"\n del partition_info # Unused.\n if not shape:\n raise ValueError(\n '\\'shape\\' must not be \\'None\\' or 0 for XavierUniform')\n fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)\n if method == 'xavier':\n limit = math.sqrt(6. / (fan_in + fan_out))\n elif method == 'geo_mean_xavier':\n limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))\n return scale * tf.random.uniform(shape, -limit, limit, dtype, seed)\n\n v_init = XavierUniform\n elif method in [\n 'kaiming_uniform_fanin_relu', 'kaiming_uniform_fanin_leakyrelu'\n ]:\n fan_in = np.prod(shape[:-1])\n if method == 'kaiming_uniform_fanin_leakyrelu':\n # Assume the 'a' parameter is the 'scale' argument.\n gain = np.sqrt(2. / (1 + scale**2))\n else:\n gain = np.sqrt(2.)\n std_dev = gain / np.sqrt(fan_in)\n bound = np.sqrt(3.0) * std_dev\n v_init = init_ops.random_uniform_initializer(\n minval=-bound, maxval=bound, seed=seed, dtype=init_dtype)\n elif method in ['custom', 'custom_constant']:\n v_init = custom_v_init\n else:\n assert False, 'init_type `%s` not supported.' 
% method\n\n return v_init\n\n\ndef _GenerateStatelessRngSeed(name, seed):\n \"\"\"Generates a 2-tuple seed for a stateless variable initializer.\n\n We want to ensure that different variables end up with different random values\n even when they are passed the same seed and shape. To this aim, this function\n generates a pseudo-unique seed by hashing the variable name and mapping it\n into a scalar seed. More specifically, the returned value is a 2-tuple of\n tf.int32 scalar, where the first element is the user-provided seed and the\n second element is obtained by hashing the variable name.\n\n Args:\n name: The variable name for which to generate a stateless-like seed.\n seed: The user-specified scalar seed.\n\n Returns:\n A 2-tuple seed of tf.int32 values (for TPU compatibility).\n \"\"\"\n seed0 = seed or 0\n seed1 = GenerateSeedFromName(name)\n return tf.constant([seed0, seed1], dtype=tf.int32)\n\n\ndef _DeterministicRandomNormalInitializer(seed, mean, stddev):\n \"\"\"Creates a random normal initializer.\"\"\"\n\n def DeterministicNormal(shape, dtype, partition_info):\n del partition_info # Unused.\n return stateless_random_ops.stateless_random_normal(\n shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)\n\n return DeterministicNormal\n\n\ndef _DeterministicRandomUniformInitializer(seed, minval, maxval):\n \"\"\"Creates a random uniform initializer.\"\"\"\n\n def DeterministicUniform(shape, dtype, partition_info):\n del partition_info # Unused.\n return stateless_random_ops.stateless_random_uniform(\n shape=shape, seed=seed, minval=minval, maxval=maxval, dtype=dtype)\n\n return DeterministicUniform\n\n\ndef _DeterministicRandomTruncatedNormalInitializer(seed, mean, stddev):\n \"\"\"Creates a random truncated normal initializer.\"\"\"\n\n def DeterministicTruncatedNormal(shape, dtype, partition_info):\n del partition_info # Unused.\n return stateless_random_ops.stateless_truncated_normal(\n shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)\n\n return DeterministicTruncatedNormal\n\n\ndef _DeterministicRandomUniformUnitScalingInitializer(seed, factor):\n \"\"\"Creates a random uniform unit scaling initializer.\"\"\"\n\n def DeterministicUniformUnitScaling(shape, dtype, partition_info):\n # The following logic is originally from (UniformUnitScaling.__call__())\n # in TensorFlow: python/ops/init_ops.py\n scale_shape = shape\n if partition_info is not None:\n scale_shape = partition_info.full_shape\n\n input_size = 1.0\n # Estimating input size is not possible to do perfectly, but we try.\n # The estimate, obtained by multiplying all dimensions but the last one,\n # is the right thing for matrix multiply and convolutions (see above).\n for dim in scale_shape[:-1]:\n input_size *= float(dim)\n # Avoid errors when initializing zero-size tensors.\n input_size = max(input_size, 1.0)\n maxval = math.sqrt(3 / input_size) * factor\n return stateless_random_ops.stateless_random_uniform(\n shape=shape, seed=seed, minval=-maxval, maxval=maxval, dtype=dtype)\n\n return DeterministicUniformUnitScaling\n\n\ndef _DeterministicRandomVarianceScalingInitializer(scale, mode, distribution,\n seed):\n \"\"\"Creates a variance scaling initializer.\"\"\"\n\n if scale <= 0.:\n raise ValueError('`scale` must be positive float.')\n if mode not in {'fan_in', 'fan_out', 'fan_avg'}:\n raise ValueError('Invalid `mode` argument:', mode)\n distribution = distribution.lower()\n if distribution not in {\n 'normal', 'uniform', 'truncated_normal', 'untruncated_normal'\n }:\n raise 
ValueError('Invalid `distribution` argument:', distribution)\n\n combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()\n\n def DeterministicVarianceScaling(shape, dtype, partition_info):\n # This is originally from TensorFlow: python/ops/init_ops.py\n scale_shape = shape\n if partition_info is not None:\n scale_shape = partition_info.full_shape\n # Handle special case of empty list as shape, since fan_in and fan_out\n # are numerically added below. Without this, GetFanInFanOut() would\n # return None, None instead.\n if isinstance(scale_shape, (list, tuple)) and not scale_shape:\n fan_in, fan_out = 1, 1\n else:\n fan_in, fan_out = GetFanInFanOut(scale_shape, combined_layers_dims)\n if mode == 'fan_in':\n scale_inner = scale / max(1., fan_in)\n elif mode == 'fan_out':\n scale_inner = scale / max(1., fan_out)\n else:\n scale_inner = scale / max(1., (fan_in + fan_out) / 2.)\n if distribution == 'normal' or distribution == 'truncated_normal':\n # constant taken from scipy.stats.truncnorm.std(\n # a=-2, b=2, loc=0., scale=1.)\n stddev = math.sqrt(scale_inner) / .87962566103423978\n return stateless_random_ops.stateless_truncated_normal(\n shape=shape, seed=seed, mean=0.0, stddev=stddev, dtype=dtype)\n elif distribution == 'untruncated_normal':\n stddev = math.sqrt(scale_inner)\n return stateless_random_ops.stateless_random_normal(\n shape=shape, seed=seed, mean=0.0, stddev=stddev, dtype=dtype)\n else:\n limit = math.sqrt(3.0 * scale_inner)\n return stateless_random_ops.stateless_random_uniform(\n shape=shape, seed=seed, minval=-limit, maxval=limit, dtype=dtype)\n\n return DeterministicVarianceScaling\n\n\ndef _DeterministicRandomXavierUniformInitializer(method, scale, seed):\n \"\"\"Creates a variance scaling initializer.\"\"\"\n combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()\n\n def XavierUniform(shape, dtype, partition_info):\n \"\"\"Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x]).\"\"\"\n del partition_info # Unused.\n if not shape:\n raise ValueError('\\'shape\\' must not be \\'None\\' or 0 for XavierUniform')\n fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)\n if method == 'xavier':\n limit = math.sqrt(6. / (fan_in + fan_out))\n elif method == 'geo_mean_xavier':\n limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))\n return scale * stateless_random_ops.stateless_random_uniform(\n shape, seed, -limit, limit, dtype)\n\n return XavierUniform\n\n\ndef _CreateVarInitStateless(name,\n method,\n shape,\n dim0,\n seed,\n scale,\n init_dtype,\n custom_v_init=None):\n \"\"\"Creates variable initialization function for a stateless RNG.\"\"\"\n if (method in [\n 'gaussian_sqrt_dim', 'uniform_sqrt_dim', 'truncated_gaussian_sqrt_dim'\n ]):\n if len(shape) > 2:\n # This is probably not the right method to use when len(shape) > 2,\n # e.g. dim0 will be 3 with a 3x3 conv2d kernel.\n tf.logging.warning(\n 'Initializing %s of shape %s with method %s: dim0=%s. 
'\n 'Make sure that it is intended.', name, shape, method, dim0)\n scale *= 1.0 / math.sqrt(dim0)\n\n combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()\n\n if method in ['gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanin']:\n fan_in, _ = GetFanInFanOut(shape, combined_layers_dims)\n if fan_in is not None:\n scale *= 1.0 / math.sqrt(fan_in)\n if method in ['gaussian_sqrt_fanout', 'truncated_gaussian_sqrt_fanout']:\n _, fan_out = GetFanInFanOut(shape, combined_layers_dims)\n if fan_out is not None:\n scale *= 1.0 / math.sqrt(fan_out)\n if method in ['gaussian_sqrt_fanavg']:\n fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)\n if fan_in is not None and fan_out is not None:\n scale *= math.sqrt(2.0 / (fan_in + fan_out))\n\n if method in [\n 'gaussian', 'gaussian_sqrt_dim', 'gaussian_sqrt_fanin',\n 'gaussian_sqrt_fanout', 'gaussian_sqrt_fanavg'\n ]:\n v_init = _DeterministicRandomNormalInitializer(\n seed=seed, mean=0., stddev=scale)\n elif method in ['uniform', 'uniform_sqrt_dim']:\n v_init = _DeterministicRandomUniformInitializer(\n seed=seed, minval=-scale, maxval=scale)\n elif method in ['uniform_positive']:\n v_init = _DeterministicRandomUniformInitializer(\n seed=seed, minval=0., maxval=scale)\n elif method in ['uniform_unit_scaling']:\n v_init = _DeterministicRandomUniformUnitScalingInitializer(\n seed=seed, factor=scale)\n elif method in ['uniform_unit_scaling_fan_avg']:\n v_init = _DeterministicRandomVarianceScalingInitializer(\n scale=scale, mode='fan_avg', distribution='uniform', seed=seed)\n elif method in [\n 'truncated_gaussian', 'truncated_gaussian_sqrt_dim',\n 'truncated_gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanout'\n ]:\n v_init = _DeterministicRandomTruncatedNormalInitializer(\n seed=seed, mean=0., stddev=scale)\n elif method in ['constant']:\n v_init = init_ops.constant_initializer(value=scale, dtype=init_dtype)\n elif method in ['xavier', 'geo_mean_xavier']:\n v_init = _DeterministicRandomXavierUniformInitializer(method, scale, seed)\n elif method in [\n 'kaiming_uniform_fanin_relu', 'kaiming_uniform_fanin_leakyrelu'\n ]:\n fan_in = np.prod(shape[:-1])\n if method == 'kaiming_uniform_fanin_leakyrelu':\n # Assume the 'a' parameter is the 'scale' argument.\n gain = np.sqrt(2. / (1 + scale**2))\n else:\n gain = np.sqrt(2.)\n std_dev = gain / np.sqrt(fan_in)\n bound = np.sqrt(3.0) * std_dev\n v_init = _DeterministicRandomUniformInitializer(\n seed=seed, minval=-bound, maxval=bound)\n elif method in ['custom', 'custom_constant']:\n v_init = custom_v_init\n else:\n assert False, 'init_type %s not supported.' % method\n\n return v_init\n\n\n_global_variable_scope = None\n\n\ndef GetGlobalVariableScope():\n \"\"\"Gets the global variable scope (as if no variable_scope has been set).\n\n Returns:\n The VariableScope corresponding to as if no tf.variable_scope is in effect.\n \"\"\"\n if not _global_variable_scope:\n # Each thread gets its own default global variable scope, and we take\n # advantage of that in order to get a top-level scope. This avoids the\n # need to call tf.get_variable_scope() at the module level, which allows\n # this module to be imported without modifying global state (i.e. creating\n # the default graph). 
It is important to not mutate the global state at\n # module load time, because it let's us flip flags after import that affect\n # core TensorFlow behavior.\n def Initialize():\n global _global_variable_scope\n _global_variable_scope = tf.get_variable_scope()\n\n t = threading.Thread(target=Initialize)\n t.start()\n t.join()\n return _global_variable_scope\n\n\n_GLOBAL_STEP_STACK = ThreadLocalStack()\n\n\[email protected]\ndef GlobalStepContext(global_step_tensor):\n _GLOBAL_STEP_STACK.stack.append(global_step_tensor)\n try:\n yield\n finally:\n _GLOBAL_STEP_STACK.stack.pop()\n\n\ndef GetGlobalStep():\n \"\"\"Return the global_step.\"\"\"\n if _GLOBAL_STEP_STACK.stack:\n return _GLOBAL_STEP_STACK.stack[-1]\n return tf.train.get_global_step()\n\n\ndef GetOrCreateGlobalStepVar():\n \"\"\"Return the global_step variable, creating it if it does not exist.\n\n Prefer GetGlobalStep if a tensor rather than a tf.Variable is sufficient.\n\n Returns:\n The global_step variable, or a new created one if it does not exist.\n \"\"\"\n with tf.variable_scope(GetGlobalVariableScope(), use_resource=True):\n if _FromGlobal('pin_vars_to_cpu'):\n with tf.device('/cpu:0'):\n return tf.train.get_or_create_global_step()\n else:\n return tf.train.get_or_create_global_step()\n\n\ndef LogMultiLines(label, lines):\n if not isinstance(lines, (list, tuple)):\n lines = lines.split('\\n')\n for line in lines:\n tf.logging.info('%s: %s', label, line)\n\n\ndef _LogPlacement(label, theta, copy):\n \"\"\"Logs theta and its copy's device placement.\"\"\"\n\n def GetDevices(m):\n \"\"\"Flatten a `.NestedMap` m and extracts each value's device.\"\"\"\n return [x.device for x in m.Flatten()]\n\n tf.logging.info('=== %s ===', label)\n LogMultiLines(\n label,\n theta.Pack([('%s -> %s' % (x[0], x[1]))\n for x in zip(GetDevices(theta), GetDevices(copy))\n ]).DebugString())\n tf.logging.info('==========')\n\n\ndef CreateLocalTheta(theta, device_list=None, label=None):\n \"\"\"Creates local copy of theta and shards across devices device list.\n\n Leaves variables intact.\n\n Args:\n theta: a `.NestedMap` of variables.\n device_list: list of devices to shard across. 
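# --- Illustrative example (added for clarity; not part of the original source).
# A sketch of the lookup order used by GetGlobalStep: a tensor pushed via
# GlobalStepContext takes precedence over the graph's global_step variable,
# which is useful when a step value is threaded through a computation rather
# than read from the variable. The constant step value 7 is arbitrary.
def _ExampleGlobalStepOverride():
  _ = GetOrCreateGlobalStepVar()       # Ensure a global_step variable exists.
  default_step = GetGlobalStep()       # Resolves to the variable above.
  with GlobalStepContext(tf.constant(7, dtype=tf.int64)):
    overridden_step = GetGlobalStep()  # Resolves to the pushed constant.
  return default_step, overridden_step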
If None, defaults to a list\n [''].\n label: Logging label.\n\n Returns:\n A `.NestedMap` of identity() wrapped theta\n \"\"\"\n\n class AddIdentity:\n \"\"\"Helper class.\"\"\"\n\n def __init__(self, device_list):\n self._list = device_list if device_list else ['']\n self._index = 0\n\n def __call__(self, x):\n if isinstance(x, tf.Variable):\n return x\n with tf.device(self._list[self._index % len(self._list)]):\n self._index += 1\n return tf.identity(x)\n\n copy = theta.Transform(AddIdentity(device_list))\n _LogPlacement(label, theta, copy)\n return copy\n\n\ndef _GetVarsToLoad(all_vars, variable_loading_rules, var_ignore_rules,\n ckpt_path):\n \"\"\"Determines variables to load and their names in checkpoint.\"\"\"\n # This list contains mappings from var names as they appear in the checkpoint\n # to the vars in our model they correspond to.\n unused_rules = {\n regexp: name_format for regexp, name_format in variable_loading_rules\n }\n vars_to_load = []\n for model_var in all_vars:\n loaded = False\n for regexp, name_format in variable_loading_rules:\n match = re.match(regexp, model_var.name)\n # Skip if var doesn't match the loading rules, or if it should be ignored.\n if not match:\n tf.logging.debug('Loading rules do not match %s.', model_var.name)\n continue\n elif any(re.match(r, model_var.name) for r in var_ignore_rules):\n tf.logging.debug('Ignoring %s from loading.', model_var.name)\n continue\n checkpoint_var_name = name_format % match.groups()\n if checkpoint_var_name.endswith(':0'):\n checkpoint_var_name = checkpoint_var_name[:-2]\n tf.logging.info('Loading %s from %s with regexp: %s', model_var.name,\n checkpoint_var_name, regexp)\n vars_to_load.append((checkpoint_var_name, model_var))\n unused_rules.pop(regexp, None)\n loaded = True\n break\n if not loaded:\n tf.logging.info(\n 'Not loading model variable %s from %s as it does not match any rules'\n ' or matches ignored', model_var.name, ckpt_path)\n for regexp, name_format in unused_rules.items():\n tf.logging.warning(f'User provided rule matched no variables: ({regexp}, '\n f'{name_format})')\n return vars_to_load\n\n\ndef OverrideVarsFromCheckpoint(all_vars, checkpoint_path,\n variable_loading_rules, var_ignore_rules):\n \"\"\"Add TF graph ops to override variables from a provided checkpoint.\n\n Args:\n all_vars: List of all the parameters in the model.\n checkpoint_path: A path to the checkpoints of a pretrained model.\n variable_loading_rules: A list of tuples of strings defining (regex to match\n parameter names in the model to override, format string to determine the\n corresponding var in the checkpoint).\n var_ignore_rules: A list consisting of a list of regexes to match parameter\n names in the model which should not be overridden, even if they match\n those in the loading rules.\n\n Returns:\n A callable that, when called with a tf.Session, will restore the variables\n from the provided checkpoint.\n \"\"\"\n vars_to_load = _GetVarsToLoad(all_vars, variable_loading_rules,\n var_ignore_rules, checkpoint_path)\n if not vars_to_load:\n all_rules_text = '\\n'.join(\n [f'{k} --> {v}' for k, v in variable_loading_rules])\n raise ValueError(f'Variable loading rules {all_rules_text} '\n f'did not match any of {len(all_vars)} vars.')\n load_var_names = '\\n'.join(sorted([v.name for _, v in vars_to_load]))\n tf.logging.info(f'Overriding {len(vars_to_load)} vars from '\n f'{checkpoint_path}:\\n{load_var_names}')\n\n savers = []\n while vars_to_load:\n # When restoring, it's possible the same value in the checkpoint\n # can 
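# --- Illustrative example (added for clarity; not part of the original source).
# A sketch of the (regex, format-string) rule format consumed by
# _GetVarsToLoad / OverrideVarsFromCheckpoint: model variables matching the
# regex are loaded from the checkpoint name produced by the format string
# (regex groups are interpolated, and a trailing ':0' is stripped). The 'enc/'
# and 'pretrained/enc/' prefixes and the global_step ignore rule are made up.
def _ExampleOverrideFromCheckpoint(all_vars, ckpt_path):
  loading_rules = [
      # 'enc/conv/w:0' in this model <- 'pretrained/enc/conv/w' in the ckpt.
      (r'enc/(.*):0', 'pretrained/enc/%s'),
  ]
  ignore_rules = [r'.*global_step.*']
  restore_fn = OverrideVarsFromCheckpoint(all_vars, ckpt_path, loading_rules,
                                          ignore_rules)
  # Later, with a live session: restore_fn(sess)
  return restore_fn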
be restored to multiple variables (e.g. during\n # distillation). However, tf.train.Saver, since it's used for\n # both saving and restoring, requires the name in the checkpoint\n # to be unique for each variable. So, we call it multiple times\n # with a unique set of names each time.\n unique_vars_to_load = {}\n remaining_vars_to_load = []\n for k, v in vars_to_load:\n if k not in unique_vars_to_load:\n unique_vars_to_load[k] = v\n else:\n remaining_vars_to_load.append((k, v))\n savers.append(tf.train.Saver(var_list=unique_vars_to_load, sharded=True))\n vars_to_load = remaining_vars_to_load\n\n def _Restore(sess):\n for saver in savers:\n saver.restore(sess, checkpoint_path)\n\n return _Restore\n\n\ndef OverrideVarsFromCheckpoints(all_vars, ckpts_loading_rules):\n \"\"\"Add TF graph ops to override model variables from checkpoints.\n\n Args:\n all_vars: List of all the parameters in the model.\n ckpts_loading_rules: A dictionary of checkpoint path: loading rules.\n Checkpoint path must be a path to a pretrained model, and loading rules is\n expected to be a tuple of two lists. The first consisting of tuples of\n strings defining (regex to match parameter names in the model to override,\n format string to determine the corresponding var in the checkpoint), and\n the second list consisting of a list of regexes to match parameter names\n in the model which should not be overridden, even if they match those in\n the loading rules.\n\n Returns:\n A callable that, when called with a tf.Session, will restore the variables\n from checkpoint and return a list of overwritten variables.\n\n Raises:\n ValueError: if colliding vars exist or loading rules is not a list.\n \"\"\"\n if len(ckpts_loading_rules) > 1:\n tf.logging.info('Overriding vars from multiple checkpoints.')\n\n var_refs_overridden = set()\n var_names_overridden = set()\n restore_fns = []\n for ckpt_path, loading_rules in ckpts_loading_rules.items():\n tf.logging.info('Overriding vars from checkpoint: %s', ckpt_path)\n\n if not isinstance(loading_rules, tuple):\n raise ValueError('Loading rules for %s must be a tuple of two lists!' %\n ckpt_path)\n if len(loading_rules) != 2 or not all(\n isinstance(l, list) for l in loading_rules):\n raise ValueError('Loading rules for %s must be a tuple of two lists!' 
%\n ckpt_path)\n\n # Filter the model variables to be overridden.\n to_load_vars = _GetVarsToLoad(all_vars, loading_rules[0], loading_rules[1],\n ckpt_path)\n var_refs_to_override = [var[1].experimental_ref() for var in to_load_vars]\n var_names_to_override = [var[1].name for var in to_load_vars]\n\n overlap_refs = set.intersection(var_refs_overridden, var_refs_to_override)\n if overlap_refs:\n raise ValueError('Colliding variables to override: %s' % overlap_refs)\n\n restore_fns.append(\n OverrideVarsFromCheckpoint(all_vars, ckpt_path, loading_rules[0],\n loading_rules[1]))\n var_refs_overridden.update(var_refs_to_override)\n var_names_overridden.update(var_names_to_override)\n tf.logging.info('Model variables overridden: %s', var_refs_overridden)\n\n def _Restore(sess):\n for fn in restore_fns:\n fn(sess)\n return var_names_overridden\n\n return _Restore\n\n\ndef ComputeGradientsSimple(loss_or_activations,\n all_vars,\n grad_aggregation_method,\n colocate_gradients_with_ops,\n gate_gradients,\n activations_grad=None):\n \"\"\"Compute gradients.\"\"\"\n tape = _GRADIENT_TAPE_STACK.stack[-1] if _GRADIENT_TAPE_STACK.stack else None\n if IsEagerMode() and tape:\n tf.logging.info('ComputeGradientsSimple: using gradient tape.')\n if activations_grad is not None:\n raise ValueError('GradientTape does not accept gradient input values.')\n if grad_aggregation_method or colocate_gradients_with_ops or gate_gradients:\n tf.logging.warning(\n 'When GradientTape is used, these field will be ignored: '\n f'grad_aggregation_method ({grad_aggregation_method}), '\n f'colocate_gradients_with_ops ({colocate_gradients_with_ops}), '\n f'gate_gradients ({gate_gradients}).')\n return tape.gradient(\n loss_or_activations,\n all_vars,\n unconnected_gradients=tf.UnconnectedGradients.ZERO)\n\n return tf.gradients(\n loss_or_activations,\n all_vars,\n grad_ys=activations_grad,\n aggregation_method=grad_aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops,\n gate_gradients=gate_gradients)\n\n\ndef _ComputeGradientsTpu(loss_or_activations,\n all_vars,\n grad_aggregation_method,\n colocate_gradients_with_ops,\n gate_gradients,\n skip_zero_gradients=None,\n use_bf16_gradients_ar=False,\n defer_crs_to_apply_grad=False,\n activations_grad=None,\n is_activations=False,\n tpu_embedding_activations=None):\n \"\"\"Computes gradients for local loss across whole TPU cluster.\n\n This implementation specializes for the case where weight params maybe used\n for different number of times in the forward computation, so that gradients\n should be normalized by the actual number of times they are being computed.\n\n TODO(yonghui): Maybe merge this implementation with the _ComputeGradientsTpu\n one.\n\n Args:\n loss_or_activations: The loss or activations to backprop from.\n all_vars: Vars with respect to which gradients are to be computed.\n grad_aggregation_method: aggregation method to use when calling\n tf.gradients.\n colocate_gradients_with_ops: boolean, whether or not to colocate gradient op\n with the original op.\n gate_gradients: boolean, flag to be passed to tf.gradients.\n skip_zero_gradients: whether to skip zero gradients during aggregation.\n use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients\n all-reduce.\n defer_crs_to_apply_grad: Whether to defer gradient cross replica sum to\n apply_gradient. This helps reducing the number of gradient all-reduces\n when doing gradient accumulation, which does gradient cross replica sum\n only every k steps in a tf.cond. 
Currently this works only when\n skip_zero_gradients is None.\n activations_grad: The gradients computed for activations.\n is_activations: A boolean, whether the input is loss or activations.\n tpu_embedding_activations: A `.NestedMap` of tpu embedding feature name ->\n embedding feature tensor.\n\n Returns:\n Gradients to be passed back. If tpu_embedding_activations is set, their\n gradients will be placed at the end.\n\n Raises:\n ValueError: upon invalid arguments.\n \"\"\"\n if is_activations:\n assert activations_grad is not None\n\n if not skip_zero_gradients and not is_activations:\n # Scale the loss to account for the full batch size.\n shards = tpu_function.get_tpu_context().number_of_shards\n assert shards\n loss_or_activations *= tf.constant(\n 1.0 / shards, dtype=loss_or_activations.dtype)\n else:\n assert not tpu_embedding_activations, (\n 'Gradient computation for tpu embedding activations requires proper '\n 'loss scaling, and so is not compatible with skip_zero_gradients and '\n 'is_activations.')\n\n # Computes the gradients.\n # Sum the grads so that we can compute statistics across the whole batch.\n all_grads = ComputeGradientsSimple(\n loss_or_activations=loss_or_activations,\n all_vars=all_vars +\n (tpu_embedding_activations if tpu_embedding_activations else []),\n grad_aggregation_method=grad_aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops,\n gate_gradients=gate_gradients,\n activations_grad=activations_grad)\n\n if tpu_embedding_activations:\n # Note we don't need to aggregate TPU embedding gradients below.\n tpu_embedding_grads = all_grads[len(all_vars):]\n all_grads = all_grads[:len(all_vars)]\n else:\n tpu_embedding_grads = []\n\n # NOTE: We can't use tpu_optimizer.CrossShardOptimizer since\n # we need to scale the grads *after* the cross_replica_sum to\n # match GPU version!\n\n # TODO(cwhipkey): should we do something different here? - we could do\n # some operations on the gradients before the aggregation (see comments in\n # tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py - see compute_gradients -\n # for some more details).\n\n aggregated_grads = []\n for g in all_grads:\n if g is None:\n aggregated_grads.append(None)\n continue\n if use_bf16_gradients_ar:\n g = tf.cast(g, tf.bfloat16)\n with tf.ops.colocate_with(g):\n if skip_zero_gradients is None:\n # loss is already scaled by 1/shards.\n if defer_crs_to_apply_grad:\n normalized_g = tf.convert_to_tensor(g)\n else:\n normalized_g = tf.tpu.cross_replica_sum(g)\n else:\n # Compute the cross-replica mean of 'g', skipping zero gradients.\n\n # Q(yonghui): Is there a better way to detect a non-zero gradient?\n # Note(yonghui): gradient of a weight can be zero if that\n # weight is not used in the forward computation, e.g. 
as in\n # switchable layers in neural architecture search, pruned by channel\n # mask, or sparsified.\n if skip_zero_gradients == 'weight':\n # Same shape as 'g'.\n g_is_non_zero = tf.cast(tf.math.abs(g) > 1e-8, g.dtype)\n elif skip_zero_gradients == 'variable':\n # A variable-wide 0/1 scalar.\n g_is_non_zero = tf.cast(\n tf.reduce_sum(tf.math.abs(g)) > 1e-24, g.dtype)\n else:\n raise ValueError('Unknown skip_zero_gradients: %s' %\n skip_zero_gradients)\n num_updates = tf.maximum(tf.tpu.cross_replica_sum(g_is_non_zero), 1.0)\n normalized_g = tf.tpu.cross_replica_sum(g) / num_updates\n aggregated_grads.append(normalized_g)\n return aggregated_grads + tpu_embedding_grads\n\n\nclass _VarGrad(typing.NamedTuple):\n var: tf.Tensor\n grad: Union[tf.Tensor, tf.IndexedSlices]\n scale: Optional[tf.Tensor] = None\n\n\nclass VarGrad:\n \"\"\"A class that holds a variable and a gradient.\n\n This does not inherit from namedtuple so that tf.nest operations do not\n recurse into it.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._var_grad = _VarGrad(*args, **kwargs)\n\n def __getitem__(self, key):\n return self._var_grad[key]\n\n def __getattr__(self, key):\n return getattr(self._var_grad, key)\n\n def __iter__(self):\n if self._var_grad.scale is None:\n return iter((self._var_grad.var, self._var_grad.grad))\n return iter(self._var_grad)\n\n def __repr__(self):\n return repr(self._var_grad)\n\n\ndef SkipNoneGradients(var_grads):\n \"\"\"Removes pairs whose grad is None.\"\"\"\n for key, (_, g) in var_grads.FlattenItems():\n if g is None:\n tf.logging.info('ComputeGradients drops %s', key)\n return var_grads.Filter(lambda var_grad: var_grad.grad is not None)\n\n\ndef ComputeGradients(\n loss_or_activations,\n vmap,\n grad_aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE,\n colocate_gradients_with_ops=True,\n gate_gradients=False,\n compute_gradients_fn=None,\n skip_zero_gradients=None,\n use_bf16_gradients_ar=False,\n skip_none_gradients=True,\n defer_crs_to_apply_grad=False,\n activations_grad=None,\n is_activations=False,\n tpu_embedding_activations=None):\n \"\"\"Computes gradients of variables in vmap w.r.t loss.\n\n Args:\n loss_or_activations: either the loss, which is a scalar tensor, or\n activations, which could be a tensor or a list of tensors.\n vmap: A `.NestedMap` of variables.\n grad_aggregation_method: Specifies the method used to combine gradient\n terms. Accepted values are constants defined in the class\n AggregationMethod.\n colocate_gradients_with_ops: If True, try colocating gradients with the\n corresponding op.\n gate_gradients: If True, add a tuple around the gradients returned for an\n operations. This avoids some race conditions.\n compute_gradients_fn: Function to use to compute gradients. If None, use\n default. compute_gradients_fn should have the same signature as this\n function, but without the last argument.\n skip_zero_gradients: Whether to skip aggregating zero gradients. This helps\n in case where some weights may not be used in forward computation, e.g.,\n sparsely activated networks or switchable layers in neural architectural\n search. Only applicable on TPU.\n Possible values are:\n\n - None: do not skip zero gradients;\n - `variable`: skip if the entire variable's gradients are almost zero;\n reduce_sum(abs(grads)) < 1e-8.\n - `weight`: skip if the individual weight's gradients are almost zero:\n abs(grad) < 1e-8.\n\n use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients\n all-reduce. 
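# --- Illustrative example (added for clarity; not part of the original source).
# A sketch of how VarGrad behaves: it unpacks like a (var, grad) pair when no
# scale is set, and because it is not a namedtuple, NestedMap / tf.nest treat
# it as a single leaf rather than recursing into the pair. The variable name
# 'example_v' and the values are made up.
def _ExampleVarGrad():
  v = tf.Variable([1.0, 2.0], name='example_v')
  g = tf.constant([0.1, 0.2])
  var_grad = VarGrad(v, g)
  var, grad = var_grad  # Two-element unpacking; scale defaults to None.
  assert var is v and grad is g
  return var_grad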
This applies to TPU only.\n skip_none_gradients: Whether to skip gradients that are None.\n defer_crs_to_apply_grad: Whether to defer gradient cross replica sum to\n apply_gradient. This applies to TPU only.\n activations_grad: The gradients computed for activations.\n is_activations: A boolean, whether the input is loss or activations.\n tpu_embedding_activations: A `.NestedMap` of tpu embedding feature name ->\n embedding feature tensor.\n\n Returns:\n var_grad - a `.NestedMap` of VarGrad. You can view\n var_grad as an ordered list of (key, (var, grad)) tuples. Every\n key of var_grad exists in vmap. Every variable in vmap that\n contributes to loss must exist in var_grad. Every var of var_grad\n must exist in vmap. grad is the corresponding gradient computed\n for var. grad is guaranteed to be not None.\n If tpu_embedding_activations is set, a sub `.NestedMap` named\n tpu_embedding_var_grads will be used to store the VarGrads for the\n activations. In this case, key is the feature name, and var in the VarGrad\n is the activation tensor (not a real variable).\n \"\"\"\n if not is_activations:\n loss_or_activations = HasRank(loss_or_activations, 0)\n if not tpu_embedding_activations:\n tpu_embedding_activations = NestedMap()\n assert isinstance(tpu_embedding_activations, NestedMap)\n assert isinstance(vmap, NestedMap)\n assert skip_zero_gradients in (None, 'variable', 'weight')\n\n # Uniqify and remove None.\n filtered_vmap = vmap.Filter(_Unique())\n assert filtered_vmap is not None\n\n # Filter out variables not contributing to 'loss_or_activations'.\n # This doesn't work if the training loop is wrapped inside a tf.function,\n # since all variables will be lifted out and trainable_variables will be\n # empty. In that case we skip the check.\n trainable_variables = set(tf.trainable_variables())\n if trainable_variables:\n\n def Needed(v):\n if isinstance(v, tf.Variable):\n if v not in trainable_variables:\n # Skip non-trainable variables. 
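# --- Illustrative example (added for clarity; not part of the original source).
# A typical (non-TPU-embedding) use of ComputeGradients: given a scalar loss
# and a NestedMap of variables, it returns a NestedMap of VarGrad with
# None-gradient entries already dropped (skip_none_gradients defaults to
# True). The logging loop is only for illustration.
def _ExampleComputeGradients(loss, vmap):
  var_grads = ComputeGradients(loss, vmap)
  for name, _ in var_grads.FlattenItems():
    tf.logging.info('Computed a gradient for %s', name)
  return var_grads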
Otherwise,\n # tf.Optimizer.apply_gradients throws up an exception instead\n # of skipping the update.\n return False\n return True\n\n filtered_vmap = filtered_vmap.Filter(Needed)\n assert filtered_vmap is not None\n\n filtered_vlist = filtered_vmap.Flatten()\n\n # Use caller-supplied gradient function if supplied.\n if compute_gradients_fn is not None:\n assert not tpu_embedding_activations\n take_grad = compute_gradients_fn\n else:\n # tpu vs non-tpu is slightly different.\n if use_tpu():\n take_grad = functools.partial(\n _ComputeGradientsTpu,\n skip_zero_gradients=skip_zero_gradients,\n use_bf16_gradients_ar=use_bf16_gradients_ar,\n defer_crs_to_apply_grad=defer_crs_to_apply_grad,\n activations_grad=activations_grad,\n is_activations=is_activations,\n tpu_embedding_activations=tpu_embedding_activations.Flatten())\n else:\n assert not tpu_embedding_activations\n take_grad = ComputeGradientsSimple\n\n grads = take_grad(loss_or_activations, filtered_vlist,\n grad_aggregation_method, colocate_gradients_with_ops,\n gate_gradients)\n\n if tpu_embedding_activations:\n tpu_embedding_grads = grads[len(filtered_vlist):]\n grads = grads[:len(filtered_vlist)]\n else:\n tpu_embedding_grads = None\n\n # Formulate pairs of (var, grad) and pack them into the same\n # structure as filtered_vmap.\n var_grads = filtered_vmap.Pack(\n [VarGrad(v, g) for v, g in zip(filtered_vlist, grads)])\n\n if skip_none_gradients:\n var_grads = SkipNoneGradients(var_grads)\n\n if tpu_embedding_grads:\n # Create VarGrads for TPU embedding activations in a dedicated sub map.\n assert 'tpu_embedding_var_grads' not in var_grads\n tpu_embedding_activation_list = tpu_embedding_activations.Flatten()\n tpu_embedding_var_grads = [\n VarGrad(v, g)\n for v, g in zip(tpu_embedding_activation_list, tpu_embedding_grads)\n ]\n tpu_embedding_var_grads = tpu_embedding_activations.Pack(\n tpu_embedding_var_grads)\n\n # Replace None gradients with zeros, since TPU embedding expect all\n # activations to have gradients.\n def _NoneToZeros(key, var_grad):\n if var_grad.grad is None:\n tf.logging.warning(\n f'TPU embedding gradient for feature {key} is None. Replacing with '\n 'zeros.')\n return VarGrad(var_grad.var, tf.zeros_like(var_grad.var))\n return var_grad\n\n var_grads.tpu_embedding_var_grads = (\n tpu_embedding_var_grads.TransformWithKey(_NoneToZeros))\n\n return var_grads\n\n\ndef MaskGradients(var_grad, grad_mask):\n \"\"\"Computes gradients of non-masked variables in vmap w.r.t loss.\n\n Args:\n var_grad: A `.NestedMap` of (variable, gradient)\n grad_mask: A dict of (variable name, mask).\n\n Returns:\n var_grad - a `.NestedMap` of (variable, mask * gradient).\n \"\"\"\n\n def ApplyMask(entry):\n var, grad = entry\n mask = grad_mask[var.name]\n if isinstance(grad, tf.IndexedSlices):\n return VarGrad(var, tf.IndexedSlices(grad.values * mask, grad.indices))\n else:\n return VarGrad(var, grad * mask)\n\n return var_grad.Transform(ApplyMask)\n\n\ndef ApplyGradMultiplier(vs_gs, grad_scale=None):\n \"\"\"Scale gradients by grad_scale on same device as corresponding variables.\n\n Args:\n vs_gs: A `.NestedMap` of VarGrad.\n grad_scale: If None, each vs_gs entry has the scale. Otherwise, grad_scale\n applies to every entry.\n\n Returns:\n A `.NestedMap` of (variable, gradient * grad_scale). 
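# --- Illustrative example (added for clarity; not part of the original source).
# A sketch of building the {var name: mask} dict expected by MaskGradients,
# here freezing every variable whose name contains 'bias' (a made-up naming
# convention) by zeroing its gradient while leaving all others untouched.
def _ExampleMaskGradients(var_grads):
  grad_mask = {
      vg.var.name: 0.0 if 'bias' in vg.var.name else 1.0
      for vg in var_grads.Flatten()
  }
  return MaskGradients(var_grads, grad_mask)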
In particular, if\n grad_scale is 0, the result gradient is always 0, even if the input\n gradient is inf or nan.\n \"\"\"\n\n def ScaleOrZero(var: tf.Tensor, grad: tf.Tensor,\n scale: tf.Tensor) -> tf.Tensor:\n grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)\n return tf.where(\n tf.equal(scale, 0.), tf.zeros_like(grad),\n tf.cast(scale, grad.dtype) * grad)\n\n def Scale(item: VarGrad) -> VarGrad:\n \"\"\"Scales the gradient.\"\"\"\n var, grad = item\n assert grad is not None, ('No grad found for ', var.name)\n if grad_scale is None:\n scale = item.scale\n else:\n scale = grad_scale\n with tf.device(var.device):\n if isinstance(grad, tf.IndexedSlices):\n grad = tf.IndexedSlices(\n ScaleOrZero(var, grad.values, scale), grad.indices,\n grad.dense_shape)\n else:\n grad = ScaleOrZero(var, grad, scale)\n return VarGrad(var, grad)\n\n return vs_gs.Transform(Scale)\n\n\ndef HasNanOrInf(x):\n if isinstance(x, tf.IndexedSlices):\n x = x.values\n with tf.device(x.device):\n if x.dtype.is_complex:\n return tf.reduce_any(\n [HasNanOrInf(tf.math.real(x)),\n HasNanOrInf(tf.math.imag(x))])\n return tf.reduce_any(\n tf.math.logical_or(tf.math.is_nan(x), tf.math.is_inf(x)))\n\n\ndef HasNanOrInfGradient(var_grads):\n \"\"\"Returns a bool tensor to indicate if `var_grads` contains NaNs or Infs.\n\n Args:\n var_grads: A `.NestedMap` with (var, grad) tuple as the map value.\n\n Returns:\n A bool scalar tensor to indicate if the `var_grads` contains NaNs or Infs.\n \"\"\"\n return tf.reduce_any([HasNanOrInf(g) for (_, g) in var_grads.Flatten()])\n\n\ndef ApplyGradNormClipping(vs_gs, norm=1.0):\n \"\"\"Clip gradients to norm on same device as corresponding variables.\n\n Args:\n vs_gs: A `.NestedMap` of VarGrad.\n norm: Each tensor's gradient will be scaled down to have a maximum L2-norm\n value of `norm`.\n\n Returns:\n A `.NestedMap` of VarGrad(variable, scaled_gradient). In particular, if\n grad_scale is 0, the result gradient is always 0, even if the input\n gradient is inf or nan.\n \"\"\"\n\n def ClipByNorm(var, grad, norm):\n grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)\n return tf.clip_by_norm(grad, norm)\n\n def Clip(item):\n \"\"\"Scales the gradient.\"\"\"\n var, grad = item\n assert grad is not None, ('No grad found for ', var.name)\n with tf.device(var.device):\n if isinstance(grad, tf.IndexedSlices):\n grad = tf.IndexedSlices(\n ClipByNorm(var, grad.values, norm), grad.indices, grad.dense_shape)\n else:\n grad = ClipByNorm(var, grad, norm)\n return VarGrad(var, grad)\n\n return vs_gs.Transform(Clip)\n\n\nSKIP_LP_REGULARIZATION = '__lingvo_skip_lp_regularization'\n\n\ndef AdjustGradientsWithLpLoss(var_grads, lp_regularizer_weight, p=2.0):\n \"\"\"Adjusts the map of (var, grad) with Lp regularization, where p=1.0 or 2.0.\n\n Args:\n var_grads: a `.NestedMap` or list of (variable, gradient).\n lp_regularizer_weight: Lp regularization weight.\n p: For now we support 1.0 or 2.0.\n\n Returns:\n A tuple (lp_loss, var_grads).\n\n - lp_loss: A scalar. 
The lp loss.\n - var_grads: a `.NestedMap` or list of (variable, gradient) regulated by Lp.\n \"\"\"\n # TODO(yuancao): For now we support p=1 or 2, but this can be extended to\n # lp-norm in general.\n\n assert p in [2.0, 1.0], 'For now we only support L1/L2 regularization.'\n\n def GetVar(item):\n var, grad = item\n if isinstance(grad, tf.IndexedSlices):\n with tf.device(var.device):\n ids = HasRank(grad.indices, 1)\n uniq_ids = tf.unique(ids).y\n return tf.gather(var, uniq_ids)\n else:\n return var\n\n def ShouldAdjust(v):\n return not _VarInCollection(v, tf.get_collection(SKIP_LP_REGULARIZATION))\n\n filtered_var_grads = [\n var_grad for var_grad in Flatten(var_grads) if ShouldAdjust(var_grad.var)\n ]\n filtered_vars = Transform(GetVar, filtered_var_grads)\n for v in filtered_vars:\n tf.logging.info('AdjustGradientsWithLpLoss: %s', v.name)\n\n if p == 2.0:\n lp_loss = 0.5 * lp_regularizer_weight * SumSquared(filtered_vars)\n elif p == 1.0:\n lp_loss = lp_regularizer_weight * SumAbs(filtered_vars)\n\n def LpGrad(var_grad):\n \"\"\"Adjusts item's grad w/ Lp loss term.\"\"\"\n var, grad = var_grad\n if isinstance(grad, tf.IndexedSlices):\n # Question(rpang): do we apply Lp loss here even if 'var' is in\n # SKIP_LP_REGULARIZATION?\n #\n # Note: IndexedSlces appears for embedding lookups.\n # Embedding lookup ids can have duplicate. For duplicated ids, we\n # only want to consider once for each ids.\n with tf.device(var.device):\n emb = HasRank(var, 2)\n vocab_size = tf.shape(emb)[0]\n ids = HasRank(grad.indices, 1)\n values = tf.gather(emb, ids) # [#ids, dims]\n with tf.device(grad.device):\n # Counts is a vector of size vocab_size. counts[i] is i-th words\n # occurrences in 'ids'.\n counts = tf.math.unsorted_segment_sum(\n tf.ones_like(ids, dtype=values.dtype), ids, vocab_size)\n\n # Gradients for duplicated ids will be summed when they get\n # applied, and hence we account for that by first dividing\n # gradient resulting from lp loss by how many times the id is\n # duplicated.\n #\n # For each id in 'ids', we know counts[id] is non-zero,\n # hence, it's always safe to take reciprocal.\n weights = tf.math.reciprocal(tf.gather(counts, ids))\n weights = tf.expand_dims(weights, -1) # [#ids, 1]\n if p == 2.0:\n grad_v = values\n elif p == 1.0:\n grad_v = tf.sign(values)\n delta = lp_regularizer_weight * weights * grad_v\n grad = tf.IndexedSlices(grad.values + delta, ids)\n elif not _VarInCollection(var, tf.get_collection(SKIP_LP_REGULARIZATION)):\n with tf.device(var.device):\n if p == 2.0:\n grad_v = var\n elif p == 1.0:\n grad_v = tf.sign(var)\n delta = lp_regularizer_weight * grad_v\n with tf.device(grad.device):\n grad += delta\n return VarGrad(var, grad)\n\n return lp_loss, Transform(LpGrad, var_grads)\n\n\ndef SplitRecursively(x, num_splits, axis=-1):\n \"\"\"Splits Tensors in 'x' recursively.\n\n Args:\n x: a Tensor, or a list or NestMap containing Tensors to split.\n num_splits: number of splits per Tensor.\n axis: the split axis.\n\n Returns:\n A list of split values of length 'num_splits'.\n\n - If 'x' is a Tensor, a list of split Tensors.\n - If 'x' is a list, a list of lists, where each sublist has the same length\n as 'x' and the k'th element in each sublist corresponds to a split of the\n k'th element from 'x'.\n - If 'x' is a `.NestedMap`, a list of `.NestedMap`, where each field\n corresponds to a split from the same field of 'x'.\n \"\"\"\n if isinstance(x, tf.Tensor):\n return tf.split(x, num_splits, axis=axis)\n elif isinstance(x, list):\n splits = 
[SplitRecursively(element, num_splits, axis) for element in x]\n splits = list(zip(*splits))\n return [list(t) for t in splits]\n elif isinstance(x, NestedMap):\n results = [NestedMap() for _ in range(num_splits)]\n for key, val in x.items():\n val_splits = SplitRecursively(val, num_splits, axis)\n for i in range(num_splits):\n results[i][key] = val_splits[i]\n return results\n else:\n raise TypeError('Unexpected type for SplitRecursively: %s' % type(x))\n\n\ndef ConcatRecursively(splits, axis=-1):\n \"\"\"Concatenates tensors from 'splits'.\n\n This is the inverse function of SplitRecursively.\n\n Args:\n splits: a list of splits to concatenate, where elements can be Tensors,\n lists, or `.NestedMap`. The elements must share the same type and\n structure. For example, list elements must have the same length;\n `.NestedMap` must have the same set of fields.\n axis: the concatenation axis.\n\n Returns:\n Concatenated data.\n\n - If input 'splits' are Tensors, returns a concatenated Tensor.\n - If input 'splits' are lists, returns a list of the same length where the\n k'th element represents concatenated data of the k'th element from each\n split.\n - If input 'splits' are `.NestedMap`, returns a `.NestedMap` with each field\n concatenated from corresponding fields of input splits.\n\n Raises:\n TypeError: if 'splits' is not a list or elements of 'splits' do not have\n known or matching types.\n ValueError: if 'splits' is empty or elements of 'splits' do not have\n matching structures.\n \"\"\"\n if not isinstance(splits, list):\n raise TypeError('Non-list inputs for ConcatRecursively: %s' % splits)\n if not splits:\n raise ValueError('Empty inputs for ConcatRecursively: %s' % splits)\n\n tmpl = splits[0]\n\n if isinstance(tmpl, tf.Tensor):\n return tf.concat(splits, axis=axis)\n elif isinstance(tmpl, list):\n if not all(isinstance(split, list) for split in splits):\n raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)\n if not all(len(split) == len(tmpl) for split in splits):\n raise ValueError('Length mismatch for ConcatRecursively: %s' % splits)\n return [\n ConcatRecursively([split[i]\n for split in splits], axis)\n for i in range(len(tmpl))\n ]\n elif isinstance(tmpl, NestedMap):\n if not all(isinstance(split, NestedMap) for split in splits):\n raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)\n results = NestedMap()\n for key in tmpl:\n results[key] = ConcatRecursively([split[key] for split in splits], axis)\n return results\n else:\n raise TypeError('Unexpected type for ConcatRecursively: %s' % type(splits))\n\n\ndef WeightedAvg(values, weights, sum_reduction_fn=tf.reduce_sum, name=''):\n \"\"\"Computes weighted average of values from a tensor.\n\n Args:\n values: a tensor of values\n weights: a tensor of weights\n sum_reduction_fn: called to reduce the values and weights to single value\n name: name of metric.\n\n Returns:\n A tuple (avg, total_weight).\n\n - avg: weighted average value\n - total_weight: sum of all weights\n \"\"\"\n msg = 'shape of values and weights tensors must match for metric ' + name\n values = with_dependencies(\n [assert_equal(tf.shape(values), tf.shape(weights), message=msg)], values)\n total_weight = sum_reduction_fn(weights)\n # divide_no_nan only supports tf.{float,complex}*.\n dtype = values.dtype if values.dtype is tf.float64 else tf.float32\n avg = tf.math.divide_no_nan(\n sum_reduction_fn(tf.cast(values, dtype) * tf.cast(weights, dtype)),\n tf.cast(total_weight, dtype))\n return tf.cast(avg, values.dtype), 
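# Illustrative sketch (not from the original source): SplitRecursively and
# ConcatRecursively above are inverses of each other, e.g.
#
#   parts = SplitRecursively(NestedMap(x=tf.ones([4, 8])), num_splits=2)
#   whole = ConcatRecursively(parts)  # NestedMap whose x has shape [4, 8]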
total_weight\n\n\ndef WeightedAvgOfMetrics(metrics):\n \"\"\"Computes the weighted average of metrics in the list.\n\n Args:\n metrics: list of dictionaries of metrics\n\n Returns:\n ret_dict - dictionary of weighted averages of each metrics.\n \"\"\"\n ret_dict = {}\n lists_of_metrics = {}\n for m in metrics:\n for name, (value, weight) in m.items():\n if name not in lists_of_metrics:\n lists_of_metrics[name] = []\n lists_of_metrics[name].append((value, weight))\n\n for name, values_and_weights in sorted(lists_of_metrics.items()):\n values = tf.stack([x[0] for x in values_and_weights])\n weights = tf.stack([x[1] for x in values_and_weights])\n ret_dict[name] = WeightedAvg(values, weights, tf.reduce_sum, name)\n\n return ret_dict\n\n\ndef ConcatPerExampleTensors(per_example):\n \"\"\"Concatenate per-example tensors from many hosts into one large block.\n\n Args:\n per_example: list of dictionaries of per-example tensors.\n\n Returns:\n ret_dict - string -> concatenated tensors.\n \"\"\"\n ret_dict = {}\n lists_of_per_example = {}\n for m in per_example:\n for name, value in m.items():\n if name not in lists_of_per_example:\n lists_of_per_example[name] = []\n lists_of_per_example[name].append(value)\n\n for name, values in sorted(lists_of_per_example.items()):\n ret_dict[name] = tf.concat(values, 0)\n\n return ret_dict\n\n\ndef CombineMetrics(loss_metric_weight_pairs):\n \"\"\"Combines metrics from `loss_metric_weight_pairs` according to weights.\n\n Keys must either exist in all metrics, in which it will be processed as a\n weighted sum, or exist in only one metrics, in which case it will be copied.\n\n Args:\n loss_metric_weight_pairs: a list of (metrics, weight) pairs, where each\n weight is a float and each metrics is a dict with str keys and\n (metric_value, target_weight) values.\n\n Returns:\n A dict with the same set of keys as input metrics and values of\n (weighted_sum(metric_value), weighted_sum(target_weight)).\n\n Raises:\n ValueError: if there exists a metric that exists in more than one element\n of `loss_metric_weight_pairs` but not in all of them.\n \"\"\"\n all_keys = set(\n [k for loss_metrics, _ in loss_metric_weight_pairs for k in loss_metrics]) # pylint: disable=g-complex-comprehension\n result = {}\n for k in all_keys:\n count = 0\n for loss_metrics, weight in loss_metric_weight_pairs:\n if k in loss_metrics:\n count += 1\n if count > 1 and count != len(loss_metric_weight_pairs):\n raise ValueError('Found metric %s which exists in more than one'\n 'but not all loss metrics.' 
% k)\n\n total_val = 0\n total_target_weight = 0\n for loss_metrics, weight in loss_metric_weight_pairs:\n if k in loss_metrics:\n val, target_weight = loss_metrics[k]\n if count == 1:\n # Single metric, don't multiply by weight.\n total_val = val * target_weight\n total_target_weight = target_weight\n else:\n # Total weighted sum of all predictions.\n total_val += weight * val * target_weight\n total_target_weight += weight * target_weight\n\n result[k] = (total_val / total_target_weight, total_target_weight)\n return result\n\n\ndef AddVN(p, x, per_step=False):\n \"\"\"Add variational noise to x.\n\n Args:\n p: Layer params, with a `vn` subparam containing `VariationalNoiseParams`.\n x: Input to add variational noise to.\n per_step: Whether to add per_step noise.\n\n Returns:\n The input with variational noise added according to params.\n \"\"\"\n tensor_name = x.name if not tf.executing_eagerly() else '[eager]'\n if per_step:\n if not p.vn.per_step_vn:\n tf.logging.info(\n 'p.vn.per_step_vn is not set. Not adding per-step vn to ' +\n tensor_name)\n return x\n else:\n if not p.vn.global_vn:\n tf.logging.info('p.vn.global_vn is not set. Not adding global vn to ' +\n tensor_name)\n return x\n\n tf.logging.info(\n f\"Add {'per-step' if per_step else 'global'} vn to {tensor_name}: {p.vn}\")\n\n if p.vn.scale is None:\n raise ValueError('VN scale must be set.')\n\n if p.vn.deterministic:\n noises = DeterministicVN(p, tf.shape(x), mean=0.0, std=1.0)\n noises = tf.cast(noises, x.dtype)\n else:\n if per_step:\n # recurrent.py does not support stateful random ops in cell_fn due to\n # rematerialization.\n raise ValueError('per_step vn requires deterministic=True.')\n noises = tf.random.normal(\n tf.shape(x), stddev=1.0, seed=p.vn.seed, dtype=x.dtype)\n scale = tf.where(GetGlobalStep() >= p.vn.start_step, p.vn.scale, 0.0)\n return x + tf.cast(scale, x.dtype) * noises\n\n\ndef VariationalNoiseParams(scale,\n global_vn=False,\n per_step_vn=False,\n seed=None,\n deterministic=None,\n start_step=0):\n \"\"\"Returns a hyperparams for variational noise.\"\"\"\n if deterministic is None:\n deterministic = cluster_factory.Current().in_unit_test\n p = hyperparams.Params()\n p.Define(\n 'scale', scale,\n 'Std of the variational noise to apply . 
This can be a scalar,'\n      ' or a scalar tensor.')\n  p.Define('global_vn', global_vn,\n           'Adds global variational noise every training step iff True.')\n  p.Define('per_step_vn', per_step_vn,\n           'Adds per-timestep variational noise iff True.')\n  p.Define('seed', seed, 'Random seed used to generate noise.')\n  p.Define(\n      'deterministic', deterministic, 'If true, generate noise using '\n      'stateless random ops that are compatible with TF functional ops.')\n  p.Define(\n      'start_step', start_step,\n      'Step starting from which variational noise is added during training.')\n  return p\n\n\ndef DefaultVN():\n  return VariationalNoiseParams(scale=None)\n\n\n# To disable VN of a layer, we use 1.0 in the first input parameter\n# of the following function because otherwise it is the same as DefaultVN()\n# which will be updated by parent configuration in CopyBaseParams()\ndef DisableVN():\n  return VariationalNoiseParams(1.0, False, False)\n\n\n# Step seed keyed by graph.\n_STEP_SEED_DICT = ThreadLocalDict()\n\n# The step seed will increment by np.prod(_STEP_SEED_INCREMENT.stack)\n_STEP_SEED_INCREMENT = ThreadLocalStack()\n\n\n@contextlib.contextmanager\ndef StepSeedIncrementContext(step):\n  \"\"\"Adds an element to _STEP_SEED_INCREMENT.\"\"\"\n  assert step > 0, ('%s' % step)\n  _STEP_SEED_INCREMENT.stack.append(step)\n  try:\n    yield\n  finally:\n    _STEP_SEED_INCREMENT.stack.pop()\n\n\ndef GetStepSeed():\n  \"\"\"Gets step_seed.\"\"\"\n  key = id(tf.get_default_graph())\n  if key not in _STEP_SEED_DICT.dict:\n    ResetStepSeed()\n  return _STEP_SEED_DICT.dict[key]\n\n\ndef ResetStepSeed(seed=0):\n  \"\"\"Resets step_seed to specified value.\"\"\"\n  key = id(tf.get_default_graph())\n  _STEP_SEED_DICT.dict[key] = tf.convert_to_tensor(seed, dtype=tf.int64)\n\n\ndef MaybeResetStepSeedFromScope():\n  \"\"\"In graph mode, resets step_seed according to the current named scope.\n\n  This is used in graph mode to avoid \"tensor is from a different graph\"\n  errors that happen when we share random seed tensors too much.\n  See b/129159299 for more context.\n\n  Eager mode does not have this problem, so in eager mode we do nothing.\n  \"\"\"\n  if not tf.executing_eagerly():\n    ResetStepSeed(GenerateSeedFromName(tf.no_op(name='new_step_seed').name))\n\n\ndef MaybeResetStepSeed(seed):\n  \"\"\"If we're in graph mode, reset the step seed.\"\"\"\n  if not tf.executing_eagerly():\n    ResetStepSeed(seed)\n\n\ndef GetIncStepSeed():\n  \"\"\"Returns and increments the step_seed.\"\"\"\n  step_seed = GetStepSeed()\n  # TODO(lepikhin): introduce a routine filling a queue of uint32 random seeds\n  # independent of underlying PRNG used by tensorflow.\n  inc = np.prod(_STEP_SEED_INCREMENT.stack)\n  ResetStepSeed(step_seed + inc)\n  return step_seed\n\n\ndef GenerateStepSeedPair(p, op_seed=None):\n  \"\"\"Generates a seed pair for deterministic random operations in ...\n\n  functional loops.\n\n  This function retrieves a unique seed pair on each call, based off the current\n  global step and step seed. The step seed ensures this function returns a\n  unique seed pair on each call: calling this function automatically increments\n  the step seed.
The step seed is automatically reset at the beginning of each\n global step in the model's FProp and works transparently through recurrent.py.\n\n Args:\n p: A hyperparams.Params object, containing keys 'random_seed' and\n 'is_inference'.\n op_seed: An additional operation-level seed to apply.\n\n Returns:\n A size 2 tensor of op seeds to use for stateless_random ops.\n \"\"\"\n seed_dtype = tf.int32 if use_tpu() else tf.int64\n if p.is_inference and p.random_seed is None:\n # Ensure GetIncStepSeed is called even inside the shortcut.\n # This ensures if p.random_seed is set for other ops that use this function\n # that they will get the same seed pair whether or not p.random_seed is set\n # for this specific call.\n GetIncStepSeed()\n # Unlike tf.random*, stateless random ops are completely determined by the\n # passed-in seeds. This means at inference time the same inputs will produce\n # the same outputs, even if the model is supposed to have randomness such as\n # dropout during inference. We inject additional randomness only during\n # inference if the graph is exported with random_seed=None as a workaround.\n return tf.random.uniform([2], maxval=seed_dtype.max, dtype=seed_dtype)\n\n global_step = tf.cast(GetGlobalStep(), seed_dtype)\n step_seed = tf.cast(GetIncStepSeed(), seed_dtype)\n seeds = tf.stack([global_step, step_seed])\n\n if p.random_seed is not None:\n seeds += p.random_seed\n if op_seed is not None:\n op_seed = tf.cast(op_seed, seed_dtype)\n seeds += op_seed\n return seeds\n\n\ndef DeterministicDropout(x, keep_prob, seeds, noise_shape=None, name=None):\n \"\"\"Similar to `tf.nn.dropout()`, but fully deterministic.\n\n Args:\n x: A float Tensor on which to apply dropout.\n keep_prob: A scalar `Tensor` of keep probability.\n seeds: A Tensor of shape [2]. 2 seeds for deterministic random number\n generator.\n noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for\n randomly generated keep/drop flags.\n name: An optional name for this operation.\n\n Returns:\n A Tensor with the same shape as `x`.\n\n Raises:\n InvalidArgumentError: if keep_prob is invalid.\n \"\"\"\n if isinstance(keep_prob, numbers.Real):\n if keep_prob <= 0 or keep_prob > 1:\n raise tf.errors.InvalidArgumentError(\n 'keep_prob must be in range (0, 1]. Value: {}'.format(keep_prob))\n\n if keep_prob == 1:\n return x\n with tf.name_scope(name, 'dropout', [x]) as name:\n if use_tpu():\n seeds = tf.cast(seeds, tf.int32)\n keep_prob = tf.convert_to_tensor(\n keep_prob, dtype=tf.float32, name='keep_prob')\n # uniform in [keep_prob, 1.0 + keep_prob)\n # StatelessRandomUniform op does not support non-float (e.g. bfloat16) dtype\n # and non-int32 seed types.\n noise_shape = noise_shape or GetShape(x)\n random_tensor = keep_prob + tf.random.stateless_uniform(\n noise_shape, seed=seeds, dtype=tf.float32)\n # 0. if [keep_prob, 1.0) and 1. 
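# Illustrative usage sketch (not from the original source). Assuming `p` is a
# layer's hyperparams carrying `random_seed` and `is_inference`:
#
#   seeds = GenerateStepSeedPair(p)
#   y = DeterministicDropout(x, keep_prob=0.9, seeds=seeds)
#
# The same (global_step, step_seed) pair reproduces the same dropout mask.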
if [1.0, 1.0 + keep_prob)\n binary_tensor = tf.floor(random_tensor)\n if x.dtype != tf.float32:\n binary_tensor = tf.cast(binary_tensor, x.dtype)\n keep_prob = tf.cast(keep_prob, dtype=x.dtype)\n result = tf.div(x, keep_prob) * binary_tensor\n result.set_shape(x.get_shape())\n return result\n\n\ndef DeterministicVN(params, noise_shape, mean=0.0, std=1.0, name=None):\n \"\"\"Produces Fully deterministic Gaussian noise from shape, mean and std.\n\n Args:\n params: Nested map of params.\n noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for\n randomly generated Gaussian noise.\n mean: Mean for the Gaussian noise.\n std: Standard deviation for noise.\n name: An optional name for this operation.\n\n Returns:\n A Tensor with the shape noise_shape and type fprop_dtype.\n \"\"\"\n\n with tf.name_scope(name, 'gaussian_noise') as name:\n seeds = GenerateStepSeedPair(params, params.vn.seed)\n random_tensor = mean + (\n std * tf.random.stateless_normal(noise_shape, seed=seeds))\n if FPropDtype(params) != tf.float32:\n random_tensor = tf.cast(random_tensor, FPropDtype(params))\n return random_tensor\n\n\nBATCH_NORM_UPDATES = 'batch_norm_updates'\n\n_BATCH_NORM_UPDATES_DICT = '__batch_norm_update_dict'\n_get_batch_norm_updates_dict = _CollectionGetter(_BATCH_NORM_UPDATES_DICT,\n lambda: {})\n\n\ndef UpdateBatchNormVars(batch_norm_var, batch_norm_stats, decay):\n \"\"\"Update batch normalization moving averages.\"\"\"\n with tf.name_scope(\n 'AssignMovingAvg', values=[\n batch_norm_var,\n batch_norm_stats,\n decay,\n ]) as scope:\n with tf.ops.colocate_with(batch_norm_var):\n decay = tf.convert_to_tensor(\n 1.0 - decay, dtype=batch_norm_var.dtype.base_dtype)\n update_delta = (batch_norm_var - tf.cast(\n batch_norm_stats, batch_norm_var.dtype.base_dtype)) * decay\n has_nan_or_inf = tf.reduce_any(\n tf.math.logical_or(\n tf.math.is_nan(update_delta), tf.math.is_inf(update_delta)))\n update_delta = tf.where(has_nan_or_inf, tf.zeros_like(update_delta),\n update_delta)\n bn_update = tf.assign_sub(batch_norm_var, update_delta, name=scope)\n tf.add_to_collection(BATCH_NORM_UPDATES, bn_update)\n if not tf.executing_eagerly_outside_functions():\n bn_update_dict = _get_batch_norm_updates_dict()\n if bn_update.name in bn_update_dict:\n raise ValueError(f'BN update {bn_update.name} already exists.')\n bn_update_dict[bn_update.name] = (batch_norm_var, batch_norm_stats)\n return bn_update\n\n\ndef FindRelevantBatchNormUpdates(loss, batch_norm_updates):\n \"\"\"Finds and returns a list of relevant batch-normalization updates.\n\n Args:\n loss: The loss that is being optimized for. A tensor or a list of tensors.\n batch_norm_updates: A list of batch normalization updates.\n\n Returns:\n A pair of lists. The first list contains all the batch normalization updates\n that are relevant to the loss being optimized, and the second list contains\n all in batch_norm_updates but not in the first list.\n \"\"\"\n if tf.executing_eagerly_outside_functions():\n return [], []\n dependent_ops_and_tensors = set(FindNeeded(loss))\n relevant_updates = []\n irrelevant_updates = []\n\n bn_update_dict = _get_batch_norm_updates_dict()\n for bn_update in batch_norm_updates:\n assert bn_update.name in bn_update_dict, (\n f'{bn_update.name} is probably not a valid batch normalization update '\n 'op. 
Make sure batch normalization is done through calling'\n ' the py_utils.UpdateBatchNormVars helper routine.')\n bn_stat_name = bn_update_dict[bn_update.name][1].name\n if bn_stat_name in dependent_ops_and_tensors:\n # If a batch normalization stat is computed in the forward pass in\n # computing loss, then the corresponding batch normalization update is\n # relevant. Otherwise, it is not.\n relevant_updates.append(bn_update)\n else:\n irrelevant_updates.append(bn_update)\n return relevant_updates, irrelevant_updates\n\n\n_SAMPLE_STEP_STACK = ThreadLocalStack()\n\n\[email protected]\ndef SampleStep(step):\n \"\"\"A context for a sample step during decoding.\n\n Example usage::\n\n with py_utils.SampleStep(step):\n sample = self.DecodeOneStep()\n\n Args:\n step: the step tensor.\n\n Yields:\n a context manager for the step scope.\n \"\"\"\n try:\n _SAMPLE_STEP_STACK.stack.append(step)\n yield step\n finally:\n _SAMPLE_STEP_STACK.stack.pop()\n\n\ndef _GetSampleStep():\n return _SAMPLE_STEP_STACK.stack[-1] if _SAMPLE_STEP_STACK.stack else None\n\n\ndef AddDebugTensor(tensor, summarize=None, name=None):\n \"\"\"Adds `tensor` to the debug collection.\n\n Prints the tensor if `--print_debug_tensors` is True.\n\n Args:\n tensor: A tensor.\n summarize: Only print this many entries of each tensor. If None, then a\n maximum of 3 elements are printed per input tensor.\n name: An optional name for the tensor.\n\n Returns:\n A Tensor that evaluates to the same value as the input tensor.\n \"\"\"\n if _FromGlobal('print_debug_tensors'):\n step = _GetSampleStep()\n tensors_to_print = ([] if step is None else [step]) + [tensor]\n with tf.name_scope(name) as s:\n tensor = tf.Print(\n tensor,\n tensors_to_print,\n message='DEBUG tensor %s' % s,\n name=name,\n summarize=summarize)\n return tensor\n\n\ndef ArgMax(inputs):\n \"\"\"tf.argmax wrapper.\n\n Args:\n inputs: A tensor, whose last dimension is being reduced on.\n\n Returns:\n A tensor of rank tf.rank(logits)-1. 
If i == ret[indices],\n logits[indices, i] is the maximum among logits[indices, :].\n \"\"\"\n if use_tpu():\n return tf.argmax(inputs, axis=-1, output_type=tf.int32)\n else:\n return tf.argmax(inputs, axis=-1)\n\n\ndef _EnsureMatrixShape(x):\n if x.shape.ndims is None:\n x.set_shape([None, None])\n else:\n assert x.shape.ndims == 2\n return x\n\n\ndef Matmul(x, y, *args, **kwargs):\n \"\"\"tf.matmul wrapper expecting x and y are actually matrices.\"\"\"\n x = _EnsureMatrixShape(x)\n y = _EnsureMatrixShape(y)\n return tf.matmul(x, y, *args, **kwargs)\n\n\ndef clip_by_value(t, clip_value_min, clip_value_max, name=None): # pylint: disable=invalid-name\n if t.dtype.is_complex:\n return tf.complex(\n tf.clip_by_value(\n tf.math.real(t), clip_value_min, clip_value_max, '%s_real' % name),\n tf.clip_by_value(\n tf.math.imag(t), clip_value_min, clip_value_max, '%s_imag' % name))\n return tf.clip_by_value(t, clip_value_min, clip_value_max, name)\n\n\ndef _TransformAndSum(tensor_list, transform):\n with tf.name_scope('TransformAndSum'):\n sum_transform = []\n for t in tensor_list:\n with tf.device(t.device):\n if isinstance(t, tf.IndexedSlices):\n sum_transform += [tf.reduce_sum(transform(t.values))]\n else:\n sum_transform += [tf.reduce_sum(transform(t))]\n return tf.add_n(sum_transform)\n\n\ndef SumSquared(tensor_list):\n return _TransformAndSum(tensor_list, lambda v: v**2)\n\n\ndef SumAbs(tensor_list):\n return _TransformAndSum(tensor_list, tf.abs)\n\n\ndef ReduceRms(x: tf.Tensor) -> tf.Tensor:\n \"\"\"Computes root mean square of tensor x with numerical stability.\"\"\"\n if not x.shape.is_fully_defined():\n raise ValueError('Shape of x must be fully defined.')\n\n if not x.shape.as_list():\n return x\n\n denom = functools.reduce((lambda x, y: x * y), x.shape.as_list())\n if denom <= 1e8:\n return tf.math.sqrt(tf.math.reduce_mean(tf.math.square(x)))\n\n tf.logging.info('reduce_rms %s denom=%d', x, denom)\n sum_square_x = tf.math.reduce_sum(tf.math.reduce_sum(tf.math.square(x), -1))\n avg_square_x = sum_square_x / tf.constant(denom, dtype=sum_square_x.dtype)\n return tf.math.sqrt(avg_square_x)\n\n\ndef PiecewiseConstant(x_in, boundaries, values, vdtype):\n \"\"\"Returns the piecewise value of x_in.\"\"\"\n x_in = tf.cast(tf.convert_to_tensor(x_in), tf.float32)\n assert len(values) == len(boundaries) + 1\n assert sorted(boundaries) == list(boundaries)\n bs = tf.convert_to_tensor(boundaries, dtype=tf.float32)\n vs = tf.convert_to_tensor(values, dtype=vdtype)\n # The following is equivalent to 'return vs[index]'.\n index = tf.reduce_sum(tf.cast(tf.greater_equal(x_in, bs), tf.int32))\n one_hot_vec = tf.one_hot(\n tf.expand_dims(index, 0), depth=len(values), dtype=vdtype)\n return Matmul(tf.reshape(vs, (1, -1)), tf.transpose(one_hot_vec))[0][0]\n\n\ndef PadSequenceDimension(x, length, pad_val, shape=None, axis=1):\n \"\"\"Pads x to `length` using `pad_val` along the axis dim.\n\n Assumes `x` is a tensor with rank >= 2, and it only pads `x` to `length`\n along the axis dim. Explicitly sets the returned tensor shape to `shape` if\n given. Raises runtime errors if x.shape[axis] > length or\n x.shape[i] != shape[i] where i != axis.\n\n Args:\n x: the tensor to be padded with axis dimension being the time. 
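
    Example (illustrative sketch, not part of the original docstring)::

      y = PadSequenceDimension(tf.ones([2, 3, 4]), length=5, pad_val=0.)
      # y has shape [2, 5, 4]; y[:, :3, :] equals the input.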
E.g., x\n usually has shape [batch, seq_len, ...], when axis=1.\n length: an int to specify the length to pad x to.\n pad_val: an int or float used to pad x.\n shape: an int array specifying the shape of the padded tensor if specified.\n axis: The dimension that x will be padded, default to 1.\n\n Returns:\n The padded tensor with shape [batch, seq_len, ...], where\n ret[:, :seq_len, ...] == x, when axis=1, and similarly for other axes.\n \"\"\"\n if x.shape.ndims is not None:\n rank = x.shape.ndims\n assert rank >= 2\n slen = GetShape(x, rank)[axis]\n pad_len = length - slen\n pad = [[0, 0] for _ in range(rank)]\n pad[axis][1] = pad_len\n else:\n rank = tf.rank(x)\n with tf.control_dependencies([assert_greater_equal(rank, 2)]):\n slen = tf.shape(x)[axis]\n pad_len = length - slen\n pad = tf.scatter_nd([[axis, 1]], [pad_len], [rank, 2])\n x = tf.pad(x, pad, constant_values=pad_val)\n if x.shape.ndims is not None and isinstance(length, int):\n static_shape = x.shape.as_list()\n static_shape[axis] = length\n x.set_shape(static_shape)\n\n if shape:\n if not isinstance(shape, (list, tuple)):\n raise TypeError('Shape must be a list or tuple.')\n x = HasRank(x, len(shape))\n x = tf.ensure_shape(x, shape)\n return x\n\n\ndef PadSequenceTo(xs, padding, length, pad_val):\n \"\"\"Pads `xs` and `padding` to `length` using `pad_val` along the 2nd dim.\n\n Pads `xs` to `length` using `pad_val`, and `padding` using 1.\n Raise error if `x.shape[:2]` and `padding.shape` are not the same.\n\n Args:\n xs: A Tensor or a list of Tensors of shape [batch, seqlen] or [batch,\n seqlen, ...].\n padding: A 0/1 Tensor of shape [batch, seqlen]. 1 is for padded locations.\n length: A Python int, the length to pad to.\n pad_val: A Python numeric, used for padding x.\n\n Returns:\n A tuple of padded xs and padding.\n \"\"\"\n if not isinstance(xs, (list, tuple)):\n new_xs = [xs]\n else:\n new_xs = xs\n\n res = []\n for x in new_xs:\n batch, slen = GetShape(x, 2)\n\n padding = HasRank(padding, 2)\n padding = HasShape(padding, [batch, slen])\n\n new_x = PadSequenceDimension(x, length, pad_val)\n res.append(new_x)\n padding = PadSequenceDimension(padding, length, tf.cast(1, padding.dtype))\n\n if not isinstance(xs, (list, tuple)):\n assert len(res) == 1\n return res[0], padding\n else:\n return tuple(res), padding\n\n\ndef ApplyPadding(padding, x, padded=None, use_select=True, ensure_shape=True):\n \"\"\"Applies padding to a tensor.\n\n This is preferable to using arithmetic means for masking out padded values\n such as::\n\n # Equiv to ApplyPadding(padding, x)\n x *= 1.0 - padding\n # Equiv to ApplyPadding(padding, new, old)\n new = old * padding + new * (1 - padding)\n\n Aside from just being easier to read and reason about, using this function\n is friendly to quantized representations because it does not mix arithmetic\n on the padding values with the values in the tensor being padded (which can\n have a very different range than the 0..1 padding tensor).\n\n In addition, this works around issues in quantized schemes where we are\n guaranteed to have an exact 0 but not necessarily any other number (i.e. 1).\n\n Args:\n padding: Tensor of padding values where 0 == keep and 1 == pad.\n x: Tensor to apply padding to.\n padded: Optional. Values to include for padded elements. Defaults to zeros.\n Must have a shape broadcastable to 'x' if specified.\n use_select: Controls whether padding is applied with a select-mask\n (True/default) or arithmetically (False). 
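
    Example (illustrative sketch, not part of the original docstring)::

      # padding = [[0., 0., 1.]], x = [[1., 2., 3.]]
      ApplyPadding(padding, x)  # -> [[1., 2., 0.]]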
Some platforms have a\n sensitivity to one or the other and this is used to work around such\n issues.\n ensure_shape: If true, ensures the shape of the result is the same as of x.\n\n Returns:\n A tensor with the same shape as x with padded values masked.\n \"\"\"\n padding = with_dependencies([\n Assert(\n tf.reduce_all(\n tf.math.logical_or(\n tf.equal(padding, tf.zeros([], padding.dtype)),\n tf.equal(padding, tf.ones([], padding.dtype)))), [padding])\n ], padding)\n if use_select:\n if padded is None:\n padded = tf.zeros([], x.dtype)\n if padding.dtype != tf.bool:\n padding = padding > tf.zeros([], padding.dtype)\n result = tf.where_v2(padding, padded, x)\n else:\n result = x * tf.cast(1.0 - tf.cast(padding, tf.float32), x.dtype)\n if padded is not None:\n result += padded * tf.cast(padding, padded.dtype)\n if ensure_shape:\n result = tf.ensure_shape(result, x.shape)\n return result\n\n\ndef LengthsFromPaddings(paddings):\n \"\"\"Computes lengths of each sequence in a batch, ignoring trailing padding.\n\n Note the following isn't guaranteed due to leading paddings.\n PaddingsFromLengths(LengthsFromPaddings(x)) == x\n\n Args:\n paddings: a tensor with shape [batch, length].\n\n Returns:\n lengths tensor shaped [batch] containing the unpadded length of each\n sequence in the batch.\n \"\"\"\n paddings = HasRank(paddings, 2)\n paddings = tf.cast(paddings, tf.int32)\n # Find the last unpadded value.\n # Cannot just use tf.reduce_sum because there might be leading paddings.\n # Everything after the last unpadded value has 1.0 - paddings == 0.0, so in\n # the cumsum below they will have the same value.\n cumsum = tf.cumsum(1 - paddings, axis=1)\n same_as_last_element = tf.equal(cumsum, cumsum[:, -1:])\n # Counting the number of elements with the same value gives us num_padded + 1\n # and so counting the number that differs gives us num_padded - 1.\n length = tf.reduce_sum(\n 1 - tf.cast(same_as_last_element, tf.int32), axis=1) + 1\n # Special case for all 0 paddings.\n all_zero_paddings = tf.equal(tf.reduce_sum(1 - paddings, axis=1), 0)\n return tf.where(all_zero_paddings, tf.zeros_like(length), length)\n\n\ndef PaddingsFromLengths(lengths, maxlen=None):\n \"\"\"Computes paddings Tensor from lengths.\n\n Note the following isn't guaranteed due to leading paddings.\n PaddingsFromLengths(LengthsFromPaddings(x)) == x.\n\n This method does not generate leading paddings.\n\n Args:\n lengths: A int32 Tensor of shape [B].\n maxlen: None or a Python int or a scalar Tensor.\n\n Returns:\n A 0/1 valued Tensor of shape [B, maxlen or ?] where 1s are padded positions.\n \"\"\"\n lengths = HasRank(lengths, 1)\n if maxlen is not None:\n lengths = with_dependencies(\n [assert_less_equal(tf.cast(tf.reduce_max(lengths), tf.int32), maxlen)],\n lengths)\n\n return 1. - tf.sequence_mask(lengths, maxlen=maxlen, dtype=tf.float32)\n\n\ndef TrimTrailingPaddings(inputs, paddings):\n \"\"\"Trims trailing paddings from inputs.\n\n Since the number of dimensions is not fixed, this will not work on TPU.\n\n Args:\n inputs: a tensor with shape [batch, length, ...].\n paddings: a tensor with shape [batch, length].\n\n Returns:\n Trimmed inputs and paddings. 
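
    Example (illustrative sketch for the padding helpers above, not part of
    the original docstring)::

      lens = LengthsFromPaddings(tf.constant([[0., 0., 1.]]))  # [2]
      pads = PaddingsFromLengths(lens, maxlen=3)               # [[0., 0., 1.]]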
For compatibility reasons, the trimmed tensors\n will always have length at least 1.\n \"\"\"\n paddings = HasRank(paddings, 2)\n max_length = tf.maximum(tf.reduce_max(LengthsFromPaddings(paddings)), 1)\n output_shape = tf.shape(inputs)\n output_shape = tf.concat([[output_shape[0], max_length], output_shape[2:]],\n axis=0)\n outputs = tf.slice(inputs, tf.zeros_like(output_shape), output_shape)\n out_paddings = tf.slice(paddings, [0, 0],\n tf.stack([output_shape[0], max_length]))\n return outputs, out_paddings\n\n\ndef ReversePaddedSequence(inputs, paddings):\n \"\"\"Reverse inputs based on paddings.\n\n Only reverse the unpadded portion of `inputs`. It assumes inputs are only\n padded in the end.\n\n Args:\n inputs: a tensor of [seq_length, batch_size, num_input_nodes].\n paddings: a tensor of float32/float64 zero or one of shape [seq_length,\n batch_size, 1].\n\n Returns:\n A reversed tensor of the same shape as `inputs`.\n \"\"\"\n inversed_paddings = 1.0 - tf.squeeze(paddings, 2)\n inputs_length = tf.cast(\n tf.math.rint(tf.reduce_sum(inversed_paddings, axis=0)), tf.int32)\n return tf.reverse_sequence(inputs, inputs_length, seq_axis=0, batch_axis=1)\n\n\ndef ConcatenatePaddedSequences(input0, input1, padding0, padding1, seq_dim=1):\n \"\"\"Concatenates input sequences with varying lengths as defined by paddings.\n\n This is a helper function for concatenating 2 batches of input sequences,\n where each example in the batch can have different lengths, as defined by\n the corresponding paddings. To concatenate correctly, it makes use of\n tf.reverse_sequence to partially reverse the sequences before\n concatenating them together.\n\n NOTE: We assume that the tensors have no leading paddings.\n\n Args:\n input0: A tensor of size [batch, max_length, ...] or [max_length, batch,\n ...] depending on the value set for axis.\n input1: A tensor of size [batch, max_length, ...] or [max_length, batch,\n ...] depending on the value set for axis.\n padding0: A Tensor of size [batch, max_length] or [max_length, batch]\n corresponding to the padding for input0.\n padding1: A Tensor of size [batch, max_length] or [max_length, batch]\n corresponding to the padding for input1.\n seq_dim: int, the time axis along which the tensors will be concatenated.\n Should be 0 or 1. 
Assumes that batch_dim is 1 - seq_dim.\n\n Returns:\n The concatenation of input0 and input1, and the corresponding padding.\n\n Raises:\n tf.errors.InvalidArgumentError when seq_dim is not 0 or 1.\n \"\"\"\n if seq_dim != 0 and seq_dim != 1:\n raise tf.errors.InvalidArgumentError(None, None, 'seq_dim must be 0 or 1.')\n batch_dim = 1 - seq_dim\n # inpu0 and input1 should have the same batch size and same rank.\n input0 = with_dependencies([\n assert_equal(GetShape(input0)[batch_dim],\n GetShape(input1)[batch_dim]),\n assert_equal(GetRank(input0), GetRank(input1))\n ], input0)\n\n batch_size = GetShape(padding0)[batch_dim]\n # batch dimension of inputs and paddings should match.\n input0 = with_dependencies([\n assert_equal(GetShape(input0)[batch_dim], batch_size),\n assert_equal(GetShape(padding1)[batch_dim], batch_size)\n ], input0)\n input0_seq_dim = tf.cast(\n tf.tile([tf.shape(padding0)[seq_dim]], [batch_size]), dtype=tf.int32)\n input1_seq_dim = tf.cast(\n tf.tile([tf.shape(padding1)[seq_dim]], [batch_size]), dtype=tf.int32)\n # LengthsFromPaddings assumes that paddings is of size [batch, max_length].\n if seq_dim == 1:\n seq_length0 = LengthsFromPaddings(padding0)\n seq_length1 = LengthsFromPaddings(padding1)\n else:\n seq_length0 = LengthsFromPaddings(tf.transpose(padding0))\n seq_length1 = LengthsFromPaddings(tf.transpose(padding1))\n # We assume that the tensors have no leading paddings.\n # TODO(arunnt): Concatenate tensors with leading paddings correctly.\n seq_length0 = with_dependencies([\n assert_equal(\n seq_length0,\n tf.cast(tf.reduce_sum(1.0 - padding0, seq_dim), dtype=tf.int32))\n ], seq_length0)\n seq_length1 = with_dependencies([\n assert_equal(\n seq_length1,\n tf.cast(tf.reduce_sum(1.0 - padding1, seq_dim), dtype=tf.int32))\n ], seq_length1)\n # Concatenate input sequences.\n reversed_input0 = tf.reverse_sequence(\n input0, seq_length0, seq_axis=seq_dim, batch_axis=batch_dim)\n reversed_input1 = tf.reverse_sequence(\n input1, input1_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)\n reversed_concat = tf.concat([reversed_input1, reversed_input0], axis=seq_dim)\n concat_inputs = tf.reverse_sequence(\n reversed_concat,\n seq_length0 + input1_seq_dim,\n seq_axis=seq_dim,\n batch_axis=batch_dim)\n # Concatenate paddings. 
Note that paddings are always a Tensor of 0s and 1s,\n # so, unlike the inputs, we don't have to reverse padding1, we can simply\n # concatenate reversed padding0 and padding1.\n reversed_padding0 = tf.reverse_sequence(\n padding0, input0_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)\n reversed_concat_padding = tf.concat([reversed_padding0, padding1],\n axis=seq_dim)\n concat_paddings = tf.reverse_sequence(\n reversed_concat_padding,\n input0_seq_dim + seq_length1,\n seq_axis=seq_dim,\n batch_axis=batch_dim)\n return concat_inputs, concat_paddings\n\n\ndef ShiftLeft(tensor, shift_size, pad_val=0, axis=1):\n \"\"\"Shifts the values in a tensor to the left along the axis dimension.\n\n The first shift_size values are dropped, and the tensor is padded on the\n right with pad_val.\n\n Args:\n tensor: the input tensor with the axis dim being time.\n shift_size: the number of frames >= 0 to shift.\n pad_val: the value to pad on the right of the tensor.\n axis: The dimension along which the tensor will be shifted, default to 1.\n\n Returns:\n A left shifted tensor on dimension axis.\n \"\"\"\n rank = tensor.shape.rank\n with tf.control_dependencies(\n [assert_greater_equal(rank, 2),\n assert_greater_equal(shift_size, 0)]):\n time = GetShape(tensor)[axis]\n begin = tf.scatter_nd([[axis]], [shift_size], [rank])\n return PadSequenceDimension(\n tf.slice(tensor, begin, size=[-1] * rank), time, pad_val, axis=axis)\n\n\ndef CreateIdsAndLabels(ids, paddings, sos_id=1, eos_id=2):\n \"\"\"Creates ids and labels to be used as decoder targets.\n\n Args:\n ids: int Tensor of shape [batch, maxlen], without sos or eos.\n paddings: float Tensor of shape [batch, maxlen].\n sos_id: ID for the sos special token.\n eos_id: ID for the eos special token.\n\n Returns:\n A NestedMap with:\n - ids: int Tensor of shape [batch, maxlen + 1], with sos prepended.\n - labels: int Tensor of shape [batch, maxlen + 1], with eos appended.\n - paddings: float Tensor of shape [batch, maxlen + 1].\n - weights: float Tensor of shape [batch, maxlen + 1].\n \"\"\"\n ids = tf.where(\n tf.equal(paddings, 0.0), ids, tf.broadcast_to([[eos_id]], GetShape(ids)))\n targets = NestedMap()\n targets.ids = tf.pad(ids, [[0, 0], [1, 0]], constant_values=sos_id)\n targets.labels = tf.pad(ids, [[0, 0], [0, 1]], constant_values=eos_id)\n targets.paddings = tf.pad(paddings, [[0, 0], [1, 0]])\n targets.weights = 1.0 - targets.paddings\n return targets\n\n\ndef Retry(*args, **kwargs):\n return retry.Retry(*args, **kwargs)\n\n\n# FailedPreconditionError: variables are not initialized.\n# AbortedError: processes restarts.\n# UnavailableError: Bad hardware status: 0x1\ntransient_tf_errors = (tf.errors.FailedPreconditionError,\n tf.errors.AbortedError, tf.errors.UnavailableError)\n\n\ndef RetryOnTransientTfError(*args, **kwargs):\n return Retry(transient_tf_errors, *args, **kwargs)\n\n\ndef PadOrTrimTo(x, shape, pad_val=0, pad_after_contents=True):\n \"\"\"Pad and slice x to the given shape.\n\n Args:\n x: A tensor.\n shape: The shape of the returned tensor.\n pad_val: An int or float used to pad x.\n pad_after_contents: Whether to pad and trim after the original contents of\n each dimension.\n\n Returns:\n 'x' is padded with pad_val and sliced so that the result has the given\n shape.\n\n Raises:\n ValueError: if shape is a tf.TensorShape and not fully defined.\n \"\"\"\n if isinstance(shape, (list, tuple)):\n expected_rank = len(shape)\n elif isinstance(shape, tf.TensorShape):\n if not shape.is_fully_defined():\n raise ValueError('shape %s padding %s 
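# Illustrative sketch (not from the original source): each dimension is
# independently padded with `pad_val` or sliced down, e.g.
#
#   PadOrTrimTo(tf.ones([2, 3]), [4, 2])  # -> shape [4, 2]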
must be fully defined.' %\n (shape, x))\n expected_rank = shape.rank\n else:\n shape = HasRank(shape, 1)\n expected_rank = tf.size(shape)\n x = HasRank(x, expected_rank)\n\n pad = shape - tf.minimum(tf.shape(x), shape)\n zeros = tf.zeros_like(pad)\n if pad_after_contents:\n # If dim_i is less than shape[i], pads after contents.\n paddings = tf.stack([zeros, pad], axis=1)\n # If dim_i is larger than shape[i], we slice [0:shape[i]] for dim_i.\n slice_begin = zeros\n else:\n # If dim_i is less than shape[i], pads before contents.\n paddings = tf.stack([pad, zeros], axis=1)\n # If dim-i is larger than shape[i], we slice [dim_i - shape[i]:dim_i]\n # for dim_i.\n slice_begin = tf.shape(x) + pad - shape\n\n x = tf.pad(x, paddings, constant_values=pad_val)\n x = tf.slice(x, slice_begin, shape)\n\n return tf.reshape(x, shape)\n\n\ndef RepeatDim(tensor, multiple, axis):\n \"\"\"Copies elements in tensor's axis \"multiple\" times, like np.repeat.\"\"\"\n # x = [[1, 2, 3], [4, 5, 6]]\n # RepeatDim(x, multiple=2, axis=1) gives:\n # [[1, 1, 2, 2, 3, 3]. [4, 4, 5, 5, 6, 6]]\n # As a comparison tf.tile(x, multiples=[1, 2]) gives:\\\n # [[1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, 6]]\n\n if multiple == 1:\n return tensor\n t_shape = tf.shape(tensor)\n tensor_dims = tf.concat(\n [t_shape[:axis], [t_shape[axis] * multiple], t_shape[axis + 1:]], 0)\n multiple_dims = tf.concat([\n tf.fill([axis + 1], 1), [multiple],\n tf.fill([tf.rank(tensor) - axis - 1], 1)\n ], 0)\n return tf.reshape(\n tf.tile(tf.expand_dims(tensor, axis + 1), multiple_dims), tensor_dims)\n\n\ndef StackTensorsRecursively(values):\n \"\"\"Recursively stacks Tensors in a list of `.NestedMap`.\n\n Args:\n values: a list of `.NestedMap` or Tensors to stacks.\n\n Returns:\n A `.NestedMap` with stacked values or a stacked Tensor.\n \"\"\"\n flatten = [w.Flatten() for w in values]\n stacked = []\n for i in range(len(flatten[0])):\n stacked += [tf.stack([flatten[j][i] for j in range(len(flatten))])]\n ret = values[0].Pack(stacked)\n return ret\n\n\ndef MixByWeight(inputs, weights, seed=None):\n \"\"\"Returns a weighted random choice and bprop type from the give inputs.\n\n Args:\n inputs: a list of callables, where each callable returns a tf.Tensor or a\n nested structure containing tf.Tensor. Function return types must be\n consistent across elements. The tf.Operation to compute the result tensor\n will only be invoked for one input at a time. For example, if each fn\n represents an input record stream, a record will be drawn only from a\n selected stream while the other streams will remain unchanged.\n weights: a 1D tensor of float > 0 of the same length as inputs.\n seed: random seed.\n\n Returns:\n A probabilistic sample from the inputs proportional to the weights. 
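
    Example (illustrative sketch, not part of the original docstring)::

      rec, src = MixByWeight(
          [lambda: tf.constant(1), lambda: tf.constant(2)], weights=[0.7, 0.3])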
The\n return type will be the same as return type of individual 'fn' from the\n inputs.\n A one-hot vector of the source selected.\n \"\"\"\n weights = tf.convert_to_tensor(weights, dtype=tf.float32)\n weights = with_dependencies([\n assert_equal(tf.shape(weights), [len(inputs)]),\n assert_greater_equal(tf.reduce_min(weights), 0.0)\n ], weights)\n\n lower = tf.cumsum(weights, exclusive=True)\n upper = tf.cumsum(weights, exclusive=False)\n r = tf.random.uniform(shape=[], maxval=upper[-1], seed=seed)\n return_input = tf.case(\n [(tf.math.logical_and(lower[i] <= r, r < upper[i]), inputs[i])\n for i in range(len(inputs))],\n exclusive=True)\n selected_index = tf.case(\n [(tf.math.logical_and(lower[i] <= r, r < upper[i]), lambda i=i: i)\n for i in range(len(inputs))],\n exclusive=True)\n bprop_index = tf.one_hot(selected_index, len(inputs), dtype=tf.float32)\n return return_input, bprop_index\n\n\ndef CheckShapes(shapes):\n \"\"\"Asserts that shapes is a tuple of NestedMap or tshape.Shape.\"\"\"\n assert isinstance(shapes, tuple), str(shapes)\n for s in shapes:\n if isinstance(s, NestedMap):\n assert all([isinstance(t, tshape.Shape) for t in Flatten(s)\n ]), '{} contains non-tensor value.'.format(s)\n else:\n assert isinstance(s, tshape.Shape), '{}: {}'.format(type(s), s)\n\n\ndef FPropDtype(params):\n return params.fprop_dtype if params.fprop_dtype is not None else params.dtype\n\n\ndef UpdateFpropDtype(params, fprop_dtype):\n \"\"\"Recursively update the fprop_dtype of the Params.\"\"\"\n # Handle the case when the input \"params\" is not an instance of hyperparams\n # For example, when UpdateDtype is called recursively for all the items in\n # the \"sub\" list of SequentialLayer (see 1st elif below)\n if not isinstance(params, hyperparams.Params):\n return\n\n for key, val in params.IterParams():\n if isinstance(val, hyperparams.Params):\n UpdateFpropDtype(val, fprop_dtype)\n elif isinstance(val, (list, tuple)):\n for item in val:\n UpdateFpropDtype(item, fprop_dtype)\n elif key == 'fprop_dtype':\n params.fprop_dtype = fprop_dtype\n\n\ndef UpdateDtype(params, dtype):\n \"\"\"Recursively update the dtype of the Params.\"\"\"\n # Handle the case when the input \"params\" is not an instance of hyperparams\n # For example, when UpdateDtype is called recursively for all the items in\n # the \"sub\" list of SequentialLayer (see 1st elif below)\n if not isinstance(params, hyperparams.Params):\n return\n\n for key, val in params.IterParams():\n if isinstance(val, hyperparams.Params):\n UpdateDtype(val, dtype)\n elif isinstance(val, (list, tuple)):\n for item in val:\n UpdateDtype(item, dtype)\n elif key == 'dtype':\n params.dtype = dtype\n\n\ndef NameScopeDecorator(name_scope):\n \"\"\"Decorates a python function to introduce a tf.name_scope.\n\n Example::\n\n @py_utils.NameScopeDecorator('foobar')\n def MyFoobarMethod(self):\n # ... 
Do TF things\n\n Args:\n name_scope: The name scope to introduce.\n\n Returns:\n A function decorator.\n \"\"\"\n\n def Decorator(f):\n\n def Wrapped(*args, **kwargs):\n with tf.name_scope(name_scope):\n return f(*args, **kwargs)\n\n return Wrapped\n\n return Decorator\n\n\ndef SequencesToDebugStrings(ids, lens, summarize=5):\n \"\"\"Returns debug strings for the given sequences.\n\n Args:\n ids: int32 of [batch, len].\n lens: int32 of [batch].\n summarize: number of ids to summarize per sequence.\n\n Returns:\n A string tensor of [batch].\n \"\"\"\n num_seqs = tf.shape(lens)[0]\n\n def _Body(i, result):\n line = tf.strings.format('{}', ids[i, :lens[i]], summarize=summarize)\n return i + 1, tf.concat([result, tf.reshape(line, [1])], axis=0)\n\n i0 = tf.zeros(shape=[], dtype=tf.int32)\n result0 = tf.constant('', shape=[0], dtype=tf.string)\n _, strs = tf.while_loop(\n lambda i, result: i < num_seqs,\n _Body, (i0, result0),\n shape_invariants=(i0.shape, tf.TensorShape([None])))\n return strs\n\n\n# TODO(jamesqin): follow suggestions in\n# b/167460492#comment16\ndef RematerializeFn(fn, *xs):\n \"\"\"Calls fn and rematerializes fn in the backward pass.\n\n `fn(*xs) -> ys`, where xs and ys can be a single tensor or a tuple of tensors.\n\n Args:\n fn: A python function to be rematerialized in the backprop pass.\n *xs: A single tensor or a list/tuple of tensors. `xs` are input args to the\n fn function.\n\n Returns:\n `fn(*xs)`\n \"\"\"\n initial_step_seed = GetStepSeed()\n final_step_seed = MaybeGenerateSeedFromScope()\n\n def Backward(fwd_xs, fwd_ys, d_fwd_ys):\n \"\"\"The backward function that rematerializes forward outputs.\"\"\"\n del fwd_ys\n always_true = tf.random.uniform([]) < 2.0\n # Alternatively, can do this:\n # tf.where(tf.math.is_nan(x),\n # tf.constant(float('nan'), dtype=x.dtype) * tf.ones_like(x),\n # x)\n bak_xs = [tf.where(always_true, x, tf.zeros_like(x)) for x in fwd_xs.xs]\n for dst, src in zip(bak_xs, xs):\n dst.set_shape(src.shape)\n ResetStepSeed(initial_step_seed)\n ys = fn(*bak_xs)\n MaybeResetStepSeed(final_step_seed)\n dxs = tf.gradients(ys, bak_xs, grad_ys=d_fwd_ys)\n dxs_final = []\n for dx, x in zip(dxs, bak_xs):\n if dx is None:\n dxs_final.append(tf.zeros_like(x))\n else:\n dxs_final.append(dx)\n assert len(dxs_final) == len(bak_xs)\n return NestedMap(\n initial_step_seed=tf.zeros_like(initial_step_seed), xs=dxs_final)\n\n ys_shapes = []\n\n # TODO(huangyp, yonghui): Check Forward doesn't use any stateful random ops.\n def Forward(fwd_xs):\n \"\"\"Forward function plus sanity checks.\"\"\"\n for dst, src in zip(fwd_xs.xs, xs):\n dst.set_shape(src.shape)\n ResetStepSeed(fwd_xs.initial_step_seed)\n ys = fn(*fwd_xs.xs)\n # Some sanity check.\n assert not GetExtraInputs()\n assert not GetExtraArgs()\n assert not GetExtraVars()\n if isinstance(ys, tuple):\n for y in ys:\n assert isinstance(y, tf.Tensor)\n ys_shapes.append(y.shape)\n else:\n assert isinstance(ys, tf.Tensor)\n ys_shapes.append(ys.shape)\n return ys\n\n ys = CallDefun(\n Forward,\n NestedMap(initial_step_seed=initial_step_seed, xs=xs),\n bak=Backward)\n if isinstance(ys, tuple):\n for y, s in zip(ys, ys_shapes):\n y.set_shape(s)\n else:\n ys.set_shape(ys_shapes[0])\n # TODO(b/129159299): The ResetStepSeed below is needed to work around this\n # bug, which is a problem with global tensors being shared by different\n # inference graphs. 
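# Illustrative usage sketch (not from the original source); `x1` and `x2` are
# assumed input tensors:
#
#   y = RematerializeFn(lambda a, b: tf.tanh(a + b), x1, x2)
#
# The forward outputs are recomputed from the inputs during backprop instead
# of being stored, trading compute for activation memory.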
It should be replaced with the new step seed value\n # returned from the Forward function when the bug is fixed.\n MaybeResetStepSeed(final_step_seed)\n return ys\n\n\n# A set of names of stateful random number generator ops.\n# See tensorflow/core/ops/random_ops.cc\n_STATEFUL_RANDOM_OPS = frozenset({\n # pyformat: disable\n 'RandomUniform',\n 'RandomUniformInt',\n 'RandomStandardNormal',\n 'ParameterizedTruncatedNormal',\n 'TruncatedNormal',\n 'RandomShuffle',\n 'Multinomial',\n 'RandomGamma',\n 'RandomPoisson',\n 'RandomPoissonV2',\n # pyformat: enable\n})\n\n\ndef StatefulRandomOpsInDefun(func, graph=None):\n \"\"\"Checks whether the Defun depends on stateful random number ops.\n\n Stateful random number generator ops should be avoid in Recurrent() call.\n Otherwise, these ops produce inconsistent values between FProp and BProp.\n\n Args:\n func: a _DefinedFunction or ConcreteFunction to check.\n graph: a Graph. Set None to use the default graph.\n\n Returns:\n A list of names of the stateful random ops.\n\n Raises:\n InvalidArgumentError: if the input func/graph is invalid.\n \"\"\"\n if graph is None:\n graph = tf.get_default_graph()\n func.add_to_graph(graph)\n graph_def = graph.as_graph_def()\n\n # A dict from function name to FunctionDef.\n func_defs = {x.signature.name: x for x in graph_def.library.function}\n\n if isinstance(func, function._DefinedFunction): # pylint: disable=protected-access\n if func.definition.signature.name not in func_defs:\n raise tf.errors.InvalidArgumentError(\n None, None, 'Defun {} is not in the graph .'.format(\n func.definition.signature.name))\n nodes = py_collections.deque(func.definition.node_def)\n else:\n nodes = py_collections.deque(func.function_def.node_def)\n\n stateful_ops = []\n\n # Recursively search for stateful random op.\n while nodes:\n node = nodes.pop()\n assert isinstance(node, node_def_pb2.NodeDef), node\n\n if node.op in _STATEFUL_RANDOM_OPS:\n stateful_ops.append(node.name)\n continue\n\n def _AddDefunNodes(func_name):\n \"\"\"If the given func_name is a Defun, add its sub-nodes into nodes.\"\"\"\n if func_name in func_defs:\n nodes.extend(func_defs[func_name].node_def)\n\n # For functional.{While|For|If} ops, add their Defun attr into search.\n if node.op == 'While':\n _AddDefunNodes(node.attr['body'].func.name)\n _AddDefunNodes(node.attr['cond'].func.name)\n elif node.op == 'For':\n _AddDefunNodes(node.attr['body'].func.name)\n elif node.op == 'If':\n _AddDefunNodes(node.attr['then_branch'].func.name)\n _AddDefunNodes(node.attr['else_branch'].func.name)\n elif node.op == 'StatefulPartitionedCall':\n _AddDefunNodes(node.attr['f'].func.name)\n elif node.op != 'PartitionedCall':\n # For other op, check whether itself is a Defun op.\n _AddDefunNodes(node.op)\n\n return stateful_ops\n\n\ndef ToPlaceholders(nmap, dtype=None):\n \"\"\"Converts every Tensor in nmap to a placeholder.\"\"\"\n\n def _ToPlacerholder(x):\n shape = [None for _ in x.shape[:-1]] + [x.shape[-1]]\n return tf.placeholder(dtype=dtype or x.dtype, shape=shape)\n\n return nmap.Transform(_ToPlacerholder)\n\n\ndef Softmax(logits, axis=None, extra_logit=None, name=None):\n \"\"\"Softmax with extra_logits, might be useful for large xformer LM.\"\"\"\n if extra_logit is None:\n return tf.nn.softmax(logits, axis=axis, name=name)\n\n axis = -1 if axis is None else axis\n\n def ReduceLogSumExp(x):\n max_logit = tf.math.reduce_max(\n tf.stop_gradient(x), axis=axis, keepdims=True)\n\n base_logit = tf.math.maximum(max_logit, extra_logit)\n x -= base_logit\n exp_x = 
tf.math.exp(x)\n sum_exp_x = tf.math.reduce_sum(exp_x, axis=axis, keepdims=True)\n\n sum_exp_x += tf.math.exp(extra_logit - base_logit)\n return tf.math.log(sum_exp_x) + base_logit\n\n def LogSoftmax(x):\n return x - ReduceLogSumExp(x)\n\n with tf.name_scope(name):\n return tf.math.exp(LogSoftmax(logits))\n\n\ndef SoftmaxCrossEntropyFocalLoss(logits,\n label_ids=None,\n label_probs=None,\n alpha=None,\n gamma=None,\n stop_gradient_on_focal_loss_coefficient=False):\n u\"\"\"Focal loss for multinomial (softmax) logistic loss.\n\n [1] Focal loss https://arxiv.org/abs/1708.02002\n\n Args:\n logits: [..., C]. Logits for the multinomial logistic regression. C is the\n number of classes.\n label_ids: [...]. Each entry in labels must be an index in [0, C).\n label_probs: [..., C]. Each vector along last dimension must be a valid\n probability distribution.\n alpha: [C]. The weighting factor alpha. Eq (3) in [1].\n gamma: []. Tunable focusing parameter. Eq (4) in [1].\n stop_gradient_on_focal_loss_coefficient: If true, stops gradient on the\n focal loss coefficient (1-p)^gamma to stabilize the gradient.\n\n Returns:\n loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].\n \"\"\"\n\n def _ApplyFocalLossCoefficient(loss, log_probs):\n if gamma is not None and gamma != 0:\n probs = tf.exp(log_probs)\n coefficient = tf.pow(1.0 - probs, gamma)\n if stop_gradient_on_focal_loss_coefficient:\n coefficient = tf.stop_gradient(coefficient)\n loss *= coefficient\n return loss\n\n if label_probs is not None:\n log_probs = tf.nn.log_softmax(logits)\n loss = -(label_probs * log_probs)\n loss = _ApplyFocalLossCoefficient(loss, log_probs)\n if alpha is not None:\n loss *= tf.reshape(\n alpha, tf.concat([tf.ones(tf.rank(loss) - 1, tf.int32), [-1]],\n axis=0))\n loss = tf.reduce_sum(loss, axis=-1)\n else:\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=label_ids, logits=logits)\n loss = _ApplyFocalLossCoefficient(loss, -loss)\n if alpha is not None:\n loss *= tf.gather(alpha, label_ids)\n return loss\n\n\ndef SigmoidCrossEntropyFocalLoss(logits, labels, alpha=None, gamma=None):\n u\"\"\"Focal loss for binary (sigmoid) logistic loss.\n\n [1] Focal loss https://arxiv.org/abs/1708.02002\n\n Args:\n logits: [..., C]. Logits for the sigmoid logistic regression.\n labels: [..., C]. 0/1 labels.\n alpha: The weighting factor alpha. Eq (3) in [1].\n gamma: Tunable focusing parameter. Eq (4) in [1].\n\n Returns:\n loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].\n \"\"\"\n\n # [1] Eq (4).\n #\n # The numerically-stable way to compute\n # log(p) for positives;\n # log(1 - p) for negatives.\n loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)\n\n if gamma is not None and gamma != 0:\n # The modulating factor. 
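# Illustrative sketch (not from the original source): with `alpha=None` and
# `gamma` None or 0 this reduces to plain sigmoid cross entropy, e.g.
#
#   loss = SigmoidCrossEntropyFocalLoss(logits, labels, alpha=0.25, gamma=2.0)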
Note that\n # (1 - p)ˠ = [1 - σ(x)]ˠ = [σ(-x)]ˠ, for positives.\n # pˠ = [σ(x)]ˠ, for negatives.\n loss *= tf.pow(tf.sigmoid(logits * (1 - labels * 2)), gamma)\n\n if alpha is not None:\n # [1] Eq (3)\n loss *= (alpha * labels + (1 - alpha) * (1 - labels))\n\n return loss\n\n\n_RECORD_FORMAT_RE = re.compile('(^[A-Za-z_]+):(.*)')\n\n\ndef RecordFormatFromFilePattern(file_pattern):\n \"\"\"Return the record format string for a Lingvo file pattern.\n\n Lingvo file patterns take the form of:\n tfrecord:/path/to/bar -> tfrecord is the record_format.\n\n This function takes a file pattern and returns a string indicating\n which format the filepattern implies.\n\n Args:\n file_pattern: String file pattern.\n\n Returns:\n Tuple (string, string):\n\n - record_format: String record format, e.g., \"tfrecord\", etc.\n - file_pattern: The file pattern without any prefixes.\n \"\"\"\n result = re.match(_RECORD_FORMAT_RE, file_pattern)\n\n if result is None:\n # TODO(vrv): Fix all callers so that file_pattern must contain\n # the record format prefix.\n return 'sstable', file_pattern\n\n # regexp ensures that a match implies there are two groups:\n # the record format and then the file pattern.\n return result.groups()\n\n\ndef ReadFileLines(file_path):\n \"\"\"Read a text file and return the lines.\n\n If the file cannot be found at the given path, attempt to load it from the\n Lingvo package (useful for data dependencies in par files).\n\n Args:\n file_path: path to file, either absolute or relative to the bazel workspace.\n\n Returns:\n A list of lines from the file.\n \"\"\"\n if not tf.io.gfile.exists(file_path):\n try:\n lines = pkgutil.get_data(\n 'lingvo', file_path.replace('lingvo/', '', 1))\n if lines:\n lines = lines.splitlines(True)\n except IOError:\n # If pkgutil can't find the file, continue and let GFile raise the error.\n lines = None\n else:\n lines = None\n\n if not lines:\n with tf.io.gfile.GFile(file_path, 'r') as f:\n lines = f.readlines()\n\n return lines\n\n\n# Partially borrowed from\n# https://github.com/tensorflow/tensor2tensor/blob/32929305e1a4ec926eff24123758b794df35492b/tensor2tensor/layers/common_layers.py#L349\ndef CumSum(x, axis=0, exclusive=False, use_einsum=False):\n \"\"\"A TPU efficient implementation of tf.cumsum().\n\n This is equivalent to tf.cumsum and is faster on TPU as of 08/2019 unless\n the axis dimension is very large. The current Tensorflow implementation is\n based on scanning and reducing which is not efficient on TPU.\n\n Args:\n x: An input Tensor.\n axis: An int for the axis.\n exclusive: A bool for performing exclusive cumsum.\n use_einsum: If true, use einsum on TPU.\n\n Returns:\n A Tensor of the same shape as x.\n\n Raises:\n ValueError: if the input axis is invalid.\n \"\"\"\n if x.dtype not in (tf.float32, tf.bfloat16) or not use_tpu():\n # Fallback to tf.cumsum when inputs are not floats or not running on TPU.\n return tf.cumsum(x, axis=axis, exclusive=exclusive)\n\n rank = GetRank(x)\n # Needs to know the rank for the final transpose if axis is not the last\n # dimension. 
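# NOTE(editorial example, not in the original Lingvo source): a hedged sketch
# of the prefix parsing performed by RecordFormatFromFilePattern above. The
# paths are hypothetical placeholders.
def _EditorialRecordFormatExample():
  fmt, path = RecordFormatFromFilePattern('tfrecord:/data/train-*')
  # fmt == 'tfrecord', path == '/data/train-*'
  fmt_default, path_default = RecordFormatFromFilePattern('/data/train-*')
  # No prefix currently falls back to ('sstable', '/data/train-*'); see the
  # TODO in the function body.
  return (fmt, path), (fmt_default, path_default)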
Otherwise, falls back to tf.cumsum.\n if not isinstance(rank, int) and axis != -1:\n return tf.cumsum(x, axis=axis, exclusive=exclusive)\n\n if axis < -1:\n if axis + rank < 0:\n raise ValueError('Unexpected axis: %d (rank = %d)' % (axis, rank))\n axis += rank\n\n if use_einsum:\n assert isinstance(rank, int) and rank < 26, rank\n # Use einsum to avoid data formatting overhead.\n a2z = ''.join([chr(i) for i in range(97, 123)]) # abc...xyz\n src = a2z[:rank]\n if axis == -1:\n tgt = src[:-1] + 'z'\n else:\n tgt = src[:axis] + 'z' + src[axis + 1:]\n length = GetShape(x)[axis]\n causal_mask = tf.linalg.band_part(\n tf.ones([length, length], dtype=x.dtype), 0, -1)\n return tf.einsum(f'{src},{src[axis]}z->{tgt}', x, causal_mask)\n\n length = GetShape(x)[axis]\n my_range = tf.range(length)\n comparator = tf.less if exclusive else tf.less_equal\n mask = tf.cast(\n comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),\n x.dtype)\n result = tf.tensordot(x, mask, axes=[[axis], [0]])\n if axis != -1 and axis != rank - 1:\n result = tf.transpose(\n result,\n list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))\n return result\n\n\ndef ProjectLastDim(inputs, weight, input_dim, output_dim):\n \"\"\"Linear projection on the last dim of the input tensor.\n\n This is a TPU efficient implementation to avoid reshaping inputs to Rank-2\n tensor by using Einsum for the compute.\n\n Args:\n inputs: An input Tensor, the last dimension of which is input_dim.\n weight: A weight matrix with shape [input_dim, output_dim].\n input_dim: An integer or a symbolic dim, the last dimension of the inputs.\n output_dim: An integer or a symbolic dim, the last dimension of the outputs.\n\n Returns:\n An output Tensor of the same rank as inputs, the last dimension is\n output_dim.\n \"\"\"\n input_dim = int(\n symbolic.ToStatic(input_dim) if symbolic.IsExpr(input_dim) else input_dim)\n output_dim = int(\n symbolic.ToStatic(output_dim) if symbolic.IsExpr(output_dim\n ) else output_dim)\n\n # Assert input_dim and output_dim\n inputs = with_dependencies([assert_equal(GetShape(inputs)[-1], input_dim)],\n inputs)\n weight = with_dependencies([\n assert_equal(GetShape(weight)[0], input_dim),\n assert_equal(GetShape(weight)[-1], output_dim)\n ], weight)\n\n if (use_tpu() and inputs.shape is not None and\n inputs.shape.rank is not None and inputs.shape.rank < 26):\n # Avoids reshape if feasible and uses Einsum.\n if inputs.shape.rank == 2:\n outputs = tf.matmul(inputs, weight)\n else:\n # This is equivalent to:\n # outputs = tf.einsum('...y,yz->...z', inputs, weight)\n # Unfortunately ... in einsum() leads to extra HBM usage.\n s = ''.join([chr(x) for x in range(97, 123)]) # abc...xyz\n r = inputs.shape.rank\n outputs = tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), inputs, weight)\n else:\n outputs = Matmul(tf.reshape(inputs, ToStaticShape([-1, input_dim])), weight)\n outputs = tf.reshape(\n outputs,\n tf.concat([\n tf.cast(GetShape(inputs)[:-1], tf.int32),\n ToStaticShape([output_dim])\n ],\n axis=0))\n\n return outputs\n\n\[email protected]\ndef RemoveAssertContext(remove=True):\n \"\"\"Hacks to replace certain unwanted tensorflow ops.\"\"\"\n # TODO(zhifengc/huangyp): Consider implementing assert_equal\n # op replacement for lingvo. 
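# NOTE(editorial example, not in the original Lingvo source): hedged usage
# sketches for CumSum and ProjectLastDim defined above; shapes and values are
# illustrative only.
def _EditorialCumSumAndProjectExample():
  x = tf.constant([[1., 2., 3.]])
  inclusive = CumSum(x, axis=-1)                  # [[1., 3., 6.]]
  exclusive = CumSum(x, axis=-1, exclusive=True)  # [[0., 1., 3.]]
  # Project the last dimension from 4 to 2 without flattening to rank 2.
  inputs = tf.ones([8, 16, 4])
  weight = tf.ones([4, 2])
  projected = ProjectLastDim(inputs, weight, input_dim=4, output_dim=2)
  return inclusive, exclusive, projected          # projected: [8, 16, 2]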
As assert_equal doesn't support String on GPUs.\n # Hack to replace tf.assert_equal\n # TODO(b/136040013): Remove this after migration to tf.function.\n if remove:\n saved_assert_equal = tf.check_ops.assert_equal\n\n def NoOP(*args, **kwargs): # pylint: disable=unused-argument\n return tf.no_op()\n\n tf.check_ops.assert_equal = NoOP # Make assert_equal a no op.\n try:\n yield\n finally:\n tf.check_ops.assert_equal = saved_assert_equal\n else:\n yield\n\n\ndef _AssertInputsMatch(op, args, implicit_captures):\n \"\"\"Assert that op's inputs match with args and implicit_captures.\n\n Args:\n op: The operation to check.\n args: A nested structure representing the explicit arguments of 'op'.\n implicit_captures: A nested structure representing the implicitly captured\n inputs of 'op'.\n\n Raises:\n ValueError: if the number of inputs mismatch.\n \"\"\"\n expected_inputs = Flatten([args, implicit_captures])\n expected_num_inputs = len(expected_inputs)\n if len(op.inputs) > expected_num_inputs:\n raise ValueError(('Too many inputs. The most likely cause is that fwd '\n 'captures additional tensors: extra inputs %r vs %r '\n 'captures=%r') % (list(op.inputs), list(expected_inputs),\n list(Flatten(implicit_captures))))\n if len(op.inputs) < expected_num_inputs:\n raise ValueError(('Mismatched inputs to fwd: Found %d vs expected %d: %r'\n '. Implicit captures(%d) = %r') %\n (len(op.inputs), expected_num_inputs, list(op.inputs),\n len(Flatten(implicit_captures)), implicit_captures))\n\n\ndef TensorSpecs(nmap, keep_shape=True):\n \"\"\"Transforms tensors in the input nested structure to TensorSpecs.\"\"\"\n if nmap is None:\n return None\n fn = lambda t: tf.TensorSpec(t.shape if keep_shape else None, t.dtype)\n return Transform(fn, nmap)\n\n\ndef _DefineDefun(fwd, fwd_sig, bak=None, bak_as_function=False, device=None):\n \"\"\"Wraps fwd in a defun with custom gradient bak.\n\n Args:\n fwd: A callable xs: Nested Structure -> ys: Nested Structure.\n fwd_sig: A Nested Structure of tf.TensorSpec representing the input\n signature of `fwd`, or None (meaning that fwd takes no inputs).\n bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested\n Structure. The custom backprop function for `fwd`. bak needs to return\n dcapture if fwd uses any implicitly captured tensors, whose gradients are\n dcapture.\n bak_as_function: Whether to create a TF graph function for `bak`.\n device: the device on which to run `fwd` and `bak`.\n\n Returns:\n A NestedMap containing:\n\n - call: A callable that will execute `fwd`. It has the same input and output\n signatures as `fwd`.\n - func: The underlying TF function that `call` calls. If not None, it will\n be a _DefinedFunction or ConcreteFunction that takes flat inputs and\n returns flat outputs, and can be used by routines that require a TF\n function object (e.g. 
tf.If, tf.While, etc).\n Always not None when `bak` is None.\n - output_dtypes: A nested structure compatible with the outputs of `fwd`\n containing the corresponding output dtypes.\n - stateful_ops: A list of (op_name, op_type) tuples representing the\n stateful ops used by `fwd`.\n - captured_inputs: Implicit inputs captured by `fwd`.\n \"\"\"\n assert fwd is not None\n noinline = not use_xla()\n\n if fwd_sig is None:\n fwd_sig = []\n get_dtype = lambda x: x.dtype\n arg_dtypes = Flatten(Transform(get_dtype, fwd_sig))\n get_shape = lambda x: x.shape\n arg_shapes = Flatten(Transform(get_shape, fwd_sig))\n\n # Used to hold the backward function used by Grad, which will be defined if\n # bak is set.\n sigs = NestedMap()\n # Output of this method.\n res = NestedMap()\n\n python_grad_func = None\n if bak:\n\n def Grad(op, *args):\n \"\"\"Gradient function for the forward function.\n\n Args:\n op: The forward operation.\n *args: Gradients wrt op.outputs.\n\n Returns:\n Tuple of derivatives.\n \"\"\"\n _AssertInputsMatch(op, fwd_sig, res.captured_inputs)\n # Ensure dys contains no None.\n args = ConvertNoneGradientToZeros(list(op.outputs), list(args))\n xs = op.inputs[:len(arg_dtypes)] # The rest are captures.\n return sigs.backward(*Flatten([xs, op.outputs, args]))\n\n python_grad_func = Grad\n\n def _SetShape(dst_list, shape_list):\n for dst, shape in zip(dst_list, shape_list):\n if isinstance(dst, tf.Tensor):\n dst.set_shape(shape)\n\n @tf.Defun(*arg_dtypes, python_grad_func=python_grad_func, noinline=noinline)\n def Forward(*args):\n \"\"\"The forward function.\"\"\"\n _SetShape(args, arg_shapes)\n with RemoveAssertContext(remove=noinline):\n call = lambda: fwd(Pack(fwd_sig, args)) if args else fwd()\n if device is None:\n # Defun will handle the device assignment.\n rets = call()\n else:\n with tf.device(device):\n rets = call()\n res.outputs = rets\n return Flatten(rets)\n\n forward = Forward\n if not arg_dtypes:\n # In this case Forward is an _OverloadedFunction, we need to instantiate it.\n forward = Forward.instantiate([])\n\n # Invokes fwd() to get res.outputs.\n forward.add_to_graph(tf.get_default_graph())\n res.func = forward\n res.stateful_ops = forward.stateful_ops\n res.captured_inputs = forward.captured_inputs\n output_dtypes = Transform(get_dtype, res.outputs)\n output_shapes = Transform(get_shape, res.outputs)\n\n def Call(args=None):\n \"\"\"Wrapper of fwd.\"\"\"\n if args is None:\n flat_rets = forward()\n else:\n flat_rets = forward(*Flatten(args))\n if not isinstance(flat_rets, (tuple, list)):\n flat_rets = [flat_rets]\n _SetShape(flat_rets, Flatten(output_shapes))\n return Pack(output_dtypes, flat_rets)\n\n res.call = Call\n\n if bak:\n\n def Backward(*args):\n \"\"\"The backward function.\"\"\"\n _SetShape(args, Flatten([arg_shapes, output_shapes, output_shapes]))\n xs, ys, dys = Pack([fwd_sig, output_dtypes, output_dtypes], args)\n with RemoveAssertContext(remove=noinline):\n if device is None:\n # Defun will handle the device assignment.\n dxs = bak(xs, ys, dys)\n else:\n with tf.device(device):\n dxs = bak(xs, ys, dys)\n return Flatten(dxs)\n\n if bak_as_function:\n sigs.backward = tf.Defun(\n *Flatten([arg_dtypes, output_dtypes, output_dtypes]),\n noinline=noinline)(\n Backward)\n\n sigs.backward.add_to_graph(tf.get_default_graph())\n else:\n sigs.backward = Backward\n\n return res\n\n\n# Global variable to control rendezvous sharing in tf.function.\n# If False (default) rendezvous sharing is disabled in tf.function, that is, the\n# function body use a separate 
rendezvous and can't communicate with parent\n# graph via send/recv.\n# With _GetSharedRendezvous() == True, the function body share the same\n# rendezvous with the parent graph and can talk to it using send/recv. This is\n# useful for layers like StackedRecurrent.\n_SHARED_RENDEZVOUS = ThreadLocalStack()\n\n\[email protected]\ndef _SharedRendezvousScope(shared_rendezvous=True):\n _SHARED_RENDEZVOUS.stack.append(shared_rendezvous)\n try:\n yield\n finally:\n _SHARED_RENDEZVOUS.stack.pop()\n\n\ndef _GetSharedRendezvous():\n \"\"\"Get the current rendezvous sharing setting.\"\"\"\n return _SHARED_RENDEZVOUS.stack[-1] if _SHARED_RENDEZVOUS.stack else False\n\n\ndef _ApplySharedRendezvous(func):\n \"\"\"Apply the rendezvous sharing setting on the given tf.function func.\"\"\"\n # pylint: disable=protected-access\n func._shared_rendezvous = _GetSharedRendezvous()\n # pylint: enable=protected-access\n\n\ndef _WrapFunction(func=None, input_signature=None):\n \"\"\"Wraps func as a tf.function.\"\"\"\n if input_signature is None:\n input_signature = []\n\n def Decorated(fn):\n\n @tf.function(input_signature=input_signature, autograph=False)\n def Fn(*args):\n # TODO(b/163904067): mimic Defun' behavior and reset the step seed to\n # avoid it being used as an implicit capture. This is not a desired\n # behavior, it should take the step seed from parent graph instead.\n ResetStepSeed()\n\n # Mimic Defun and disable collection sharing.\n graph = tf.get_default_graph()\n # Don't share summaries collection with parent graph (b/168745134).\n graph.clear_collection(tf.GraphKeys.SUMMARIES)\n return fn(*args)\n\n _ApplySharedRendezvous(Fn)\n\n # Add the function to the graph so it'll be traced under the current\n # context. This is necessary if the function body captures any non-tensor\n # values from the environment, like symbolic maps.\n cf = Fn.get_concrete_function()\n cf.add_to_graph()\n return cf\n\n # For the `foo = _WrapFunction(foo, ...)` use case.\n if func is not None:\n return Decorated(func)\n\n # For the `@_WrapFunction(...)` use case.\n return Decorated\n\n\ndef _DefineFunction(fwd, fwd_sig, bak=None, bak_as_function=False, device=None):\n \"\"\"Wraps fwd in a defun with custom gradient bak.\n\n Args:\n fwd: A callable xs: Nested Structure -> ys: Nested Structure.\n fwd_sig: A Nested Structure of tf.TensorSpec representing the input\n signature of `fwd`, or None (meaning that fwd takes no inputs).\n bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested\n Structure. The custom backprop function for `fwd`. bak needs to return\n dcapture if fwd uses any implicitly captured tensors, whose gradients are\n dcapture.\n bak_as_function: Whether to create a TF graph function for `bak`.\n device: the device on which to run `fwd` and `bak`.\n\n Returns:\n A NestedMap containing:\n\n - call: A callable that will execute `fwd`. It has the same input and output\n signatures as `fwd`.\n - func: The underlying TF function that `call` calls. If not None, it will\n be a _DefinedFunction or ConcreteFunction that takes flat inputs and\n returns flat outputs, and can be used by routines that require a TF\n function object (e.g. tf.If, tf.While, etc).\n Always not None when `bak` is None.\n - outputs: The outputs of `fwd`. Used for reflection only (e.g. 
to get the\n output dtypes, shapes, etc).\n - stateful_ops: A list of (op_name, op_type) tuples representing the\n stateful ops used by `fwd`.\n - captured_inputs: Implicit inputs captured by `fwd`.\n \"\"\"\n assert fwd is not None\n noinline = not use_xla()\n\n if fwd_sig is None:\n fwd_sig = []\n\n if device is None:\n # Get the current device to mimic Defun's behavior.\n # pylint: disable=protected-access\n device_funcs = tf.get_default_graph()._device_functions_outer_to_inner\n device = device_funcs[-1] if device_funcs else None\n # pylint: enable=protected-access\n\n # Output of this method.\n res = NestedMap()\n\n @_WrapFunction(input_signature=Flatten(fwd_sig))\n def Forward(*args):\n \"\"\"The forward function.\"\"\"\n with RemoveAssertContext(remove=noinline), tf.device(device):\n if args:\n xs = Pack(fwd_sig, args)\n rets = fwd(xs)\n else:\n rets = fwd()\n res.outputs = rets\n return Flatten(rets)\n\n res.captured_inputs = Forward.captured_inputs\n\n # Get the stateful ops used in cell_fn. Logic borrowed from\n # _EagerDefinedFunction.__init__().\n graph = Forward.graph\n input_ops = set(arg.op for arg in graph.inputs)\n operations = [op for op in graph.get_operations() if op not in input_ops]\n res.stateful_ops = [(o.name, o.type) for o in operations if o._is_stateful] # pylint: disable=protected-access\n\n def Call(func, args=None):\n \"\"\"Wrapper of fwd.\"\"\"\n if args is None:\n flat_rets = func()\n else:\n flat_rets = func(*Flatten(args))\n if not isinstance(flat_rets, (tuple, list)):\n flat_rets = [flat_rets]\n return Pack(res.outputs, flat_rets)\n\n if not bak:\n res.func = Forward\n res.call = lambda args=None: Call(Forward, args)\n return res\n\n shared_rendezvous = _GetSharedRendezvous()\n ret_specs = TensorSpecs(res.outputs)\n\n def Backward(*args):\n xs, ys, dys = Pack([fwd_sig, ret_specs, ret_specs], args)\n with RemoveAssertContext(remove=noinline), tf.device(device):\n dxs = bak(xs, ys, dys)\n return Flatten(dxs)\n\n if bak_as_function:\n backward_cf = _WrapFunction(\n Backward, input_signature=Flatten([fwd_sig, ret_specs, ret_specs]))\n else:\n\n def BackwardWithSharedRendezvous(*args):\n with _SharedRendezvousScope(shared_rendezvous):\n return Backward(*args)\n\n backward_cf = BackwardWithSharedRendezvous\n\n @tf.custom_gradient\n def ForwardWithGrad(*args):\n \"\"\"Forward function and its custom gradient.\"\"\"\n # Note that `args` includes implicit captures. 
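# NOTE(editorial example, not in the original Lingvo source): the custom
# gradient contract used by _DefineDefun/_DefineFunction is
# bak(xs, ys, dys) -> dxs, with dxs mirroring the structure of xs. A hedged
# toy instance via the public CallDefun helper (defined later in this file):
def _EditorialCustomGradExample():
  def Fwd(x):
    return x * x * x
  def Bak(x, y, dy):
    del y  # unused; d(x**3)/dx = 3 * x**2.
    return 3.0 * x * x * dy
  return CallDefun(Fwd, args=tf.constant(2.0), bak=Bak)  # returns 8.0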
This is required by\n # tf.custom_gradient so that when the Grad() outputs include gradients to\n # implicit captures, they match the inputs to ForwardWithGrad().\n #\n # However, Forward doesn't take implicit captures as input, so we exclude\n # them here.\n fwd_args = args[:(len(args) - len(Flatten(res.captured_inputs)))]\n op = NestedMap(inputs=args, outputs=Forward(*fwd_args))\n\n def Grad(*args, **kwargs):\n \"\"\"Gradient function for the forward function.\n\n Args:\n *args: Gradients wrt op.outputs.\n **kwargs: Additional arguments from tf.custom_gradient.\n\n Returns:\n Tuple of derivatives.\n \"\"\"\n if kwargs:\n tf.logging.warning(\n 'Ignoring additional arguments used by tf.custom_gradient: %s',\n str(kwargs))\n\n _AssertInputsMatch(op, fwd_sig, res.captured_inputs)\n\n # Ensure dys contains no None.\n args = ConvertNoneGradientToZeros(list(op.outputs), list(args))\n\n xs, _ = Pack([fwd_sig, res.captured_inputs], op.inputs)\n return backward_cf(*Flatten([xs, op.outputs, args]))\n\n return op.outputs, Grad\n\n res.func = None\n forward = lambda *xs: ForwardWithGrad(*Flatten([xs, res.captured_inputs]))\n res.call = lambda args=None: Call(forward, args)\n return res\n\n\n# Global variable to control whether to use tf.function.\n# If not set, the result is determined by tf2 status. See _UseTfFunction for\n# details.\n# TODO(laigd): remove after b/169869929 is fixed.\n_USE_TF_FUNCTION = ThreadLocalStack()\n\n# Constants for propagating framework tensors through Function.\n_FRAMEWORK_TENSOR_GLOBAL_STEP = '_global_step'\n\n\[email protected]\ndef TfFunctionScope(use_tf_function=True):\n _USE_TF_FUNCTION.stack.append(use_tf_function)\n try:\n yield\n finally:\n _USE_TF_FUNCTION.stack.pop()\n\n\ndef _UseTfFunction():\n \"\"\"Whether to use tf.function instead of tf.Defun.\"\"\"\n if _USE_TF_FUNCTION.stack:\n return _USE_TF_FUNCTION.stack[-1]\n return tf2_enabled()\n\n\nclass Function(object):\n \"\"\"Function builds a TensorFlow graph function from a callable.\n\n In the high level this is similar to tf.Defun and tf.function. In fact this\n relies on those as underlying implementations, but with specific configuration\n so it's easier to use and can work well in some extreme cases in Lingvo.\n\n Example usage:\n\n - No inputs:\n\n >>> @Function()\n ... def foo():\n ... return tf.constant(1.0)\n >>> y = foo()\n\n - Scalar input:\n\n >>> @Function(fwd_sig=tf.TensorSpec(None, tf.float32))\n ... def foo(x):\n ... return x * 2\n >>> y = foo(1.0)\n\n - List input:\n\n >>> @Function(fwd_sig=[tf.TensorSpec(None, tf.float32) for _ in range(2)])\n ... def foo(xs):\n ... return xs[0] + xs[1]\n >>> y = foo([1.0, 2.0])\n\n - Nested input:\n\n >>> @Function(fwd_sig=NestedMap(x=tf.TensorSpec(None, tf.float32)))\n ... def foo(nmap):\n ... return nmap.x * 2\n >>> y = foo(NestedMap(x=1.0))\n\n - With custom gradient function (other input types mentioned above are also\n supported):\n\n >>> def bar(x, y, dy):\n ... del y, dy\n ... return 4.0 * x * dy\n >>>\n >>> @Function(fwd_sig=tf.TensorSpec(None, tf.float32), bak=bar)\n ... def foo(x):\n ... return 2.0 * x * x\n\n - Used in control flow ops:\n\n >>> then_branch = Function(tf.TensorSpec([], tf.int32))(lambda x: x / 2)\n >>> else_branch = Function(tf.TensorSpec([], tf.int32))(lambda x: 3 * x + 1)\n >>> y = tf.If(cond, inputs, then_branch.func, else_branch.func)\n \"\"\"\n\n # TODO(laigd): the use_tf_function option is added for backward compatibility\n # reasons. 
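# NOTE(editorial example, not in the original Lingvo source): TfFunctionScope
# (defined above) temporarily overrides whether Function/DefinedFunction use
# tf.function or the Defun-based implementation underneath.
def _EditorialTfFunctionScopeExample():
  with TfFunctionScope(use_tf_function=False):
    # Within this scope _UseTfFunction() returns False, so DefinedFunction
    # builds a tf.Defun instead of a tf.function.
    return _UseTfFunction()  # False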
Remove it after the migration.\n def __init__(self,\n fwd_sig=None,\n bak=None,\n bak_as_function=False,\n device=None,\n use_tf_function=None):\n \"\"\"Constructor.\n\n Below we assume `fwd` is the input to `__call__` that is used to build the\n TensorFlow graph function encapsulated by this object.\n\n Args:\n fwd_sig: A Nested Structure of tf.TensorSpec representing the input\n signature of `fwd`, or None (meaning that `fwd` takes no inputs). The\n actual inputs should be compatible with this (have same shapes and\n dtypes).\n bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested\n Structure. The custom backprop function for `fwd`. bak needs to return\n dcapture if `fwd` uses any implicitly captured tensors, whose gradients\n are dcapture.\n bak_as_function: Whether to create a TF graph function for `bak`.\n device: The device on which to run `fwd` and `bak`. Defaults to the\n current device.\n use_tf_function: Whether use tf.function. Defaults to _UseTfFunction().\n \"\"\"\n self._fwd_sig = fwd_sig\n self._bak = bak\n self._bak_as_function = bak_as_function\n self._device = device\n self._use_tf_function = use_tf_function\n\n def __call__(self, fwd):\n \"\"\"Creates a graph function.\n\n Args:\n fwd: a callable xs: Nested Structure -> ys: Nested Structure.\n\n Returns:\n A DefinedFunction object encapsulating `fwd` as a graph function.\n \"\"\"\n assert callable(fwd)\n return DefinedFunction(fwd, self._fwd_sig, self._bak, self._bak_as_function,\n self._device, self._use_tf_function)\n\n\nclass DefinedFunction(object):\n \"\"\"Encapsulates a TensorFlow graph function and its properties.\"\"\"\n\n def __init__(self,\n fwd,\n fwd_sig=None,\n bak=None,\n bak_as_function=False,\n device=None,\n use_tf_function=None):\n \"\"\"Constructor.\n\n Args:\n fwd: A callable xs: Nested Structure -> ys: Nested Structure. Used to\n build the TensorFlow graph function that this object encapsulates.\n fwd_sig: A Nested Structure of tf.TensorSpec representing the input\n signature of `fwd`, or None (meaning that `fwd` takes no inputs). The\n actual inputs should be compatible with this (have same shapes and\n dtypes).\n bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested\n Structure. The custom backprop function for `fwd`. bak needs to return\n dcapture if `fwd` uses any implicitly captured tensors, whose gradients\n are dcapture.\n bak_as_function: Whether to create a TF graph function for `bak`.\n device: The device on which to run `fwd` and `bak`. Defaults to the\n current device.\n use_tf_function: Whether use tf.function. 
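# NOTE(editorial example, not in the original Lingvo source): a hedged sketch
# of building a DefinedFunction through the Function wrapper and inspecting
# the reflection properties documented further below.
def _EditorialDefinedFunctionExample():
  fn = Function(fwd_sig=tf.TensorSpec([], tf.float32))(lambda x: x * 2.0)
  dtypes = fn.output_dtypes   # same structure as fwd's outputs, here float32
  graph_fn = fn.func          # usable with functional ops such as tf.If
  y = fn(tf.constant(3.0))    # 6.0
  return dtypes, graph_fn, y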
Defaults to _UseTfFunction().\n \"\"\"\n self._fwd_sig = fwd_sig\n\n wrapped_fwd_sig = fwd_sig\n fwd_fn = fwd\n bak_fn = bak\n\n graph_random_seed = None\n if tf.get_default_graph().seed is not None:\n graph_random_seed = tf.get_default_graph().seed\n\n # Wrap the forward function to propagate framework tensors like step_seed\n # and global_step.\n wrapped_fwd_sig = NestedMap()\n self._added_global_step = False\n if GetGlobalStep() is not None:\n wrapped_fwd_sig[_FRAMEWORK_TENSOR_GLOBAL_STEP] = (\n tf.TensorSpec([], tf.int64))\n self._added_global_step = True\n if fwd_sig is not None:\n wrapped_fwd_sig.inputs = fwd_sig\n elif not wrapped_fwd_sig:\n wrapped_fwd_sig = None\n\n def ForwardWrapped(wrapped_inputs=None):\n if graph_random_seed is not None:\n tf.random.set_seed(graph_random_seed)\n global_step = None\n if wrapped_inputs:\n assert isinstance(wrapped_inputs, NestedMap)\n global_step = wrapped_inputs.get(_FRAMEWORK_TENSOR_GLOBAL_STEP, None)\n with GlobalStepContext(global_step):\n if wrapped_inputs and 'inputs' in wrapped_inputs:\n result = fwd(wrapped_inputs.inputs)\n else:\n result = fwd()\n return result\n\n fwd_fn = ForwardWrapped\n\n if bak:\n\n # Wrap the backward function to return zero gradients for framework\n # tensors like step_seed and global_step.\n def BackwardWrapped(wrapped_xs, ys, dys):\n if graph_random_seed is not None:\n tf.random.set_seed(graph_random_seed)\n with GlobalStepContext(\n wrapped_xs.get(_FRAMEWORK_TENSOR_GLOBAL_STEP, None)):\n result = bak(wrapped_xs.inputs, ys, dys)\n dxs = Transform(tf.zeros_like, wrapped_xs)\n if isinstance(result, tuple) and len(result) == 2:\n dxs.inputs, dcapture = result\n return dxs, dcapture\n else:\n dxs.inputs = result\n return dxs\n\n bak_fn = BackwardWrapped\n\n if use_tf_function is None:\n use_tf_function = _UseTfFunction()\n fn = _DefineFunction if use_tf_function else _DefineDefun\n self._data = fn(\n fwd=fwd_fn,\n fwd_sig=wrapped_fwd_sig,\n bak=bak_fn,\n bak_as_function=bak_as_function,\n device=device)\n\n def __call__(self, args=None):\n \"\"\"Invokes the graph function.\n\n Args:\n args: the inputs to the graph function, must be compatible with `fwd_sig`.\n\n Returns:\n The output tensors with the same structure as the output of `fwd`,\n returned by a call to the graph function.\n \"\"\"\n assert IsCompatible(args,\n self._fwd_sig), '{} vs {}'.format(args, self._fwd_sig)\n return self._data.call(self.AddFrameworkInputs(args))\n\n @property\n def func(self):\n \"\"\"The underlying TensorFlow graph function that this object encapsulates.\n\n The returned graph function is created by tracing `fwd` during construction.\n If not None, it will be a _DefinedFunction or ConcreteFunction that takes\n flat inputs and returns flat outputs, and can be used by routines that\n require a TensorFlow function object (e.g. 
tf.If, tf.While, etc).\n\n If no backprop function is provided during construction, the result is\n always not None.\n \"\"\"\n return self._data.func\n\n def AddFrameworkInputs(self, inputs):\n \"\"\"Add framework tensors like step_seed and global_step to inputs.\n\n This is only necessary when using `func`, as wrapping is handled\n automatically in __call__.\n\n Args:\n inputs: inputs to the function.\n\n Returns:\n Inputs wrapped with framework tensors suitable for use with `func`.\n \"\"\"\n result = NestedMap()\n if self._added_global_step:\n global_step = GetGlobalStep()\n assert global_step is not None\n result[_FRAMEWORK_TENSOR_GLOBAL_STEP] = tf.cast(global_step, tf.int64)\n if inputs is not None:\n result.inputs = inputs\n return result if result else None\n\n @property\n def output_dtypes(self):\n \"\"\"Output dtypes of the graph function.\n\n The result will have the same structure as the outputs of `fwd` but contain\n the corresponding output dtypes.\n \"\"\"\n return Transform(lambda x: x.dtype, self._data.outputs)\n\n @property\n def stateful_ops(self):\n \"\"\"Stateful ops used by `fwd`, as a list of (op_name, op_type) tuples.\"\"\"\n return self._data.stateful_ops\n\n @property\n def captured_inputs(self):\n \"\"\"Implicit input tensors captured by `fwd`.\"\"\"\n return self._data.captured_inputs\n\n\ndef CallDefun(fwd, args=None, bak=None, bak_as_function=False, device=None):\n \"\"\"Wraps fwd in a defun with custom gradient bak and calls it with args.\n\n Args:\n fwd: A callable xs: Nested Structure -> ys: Nested Structure.\n args: A Nested Structure of tf.Tensor or None.\n bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested\n Structure. The custom backprop function for fwd. bak needs to return\n dcapture if fwd uses any implicitly captured tensors, whose gradients are\n dcapture.\n bak_as_function: Whether to create a TF graph function for bak.\n device: the device on which to run fwd and bak.\n\n Returns:\n A Nested Structure equivalent to what fwd(args) computes.\n \"\"\"\n if args is not None:\n args = Transform(tf.convert_to_tensor, args)\n sigs = Function(\n fwd_sig=TensorSpecs(args),\n bak=bak,\n bak_as_function=bak_as_function,\n device=device)(\n fwd=fwd)\n if args is None:\n return sigs()\n else:\n return sigs(args)\n\n\ndef If(cond, inputs, then_branch, else_branch):\n \"\"\"Helper to construct an if/else statement.\n\n Args:\n cond: A scalar `Tensor` that can be converted to boolean.\n inputs: A flattenable representing the input tensors of the if/else\n statement. Can be None to represent no inputs.\n then_branch: A callable 'inputs' -> flattenable. The returned value should\n be compatible with what 'else_branch' returns.\n else_branch: A callable 'inputs' -> flattenable. The returned value should\n be compatible with what 'then_branch' returns.\n\n Returns:\n Output returned by the call to either 'then_branch' or 'else_branch'.\n \"\"\"\n fwd_sig = TensorSpecs(inputs)\n then_sigs = Function(fwd_sig=fwd_sig)(fwd=then_branch)\n else_sigs = Function(fwd_sig=fwd_sig)(fwd=else_branch)\n assert IsCompatible(then_sigs.output_dtypes, else_sigs.output_dtypes), (\n 'Outputs of then_branch and else_branch are not compatible: {} vs {}'\n .format(then_sigs.output_dtypes, else_sigs.output_dtypes))\n if then_sigs.captured_inputs != else_sigs.captured_inputs:\n raise ValueError('Differing captured inputs in then and else. 
'\n 'Ensure the same tensors are captured in the same order.')\n\n ret = tf.If(\n cond=cond,\n inputs=Flatten(then_sigs.AddFrameworkInputs(inputs)) +\n then_sigs.captured_inputs,\n then_branch=then_sigs.func,\n else_branch=else_sigs.func)\n return Pack(then_sigs.output_dtypes, ret)\n\n\ndef _Itype():\n \"\"\"Loop iterator data type.\"\"\"\n return tf.int32 if use_xla() else tf.int64\n\n\ndef WhileLoop(cond, body, loop_state):\n \"\"\"Helper to construct a while loop.\n\n Args:\n cond: A callable NestedMap -> tf.bool.\n body: A callable NestedMap -> NestedMap.\n loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the\n loop state.\n\n Returns:\n The final loop state in the same structure as loop_state.\n \"\"\"\n fwd_sig = TensorSpecs(loop_state)\n cond_sigs = Function(fwd_sig=fwd_sig)(fwd=cond)\n\n def BodyWrapped(loop_state):\n result = body(loop_state)\n # loop_state is augmented with global tensors inside of DefinedFunction.\n # WhileLoop needs to return the same structure as the inputs, so we augment\n # the return value here to match.\n result = cond_sigs.AddFrameworkInputs(result)\n return result\n\n body_sigs = Function(fwd_sig=fwd_sig)(fwd=BodyWrapped)\n wrapped_inputs = body_sigs.AddFrameworkInputs(loop_state)\n new_state = tf.While(\n Flatten(wrapped_inputs), cond=cond_sigs.func, body=body_sigs.func)\n\n # The functional `While` used above does not have a registered gradient.\n # This was not a problem in Graph mode, however in Eager mode,\n # GradientTape will attempt to call the gradient of the While op in the\n # forward pass. `stop_gradient` is used to pretend the op is a constant\n # in the forward pass. This also avoids calling the gradient of other ops in\n # `While` in the forward pass.\n # Details in https://www.tensorflow.org/api_docs/python/tf/custom_gradient.\n # Guarded by 'IsEagerMode' to limit impact.\n if IsEagerMode():\n new_state = [tf.stop_gradient(t) for t in new_state]\n\n return Pack(wrapped_inputs, new_state).inputs\n\n\ndef ForLoop(body, start, limit, delta, loop_state):\n \"\"\"Helper to construct a for loop.\n\n Args:\n body: A callable (tf.int, NestedMap) -> NestedMap.\n start: Loop variable's initial value.\n limit: Loop variable's limit value.\n delta: Loop variable's change per iteration.\n loop_state: A flattenable (NestedMap, list, tuple, etc.) 
representing the\n loop state.\n\n Returns:\n The final loop state in the same structure as loop_state.\n \"\"\"\n state = NestedMap(\n iter=tf.cast(start, _Itype()),\n limit=tf.cast(limit, _Itype()),\n delta=tf.cast(delta, _Itype()),\n loop_state=loop_state)\n\n def LoopCond(state):\n return tf.less(state.iter, state.limit)\n\n def LoopBody(state):\n state.loop_state = body(state.iter, state.loop_state)\n state.iter = tf.add(state.iter, state.delta)\n return state\n\n return WhileLoop(LoopCond, LoopBody, state).loop_state\n\n\ndef TopK(x_in, k):\n \"\"\"Equivalent to tf.math.top_k(x_in, k) but more efficient on tpu.\"\"\"\n assert k <= 2, 'This implementation is only efficient for small k.'\n # TODO(yonghui): Try out an alternative idea where we first reshape x_in as a\n # 2d tensor, then call tf.math.top_k, and then reshape back.\n x_in_shape = x_in.shape\n x_rank = x_in_shape.rank\n assert x_rank and x_in_shape.as_list()[x_rank - 1] > 0\n last_dim_size = x_in_shape.as_list()[x_rank - 1]\n min_value = tf.math.reduce_min(x_in) - 1.0\n\n out_indices = []\n out_values = []\n\n for unused_i in range(k):\n index_i = tf.math.argmax(x_in, axis=-1, output_type=tf.int32)\n mask_i = tf.one_hot(index_i, last_dim_size)\n # TODO(yonghui): Would tf.gather be more efficient and numerically stable\n # here?\n value_i = tf.reduce_sum(mask_i * x_in, -1, keepdims=True)\n x_in = (1.0 - mask_i) * x_in + mask_i * min_value\n out_indices.append(tf.expand_dims(index_i, -1))\n out_values.append(value_i)\n\n if k == 1:\n return out_values[0], out_indices[0]\n else:\n return tf.concat(out_values, x_rank - 1), tf.concat(out_indices, x_rank - 1)\n\n\ndef ReadVariable(var_op):\n \"\"\"Returns the value of the given variable operation.\n\n Args:\n var_op: the `Operation` object for a VarHandleOp.\n\n Raises:\n TypeError: if var_op is not a VarHandleOp.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n if var_op.type != 'VarHandleOp':\n raise TypeError('var_op should be a VarHandleOp, got %s' % str(var_op.type))\n # Filter out the ReadVariableOps that have control dependencies to avoid\n # side-effects when the user runs it.\n filter_fn = lambda op: op.type == 'ReadVariableOp' and not op.control_inputs\n var_readers = list(filter(filter_fn, var_op.outputs[0].consumers()))\n assert var_readers\n return var_readers[0].outputs[0]\n\n\n_TPU_SUMMARY_TENSORS_KEY = ('__lingvo_tpu_summary_tensors')\n\n_TPU_SUMMARY_CONTEXTS = ThreadLocalStack()\n\n\ndef _GetTpuSummaryTensor():\n if _TPU_SUMMARY_CONTEXTS.stack:\n return _TPU_SUMMARY_CONTEXTS.stack[-1]\n return _CollectionGetter(_TPU_SUMMARY_TENSORS_KEY, lambda: [])()\n\n\[email protected]\ndef TpuSummaryTensorContext():\n \"\"\"Creates a context where AddTpuSummaryTensor() will add tensors.\"\"\"\n _TPU_SUMMARY_CONTEXTS.stack.append([])\n try:\n yield\n finally:\n _TPU_SUMMARY_CONTEXTS.stack.pop()\n\n\ndef AddTpuSummaryTensor(name, value, weight=1.0):\n \"\"\"Adds tensor to global collection of summaries, or a local context if any.\n\n This needs to be used in situations where tf.summary() could be used but\n currently tf.summary is not supported. Use py_utils.AddTpuSummaryTensor() in\n low level code to add summary tensors to global collection of summaries.\n Then recover all summary tensors from global collection by calling\n py_utils.GetTpuSummaryTensors() from top level code (for example from\n ComputeLoss method of BaseTask).\n\n In addition to 'name' argument, current tensorflow name scope is also\n captured and added to the metric name. 
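# NOTE(editorial example, not in the original Lingvo source): a hedged toy
# usage of the functional ForLoop helper above; it sums the integers 0..9.
def _EditorialForLoopExample():
  def Body(i, state):
    state.total = state.total + tf.cast(i, tf.float32)
    return state
  init = NestedMap(total=tf.constant(0.0))
  final_state = ForLoop(Body, start=0, limit=10, delta=1, loop_state=init)
  return final_state.total  # 45.0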
This way for example summaries from\n a repeated layer will appear as separate graphs in the tensorboard.\n\n Weight argument is optional and defaults to 1.0. See BaseTask.ComputeLoss for\n the exact definition of weight for eval metrics.\n\n Args:\n name: metric name\n value: metric value tensor\n weight: weight tensor for weighted metrics\n \"\"\"\n tpu_summary_tensors = _GetTpuSummaryTensor()\n x = NestedMap()\n x.name = name\n x.value = value, tf.convert_to_tensor(weight)\n x.name_scope = tf.get_default_graph().get_name_scope()\n tpu_summary_tensors.append(x)\n\n\ndef GetTpuSummaryTensors():\n \"\"\"Returns summary tensors from global collection.\n\n Returns:\n A dict containing str keys and (metric, weight) pairs as values\n \"\"\"\n tpu_summary_tensors = _GetTpuSummaryTensor()\n return {\n '%s/%s' % (x.name, SanitizeScopeKey(x.name_scope)): x.value\n for x in tpu_summary_tensors\n }\n\n\ndef ClearTpuSummaryTensors():\n tpu_summary_tensors = _GetTpuSummaryTensor()\n del tpu_summary_tensors[:]\n\n\ndef ComputationShape(split_size, topology=None):\n \"\"\"Decides the computation shape based on the split_size.\n\n Args:\n split_size: number of accelerators to use per split.\n topology: a serialized string of `tensorflow.tpu.TopologyProto`, or a\n `tf.tpu.experimental.Topology` object, that describes the TPU cluster\n topology. If not set, it'll use a default setting based on split_size.\n\n Returns:\n A 4-element list that describes the computation shape.\n \"\"\"\n if topology:\n if isinstance(topology, tf.tpu.experimental.Topology):\n topology_info = topology\n else:\n topology_info = tf_topology.Topology(serialized=topology)\n computation_shape = None\n if topology and functools.reduce(lambda a, b: a * b,\n topology_info.mesh_shape) == split_size:\n computation_shape = topology_info.mesh_shape\n elif split_size == 1:\n computation_shape = [1, 1, 1, 1]\n elif topology and topology_info.mesh_shape[\n -1] == 1 and split_size in topology_info.mesh_shape:\n # For Megacore, if we find exact match on mesh shape, map split_size to it\n computation_shape = [1, 1, 1, 1]\n computation_shape[topology_info.mesh_shape.tolist().index(\n split_size)] = split_size\n else:\n if topology:\n cores_per_chip = topology_info.mesh_shape[-1]\n else:\n cores_per_chip = 2\n assert split_size % cores_per_chip == 0\n split_chips = split_size // cores_per_chip\n if split_chips == 1:\n computation_shape = [1, 1, 1, cores_per_chip]\n elif split_chips == 2:\n computation_shape = [1, 2, 1, cores_per_chip]\n elif split_chips == 4:\n computation_shape = [2, 2, 1, cores_per_chip]\n elif split_chips == 8:\n computation_shape = [4, 2, 1, cores_per_chip]\n elif split_chips == 12:\n computation_shape = [1, 1, 12, cores_per_chip]\n elif split_chips == 16:\n computation_shape = [4, 4, 1, cores_per_chip]\n elif split_chips == 24:\n computation_shape = [1, 2, 12, cores_per_chip]\n elif split_chips == 32:\n if topology and topology_info.mesh_shape[1] == 32:\n # Fwd within-replica all-reduces is performed along column;\n # Bwd gradient cross-replica all-reduces is performed along row.\n # This currently has better performance than the strided patten.\n computation_shape = [1, 32, 1, cores_per_chip]\n else:\n computation_shape = [4, 8, 1, cores_per_chip]\n elif split_chips == 64:\n computation_shape = [8, 8, 1, cores_per_chip]\n elif split_chips == 128:\n computation_shape = [8, 16, 1, cores_per_chip]\n elif split_chips == 256:\n computation_shape = [16, 16, 1, cores_per_chip]\n elif split_chips == 512:\n computation_shape = [16, 
32, 1, cores_per_chip]\n elif split_chips == 1024:\n computation_shape = [32, 32, 1, cores_per_chip]\n elif split_chips == 2048:\n computation_shape = [64, 32, 1, cores_per_chip]\n elif split_chips == 4096:\n computation_shape = [128, 32, 1, cores_per_chip]\n else:\n assert False, ('Model parallelism with %d devices is currently not'\n ' supported.' % split_size)\n assert computation_shape is not None\n return computation_shape\n\n\ndef GetExtraVars():\n \"\"\"Returns the captured variables by the function.\"\"\"\n g = tf.get_default_graph()\n if isinstance(g, func_graph.FuncGraph):\n return g.variable_captures\n return function.get_extra_vars()\n\n\ndef GetExtraInputs():\n \"\"\"Returns the captured input tensors by the function.\"\"\"\n g = tf.get_default_graph()\n if isinstance(g, func_graph.FuncGraph):\n return g.external_captures\n return function.get_extra_inputs()\n\n\ndef GetExtraArgs():\n \"\"\"Returns the corresponding function arguments for the captured inputs.\"\"\"\n g = tf.get_default_graph()\n if isinstance(g, func_graph.FuncGraph):\n return g.internal_captures\n return function.get_extra_args()\n\n\ndef ShardedFilePatternToGlob(file_pattern):\n \"\"\"Converts a file pattern path@shards to path-?????-of-shards.\"\"\"\n if ',' in file_pattern:\n raise ValueError(\n 'ShardedFilePatternToGlob does not support multiple file patterns.')\n if '@' not in file_pattern:\n return file_pattern\n path, shards = file_pattern.split('@')\n if shards == '*':\n return f'{path}-?????-of-*'\n return f'{path}-?????-of-{int(shards):05}'\n\n\ndef ComputeNceAndAuc(probs, targets, mask):\n \"\"\"Compute normalized cross entropy and AUC of the PR curve for a batch.\n\n Args:\n probs: a tensor of shape [batch, time].\n targets: a tensor of shape [batch, time], where each element is either 0 or\n 1 indicating wrong or correct.\n mask: a tensor of shape [batch, time], a mask for hyp sequence.\n\n Returns:\n nce: a tensor of shape [1], the normalized cross entropy value.\n auc: a tensor of shape [1], the AUC value.\n \"\"\"\n\n def LogWithClip(tensor, clip_value_min=1e-8):\n \"\"\"Clip all elements of a tensor to a minimum before taking log.\"\"\"\n return tf.math.log(tf.clip_by_value(tensor, clip_value_min, 1.0))\n\n bce = -targets * LogWithClip(probs) - (1 - targets) * LogWithClip(1 - probs)\n num_cor = tf.reduce_sum(targets * mask)\n num_tokens = tf.reduce_sum(mask)\n wcr = num_cor / num_tokens\n entropy = -wcr * LogWithClip(wcr) - (1 - wcr) * LogWithClip(1 - wcr)\n avg_conditional_entropy = tf.reduce_mean(tf.boolean_mask(bce, mask))\n nce = (entropy - avg_conditional_entropy) / entropy\n auc = tf.metrics.auc(targets, probs, mask, curve='PR')[1]\n return nce, auc\n\n\ndef GatherTensorValuesBySeqIndices(tensor, class_indices, keepdims=False):\n \"\"\"Gather values from a 3d tensor according to sequences of indices.\n\n Args:\n tensor: a 3d tensor of [dim0, dim1, num_class], e.g. 
output from softmax.\n class_indices: a 2d tensor of [dim0, dim1], where the second dim is a\n sequence of class indices between 0 to num_class - 1, inclusive.\n keepdims: bool, expand the last dimension of the returned tensor if True.\n\n Returns:\n A tensor ret of [dim0, dim1], where\n ret[b, t] = tensor[b, t, indices[b, t]].\n If keepdims is True, then ret has shape [dim0, dim1, 1].\n \"\"\"\n tensor = HasRank(tensor, 3)\n class_indices = HasRank(class_indices, 2)\n tensor = HasShape(tensor, GetShape(class_indices), 2)\n dim0 = GetShape(class_indices)[0]\n dim1 = GetShape(class_indices)[1]\n dim0_indices = tf.tile(tf.expand_dims(tf.range(dim0), axis=-1), [1, dim1])\n dim1_indices = tf.tile(tf.expand_dims(tf.range(dim1), axis=0), [dim0, 1])\n gather_indices = tf.stack([\n tf.cast(dim0_indices, dtype=class_indices.dtype),\n tf.cast(dim1_indices, dtype=class_indices.dtype), class_indices\n ],\n axis=-1)\n ret = tf.gather_nd(tensor, gather_indices)\n if keepdims:\n ret = tf.expand_dims(ret, axis=-1)\n return ret\n\n\ndef GetSoftmaxProbsBySeqIndices(logits, indices, keepdims=False):\n \"\"\"Get softmax probabilities from index sequences given logits sequences.\n\n Args:\n logits: a tensor of [batch, time, num_class] or [time, batch, num_class].\n indices: a tensor of [batch, time] or [time, batch].\n keepdims: bool, expand the last dimension of the returned tensor if True.\n\n Returns:\n a tensor of [batch, time] or [time, batch] for the corresponding softmax\n probabilities. If keepdims is True, returned tensor has a third dimension\n of size 1.\n \"\"\"\n probs = tf.nn.softmax(logits)\n return GatherTensorValuesBySeqIndices(probs, indices, keepdims)\n\n\ndef DivideNoNan(x, y):\n \"\"\"Equivalent to tf.math.divide_no_nan but supports bfloat16.\"\"\"\n safe_y = tf.where(tf.equal(y, 0.), tf.ones_like(y), y)\n return tf.where(tf.equal(y, 0.0), tf.zeros_like(x), x / safe_y)\n\n\ndef SequencePaddings(seqlen, maxlen=None):\n mask = tf.sequence_mask(seqlen, maxlen, dtype=tf.float32)\n return 1 - mask\n\n\ndef AppendDims(x, ndims):\n return tf.reshape(x, GetShape(x) + [1] * ndims)\n\n\ndef MaybeSoftCapLogits(x, cap=0.0):\n \"\"\"Caps logits x to be within a certain range.\n\n Args:\n x: A float tensor, the logit values to be capped.\n cap: a float, the limit to cap x within. 
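# NOTE(editorial example, not in the original Lingvo source): a hedged sketch
# of GatherTensorValuesBySeqIndices above, selecting one value per
# (batch, time) position from a [batch, time, num_class] tensor.
def _EditorialGatherBySeqIndicesExample():
  probs = tf.constant([[[0.1, 0.9],
                        [0.8, 0.2]]])                # [dim0=1, dim1=2, C=2]
  ids = tf.constant([[1, 0]], dtype=tf.int32)        # [dim0=1, dim1=2]
  return GatherTensorValuesBySeqIndices(probs, ids)  # [[0.9, 0.8]]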
If cap <= 0.0, x is not capped.\n\n Returns:\n logits after capping.\n \"\"\"\n if cap <= 0.0:\n return x\n else:\n return cap * tf.math.tanh(x / cap)\n\n\ndef GetTpuEmbeddingGraphCollection():\n \"\"\"Return the graph collection that stores the TpuEmbeddingCollection.\"\"\"\n tpu_emb_graph_collection = tf.get_collection_ref('__tpu_embedding_collection')\n assert len(tpu_emb_graph_collection) <= 1\n return tpu_emb_graph_collection\n\n\nclass AuxLossContext:\n \"\"\"Context that holds a list of aux-losses.\n\n By default it is non-reentrant, but can be specified as reentrant explicitly\n when creating an inner context.\n \"\"\"\n\n _global_stack = []\n\n @classmethod\n def Current(cls):\n \"\"\"Returns current context or None.\"\"\"\n if cls._global_stack:\n return cls._global_stack[-1]\n else:\n return None\n\n def __init__(self, reentrant=False):\n self.aux_loss_tensors = []\n self._reentrant = reentrant\n\n def AddLoss(self, loss):\n self.aux_loss_tensors.append(loss)\n\n @property\n def aux_losses(self):\n return self.aux_loss_tensors\n\n def __enter__(self):\n if not self._reentrant:\n assert not self._global_stack, 'no re-entry'\n self._global_stack.append(self)\n return self\n\n def __exit__(self, *args):\n self._global_stack.pop()\n\n\ndef GetTrainableVariables(scope, bprop_variable_filter,\n bprop_variable_exclusion, vmap):\n \"\"\"Returns trainable vars.\n\n Args:\n scope: A Python str.\n bprop_variable_filter: see BaseTask.Params().bprop_variable_filter.\n bprop_variable_exclusion: see BaseTask.Params().bprop_variable_exclusion.\n vmap: A NestedMap of var_path(str) -> tf Variable.\n\n Returns:\n A filtered NestedMap of var_path(str) -> trainable tf Variable.\n \"\"\"\n pos = re.compile(bprop_variable_filter) if bprop_variable_filter else None\n neg = re.compile(\n bprop_variable_exclusion) if bprop_variable_exclusion else None\n\n def VariableFilter(v):\n \"\"\"Returns True if variable v should be optimized by this learner.\"\"\"\n if not v.trainable:\n return False\n\n if pos and not pos.search(v.name):\n tf.logging.info('%s: disabled by bprop_variable_filter: %s', scope,\n v.name)\n return False\n if neg and neg.search(v.name):\n tf.logging.info('%s: disabled by bprop_variable_exclusion: %s', scope,\n v.name)\n return False\n return True\n\n return vmap.Filter(VariableFilter)\n", "# Lint as: python3\n# Copyright 2021 The TensorFlow Authors. 
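# NOTE(editorial example, not in the original py_utils source): a hedged
# sketch of the AuxLossContext defined above; nested layers add auxiliary
# losses via AuxLossContext.Current(), and the owning task reads them back.
def _EditorialAuxLossExample():
  with AuxLossContext() as ctx:
    AuxLossContext.Current().AddLoss(tf.constant(0.01))
  return ctx.aux_losses  # a list holding the single 0.01 loss tensor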
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Training loop for lingvo Jax model.\"\"\"\n\nimport functools\nimport os\nimport time\nfrom typing import List, Optional\n\nfrom absl import logging\nimport jax\nfrom jax.experimental import maps\nfrom lingvo.jax import checkpoints\nfrom lingvo.jax import model_utils\nfrom lingvo.jax import partitioning\nfrom lingvo.jax import py_utils\nfrom lingvo.jax import summary_utils\nfrom lingvo.jax import trainer_lib\nimport tensorflow.compat.v2 as tf\n\nInstantiableParams = py_utils.InstantiableParams\n\n\ndef train_and_evaluate(model_name: str, job_log_dir: Optional[str],\n multi_host_checkpointing: Optional[bool],\n restore_checkpoint_dir: Optional[str],\n restore_checkpoint_step: Optional[int],\n eval_on_test: Optional[bool]) -> None:\n \"\"\"Runs the training and evaluation loop.\n\n Args:\n model_name: The name of the model from the registry to train.\n job_log_dir: The directory for the job logs.\n multi_host_checkpointing: Whether to use multi-host checkpointing.\n restore_checkpoint_dir: If set, the directory from which to restore\n checkpoint. If unset, use job_log_dir's `checkpoints` subdirectory\n instead.\n restore_checkpoint_step: If set, the checkpoint step to restore. If unset,\n try to restore from the latest checkpoint if any.\n eval_on_test: Whether to eval on test as a part of the training loop.\n \"\"\"\n model_config = model_utils.get_model(model_name)()\n\n if jax.process_index() == 0:\n # Write out the params file.\n params_fpath = os.path.join(job_log_dir, 'model_params.txt')\n if not tf.io.gfile.exists(job_log_dir):\n tf.io.gfile.makedirs(job_log_dir)\n with tf.io.gfile.GFile(params_fpath, 'w') as params_file:\n datasets = model_config.Datasets()\n for dataset in datasets:\n params_file.write(dataset.ToText())\n params_file.write('\\n\\n')\n params_file.write(model_config.Task().ToText())\n\n model_p = model_config.Task()\n train_input_p = [v for v in model_config.Datasets() if v.is_training]\n if len(train_input_p) != 1:\n raise ValueError(\n f'Expecting exactly one training split. 
Got `{len(train_input_p)}`.')\n train_input_p = train_input_p[0].input_gen_params\n eval_input_p = None\n if eval_on_test:\n eval_input_p = [\n v.input_gen_params for v in model_config.Datasets() if not v.is_training\n ]\n if 'bucket_batch_limit' in train_input_p:\n logging.info('train_input_p.bucket_batch_limit: %s',\n train_input_p.bucket_batch_limit)\n if model_p.device_mesh is not None:\n train_and_evaluate_spmd_model(model_p, train_input_p, job_log_dir,\n multi_host_checkpointing,\n restore_checkpoint_dir,\n restore_checkpoint_step, eval_input_p)\n else:\n train_and_evaluate_pmap(model_p, train_input_p, job_log_dir,\n restore_checkpoint_dir, restore_checkpoint_step,\n eval_input_p)\n\n\ndef train_and_evaluate_pmap(\n model_p: InstantiableParams, train_input_p: InstantiableParams,\n job_log_dir: Optional[str], restore_checkpoint_dir: Optional[str],\n restore_checkpoint_step: Optional[int],\n eval_input_p: Optional[List[InstantiableParams]]) -> None:\n \"\"\"Runs the training and evaluation loop.\n\n Args:\n model_p: Params for the data parallel model.\n train_input_p: Params for the train data input pipeline.\n job_log_dir: Directory for the job logs.\n restore_checkpoint_dir: If set, the directory from which to restore\n checkpoint. If unset, use job_log_dir's `checkpoints` subdirectory\n instead.\n restore_checkpoint_step: If set, the checkpoint step to restore. If unset,\n try to restore from the latest checkpoint if any.\n eval_input_p: Optional list of params for the eval input pipeline.\n \"\"\"\n logging.info('Using pmap for data parallelism.')\n jax_model = model_p.Instantiate()\n\n with py_utils.InfeedContextScope(\n infeed_host_index=jax.process_index(),\n num_infeed_hosts=jax.process_count()):\n train_input_pipeline = train_input_p.Instantiate()\n get_model_inputs = functools.partial(model_utils.get_model_inputs,\n train_input_pipeline)\n if eval_input_p is not None:\n eval_input_pipelines = [input_p.Instantiate() for input_p in eval_input_p]\n get_eval_model_inputs = functools.partial(model_utils.get_model_inputs,\n eval_input_pipelines)\n\n # TODO(shafey): Retrieve the seeds from the model definition instead.\n prng_key = jax.random.PRNGKey(1234)\n prng_key, init_key = jax.random.split(prng_key)\n\n checkpoint_dir = os.path.join(job_log_dir, 'checkpoints')\n restore_checkpoint_dir = restore_checkpoint_dir or checkpoint_dir\n model_states = trainer_lib.InitializesModelState(jax_model, init_key)\n model_states = checkpoints.RestoreCheckpoint(\n model_states, restore_checkpoint_dir, step=restore_checkpoint_step)\n total_num_params = jax_model.total_num_vars\n replicated_model_states = trainer_lib.ReplicateModelState(model_states)\n # Unreplicated model states are not needed anymore at that point.\n del model_states\n\n logging.info('replicated_model_states shapes: %s',\n jax.tree_map(lambda x: x.shape, replicated_model_states))\n # From now on, different replicas should use different random seeds.\n # Here, each process will have its unique prng_key.\n # prng_key will be further split so that each core on a host will get\n # different prng_key.\n prng_key = jax.random.fold_in(prng_key, jax.process_index())\n logging.info('root prng_key: %s', prng_key)\n\n fprop_dtype = model_p.fprop_dtype\n\n def train_step(states, prng_key, inputs):\n return trainer_lib.TrainStepSingleLearner(\n jax_model,\n states,\n prng_key,\n inputs,\n data_parallel_axis_name='batch',\n fprop_dtype=fprop_dtype)\n\n def eval_step(mdl_vars, prng_key, global_step, inputs):\n return 
trainer_lib.EvalStepSingleLearner(\n jax_model,\n mdl_vars,\n prng_key,\n global_step,\n inputs,\n data_parallel_axis_name='batch',\n fprop_dtype=fprop_dtype)\n\n num_devices = jax.local_device_count()\n prng_key, train_key, eval_key = jax.random.split(prng_key, 3)\n train_prng_seed = jax.random.split(train_key, num=num_devices)\n eval_prng_seed = jax.random.split(eval_key, num=num_devices)\n logging.info('train prng_seed: %s', train_prng_seed)\n logging.info('eval prng_seed: %s', eval_prng_seed)\n\n p_train_step = jax.pmap(train_step, donate_argnums=(0,), axis_name='batch')\n p_eval_step = jax.pmap(eval_step, axis_name='batch')\n\n train_p = model_p.train\n\n logging.info('Training loop starting...')\n summary_base_dir = os.path.join(job_log_dir, 'summaries')\n summary_train_dir = os.path.join(summary_base_dir, 'train')\n summary_eval_dir = os.path.join(summary_base_dir, 'eval_train')\n summary_writer = summary_utils.GetSummaryWriter\n if eval_input_p is not None:\n summary_eval_dirs = [\n os.path.join(summary_base_dir, f'eval_test_{split}')\n for split, _ in enumerate(eval_input_p)\n ]\n # Only run one eval step during training.\n # TODO(yonghui): Allow user to customize this.\n eval_num_steps = [-1 if p.resettable else 1 for p in eval_input_p]\n\n with summary_writer(\n summary_train_dir) as train_summary_writer, summary_writer(\n summary_eval_dir) as eval_summary_writer:\n\n summary_utils.WriteModelStructure(\n train_summary_writer, replicated_model_states, is_vars_replicated=True)\n summary_utils.WriteTotalNumParams(train_summary_writer, total_num_params)\n\n summary_last_time = time.time()\n summary_last_step = None\n\n step_i = int(jax.device_get(replicated_model_states.step)[0])\n while True:\n logging.debug('step=`%d`: Beginning', step_i)\n if step_i >= train_p.num_train_steps:\n logging.info(\n 'Training loop completed (step (`%d`) greater than '\n 'num_train_step (`%d`).', step_i, train_p.num_train_steps)\n break\n if summary_last_step is None:\n summary_last_step = step_i - 1\n\n if (jax.process_index() == 0 and\n step_i % train_p.save_interval_steps == 0):\n checkpoints.SaveCheckpoint(\n replicated_model_states,\n checkpoint_dir,\n max_checkpoints=train_p.save_max_to_keep)\n\n if step_i <= 5:\n logging.info('step=`%d`: Retrieving model inputs.', step_i)\n logging.debug(' Retrieving inputs.')\n model_inputs = tf.nest.map_structure(py_utils.Reshard, get_model_inputs())\n logging.debug(' Retrieved inputs.')\n logging.debug(' Performing train_step().')\n (replicated_model_states, loss, metrics, per_example_out,\n summary_tensors) = p_train_step(replicated_model_states, train_prng_seed,\n model_inputs)\n logging.debug(' Completed train_step().')\n\n logging.debug(' Writing summaries (attempt).')\n if summary_utils.WriteSummaryEveryNSteps(\n replicated_model_states,\n train_summary_writer,\n step_i,\n train_p.summary_interval_steps,\n loss,\n metrics,\n per_example_out,\n summary_tensors,\n train_p.norm_summary_interval_steps,\n summary_last_time,\n summary_last_step,\n unreplicate_mdl_vars=True,\n unreplicate_metrics=True):\n summary_last_time = time.time()\n summary_last_step = step_i\n # Synchronize step_i\n step_i = int(jax.device_get(replicated_model_states.step)[0])\n else:\n # Increment locally to avoid an explicit sync.\n step_i += 1\n logging.debug(' Wrote summaries (attempted).')\n\n # Run eval at regular step interval.\n if step_i % train_p.eval_interval_steps == 0:\n logging.debug(' Starting eval_step().')\n logging.debug(' Retrieving eval model_inputs.')\n eval_inputs = 
get_model_inputs()\n logging.debug(' Retrieved eval model_inputs.')\n logging.debug(' Performing eval_step() runs on training split.')\n eval_step_fn = functools.partial(p_eval_step,\n replicated_model_states.mdl_vars,\n eval_prng_seed,\n replicated_model_states.step)\n loss, mean_metrics, summary_tensors = model_utils.run_eval_one_step(\n eval_inputs, eval_step_fn, reshard_inputs=True)\n logging.debug(' Completed eval_step() runs on training split.')\n logging.info('step=`%d`', step_i)\n logging.info(' eval loss: %s', loss)\n logging.info(' mean_metrics: %s', mean_metrics)\n logging.info(' summary_tensors: %s', summary_tensors)\n if step_i % train_p.summary_interval_steps == 0:\n logging.debug(' Writing eval summaries.')\n summary_utils.WriteSummaryEntry(\n eval_summary_writer,\n step_i,\n loss,\n mean_metrics,\n summary_tensors,\n unreplicate_metrics=True)\n logging.debug(' Wrote eval summaries.')\n # Eval on the test sets.\n if eval_input_p is not None:\n logging.debug(' Performing eval_step() runs on test splits.')\n model_utils.run_eval_loop_over_test_splits(\n eval_num_steps,\n eval_step_fn,\n summary_writer,\n summary_eval_dirs,\n step_i,\n get_eval_model_inputs,\n reshard_inputs=True)\n for i, _ in enumerate(eval_input_pipelines):\n if eval_input_p[i].resettable:\n # We re-instantiate the input to reset it.\n with py_utils.InfeedContextScope(\n infeed_host_index=jax.process_index(),\n num_infeed_hosts=jax.process_count()):\n eval_input_pipelines[i] = eval_input_p[i].Instantiate()\n if any([p.resettable for p in eval_input_p]):\n get_eval_model_inputs = functools.partial(\n model_utils.get_model_inputs, eval_input_pipelines)\n logging.debug(' Completed eval_step() runs on test splits.')\n logging.debug('step=`%d`: End', step_i - 1)\n\n\ndef train_and_evaluate_spmd_model(\n model_p: InstantiableParams, train_input_p: InstantiableParams,\n job_log_dir: Optional[str], multi_host_checkpointing: bool,\n restore_checkpoint_dir: Optional[str],\n restore_checkpoint_step: Optional[int],\n eval_input_p: Optional[InstantiableParams]) -> None:\n \"\"\"Runs the training and evaluation loop.\n\n Args:\n model_p: Params for the SPMD model.\n train_input_p: Params for the train data pipeline.\n job_log_dir: Directory for the job logs.\n multi_host_checkpointing: Whether to use multi-host checkpointing.\n restore_checkpoint_dir: If set, the directory from which to restore\n checkpoint. If unset, use job_log_dir's `checkpoints` subdirectory\n instead.\n restore_checkpoint_step: If set, the checkpoint step to restore. 
If unset,\n try to restore from the latest checkpoint if any.\n eval_input_p: Optional list of params for the eval input pipeline.\n \"\"\"\n logging.info('Using SPMD sharding for model parallelism.')\n with py_utils.InfeedContextScope(\n infeed_host_index=jax.process_index(),\n num_infeed_hosts=jax.process_count()):\n train_input_pipeline = train_input_p.Instantiate()\n get_model_inputs = functools.partial(model_utils.get_model_inputs,\n train_input_pipeline)\n if eval_input_p is not None:\n eval_input_pipelines = [input_p.Instantiate() for input_p in eval_input_p]\n get_eval_model_inputs = functools.partial(model_utils.get_model_inputs,\n eval_input_pipelines)\n\n # TODO(bf-jax): Retrieve the seeds from the model definition instead.\n prng_key = jax.random.PRNGKey(1234)\n prng_key, init_key = jax.random.split(prng_key)\n\n checkpoint_dir = os.path.join(job_log_dir, 'checkpoints')\n restore_checkpoint_dir = restore_checkpoint_dir or checkpoint_dir\n if multi_host_checkpointing:\n checkpoint_task_dir = os.path.join(checkpoint_dir,\n f'{jax.process_index():03d}')\n restore_checkpoint_task_dir = os.path.join(restore_checkpoint_dir,\n f'{jax.process_index():03d}')\n else:\n checkpoint_task_dir = checkpoint_dir\n restore_checkpoint_task_dir = restore_checkpoint_dir\n\n if jax.process_index() == 0:\n tf.io.gfile.makedirs(checkpoint_dir)\n if multi_host_checkpointing:\n # Block all hosts until directory is ready.\n py_utils.SyncGlobalDevices(f'checkpointer:makedirs:{checkpoint_dir}')\n\n logging.info('step=`0`: Retrieving model inputs.')\n model_inputs = tf.nest.map_structure(lambda x: x.numpy(), get_model_inputs())\n\n def get_shape_dtype(x):\n # We assume all the hosts infeed the same data.\n process_count = jax.process_count()\n assert len(x.shape) >= 1\n x_shape = (x.shape[0] * process_count,) + x.shape[1:]\n y = jax.ShapeDtypeStruct(x_shape, x.dtype)\n return y\n\n inputs_shape = tf.nest.map_structure(get_shape_dtype, model_inputs)\n\n mesh_shape = model_p.device_mesh.shape\n device_mesh = partitioning.CreateDeviceMesh(mesh_shape)\n logging.info('device_mesh: %s', device_mesh)\n with maps.mesh(device_mesh, model_p.mesh_axis_names):\n (partitioned_train_state, _, train_step, eval_step, _, _,\n total_num_params) = trainer_lib.PartitionSpmdModel(model_p, init_key,\n inputs_shape)\n\n partitioned_train_state = checkpoints.RestoreCheckpoint(\n partitioned_train_state,\n restore_checkpoint_task_dir,\n step=restore_checkpoint_step)\n logging.info('partitioned_train_state shapes: %s',\n jax.tree_map(lambda x: x.shape, partitioned_train_state))\n if multi_host_checkpointing:\n py_utils.SyncGlobalDevices(f'checkpointer:restored:{checkpoint_dir}')\n\n # We do not fold in jax.process_index in contrast to the pmap version and\n # use a single global key instead to rely on pjit to split for different\n # replicas.\n logging.info('root prng_key: %s', prng_key)\n prng_key, train_key, eval_key = jax.random.split(prng_key, 3)\n logging.info('train prng_key: %s', train_key)\n logging.info('eval prng_key: %s', eval_key)\n\n train_p = model_p.train\n\n logging.info('Training loop starting...')\n summary_base_dir = os.path.join(job_log_dir, 'summaries')\n summary_train_dir = os.path.join(summary_base_dir, 'train')\n summary_eval_dir = os.path.join(summary_base_dir, 'eval_train')\n summary_writer = summary_utils.GetSummaryWriter\n if eval_input_p is not None:\n summary_eval_dirs = [\n os.path.join(summary_base_dir, f'eval_test_{split}')\n for split, _ in enumerate(eval_input_p)\n ]\n # Eval batch size per replica 
defaults to 1 when not resettable,\n # otherwise we exhaust all eval data (num_steps=-1).\n # TODO(yonghui): Allow user to customize this.\n eval_num_steps = [-1 if p.resettable else 1 for p in eval_input_p]\n\n with summary_writer(\n summary_train_dir) as train_summary_writer, summary_writer(\n summary_eval_dir) as eval_summary_writer:\n\n # This only prints the view from the first host machine.\n summary_utils.WriteModelStructure(\n train_summary_writer,\n partitioned_train_state,\n is_vars_replicated=False)\n summary_utils.WriteTotalNumParams(train_summary_writer, total_num_params)\n\n summary_last_time = time.time()\n summary_last_step = None\n\n step_i = int(jax.device_get(partitioned_train_state.step))\n\n # Start the train loop. Make sure all at the same step.\n py_utils.SyncGlobalDevices(f'Start training loop from step: {step_i}')\n while True:\n logging.debug('step=`%d`: Beginning', step_i)\n if step_i >= train_p.num_train_steps:\n logging.info(\n 'Training loop completed (step (`%d`) greater than '\n 'num_train_step (`%d`).', step_i, train_p.num_train_steps)\n break\n\n if summary_last_step is None:\n summary_last_step = step_i - 1\n\n if step_i % train_p.save_interval_steps == 0:\n logging.info('Saving a ckpt at step: %d', step_i)\n if multi_host_checkpointing:\n py_utils.SyncGlobalDevices(\n f'checkpointer:saving:{checkpoint_dir}:step-{step_i}')\n if multi_host_checkpointing or jax.process_index() == 0:\n checkpoints.SaveCheckpoint(\n partitioned_train_state,\n checkpoint_task_dir,\n max_checkpoints=train_p.save_max_to_keep,\n unreplicate=False)\n if multi_host_checkpointing:\n py_utils.SyncGlobalDevices(\n f'checkpointer:saved:{checkpoint_dir}:step-{step_i}')\n\n logging.debug(' Performing train_step().')\n (partitioned_train_state, loss, metrics, per_example_out,\n summary_tensors) = train_step(partitioned_train_state, train_key,\n model_inputs)\n logging.debug(' Completed train_step().')\n\n logging.debug(' Writing summaries (attempt).')\n if summary_utils.WriteSummaryEveryNSteps(\n partitioned_train_state,\n train_summary_writer,\n step_i,\n train_p.summary_interval_steps,\n loss,\n metrics,\n per_example_out,\n summary_tensors,\n train_p.norm_summary_interval_steps,\n summary_last_time,\n summary_last_step,\n unreplicate_mdl_vars=False,\n unreplicate_metrics=False):\n summary_last_time = time.time()\n summary_last_step = step_i\n step_i = int(jax.device_get(partitioned_train_state.step))\n else:\n # Increment train step locally to avoid an explicit device sync.\n step_i += 1\n logging.debug(' Wrote summaries (attempted).')\n\n # Run eval at regular step interval.\n if step_i % train_p.eval_interval_steps == 0:\n logging.debug(' Starting eval_step().')\n logging.debug(' Retrieving eval model_inputs.')\n eval_inputs = get_model_inputs()\n logging.debug(' Retrieved eval model_inputs.')\n logging.debug(' Performing eval_step() runs on training split.')\n eval_step_fn = functools.partial(eval_step,\n partitioned_train_state.mdl_vars,\n eval_key,\n partitioned_train_state.step)\n loss, mean_metrics, summary_tensors = model_utils.run_eval_one_step(\n eval_inputs, eval_step_fn, reshard_inputs=False)\n logging.debug(' Completed eval_step() runs on training split.')\n\n logging.info('step=`%d`', step_i)\n logging.info(' eval loss: %s', loss)\n logging.info(' mean_metrics: %s', mean_metrics)\n logging.info(' summary_tensors: %s', summary_tensors)\n if step_i % train_p.summary_interval_steps == 0:\n logging.debug(' Writing eval summaries.')\n summary_utils.WriteSummaryEntry(\n 
eval_summary_writer,\n step_i,\n loss,\n mean_metrics,\n summary_tensors,\n unreplicate_metrics=False)\n logging.debug(' Wrote eval summaries.')\n # If we have eval test then also evaluate on test.\n if eval_input_p is not None:\n logging.debug(' Performing eval_step() runs on test splits.')\n model_utils.run_eval_loop_over_test_splits(\n eval_num_steps,\n eval_step_fn,\n summary_writer,\n summary_eval_dirs,\n step_i,\n get_eval_model_inputs,\n reshard_inputs=False)\n for i, _ in enumerate(eval_input_pipelines):\n if eval_input_p[i].resettable:\n # We re-instantiate the input to reset it.\n with py_utils.InfeedContextScope(\n infeed_host_index=jax.process_index(),\n num_infeed_hosts=jax.process_count()):\n eval_input_pipelines[i] = eval_input_p[i].Instantiate()\n if any([p.resettable for p in eval_input_p]):\n get_eval_model_inputs = functools.partial(\n model_utils.get_model_inputs, eval_input_pipelines)\n logging.debug(' Completed eval_step() runs on test splits.')\n\n # Get new model inputs\n if step_i <= 5:\n logging.info('step=`%d`: Retrieving model inputs.', step_i)\n logging.debug(' Retrieving inputs.')\n model_inputs = tf.nest.map_structure(lambda x: x.numpy(),\n get_model_inputs())\n logging.debug(' Retrieved inputs.')\n logging.debug('step=`%d`: End', step_i - 1)\n" ]
[ [ "numpy.sqrt", "numpy.get_printoptions", "numpy.asarray", "tensorflow.python.framework.function.get_extra_vars", "tensorflow.python.ops.init_ops.random_uniform_initializer", "tensorflow.python.ops.init_ops.constant_initializer", "numpy.random.randint", "tensorflow.python.tpu.tpu_function.get_tpu_context", "tensorflow.python.ops.stateless_random_ops.stateless_random_normal", "tensorflow.python.ops.stateless_random_ops.stateless_truncated_normal", "numpy.array_repr", "tensorflow.python.tpu.topology.Topology", "numpy.save", "tensorflow.python.ops.init_ops.uniform_unit_scaling_initializer", "numpy.int64", "tensorflow.core.framework.attr_value_pb2.AttrValue", "tensorflow.python.framework.function.get_extra_inputs", "numpy.set_printoptions", "tensorflow.python.ops.init_ops.random_normal_initializer", "tensorflow.python.tf2.enabled", "numpy.prod", "tensorflow.python.framework.function.get_extra_args", "tensorflow.python.ops.init_ops.truncated_normal_initializer", "tensorflow.python.ops.stateless_random_ops.stateless_random_uniform" ], [ "tensorflow.compat.v2.nest.map_structure", "tensorflow.compat.v2.io.gfile.GFile", "tensorflow.compat.v2.io.gfile.exists", "tensorflow.compat.v2.io.gfile.makedirs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
y-yao/pyscf
[ "9b6589109be372334c51053d48fc1b80ce10ce23" ]
[ "pyscf/cc/gccsd_t_rdm.py" ]
[ "#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\nimport numpy\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf.cc import gccsd_rdm\n\ndef _gamma1_intermediates(mycc, t1, t2, l1, l2, eris=None):\n doo, dov, dvo, dvv = gccsd_rdm._gamma1_intermediates(mycc, t1, t2, l1, l2)\n\n if eris is None: eris = mycc.ao2mo()\n\n nocc, nvir = t1.shape\n bcei = numpy.asarray(eris.ovvv).conj().transpose(3,2,1,0)\n majk = numpy.asarray(eris.ooov).conj().transpose(2,3,0,1)\n bcjk = numpy.asarray(eris.oovv).conj().transpose(2,3,0,1)\n\n mo_e = eris.fock.diagonal().real\n eia = mo_e[:nocc,None] - mo_e[nocc:]\n d3 = lib.direct_sum('ia+jb+kc->ijkabc', eia, eia, eia)\n\n t3c =(numpy.einsum('jkae,bcei->ijkabc', t2, bcei)\n - numpy.einsum('imbc,majk->ijkabc', t2, majk))\n t3c = t3c - t3c.transpose(0,1,2,4,3,5) - t3c.transpose(0,1,2,5,4,3)\n t3c = t3c - t3c.transpose(1,0,2,3,4,5) - t3c.transpose(2,1,0,3,4,5)\n t3c /= d3\n\n t3d = numpy.einsum('ia,bcjk->ijkabc', t1, bcjk)\n t3d += numpy.einsum('ai,jkbc->ijkabc', eris.fock[nocc:,:nocc], t2)\n t3d = t3d - t3d.transpose(0,1,2,4,3,5) - t3d.transpose(0,1,2,5,4,3)\n t3d = t3d - t3d.transpose(1,0,2,3,4,5) - t3d.transpose(2,1,0,3,4,5)\n t3d /= d3\n\n goo = numpy.einsum('iklabc,jklabc->ij', (t3c+t3d).conj(), t3c) * (1./12)\n gvv = numpy.einsum('ijkacd,ijkbcd->ab', t3c+t3d, t3c.conj()) * (1./12)\n doo[numpy.diag_indices(nocc)] -= goo.diagonal()\n dvv[numpy.diag_indices(nvir)] += gvv.diagonal()\n dvo += numpy.einsum('ijab,ijkabc->ck', t2.conj(), t3c) * (1./4)\n\n return doo, dov, dvo, dvv\n\n# gamma2 intermediates in Chemist's notation\ndef _gamma2_intermediates(mycc, t1, t2, l1, l2, eris=None):\n dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = \\\n gccsd_rdm._gamma2_intermediates(mycc, t1, t2, l1, l2)\n if eris is None: eris = mycc.ao2mo()\n\n nocc, nvir = t1.shape\n bcei = numpy.asarray(eris.ovvv).conj().transpose(3,2,1,0)\n majk = numpy.asarray(eris.ooov).conj().transpose(2,3,0,1)\n bcjk = numpy.asarray(eris.oovv).conj().transpose(2,3,0,1)\n\n mo_e = eris.fock.diagonal().real\n eia = mo_e[:nocc,None] - mo_e[nocc:]\n d3 = lib.direct_sum('ia+jb+kc->ijkabc', eia, eia, eia)\n\n t3c =(numpy.einsum('jkae,bcei->ijkabc', t2, bcei)\n - numpy.einsum('imbc,majk->ijkabc', t2, majk))\n t3c = t3c - t3c.transpose(0,1,2,4,3,5) - t3c.transpose(0,1,2,5,4,3)\n t3c = t3c - t3c.transpose(1,0,2,3,4,5) - t3c.transpose(2,1,0,3,4,5)\n t3c /= d3\n\n t3d = numpy.einsum('ia,bcjk->ijkabc', t1, bcjk)\n t3d += numpy.einsum('ai,jkbc->ijkabc', eris.fock[nocc:,:nocc], t2)\n t3d = t3d - t3d.transpose(0,1,2,4,3,5) - t3d.transpose(0,1,2,5,4,3)\n t3d = t3d - t3d.transpose(1,0,2,3,4,5) - t3d.transpose(2,1,0,3,4,5)\n t3d /= d3\n\n goovv = numpy.einsum('kc,ijkabc->ijab', t1.conj(), t3c).conj() * (1./4)\n dovov += goovv.transpose(0,2,1,3) - goovv.transpose(0,3,1,2)\n\n m3 = t3c * 2 + t3d\n# *(1/8) instead of (1/4) because ooov appears 4 times in the 2pdm 
tensor due\n# to symmetrization, and its contribution is scaled by 1/2 in Tr(H,2pdm)\n gooov = numpy.einsum('imbc,ijkabc->jkma', t2, m3.conj()) * (1./8)\n dooov -= gooov.transpose(0,2,1,3) - gooov.transpose(1,2,0,3)\n\n govvv = numpy.einsum('jkae,ijkabc->iecb', t2, m3.conj()) * (1./8)\n dovvv += govvv.transpose(0,2,1,3) - govvv.transpose(0,3,1,2)\n return dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov\n\ndef make_rdm1(mycc, t1, t2, l1, l2, eris=None):\n d1 = _gamma1_intermediates(mycc, t1, t2, l1, l2, eris)\n return gccsd_rdm._make_rdm1(mycc, d1, True)\n\n# rdm2 in Chemist's notation\ndef make_rdm2(mycc, t1, t2, l1, l2, eris=None):\n d1 = _gamma1_intermediates(mycc, t1, t2, l1, l2, eris)\n d2 = _gamma2_intermediates(mycc, t1, t2, l1, l2, eris)\n return gccsd_rdm._make_rdm2(mycc, d1, d2, True, True)\n\n\nif __name__ == '__main__':\n from functools import reduce\n from pyscf import gto\n from pyscf import scf\n from pyscf import ao2mo\n from pyscf import cc\n\n mol = gto.Mole()\n mol.atom = [\n [8 , (0. , 0. , 0.)],\n [1 , (0. , -.957 , .587)],\n [1 , (0.2, .757 , .487)]]\n mol.basis = '631g'\n mol.build()\n mf0 = mf = scf.RHF(mol).run(conv_tol=1.)\n mf = scf.addons.convert_to_ghf(mf)\n\n from pyscf.cc import ccsd_t_lambda_slow as ccsd_t_lambda\n from pyscf.cc import ccsd_t_rdm_slow as ccsd_t_rdm\n mycc0 = cc.CCSD(mf0)\n eris0 = mycc0.ao2mo()\n mycc0.kernel(eris=eris0)\n t1 = mycc0.t1\n t2 = mycc0.t2\n imds = ccsd_t_lambda.make_intermediates(mycc0, t1, t2, eris0)\n l1, l2 = ccsd_t_lambda.update_lambda(mycc0, t1, t2, t1, t2, eris0, imds)\n dm1ref = ccsd_t_rdm.make_rdm1(mycc0, t1, t2, l1, l2, eris0)\n dm2ref = ccsd_t_rdm.make_rdm2(mycc0, t1, t2, l1, l2, eris0)\n\n mycc = cc.GCCSD(mf)\n eris = mycc.ao2mo()\n t1 = mycc.spatial2spin(t1, mycc.mo_coeff.orbspin)\n t2 = mycc.spatial2spin(t2, mycc.mo_coeff.orbspin)\n l1 = mycc.spatial2spin(l1, mycc.mo_coeff.orbspin)\n l2 = mycc.spatial2spin(l2, mycc.mo_coeff.orbspin)\n gdm1 = make_rdm1(mycc, t1, t2, l1, l2, eris)\n gdm2 = make_rdm2(mycc, t1, t2, l1, l2, eris)\n idxa = numpy.where(mycc.mo_coeff.orbspin == 0)[0]\n idxb = numpy.where(mycc.mo_coeff.orbspin == 1)[0]\n\n trdm1 = gdm1[idxa[:,None],idxa]\n trdm1+= gdm1[idxb[:,None],idxb]\n trdm2 = gdm2[idxa[:,None,None,None],idxa[:,None,None],idxa[:,None],idxa]\n trdm2+= gdm2[idxb[:,None,None,None],idxb[:,None,None],idxb[:,None],idxb]\n dm2ab = gdm2[idxa[:,None,None,None],idxa[:,None,None],idxb[:,None],idxb]\n trdm2+= dm2ab\n trdm2+= dm2ab.transpose(2,3,0,1)\n print(abs(trdm1 - dm1ref).max())\n print(abs(trdm2 - dm2ref).max())\n\n mol = gto.Mole()\n mol.atom = [\n [8 , (0. , 0. , 0.)],\n [1 , (0. , -0.757 , 0.587)],\n [1 , (0. 
, 0.757 , 0.587)]]\n mol.basis = '631g'\n mol.spin = 2\n mol.charge = 2\n mol.build()\n mf0 = mf = scf.UHF(mol).run(conv_tol=1)\n mf = scf.addons.convert_to_ghf(mf)\n\n from pyscf.cc import uccsd_t_slow\n from pyscf.cc import uccsd_t_lambda\n from pyscf.cc import uccsd_t_rdm\n mycc0 = cc.UCCSD(mf0)\n eris0 = mycc0.ao2mo()\n mycc0.kernel(eris=eris0)\n t1 = mycc0.t1\n t2 = mycc0.t2\n imds = uccsd_t_lambda.make_intermediates(mycc0, t1, t2, eris0)\n l1, l2 = uccsd_t_lambda.update_lambda(mycc0, t1, t2, t1, t2, eris0, imds)\n dm1ref = uccsd_t_rdm.make_rdm1(mycc0, t1, t2, l1, l2, eris0)\n dm2ref = uccsd_t_rdm.make_rdm2(mycc0, t1, t2, l1, l2, eris0)\n\n mycc = cc.GCCSD(mf)\n eris = mycc.ao2mo()\n t1 = mycc.spatial2spin(t1, mycc.mo_coeff.orbspin)\n t2 = mycc.spatial2spin(t2, mycc.mo_coeff.orbspin)\n l1 = mycc.spatial2spin(l1, mycc.mo_coeff.orbspin)\n l2 = mycc.spatial2spin(l2, mycc.mo_coeff.orbspin)\n gdm1 = make_rdm1(mycc, t1, t2, l1, l2, eris)\n gdm2 = make_rdm2(mycc, t1, t2, l1, l2, eris)\n idxa = numpy.where(mycc.mo_coeff.orbspin == 0)[0]\n idxb = numpy.where(mycc.mo_coeff.orbspin == 1)[0]\n\n print(abs(dm1ref[0] - gdm1[idxa[:,None],idxa]).max())\n print(abs(dm1ref[1] - gdm1[idxb[:,None],idxb]).max())\n print(abs(dm2ref[0] - gdm2[idxa[:,None,None,None],idxa[:,None,None],idxa[:,None],idxa]).max())\n print(abs(dm2ref[1] - gdm2[idxa[:,None,None,None],idxa[:,None,None],idxb[:,None],idxb]).max())\n print(abs(dm2ref[2] - gdm2[idxb[:,None,None,None],idxb[:,None,None],idxb[:,None],idxb]).max())\n\n ecc, t1, t2 = mycc.kernel(eris=eris)\n e3ref = mycc.e_tot + mycc.ccsd_t()\n l1, l2 = mycc.solve_lambda(eris=eris)\n dm1 = make_rdm1(mycc, t1, t2, l1, l2, eris=eris)\n dm2 = make_rdm2(mycc, t1, t2, l1, l2, eris=eris)\n nao = mol.nao_nr()\n mo_a = mf.mo_coeff[:nao]\n mo_b = mf.mo_coeff[nao:]\n nmo = mo_a.shape[1]\n eri = ao2mo.kernel(mf._eri, mo_a+mo_b, compact=False).reshape([nmo]*4)\n orbspin = mf.mo_coeff.orbspin\n sym_forbid = (orbspin[:,None] != orbspin)\n eri[sym_forbid,:,:] = 0\n eri[:,:,sym_forbid] = 0\n hcore = scf.RHF(mol).get_hcore()\n h1 = reduce(numpy.dot, (mo_a.T.conj(), hcore, mo_a))\n h1+= reduce(numpy.dot, (mo_b.T.conj(), hcore, mo_b))\n e3 = numpy.einsum('ij,ji', h1, dm1)\n e3+= numpy.einsum('ijkl,ijkl', eri, dm2) * .5\n e3+= mol.energy_nuc()\n print(e3 - e3ref)\n" ]
[ [ "numpy.diag_indices", "numpy.where", "numpy.asarray", "numpy.einsum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
signals-dev/sintel
[ "e1b296d356854323d4582c074fa90c25adfd6573" ]
[ "sintel/resources/computing/similar_windows.py" ]
[ "import logging\n\nimport numpy as np\nimport pandas as pd\nfrom flask_restful import Resource, reqparse\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom sintel.db import DBExplorer, schema\nfrom sintel.resources.auth_utils import verify_auth\nfrom sintel.resources.computing.utils.search_similars import return_candidate_shapes\n\nLOGGER = logging.getLogger(__name__)\n\nparser = reqparse.RequestParser(bundle_errors=True)\nparser.add_argument(\n 'start', type=float, help='Start timestamp', required=True, location='args')\nparser.add_argument(\n 'end', type=float, help='End timestamp', required=True, location='args')\nparser.add_argument(\n 'datarun_id', type=str, help='ID of signalrun', required=True, location='args')\nparser.add_argument(\n 'metric', choices=['euclidean', 'dtw'], help='Distance metric',\n default=\"euclidean\", location='args')\nparser.add_argument(\n 'number', type=int, help='Number of returned windows', default=100, location='args')\n\n\ndef get_windows(start, end, datarun_id, metric, number):\n # doc = schema.Prediction.find_one(signalrun=ObjectId(datarun_id))\n prediction_data = DBExplorer.get_prediction(datarun_id)\n timeseries = {\n 'timestamp': [d[0] for d in prediction_data['data']],\n 'value': [d[1] for d in prediction_data['data']]\n }\n df = pd.DataFrame(data=timeseries)\n\n # find the existing events\n event_docs = schema.Event.find(signalrun=datarun_id)\n events = [(doc.start_time, doc.stop_time) for doc in event_docs]\n\n # get candidate shapes\n windows, worst_dist = return_candidate_shapes(df, start, end, func=metric,\n events=events)\n\n # represent it as similarities ranging from 0 to 100%\n scaler = MinMaxScaler(feature_range=[0, 1])\n X = np.asarray([w[2] for w in windows]).reshape(-1, 1)\n X_ = np.asarray([0, worst_dist]).reshape(-1, 1)\n scaler.fit(X_)\n X = scaler.transform(X).flatten()\n windows = [{'start': w[0], 'end': w[1], 'similarity': 1 - X[idx]}\n for idx, w in enumerate(windows)]\n windows.sort(key=lambda w: w['similarity'], reverse=True)\n\n return windows[: number]\n\n\nclass SimilarWindows(Resource):\n def get(self):\n \"\"\"\n @api {get} /computings/similar_windows/ Get similar windows\n @apiName GetSimilarWindows\n @apiGroup Computing\n @apiVersion 1.0.0\n\n @apiParam {Number} start Start timestamp.\n @apiParam {Number} end End timestamp.\n @apiParam {String} datarun_id Datarun ID.\n @apiParam {String=\"euclidean\",\"dtw\"} metric Distance metric used in\n shapen matching.\n @apiParam {Number} [number=5] Number of similar windows to return.\n\n @apiSuccess {Object[]} windows A list of windows.\n @apiSuccess {Number} windows.start Window start timestamp.\n @apiSuccess {Number} windows.end Window end timestamp.\n @apiSuccess {Number} windows.distance Window end timestamp.\n \"\"\"\n\n res, status = verify_auth()\n if status == 401:\n return res, status\n\n try:\n args = parser.parse_args()\n except Exception as e:\n LOGGER.exception(str(e))\n return {'message', str(e)}, 400\n\n try:\n windows = get_windows(**args)\n except Exception as e:\n LOGGER.exception(str(e))\n return {'message', 'error computing the similar shapes'}, 500\n\n return {\n 'windows': windows\n }, 200\n" ]
[ [ "numpy.asarray", "pandas.DataFrame", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Eshan-Agarwal/magenta
[ "21f4cbf8ac2717df6a6fbff8cc6a027fbf3e4057", "21f4cbf8ac2717df6a6fbff8cc6a027fbf3e4057", "21f4cbf8ac2717df6a6fbff8cc6a027fbf3e4057", "21f4cbf8ac2717df6a6fbff8cc6a027fbf3e4057", "21f4cbf8ac2717df6a6fbff8cc6a027fbf3e4057", "21f4cbf8ac2717df6a6fbff8cc6a027fbf3e4057" ]
[ "magenta/music/events_lib_test.py", "magenta/models/arbitrary_image_stylization/export_hub.py", "magenta/music/mfcc_mel_test.py", "magenta/models/shared/sequence_generator_bundle.py", "magenta/pipelines/pianoroll_pipeline_test.py", "magenta/models/onsets_frames_transcription/metrics_test.py" ]
[ "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for events_lib.\"\"\"\n\nimport copy\n\nfrom magenta.music import events_lib\nimport tensorflow.compat.v1 as tf\n\n\nclass EventsLibTest(tf.test.TestCase):\n\n def testDeepcopy(self):\n events = events_lib.SimpleEventSequence(\n pad_event=0, events=[0, 1, 2], start_step=0, steps_per_quarter=4,\n steps_per_bar=8)\n events_copy = copy.deepcopy(events)\n self.assertEqual(events, events_copy)\n\n events.set_length(2)\n self.assertNotEqual(events, events_copy)\n\n def testAppendEvent(self):\n events = events_lib.SimpleEventSequence(pad_event=0)\n\n events.append(7)\n self.assertListEqual([7], list(events))\n self.assertEqual(0, events.start_step)\n self.assertEqual(1, events.end_step)\n\n events.append('cheese')\n self.assertListEqual([7, 'cheese'], list(events))\n self.assertEqual(0, events.start_step)\n self.assertEqual(2, events.end_step)\n\n def testSetLength(self):\n events = events_lib.SimpleEventSequence(\n pad_event=0, events=[60], start_step=9)\n events.set_length(5)\n self.assertListEqual([60, 0, 0, 0, 0],\n list(events))\n self.assertEqual(9, events.start_step)\n self.assertEqual(14, events.end_step)\n self.assertListEqual([9, 10, 11, 12, 13], events.steps)\n\n events = events_lib.SimpleEventSequence(\n pad_event=0, events=[60], start_step=9)\n events.set_length(5, from_left=True)\n self.assertListEqual([0, 0, 0, 0, 60],\n list(events))\n self.assertEqual(5, events.start_step)\n self.assertEqual(10, events.end_step)\n self.assertListEqual([5, 6, 7, 8, 9], events.steps)\n\n events = events_lib.SimpleEventSequence(pad_event=0, events=[60, 0, 0, 0])\n events.set_length(3)\n self.assertListEqual([60, 0, 0], list(events))\n self.assertEqual(0, events.start_step)\n self.assertEqual(3, events.end_step)\n self.assertListEqual([0, 1, 2], events.steps)\n\n events = events_lib.SimpleEventSequence(pad_event=0, events=[60, 0, 0, 0])\n events.set_length(3, from_left=True)\n self.assertListEqual([0, 0, 0], list(events))\n self.assertEqual(1, events.start_step)\n self.assertEqual(4, events.end_step)\n self.assertListEqual([1, 2, 3], events.steps)\n\n def testIncreaseResolution(self):\n events = events_lib.SimpleEventSequence(pad_event=0, events=[1, 0, 1, 0],\n start_step=5, steps_per_bar=4,\n steps_per_quarter=1)\n events.increase_resolution(3, fill_event=None)\n self.assertListEqual([1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0], list(events))\n self.assertEqual(events.start_step, 15)\n self.assertEqual(events.steps_per_bar, 12)\n self.assertEqual(events.steps_per_quarter, 3)\n\n events = events_lib.SimpleEventSequence(pad_event=0, events=[1, 0, 1, 0])\n events.increase_resolution(2, fill_event=0)\n self.assertListEqual([1, 0, 0, 0, 1, 0, 0, 0], list(events))\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a 
copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Genarates a TF-Hub module for arbitrary image stylization.\n\nThe module is compatible with TF-2 and is intended as a demonstration of TF-Hub\nmodule creation based on TF-1 models.\n\nThe created hub module can be used for image stylization with:\n m = hub.load('hub_handle')\n stylized_image = m(content_image, style_image)\n\nwhere content_image, style_image and the generated stylized_image are 4d arrays,\nwith the first being the batch dimension, that can be 1 for individual images.\nThe input and output values of the images should be in the range [0, 1].\nThe shapes of content and style image don't have to match. Output image shape\nis the same as the content image shape.\n\nA pre-trained checkpoint for the given model is available at\nhttps://storage.googleapis.com/download.magenta.tensorflow.org/models/arbitrary_style_transfer.tar.gz\nOne can download and extract this tar file and to provide the path to the\ncheckpoint in it to the checkpoint flag.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import google_type_annotations\nfrom __future__ import print_function\n\nfrom absl import flags\n\nfrom magenta.models.arbitrary_image_stylization import arbitrary_image_stylization_build_model\nimport tensorflow.compat.v1 as tf\n\nflags.DEFINE_string('checkpoint', None, 'Path to the model checkpoint.')\nflags.DEFINE_string('export_path', None, 'Path where to save the hub module.')\nFLAGS = flags.FLAGS\n\n\ndef build_network(content_img, style_img):\n \"\"\"Builds the neural network for image stylization.\"\"\"\n stylize_op, _, _, _ = arbitrary_image_stylization_build_model.build_model(\n content_img,\n style_img,\n trainable=False,\n is_training=False,\n adds_losses=False)\n return stylize_op\n\n\ndef get_stylize_fn():\n \"\"\"Creates a tf.function for stylization.\"\"\"\n input_spec = [\n tf.TensorSpec((None, None, None, 3), tf.float32),\n tf.TensorSpec((None, None, None, 3), tf.float32)\n ]\n predict_feeds = []\n predict_fetches = []\n\n def umbrella_function(content_img, style_img):\n predict_feeds.extend([content_img, style_img])\n predict_result = build_network(content_img, style_img)\n predict_fetches.extend([\n predict_result,\n ])\n return predict_result\n\n umbrella_wrapped = tf.compat.v1.wrap_function(umbrella_function, input_spec)\n fn = umbrella_wrapped.prune(predict_feeds, predict_fetches)\n return fn\n\n\ndef create_hub_module_object():\n \"\"\"Creates an exportable saved model object.\"\"\"\n obj = tf.train.Checkpoint()\n obj.__call__ = get_stylize_fn()\n obj.variables = list(obj.__call__.graph.variables)\n # To avoid error related to reading expected variable save_counter.\n obj.save_counter # pylint: disable=pointless-statement\n return obj\n\n\ndef main(unused_argv=None):\n obj = create_hub_module_object()\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n tf.train.Saver(obj.variables).restore(sess, FLAGS.checkpoint)\n tf.saved_model.save(obj, FLAGS.export_path, signatures=obj.__call__)\n tf.logging.info('Saved hub module in: %s', FLAGS.export_path)\n\n\ndef console_entry_point():\n 
tf.compat.v1.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for mfcc_mel.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom magenta.music import mfcc_mel\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\n\nclass MfccMelTest(tf.test.TestCase):\n\n def testMelSpectrumAgreesWithGoldenValues(self):\n # Parallel dsp/mfcc:mel_spectrum_test.\n sample_count = 513\n input_ = np.sqrt(np.arange(1, sample_count + 1))[np.newaxis, :]\n spec_to_mel_matrix = mfcc_mel.SpectrogramToMelMatrix(\n num_spectrogram_bins=sample_count,\n audio_sample_rate=22050,\n num_mel_bins=20,\n lower_edge_hertz=20.0,\n upper_edge_hertz=4000.0)\n mel_spectrum = np.dot(input_, spec_to_mel_matrix)\n expected = np.array(\n [7.422619, 10.30330648, 13.72703292, 17.24158686, 21.35253118,\n 25.77781089, 31.30624108, 37.05877236, 43.9436536, 51.80306637,\n 60.79867148, 71.14363376, 82.90910141, 96.50069158, 112.08428368,\n 129.96721968, 150.4277597, 173.74997634, 200.86037462, 231.59802942])\n np.testing.assert_array_almost_equal(expected, mel_spectrum[0, :])\n\n def testSpectrogramToMelMatrixChecksFrequencyBounds(self):\n # Lower edge must be >= 0, but 0 is OK.\n mfcc_mel.SpectrogramToMelMatrix(\n num_spectrogram_bins=513,\n audio_sample_rate=22050,\n num_mel_bins=20,\n lower_edge_hertz=0.0,\n upper_edge_hertz=4000.0)\n with self.assertRaises(ValueError):\n mfcc_mel.SpectrogramToMelMatrix(\n num_spectrogram_bins=513,\n audio_sample_rate=22050,\n num_mel_bins=20,\n lower_edge_hertz=-1.0,\n upper_edge_hertz=4000.0)\n # Upper edge must be <= Nyquist, but Nyquist is OK.\n mfcc_mel.SpectrogramToMelMatrix(\n num_spectrogram_bins=513,\n audio_sample_rate=22050,\n num_mel_bins=20,\n lower_edge_hertz=20.0,\n upper_edge_hertz=11025.0)\n with self.assertRaises(ValueError):\n mfcc_mel.SpectrogramToMelMatrix(\n num_spectrogram_bins=513,\n audio_sample_rate=22050,\n num_mel_bins=20,\n lower_edge_hertz=20.0,\n upper_edge_hertz=16000.0)\n # Must be a positive gap between edges.\n with self.assertRaises(ValueError):\n mfcc_mel.SpectrogramToMelMatrix(\n num_spectrogram_bins=513,\n audio_sample_rate=22050,\n num_mel_bins=20,\n lower_edge_hertz=20.0,\n upper_edge_hertz=20.0)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions 
for handling bundle files.\"\"\"\n\nfrom magenta.music.protobuf import generator_pb2\nimport tensorflow.compat.v1 as tf\nfrom google.protobuf import message\n\n\nclass GeneratorBundleParseError(Exception):\n \"\"\"Exception thrown when a bundle file cannot be parsed.\"\"\"\n pass\n\n\ndef read_bundle_file(bundle_file):\n # Read in bundle file.\n bundle = generator_pb2.GeneratorBundle()\n with tf.gfile.Open(bundle_file, 'rb') as f:\n try:\n bundle.ParseFromString(f.read())\n except message.DecodeError as e:\n raise GeneratorBundleParseError(e)\n return bundle\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for pianoroll_pipeline.\"\"\"\n\nfrom magenta.music import sequences_lib\nfrom magenta.music import testing_lib as music_testing_lib\nfrom magenta.music.protobuf import music_pb2\nfrom magenta.pipelines import pianoroll_pipeline\nimport tensorflow.compat.v1 as tf\n\n\nclass PianorollPipelineTest(tf.test.TestCase):\n\n def setUp(self):\n super(PianorollPipelineTest, self).setUp()\n self.note_sequence = music_testing_lib.parse_test_proto(\n music_pb2.NoteSequence,\n \"\"\"\n tempos: {\n qpm: 60\n }\n ticks_per_quarter: 220\n \"\"\")\n\n def testExtractPianorollSequences(self):\n music_testing_lib.add_track_to_sequence(\n self.note_sequence, 0, [(60, 100, 0.0, 4.0)])\n quantized_sequence = sequences_lib.quantize_note_sequence(\n self.note_sequence, steps_per_quarter=1)\n\n seqs, _ = pianoroll_pipeline.extract_pianoroll_sequences(\n quantized_sequence)\n self.assertEqual(1, len(seqs))\n\n seqs, _ = pianoroll_pipeline.extract_pianoroll_sequences(\n quantized_sequence, min_steps_discard=2, max_steps_discard=5)\n self.assertEqual(1, len(seqs))\n\n self.note_sequence.notes[0].end_time = 1.0\n self.note_sequence.total_time = 1.0\n quantized_sequence = sequences_lib.quantize_note_sequence(\n self.note_sequence, steps_per_quarter=1)\n seqs, _ = pianoroll_pipeline.extract_pianoroll_sequences(\n quantized_sequence, min_steps_discard=3, max_steps_discard=5)\n self.assertEqual(0, len(seqs))\n\n self.note_sequence.notes[0].end_time = 10.0\n self.note_sequence.total_time = 10.0\n quantized_sequence = sequences_lib.quantize_note_sequence(\n self.note_sequence, steps_per_quarter=1)\n seqs, _ = pianoroll_pipeline.extract_pianoroll_sequences(\n quantized_sequence, min_steps_discard=3, max_steps_discard=5)\n self.assertEqual(0, len(seqs))\n\n def testExtractPianorollMultiProgram(self):\n music_testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])\n self.note_sequence.notes[0].program = 2\n quantized_sequence = sequences_lib.quantize_note_sequence(\n self.note_sequence, steps_per_quarter=1)\n\n seqs, _ = pianoroll_pipeline.extract_pianoroll_sequences(\n quantized_sequence)\n self.assertEqual(0, len(seqs))\n\n def testExtractNonZeroStart(self):\n music_testing_lib.add_track_to_sequence(\n self.note_sequence, 0, [(60, 100, 0.0, 4.0)])\n quantized_sequence = 
sequences_lib.quantize_note_sequence(\n self.note_sequence, steps_per_quarter=1)\n\n seqs, _ = pianoroll_pipeline.extract_pianoroll_sequences(\n quantized_sequence, start_step=4, min_steps_discard=1)\n self.assertEqual(0, len(seqs))\n seqs, _ = pianoroll_pipeline.extract_pianoroll_sequences(\n quantized_sequence, start_step=0, min_steps_discard=1)\n self.assertEqual(1, len(seqs))\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for metrics.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom magenta.models.onsets_frames_transcription import metrics\nfrom magenta.music.protobuf import music_pb2\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\n\nclass MetricsTest(tf.test.TestCase):\n\n def testSequenceToValuedIntervals(self):\n sequence = music_pb2.NoteSequence()\n sequence.notes.add(pitch=60, start_time=1.0, end_time=2.0, velocity=80)\n # Should be dropped because it is 0 duration.\n sequence.notes.add(pitch=60, start_time=3.0, end_time=3.0, velocity=90)\n\n intervals, pitches, velocities = metrics.sequence_to_valued_intervals(\n sequence)\n np.testing.assert_array_equal([[1., 2.]], intervals)\n np.testing.assert_array_equal([60], pitches)\n np.testing.assert_array_equal([80], velocities)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v1.test.main" ], [ "tensorflow.compat.v1.train.Checkpoint", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.initialize_all_variables", "tensorflow.compat.v1.compat.v1.app.run", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.TensorSpec", "tensorflow.compat.v1.compat.v1.wrap_function", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.saved_model.save" ], [ "numpy.dot", "tensorflow.compat.v1.test.main", "numpy.arange", "numpy.array", "numpy.testing.assert_array_almost_equal" ], [ "tensorflow.compat.v1.gfile.Open" ], [ "tensorflow.compat.v1.test.main" ], [ "numpy.testing.assert_array_equal", "tensorflow.compat.v1.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
diegcr/2D-Motion-Retargeting
[ "2b4acedb45a281d2867c812fce6063dc68b8e88b" ]
[ "train.py" ]
[ "from dataset import get_dataloader\nfrom common import config\nfrom model import get_autoencoder\nfrom functional.utils import cycle\nfrom agent import get_training_agent\nfrom functional.visualization import visulize_motion_in_training\nimport torch\nimport os\nfrom collections import OrderedDict\nfrom tqdm import tqdm\nfrom tensorboardX import SummaryWriter\nimport argparse\n\ntorch.backends.cudnn.benchmark = True\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--name', type=str, choices=['skeleton', 'view', 'full'], required=True,\n help='which structure to use')\n # parser.add_argument('-c', '--continue', dest='continue_path', type=str, required=False)\n parser.add_argument('-g', '--gpu_ids', type=int, default=0, required=False, help=\"specify gpu ids\")\n parser.add_argument('--disable_triplet', action='store_true', default=False, help=\"disable triplet loss\")\n parser.add_argument('--use_footvel_loss', action='store_true', default=False, help=\"use use footvel loss\")\n parser.add_argument('--vis', action='store_true', default=False, help=\"visualize output in training\")\n args = parser.parse_args()\n\n config.initialize(args)\n\n net = get_autoencoder(config)\n print(net)\n net = net.to(config.device)\n\n # create tensorboard writer\n train_tb = SummaryWriter(os.path.join(config.log_dir, 'train.events'))\n val_tb = SummaryWriter(os.path.join(config.log_dir, 'val.events'))\n\n # create dataloader\n train_loader = get_dataloader('train', config, config.batch_size, config.num_workers)\n mean_pose, std_pose = train_loader.dataset.mean_pose, train_loader.dataset.std_pose\n val_loader = get_dataloader('test', config, config.batch_size, config.num_workers)\n val_loader = cycle(val_loader)\n\n # create training agent\n tr_agent = get_training_agent(config, net)\n clock = tr_agent.clock\n\n # start training\n for e in range(config.nr_epochs):\n\n # begin iteration\n pbar = tqdm(train_loader)\n for b, data in enumerate(pbar):\n # train step\n outputs, losses = tr_agent.train_func(data)\n\n losses_values = {k:v.item() for k, v in losses.items()}\n\n # record loss to tensorboard\n for k, v in losses_values.items():\n train_tb.add_scalar(k, v, clock.step)\n\n # visualize\n if args.vis and clock.step % config.visualize_frequency == 0:\n imgs = visulize_motion_in_training(outputs, mean_pose, std_pose)\n for k, img in imgs.items():\n train_tb.add_image(k, torch.from_numpy(img), clock.step)\n\n pbar.set_description(\"EPOCH[{}][{}/{}]\".format(e, b, len(train_loader)))\n pbar.set_postfix(OrderedDict({\"loss\": sum(losses_values.values())}))\n\n # validation step\n if clock.step % config.val_frequency == 0:\n data = next(val_loader)\n\n outputs, losses = tr_agent.val_func(data)\n\n losses_values = {k: v.item() for k, v in losses.items()}\n\n for k, v in losses_values.items():\n val_tb.add_scalar(k, v, clock.step)\n\n if args.vis and clock.step % config.visualize_frequency == 0:\n imgs = visulize_motion_in_training(outputs, mean_pose, std_pose)\n for k, img in imgs.items():\n val_tb.add_image(k, torch.from_numpy(img), clock.step)\n\n clock.tick()\n\n train_tb.add_scalar('learning_rate', tr_agent.optimizer.param_groups[-1]['lr'], clock.epoch)\n tr_agent.update_learning_rate()\n\n if clock.epoch % config.save_frequency == 0:\n tr_agent.save_network()\n tr_agent.save_network('latest.pth.tar')\n\n clock.tock()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.from_numpy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mathPi/shap
[ "8cd9b3567352ac6751e54d73a4dbce1848f786a0", "8cd9b3567352ac6751e54d73a4dbce1848f786a0" ]
[ "shap/plots/_scatter.py", "tests/explainers/test_tree.py" ]
[ "from __future__ import division\n\nimport numpy as np\nimport warnings\ntry:\n import matplotlib.pyplot as pl\n import matplotlib\nexcept ImportError:\n warnings.warn(\"matplotlib could not be loaded!\")\n pass\nfrom ._labels import labels\nfrom . import colors\nfrom ..utils import convert_name, approximate_interactions\nfrom ..utils._general import encode_array_if_needed\nfrom .._explanation import Explanation\n\n\n# TODO: Make the color bar a one-sided beeswarm plot so we can see the density along the color axis\ndef scatter(shap_values, color=\"#1E88E5\", hist=True, axis_color=\"#333333\", cmap=colors.red_blue,\n dot_size=16, x_jitter=\"auto\", alpha=1, title=None, xmin=None, xmax=None, ymin=None, ymax=None,\n ax=None, show=True):\n \"\"\" Create a SHAP dependence scatter plot, colored by an interaction feature.\n\n Plots the value of the feature on the x-axis and the SHAP value of the same feature\n on the y-axis. This shows how the model depends on the given feature, and is like a\n richer extenstion of classical parital dependence plots. Vertical dispersion of the\n data points represents interaction effects. Grey ticks along the y-axis are data\n points where the feature's value was NaN.\n\n Note that if you want to change the data being displayed you can update the\n shap_values.display_features attribute and it will then be used for plotting instead of\n shap_values.data.\n\n\n Parameters\n ----------\n shap_values : shap.Explanation\n A single column of a SHAP Explanation object (i.e. shap_values[:,\"Feature A\"]).\n\n color : string or shap.Explanation\n How to color the scatter plot points. This can be a fixed color string, or a Explanation object.\n If it is an explanation object then the scatter plot points are colored by the feature that\n seems to have the strongest interaction effect with the feature given by the shap_values argument.\n This is calculated using shap.utils.approximate_interactions.\n If only a single column of an Explanation object is passed then that feature column will be used\n to color the data points.\n\n hist : bool\n Whether to show a light histogram along the x-axis to show the density of the data. Note that the\n histogram is normalized that that if all the point were in a single bin then that bin would span\n the full height of the plot.\n\n x_jitter : 'auto' or float (0 - 1)\n Adds random jitter to feature values. May increase plot readability when a feature\n is discrete. By default x_jitter is chosen base on auto-detection of categorical features\n\n alpha : float\n The transparency of the data points (between 0 and 1). This can be useful to the\n show density of the data points when using a large dataset.\n\n xmin : float or string\n Represents the lower bound of the plot's x-axis. It can be a string of the format\n \"percentile(float)\" to denote that percentile of the feature's value used on the x-axis.\n\n xmax : float or string\n Represents the upper bound of the plot's x-axis. 
It can be a string of the format\n \"percentile(float)\" to denote that percentile of the feature's value used on the x-axis.\n\n ax : matplotlib Axes object\n Optionally specify an existing matplotlib Axes object, into which the plot will be placed.\n In this case we do not create a Figure, otherwise we do.\n\n \"\"\"\n\n assert str(type(shap_values)).endswith(\"Explanation'>\"), \"The shap_values paramemter must be a shap.Explanation object!\"\n if len(shap_values.shape) != 1:\n raise Exception(\"The passed Explanation object has multiple columns, please pass a single feature column to shap.plots.dependence like: shap_values[:,column]\")\n \n # this unpacks the explanation object for the code that was written earlier\n feature_names = [shap_values.feature_names]\n ind = 0\n shap_values_arr = shap_values.values.reshape(-1, 1)\n features = shap_values.data.reshape(-1, 1)\n if shap_values.display_data is None:\n display_features = features\n else:\n display_features = shap_values.display_data.reshape(-1, 1)\n interaction_index = None\n\n # unwrap explanation objects used for bounds\n if issubclass(type(xmin), Explanation):\n xmin = xmin.data\n if issubclass(type(xmax), Explanation):\n xmax = xmax.data\n if issubclass(type(ymin), Explanation):\n ymin = ymin.values\n if issubclass(type(ymax), Explanation):\n ymax = ymax.values\n\n # wrap np.arrays as Explanations\n if isinstance(color, np.ndarray):\n color = Explanation(values=color, base_values=None, data=color)\n \n # TODO: This stacking could be avoided if we use the new shap.utils.potential_interactions function\n if str(type(color)).endswith(\"Explanation'>\"):\n shap_values2 = color\n if issubclass(type(shap_values2.feature_names), (str, int)):\n feature_names.append(shap_values2.feature_names)\n shap_values_arr = np.hstack([shap_values_arr, shap_values2.values.reshape(-1, len(feature_names)-1)])\n features = np.hstack([features, shap_values2.data.reshape(-1, len(feature_names)-1)])\n if shap_values2.display_data is None:\n display_features = np.hstack([display_features, shap_values2.data.reshape(-1, len(feature_names)-1)])\n else:\n display_features = np.hstack([display_features, shap_values2.display_data.reshape(-1, len(feature_names)-1)])\n else:\n feature_names2 = np.array(shap_values2.feature_names)\n mask = ~(feature_names[0] == feature_names2)\n feature_names.extend(feature_names2[mask])\n shap_values_arr = np.hstack([shap_values_arr, shap_values2.values[:,mask]])\n features = np.hstack([features, shap_values2.data[:,mask]])\n if shap_values2.display_data is None:\n display_features = np.hstack([display_features, shap_values2.data[:,mask]])\n else:\n display_features = np.hstack([display_features, shap_values2.display_data[:,mask]])\n color = None\n interaction_index = \"auto\"\n\n\n if type(shap_values_arr) is list:\n raise TypeError(\"The passed shap_values_arr are a list not an array! 
If you have a list of explanations try \" \\\n \"passing shap_values_arr[0] instead to explain the first output class of a multi-output model.\")\n\n # convert from DataFrames if we got any\n if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"):\n if feature_names is None:\n feature_names = features.columns\n features = features.values\n\n if feature_names is None:\n feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values_arr.shape[1])]\n\n # allow vectors to be passed\n if len(shap_values_arr.shape) == 1:\n shap_values_arr = np.reshape(shap_values_arr, len(shap_values_arr), 1)\n if len(features.shape) == 1:\n features = np.reshape(features, len(features), 1)\n\n ind = convert_name(ind, shap_values_arr, feature_names)\n\n # pick jitter for categorical features\n vals = np.sort(np.unique(features[:,ind]))\n min_dist = np.inf\n for i in range(1,len(vals)):\n d = vals[i] - vals[i-1]\n if d > 1e-8 and d < min_dist:\n min_dist = d\n num_points_per_value = len(features[:,ind]) / len(vals)\n if num_points_per_value < 10:\n #categorical = False\n if x_jitter == \"auto\":\n x_jitter = 0\n elif num_points_per_value < 100:\n #categorical = True\n if x_jitter == \"auto\":\n x_jitter = min_dist * 0.1\n else:\n #categorical = True\n if x_jitter == \"auto\":\n x_jitter = min_dist * 0.2\n\n # guess what other feature as the stongest interaction with the plotted feature\n if not hasattr(ind, \"__len__\"):\n if interaction_index == \"auto\":\n interaction_index = approximate_interactions(ind, shap_values_arr, features)[0]\n interaction_index = convert_name(interaction_index, shap_values_arr, feature_names)\n categorical_interaction = False\n\n # create a matplotlib figure, if `ax` hasn't been specified.\n if not ax:\n figsize = (7.5, 5) if interaction_index != ind and interaction_index is not None else (6, 5)\n fig = pl.figure(figsize=figsize)\n ax = fig.gca()\n else:\n fig = ax.get_figure()\n\n # plotting SHAP interaction values\n if len(shap_values_arr.shape) == 3 and hasattr(ind, \"__len__\") and len(ind) == 2:\n ind1 = convert_name(ind[0], shap_values_arr, feature_names)\n ind2 = convert_name(ind[1], shap_values_arr, feature_names)\n if ind1 == ind2:\n proj_shap_values_arr = shap_values_arr[:, ind2, :]\n else:\n proj_shap_values_arr = shap_values_arr[:, ind2, :] * 2 # off-diag values are split in half\n\n # there is no interaction coloring for the main effect\n if ind1 == ind2:\n fig.set_size_inches(6, 5, forward=True)\n\n # TODO: remove recursion; generally the functions should be shorter for more maintainable code\n dependence_legacy(\n ind1, proj_shap_values_arr, features, feature_names=feature_names,\n interaction_index=(None if ind1 == ind2 else ind2), display_features=display_features, ax=ax, show=False,\n xmin=xmin, xmax=xmax, x_jitter=x_jitter, alpha=alpha\n )\n if ind1 == ind2:\n ax.set_ylabel(labels['MAIN_EFFECT'] % feature_names[ind1])\n else:\n ax.set_ylabel(labels['INTERACTION_EFFECT'] % (feature_names[ind1], feature_names[ind2]))\n\n if show:\n pl.show()\n return\n\n assert shap_values_arr.shape[0] == features.shape[0], \\\n \"'shap_values_arr' and 'features' values must have the same number of rows!\"\n assert shap_values_arr.shape[1] == features.shape[1], \\\n \"'shap_values_arr' must have the same number of columns as 'features'!\"\n\n # get both the raw and display feature values\n oinds = np.arange(shap_values_arr.shape[0]) # we randomize the ordering so plotting overlaps are not related to data ordering\n np.random.shuffle(oinds)\n xv = 
encode_array_if_needed(features[oinds, ind])\n xd = display_features[oinds, ind]\n \n s = shap_values_arr[oinds, ind]\n if type(xd[0]) == str:\n name_map = {}\n for i in range(len(xv)):\n name_map[xd[i]] = xv[i]\n xnames = list(name_map.keys())\n \n # allow a single feature name to be passed alone\n if type(feature_names) == str:\n feature_names = [feature_names]\n name = feature_names[ind]\n\n # get both the raw and display color values\n color_norm = None\n if interaction_index is not None:\n interaction_feature_values = encode_array_if_needed(features[:, interaction_index])\n cv = interaction_feature_values\n cd = display_features[:, interaction_index]\n clow = np.nanpercentile(cv.astype(np.float), 5)\n chigh = np.nanpercentile(cv.astype(np.float), 95)\n if clow == chigh:\n clow = np.nanmin(cv.astype(np.float))\n chigh = np.nanmax(cv.astype(np.float))\n if type(cd[0]) == str:\n cname_map = {}\n for i in range(len(cv)):\n cname_map[cd[i]] = cv[i]\n cnames = list(cname_map.keys())\n categorical_interaction = True\n elif clow % 1 == 0 and chigh % 1 == 0 and chigh - clow < 10:\n categorical_interaction = True\n\n # discritize colors for categorical features\n if categorical_interaction and clow != chigh:\n clow = np.nanmin(cv.astype(np.float))\n chigh = np.nanmax(cv.astype(np.float))\n bounds = np.linspace(clow, chigh, int(chigh - clow + 2))\n color_norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N-1)\n\n # optionally add jitter to feature values\n xv_no_jitter = xv.copy()\n if x_jitter > 0:\n if x_jitter > 1: x_jitter = 1\n xvals = xv.copy()\n if isinstance(xvals[0], float):\n xvals = xvals.astype(np.float)\n xvals = xvals[~np.isnan(xvals)]\n xvals = np.unique(xvals) # returns a sorted array\n if len(xvals) >= 2:\n smallest_diff = np.min(np.diff(xvals))\n jitter_amount = x_jitter * smallest_diff\n xv += (np.random.random_sample(size = len(xv))*jitter_amount) - (jitter_amount/2)\n\n \n # the actual scatter plot, TODO: adapt the dot_size to the number of data points?\n xv_nan = np.isnan(xv)\n xv_notnan = np.invert(xv_nan)\n if interaction_index is not None:\n\n # plot the nan values in the interaction feature as grey\n cvals = features[oinds, interaction_index].astype(np.float64)\n cvals_imp = cvals.copy()\n cvals_imp[np.isnan(cvals)] = (clow + chigh) / 2.0\n cvals[cvals_imp > chigh] = chigh\n cvals[cvals_imp < clow] = clow\n p = ax.scatter(\n xv[xv_notnan], s[xv_notnan], s=dot_size, linewidth=0, c=cvals[xv_notnan],\n cmap=cmap, alpha=alpha, vmin=clow, vmax=chigh,\n norm=color_norm, rasterized=len(xv) > 500\n )\n p.set_array(cvals[xv_notnan])\n else:\n p = ax.scatter(xv, s, s=dot_size, linewidth=0, color=color,\n alpha=alpha, rasterized=len(xv) > 500)\n\n if interaction_index != ind and interaction_index is not None:\n # draw the color bar\n if type(cd[0]) == str:\n tick_positions = np.array([cname_map[n] for n in cnames])\n tick_positions *= 1 - 1 / len(cnames)\n tick_positions += 0.5 * (chigh - clow) / (chigh - clow + 1)\n cb = pl.colorbar(p, ticks=tick_positions, ax=ax)\n cb.set_ticklabels(cnames)\n else:\n cb = pl.colorbar(p, ax=ax)\n\n cb.set_label(feature_names[interaction_index], size=13)\n cb.ax.tick_params(labelsize=11)\n if categorical_interaction:\n cb.ax.tick_params(length=0)\n cb.set_alpha(1)\n cb.outline.set_visible(False)\n bbox = cb.ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n cb.ax.set_aspect((bbox.height - 0.7) * 20)\n\n # handles any setting of xmax and xmin\n # note that we handle None,float, or \"percentile(float)\" formats\n if xmin is not 
None or xmax is not None:\n if type(xmin) == str and xmin.startswith(\"percentile\"):\n xmin = np.nanpercentile(xv, float(xmin[11:-1]))\n if type(xmax) == str and xmax.startswith(\"percentile\"):\n xmax = np.nanpercentile(xv, float(xmax[11:-1]))\n\n if xmin is None or xmin == np.nanmin(xv):\n xmin = np.nanmin(xv) - (xmax - np.nanmin(xv))/20\n if xmax is None or xmax == np.nanmax(xv):\n xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin)/20\n\n ax.set_xlim(xmin, xmax)\n\n if ymin is not None or ymax is not None:\n # if type(ymin) == str and ymin.startswith(\"percentile\"):\n # ymin = np.nanpercentile(xv, float(ymin[11:-1]))\n # if type(ymax) == str and ymax.startswith(\"percentile\"):\n # ymax = np.nanpercentile(xv, float(ymax[11:-1]))\n\n if ymin is None or ymin == np.nanmin(xv):\n ymin = np.nanmin(xv) - (ymax - np.nanmin(xv))/20\n if ymax is None or ymax == np.nanmax(xv):\n ymax = np.nanmax(xv) + (np.nanmax(xv) - ymin)/20\n\n ax.set_ylim(ymin, ymax)\n\n # plot any nan feature values as tick marks along the y-axis\n xlim = ax.get_xlim()\n if interaction_index is not None:\n p = ax.scatter(\n xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,\n linewidth=2, c=cvals_imp[xv_nan], cmap=cmap, alpha=alpha,\n vmin=clow, vmax=chigh\n )\n p.set_array(cvals[xv_nan])\n else:\n ax.scatter(\n xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,\n linewidth=2, color=color, alpha=alpha\n )\n ax.set_xlim(xlim)\n\n # the histogram of the data\n if hist:\n ax2 = ax.twinx()\n #n, bins, patches = \n xlim = ax.get_xlim()\n xvals = np.unique(xv_no_jitter)\n\n if len(xvals) / len(xv_no_jitter) < 0.2 and len(xvals) < 75 and np.max(xvals) < 75 and np.min(xvals) >= 0:\n np.sort(xvals)\n bin_edges = []\n for i in range(int(np.max(xvals)+1)):\n bin_edges.append(i-0.5)\n\n #bin_edges.append((xvals[i] + xvals[i+1])/2)\n bin_edges.append(int(np.max(xvals))+0.5)\n\n lim = np.floor(np.min(xvals) - 0.5) + 0.5, np.ceil(np.max(xvals) + 0.5) - 0.5\n ax.set_xlim(lim)\n else:\n if len(xv_no_jitter) >= 500:\n bin_edges = 50\n elif len(xv_no_jitter) >= 200:\n bin_edges = 20\n elif len(xv_no_jitter) >= 100:\n bin_edges = 10\n else:\n bin_edges = 5\n \n ax2.hist(xv[~np.isnan(xv)], bin_edges, density=False, facecolor='#000000', alpha=0.1, range=(xlim[0], xlim[1]), zorder=-1)\n ax2.set_ylim(0,len(xv))\n\n ax2.xaxis.set_ticks_position('bottom')\n ax2.yaxis.set_ticks_position('left')\n ax2.yaxis.set_ticks([])\n ax2.spines['right'].set_visible(False)\n ax2.spines['top'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n ax2.spines['bottom'].set_visible(False)\n\n pl.sca(ax)\n\n # make the plot more readable\n ax.set_xlabel(name, color=axis_color, fontsize=13)\n ax.set_ylabel(labels['VALUE_FOR'] % name, color=axis_color, fontsize=13)\n if title is not None:\n ax.set_title(title, color=axis_color, fontsize=13)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.tick_params(color=axis_color, labelcolor=axis_color, labelsize=11)\n for spine in ax.spines.values():\n spine.set_edgecolor(axis_color)\n if type(xd[0]) == str:\n ax.set_xticks([name_map[n] for n in xnames])\n ax.set_xticklabels(xnames, dict(rotation='vertical', fontsize=11))\n if show:\n with warnings.catch_warnings(): # ignore expected matplotlib warnings\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n pl.show()\n\n\n\n\ndef dependence_legacy(ind, shap_values=None, features=None, feature_names=None, display_features=None,\n interaction_index=\"auto\",\n 
color=\"#1E88E5\", axis_color=\"#333333\", cmap=None,\n dot_size=16, x_jitter=0, alpha=1, title=None, xmin=None, xmax=None, ax=None, show=True):\n \"\"\" Create a SHAP dependence plot, colored by an interaction feature.\n\n Plots the value of the feature on the x-axis and the SHAP value of the same feature\n on the y-axis. This shows how the model depends on the given feature, and is like a\n richer extenstion of the classical parital dependence plots. Vertical dispersion of the\n data points represents interaction effects. Grey ticks along the y-axis are data\n points where the feature's value was NaN.\n\n\n Parameters\n ----------\n ind : int or string\n If this is an int it is the index of the feature to plot. If this is a string it is\n either the name of the feature to plot, or it can have the form \"rank(int)\" to specify\n the feature with that rank (ordered by mean absolute SHAP value over all the samples).\n\n shap_values : numpy.array\n Matrix of SHAP values (# samples x # features).\n\n features : numpy.array or pandas.DataFrame\n Matrix of feature values (# samples x # features).\n\n feature_names : list\n Names of the features (length # features).\n\n display_features : numpy.array or pandas.DataFrame\n Matrix of feature values for visual display (such as strings instead of coded values).\n\n interaction_index : \"auto\", None, int, or string\n The index of the feature used to color the plot. The name of a feature can also be passed\n as a string. If \"auto\" then shap.common.approximate_interactions is used to pick what\n seems to be the strongest interaction (note that to find to true stongest interaction you\n need to compute the SHAP interaction values).\n\n x_jitter : float (0 - 1)\n Adds random jitter to feature values. May increase plot readability when feature\n is discrete.\n\n alpha : float\n The transparency of the data points (between 0 and 1). This can be useful to the\n show density of the data points when using a large dataset.\n\n xmin : float or string\n Represents the lower bound of the plot's x-axis. It can be a string of the format\n \"percentile(float)\" to denote that percentile of the feature's value used on the x-axis.\n\n xmax : float or string\n Represents the upper bound of the plot's x-axis. It can be a string of the format\n \"percentile(float)\" to denote that percentile of the feature's value used on the x-axis.\n\n ax : matplotlib Axes object\n Optionally specify an existing matplotlib Axes object, into which the plot will be placed.\n In this case we do not create a Figure, otherwise we do.\n\n \"\"\"\n\n if cmap is None:\n cmap = colors.red_blue\n\n if type(shap_values) is list:\n raise TypeError(\"The passed shap_values are a list not an array! 
If you have a list of explanations try \" \\\n \"passing shap_values[0] instead to explain the first output class of a multi-output model.\")\n\n # convert from DataFrames if we got any\n if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"):\n if feature_names is None:\n feature_names = features.columns\n features = features.values\n if str(type(display_features)).endswith(\"'pandas.core.frame.DataFrame'>\"):\n if feature_names is None:\n feature_names = display_features.columns\n display_features = display_features.values\n elif display_features is None:\n display_features = features\n\n if feature_names is None:\n feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]\n\n # allow vectors to be passed\n if len(shap_values.shape) == 1:\n shap_values = np.reshape(shap_values, len(shap_values), 1)\n if len(features.shape) == 1:\n features = np.reshape(features, len(features), 1)\n\n ind = convert_name(ind, shap_values, feature_names)\n\n # guess what other feature as the stongest interaction with the plotted feature\n if not hasattr(ind, \"__len__\"):\n if interaction_index == \"auto\":\n interaction_index = approximate_interactions(ind, shap_values, features)[0]\n interaction_index = convert_name(interaction_index, shap_values, feature_names)\n categorical_interaction = False\n\n # create a matplotlib figure, if `ax` hasn't been specified.\n if not ax:\n figsize = (7.5, 5) if interaction_index != ind and interaction_index is not None else (6, 5)\n fig = pl.figure(figsize=figsize)\n ax = fig.gca()\n else:\n fig = ax.get_figure()\n\n # plotting SHAP interaction values\n if len(shap_values.shape) == 3 and hasattr(ind, \"__len__\") and len(ind) == 2:\n ind1 = convert_name(ind[0], shap_values, feature_names)\n ind2 = convert_name(ind[1], shap_values, feature_names)\n if ind1 == ind2:\n proj_shap_values = shap_values[:, ind2, :]\n else:\n proj_shap_values = shap_values[:, ind2, :] * 2 # off-diag values are split in half\n\n # there is no interaction coloring for the main effect\n if ind1 == ind2:\n fig.set_size_inches(6, 5, forward=True)\n\n # TODO: remove recursion; generally the functions should be shorter for more maintainable code\n dependence_legacy(\n ind1, proj_shap_values, features, feature_names=feature_names,\n interaction_index=(None if ind1 == ind2 else ind2), display_features=display_features, ax=ax, show=False,\n xmin=xmin, xmax=xmax, x_jitter=x_jitter, alpha=alpha\n )\n if ind1 == ind2:\n ax.set_ylabel(labels['MAIN_EFFECT'] % feature_names[ind1])\n else:\n ax.set_ylabel(labels['INTERACTION_EFFECT'] % (feature_names[ind1], feature_names[ind2]))\n\n if show:\n pl.show()\n return\n\n assert shap_values.shape[0] == features.shape[0], \\\n \"'shap_values' and 'features' values must have the same number of rows!\"\n assert shap_values.shape[1] == features.shape[1], \\\n \"'shap_values' must have the same number of columns as 'features'!\"\n\n # get both the raw and display feature values\n oinds = np.arange(shap_values.shape[0]) # we randomize the ordering so plotting overlaps are not related to data ordering\n np.random.shuffle(oinds)\n \n xv = encode_array_if_needed(features[oinds, ind])\n\n xd = display_features[oinds, ind]\n s = shap_values[oinds, ind]\n if type(xd[0]) == str:\n name_map = {}\n for i in range(len(xv)):\n name_map[xd[i]] = xv[i]\n xnames = list(name_map.keys())\n\n # allow a single feature name to be passed alone\n if type(feature_names) == str:\n feature_names = [feature_names]\n name = feature_names[ind]\n\n # get both the 
raw and display color values\n color_norm = None\n if interaction_index is not None:\n interaction_feature_values = encode_array_if_needed(features[:, interaction_index])\n cv = interaction_feature_values\n cd = display_features[:, interaction_index]\n clow = np.nanpercentile(cv.astype(np.float), 5)\n chigh = np.nanpercentile(cv.astype(np.float), 95)\n if clow == chigh:\n clow = np.nanmin(cv.astype(np.float))\n chigh = np.nanmax(cv.astype(np.float))\n if type(cd[0]) == str:\n cname_map = {}\n for i in range(len(cv)):\n cname_map[cd[i]] = cv[i]\n cnames = list(cname_map.keys())\n categorical_interaction = True\n elif clow % 1 == 0 and chigh % 1 == 0 and chigh - clow < 10:\n categorical_interaction = True\n\n # discritize colors for categorical features\n if categorical_interaction and clow != chigh:\n clow = np.nanmin(cv.astype(np.float))\n chigh = np.nanmax(cv.astype(np.float))\n bounds = np.linspace(clow, chigh, int(chigh - clow + 2))\n color_norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N-1)\n\n # optionally add jitter to feature values\n if x_jitter > 0:\n if x_jitter > 1: x_jitter = 1\n xvals = xv.copy()\n if isinstance(xvals[0], float):\n xvals = xvals.astype(np.float)\n xvals = xvals[~np.isnan(xvals)]\n xvals = np.unique(xvals) # returns a sorted array\n if len(xvals) >= 2:\n smallest_diff = np.min(np.diff(xvals))\n jitter_amount = x_jitter * smallest_diff\n xv += (np.random.random_sample(size = len(xv))*jitter_amount) - (jitter_amount/2)\n\n # the actual scatter plot, TODO: adapt the dot_size to the number of data points?\n xv_nan = np.isnan(xv)\n xv_notnan = np.invert(xv_nan)\n if interaction_index is not None:\n\n # plot the nan values in the interaction feature as grey\n cvals = interaction_feature_values[oinds].astype(np.float64)\n cvals_imp = cvals.copy()\n cvals_imp[np.isnan(cvals)] = (clow + chigh) / 2.0\n cvals[cvals_imp > chigh] = chigh\n cvals[cvals_imp < clow] = clow\n p = ax.scatter(\n xv[xv_notnan], s[xv_notnan], s=dot_size, linewidth=0, c=cvals[xv_notnan],\n cmap=cmap, alpha=alpha, vmin=clow, vmax=chigh,\n norm=color_norm, rasterized=len(xv) > 500\n )\n p.set_array(cvals[xv_notnan])\n else:\n p = ax.scatter(xv, s, s=dot_size, linewidth=0, color=color,\n alpha=alpha, rasterized=len(xv) > 500)\n\n if interaction_index != ind and interaction_index is not None:\n # draw the color bar\n if type(cd[0]) == str:\n tick_positions = [cname_map[n] for n in cnames]\n if len(tick_positions) == 2:\n tick_positions[0] -= 0.25\n tick_positions[1] += 0.25\n cb = pl.colorbar(p, ticks=tick_positions, ax=ax)\n cb.set_ticklabels(cnames)\n else:\n cb = pl.colorbar(p, ax=ax)\n\n cb.set_label(feature_names[interaction_index], size=13)\n cb.ax.tick_params(labelsize=11)\n if categorical_interaction:\n cb.ax.tick_params(length=0)\n cb.set_alpha(1)\n cb.outline.set_visible(False)\n bbox = cb.ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n cb.ax.set_aspect((bbox.height - 0.7) * 20)\n\n # handles any setting of xmax and xmin\n # note that we handle None,float, or \"percentile(float)\" formats\n if xmin is not None or xmax is not None:\n if type(xmin) == str and xmin.startswith(\"percentile\"):\n xmin = np.nanpercentile(xv, float(xmin[11:-1]))\n if type(xmax) == str and xmax.startswith(\"percentile\"):\n xmax = np.nanpercentile(xv, float(xmax[11:-1]))\n\n if xmin is None or xmin == np.nanmin(xv):\n xmin = np.nanmin(xv) - (xmax - np.nanmin(xv))/20\n if xmax is None or xmax == np.nanmax(xv):\n xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin)/20\n\n ax.set_xlim(xmin, xmax)\n\n # 
plot any nan feature values as tick marks along the y-axis\n xlim = ax.get_xlim()\n if interaction_index is not None:\n p = ax.scatter(\n xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,\n linewidth=2, c=cvals_imp[xv_nan], cmap=cmap, alpha=alpha,\n vmin=clow, vmax=chigh\n )\n p.set_array(cvals[xv_nan])\n else:\n ax.scatter(\n xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,\n linewidth=2, color=color, alpha=alpha\n )\n ax.set_xlim(xlim)\n\n # make the plot more readable\n ax.set_xlabel(name, color=axis_color, fontsize=13)\n ax.set_ylabel(labels['VALUE_FOR'] % name, color=axis_color, fontsize=13)\n if title is not None:\n ax.set_title(title, color=axis_color, fontsize=13)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.tick_params(color=axis_color, labelcolor=axis_color, labelsize=11)\n for spine in ax.spines.values():\n spine.set_edgecolor(axis_color)\n if type(xd[0]) == str:\n ax.set_xticks([name_map[n] for n in xnames])\n ax.set_xticklabels(xnames, dict(rotation='vertical', fontsize=11))\n if show:\n with warnings.catch_warnings(): # ignore expected matplotlib warnings\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n pl.show()\n", "# pylint: disable=missing-function-docstring,too-many-lines,fixme\n\"\"\"Test tree functions.\"\"\"\nimport itertools\nimport math\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport sklearn\nimport sklearn.pipeline\nfrom sklearn.experimental import enable_hist_gradient_boosting # pylint: disable=unused-import\nimport shap\n\n\ndef test_front_page_xgboost():\n xgboost = pytest.importorskip('xgboost')\n\n # load JS visualization code to notebook\n shap.initjs()\n\n # train XGBoost model\n X, y = shap.datasets.boston()\n model = xgboost.train({\"learning_rate\": 0.01, \"silent\": 1}, xgboost.DMatrix(X, label=y), 100)\n\n # explain the model's predictions using SHAP values\n explainer = shap.TreeExplainer(model)\n shap_values = explainer.shap_values(X)\n\n # visualize the first prediction's explaination\n shap.force_plot(explainer.expected_value, shap_values[0, :], X.iloc[0, :])\n\n # visualize the training set predictions\n shap.force_plot(explainer.expected_value, shap_values, X)\n\n # create a SHAP dependence plot to show the effect of a single feature across the whole dataset\n shap.dependence_plot(5, shap_values, X, show=False)\n shap.dependence_plot(\"RM\", shap_values, X, show=False)\n\n # summarize the effects of all the features\n shap.summary_plot(shap_values, X, show=False)\n\n\ndef test_front_page_sklearn():\n # load JS visualization code to notebook\n shap.initjs()\n\n # train model\n X, y = shap.datasets.boston()\n models = [\n sklearn.ensemble.RandomForestRegressor(n_estimators=10),\n sklearn.ensemble.ExtraTreesRegressor(n_estimators=10),\n ]\n for model in models:\n model.fit(X, y)\n\n # explain the model's predictions using SHAP values\n explainer = shap.TreeExplainer(model)\n shap_values = explainer.shap_values(X)\n\n # visualize the first prediction's explaination\n shap.force_plot(explainer.expected_value, shap_values[0, :], X.iloc[0, :])\n\n # visualize the training set predictions\n shap.force_plot(explainer.expected_value, shap_values, X)\n\n # create a SHAP dependence plot to show the effect of a single feature across the whole\n # dataset\n shap.dependence_plot(5, shap_values, X, show=False)\n shap.dependence_plot(\"RM\", shap_values, X, show=False)\n\n # summarize the effects of all 
the features\n shap.summary_plot(shap_values, X, show=False)\n\n\ndef _conditional_expectation(tree, S, x):\n tree_ind = 0\n\n def R(node_ind):\n\n f = tree.features[tree_ind, node_ind]\n lc = tree.children_left[tree_ind, node_ind]\n rc = tree.children_right[tree_ind, node_ind]\n if lc < 0:\n return tree.values[tree_ind, node_ind]\n if f in S:\n if x[f] <= tree.thresholds[tree_ind, node_ind]:\n return R(lc)\n return R(rc)\n lw = tree.node_sample_weight[tree_ind, lc]\n rw = tree.node_sample_weight[tree_ind, rc]\n return (R(lc) * lw + R(rc) * rw) / (lw + rw)\n\n out = 0.0\n l = tree.values.shape[0] if tree.tree_limit is None else tree.tree_limit\n for i in range(l):\n tree_ind = i\n out += R(0)\n return out\n\n\ndef _brute_force_tree_shap(tree, x):\n m = len(x)\n phi = np.zeros(m)\n for p in itertools.permutations(list(range(m))):\n for i in range(m):\n phi[p[i]] += _conditional_expectation(tree, p[:i + 1], x) - _conditional_expectation(\n tree, p[:i], x)\n return phi / math.factorial(m)\n\n\ndef test_xgboost_direct():\n xgboost = pytest.importorskip('xgboost')\n\n N = 100\n M = 4\n X = np.random.randn(N, M)\n y = np.random.randn(N)\n\n model = xgboost.XGBRegressor()\n model.fit(X, y)\n\n explainer = shap.TreeExplainer(model)\n shap_values = explainer.shap_values(X)\n\n assert np.allclose(shap_values[0, :], _brute_force_tree_shap(explainer.model, X[0, :]))\n\n\ndef test_xgboost_multiclass():\n xgboost = pytest.importorskip('xgboost')\n\n # train XGBoost model\n X, Y = shap.datasets.iris()\n model = xgboost.XGBClassifier(objective=\"binary:logistic\", max_depth=4)\n model.fit(X, Y)\n\n # explain the model's predictions using SHAP values (use pred_contrib in LightGBM)\n shap_values = shap.TreeExplainer(model).shap_values(X)\n\n # ensure plot works for first class\n shap.dependence_plot(0, shap_values[0], X, show=False)\n\n\ndef _validate_shap_values(model, x_test):\n # explain the model's predictions using SHAP values\n tree_explainer = shap.TreeExplainer(model)\n shap_values = tree_explainer.shap_values(x_test)\n expected_values = tree_explainer.expected_value\n # validate values sum to the margin prediction of the model plus expected_value\n assert np.allclose(np.sum(shap_values, axis=1) + expected_values, model.predict(x_test))\n\n\ndef test_xgboost_ranking():\n xgboost = pytest.importorskip('xgboost')\n\n # train lightgbm ranker model\n x_train, y_train, x_test, _, q_train, _ = shap.datasets.rank()\n params = {'objective': 'rank:pairwise', 'learning_rate': 0.1,\n 'gamma': 1.0, 'min_child_weight': 0.1,\n 'max_depth': 5, 'n_estimators': 4}\n model = xgboost.sklearn.XGBRanker(**params)\n model.fit(x_train, y_train, q_train.astype(int))\n _validate_shap_values(model, x_test)\n\n\ndef test_xgboost_mixed_types():\n xgboost = pytest.importorskip('xgboost')\n\n X, y = shap.datasets.boston()\n X[\"LSTAT\"] = X[\"LSTAT\"].astype(np.int64)\n X[\"B\"] = X[\"B\"].astype(np.bool)\n bst = xgboost.train({\"learning_rate\": 0.01, \"silent\": 1}, xgboost.DMatrix(X, label=y), 1000)\n shap_values = shap.TreeExplainer(bst).shap_values(X)\n shap.dependence_plot(0, shap_values, X, show=False)\n\n\ndef test_ngboost():\n ngboost = pytest.importorskip('ngboost')\n\n X, y = shap.datasets.boston()\n model = ngboost.NGBRegressor(n_estimators=20).fit(X, y)\n explainer = shap.TreeExplainer(model, model_output=0)\n assert np.max(np.abs(\n explainer.shap_values(X).sum(1) + explainer.expected_value - model.predict(X))) < 1e-5\n\n\ndef test_pyspark_classifier_decision_tree():\n # pylint: disable=bare-except\n pyspark = 
pytest.importorskip(\"pyspark\")\n pytest.importorskip(\"pyspark.ml\")\n try:\n spark = pyspark.sql.SparkSession.builder.config(\n conf=pyspark.SparkConf().set(\"spark.master\", \"local[*]\")).getOrCreate()\n except:\n pytest.skip(\"Could not create pyspark context\")\n\n iris_sk = sklearn.datasets.load_iris()\n iris = pd.DataFrame(data=np.c_[iris_sk['data'], iris_sk['target']],\n columns=iris_sk['feature_names'] + ['target'])[:100]\n col = [\"sepal_length\", \"sepal_width\", \"petal_length\", \"petal_width\", \"type\"]\n iris = spark.createDataFrame(iris, col)\n iris = pyspark.ml.feature.VectorAssembler(inputCols=col[:-1], outputCol=\"features\").transform(\n iris)\n iris = pyspark.ml.feature.StringIndexer(inputCol=\"type\", outputCol=\"label\").fit(iris).transform(\n iris)\n\n classifiers = [\n pyspark.ml.classification.GBTClassifier(labelCol=\"label\", featuresCol=\"features\"),\n pyspark.ml.classification.RandomForestClassifier(labelCol=\"label\", featuresCol=\"features\"),\n pyspark.ml.classification.DecisionTreeClassifier(labelCol=\"label\", featuresCol=\"features\")]\n for classifier in classifiers:\n model = classifier.fit(iris)\n explainer = shap.TreeExplainer(model)\n # Make sure the model can be serializable to run shap values with spark\n pickle.dumps(explainer)\n X = pd.DataFrame(data=iris_sk.data, columns=iris_sk.feature_names)[ # pylint: disable=E1101\n :100]\n\n shap_values = explainer.shap_values(X)\n expected_values = explainer.expected_value\n\n predictions = model.transform(iris).select(\"rawPrediction\").rdd.map(\n lambda x: [float(y) for y in x['rawPrediction']]).toDF(['class0', 'class1']).toPandas()\n\n if str(type(model)).endswith(\"GBTClassificationModel'>\"):\n diffs = expected_values + shap_values.sum(1) - predictions.class1\n assert np.max(np.abs(diffs)) < 1e-4, \"SHAP values don't sum to model output for class0!\"\n else:\n normalizedPredictions = (predictions.T / predictions.sum(1)).T\n diffs = expected_values[0] + shap_values[0].sum(1) - normalizedPredictions.class0\n assert np.max(\n np.abs(diffs)) < 1e-4, \"SHAP values don't sum to model output for class0!\" + model\n diffs = expected_values[1] + shap_values[1].sum(1) - normalizedPredictions.class1\n assert np.max(\n np.abs(diffs)) < 1e-4, \"SHAP values don't sum to model output for class1!\" + model\n assert (np.abs(\n expected_values - normalizedPredictions.mean()) < 1e-1).all(), \\\n \"Bad expected_value!\" + model\n spark.stop()\n\n\ndef test_pyspark_regression_decision_tree():\n # pylint: disable=bare-except\n pyspark = pytest.importorskip(\"pyspark\")\n pytest.importorskip(\"pyspark.ml\")\n try:\n spark = pyspark.sql.SparkSession.builder.config(\n conf=pyspark.SparkConf().set(\"spark.master\", \"local[*]\")).getOrCreate()\n except:\n pytest.skip(\"Could not create pyspark context\")\n\n iris_sk = sklearn.datasets.load_iris()\n iris = pd.DataFrame(data=np.c_[iris_sk['data'], iris_sk['target']],\n columns=iris_sk['feature_names'] + ['target'])[:100]\n\n # Simple regressor: try to predict sepal length based on the other features\n col = [\"sepal_length\", \"sepal_width\", \"petal_length\", \"petal_width\", \"type\"]\n iris = spark.createDataFrame(iris, col).drop(\"type\")\n iris = pyspark.ml.feature.VectorAssembler(inputCols=col[1:-1], outputCol=\"features\").transform(\n iris)\n\n regressors = [\n pyspark.ml.regression.GBTRegressor(labelCol=\"sepal_length\", featuresCol=\"features\"),\n pyspark.ml.regression.RandomForestRegressor(labelCol=\"sepal_length\", featuresCol=\"features\"),\n 
pyspark.ml.regression.DecisionTreeRegressor(labelCol=\"sepal_length\", featuresCol=\"features\")\n ]\n for regressor in regressors:\n model = regressor.fit(iris)\n explainer = shap.TreeExplainer(model)\n X = pd.DataFrame(data=iris_sk.data, columns=iris_sk.feature_names).drop('sepal length (cm)', 1)[:100] # pylint: disable=E1101\n\n shap_values = explainer.shap_values(X)\n expected_values = explainer.expected_value\n\n # validate values sum to the margin prediction of the model plus expected_value\n predictions = model.transform(iris).select(\"prediction\").toPandas()\n diffs = expected_values + shap_values.sum(1) - predictions[\"prediction\"]\n assert np.max(np.abs(diffs)) < 1e-4, \"SHAP values don't sum to model output for class0!\"\n assert (np.abs(expected_values - predictions.mean()) < 1e-1).all(), \"Bad expected_value!\"\n spark.stop()\n\n\ndef test_sklearn_random_forest_multiclass():\n X, y = shap.datasets.iris()\n y[y == 2] = 1\n model = sklearn.ensemble.RandomForestClassifier(n_estimators=100, max_depth=None,\n min_samples_split=2,\n random_state=0)\n model.fit(X, y)\n\n explainer = shap.TreeExplainer(model)\n shap_values = explainer.shap_values(X)\n\n assert np.abs(shap_values[0][0, 0] - 0.05) < 1e-3\n assert np.abs(shap_values[1][0, 0] + 0.05) < 1e-3\n\n\ndef create_binary_newsgroups_data():\n categories = ['alt.atheism', 'soc.religion.christian']\n newsgroups_train = sklearn.datasets.fetch_20newsgroups(subset='train', categories=categories)\n newsgroups_test = sklearn.datasets.fetch_20newsgroups(subset='test', categories=categories)\n class_names = ['atheism', 'christian']\n return newsgroups_train, newsgroups_test, class_names\n\n\ndef create_random_forest_vectorizer():\n # pylint: disable=unused-argument,no-self-use,missing-class-docstring\n vectorizer = sklearn.feature_extraction.text.CountVectorizer(lowercase=False, min_df=0.0,\n binary=True)\n\n class DenseTransformer(sklearn.base.TransformerMixin):\n def fit(self, X, y=None, **fit_params):\n return self\n\n def transform(self, X, y=None, **fit_params):\n return X.toarray()\n\n rf = sklearn.ensemble.RandomForestClassifier(n_estimators=10, random_state=777)\n return sklearn.pipeline.Pipeline(\n [('vectorizer', vectorizer), ('to_dense', DenseTransformer()), ('rf', rf)])\n\n\ndef test_sklearn_random_forest_newsgroups():\n # note: this test used to fail in native TreeExplainer code due to memory corruption\n newsgroups_train, newsgroups_test, _ = create_binary_newsgroups_data()\n pipeline = create_random_forest_vectorizer()\n pipeline.fit(newsgroups_train.data, newsgroups_train.target)\n rf = pipeline.named_steps['rf']\n vectorizer = pipeline.named_steps['vectorizer']\n densifier = pipeline.named_steps['to_dense']\n\n dense_bg = densifier.transform(vectorizer.transform(newsgroups_test.data[0:20]))\n\n test_row = newsgroups_test.data[83:84]\n explainer = shap.TreeExplainer(rf, dense_bg, feature_perturbation=\"interventional\")\n vec_row = vectorizer.transform(test_row)\n dense_row = densifier.transform(vec_row)\n explainer.shap_values(dense_row)\n\n\ndef test_sklearn_decision_tree_multiclass():\n X, y = shap.datasets.iris()\n y[y == 2] = 1\n model = sklearn.tree.DecisionTreeClassifier(max_depth=None, min_samples_split=2, random_state=0)\n model.fit(X, y)\n\n explainer = shap.TreeExplainer(model)\n shap_values = explainer.shap_values(X)\n assert np.abs(shap_values[0][0, 0] - 0.05) < 1e-1\n assert np.abs(shap_values[1][0, 0] + 0.05) < 1e-1\n\n\ndef test_lightgbm():\n lightgbm = pytest.importorskip(\"lightgbm\")\n\n # train 
lightgbm model\n X, y = shap.datasets.boston()\n model = lightgbm.sklearn.LGBMRegressor(categorical_feature=[8])\n model.fit(X, y)\n\n # explain the model's predictions using SHAP values\n ex = shap.TreeExplainer(model)\n shap_values = ex.shap_values(X)\n\n predicted = model.predict(X, raw_score=True)\n\n assert np.abs(shap_values.sum(1) + ex.expected_value - predicted).max() < 1e-4, \\\n \"SHAP values don't sum to model output!\"\n\n\ndef test_gpboost():\n gpboost = pytest.importorskip(\"gpboost\")\n # train gpboost model\n X, y = shap.datasets.boston()\n data_train = gpboost.Dataset(X, y, categorical_feature=[8])\n model = gpboost.train(params={'objective': 'regression_l2', 'learning_rate': 0.1, 'verbose': 0},\n train_set=data_train, num_boost_round=10)\n\n # explain the model's predictions using SHAP values\n ex = shap.TreeExplainer(model, feature_perturbation=\"tree_path_dependent\")\n shap_values = ex.shap_values(X)\n\n predicted = model.predict(X, raw_score=True)\n\n assert np.abs(shap_values.sum(1) + ex.expected_value - predicted).max() < 1e-4, \\\n \"SHAP values don't sum to model output!\"\n\n\ndef test_catboost():\n catboost = pytest.importorskip(\"catboost\")\n # train catboost model\n X, y = shap.datasets.boston()\n X[\"RAD\"] = X[\"RAD\"].astype(np.int)\n model = catboost.CatBoostRegressor(iterations=30, learning_rate=0.1, random_seed=123)\n p = catboost.Pool(X, y, cat_features=[\"RAD\"])\n model.fit(p, verbose=False, plot=False)\n\n # explain the model's predictions using SHAP values\n ex = shap.TreeExplainer(model)\n shap_values = ex.shap_values(p)\n\n predicted = model.predict(X)\n\n assert np.abs(shap_values.sum(1) + ex.expected_value - predicted).max() < 1e-4, \\\n \"SHAP values don't sum to modThisel output!\"\n\n X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)\n model = catboost.CatBoostClassifier(iterations=10, learning_rate=0.5, random_seed=12)\n model.fit(\n X,\n y,\n verbose=False,\n plot=False\n )\n ex = shap.TreeExplainer(model)\n shap_values = ex.shap_values(X)\n\n predicted = model.predict(X, prediction_type=\"RawFormulaVal\")\n assert np.abs(shap_values.sum(1) + ex.expected_value - predicted).max() < 1e-4, \\\n \"SHAP values don't sum to model output!\"\n\n\ndef test_catboost_categorical():\n catboost = pytest.importorskip(\"catboost\")\n bunch = sklearn.datasets.load_boston()\n X, y = sklearn.datasets.load_boston(return_X_y=True)\n X = pd.DataFrame(X, columns=bunch.feature_names) # pylint: disable=no-member\n X['CHAS'] = X['CHAS'].astype(str)\n\n model = catboost.CatBoostRegressor(100, cat_features=['CHAS'], verbose=False)\n model.fit(X, y)\n\n explainer = shap.TreeExplainer(model)\n shap_values = explainer.shap_values(X)\n\n predicted = model.predict(X)\n\n assert np.abs(shap_values.sum(1) + explainer.expected_value - predicted).max() < 1e-4, \\\n \"SHAP values don't sum to model output!\"\n\n\ndef test_lightgbm_constant_prediction():\n # note: this test used to fail with lightgbm 2.2.1 with error:\n # ValueError: zero-size array to reduction operation maximum which has no identity\n # on TreeExplainer when trying to compute max nodes:\n # max_nodes = np.max([len(t.values) for t in self.trees])\n # The test does not fail with latest lightgbm 2.2.3 however\n lightgbm = pytest.importorskip(\"lightgbm\")\n # train lightgbm model with a constant value for y\n X, y = shap.datasets.boston()\n # use the mean for all values\n mean = np.mean(y)\n y.fill(mean)\n model = lightgbm.sklearn.LGBMRegressor(n_estimators=1)\n model.fit(X, y)\n\n # explain the 
model's predictions using SHAP values\n shap.TreeExplainer(model).shap_values(X)\n\n\ndef test_lightgbm_constant_multiclass():\n # note: this test used to fail with lightgbm 2.2.1 with error:\n # ValueError: zero-size array to reduction operation maximum which has no identity\n # on TreeExplainer when trying to compute max nodes:\n # max_nodes = np.max([len(t.values) for t in self.trees])\n # The test does not fail with latest lightgbm 2.2.3 however\n lightgbm = pytest.importorskip(\"lightgbm\")\n\n # train lightgbm model\n X, Y = shap.datasets.iris()\n Y.fill(1)\n model = lightgbm.sklearn.LGBMClassifier(num_classes=3, objective=\"multiclass\")\n model.fit(X, Y)\n\n # explain the model's predictions using SHAP values\n shap.TreeExplainer(model).shap_values(X)\n\n\ndef test_lightgbm_multiclass():\n lightgbm = pytest.importorskip(\"lightgbm\")\n # train lightgbm model\n X, Y = shap.datasets.iris()\n model = lightgbm.sklearn.LGBMClassifier()\n model.fit(X, Y)\n\n # explain the model's predictions using SHAP values\n shap_values = shap.TreeExplainer(model).shap_values(X)\n\n # ensure plot works for first class\n shap.dependence_plot(0, shap_values[0], X, show=False)\n\n\ndef test_lightgbm_binary():\n lightgbm = pytest.importorskip(\"lightgbm\")\n # train lightgbm model\n X_train, X_test, Y_train, _ = sklearn.model_selection.train_test_split(*shap.datasets.adult(),\n test_size=0.2,\n random_state=0)\n model = lightgbm.sklearn.LGBMClassifier()\n model.fit(X_train, Y_train)\n\n # explain the model's predictions using SHAP values\n shap_values = shap.TreeExplainer(model).shap_values(X_test)\n\n # validate structure of shap values, must be a list of ndarray for both classes\n assert isinstance(shap_values, list)\n assert len(shap_values) == 2\n\n # ensure plot works for first class\n shap.dependence_plot(0, shap_values[0], X_test, show=False)\n\n\n# def test_lightgbm_ranking():\n# try:\n# import lightgbm\n# except:\n# print(\"Skipping test_lightgbm_ranking!\")\n# return\n#\n#\n\n# # train lightgbm ranker model\n# x_train, y_train, x_test, y_test, q_train, q_test = shap.datasets.rank()\n# model = lightgbm.LGBMRanker()\n# model.fit(x_train, y_train, group=q_train, eval_set=[(x_test, y_test)],\n# eval_group=[q_test], eval_at=[1, 3], early_stopping_rounds=5, verbose=False,\n# callbacks=[lightgbm.reset_parameter(learning_rate=lambda x: 0.95 ** x * 0.1)])\n# _validate_shap_values(model, x_test)\n\n# TODO: Test tree_limit argument\n\ndef test_sklearn_interaction():\n # train a simple sklean RF model on the iris dataset\n X, _ = shap.datasets.iris()\n X_train, _, Y_train, _ = sklearn.model_selection.train_test_split(*shap.datasets.iris(),\n test_size=0.2, random_state=0)\n rforest = sklearn.ensemble.RandomForestClassifier(n_estimators=100, max_depth=None,\n min_samples_split=2,\n random_state=0)\n model = rforest.fit(X_train, Y_train)\n\n # verify symmetry of the interaction values (this typically breaks if anything is wrong)\n interaction_vals = shap.TreeExplainer(model).shap_interaction_values(X)\n for i, _ in enumerate(interaction_vals):\n for j, _ in enumerate(interaction_vals[i]):\n for k, _ in enumerate(interaction_vals[i][j]):\n for l, _ in enumerate(interaction_vals[i][j][k]):\n assert abs(interaction_vals[i][j][k][l] - interaction_vals[i][j][l][k]) < 1e-4\n\n # ensure the interaction plot works\n shap.summary_plot(interaction_vals[0], X, show=False)\n\n\ndef test_lightgbm_interaction():\n lightgbm = pytest.importorskip(\"lightgbm\")\n\n # train XGBoost model\n X, y = shap.datasets.boston()\n model 
= lightgbm.sklearn.LGBMRegressor()\n model.fit(X, y)\n\n # verify symmetry of the interaction values (this typically breaks if anything is wrong)\n interaction_vals = shap.TreeExplainer(model).shap_interaction_values(X)\n for j, _ in enumerate(interaction_vals):\n for k, _ in enumerate(interaction_vals[j]):\n for l, _ in enumerate(interaction_vals[j][k]):\n assert abs(interaction_vals[j][k][l] - interaction_vals[j][l][k]) < 1e-4\n\n\ndef test_sum_match_random_forest():\n X_train, X_test, Y_train, _ = sklearn.model_selection.train_test_split(*shap.datasets.adult(),\n test_size=0.2,\n random_state=0)\n clf = sklearn.ensemble.RandomForestClassifier(random_state=202, n_estimators=10, max_depth=10)\n clf.fit(X_train, Y_train)\n predicted = clf.predict_proba(X_test)\n ex = shap.TreeExplainer(clf)\n shap_values = ex.shap_values(X_test)\n assert np.abs(shap_values[0].sum(1) + ex.expected_value[0] - predicted[:, 0]).max() < 1e-4, \\\n \"SHAP values don't sum to model output!\"\n\n\ndef test_sum_match_extra_trees():\n X_train, X_test, Y_train, _ = sklearn.model_selection.train_test_split(*shap.datasets.adult(),\n test_size=0.2,\n random_state=0)\n clf = sklearn.ensemble.ExtraTreesRegressor(random_state=202, n_estimators=10, max_depth=10)\n clf.fit(X_train, Y_train)\n predicted = clf.predict(X_test)\n ex = shap.TreeExplainer(clf)\n shap_values = ex.shap_values(X_test)\n assert np.abs(shap_values.sum(1) + ex.expected_value - predicted).max() < 1e-4, \\\n \"SHAP values don't sum to model output!\"\n\n\ndef test_single_row_random_forest():\n X_train, X_test, Y_train, _ = sklearn.model_selection.train_test_split(*shap.datasets.adult(),\n test_size=0.2,\n random_state=0)\n clf = sklearn.ensemble.RandomForestClassifier(random_state=202, n_estimators=10, max_depth=10)\n clf.fit(X_train, Y_train)\n predicted = clf.predict_proba(X_test)\n ex = shap.TreeExplainer(clf)\n shap_values = ex.shap_values(X_test.iloc[0, :])\n assert np.abs(shap_values[0].sum() + ex.expected_value[0] - predicted[0, 0]) < 1e-4, \\\n \"SHAP values don't sum to model output!\"\n\n\ndef test_sum_match_gradient_boosting_classifier():\n X_train, X_test, Y_train, _ = sklearn.model_selection.train_test_split(*shap.datasets.adult(),\n test_size=0.2,\n random_state=0)\n clf = sklearn.ensemble.GradientBoostingClassifier(random_state=202, n_estimators=10,\n max_depth=10)\n clf.fit(X_train, Y_train)\n\n # Use decision function to get prediction before it is mapped to a probability\n predicted = clf.decision_function(X_test)\n\n # check SHAP values\n ex = shap.TreeExplainer(clf)\n initial_ex_value = ex.expected_value\n shap_values = ex.shap_values(X_test)\n assert np.abs(shap_values.sum(1) + ex.expected_value - predicted).max() < 1e-4, \\\n \"SHAP values don't sum to model output!\"\n\n # check initial expected value\n assert np.abs(initial_ex_value - ex.expected_value) < 1e-4, \"Inital expected value is wrong!\"\n\n # check SHAP interaction values\n shap_interaction_values = ex.shap_interaction_values(X_test.iloc[:10, :])\n assert np.abs(\n shap_interaction_values.sum(1).sum(1) + ex.expected_value - predicted[:10]).max() < 1e-4, \\\n \"SHAP interaction values don't sum to model output!\"\n\n\ndef test_single_row_gradient_boosting_classifier():\n X_train, X_test, Y_train, _ = sklearn.model_selection.train_test_split(*shap.datasets.adult(),\n test_size=0.2,\n random_state=0)\n clf = sklearn.ensemble.GradientBoostingClassifier(random_state=202, n_estimators=10,\n max_depth=10)\n clf.fit(X_train, Y_train)\n predicted = clf.decision_function(X_test)\n 
ex = shap.TreeExplainer(clf)\n shap_values = ex.shap_values(X_test.iloc[0, :])\n assert np.abs(shap_values.sum() + ex.expected_value - predicted[0]) < 1e-4, \\\n \"SHAP values don't sum to model output!\"\n\n\ndef test_HistGradientBoostingRegressor():\n # train a tree-based model\n X, y = shap.datasets.diabetes()\n model = sklearn.ensemble.HistGradientBoostingRegressor(max_iter=1000, max_depth=6).fit(X, y)\n explainer = shap.TreeExplainer(model)\n shap_values = explainer.shap_values(X)\n assert np.max(np.abs(shap_values.sum(1) + explainer.expected_value - model.predict(X))) < 1e-4\n\n\ndef test_HistGradientBoostingClassifier_proba():\n # train a tree-based model\n X, y = shap.datasets.adult()\n model = sklearn.ensemble.HistGradientBoostingClassifier(max_iter=10, max_depth=6).fit(X, y)\n explainer = shap.TreeExplainer(model, shap.sample(X, 10), model_output=\"predict_proba\")\n shap_values = explainer.shap_values(X)\n assert np.max(np.abs(\n shap_values[0].sum(1) + explainer.expected_value[0] - model.predict_proba(X)[:, 0])) < 1e-4\n\n\ndef test_HistGradientBoostingClassifier_multidim():\n # train a tree-based model\n X, y = shap.datasets.adult()\n X = X[:100]\n y = y[:100]\n y = np.random.randint(0, 3, len(y))\n model = sklearn.ensemble.HistGradientBoostingClassifier(max_iter=10, max_depth=6).fit(X, y)\n explainer = shap.TreeExplainer(model, shap.sample(X, 10), model_output=\"raw\")\n shap_values = explainer.shap_values(X)\n assert np.max(np.abs(shap_values[0].sum(1) +\n explainer.expected_value[0] - model.decision_function(X)[:, 0])) < 1e-4\n\n\ndef test_sum_match_gradient_boosting_regressor():\n X_train, X_test, Y_train, _ = sklearn.model_selection.train_test_split(*shap.datasets.adult(),\n test_size=0.2,\n random_state=0)\n clf = sklearn.ensemble.GradientBoostingRegressor(random_state=202, n_estimators=10,\n max_depth=10)\n clf.fit(X_train, Y_train)\n\n predicted = clf.predict(X_test)\n ex = shap.TreeExplainer(clf)\n shap_values = ex.shap_values(X_test)\n assert np.abs(shap_values.sum(1) + ex.expected_value - predicted).max() < 1e-4, \\\n \"SHAP values don't sum to model output!\"\n\n\ndef test_single_row_gradient_boosting_regressor():\n X_train, X_test, Y_train, _ = sklearn.model_selection.train_test_split(*shap.datasets.adult(),\n test_size=0.2,\n random_state=0)\n clf = sklearn.ensemble.GradientBoostingRegressor(random_state=202, n_estimators=10,\n max_depth=10)\n clf.fit(X_train, Y_train)\n\n predicted = clf.predict(X_test)\n ex = shap.TreeExplainer(clf)\n shap_values = ex.shap_values(X_test.iloc[0, :])\n assert np.abs(shap_values.sum() + ex.expected_value - predicted[0]) < 1e-4, \\\n \"SHAP values don't sum to model output!\"\n\n\ndef test_multi_target_random_forest():\n X_train, X_test, Y_train, _ = sklearn.model_selection.train_test_split(\n *shap.datasets.linnerud(), test_size=0.2,\n random_state=0)\n est = sklearn.ensemble.RandomForestRegressor(random_state=202, n_estimators=10, max_depth=10)\n est.fit(X_train, Y_train)\n predicted = est.predict(X_test)\n\n explainer = shap.TreeExplainer(est)\n expected_values = np.asarray(explainer.expected_value)\n assert len(\n expected_values) == est.n_outputs_, \"Length of expected_values doesn't match n_outputs_\"\n shap_values = np.asarray(explainer.shap_values(X_test)).reshape(\n est.n_outputs_ * X_test.shape[0], X_test.shape[1])\n phi = np.hstack((shap_values, np.repeat(expected_values, X_test.shape[0]).reshape(-1, 1)))\n assert np.allclose(phi.sum(1), predicted.flatten(order=\"F\"), atol=1e-4)\n\n\ndef test_isolation_forest():\n 
IsolationForest = pytest.importorskip(\"sklearn.ensemble.IsolationForest\")\n _average_path_length = pytest.importorskip(\"sklearn.ensemble.iforest._average_path_length\")\n X, _ = shap.datasets.boston()\n for max_features in [1.0, 0.75]:\n iso = IsolationForest(max_features=max_features)\n iso.fit(X)\n\n explainer = shap.TreeExplainer(iso)\n shap_values = explainer.shap_values(X)\n\n l = _average_path_length( # pylint: disable=protected-access\n np.array([iso.max_samples_]))[0]\n score_from_shap = - 2 ** (- (np.sum(shap_values, axis=1) + explainer.expected_value) / l)\n assert np.allclose(iso.score_samples(X), score_from_shap, atol=1e-7)\n\n\ndef test_pyod_isolation_forest():\n try:\n IForest = pytest.importorskip(\"pyod.models.iforest.IForest\")\n except:\n pytest.skip(\"Failed to import pyod.models.iforest.IForest\")\n _average_path_length = pytest.importorskip(\"sklearn.ensemble.iforest._average_path_length\")\n\n X, _ = shap.datasets.boston()\n for max_features in [1.0, 0.75]:\n iso = IForest(max_features=max_features)\n iso.fit(X)\n\n explainer = shap.TreeExplainer(iso)\n shap_values = explainer.shap_values(X)\n\n l = _average_path_length(np.array([iso.max_samples_]))[0]\n score_from_shap = - 2 ** (- (np.sum(shap_values, axis=1) + explainer.expected_value) / l)\n assert np.allclose(iso.detector_.score_samples(X), score_from_shap, atol=1e-7)\n\n\n# TODO: this has sometimes failed with strange answers, should run memcheck on this for any\n# memory issues at some point...\ndef test_multi_target_extra_trees():\n X_train, X_test, Y_train, _ = sklearn.model_selection.train_test_split(\n *shap.datasets.linnerud(), test_size=0.2,\n random_state=0)\n est = sklearn.ensemble.ExtraTreesRegressor(random_state=202, n_estimators=10, max_depth=10)\n est.fit(X_train, Y_train)\n predicted = est.predict(X_test)\n\n explainer = shap.TreeExplainer(est)\n expected_values = np.asarray(explainer.expected_value)\n assert len(\n expected_values) == est.n_outputs_, \"Length of expected_values doesn't match n_outputs_\"\n shap_values = np.asarray(explainer.shap_values(X_test)).reshape(\n est.n_outputs_ * X_test.shape[0], X_test.shape[1])\n phi = np.hstack((shap_values, np.repeat(expected_values, X_test.shape[0]).reshape(-1, 1)))\n assert np.allclose(phi.sum(1), predicted.flatten(order=\"F\"), atol=1e-4)\n\n\ndef test_provided_background_tree_path_dependent():\n xgboost = pytest.importorskip(\"xgboost\")\n np.random.seed(10)\n\n X, y = shap.datasets.iris()\n X = X[:100]\n y = y[:100]\n train_x, test_x, train_y, _ = sklearn.model_selection.train_test_split(X, y, random_state=1)\n feature_names = [\"a\", \"b\", \"c\", \"d\"]\n dtrain = xgboost.DMatrix(train_x, label=train_y, feature_names=feature_names)\n dtest = xgboost.DMatrix(test_x, feature_names=feature_names)\n\n params = {\n 'booster': 'gbtree',\n 'objective': 'binary:logistic',\n 'max_depth': 4,\n 'eta': 0.1,\n 'nthread': -1,\n 'silent': 1\n }\n\n bst = xgboost.train(params=params, dtrain=dtrain, num_boost_round=100)\n\n explainer = shap.TreeExplainer(bst, test_x, feature_perturbation=\"tree_path_dependent\")\n diffs = explainer.expected_value + \\\n explainer.shap_values(test_x).sum(1) - bst.predict(dtest, output_margin=True)\n assert np.max(np.abs(diffs)) < 1e-4, \"SHAP values don't sum to model output!\"\n assert np.abs(explainer.expected_value - bst.predict(dtest,\n output_margin=True).mean()) < 1e-6, \\\n \"Bad expected_value!\"\n\n\ndef test_provided_background_independent():\n xgboost = pytest.importorskip(\"xgboost\")\n\n np.random.seed(10)\n\n X, 
y = shap.datasets.iris()\n X = X[:100]\n y = y[:100]\n train_x, test_x, train_y, _ = sklearn.model_selection.train_test_split(X, y, random_state=1)\n feature_names = [\"a\", \"b\", \"c\", \"d\"]\n dtrain = xgboost.DMatrix(train_x, label=train_y, feature_names=feature_names)\n dtest = xgboost.DMatrix(test_x, feature_names=feature_names)\n\n params = {\n 'booster': 'gbtree',\n 'objective': 'binary:logistic',\n 'max_depth': 4,\n 'eta': 0.1,\n 'nthread': -1,\n 'silent': 1\n }\n\n bst = xgboost.train(params=params, dtrain=dtrain, num_boost_round=100)\n\n explainer = shap.TreeExplainer(bst, test_x, feature_perturbation=\"interventional\")\n diffs = explainer.expected_value + \\\n explainer.shap_values(test_x).sum(1) - bst.predict(dtest, output_margin=True)\n assert np.max(np.abs(diffs)) < 1e-4, \"SHAP values don't sum to model output!\"\n assert np.abs(explainer.expected_value - bst.predict(dtest,\n output_margin=True).mean()) < 1e-4, \\\n \"Bad expected_value!\"\n\n\ndef test_provided_background_independent_prob_output():\n xgboost = pytest.importorskip(\"xgboost\")\n\n np.random.seed(10)\n\n X, y = shap.datasets.iris()\n X = X[:100]\n y = y[:100]\n train_x, test_x, train_y, _ = sklearn.model_selection.train_test_split(X, y, random_state=1)\n feature_names = [\"a\", \"b\", \"c\", \"d\"]\n dtrain = xgboost.DMatrix(train_x, label=train_y, feature_names=feature_names)\n dtest = xgboost.DMatrix(test_x, feature_names=feature_names)\n\n for objective in [\"reg:logistic\", \"binary:logistic\"]:\n params = {\n 'booster': 'gbtree',\n 'objective': objective,\n 'max_depth': 4,\n 'eta': 0.1,\n 'nthread': -1,\n 'silent': 1\n }\n\n bst = xgboost.train(params=params, dtrain=dtrain, num_boost_round=100)\n\n explainer = shap.TreeExplainer(bst, test_x, feature_perturbation=\"interventional\",\n model_output=\"probability\")\n diffs = explainer.expected_value + explainer.shap_values(test_x).sum(1) - bst.predict(dtest)\n assert np.max(np.abs(diffs)) < 1e-4, \"SHAP values don't sum to model output!\"\n assert np.abs(\n explainer.expected_value - bst.predict(dtest).mean()) < 1e-4, \"Bad expected_value!\"\n\n\ndef test_single_tree_compare_with_kernel_shap():\n \"\"\" Compare with Kernel SHAP, which makes the same independence assumptions\n as Independent Tree SHAP. 
Namely, they both assume independence between the\n set being conditioned on, and the remainder set.\n \"\"\"\n xgboost = pytest.importorskip(\"xgboost\")\n np.random.seed(10)\n\n n = 100\n X = np.random.normal(size=(n, 7))\n y = np.matmul(X, [-2, 1, 3, 5, 2, 20, -5])\n\n # train a model with single tree\n Xd = xgboost.DMatrix(X, label=y)\n model = xgboost.train({'eta': 1,\n 'max_depth': 6,\n 'base_score': 0,\n \"lambda\": 0},\n Xd, 1)\n ypred = model.predict(Xd)\n\n # Compare for five random samples\n for _ in range(5):\n x_ind = np.random.choice(X.shape[1])\n x = X[x_ind:x_ind + 1, :]\n\n expl = shap.TreeExplainer(model, X, feature_perturbation=\"interventional\")\n f = lambda inp: model.predict(xgboost.DMatrix(inp))\n expl_kern = shap.KernelExplainer(f, X)\n\n itshap = expl.shap_values(x)\n kshap = expl_kern.shap_values(x, nsamples=150)\n assert np.allclose(itshap, kshap), \\\n \"Kernel SHAP doesn't match Independent Tree SHAP!\"\n assert np.allclose(itshap.sum() + expl.expected_value, ypred[x_ind]), \\\n \"SHAP values don't sum to model output!\"\n\n\ndef test_several_trees():\n \"\"\" Make sure Independent Tree SHAP sums up to the correct value for\n larger models (20 trees).\n \"\"\"\n xgboost = pytest.importorskip(\"xgboost\")\n np.random.seed(10)\n\n n = 1000\n X = np.random.normal(size=(n, 7))\n b = np.array([-2, 1, 3, 5, 2, 20, -5])\n y = np.matmul(X, b)\n max_depth = 6\n\n # train a model with single tree\n Xd = xgboost.DMatrix(X, label=y)\n model = xgboost.train({'eta': 1,\n 'max_depth': max_depth,\n 'base_score': 0,\n \"lambda\": 0},\n Xd, 20)\n ypred = model.predict(Xd)\n\n # Compare for five random samples\n for _ in range(5):\n x_ind = np.random.choice(X.shape[1])\n x = X[x_ind:x_ind + 1, :]\n expl = shap.TreeExplainer(model, X, feature_perturbation=\"interventional\")\n itshap = expl.shap_values(x)\n assert np.allclose(itshap.sum() + expl.expected_value, ypred[x_ind]), \\\n \"SHAP values don't sum to model output!\"\n\n\ndef test_single_tree_nonlinear_transformations():\n \"\"\" Make sure Independent Tree SHAP single trees with non-linear\n transformations.\n \"\"\"\n # Supported non-linear transforms\n # def sigmoid(x):\n # return(1/(1+np.exp(-x)))\n\n # def log_loss(yt,yp):\n # return(-(yt*np.log(yp) + (1 - yt)*np.log(1 - yp)))\n\n # def mse(yt,yp):\n # return(np.square(yt-yp))\n\n xgboost = pytest.importorskip(\"xgboost\")\n np.random.seed(10)\n\n n = 100\n X = np.random.normal(size=(n, 7))\n y = np.matmul(X, [-2, 1, 3, 5, 2, 20, -5])\n y = y + abs(min(y))\n y = np.random.binomial(n=1, p=y / max(y))\n\n # train a model with single tree\n Xd = xgboost.DMatrix(X, label=y)\n model = xgboost.train({'eta': 1,\n 'max_depth': 6,\n 'base_score': y.mean(),\n \"lambda\": 0,\n \"objective\": \"binary:logistic\"},\n Xd, 1)\n pred = model.predict(Xd, output_margin=True) # In margin space (log odds)\n trans_pred = model.predict(Xd) # In probability space\n\n expl = shap.TreeExplainer(model, X, feature_perturbation=\"interventional\")\n f = lambda inp: model.predict(xgboost.DMatrix(inp), output_margin=True)\n expl_kern = shap.KernelExplainer(f, X)\n\n x_ind = 0\n x = X[x_ind:x_ind + 1, :]\n itshap = expl.shap_values(x)\n kshap = expl_kern.shap_values(x, nsamples=300)\n assert np.allclose(itshap.sum() + expl.expected_value, pred[x_ind]), \\\n \"SHAP values don't sum to model output on explaining margin!\"\n assert np.allclose(itshap, kshap), \\\n \"Independent Tree SHAP doesn't match Kernel SHAP on explaining margin!\"\n\n model.set_attr(objective=\"binary:logistic\")\n expl = 
shap.TreeExplainer(model, X, feature_perturbation=\"interventional\",\n model_output=\"probability\")\n itshap = expl.shap_values(x)\n assert np.allclose(itshap.sum() + expl.expected_value, trans_pred[x_ind]), \\\n \"SHAP values don't sum to model output on explaining logistic!\"\n\n # expl = shap.TreeExplainer(model, X, feature_perturbation=\"interventional\",\n # model_output=\"logloss\")\n # itshap = expl.shap_values(x,y=y[x_ind])\n # margin_pred = model.predict(xgb.DMatrix(x),output_margin=True)\n # currpred = log_loss(y[x_ind],sigmoid(margin_pred))\n # assert np.allclose(itshap.sum(), currpred - expl.expected_value), \\\n # \"SHAP values don't sum to model output on explaining logloss!\"\n\n\ndef test_xgboost_classifier_independent_margin():\n xgboost = pytest.importorskip(\"xgboost\")\n # train XGBoost model\n np.random.seed(10)\n n = 1000\n X = np.random.normal(size=(n, 7))\n y = np.matmul(X, [-2, 1, 3, 5, 2, 20, -5])\n y = y + abs(min(y))\n y = np.random.binomial(n=1, p=y / max(y))\n\n model = xgboost.XGBClassifier(n_estimators=10, max_depth=5)\n model.fit(X, y)\n\n # explain the model's predictions using SHAP values\n e = shap.TreeExplainer(model, X, feature_perturbation=\"interventional\", model_output=\"margin\")\n shap_values = e.shap_values(X)\n\n assert np.allclose(shap_values.sum(1) + e.expected_value, model.predict(X, output_margin=True))\n\n\ndef test_xgboost_classifier_independent_probability():\n xgboost = pytest.importorskip(\"xgboost\")\n\n # train XGBoost model\n np.random.seed(10)\n n = 1000\n X = np.random.normal(size=(n, 7))\n b = np.array([-2, 1, 3, 5, 2, 20, -5])\n y = np.matmul(X, b)\n y = y + abs(min(y))\n y = np.random.binomial(n=1, p=y / max(y))\n\n model = xgboost.XGBClassifier(n_estimators=10, max_depth=5)\n model.fit(X, y)\n\n # explain the model's predictions using SHAP values\n e = shap.TreeExplainer(model, X, feature_perturbation=\"interventional\",\n model_output=\"probability\")\n shap_values = e.shap_values(X)\n\n assert np.allclose(shap_values.sum(1) + e.expected_value, model.predict_proba(X)[:, 1])\n\n\n# def test_front_page_xgboost_global_path_dependent():\n# try:\n# xgboost = pytest.importorskip(\"xgboost\")\n# except:\n# print(\"Skipping test_front_page_xgboost!\")\n# return\n#\n#\n\n# # train XGBoost model\n# X, y = shap.datasets.boston()\n# model = xgboost.XGBRegressor()\n# model.fit(X, y)\n\n# # explain the model's predictions using SHAP values\n# explainer = shap.TreeExplainer(model, X, feature_perturbation=\"global_path_dependent\")\n# shap_values = explainer.shap_values(X)\n\n# assert np.allclose(shap_values.sum(1) + explainer.expected_value, model.predict(X))\n\ndef test_skopt_rf_et():\n skopt = pytest.importorskip(\"skopt\")\n\n # Define an objective function for skopt to optimise.\n def objective_function(x):\n return x[0] ** 2 - x[1] ** 2 + x[1] * x[0]\n\n # Uneven bounds to prevent \"objective has been evaluated\" warnings.\n problem_bounds = [(-1e6, 3e6), (-1e6, 3e6)]\n\n # Don't worry about \"objective has been evaluated\" warnings.\n result_et = skopt.forest_minimize(objective_function, problem_bounds, n_calls=100,\n base_estimator=\"ET\")\n result_rf = skopt.forest_minimize(objective_function, problem_bounds, n_calls=100,\n base_estimator=\"RF\")\n\n et_df = pd.DataFrame(result_et.x_iters, columns=[\"X0\", \"X1\"])\n\n # Explain the model's predictions.\n explainer_et = shap.TreeExplainer(result_et.models[-1], et_df)\n shap_values_et = explainer_et.shap_values(et_df)\n\n rf_df = pd.DataFrame(result_rf.x_iters, columns=[\"X0\", 
\"X1\"])\n\n # Explain the model's predictions (Random forest).\n explainer_rf = shap.TreeExplainer(result_rf.models[-1], rf_df)\n shap_values_rf = explainer_rf.shap_values(rf_df)\n\n assert np.allclose(shap_values_et.sum(1) + explainer_et.expected_value,\n result_et.models[-1].predict(et_df))\n assert np.allclose(shap_values_rf.sum(1) + explainer_rf.expected_value,\n result_rf.models[-1].predict(rf_df))\n" ]
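Taken together, the dependence-plot code and the test suite above suggest a compact usage sketch. The snippet below is a minimal illustration and not part of either file: it assumes shap and scikit-learn are installed, and it borrows the Boston-housing dataset and a small gradient-boosting model in the same spirit as the tests; only the keyword arguments to dependence_plot come from the docstring above. It first verifies the additivity property that most of the tests assert (per-row SHAP values plus expected_value reproduce the model's raw prediction), then draws a dependence plot for the top-ranked feature.

import numpy as np
import shap
import sklearn.ensemble

# Illustrative data and model; any tree ensemble supported by TreeExplainer would do.
X, y = shap.datasets.boston()
model = sklearn.ensemble.GradientBoostingRegressor(n_estimators=10, max_depth=4).fit(X, y)

explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X)

# Additivity ("local accuracy"): for each row, the SHAP values plus expected_value
# sum to the model's raw prediction for that row.
assert np.abs(
    shap_values.sum(axis=1) + explainer.expected_value - model.predict(X)
).max() < 1e-4

# Dependence plot for the highest-ranked feature, colored by the named feature "RM",
# with percentile-trimmed x-limits and a little jitter for discrete values.
shap.dependence_plot(
    "rank(0)", shap_values, X,
    interaction_index="RM",
    xmin="percentile(1)", xmax="percentile(99)",
    x_jitter=0.3,
    show=False,
)

For multi-output models (for example the LightGBM binary classifier exercised above), shap_values comes back as a list with one array per class, so the same additivity check is applied class by class with expected_value[i], as the binary and multiclass tests do.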
[ [ "numpy.nanmax", "numpy.hstack", "matplotlib.colors.BoundaryNorm", "numpy.invert", "numpy.unique", "numpy.isnan", "numpy.arange", "numpy.min", "numpy.nanmin", "matplotlib.pyplot.sca", "numpy.random.shuffle", "numpy.sort", "matplotlib.pyplot.colorbar", "numpy.max", "numpy.diff", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "sklearn.ensemble.RandomForestRegressor", "numpy.asarray", "sklearn.ensemble.HistGradientBoostingClassifier", "pandas.DataFrame", "sklearn.tree.DecisionTreeClassifier", "numpy.random.randn", "numpy.mean", "sklearn.datasets.load_boston", "sklearn.datasets.fetch_20newsgroups", "numpy.allclose", "sklearn.ensemble.RandomForestClassifier", "numpy.matmul", "sklearn.ensemble.GradientBoostingRegressor", "sklearn.feature_extraction.text.CountVectorizer", "numpy.repeat", "numpy.zeros", "sklearn.ensemble.HistGradientBoostingRegressor", "numpy.random.choice", "sklearn.datasets.load_iris", "sklearn.model_selection.train_test_split", "sklearn.ensemble.ExtraTreesRegressor", "sklearn.ensemble.GradientBoostingClassifier", "numpy.array", "numpy.sum", "sklearn.datasets.load_breast_cancer", "numpy.random.seed", "numpy.abs", "numpy.random.normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
konpat/psi4numpy
[ "dc0b51d9a05286023474e1e5b4828705676bf60d", "dc0b51d9a05286023474e1e5b4828705676bf60d", "dc0b51d9a05286023474e1e5b4828705676bf60d", "dc0b51d9a05286023474e1e5b4828705676bf60d", "dc0b51d9a05286023474e1e5b4828705676bf60d" ]
[ "Response-Theory/Coupled-Cluster/RHF/helper_ccpert.py", "Self-Consistent-Field/RHF.py", "Coupled-Cluster/RHF/helper_ccenergy.py", "Self-Consistent-Field/RHF_EFP.py", "Configuration-Interaction/CI_DL.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nA simple python script to compute RHF-CCSD linear response function \nfor calculating properties like dipole polarizabilities, optical\nrotations etc. \n\nReferences: \n1. A Whirlwind Introduction to Coupled Cluster Response Theory, T.D. Crawford, Private Notes,\n (pdf in the current directory).\n2. H. Koch and P. Jørgensen, J. Chem. Phys. Volume 93, pp. 3333-3344 (1991).\n3. S. R. Gwaltney, M. Nooijen and R.J. Bartlett, Chemical Physics Letters, 248, pp. 189-198 (1996).\n4. Chapter 13, \"Molecular Electronic-Structure Theory\", Trygve Helgaker, \n Poul Jørgensen and Jeppe Olsen, John Wiley & Sons Ltd.\n\n\"\"\"\n\n__authors__ = \"Ashutosh Kumar\"\n__credits__ = [\"Ashutosh Kumar\", \"Daniel G. A. Smith\", \"Lori A. Burns\", \"T. D. Crawford\"]\n\n__copyright__ = \"(c) 2014-2017, The Psi4NumPy Developers\"\n__license__ = \"BSD-3-Clause\"\n__date__ = \"2017-05-17\"\n\nimport time\nimport numpy as np\nimport psi4\nimport sys\nsys.path.append(\"../../../Coupled-Cluster/RHF\")\nfrom utils import ndot\nfrom utils import helper_diis\n\nclass HelperCCPert(object):\n def __init__(self, name, pert, ccsd, hbar, cclambda, omega):\n\n # start of the ccpert class\n time_init = time.time()\n\n # Grabbing all the info from the wavefunctions passed\n self.pert = pert\n self.name = name\n self.MO = ccsd.MO\n self.ndocc = ccsd.ndocc\n self.nmo = ccsd.nmo\n self.nocc = ccsd.ndocc\n self.nvirt = ccsd.nmo - ccsd.nocc \n self.mints = ccsd.mints\n self.F = ccsd.F\n self.t1 = ccsd.t1\n self.t2 = ccsd.t2\n self.ttau = hbar.ttau\n self.Loovv = hbar.Loovv\n self.Looov = hbar.Looov\n self.Lvovv = hbar.Lvovv\n self.Hov = hbar.Hov\n self.Hvv = hbar.Hvv\n self.Hoo = hbar.Hoo\n self.Hoooo = hbar.Hoooo\n self.Hvvvv = hbar.Hvvvv\n self.Hvovv = hbar.Hvovv\n self.Hooov = hbar.Hooov\n self.Hovvo = hbar.Hovvo\n self.Hovov = hbar.Hovov\n self.Hvvvo = hbar.Hvvvo\n self.Hovoo = hbar.Hovoo\n self.l1 = cclambda.l1\n self.l2 = cclambda.l2\n self.omega = omega\n\n self.slice_o = slice(0, self.nocc)\n self.slice_v = slice(self.nocc, self.nmo)\n self.slice_a = slice(0, self.nmo)\n self.slice_dict = {'o' : self.slice_o, 'v' : self.slice_v,\n 'a' : self.slice_a}\n\n # Build the denominators from diagonal elements of Hbar and omega\n self.Dia = self.Hoo.diagonal().reshape(-1, 1) - self.Hvv.diagonal()\n self.Dijab = self.Hoo.diagonal().reshape(-1, 1, 1, 1) + self.Hoo.diagonal().reshape(-1, 1, 1) - self.Hvv.diagonal().reshape(-1, 1) - self.Hvv.diagonal() \n self.Dia += omega\n self.Dijab += omega\n \n # Guesses for X1 and X2 amplitudes (First order perturbed T amplitudes)\n self.x1 = self.build_Avo().swapaxes(0,1)/self.Dia\n self.pertbar_ijab = self.build_Avvoo().swapaxes(0,2).swapaxes(1,3)\n self.x2 = self.pertbar_ijab.copy()\n self.x2 += self.pertbar_ijab.swapaxes(0,1).swapaxes(2,3)\n self.x2 = self.x2/self.Dijab\n \n # Guesses for Y1 and Y2 amplitudes (First order perturbed Lambda amplitudes)\n self.y1 = 2.0 * self.x1.copy() \n self.y2 = 4.0 * self.x2.copy() \n self.y2 -= 2.0 * self.x2.swapaxes(2,3)\n\n # Conventions used : \n # occ orbitals : i, j, k, l, m, n\n # virt orbitals : a, b, c, d, e, f\n # all oribitals : p, q, r, s, t, u, v\n\n def get_MO(self, string):\n if len(string) != 4:\n psi4.core.clean()\n raise Exception('get_MO: string %s must have 4 elements.' 
% string)\n return self.MO[self.slice_dict[string[0]], self.slice_dict[string[1]],\n self.slice_dict[string[2]], self.slice_dict[string[3]]]\n\n def get_F(self, string):\n if len(string) != 2:\n psi4.core.clean()\n raise Exception('get_F: string %s must have 2 elements.' % string)\n return self.F[self.slice_dict[string[0]], self.slice_dict[string[1]]]\n\n\n def get_pert(self, string):\n if len(string) != 2:\n psi4.core.clean()\n raise Exception('get_pert: string %s must have 2 elements.' % string)\n return self.pert[self.slice_dict[string[0]], self.slice_dict[string[1]]]\n\n # Build different pieces of the similarity transformed perturbation operator\n # using ground state T amplitudes i.e T(0).\n # A_bar = e^{-T(0)} A e^{T(0)} = A + [A,T(0)] + 1/2! [[A,T(0)],T(0)] \n # since A is a one body operator, the expansion truncates at double commutators.\n\n def build_Aoo(self):\n Aoo = self.get_pert('oo').copy()\n Aoo += ndot('ie,me->mi', self.t1, self.get_pert('ov'))\n return Aoo\n\n def build_Aov(self):\n Aov = self.get_pert('ov').copy()\n return Aov\n\n def build_Avo(self):\n Avo = self.get_pert('vo').copy()\n Avo += ndot('ae,ie->ai', self.get_pert('vv'), self.t1)\n Avo -= ndot('ma,mi->ai', self.t1, self.get_pert('oo'))\n Avo += ndot('miea,me->ai', self.t2, self.get_pert('ov'), prefactor=2.0)\n Avo += ndot('imea,me->ai', self.t2, self.get_pert('ov'), prefactor=-1.0)\n tmp = ndot('ie,ma->imea', self.t1, self.t1)\n Avo -= ndot('imea,me->ai', tmp, self.get_pert('ov'))\n return Avo\n\n def build_Avv(self):\n Avv = self.get_pert('vv').copy()\n Avv -= ndot('ma,me->ae', self.t1, self.get_pert('ov'))\n return Avv\n\n def build_Aovoo(self):\n Aovoo = 0\n Aovoo += ndot('ijeb,me->mbij', self.t2, self.get_pert('ov'))\n return Aovoo\n\n def build_Avvvo(self):\n Avvvo = 0\n Avvvo -= ndot('miab,me->abei', self.t2, self.get_pert('ov'))\n return Avvvo\n\n def build_Avvoo(self):\n Avvoo = 0\n Avvoo += ndot('ijeb,ae->abij', self.t2, self.build_Avv())\n Avvoo -= ndot('mjab,mi->abij', self.t2, self.build_Aoo())\n return Avvoo\n\n # Intermediates to avoid construction of 3 body Hbar terms\n # in solving X amplitude equations.\n def build_Zvv(self):\n Zvv = 0\n Zvv += ndot('amef,mf->ae', self.Hvovv, self.x1, prefactor=2.0)\n Zvv += ndot('amfe,mf->ae', self.Hvovv, self.x1, prefactor=-1.0)\n Zvv -= ndot('mnaf,mnef->ae', self.x2, self.Loovv)\n return Zvv\n\n def build_Zoo(self):\n Zoo = 0\n Zoo -= ndot('mnie,ne->mi', self.Hooov, self.x1, prefactor=2.0)\n Zoo -= ndot('nmie,ne->mi', self.Hooov, self.x1, prefactor=-1.0)\n Zoo -= ndot('mnef,inef->mi', self.Loovv, self.x2)\n return Zoo\n\n # Intermediates to avoid construction of 3 body Hbar terms\n # in solving Y amplitude equations (just like in lambda equations).\n def build_Goo(self, t2, y2):\n Goo = 0\n Goo += ndot('mjab,ijab->mi', t2, y2)\n return Goo\n\n def build_Gvv(self, y2, t2):\n Gvv = 0\n Gvv -= ndot('ijab,ijeb->ae', y2, t2)\n return Gvv\n\n def update_X(self):\n \n # X1 and X2 amplitudes are the Fourier analogues of first order perturbed T1 and T2 amplitudes, \n # (eq. 65, reference 1). For a given perturbation, these amplitudes are frequency dependent and \n # can be obtained by solving a linear system of equations, (Hbar(0) - omgea * I)X = Hbar(1)\n # Refer to eq 70 of reference 1. 
Writing t_mu^(1)(omega) as X_mu and Hbar^(1)(omega) as A_bar,\n # X1 equations:\n # omega * X_ia = <phi^a_i|A_bar|O> + <phi^a_i|Hbar^(0)|phi^c_k> * X_kc + <phi^a_i|Hbar^(0)|phi^cd_kl> * X_klcd\n # X2 equations:\n # omega * X_ijab = <phi^ab_ij|A_bar|O> + <phi^ab_ij|Hbar^(0)|phi^c_k> * X_kc + <phi^ab_ij|Hbar^(0)|phi^cd_kl> * X_klcd\n # Note that the RHS terms have exactly the same structure as EOM-CCSD sigma equations.\n # Spin Orbital expressions (Einstein summation):\n\n # X1 equations: \n # -omega * X_ia + A_bar_ai + X_ie * Hvv_ae - X_ma * Hoo_mi + X_me * Hovvo_maei + X_miea * Hov_me \n # + 0.5 * X_imef * Hvovv_amef - 0.5 * X_mnae * Hooov_mnie = 0\n\n # X2 equations:\n # -omega * X_ijab + A_bar_abij + P(ij) X_ie * Hvvvo_abej - P(ab) X_ma * Hovoo_mbij \n # + P(ab) X_mf * Hvovv_amef * t_ijeb - P(ij) X_ne * Hooov_mnie * t_mjab \n # + P(ab) X_ijeb * Hvv_ae - P(ij) X_mjab * Hov_mi + 0.5 * X_mnab * Hoooo_mnij + 0.5 * X_ijef * Hvvvv_abef \n # + P(ij) P(ab) X_miea * Hovvo_mbej - 0.5 * P(ab) X_mnaf * Hoovv_mnef * t_ijeb\n # - 0.5 * P(ij) X_inef * Hoovv_mnef * t_mjab \n\n # It should be noted that in order to avoid construction of 3-body Hbar terms appearing in X2 equations like,\n # Hvvooov_bamjif = Hvovv_amef * t_ijeb, \n # Hvvooov_banjie = Hooov_mnie * t_mjab,\n # Hvoooov_bmnjif = Hoovv_mnef * t_ijeb, \n # Hvvoovv_banjef = Hoovv_mnef * t_mjab, \n # we make use of Z intermediates: \n # Zvv_ae = - Hooov_amef * X_mf - 0.5 * X_mnaf * Hoovv_mnef, \n # Zoo_mi = - X_ne * Hooov_mnie - 0.5 * Hoovv_mnef * X_inef, \n # And then contract Z with T2 amplitudes.\n \n # X1 equations \n r_x1 = self.build_Avo().swapaxes(0,1).copy()\n r_x1 -= self.omega * self.x1.copy()\n r_x1 += ndot('ie,ae->ia', self.x1, self.Hvv)\n r_x1 -= ndot('mi,ma->ia', self.Hoo, self.x1)\n r_x1 += ndot('maei,me->ia', self.Hovvo, self.x1, prefactor=2.0)\n r_x1 += ndot('maie,me->ia', self.Hovov, self.x1, prefactor=-1.0)\n r_x1 += ndot('miea,me->ia', self.x2, self.Hov, prefactor=2.0)\n r_x1 += ndot('imea,me->ia', self.x2, self.Hov, prefactor=-1.0)\n r_x1 += ndot('imef,amef->ia', self.x2, self.Hvovv, prefactor=2.0)\n r_x1 += ndot('imef,amfe->ia', self.x2, self.Hvovv, prefactor=-1.0)\n r_x1 -= ndot('mnie,mnae->ia', self.Hooov, self.x2, prefactor=2.0)\n r_x1 -= ndot('nmie,mnae->ia', self.Hooov, self.x2, prefactor=-1.0)\n # X1 equations over! \n\n # X2 equations \n # Final r_x2_ijab = r_x2_ijab + r_x2_jiba\n r_x2 = self.build_Avvoo().swapaxes(0,2).swapaxes(1,3).copy()\n # a factor of 0.5 because of the comment just above\n # and due to the fact that X2_ijab = X2_jiba \n r_x2 -= 0.5 * self.omega * self.x2\n r_x2 += ndot('ie,abej->ijab', self.x1, self.Hvvvo)\n r_x2 -= ndot('mbij,ma->ijab', self.Hovoo, self.x1)\n r_x2 += ndot('ijeb,ae->ijab', self.x2, self.Hvv)\n r_x2 -= ndot('mi,mjab->ijab', self.Hoo, self.x2)\n r_x2 += ndot('mnij,mnab->ijab', self.Hoooo, self.x2, prefactor=0.5)\n r_x2 += ndot('ijef,abef->ijab', self.x2, self.Hvvvv, prefactor=0.5)\n r_x2 += ndot('miea,mbej->ijab', self.x2, self.Hovvo, prefactor=2.0)\n r_x2 += ndot('miea,mbje->ijab', self.x2, self.Hovov, prefactor=-1.0)\n r_x2 -= ndot('imeb,maje->ijab', self.x2, self.Hovov)\n r_x2 -= ndot('imea,mbej->ijab', self.x2, self.Hovvo)\n r_x2 += ndot('mi,mjab->ijab', self.build_Zoo(), self.t2)\n r_x2 += ndot('ijeb,ae->ijab', self.t2, self.build_Zvv())\n # X2 equations over! 
\n\n old_x2 = self.x2.copy()\n old_x1 = self.x1.copy()\n\n # update X1 and X2\n self.x1 += r_x1/self.Dia\n # Final r_x2_ijab = r_x2_ijab + r_x2_jiba\n tmp = r_x2/self.Dijab\n self.x2 += tmp + tmp.swapaxes(0,1).swapaxes(2,3)\n\n # Calcuate rms with the residual \n rms = 0\n rms += np.einsum('ia,ia->', old_x1 - self.x1, old_x1 - self.x1)\n rms += np.einsum('ijab,ijab->', old_x2 - self.x2, old_x2 - self.x2)\n return np.sqrt(rms)\n\n def inhomogenous_y2(self):\n\n # Inhomogenous terms appearing in Y2 equations\n # <O|L1(0)|A_bar|phi^ab_ij>\n r_y2 = ndot('ia,jb->ijab', self.l1, self.build_Aov(), prefactor=2.0)\n r_y2 -= ndot('ja,ib->ijab', self.l1, self.build_Aov()) \n # <O|L2(0)|A_bar|phi^ab_ij>\n r_y2 += ndot('ijeb,ea->ijab', self.l2, self.build_Avv())\n r_y2 -= ndot('im,mjab->ijab', self.build_Aoo(), self.l2)\n # <O|L1(0)|[Hbar(0), X1]|phi^ab_ij>\n tmp = ndot('me,ja->meja', self.x1, self.l1)\n r_y2 -= ndot('mieb,meja->ijab', self.Loovv, tmp)\n tmp = ndot('me,mb->eb', self.x1, self.l1)\n r_y2 -= ndot('ijae,eb->ijab', self.Loovv, tmp)\n tmp = ndot('me,ie->mi', self.x1, self.l1)\n r_y2 -= ndot('mi,jmba->ijab', tmp, self.Loovv)\n tmp = ndot('me,jb->mejb', self.x1, self.l1, prefactor=2.0)\n r_y2 += ndot('imae,mejb->ijab', self.Loovv, tmp)\n # <O|L2(0)|[Hbar(0), X1]|phi^ab_ij>\n tmp = ndot('me,ma->ea', self.x1, self.Hov)\n r_y2 -= ndot('ijeb,ea->ijab', self.l2, tmp)\n tmp = ndot('me,ie->mi', self.x1, self.Hov)\n r_y2 -= ndot('mi,jmba->ijab', tmp, self.l2)\n tmp = ndot('me,ijef->mijf', self.x1, self.l2)\n r_y2 -= ndot('mijf,fmba->ijab', tmp, self.Hvovv)\n tmp = ndot('me,imbf->eibf', self.x1, self.l2)\n r_y2 -= ndot('eibf,fjea->ijab', tmp, self.Hvovv)\n tmp = ndot('me,jmfa->ejfa', self.x1, self.l2)\n r_y2 -= ndot('fibe,ejfa->ijab', self.Hvovv, tmp)\n tmp = ndot('me,fmae->fa', self.x1, self.Hvovv, prefactor=2.0)\n tmp -= ndot('me,fmea->fa', self.x1, self.Hvovv)\n r_y2 += ndot('ijfb,fa->ijab', self.l2, tmp)\n tmp = ndot('me,fiea->mfia', self.x1, self.Hvovv, prefactor=2.0)\n tmp -= ndot('me,fiae->mfia', self.x1, self.Hvovv)\n r_y2 += ndot('mfia,jmbf->ijab', tmp, self.l2)\n tmp = ndot('me,jmna->ejna', self.x1, self.Hooov)\n r_y2 += ndot('ineb,ejna->ijab', self.l2, tmp)\n tmp = ndot('me,mjna->ejna', self.x1, self.Hooov)\n r_y2 += ndot('nieb,ejna->ijab', self.l2, tmp)\n tmp = ndot('me,nmba->enba', self.x1, self.l2)\n r_y2 += ndot('jine,enba->ijab', self.Hooov, tmp)\n tmp = ndot('me,mina->eina', self.x1, self.Hooov, prefactor=2.0)\n tmp -= ndot('me,imna->eina', self.x1, self.Hooov)\n r_y2 -= ndot('eina,njeb->ijab', tmp, self.l2)\n tmp = ndot('me,imne->in', self.x1, self.Hooov, prefactor=2.0)\n tmp -= ndot('me,mine->in', self.x1, self.Hooov)\n r_y2 -= ndot('in,jnba->ijab', tmp, self.l2)\n # <O|L2(0)|[Hbar(0), X2]|phi^ab_ij>\n tmp = ndot('ijef,mnef->ijmn', self.l2, self.x2, prefactor=0.5) \n r_y2 += ndot('ijmn,mnab->ijab', tmp, self.get_MO('oovv')) \n tmp = ndot('ijfe,mnef->ijmn', self.get_MO('oovv'), self.x2, prefactor=0.5) \n r_y2 += ndot('ijmn,mnba->ijab', tmp, self.l2) \n tmp = ndot('mifb,mnef->ibne', self.l2, self.x2) \n r_y2 += ndot('ibne,jnae->ijab', tmp, self.get_MO('oovv')) \n tmp = ndot('imfb,mnef->ibne', self.l2, self.x2) \n r_y2 += ndot('ibne,njae->ijab', tmp, self.get_MO('oovv')) \n tmp = ndot('mjfb,mnef->jbne', self.l2, self.x2) \n r_y2 -= ndot('jbne,inae->ijab', tmp, self.Loovv) \n r_y2 -= ndot('in,jnba->ijab', self.build_Goo(self.Loovv, self.x2), self.l2) \n r_y2 += ndot('ijfb,af->ijab', self.l2, self.build_Gvv(self.Loovv, self.x2))\n r_y2 += ndot('ijae,be->ijab', self.Loovv, 
self.build_Gvv(self.l2, self.x2))\n r_y2 -= ndot('imab,jm->ijab', self.Loovv, self.build_Goo(self.l2, self.x2))\n tmp = ndot('nifb,mnef->ibme', self.l2, self.x2)\n r_y2 -= ndot('ibme,mjea->ijab', tmp, self.Loovv)\n tmp = ndot('njfb,mnef->jbme', self.l2, self.x2, prefactor=2.0)\n r_y2 += ndot('imae,jbme->ijab', self.Loovv, tmp)\n\n return r_y2\n\n\n def inhomogenous_y1(self):\n \n # Inhomogenous terms appearing in Y1 equations\n # <O|A_bar|phi^a_i>\n r_y1 = 2.0 * self.build_Aov().copy()\n # <O|L1(0)|A_bar|phi^a_i>\n r_y1 -= ndot('im,ma->ia', self.build_Aoo(), self.l1)\n r_y1 += ndot('ie,ea->ia', self.l1, self.build_Avv())\n # <O|L2(0)|A_bar|phi^a_i>\n r_y1 += ndot('imfe,feam->ia', self.l2, self.build_Avvvo())\n r_y1 -= ndot('ienm,mnea->ia', self.build_Aovoo(), self.l2, prefactor=0.5)\n r_y1 -= ndot('iemn,mnae->ia', self.build_Aovoo(), self.l2, prefactor=0.5)\n # <O|[Hbar(0), X1]|phi^a_i>\n r_y1 += ndot('imae,me->ia', self.Loovv, self.x1, prefactor=2.0)\n # <O|L1(0)|[Hbar(0), X1]|phi^a_i>\n tmp = ndot('ma,ie->miae', self.Hov, self.l1, prefactor=-1.0)\n tmp -= ndot('ma,ie->miae', self.l1, self.Hov)\n tmp -= ndot('mina,ne->miae', self.Hooov, self.l1, prefactor=2.0)\n tmp -= ndot('imna,ne->miae', self.Hooov, self.l1, prefactor=-1.0)\n tmp -= ndot('imne,na->miae', self.Hooov, self.l1, prefactor=2.0)\n tmp -= ndot('mine,na->miae', self.Hooov, self.l1, prefactor=-1.0)\n tmp += ndot('fmae,if->miae', self.Hvovv, self.l1, prefactor=2.0)\n tmp += ndot('fmea,if->miae', self.Hvovv, self.l1, prefactor=-1.0)\n tmp += ndot('fiea,mf->miae', self.Hvovv, self.l1, prefactor=2.0)\n tmp += ndot('fiae,mf->miae', self.Hvovv, self.l1, prefactor=-1.0)\n r_y1 += ndot('miae,me->ia', tmp, self.x1) \n # <O|L1(0)|[Hbar(0), X2]|phi^a_i>\n tmp = ndot('mnef,nf->me', self.x2, self.l1, prefactor=2.0)\n tmp += ndot('mnfe,nf->me', self.x2, self.l1, prefactor=-1.0)\n r_y1 += ndot('imae,me->ia', self.Loovv, tmp)\n r_y1 -= ndot('ni,na->ia', self.build_Goo(self.x2, self.Loovv), self.l1)\n r_y1 += ndot('ie,ea->ia', self.l1, self.build_Gvv(self.x2, self.Loovv))\n # <O|L2(0)|[Hbar(0), X1]|phi^a_i>\n tmp = ndot('nief,mfna->iema', self.l2, self.Hovov, prefactor=-1.0)\n tmp -= ndot('ifne,nmaf->iema', self.Hovov, self.l2)\n tmp -= ndot('inef,mfan->iema', self.l2, self.Hovvo)\n tmp -= ndot('ifen,nmfa->iema', self.Hovvo, self.l2)\n tmp += ndot('imfg,fgae->iema', self.l2, self.Hvvvv, prefactor=0.5)\n tmp += ndot('imgf,fgea->iema', self.l2, self.Hvvvv, prefactor=0.5)\n tmp += ndot('imno,onea->iema', self.Hoooo, self.l2, prefactor=0.5)\n tmp += ndot('mino,noea->iema', self.Hoooo, self.l2, prefactor=0.5)\n r_y1 += ndot('iema,me->ia', tmp, self.x1) \n tmp = ndot('nb,fb->nf', self.x1, self.build_Gvv(self.t2, self.l2))\n r_y1 += ndot('inaf,nf->ia', self.Loovv, tmp) \n tmp = ndot('me,fa->mefa', self.x1, self.build_Gvv(self.t2, self.l2))\n r_y1 += ndot('mief,mefa->ia', self.Loovv, tmp)\n tmp = ndot('me,ni->meni', self.x1, self.build_Goo(self.t2, self.l2))\n r_y1 -= ndot('meni,mnea->ia', tmp, self.Loovv)\n tmp = ndot('jf,nj->fn', self.x1, self.build_Goo(self.t2, self.l2))\n r_y1 -= ndot('inaf,fn->ia', self.Loovv, tmp)\n # <O|L2(0)|[Hbar(0), X2]|phi^a_i>\n r_y1 -= ndot('mi,ma->ia', self.build_Goo(self.x2, self.l2), self.Hov) \n r_y1 += ndot('ie,ea->ia', self.Hov, self.build_Gvv(self.x2, self.l2)) \n tmp = ndot('imfg,mnef->igne',self.l2, self.x2)\n r_y1 -= ndot('igne,gnea->ia', tmp, self.Hvovv)\n tmp = ndot('mifg,mnef->igne',self.l2, self.x2)\n r_y1 -= ndot('igne,gnae->ia', tmp, self.Hvovv)\n tmp = ndot('mnga,mnef->gaef',self.l2, self.x2)\n r_y1 -= 
ndot('gief,gaef->ia', self.Hvovv, tmp)\n tmp = ndot('gmae,mnef->ganf',self.Hvovv, self.x2, prefactor=2.0)\n tmp += ndot('gmea,mnef->ganf',self.Hvovv, self.x2, prefactor=-1.0)\n r_y1 += ndot('nifg,ganf->ia', self.l2, tmp)\n r_y1 -= ndot('giea,ge->ia', self.Hvovv, self.build_Gvv(self.l2, self.x2), prefactor=2.0) \n r_y1 -= ndot('giae,ge->ia', self.Hvovv, self.build_Gvv(self.l2, self.x2), prefactor=-1.0)\n tmp = ndot('oief,mnef->oimn', self.l2, self.x2) \n r_y1 += ndot('oimn,mnoa->ia', tmp, self.Hooov)\n tmp = ndot('mofa,mnef->oane', self.l2, self.x2) \n r_y1 += ndot('inoe,oane->ia', self.Hooov, tmp)\n tmp = ndot('onea,mnef->oamf', self.l2, self.x2) \n r_y1 += ndot('miof,oamf->ia', self.Hooov, tmp)\n r_y1 -= ndot('mioa,mo->ia', self.Hooov, self.build_Goo(self.x2, self.l2), prefactor=2.0) \n r_y1 -= ndot('imoa,mo->ia', self.Hooov, self.build_Goo(self.x2, self.l2), prefactor=-1.0) \n tmp = ndot('imoe,mnef->ionf', self.Hooov, self.x2, prefactor=-2.0) \n tmp -= ndot('mioe,mnef->ionf', self.Hooov, self.x2, prefactor=-1.0) \n r_y1 += ndot('ionf,nofa->ia', tmp, self.l2)\n \n return r_y1\n\n def update_Y(self):\n\n # Y1 and Y2 amplitudes are the Fourier analogues of first order perturbed L1 and L2 amplitudes, \n # While X amplitudes are referred to as right hand perturbed amplitudes, Y amplitudes are the\n # left hand perturbed amplitudes. Just like X1 and X2, they can be obtained by solving a linear \n # sytem of equations. Refer to eq 73 of reference 1. for Writing l_mu^(1)(omega) as Y_mu, \n # Y1 equations:\n # omega * Y_ia + Y_kc * <phi^c_k|Hbar(0)|phi^a_i> + Y_klcd * <phi^cd_kl|Hbar(0)|phi^a_i> \n # + <O|(1 + L(0))|Hbar_bar(1)(omega)|phi^a_i> = 0\n # Y2 equations: \n # omega * Y_ijab + Y_kc * <phi^c_k|Hbar(0)|phi^ab_ij> + Y_klcd * <phi^cd_kl|Hbar(0)|phi^ab_ij> \n # + <O|(1 + L(0))|Hbar_bar(1)(omega)|phi^ab_ij> = 0\n # where Hbar_bar(1)(omega) = Hbar(1) + [Hbar(0), T(1)] = A_bar + [Hbar(0), X]\n # Note that the homogenous terms of Y1 and Y2 equations except the omega term are exactly identical in \n # structure to the L1 and L2 equations and just like lambdas, the equations for these Y amplitudes have \n # been derived using the unitray group approach. 
Please refer to helper_cclambda file for a complete \n # decsription.\n\n # Y1 equations\n # Inhomogenous terms\n r_y1 = self.im_y1.copy()\n # Homogenous terms now!\n r_y1 += self.omega * self.y1\n r_y1 += ndot('ie,ea->ia', self.y1, self.Hvv)\n r_y1 -= ndot('im,ma->ia', self.Hoo, self.y1)\n r_y1 += ndot('ieam,me->ia', self.Hovvo, self.y1, prefactor=2.0)\n r_y1 += ndot('iema,me->ia', self.Hovov, self.y1, prefactor=-1.0)\n r_y1 += ndot('imef,efam->ia', self.y2, self.Hvvvo)\n r_y1 -= ndot('iemn,mnae->ia', self.Hovoo, self.y2)\n r_y1 -= ndot('eifa,ef->ia', self.Hvovv, self.build_Gvv(self.y2, self.t2), prefactor=2.0)\n r_y1 -= ndot('eiaf,ef->ia', self.Hvovv, self.build_Gvv(self.y2, self.t2), prefactor=-1.0)\n r_y1 -= ndot('mina,mn->ia', self.Hooov, self.build_Goo(self.t2, self.y2), prefactor=2.0)\n r_y1 -= ndot('imna,mn->ia', self.Hooov, self.build_Goo(self.t2, self.y2), prefactor=-1.0)\n # Y1 equations over!\n\n # Y2 equations\n # Final r_y2_ijab = r_y2_ijab + r_y2_jiba\n # Inhomogenous terms\n r_y2 = self.im_y2.copy()\n # Homogenous terms now!\n # a factor of 0.5 because of the relation/comment just above\n # and due to the fact that Y2_ijab = Y2_jiba \n r_y2 += 0.5 * self.omega * self.y2.copy()\n r_y2 += ndot('ia,jb->ijab', self.y1, self.Hov, prefactor=2.0)\n r_y2 -= ndot('ja,ib->ijab', self.y1, self.Hov)\n r_y2 += ndot('ijeb,ea->ijab', self.y2, self.Hvv)\n r_y2 -= ndot('im,mjab->ijab', self.Hoo, self.y2)\n r_y2 += ndot('ijmn,mnab->ijab', self.Hoooo, self.y2, prefactor=0.5)\n r_y2 += ndot('ijef,efab->ijab', self.y2, self.Hvvvv, prefactor=0.5)\n r_y2 += ndot('ie,ejab->ijab', self.y1, self.Hvovv, prefactor=2.0)\n r_y2 += ndot('ie,ejba->ijab', self.y1, self.Hvovv, prefactor=-1.0)\n r_y2 -= ndot('mb,jima->ijab', self.y1, self.Hooov, prefactor=2.0)\n r_y2 -= ndot('mb,ijma->ijab', self.y1, self.Hooov, prefactor=-1.0)\n r_y2 += ndot('ieam,mjeb->ijab', self.Hovvo, self.y2, prefactor=2.0)\n r_y2 += ndot('iema,mjeb->ijab', self.Hovov, self.y2, prefactor=-1.0)\n r_y2 -= ndot('mibe,jema->ijab', self.y2, self.Hovov)\n r_y2 -= ndot('mieb,jeam->ijab', self.y2, self.Hovvo)\n r_y2 += ndot('ijeb,ae->ijab', self.Loovv, self.build_Gvv(self.y2, self.t2))\n r_y2 -= ndot('mi,mjab->ijab', self.build_Goo(self.t2, self.y2), self.Loovv)\n # Y2 equations over!\n\n old_y1 = self.y1.copy()\n old_y2 = self.y2.copy()\n\n # update Y1 and Y2\n self.y1 += r_y1/self.Dia\n # Final r_y2_ijab = r_y2_ijab + r_y2_jiba\n tmp = r_y2/self.Dijab \n self.y2 += tmp + tmp.swapaxes(0,1).swapaxes(2,3) \n\n # Calcuate rms from the residual \n rms = np.einsum('ia,ia->', r_y1/self.Dia, r_y1/self.Dia)\n rms += np.einsum('ijab,ijab->', old_y2 - self.y2, old_y2 - self.y2)\n return np.sqrt(rms)\n\n def pseudoresponse(self, hand):\n polar1 = 0\n polar2 = 0\n if hand == 'right':\n z1 = self.x1 ; z2 = self.x2\n else:\n z1 = self.y1 ; z2 = self.y2\n\n # To match the pseudoresponse values with PSI4\n polar1 += ndot('ia,ai->', z1, self.build_Avo(), prefactor=2.0)\n tmp = self.pertbar_ijab + self.pertbar_ijab.swapaxes(0,1).swapaxes(2,3) \n polar2 += ndot('ijab,ijab->', z2, tmp, prefactor=2.0)\n polar2 += ndot('ijba,ijab->', z2, tmp, prefactor=-1.0)\n\n return -2.0 * (polar1 + polar2)\n\n def solve(self, hand, r_conv=1.e-7, maxiter=100, max_diis=8, start_diis=1):\n\n ### Start of the solve routine \n ccpert_tstart = time.time()\n \n # calculate the pseudoresponse from guess amplitudes\n pseudoresponse_old = self.pseudoresponse(hand)\n print(\"CCPERT_%s Iteration %3d: pseudoresponse = %.15f dE = % .5E \" % (self.name, 0, pseudoresponse_old, 
-pseudoresponse_old))\n\n # Set up DIIS before iterations begin\n if hand == 'right':\n diis_object = helper_diis(self.x1, self.x2, max_diis)\n else:\n diis_object = helper_diis(self.y1, self.y2, max_diis)\n # calculate the inhomogenous terms of the left hand amplitudes equation before iterations begin\n self.im_y1 = self.inhomogenous_y1()\n self.im_y2 = self.inhomogenous_y2()\n\n # Iterate!\n for CCPERT_iter in range(1, maxiter + 1):\n\n # Residual build and update\n if hand == 'right':\n rms = self.update_X()\n else:\n rms = self.update_Y()\n\n # pseudoresponse with updated amplitudes\n pseudoresponse = self.pseudoresponse(hand)\n\n # Print CCPERT iteration information\n print('CCPERT_%s Iteration %3d: pseudoresponse = %.15f dE = % .5E DIIS = %d' % (self.name, CCPERT_iter, pseudoresponse, (pseudoresponse - pseudoresponse_old), diis_object.diis_size))\n\n # Check convergence\n if (rms < r_conv):\n print('\\nCCPERT_%s has converged in %.3f seconds!' % (self.name, time.time() - ccpert_tstart))\n return pseudoresponse\n\n # Update old pseudoresponse\n pseudoresponse_old = pseudoresponse\n\n # Add the new error vector\n if hand == 'right':\n diis_object.add_error_vector(self.x1, self.x2)\n else:\n diis_object.add_error_vector(self.y1, self.y2)\n\n\n if CCPERT_iter >= start_diis:\n if hand == 'right': \n self.x1, self.x2 = diis_object.extrapolate(self.x1, self.x2)\n else: \n self.y1, self.y2 = diis_object.extrapolate(self.y1, self.y2)\n\n# End HelperCCPert class\n\nclass HelperCCLinresp(object):\n\n def __init__(self, cclambda, ccpert_A, ccpert_B):\n\n # start of the cclinresp class \n time_init = time.time()\n # Grab all the info from ccpert obejct, a and b here are the two \n # perturbations Ex. for dipole polarizabilities, A = mu, B = mu (dipole operator) \n self.ccpert_A = ccpert_A\n self.ccpert_B = ccpert_B\n self.pert_A = ccpert_A.pert\n self.pert_B = ccpert_B.pert\n self.l1 = cclambda.l1\n self.l2 = cclambda.l2\n # Grab X and Y amplitudes corresponding to perturbation A\n self.x1_A = ccpert_A.x1\n self.x2_A = ccpert_A.x2\n self.y1_A = ccpert_A.y1\n self.y2_A = ccpert_A.y2\n # Grab X and Y amplitudes corresponding to perturbation B\n self.x1_B = ccpert_B.x1\n self.x2_B = ccpert_B.x2\n self.y1_B = ccpert_B.y1\n self.y2_B = ccpert_B.y2\n\n\n def linresp(self):\n\n # Please refer to equation 78 of reference 1. 
\n # Writing H(1)(omega) = B, T(1)(omega) = X, L(1)(omega) = Y\n # <<A;B>> = <0|Y(B) * A_bar|0> + <0|(1+L(0))[A_bar, X(B)]|0> \n # polar1 polar2\n self.polar1 = 0\n self.polar2 = 0\n # <0|Y1(B) * A_bar|0>\n self.polar1 += ndot(\"ai,ia->\", self.ccpert_A.build_Avo(), self.y1_B)\n # <0|Y2(B) * A_bar|0>\n self.polar1 += ndot(\"abij,ijab->\", self.ccpert_A.build_Avvoo(), self.y2_B, prefactor=0.5)\n self.polar1 += ndot(\"baji,ijab->\", self.ccpert_A.build_Avvoo(), self.y2_B, prefactor=0.5)\n # <0|[A_bar, X(B)]|0>\n self.polar2 += ndot(\"ia,ia->\", self.ccpert_A.build_Aov(), self.x1_B, prefactor=2.0)\n # <0|L1(0)[A_bar, X1(B)]|0>\n tmp = ndot('ia,ic->ac', self.l1, self.x1_B)\n self.polar2 += ndot('ac,ac->', tmp, self.ccpert_A.build_Avv())\n tmp = ndot('ia,ka->ik', self.l1, self.x1_B)\n self.polar2 -= ndot('ik,ki->', tmp, self.ccpert_A.build_Aoo())\n # <0|L1(0)[A_bar, X2(B)]|0>\n tmp = ndot('ia,jb->ijab', self.l1, self.ccpert_A.build_Aov())\n self.polar2 += ndot('ijab,ijab->', tmp, self.x2_B, prefactor=2.0)\n self.polar2 += ndot('ijab,ijba->', tmp, self.x2_B, prefactor=-1.0)\n # <0|L2(0)[A_bar, X1(B)]|0>\n tmp = ndot('ijbc,bcaj->ia', self.l2, self.ccpert_A.build_Avvvo())\n self.polar2 += ndot('ia,ia->', tmp, self.x1_B)\n tmp = ndot('ijab,kbij->ak', self.l2, self.ccpert_A.build_Aovoo())\n self.polar2 -= ndot('ak,ka->', tmp, self.x1_B, prefactor=0.5)\n tmp = ndot('ijab,kaji->bk', self.l2, self.ccpert_A.build_Aovoo())\n self.polar2 -= ndot('bk,kb->', tmp, self.x1_B, prefactor=0.5)\n # <0|L2(0)[A_bar, X1(B)]|0>\n tmp = ndot('ijab,kjab->ik', self.l2, self.x2_B)\n self.polar2 -= ndot('ik,ki->', tmp, self.ccpert_A.build_Aoo(), prefactor=0.5)\n tmp = ndot('ijab,kiba->jk', self.l2, self.x2_B,)\n self.polar2 -= ndot('jk,kj->', tmp, self.ccpert_A.build_Aoo(), prefactor=0.5)\n tmp = ndot('ijab,ijac->bc', self.l2, self.x2_B,)\n self.polar2 += ndot('bc,bc->', tmp, self.ccpert_A.build_Avv(), prefactor=0.5)\n tmp = ndot('ijab,ijcb->ac', self.l2, self.x2_B,)\n self.polar2 += ndot('ac,ac->', tmp, self.ccpert_A.build_Avv(), prefactor=0.5)\n\n return -1.0*(self.polar1 + self.polar2)\n\n# End HelperCCLinresp class\n", "\"\"\"\nA restricted Hartree-Fock script using the Psi4NumPy Formalism\n\"\"\"\n\n__authors__ = \"Daniel G. A. Smith\"\n__credits__ = [\"Daniel G. A. 
Smith\"]\n\n__copyright__ = \"(c) 2014-2017, The Psi4NumPy Developers\"\n__license__ = \"BSD-3-Clause\"\n__date__ = \"2017-9-30\"\n\nimport time\nimport numpy as np\nnp.set_printoptions(precision=5, linewidth=200, suppress=True)\nimport psi4\n\n# Memory for Psi4 in GB\npsi4.set_memory('500 MB')\npsi4.core.set_output_file(\"output.dat\", False)\n\n# Memory for numpy in GB\nnumpy_memory = 2\n\nmol = psi4.geometry(\"\"\"\nO\nH 1 1.1\nH 1 1.1 2 104\nsymmetry c1\n\"\"\")\n\npsi4.set_options({'basis': 'cc-pvdz',\n 'scf_type': 'pk',\n 'e_convergence': 1e-8})\n\n# Set defaults\nmaxiter = 40\nE_conv = 1.0E-6\nD_conv = 1.0E-3\n\n# Integral generation from Psi4's MintsHelper\nwfn = psi4.core.Wavefunction.build(mol, psi4.core.get_global_option('BASIS'))\nt = time.time()\nmints = psi4.core.MintsHelper(wfn.basisset())\nS = np.asarray(mints.ao_overlap())\n\n# Get nbf and ndocc for closed shell molecules\nnbf = S.shape[0]\nndocc = wfn.nalpha()\n\nprint('\\nNumber of occupied orbitals: %d' % ndocc)\nprint('Number of basis functions: %d' % nbf)\n\n# Run a quick check to make sure everything will fit into memory\nI_Size = (nbf**4) * 8.e-9\nprint(\"\\nSize of the ERI tensor will be %4.2f GB.\" % I_Size)\n\n# Estimate memory usage\nmemory_footprint = I_Size * 1.5\nif I_Size > numpy_memory:\n psi4.core.clean()\n raise Exception(\"Estimated memory utilization (%4.2f GB) exceeds numpy_memory \\\n limit of %4.2f GB.\" % (memory_footprint, numpy_memory))\n\n# Compute required quantities for SCF\nV = np.asarray(mints.ao_potential())\nT = np.asarray(mints.ao_kinetic())\nI = np.asarray(mints.ao_eri())\n\nprint('\\nTotal time taken for integrals: %.3f seconds.' % (time.time() - t))\nt = time.time()\n\n# Build H_core\nH = T + V\n\n# Orthogonalizer A = S^(-1/2) using Psi4's matrix power.\nA = mints.ao_overlap()\nA.power(-0.5, 1.e-16)\nA = np.asarray(A)\n\n# Calculate initial core guess\nHp = A.dot(H).dot(A)\ne, C2 = np.linalg.eigh(Hp)\nC = A.dot(C2)\nCocc = C[:, :ndocc]\nD = np.einsum('pi,qi->pq', Cocc, Cocc)\n\nprint('\\nTotal time taken for setup: %.3f seconds' % (time.time() - t))\n\nprint('\\nStart SCF iterations:\\n')\nt = time.time()\nE = 0.0\nEnuc = mol.nuclear_repulsion_energy()\nEold = 0.0\nDold = np.zeros_like(D)\n\nfor SCF_ITER in range(1, maxiter + 1):\n\n # Build fock matrix\n J = np.einsum('pqrs,rs->pq', I, D)\n K = np.einsum('prqs,rs->pq', I, D)\n F = H + J * 2 - K\n\n diis_e = np.einsum('ij,jk,kl->il', F, D, S) - np.einsum('ij,jk,kl->il', S, D, F)\n diis_e = A.dot(diis_e).dot(A)\n\n # SCF energy and update\n SCF_E = np.einsum('pq,pq->', F + H, D) + Enuc\n dRMS = np.mean(diis_e**2)**0.5\n\n print('SCF Iteration %3d: Energy = %4.16f dE = % 1.5E dRMS = %1.5E' % (SCF_ITER, SCF_E, (SCF_E - Eold), dRMS))\n if (abs(SCF_E - Eold) < E_conv) and (dRMS < D_conv):\n break\n\n Eold = SCF_E\n Dold = D\n\n # Diagonalize Fock matrix\n Fp = A.dot(F).dot(A)\n e, C2 = np.linalg.eigh(Fp)\n C = A.dot(C2)\n Cocc = C[:, :ndocc]\n D = np.einsum('pi,qi->pq', Cocc, Cocc)\n\n if SCF_ITER == maxiter:\n clean()\n raise Exception(\"Maximum number of SCF cycles exceeded.\")\n\nprint('Total time for SCF iterations: %.3f seconds \\n' % (time.time() - t))\n\nprint('Final SCF energy: %.8f hartree' % SCF_E)\nSCF_E_psi = psi4.energy('SCF')\npsi4.compare_values(SCF_E_psi, SCF_E, 6, 'SCF Energy')\n", "\"\"\"\nA simple python script to compute RHF-CCSD energy. Equations (Spin orbitals) from reference 1\nhave been spin-factored. However, explicit building of Wabef intermediates are avoided here.\n\nReferences: \n1. J.F. Stanton, J. Gauss, J.D. 
Watts, and R.J. Bartlett, \n J. Chem. Phys., volume 94, pp. 4334-4345 (1991).\n\"\"\"\n\n__authors__ = \"Ashutosh Kumar\"\n__credits__ = [\n \"T. D. Crawford\", \"Daniel G. A. Smith\", \"Lori A. Burns\", \"Ashutosh Kumar\"\n]\n\n__copyright__ = \"(c) 2014-2017, The Psi4NumPy Developers\"\n__license__ = \"BSD-3-Clause\"\n__date__ = \"2017-05-17\"\n\nimport time\nimport numpy as np\nimport psi4\nfrom utils import ndot\nfrom utils import helper_diis\n\n\nclass HelperCCEnergy(object):\n def __init__(self, mol, rhf_e, rhf_wfn, memory=2):\n\n print(\"\\nInitalizing CCSD object...\\n\")\n\n # Integral generation from Psi4's MintsHelper\n time_init = time.time()\n\n self.rhf_e = rhf_e\n self.wfn = rhf_wfn\n\n self.ccsd_corr_e = 0.0\n self.ccsd_e = 0.0\n\n self.ndocc = self.wfn.doccpi()[0]\n self.nmo = self.wfn.nmo()\n self.memory = memory\n self.C = self.wfn.Ca()\n self.npC = np.asarray(self.C)\n\n self.mints = psi4.core.MintsHelper(self.wfn.basisset())\n H = np.asarray(self.mints.ao_kinetic()) + np.asarray(\n self.mints.ao_potential())\n self.nmo = H.shape[0]\n\n # Update H, transform to MO basis\n H = np.einsum('uj,vi,uv', self.npC, self.npC, H)\n\n print('Starting AO -> MO transformation...')\n\n ERI_Size = self.nmo * 128.e-9\n memory_footprint = ERI_Size * 5\n if memory_footprint > self.memory:\n psi.clean()\n raise Exception(\n \"Estimated memory utilization (%4.2f GB) exceeds numpy_memory \\\n limit of %4.2f GB.\" % (memory_footprint,\n self.memory))\n\n # Integral generation from Psi4's MintsHelper\n self.MO = np.asarray(self.mints.mo_eri(self.C, self.C, self.C, self.C))\n # Physicist notation\n self.MO = self.MO.swapaxes(1, 2)\n print(\"Size of the ERI tensor is %4.2f GB, %d basis functions.\" %\n (ERI_Size, self.nmo))\n\n # Update nocc and nvirt\n self.nocc = self.ndocc\n self.nvirt = self.nmo - self.nocc\n\n # Make slices\n self.slice_o = slice(0, self.nocc)\n self.slice_v = slice(self.nocc, self.nmo)\n self.slice_a = slice(0, self.nmo)\n self.slice_dict = {\n 'o': self.slice_o,\n 'v': self.slice_v,\n 'a': self.slice_a\n }\n\n # Compute Fock matrix\n self.F = H + 2.0 * np.einsum('pmqm->pq',\n self.MO[:, self.slice_o, :, self.slice_o])\n self.F -= np.einsum('pmmq->pq',\n self.MO[:, self.slice_o, self.slice_o, :])\n\n ### Occupied and Virtual orbital energies\n Focc = np.diag(self.F)[self.slice_o]\n Fvir = np.diag(self.F)[self.slice_v]\n\n self.Dia = Focc.reshape(-1, 1) - Fvir\n self.Dijab = Focc.reshape(-1, 1, 1, 1) + Focc.reshape(\n -1, 1, 1) - Fvir.reshape(-1, 1) - Fvir\n\n ### Construct initial guess\n print('Building initial guess...')\n # t^a_i\n self.t1 = np.zeros((self.nocc, self.nvirt))\n # t^{ab}_{ij}\n self.t2 = self.MO[self.slice_o, self.slice_o, self.slice_v,\n self.slice_v] / self.Dijab\n\n print('\\n..initialized CCSD in %.3f seconds.\\n' %\n (time.time() - time_init))\n\n # occ orbitals : i, j, k, l, m, n\n # virt orbitals : a, b, c, d, e, f\n # all oribitals : p, q, r, s, t, u, v\n\n def get_MO(self, string):\n if len(string) != 4:\n psi4.core.clean()\n raise Exception('get_MO: string %s must have 4 elements.' % string)\n return self.MO[self.slice_dict[string[0]], self.slice_dict[string[1]],\n self.slice_dict[string[2]], self.slice_dict[string[3]]]\n\n def get_F(self, string):\n if len(string) != 2:\n psi4.core.clean()\n raise Exception('get_F: string %s must have 4 elements.' 
% string)\n return self.F[self.slice_dict[string[0]], self.slice_dict[string[1]]]\n\n #Equations from Reference 1 (Stanton's paper)\n\n #Bulid Eqn 9:\n def build_tilde_tau(self):\n ttau = self.t2.copy()\n tmp = 0.5 * np.einsum('ia,jb->ijab', self.t1, self.t1)\n ttau += tmp\n return ttau\n\n #Build Eqn 10:\n def build_tau(self):\n ttau = self.t2.copy()\n tmp = np.einsum('ia,jb->ijab', self.t1, self.t1)\n ttau += tmp\n return ttau\n\n #Build Eqn 3:\n def build_Fae(self):\n Fae = self.get_F('vv').copy()\n Fae -= ndot('me,ma->ae', self.get_F('ov'), self.t1, prefactor=0.5)\n Fae += ndot('mf,mafe->ae', self.t1, self.get_MO('ovvv'), prefactor=2.0)\n Fae += ndot(\n 'mf,maef->ae', self.t1, self.get_MO('ovvv'), prefactor=-1.0)\n Fae -= ndot(\n 'mnaf,mnef->ae',\n self.build_tilde_tau(),\n self.get_MO('oovv'),\n prefactor=2.0)\n Fae -= ndot(\n 'mnaf,mnfe->ae',\n self.build_tilde_tau(),\n self.get_MO('oovv'),\n prefactor=-1.0)\n return Fae\n\n #Build Eqn 4:\n def build_Fmi(self):\n Fmi = self.get_F('oo').copy()\n Fmi += ndot('ie,me->mi', self.t1, self.get_F('ov'), prefactor=0.5)\n Fmi += ndot('ne,mnie->mi', self.t1, self.get_MO('ooov'), prefactor=2.0)\n Fmi += ndot(\n 'ne,mnei->mi', self.t1, self.get_MO('oovo'), prefactor=-1.0)\n Fmi += ndot(\n 'inef,mnef->mi',\n self.build_tilde_tau(),\n self.get_MO('oovv'),\n prefactor=2.0)\n Fmi += ndot(\n 'inef,mnfe->mi',\n self.build_tilde_tau(),\n self.get_MO('oovv'),\n prefactor=-1.0)\n return Fmi\n\n #Build Eqn 5:\n def build_Fme(self):\n Fme = self.get_F('ov').copy()\n Fme += ndot('nf,mnef->me', self.t1, self.get_MO('oovv'), prefactor=2.0)\n Fme += ndot(\n 'nf,mnfe->me', self.t1, self.get_MO('oovv'), prefactor=-1.0)\n return Fme\n\n #Build Eqn 6:\n def build_Wmnij(self):\n Wmnij = self.get_MO('oooo').copy()\n Wmnij += ndot('je,mnie->mnij', self.t1, self.get_MO('ooov'))\n Wmnij += ndot('ie,mnej->mnij', self.t1, self.get_MO('oovo'))\n # prefactor of 1 instead of 0.5 below to fold the last term of\n # 0.5 * tau_ijef Wabef in Wmnij contraction: 0.5 * tau_mnab Wmnij_mnij\n Wmnij += ndot(\n 'ijef,mnef->mnij',\n self.build_tau(),\n self.get_MO('oovv'),\n prefactor=1.0)\n return Wmnij\n\n #Build Eqn 8:\n def build_Wmbej(self):\n Wmbej = self.get_MO('ovvo').copy()\n Wmbej += ndot('jf,mbef->mbej', self.t1, self.get_MO('ovvv'))\n Wmbej -= ndot('nb,mnej->mbej', self.t1, self.get_MO('oovo'))\n tmp = (0.5 * self.t2)\n tmp += np.einsum('jf,nb->jnfb', self.t1, self.t1)\n Wmbej -= ndot('jnfb,mnef->mbej', tmp, self.get_MO('oovv'))\n Wmbej += ndot(\n 'njfb,mnef->mbej', self.t2, self.get_MO('oovv'), prefactor=1.0)\n Wmbej += ndot(\n 'njfb,mnfe->mbej', self.t2, self.get_MO('oovv'), prefactor=-0.5)\n return Wmbej\n\n # This intermediate appaears in the spin factorization of Wmbej terms.\n def build_Wmbje(self):\n Wmbje = -1.0 * (self.get_MO('ovov').copy())\n Wmbje -= ndot('jf,mbfe->mbje', self.t1, self.get_MO('ovvv'))\n Wmbje += ndot('nb,mnje->mbje', self.t1, self.get_MO('ooov'))\n tmp = (0.5 * self.t2)\n tmp += np.einsum('jf,nb->jnfb', self.t1, self.t1)\n Wmbje += ndot('jnfb,mnfe->mbje', tmp, self.get_MO('oovv'))\n return Wmbje\n\n # This intermediate is required to build second term of 0.5 * tau_ijef * Wabef,\n # as explicit construction of Wabef is avoided here.\n def build_Zmbij(self):\n Zmbij = 0\n Zmbij += ndot('mbef,ijef->mbij', self.get_MO('ovvv'), self.build_tau())\n return Zmbij\n\n def update(self):\n\n ### Build OEI intermediates\n Fae = self.build_Fae()\n Fmi = self.build_Fmi()\n Fme = self.build_Fme()\n\n #### Build residual of T1 equations by spin adaption of Eqn 
1:\n r_T1 = self.get_F('ov').copy()\n r_T1 += ndot('ie,ae->ia', self.t1, Fae)\n r_T1 -= ndot('ma,mi->ia', self.t1, Fmi)\n r_T1 += ndot('imae,me->ia', self.t2, Fme, prefactor=2.0)\n r_T1 += ndot('imea,me->ia', self.t2, Fme, prefactor=-1.0)\n r_T1 += ndot(\n 'nf,nafi->ia', self.t1, self.get_MO('ovvo'), prefactor=2.0)\n r_T1 += ndot(\n 'nf,naif->ia', self.t1, self.get_MO('ovov'), prefactor=-1.0)\n r_T1 += ndot(\n 'mief,maef->ia', self.t2, self.get_MO('ovvv'), prefactor=2.0)\n r_T1 += ndot(\n 'mife,maef->ia', self.t2, self.get_MO('ovvv'), prefactor=-1.0)\n r_T1 -= ndot(\n 'mnae,nmei->ia', self.t2, self.get_MO('oovo'), prefactor=2.0)\n r_T1 -= ndot(\n 'mnae,nmie->ia', self.t2, self.get_MO('ooov'), prefactor=-1.0)\n\n ### Build residual of T2 equations by spin adaptation of Eqn 2:\n # <ij||ab> -> <ij|ab>\n # spin -> spin-adapted (<alpha beta| alpha beta>)\n r_T2 = self.get_MO('oovv').copy()\n\n # Conventions used:\n # P(ab) f(a,b) = f(a,b) - f(b,a)\n # P(ij) f(i,j) = f(i,j) - f(j,i)\n # P^(ab)_(ij) f(a,b,i,j) = f(a,b,i,j) + f(b,a,j,i)\n\n # P(ab) {t_ijae Fae_be} -> P^(ab)_(ij) {t_ijae Fae_be}\n tmp = ndot('ijae,be->ijab', self.t2, Fae)\n r_T2 += tmp\n r_T2 += tmp.swapaxes(0, 1).swapaxes(2, 3)\n\n # P(ab) {-0.5 * t_ijae t_mb Fme_me} -> P^(ab)_(ij) {-0.5 * t_ijae t_mb Fme_me}\n tmp = ndot('mb,me->be', self.t1, Fme)\n first = ndot('ijae,be->ijab', self.t2, tmp, prefactor=0.5)\n r_T2 -= first\n r_T2 -= first.swapaxes(0, 1).swapaxes(2, 3)\n\n # P(ij) {-t_imab Fmi_mj} -> P^(ab)_(ij) {-t_imab Fmi_mj}\n tmp = ndot('imab,mj->ijab', self.t2, Fmi, prefactor=1.0)\n r_T2 -= tmp\n r_T2 -= tmp.swapaxes(0, 1).swapaxes(2, 3)\n\n # P(ij) {-0.5 * t_imab t_je Fme_me} -> P^(ab)_(ij) {-0.5 * t_imab t_je Fme_me}\n tmp = ndot('je,me->jm', self.t1, Fme)\n first = ndot('imab,jm->ijab', self.t2, tmp, prefactor=0.5)\n r_T2 -= first\n r_T2 -= first.swapaxes(0, 1).swapaxes(2, 3)\n\n # Build TEI Intermediates\n tmp_tau = self.build_tau()\n Wmnij = self.build_Wmnij()\n Wmbej = self.build_Wmbej()\n Wmbje = self.build_Wmbje()\n Zmbij = self.build_Zmbij()\n\n # 0.5 * tau_mnab Wmnij_mnij -> tau_mnab Wmnij_mnij\n # This also includes the last term in 0.5 * tau_ijef Wabef\n # as Wmnij is modified to include this contribution.\n r_T2 += ndot('mnab,mnij->ijab', tmp_tau, Wmnij, prefactor=1.0)\n\n # Wabef used in eqn 2 of reference 1 is very expensive to build and store, so we have\n # broken down the term , 0.5 * tau_ijef * Wabef (eqn. 
7) into different components\n # The last term in the contraction 0.5 * tau_ijef * Wabef is already accounted\n # for in the contraction just above.\n\n # First term: 0.5 * tau_ijef <ab||ef> -> tau_ijef <ab|ef>\n r_T2 += ndot(\n 'ijef,abef->ijab', tmp_tau, self.get_MO('vvvv'), prefactor=1.0)\n\n # Second term: 0.5 * tau_ijef (-P(ab) t_mb <am||ef>) -> -P^(ab)_(ij) {t_ma * Zmbij_mbij}\n # where Zmbij_mbij = <mb|ef> * tau_ijef\n tmp = ndot('ma,mbij->ijab', self.t1, Zmbij)\n r_T2 -= tmp\n r_T2 -= tmp.swapaxes(0, 1).swapaxes(2, 3)\n\n # P(ij)P(ab) t_imae Wmbej -> Broken down into three terms below\n # First term: P^(ab)_(ij) {(t_imae - t_imea)* Wmbej_mbej}\n tmp = ndot('imae,mbej->ijab', self.t2, Wmbej, prefactor=1.0)\n tmp += ndot('imea,mbej->ijab', self.t2, Wmbej, prefactor=-1.0)\n r_T2 += tmp\n r_T2 += tmp.swapaxes(0, 1).swapaxes(2, 3)\n\n # Second term: P^(ab)_(ij) t_imae * (Wmbej_mbej + Wmbje_mbje)\n tmp = ndot('imae,mbej->ijab', self.t2, Wmbej, prefactor=1.0)\n tmp += ndot('imae,mbje->ijab', self.t2, Wmbje, prefactor=1.0)\n r_T2 += tmp\n r_T2 += tmp.swapaxes(0, 1).swapaxes(2, 3)\n\n # Third term: P^(ab)_(ij) t_mjae * Wmbje_mbie\n tmp = ndot('mjae,mbie->ijab', self.t2, Wmbje, prefactor=1.0)\n r_T2 += tmp\n r_T2 += tmp.swapaxes(0, 1).swapaxes(2, 3)\n\n # -P(ij)P(ab) {-t_ie * t_ma * <mb||ej>} -> P^(ab)_(ij) {-t_ie * t_ma * <mb|ej>\n # + t_ie * t_mb * <ma|je>}\n tmp = ndot('ie,ma->imea', self.t1, self.t1)\n tmp1 = ndot('imea,mbej->ijab', tmp, self.get_MO('ovvo'))\n r_T2 -= tmp1\n r_T2 -= tmp1.swapaxes(0, 1).swapaxes(2, 3)\n tmp = ndot('ie,mb->imeb', self.t1, self.t1)\n tmp1 = ndot('imeb,maje->ijab', tmp, self.get_MO('ovov'))\n r_T2 -= tmp1\n r_T2 -= tmp1.swapaxes(0, 1).swapaxes(2, 3)\n\n # P(ij) {t_ie <ab||ej>} -> P^(ab)_(ij) {t_ie <ab|ej>}\n tmp = ndot(\n 'ie,abej->ijab', self.t1, self.get_MO('vvvo'), prefactor=1.0)\n r_T2 += tmp\n r_T2 += tmp.swapaxes(0, 1).swapaxes(2, 3)\n\n # P(ab) {-t_ma <mb||ij>} -> P^(ab)_(ij) {-t_ma <mb|ij>}\n tmp = ndot(\n 'ma,mbij->ijab', self.t1, self.get_MO('ovoo'), prefactor=1.0)\n r_T2 -= tmp\n r_T2 -= tmp.swapaxes(0, 1).swapaxes(2, 3)\n\n ### Update T1 and T2 amplitudes\n self.t1 += r_T1 / self.Dia\n self.t2 += r_T2 / self.Dijab\n\n rms = np.einsum('ia,ia->', r_T1 / self.Dia, r_T1 / self.Dia)\n rms += np.einsum('ijab,ijab->', r_T2 / self.Dijab, r_T2 / self.Dijab)\n\n return np.sqrt(rms)\n\n def compute_corr_energy(self):\n CCSDcorr_E = 2.0 * np.einsum('ia,ia->', self.get_F('ov'), self.t1)\n tmp_tau = self.build_tau()\n CCSDcorr_E += 2.0 * np.einsum('ijab,ijab->', tmp_tau,\n self.get_MO('oovv'))\n CCSDcorr_E -= 1.0 * np.einsum('ijab,ijba->', tmp_tau,\n self.get_MO('oovv'))\n\n self.ccsd_corr_e = CCSDcorr_E\n self.ccsd_e = self.rhf_e + self.ccsd_corr_e\n return CCSDcorr_E\n\n def compute_energy(self,\n e_conv=1e-7,\n r_conv=1e-7,\n maxiter=100,\n max_diis=8,\n start_diis=1):\n\n ### Start Iterations\n ccsd_tstart = time.time()\n\n # Compute MP2 energy\n CCSDcorr_E_old = self.compute_corr_energy()\n print(\n \"CCSD Iteration %3d: CCSD correlation = %.15f dE = % .5E MP2\" %\n (0, CCSDcorr_E_old, -CCSDcorr_E_old))\n\n # Set up DIIS before iterations begin\n diis_object = helper_diis(self.t1, self.t2, max_diis)\n\n # Iterate!\n for CCSD_iter in range(1, maxiter + 1):\n\n rms = self.update()\n\n # Compute CCSD correlation energy\n CCSDcorr_E = self.compute_corr_energy()\n\n # Print CCSD iteration information\n print(\n 'CCSD Iteration %3d: CCSD correlation = %.15f dE = % .5E DIIS = %d'\n % (CCSD_iter, CCSDcorr_E, (CCSDcorr_E - CCSDcorr_E_old),\n 
diis_object.diis_size))\n\n # Check convergence\n if (abs(CCSDcorr_E - CCSDcorr_E_old) < e_conv and rms < r_conv):\n print('\\nCCSD has converged in %.3f seconds!' %\n (time.time() - ccsd_tstart))\n return CCSDcorr_E\n\n # Update old energy\n CCSDcorr_E_old = CCSDcorr_E\n\n # Add the new error vector\n diis_object.add_error_vector(self.t1, self.t2)\n\n if CCSD_iter >= start_diis:\n self.t1, self.t2 = diis_object.extrapolate(self.t1, self.t2)\n\n\n# End HelperCCEnergy class\n", "\"\"\"\nReference implementation of RHF/EFP using libefp through PylibEFP.\n\nRequirements:\nNumPy\nPylibEFP >=0.1\nlibEFP >=1.5b1\nPsi4 >=1.2a1.dev507 (c. late Aug 2017)\n\nReferences:\nSCF in Python from @dgasmith's most excellent Self-Consistent-Field/RHF.py .\nSCF/EFP in Psi4 by @andysim, @edeprince3, @ilyak, @loriab\nlibefp from [Kaliman:2013:2284]\n\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\n\n__authors__ = \"Lori A. Burns\"\n__credits__ = [\"Andrew C. Simmonett\", \"A. Eugene DePrince III\", \"Ilya A. Kaliman\", \"Lori A. Burns\", \"Daniel G. A. Smith\"]\n\n__copyright__ = \"(c) 2014-2017, The Psi4NumPy Developers\"\n__license__ = \"BSD-3-Clause\"\n__date__ = \"2017-08-28\"\n\nimport time\nimport numpy as np\nnp.set_printoptions(precision=5, linewidth=200, suppress=True)\nimport psi4\nimport pylibefp\n\nimport os\n\n\n# Memory for Psi4 in GB\npsi4.set_memory('500 MB')\npsi4.core.set_output_file(\"output.dat\", False)\n\n# Memory for numpy in GB\nnumpy_memory = 2\n\ndef set_qm_atoms(mol, efpobj):\n \"\"\"Provides list of coordinates of quantum mechanical atoms from\n psi4.core.Molecule `mol` to pylibefp.core.efp() `efpobj`.\n\n \"\"\"\n ptc = []\n coords = []\n for iat in range(mol.natom()):\n ptc.append(mol.charge(iat))\n coords.append(mol.x(iat))\n coords.append(mol.y(iat))\n coords.append(mol.z(iat))\n\n efpobj.set_point_charges(ptc, coords)\n\n\ndef modify_Fock_permanent(mol, nbf, efpobj):\n \"\"\"Computes array of the EFP contribution to the potential felt by\n QM atoms, due to permanent EFP moments, for a SCF procedure.\n\n Requires psi4.core.Molecule `mol`, number of basis functions `nbf`,\n and pylibefp.core.efp() `efpobj`.\n\n \"\"\"\n # get composition counts from libefp\n n_fr = efpobj.get_frag_count()\n natoms = efpobj.get_frag_atom_count()\n\n # get multipoles count, pos'n, values from libefp\n # charge + dipoles + quadrupoles + octupoles = 20\n n_mp = efpobj.get_multipole_count()\n xyz_mp = np.asarray(efpobj.get_multipole_coordinates()).reshape(n_mp, 3)\n val_mp = np.asarray(efpobj.get_multipole_values()).reshape(n_mp, 20)\n\n # 0 X Y Z XX YY ZZ XY XZ YZ\n prefacs = np.array([ 1, 1, 1, 1, 1/3, 1/3, 1/3, 2/3, 2/3, 2/3,\n 1/15, 1/15, 1/15, 3/15, 3/15, 3/15, 3/15, 3/15, 3/15, 6/15])\n # XXX YYY ZZZ XXY XXZ XYY YYZ XZZ YZZ XYZ\n\n # EFP permanent moment contribution to the Fock Matrix\n V2 = np.zeros((nbf, nbf))\n\n # Cartesian basis one-electron EFP perturbation\n efp_ints = np.zeros((20, nbf, nbf))\n\n for imp in range(n_mp):\n origin = xyz_mp[imp]\n\n # get EFP multipole integrals from Psi4\n p4_efp_ints = mints.ao_efp_multipole_potential(origin=origin)\n for pole in range(20):\n efp_ints[pole] = np.asarray(p4_efp_ints[pole])\n\n # add frag atom Z into multipole charge (when pos'n of atom matches mp)\n for ifr in range(n_fr):\n atoms = efpobj.get_frag_atoms(ifr)\n for iat in range(natoms[ifr]):\n xyz_atom = [atoms[iat]['x'], atoms[iat]['y'], atoms[iat]['z']]\n if np.allclose(xyz_atom, origin, atol=1e-10):\n val_mp[imp, 0] += atoms[iat]['Z']\n\n # scale 
multipole integrals by multipole magnitudes. result goes into V\n for pole in range(20):\n efp_ints[pole] *= -prefacs[pole] * val_mp[imp, pole]\n V2 += efp_ints[pole]\n\n return V2\n\n\ndef modify_Fock_induced(nbf, efpobj, verbose=1):\n \"\"\"Returns shared matrix containing the EFP contribution to the potential\n felt by QM atoms, due to EFP induced dipoles, in a SCF procedure.\n\n \"\"\"\n # get induced dipoles count, pos'n, values from libefp\n # dipoles = 3\n n_id = efpobj.get_induced_dipole_count()\n xyz_id = np.asarray(efpobj.get_induced_dipole_coordinates(verbose=verbose)).reshape(n_id, 3)\n val_id = np.asarray(efpobj.get_induced_dipole_values(verbose=verbose)).reshape(n_id, 3)\n val_idt = np.asarray(efpobj.get_induced_dipole_conj_values(verbose=verbose)).reshape(n_id, 3)\n\n # take average of induced dipole and conjugate\n val_id = (val_id + val_idt) * 0.5\n\n # EFP induced dipole contribution to the Fock Matrix\n V2 = np.zeros((nbf, nbf))\n\n # Cartesian basis one-electron EFP perturbation\n field_ints = np.zeros((3, nbf, nbf))\n\n for iid in range(n_id):\n origin = xyz_id[iid]\n\n # get electric field integrals from Psi4\n p4_field_ints = mints.electric_field(origin=origin)\n for pole in range(3):\n field_ints[pole] = np.asarray(p4_field_ints[pole])\n\n # scale field integrals by induced dipole magnitudes. result goes into V\n for pole in range(3):\n field_ints[pole] *= -val_id[iid, pole]\n V2 += field_ints[pole]\n\n return V2\n\n\ndef field_fn(xyz):\n \"\"\"Compute electric field from electrons in ab initio part for libefp polarization calculation.\n\n Parameters\n ----------\n xyz : list\n 3 * n_pt (flat) array of points at which to compute electric field\n\n Returns\n -------\n list\n 3 * n_pt (flat) array of electric field at points in `xyz`.\n\n Notes\n -----\n Function signature defined by libefp, so function uses number of\n basis functions `nbf` and density matrix `efp_density` from global\n namespace.\n\n \"\"\"\n global nbf\n global efp_density\n\n points = np.array(xyz).reshape(-1, 3)\n n_pt = len(points)\n\n # Cartesian basis one-electron EFP perturbation\n field_ints = np.zeros((3, nbf, nbf))\n\n # Electric field at points\n field = np.zeros((n_pt, 3))\n\n for ipt in range(n_pt):\n # get electric field integrals from Psi4\n p4_field_ints = mints.electric_field(origin=points[ipt])\n\n field[ipt] = [np.vdot(efp_density, np.asarray(p4_field_ints[0])) * 2.0, # Ex\n np.vdot(efp_density, np.asarray(p4_field_ints[1])) * 2.0, # Ey\n np.vdot(efp_density, np.asarray(p4_field_ints[2])) * 2.0] # Ez\n\n field = np.reshape(field, 3 * n_pt)\n\n return field\n\n\nref_V2 = np.array([\n [ -0.02702339455725, -0.00631509453548, -0.00000280084677, -0.00060226624612, 0.00000155158400, -0.00452046694500, -0.00000038595163, -0.00008299120179, 0.00000021380548, -0.00090142990526, 0.00000473984815, 0.00000183105977, -0.00091126369988, 0.00000235760871, -0.00090571433548, -0.00093899785533, -0.00186143580968, -0.00093995668834, -0.00186166418149],\n [ -0.00631509453548, -0.02702339455725, -0.00001910979606, -0.00410918056805, 0.00001058624630, -0.02063616591789, -0.00001267718384, -0.00272597555850, 0.00000702277450, -0.01445203384431, 0.00033391900577, 0.00012899688747, -0.01514481783551, 0.00016609189393, -0.01475386897318, -0.00642763371094, -0.01023119897476, -0.00663585147245, -0.01026009701560],\n [ -0.00000280084677, -0.00001910979606, -0.02665234730712, 0.00037371007562, 0.00014436865150, -0.00001859005760, -0.01317104219809, 0.00038448758263, 0.00014853213109, -0.00013643690212, 
-0.00190980298320, -0.00014163627448, 0.00010970691227, -0.00002569550824, -0.00001652160690, 0.00553694631171, 0.00296253882599, -0.00592518334643, -0.00307008036331],\n [ -0.00060226624612, -0.00410918056805, 0.00037371007562, -0.02742768609665, 0.00018588404124, -0.00399742114424, 0.00038448758263, -0.01396874115447, 0.00019124479202, -0.00190980298320, 0.00010970691227, -0.00002569550824, -0.00553609000895, 0.00005214006927, -0.00185450039426, -0.00111418876009, -0.00223702838075, -0.00084629056834, -0.00203426014685],\n [ 0.00000155158400, 0.00001058624630, 0.00014436865150, 0.00018588404124, -0.02699015026797, 0.00001029832686, 0.00014853213109, 0.00019124479202, -0.01351858713356, -0.00014163627448, -0.00002569550824, -0.00001652160690, 0.00005214006927, -0.00185450039426, 0.00011345627547, 0.00451469309687, 0.00241810592738, 0.00477138911517, 0.00250189743578],\n [ -0.00452046694500, -0.02063616591789, -0.00001859005760, -0.00399742114424, 0.00001029832686, -0.02702319749786, -0.00003768753434, -0.00796996845458, 0.00002030483034, -0.01818952603674, 0.00070524995886, 0.00027244649517, -0.01965271296696, 0.00035079256242, -0.01882701369086, -0.00987165499275, -0.01795740782584, -0.01115822875565, -0.01834575971991],\n [ -0.00000038595163, -0.00001267718384, -0.01317104219809, 0.00038448758263, 0.00014853213109, -0.00003768753434, -0.02503648443824, 0.00199702997314, 0.00077269122299, -0.00033902442705, -0.00294678699585, -0.00039007690599, 0.00030807567722, -0.00006972255302, -0.00003443473716, 0.00878465177892, 0.00777812963854, -0.01154437574140, -0.00936638912773],\n [ -0.00008299120179, -0.00272597555850, 0.00038448758263, -0.01396874115447, 0.00019124479202, -0.00796996845458, 0.00199702997314, -0.02918413124002, 0.00099361419288, -0.00294678699585, 0.00030807567722, -0.00006972255302, -0.00831580669109, 0.00013571897621, -0.00279672819574, -0.00251085900448, -0.00821286621429, -0.00153039204428, -0.00622386437502],\n [ 0.00000021380548, 0.00000702277450, 0.00014853213109, 0.00019124479202, -0.01351858713356, 0.00002030483034, 0.00077269122299, 0.00099361419288, -0.02684469027539, -0.00039007690599, -0.00006972255302, -0.00003443473716, 0.00013571897621, -0.00279672819574, 0.00029057799444, 0.00789015760008, 0.00719868343548, 0.00944135089382, 0.00814913589233],\n [ -0.00090142990526, -0.01445203384431, -0.00013643690212, -0.00190980298320, -0.00014163627448, -0.01818952603674, -0.00033902442705, -0.00294678699585, -0.00039007690599, -0.02563070634460, 0.00066403177471, 0.00035090564283, -0.00910270453424, 0.00007502850470, -0.00874245358696, -0.00913676610260, -0.01107408168593, -0.01046477748444, -0.01146456481073],\n [ 0.00000473984815, 0.00033391900577, -0.00190980298320, 0.00010970691227, -0.00002569550824, 0.00070524995886, -0.00294678699585, 0.00030807567722, -0.00006972255302, 0.00066403177471, -0.00910270453424, 0.00007502850470, 0.00068756471942, 0.00005040470506, 0.00022274776644, 0.00124025666869, 0.00135413078331, -0.00068224645268, -0.00032923928756],\n [ 0.00000183105977, 0.00012899688747, -0.00014163627448, -0.00002569550824, -0.00001652160690, 0.00027244649517, -0.00039007690599, -0.00006972255302, -0.00003443473716, 0.00035090564283, 0.00007502850470, -0.00874245358696, 0.00005040470506, 0.00022274776644, 0.00020687758368, -0.00412380528180, -0.00051519173670, 0.00491320446628, 0.00097904308284],\n [ -0.00091126369988, -0.01514481783551, 0.00010970691227, -0.00553609000895, 0.00005214006927, -0.01965271296696, 0.00030807567722, -0.00831580669109, 
0.00013571897621, -0.00910270453424, 0.00068756471942, 0.00005040470506, -0.02840235120105, 0.00033061285791, -0.00923711128151, -0.00458459601546, -0.01138951947581, -0.00454371602298, -0.01120759511573],\n [ 0.00000235760871, 0.00016609189393, -0.00002569550824, 0.00005214006927, -0.00185450039426, 0.00035079256242, -0.00006972255302, 0.00013571897621, -0.00279672819574, 0.00007502850470, 0.00005040470506, 0.00022274776644, 0.00033061285791, -0.00923711128151, 0.00037744020930, 0.00095088751145, 0.00091755913622, 0.00066686324895, 0.00079498664458],\n [ -0.00090571433548, -0.01475386897318, -0.00001652160690, -0.00185450039426, 0.00011345627547, -0.01882701369086, -0.00003443473716, -0.00279672819574, 0.00029057799444, -0.00874245358696, 0.00022274776644, 0.00020687758368, -0.00923711128151, 0.00037744020930, -0.02691937643507, -0.00793049330280, -0.01147295613562, -0.00845374029895, -0.01156033431781],\n [ -0.00093899785533, -0.00642763371094, 0.00553694631171, -0.00111418876009, 0.00451469309687, -0.00987165499275, 0.00878465177892, -0.00251085900448, 0.00789015760008, -0.00913676610260, 0.00124025666869, -0.00412380528180, -0.00458459601546, 0.00095088751145, -0.00793049330280, -0.01785633292778, -0.01175654591020, -0.00144863365096, -0.00543904115350],\n [ -0.00186143580968, -0.01023119897476, 0.00296253882599, -0.00223702838075, 0.00241810592738, -0.01795740782584, 0.00777812963854, -0.00821286621429, 0.00719868343548, -0.01107408168593, 0.00135413078331, -0.00051519173670, -0.01138951947581, 0.00091755913622, -0.01147295613562, -0.01175654591020, -0.01842598268335, -0.00600898660138, -0.01416862694275],\n [ -0.00093995668834, -0.00663585147245, -0.00592518334643, -0.00084629056834, 0.00477138911517, -0.01115822875565, -0.01154437574140, -0.00153039204428, 0.00944135089382, -0.01046477748444, -0.00068224645268, 0.00491320446628, -0.00454371602298, 0.00066686324895, -0.00845374029895, -0.00144863365096, -0.00600898660138, -0.02521907195360, -0.01660151455045],\n [ -0.00186166418149, -0.01026009701560, -0.00307008036331, -0.00203426014685, 0.00250189743578, -0.01834575971991, -0.00936638912773, -0.00622386437502, 0.00814913589233, -0.01146456481073, -0.00032923928756, 0.00097904308284, -0.01120759511573, 0.00079498664458, -0.01156033431781, -0.00543904115350, -0.01416862694275, -0.01660151455045, -0.02521511777687]])\n\n\n\nmol = psi4.geometry(\"\"\"\nunits bohr\n0 1\nO1 0.000000000000 0.000000000000 0.224348285559\nH2 -1.423528800232 0.000000000000 -0.897393142237\nH3 1.423528800232 0.000000000000 -0.897393142237\nsymmetry c1\nno_com\nno_reorient\n\"\"\")\n\n# <-- efp\n# [Kaliman:2013:2284] Fig. 4 -- Initialize EFP\nefpmol = pylibefp.core.efp()\n# [Kaliman:2013:2284] Fig. 
4 -- Set fragment coordinates\nfrags = ['h2o', 'nh3', 'nh3']\nefpmol.add_potential(frags)\nefpmol.add_fragment(frags)\nefpmol.set_frag_coordinates(0, 'xyzabc', [-4.014110144291, 2.316749370493, -1.801514729931, -2.902133, 1.734999, -1.953647])\nefpmol.set_frag_coordinates(1, 'xyzabc', [ 1.972094713645, 3.599497221584, 5.447701074734, -1.105309, 2.033306, -1.488582])\nefpmol.set_frag_coordinates(2, 'xyzabc', [-7.876296399270, -1.854372164887, -2.414804197762, 2.526442, 1.658262, -2.742084])\nefpmol.prepare()\nefpmol.set_opts({}, append='psi')\nefpmol.set_electron_density_field_fn(field_fn)\n# --> efp\n\npsi4.set_options({'basis': '6-31g*',\n 'scf_type': 'pk',\n 'e_convergence': 1e-8})\n\n# Set defaults\nmaxiter = 40\nE_conv = 1.0E-6\nD_conv = 1.0E-3\n\n# Integral generation from Psi4's MintsHelper\nwfn = psi4.core.Wavefunction.build(mol, psi4.core.get_global_option('BASIS'))\nt = time.time()\nmints = psi4.core.MintsHelper(wfn.basisset())\nS = np.asarray(mints.ao_overlap())\n\n# Get nbf and ndocc for closed shell molecules\nnbf = S.shape[0]\nndocc = wfn.nalpha()\n\nprint('\\nNumber of occupied orbitals: %d' % ndocc)\nprint('Number of basis functions: %d' % nbf)\n\n# Run a quick check to make sure everything will fit into memory\nI_Size = (nbf ** 4) * 8.e-9\nprint(\"\\nSize of the ERI tensor will be %4.2f GB.\" % I_Size)\n\n# Estimate memory usage\nmemory_footprint = I_Size * 1.5\nif I_Size > numpy_memory:\n psi4.core.clean()\n raise Exception(\"Estimated memory utilization (%4.2f GB) exceeds numpy_memory \\\n limit of %4.2f GB.\" % (memory_footprint, numpy_memory))\n\n# Compute required quantities for SCF\nV = np.asarray(mints.ao_potential())\nT = np.asarray(mints.ao_kinetic())\nI = np.asarray(mints.ao_eri())\n\nprint('\\nTotal time taken for integrals: %.3f seconds.' % (time.time() - t))\n\nt = time.time()\n\n# Build H_core\nH = T + V\n\n# <-- efp: add in permanent moment contribution and cache\nVefp = modify_Fock_permanent(mol, nbf, efpmol)\nassert(psi4.compare_integers(1, np.allclose(Vefp, ref_V2), 'EFP permanent Fock contrib'))\nH = H + Vefp\nHorig = H.copy()\nset_qm_atoms(mol, efpmol)\n# --> efp\n\n# Orthogonalizer A = S^(-1/2) using Psi4's matrix power.\nA = mints.ao_overlap()\nA.power(-0.5, 1.e-16)\nA = np.asarray(A)\n\n# Calculate initial core guess\nHp = A.dot(H).dot(A)\ne, C2 = np.linalg.eigh(Hp)\nC = A.dot(C2)\nCocc = C[:, :ndocc]\nD = np.einsum('pi,qi->pq', Cocc, Cocc)\n\nprint('\\nTotal time taken for setup: %.3f seconds' % (time.time() - t))\n\nprint('QM/EFP: iterating Total Energy including QM/EFP Induction')\nt = time.time()\nE = 0.0\nEnuc = mol.nuclear_repulsion_energy()\nEold = 0.0\nDold = np.zeros_like(D)\n\nfor SCF_ITER in range(1, maxiter + 1):\n\n # <-- efp: add contribution to Fock matrix\n verbose_dipoles = 1 if (SCF_ITER == 1) else 0\n # [Kaliman:2013:2284] Fig. 4 -- Compute electric field from wavefunction\n # [Kaliman:2013:2284] Fig. 4 -- Compute electric field from induced dipoles\n Vefp = modify_Fock_induced(nbf, efpmol, verbose=verbose_dipoles)\n H = Horig.copy() + Vefp\n # --> efp\n\n # Build fock matrix\n J = np.einsum('pqrs,rs->pq', I, D)\n K = np.einsum('prqs,rs->pq', I, D)\n F = H + J * 2 - K\n\n diis_e = np.einsum('ij,jk,kl->il', F, D, S) - np.einsum('ij,jk,kl->il', S, D, F)\n diis_e = A.dot(diis_e).dot(A)\n\n # SCF energy and update\n # [Kaliman:2013:2284] Fig. 
4 -- Compute QM wavefunction\n SCF_E = np.einsum('pq,pq->', F + H, D) + Enuc\n dRMS = np.mean(diis_e**2)**0.5\n\n # <-- efp: add contribution to energy\n efp_density = D\n # [Kaliman:2013:2284] Fig. 4 -- Compute EFP induced dipoles\n efp_wfn_dependent_energy = efpmol.get_wavefunction_dependent_energy()\n SCF_E += efp_wfn_dependent_energy\n # --> efp\n\n print('SCF Iteration %3d: Energy = %4.16f dE = % 1.5E dRMS = %1.5E dEFP = %12.8f'\n % (SCF_ITER, SCF_E, (SCF_E - Eold), dRMS, efp_wfn_dependent_energy))\n if (abs(SCF_E - Eold) < E_conv) and (dRMS < D_conv):\n break\n\n Eold = SCF_E\n Dold = D\n\n # Diagonalize Fock matrix\n Fp = A.dot(F).dot(A)\n e, C2 = np.linalg.eigh(Fp)\n C = A.dot(C2)\n Cocc = C[:, :ndocc]\n D = np.einsum('pi,qi->pq', Cocc, Cocc)\n\n if SCF_ITER == maxiter:\n clean()\n raise Exception(\"Maximum number of SCF cycles exceeded.\")\n\n\n# <-- efp\nefpmol.compute()\nefpene = efpmol.get_energy(label='psi')\n# [Kaliman:2013:2284] Fig. 4 -- Compute one electron EFP contributions to Hamiltonian\nefp_wfn_independent_energy = efpene['total'] - efpene['ind']\nSCF_E += efp_wfn_independent_energy\nprint(efpmol.energy_summary(scfefp=SCF_E, label='psi'))\n# --> efp\n\nprint('Total time for SCF iterations: %.3f seconds \\n' % (time.time() - t))\n\n# references confirmed against Q-Chem & Psi4\nassert(psi4.compare_values( 0.2622598847, efpene['total'] - efpene['ind'], 6, 'EFP corr to SCF'))\nassert(psi4.compare_values(-0.0117694790, efpene['ind'], 6, 'QM-EFP Indc'))\nassert(psi4.compare_values(-0.0021985285, efpene['disp'], 6, 'EFP-EFP Disp'))\nassert(psi4.compare_values( 0.0056859871, efpene['exch'], 6, 'EFP-EFP Exch'))\nassert(psi4.compare_values( 0.2504904057, efpene['total'], 6, 'EFP-EFP Totl'))\nassert(psi4.compare_values(-76.0139362744, SCF_E, 6, 'SCF'))\nefpmol.clean()\n", "\"\"\"\nA Psi4 input script to compute CI energy using an iterative Davidson-Lu solver.\n\nReferences:\nEquations from [Szabo:1996]\n\"\"\"\n\n__authors__ = \"Tianyuan Zhang\"\n__credits__ = [\"Tianyuan Zhang\", \"Jeffrey B. Schriber\", \"Daniel G. A. Smith\"]\n\n__copyright__ = \"(c) 2014-2017, The Psi4NumPy Developers\"\n__license__ = \"BSD-3-Clause\"\n__date__ = \"2017-05-26\"\n\nimport numpy as np\nimport psi4\n\nnp.set_printoptions(precision=7, linewidth=200, threshold=2000, suppress=True)\n\n# Memory for Psi4 in GB\n# psi4.core.set_memory(int(2e9), False)\npsi4.core.set_output_file('output.dat', False)\n\n# Memory for numpy in GB\nnumpy_memory = 2\n\n## Uncomment for short test\n#mol = psi4.geometry(\"\"\"\n#0 1\n#N 0.0 0.0 0.0\n#N 1.1 0.0 0.0\n#symmetry c1\n#units angstrom\n#\"\"\")\n## Number of roots\n#nroot = 1\n#psi4.set_options({\"BASIS\": \"STO-3G\", \"NUM_ROOTS\" : 1})\n\n## Uncomment for long test\nmol = psi4.geometry(\"\"\"\nO\nH 1 1.1\nH 1 1.1 2 104\nsymmetry c1\n\"\"\")\n # Number of roots\nnroot = 1\npsi4.set_options({\"BASIS\": \"6-31g\", \"NUM_ROOTS\" : 3})\n\n# Build the SCF Wavefunction\nscf_energy, scf_wfn = psi4.energy(\"HF\", return_wfn=True)\n\n# Build integrals\nmints = psi4.core.MintsHelper(scf_wfn.basisset())\n\n# Build a CI Wavefunction\n# This automatically generates the determinants based on the options\n# Note that a CISD wavefunction is default if no options are given\n# Other CI wavefunctions can be requested, e.g. 
{ \"FCI\" : True }\npsi4.core.prepare_options_for_module(\"DETCI\")\nciwfn = psi4.core.CIWavefunction(scf_wfn)\n\n# Transform the integrals\nmints.integrals()\nciwfn.transform_ci_integrals()\n\n# Get the number of determinants\nndet = ciwfn.ndet()\nprint(\"Number of determinants in CI space: %d\" % ciwfn.ndet())\n\n\n## Other options\n\n# Number of guess vectors\nguess_size = 4\n\n# Convergence tolerance of the residual norm\nctol = 1.e-5\n\n# Convergence tolerance of the energy\netol = 1.e-9\n\n# Make sure the guess is smaller than the CI space\nif guess_size > ndet:\n raise Exception(\"Number of guesses (%d) exceeds CI dimension (%d)!\" % (guess_size, ndet))\n\nprint('Using %d determinants in the guess\\n' % guess_size)\n\n# Build the Hamiltonian in the space of guess determinants\nH = np.array(ciwfn.hamiltonian(guess_size))\n\n# Get guess eigenvectors\ngvecs = []\ngevals, gevecs = np.linalg.eigh(H)\n#for x in range(nroot):\nfor x in range(guess_size):\n guess = np.zeros((ciwfn.ndet()))\n guess[:guess_size] = gevecs[:, x]\n gvecs.append(guess)\n print('Guess CI energy (Hsize %d) %2.9f' % (guess_size, gevals[x] + mol.nuclear_repulsion_energy()))\nprint(\"\")\n\n# Maximum number of vectors\nmax_guess = 200\n\n# Build diagonal\nHd = ciwfn.Hd_vector(5)\n\ncvecs = ciwfn.new_civector(max_guess, 200, True, True)\ncvecs.set_nvec(max_guess)\ncvecs.init_io_files(False)\n\nswork_vec = max_guess\nsvecs = ciwfn.new_civector(max_guess + 1, 201, True, True)\nsvecs.set_nvec(max_guess)\nsvecs.init_io_files(False)\n\ndwork_vec = nroot\ndvecs = ciwfn.new_civector(nroot + 1, 202, True, True)\ndvecs.init_io_files(False)\ndvecs.set_nvec(nroot + 1)\n\nfor x in range(nroot + 1):\n dvecs.write(x, 0)\nfor x in range(max_guess):\n svecs.write(x, 0)\nfor x in range(max_guess):\n cvecs.write(x, 0)\n\n# Current number of vectors\nnum_vecs = guess_size\n\n# Copy gvec data into in ci_gvecs\narr_cvecs = np.asarray(cvecs)\nfor x in range(guess_size):\n arr_cvecs[:] = gvecs[x]\n cvecs.write(x, 0)\n cvecs.symnormalize(1 / np.linalg.norm(gvecs[x]), x)\n\ndelta_c = np.zeros(nroot)\n\nEold = scf_energy\nG = np.zeros((max_guess, max_guess))\n\n# Begin Davidson iterations\nfor CI_ITER in range(max_guess - 1):\n\n # Subspace Matrix, Gij = < bi | H | bj >\n for i in range(0, num_vecs):\n # Build sigma for each b\n cvecs.read(i, 0)\n svecs.read(i, 0)\n ciwfn.sigma(cvecs, svecs, i, i)\n for j in range(i, num_vecs):\n # G_ij = (b_i, sigma_j)\n cvecs.read(i, 0)\n svecs.read(j, 0)\n G[j, i] = G[i, j] = svecs.vdot(cvecs, i, j)\n\n evals, evecs = np.linalg.eigh(G[:num_vecs, :num_vecs])\n CI_E = evals\n\n # Use average over roots as convergence criteria\n avg_energy = 0.0\n avg_dc = 0.0\n for n in range(nroot):\n avg_energy += evals[n]\n avg_dc += delta_c[n]\n avg_energy /= nroot\n avg_dc /= nroot\n avg_energy += mol.nuclear_repulsion_energy()\n\n print('CI Iteration %3d: Energy = %4.16f dE = % 1.5E dC = %1.5E' % (CI_ITER, avg_energy, (avg_energy - Eold),\n avg_dc))\n if (abs(avg_energy - Eold) < etol) and (avg_dc < ctol) and (CI_ITER > 3):\n print('CI has converged!\\n')\n break\n Eold = avg_energy\n\n # Build new vectors as linear combinations of the subspace matrix, H\n for n in range(nroot):\n\n # Build as linear combinations of previous vectors\n dvecs.zero()\n dvecs.write(dwork_vec, 0)\n for c in range(len(evecs[:, n])):\n dvecs.axpy(evecs[c, n], cvecs, dwork_vec, c)\n\n # Build new vector new_vec = ((H * cvec) - evals[n] * cvec) / (evals[n] - Hd)\n ciwfn.sigma(dvecs, svecs, dwork_vec, swork_vec)\n svecs.axpy(-1 * evals[n], dvecs, 
swork_vec, dwork_vec)\n norm = svecs.dcalc(evals[n], Hd, swork_vec)\n\n if (norm < 1e-9):\n continue\n\n svecs.symnormalize(1 / norm, swork_vec)\n delta_c[n] = norm\n\n # Build a new vector that is orthornormal to all previous vectors\n dvecs.copy(svecs, n, swork_vec)\n norm = dvecs.norm(n)\n dvecs.symnormalize(1 / norm, n)\n\n total_proj = 0\n for i in range(num_vecs):\n proj = svecs.vdot(cvecs, swork_vec, i)\n total_proj += proj\n dvecs.axpy(-proj, cvecs, n, i)\n\n norm = dvecs.norm(n)\n dvecs.symnormalize(1 / norm, n)\n\n # This *should* screen out contributions that are projected out by above\n if True:\n cvecs.write(num_vecs, 0)\n cvecs.copy(dvecs, num_vecs, n)\n num_vecs += 1\n\nprint('SCF energy: % 16.10f' % (scf_energy))\nfor n in range(nroot):\n print('State %d Total Energy: % 16.10f' % (n, CI_E[n] + mol.nuclear_repulsion_energy()))\nprint(\"\")\n\nE = psi4.energy('detci')\n\nfor n in range(nroot):\n ci_ref = psi4.get_variable('CI ROOT %d TOTAL ENERGY' % n)\n ci_compute = CI_E[n] + mol.nuclear_repulsion_energy()\n psi4.compare_values(ci_ref, ci_compute, 6, 'CI Root %d Total Energy' % n)\n" ]
[ [ "numpy.sqrt", "numpy.einsum" ], [ "numpy.einsum", "numpy.asarray", "numpy.set_printoptions", "numpy.linalg.eigh", "numpy.zeros_like", "numpy.mean" ], [ "numpy.diag", "numpy.sqrt", "numpy.einsum", "numpy.asarray", "numpy.zeros" ], [ "numpy.allclose", "numpy.einsum", "numpy.asarray", "numpy.reshape", "numpy.set_printoptions", "numpy.linalg.eigh", "numpy.zeros_like", "numpy.mean", "numpy.array", "numpy.zeros" ], [ "numpy.asarray", "numpy.set_printoptions", "numpy.linalg.norm", "numpy.linalg.eigh", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ddonatien/mmcv
[ "5532275712dc2bae516e50fa8fda585dff737cba" ]
[ "mmcv/cnn/utils/weight_init.py" ]
[ "# Copyright (c) Open-MMLab. All rights reserved.\nimport copy\nimport math\nimport warnings\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom mmcv.utils import Registry, build_from_cfg, get_logger, print_log\n\nINITIALIZERS = Registry('initializer')\n\n\ndef constant_init(module, val, bias=0):\n if hasattr(module, 'weight') and module.weight is not None:\n nn.init.constant_(module.weight, val)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)\n\n\ndef xavier_init(module, gain=1, bias=0, distribution='normal'):\n assert distribution in ['uniform', 'normal']\n if hasattr(module, 'weight') and module.weight is not None:\n if distribution == 'uniform':\n nn.init.xavier_uniform_(module.weight, gain=gain)\n else:\n nn.init.xavier_normal_(module.weight, gain=gain)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)\n\n\ndef normal_init(module, mean=0, std=1, bias=0):\n if hasattr(module, 'weight') and module.weight is not None:\n nn.init.normal_(module.weight, mean, std)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)\n\n\ndef trunc_normal_init(module: nn.Module,\n mean: float = 0,\n std: float = 1,\n a: float = -2,\n b: float = 2,\n bias: float = 0) -> None:\n if hasattr(module, 'weight') and module.weight is not None:\n trunc_normal_(module.weight, mean, std, a, b) # type: ignore\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias) # type: ignore\n\n\ndef uniform_init(module, a=0, b=1, bias=0):\n if hasattr(module, 'weight') and module.weight is not None:\n nn.init.uniform_(module.weight, a, b)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)\n\n\ndef kaiming_init(module,\n a=0,\n mode='fan_out',\n nonlinearity='relu',\n bias=0,\n distribution='normal'):\n assert distribution in ['uniform', 'normal']\n if hasattr(module, 'weight') and module.weight is not None:\n if distribution == 'uniform':\n nn.init.kaiming_uniform_(\n module.weight, a=a, mode=mode, nonlinearity=nonlinearity)\n else:\n nn.init.kaiming_normal_(\n module.weight, a=a, mode=mode, nonlinearity=nonlinearity)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)\n\n\ndef caffe2_xavier_init(module, bias=0):\n # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch\n # Acknowledgment to FAIR's internal code\n kaiming_init(\n module,\n a=1,\n mode='fan_in',\n nonlinearity='leaky_relu',\n bias=bias,\n distribution='uniform')\n\n\ndef bias_init_with_prob(prior_prob):\n \"\"\"initialize conv/fc bias value according to a given probability value.\"\"\"\n bias_init = float(-np.log((1 - prior_prob) / prior_prob))\n return bias_init\n\n\ndef _get_bases_name(m):\n return [b.__name__ for b in m.__class__.__bases__]\n\n\nclass BaseInit(object):\n\n def __init__(self, *, bias=0, bias_prob=None, layer=None):\n self.wholemodule = False\n if not isinstance(bias, (int, float)):\n raise TypeError(f'bias must be a number, but got a {type(bias)}')\n\n if bias_prob is not None:\n if not isinstance(bias_prob, float):\n raise TypeError(f'bias_prob type must be float, \\\n but got {type(bias_prob)}')\n\n if layer is not None:\n if not isinstance(layer, (str, list)):\n raise TypeError(f'layer must be a str or a list of str, \\\n but got a {type(layer)}')\n else:\n layer = []\n\n if bias_prob is not None:\n self.bias = 
bias_init_with_prob(bias_prob)\n else:\n self.bias = bias\n self.layer = [layer] if isinstance(layer, str) else layer\n\n\[email protected]_module(name='Constant')\nclass ConstantInit(BaseInit):\n \"\"\"Initialize module parameters with constant values.\n\n Args:\n val (int | float): the value to fill the weights in the module with\n bias (int | float): the value to fill the bias. Defaults to 0.\n bias_prob (float, optional): the probability for bias initialization.\n Defaults to None.\n layer (str | list[str], optional): the layer will be initialized.\n Defaults to None.\n \"\"\"\n\n def __init__(self, val, **kwargs):\n super().__init__(**kwargs)\n self.val = val\n\n def __call__(self, module):\n\n def init(m):\n if self.wholemodule:\n constant_init(m, self.val, self.bias)\n else:\n layername = m.__class__.__name__\n basesname = _get_bases_name(m)\n if len(set(self.layer) & set([layername] + basesname)):\n constant_init(m, self.val, self.bias)\n\n module.apply(init)\n\n\[email protected]_module(name='Xavier')\nclass XavierInit(BaseInit):\n r\"\"\"Initialize module parameters with values according to the method\n described in `Understanding the difficulty of training deep feedforward\n neural networks - Glorot, X. & Bengio, Y. (2010).\n <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_\n\n Args:\n gain (int | float): an optional scaling factor. Defaults to 1.\n bias (int | float): the value to fill the bias. Defaults to 0.\n bias_prob (float, optional): the probability for bias initialization.\n Defaults to None.\n distribution (str): distribution either be ``'normal'``\n or ``'uniform'``. Defaults to ``'normal'``.\n layer (str | list[str], optional): the layer will be initialized.\n Defaults to None.\n \"\"\"\n\n def __init__(self, gain=1, distribution='normal', **kwargs):\n super().__init__(**kwargs)\n self.gain = gain\n self.distribution = distribution\n\n def __call__(self, module):\n\n def init(m):\n if self.wholemodule:\n xavier_init(m, self.gain, self.bias, self.distribution)\n else:\n layername = m.__class__.__name__\n basesname = _get_bases_name(m)\n if len(set(self.layer) & set([layername] + basesname)):\n xavier_init(m, self.gain, self.bias, self.distribution)\n\n module.apply(init)\n\n\[email protected]_module(name='Normal')\nclass NormalInit(BaseInit):\n r\"\"\"Initialize module parameters with the values drawn from the normal\n distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`.\n\n Args:\n mean (int | float):the mean of the normal distribution. Defaults to 0.\n std (int | float): the standard deviation of the normal distribution.\n Defaults to 1.\n bias (int | float): the value to fill the bias. 
Defaults to 0.\n bias_prob (float, optional): the probability for bias initialization.\n Defaults to None.\n layer (str | list[str], optional): the layer will be initialized.\n Defaults to None.\n\n \"\"\"\n\n def __init__(self, mean=0, std=1, **kwargs):\n super().__init__(**kwargs)\n self.mean = mean\n self.std = std\n\n def __call__(self, module):\n\n def init(m):\n if self.wholemodule:\n normal_init(m, self.mean, self.std, self.bias)\n else:\n layername = m.__class__.__name__\n basesname = _get_bases_name(m)\n if len(set(self.layer) & set([layername] + basesname)):\n normal_init(m, self.mean, self.std, self.bias)\n\n module.apply(init)\n\n\[email protected]_module(name='TruncNormal')\nclass TruncNormalInit(BaseInit):\n r\"\"\"Initialize module parameters with the values drawn from the normal\n distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)` with values\n outside :math:`[a, b]`.\n\n Args:\n mean (float): the mean of the normal distribution. Defaults to 0.\n std (float): the standard deviation of the normal distribution.\n Defaults to 1.\n a (float): The minimum cutoff value.\n b ( float): The maximum cutoff value.\n bias (float): the value to fill the bias. Defaults to 0.\n bias_prob (float, optional): the probability for bias initialization.\n Defaults to None.\n layer (str | list[str], optional): the layer will be initialized.\n Defaults to None.\n\n \"\"\"\n\n def __init__(self,\n mean: float = 0,\n std: float = 1,\n a: float = -2,\n b: float = 2,\n **kwargs) -> None:\n super().__init__(**kwargs)\n self.mean = mean\n self.std = std\n self.a = a\n self.b = b\n\n def __call__(self, module: nn.Module) -> None:\n\n def init(m):\n if self.wholemodule:\n trunc_normal_init(m, self.mean, self.std, self.a, self.b,\n self.bias)\n else:\n layername = m.__class__.__name__\n basesname = _get_bases_name(m)\n if len(set(self.layer) & set([layername] + basesname)):\n trunc_normal_init(m, self.mean, self.std, self.a, self.b,\n self.bias)\n\n module.apply(init)\n\n\[email protected]_module(name='Uniform')\nclass UniformInit(BaseInit):\n r\"\"\"Initialize module parameters with values drawn from the uniform\n distribution :math:`\\mathcal{U}(a, b)`.\n\n Args:\n a (int | float): the lower bound of the uniform distribution.\n Defaults to 0.\n b (int | float): the upper bound of the uniform distribution.\n Defaults to 1.\n bias (int | float): the value to fill the bias. Defaults to 0.\n bias_prob (float, optional): the probability for bias initialization.\n Defaults to None.\n layer (str | list[str], optional): the layer will be initialized.\n Defaults to None.\n \"\"\"\n\n def __init__(self, a=0, b=1, **kwargs):\n super().__init__(**kwargs)\n self.a = a\n self.b = b\n\n def __call__(self, module):\n\n def init(m):\n if self.wholemodule:\n uniform_init(m, self.a, self.b, self.bias)\n else:\n layername = m.__class__.__name__\n basesname = _get_bases_name(m)\n if len(set(self.layer) & set([layername] + basesname)):\n uniform_init(m, self.a, self.b, self.bias)\n\n module.apply(init)\n\n\[email protected]_module(name='Kaiming')\nclass KaimingInit(BaseInit):\n r\"\"\"Initialize module paramters with the valuse according to the method\n described in `Delving deep into rectifiers: Surpassing human-level\n performance on ImageNet classification - He, K. et al. 
(2015).\n <https://www.cv-foundation.org/openaccess/content_iccv_2015/\n papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_\n\n Args:\n a (int | float): the negative slope of the rectifier used after this\n layer (only used with ``'leaky_relu'``). Defaults to 0.\n mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing\n ``'fan_in'`` preserves the magnitude of the variance of the weights\n in the forward pass. Choosing ``'fan_out'`` preserves the\n magnitudes in the backwards pass. Defaults to ``'fan_out'``.\n nonlinearity (str): the non-linear function (`nn.functional` name),\n recommended to use only with ``'relu'`` or ``'leaky_relu'`` .\n Defaults to 'relu'.\n bias (int | float): the value to fill the bias. Defaults to 0.\n bias_prob (float, optional): the probability for bias initialization.\n Defaults to None.\n distribution (str): distribution either be ``'normal'`` or\n ``'uniform'``. Defaults to ``'normal'``.\n layer (str | list[str], optional): the layer will be initialized.\n Defaults to None.\n \"\"\"\n\n def __init__(self,\n a=0,\n mode='fan_out',\n nonlinearity='relu',\n distribution='normal',\n **kwargs):\n super().__init__(**kwargs)\n self.a = a\n self.mode = mode\n self.nonlinearity = nonlinearity\n self.distribution = distribution\n\n def __call__(self, module):\n\n def init(m):\n if self.wholemodule:\n kaiming_init(m, self.a, self.mode, self.nonlinearity,\n self.bias, self.distribution)\n else:\n layername = m.__class__.__name__\n basesname = _get_bases_name(m)\n if len(set(self.layer) & set([layername] + basesname)):\n kaiming_init(m, self.a, self.mode, self.nonlinearity,\n self.bias, self.distribution)\n\n module.apply(init)\n\n\[email protected]_module(name='Caffe2Xavier')\nclass Caffe2XavierInit(KaimingInit):\n # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch\n # Acknowledgment to FAIR's internal code\n def __init__(self, **kwargs):\n super().__init__(\n a=1,\n mode='fan_in',\n nonlinearity='leaky_relu',\n distribution='uniform',\n **kwargs)\n\n def __call__(self, module):\n super().__call__(module)\n\n\[email protected]_module(name='Pretrained')\nclass PretrainedInit(object):\n \"\"\"Initialize module by loading a pretrained model.\n\n Args:\n checkpoint (str): the checkpoint file of the pretrained model should\n be load.\n prefix (str, optional): the prefix of a sub-module in the pretrained\n model. it is for loading a part of the pretrained model to\n initialize. 
For example, if we would like to only load the\n backbone of a detector model, we can set ``prefix='backbone.'``.\n Defaults to None.\n map_location (str): map tensors into proper locations.\n \"\"\"\n\n def __init__(self, checkpoint, prefix=None, map_location=None):\n self.checkpoint = checkpoint\n self.prefix = prefix\n self.map_location = map_location\n\n def __call__(self, module):\n from mmcv.runner import (_load_checkpoint_with_prefix, load_checkpoint,\n load_state_dict)\n logger = get_logger('mmcv')\n if self.prefix is None:\n print_log(f'load model from: {self.checkpoint}', logger=logger)\n load_checkpoint(\n module,\n self.checkpoint,\n map_location=self.map_location,\n strict=False,\n logger=logger)\n else:\n print_log(\n f'load {self.prefix} in model from: {self.checkpoint}',\n logger=logger)\n state_dict = _load_checkpoint_with_prefix(\n self.prefix, self.checkpoint, map_location=self.map_location)\n load_state_dict(module, state_dict, strict=False, logger=logger)\n\n\ndef _initialize(module, cfg, wholemodule=False):\n func = build_from_cfg(cfg, INITIALIZERS)\n # wholemodule flag is for override mode, there is no layer key in override\n # and initializer will give init values for the whole module with the name\n # in override.\n func.wholemodule = wholemodule\n func(module)\n\n\ndef _initialize_override(module, override, cfg):\n if not isinstance(override, (dict, list)):\n raise TypeError(f'override must be a dict or a list of dict, \\\n but got {type(override)}')\n\n override = [override] if isinstance(override, dict) else override\n\n for override_ in override:\n\n cp_override = copy.deepcopy(override_)\n name = cp_override.pop('name', None)\n if name is None:\n raise ValueError('`override` must contain the key \"name\",'\n f'but got {cp_override}')\n # if override only has name key, it means use args in init_cfg\n if not cp_override:\n cp_override.update(cfg)\n # if override has name key and other args except type key, it will\n # raise error\n elif 'type' not in cp_override.keys():\n raise ValueError(\n f'`override` need \"type\" key, but got {cp_override}')\n\n if hasattr(module, name):\n _initialize(getattr(module, name), cp_override, wholemodule=True)\n else:\n raise RuntimeError(f'module did not have attribute {name}, '\n f'but init_cfg is {cp_override}.')\n\n\ndef initialize(module, init_cfg):\n \"\"\"Initialize a module.\n\n Args:\n module (``torch.nn.Module``): the module will be initialized.\n init_cfg (dict | list[dict]): initialization configuration dict to\n define initializer. 
OpenMMLab has implemented 6 initializers\n including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,\n ``Kaiming``, and ``Pretrained``.\n Example:\n >>> module = nn.Linear(2, 3, bias=True)\n >>> init_cfg = dict(type='Constant', layer='Linear', val =1 , bias =2)\n >>> initialize(module, init_cfg)\n\n >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2))\n >>> # define key ``'layer'`` for initializing layer with different\n >>> # configuration\n >>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),\n dict(type='Constant', layer='Linear', val=2)]\n >>> initialize(module, init_cfg)\n\n >>> # define key``'override'`` to initialize some specific part in\n >>> # module\n >>> class FooNet(nn.Module):\n >>> def __init__(self):\n >>> super().__init__()\n >>> self.feat = nn.Conv2d(3, 16, 3)\n >>> self.reg = nn.Conv2d(16, 10, 3)\n >>> self.cls = nn.Conv2d(16, 5, 3)\n >>> model = FooNet()\n >>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',\n >>> override=dict(type='Constant', name='reg', val=3, bias=4))\n >>> initialize(model, init_cfg)\n\n >>> model = ResNet(depth=50)\n >>> # Initialize weights with the pretrained model.\n >>> init_cfg = dict(type='Pretrained',\n checkpoint='torchvision://resnet50')\n >>> initialize(model, init_cfg)\n\n >>> # Initialize weights of a sub-module with the specific part of\n >>> # a pretrained model by using \"prefix\".\n >>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\\\n >>> 'retinanet_r50_fpn_1x_coco/'\\\n >>> 'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'\n >>> init_cfg = dict(type='Pretrained',\n checkpoint=url, prefix='backbone.')\n \"\"\"\n if not isinstance(init_cfg, (dict, list)):\n raise TypeError(f'init_cfg must be a dict or a list of dict, \\\n but got {type(init_cfg)}')\n\n if isinstance(init_cfg, dict):\n init_cfg = [init_cfg]\n\n for cfg in init_cfg:\n # should deeply copy the original config because cfg may be used by\n # other modules, e.g., one init_cfg shared by multiple bottleneck\n # blocks, the expected cfg will be changed after pop and will change\n # the initialization behavior of other modules\n cp_cfg = copy.deepcopy(cfg)\n override = cp_cfg.pop('override', None)\n _initialize(module, cp_cfg)\n\n if override is not None:\n cp_cfg.pop('layer', None)\n _initialize_override(module, override, cp_cfg)\n else:\n # All attributes in module have same initialization.\n pass\n\n\ndef _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float,\n b: float) -> Tensor:\n # Method based on\n # https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n # Modified from\n # https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py\n def norm_cdf(x):\n # Computes standard normal cumulative distribution function\n return (1. + math.erf(x / math.sqrt(2.))) / 2.\n\n if (mean < a - 2 * std) or (mean > b + 2 * std):\n warnings.warn(\n 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. 
'\n 'The distribution of values may be incorrect.',\n stacklevel=2)\n\n with torch.no_grad():\n # Values are generated by using a truncated uniform distribution and\n # then using the inverse CDF for the normal distribution.\n # Get upper and lower cdf values\n lower = norm_cdf((a - mean) / std)\n upper = norm_cdf((b - mean) / std)\n\n # Uniformly fill tensor with values from [lower, upper], then translate\n # to [2lower-1, 2upper-1].\n tensor.uniform_(2 * lower - 1, 2 * upper - 1)\n\n # Use inverse cdf transform for normal distribution to get truncated\n # standard normal\n tensor.erfinv_()\n\n # Transform to proper mean, std\n tensor.mul_(std * math.sqrt(2.))\n tensor.add_(mean)\n\n # Clamp to ensure it's in the proper range\n tensor.clamp_(min=a, max=b)\n return tensor\n\n\ndef trunc_normal_(tensor: Tensor,\n mean: float = 0.,\n std: float = 1.,\n a: float = -2.,\n b: float = 2.) -> Tensor:\n r\"\"\"Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn until they are within\n the bounds. The method used for generating the random values works\n best when :math:`a \\leq \\text{mean} \\leq b`.\n\n Modified from\n https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py\n\n Args:\n tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.\n mean (float): the mean of the normal distribution.\n std (float): the standard deviation of the normal distribution.\n a (float): the minimum cutoff value.\n b (float): the maximum cutoff value.\n \"\"\"\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)\n" ]
[ [ "torch.nn.init.uniform_", "numpy.log", "torch.nn.init.constant_", "torch.nn.init.xavier_normal_", "torch.nn.init.kaiming_uniform_", "torch.no_grad", "torch.nn.init.normal_", "torch.nn.init.xavier_uniform_", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
simonthor/zfit
[ "97a18cd6cf14240be2cf52185681d0132f866179" ]
[ "docs/plots/fftconv_spline_linear.py" ]
[ "# Copyright (c) 2020 zfit\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nimport zfit\n\n\ndef plot_conv_comparison():\n # test special properties here\n n_point_plotting = 2000\n obs = zfit.Space(\"obs1\", limits=(-5, 5))\n param1 = zfit.Parameter('param1', -3)\n param2 = zfit.Parameter('param2', 0.3)\n gauss1 = zfit.pdf.Gauss(0., param2, obs=obs)\n func1 = zfit.pdf.Uniform(param1, param2, obs=obs)\n func2 = zfit.pdf.Uniform(-1.2, -1, obs=obs)\n func = zfit.pdf.SumPDF([func1, func2], 0.5)\n n_points_conv = 50\n conv_lin = zfit.pdf.FFTConvPDFV1(func=func, kernel=gauss1, n=n_points_conv, interpolation='linear')\n conv_spline = zfit.pdf.FFTConvPDFV1(func=func, kernel=gauss1, n=n_points_conv, interpolation='spline')\n\n x = tf.linspace(-5., 3., n_point_plotting)\n probs_lin = conv_lin.pdf(x=x)\n probs_spline = conv_spline.pdf(x=x)\n\n plt.figure()\n plt.plot(x, probs_lin, label='linear')\n plt.plot(x, probs_spline, label='spline')\n plt.legend()\n plt.title(f\"FFT Conv with interpolation: {n_points_conv} points\")\n plt.show(block=True)\n\n\nif __name__ == '__main__':\n plot_conv_comparison()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "tensorflow.linspace", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jeongho2/ssd.myPytorch
[ "fdb30a309cd3378afe7510493b1363161f54545c", "fdb30a309cd3378afe7510493b1363161f54545c" ]
[ "setuptest.py", "data/raccoon.py" ]
[ "import torch\n\nprint(torch.__version__)\nprint(torch.cuda.is_available())\n\nimport cv2\n", "\"\"\" \nCOCO Raccon Dataset Classes\n\"\"\"\n\nfrom .config import HOME\nimport os\nimport os.path as osp\nimport sys\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport cv2\nimport numpy as np\n\nCOCO_RACCOON_ROOT = osp.join(HOME, 'data/coco_raccoon')\nIMAGES = 'images'\nANNOTATIONS = 'annotations'\nCOCO_RACCOON_API = 'PythonAPI'\nINSTANCES_SET = '{}.coco.json'\nCOCO_RACCOON_CLASSES = ( # always index 0\n 'raccoon')\n\ndef get_label_map(label_file):\n label_map = {}\n labels = open(label_file, 'r')\n for line in labels:\n ids = line.split(',')\n label_map[int(ids[0])] = int(ids[1])\n return label_map\n\n\nclass COCORaccoonAnnotationTransform(object):\n \"\"\"Transforms a COCO Raccoon annotation into a Tensor of bbox coords and label index\n Initilized with a dictionary lookup of classnames to indexes\n \"\"\"\n def __init__(self):\n self.label_map = get_label_map(osp.join(COCO_RACCOON_ROOT, 'coco_raccoon_labels.txt'))\n\n def __call__(self, target, width, height):\n \"\"\"\n Args:\n target (dict): COCO Racoon target json annotation as a python dict\n height (int): height\n width (int): width\n Returns:\n a list containing lists of bounding boxes [bbox coords, class idx]\n \"\"\"\n scale = np.array([width, height, width, height])\n res = []\n for obj in target:\n if 'bbox' in obj:\n bbox = obj['bbox']\n bbox[2] += bbox[0]\n bbox[3] += bbox[1]\n label_idx = self.label_map[obj['category_id']] - 1\n final_box = list(np.array(bbox)/scale)\n final_box.append(label_idx)\n res += [final_box] # [xmin, ymin, xmax, ymax, label_idx]\n else:\n print(\"no bbox problem!\")\n\n return res # [[xmin, ymin, xmax, ymax, label_idx], ... 
]\n\n\nclass COCORaccoonDetection(data.Dataset):\n \"\"\"`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.\n Args:\n root (string): Root directory where images are downloaded to.\n set_name (string): Name of the specific set of COCO images.\n transform (callable, optional): A function/transform that augments the\n raw images`\n target_transform (callable, optional): A function/transform that takes\n in the target (bbox) and transforms it.\n \"\"\"\n\n def __init__(self, root, image_set='train', transform=None,\n target_transform=COCORaccoonAnnotationTransform(), dataset_name='Roboflow COCO Raccon'):\n sys.path.append(osp.join(root, COCO_RACCOON_API))\n from pycocotools.coco import COCO\n self.root = osp.join(root, IMAGES, image_set)\n self.coco = COCO(osp.join(root, ANNOTATIONS,\n INSTANCES_SET.format(image_set)))\n self.ids = list(self.coco.imgToAnns.keys())\n self.transform = transform\n self.target_transform = target_transform\n self.name = dataset_name\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, target).\n target is the object returned by ``coco.loadAnns``.\n \"\"\"\n im, gt, h, w = self.pull_item(index)\n return im, gt\n\n def __len__(self):\n return len(self.ids)\n\n def pull_item(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, target, height, width).\n target is the object returned by ``coco.loadAnns``.\n \"\"\"\n img_id = self.ids[index]\n target = self.coco.imgToAnns[img_id]\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\n\n target = self.coco.loadAnns(ann_ids)\n path = osp.join(self.root, self.coco.loadImgs(img_id)[0]['file_name'])\n assert osp.exists(path), 'Image path does not exist: {}'.format(path)\n img = cv2.imread(osp.join(self.root, path))\n height, width, _ = img.shape\n if self.target_transform is not None:\n target = self.target_transform(target, width, height)\n if self.transform is not None:\n target = np.array(target)\n img, boxes, labels = self.transform(img, target[:, :4],\n target[:, 4])\n # to rgb\n img = img[:, :, (2, 1, 0)]\n\n target = np.hstack((boxes, np.expand_dims(labels, axis=1)))\n return torch.from_numpy(img).permute(2, 0, 1), target, height, width\n\n def pull_image(self, index):\n '''Returns the original image object at index in PIL form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n cv2 img\n '''\n img_id = self.ids[index]\n path = self.coco.loadImgs(img_id)[0]['file_name']\n return cv2.imread(osp.join(self.root, path), cv2.IMREAD_COLOR)\n\n def pull_anno(self, index):\n '''Returns the original annotation of image at index\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to get annotation of\n Return:\n list: [img_id, [(label, bbox coords),...]]\n eg: ('001718', [('dog', (96, 13, 438, 332))])\n '''\n img_id = self.ids[index]\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\n return self.coco.loadAnns(ann_ids)\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += 
'{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n" ]
[ [ "torch.cuda.is_available" ], [ "numpy.array", "numpy.expand_dims", "torch.from_numpy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bharathjatoth/Reinforcement_learning
[ "c3baecd02cbc2d2d4f7a33a57d961c5cd1a3bee6" ]
[ "Algotrade.py" ]
[ "'''\npredicting the stock prices with the help of reinforcement learning\nthis tutorial goes in step by step procedure and uses q learning\ndata used: from yahoo finance\nauthor : Bharath Kumar (bharathjatoth.github.io)\nStep 1 : import all the libraries which are used by the algo\n'''\nimport keras\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.layers import Dense\nfrom keras.optimizer_v2.adam import Adam\nimport math\nimport random,sys\nfrom collections import deque\n\n#creating a class agent which has all the static data threshold values\nclass Agent:\n def __init__(self,state_size,is_eval=False,model_name=\"\"):\n self.state_size = state_size\n self.action_size = 3 #buy sell and hold\n self.memory = deque(maxlen=100)\n self.inventory = []\n self.model_name = model_name\n self.is_eval = is_eval\n self.gamma = 0.99\n self.epsilon = 1.0\n self.epsilon_min = 0.01\n self.epsilon_decay = 0.998\n self.model = load_model(model_name) if is_eval else self._model()\n\n def _model(self):\n model = Sequential()\n model.add(Dense(units=64,input_dim=self.state_size,activation='relu'))\n model.add(Dense(units=32,activation='relu'))\n model.add(Dense(units=8,activation=\"relu\"))\n model.add(Dense(self.action_size,activation=\"linear\"))\n model.compile(loss='mse',optimizer=Adam(lr=0.0001))\n return model\n\n def act(self,state):\n if not self.is_eval and random.random()<=self.epsilon:\n return random.randrange(self.action_size)\n options = self.model.predict(state)\n return np.argmax(options[0])\n\n def expreplay(self,batch_size):\n minibatch = []\n l = len(self.memory)\n for i in range(1-batch_size+1,1): minibatch.append(self.memory[i])\n for state, action,reward, next_state,done in minibatch:\n target = reward\n if not done:\n target = reward + self.gamma*np.argmax(self.model.predict(next_state)[0])\n target_f = self.model.predict(state)\n target_f[0][action] = target\n self.model.fit(state,target_f,epochs=1,verbose=0)\n if self.epsilon > self.epsilon_min: self.epsilon *=self.epsilon_decay\n\ndef format_price(n):\n return (\"-Rs. 
\" if n<0 else \"Rs.\"+\"{:.2f}%\".format(abs(n)))\n\ndef getstockdatavec(key):\n vec = []\n lines = open(r'filename to be input from here','r').read().splitlines()\n for line in lines[1:]:\n if line.split(\",\")[4] != \"null\":\n vec.append(float(line.split(\",\")[4]))\n return vec\n\ndef sigmoid(x):\n return 1/(1+math.exp(-x))\n\ndef getstate(data,t,n):\n d = t-n+1\n block = data[d:t+1] if d>=0 else -d*[data[0]] + data[0:t+1]\n res = []\n for i in range(n-1):\n res.append(sigmoid(block[i+1]-block[i]))\n return np.array([res])\n\n#training the agent\nstock_name = input(\"Enter the stock name, window_size, episode_count : \")\nwindow_size = input()\nepisode_count = input()\nstock_name = str(stock_name)\nwindow_size = int(window_size)\nepisode_count = int(episode_count)\nagent = Agent(window_size)\ndata = getstockdatavec(stock_name)\nl = len(data) - 1\nbatch_size = 32\nfor episode in range(episode_count+1):\n print(\"episode number : \",episode)\n state = getstate(data,0,window_size+1)\n total_profit = 0\n agent.inventory = []\n for t in range(l):\n print(state)\n action = agent.act(state)\n next_state = getstate(data,t+1,window_size+1)\n reward = 0\n if action==1: #buy\n agent.inventory.append(data[t])\n print('buy : ',format_price(data[t]))\n elif action==2 and len(agent.inventory) > 0:\n #sell the share\n bought_price = window_size_price = agent.inventory.pop(0)\n reward = max(data[t] - bought_price,0)\n total_profit += data[t]-bought_price\n print(\"profit : \",format_price(data[t]-bought_price))\n done=True if t == l-1 else False\n agent.memory.append((state,action,reward,next_state,done))\n state = next_state\n if done:\n print('total profit ---------------',format_price(total_profit))\n if len(agent.memory) > batch_size:\n agent.expreplay(batch_size)\n if episode % 10 == 0:\n agent.model.save(str(episode))\n\n" ]
[ [ "numpy.array", "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
elviva404/food-detection-yolov5
[ "796a0c1df6e9c9a705dff7782b3f9b213344f11b", "796a0c1df6e9c9a705dff7782b3f9b213344f11b" ]
[ "model/models/classifier.py", "model/models/backbone.py" ]
[ "import torch\nfrom .base_model import BaseModel\n\nclass Classifier(BaseModel):\n def __init__(self, model, **kwargs):\n super(Classifier, self).__init__(**kwargs)\n self.model = model\n self.model_name = self.model.name\n if self.optimizer is not None:\n self.optimizer = self.optimizer(self.parameters(), lr= self.lr)\n self.set_optimizer_params()\n\n if self.freeze:\n for params in self.model.parameters():\n params.requires_grad = False\n\n if self.device:\n self.model.to(self.device)\n \n def forward(self, x):\n return self.model(x)\n\n def training_step(self, batch):\n outputs = self.model(batch, self.device)\n targets = batch['targets'].to(self.device)\n loss = self.criterion(outputs, targets)\n loss_dict = {'T': loss.item()}\n return loss, loss_dict\n\n def inference_step(self, batch, return_probs=False):\n outputs = self.model(batch, self.device)\n preds = torch.argmax(outputs, dim=1)\n preds = preds.detach()\n if return_probs:\n probs = torch.nn.functional.softmax(outputs, dim=1)\n probs, _ = torch.max(probs, dim=1)\n return preds.cpu().numpy(), probs.cpu().numpy()\n else:\n return preds.numpy()\n\n def evaluate_step(self, batch):\n outputs = self.model(batch, self.device)\n targets = batch['targets'].to(self.device)\n loss = self.criterion(outputs, targets)\n loss_dict = {'T': loss.item()}\n\n self.update_metrics(outputs = outputs, targets = targets)\n return loss, loss_dict", "# Author: Zylo117\nimport os\nimport timm\nimport numpy as np\n\nimport torch\nfrom torch import nn\n\nfrom .yolo import YoloLoss, Yolov4, non_max_suppression, Yolov5\nfrom model.utils.utils import download_pretrained_weights\n\nCACHE_DIR='./.cache'\n\ndef get_model(args, config, num_classes):\n \n NUM_CLASSES = num_classes\n max_post_nms = config.max_post_nms if config.max_post_nms > 0 else None\n max_pre_nms = config.max_pre_nms if config.max_pre_nms > 0 else None\n load_weights = True\n \n net = None\n\n version_name = config.model_name.split('v')[1]\n net = YoloBackbone(\n version_name=version_name,\n load_weights=load_weights, \n num_classes=NUM_CLASSES, \n max_pre_nms=max_pre_nms,\n max_post_nms=max_post_nms)\n \n return net\n\nclass BaseBackbone(nn.Module):\n def __init__(self, **kwargs):\n super(BaseBackbone, self).__init__()\n pass\n def forward(self, batch):\n pass\n def detect(self, batch):\n pass\n\nclass BaseTimmModel(nn.Module):\n \"\"\"Some Information about BaseTimmModel\"\"\"\n\n def __init__(\n self,\n num_classes,\n name=\"vit_base_patch16_224\",\n from_pretrained=True,\n freeze_backbone=False,\n ):\n super().__init__()\n self.name = name\n self.model = timm.create_model(name, pretrained=from_pretrained)\n if name.find(\"nfnet\") != -1:\n self.model.head.fc = nn.Linear(self.model.head.fc.in_features, num_classes)\n elif name.find(\"efficientnet\") != -1:\n self.model.classifier = nn.Linear(\n self.model.classifier.in_features, num_classes\n )\n elif name.find(\"resnext\") != -1:\n self.model.fc = nn.Linear(self.model.fc.in_features, num_classes)\n elif name.find(\"vit\") != -1:\n self.model.head = nn.Linear(self.model.head.in_features, num_classes)\n elif name.find(\"densenet\") != -1:\n self.model.classifier = nn.Linear(\n self.model.classifier.in_features, num_classes\n )\n else:\n assert False, \"Classifier block not included in TimmModel\"\n\n self.model = nn.DataParallel(self.model)\n\n def forward(self, batch, device):\n inputs = batch[\"imgs\"]\n inputs = inputs.to(device)\n outputs = self.model(inputs)\n return outputs\n\nclass YoloBackbone(BaseBackbone):\n def __init__(\n 
self,\n version_name='5s',\n num_classes=80, \n max_pre_nms=None,\n load_weights=True, \n max_post_nms=None,\n **kwargs):\n\n super(YoloBackbone, self).__init__(**kwargs)\n\n if max_pre_nms is None:\n max_pre_nms = 30000\n self.max_pre_nms = max_pre_nms\n\n if max_post_nms is None:\n max_post_nms = 1000\n self.max_post_nms = max_post_nms\n\n version = version_name[0]\n if version=='4':\n version_mode = version_name.split('-')[1]\n self.name = f'yolov4-{version_mode}'\n self.model = Yolov4(\n cfg=f'./model/models/yolo/configs/yolov4-{version_mode}.yaml', ch=3, nc=num_classes\n )\n elif version =='5':\n version_mode = version_name[-1]\n self.name = f'yolov5{version_mode}'\n self.model = Yolov5(\n cfg=f'./model/models/yolo/configs/yolov5{version_mode}.yaml', ch=3, nc=num_classes\n )\n \n\n if load_weights:\n tmp_path = os.path.join(CACHE_DIR, f'yolov{version_name}.pth')\n download_pretrained_weights(f'yolov{version_name}', tmp_path)\n ckpt = torch.load(tmp_path, map_location='cpu') # load checkpoint\n try:\n ret = self.model.load_state_dict(ckpt, strict=False) \n except:\n pass\n print(\"Loaded pretrained model\")\n\n self.model = nn.DataParallel(self.model)\n self.loss_fn = YoloLoss(\n num_classes=num_classes,\n model=self.model)\n\n self.num_classes = num_classes\n\n def forward(self, batch, device):\n inputs = batch[\"imgs\"]\n targets = batch['yolo_targets']\n\n inputs = inputs.to(device)\n targets = targets.to(device)\n \n if self.model.training:\n outputs = self.model(inputs)\n else:\n _ , outputs = self.model(inputs)\n\n loss, loss_items = self.loss_fn(outputs, targets)\n\n ret_loss_dict = {\n 'T': loss,\n 'IOU': loss_items[0],\n 'OBJ': loss_items[1],\n 'CLS': loss_items[2],\n }\n return ret_loss_dict\n\n def detect(self, batch, device):\n inputs = batch[\"imgs\"]\n inputs = inputs.to(device)\n outputs, _ = self.model(inputs)\n outputs = non_max_suppression(\n outputs, \n conf_thres=0.0001, \n iou_thres=0.8, \n max_nms=self.max_pre_nms,\n max_det=self.max_post_nms) #[bs, max_det, 6]\n \n out = []\n for i, output in enumerate(outputs):\n # [x1,y1,x2,y2, score, label]\n if output is not None and len(output) != 0:\n output = output.detach().cpu().numpy()\n boxes = output[:, :4]\n boxes[:,[0,2]] = boxes[:,[0,2]] \n boxes[:,[1,3]] = boxes[:,[1,3]] \n\n # Convert labels to COCO format\n labels = output[:, -1] + 1\n scores = output[:, -2]\n \n else:\n boxes = []\n labels = []\n scores = []\n if len(boxes) > 0:\n out.append({\n 'bboxes': boxes,\n 'classes': labels,\n 'scores': scores,\n })\n else:\n out.append({\n 'bboxes': np.array(()),\n 'classes': np.array(()),\n 'scores': np.array(()),\n })\n\n return out\n\ndef freeze_bn(model):\n def set_bn_eval(m):\n classname = m.__class__.__name__\n if \"BatchNorm2d\" in classname:\n m.affine = False\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n m.eval() \n model.apply(set_bn_eval)\n\n\n\n\n \n" ]
[ [ "torch.nn.functional.softmax", "torch.max", "torch.argmax" ], [ "torch.nn.Linear", "torch.nn.DataParallel", "numpy.array", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AntonioCCosta/wormpose
[ "a92c0459f940d6e1f517b90d3a445ad3c0689ed8", "a92c0459f940d6e1f517b90d3a445ad3c0689ed8" ]
[ "wormpose/dataset/features.py", "wormpose/dataset/image_processing/frame_preprocessor.py" ]
[ "\"\"\"\nThis module deals with loading features from a dataset.\n\nIt will calculate extra features such as the worm length.\n\"\"\"\n\nfrom typing import Dict, Tuple\n\nimport numpy as np\n\n\nclass Features(object):\n def __init__(self, raw_features: dict):\n\n _validate_features(raw_features)\n\n self.skeletons = raw_features[\"skeletons\"]\n self.frame_rate = raw_features[\"frame_rate\"]\n self.timestamp = raw_features[\"timestamp\"] if \"timestamp\" in raw_features else None\n self.ventral_side = raw_features[\"ventral_side\"] if \"ventral_side\" in raw_features else None\n\n # set defaults for optional values\n if self.ventral_side is None:\n self.ventral_side = \"unknown\"\n if self.timestamp is None: # default behavior: all frames are equidistant in time\n self.timestamp = np.arange(0, len(self.skeletons))\n\n self._set_measurements(raw_features)\n self._set_labelled_indexes()\n\n def _set_labelled_indexes(self):\n # Save indexes where skeleton is valid (not nan)\n skel_is_not_nan = ~np.any(np.isnan(self.skeletons), axis=(1, 2))\n self.labelled_indexes = np.where(skel_is_not_nan)[0]\n\n def _set_measurements(self, raw_features: dict):\n worm_length = _calculate_worm_length(self.skeletons)\n self.measurements = np.stack(\n [\n worm_length,\n raw_features[\"head_width\"],\n raw_features[\"midbody_width\"],\n raw_features[\"tail_width\"],\n ],\n axis=1,\n )\n self.measurements.dtype = {\n \"names\": (\"worm_length\", \"head_width\", \"midbody_width\", \"tail_width\"),\n \"formats\": (float, float, float, float),\n }\n\n\nFeaturesDict = Dict[str, Features]\n\n\ndef _calculate_worm_length(skeletons):\n worm_length = np.full(len(skeletons), np.nan, dtype=float)\n where_skel_nan = np.where(~np.any(np.isnan(skeletons), axis=(1, 2)))\n for i in where_skel_nan[0]:\n skel = skeletons[i]\n worm_length[i] = np.sum(np.sqrt(np.sum((skel[:-1] - skel[1:]) ** 2, axis=1)))\n return worm_length\n\n\ndef _validate_features(raw_features: dict):\n if not (\n len(raw_features[\"skeletons\"])\n == len(raw_features[\"head_width\"])\n == len(raw_features[\"midbody_width\"])\n == len(raw_features[\"tail_width\"])\n ):\n raise ValueError(\"inconsistent features\")\n if not (\n raw_features[\"head_width\"].dtype == raw_features[\"midbody_width\"].dtype == raw_features[\"tail_width\"].dtype\n ) or not np.issubdtype(raw_features[\"head_width\"].dtype, np.floating):\n raise TypeError(\"Body measurements type should be identical and floating point\")\n if not np.issubdtype(raw_features[\"skeletons\"].dtype, np.floating):\n raise TypeError(\"Skeleton type should be floating point\")\n if len(raw_features[\"skeletons\"].shape) != 3 or raw_features[\"skeletons\"].shape[2] != 2:\n raise ValueError(\"Wrong skeleton shape\")\n if raw_features[\"skeletons\"].shape[1] < 20:\n raise UserWarning(\n \"Low number of skeleton joints (< 20), consider interpolating to improve quality of synthetic images\"\n )\n if raw_features.get(\"timestamp\") is not None and len(raw_features[\"timestamp\"]) != len(raw_features[\"skeletons\"]):\n raise ValueError(\"Inconsistent timestamp\")\n\n\ndef calculate_max_average_worm_length(features: FeaturesDict) -> float:\n \"\"\"\n Calculates the average worm length from each video, and returns the maximum.\n\n :param features: A dictionary of Features\n :return: Biggest average worm length from all videos\n \"\"\"\n return float(np.nanmax([np.nanmean(x.measurements[\"worm_length\"]) for x in features.values()]))\n\n\nMINIMUM_IMAGE_SIZE = 32\n\n\ndef calculate_crop_window_size(features: 
FeaturesDict) -> Tuple[int, int]:\n \"\"\"\n Returns an image shape that is just big enough to view the worm object,\n the image will be cropped (or expanded) to that size as an input to the neural network\n Can be overriden in child classes for another behavior (for example a fixed chosen size)\n :param features: A dictionary of Features\n :return: A tuple of two integer values (height, width)\n \"\"\"\n import math\n\n # calculate the image shape as maximum of all the average worm lengths for each video\n # then we know the longest worm will fit the image shape,\n # the smaller ones will just have more background space\n max_average_worm_length = calculate_max_average_worm_length(features)\n if np.isnan(max_average_worm_length):\n raise ValueError(\n \"Can't calculate the crop window size: \"\n \"couldn't get the max average worm length in the dataset.\"\n \" Please check the labeled features in this dataset.\"\n )\n\n crop_size = max(MINIMUM_IMAGE_SIZE, int(max_average_worm_length))\n\n # round to even number\n crop_size = math.ceil(float(crop_size) / 2) * 2\n return crop_size, crop_size\n", "\"\"\"\nApplies safely the frame preprocessing function to a frame,\nset the background pixels to a uniform value, deduces the region of interest\n\"\"\"\nfrom typing import Tuple\n\nimport numpy as np\n\nfrom wormpose.dataset.base_dataset import BaseFramePreprocessing\n\n\ndef run(frame_preprocessing: BaseFramePreprocessing, frame: np.ndarray) -> Tuple[np.ndarray, int, Tuple[slice, slice]]:\n \"\"\"\n Safely preprocesses an image, set the background pixels to a uniform color, calculates worm region of interest\n\n :param frame_preprocessing: Frame preprocessing logic\n :param frame: Image to preprocess\n :return: Processed image, value of the background color, region of interest coordinates\n \"\"\"\n\n # copy to avoid modifying the source image\n frame_copy = np.copy(frame)\n\n # call the frame preprocessing function to get the segmented image\n segmentation_mask, background_color = frame_preprocessing.process(frame_copy)\n\n # enforces background color type\n background_color = int(background_color)\n\n # erase background, set everything not the foreground to a uniform color\n frame_copy[segmentation_mask == 0] = background_color\n\n # get region of interest (full image if no worm is found)\n where_worm = np.where(segmentation_mask != 0)\n if len(where_worm[0]) == 0 or len(where_worm[1]) == 0:\n worm_roi = np.s_[0 : frame_copy.shape[0], 0 : frame_copy.shape[1]]\n else:\n worm_roi = np.s_[\n np.min(where_worm[0]) : np.max(where_worm[0]),\n np.min(where_worm[1]) : np.max(where_worm[1]),\n ]\n\n return frame_copy, background_color, worm_roi\n" ]
[ [ "numpy.isnan", "numpy.issubdtype", "numpy.stack", "numpy.nanmean", "numpy.where", "numpy.sum" ], [ "numpy.max", "numpy.copy", "numpy.where", "numpy.min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]