repo_name: string (length 6-130)
hexsha: list
file_path: list
code: list
apis: list
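Each record pairs a repository name with parallel lists of commit hexshas, file paths, full source files, and the fully-qualified APIs called in each file. As a minimal sketch of how rows with this schema could be consumed, assuming the dump were exported as JSON Lines (the file name and format below are assumptions, not given by this dump):

import json

# Assumed JSON Lines export of this dump: one record per line, with the
# fields named in the schema above ("records.jsonl" is a placeholder name).
with open("records.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        repo = record["repo_name"]      # e.g. "ppeigne/optimizers"
        sha = record["hexsha"][0]       # commit the file was taken from
        path = record["file_path"][0]   # path of the file inside the repo
        source = record["code"][0]      # full file contents as a single string
        apis = record["apis"][0]        # fully-qualified APIs called in the file
        print(f"{repo} @ {sha[:8]} :: {path} ({len(apis)} APIs, {len(source)} chars)")

The parallel-list layout appears to allow several files per repository record, which is why hexsha, file_path, code, and apis are read with the same index.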
sabuj7177/TensorFI2
[ "6272a3951793255815506f143748bdd9345c1d2f" ]
[ "experiments/layer-states/vgg16-imagenet.py" ]
[ "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport tensorflow as tf\n\nimport numpy as np\nimport random\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\nfrom tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions\n\nimport time, sys, math\n\nfrom src import tensorfi2 as tfi\n\nmodel = tf.keras.applications.VGG16(\n include_top=True, weights='imagenet', input_tensor=None,\n input_shape=None, pooling=None, classes=1000)\n\nmodel.compile(optimizer='sgd', loss='categorical_crossentropy')\n\n#model.save_weights('h5/vgg16-trained.h5')\n\nnumImages = 10\n\n#https://gist.github.com/aaronpolhamus/964a4411c0906315deb9f4a3723aac57\nimagesets = ['n02442845', 'n15075141', 'n02457408', 'n03642806', 'n03100240', 'n03792782', 'n03131574', 'n13133613', 'n12144580', 'n02992211']\nlabels = ['mink', 'toilet_tissue', 'three-toed_sloth', 'laptop', 'convertible', 'mountain_bike', 'crib', 'ear', 'corn', 'cello']\n#classes = [23, 889, 38, 228, 268, 255, 298, 329, 331, 342]\n\nimages = []\nimg_labels = []\n\nfor i, l in zip(imagesets, labels):\n abspath = '/home/nniranjhana/datasets/imagenet18/validation/'\n abspathi = os.path.join(abspath, i)\n for j in range(numImages):\n rand_file = random.choice(os.listdir(abspathi))\n path = os.path.join(abspathi, rand_file)\n image = load_img(path, target_size=(224, 224))\n image = img_to_array(image)\n image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))\n image = preprocess_input(image)\n out = model.predict(image)\n label = decode_predictions(out)\n label = label[0][0]\n if(label[1] == l):\n images.append(path)\n img_labels.append(l)\n\nind = random.sample(range(len(images)), 10)\n\nconf = sys.argv[1]\nfilePath = sys.argv[2]\nfilePath = os.path.join(filePath, \"res.csv\")\n\nf = open(filePath, \"w\")\nnumFaults = int(sys.argv[3])\nnumInjections = 10\n#numInjections = int(sys.argv[4])\n\ntotsdc = 0.0\n\nstart = time.time()\nfor i in range(numFaults):\n model.load_weights('h5/vgg16-trained.h5')\n tfi.inject(model=model, confFile=conf)\n sdc = 0.\n for i in ind:\n image = load_img(images[i], target_size=(224, 224))\n image = img_to_array(image)\n image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))\n image = preprocess_input(image)\n out = model.predict(image)\n label_ = decode_predictions(out)\n label_ = label_[0][0]\n if(label_[1] != img_labels[i]):\n sdc = sdc + 1.\n f.write(str(sdc/numInjections))\n f.write(\"\\n\")\n totsdc = totsdc + sdc\nf.write(\"\\n\")\nf.write(str(totsdc/(numFaults*numInjections)))\nf.write(\"\\n\")\nf.write(\"Time for %d injections: %f seconds\" % (numFaults*numInjections, time.time() - start))\nf.close()\n" ]
[ [ "tensorflow.keras.preprocessing.image.load_img", "tensorflow.keras.preprocessing.image.img_to_array", "tensorflow.keras.applications.vgg16.decode_predictions", "tensorflow.keras.applications.VGG16", "tensorflow.keras.applications.vgg16.preprocess_input" ] ]
aaita92/qiskit-aqua
[ "8681045790123eefc347dfd05dee547bddc3d2df" ]
[ "qiskit/aqua/components/uncertainty_models/univariate_variational_distribution.py" ]
[ "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nThe Univariate Variational Distribution.\n\"\"\"\n\nfrom typing import Union, List\nimport numpy as np\n\nfrom qiskit import ClassicalRegister\nfrom qiskit.aqua.components.variational_forms import VariationalForm\nfrom qiskit.aqua.utils.validation import validate_min\nfrom .univariate_distribution import UnivariateDistribution\n\n\nclass UnivariateVariationalDistribution(UnivariateDistribution):\n \"\"\"\n The Univariate Variational Distribution.\n \"\"\"\n\n def __init__(self,\n num_qubits: int,\n var_form: VariationalForm,\n params: [Union[List[float], np.ndarray]],\n low: float = 0,\n high: float = 1) -> None:\n validate_min('num_qubits', num_qubits, 1)\n self._num_qubits = num_qubits\n self._var_form = var_form\n self.params = params\n if isinstance(num_qubits, int):\n probabilities = np.zeros(2 ** num_qubits)\n elif isinstance(num_qubits, float):\n probabilities = np.zeros(2 ** int(num_qubits))\n else:\n probabilities = np.zeros(2 ** sum(num_qubits))\n super().__init__(num_qubits, probabilities, low, high)\n\n def build(self, qc, q, q_ancillas=None, params=None):\n circuit_var_form = self._var_form.construct_circuit(self.params)\n qc.append(circuit_var_form.to_instruction(), q)\n\n def set_probabilities(self, quantum_instance):\n \"\"\"\n Set Probabilities\n Args:\n quantum_instance (QuantumInstance): Quantum instance\n \"\"\"\n qc_ = self._var_form.construct_circuit(self.params)\n\n # q_ = QuantumRegister(self._num_qubits)\n # qc_ = QuantumCircuit(q_)\n # self.build(qc_, None)\n\n if quantum_instance.is_statevector:\n pass\n else:\n c__ = ClassicalRegister(self._num_qubits, name='c')\n qc_.add_register(c__)\n qc_.measure(qc_.qregs[0], c__)\n result = quantum_instance.execute(qc_)\n if quantum_instance.is_statevector:\n result = result.get_statevector(qc_)\n values = np.multiply(result, np.conj(result))\n values = list(values.real)\n else:\n result = result.get_counts(qc_)\n keys = list(result)\n values = list(result.values())\n values = [float(v) / np.sum(values) for v in values]\n values = [x for _, x in sorted(zip(keys, values))]\n\n probabilities = values\n self._probabilities = np.array(probabilities)\n" ]
[ [ "numpy.sum", "numpy.array", "numpy.conj", "numpy.zeros" ] ]
kencan7749/particles_detection_fsr
[ "9845eb58da0d948461c5d548e81e5ce4e9b790a0" ]
[ "point_classification/2_learning/train_cluster/121_train_big_dataset_dual_point_cloud_geometry.py" ]
[ "# TensorFlow and tf.keras\nimport tensorflow as tf\nimport tensorflow.contrib as tfcontrib\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras import losses\nfrom tensorflow.python.keras import models\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.client import session as sess\n\n# Helper libraries\nimport numpy as np\nimport os\nimport glob\nimport zipfile\nimport functools\nimport h5py\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--cluster\", help=\"Runs script on cluster\")\nargs = parser.parse_args()\npath = './dataset/'\nfor run in range(10):\n\n # Data definitions\n file_names = [\"1-dust\", \"2-dust\", \"3-dust\", \"4-dust\", \"5-dust\", \"6-dust\", \"7-dust\",\n \"8-dust\", \"9-smoke\", \"10-smoke\", \"11-smoke\", \"12-smoke\", \"13-smoke\",\n \"14-smoke\", \"15-smoke\", \"16-smoke\", \"17-smoke\", \"18-smoke\", \"19-smoke\"]\n #file_names = [\"1-dust\", \"2-dust\"]\n metapath = \"metadata.npy\"\n\n train_indices = [1, 2, 3, 6, 7, 8, 10, 11, 13, 16, 18]\n #the public dataset is missing 9-\n train_indices = [1, 2, 3, 6, 7, 10, 11, 13, 16, 18]\n test_indices = [0, 4, 5, 9, 12, 14, 15, 17]\n #train_indices = [0,1]\n #test_indices = [0,1]\n NAME = '121_dual_point_cloud_geometry_run_' + str(run+1)\n\n # In case we run it on the local pc\n if not args.cluster:\n for i in range(len(file_names)):\n file_names[i] = path + file_names[i]\n metapath = path + metapath\n # Complete filenames\n for i in range(len(file_names)):\n file_names[i] = file_names[i] + \"_labeled_spaces_img.npy\"\n\n # Import metadata\n metadata = np.load(metapath)\n metadata = metadata.astype(int)\n\n # Load data\n images_train = np.load(file_names[train_indices[0]])[metadata[train_indices[0],0]:metadata[train_indices[0],1]]\n meta = metadata[train_indices[0], 2:4]\n meta_train = np.zeros([len(images_train), 2])\n meta_train[:,:] = meta\n images_test = np.load(file_names[test_indices[0]])[metadata[test_indices[0],0]:metadata[test_indices[0],1]]\n meta = metadata[test_indices[0], 2:4]\n meta_test = np.zeros([len(images_test), 2])\n meta_test[:,:] = meta\n # Training Set\n for i in range(len(train_indices)-1):\n current_images = np.load(file_names[train_indices[i+1]])[metadata[train_indices[i+1],\n 0]:metadata[train_indices[i+1],1]]\n meta = metadata[train_indices[i + 1], 2:4]\n meta_vector = np.zeros([len(current_images), 2])\n meta_vector[:,:] = meta\n images_train = np.concatenate([images_train, current_images], axis = 0)\n meta_train = np.concatenate([meta_train, meta_vector], axis = 0)\n print(images_train.shape)\n print(meta_train.shape)\n # Test Set\n for i in range(len(test_indices)-1):\n current_images = np.load(file_names[test_indices[i+1]])[metadata[test_indices[i+1],\n 0]:metadata[test_indices[i+1],1]]\n meta = metadata[test_indices[i + 1], 2:4]\n meta_vector = np.zeros([len(current_images), 2])\n meta_vector[:,:] = meta\n images_test = np.concatenate([images_test, current_images], axis = 0)\n meta_test = np.concatenate([meta_test, meta_vector], axis=0)\n print(images_test.shape)\n print(meta_test.shape)\n\n # --------------------------------Start of actual software---------------------------------\n\n features_train = images_train[:,:,:,[0,6]] # Here change number of channels\n labels_train = images_train[:,:,:,12:15]\n features_test = images_test[:,:,:,[0,6]] # Here change number of channels\n labels_test = images_test[:,:,:,12:15]\n del images_train\n del images_test\n\n width = 2172 # So far empricially for all datasets\n 
width_pixel = 512\n img_shape = (32, 512, 2) # 512 comes from approx 90 degrees (360 degrees are 2172) also since it can be divided by 32\n # Change input channels here\n number_labels = 3\n batch_size = 32\n epochs = 50\n num_train_examples = len(features_train)\n num_test_examples = len(features_test)\n\n # Building the model\n\n def conv_block(input_tensor, num_filters):\n encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor)\n encoder = layers.BatchNormalization()(encoder)\n encoder = layers.Activation('relu')(encoder)\n encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(encoder)\n encoder = layers.BatchNormalization()(encoder)\n encoder = layers.Activation('relu')(encoder)\n return encoder\n\n\n def encoder_block(input_tensor, num_filters):\n encoder = conv_block(input_tensor, num_filters)\n encoder_pool = layers.MaxPooling2D((1, 2), strides=(1, 2))(encoder)\n\n return encoder_pool, encoder\n\n\n def decoder_block(input_tensor, concat_tensor, num_filters):\n decoder = layers.Conv2DTranspose(num_filters, (1, 2), strides=(1, 2), padding='same')(input_tensor)\n decoder = layers.concatenate([concat_tensor, decoder], axis=-1)\n decoder = layers.BatchNormalization()(decoder)\n decoder = layers.Activation('relu')(decoder)\n decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)\n decoder = layers.BatchNormalization()(decoder)\n decoder = layers.Activation('relu')(decoder)\n decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)\n decoder = layers.BatchNormalization()(decoder)\n decoder = layers.Activation('relu')(decoder)\n return decoder\n\n inputs = layers.Input(shape=img_shape)\n # 2144\n encoder0_pool, encoder0 = encoder_block(inputs, 16)\n # 1072\n encoder1_pool, encoder1 = encoder_block(encoder0_pool, 32)\n # 536\n encoder2_pool, encoder2 = encoder_block(encoder1_pool, 64)\n # 268\n encoder3_pool, encoder3 = encoder_block(encoder2_pool, 128)\n # 134\n center = conv_block(encoder3_pool, 256)\n # center\n decoder3 = decoder_block(center, encoder3, 128)\n # 268\n decoder2 = decoder_block(decoder3, encoder2, 64)\n # 536\n decoder1 = decoder_block(decoder2, encoder1, 32)\n # 1072\n decoder0 = decoder_block(decoder1, encoder0, 16)\n # 2144\n outputs = layers.Conv2D(number_labels, (1, 1), activation='softmax')(decoder0)\n\n model = models.Model(inputs=[inputs], outputs=[outputs])\n\n def dice_coeff(y_true, y_pred):\n smooth = 1.\n # Flatten\n y_true_f = tf.reshape(y_true, [-1])\n y_pred_f = tf.reshape(y_pred, [-1])\n intersection = tf.reduce_sum(y_true_f * y_pred_f)\n score = (2. 
* intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)\n return score\n\n def dice_loss(y_true, y_pred):\n loss = 1 - dice_coeff(y_true, y_pred)\n return loss\n\n def bce_dice_loss(y_true, y_pred):\n loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)\n return loss\n\n model.compile(optimizer='adam', loss=bce_dice_loss, metrics=[dice_loss, 'accuracy'])\n\n model.summary()\n\n save_model_weights = 'models/weights_big_dataset_' + NAME +'.hdf5'\n #save_model = 'models/model_big_dataset_always_update.hdf5'\n log_dir = 'logs\\\\' + NAME\n os.makedirs('models', exist_ok=True)\n os.makedirs(log_dir, exist_ok=True)\n cp = tf.keras.callbacks.ModelCheckpoint(filepath=save_model_weights, monitor='val_dice_loss', save_best_only=False, verbose=1)\n cp2 = tf.keras.callbacks.TensorBoard(log_dir='logs\\\\' + NAME, histogram_freq=0,\n write_graph=True, write_images=False)\n\n # Function to augment data and keep randomly selected image of width \"width_pixel\"\n def augment_data(img, label_img, meta_img):\n # Take random snippet around polar angle = pi/2 (y-axis) for the two dust datasets of width 512 (approx. 90 degrees)\n # This snippet is shifted up to +-45 degrees (256 pixels) --> 90 degrees snippet between pi and 0 possible\n middle_angle = np.random.uniform(meta_img[0], meta_img[1])\n if middle_angle < 0: # Guarantess that at least end or start is in interval\n middle_angle += 2*np.pi\n middle_index = int(np.rint((width)*(middle_angle)/(2*np.pi)))\n start_index = int(middle_index - width_pixel/2)\n end_index = int(middle_index + width_pixel/2)\n if start_index >= 0 and end_index < width:\n img = img[:, start_index:end_index]\n label_img = label_img[:, start_index:end_index]\n elif end_index >= width:\n img = np.concatenate([img[:,start_index:],img[:,:end_index-width]], axis = 1)\n label_img = np.concatenate([label_img[:,start_index:],label_img[:,:end_index-width]], axis = 1)\n elif start_index < 0:\n img = np.concatenate([img[:, start_index+width:], img[:, :end_index]], axis = 1)\n label_img = np.concatenate([label_img[:, start_index+width:], label_img[:, :end_index]], axis = 1)\n # horizontal_flip with probability 0.5\n flip_prob = np.random.uniform(0.0, 1.0)\n if flip_prob > 0.5:\n img, label_img = img[:,::-1,:], label_img[:,::-1,:]\n return img, label_img\n\n # Build validation data\n features_test_2 = np.zeros([features_test.shape[0], features_test.shape[1], width_pixel, features_test.shape[3]])\n labels_test_2 = np.zeros([labels_test.shape[0], labels_test.shape[1], width_pixel, labels_test.shape[3]])\n for i, img in enumerate(features_test):\n img, label_img = augment_data(img, labels_test[i], meta_test[i])\n features_test_2[i, :, :, :] = img\n labels_test_2[i, :, :, :] = label_img\n features_test = features_test_2\n del features_test_2\n labels_test = labels_test_2\n del labels_test_2\n\n # Define Generator to save memory\n def generator(features, labels, meta_train):\n while True:\n print(\"\\naugmented!\\n\")\n print(int(np.ceil(len(features) / float(batch_size))))\n # Shuffle Arrays in same manner\n permutation = np.random.permutation(len(features))\n features = features[permutation]\n labels = labels[permutation]\n meta_train = meta_train[permutation]\n # Pick out one after another in generator form\n for i in range(int(np.ceil(len(features) / float(batch_size)))-1):\n feature_new = np.zeros((batch_size, features[batch_size*i].shape[0], width_pixel,\n features[batch_size*i].shape[2]))\n label_new = np.zeros((batch_size, 
labels[batch_size*i].shape[0], width_pixel,\n labels[batch_size*i].shape[2]))\n for c in range(batch_size):\n feature, label = augment_data(features[batch_size*i+c], labels[batch_size*i+c],\n meta_train[batch_size*i+c])\n feature_new[c,:,:,:] = feature\n label_new[c,:,:,:] = label\n yield feature_new, label_new\n\n history = model.fit_generator(generator(features_train, labels_train, meta_train),\n steps_per_epoch=int(np.ceil(num_train_examples / float(batch_size))),\n epochs = epochs,\n validation_data=(features_test, labels_test),\n validation_steps=int(np.ceil(num_test_examples / float(batch_size))),\n callbacks=[cp, cp2])\n\n #history = model.fit(dataset,\n # steps_per_epoch=int(np.ceil(num_train_examples / float(batch_size))),\n # epochs=epochs,\n # validation_data=val_ds,\n # validation_steps=int(np.ceil(num_test_examples / float(batch_size))),\n # callbacks=[cp])\n\n #model.save(save_model)" ]
[ [ "tensorflow.keras.callbacks.TensorBoard", "numpy.concatenate", "tensorflow.python.keras.layers.MaxPooling2D", "tensorflow.python.keras.losses.binary_crossentropy", "tensorflow.python.keras.layers.Input", "numpy.zeros", "tensorflow.python.keras.layers.Conv2D", "tensorflow.python.keras.layers.Conv2DTranspose", "numpy.load", "numpy.rint", "tensorflow.reshape", "tensorflow.keras.callbacks.ModelCheckpoint", "numpy.random.uniform", "tensorflow.reduce_sum", "tensorflow.python.keras.layers.Activation", "tensorflow.python.keras.layers.concatenate", "tensorflow.python.keras.layers.BatchNormalization", "tensorflow.python.keras.models.Model" ] ]
windj007/timelog-utils
[ "dc85bc04365355226c0bafe9227d60db632de5a3" ]
[ "redmine_import_timelog.py" ]
[ "#!/usr/bin/env python\n\nimport argparse, os, pandas, yaml, datetime, pytz, functools, redmine\n\n\ndef get_config(config_file):\n with open(config_file, 'r') as f:\n return yaml.load(f)\n\n\ndef get_redmine_client(config):\n return redmine.Redmine(config['redmine-base-address'],\n key = config['api-key'])\n\n\ndef make_datetime(row, timezone):\n return datetime.datetime(year = row['year'],\n month = row['month'],\n day = row['day'],\n hour = row['hour'],\n minute = row['minute'],\n tzinfo = timezone)\n\n\n_TS_COLUMNS = ['year',\n 'month',\n 'day',\n 'hour',\n 'minute']\ndef load_timelog(fpath, timezone):\n log = pandas.read_csv(fpath,\n encoding = 'utf8')\n log['timestamp'] = log[_TS_COLUMNS] \\\n .fillna(method = 'pad') \\\n .astype('int') \\\n .apply(functools.partial(make_datetime,\n timezone = timezone),\n axis = 1)\n log.set_index('timestamp', inplace = True)\n log.drop(_TS_COLUMNS,\n axis = 1,\n inplace = True)\n log['duration_minutes'] = list(pandas.Series(log.index) \\\n .groupby([d.date() for d in log.index]) \\\n .apply(lambda t: (t.shift(-1).fillna(method = 'pad') - t) \\\n .apply(lambda td: td.total_seconds() / 60.0)))\n log['duration_hours'] = log['duration_minutes'] / 60.0\n return log\n\n\ndef create_timeentry(client, ts, timelog_row):\n e = client.time_entry.new()\n e.comments = timelog_row['action']\n e.issue_id = int(timelog_row['redmine task id'])\n e.spent_on = datetime.date(year = ts.year,\n month = ts.month,\n day = ts.day)\n e.hours = timelog_row['duration_hours']\n #activity_id: the id of the time activity. This parameter is required unless a default activity is defined in Redmine.\n e.comments = timelog_row['action']\n e.save()\n\n\ndef import_timelog(client, timelog):\n for ts in timelog.index:\n try:\n int(timelog.loc[ts, 'redmine task id'])\n except ValueError:\n continue\n create_timeentry(client, ts, timelog.loc[ts])\n\n\nDATE_FORMAT = '%Y-%m-%dT%H:%M'\n\n\nif __name__ == '__main__':\n aparser = argparse.ArgumentParser()\n aparser.add_argument('--config',\n type = str,\n default = os.path.join(os.path.dirname(__file__),\n 'config.yaml'),\n help = 'Path to config file')\n aparser.add_argument('--since',\n type = str,\n default = datetime.date.today().strftime(DATE_FORMAT),\n help = 'Path to config file')\n aparser.add_argument('filename',\n type = str,\n help = 'CSV-file with timelog')\n\n args = aparser.parse_args()\n\n conf = get_config(args.config)\n client = get_redmine_client(conf)\n\n tz = pytz.timezone(conf['time-zone'])\n\n timelog = load_timelog(args.filename, tz)\n\n since = datetime.datetime.strptime(args.since, DATE_FORMAT).replace(tzinfo = tz)\n import_timelog(client, timelog[timelog.index >= since])\n" ]
[ [ "pandas.read_csv", "pandas.Series" ] ]
ppeigne/optimizers
[ "42eed8adb010ca0549cb269fab5e21f2e1693279" ]
[ "optimizers/activations.py" ]
[ "import numpy as np\n\ndef sigmoid(z: np.ndarray) -> np.ndarray:\n return 1 / (1 + np.exp(-z))\n\ndef relu(z: np.ndarray) -> np.ndarray:\n return np.maximum(0, z)\n\ndef leaky_relu(z: np.ndarray, rate: float = .01) -> np.ndarray:\n return np.maximum(z * rate, z)\n" ]
[ [ "numpy.exp", "numpy.maximum" ] ]
cfeenstra67/Quantum_Harmonic_Oscillator_Sim
[ "b36a26ce1e2853d5f9fa47b94dfb01c694b58d72" ]
[ "script/QHO_Demo.py" ]
[ "#!/usr/local/bin/python3\n\nimport wx\nfrom wx.lib.scrolledpanel import ScrolledPanel\n\nimport matplotlib as mpl\nimport matplotlib.gridspec as gridspec\nmpl.use('WXAgg')\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\nfrom matplotlib.backends.backend_wx import NavigationToolbar2Wx\nfrom matplotlib.figure import Figure\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\n\nimport numpy as np\nfrom scipy.special import eval_hermite\nfrom math import factorial as fact\nfrom time import time\nfrom functools import partial\nfrom collections import OrderedDict as odict\nfrom sympy import *\n\n\ndef e_lvl_func(n):\n psi_n=lambda y: (((2**n)*fact(n))**-.5)*(np.pi**-.25)*np.exp((-y**2)/2)*eval_hermite(n,y)\n return lambda y,t: psi_n(y)*np.exp(-1j*(n+.5)*t) \n \ndef super_pos_func(state):\n def func(y,t):\n try: total=np.full(len(y),0,dtype=complex)\n except: total=0\n for lvl, coef in enumerate(state):\n total+=coef*e_lvl_func(lvl)(y,t)\n return total\n return func\n\nclass plot_panel(wx.Panel):\n def __init__(self, parent, dx=.01, state=[1,1,1,1], dt=np.pi*2/96, active_dict={'rl':True,'im':True,'parab':True,'e_n':True}, e_lvls=-1):\n super().__init__(parent)\n \n \n self.__e_lvls=e_lvls if e_lvls!=-1 else len(state)\n self.state=state\n self.__supyb=2\n self.__xprop=2\n self.__active_dict=active_dict\n self.dx=dx\n self.dt=dt\n \n self.fig=Figure()\n self.canvas=FigureCanvas(self,-1,self.fig)\n gs=gridspec.GridSpec(3,1)\n self.ax=self.fig.add_subplot(gs[:2,0])\n self.super_ax=self.fig.add_subplot(gs[2,0])\n self.super_pos_rl, =self.super_ax.plot([],[],'b')\n self.super_pos_im, =self.super_ax.plot([],[],'r')\n \n self.e_lvls=self.__e_lvls\n self.supyb=self.__supyb\n\n sizer=wx.BoxSizer()\n sizer.Add(self.canvas,proportion=1,flag=wx.EXPAND)\n self.SetSizer(sizer)\n \n def get_e_lvls(self): return self.__e_lvls\n def set_e_lvls(self, val):\n self.ax.clear()\n self.__e_lvls=val\n self.x_ran=(self.e_lvls**.5)*self.xprop\n self.x_ran=(-self.x_ran,self.x_ran)\n self.ax.set_xlim(*self.x_ran)\n self.ax.set_ylim(0,self.e_lvls)\n self.super_ax.set_xlim(*self.x_ran)\n self.x=np.arange(*self.x_ran,step=self.dx)\n \n self.lvls=[]\n self.e_rl_plots=[]\n self.e_im_plots=[]\n for i in range(self.e_lvls):\n e_plot, =self.ax.plot(self.x,np.full(len(self.x),i+.5),'y',lw=1); self.lvls.append(e_plot)\n e_rl_plot, =self.ax.plot([],[],'b',lw=1); self.e_rl_plots.append(e_rl_plot)\n e_im_plot, =self.ax.plot([],[],'r',lw=1); self.e_im_plots.append(e_im_plot)\n self.parab, =self.ax.plot(self.x,self.x**2,'g')\n self.__active_lines=self.active_lines()\n e_lvls=property(get_e_lvls,set_e_lvls)\n \n def get_supyb(self): return self.__supyb\n def set_supyb(self, val):\n self.__supyb=val\n self.super_ax.set_ylim(-val,val)\n supyb=property(get_supyb,set_supyb)\n \n def get_xprop(self): return self.__xprop\n def set_xprop(self, val):\n self.__xprop=val\n self.e_lvls=self.e_lvls\n xprop=property(get_xprop,set_xprop)\n \n def start(self):\n t0=time()\n self.animate(0)\n t1=time()\n interval=1000*self.dt-(t1-t0)\n self.animate_init()\n self.ani=animation.FuncAnimation(self.fig,self.animate,frames=192,interval=interval,blit=True)\n def reset(self):\n self.ani.frame_seq=self.ani.new_frame_seq()\n def hard_reset(self):\n self.canvas.draw()\n if self.ani:\n self.ani._stop()\n del(self.ani)\n self.start()\n def set_active(self,key,val):\n if self.__active_dict[key]!=val:\n self.__active_dict[key]=val\n if key=='parab':\n self.parab.set_visible(val)\n self.hard_reset()\n elif 
key=='e_n':\n for lvl, lvl_plot in enumerate(self.lvls):\n lvl_plot.set_visible(val)\n self.hard_reset()\n else:\n self.__active_lines=self.active_lines()\n self.reset()\n def get_active(self, key): return self.__active_dict[key]\n def active_lines(self):\n lines=[]\n if self.__active_dict['rl']:\n lines.extend(self.e_rl_plots)\n lines.append(self.super_pos_rl)\n if self.__active_dict['im']:\n lines.extend(self.e_im_plots)\n lines.append(self.super_pos_im)\n return lines\n def animate_init(self):\n full=self.__active_lines\n for some_plot in full: some_plot.set_data([],[])\n return full\n def animate(self, t):\n for lvl, (rl_plot, im_plot) in enumerate(zip(self.e_rl_plots,self.e_im_plots)):\n y=e_lvl_func(lvl)(self.x,t*self.dt)+(lvl+.5)*(1j+1)\n rl_plot.set_data(self.x,y.real)\n im_plot.set_data(self.x,y.imag)\n y_sup=super_pos_func(self.state)(self.x,t*self.dt)\n self.super_pos_rl.set_data(self.x,y_sup.real)\n self.super_pos_im.set_data(self.x,y_sup.imag)\n return self.__active_lines\n\nclass control_panel(ScrolledPanel):\n def __init__(self, parent, width=200):\n super().__init__(parent,size=(width,1000))\n self.border_w=5\n self.ctrl_w=width-15-self.border_w*2\n self.SetupScrolling(scroll_x=False,scroll_y=True)\n self.vbox=wx.BoxSizer(wx.VERTICAL)\n self.SetSizer(self.vbox)\n cb_dict=odict((('im','Imaginary'),('rl','Real'),('parab','Parabola'),('e_n','Energy Levels')))\n def checkbox_fired(event):\n cb=event.EventObject\n key=next(k for k,v in cb_dict.items() if v==cb.GetLabel())\n parent.Parent.checkbox_changed(key,cb.GetValue())\n for val in cb_dict.values(): self.add_check(val,checkbox_fired)\n tc_dict=odict((('e_lvls',('# Energy Levels:',str(parent.Parent.plot.e_lvls))),('state',('Superposition:',\",\".join([str(num) for num in parent.Parent.plot.state])))))\n def textctrl_fired(event, key):\n tc=event.EventObject\n parent.Parent.textctrl_changed(key,tc)\n self.tctrl_dict={}\n for key,(val,initial) in tc_dict.items(): self.tctrl_dict[key]=self.add_textctrl(val,partial(textctrl_fired,key=key),initial)\n def edit_superp(event):\n editor=spfn_editor(parent.Parent)\n editor.Show()\n self.add_button('Generate w/ Fn',edit_superp)\n self.add_textctrl('Superposition Y Bound:',partial(textctrl_fired,key='super_y'),str(parent.Parent.plot.supyb))\n self.add_textctrl('X Proportion:',partial(textctrl_fired,key='xprop'),str(parent.Parent.plot.xprop))\n spacer=wx.Panel(self, size=(self.ctrl_w,20))\n self.add_control(spacer)\n def add_check(self, label_text, action):\n new_cb=wx.CheckBox(self, wx.ID_ANY, label_text, size=(self.ctrl_w,20))\n new_cb.SetValue(True)\n new_cb.Bind(wx.EVT_CHECKBOX, action)\n self.add_control(new_cb)\n def add_textctrl(self, label_text, ret_action, initial=''):\n h=20\n bg=wx.Panel(self,size=(self.ctrl_w,h*2+self.border_w))\n txt=wx.StaticText(bg, label=label_text, size=(self.ctrl_w,h))\n txt_ctrl=wx.TextCtrl(bg, style=wx.TE_PROCESS_ENTER,size=(self.ctrl_w,h),pos=(0,h+self.border_w))\n txt_ctrl.Bind(wx.EVT_TEXT_ENTER, ret_action)\n txt_ctrl.SetValue(initial)\n self.add_control(bg)\n return txt_ctrl\n def add_button(self, label_text, action):\n new_b=wx.Button(self, size=(self.ctrl_w,20), label=label_text)\n new_b.Bind(wx.EVT_BUTTON,action)\n self.add_control(new_b)\n def add_control(self, ctrl):\n self.vbox.Add(ctrl,0,wx.TOP | wx.LEFT | wx.RIGHT,border=self.border_w)\n \nclass plot_editor(wx.Frame):\n def __init__(self,size=(500,500)):\n super().__init__(None,wx.ID_ANY,\"QM Demo\",size=size)\n self.bkg=wx.Panel(self,size=size)\n self.plot=plot_panel(self.bkg)\n 
self.controls=control_panel(self.bkg)\n hbox=wx.BoxSizer()\n hbox.Add(self.plot,proportion=1,flag=wx.EXPAND)\n hbox.Add(self.controls,proportion=0,flag=wx.LEFT)\n self.bkg.SetSizer(hbox)\n def checkbox_changed(self, key, checked):\n self.plot.set_active(key,checked)\n def textctrl_changed(self, key, textctrl):\n text=textctrl.GetValue()\n if key=='e_lvls':\n try:\n self.plot.e_lvls=int(text)\n self.plot.hard_reset()\n except:\n textctrl.SetValue(str(self.plot.e_lvls))\n elif key=='state':\n try:\n self.plot.state=[float(txt) for txt in text.split(\",\")]\n self.plot.reset()\n except:\n textctrl.SetValue(\",\".join([str(num) for num in self.plot.state]))\n elif key=='super_y':\n try:\n self.plot.supyb=float(text)\n self.plot.hard_reset()\n except:\n textctrl.SetValue(str(self.plot.supyb))\n elif key=='xprop':\n try:\n self.plot.xprop=float(text)\n self.plot.hard_reset()\n except:\n textctrl.SetValue(str(self.plot.xprop))\n def state_generated(self, new_state):\n key='state'\n tc=self.controls.tctrl_dict[key]\n tc.SetValue(\",\".join([str(num) for num in new_state]))\n self.plot.state=new_state\n self.plot.reset()\n\nclass spfn_editor(wx.Frame):\n def __init__(self, controller=None, size=(300,150)):\n super().__init__(None, wx.ID_ANY, \"Superposition Editor\", size=size, style=wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX)\n self.cont=controller\n w,h=size\n self.border_w=5\n self.line_w=w-self.border_w*2\n self.bkg=wx.Panel(self)\n box=wx.BoxSizer()\n box.Add(self.bkg)\n self.SetSizer(box)\n self.vbox=wx.BoxSizer(wx.VERTICAL)\n self.bkg.SetSizer(self.vbox)\n fields=('Fn:','For:','From:','To:')\n self.ctrl_dict={}\n for field in fields: self.ctrl_dict[field]=self.add_line(field)\n go=wx.Button(self.bkg,size=(self.line_w,20),label='Go')\n go.Bind(wx.EVT_BUTTON,self.pressed_go)\n self.vbox.Add(go,0,wx.ALL,border=self.border_w)\n self.Fit()\n def add_line(self, text):\n prop=.3\n h=20\n pn=wx.Panel(self.bkg,size=(self.line_w,h))\n hbox=wx.BoxSizer()\n txt=wx.StaticText(pn,label=text,size=(self.line_w*prop,h))\n ctrl=wx.TextCtrl(pn,size=(self.line_w*(1-prop),h),pos=(self.line_w*prop,0))\n hbox.Add(txt)\n hbox.Add(ctrl)\n pn.SetSizer(hbox)\n self.vbox.Add(pn,0,wx.TOP | wx.LEFT | wx.RIGHT, border=self.border_w)\n return ctrl\n def pressed_go(self, event=None):\n try:\n fn=self.ctrl_dict['Fn:'].GetValue()\n var=Symbol(self.ctrl_dict['For:'].GetValue())\n start=int(self.ctrl_dict['From:'].GetValue())\n end=int(self.ctrl_dict['To:'].GetValue())\n seq=[float(sympify(fn).evalf(subs={var:i})) for i in range(start,end+1)]\n self.cont.state_generated(seq)\n except: pass\n \n\nif __name__ == '__main__':\n app=wx.App(redirect=False)\n win_size=(600,400)\n win=plot_editor(size=win_size)\n win.plot.start()\n win.Show(True)\n app.MainLoop()" ]
[ [ "matplotlib.use", "matplotlib.animation.FuncAnimation", "matplotlib.backends.backend_wxagg.FigureCanvasWxAgg", "numpy.exp", "numpy.arange", "matplotlib.figure.Figure", "scipy.special.eval_hermite", "matplotlib.gridspec.GridSpec" ] ]
shimbay/tensorflow
[ "3c83fac462fd7938590ec69861d0d3691432e661" ]
[ "tensorflow/python/framework/ops.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Classes and functions used to construct graphs.\"\"\"\n# pylint: disable=g-bad-name\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport re\nimport sys\nimport threading\nimport types\nfrom absl import app\n\nimport numpy as np\nimport six\nfrom six.moves import map # pylint: disable=redefined-builtin\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import function_pb2\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.core.framework import node_def_pb2\nfrom tensorflow.core.framework import op_def_pb2\nfrom tensorflow.core.framework import versions_pb2\nfrom tensorflow.core.protobuf import config_pb2\n# pywrap_tensorflow must be imported first to avoid protobuf issues.\n# (b/143110113)\n# pylint: disable=invalid-import-order,g-bad-import-order,unused-import\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python import pywrap_tfe\n# pylint: enable=invalid-import-order,g-bad-import-order,unused-import\nfrom tensorflow.python import tf2\nfrom tensorflow.python.client import pywrap_tf_session\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import core\nfrom tensorflow.python.eager import monitoring\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.framework import c_api_util\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import cpp_shape_inference_pb2\nfrom tensorflow.python.framework import device as pydev\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import indexed_slices\nfrom tensorflow.python.framework import registry\nfrom tensorflow.python.framework import tensor_conversion_registry\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import traceable_stack\nfrom tensorflow.python.framework import versions\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.profiler import trace\nfrom tensorflow.python.types import core as core_tf_types\nfrom tensorflow.python.types import internal\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import decorator_utils\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util import function_utils\nfrom tensorflow.python.util import lock_util\nfrom tensorflow.python.util import memory\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util import tf_stack\nfrom 
tensorflow.python.util import traceback_utils\nfrom tensorflow.python.util.compat import collections_abc\nfrom tensorflow.python.util.deprecation import deprecated_args\nfrom tensorflow.python.util.lazy_loader import LazyLoader\nfrom tensorflow.python.util.tf_export import kwarg_only\nfrom tensorflow.python.util.tf_export import tf_export\n\nag_ctx = LazyLoader(\n \"ag_ctx\", globals(),\n \"tensorflow.python.autograph.core.ag_ctx\")\n\n\n# Temporary global switches determining if we should enable the work-in-progress\n# calls to the C API. These will be removed once all functionality is supported.\n_USE_C_API = True\n_USE_C_SHAPES = True\n\n_api_usage_gauge = monitoring.BoolGauge(\n \"/tensorflow/api/ops_eager_execution\",\n \"Whether ops.enable_eager_execution() is called.\")\n\n_tensor_equality_api_usage_gauge = monitoring.BoolGauge(\n \"/tensorflow/api/enable_tensor_equality\",\n \"Whether ops.enable_tensor_equality() is called.\")\n\n_control_flow_api_gauge = monitoring.BoolGauge(\n \"/tensorflow/api/enable_control_flow_v2\",\n \"Whether enable_control_flow_v2() is called.\")\n\n_tf_function_api_guage = monitoring.BoolGauge(\n \"/tensorflow/api/tf_function\",\n \"Whether tf.function() is used.\")\n\n# pylint: disable=protected-access\n_DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE\n# pylint: enable=protected-access\n\n\ndef tensor_id(tensor):\n \"\"\"Returns a unique identifier for this Tensor.\"\"\"\n return tensor._id # pylint: disable=protected-access\n\n\nclass _UserDeviceSpec(object):\n \"\"\"Store user-specified device and provide computation of merged device.\"\"\"\n\n def __init__(self, device_name_or_function):\n self._device_name_or_function = device_name_or_function\n self.display_name = str(self._device_name_or_function)\n self.function = device_name_or_function\n self.raw_string = None\n\n if isinstance(device_name_or_function, pydev.MergeDevice):\n self.is_null_merge = device_name_or_function.is_null_merge\n\n elif callable(device_name_or_function):\n self.is_null_merge = False\n dev_func = self._device_name_or_function\n func_name = function_utils.get_func_name(dev_func)\n func_code = function_utils.get_func_code(dev_func)\n if func_code:\n fname = func_code.co_filename\n lineno = func_code.co_firstlineno\n else:\n fname = \"unknown\"\n lineno = -1\n self.display_name = \"%s<%s, %d>\" % (func_name, fname, lineno)\n\n elif device_name_or_function is None:\n # NOTE(taylorrobie): This MUST be False. 
None signals a break in the\n # device stack, so `is_null_merge` must be False for such a case to\n # allow callers to safely skip over null merges without missing a None.\n self.is_null_merge = False\n\n else:\n self.raw_string = device_name_or_function\n self.function = pydev.merge_device(device_name_or_function)\n self.is_null_merge = self.function.is_null_merge\n\n # We perform this check in __init__ because it is of non-trivial cost,\n # and self.string_merge is typically called many times.\n self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)\n\n def string_merge(self, node_def):\n if self.fast_string_merge:\n return self.function.shortcut_string_merge(node_def)\n\n return compat.as_str(_device_string(self.function(node_def)))\n\n\nclass NullContextmanager(object):\n\n def __init__(self, *args, **kwargs):\n pass\n\n def __enter__(self):\n pass\n\n def __exit__(self, type_arg, value_arg, traceback_arg):\n return False # False values do not suppress exceptions\n\n\ndef _override_helper(clazz_object, operator, func):\n \"\"\"Overrides (string) operator on Tensors to call func.\n\n Args:\n clazz_object: the class to override for; either Tensor or SparseTensor.\n operator: the string name of the operator to override.\n func: the function that replaces the overridden operator.\n\n Raises:\n ValueError: If operator is not allowed to be overwritten.\n \"\"\"\n if operator not in Tensor.OVERLOADABLE_OPERATORS:\n raise ValueError(\"Overriding %s is disallowed\" % operator)\n setattr(clazz_object, operator, func)\n\n\ndef _as_graph_element(obj):\n \"\"\"Convert `obj` to a graph element if possible, otherwise return `None`.\n\n Args:\n obj: Object to convert.\n\n Returns:\n The result of `obj._as_graph_element()` if that method is available;\n otherwise `None`.\n \"\"\"\n conv_fn = getattr(obj, \"_as_graph_element\", None)\n if conv_fn and callable(conv_fn):\n return conv_fn()\n return None\n\n\n# Deprecated - do not use.\n# This API to avoid breaking estimator and tensorflow-mesh which depend on this\n# internal API. The stub should be safe to use after TF 2.3 is released.\ndef is_dense_tensor_like(t):\n return isinstance(t, core_tf_types.Tensor)\n\n\ndef uid():\n \"\"\"A unique (within this program execution) integer.\"\"\"\n return pywrap_tfe.TFE_Py_UID()\n\n\ndef numpy_text(tensor, is_repr=False):\n \"\"\"Human readable representation of a tensor's numpy value.\"\"\"\n if tensor.dtype.is_numpy_compatible:\n # pylint: disable=protected-access\n text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())\n # pylint: enable=protected-access\n else:\n text = \"<unprintable>\"\n if \"\\n\" in text:\n text = \"\\n\" + text\n return text\n\n\n@tf_export(v1=[\"enable_tensor_equality\"])\ndef enable_tensor_equality():\n \"\"\"Compare Tensors with element-wise comparison and thus be unhashable.\n\n Comparing tensors with element-wise allows comparisons such as\n tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are\n unhashable. 
Thus tensors can no longer be directly used in sets or as a key in\n a dictionary.\n \"\"\"\n logging.vlog(1, \"Enabling tensor equality\")\n _tensor_equality_api_usage_gauge.get_cell().set(True)\n Tensor._USE_EQUALITY = True # pylint: disable=protected-access\n\n\n@tf_export(v1=[\"disable_tensor_equality\"])\ndef disable_tensor_equality():\n \"\"\"Compare Tensors by their id and be hashable.\n\n This is a legacy behaviour of TensorFlow and is highly discouraged.\n \"\"\"\n logging.vlog(1, \"Disabling tensor equality\")\n _tensor_equality_api_usage_gauge.get_cell().set(False)\n Tensor._USE_EQUALITY = False # pylint: disable=protected-access\n\n\n# TODO(mdan): This object should subclass Symbol, not just Tensor.\n@tf_export(\"Tensor\", \"experimental.numpy.ndarray\", v1=[\"Tensor\"])\nclass Tensor(internal.NativeObject, core_tf_types.Tensor):\n \"\"\"A `tf.Tensor` represents a multidimensional array of elements.\n\n All elements are of a single known data type.\n\n When writing a TensorFlow program, the main object that is\n manipulated and passed around is the `tf.Tensor`.\n\n A `tf.Tensor` has the following properties:\n\n * a single data type (float32, int32, or string, for example)\n * a shape\n\n TensorFlow supports eager execution and graph execution. In eager\n execution, operations are evaluated immediately. In graph\n execution, a computational graph is constructed for later\n evaluation.\n\n TensorFlow defaults to eager execution. In the example below, the\n matrix multiplication results are calculated immediately.\n\n >>> # Compute some values using a Tensor\n >>> c = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n >>> d = tf.constant([[1.0, 1.0], [0.0, 1.0]])\n >>> e = tf.matmul(c, d)\n >>> print(e)\n tf.Tensor(\n [[1. 3.]\n [3. 7.]], shape=(2, 2), dtype=float32)\n\n Note that during eager execution, you may discover your `Tensors` are actually\n of type `EagerTensor`. This is an internal detail, but it does give you\n access to a useful function, `numpy`:\n\n >>> type(e)\n <class '...ops.EagerTensor'>\n >>> print(e.numpy())\n [[1. 3.]\n [3. 7.]]\n\n In TensorFlow, `tf.function`s are a common way to define graph execution.\n\n A Tensor's shape (that is, the rank of the Tensor and the size of\n each dimension) may not always be fully known. 
In `tf.function`\n definitions, the shape may only be partially known.\n\n Most operations produce tensors of fully-known shapes if the shapes of their\n inputs are also fully known, but in some cases it's only possible to find the\n shape of a tensor at execution time.\n\n A number of specialized tensors are available: see `tf.Variable`,\n `tf.constant`, `tf.placeholder`, `tf.sparse.SparseTensor`, and\n `tf.RaggedTensor`.\n\n For more on Tensors, see the [guide](https://tensorflow.org/guide/tensor).\n\n \"\"\"\n\n # List of Python operators that we allow to override.\n OVERLOADABLE_OPERATORS = {\n # Binary.\n \"__add__\",\n \"__radd__\",\n \"__sub__\",\n \"__rsub__\",\n \"__mul__\",\n \"__rmul__\",\n \"__div__\",\n \"__rdiv__\",\n \"__truediv__\",\n \"__rtruediv__\",\n \"__floordiv__\",\n \"__rfloordiv__\",\n \"__mod__\",\n \"__rmod__\",\n \"__lt__\",\n \"__le__\",\n \"__gt__\",\n \"__ge__\",\n \"__ne__\",\n \"__eq__\",\n \"__and__\",\n \"__rand__\",\n \"__or__\",\n \"__ror__\",\n \"__xor__\",\n \"__rxor__\",\n \"__getitem__\",\n \"__pow__\",\n \"__rpow__\",\n # Unary.\n \"__invert__\",\n \"__neg__\",\n \"__abs__\",\n \"__matmul__\",\n \"__rmatmul__\"\n }\n\n # Whether to allow hashing or numpy-style equality\n _USE_EQUALITY = tf2.enabled()\n\n def __init__(self, op, value_index, dtype):\n \"\"\"Creates a new `Tensor`.\n\n Args:\n op: An `Operation`. `Operation` that computes this tensor.\n value_index: An `int`. Index of the operation's endpoint that produces\n this tensor.\n dtype: A `DType`. Type of elements stored in this tensor.\n\n Raises:\n TypeError: If the op is not an `Operation`.\n \"\"\"\n if not isinstance(op, Operation):\n raise TypeError(\"op needs to be an Operation: %s\" % (op,))\n self._op = op\n self._value_index = value_index\n self._dtype = dtypes.as_dtype(dtype)\n # This will be set by self._as_tf_output().\n self._tf_output = None\n # This will be set by self.shape().\n self._shape_val = None\n # List of operations that use this Tensor as input. 
We maintain this list\n # to easily navigate a computation graph.\n self._consumers = []\n self._id = uid()\n self._name = None\n\n def __getattr__(self, name):\n if name in {\"T\", \"astype\", \"ravel\", \"transpose\", \"reshape\", \"clip\", \"size\",\n \"tolist\", \"data\"}:\n # TODO(wangpeng): Export the enable_numpy_behavior knob\n raise AttributeError(\"\"\"\n '{}' object has no attribute '{}'.\n If you are looking for numpy-related methods, please run the following:\n from tensorflow.python.ops.numpy_ops import np_config\n np_config.enable_numpy_behavior()\"\"\".format(type(self).__name__, name))\n self.__getattribute__(name)\n\n @staticmethod\n def _create_with_tf_output(op, value_index, dtype, tf_output):\n ret = Tensor(op, value_index, dtype)\n ret._tf_output = tf_output\n return ret\n\n @property\n def op(self):\n \"\"\"The `Operation` that produces this tensor as an output.\"\"\"\n return self._op\n\n @property\n def dtype(self):\n \"\"\"The `DType` of elements in this tensor.\"\"\"\n return self._dtype\n\n @property\n def graph(self):\n \"\"\"The `Graph` that contains this tensor.\"\"\"\n return self._op.graph\n\n @property\n def name(self):\n \"\"\"The string name of this tensor.\"\"\"\n if self._name is None:\n if not self._op.name:\n raise ValueError(\"Operation was not named: %s\" % self._op)\n self._name = \"%s:%d\" % (self._op.name, self._value_index)\n return self._name\n\n @property\n def device(self):\n \"\"\"The name of the device on which this tensor will be produced, or None.\"\"\"\n return self._op.device\n\n @property\n def shape(self):\n \"\"\"Returns a `tf.TensorShape` that represents the shape of this tensor.\n\n >>> t = tf.constant([1,2,3,4,5])\n >>> t.shape\n TensorShape([5])\n\n `tf.Tensor.shape` is equivalent to `tf.Tensor.get_shape()`.\n\n In a `tf.function` or when building a model using\n `tf.keras.Input`, they return the build-time shape of the\n tensor, which may be partially unknown.\n\n A `tf.TensorShape` is not a tensor. Use `tf.shape(t)` to get a tensor\n containing the shape, calculated at runtime.\n\n See `tf.Tensor.get_shape()`, and `tf.TensorShape` for details and examples.\n \"\"\"\n if self._shape_val is None:\n self._shape_val = self._c_api_shape()\n return self._shape_val\n\n def _c_api_shape(self):\n \"\"\"Returns the TensorShape of this tensor according to the C API.\"\"\"\n c_graph = self._op._graph._c_graph # pylint: disable=protected-access\n shape_vec, unknown_shape = pywrap_tf_session.TF_GraphGetTensorShapeHelper(\n c_graph, self._as_tf_output())\n if unknown_shape:\n return tensor_shape.unknown_shape()\n else:\n shape_vec = [None if d == -1 else d for d in shape_vec]\n return tensor_shape.TensorShape(shape_vec)\n\n @property\n def _shape(self):\n logging.warning(\"Tensor._shape is private, use Tensor.shape \"\n \"instead. Tensor._shape will eventually be removed.\")\n return self.shape\n\n @_shape.setter\n def _shape(self, value):\n raise ValueError(\n \"Tensor._shape cannot be assigned, use Tensor.set_shape instead.\")\n\n def _disallow_when_autograph_disabled(self, task):\n raise errors.OperatorNotAllowedInGraphError(\n \"{} is not allowed: AutoGraph is disabled in this function.\"\n \" Try decorating it directly with @tf.function.\".format(task))\n\n def _disallow_when_autograph_enabled(self, task):\n raise errors.OperatorNotAllowedInGraphError(\n \"{} is not allowed: AutoGraph did convert this function. 
This might\"\n \" indicate you are trying to use an unsupported feature.\".format(task))\n\n def _disallow_in_graph_mode(self, task):\n raise errors.OperatorNotAllowedInGraphError(\n \"{} is not allowed in Graph execution. Use Eager execution or decorate\"\n \" this function with @tf.function.\".format(task))\n\n def _disallow_bool_casting(self):\n if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:\n self._disallow_when_autograph_disabled(\n \"using a `tf.Tensor` as a Python `bool`\")\n elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:\n self._disallow_when_autograph_enabled(\n \"using a `tf.Tensor` as a Python `bool`\")\n else:\n # Default: V1-style Graph execution.\n self._disallow_in_graph_mode(\"using a `tf.Tensor` as a Python `bool`\")\n\n def _disallow_iteration(self):\n if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:\n self._disallow_when_autograph_disabled(\"iterating over `tf.Tensor`\")\n elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:\n self._disallow_when_autograph_enabled(\"iterating over `tf.Tensor`\")\n else:\n # Default: V1-style Graph execution.\n self._disallow_in_graph_mode(\"iterating over `tf.Tensor`\")\n\n def __iter__(self):\n if not context.executing_eagerly():\n self._disallow_iteration()\n\n shape = self._shape_tuple()\n if shape is None:\n raise TypeError(\"Cannot iterate over a tensor with unknown shape.\")\n if not shape:\n raise TypeError(\"Cannot iterate over a scalar tensor.\")\n if shape[0] is None:\n raise TypeError(\n \"Cannot iterate over a tensor with unknown first dimension.\")\n return _TensorIterator(self, shape[0])\n\n def _shape_as_list(self):\n if self.shape.ndims is not None:\n return [dim.value for dim in self.shape.dims]\n else:\n return None\n\n def _shape_tuple(self):\n shape = self._shape_as_list()\n if shape is None:\n return None\n return tuple(shape)\n\n def _rank(self):\n \"\"\"Integer rank of this Tensor, if known, else None.\n\n Returns:\n Integer rank or None\n \"\"\"\n return self.shape.ndims\n\n def get_shape(self):\n \"\"\"Returns a `tf.TensorShape` that represents the shape of this tensor.\n\n In eager execution the shape is always fully-known.\n\n >>> a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n >>> print(a.shape)\n (2, 3)\n\n `tf.Tensor.get_shape()` is equivalent to `tf.Tensor.shape`.\n\n\n When executing in a `tf.function` or building a model using\n `tf.keras.Input`, `Tensor.shape` may return a partial shape (including\n `None` for unknown dimensions). See `tf.TensorShape` for more details.\n\n >>> inputs = tf.keras.Input(shape = [10])\n >>> # Unknown batch size\n >>> print(inputs.shape)\n (None, 10)\n\n The shape is computed using shape inference functions that are\n registered for each `tf.Operation`.\n\n The returned `tf.TensorShape` is determined at *build* time, without\n executing the underlying kernel. It is not a `tf.Tensor`. If you need a\n shape *tensor*, either convert the `tf.TensorShape` to a `tf.constant`, or\n use the `tf.shape(tensor)` function, which returns the tensor's shape at\n *execution* time.\n\n This is useful for debugging and providing early errors. For\n example, when tracing a `tf.function`, no ops are being executed, shapes\n may be unknown (See the [Concrete Functions\n Guide](https://www.tensorflow.org/guide/concrete_function) for details).\n\n >>> @tf.function\n ... def my_matmul(a, b):\n ... result = a@b\n ... # the `print` executes during tracing.\n ... print(\"Result shape: \", result.shape)\n ... 
return result\n\n The shape inference functions propagate shapes to the extent possible:\n\n >>> f = my_matmul.get_concrete_function(\n ... tf.TensorSpec([None,3]),\n ... tf.TensorSpec([3,5]))\n Result shape: (None, 5)\n\n Tracing may fail if a shape missmatch can be detected:\n\n >>> cf = my_matmul.get_concrete_function(\n ... tf.TensorSpec([None,3]),\n ... tf.TensorSpec([4,5]))\n Traceback (most recent call last):\n ...\n ValueError: Dimensions must be equal, but are 3 and 4 for 'matmul' (op:\n 'MatMul') with input shapes: [?,3], [4,5].\n\n In some cases, the inferred shape may have unknown dimensions. If\n the caller has additional information about the values of these\n dimensions, `tf.ensure_shape` or `Tensor.set_shape()` can be used to augment\n the inferred shape.\n\n >>> @tf.function\n ... def my_fun(a):\n ... a = tf.ensure_shape(a, [5, 5])\n ... # the `print` executes during tracing.\n ... print(\"Result shape: \", a.shape)\n ... return a\n\n >>> cf = my_fun.get_concrete_function(\n ... tf.TensorSpec([None, None]))\n Result shape: (5, 5)\n\n Returns:\n A `tf.TensorShape` representing the shape of this tensor.\n\n \"\"\"\n return self.shape\n\n def set_shape(self, shape):\n \"\"\"Updates the shape of this tensor.\n\n Note: It is recommended to use `tf.ensure_shape` instead of\n `Tensor.set_shape`, because `tf.ensure_shape` provides better checking for\n programming errors and can create guarantees for compiler\n optimization.\n\n With eager execution this operates as a shape assertion.\n Here the shapes match:\n\n >>> t = tf.constant([[1,2,3]])\n >>> t.set_shape([1, 3])\n\n Passing a `None` in the new shape allows any value for that axis:\n\n >>> t.set_shape([1,None])\n\n An error is raised if an incompatible shape is passed.\n\n >>> t.set_shape([1,5])\n Traceback (most recent call last):\n ...\n ValueError: Tensor's shape (1, 3) is not compatible with supplied\n shape [1, 5]\n\n When executing in a `tf.function`, or building a model using\n `tf.keras.Input`, `Tensor.set_shape` will *merge* the given `shape` with\n the current shape of this tensor, and set the tensor's shape to the\n merged value (see `tf.TensorShape.merge_with` for details):\n\n >>> t = tf.keras.Input(shape=[None, None, 3])\n >>> print(t.shape)\n (None, None, None, 3)\n\n Dimensions set to `None` are not updated:\n\n >>> t.set_shape([None, 224, 224, None])\n >>> print(t.shape)\n (None, 224, 224, 3)\n\n The main use case for this is to provide additional shape information\n that cannot be inferred from the graph alone.\n\n For example if you know all the images in a dataset have shape [28,28,3] you\n can set it with `tf.set_shape`:\n\n >>> @tf.function\n ... def load_image(filename):\n ... raw = tf.io.read_file(filename)\n ... image = tf.image.decode_png(raw, channels=3)\n ... # the `print` executes during tracing.\n ... print(\"Initial shape: \", image.shape)\n ... image.set_shape([28, 28, 3])\n ... print(\"Final shape: \", image.shape)\n ... return image\n\n Trace the function, see the [Concrete Functions\n Guide](https://www.tensorflow.org/guide/concrete_function) for details.\n\n >>> cf = load_image.get_concrete_function(\n ... tf.TensorSpec([], dtype=tf.string))\n Initial shape: (None, None, 3)\n Final shape: (28, 28, 3)\n\n Similarly the `tf.io.parse_tensor` function could return a tensor with\n any shape, even the `tf.rank` is unknown. If you know that all your\n serialized tensors will be 2d, set it with `set_shape`:\n\n >>> @tf.function\n ... def my_parse(string_tensor):\n ... 
result = tf.io.parse_tensor(string_tensor, out_type=tf.float32)\n ... # the `print` executes during tracing.\n ... print(\"Initial shape: \", result.shape)\n ... result.set_shape([None, None])\n ... print(\"Final shape: \", result.shape)\n ... return result\n\n Trace the function\n\n >>> concrete_parse = my_parse.get_concrete_function(\n ... tf.TensorSpec([], dtype=tf.string))\n Initial shape: <unknown>\n Final shape: (None, None)\n\n Make sure it works:\n\n >>> t = tf.ones([5,3], dtype=tf.float32)\n >>> serialized = tf.io.serialize_tensor(t)\n >>> print(serialized.dtype)\n <dtype: 'string'>\n >>> print(serialized.shape)\n ()\n >>> t2 = concrete_parse(serialized)\n >>> print(t2.shape)\n (5, 3)\n\n Caution: `set_shape` ensures that the applied shape is compatible with\n the existing shape, but it does not check at runtime. Setting\n incorrect shapes can result in inconsistencies between the\n statically-known graph and the runtime value of tensors. For runtime\n validation of the shape, use `tf.ensure_shape` instead. It also modifies\n the `shape` of the tensor.\n\n >>> # Serialize a rank-3 tensor\n >>> t = tf.ones([5,5,5], dtype=tf.float32)\n >>> serialized = tf.io.serialize_tensor(t)\n >>> # The function still runs, even though it `set_shape([None,None])`\n >>> t2 = concrete_parse(serialized)\n >>> print(t2.shape)\n (5, 5, 5)\n\n Args:\n shape: A `TensorShape` representing the shape of this tensor, a\n `TensorShapeProto`, a list, a tuple, or None.\n\n Raises:\n ValueError: If `shape` is not compatible with the current shape of\n this tensor.\n \"\"\"\n # Reset cached shape.\n self._shape_val = None\n\n # We want set_shape to be reflected in the C API graph for when we run it.\n if not isinstance(shape, tensor_shape.TensorShape):\n shape = tensor_shape.TensorShape(shape)\n dim_list = []\n if shape.dims is None:\n unknown_shape = True\n else:\n unknown_shape = False\n for dim in shape.dims:\n if dim.value is None:\n dim_list.append(-1)\n else:\n dim_list.append(dim.value)\n try:\n pywrap_tf_session.TF_GraphSetTensorShape_wrapper(\n self._op._graph._c_graph, # pylint: disable=protected-access\n self._as_tf_output(),\n dim_list,\n unknown_shape)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(e.message)\n\n @property\n def value_index(self):\n \"\"\"The index of this tensor in the outputs of its `Operation`.\"\"\"\n return self._value_index\n\n def consumers(self):\n \"\"\"Returns a list of `Operation`s that consume this tensor.\n\n Returns:\n A list of `Operation`s.\n \"\"\"\n consumer_names = pywrap_tf_session.TF_OperationOutputConsumers_wrapper(\n self._as_tf_output())\n # pylint: disable=protected-access\n return [\n self.graph._get_operation_by_name_unsafe(name)\n for name in consumer_names\n ]\n # pylint: enable=protected-access\n\n def _as_node_def_input(self):\n \"\"\"Return a value to use for the NodeDef \"input\" attribute.\n\n The returned string can be used in a NodeDef \"input\" attribute\n to indicate that the NodeDef uses this Tensor as input.\n\n Raises:\n ValueError: if this Tensor's Operation does not have a name.\n\n Returns:\n a string.\n \"\"\"\n if not self._op.name:\n raise ValueError(\"Operation was not named: %s\" % self._op)\n if self._value_index == 0:\n return self._op.name\n else:\n return \"%s:%d\" % (self._op.name, self._value_index)\n\n def _as_tf_output(self):\n # pylint: disable=protected-access\n # NOTE: Beyond preventing unnecessary (re-)allocation, the cached object\n # also guarantees 
that a dictionary of tf_output objects will retain a\n # deterministic (yet unsorted) order which prevents memory blowup in the\n # cache of executor(s) stored for every session.\n if self._tf_output is None:\n self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)\n return self._tf_output\n # pylint: enable=protected-access\n\n def __str__(self):\n return \"Tensor(\\\"%s\\\"%s%s%s)\" % (\n self.name,\n (\", shape=%s\" %\n self.get_shape()) if self.get_shape().ndims is not None else \"\",\n (\", dtype=%s\" % self._dtype.name) if self._dtype else \"\",\n (\", device=%s\" % self.device) if self.device else \"\")\n\n def __repr__(self):\n return \"<tf.Tensor '%s' shape=%s dtype=%s>\" % (self.name, self.get_shape(),\n self._dtype.name)\n\n def __hash__(self):\n g = getattr(self, \"graph\", None)\n if (Tensor._USE_EQUALITY and executing_eagerly_outside_functions() and\n (g is None or g.building_function)):\n raise TypeError(\"Tensor is unhashable. \"\n \"Instead, use tensor.ref() as the key.\")\n else:\n return id(self)\n\n def __copy__(self):\n # TODO(b/77597810): get rid of Tensor copies.\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result\n\n # NOTE(mrry): This enables the Tensor's overloaded \"right\" binary\n # operators to run when the left operand is an ndarray, because it\n # accords the Tensor class higher priority than an ndarray, or a\n # numpy matrix.\n # TODO(mrry): Convert this to using numpy's __numpy_ufunc__\n # mechanism, which allows more control over how Tensors interact\n # with ndarrays.\n __array_priority__ = 100\n\n def __array__(self, dtype=None):\n del dtype\n raise NotImplementedError(\n \"Cannot convert a symbolic Tensor ({}) to a numpy array.\"\n \" This error may indicate that you're trying to pass a Tensor to\"\n \" a NumPy call, which is not supported\".format(self.name))\n\n def __len__(self):\n raise TypeError(\"len is not well defined for symbolic Tensors. ({}) \"\n \"Please call `x.shape` rather than `len(x)` for \"\n \"shape information.\".format(self.name))\n\n # TODO(mdan): This convoluted machinery is hard to maintain. Clean up.\n @staticmethod\n def _override_operator(operator, func):\n _override_helper(Tensor, operator, func)\n\n def __bool__(self):\n \"\"\"Dummy method to prevent a tensor from being used as a Python `bool`.\n\n This overload raises a `TypeError` when the user inadvertently\n treats a `Tensor` as a boolean (most commonly in an `if` or `while`\n statement), in code that was not converted by AutoGraph. For example:\n\n ```python\n if tf.constant(True): # Will raise.\n # ...\n\n if tf.constant(5) < tf.constant(7): # Will raise.\n # ...\n ```\n\n Raises:\n `TypeError`.\n \"\"\"\n self._disallow_bool_casting()\n\n def __nonzero__(self):\n \"\"\"Dummy method to prevent a tensor from being used as a Python `bool`.\n\n This is the Python 2.x counterpart to `__bool__()` above.\n\n Raises:\n `TypeError`.\n \"\"\"\n self._disallow_bool_casting()\n\n def eval(self, feed_dict=None, session=None):\n \"\"\"Evaluates this tensor in a `Session`.\n\n Note: If you are not using `compat.v1` libraries, you should not need this,\n (or `feed_dict` or `Session`). 
In eager execution (or within `tf.function`)\n you do not need to call `eval`.\n\n Calling this method will execute all preceding operations that\n produce the inputs needed for the operation that produces this\n tensor.\n\n *N.B.* Before invoking `Tensor.eval()`, its graph must have been\n launched in a session, and either a default session must be\n available, or `session` must be specified explicitly.\n\n Args:\n feed_dict: A dictionary that maps `Tensor` objects to feed values. See\n `tf.Session.run` for a description of the valid feed values.\n session: (Optional.) The `Session` to be used to evaluate this tensor. If\n none, the default session will be used.\n\n Returns:\n A numpy array corresponding to the value of this tensor.\n \"\"\"\n return _eval_using_default_session(self, feed_dict, self.graph, session)\n\n @deprecation.deprecated(None, \"Use ref() instead.\")\n def experimental_ref(self):\n return self.ref()\n\n def ref(self):\n # tf.Variable also has the same ref() API. If you update the\n # documentation here, please update tf.Variable.ref() as well.\n \"\"\"Returns a hashable reference object to this Tensor.\n\n The primary use case for this API is to put tensors in a set/dictionary.\n We can't put tensors in a set/dictionary as `tensor.__hash__()` is no longer\n available starting Tensorflow 2.0.\n\n The following will raise an exception starting 2.0\n\n >>> x = tf.constant(5)\n >>> y = tf.constant(10)\n >>> z = tf.constant(10)\n >>> tensor_set = {x, y, z}\n Traceback (most recent call last):\n ...\n TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.\n >>> tensor_dict = {x: 'five', y: 'ten'}\n Traceback (most recent call last):\n ...\n TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.\n\n Instead, we can use `tensor.ref()`.\n\n >>> tensor_set = {x.ref(), y.ref(), z.ref()}\n >>> x.ref() in tensor_set\n True\n >>> tensor_dict = {x.ref(): 'five', y.ref(): 'ten', z.ref(): 'ten'}\n >>> tensor_dict[y.ref()]\n 'ten'\n\n Also, the reference object provides `.deref()` function that returns the\n original Tensor.\n\n >>> x = tf.constant(5)\n >>> x.ref().deref()\n <tf.Tensor: shape=(), dtype=int32, numpy=5>\n \"\"\"\n return object_identity.Reference(self)\n\n\n# TODO(agarwal): consider getting rid of this.\n# TODO(mdan): This object should not subclass ops.Tensor.\nclass _EagerTensorBase(Tensor):\n \"\"\"Base class for EagerTensor.\"\"\"\n\n # __complex__, __int__, __float__ and __index__ may copy the tensor to CPU and\n # only work for scalars; values are cast as per numpy.\n def __complex__(self):\n return complex(self._numpy())\n\n def __int__(self):\n return int(self._numpy())\n\n def __long__(self):\n return long(self._numpy())\n\n def __float__(self):\n return float(self._numpy())\n\n def __index__(self):\n return self._numpy().__index__()\n\n def __bool__(self):\n return bool(self._numpy())\n\n __nonzero__ = __bool__\n\n def __format__(self, format_spec):\n if self._prefer_custom_summarizer():\n return self._summarize_value().__format__(format_spec)\n elif self.dtype.is_numpy_compatible:\n # Not numpy_text here, otherwise the __format__ behaves differently.\n return self._numpy().__format__(format_spec)\n else:\n return \"<unprintable>\".__format__(format_spec)\n\n def __reduce__(self):\n return convert_to_tensor, (self._numpy(),)\n\n def __copy__(self):\n # Eager Tensors are immutable so it's safe to return themselves as a copy.\n return self\n\n def __deepcopy__(self, memo):\n # Eager Tensors are immutable so it's safe to return 
themselves as a copy.\n del memo\n return self\n\n def __str__(self):\n if self._prefer_custom_summarizer():\n value_text = self._summarize_value()\n else:\n value_text = numpy_text(self)\n return \"tf.Tensor(%s, shape=%s, dtype=%s)\" % (value_text, self.shape,\n self.dtype.name)\n\n def __repr__(self):\n if self._prefer_custom_summarizer():\n value_text = \"value=\" + self._summarize_value()\n else:\n value_text = \"numpy=\" + numpy_text(self, is_repr=True)\n return \"<tf.Tensor: shape=%s, dtype=%s, %s>\" % (self.shape, self.dtype.name,\n value_text)\n\n def __len__(self):\n \"\"\"Returns the length of the first dimension in the Tensor.\"\"\"\n if not self.shape.ndims:\n raise TypeError(\"Scalar tensor has no `len()`\")\n # pylint: disable=protected-access\n try:\n return self._shape_tuple()[0]\n except core._NotOkStatusException as e:\n raise core._status_to_exception(e.code, e.message) from None\n\n def __array__(self, dtype=None):\n a = self._numpy()\n if not dtype:\n return a\n\n return np.array(a, dtype=dtype)\n\n def _numpy_internal(self):\n raise NotImplementedError()\n\n def _numpy(self):\n try:\n return self._numpy_internal()\n except core._NotOkStatusException as e: # pylint: disable=protected-access\n raise core._status_to_exception(e.code, e.message) from None # pylint: disable=protected-access\n\n @property\n def dtype(self):\n # Note: using the intern table directly here as this is\n # performance-sensitive in some models.\n return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access\n\n def numpy(self):\n \"\"\"Copy of the contents of this Tensor into a NumPy array or scalar.\n\n Unlike NumPy arrays, Tensors are immutable, so this method has to copy\n the contents to ensure safety. Use `memoryview` to get a readonly\n view of the contents without doing a copy:\n\n >>> t = tf.constant([42])\n >>> np.array(memoryview(t))\n array([42], dtype=int32)\n\n Note that `memoryview` is only zero-copy for Tensors on CPU. If a Tensor\n is on GPU, it will have to be transferred to CPU first in order for\n `memoryview` to work.\n\n Returns:\n A NumPy array of the same shape and dtype or a NumPy scalar, if this\n Tensor has rank 0.\n\n Raises:\n ValueError: If the dtype of this Tensor does not have a compatible\n NumPy dtype.\n \"\"\"\n # TODO(slebedev): Consider avoiding a copy for non-CPU or remote tensors.\n maybe_arr = self._numpy() # pylint: disable=protected-access\n return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr\n\n @property\n def backing_device(self):\n \"\"\"Returns the name of the device holding this tensor's memory.\n\n `.backing_device` is usually the same as `.device`, which returns\n the device on which the kernel of the operation that produced this tensor\n ran. However, some operations can produce tensors on a different device\n (e.g., an operation that executes on the GPU but produces output tensors\n in host memory).\n \"\"\"\n raise NotImplementedError()\n\n def _datatype_enum(self):\n raise NotImplementedError()\n\n def _shape_tuple(self):\n \"\"\"The shape of this Tensor, as a tuple.\n\n This is more performant than tuple(shape().as_list()) as it avoids\n two list and one object creation. Marked private for now as from an API\n perspective, it would be better to have a single performant way of\n getting a shape rather than exposing shape() and shape_tuple()\n (and heaven forbid, shape_list() etc. as well!). 
Punting on that for now,\n but ideally one would work things out and remove the need for this method.\n\n Returns:\n tuple with the shape.\n \"\"\"\n raise NotImplementedError()\n\n def _rank(self):\n \"\"\"Integer rank of this Tensor.\n\n Unlike regular Tensors, the rank is always known for EagerTensors.\n\n This is more performant than len(self._shape_tuple())\n\n Returns:\n Integer rank\n \"\"\"\n raise NotImplementedError()\n\n def _num_elements(self):\n \"\"\"Number of elements of this Tensor.\n\n Unlike regular Tensors, the number of elements is always known for\n EagerTensors.\n\n This is more performant than tensor.shape.num_elements\n\n Returns:\n Long - num elements in the tensor\n \"\"\"\n raise NotImplementedError()\n\n def _copy_to_device(self, device_name): # pylint: disable=redefined-outer-name\n raise NotImplementedError()\n\n @staticmethod\n def _override_operator(name, func):\n setattr(_EagerTensorBase, name, func)\n\n def _copy_nograd(self, ctx=None, device_name=None):\n \"\"\"Copies tensor to dest device, but doesn't record the operation.\"\"\"\n # Creates a new tensor on the dest device.\n if ctx is None:\n ctx = context.context()\n if device_name is None:\n device_name = ctx.device_name\n # pylint: disable=protected-access\n try:\n ctx.ensure_initialized()\n new_tensor = self._copy_to_device(device_name)\n except core._NotOkStatusException as e:\n raise core._status_to_exception(e.code, e.message) from None\n return new_tensor\n\n def _copy(self, ctx=None, device_name=None):\n \"\"\"Copies tensor to dest device.\"\"\"\n new_tensor = self._copy_nograd(ctx, device_name)\n # Record the copy on tape and define backprop copy as well.\n if context.executing_eagerly():\n self_device = self.device\n\n def grad_fun(dresult):\n return [\n dresult._copy(device_name=self_device)\n if hasattr(dresult, \"_copy\") else dresult\n ]\n\n tape.record_operation(\"_copy\", [new_tensor], [self], grad_fun)\n return new_tensor\n # pylint: enable=protected-access\n\n @property\n def shape(self):\n if self._tensor_shape is None: # pylint: disable=access-member-before-definition\n # pylint: disable=protected-access\n try:\n # `_tensor_shape` is declared and defined in the definition of\n # `EagerTensor`, in C.\n self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())\n except core._NotOkStatusException as e:\n raise core._status_to_exception(e.code, e.message) from None\n\n return self._tensor_shape\n\n def get_shape(self):\n \"\"\"Alias of Tensor.shape.\"\"\"\n return self.shape\n\n def _shape_as_list(self):\n \"\"\"The shape of the tensor as a list.\"\"\"\n return list(self._shape_tuple())\n\n @property\n def ndim(self):\n \"\"\"Returns the number of Tensor dimensions.\"\"\"\n return self.shape.ndims\n\n @deprecation.deprecated(None, \"Use tf.identity instead.\")\n def cpu(self):\n \"\"\"A copy of this Tensor with contents backed by host memory.\"\"\"\n return self._copy(context.context(), \"CPU:0\")\n\n @deprecation.deprecated(None, \"Use tf.identity instead.\")\n def gpu(self, gpu_index=0):\n \"\"\"A copy of this Tensor with contents backed by memory on the GPU.\n\n Args:\n gpu_index: Identifies which GPU to place the contents on the returned\n Tensor in.\n\n Returns:\n A GPU-memory backed Tensor object initialized with the same contents\n as this Tensor.\n \"\"\"\n return self._copy(context.context(), \"GPU:\" + str(gpu_index))\n\n def set_shape(self, shape):\n if not self.shape.is_compatible_with(shape):\n raise ValueError(\n \"Tensor's shape %s is not compatible with supplied 
shape %s\" %\n (self.shape, shape))\n\n # Methods not supported / implemented for Eager Tensors.\n @property\n def op(self):\n raise AttributeError(\n \"Tensor.op is meaningless when eager execution is enabled.\")\n\n @property\n def graph(self):\n raise AttributeError(\n \"Tensor.graph is meaningless when eager execution is enabled.\")\n\n @property\n def name(self):\n raise AttributeError(\n \"Tensor.name is meaningless when eager execution is enabled.\")\n\n @property\n def value_index(self):\n raise AttributeError(\n \"Tensor.value_index is meaningless when eager execution is enabled.\")\n\n def consumers(self):\n raise NotImplementedError(\n \"Tensor.consumers is meaningless when eager execution is enabled.\")\n\n def _add_consumer(self, consumer):\n raise NotImplementedError(\n \"_add_consumer not supported when eager execution is enabled.\")\n\n def _as_node_def_input(self):\n raise NotImplementedError(\n \"_as_node_def_input not supported when eager execution is enabled.\")\n\n def _as_tf_output(self):\n raise NotImplementedError(\n \"_as_tf_output not supported when eager execution is enabled.\")\n\n def eval(self, feed_dict=None, session=None):\n raise NotImplementedError(\n \"eval is not supported when eager execution is enabled, \"\n \"is .numpy() what you're looking for?\")\n\n\n# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and\n# registers it with the current module.\n# It is exposed as an __internal__ api for now (b/171081052), though we\n# expect it to be eventually covered by tf Tensor types and typing.\nEagerTensor = tf_export(\"__internal__.EagerTensor\", v1=[])(\n pywrap_tfe.TFE_Py_InitEagerTensor(_EagerTensorBase))\n\n\n@tf_export(v1=[\"convert_to_tensor\"])\[email protected]_dispatch_support\ndef convert_to_tensor_v1_with_dispatch(\n value,\n dtype=None,\n name=None,\n preferred_dtype=None,\n dtype_hint=None):\n \"\"\"Converts the given `value` to a `Tensor`.\n\n This function converts Python objects of various types to `Tensor`\n objects. It accepts `Tensor` objects, numpy arrays, Python lists,\n and Python scalars. For example:\n\n ```python\n import numpy as np\n\n def my_func(arg):\n arg = tf.convert_to_tensor(arg, dtype=tf.float32)\n return tf.matmul(arg, arg) + arg\n\n # The following calls are equivalent.\n value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))\n value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])\n value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))\n ```\n\n This function can be useful when composing a new operation in Python\n (such as `my_func` in the example above). All standard Python op\n constructors apply this function to each of their Tensor-valued\n inputs, which allows those ops to accept numpy arrays, Python lists,\n and scalars in addition to `Tensor` objects.\n\n Note: This function diverges from default Numpy behavior for `float` and\n `string` types when `None` is present in a Python list or scalar. Rather\n than silently converting `None` values, an error will be thrown.\n\n Args:\n value: An object whose type has a registered `Tensor` conversion function.\n dtype: Optional element type for the returned tensor. If missing, the type\n is inferred from the type of `value`.\n name: Optional name to use if a new `Tensor` is created.\n preferred_dtype: Optional element type for the returned tensor, used when\n dtype is None. In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so preferred_dtype can be used as a soft\n preference. 
If the conversion to `preferred_dtype` is not possible, this\n argument has no effect.\n dtype_hint: same meaning as preferred_dtype, and overrides it.\n\n Returns:\n A `Tensor` based on `value`.\n\n Raises:\n TypeError: If no conversion function is registered for `value` to `dtype`.\n RuntimeError: If a registered conversion function returns an invalid value.\n ValueError: If the `value` is a tensor not of given `dtype` in graph mode.\n \"\"\"\n return convert_to_tensor_v1(value, dtype=dtype, name=name,\n preferred_dtype=preferred_dtype,\n dtype_hint=dtype_hint)\n\n\ndef convert_to_tensor_v1(value,\n dtype=None,\n name=None,\n preferred_dtype=None,\n dtype_hint=None):\n \"\"\"Converts the given `value` to a `Tensor` (with the TF1 API).\"\"\"\n preferred_dtype = deprecation.deprecated_argument_lookup(\n \"dtype_hint\", dtype_hint, \"preferred_dtype\", preferred_dtype)\n return convert_to_tensor_v2(value, dtype, preferred_dtype, name)\n\n\n@tf_export(\"convert_to_tensor\", v1=[])\[email protected]_dispatch_support\ndef convert_to_tensor_v2_with_dispatch(\n value, dtype=None, dtype_hint=None, name=None):\n \"\"\"Converts the given `value` to a `Tensor`.\n\n This function converts Python objects of various types to `Tensor`\n objects. It accepts `Tensor` objects, numpy arrays, Python lists,\n and Python scalars.\n\n For example:\n\n >>> import numpy as np\n >>> def my_func(arg):\n ... arg = tf.convert_to_tensor(arg, dtype=tf.float32)\n ... return arg\n\n >>> # The following calls are equivalent.\n ...\n >>> value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))\n >>> print(value_1)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n >>> value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])\n >>> print(value_2)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n >>> value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))\n >>> print(value_3)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n\n This function can be useful when composing a new operation in Python\n (such as `my_func` in the example above). All standard Python op\n constructors apply this function to each of their Tensor-valued\n inputs, which allows those ops to accept numpy arrays, Python lists,\n and scalars in addition to `Tensor` objects.\n\n Note: This function diverges from default Numpy behavior for `float` and\n `string` types when `None` is present in a Python list or scalar. Rather\n than silently converting `None` values, an error will be thrown.\n\n Args:\n value: An object whose type has a registered `Tensor` conversion function.\n dtype: Optional element type for the returned tensor. If missing, the type\n is inferred from the type of `value`.\n dtype_hint: Optional element type for the returned tensor, used when dtype\n is None. 
In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so dtype_hint can be used as a soft preference.\n If the conversion to `dtype_hint` is not possible, this argument has no\n effect.\n name: Optional name to use if a new `Tensor` is created.\n\n Returns:\n A `Tensor` based on `value`.\n\n Raises:\n TypeError: If no conversion function is registered for `value` to `dtype`.\n RuntimeError: If a registered conversion function returns an invalid value.\n ValueError: If the `value` is a tensor not of given `dtype` in graph mode.\n \"\"\"\n return convert_to_tensor_v2(\n value, dtype=dtype, dtype_hint=dtype_hint, name=name)\n\n\ndef convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):\n \"\"\"Converts the given `value` to a `Tensor`.\"\"\"\n return convert_to_tensor(\n value=value,\n dtype=dtype,\n name=name,\n preferred_dtype=dtype_hint,\n as_ref=False)\n\n\ndef _error_prefix(name):\n return \"\" if name is None else \"%s: \" % name\n\n\ndef pack_eager_tensors(tensors, ctx=None):\n \"\"\"Pack multiple `EagerTensor`s of the same dtype and shape.\n\n Args:\n tensors: a list of EagerTensors to pack.\n ctx: context.context().\n\n Returns:\n A packed EagerTensor.\n \"\"\"\n if not isinstance(tensors, list):\n raise TypeError(\"tensors must be a list or a tuple: %s\" % tensors)\n\n if not tensors:\n raise ValueError(\"Empty tensors is unexpected for packing.\")\n\n dtype = tensors[0].dtype\n shape = tensors[0].shape\n handle_data = tensors[0]._handle_data # pylint: disable=protected-access\n is_resource = dtype == dtypes.resource\n for i in range(len(tensors)):\n t = tensors[i]\n if not isinstance(t, EagerTensor):\n raise TypeError(\"tensors must be a list of EagerTensors: %s\" % t)\n\n if t.dtype != dtype:\n raise ValueError(\n \"All tensors being packed should have the same dtype %s, \"\n \"but the %d-th tensor is of dtype %s\" % (dtype, i, t.dtype))\n if t.shape != shape:\n raise ValueError(\n \"All tensors being packed should have the same shape %s, \"\n \"but the %d-th tensor is of shape %s\" % (shape, i, t.shape))\n # pylint: disable=protected-access\n if is_resource and t._handle_data != handle_data:\n raise ValueError(\n \"All tensors being packed should have the same handle data %s, \"\n \"but the %d-th tensor is of handle data %s\" %\n (handle_data, i, t._handle_data))\n # pylint: enable=protected-access\n\n if ctx is None:\n ctx = context.context()\n\n # Propogate handle data for resource variables\n packed_tensor = ctx.pack_eager_tensors(tensors)\n if handle_data is not None:\n packed_tensor._handle_data = handle_data # pylint: disable=protected-access\n\n def grad_fun(_):\n raise ValueError(\n \"Gradients through pack_eager_tensors are not supported yet.\")\n\n tape.record_operation(\"pack_eager_tensors\", [packed_tensor], tensors,\n grad_fun)\n\n return packed_tensor\n\n\[email protected]_wrapper(\"convert_to_tensor\")\ndef convert_to_tensor(value,\n dtype=None,\n name=None,\n as_ref=False,\n preferred_dtype=None,\n dtype_hint=None,\n ctx=None,\n accepted_result_types=(Tensor,)):\n \"\"\"Implementation of the public convert_to_tensor.\"\"\"\n # TODO(b/142518781): Fix all call-sites and remove redundant arg\n preferred_dtype = preferred_dtype or dtype_hint\n if isinstance(value, EagerTensor):\n if ctx is None:\n ctx = context.context()\n if not ctx.executing_eagerly():\n graph = get_default_graph()\n if not graph.building_function:\n raise RuntimeError(\"Attempting to capture an EagerTensor without \"\n \"building a function.\")\n return 
graph.capture(value, name=name)\n\n if dtype is not None:\n dtype = dtypes.as_dtype(dtype)\n if isinstance(value, Tensor):\n if dtype is not None and not dtype.is_compatible_with(value.dtype):\n raise ValueError(\n \"Tensor conversion requested dtype %s for Tensor with dtype %s: %r\" %\n (dtype.name, value.dtype.name, value))\n return value\n\n if preferred_dtype is not None:\n preferred_dtype = dtypes.as_dtype(preferred_dtype)\n\n # See below for the reason why it's `type(value)` and not just `value`.\n # https://docs.python.org/3.8/reference/datamodel.html#special-lookup\n overload = getattr(type(value), \"__tf_tensor__\", None)\n if overload is not None:\n return overload(value, dtype, name) # pylint: disable=not-callable\n\n for base_type, conversion_func in tensor_conversion_registry.get(type(value)):\n # If dtype is None but preferred_dtype is not None, we try to\n # cast to preferred_dtype first.\n ret = None\n if dtype is None and preferred_dtype is not None:\n try:\n ret = conversion_func(\n value, dtype=preferred_dtype, name=name, as_ref=as_ref)\n except (TypeError, ValueError):\n # Could not coerce the conversion to use the preferred dtype.\n pass\n else:\n if (ret is not NotImplemented and\n ret.dtype.base_dtype != preferred_dtype.base_dtype):\n raise TypeError(\"convert_to_tensor did not convert to \"\n \"the preferred dtype: %s vs %s \" %\n (ret.dtype.base_dtype, preferred_dtype.base_dtype))\n\n if ret is None:\n ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)\n\n if ret is NotImplemented:\n continue\n\n if not isinstance(ret, accepted_result_types):\n raise RuntimeError(\n \"%sConversion function %r for type %s returned non-Tensor: %r\" %\n (_error_prefix(name), conversion_func, base_type, ret))\n if dtype and not dtype.is_compatible_with(ret.dtype):\n raise RuntimeError(\n \"%sConversion function %r for type %s returned incompatible \"\n \"dtype: requested = %s, actual = %s\" %\n (_error_prefix(name), conversion_func, base_type, dtype.name,\n ret.dtype.name))\n return ret\n raise TypeError(\"%sCannot convert %r with type %s to Tensor: \"\n \"no conversion function registered.\" %\n (_error_prefix(name), value, type(value)))\n\n\ninternal_convert_to_tensor = convert_to_tensor\n\n\ndef internal_convert_n_to_tensor(values,\n dtype=None,\n name=None,\n as_ref=False,\n preferred_dtype=None,\n ctx=None):\n \"\"\"Converts `values` to a list of `Tensor` objects.\n\n Args:\n values: A list of objects that can be consumed by `tf.convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` objects.\n name: (Optional.) A name prefix to used when a new `Tensor` is created, in\n which case element `i` will be given the name `name + '_' + i`.\n as_ref: True if the caller wants the results as ref tensors.\n preferred_dtype: Optional element type for the returned tensors, used when\n dtype is None. In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so preferred_dtype can be used as a soft\n preference. 
If the conversion to `preferred_dtype` is not possible, this\n argument has no effect.\n ctx: The value of context.context().\n\n Returns:\n A list of `Tensor` and/or `IndexedSlices` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n if not isinstance(values, collections_abc.Sequence):\n raise TypeError(\"values must be a sequence.\")\n ret = []\n if ctx is None:\n ctx = context.context()\n for i, value in enumerate(values):\n n = None if name is None else \"%s_%d\" % (name, i)\n ret.append(\n convert_to_tensor(\n value,\n dtype=dtype,\n name=n,\n as_ref=as_ref,\n preferred_dtype=preferred_dtype,\n ctx=ctx))\n return ret\n\n\ndef convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):\n \"\"\"Converts `values` to a list of `Tensor` objects.\n\n Args:\n values: A list of objects that can be consumed by `tf.convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` objects.\n name: (Optional.) A name prefix to used when a new `Tensor` is created, in\n which case element `i` will be given the name `name + '_' + i`.\n preferred_dtype: Optional element type for the returned tensors, used when\n dtype is None. In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so preferred_dtype can be used as a soft\n preference. If the conversion to `preferred_dtype` is not possible, this\n argument has no effect.\n\n Returns:\n A list of `Tensor` and/or `IndexedSlices` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n return internal_convert_n_to_tensor(\n values=values,\n dtype=dtype,\n name=name,\n preferred_dtype=preferred_dtype,\n as_ref=False)\n\n\ndef convert_to_tensor_or_composite(value, dtype=None, name=None):\n \"\"\"Converts the given object to a `Tensor` or `CompositeTensor`.\n\n If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it\n is converted to a `Tensor` using `convert_to_tensor()`.\n\n Args:\n value: A `CompositeTensor` or an object that can be consumed by\n `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` or\n `CompositeTensor`.\n name: (Optional.) A name to use if a new `Tensor` is created.\n\n Returns:\n A `Tensor` or `CompositeTensor`, based on `value`.\n\n Raises:\n ValueError: If `dtype` does not match the element type of `value`.\n \"\"\"\n return internal_convert_to_tensor_or_composite(\n value=value, dtype=dtype, name=name, as_ref=False)\n\n\ndef internal_convert_to_tensor_or_composite(value,\n dtype=None,\n name=None,\n as_ref=False):\n \"\"\"Converts the given object to a `Tensor` or `CompositeTensor`.\n\n If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it\n is converted to a `Tensor` using `convert_to_tensor()`.\n\n Args:\n value: A `CompositeTensor`, or an object that can be consumed by\n `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` or\n `CompositeTensor`.\n name: (Optional.) 
A name to use if a new `Tensor` is created.\n as_ref: True if the caller wants the results as ref tensors.\n\n Returns:\n A `Tensor` or `CompositeTensor`, based on `value`.\n\n Raises:\n ValueError: If `dtype` does not match the element type of `value`.\n \"\"\"\n if isinstance(value, composite_tensor.CompositeTensor):\n value_dtype = getattr(value, \"dtype\", None)\n if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):\n raise ValueError(\n \"Tensor conversion requested dtype %s for Tensor with dtype %s: %r\" %\n (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))\n return value\n else:\n return convert_to_tensor(\n value,\n dtype=dtype,\n name=name,\n as_ref=as_ref,\n accepted_result_types=(Tensor, composite_tensor.CompositeTensor))\n\n\ndef internal_convert_n_to_tensor_or_composite(values,\n dtype=None,\n name=None,\n as_ref=False):\n \"\"\"Converts `values` to a list of `Tensor` or `CompositeTensor` objects.\n\n Any `CompositeTensor` objects in `values` are returned unmodified.\n\n Args:\n values: A list of `None`, `CompositeTensor`, or objects that can be consumed\n by `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor`s or\n `CompositeTensor`s.\n name: (Optional.) A name prefix to used when a new `Tensor` is created, in\n which case element `i` will be given the name `name + '_' + i`.\n as_ref: True if the caller wants the results as ref tensors.\n\n Returns:\n A list of `Tensor`, `CompositeTensor`, and/or `None` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n if not isinstance(values, collections_abc.Sequence):\n raise TypeError(\"values must be a sequence.\")\n ret = []\n for i, value in enumerate(values):\n if value is None:\n ret.append(value)\n else:\n n = None if name is None else \"%s_%d\" % (name, i)\n ret.append(\n internal_convert_to_tensor_or_composite(\n value, dtype=dtype, name=n, as_ref=as_ref))\n return ret\n\n\ndef convert_n_to_tensor_or_composite(values, dtype=None, name=None):\n \"\"\"Converts `values` to a list of `Output` or `CompositeTensor` objects.\n\n Any `CompositeTensor` objects in `values` are returned unmodified.\n\n Args:\n values: A list of `None`, `CompositeTensor``, or objects that can be\n consumed by `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor`s or\n `CompositeTensor`s.\n name: (Optional.) 
A name prefix to used when a new `Tensor` is created, in\n which case element `i` will be given the name `name + '_' + i`.\n\n Returns:\n A list of `Tensor` and/or `CompositeTensor` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n return internal_convert_n_to_tensor_or_composite(\n values=values, dtype=dtype, name=name, as_ref=False)\n\n\ndef _device_string(dev_spec):\n if pydev.is_device_spec(dev_spec):\n return dev_spec.to_string()\n else:\n return dev_spec\n\n\ndef _NodeDef(op_type, name, attrs=None):\n \"\"\"Create a NodeDef proto.\n\n Args:\n op_type: Value for the \"op\" attribute of the NodeDef proto.\n name: Value for the \"name\" attribute of the NodeDef proto.\n attrs: Dictionary where the key is the attribute name (a string)\n and the value is the respective \"attr\" attribute of the NodeDef proto (an\n AttrValue).\n\n Returns:\n A node_def_pb2.NodeDef protocol buffer.\n \"\"\"\n node_def = node_def_pb2.NodeDef(op=compat.as_bytes(op_type),\n name=compat.as_bytes(name))\n if attrs:\n for k, v in six.iteritems(attrs):\n node_def.attr[k].CopyFrom(v)\n return node_def\n\n\n# Copied from core/framework/node_def_util.cc\n# TODO(mrry,josh11b): Consolidate this validation in C++ code.\n_VALID_OP_NAME_REGEX = re.compile(r\"^[A-Za-z0-9.][A-Za-z0-9_.\\\\/>-]*$\")\n_VALID_SCOPE_NAME_REGEX = re.compile(r\"^[A-Za-z0-9_.\\\\/>-]*$\")\n\n\n@tf_export(\"__internal__.create_c_op\", v1=[])\n@traceback_utils.filter_traceback\ndef _create_c_op(graph, node_def, inputs, control_inputs, op_def=None):\n \"\"\"Creates a TF_Operation.\n\n Args:\n graph: a `Graph`.\n node_def: `node_def_pb2.NodeDef` for the operation to create.\n inputs: A flattened list of `Tensor`s. This function handles grouping\n tensors into lists as per attributes in the `node_def`.\n control_inputs: A list of `Operation`s to set as control dependencies.\n op_def: Optional. `op_def_pb2.OpDef` for the operation to create. 
If not\n specified, is looked up from the `graph` using `node_def.op`.\n\n Returns:\n A wrapped TF_Operation*.\n \"\"\"\n if op_def is None:\n op_def = graph._get_op_def(node_def.op) # pylint: disable=protected-access\n # TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.\n # Refactor so we don't have to do this here.\n inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.attr)\n # pylint: disable=protected-access\n op_desc = pywrap_tf_session.TF_NewOperation(graph._c_graph,\n compat.as_str(node_def.op),\n compat.as_str(node_def.name))\n if node_def.device:\n pywrap_tf_session.TF_SetDevice(op_desc, compat.as_str(node_def.device))\n # Add inputs\n for op_input in inputs:\n if isinstance(op_input, (list, tuple)):\n pywrap_tf_session.TF_AddInputList(op_desc,\n [t._as_tf_output() for t in op_input])\n else:\n pywrap_tf_session.TF_AddInput(op_desc, op_input._as_tf_output())\n\n # Add control inputs\n for control_input in control_inputs:\n pywrap_tf_session.TF_AddControlInput(op_desc, control_input._c_op)\n # pylint: enable=protected-access\n\n # Add attrs\n for name, attr_value in node_def.attr.items():\n serialized = attr_value.SerializeToString()\n # TODO(skyewm): this creates and deletes a new TF_Status for every attr.\n # It might be worth creating a convenient way to re-use the same status.\n pywrap_tf_session.TF_SetAttrValueProto(op_desc, compat.as_str(name),\n serialized)\n\n try:\n c_op = pywrap_tf_session.TF_FinishOperation(op_desc)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(e.message)\n\n return c_op\n\n\n@tf_export(\"Operation\")\nclass Operation(object):\n \"\"\"Represents a graph node that performs computation on tensors.\n\n An `Operation` is a node in a `tf.Graph` that takes zero or more `Tensor`\n objects as input, and produces zero or more `Tensor` objects as output.\n Objects of type `Operation` are created by calling a Python op constructor\n (such as `tf.matmul`) within a `tf.function` or under a `tf.Graph.as_default`\n context manager.\n\n For example, within a `tf.function`, `c = tf.matmul(a, b)` creates an\n `Operation` of type \"MatMul\" that takes tensors `a` and `b` as input, and\n produces `c` as output.\n\n If a `tf.compat.v1.Session` is used, an `Operation` of a `tf.Graph` can be\n executed by passing it to `tf.Session.run`. `op.run()` is a shortcut for\n calling `tf.compat.v1.get_default_session().run(op)`.\n \"\"\"\n\n def __init__(self,\n node_def,\n g,\n inputs=None,\n output_types=None,\n control_inputs=None,\n input_types=None,\n original_op=None,\n op_def=None):\n r\"\"\"Creates an `Operation`.\n\n NOTE: This constructor validates the name of the `Operation` (passed\n as `node_def.name`). Valid `Operation` names match the following\n regular expression:\n\n [A-Za-z0-9.][A-Za-z0-9_.\\\\-/]*\n\n Args:\n node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for\n attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and\n `device`. The `input` attribute is irrelevant here as it will be\n computed when generating the model.\n g: `Graph`. The parent graph.\n inputs: list of `Tensor` objects. The inputs to this `Operation`.\n output_types: list of `DType` objects. List of the types of the `Tensors`\n computed by this operation. 
The length of this list indicates the\n number of output endpoints of the `Operation`.\n control_inputs: list of operations or tensors from which to have a control\n dependency.\n input_types: List of `DType` objects representing the types of the tensors\n accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x\n in inputs]`. Operations that expect reference-typed inputs must specify\n these explicitly.\n original_op: Optional. Used to associate the new `Operation` with an\n existing `Operation` (for example, a replica with the op that was\n replicated).\n op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type\n that this `Operation` represents.\n\n Raises:\n TypeError: if control inputs are not Operations or Tensors,\n or if `node_def` is not a `NodeDef`,\n or if `g` is not a `Graph`,\n or if `inputs` are not tensors,\n or if `inputs` and `input_types` are incompatible.\n ValueError: if the `node_def` name is not valid.\n \"\"\"\n # For internal use only: `node_def` can be set to a TF_Operation to create\n # an Operation for that op. This is useful for creating Operations for ops\n # indirectly created by C API methods, e.g. the ops created by\n # TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields\n # should be None.\n\n if isinstance(node_def, node_def_pb2.NodeDef):\n if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:\n raise ValueError(\n \"Cannot create a tensor proto whose content is larger than 2GB.\")\n if not _VALID_OP_NAME_REGEX.match(node_def.name):\n raise ValueError(\"'%s' is not a valid node name\" % node_def.name)\n c_op = None\n elif type(node_def).__name__ == \"TF_Operation\":\n assert inputs is None\n assert output_types is None\n assert control_inputs is None\n assert input_types is None\n assert original_op is None\n assert op_def is None\n c_op = node_def\n else:\n raise TypeError(\"node_def needs to be a NodeDef: %s\" % (node_def,))\n\n if not isinstance(g, Graph):\n raise TypeError(\"g needs to be a Graph: %s\" % (g,))\n self._graph = g\n\n if inputs is None:\n inputs = []\n elif not isinstance(inputs, list):\n raise TypeError(\"inputs needs to be a list of Tensors: %s\" % (inputs,))\n for a in inputs:\n if not isinstance(a, Tensor):\n raise TypeError(\"input needs to be a Tensor: %s\" % (a,))\n if input_types is None:\n input_types = [i.dtype.base_dtype for i in inputs]\n else:\n if not all(\n x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):\n raise TypeError(\"In op '%s', input types (%s) are not compatible \"\n \"with expected types (%s)\" %\n (node_def.name, [i.dtype for i in inputs], input_types))\n\n # Build the list of control inputs.\n control_input_ops = []\n if control_inputs:\n for c in control_inputs:\n control_op = None\n if isinstance(c, Operation):\n control_op = c\n elif isinstance(c, (Tensor, IndexedSlices)):\n control_op = c.op\n else:\n raise TypeError(\"Control input must be an Operation, \"\n \"a Tensor, or IndexedSlices: %s\" % c)\n control_input_ops.append(control_op)\n\n # This will be set by self.inputs.\n self._inputs_val = None\n\n # pylint: disable=protected-access\n self._original_op = original_op\n\n # List of _UserDevSpecs holding code location of device context manager\n # invocations and the users original argument to them.\n self._device_code_locations = None\n # Dict mapping op name to file and line information for op colocation\n # context managers.\n self._colocation_code_locations = None\n self._control_flow_context = 
self.graph._get_control_flow_context()\n\n # Gradient function for this op. There are three ways to specify gradient\n # function, and first available gradient gets used, in the following order.\n # 1. self._gradient_function\n # 2. Gradient name registered by \"_gradient_op_type\" attribute.\n # 3. Gradient name registered by op.type.\n self._gradient_function = None\n\n # Initialize self._c_op.\n if c_op:\n self._c_op = c_op\n op_def = g._get_op_def(pywrap_tf_session.TF_OperationOpType(c_op))\n name = self.name\n else:\n if op_def is None:\n op_def = self._graph._get_op_def(node_def.op)\n self._c_op = _create_c_op(self._graph, node_def, inputs,\n control_input_ops, op_def)\n name = compat.as_str(node_def.name)\n\n self._traceback = tf_stack.extract_stack_for_node(self._c_op)\n\n # pylint: enable=protected-access\n\n self._is_stateful = op_def.is_stateful\n\n # Initialize self._outputs.\n num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)\n self._outputs = []\n for i in range(num_outputs):\n tf_output = c_api_util.tf_output(self._c_op, i)\n output_type = pywrap_tf_session.TF_OperationOutputType(tf_output)\n tensor = Tensor._create_with_tf_output(self, i, output_type, tf_output) # pylint: disable=protected-access\n self._outputs.append(tensor)\n\n self._id_value = self._graph._add_op(self, name) # pylint: disable=protected-access\n\n if not c_op:\n self._control_flow_post_processing(input_tensors=inputs)\n\n def _control_flow_post_processing(self, input_tensors=None):\n \"\"\"Add this op to its control flow context.\n\n This may add new ops and change this op's inputs. self.inputs must be\n available before calling this method.\n\n Args:\n input_tensors: (Optional.) A list of `Tensors` corresponding to the inputs\n of this op, which should be equivalent to `self.inputs`. 
Pass this\n argument to avoid evaluating `self.inputs` unnecessarily.\n \"\"\"\n if input_tensors is None:\n input_tensors = self.inputs\n for input_tensor in input_tensors:\n control_flow_util.CheckInputFromValidContext(self, input_tensor.op)\n if self._control_flow_context is not None:\n self._control_flow_context.AddOp(self)\n\n def colocation_groups(self):\n \"\"\"Returns the list of colocation groups of the op.\"\"\"\n default_colocation_group = [compat.as_bytes(\"loc:@%s\" % self.name)]\n try:\n class_attr = self.get_attr(\"_class\")\n except ValueError:\n # This op has no explicit colocation group, so it is itself its\n # own root of a colocation group.\n return default_colocation_group\n\n attr_groups = [\n class_name for class_name in class_attr\n if class_name.startswith(b\"loc:@\")\n ]\n\n # If there are no colocation groups in the explicit _class field,\n # return the default colocation group.\n return attr_groups if attr_groups else default_colocation_group\n\n def values(self):\n \"\"\"DEPRECATED: Use outputs.\"\"\"\n return tuple(self.outputs)\n\n def _get_control_flow_context(self):\n \"\"\"Returns the control flow context of this op.\n\n Returns:\n A context object.\n \"\"\"\n return self._control_flow_context\n\n def _set_control_flow_context(self, ctx):\n \"\"\"Sets the current control flow context of this op.\n\n Args:\n ctx: a context object.\n \"\"\"\n self._control_flow_context = ctx\n\n @property\n def name(self):\n \"\"\"The full name of this operation.\"\"\"\n return pywrap_tf_session.TF_OperationName(self._c_op)\n\n @property\n def _id(self):\n \"\"\"The unique integer id of this operation.\"\"\"\n return self._id_value\n\n @property\n def device(self):\n \"\"\"The name of the device to which this op has been assigned, if any.\n\n Returns:\n The string name of the device to which this op has been\n assigned, or an empty string if it has not been assigned to a\n device.\n \"\"\"\n return pywrap_tf_session.TF_OperationDevice(self._c_op)\n\n @property\n def _device_assignments(self):\n \"\"\"Code locations for device context managers active at op creation.\n\n This property will return a list of traceable_stack.TraceableObject\n instances where .obj is a string representing the assigned device\n (or information about the function that would be applied to this op\n to compute the desired device) and the filename and lineno members\n record the location of the relevant device context manager.\n\n For example, suppose file_a contained these lines:\n\n file_a.py:\n 15: with tf.device('/gpu:0'):\n 16: node_b = tf.constant(4, name='NODE_B')\n\n Then a TraceableObject t_obj representing the device context manager\n would have these member values:\n\n t_obj.obj -> '/gpu:0'\n t_obj.filename = 'file_a.py'\n t_obj.lineno = 15\n\n and node_b.op._device_assignments would return the list [t_obj].\n\n Returns:\n [str: traceable_stack.TraceableObject, ...] as per this method's\n description, above.\n \"\"\"\n return self._device_code_locations or []\n\n @property\n def _colocation_dict(self):\n \"\"\"Code locations for colocation context managers active at op creation.\n\n This property will return a dictionary for which the keys are nodes with\n which this Operation is colocated, and for which the values are\n traceable_stack.TraceableObject instances. 
The TraceableObject instances\n record the location of the relevant colocation context manager but have the\n \"obj\" field set to None to prevent leaking private data.\n\n For example, suppose file_a contained these lines:\n\n file_a.py:\n 14: node_a = tf.constant(3, name='NODE_A')\n 15: with tf.compat.v1.colocate_with(node_a):\n 16: node_b = tf.constant(4, name='NODE_B')\n\n Then a TraceableObject t_obj representing the colocation context manager\n would have these member values:\n\n t_obj.obj -> None\n t_obj.filename = 'file_a.py'\n t_obj.lineno = 15\n\n and node_b.op._colocation_dict would return the dictionary\n\n { 'NODE_A': t_obj }\n\n Returns:\n {str: traceable_stack.TraceableObject} as per this method's description,\n above.\n \"\"\"\n locations_dict = self._colocation_code_locations or {}\n return locations_dict.copy()\n\n @property\n def _output_types(self):\n \"\"\"List this operation's output types.\n\n Returns:\n List of the types of the Tensors computed by this operation.\n Each element in the list is an integer whose value is one of\n the TF_DataType enums defined in pywrap_tf_session.h\n The length of this list indicates the number of output endpoints\n of the operation.\n \"\"\"\n num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)\n output_types = [\n int(pywrap_tf_session.TF_OperationOutputType(self._tf_output(i)))\n for i in xrange(num_outputs)\n ]\n\n return output_types\n\n def _tf_output(self, output_idx):\n \"\"\"Create and return a new TF_Output for output_idx'th output of this op.\"\"\"\n tf_output = pywrap_tf_session.TF_Output()\n tf_output.oper = self._c_op\n tf_output.index = output_idx\n return tf_output\n\n def _tf_input(self, input_idx):\n \"\"\"Create and return a new TF_Input for input_idx'th input of this op.\"\"\"\n tf_input = pywrap_tf_session.TF_Input()\n tf_input.oper = self._c_op\n tf_input.index = input_idx\n return tf_input\n\n def _set_device(self, device): # pylint: disable=redefined-outer-name\n \"\"\"Set the device of this operation.\n\n Args:\n device: string or device.. The device to set.\n \"\"\"\n self._set_device_from_string(compat.as_str(_device_string(device)))\n\n def _set_device_from_string(self, device_str):\n \"\"\"Fast path to set device if the type is known to be a string.\n\n This function is called frequently enough during graph construction that\n there are non-trivial performance gains if the caller can guarantee that\n the specified device is already a string.\n\n Args:\n device_str: A string specifying where to place this op.\n \"\"\"\n pywrap_tf_session.SetRequestedDevice(\n self._graph._c_graph, # pylint: disable=protected-access\n self._c_op, # pylint: disable=protected-access\n device_str)\n\n def _update_input(self, index, tensor):\n \"\"\"Update the input to this operation at the given index.\n\n NOTE: This is for TF internal use only. 
Please don't use it.\n\n Args:\n index: the index of the input to update.\n tensor: the Tensor to be used as the input at the given index.\n\n Raises:\n TypeError: if tensor is not a Tensor,\n or if input tensor type is not convertible to dtype.\n ValueError: if the Tensor is from a different graph.\n \"\"\"\n if not isinstance(tensor, Tensor):\n raise TypeError(\"tensor must be a Tensor: %s\" % tensor)\n _assert_same_graph(self, tensor)\n\n # Reset cached inputs.\n self._inputs_val = None\n pywrap_tf_session.UpdateEdge(\n self._graph._c_graph, # pylint: disable=protected-access\n tensor._as_tf_output(), # pylint: disable=protected-access\n self._tf_input(index))\n\n def _add_while_inputs(self, tensors):\n \"\"\"See AddWhileInputHack in python_api.h.\n\n NOTE: This is for TF internal use only. Please don't use it.\n\n Args:\n tensors: list of Tensors\n\n Raises:\n TypeError: if tensor is not a Tensor,\n or if input tensor type is not convertible to dtype.\n ValueError: if the Tensor is from a different graph.\n \"\"\"\n for tensor in tensors:\n if not isinstance(tensor, Tensor):\n raise TypeError(\"tensor must be a Tensor: %s\" % tensor)\n _assert_same_graph(self, tensor)\n\n # Reset cached inputs.\n self._inputs_val = None\n pywrap_tf_session.AddWhileInputHack(\n self._graph._c_graph, # pylint: disable=protected-access\n tensor._as_tf_output(), # pylint: disable=protected-access\n self._c_op)\n\n def _add_control_inputs(self, ops):\n \"\"\"Add a list of new control inputs to this operation.\n\n Args:\n ops: the list of Operations to add as control input.\n\n Raises:\n TypeError: if ops is not a list of Operations.\n ValueError: if any op in ops is from a different graph.\n \"\"\"\n for op in ops:\n if not isinstance(op, Operation):\n raise TypeError(\"op must be an Operation: %s\" % op)\n pywrap_tf_session.AddControlInput(\n self._graph._c_graph, # pylint: disable=protected-access\n self._c_op, # pylint: disable=protected-access\n op._c_op) # pylint: disable=protected-access\n\n def _add_control_input(self, op):\n \"\"\"Add a new control input to this operation.\n\n Args:\n op: the Operation to add as control input.\n\n Raises:\n TypeError: if op is not an Operation.\n ValueError: if op is from a different graph.\n \"\"\"\n if not isinstance(op, Operation):\n raise TypeError(\"op must be an Operation: %s\" % op)\n pywrap_tf_session.AddControlInput(\n self._graph._c_graph, # pylint: disable=protected-access\n self._c_op, # pylint: disable=protected-access\n op._c_op) # pylint: disable=protected-access\n\n def _remove_all_control_inputs(self):\n \"\"\"Removes any control inputs to this operation.\"\"\"\n pywrap_tf_session.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access\n\n def _add_outputs(self, types, shapes):\n \"\"\"Adds new Tensors to self.outputs.\n\n Note: this is generally unsafe to use. 
This is used in certain situations in\n conjunction with _set_type_list_attr.\n\n Args:\n types: list of DTypes\n shapes: list of TensorShapes\n \"\"\"\n assert len(types) == len(shapes)\n orig_num_outputs = len(self.outputs)\n for i in range(len(types)):\n t = Tensor(self, orig_num_outputs + i, types[i])\n self._outputs.append(t)\n t.set_shape(shapes[i])\n\n def __str__(self):\n return str(self.node_def)\n\n def __repr__(self):\n return \"<tf.Operation '%s' type=%s>\" % (self.name, self.type)\n\n def __tf_tensor__(self, dtype=None, name=None):\n \"\"\"Raises a helpful error.\"\"\"\n raise TypeError(\"can't convert Operation '{}' to Tensor\".format(self.name))\n\n @property\n def outputs(self):\n \"\"\"The list of `Tensor` objects representing the outputs of this op.\"\"\"\n return self._outputs\n\n @property\n def inputs(self):\n \"\"\"The sequence of `Tensor` objects representing the data inputs of this op.\"\"\"\n if self._inputs_val is None:\n # pylint: disable=protected-access\n self._inputs_val = tuple(\n map(self.graph._get_tensor_by_tf_output,\n pywrap_tf_session.GetOperationInputs(self._c_op)))\n # pylint: enable=protected-access\n return self._inputs_val\n\n @property\n def _input_types(self):\n num_inputs = pywrap_tf_session.TF_OperationNumInputs(self._c_op)\n input_types = [\n dtypes.as_dtype(\n pywrap_tf_session.TF_OperationInputType(self._tf_input(i)))\n for i in xrange(num_inputs)\n ]\n return input_types\n\n @property\n def control_inputs(self):\n \"\"\"The `Operation` objects on which this op has a control dependency.\n\n Before this op is executed, TensorFlow will ensure that the\n operations in `self.control_inputs` have finished executing. This\n mechanism can be used to run ops sequentially for performance\n reasons, or to ensure that the side effects of an op are observed\n in the correct order.\n\n Returns:\n A list of `Operation` objects.\n\n \"\"\"\n control_c_ops = pywrap_tf_session.TF_OperationGetControlInputs_wrapper(\n self._c_op)\n # pylint: disable=protected-access\n return [\n self.graph._get_operation_by_name_unsafe(\n pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops\n ]\n # pylint: enable=protected-access\n\n @property\n def _control_outputs(self):\n \"\"\"The `Operation` objects which have a control dependency on this op.\n\n Before any of the ops in self._control_outputs can execute tensorflow will\n ensure self has finished executing.\n\n Returns:\n A list of `Operation` objects.\n\n \"\"\"\n control_c_ops = pywrap_tf_session.TF_OperationGetControlOutputs_wrapper(\n self._c_op)\n # pylint: disable=protected-access\n return [\n self.graph._get_operation_by_name_unsafe(\n pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops\n ]\n # pylint: enable=protected-access\n\n @property\n def type(self):\n \"\"\"The type of the op (e.g. 
`\"MatMul\"`).\"\"\"\n return pywrap_tf_session.TF_OperationOpType(self._c_op)\n\n @property\n def graph(self):\n \"\"\"The `Graph` that contains this operation.\"\"\"\n return self._graph\n\n @property\n def node_def(self):\n # pylint: disable=line-too-long\n \"\"\"Returns the `NodeDef` representation of this operation.\n\n Returns:\n A\n [`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)\n protocol buffer.\n \"\"\"\n # pylint: enable=line-too-long\n with c_api_util.tf_buffer() as buf:\n pywrap_tf_session.TF_OperationToNodeDef(self._c_op, buf)\n data = pywrap_tf_session.TF_GetBuffer(buf)\n node_def = node_def_pb2.NodeDef()\n node_def.ParseFromString(compat.as_bytes(data))\n return node_def\n\n @property\n def op_def(self):\n # pylint: disable=line-too-long\n \"\"\"Returns the `OpDef` proto that represents the type of this op.\n\n Returns:\n An\n [`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)\n protocol buffer.\n \"\"\"\n # pylint: enable=line-too-long\n return self._graph._get_op_def(self.type)\n\n @property\n def traceback(self):\n \"\"\"Returns the call stack from when this operation was constructed.\"\"\"\n return self._traceback\n\n def _set_attr(self, attr_name, attr_value):\n \"\"\"Private method used to set an attribute in the node_def.\"\"\"\n buf = pywrap_tf_session.TF_NewBufferFromString(\n compat.as_bytes(attr_value.SerializeToString()))\n try:\n self._set_attr_with_buf(attr_name, buf)\n finally:\n pywrap_tf_session.TF_DeleteBuffer(buf)\n\n def _set_attr_with_buf(self, attr_name, attr_buf):\n \"\"\"Set an attr in the node_def with a pre-allocated buffer.\"\"\"\n # pylint: disable=protected-access\n pywrap_tf_session.SetAttr(self._graph._c_graph, self._c_op, attr_name,\n attr_buf)\n # pylint: enable=protected-access\n\n def _set_func_attr(self, attr_name, func_name):\n \"\"\"Private method used to set a function attribute in the node_def.\"\"\"\n func = attr_value_pb2.NameAttrList(name=func_name)\n self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))\n\n def _set_func_list_attr(self, attr_name, func_names):\n \"\"\"Private method used to set a list(function) attribute in the node_def.\"\"\"\n funcs = [attr_value_pb2.NameAttrList(name=func_name)\n for func_name in func_names]\n funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)\n self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))\n\n def _set_type_list_attr(self, attr_name, types):\n \"\"\"Private method used to set a list(type) attribute in the node_def.\"\"\"\n if not types:\n return\n if isinstance(types[0], dtypes.DType):\n types = [dt.as_datatype_enum for dt in types]\n types_list = attr_value_pb2.AttrValue.ListValue(type=types)\n self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))\n\n def _set_shape_list_attr(self, attr_name, shapes):\n \"\"\"Private method used to set a list(shape) attribute in the node_def.\"\"\"\n shapes = [s.as_proto() for s in shapes]\n shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)\n self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))\n\n def _clear_attr(self, attr_name):\n \"\"\"Private method used to clear an attribute in the node_def.\"\"\"\n # pylint: disable=protected-access\n pywrap_tf_session.ClearAttr(self._graph._c_graph, self._c_op, attr_name)\n # pylint: enable=protected-access\n\n def get_attr(self, name):\n \"\"\"Returns the value of the attr of this op with the given `name`.\n\n Args:\n name: The name of the attr to fetch.\n\n 
Returns:\n The value of the attr, as a Python object.\n\n Raises:\n ValueError: If this op does not have an attr with the given `name`.\n \"\"\"\n fields = (\"s\", \"i\", \"f\", \"b\", \"type\", \"shape\", \"tensor\", \"func\")\n try:\n with c_api_util.tf_buffer() as buf:\n pywrap_tf_session.TF_OperationGetAttrValueProto(self._c_op, name, buf)\n data = pywrap_tf_session.TF_GetBuffer(buf)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(e.message)\n x = attr_value_pb2.AttrValue()\n x.ParseFromString(data)\n\n oneof_value = x.WhichOneof(\"value\")\n if oneof_value is None:\n return []\n if oneof_value == \"list\":\n for f in fields:\n if getattr(x.list, f):\n if f == \"type\":\n return [dtypes.as_dtype(t) for t in x.list.type]\n else:\n return list(getattr(x.list, f))\n return []\n if oneof_value == \"type\":\n return dtypes.as_dtype(x.type)\n assert oneof_value in fields, \"Unsupported field type in \" + str(x)\n return getattr(x, oneof_value)\n\n def _get_attr_type(self, name):\n \"\"\"Returns the `DType` value of the attr of this op with the given `name`.\"\"\"\n try:\n dtype_enum = pywrap_tf_session.TF_OperationGetAttrType(self._c_op, name)\n return _DTYPES_INTERN_TABLE[dtype_enum]\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(e.message)\n\n def _get_attr_bool(self, name):\n \"\"\"Returns the `bool` value of the attr of this op with the given `name`.\"\"\"\n try:\n return pywrap_tf_session.TF_OperationGetAttrBool(self._c_op, name)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(e.message)\n\n def _get_attr_int(self, name):\n \"\"\"Returns the `int` value of the attr of this op with the given `name`.\"\"\"\n try:\n return pywrap_tf_session.TF_OperationGetAttrInt(self._c_op, name)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(e.message)\n\n def run(self, feed_dict=None, session=None):\n \"\"\"Runs this operation in a `Session`.\n\n Calling this method will execute all preceding operations that\n produce the inputs needed for this operation.\n\n *N.B.* Before invoking `Operation.run()`, its graph must have been\n launched in a session, and either a default session must be\n available, or `session` must be specified explicitly.\n\n Args:\n feed_dict: A dictionary that maps `Tensor` objects to feed values. See\n `tf.Session.run` for a description of the valid feed values.\n session: (Optional.) The `Session` to be used to run to this operation. If\n none, the default session will be used.\n \"\"\"\n _run_using_default_session(self, feed_dict, self.graph, session)\n\n# TODO(b/185395742): Clean up usages of _gradient_registry\ngradient_registry = _gradient_registry = registry.Registry(\"gradient\")\n\n\n@tf_export(\"RegisterGradient\")\nclass RegisterGradient(object):\n \"\"\"A decorator for registering the gradient function for an op type.\n\n This decorator is only used when defining a new op type. 
For an op\n with `m` inputs and `n` outputs, the gradient function is a function\n that takes the original `Operation` and `n` `Tensor` objects\n (representing the gradients with respect to each output of the op),\n and returns `m` `Tensor` objects (representing the partial gradients\n with respect to each input of the op).\n\n For example, assuming that operations of type `\"Sub\"` take two\n inputs `x` and `y`, and return a single output `x - y`, the\n following gradient function would be registered:\n\n ```python\n @tf.RegisterGradient(\"Sub\")\n def _sub_grad(unused_op, grad):\n return grad, tf.negative(grad)\n ```\n\n The decorator argument `op_type` is the string type of an\n operation. This corresponds to the `OpDef.name` field for the proto\n that defines the operation.\n \"\"\"\n\n __slots__ = [\"_op_type\"]\n\n def __init__(self, op_type):\n \"\"\"Creates a new decorator with `op_type` as the Operation type.\n\n Args:\n op_type: The string type of an operation. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n\n Raises:\n TypeError: If `op_type` is not string.\n \"\"\"\n if not isinstance(op_type, six.string_types):\n raise TypeError(\"op_type must be a string\")\n self._op_type = op_type\n\n def __call__(self, f):\n \"\"\"Registers the function `f` as gradient function for `op_type`.\"\"\"\n gradient_registry.register(f, self._op_type)\n return f\n\n\[email protected]_endpoints(\"NotDifferentiable\", \"NoGradient\")\n@tf_export(\"no_gradient\", v1=[\"no_gradient\", \"NotDifferentiable\", \"NoGradient\"])\ndef no_gradient(op_type):\n \"\"\"Specifies that ops of type `op_type` is not differentiable.\n\n This function should *not* be used for operations that have a\n well-defined gradient that is not yet implemented.\n\n This function is only used when defining a new op type. It may be\n used for ops such as `tf.size()` that are not differentiable. For\n example:\n\n ```python\n tf.no_gradient(\"Size\")\n ```\n\n The gradient computed for 'op_type' will then propagate zeros.\n\n For ops that have a well-defined gradient but are not yet implemented,\n no declaration should be made, and an error *must* be thrown if\n an attempt to request its gradient is made.\n\n Args:\n op_type: The string type of an operation. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n\n Raises:\n TypeError: If `op_type` is not a string.\n\n \"\"\"\n if not isinstance(op_type, six.string_types):\n raise TypeError(\"op_type must be a string\")\n gradient_registry.register(None, op_type)\n\n\n# Aliases for the old names, will be eventually removed.\nNoGradient = no_gradient\nNotDifferentiable = no_gradient\n\n\ndef get_gradient_function(op):\n \"\"\"Returns the function that computes gradients for \"op\".\"\"\"\n if not op.inputs:\n return None\n\n gradient_function = op._gradient_function # pylint: disable=protected-access\n if gradient_function:\n return gradient_function\n\n try:\n op_type = op.get_attr(\"_gradient_op_type\")\n except ValueError:\n op_type = op.type\n return gradient_registry.lookup(op_type)\n\n\ndef set_shape_and_handle_data_for_outputs(_):\n \"\"\"No op. 
TODO(b/74620627): Remove this.\"\"\"\n pass\n\n\nclass OpStats(object):\n \"\"\"A holder for statistics about an operator.\n\n This class holds information about the resource requirements for an op,\n including the size of its weight parameters on-disk and how many FLOPS it\n requires to execute forward inference.\n\n If you define a new operation, you can create a function that will return a\n set of information about its usage of the CPU and disk space when serialized.\n The function itself takes a Graph object that's been set up so you can call\n methods like get_tensor_by_name to help calculate the results, and a NodeDef\n argument.\n\n \"\"\"\n\n __slots__ = [\"_statistic_type\", \"_value\"]\n\n def __init__(self, statistic_type, value=None):\n \"\"\"Sets up the initial placeholders for the statistics.\"\"\"\n self.statistic_type = statistic_type\n self.value = value\n\n @property\n def statistic_type(self):\n return self._statistic_type\n\n @statistic_type.setter\n def statistic_type(self, statistic_type):\n self._statistic_type = statistic_type\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n self._value = value\n\n def __iadd__(self, other):\n if other.statistic_type != self.statistic_type:\n raise ValueError(\"Can't add an OpStat of type %s to one of %s.\" %\n (self.statistic_type, other.statistic_type))\n if self.value is None:\n self.value = other.value\n elif other.value is not None:\n self._value += other.value\n return self\n\n\n_stats_registry = registry.Registry(\"statistical functions\")\n\n\nclass RegisterStatistics(object):\n \"\"\"A decorator for registering the statistics function for an op type.\n\n This decorator can be defined for an op type so that it gives a\n report on the resources used by an instance of an operator, in the\n form of an OpStats object.\n\n Well-known types of statistics include these so far:\n\n - flops: When running a graph, the bulk of the computation happens doing\n numerical calculations like matrix multiplications. This type allows a node\n to return how many floating-point operations it takes to complete. The\n total number of FLOPs for a graph is a good guide to its expected latency.\n\n You can add your own statistics just by picking a new type string, registering\n functions for the ops you care about, and then calling get_stats_for_node_def.\n\n If a statistic for an op is registered multiple times, a KeyError will be\n raised.\n\n Since the statistics is counted on a per-op basis. It is not suitable for\n model parameters (capacity), which is expected to be counted only once, even\n if it is shared by multiple ops. (e.g. 
RNN)\n\n For example, you can define a new metric called doohickey for a Foo operation\n by placing this in your code:\n\n ```python\n @ops.RegisterStatistics(\"Foo\", \"doohickey\")\n def _calc_foo_bojangles(unused_graph, unused_node_def):\n return ops.OpStats(\"doohickey\", 20)\n ```\n\n Then in client code you can retrieve the value by making this call:\n\n ```python\n doohickey = ops.get_stats_for_node_def(graph, node_def, \"doohickey\")\n ```\n\n If the NodeDef is for an op with a registered doohickey function, you'll get\n back the calculated amount in doohickey.value, or None if it's not defined.\n\n \"\"\"\n\n __slots__ = [\"_op_type\", \"_statistic_type\"]\n\n def __init__(self, op_type, statistic_type):\n \"\"\"Saves the `op_type` as the `Operation` type.\"\"\"\n if not isinstance(op_type, six.string_types):\n raise TypeError(\"op_type must be a string.\")\n if \",\" in op_type:\n raise TypeError(\"op_type must not contain a comma.\")\n self._op_type = op_type\n if not isinstance(statistic_type, six.string_types):\n raise TypeError(\"statistic_type must be a string.\")\n if \",\" in statistic_type:\n raise TypeError(\"statistic_type must not contain a comma.\")\n self._statistic_type = statistic_type\n\n def __call__(self, f):\n \"\"\"Registers \"f\" as the statistics function for \"op_type\".\"\"\"\n _stats_registry.register(f, self._op_type + \",\" + self._statistic_type)\n return f\n\n\ndef get_stats_for_node_def(graph, node, statistic_type):\n \"\"\"Looks up the node's statistics function in the registry and calls it.\n\n This function takes a Graph object and a NodeDef from a GraphDef, and if\n there's an associated statistics method, calls it and returns a result. If no\n function has been registered for the particular node type, it returns an empty\n statistics object.\n\n Args:\n graph: A Graph object that's been set up with the node's graph.\n node: A NodeDef describing the operator.\n statistic_type: A string identifying the statistic we're interested in.\n\n Returns:\n An OpStats object containing information about resource usage.\n \"\"\"\n\n try:\n stats_func = _stats_registry.lookup(node.op + \",\" + statistic_type)\n result = stats_func(graph, node)\n except LookupError:\n result = OpStats(statistic_type)\n return result\n\n\ndef name_from_scope_name(name):\n \"\"\"Returns the name of an op given the name of its scope.\n\n Args:\n name: the name of the scope.\n\n Returns:\n the name of the op (equal to scope name minus any trailing slash).\n \"\"\"\n return name[:-1] if (name and name[-1] == \"/\") else name\n\n\n_MUTATION_LOCK_GROUP = 0\n_SESSION_RUN_LOCK_GROUP = 1\n\n\n@tf_contextlib.contextmanager\ndef resource_creator_scope(resource_type, resource_creator):\n with get_default_graph()._resource_creator_scope(resource_type, # pylint: disable=protected-access\n resource_creator):\n yield\n\n\n@tf_export(\"Graph\")\nclass Graph(object):\n \"\"\"A TensorFlow computation, represented as a dataflow graph.\n\n Graphs are used by `tf.function`s to represent the function's computations.\n Each graph contains a set of `tf.Operation` objects, which represent units of\n computation; and `tf.Tensor` objects, which represent the units of data that\n flow between operations.\n\n ### Using graphs directly (deprecated)\n\n A `tf.Graph` can be constructed and used directly without a `tf.function`, as\n was required in TensorFlow 1, but this is deprecated and it is recommended to\n use a `tf.function` instead. 
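As a rough sketch (assuming TensorFlow 2.x), letting `tf.function` build and\n  manage the graph for you looks like:\n\n  ```python\n  import tensorflow as tf\n\n  @tf.function\n  def f(x):\n    return x + 1\n\n  # Tracing `f` builds a graph behind the scenes; the concrete function\n  # exposes the captured graph (a `FuncGraph`, a subclass of `tf.Graph`).\n  g = f.get_concrete_function(tf.constant(1)).graph\n  assert isinstance(g, tf.Graph)\n  ```\n\n  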
If a graph is directly used, other deprecated\n TensorFlow 1 classes are also required to execute the graph, such as a\n `tf.compat.v1.Session`.\n\n A default graph can be registered with the `tf.Graph.as_default` context\n manager. Then, operations will be added to the graph instead of being executed\n eagerly. For example:\n\n ```python\n g = tf.Graph()\n with g.as_default():\n # Define operations and tensors in `g`.\n c = tf.constant(30.0)\n assert c.graph is g\n ```\n\n `tf.compat.v1.get_default_graph()` can be used to obtain the default graph.\n\n Important note: This class *is not* thread-safe for graph construction. All\n operations should be created from a single thread, or external\n synchronization must be provided. Unless otherwise specified, all methods\n are not thread-safe.\n\n A `Graph` instance supports an arbitrary number of \"collections\"\n that are identified by name. For convenience when building a large\n graph, collections can store groups of related objects: for\n example, the `tf.Variable` uses a collection (named\n `tf.GraphKeys.GLOBAL_VARIABLES`) for\n all variables that are created during the construction of a graph. The caller\n may define additional collections by specifying a new name.\n \"\"\"\n\n def __init__(self):\n \"\"\"Creates a new, empty Graph.\"\"\"\n # Protects core state that can be returned via public accessors.\n # Thread-safety is provided on a best-effort basis to support buggy\n # programs, and is not guaranteed by the public `tf.Graph` API.\n #\n # NOTE(mrry): This does not protect the various stacks. A warning will\n # be reported if these are used from multiple threads\n self._lock = threading.RLock()\n # The group lock synchronizes Session.run calls with methods that create\n # and mutate ops (e.g. Graph.create_op()). This synchronization is\n # necessary because it's illegal to modify an operation after it's been run.\n # The group lock allows any number of threads to mutate ops at the same time\n # but if any modification is going on, all Session.run calls have to wait.\n # Similarly, if one or more Session.run calls are going on, all mutate ops\n # have to wait until all Session.run calls have finished.\n self._group_lock = lock_util.GroupLock(num_groups=2)\n self._nodes_by_id = {} # GUARDED_BY(self._lock)\n self._next_id_counter = 0 # GUARDED_BY(self._lock)\n self._nodes_by_name = {} # GUARDED_BY(self._lock)\n self._version = 0 # GUARDED_BY(self._lock)\n # Maps a name used in the graph to the next id to use for that name.\n self._names_in_use = {}\n self._stack_state_is_thread_local = False\n self._thread_local = threading.local()\n # Functions that will be applied to choose a device if none is specified.\n # In TF2.x or after switch_to_thread_local(),\n # self._thread_local._device_function_stack is used instead.\n self._graph_device_function_stack = traceable_stack.TraceableStack()\n # Default original_op applied to new ops.\n self._default_original_op = None\n # Current control flow context. 
It could be either CondContext or\n # WhileContext defined in ops/control_flow_ops.py\n self._control_flow_context = None\n # A new node will depend of the union of all of the nodes in the stack.\n # In TF2.x or after switch_to_thread_local(),\n # self._thread_local._control_dependencies_stack is used instead.\n self._graph_control_dependencies_stack = []\n # Arbitrary collections of objects.\n self._collections = {}\n # The graph-level random seed\n self._seed = None\n # A dictionary of attributes that should be applied to all ops.\n self._attr_scope_map = {}\n # A map from op type to the kernel label that should be used.\n self._op_to_kernel_label_map = {}\n # A map from op type to an alternative op type that should be used when\n # computing gradients.\n self._gradient_override_map = {}\n # A map from op type to a gradient function that should be used instead.\n self._gradient_function_map = {}\n # True if the graph is considered \"finalized\". In that case no\n # new operations can be added.\n self._finalized = False\n # Functions defined in the graph\n self._functions = collections.OrderedDict()\n # Default GraphDef versions\n self._graph_def_versions = versions_pb2.VersionDef(\n producer=versions.GRAPH_DEF_VERSION,\n min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)\n self._building_function = False\n # Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(),\n # self._thread_local._colocation_stack is used instead.\n self._graph_colocation_stack = traceable_stack.TraceableStack()\n # Set of tensors that are dangerous to feed!\n self._unfeedable_tensors = object_identity.ObjectIdentitySet()\n # Set of operations that are dangerous to fetch!\n self._unfetchable_ops = set()\n # A map of tensor handle placeholder to tensor dtype.\n self._handle_feeders = {}\n # A map from tensor handle to its read op.\n self._handle_readers = {}\n # A map from tensor handle to its move op.\n self._handle_movers = {}\n # A map from tensor handle to its delete op.\n self._handle_deleters = {}\n # Allow optimizers and other objects to pseudo-uniquely key graphs (this key\n # will be shared when defining function graphs, for example, so optimizers\n # being called inside function definitions behave as if they were seeing the\n # actual outside graph).\n self._graph_key = \"grap-key-%d/\" % (uid(),)\n # A string with the last reduction method passed to\n # losses.compute_weighted_loss(), or None. This is required only for\n # backward compatibility with Estimator and optimizer V1 use cases.\n self._last_loss_reduction = None\n # Flag that is used to indicate whether loss has been scaled by optimizer.\n # If this flag has been set, then estimator uses it to scale losss back\n # before reporting. This is required only for backward compatibility with\n # Estimator and optimizer V1 use cases.\n self._is_loss_scaled_by_optimizer = False\n self._container = \"\"\n # Set to True if this graph is being built in an\n # AutomaticControlDependencies context.\n self._add_control_dependencies = False\n # Cache for OpDef protobufs retrieved via the C API.\n self._op_def_cache = {}\n # Cache for constant results of `broadcast_gradient_args()`. The keys are\n # tuples of fully-defined shapes: (x_shape_tuple, y_shape_tuple), and the\n # values are tuples of reduction indices: (rx, ry).\n self._bcast_grad_args_cache = {}\n # Cache for constant results of `reduced_shape()`. 
The keys are pairs of\n # tuples: (input_shape_tuple, reduction_indices_tuple), and the values\n # are pairs of tuples: (output_shape_kept_dims, tile_scaling).\n self._reduced_shape_cache = {}\n\n # TODO(skyewm): fold as much of the above as possible into the C\n # implementation\n self._scoped_c_graph = c_api_util.ScopedTFGraph()\n # The C API requires all ops to have shape functions. Disable this\n # requirement (many custom ops do not have shape functions, and we don't\n # want to break these existing cases).\n pywrap_tf_session.SetRequireShapeInferenceFns(self._c_graph, False)\n if tf2.enabled():\n self.switch_to_thread_local()\n\n # Note: this method is private because the API of tf.Graph() is public and\n # frozen, and this functionality is still not ready for public visibility.\n @tf_contextlib.contextmanager\n def _variable_creator_scope(self, creator, priority=100):\n \"\"\"Scope which defines a variable creation function.\n\n Args:\n creator: A callable taking `next_creator` and `kwargs`. See the\n `tf.variable_creator_scope` docstring.\n priority: Creators with a higher `priority` are called first. Within the\n same priority, creators are called inner-to-outer.\n\n Yields:\n `_variable_creator_scope` is a context manager with a side effect, but\n doesn't return a value.\n\n Raises:\n RuntimeError: If variable creator scopes are not properly nested.\n \"\"\"\n # This step keeps a reference to the existing stack, and it also initializes\n # self._thread_local._variable_creator_stack if it doesn't exist yet.\n old = self._variable_creator_stack\n new = list(old)\n new.append((priority, creator))\n # Sorting is stable, so we'll put higher-priority creators later in the list\n # but otherwise maintain registration order.\n new.sort(key=lambda item: item[0])\n self._thread_local._variable_creator_stack = new # pylint: disable=protected-access\n try:\n yield\n finally:\n if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access\n raise RuntimeError(\n \"Exiting variable_creator_scope without proper nesting.\")\n self._thread_local._variable_creator_stack = old # pylint: disable=protected-access\n\n # TODO(b/192405401): unify resource_creator_scope with variable_creator_scope.\n # pylint: disable=protected-access\n @tf_contextlib.contextmanager\n def _resource_creator_scope(self, resource_type, creator):\n \"\"\"Scope which defines a resource creation function used by some resource.\n\n The resource should be a subclass of CachableResource with a class method\n `cls._resource_type`, the output of which is what the `resource_type`\n argument should be. By default, `cls._resource_type` returns the class name,\n `cls.__name__`. Given a scope, creators being added with the same\n `resource_type` argument will be composed together to apply to all classes\n with this `_resource_type`.\n\n\n `creator` is expected to be a function with the following signature:\n\n ```\n def resource_creator(next_creator, *a, **kwargs)\n ```\n\n The creator is supposed to eventually call the next_creator to create an\n instance if it does want to create an instance and not call\n the class initialization method directly. This helps make creators\n composable. A creator may choose to create multiple instances, return\n already existing instances, or simply register that an instance was created\n and defer to the next creator in line. 
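A minimal sketch of such a creator (the name `logging_creator` is purely\n    illustrative) might look like this:\n\n    ```python\n    def logging_creator(next_creator, *args, **kwargs):\n      # Observe the requested construction, then defer to the next creator\n      # in the chain so the resource is still built normally.\n      print('creating resource with', kwargs)\n      return next_creator(*args, **kwargs)\n    ```\n\n    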
Creators can also modify keyword\n arguments seen by the next creators.\n\n Valid keyword arguments in `kwargs` depends on the specific resource\n class. For StaticHashTable, this may be:\n * initializer: The table initializer to use.\n * default_value: The value to use if a key is missing in the table.\n * name: Optional name for the table, default to None.\n\n\n Args:\n resource_type: the output of the resource class's `_resource_type` method.\n creator: the passed creator for the resource.\n\n Yields:\n A scope in which the creator is active\n\n Raises:\n RuntimeError: If resource_creator_scope is existed without proper nesting.\n \"\"\"\n # This step keeps a reference to the existing stack, and it also initializes\n # self._thread_local._variable_creator_stack if it doesn't exist yet.\n old = self._resource_creator_stack\n new = copy.deepcopy(old)\n if isinstance(resource_type, (list, tuple)):\n for r in resource_type:\n new[r].append(creator)\n else:\n new[resource_type].append(creator)\n self._thread_local._resource_creator_stack = new\n try:\n yield\n finally:\n if self._thread_local._resource_creator_stack is not new:\n raise RuntimeError(\n \"Exiting resource_creator_scope without proper nesting.\")\n self._thread_local._resource_creator_stack = old\n\n @property\n def _resource_creator_stack(self):\n if not hasattr(self._thread_local, \"_resource_creator_stack\"):\n self._thread_local._resource_creator_stack = collections.defaultdict(list)\n return self._thread_local._resource_creator_stack\n\n @_resource_creator_stack.setter\n def _resource_creator_stack(self, resource_creator_stack):\n self._thread_local._resource_creator_stack = resource_creator_stack\n # pylint: enable=protected-access\n\n # Note: this method is private because the API of tf.Graph() is public and\n # frozen, and this functionality is still not ready for public visibility.\n @property\n def _variable_creator_stack(self):\n if not hasattr(self._thread_local, \"_variable_creator_stack\"):\n self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access\n\n # This previously returned a copy of the stack instead of the stack itself,\n # to guard against accidental mutation. Consider, however, code that wants\n # to save and restore the variable creator stack:\n # def f():\n # original_stack = graph._variable_creator_stack\n # graph._variable_creator_stack = new_stack\n # ... 
# Some code\n # graph._variable_creator_stack = original_stack\n #\n # And lets say you have some code that calls this function with some\n # variable_creator:\n # def g():\n # with variable_scope.variable_creator_scope(creator):\n # f()\n # When exiting the variable creator scope, it would see a different stack\n # object than it expected leading to a \"Exiting variable_creator_scope\n # without proper nesting\" error.\n return self._thread_local._variable_creator_stack # pylint: disable=protected-access\n\n @_variable_creator_stack.setter\n def _variable_creator_stack(self, variable_creator_stack):\n self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access\n\n def _check_not_finalized(self):\n \"\"\"Check if the graph is finalized.\n\n Raises:\n RuntimeError: If the graph finalized.\n \"\"\"\n if self._finalized:\n raise RuntimeError(\"Graph is finalized and cannot be modified.\")\n\n def _add_op(self, op, op_name):\n \"\"\"Adds 'op' to the graph and returns the unique ID for the added Operation.\n\n Args:\n op: the Operation to add.\n op_name: the name of the Operation.\n\n Returns:\n An integer that is a unique ID for the added Operation.\n \"\"\"\n self._check_not_finalized()\n with self._lock:\n self._next_id_counter += 1\n op_id = self._next_id_counter\n self._nodes_by_id[op_id] = op\n self._nodes_by_name[op_name] = op\n self._version = max(self._version, op_id)\n return op_id\n\n @property\n def _c_graph(self):\n return self._scoped_c_graph.graph\n\n @property\n def version(self):\n \"\"\"Returns a version number that increases as ops are added to the graph.\n\n Note that this is unrelated to the\n `tf.Graph.graph_def_versions`.\n\n Returns:\n An integer version that increases as ops are added to the graph.\n \"\"\"\n if self._finalized:\n return self._version\n\n with self._lock:\n return self._version\n\n @property\n def graph_def_versions(self):\n # pylint: disable=line-too-long\n \"\"\"The GraphDef version information of this graph.\n\n For details on the meaning of each version, see\n [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).\n\n Returns:\n A `VersionDef`.\n \"\"\"\n # pylint: enable=line-too-long\n with c_api_util.tf_buffer() as buf:\n pywrap_tf_session.TF_GraphVersions(self._c_graph, buf)\n data = pywrap_tf_session.TF_GetBuffer(buf)\n version_def = versions_pb2.VersionDef()\n version_def.ParseFromString(compat.as_bytes(data))\n return version_def\n\n @property\n def seed(self):\n \"\"\"The graph-level random seed of this graph.\"\"\"\n return self._seed\n\n @seed.setter\n def seed(self, seed):\n self._seed = seed\n\n @property\n def finalized(self):\n \"\"\"True if this graph has been finalized.\"\"\"\n return self._finalized\n\n def finalize(self):\n \"\"\"Finalizes this graph, making it read-only.\n\n After calling `g.finalize()`, no new operations can be added to\n `g`. This method is used to ensure that no operations are added\n to a graph when it is shared between multiple threads, for example\n when using a `tf.compat.v1.train.QueueRunner`.\n \"\"\"\n self._finalized = True\n\n def _unsafe_unfinalize(self):\n \"\"\"Opposite of `finalize`.\n\n Internal interface.\n\n NOTE: Unfinalizing a graph could have negative impact on performance,\n especially in a multi-threaded environment. Unfinalizing a graph\n when it is in use by a Session may lead to undefined behavior. 
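A rough sketch of the intended finalize/unfinalize pairing, for illustration\n    only:\n\n    ```python\n    import tensorflow as tf\n\n    g = tf.Graph()\n    with g.as_default():\n      tf.constant(1.0)\n    g.finalize()              # freeze the graph before sharing it\n    # ... make sure no Session is still using `g` ...\n    g._unsafe_unfinalize()    # internal-only escape hatch\n    assert not g.finalized\n    ```\n\n    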
Ensure\n that all sessions using a graph are closed before calling this method.\n \"\"\"\n self._finalized = False\n\n def _get_control_flow_context(self):\n \"\"\"Returns the current control flow context.\n\n Returns:\n A context object.\n \"\"\"\n return self._control_flow_context\n\n def _set_control_flow_context(self, ctx):\n \"\"\"Sets the current control flow context.\n\n Args:\n ctx: a context object.\n \"\"\"\n self._control_flow_context = ctx\n\n def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):\n \"\"\"If this graph contains functions, copy them to `graph_def`.\"\"\"\n bytesize = starting_bytesize\n for f in self._functions.values():\n bytesize += f.definition.ByteSize()\n if bytesize >= (1 << 31) or bytesize < 0:\n raise ValueError(\"GraphDef cannot be larger than 2GB.\")\n graph_def.library.function.extend([f.definition])\n if f.grad_func_name:\n grad_def = function_pb2.GradientDef()\n grad_def.function_name = f.name\n grad_def.gradient_func = f.grad_func_name\n graph_def.library.gradient.extend([grad_def])\n\n def _as_graph_def(self, from_version=None, add_shapes=False):\n # pylint: disable=line-too-long\n \"\"\"Returns a serialized `GraphDef` representation of this graph.\n\n The serialized `GraphDef` can be imported into another `Graph`\n (using `tf.import_graph_def`) or used with the\n [C++ Session API](../../../../api_docs/cc/index.md).\n\n This method is thread-safe.\n\n Args:\n from_version: Optional. If this is set, returns a `GraphDef` containing\n only the nodes that were added to this graph since its `version`\n property had the given value.\n add_shapes: If true, adds an \"_output_shapes\" list attr to each node with\n the inferred shapes of each of its outputs.\n\n Returns:\n A tuple containing a\n [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)\n protocol buffer, and the version of the graph to which that\n `GraphDef` corresponds.\n\n Raises:\n ValueError: If the `graph_def` would be too large.\n\n \"\"\"\n # pylint: enable=line-too-long\n with self._lock:\n with c_api_util.tf_buffer() as buf:\n pywrap_tf_session.TF_GraphToGraphDef(self._c_graph, buf)\n data = pywrap_tf_session.TF_GetBuffer(buf)\n graph = graph_pb2.GraphDef()\n graph.ParseFromString(compat.as_bytes(data))\n # Strip the experimental library field iff it's empty.\n if not graph.library.function:\n graph.ClearField(\"library\")\n\n if add_shapes:\n for node in graph.node:\n op = self._nodes_by_name[node.name]\n if op.outputs:\n node.attr[\"_output_shapes\"].list.shape.extend(\n [output.get_shape().as_proto() for output in op.outputs])\n for function_def in graph.library.function:\n defined_function = self._functions[function_def.signature.name]\n try:\n func_graph = defined_function.graph\n except AttributeError:\n # _DefinedFunction doesn't have a graph, _EagerDefinedFunction\n # does. 
Both rely on ops.py, so we can't really isinstance check\n # them.\n continue\n input_shapes = function_def.attr[\"_input_shapes\"]\n try:\n func_graph_inputs = func_graph.inputs\n except AttributeError:\n continue\n # TODO(b/141471245): Fix the inconsistency when inputs of func graph\n # are appended during gradient computation of while/cond.\n assert len(input_shapes.list.shape) in [0, len(func_graph_inputs)]\n # If the function_def has inputs already filled out, skip this step.\n if not input_shapes.list.shape:\n for input_tensor, arg_def in zip(func_graph_inputs,\n function_def.signature.input_arg):\n input_shapes.list.shape.add().CopyFrom(\n input_tensor.get_shape().as_proto())\n if input_tensor.dtype == dtypes.resource:\n _copy_handle_data_to_arg_def(input_tensor, arg_def)\n\n for output_tensor, arg_def in zip(func_graph.outputs,\n function_def.signature.output_arg):\n if output_tensor.dtype == dtypes.resource:\n _copy_handle_data_to_arg_def(output_tensor, arg_def)\n\n for node in function_def.node_def:\n try:\n op = func_graph.get_operation_by_name(node.name)\n except KeyError:\n continue\n outputs = op.outputs\n\n if op.type == \"StatefulPartitionedCall\":\n # Filter out any extra outputs (possibly added by function\n # backpropagation rewriting).\n num_outputs = len(node.attr[\"Tout\"].list.type)\n outputs = outputs[:num_outputs]\n\n node.attr[\"_output_shapes\"].list.shape.extend(\n [output.get_shape().as_proto() for output in outputs])\n\n return graph, self._version\n\n def as_graph_def(self, from_version=None, add_shapes=False):\n # pylint: disable=line-too-long\n \"\"\"Returns a serialized `GraphDef` representation of this graph.\n\n The serialized `GraphDef` can be imported into another `Graph`\n (using `tf.import_graph_def`) or used with the\n [C++ Session API](../../api_docs/cc/index.md).\n\n This method is thread-safe.\n\n Args:\n from_version: Optional. 
If this is set, returns a `GraphDef` containing\n only the nodes that were added to this graph since its `version`\n property had the given value.\n add_shapes: If true, adds an \"_output_shapes\" list attr to each node with\n the inferred shapes of each of its outputs.\n\n Returns:\n A\n [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)\n protocol buffer.\n\n Raises:\n ValueError: If the `graph_def` would be too large.\n \"\"\"\n # pylint: enable=line-too-long\n result, _ = self._as_graph_def(from_version, add_shapes)\n return result\n\n def _is_function(self, name):\n \"\"\"Tests whether 'name' is registered in this graph's function library.\n\n Args:\n name: string op name.\n\n Returns:\n bool indicating whether or not 'name' is registered in function library.\n \"\"\"\n return compat.as_str(name) in self._functions\n\n def _get_function(self, name):\n \"\"\"Returns the function definition for 'name'.\n\n Args:\n name: string function name.\n\n Returns:\n The function def proto.\n \"\"\"\n return self._functions.get(compat.as_str(name), None)\n\n def _add_function(self, function):\n \"\"\"Adds a function to the graph.\n\n After the function has been added, you can call to the function by\n passing the function name in place of an op name to\n `Graph.create_op()`.\n\n Args:\n function: A `_DefinedFunction` object.\n\n Raises:\n ValueError: if another function is defined with the same name.\n \"\"\"\n self._check_not_finalized()\n\n name = function.name\n # Sanity checks on gradient definition.\n if (function.grad_func_name is not None) and (function.python_grad_func is\n not None):\n raise ValueError(\"Gradient defined twice for function %s\" % name)\n\n # Add function to graph\n # pylint: disable=protected-access\n gradient = (\n function._grad_func._c_func.func if function._grad_func else None)\n pywrap_tf_session.TF_GraphCopyFunction(self._c_graph, function._c_func.func,\n gradient)\n # pylint: enable=protected-access\n\n self._functions[compat.as_str(name)] = function\n\n # Need a new-enough consumer to support the functions we add to the graph.\n if self._graph_def_versions.min_consumer < 12:\n self._graph_def_versions.min_consumer = 12\n\n @property\n def building_function(self):\n \"\"\"Returns True iff this graph represents a function.\"\"\"\n return self._building_function\n\n # Helper functions to create operations.\n @deprecated_args(None,\n \"Shapes are always computed; don't use the compute_shapes \"\n \"as it has no effect.\", \"compute_shapes\")\n @traceback_utils.filter_traceback\n def create_op(\n self,\n op_type,\n inputs,\n dtypes=None, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_shapes=True,\n compute_device=True):\n \"\"\"Creates an `Operation` in this graph.\n\n This is a low-level interface for creating an `Operation`. Most\n programs will not call this method directly, and instead use the\n Python op constructors, such as `tf.constant()`, which add ops to\n the default graph.\n\n Args:\n op_type: The `Operation` type to create. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n inputs: A list of `Tensor` objects that will be inputs to the `Operation`.\n dtypes: (Optional) A list of `DType` objects that will be the types of the\n tensors that the operation produces.\n input_types: (Optional.) A list of `DType`s that will be the types of the\n tensors that the operation consumes. 
By default, uses the base `DType`\n of each input in `inputs`. Operations that expect reference-typed inputs\n must specify `input_types` explicitly.\n name: (Optional.) A string name for the operation. If not specified, a\n name is generated based on `op_type`.\n attrs: (Optional.) A dictionary where the key is the attribute name (a\n string) and the value is the respective `attr` attribute of the\n `NodeDef` proto that will represent the operation (an `AttrValue`\n proto).\n op_def: (Optional.) The `OpDef` proto that describes the `op_type` that\n the operation will have.\n compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always\n computed).\n compute_device: (Optional.) If True, device functions will be executed to\n compute the device property of the Operation.\n\n Raises:\n TypeError: if any of the inputs is not a `Tensor`.\n ValueError: if colocation conflicts with existing device assignment.\n\n Returns:\n An `Operation` object.\n \"\"\"\n del compute_shapes\n for idx, a in enumerate(inputs):\n if not isinstance(a, Tensor):\n raise TypeError(\"Input #%d is not a tensor: %s\" % (idx, a))\n return self._create_op_internal(op_type, inputs, dtypes, input_types, name,\n attrs, op_def, compute_device)\n\n def _create_op_internal(\n self,\n op_type,\n inputs,\n dtypes=None, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_device=True):\n \"\"\"Creates an `Operation` in this graph.\n\n Implements `Graph.create_op()` without the overhead of the deprecation\n wrapper.\n\n Args:\n op_type: The `Operation` type to create. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n inputs: A list of `Tensor` objects that will be inputs to the `Operation`.\n dtypes: (Optional) A list of `DType` objects that will be the types of the\n tensors that the operation produces.\n input_types: (Optional.) A list of `DType`s that will be the types of the\n tensors that the operation consumes. By default, uses the base `DType`\n of each input in `inputs`. Operations that expect reference-typed inputs\n must specify `input_types` explicitly.\n name: (Optional.) A string name for the operation. If not specified, a\n name is generated based on `op_type`.\n attrs: (Optional.) A dictionary where the key is the attribute name (a\n string) and the value is the respective `attr` attribute of the\n `NodeDef` proto that will represent the operation (an `AttrValue`\n proto).\n op_def: (Optional.) The `OpDef` proto that describes the `op_type` that\n the operation will have.\n compute_device: (Optional.) If True, device functions will be executed to\n compute the device property of the Operation.\n\n Raises:\n ValueError: if colocation conflicts with existing device assignment.\n\n Returns:\n An `Operation` object.\n \"\"\"\n self._check_not_finalized()\n if name is None:\n name = op_type\n # If a names ends with a '/' it is a \"name scope\" and we use it as-is,\n # after removing the trailing '/'.\n if name and name[-1] == \"/\":\n name = name_from_scope_name(name)\n else:\n name = self.unique_name(name)\n\n node_def = _NodeDef(op_type, name, attrs)\n\n input_ops = set(t.op for t in inputs)\n control_inputs = self._control_dependencies_for_inputs(input_ops)\n # _create_op_helper mutates the new Operation. 
`_mutation_lock` ensures a\n # Session.run call cannot occur between creating and mutating the op.\n with self._mutation_lock():\n ret = Operation(\n node_def,\n self,\n inputs=inputs,\n output_types=dtypes,\n control_inputs=control_inputs,\n input_types=input_types,\n original_op=self._default_original_op,\n op_def=op_def)\n self._create_op_helper(ret, compute_device=compute_device)\n return ret\n\n def _create_op_from_tf_operation(self, c_op, compute_device=True):\n \"\"\"Creates an `Operation` in this graph from the supplied TF_Operation.\n\n This method is like create_op() except the new Operation is constructed\n using `c_op`. The returned Operation will have `c_op` as its _c_op\n field. This is used to create Operation objects around TF_Operations created\n indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).\n\n This function does not call Operation._control_flow_post_processing or\n Graph._control_dependencies_for_inputs (since the inputs may not be\n available yet). The caller is responsible for calling these methods.\n\n Args:\n c_op: a wrapped TF_Operation\n compute_device: (Optional.) If True, device functions will be executed to\n compute the device property of the Operation.\n\n Returns:\n An `Operation` object.\n \"\"\"\n self._check_not_finalized()\n ret = Operation(c_op, self)\n # If a name_scope was created with ret.name but no nodes were created in it,\n # the name will still appear in _names_in_use even though the name hasn't\n # been used. This is ok, just leave _names_in_use as-is in this case.\n # TODO(skyewm): make the C API guarantee no name conflicts.\n name_key = ret.name.lower()\n if name_key not in self._names_in_use:\n self._names_in_use[name_key] = 1\n self._create_op_helper(ret, compute_device=compute_device)\n return ret\n\n def _create_op_helper(self, op, compute_device=True):\n \"\"\"Common logic for creating an op in this graph.\"\"\"\n # Apply any additional attributes requested. Do not overwrite any existing\n # attributes.\n for key, value in self._attr_scope_map.items():\n try:\n op.get_attr(key)\n except ValueError:\n if callable(value):\n value = value(op.node_def)\n if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):\n raise TypeError(\n \"Callable for scope map key '%s' must return either None or \"\n \"an AttrValue protocol buffer; but it returned: %s\" %\n (key, value))\n if value:\n op._set_attr(key, value) # pylint: disable=protected-access\n\n # Apply a kernel label if one has been specified for this op type.\n try:\n kernel_label = self._op_to_kernel_label_map[op.type]\n op._set_attr(\"_kernel\", # pylint: disable=protected-access\n attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))\n except KeyError:\n pass\n\n op._gradient_function = self._gradient_function_map.get(op.type) # pylint: disable=protected-access\n\n # Apply the overriding op type for gradients if one has been specified for\n # this op type.\n try:\n mapped_op_type = self._gradient_override_map[op.type]\n op._set_attr(\"_gradient_op_type\", # pylint: disable=protected-access\n attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))\n except KeyError:\n pass\n\n self._record_op_seen_by_control_dependencies(op)\n\n if compute_device:\n self._apply_device_functions(op)\n\n # Snapshot the colocation stack metadata before we might generate error\n # messages using it. 
Note that this snapshot depends on the actual stack\n # and is independent of the op's _class attribute.\n # pylint: disable=protected-access\n op._colocation_code_locations = self._snapshot_colocation_stack_metadata()\n # pylint: enable=protected-access\n\n if self._colocation_stack:\n all_colocation_groups = []\n is_device_set = False\n for colocation_op in self._colocation_stack.peek_objs():\n try:\n all_colocation_groups.extend(colocation_op.colocation_groups())\n except AttributeError:\n pass\n if colocation_op.device and not is_device_set:\n # pylint: disable=protected-access\n op._set_device(colocation_op.device)\n # pylint: enable=protected-access\n is_device_set = True\n\n all_colocation_groups = sorted(set(all_colocation_groups))\n # pylint: disable=protected-access\n op._set_attr(\n \"_class\",\n attr_value_pb2.AttrValue(\n list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))\n # pylint: enable=protected-access\n\n # Sets \"container\" attribute if\n # (1) self._container is not None\n # (2) \"is_stateful\" is set in OpDef\n # (3) \"container\" attribute is in OpDef\n # (4) \"container\" attribute is None\n if self._container and op._is_stateful: # pylint: disable=protected-access\n try:\n container_attr = op.get_attr(\"container\")\n except ValueError:\n # \"container\" attribute is not in OpDef\n pass\n else:\n if not container_attr:\n op._set_attr(\"container\", attr_value_pb2.AttrValue( # pylint: disable=protected-access\n s=compat.as_bytes(self._container)))\n\n def _add_new_tf_operations(self, compute_devices=True):\n \"\"\"Creates `Operations` in this graph for any new TF_Operations.\n\n This is useful for when TF_Operations are indirectly created by the C API\n outside of the Operation constructor (e.g. by TF_ImportGraphDef,\n TF_FinishWhile). This ensures there are corresponding Operations for all\n TF_Operations in the underlying TF_Graph.\n\n Args:\n compute_devices: (Optional.) If True, device functions will be executed to\n compute the device properties of each new Operation.\n\n Returns:\n A list of the new `Operation` objects.\n \"\"\"\n self._check_not_finalized()\n\n # Create all Operation objects before accessing their inputs since an op may\n # be created before its inputs.\n new_ops = [\n self._create_op_from_tf_operation(c_op, compute_device=compute_devices)\n for c_op in c_api_util.new_tf_operations(self)\n ]\n\n # pylint: disable=protected-access\n for op in new_ops:\n new_control_inputs = self._control_dependencies_for_inputs(op.inputs)\n op._add_control_inputs(new_control_inputs)\n op._control_flow_post_processing()\n # pylint: enable=protected-access\n\n return new_ops\n\n def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):\n \"\"\"Returns the object referred to by `obj`, as an `Operation` or `Tensor`.\n\n This function validates that `obj` represents an element of this\n graph, and gives an informative error message if it is not.\n\n This function is the canonical way to get/validate an object of\n one of the allowed types from an external argument reference in the\n Session API.\n\n This method may be called concurrently from multiple threads.\n\n Args:\n obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can\n also be any object with an `_as_graph_element()` method that returns a\n value of one of these types. 
Note: `_as_graph_element` will be called\n inside the graph's lock and so may not modify the graph.\n allow_tensor: If true, `obj` may refer to a `Tensor`.\n allow_operation: If true, `obj` may refer to an `Operation`.\n\n Returns:\n The `Tensor` or `Operation` in the Graph corresponding to `obj`.\n\n Raises:\n TypeError: If `obj` is not a type we support attempting to convert\n to types.\n ValueError: If `obj` is of an appropriate type but invalid. For\n example, an invalid string.\n KeyError: If `obj` is not an object in the graph.\n \"\"\"\n if self._finalized:\n return self._as_graph_element_locked(obj, allow_tensor, allow_operation)\n\n with self._lock:\n return self._as_graph_element_locked(obj, allow_tensor, allow_operation)\n\n def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):\n \"\"\"See `Graph.as_graph_element()` for details.\"\"\"\n # The vast majority of this function is figuring\n # out what an API user might be doing wrong, so\n # that we can give helpful error messages.\n #\n # Ideally, it would be nice to split it up, but we\n # need context to generate nice error messages.\n\n if allow_tensor and allow_operation:\n types_str = \"Tensor or Operation\"\n elif allow_tensor:\n types_str = \"Tensor\"\n elif allow_operation:\n types_str = \"Operation\"\n else:\n raise ValueError(\"allow_tensor and allow_operation can't both be False.\")\n\n temp_obj = _as_graph_element(obj)\n if temp_obj is not None:\n obj = temp_obj\n\n # If obj appears to be a name...\n if isinstance(obj, compat.bytes_or_text_types):\n name = compat.as_str(obj)\n\n if \":\" in name and allow_tensor:\n # Looks like a Tensor name and can be a Tensor.\n try:\n op_name, out_n = name.split(\":\")\n out_n = int(out_n)\n except:\n raise ValueError(\"The name %s looks a like a Tensor name, but is \"\n \"not a valid one. Tensor names must be of the \"\n \"form \\\"<op_name>:<output_index>\\\".\" % repr(name))\n if op_name in self._nodes_by_name:\n op = self._nodes_by_name[op_name]\n else:\n raise KeyError(\"The name %s refers to a Tensor which does not \"\n \"exist. The operation, %s, does not exist in the \"\n \"graph.\" % (repr(name), repr(op_name)))\n try:\n return op.outputs[out_n]\n except:\n raise KeyError(\"The name %s refers to a Tensor which does not \"\n \"exist. 
The operation, %s, exists but only has \"\n \"%s outputs.\" %\n (repr(name), repr(op_name), len(op.outputs)))\n\n elif \":\" in name and not allow_tensor:\n # Looks like a Tensor name but can't be a Tensor.\n raise ValueError(\"Name %s appears to refer to a Tensor, not a %s.\" %\n (repr(name), types_str))\n\n elif \":\" not in name and allow_operation:\n # Looks like an Operation name and can be an Operation.\n if name not in self._nodes_by_name:\n raise KeyError(\"The name %s refers to an Operation not in the \"\n \"graph.\" % repr(name))\n return self._nodes_by_name[name]\n\n elif \":\" not in name and not allow_operation:\n # Looks like an Operation name but can't be an Operation.\n if name in self._nodes_by_name:\n # Yep, it's an Operation name\n err_msg = (\"The name %s refers to an Operation, not a %s.\" %\n (repr(name), types_str))\n else:\n err_msg = (\"The name %s looks like an (invalid) Operation name, \"\n \"not a %s.\" % (repr(name), types_str))\n err_msg += (\" Tensor names must be of the form \"\n \"\\\"<op_name>:<output_index>\\\".\")\n raise ValueError(err_msg)\n\n elif isinstance(obj, Tensor) and allow_tensor:\n # Actually obj is just the object it's referring to.\n if obj.graph is not self:\n raise ValueError(\"Tensor %s is not an element of this graph.\" % obj)\n return obj\n elif isinstance(obj, Operation) and allow_operation:\n # Actually obj is just the object it's referring to.\n if obj.graph is not self:\n raise ValueError(\"Operation %s is not an element of this graph.\" % obj)\n return obj\n else:\n # We give up!\n raise TypeError(\"Can not convert a %s into a %s.\" %\n (type(obj).__name__, types_str))\n\n def get_operations(self):\n \"\"\"Return the list of operations in the graph.\n\n You can modify the operations in place, but modifications\n to the list such as inserts/delete have no effect on the\n list of operations known to the graph.\n\n This method may be called concurrently from multiple threads.\n\n Returns:\n A list of Operations.\n \"\"\"\n if self._finalized:\n return list(self._nodes_by_id.values())\n\n with self._lock:\n return list(self._nodes_by_id.values())\n\n def get_operation_by_name(self, name):\n \"\"\"Returns the `Operation` with the given `name`.\n\n This method may be called concurrently from multiple threads.\n\n Args:\n name: The name of the `Operation` to return.\n\n Returns:\n The `Operation` with the given `name`.\n\n Raises:\n TypeError: If `name` is not a string.\n KeyError: If `name` does not correspond to an operation in this graph.\n \"\"\"\n\n if not isinstance(name, six.string_types):\n raise TypeError(\"Operation names are strings (or similar), not %s.\" %\n type(name).__name__)\n return self.as_graph_element(name, allow_tensor=False, allow_operation=True)\n\n def _get_operation_by_name_unsafe(self, name):\n \"\"\"Returns the `Operation` with the given `name`.\n\n This is a internal unsafe version of get_operation_by_name. It skips many\n checks and does not have user friendly error messages but runs considerably\n faster. 
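For illustration, the public lookups that this method bypasses (using a\n    small throwaway graph):\n\n    ```python\n    import tensorflow as tf\n\n    g = tf.Graph()\n    with g.as_default():\n      tf.constant(5.0, name='c')\n    op = g.get_operation_by_name('c')   # validated, user-friendly errors\n    t = g.get_tensor_by_name('c:0')     # output 0 of that operation\n    assert t.op is op\n    ```\n\n    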
This method may be called concurrently from multiple threads.\n\n Args:\n name: The name of the `Operation` to return.\n\n Returns:\n The `Operation` with the given `name`.\n\n Raises:\n KeyError: If `name` does not correspond to an operation in this graph.\n \"\"\"\n\n if self._finalized:\n return self._nodes_by_name[name]\n\n with self._lock:\n return self._nodes_by_name[name]\n\n def _get_operation_by_tf_operation(self, tf_oper):\n op_name = pywrap_tf_session.TF_OperationName(tf_oper)\n return self._get_operation_by_name_unsafe(op_name)\n\n def get_tensor_by_name(self, name):\n \"\"\"Returns the `Tensor` with the given `name`.\n\n This method may be called concurrently from multiple threads.\n\n Args:\n name: The name of the `Tensor` to return.\n\n Returns:\n The `Tensor` with the given `name`.\n\n Raises:\n TypeError: If `name` is not a string.\n KeyError: If `name` does not correspond to a tensor in this graph.\n \"\"\"\n # Names should be strings.\n if not isinstance(name, six.string_types):\n raise TypeError(\"Tensor names are strings (or similar), not %s.\" %\n type(name).__name__)\n return self.as_graph_element(name, allow_tensor=True, allow_operation=False)\n\n def _get_tensor_by_tf_output(self, tf_output):\n \"\"\"Returns the `Tensor` representing `tf_output`.\n\n Note that there is only one such `Tensor`, i.e. multiple calls to this\n function with the same TF_Output value will always return the same `Tensor`\n object.\n\n Args:\n tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).\n\n Returns:\n The `Tensor` that represents `tf_output`.\n \"\"\"\n op = self._get_operation_by_tf_operation(tf_output.oper)\n return op.outputs[tf_output.index]\n\n @property\n def _last_id(self):\n return self._next_id_counter\n\n def _get_op_def(self, type): # pylint: disable=redefined-builtin\n \"\"\"Returns the `OpDef` proto for `type`. `type` is a string.\"\"\"\n # NOTE: No locking is required because the lookup and insertion operations\n # on Python dictionaries are atomic.\n try:\n return self._op_def_cache[type]\n except KeyError:\n with c_api_util.tf_buffer() as buf:\n # pylint: disable=protected-access\n pywrap_tf_session.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type),\n buf)\n # pylint: enable=protected-access\n data = pywrap_tf_session.TF_GetBuffer(buf)\n op_def = op_def_pb2.OpDef()\n op_def.ParseFromString(compat.as_bytes(data))\n self._op_def_cache[type] = op_def\n return op_def\n\n def as_default(self):\n \"\"\"Returns a context manager that makes this `Graph` the default graph.\n\n This method should be used if you want to create multiple graphs\n in the same process. For convenience, a global default graph is\n provided, and all ops will be added to this graph if you do not\n create a new graph explicitly.\n\n Use this method with the `with` keyword to specify that ops created within\n the scope of a block should be added to this graph. In this case, once\n the scope of the `with` is exited, the previous default graph is set again\n as default. There is a stack, so it's ok to have multiple nested levels\n of `as_default` calls.\n\n The default graph is a property of the current thread. If you\n create a new thread, and wish to use the default graph in that\n thread, you must explicitly add a `with g.as_default():` in that\n thread's function.\n\n The following code examples are equivalent:\n\n ```python\n # 1. Using Graph.as_default():\n g = tf.Graph()\n with g.as_default():\n c = tf.constant(5.0)\n assert c.graph is g\n\n # 2. 
Constructing and making default:\n with tf.Graph().as_default() as g:\n c = tf.constant(5.0)\n assert c.graph is g\n ```\n\n If eager execution is enabled ops created under this context manager will be\n added to the graph instead of executed eagerly.\n\n Returns:\n A context manager for using this graph as the default graph.\n \"\"\"\n return _default_graph_stack.get_controller(self)\n\n @property\n def collections(self):\n \"\"\"Returns the names of the collections known to this graph.\"\"\"\n return list(self._collections)\n\n def add_to_collection(self, name, value):\n \"\"\"Stores `value` in the collection with the given `name`.\n\n Note that collections are not sets, so it is possible to add a value to\n a collection several times.\n\n Args:\n name: The key for the collection. The `GraphKeys` class contains many\n standard names for collections.\n value: The value to add to the collection.\n \"\"\" # pylint: disable=g-doc-exception\n self._check_not_finalized()\n with self._lock:\n if name not in self._collections:\n self._collections[name] = [value]\n else:\n self._collections[name].append(value)\n\n def add_to_collections(self, names, value):\n \"\"\"Stores `value` in the collections given by `names`.\n\n Note that collections are not sets, so it is possible to add a value to\n a collection several times. This function makes sure that duplicates in\n `names` are ignored, but it will not check for pre-existing membership of\n `value` in any of the collections in `names`.\n\n `names` can be any iterable, but if `names` is a string, it is treated as a\n single collection name.\n\n Args:\n names: The keys for the collections to add to. The `GraphKeys` class\n contains many standard names for collections.\n value: The value to add to the collections.\n \"\"\"\n # Make sure names are unique, but treat strings as a single collection name\n names = (names,) if isinstance(names, six.string_types) else set(names)\n for name in names:\n self.add_to_collection(name, value)\n\n def get_collection_ref(self, name):\n \"\"\"Returns a list of values in the collection with the given `name`.\n\n If the collection exists, this returns the list itself, which can\n be modified in place to change the collection. If the collection does\n not exist, it is created as an empty list and the list is returned.\n\n This is different from `get_collection()` which always returns a copy of\n the collection list if it exists and never creates an empty collection.\n\n Args:\n name: The key for the collection. For example, the `GraphKeys` class\n contains many standard names for collections.\n\n Returns:\n The list of values in the collection with the given `name`, or an empty\n list if no value has been added to that collection.\n \"\"\" # pylint: disable=g-doc-exception\n with self._lock:\n coll_list = self._collections.get(name, None)\n if coll_list is None:\n coll_list = []\n self._collections[name] = coll_list\n return coll_list\n\n def get_collection(self, name, scope=None):\n \"\"\"Returns a list of values in the collection with the given `name`.\n\n This is different from `get_collection_ref()` which always returns the\n actual collection list if it exists in that it returns a new list each time\n it is called.\n\n Args:\n name: The key for the collection. For example, the `GraphKeys` class\n contains many standard names for collections.\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. 
Items without a `name` attribute are never returned if a\n scope is supplied. The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n The list of values in the collection with the given `name`, or\n an empty list if no value has been added to that collection. The\n list contains the values in the order under which they were\n collected.\n \"\"\" # pylint: disable=g-doc-exception\n with self._lock:\n collection = self._collections.get(name, None)\n if collection is None:\n return []\n if scope is None:\n return list(collection)\n else:\n c = []\n regex = re.compile(scope)\n for item in collection:\n try:\n if regex.match(item.name):\n c.append(item)\n except AttributeError:\n # Collection items with no name are ignored.\n pass\n return c\n\n def get_all_collection_keys(self):\n \"\"\"Returns a list of collections used in this graph.\"\"\"\n with self._lock:\n return [x for x in self._collections if isinstance(x, six.string_types)]\n\n def clear_collection(self, name):\n \"\"\"Clears all values in a collection.\n\n Args:\n name: The key for the collection. The `GraphKeys` class contains many\n standard names for collections.\n \"\"\"\n self._check_not_finalized()\n with self._lock:\n if name in self._collections:\n del self._collections[name]\n\n @tf_contextlib.contextmanager\n def _original_op(self, op):\n \"\"\"Python 'with' handler to help annotate ops with their originator.\n\n An op may have an 'original_op' property that indicates the op on which\n it was based. For example a replica op is based on the op that was\n replicated and a gradient op is based on the op that was differentiated.\n\n All ops created in the scope of this 'with' handler will have\n the given 'op' as their original op.\n\n Args:\n op: The Operation that all ops created in this scope will have as their\n original op.\n\n Yields:\n Nothing.\n \"\"\"\n old_original_op = self._default_original_op\n self._default_original_op = op\n try:\n yield\n finally:\n self._default_original_op = old_original_op\n\n @property\n def _name_stack(self):\n # This may be called from a thread where name_stack doesn't yet exist.\n if not hasattr(self._thread_local, \"_name_stack\"):\n self._thread_local._name_stack = \"\"\n return self._thread_local._name_stack\n\n @_name_stack.setter\n def _name_stack(self, name_stack):\n self._thread_local._name_stack = name_stack\n\n # pylint: disable=g-doc-return-or-yield,line-too-long\n @tf_contextlib.contextmanager\n def name_scope(self, name):\n \"\"\"Returns a context manager that creates hierarchical names for operations.\n\n A graph maintains a stack of name scopes. A `with name_scope(...):`\n statement pushes a new name onto the stack for the lifetime of the context.\n\n The `name` argument will be interpreted as follows:\n\n * A string (not ending with '/') will create a new name scope, in which\n `name` is appended to the prefix of all operations created in the\n context. If `name` has been used before, it will be made unique by\n calling `self.unique_name(name)`.\n * A scope previously captured from a `with g.name_scope(...) 
as\n scope:` statement will be treated as an \"absolute\" name scope, which\n makes it possible to re-enter existing scopes.\n * A value of `None` or the empty string will reset the current name scope\n to the top-level (empty) name scope.\n\n For example:\n\n ```python\n with tf.Graph().as_default() as g:\n c = tf.constant(5.0, name=\"c\")\n assert c.op.name == \"c\"\n c_1 = tf.constant(6.0, name=\"c\")\n assert c_1.op.name == \"c_1\"\n\n # Creates a scope called \"nested\"\n with g.name_scope(\"nested\") as scope:\n nested_c = tf.constant(10.0, name=\"c\")\n assert nested_c.op.name == \"nested/c\"\n\n # Creates a nested scope called \"inner\".\n with g.name_scope(\"inner\"):\n nested_inner_c = tf.constant(20.0, name=\"c\")\n assert nested_inner_c.op.name == \"nested/inner/c\"\n\n # Create a nested scope called \"inner_1\".\n with g.name_scope(\"inner\"):\n nested_inner_1_c = tf.constant(30.0, name=\"c\")\n assert nested_inner_1_c.op.name == \"nested/inner_1/c\"\n\n # Treats `scope` as an absolute name scope, and\n # switches to the \"nested/\" scope.\n with g.name_scope(scope):\n nested_d = tf.constant(40.0, name=\"d\")\n assert nested_d.op.name == \"nested/d\"\n\n with g.name_scope(\"\"):\n e = tf.constant(50.0, name=\"e\")\n assert e.op.name == \"e\"\n ```\n\n The name of the scope itself can be captured by `with\n g.name_scope(...) as scope:`, which stores the name of the scope\n in the variable `scope`. This value can be used to name an\n operation that represents the overall result of executing the ops\n in a scope. For example:\n\n ```python\n inputs = tf.constant(...)\n with g.name_scope('my_layer') as scope:\n weights = tf.Variable(..., name=\"weights\")\n biases = tf.Variable(..., name=\"biases\")\n affine = tf.matmul(inputs, weights) + biases\n output = tf.nn.relu(affine, name=scope)\n ```\n\n NOTE: This constructor validates the given `name`. Valid scope\n names match one of the following regular expressions:\n\n [A-Za-z0-9.][A-Za-z0-9_.\\\\-/]* (for scopes at the root)\n [A-Za-z0-9_.\\\\-/]* (for other scopes)\n\n Args:\n name: A name for the scope.\n\n Returns:\n A context manager that installs `name` as a new name scope.\n\n Raises:\n ValueError: If `name` is not a valid scope name, according to the rules\n above.\n \"\"\"\n if name:\n if isinstance(name, compat.bytes_or_text_types):\n name = compat.as_str(name)\n\n if self._name_stack:\n # Scopes created in a nested scope may have initial characters\n # that are illegal as the initial character of an op name\n # (viz. '-', '\\', '/', and '_').\n if not _VALID_SCOPE_NAME_REGEX.match(name):\n raise ValueError(\"'%s' is not a valid scope name\" % name)\n else:\n # Scopes created in the root must match the more restrictive\n # op name regex, which constrains the initial character.\n if not _VALID_OP_NAME_REGEX.match(name):\n raise ValueError(\"'%s' is not a valid scope name\" % name)\n old_stack = self._name_stack\n if not name: # Both for name=None and name=\"\" we re-set to empty scope.\n new_stack = \"\"\n returned_scope = \"\"\n elif name[-1] == \"/\":\n new_stack = name_from_scope_name(name)\n returned_scope = name\n else:\n new_stack = self.unique_name(name)\n returned_scope = new_stack + \"/\"\n self._name_stack = new_stack\n try:\n yield returned_scope\n finally:\n self._name_stack = old_stack\n\n # pylint: enable=g-doc-return-or-yield,line-too-long\n\n def unique_name(self, name, mark_as_used=True):\n \"\"\"Return a unique operation name for `name`.\n\n Note: You rarely need to call `unique_name()` directly. 
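For illustration only, one plausible sequence of calls on a fresh `tf.Graph` (a sketch of the uniquification behavior, not an exhaustive contract):\n\n ```python\n g = tf.Graph()\n g.unique_name(\"foo\") # -> \"foo\"\n g.unique_name(\"foo\") # -> \"foo_1\"\n g.unique_name(\"foo\") # -> \"foo_2\"\n # With mark_as_used=False the candidate name is reported but not reserved,\n # so asking twice in a row returns the same string.\n g.unique_name(\"foo\", mark_as_used=False) # -> \"foo_3\"\n g.unique_name(\"foo\", mark_as_used=False) # -> \"foo_3\"\n ```\n\n 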
Most of\n the time you just need to create `with g.name_scope()` blocks to\n generate structured names.\n\n `unique_name` is used to generate structured names, separated by\n `\"/\"`, to help identify operations when debugging a graph.\n Operation names are displayed in error messages reported by the\n TensorFlow runtime, and in various visualization tools such as\n TensorBoard.\n\n If `mark_as_used` is set to `True`, which is the default, a new\n unique name is created and marked as in use. If it's set to `False`,\n the unique name is returned without actually being marked as used.\n This is useful when the caller simply wants to know what the name\n to be created will be.\n\n Args:\n name: The name for an operation.\n mark_as_used: Whether to mark this name as being used.\n\n Returns:\n A string to be passed to `create_op()` that will be used\n to name the operation being created.\n \"\"\"\n if self._name_stack:\n name = self._name_stack + \"/\" + name\n\n # For the sake of checking for names in use, we treat names as case\n # insensitive (e.g. foo = Foo).\n name_key = name.lower()\n i = self._names_in_use.get(name_key, 0)\n # Increment the number for \"name_key\".\n if mark_as_used:\n self._names_in_use[name_key] = i + 1\n if i > 0:\n base_name_key = name_key\n # Make sure the composed name key is not already used.\n while name_key in self._names_in_use:\n name_key = \"%s_%d\" % (base_name_key, i)\n i += 1\n # Mark the composed name_key as used in case someone wants\n # to call unique_name(\"name_1\").\n if mark_as_used:\n self._names_in_use[name_key] = 1\n\n # Return the new name with the original capitalization of the given name.\n name = \"%s_%d\" % (name, i - 1)\n return name\n\n def get_name_scope(self):\n \"\"\"Returns the current name scope.\n\n For example:\n\n ```python\n with tf.name_scope('scope1'):\n with tf.name_scope('scope2'):\n print(tf.compat.v1.get_default_graph().get_name_scope())\n ```\n would print the string `scope1/scope2`.\n\n Returns:\n A string representing the current name scope.\n \"\"\"\n return self._name_stack\n\n @tf_contextlib.contextmanager\n def _colocate_with_for_gradient(self, op, gradient_uid,\n ignore_existing=False):\n with self.colocate_with(op, ignore_existing):\n if gradient_uid is not None:\n ctx = _get_enclosing_context(self)\n if ctx is not None:\n ctx.EnterGradientColocation(op, gradient_uid)\n try:\n yield\n finally:\n ctx.ExitGradientColocation(op, gradient_uid)\n else:\n yield\n else:\n yield\n\n @tf_contextlib.contextmanager\n def colocate_with(self, op, ignore_existing=False):\n \"\"\"Returns a context manager that specifies an op to colocate with.\n\n Note: this function is not for public use, only for internal libraries.\n\n For example:\n\n ```python\n a = tf.Variable([1.0])\n with g.colocate_with(a):\n b = tf.constant(1.0)\n c = tf.add(a, b)\n ```\n\n `b` and `c` will always be colocated with `a`, no matter where `a`\n is eventually placed.\n\n **NOTE** Using a colocation scope resets any existing device constraints.\n\n If `op` is `None` then `ignore_existing` must be `True` and the new\n scope resets all colocation and device constraints.\n\n Args:\n op: The op to colocate all created ops with, or `None`.\n ignore_existing: If true, only applies colocation of this op within the\n context, rather than applying all colocation properties on the stack.\n If `op` is `None`, this value must be `True`.\n\n Raises:\n ValueError: if op is None but ignore_existing is False.\n\n Yields:\n A context manager that specifies the op with which 
to colocate\n newly created ops.\n \"\"\"\n if op is None and not ignore_existing:\n raise ValueError(\"Trying to reset colocation (op is None) but \"\n \"ignore_existing is not True\")\n op, device_only_candidate = _op_to_colocate_with(op, self)\n\n # By default, colocate_with resets the device function stack,\n # since colocate_with is typically used in specific internal\n # library functions where colocation is intended to be \"stronger\"\n # than device functions.\n #\n # In the future, a caller may specify that device_functions win\n # over colocation, in which case we can add support.\n device_fn_tmp = self._device_function_stack\n self._device_function_stack = traceable_stack.TraceableStack()\n\n if ignore_existing:\n current_stack = self._colocation_stack\n self._colocation_stack = traceable_stack.TraceableStack()\n\n if op is not None:\n # offset refers to the stack frame used for storing code location.\n # We use 4, the sum of 1 to use our caller's stack frame and 3\n # to jump over layers of context managers above us.\n if device_only_candidate is not None:\n self._colocation_stack.push_obj(device_only_candidate, offset=4)\n self._colocation_stack.push_obj(op, offset=4)\n elif not ignore_existing:\n raise ValueError(\"Trying to reset colocation (op is None) but \"\n \"ignore_existing is not True\")\n try:\n yield\n finally:\n # Restore device function stack\n self._device_function_stack = device_fn_tmp\n if op is not None:\n self._colocation_stack.pop_obj()\n if device_only_candidate is not None:\n self._colocation_stack.pop_obj()\n\n # Reset the colocation stack if requested.\n if ignore_existing:\n self._colocation_stack = current_stack\n\n def _add_device_to_stack(self, device_name_or_function, offset=0):\n \"\"\"Add device to stack manually, separate from a context manager.\"\"\"\n total_offset = 1 + offset\n spec = _UserDeviceSpec(device_name_or_function)\n self._device_function_stack.push_obj(spec, offset=total_offset)\n return spec\n\n @tf_contextlib.contextmanager\n def device(self, device_name_or_function):\n # pylint: disable=line-too-long\n \"\"\"Returns a context manager that specifies the default device to use.\n\n The `device_name_or_function` argument may either be a device name\n string, a device function, or None:\n\n * If it is a device name string, all operations constructed in\n this context will be assigned to the device with that name, unless\n overridden by a nested `device()` context.\n * If it is a function, it will be treated as a function from\n Operation objects to device name strings, and invoked each time\n a new Operation is created. 
The Operation will be assigned to\n the device with the returned name.\n * If it is None, all `device()` invocations from the enclosing context\n will be ignored.\n\n For information about the valid syntax of device name strings, see\n the documentation in\n [`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).\n\n For example:\n\n ```python\n with g.device('/device:GPU:0'):\n # All operations constructed in this context will be placed\n # on GPU 0.\n with g.device(None):\n # All operations constructed in this context will have no\n # assigned device.\n\n # Defines a function from `Operation` to device string.\n def matmul_on_gpu(n):\n if n.type == \"MatMul\":\n return \"/device:GPU:0\"\n else:\n return \"/cpu:0\"\n\n with g.device(matmul_on_gpu):\n # All operations of type \"MatMul\" constructed in this context\n # will be placed on GPU 0; all other operations will be placed\n # on CPU 0.\n ```\n\n **N.B.** The device scope may be overridden by op wrappers or\n other library code. For example, a variable assignment op\n `v.assign()` must be colocated with the `tf.Variable` `v`, and\n incompatible device scopes will be ignored.\n\n Args:\n device_name_or_function: The device name or function to use in the\n context.\n\n Yields:\n A context manager that specifies the default device to use for newly\n created ops.\n\n Raises:\n RuntimeError: If device scopes are not properly nested.\n \"\"\"\n self._add_device_to_stack(device_name_or_function, offset=2)\n old_top_of_stack = self._device_function_stack.peek_top_obj()\n try:\n yield\n finally:\n new_top_of_stack = self._device_function_stack.peek_top_obj()\n if old_top_of_stack is not new_top_of_stack:\n raise RuntimeError(\"Exiting device scope without proper scope nesting.\")\n self._device_function_stack.pop_obj()\n\n def _apply_device_functions(self, op):\n \"\"\"Applies the current device function stack to the given operation.\"\"\"\n # Apply any device functions in LIFO order, so that the most recently\n # pushed function has the first chance to apply a device to the op.\n # We apply here because the result can depend on the Operation's\n # signature, which is computed in the Operation constructor.\n # pylint: disable=protected-access\n prior_device_string = None\n for device_spec in self._device_function_stack.peek_objs():\n if device_spec.is_null_merge:\n continue\n\n if device_spec.function is None:\n break\n\n device_string = device_spec.string_merge(op)\n\n # Take advantage of the fact that None is a singleton and Python interns\n # strings, since identity checks are faster than equality checks.\n if device_string is not prior_device_string:\n op._set_device_from_string(device_string)\n prior_device_string = device_string\n op._device_code_locations = self._snapshot_device_function_stack_metadata()\n # pylint: enable=protected-access\n\n # pylint: disable=g-doc-return-or-yield\n @tf_contextlib.contextmanager\n def container(self, container_name):\n \"\"\"Returns a context manager that specifies the resource container to use.\n\n Stateful operations, such as variables and queues, can maintain their\n states on devices so that they can be shared by multiple processes.\n A resource container is a string name under which these stateful\n operations are tracked. 
These resources can be released or cleared\n with `tf.Session.reset()`.\n\n For example:\n\n ```python\n with g.container('experiment0'):\n # All stateful Operations constructed in this context will be placed\n # in resource container \"experiment0\".\n v1 = tf.Variable([1.0])\n v2 = tf.Variable([2.0])\n with g.container(\"experiment1\"):\n # All stateful Operations constructed in this context will be\n # placed in resource container \"experiment1\".\n v3 = tf.Variable([3.0])\n q1 = tf.queue.FIFOQueue(10, tf.float32)\n # All stateful Operations constructed in this context will be\n # be created in the \"experiment0\".\n v4 = tf.Variable([4.0])\n q1 = tf.queue.FIFOQueue(20, tf.float32)\n with g.container(\"\"):\n # All stateful Operations constructed in this context will be\n # be placed in the default resource container.\n v5 = tf.Variable([5.0])\n q3 = tf.queue.FIFOQueue(30, tf.float32)\n\n # Resets container \"experiment0\", after which the state of v1, v2, v4, q1\n # will become undefined (such as uninitialized).\n tf.Session.reset(target, [\"experiment0\"])\n ```\n\n Args:\n container_name: container name string.\n\n Returns:\n A context manager for defining resource containers for stateful ops,\n yields the container name.\n \"\"\"\n original_container = self._container\n self._container = container_name\n try:\n yield self._container\n finally:\n self._container = original_container\n\n # pylint: enable=g-doc-return-or-yield\n\n class _ControlDependenciesController(object):\n \"\"\"Context manager for `control_dependencies()`.\"\"\"\n\n def __init__(self, graph, control_inputs):\n \"\"\"Create a new `_ControlDependenciesController`.\n\n A `_ControlDependenciesController` is the context manager for\n `with tf.control_dependencies()` blocks. These normally nest,\n as described in the documentation for `control_dependencies()`.\n\n The `control_inputs` argument list control dependencies that must be\n added to the current set of control dependencies. Because of\n uniquification the set can be empty even if the caller passed a list of\n ops. The special value `None` indicates that we want to start a new\n empty set of control dependencies instead of extending the current set.\n\n In that case we also clear the current control flow context, which is an\n additional mechanism to add control dependencies.\n\n Args:\n graph: The graph that this controller is managing.\n control_inputs: List of ops to use as control inputs in addition to the\n current control dependencies. 
None to indicate that the dependencies\n should be cleared.\n \"\"\"\n self._graph = graph\n if control_inputs is None:\n self._control_inputs_val = []\n self._new_stack = True\n else:\n self._control_inputs_val = control_inputs\n self._new_stack = False\n self._seen_nodes = set()\n self._old_stack = None\n self._old_control_flow_context = None\n\n# pylint: disable=protected-access\n\n def __enter__(self):\n if self._new_stack:\n # Clear the control_dependencies graph.\n self._old_stack = self._graph._control_dependencies_stack\n self._graph._control_dependencies_stack = []\n # Clear the control_flow_context too.\n self._old_control_flow_context = self._graph._get_control_flow_context()\n self._graph._set_control_flow_context(None)\n self._graph._push_control_dependencies_controller(self)\n\n def __exit__(self, unused_type, unused_value, unused_traceback):\n self._graph._pop_control_dependencies_controller(self)\n if self._new_stack:\n self._graph._control_dependencies_stack = self._old_stack\n self._graph._set_control_flow_context(self._old_control_flow_context)\n\n# pylint: enable=protected-access\n\n @property\n def control_inputs(self):\n return self._control_inputs_val\n\n def add_op(self, op):\n if isinstance(op, Tensor):\n op = op.ref()\n self._seen_nodes.add(op)\n\n def op_in_group(self, op):\n if isinstance(op, Tensor):\n op = op.ref()\n return op in self._seen_nodes\n\n def _push_control_dependencies_controller(self, controller):\n self._control_dependencies_stack.append(controller)\n\n def _pop_control_dependencies_controller(self, controller):\n assert self._control_dependencies_stack[-1] is controller\n self._control_dependencies_stack.pop()\n\n def _current_control_dependencies(self):\n ret = set()\n for controller in self._control_dependencies_stack:\n for op in controller.control_inputs:\n ret.add(op)\n return ret\n\n def _control_dependencies_for_inputs(self, input_ops):\n \"\"\"For an op that takes `input_ops` as inputs, compute control inputs.\n\n The returned control dependencies should yield an execution that\n is equivalent to adding all control inputs in\n self._control_dependencies_stack to a newly created op. 
However,\n this function attempts to prune the returned control dependencies\n by observing that nodes created within the same `with\n control_dependencies(...):` block may have data dependencies that make\n the explicit approach redundant.\n\n Args:\n input_ops: The data input ops for an op to be created.\n\n Returns:\n A list of control inputs for the op to be created.\n \"\"\"\n ret = []\n for controller in self._control_dependencies_stack:\n # If any of the input_ops already depends on the inputs from controller,\n # we say that the new op is dominated (by that input), and we therefore\n # do not need to add control dependencies for this controller's inputs.\n dominated = False\n for op in input_ops:\n if controller.op_in_group(op):\n dominated = True\n break\n if not dominated:\n # Don't add a control input if we already have a data dependency on i.\n # NOTE(mrry): We do not currently track transitive data dependencies,\n # so we may add redundant control inputs.\n ret.extend(c for c in controller.control_inputs if c not in input_ops)\n return ret\n\n def _record_op_seen_by_control_dependencies(self, op):\n \"\"\"Record that the given op depends on all registered control dependencies.\n\n Args:\n op: An Operation.\n \"\"\"\n for controller in self._control_dependencies_stack:\n controller.add_op(op)\n\n def control_dependencies(self, control_inputs):\n \"\"\"Returns a context manager that specifies control dependencies.\n\n Use with the `with` keyword to specify that all operations constructed\n within the context should have control dependencies on\n `control_inputs`. For example:\n\n ```python\n with g.control_dependencies([a, b, c]):\n # `d` and `e` will only run after `a`, `b`, and `c` have executed.\n d = ...\n e = ...\n ```\n\n Multiple calls to `control_dependencies()` can be nested, and in\n that case a new `Operation` will have control dependencies on the union\n of `control_inputs` from all active contexts.\n\n ```python\n with g.control_dependencies([a, b]):\n # Ops constructed here run after `a` and `b`.\n with g.control_dependencies([c, d]):\n # Ops constructed here run after `a`, `b`, `c`, and `d`.\n ```\n\n You can pass None to clear the control dependencies:\n\n ```python\n with g.control_dependencies([a, b]):\n # Ops constructed here run after `a` and `b`.\n with g.control_dependencies(None):\n # Ops constructed here run normally, not waiting for either `a` or `b`.\n with g.control_dependencies([c, d]):\n # Ops constructed here run after `c` and `d`, also not waiting\n # for either `a` or `b`.\n ```\n\n *N.B.* The control dependencies context applies *only* to ops that\n are constructed within the context. Merely using an op or tensor\n in the context does not add a control dependency. The following\n example illustrates this point:\n\n ```python\n # WRONG\n def my_func(pred, tensor):\n t = tf.matmul(tensor, tensor)\n with tf.control_dependencies([pred]):\n # The matmul op is created outside the context, so no control\n # dependency will be added.\n return t\n\n # RIGHT\n def my_func(pred, tensor):\n with tf.control_dependencies([pred]):\n # The matmul op is created in the context, so a control dependency\n # will be added.\n return tf.matmul(tensor, tensor)\n ```\n\n Also note that though execution of ops created under this scope will trigger\n execution of the dependencies, the ops created under this scope might still\n be pruned from a normal tensorflow graph. 
For example, in the following\n snippet of code the dependencies are never executed:\n\n ```python\n loss = model.loss()\n with tf.control_dependencies(dependencies):\n loss = loss + tf.constant(1) # note: dependencies ignored in the\n # backward pass\n return tf.gradients(loss, model.variables)\n ```\n\n This is because evaluating the gradient graph does not require evaluating\n the constant(1) op created in the forward pass.\n\n Args:\n control_inputs: A list of `Operation` or `Tensor` objects which must be\n executed or computed before running the operations defined in the\n context. Can also be `None` to clear the control dependencies.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n\n Raises:\n TypeError: If `control_inputs` is not a list of `Operation` or\n `Tensor` objects.\n \"\"\"\n if control_inputs is None:\n return self._ControlDependenciesController(self, None)\n # First convert the inputs to ops, and deduplicate them.\n # NOTE(mrry): Other than deduplication, we do not currently track direct\n # or indirect dependencies between control_inputs, which may result in\n # redundant control inputs.\n control_ops = []\n current = self._current_control_dependencies()\n for c in control_inputs:\n # The hasattr(handle) is designed to match ResourceVariables. This is so\n # control dependencies on a variable or on an unread variable don't\n # trigger reads.\n if (isinstance(c, IndexedSlices) or\n (hasattr(c, \"_handle\") and hasattr(c, \"op\"))):\n c = c.op\n c = self.as_graph_element(c)\n if isinstance(c, Tensor):\n c = c.op\n elif not isinstance(c, Operation):\n raise TypeError(\"Control input must be Operation or Tensor: %s\" % c)\n if c not in current:\n control_ops.append(c)\n current.add(c)\n return self._ControlDependenciesController(self, control_ops)\n\n # pylint: disable=g-doc-return-or-yield\n @tf_contextlib.contextmanager\n def _attr_scope(self, attr_map):\n \"\"\"EXPERIMENTAL: A context manager for setting attributes on operators.\n\n This context manager can be used to add additional\n attributes to operators within the scope of the context.\n\n For example:\n\n with ops.Graph().as_default() as g:\n f_1 = Foo() # No extra attributes\n with g._attr_scope({\"_a\": tf.attr_value_pb2.AttrValue(b=False)}):\n f_2 = Foo() # Additional attribute _a=False\n with g._attr_scope({\"_a\": tf.attr_value_pb2.AttrValue(b=True)}):\n f_3 = Foo() # Additional attribute _a=False\n with g._attr_scope({\"_a\": None}):\n f_4 = Foo() # No additional attributes.\n\n Args:\n attr_map: A dictionary mapping attr name strings to AttrValue protocol\n buffers or None.\n\n Returns:\n A context manager that sets the kernel label to be used for one or more\n ops created in that context.\n\n Raises:\n TypeError: If attr_map is not a dictionary mapping\n strings to AttrValue protobufs.\n \"\"\"\n if not isinstance(attr_map, dict):\n raise TypeError(\"attr_map must be a dictionary mapping \"\n \"strings to AttrValue protocol buffers\")\n # The saved_attrs dictionary stores any currently-set labels that\n # will be overridden by this context manager.\n saved_attrs = {}\n # Install the given attribute\n for name, attr in attr_map.items():\n if not (isinstance(name, six.string_types) and\n (isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or\n callable(attr))):\n raise TypeError(\"attr_map must be a dictionary mapping \"\n \"strings to AttrValue protocol buffers or \"\n \"callables that emit AttrValue protocol buffers\")\n try:\n 
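# Remember any attr already installed under this name so that it can be\n # restored when the context exits.\n 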
saved_attrs[name] = self._attr_scope_map[name]\n except KeyError:\n pass\n if attr is None:\n del self._attr_scope_map[name]\n else:\n self._attr_scope_map[name] = attr\n try:\n yield # The code within the context runs here.\n finally:\n # Remove the attributes set for this context, and restore any saved\n # attributes.\n for name, attr in attr_map.items():\n try:\n self._attr_scope_map[name] = saved_attrs[name]\n except KeyError:\n del self._attr_scope_map[name]\n\n # pylint: enable=g-doc-return-or-yield\n\n # pylint: disable=g-doc-return-or-yield\n @tf_contextlib.contextmanager\n def _kernel_label_map(self, op_to_kernel_label_map):\n \"\"\"EXPERIMENTAL: A context manager for setting kernel labels.\n\n This context manager can be used to select particular\n implementations of kernels within the scope of the context.\n\n For example:\n\n with ops.Graph().as_default() as g:\n f_1 = Foo() # Uses the default registered kernel for the Foo op.\n with g.kernel_label_map({\"Foo\": \"v_2\"}):\n f_2 = Foo() # Uses the registered kernel with label \"v_2\"\n # for the Foo op.\n with g.kernel_label_map({\"Foo\": \"v_3\"}):\n f_3 = Foo() # Uses the registered kernel with label \"v_3\"\n # for the Foo op.\n with g.kernel_label_map({\"Foo\": \"\"}):\n f_4 = Foo() # Uses the default registered kernel\n # for the Foo op.\n\n Args:\n op_to_kernel_label_map: A dictionary mapping op type strings to kernel\n label strings.\n\n Returns:\n A context manager that sets the kernel label to be used for one or more\n ops created in that context.\n\n Raises:\n TypeError: If op_to_kernel_label_map is not a dictionary mapping\n strings to strings.\n \"\"\"\n if not isinstance(op_to_kernel_label_map, dict):\n raise TypeError(\"op_to_kernel_label_map must be a dictionary mapping \"\n \"strings to strings\")\n # The saved_labels dictionary stores any currently-set labels that\n # will be overridden by this context manager.\n saved_labels = {}\n # Install the given label\n for op_type, label in op_to_kernel_label_map.items():\n if not (isinstance(op_type, six.string_types) and\n isinstance(label, six.string_types)):\n raise TypeError(\"op_to_kernel_label_map must be a dictionary mapping \"\n \"strings to strings\")\n try:\n saved_labels[op_type] = self._op_to_kernel_label_map[op_type]\n except KeyError:\n pass\n self._op_to_kernel_label_map[op_type] = label\n try:\n yield # The code within the context runs here.\n finally:\n # Remove the labels set for this context, and restore any saved labels.\n for op_type, label in op_to_kernel_label_map.items():\n try:\n self._op_to_kernel_label_map[op_type] = saved_labels[op_type]\n except KeyError:\n del self._op_to_kernel_label_map[op_type]\n\n # pylint: enable=g-doc-return-or-yield\n\n @tf_contextlib.contextmanager\n def _override_gradient_function(self, gradient_function_map):\n \"\"\"Specify gradient function for the given op type.\"\"\"\n\n # This is an internal API and we don't need nested context for this.\n # TODO(mdan): make it a proper context manager.\n assert not self._gradient_function_map\n self._gradient_function_map = gradient_function_map\n try:\n yield\n finally:\n self._gradient_function_map = {}\n\n # pylint: disable=g-doc-return-or-yield\n @tf_contextlib.contextmanager\n def gradient_override_map(self, op_type_map):\n \"\"\"EXPERIMENTAL: A context manager for overriding gradient functions.\n\n This context manager can be used to override the gradient function\n that will be used for ops within the scope of the context.\n\n For example:\n\n ```python\n 
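# Sketch of the flow in this example: register a gradient function under a\n # new op type name, then remap \"Square\" to it for ops created in this graph.\n 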
@tf.RegisterGradient(\"CustomSquare\")\n def _custom_square_grad(op, grad):\n # ...\n\n with tf.Graph().as_default() as g:\n c = tf.constant(5.0)\n s_1 = tf.square(c) # Uses the default gradient for tf.square.\n with g.gradient_override_map({\"Square\": \"CustomSquare\"}):\n s_2 = tf.square(s_2) # Uses _custom_square_grad to compute the\n # gradient of s_2.\n ```\n\n Args:\n op_type_map: A dictionary mapping op type strings to alternative op type\n strings.\n\n Returns:\n A context manager that sets the alternative op type to be used for one\n or more ops created in that context.\n\n Raises:\n TypeError: If `op_type_map` is not a dictionary mapping strings to\n strings.\n \"\"\"\n if not isinstance(op_type_map, dict):\n raise TypeError(\"op_type_map must be a dictionary mapping \"\n \"strings to strings\")\n # The saved_mappings dictionary stores any currently-set mappings that\n # will be overridden by this context manager.\n saved_mappings = {}\n # Install the given label\n for op_type, mapped_op_type in op_type_map.items():\n if not (isinstance(op_type, six.string_types) and\n isinstance(mapped_op_type, six.string_types)):\n raise TypeError(\"op_type_map must be a dictionary mapping \"\n \"strings to strings\")\n try:\n saved_mappings[op_type] = self._gradient_override_map[op_type]\n except KeyError:\n pass\n self._gradient_override_map[op_type] = mapped_op_type\n try:\n yield # The code within the context runs here.\n finally:\n # Remove the labels set for this context, and restore any saved labels.\n for op_type, mapped_op_type in op_type_map.items():\n try:\n self._gradient_override_map[op_type] = saved_mappings[op_type]\n except KeyError:\n del self._gradient_override_map[op_type]\n\n # pylint: enable=g-doc-return-or-yield\n\n def prevent_feeding(self, tensor):\n \"\"\"Marks the given `tensor` as unfeedable in this graph.\"\"\"\n self._unfeedable_tensors.add(tensor)\n\n def is_feedable(self, tensor):\n \"\"\"Returns `True` if and only if `tensor` is feedable.\"\"\"\n return tensor not in self._unfeedable_tensors\n\n def prevent_fetching(self, op):\n \"\"\"Marks the given `op` as unfetchable in this graph.\"\"\"\n self._unfetchable_ops.add(op)\n\n def is_fetchable(self, tensor_or_op):\n \"\"\"Returns `True` if and only if `tensor_or_op` is fetchable.\"\"\"\n if isinstance(tensor_or_op, Tensor):\n return tensor_or_op.op not in self._unfetchable_ops\n else:\n return tensor_or_op not in self._unfetchable_ops\n\n def switch_to_thread_local(self):\n \"\"\"Make device, colocation and dependencies stacks thread-local.\n\n Device, colocation and dependencies stacks are not thread-local be default.\n If multiple threads access them, then the state is shared. This means that\n one thread may affect the behavior of another thread.\n\n After this method is called, the stacks become thread-local. If multiple\n threads access them, then the state is not shared. 
Each thread uses its own\n value; a thread doesn't affect other threads by mutating such a stack.\n\n The initial value for every thread's stack is set to the current value\n of the stack when `switch_to_thread_local()` was first called.\n \"\"\"\n if not self._stack_state_is_thread_local:\n self._stack_state_is_thread_local = True\n\n @property\n def _device_function_stack(self):\n if self._stack_state_is_thread_local:\n # This may be called from a thread where device_function_stack doesn't yet\n # exist.\n # pylint: disable=protected-access\n if not hasattr(self._thread_local, \"_device_function_stack\"):\n stack_copy_for_this_thread = self._graph_device_function_stack.copy()\n self._thread_local._device_function_stack = stack_copy_for_this_thread\n return self._thread_local._device_function_stack\n # pylint: enable=protected-access\n else:\n return self._graph_device_function_stack\n\n @property\n def _device_functions_outer_to_inner(self):\n user_device_specs = self._device_function_stack.peek_objs()\n device_functions = [spec.function for spec in user_device_specs]\n device_functions_outer_to_inner = list(reversed(device_functions))\n return device_functions_outer_to_inner\n\n def _snapshot_device_function_stack_metadata(self):\n \"\"\"Return device function stack as a list of TraceableObjects.\n\n Returns:\n [traceable_stack.TraceableObject, ...] where each TraceableObject's .obj\n member is a displayable name for the user's argument to Graph.device, and\n the filename and lineno members point to the code location where\n Graph.device was called directly or indirectly by the user.\n \"\"\"\n snapshot = []\n for obj in self._device_function_stack.peek_traceable_objs():\n obj_copy = obj.copy_metadata()\n obj_copy.obj = obj.obj.display_name\n snapshot.append(obj_copy)\n return snapshot\n\n @_device_function_stack.setter\n def _device_function_stack(self, device_function_stack):\n if self._stack_state_is_thread_local:\n # pylint: disable=protected-access\n self._thread_local._device_function_stack = device_function_stack\n # pylint: enable=protected-access\n else:\n self._graph_device_function_stack = device_function_stack\n\n @property\n def _colocation_stack(self):\n \"\"\"Return thread-local copy of colocation stack.\"\"\"\n if self._stack_state_is_thread_local:\n # This may be called from a thread where colocation_stack doesn't yet\n # exist.\n # pylint: disable=protected-access\n if not hasattr(self._thread_local, \"_colocation_stack\"):\n stack_copy_for_this_thread = self._graph_colocation_stack.copy()\n self._thread_local._colocation_stack = stack_copy_for_this_thread\n return self._thread_local._colocation_stack\n # pylint: enable=protected-access\n else:\n return self._graph_colocation_stack\n\n def _snapshot_colocation_stack_metadata(self):\n \"\"\"Return colocation stack metadata as a dictionary.\"\"\"\n return {\n traceable_obj.obj.name: traceable_obj.copy_metadata()\n for traceable_obj in self._colocation_stack.peek_traceable_objs()\n }\n\n @_colocation_stack.setter\n def _colocation_stack(self, colocation_stack):\n if self._stack_state_is_thread_local:\n # pylint: disable=protected-access\n self._thread_local._colocation_stack = colocation_stack\n # pylint: enable=protected-access\n else:\n self._graph_colocation_stack = colocation_stack\n\n @property\n def _control_dependencies_stack(self):\n if self._stack_state_is_thread_local:\n # This may be called from a thread where control_dependencies_stack\n # doesn't yet exist.\n if not hasattr(self._thread_local, 
\"_control_dependencies_stack\"):\n self._thread_local._control_dependencies_stack = (\n self._graph_control_dependencies_stack[:])\n return self._thread_local._control_dependencies_stack\n else:\n return self._graph_control_dependencies_stack\n\n @_control_dependencies_stack.setter\n def _control_dependencies_stack(self, control_dependencies):\n if self._stack_state_is_thread_local:\n self._thread_local._control_dependencies_stack = control_dependencies\n else:\n self._graph_control_dependencies_stack = control_dependencies\n\n @property\n def _distribution_strategy_stack(self):\n \"\"\"A stack to maintain distribution strategy context for each thread.\"\"\"\n if not hasattr(self._thread_local, \"_distribution_strategy_stack\"):\n self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access\n return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access\n\n @_distribution_strategy_stack.setter\n def _distribution_strategy_stack(self, _distribution_strategy_stack):\n self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access\n _distribution_strategy_stack)\n\n @property\n def _global_distribute_strategy_scope(self):\n \"\"\"For implementing `tf.distribute.set_strategy()`.\"\"\"\n if not hasattr(self._thread_local, \"distribute_strategy_scope\"):\n self._thread_local.distribute_strategy_scope = None\n return self._thread_local.distribute_strategy_scope\n\n @_global_distribute_strategy_scope.setter\n def _global_distribute_strategy_scope(self, distribute_strategy_scope):\n self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)\n\n def _mutation_lock(self):\n \"\"\"Returns a lock to guard code that creates & mutates ops.\n\n See the comment for self._group_lock for more info.\n \"\"\"\n return self._group_lock.group(_MUTATION_LOCK_GROUP)\n\n def _session_run_lock(self):\n \"\"\"Returns a lock to guard code for Session.run.\n\n See the comment for self._group_lock for more info.\n \"\"\"\n return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)\n\n\n# TODO(agarwal): currently device directives in an outer eager scope will not\n# apply to inner graph mode code. Fix that.\n\n\n@tf_export(v1=[\"device\"])\ndef device(device_name_or_function):\n \"\"\"Wrapper for `Graph.device()` using the default graph.\n\n See `tf.Graph.device` for more details.\n\n Args:\n device_name_or_function: The device name or function to use in the context.\n\n Returns:\n A context manager that specifies the default device to use for newly\n created ops.\n\n Raises:\n RuntimeError: If eager execution is enabled and a function is passed in.\n \"\"\"\n if context.executing_eagerly():\n if callable(device_name_or_function):\n raise RuntimeError(\n \"tf.device does not support functions when eager execution \"\n \"is enabled.\")\n return context.device(device_name_or_function)\n elif executing_eagerly_outside_functions():\n @tf_contextlib.contextmanager\n def combined(device_name_or_function):\n with get_default_graph().device(device_name_or_function):\n if not callable(device_name_or_function):\n with context.device(device_name_or_function):\n yield\n else:\n yield\n return combined(device_name_or_function)\n else:\n return get_default_graph().device(device_name_or_function)\n\n\n@tf_export(\"device\", v1=[])\ndef device_v2(device_name):\n \"\"\"Specifies the device for ops created/executed in this context.\n\n This function specifies the device to be used for ops created/executed in a\n particular context. 
Nested contexts will inherit and also create/execute\n their ops on the specified device. If a specific device is not required,\n consider not using this function so that a device can be automatically\n assigned. In general the use of this function is optional. `device_name` can\n be fully specified, as in \"/job:worker/task:1/device:cpu:0\", or partially\n specified, containing only a subset of the \"/\"-separated fields. Any fields\n which are specified will override device annotations from outer scopes.\n\n For example:\n\n ```python\n with tf.device('/job:foo'):\n # ops created here have devices with /job:foo\n with tf.device('/job:bar/task:0/device:gpu:2'):\n # ops created here have the fully specified device above\n with tf.device('/device:gpu:1'):\n # ops created here have the device '/job:foo/device:gpu:1'\n ```\n\n Args:\n device_name: The device name to use in the context.\n\n Returns:\n A context manager that specifies the default device to use for newly\n created ops.\n\n Raises:\n RuntimeError: If a function is passed in.\n \"\"\"\n if callable(device_name):\n raise RuntimeError(\"tf.device does not support functions.\")\n return device(device_name)\n\n\n@tf_export(v1=[\"container\"])\ndef container(container_name):\n \"\"\"Wrapper for `Graph.container()` using the default graph.\n\n Args:\n container_name: The container string to use in the context.\n\n Returns:\n A context manager that specifies the default container to use for newly\n created stateful ops.\n \"\"\"\n return get_default_graph().container(container_name)\n\n\ndef _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):\n if context.executing_eagerly():\n if op is not None:\n if not hasattr(op, \"device\"):\n op = internal_convert_to_tensor_or_indexed_slices(op)\n return device(op.device)\n else:\n return NullContextmanager()\n else:\n default_graph = get_default_graph()\n if isinstance(op, EagerTensor):\n if default_graph.building_function:\n return default_graph.device(op.device)\n else:\n raise ValueError(\"Encountered an Eager-defined Tensor during graph \"\n \"construction, but a function was not being built.\")\n return default_graph._colocate_with_for_gradient(\n op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)\n\n\n# Internal interface to colocate_with. colocate_with has been deprecated from\n# public API. There are still a few internal uses of colocate_with. 
Add internal\n# only API for those uses to avoid deprecation warning.\ndef colocate_with(op, ignore_existing=False):\n return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)\n\n\[email protected](\n date=None, instructions=\"Colocations handled automatically by placer.\")\n@tf_export(v1=[\"colocate_with\"])\ndef _colocate_with(op, ignore_existing=False):\n return colocate_with(op, ignore_existing)\n\n\n@tf_export(\"control_dependencies\")\ndef control_dependencies(control_inputs):\n \"\"\"Wrapper for `Graph.control_dependencies()` using the default graph.\n\n See `tf.Graph.control_dependencies` for more details.\n\n Note: *In TensorFlow 2 with eager and/or Autograph, you should not require\n this method, as ops execute in the expected order thanks to automatic control\n dependencies.* Only use `tf.control_dependencies` when working with v1\n `tf.Graph` code.\n\n When eager execution is enabled, any callable object in the `control_inputs`\n list will be called.\n\n Args:\n control_inputs: A list of `Operation` or `Tensor` objects which must be\n executed or computed before running the operations defined in the context.\n Can also be `None` to clear the control dependencies. If eager execution\n is enabled, any callable object in the `control_inputs` list will be\n called.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n \"\"\"\n if context.executing_eagerly():\n if control_inputs:\n # Execute any pending callables.\n for control in control_inputs:\n if callable(control):\n control()\n return NullContextmanager()\n else:\n return get_default_graph().control_dependencies(control_inputs)\n\n\nclass _DefaultStack(threading.local):\n \"\"\"A thread-local stack of objects for providing implicit defaults.\"\"\"\n\n def __init__(self):\n super(_DefaultStack, self).__init__()\n self._enforce_nesting = True\n self.stack = []\n\n def get_default(self):\n return self.stack[-1] if self.stack else None\n\n def reset(self):\n self.stack = []\n\n def is_cleared(self):\n return not self.stack\n\n @property\n def enforce_nesting(self):\n return self._enforce_nesting\n\n @enforce_nesting.setter\n def enforce_nesting(self, value):\n self._enforce_nesting = value\n\n @tf_contextlib.contextmanager\n def get_controller(self, default):\n \"\"\"A context manager for manipulating a default stack.\"\"\"\n self.stack.append(default)\n try:\n yield default\n finally:\n # stack may be empty if reset() was called\n if self.stack:\n if self._enforce_nesting:\n if self.stack[-1] is not default:\n raise AssertionError(\n \"Nesting violated for default stack of %s objects\" %\n type(default))\n self.stack.pop()\n else:\n self.stack.remove(default)\n\n\n_default_session_stack = _DefaultStack() # pylint: disable=protected-access\n\n\ndef default_session(session):\n \"\"\"Python \"with\" handler for defining a default session.\n\n This function provides a means of registering a session for handling\n Tensor.eval() and Operation.run() calls. It is primarily intended for use\n by session.Session, but can be used with any object that implements\n the Session.run() interface.\n\n Use with the \"with\" keyword to specify that Tensor.eval() and Operation.run()\n invocations within the scope of a block should be executed by a particular\n session.\n\n The default session applies to the current thread only, so it is always\n possible to inspect the call stack and determine the scope of a default\n session. 
If you create a new thread, and wish to use the default session\n in that thread, you must explicitly add a \"with ops.default_session(sess):\"\n block in that thread's function.\n\n Example:\n The following code examples are equivalent:\n\n # 1. Using the Session object directly:\n sess = ...\n c = tf.constant(5.0)\n sess.run(c)\n\n # 2. Using default_session():\n sess = ...\n with ops.default_session(sess):\n c = tf.constant(5.0)\n result = c.eval()\n\n # 3. Overriding default_session():\n sess = ...\n with ops.default_session(sess):\n c = tf.constant(5.0)\n with ops.default_session(...):\n c.eval(session=sess)\n\n Args:\n session: The session to be installed as the default session.\n\n Returns:\n A context manager for the default session.\n \"\"\"\n return _default_session_stack.get_controller(session)\n\n\n@tf_export(v1=[\"get_default_session\"])\ndef get_default_session():\n \"\"\"Returns the default session for the current thread.\n\n The returned `Session` will be the innermost session on which a\n `Session` or `Session.as_default()` context has been entered.\n\n NOTE: The default session is a property of the current thread. If you\n create a new thread, and wish to use the default session in that\n thread, you must explicitly add a `with sess.as_default():` in that\n thread's function.\n\n Returns:\n The default `Session` being used in the current thread.\n \"\"\"\n return _default_session_stack.get_default()\n\n\ndef _eval_using_default_session(tensors, feed_dict, graph, session=None):\n \"\"\"Uses the default session to evaluate one or more tensors.\n\n Args:\n tensors: A single Tensor, or a list of Tensor objects.\n feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,\n numpy ndarrays, TensorProtos, or strings.\n graph: The graph in which the tensors are defined.\n session: (Optional) A different session to use to evaluate \"tensors\".\n\n Returns:\n Either a single numpy ndarray if \"tensors\" is a single tensor; or a list\n of numpy ndarrays that each correspond to the respective element in\n \"tensors\".\n\n Raises:\n ValueError: If no default session is available; the default session\n does not have \"graph\" as its graph; or if \"session\" is specified,\n and it does not have \"graph\" as its graph.\n \"\"\"\n if session is None:\n session = get_default_session()\n if session is None:\n raise ValueError(\"Cannot evaluate tensor using `eval()`: No default \"\n \"session is registered. Use `with \"\n \"sess.as_default()` or pass an explicit session to \"\n \"`eval(session=sess)`\")\n if session.graph is not graph:\n raise ValueError(\"Cannot use the default session to evaluate tensor: \"\n \"the tensor's graph is different from the session's \"\n \"graph. 
Pass an explicit session to \"\n \"`eval(session=sess)`.\")\n else:\n if session.graph is not graph:\n raise ValueError(\"Cannot use the given session to evaluate tensor: \"\n \"the tensor's graph is different from the session's \"\n \"graph.\")\n return session.run(tensors, feed_dict)\n\n\ndef _run_using_default_session(operation, feed_dict, graph, session=None):\n \"\"\"Uses the default session to run \"operation\".\n\n Args:\n operation: The Operation to be run.\n feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,\n numpy ndarrays, TensorProtos, or strings.\n graph: The graph in which \"operation\" is defined.\n session: (Optional) A different session to use to run \"operation\".\n\n Raises:\n ValueError: If no default session is available; the default session\n does not have \"graph\" as its graph; or if \"session\" is specified,\n and it does not have \"graph\" as its graph.\n \"\"\"\n if session is None:\n session = get_default_session()\n if session is None:\n raise ValueError(\"Cannot execute operation using `run()`: No default \"\n \"session is registered. Use `with \"\n \"sess.as_default():` or pass an explicit session to \"\n \"`run(session=sess)`\")\n if session.graph is not graph:\n raise ValueError(\"Cannot use the default session to execute operation: \"\n \"the operation's graph is different from the \"\n \"session's graph. Pass an explicit session to \"\n \"run(session=sess).\")\n else:\n if session.graph is not graph:\n raise ValueError(\"Cannot use the given session to execute operation: \"\n \"the operation's graph is different from the session's \"\n \"graph.\")\n session.run(operation, feed_dict)\n\n\nclass _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access\n \"\"\"A thread-local stack of objects for providing an implicit default graph.\"\"\"\n\n def __init__(self):\n super(_DefaultGraphStack, self).__init__()\n self._global_default_graph = None\n\n def get_default(self):\n \"\"\"Override that returns a global default if the stack is empty.\"\"\"\n if self.stack:\n return self.stack[-1]\n elif self._global_default_graph:\n return self._global_default_graph\n else:\n self._global_default_graph = Graph()\n return self._global_default_graph\n\n def _GetGlobalDefaultGraph(self):\n if self._global_default_graph is None:\n # TODO(mrry): Perhaps log that the default graph is being used, or set\n # provide some other feedback to prevent confusion when a mixture of\n # the global default graph and an explicit graph are combined in the\n # same process.\n self._global_default_graph = Graph()\n return self._global_default_graph\n\n def reset(self):\n super(_DefaultGraphStack, self).reset()\n self._global_default_graph = None\n\n @tf_contextlib.contextmanager\n def get_controller(self, default):\n context.context().context_switches.push(default.building_function,\n default.as_default,\n default._device_function_stack)\n try:\n with super(_DefaultGraphStack,\n self).get_controller(default) as g, context.graph_mode():\n yield g\n finally:\n # If an exception is raised here it may be hiding a related exception in\n # the try-block (just above).\n context.context().context_switches.pop()\n\n\n_default_graph_stack = _DefaultGraphStack()\n\n\n# Shared helper used in init_scope and executing_eagerly_outside_functions\n# to obtain the outermost context that is not building a function, and the\n# innermost non empty device stack.\ndef _get_outer_context_and_inner_device_stack():\n \"\"\"Get the outermost context not building a function.\"\"\"\n 
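# The returned outer_context is a callable (e.g. a graph's as_default method or\n # a recorded enter_context_fn); callers re-enter it with \"with outer_context():\".\n 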
default_graph = get_default_graph()\n outer_context = None\n innermost_nonempty_device_stack = default_graph._device_function_stack # pylint: disable=protected-access\n\n if not _default_graph_stack.stack:\n # If the default graph stack is empty, then we cannot be building a\n # function. Install the global graph (which, in this case, is also the\n # default graph) as the outer context.\n if default_graph.building_function:\n raise RuntimeError(\"The global graph is building a function.\")\n outer_context = default_graph.as_default\n else:\n # Find a context that is not building a function.\n for stack_entry in reversed(context.context().context_switches.stack):\n if not innermost_nonempty_device_stack:\n innermost_nonempty_device_stack = stack_entry.device_stack\n if not stack_entry.is_building_function:\n outer_context = stack_entry.enter_context_fn\n break\n\n if outer_context is None:\n # As a last resort, obtain the global default graph; this graph doesn't\n # necessarily live on the graph stack (and hence it doesn't necessarily\n # live on the context stack), but it is stored in the graph stack's\n # encapsulating object.\n outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access\n\n if outer_context is None:\n # Sanity check; this shouldn't be triggered.\n raise RuntimeError(\"All graphs are building functions, and no \"\n \"eager context was previously active.\")\n\n return outer_context, innermost_nonempty_device_stack\n\n\n# pylint: disable=g-doc-return-or-yield,line-too-long\n@tf_export(\"init_scope\")\n@tf_contextlib.contextmanager\ndef init_scope():\n \"\"\"A context manager that lifts ops out of control-flow scopes and function-building graphs.\n\n There is often a need to lift variable initialization ops out of control-flow\n scopes, function-building graphs, and gradient tapes. Entering an\n `init_scope` is a mechanism for satisfying these desiderata. In particular,\n entering an `init_scope` has three effects:\n\n (1) All control dependencies are cleared the moment the scope is entered;\n this is equivalent to entering the context manager returned from\n `control_dependencies(None)`, which has the side-effect of exiting\n control-flow scopes like `tf.cond` and `tf.while_loop`.\n\n (2) All operations that are created while the scope is active are lifted\n into the lowest context on the `context_stack` that is not building a\n graph function. Here, a context is defined as either a graph or an eager\n context. Every context switch, i.e., every installation of a graph as\n the default graph and every switch into eager mode, is logged in a\n thread-local stack called `context_switches`; the log entry for a\n context switch is popped from the stack when the context is exited.\n Entering an `init_scope` is equivalent to crawling up\n `context_switches`, finding the first context that is not building a\n graph function, and entering it. A caveat is that if graph mode is\n enabled but the default graph stack is empty, then entering an\n `init_scope` will simply install a fresh graph as the default one.\n\n (3) The gradient tape is paused while the scope is active.\n\n When eager execution is enabled, code inside an init_scope block runs with\n eager execution enabled even when tracing a `tf.function`. 
For example:\n\n ```python\n tf.compat.v1.enable_eager_execution()\n\n @tf.function\n def func():\n # A function constructs TensorFlow graphs,\n # it does not execute eagerly.\n assert not tf.executing_eagerly()\n with tf.init_scope():\n # Initialization runs with eager execution enabled\n assert tf.executing_eagerly()\n ```\n\n Raises:\n RuntimeError: if graph state is incompatible with this initialization.\n \"\"\"\n # pylint: enable=g-doc-return-or-yield,line-too-long\n\n if context.executing_eagerly():\n # Fastpath.\n with tape.stop_recording():\n yield\n else:\n # Retrieve the active name scope: entering an `init_scope` preserves\n # the name scope of the current context.\n scope = get_default_graph().get_name_scope()\n if scope and scope[-1] != \"/\":\n # Names that end with trailing slashes are treated by `name_scope` as\n # absolute.\n scope = scope + \"/\"\n\n outer_context, innermost_nonempty_device_stack = (\n _get_outer_context_and_inner_device_stack())\n\n outer_graph = None\n outer_device_stack = None\n try:\n with outer_context(), name_scope(\n scope, skip_on_eager=False), control_dependencies(\n None), tape.stop_recording():\n context_manager = NullContextmanager\n context_manager_input = None\n if not context.executing_eagerly():\n # The device stack is preserved when lifting into a graph. Eager\n # execution doesn't implement device stacks and in particular it\n # doesn't support device functions, so in general it's not possible\n # to do the same when lifting into the eager context.\n outer_graph = get_default_graph()\n outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access\n outer_graph._device_function_stack = innermost_nonempty_device_stack # pylint: disable=protected-access\n elif innermost_nonempty_device_stack is not None:\n for device_spec in innermost_nonempty_device_stack.peek_objs():\n if device_spec.function is None:\n break\n if device_spec.raw_string:\n context_manager = context.device\n context_manager_input = device_spec.raw_string\n break\n # It is currently not possible to have a device function in V2,\n # but in V1 we are unable to apply device functions in eager mode.\n # This means that we will silently skip some of the entries on the\n # device stack in V1 + eager mode.\n\n with context_manager(context_manager_input):\n yield\n finally:\n # If an exception is raised here it may be hiding a related exception in\n # try-block (just above).\n if outer_graph is not None:\n outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access\n\n\n@tf_export(v1=[\"executing_eagerly_outside_functions\"])\ndef executing_eagerly_outside_functions():\n \"\"\"Returns True if executing eagerly, even if inside a graph function.\n\n This function will check the outermost context for the program and see if\n it is in eager mode. It is useful comparing to `tf.executing_eagerly()`,\n which checks the current context and will return `False` within a\n `tf.function` body. It can be used to build library that behave differently\n in eager runtime and v1 session runtime (deprecated).\n\n Example:\n\n >>> tf.compat.v1.enable_eager_execution()\n >>> @tf.function\n ... def func():\n ... # A function constructs TensorFlow graphs, it does not execute eagerly,\n ... # but the outer most context is still eager.\n ... assert not tf.executing_eagerly()\n ... 
return tf.compat.v1.executing_eagerly_outside_functions()\n >>> func()\n <tf.Tensor: shape=(), dtype=bool, numpy=True>\n\n Returns:\n boolean, whether the outermost context is in eager mode.\n \"\"\"\n if context.executing_eagerly():\n return True\n else:\n outer_context, _ = _get_outer_context_and_inner_device_stack()\n with outer_context():\n return context.executing_eagerly()\n\n\n@tf_export(\"inside_function\", v1=[])\ndef inside_function():\n \"\"\"Indicates whether the caller code is executing inside a `tf.function`.\n\n Returns:\n Boolean, True if the caller code is executing inside a `tf.function`\n rather than eagerly.\n\n Example:\n\n >>> tf.inside_function()\n False\n >>> @tf.function\n ... def f():\n ... print(tf.inside_function())\n >>> f()\n True\n \"\"\"\n return get_default_graph().building_function\n\n\n@tf_export(v1=[\"enable_eager_execution\"])\ndef enable_eager_execution(config=None, device_policy=None,\n execution_mode=None):\n \"\"\"Enables eager execution for the lifetime of this program.\n\n Eager execution provides an imperative interface to TensorFlow. With eager\n execution enabled, TensorFlow functions execute operations immediately (as\n opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`)\n and\n return concrete values (as opposed to symbolic references to a node in a\n computational graph).\n\n For example:\n\n ```python\n tf.compat.v1.enable_eager_execution()\n\n # After eager execution is enabled, operations are executed as they are\n # defined and Tensor objects hold concrete values, which can be accessed as\n # numpy.ndarray`s through the numpy() method.\n assert tf.multiply(6, 7).numpy() == 42\n ```\n\n Eager execution cannot be enabled after TensorFlow APIs have been used to\n create or execute graphs. It is typically recommended to invoke this function\n at program startup and not in a library (as most libraries should be usable\n both with and without eager execution).\n\n @compatibility(TF2)\n This function is not necessary if you are using TF2. Eager execution is\n enabled by default.\n @end_compatibility\n\n Args:\n config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the\n environment in which operations are executed. Note that\n `tf.compat.v1.ConfigProto` is also used to configure graph execution (via\n `tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto`\n are not implemented (or are irrelevant) when eager execution is enabled.\n device_policy: (Optional.) Policy controlling how operations requiring\n inputs on a specific device (e.g., a GPU 0) handle inputs on a different\n device (e.g. GPU 1 or CPU). When set to None, an appropriate value will\n be picked automatically. The value picked may change between TensorFlow\n releases.\n Valid values:\n - tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the\n placement is not correct.\n - tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not\n on the right device but logs a warning.\n - tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.\n Note that this may hide performance problems as there is no notification\n provided when operations are blocked on the tensor being copied between\n devices.\n - tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies\n int32 tensors, raising errors on the other ones.\n execution_mode: (Optional.) Policy controlling how operations dispatched are\n actually executed. When set to None, an appropriate value will be picked\n automatically. 
The value picked may change between TensorFlow releases.\n Valid values:\n - tf.contrib.eager.SYNC: executes each operation synchronously.\n - tf.contrib.eager.ASYNC: executes each operation asynchronously. These\n operations may return \"non-ready\" handles.\n\n Raises:\n ValueError: If eager execution is enabled after creating/executing a\n TensorFlow graph, or if options provided conflict with a previous call\n to this function.\n \"\"\"\n _api_usage_gauge.get_cell().set(True)\n logging.vlog(1, \"Enabling eager execution\")\n if context.default_execution_mode != context.EAGER_MODE:\n return enable_eager_execution_internal(\n config=config,\n device_policy=device_policy,\n execution_mode=execution_mode,\n server_def=None)\n\n\n@tf_export(v1=[\"disable_eager_execution\"])\ndef disable_eager_execution():\n \"\"\"Disables eager execution.\n\n This function can only be called before any Graphs, Ops, or Tensors have been\n created.\n\n @compatibility(TF2)\n This function is not necessary if you are using TF2. Eager execution is\n enabled by default. If you want to use Graph mode please consider\n [tf.function](https://www.tensorflow.org/api_docs/python/tf/function).\n @end_compatibility\n \"\"\"\n _api_usage_gauge.get_cell().set(False)\n logging.vlog(1, \"Disabling eager execution\")\n context.default_execution_mode = context.GRAPH_MODE\n c = context.context_safe()\n if c is not None:\n c._thread_local_data.is_eager = False # pylint: disable=protected-access\n\n\ndef enable_eager_execution_internal(config=None,\n device_policy=None,\n execution_mode=None,\n server_def=None):\n \"\"\"Enables eager execution for the lifetime of this program.\n\n Most of the doc string for enable_eager_execution is relevant here as well.\n\n Args:\n config: See enable_eager_execution doc string\n device_policy: See enable_eager_execution doc string\n execution_mode: See enable_eager_execution doc string\n server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on\n remote devices. GrpcServers need to be started by creating an identical\n server_def to this, and setting the appropriate task_indexes, so that the\n servers can communicate. 
It will then be possible to execute operations on\n remote devices.\n\n Raises:\n ValueError\n\n \"\"\"\n if config is not None and not isinstance(config, config_pb2.ConfigProto):\n raise TypeError(\"config must be a tf.ConfigProto, but got %s\" %\n type(config))\n if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,\n context.DEVICE_PLACEMENT_WARN,\n context.DEVICE_PLACEMENT_SILENT,\n context.DEVICE_PLACEMENT_SILENT_FOR_INT32):\n raise ValueError(\n \"device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*\"\n )\n if execution_mode not in (None, context.SYNC, context.ASYNC):\n raise ValueError(\n \"execution_mode must be one of None, tf.contrib.eager.SYNC, \"\n \"tf.contrib.eager.ASYNC\")\n if context.default_execution_mode == context.GRAPH_MODE:\n graph_mode_has_been_used = (\n _default_graph_stack._global_default_graph is not None) # pylint: disable=protected-access\n if graph_mode_has_been_used:\n raise ValueError(\n \"tf.enable_eager_execution must be called at program startup.\")\n context.default_execution_mode = context.EAGER_MODE\n # pylint: disable=protected-access\n with context._context_lock:\n if context._context is None:\n context._set_context_locked(context.Context(\n config=config,\n device_policy=device_policy,\n execution_mode=execution_mode,\n server_def=server_def))\n elif ((config is not None and config is not context._context._config) or\n (device_policy is not None and\n device_policy is not context._context._device_policy) or\n (execution_mode is not None and\n execution_mode is not context._context._execution_mode)):\n raise ValueError(\n \"Trying to change the options of an active eager\"\n \" execution. Context config: %s, specified config:\"\n \" %s. Context device policy: %s, specified device\"\n \" policy: %s. Context execution mode: %s, \"\n \" specified execution mode %s.\" %\n (context._context._config, config, context._context._device_policy,\n device_policy, context._context._execution_mode, execution_mode))\n else:\n # We already created everything, so update the thread local data.\n context._context._thread_local_data.is_eager = True\n\n # Monkey patch to get rid of an unnecessary conditional since the context is\n # now initialized.\n context.context = context.context_safe\n\n\ndef eager_run(main=None, argv=None):\n \"\"\"Runs the program with an optional main function and argv list.\n\n The program will run with eager execution enabled.\n\n Example:\n ```python\n import tensorflow as tf\n # Import subject to future changes:\n from tensorflow.contrib.eager.python import tfe\n\n def main(_):\n u = tf.constant(6.0)\n v = tf.constant(7.0)\n print(u * v)\n\n if __name__ == \"__main__\":\n tfe.run()\n ```\n\n Args:\n main: the main function to run.\n argv: the arguments to pass to it.\n \"\"\"\n enable_eager_execution()\n app.run(main, argv)\n\n\n@tf_export(v1=[\"reset_default_graph\"])\ndef reset_default_graph():\n \"\"\"Clears the default graph stack and resets the global default graph.\n\n NOTE: The default graph is a property of the current thread. This\n function applies only to the current thread. Calling this function while\n a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will\n result in undefined\n behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects\n after calling this function will result in undefined behavior.\n\n @compatibility(TF2)\n `reset_default_graph` does not work with either eager execution or\n `tf.function`, and you should not invoke it directly. 
To migrate code that\n uses Graph-related functions to TF2, rewrite the code without them. See the\n [migration guide](https://www.tensorflow.org/guide/migrate) for more\n description about the behavior and semantic changes between Tensorflow 1 and\n Tensorflow 2.\n @end_compatibility\n\n Raises:\n AssertionError: If this function is called within a nested graph.\n \"\"\"\n if not _default_graph_stack.is_cleared():\n raise AssertionError(\"Do not use tf.reset_default_graph() to clear \"\n \"nested graphs. If you need a cleared graph, \"\n \"exit the nesting and create a new graph.\")\n _default_graph_stack.reset()\n\n\n@tf_export(v1=[\"get_default_graph\"])\ndef get_default_graph():\n \"\"\"Returns the default graph for the current thread.\n\n The returned graph will be the innermost graph on which a\n `Graph.as_default()` context has been entered, or a global default\n graph if none has been explicitly created.\n\n NOTE: The default graph is a property of the current thread. If you\n create a new thread, and wish to use the default graph in that\n thread, you must explicitly add a `with g.as_default():` in that\n thread's function.\n\n @compatibility(TF2)\n `get_default_graph` does not work with either eager execution or\n `tf.function`, and you should not invoke it directly. To migrate code that\n uses Graph-related functions to TF2, rewrite the code without them. See the\n [migration guide](https://www.tensorflow.org/guide/migrate) for more\n description about the behavior and semantic changes between Tensorflow 1 and\n Tensorflow 2.\n @end_compatibility\n\n Returns:\n The default `Graph` being used in the current thread.\n \"\"\"\n return _default_graph_stack.get_default()\n\n\ndef has_default_graph():\n \"\"\"Returns True if there is a default graph.\"\"\"\n return len(_default_graph_stack.stack) >= 1\n\n\n# Exported due to b/171079555\n@tf_export(\"__internal__.get_name_scope\", v1=[])\ndef get_name_scope():\n \"\"\"Returns the current name scope in the default_graph.\n\n For example:\n\n ```python\n with tf.name_scope('scope1'):\n with tf.name_scope('scope2'):\n print(tf.get_name_scope())\n ```\n would print the string `scope1/scope2`.\n\n Returns:\n A string representing the current name scope.\n \"\"\"\n if context.executing_eagerly():\n return context.context().scope_name.rstrip(\"/\")\n return get_default_graph().get_name_scope()\n\n\ndef _assert_same_graph(original_item, item):\n \"\"\"Fail if the 2 items are from different graphs.\n\n Args:\n original_item: Original item to check against.\n item: Item to check.\n\n Raises:\n ValueError: if graphs do not match.\n \"\"\"\n original_graph = getattr(original_item, \"graph\", None)\n graph = getattr(item, \"graph\", None)\n if original_graph and graph and original_graph is not graph:\n raise ValueError(\n \"%s must be from the same graph as %s (graphs are %s and %s).\" %\n (item, original_item, graph, original_graph))\n\n\ndef _get_graph_from_inputs(op_input_list, graph=None):\n \"\"\"Returns the appropriate graph to use for the given inputs.\n\n This library method provides a consistent algorithm for choosing the graph\n in which an Operation should be constructed:\n\n 1. If the default graph is being used to construct a function, we\n use the default graph.\n 2. If the \"graph\" is specified explicitly, we validate that all of the inputs\n in \"op_input_list\" are compatible with that graph.\n 3. 
Otherwise, we attempt to select a graph from the first Operation-\n or Tensor-valued input in \"op_input_list\", and validate that all other\n such inputs are in the same graph.\n 4. If the graph was not specified and it could not be inferred from\n \"op_input_list\", we attempt to use the default graph.\n\n Args:\n op_input_list: A list of inputs to an operation, which may include `Tensor`,\n `Operation`, and other objects that may be converted to a graph element.\n graph: (Optional) The explicit graph to use.\n\n Raises:\n TypeError: If op_input_list is not a list or tuple, or if graph is not a\n Graph.\n ValueError: If a graph is explicitly passed and not all inputs are from it,\n or if the inputs are from multiple graphs, or we could not find a graph\n and there was no default graph.\n\n Returns:\n The appropriate graph to use for the given inputs.\n\n \"\"\"\n current_default_graph = get_default_graph()\n if current_default_graph.building_function:\n return current_default_graph\n\n op_input_list = tuple(op_input_list) # Handle generators correctly\n if graph and not isinstance(graph, Graph):\n raise TypeError(\"Input graph needs to be a Graph: %s\" % (graph,))\n\n # 1. We validate that all of the inputs are from the same graph. This is\n # either the supplied graph parameter, or the first one selected from one\n # the graph-element-valued inputs. In the latter case, we hold onto\n # that input in original_graph_element so we can provide a more\n # informative error if a mismatch is found.\n original_graph_element = None\n for op_input in op_input_list:\n # Determine if this is a valid graph_element.\n # TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this\n # up.\n graph_element = None\n if (isinstance(op_input, (Operation, internal.NativeObject)) and\n ((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck\n graph_element = op_input\n else:\n graph_element = _as_graph_element(op_input)\n\n if graph_element is not None:\n if not graph:\n original_graph_element = graph_element\n graph = getattr(graph_element, \"graph\", None)\n elif original_graph_element is not None:\n _assert_same_graph(original_graph_element, graph_element)\n elif graph_element.graph is not graph:\n raise ValueError(\"%s is not from the passed-in graph.\" % graph_element)\n\n # 2. If all else fails, we use the default graph, which is always there.\n return graph or current_default_graph\n\n\n@tf_export(v1=[\"GraphKeys\"])\nclass GraphKeys(object):\n \"\"\"Standard names to use for graph collections.\n\n The standard library uses various well-known names to collect and\n retrieve values associated with a graph. For example, the\n `tf.Optimizer` subclasses default to optimizing the variables\n collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is\n specified, but it is also possible to pass an explicit list of\n variables.\n\n The following standard keys are defined:\n\n * `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared\n across distributed environment (model variables are subset of these). See\n `tf.compat.v1.global_variables`\n for more details.\n Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,\n and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.\n * `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each\n machine. 
Usually used for temporarily variables, like counters.\n Note: use `tf.contrib.framework.local_variable` to add to this collection.\n * `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the\n model for inference (feed forward). Note: use\n `tf.contrib.framework.model_variable` to add to this collection.\n * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will\n be trained by an optimizer. See\n `tf.compat.v1.trainable_variables`\n for more details.\n * `SUMMARIES`: the summary `Tensor` objects that have been created in the\n graph. See\n `tf.compat.v1.summary.merge_all`\n for more details.\n * `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to\n produce input for a computation. See\n `tf.compat.v1.train.start_queue_runners`\n for more details.\n * `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also\n keep moving averages. See\n `tf.compat.v1.moving_average_variables`\n for more details.\n * `REGULARIZATION_LOSSES`: regularization losses collected during graph\n construction.\n\n The following standard keys are _defined_, but their collections are **not**\n automatically populated as many of the others are:\n\n * `WEIGHTS`\n * `BIASES`\n * `ACTIVATIONS`\n \"\"\"\n\n # Key to collect Variable objects that are global (shared across machines).\n # Default collection for all variables, except local ones.\n GLOBAL_VARIABLES = \"variables\"\n # Key to collect local variables that are local to the machine and are not\n # saved/restored.\n LOCAL_VARIABLES = \"local_variables\"\n # Key to collect local variables which are used to accumulate interal state\n # to be used in tf.metrics.*.\n METRIC_VARIABLES = \"metric_variables\"\n # Key to collect model variables defined by layers.\n MODEL_VARIABLES = \"model_variables\"\n # Key to collect Variable objects that will be trained by the\n # optimizers.\n TRAINABLE_VARIABLES = \"trainable_variables\"\n # Key to collect summaries.\n SUMMARIES = \"summaries\"\n # Key to collect QueueRunners.\n QUEUE_RUNNERS = \"queue_runners\"\n # Key to collect table initializers.\n TABLE_INITIALIZERS = \"table_initializer\"\n # Key to collect asset filepaths. 
An asset represents an external resource\n # like a vocabulary file.\n ASSET_FILEPATHS = \"asset_filepaths\"\n # Key to collect Variable objects that keep moving averages.\n MOVING_AVERAGE_VARIABLES = \"moving_average_variables\"\n # Key to collect regularization losses at graph construction.\n REGULARIZATION_LOSSES = \"regularization_losses\"\n # Key to collect concatenated sharded variables.\n CONCATENATED_VARIABLES = \"concatenated_variables\"\n # Key to collect savers.\n SAVERS = \"savers\"\n # Key to collect weights\n WEIGHTS = \"weights\"\n # Key to collect biases\n BIASES = \"biases\"\n # Key to collect activations\n ACTIVATIONS = \"activations\"\n # Key to collect update_ops\n UPDATE_OPS = \"update_ops\"\n # Key to collect losses\n LOSSES = \"losses\"\n # Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.\n SAVEABLE_OBJECTS = \"saveable_objects\"\n # Key to collect all shared resources used by the graph which need to be\n # initialized once per cluster.\n RESOURCES = \"resources\"\n # Key to collect all shared resources used in this graph which need to be\n # initialized once per session.\n LOCAL_RESOURCES = \"local_resources\"\n # Trainable resource-style variables.\n TRAINABLE_RESOURCE_VARIABLES = \"trainable_resource_variables\"\n\n # Key to indicate various ops.\n INIT_OP = \"init_op\"\n LOCAL_INIT_OP = \"local_init_op\"\n READY_OP = \"ready_op\"\n READY_FOR_LOCAL_INIT_OP = \"ready_for_local_init_op\"\n SUMMARY_OP = \"summary_op\"\n GLOBAL_STEP = \"global_step\"\n\n # Used to count the number of evaluations performed during a single evaluation\n # run.\n EVAL_STEP = \"eval_step\"\n TRAIN_OP = \"train_op\"\n\n # Key for control flow context.\n COND_CONTEXT = \"cond_context\"\n WHILE_CONTEXT = \"while_context\"\n\n # Used to store v2 summary names.\n _SUMMARY_COLLECTION = \"_SUMMARY_V2\"\n\n # List of all collections that keep track of variables.\n _VARIABLE_COLLECTIONS = [\n GLOBAL_VARIABLES,\n LOCAL_VARIABLES,\n METRIC_VARIABLES,\n MODEL_VARIABLES,\n TRAINABLE_VARIABLES,\n MOVING_AVERAGE_VARIABLES,\n CONCATENATED_VARIABLES,\n TRAINABLE_RESOURCE_VARIABLES,\n ]\n\n # Key for streaming model ports.\n # NOTE(yuanbyu): internal and experimental.\n _STREAMING_MODEL_PORTS = \"streaming_model_ports\"\n\n @decorator_utils.classproperty\n @deprecation.deprecated(None, \"Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.\")\n def VARIABLES(cls): # pylint: disable=no-self-argument\n return cls.GLOBAL_VARIABLES\n\n\ndef dismantle_graph(graph):\n \"\"\"Cleans up reference cycles from a `Graph`.\n\n Helpful for making sure the garbage collector doesn't need to run after a\n temporary `Graph` is no longer needed.\n\n Args:\n graph: A `Graph` object to destroy. Neither it nor any of its ops are usable\n after this function runs.\n \"\"\"\n memory.dismantle_ordered_dict(graph._functions) # pylint: disable=protected-access\n\n # Now clean up Operation<->Graph reference cycles by clearing all of the\n # attributes for the Graph and its ops.\n graph_operations = graph.get_operations()\n for op in graph_operations:\n op.__dict__ = {}\n graph.__dict__ = {}\n\n\n@tf_export(v1=[\"add_to_collection\"])\ndef add_to_collection(name, value):\n \"\"\"Wrapper for `Graph.add_to_collection()` using the default graph.\n\n See `tf.Graph.add_to_collection`\n for more details.\n\n Args:\n name: The key for the collection. 
For example, the `GraphKeys` class\n contains many standard names for collections.\n value: The value to add to the collection.\n\n @compatibility(eager)\n Collections are only supported in eager when variables are created inside\n an EagerVariableStore (e.g. as part of a layer or template).\n @end_compatibility\n \"\"\"\n get_default_graph().add_to_collection(name, value)\n\n\n@tf_export(v1=[\"add_to_collections\"])\ndef add_to_collections(names, value):\n \"\"\"Wrapper for `Graph.add_to_collections()` using the default graph.\n\n See `tf.Graph.add_to_collections`\n for more details.\n\n Args:\n names: The key for the collections. The `GraphKeys` class contains many\n standard names for collections.\n value: The value to add to the collections.\n\n @compatibility(eager)\n Collections are only supported in eager when variables are created inside\n an EagerVariableStore (e.g. as part of a layer or template).\n @end_compatibility\n \"\"\"\n get_default_graph().add_to_collections(names, value)\n\n\n@tf_export(v1=[\"get_collection_ref\"])\ndef get_collection_ref(key):\n \"\"\"Wrapper for `Graph.get_collection_ref()` using the default graph.\n\n See `tf.Graph.get_collection_ref`\n for more details.\n\n Args:\n key: The key for the collection. For example, the `GraphKeys` class contains\n many standard names for collections.\n\n Returns:\n The list of values in the collection with the given `name`, or an empty\n list if no value has been added to that collection. Note that this returns\n the collection list itself, which can be modified in place to change the\n collection.\n\n @compatibility(eager)\n Collections are not supported when eager execution is enabled.\n @end_compatibility\n \"\"\"\n return get_default_graph().get_collection_ref(key)\n\n\n@tf_export(v1=[\"get_collection\"])\ndef get_collection(key, scope=None):\n \"\"\"Wrapper for `Graph.get_collection()` using the default graph.\n\n See `tf.Graph.get_collection`\n for more details.\n\n Args:\n key: The key for the collection. For example, the `GraphKeys` class contains\n many standard names for collections.\n scope: (Optional.) If supplied, the resulting list is filtered to include\n only items whose `name` attribute matches using `re.match`. Items without\n a `name` attribute are never returned if a scope is supplied and the\n choice or `re.match` means that a `scope` without special tokens filters\n by prefix.\n\n Returns:\n The list of values in the collection with the given `name`, or\n an empty list if no value has been added to that collection. The\n list contains the values in the order under which they were\n collected.\n\n @compatibility(eager)\n Collections are not supported when eager execution is enabled.\n @end_compatibility\n \"\"\"\n return get_default_graph().get_collection(key, scope)\n\n\ndef get_all_collection_keys():\n \"\"\"Returns a list of collections used in the default graph.\"\"\"\n return get_default_graph().get_all_collection_keys()\n\n\ndef name_scope(name, default_name=None, values=None, skip_on_eager=True):\n \"\"\"Internal-only entry point for `name_scope*`.\n\n Internal ops do not use the public API and instead rely on\n `ops.name_scope` regardless of the execution mode. This function\n dispatches to the correct `name_scope*` implementation based on\n the arguments provided and the current mode. 
Specifically,\n\n * if `values` contains a graph tensor `Graph.name_scope` is used;\n * `name_scope_v1` is used in graph mode;\n * `name_scope_v2` -- in eager mode.\n\n Args:\n name: The name argument that is passed to the op function.\n default_name: The default name to use if the `name` argument is `None`.\n values: The list of `Tensor` arguments that are passed to the op function.\n skip_on_eager: Indicates to return NullContextmanager if executing eagerly.\n By default this is True since naming tensors and operations in eager mode\n have little use and cause unnecessary performance overhead. However, it is\n important to preserve variable names since they are often useful for\n debugging and saved models.\n\n Returns:\n `name_scope*` context manager.\n \"\"\"\n if not context.executing_eagerly():\n return internal_name_scope_v1(name, default_name, values)\n\n if skip_on_eager:\n return NullContextmanager()\n\n name = default_name if name is None else name\n if values:\n # The presence of a graph tensor in `values` overrides the context.\n # TODO(slebedev): this is Keras-specific and should be removed.\n # pylint: disable=unidiomatic-typecheck\n graph_value = next((value for value in values if type(value) == Tensor),\n None)\n # pylint: enable=unidiomatic-typecheck\n if graph_value is not None:\n return graph_value.graph.name_scope(name)\n\n return name_scope_v2(name or \"\")\n\n\nclass internal_name_scope_v1(object): # pylint: disable=invalid-name\n \"\"\"Graph-only version of `name_scope_v1`.\"\"\"\n\n @property\n def name(self):\n return self._name\n\n def __init__(self, name, default_name=None, values=None):\n \"\"\"Initialize the context manager.\n\n Args:\n name: The name argument that is passed to the op function.\n default_name: The default name to use if the `name` argument is `None`.\n values: The list of `Tensor` arguments that are passed to the op function.\n\n Raises:\n TypeError: if `default_name` is passed in but not a string.\n \"\"\"\n if not (default_name is None or isinstance(default_name, six.string_types)):\n raise TypeError(\n \"`default_name` type (%s) is not a string type. 
You likely meant to \"\n \"pass this into the `values` kwarg.\" % type(default_name))\n self._name = default_name if name is None else name\n self._default_name = default_name\n self._values = values\n\n def __enter__(self):\n \"\"\"Start the scope block.\n\n Returns:\n The scope name.\n\n Raises:\n ValueError: if neither `name` nor `default_name` is provided\n but `values` are.\n \"\"\"\n if self._name is None and self._values is not None:\n # We only raise an error if values is not None (provided) because\n # currently tf.name_scope(None) (values=None then) is sometimes used as\n # an idiom to reset to top scope.\n raise ValueError(\n \"At least one of name (%s) and default_name (%s) must be provided.\"\n % (self._name, self._default_name))\n\n g = get_default_graph()\n if self._values and not g.building_function:\n # Specialize based on the knowledge that `_get_graph_from_inputs()`\n # ignores `inputs` when building a function.\n g_from_inputs = _get_graph_from_inputs(self._values)\n if g_from_inputs is not g:\n g = g_from_inputs\n self._g_manager = g.as_default()\n self._g_manager.__enter__()\n else:\n self._g_manager = None\n else:\n self._g_manager = None\n\n try:\n self._name_scope = g.name_scope(self._name)\n return self._name_scope.__enter__()\n except:\n if self._g_manager is not None:\n self._g_manager.__exit__(*sys.exc_info())\n raise\n\n def __exit__(self, *exc_info):\n self._name_scope.__exit__(*exc_info)\n if self._g_manager is not None:\n self._g_manager.__exit__(*exc_info)\n\n\n# Named like a function for backwards compatibility with the\n# @tf_contextlib.contextmanager version, which was switched to a class to avoid\n# some object creation overhead.\n@tf_export(v1=[\"name_scope\"])\nclass name_scope_v1(object): # pylint: disable=invalid-name\n \"\"\"A context manager for use when defining a Python op.\n\n This context manager validates that the given `values` are from the\n same graph, makes that graph the default graph, and pushes a\n name scope in that graph (see\n `tf.Graph.name_scope`\n for more details on that).\n\n For example, to define a new Python op called `my_op`:\n\n ```python\n def my_op(a, b, c, name=None):\n with tf.name_scope(name, \"MyOp\", [a, b, c]) as scope:\n a = tf.convert_to_tensor(a, name=\"a\")\n b = tf.convert_to_tensor(b, name=\"b\")\n c = tf.convert_to_tensor(c, name=\"c\")\n # Define some computation that uses `a`, `b`, and `c`.\n return foo_op(..., name=scope)\n ```\n \"\"\"\n\n __slots__ = [\"_name\", \"_name_scope\"]\n\n @property\n def name(self):\n return self._name\n\n def __init__(self, name, default_name=None, values=None):\n \"\"\"Initialize the context manager.\n\n Args:\n name: The name argument that is passed to the op function.\n default_name: The default name to use if the `name` argument is `None`.\n values: The list of `Tensor` arguments that are passed to the op function.\n\n Raises:\n TypeError: if `default_name` is passed in but not a string.\n \"\"\"\n self._name_scope = name_scope(\n name, default_name, values, skip_on_eager=False)\n self._name = default_name if name is None else name\n\n def __enter__(self):\n return self._name_scope.__enter__()\n\n def __exit__(self, *exc_info):\n return self._name_scope.__exit__(*exc_info)\n\n\n@tf_export(\"get_current_name_scope\", v1=[])\ndef get_current_name_scope():\n \"\"\"Returns current full name scope specified by `tf.name_scope(...)`s.\n\n For example,\n ```python\n with tf.name_scope(\"outer\"):\n tf.get_current_name_scope() # \"outer\"\n\n with tf.name_scope(\"inner\"):\n 
tf.get_current_name_scope() # \"outer/inner\"\n ```\n\n In other words, `tf.get_current_name_scope()` returns the op name prefix that\n will be prepended to, if an op is created at that place.\n\n Note that `@tf.function` resets the name scope stack as shown below.\n\n ```\n with tf.name_scope(\"outer\"):\n\n @tf.function\n def foo(x):\n with tf.name_scope(\"inner\"):\n return tf.add(x * x) # Op name is \"inner/Add\", not \"outer/inner/Add\"\n ```\n \"\"\"\n\n ctx = context.context()\n if ctx.executing_eagerly():\n return ctx.scope_name.rstrip(\"/\")\n else:\n return get_default_graph().get_name_scope()\n\n\n@tf_export(\"name_scope\", v1=[])\nclass name_scope_v2(object):\n \"\"\"A context manager for use when defining a Python op.\n\n This context manager pushes a name scope, which will make the name of all\n operations added within it have a prefix.\n\n For example, to define a new Python op called `my_op`:\n\n ```python\n def my_op(a, b, c, name=None):\n with tf.name_scope(\"MyOp\") as scope:\n a = tf.convert_to_tensor(a, name=\"a\")\n b = tf.convert_to_tensor(b, name=\"b\")\n c = tf.convert_to_tensor(c, name=\"c\")\n # Define some computation that uses `a`, `b`, and `c`.\n return foo_op(..., name=scope)\n ```\n\n When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,\n and `MyOp/c`.\n\n Inside a `tf.function`, if the scope name already exists, the name will be\n made unique by appending `_n`. For example, calling `my_op` the second time\n will generate `MyOp_1/a`, etc.\n \"\"\"\n\n __slots__ = [\"_name\", \"_exit_fns\"]\n\n def __init__(self, name):\n \"\"\"Initialize the context manager.\n\n Args:\n name: The prefix to use on all names created within the name scope.\n\n Raises:\n ValueError: If name is not a string.\n \"\"\"\n if not isinstance(name, six.string_types):\n raise ValueError(\"name for name_scope must be a string.\")\n self._name = name\n self._exit_fns = []\n\n @property\n def name(self):\n return self._name\n\n def __enter__(self):\n \"\"\"Start the scope block.\n\n Returns:\n The scope name.\n \"\"\"\n ctx = context.context()\n if ctx.executing_eagerly():\n # Names are not auto-incremented in eager mode.\n # A trailing slash breaks out of nested name scopes, indicating a\n # fully specified scope name, for compatibility with Graph.name_scope.\n # This also prevents auto-incrementing.\n old_name = ctx.scope_name\n name = self._name\n if not name:\n scope_name = \"\"\n elif name[-1] == \"/\":\n scope_name = name\n elif old_name:\n scope_name = old_name + name + \"/\"\n else:\n scope_name = name + \"/\"\n ctx.scope_name = scope_name\n\n def _restore_name_scope(*_):\n ctx.scope_name = old_name\n\n self._exit_fns.append(_restore_name_scope)\n else:\n scope = get_default_graph().name_scope(self._name)\n scope_name = scope.__enter__()\n self._exit_fns.append(scope.__exit__)\n return scope_name\n\n def __exit__(self, type_arg, value_arg, traceback_arg):\n self._exit_fns.pop()(type_arg, value_arg, traceback_arg)\n return False # False values do not suppress exceptions\n\n def __getstate__(self):\n return self._name, self._exit_fns\n\n def __setstate__(self, state):\n self._name = state[0]\n self._exit_fns = state[1]\n\n\ndef strip_name_scope(name, export_scope):\n \"\"\"Removes name scope from a name.\n\n Args:\n name: A `string` name.\n export_scope: Optional `string`. 
Name scope to remove.\n\n Returns:\n Name with name scope removed, or the original name if export_scope\n is None.\n \"\"\"\n if export_scope:\n if export_scope[-1] == \"/\":\n export_scope = export_scope[:-1]\n\n try:\n # Strips export_scope/, export_scope///,\n # ^export_scope/, loc:@export_scope/.\n str_to_replace = r\"([\\^]|loc:@|^)\" + export_scope + r\"[\\/]+(.*)\"\n return re.sub(str_to_replace, r\"\\1\\2\", compat.as_str(name), count=1)\n except TypeError as e:\n # If the name is not of a type we can process, simply return it.\n logging.warning(e)\n return name\n else:\n return name\n\n\ndef prepend_name_scope(name, import_scope):\n \"\"\"Prepends name scope to a name.\n\n Args:\n name: A `string` name.\n import_scope: Optional `string`. Name scope to add.\n\n Returns:\n Name with name scope added, or the original name if import_scope\n is None.\n \"\"\"\n if import_scope:\n if import_scope[-1] == \"/\":\n import_scope = import_scope[:-1]\n\n try:\n str_to_replace = r\"([\\^]|loc:@|^)(.*)\"\n return re.sub(str_to_replace, r\"\\1\" + import_scope + r\"/\\2\",\n compat.as_str(name))\n except TypeError as e:\n # If the name is not of a type we can process, simply return it.\n logging.warning(e)\n return name\n else:\n return name\n\n\n# pylint: disable=g-doc-return-or-yield\n# pylint: disable=not-context-manager\n@tf_export(v1=[\"op_scope\"])\n@tf_contextlib.contextmanager\ndef op_scope(values, name, default_name=None):\n \"\"\"DEPRECATED. Same as name_scope above, just different argument order.\"\"\"\n logging.warn(\"tf.op_scope(values, name, default_name) is deprecated,\"\n \" use tf.name_scope(name, default_name, values)\")\n with name_scope(name, default_name=default_name, values=values) as scope:\n yield scope\n\n\n_proto_function_registry = registry.Registry(\"proto functions\")\n\n\ndef register_proto_function(collection_name,\n proto_type=None,\n to_proto=None,\n from_proto=None):\n \"\"\"Registers `to_proto` and `from_proto` functions for collection_name.\n\n `to_proto` function converts a Python object to the corresponding protocol\n buffer, and returns the protocol buffer.\n\n `from_proto` function converts protocol buffer into a Python object, and\n returns the object..\n\n Args:\n collection_name: Name of the collection.\n proto_type: Protobuf type, such as `saver_pb2.SaverDef`,\n `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`..\n to_proto: Function that implements Python object to protobuf conversion.\n from_proto: Function that implements protobuf to Python object conversion.\n \"\"\"\n if to_proto and not callable(to_proto):\n raise TypeError(\"to_proto must be callable.\")\n if from_proto and not callable(from_proto):\n raise TypeError(\"from_proto must be callable.\")\n\n _proto_function_registry.register((proto_type, to_proto, from_proto),\n collection_name)\n\n\ndef get_collection_proto_type(collection_name):\n \"\"\"Returns the proto_type for collection_name.\"\"\"\n try:\n return _proto_function_registry.lookup(collection_name)[0]\n except LookupError:\n return None\n\n\ndef get_to_proto_function(collection_name):\n \"\"\"Returns the to_proto function for collection_name.\"\"\"\n try:\n return _proto_function_registry.lookup(collection_name)[1]\n except LookupError:\n return None\n\n\ndef get_from_proto_function(collection_name):\n \"\"\"Returns the from_proto function for collection_name.\"\"\"\n try:\n return _proto_function_registry.lookup(collection_name)[2]\n except LookupError:\n return None\n\n\ndef _op_to_colocate_with(v, graph):\n 
\"\"\"Operation object corresponding to v to use for colocation constraints.\"\"\"\n if v is None:\n return None, None\n if isinstance(v, Operation):\n return v, None\n\n # We always want to colocate with the reference op.\n # When 'v' is a ResourceVariable, the reference op is the handle creating op.\n #\n # What this should be is:\n # if isinstance(v, ResourceVariable):\n # return v.handle.op, v\n # However, that would require a circular import dependency.\n # As of October 2018, there were attempts underway to remove\n # colocation constraints altogether. Assuming that will\n # happen soon, perhaps this hack to work around the circular\n # import dependency is acceptable.\n if hasattr(v, \"handle\") and isinstance(v.handle, Tensor):\n device_only_candidate = lambda: None\n device_only_candidate.device = v.device\n device_only_candidate.name = v.name\n if graph.building_function:\n return graph.capture(v.handle).op, device_only_candidate\n else:\n return v.handle.op, device_only_candidate\n return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op, None\n\n\ndef _is_keras_symbolic_tensor(x):\n return hasattr(x, \"graph\") and getattr(x.graph, \"name\", None) == \"keras_graph\"\n\n\n# These symbols were originally defined in this module; import them for\n# backwards compatibility until all references have been updated to access\n# them from the indexed_slices.py module.\nIndexedSlices = indexed_slices.IndexedSlices\nIndexedSlicesValue = indexed_slices.IndexedSlicesValue\nconvert_to_tensor_or_indexed_slices = \\\n indexed_slices.convert_to_tensor_or_indexed_slices\nconvert_n_to_tensor_or_indexed_slices = \\\n indexed_slices.convert_n_to_tensor_or_indexed_slices\ninternal_convert_to_tensor_or_indexed_slices = \\\n indexed_slices.internal_convert_to_tensor_or_indexed_slices\ninternal_convert_n_to_tensor_or_indexed_slices = \\\n indexed_slices.internal_convert_n_to_tensor_or_indexed_slices\nregister_tensor_conversion_function = \\\n tensor_conversion_registry.register_tensor_conversion_function\n\n\n# Helper functions for op wrapper modules generated by `python_op_gen`.\n\n\ndef to_raw_op(f):\n \"\"\"Make a given op wrapper function `f` raw.\n\n Raw op wrappers can only be called with keyword arguments.\n\n Args:\n f: An op wrapper function to make raw.\n\n Returns:\n Raw `f`.\n \"\"\"\n # Copy `f` to get a new `__dict__`, otherwise `tf_export` will fail\n # due to double-registration.\n f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__,\n f.__closure__)\n return kwarg_only(f)\n\n\ndef raise_from_not_ok_status(e, name):\n message = e.message + (\" name: \" + name if name is not None else \"\")\n raise core._status_to_exception(e.code, message) from None # pylint: disable=protected-access\n\n\ndef add_exit_callback_to_default_func_graph(fn):\n \"\"\"Add a callback to run when the default function graph goes out of scope.\n\n Usage:\n\n ```python\n @tf.function\n def fn(x, v):\n expensive = expensive_object(v)\n add_exit_callback_to_default_func_graph(lambda: expensive.release())\n return g(x, expensive)\n\n fn(x=tf.constant(...), v=...)\n # `expensive` has been released.\n ```\n\n Args:\n fn: A callable that takes no arguments and whose output is ignored.\n To be executed when exiting func graph scope.\n\n Raises:\n RuntimeError: If executed when the current default graph is not a FuncGraph,\n or not currently executing in function creation mode (e.g., if inside\n an init_scope).\n \"\"\"\n default_graph = get_default_graph()\n if not 
default_graph._building_function: # pylint: disable=protected-access\n raise RuntimeError(\n \"Cannot add scope exit callbacks when not building a function. \"\n \"Default graph: {}\".format(default_graph))\n default_graph._add_scope_exit_callback(fn) # pylint: disable=protected-access\n\n\ndef _reconstruct_sequence_inputs(op_def, inputs, attrs):\n \"\"\"Regroups a flat list of input tensors into scalar and sequence inputs.\n\n Args:\n op_def: The `op_def_pb2.OpDef` (for knowing the input types)\n inputs: a list of input `Tensor`s to the op.\n attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define\n how long each sequence is)\n\n Returns:\n A list of `Tensor`s (corresponding to scalar inputs) and lists of\n `Tensor`s (corresponding to sequence inputs).\n \"\"\"\n grouped_inputs = []\n i = 0\n for input_arg in op_def.input_arg:\n if input_arg.number_attr:\n input_len = attrs[input_arg.number_attr].i\n is_sequence = True\n elif input_arg.type_list_attr:\n input_len = len(attrs[input_arg.type_list_attr].list.type)\n is_sequence = True\n else:\n input_len = 1\n is_sequence = False\n\n if is_sequence:\n grouped_inputs.append(inputs[i:i + input_len])\n else:\n grouped_inputs.append(inputs[i])\n i += input_len\n\n assert i == len(inputs)\n return grouped_inputs\n\n\n_numpy_style_type_promotion = False\n\n\ndef enable_numpy_style_type_promotion():\n \"\"\"If called, follows NumPy's rules for type promotion.\n\n Used for enabling NumPy behavior on methods for TF NumPy.\n \"\"\"\n global _numpy_style_type_promotion\n _numpy_style_type_promotion = True\n\n\n_numpy_style_slicing = False\n\n\ndef enable_numpy_style_slicing():\n \"\"\"If called, follows NumPy's rules for slicing Tensors.\n\n Used for enabling NumPy behavior on slicing for TF NumPy.\n \"\"\"\n global _numpy_style_slicing\n _numpy_style_slicing = True\n\n\nclass _TensorIterator(object):\n \"\"\"Iterates over the leading dim of a Tensor. 
Performs no error checks.\"\"\"\n\n __slots__ = [\"_tensor\", \"_index\", \"_limit\"]\n\n def __init__(self, tensor, dim0):\n self._tensor = tensor\n self._index = 0\n self._limit = dim0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self._index == self._limit:\n raise StopIteration\n result = self._tensor[self._index]\n self._index += 1\n return result\n\n next = __next__ # python2.x compatibility.\n\n\ndef set_int_list_attr(op, attr_name, ints):\n \"\"\"TF internal method used to set a list(int) attribute in the node_def.\"\"\"\n ints_list = attr_value_pb2.AttrValue.ListValue(i=ints)\n op._set_attr(attr_name, attr_value_pb2.AttrValue(list=ints_list)) # pylint:disable=protected-access\n\n\ndef _get_enclosing_context(graph):\n # pylint: disable=protected-access\n if graph is None:\n return None\n\n if graph._control_flow_context is not None:\n return graph._control_flow_context\n\n if graph.building_function and hasattr(graph, \"outer_graph\"):\n return _get_enclosing_context(graph.outer_graph)\n\n\ndef get_resource_handle_data(graph_op):\n assert type(graph_op) == Tensor # pylint: disable=unidiomatic-typecheck\n\n handle_data = pywrap_tf_session.GetHandleShapeAndType(\n graph_op.graph._c_graph, graph_op._as_tf_output()) # pylint: disable=protected-access\n\n return cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData.FromString(\n compat.as_bytes(handle_data))\n\n\ndef _copy_handle_data_to_arg_def(tensor, arg_def):\n handle_data = get_resource_handle_data(tensor)\n if handle_data.shape_and_type:\n shape_and_type = handle_data.shape_and_type[0]\n proto = arg_def.handle_data.add()\n proto.dtype = shape_and_type.dtype\n proto.shape.CopyFrom(handle_data.shape_and_type[0].shape)\n" ]
[ [ "tensorflow.python.client.pywrap_tf_session.TF_OperationGetAttrInt", "tensorflow.python.framework.device.is_device_spec", "tensorflow.core.framework.attr_value_pb2.AttrValue.ListValue", "tensorflow.python.tf2.enabled", "tensorflow.python.util.memory.dismantle_ordered_dict", "tensorflow.python.client.pywrap_tf_session.TF_OperationName", "tensorflow.python.framework.traceable_stack.TraceableStack", "tensorflow.python.framework.c_api_util.tf_buffer", "tensorflow.python.platform.tf_logging.vlog", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.framework.tensor_shape.unknown_shape", "tensorflow.python.framework.c_api_util.new_tf_operations", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.util.compat.as_str", "tensorflow.python.client.pywrap_tf_session.TF_GraphToGraphDef", "tensorflow.python.eager.context.graph_mode", "tensorflow.python.profiler.trace.trace_wrapper", "tensorflow.python.eager.context.context", "tensorflow.python.client.pywrap_tf_session.TF_OperationGetAttrBool", "tensorflow.python.util.function_utils.get_func_name", "tensorflow.python.framework.registry.Registry", "tensorflow.python.client.pywrap_tf_session.TF_OperationNumInputs", "tensorflow.python.eager.context.device", "tensorflow.python.client.pywrap_tf_session.TF_GetBuffer", "tensorflow.python.client.pywrap_tf_session.RemoveAllControlInputs", "tensorflow.python.client.pywrap_tf_session.TF_GraphVersions", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.client.pywrap_tf_session.SetRequireShapeInferenceFns", "tensorflow.python.util.function_utils.get_func_code", "numpy.array", "tensorflow.python.client.pywrap_tf_session.TF_OperationToNodeDef", "tensorflow.python.client.pywrap_tf_session.TF_OperationDevice", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.client.pywrap_tf_session.SetRequestedDevice", "tensorflow.python.client.pywrap_tf_session.TF_OperationGetAttrType", "tensorflow.python.eager.tape.record_operation", "tensorflow.python.client.pywrap_tf_session.TF_OperationGetAttrValueProto", "tensorflow.core.framework.op_def_pb2.OpDef", "tensorflow.python.eager.tape.stop_recording", "tensorflow.python.util.tf_stack.extract_stack_for_node", "tensorflow.python.client.pywrap_tf_session.TF_GraphCopyFunction", "tensorflow.python.client.pywrap_tf_session.AddControlInput", "tensorflow.core.framework.versions_pb2.VersionDef", "tensorflow.python.client.pywrap_tf_session.TF_FinishOperation", "tensorflow.python.client.pywrap_tf_session.TF_DeleteBuffer", "tensorflow.python.eager.monitoring.BoolGauge", "tensorflow.python.framework.device.merge_device", "tensorflow.python.client.pywrap_tf_session.TF_OperationOpType", "tensorflow.python.client.pywrap_tf_session.TF_OperationGetControlInputs_wrapper", "tensorflow.core.framework.function_pb2.GradientDef", "tensorflow.python.ops.control_flow_util.CheckInputFromValidContext", "tensorflow.python.pywrap_tfe.TFE_Py_InitEagerTensor", "tensorflow.python.util.object_identity.ObjectIdentitySet", "tensorflow.python.util.lock_util.GroupLock", "tensorflow.python.pywrap_tfe.TFE_Py_UID", "tensorflow.python.client.pywrap_tf_session.TF_Input", "tensorflow.python.framework.c_api_util.tf_output", "tensorflow.python.client.pywrap_tf_session.TF_OperationNumOutputs", "tensorflow.python.util.deprecation.deprecated_args", "tensorflow.python.eager.context.Context", "tensorflow.python.eager.core._status_to_exception", "tensorflow.python.util.object_identity.Reference", 
"tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.platform.tf_logging.warn", "tensorflow.python.util.tf_export.kwarg_only", "tensorflow.python.client.pywrap_tf_session.GetOperationInputs", "tensorflow.core.framework.node_def_pb2.NodeDef", "tensorflow.python.client.pywrap_tf_session.TF_Output", "tensorflow.python.eager.context.context_safe", "tensorflow.python.client.pywrap_tf_session.SetAttr", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.client.pywrap_tf_session.TF_OperationOutputType", "tensorflow.core.framework.attr_value_pb2.AttrValue", "tensorflow.core.framework.attr_value_pb2.NameAttrList", "tensorflow.core.framework.graph_pb2.GraphDef", "tensorflow.python.client.pywrap_tf_session.TF_AddControlInput", "tensorflow.python.client.pywrap_tf_session.ClearAttr", "tensorflow.python.util.deprecation.deprecated_argument_lookup", "tensorflow.python.util.deprecation.deprecated_endpoints", "tensorflow.python.framework.c_api_util.ScopedTFGraph", "tensorflow.python.client.pywrap_tf_session.TF_OperationGetControlOutputs_wrapper" ] ]
RWTH-EBC/EHDO
[ "7b03262fba126adf829b6ad1ad6b800db05ae880" ]
[ "optimization_model/clustering_medoid.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n\nEHDO - ENERGY HUB DESIGN OPTIMIZATION Tool\n\nDeveloped by: E.ON Energy Research Center, \n Institute for Energy Efficient Buildings and Indoor Climate, \n RWTH Aachen University, \n Germany\n \nContact: Marco Wirtz \n [email protected]\n\n k-medoids clustering implemented by Thomas Schütz (2015).\n\"\"\"\n\nfrom __future__ import division\nimport numpy as np\nimport math\nimport optim_app.help_functions.k_medoids\n\ndef _distances(values, norm=2):\n \"\"\"\n Compute distance matrix for all data sets (rows of values)\n \n Parameters\n ----------\n values : 2-dimensional array\n Rows represent days and columns values\n norm : integer, optional\n Compute the distance according to this norm. 2 is the standard\n Euklidean-norm.\n \n Return\n ------\n d : 2-dimensional array\n Distances between each data set\n \"\"\"\n # Initialize distance matrix\n d = np.zeros((values.shape[1], values.shape[1]))\n\n # Define a function that computes the distance between two days\n dist = (lambda day1, day2, r: \n math.pow(np.sum(np.power(np.abs(day1 - day2), r)), 1/r))\n\n # Remember: The d matrix is symmetrical!\n for i in range(values.shape[1]): # loop over first days\n for j in range(i+1, values.shape[1]): # loop second days\n d[i, j] = dist(values[:,i], values[:,j], norm)\n \n # Fill the remaining entries\n d = d + d.T\n \n return d\n\n\ndef cluster(inputs, number_clusters=12, norm=2, time_limit=300, mip_gap=0.0,\n weights=None):\n \"\"\"\n Cluster a set of inputs into clusters by solving a k-medoid problem.\n \n Parameters\n ----------\n inputs : 2-dimensional array\n First dimension: Number of different input types.\n Second dimension: Values for each time step of interes.\n number_clusters : integer, optional\n How many clusters shall be computed?\n norm : integer, optional\n Compute the distance according to this norm. 2 is the standard\n Euklidean-norm.\n time_limit : integer, optional\n Time limit for the optimization in seconds\n mip_gap : float, optional\n Optimality tolerance (0: proven global optimum)\n weights : 1-dimensional array, optional\n Weight for each input. If not provided, all inputs are treated equally.\n \n Returns\n -------\n scaled_typ_days : \n Scaled typical demand days. 
The scaling is based on the annual demands.\n nc : array_like\n Weighting factors of each cluster\n z : 2-dimensional array\n Mapping of each day to the clusters\n \"\"\"\n # Determine time steps per day\n len_day = int(inputs.shape[1] / 365)\n \n # Set weights if not already given\n if weights == None:\n weights = np.ones(inputs.shape[0])\n elif not sum(weights) == 1: # Rescale weights\n weights = np.array(weights) / sum(weights)\n \n # Manipulate inputs\n # Initialize arrays\n inputsTransformed = []\n inputsScaled = []\n inputsScaledTransformed = []\n \n # Fill and reshape\n # Scaling to values between 0 and 1, thus all inputs shall have the same\n # weight and will be clustered equally in terms of quality \n for i in range(inputs.shape[0]):\n vals = inputs[i,:]\n if np.max(vals) == np.min(vals):\n temp = np.zeros_like(vals)\n else:\n temp = ((vals - np.min(vals)) / (np.max(vals) - np.min(vals))\n * math.sqrt(weights[i]))\n inputsScaled.append(temp)\n inputsScaledTransformed.append(temp.reshape((len_day, 365), order=\"F\"))\n inputsTransformed.append(vals.reshape((len_day, 365), order=\"F\"))\n\n # Put the scaled and reshaped inputs together\n L = np.concatenate(tuple(inputsScaledTransformed))\n\n # Compute distances\n d = _distances(L, norm)\n\n # Execute optimization model\n (y, z, obj) = optim_app.help_functions.k_medoids.k_medoids(d, number_clusters, time_limit, mip_gap)\n \n # Section 2.3 and retain typical days\n nc = np.zeros_like(y)\n typicalDays = []\n\n # nc contains how many days are there in each cluster\n nc = []\n for i in range(len(y)):\n temp = np.sum(z[i,:])\n if temp > 0:\n nc.append(temp)\n typicalDays.append([ins[:,i] for ins in inputsTransformed])\n\n typicalDays = np.array(typicalDays)\n nc = np.array(nc, dtype=\"int\")\n nc_cumsum = np.cumsum(nc) * len_day\n\n # Construct (yearly) load curves\n # ub = upper bound, lb = lower bound\n clustered = np.zeros_like(inputs)\n for i in range(len(nc)):\n if i == 0:\n lb = 0\n else:\n lb = nc_cumsum[i-1]\n ub = nc_cumsum[i]\n \n for j in range(len(inputsTransformed)):\n clustered[j, lb:ub] = np.tile(typicalDays[i][j], nc[i])\n\n # Scaling to preserve original demands\n sums_inputs = [np.sum(inputs[j,:]) for j in range(inputs.shape[0])]\n scaled = np.array([nc[day] * typicalDays[day,:,:] \n for day in range(number_clusters)])\n sums_scaled = [np.sum(scaled[:,j,:]) if not np.sum(scaled[:,j,:]) == 0 else 1 \n for j in range(inputs.shape[0])]\n scaling_factors = [sums_inputs[j] / sums_scaled[j] \n for j in range(inputs.shape[0])]\n scaled_typ_days = [scaling_factors[j] * typicalDays[:,j,:]\n for j in range(inputs.shape[0])]\n \n return (scaled_typ_days, nc, z)" ]
[ [ "numpy.max", "numpy.array", "numpy.zeros_like", "numpy.zeros", "numpy.sum", "numpy.ones", "numpy.tile", "numpy.min", "numpy.abs", "numpy.cumsum" ] ]
zhangyu233/mvscode
[ "883b2ba1a2ed61198a331f83d5ec7813b7204ef3" ]
[ "DTUmesh.py" ]
[ "import open3d as o3d\nimport numpy as np\nfrom sklearn import preprocessing\npath = \"E:\\ztt\\data\\MVS\\Points_MVS\\Points\\camp\"\n\n\n\nif __name__ == \"__main__\":\n\n meshpath =\"E:\\ztt\\data\\MVS\\SampleSet\\SampleSet\\MVS Data\\Surfaces\\camp\"\n #\n # mesh = o3d.io.read_triangle_mesh(meshpath + \"/camp001_l3_surf_11_trim_8.ply\")\n # o3d.visualization.draw_geometries([mesh])\n mesh = o3d.io.read_triangle_mesh(\"E:\\ztt\\data\\MVS\\SampleSet\\SampleSet\\MVS Data\\Surfaces\\camp\" + \"\\camp001_l3_surf_11_trim_8.ply\")\n z = np.asarray(mesh.vertices)\n index = np.random.permutation(range(1000))\n\n z2 = z[index,:]\n min_max_scaler = preprocessing.MinMaxScaler()\n X_minMax = min_max_scaler.fit_transform(z)\n test_pcd = o3d.geometry.PointCloud()\n test_pcd.points = o3d.utility.Vector3dVector(z2)\n\n\n o3d.visualization.draw_geometries([test_pcd])\n print(mesh)\n # o3d.io.write_triangle_mesh(\"copy_of_knot.ply\", mesh)\n\n # print(\"Testing IO for images ...\")\n # img = o3d.io.read_image(\"../../TestData/lena_color.jpg\")\n # print(img)\n # o3d.io.write_image(\"copy_of_lena_color.jpg\", img)\n z =1" ]
[ [ "numpy.asarray", "sklearn.preprocessing.MinMaxScaler" ] ]
hologerry/diffvg
[ "3ef584606d414c075f2a2d2d29ae9e9c14a66ac0" ]
[ "apps/single_open_curve_thickness.py" ]
[ "from subprocess import call\nimport pydiffvg\nimport torch\n# import skimage\n\npydiffvg.set_print_timing(True)\n\n# Use GPU if available\npydiffvg.set_use_gpu(torch.cuda.is_available())\n\ncanvas_width, canvas_height = 256, 256\nnum_control_points = torch.tensor([2])\npoints = torch.tensor([[120.0, 30.0], # base\n [150.0, 60.0], # control point\n [90.0, 198.0], # control point\n [60.0, 218.0]]) # base\nthickness = torch.tensor([10.0, 5.0, 4.0, 20.0])\npath = pydiffvg.Path(num_control_points=num_control_points,\n points=points,\n is_closed=False,\n stroke_width=thickness)\nshapes = [path]\npath_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([0]),\n fill_color=None,\n stroke_color=torch.tensor([0.6, 0.3, 0.6, 0.8]))\nshape_groups = [path_group]\nscene_args = pydiffvg.RenderFunction.serialize_scene(\n canvas_width, canvas_height, shapes, shape_groups)\n\nrender = pydiffvg.RenderFunction.apply\nimg = render(256, # width\n 256, # height\n 2, # num_samples_x\n 2, # num_samples_y\n 0, # seed\n None, # background_image\n *scene_args)\n# The output image is in linear RGB space. Do Gamma correction before saving the image.\npydiffvg.imwrite(img.cpu(), 'results/single_open_curve_thickness/target.png', gamma=2.2)\ntarget = img.clone()\n\n# Move the path to produce initial guess\n# normalize points for easier learning rate\npoints_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base\n [155.0/256.0, 65.0/256.0], # control point\n [100.0/256.0, 180.0/256.0], # control point\n [65.0/256.0, 238.0/256.0]], # base\n requires_grad=True)\nthickness_n = torch.tensor([10.0 / 100.0, 10.0 / 100.0, 10.0 / 100.0, 10.0 / 100.0],\n requires_grad=True)\nstroke_color = torch.tensor([0.4, 0.7, 0.5, 0.5], requires_grad=True)\npath.points = points_n * 256\npath.stroke_width = thickness_n * 100\npath_group.stroke_color = stroke_color\nscene_args = pydiffvg.RenderFunction.serialize_scene(\n canvas_width, canvas_height, shapes, shape_groups)\nimg = render(256, # width\n 256, # height\n 2, # num_samples_x\n 2, # num_samples_y\n 1, # seed\n None, # background_image\n *scene_args)\npydiffvg.imwrite(img.cpu(), 'results/single_open_curve_thickness/init.png', gamma=2.2)\n\n# Optimize\noptimizer = torch.optim.Adam([points_n, thickness_n, stroke_color], lr=1e-2)\n# Run 200 Adam iterations.\nfor t in range(200):\n print('iteration:', t)\n optimizer.zero_grad()\n # Forward pass: render the image.\n path.points = points_n * 256\n path.stroke_width = thickness_n * 100\n path_group.stroke_color = stroke_color\n scene_args = pydiffvg.RenderFunction.serialize_scene(\n canvas_width, canvas_height, shapes, shape_groups)\n img = render(256, # width\n 256, # height\n 2, # num_samples_x\n 2, # num_samples_y\n t+1, # seed\n None, # background_image\n *scene_args)\n # Save the intermediate render.\n pydiffvg.imwrite(img.cpu(), 'results/single_open_curve_thickness/iter_{}.png'.format(t), gamma=2.2)\n # Compute the loss function. 
Here it is L2.\n loss = (img - target).pow(2).sum()\n print('loss:', loss.item())\n\n # Backpropagate the gradients.\n loss.backward()\n # Print the gradients\n print('points_n.grad:', points_n.grad)\n print('thickness_n.grad:', thickness_n.grad)\n print('stroke_color.grad:', stroke_color.grad)\n\n # Take a gradient descent step.\n optimizer.step()\n # Print the current params.\n print('points:', path.points)\n print('thickness:', path.stroke_width)\n print('stroke_color:', path_group.stroke_color)\n\n# Render the final result.\npath.points = points_n * 256\npath.stroke_width = thickness_n * 100\npath_group.stroke_color = stroke_color\nscene_args = pydiffvg.RenderFunction.serialize_scene(\n canvas_width, canvas_height, shapes, shape_groups)\nimg = render(256, # width\n 256, # height\n 2, # num_samples_x\n 2, # num_samples_y\n 202, # seed\n None, # background_image\n *scene_args)\n# Save the images and differences.\npydiffvg.imwrite(img.cpu(), 'results/single_open_curve_thickness/final.png')\n\n# Convert the intermediate renderings to a video.\ncall([\"ffmpeg\", \"-framerate\", \"24\", \"-i\",\n \"results/single_open_curve_thickness/iter_%d.png\", \"-vb\", \"20M\",\n \"results/single_open_curve_thickness/out.mp4\"])\n" ]
[ [ "torch.cuda.is_available", "torch.optim.Adam", "torch.tensor" ] ]
tonyD1999/haralick-labeling-visualized
[ "beeb8acaa57f2ff0793618acdc08666ab8a17f32" ]
[ "duc_algo.py" ]
[ "from pathlib import Path\n\nimport numpy as np\n\nfrom utils import neighborhood_values\nfrom visualizer import HaralickVisualizer\n\n\ndef ccl_8(image, display=False):\n def pad_with(vector, pad_width, iaxis, kwargs):\n pad_value = kwargs.get('padder', 0)\n vector[:pad_width[0]] = pad_value\n vector[-pad_width[1]:] = pad_value\n equil_relationship = {0:[]}\n image = np.pad(image, 1, pad_with)\n image[image > 0] = np.arange(1, np.sum(image > 0) + 1)\n visualizer = HaralickVisualizer(index_image=image,\n canvas_size=(512, 512),\n output_dir=Path('./output'))\n\n\n state = {\n 'iter': 0,\n 'finished': False,\n 'mode': 'forward',\n 'pos': (0, 0),\n 'step': 0\n }\n state['iter'] += 1\n for i in range(1, len(image)-1):\n for j in range(1, len(image[i])-1):\n state['step'] += 1\n state['pos'] = (i, j)\n current_pixel = image[i][j]\n neighbors_ = [(image[i][j-1], image[i][j-1]),\n (image[i-1][j], image[i-1][j]),\n (image[i-1][j-1], image[i-1][j-1]),\n (image[i-1][j+1], image[i-1][j+1])]\n \n neighbors = list(filter(lambda x: x[0]!=0, neighbors_))\n if current_pixel == 0:\n continue\n if len(neighbors) > 0:\n label_list = list(map(lambda x: x[1], neighbors))\n image[i][j] = min(label_list)\n for _ in neighbors:\n equil_relationship[_[1]] = set(list(equil_relationship[_[1]]) + label_list)\n else:\n new_label = list(equil_relationship.keys())[-1] + 1\n equil_relationship[new_label] = []\n image[i][j] = new_label\n if display:\n visualizer.display(state, title='Labeling...', wait=1)\n\n for key, values in equil_relationship.items():\n for value in values:\n equil_relationship[key] = equil_relationship[key].union(equil_relationship[value])\n \n state['iter'] += 1\n state['mode'] = 'forward'\n for i in range(1, len(image)-1):\n for j in range(1, len(image[i])-1):\n state['step'] += 1\n state['pos'] = (i, j)\n current_pixel = image[i][j]\n if current_pixel == 0:\n continue\n labels = equil_relationship[image[i][j]]\n if len(labels) > 0:\n assign_label = min(labels)\n image[i][j] = assign_label\n if display: \n visualizer.display(state, title='Labeling...', wait=1)\n state['finished'] = True\n visualizer.display(state, title='RESULT', wait=0)\n\ndef ccl_4(image, display=False):\n def pad_with(vector, pad_width, iaxis, kwargs):\n pad_value = kwargs.get('padder', 0)\n vector[:pad_width[0]] = pad_value\n vector[-pad_width[1]:] = pad_value\n equil_relationship = {0:[]}\n image = np.pad(image, 1, pad_with)\n image[image > 0] = np.arange(1, np.sum(image > 0) + 1)\n visualizer = HaralickVisualizer(index_image=image,\n canvas_size=(512, 512),\n output_dir=Path('./output'))\n\n\n state = {\n 'iter': 0,\n 'finished': False,\n 'mode': 'forward4',\n 'pos': (0, 0),\n 'step': 0\n }\n state['iter'] += 1\n for i in range(1, len(image)-1):\n for j in range(1, len(image[i])-1):\n state['step'] += 1\n state['pos'] = (i, j)\n current_pixel = image[i][j]\n neighbors_ = [(image[i][j-1], image[i][j-1]),\n (image[i-1][j], image[i-1][j])]\n \n neighbors = list(filter(lambda x: x[0]!=0, neighbors_))\n if current_pixel == 0:\n continue\n if len(neighbors) > 0:\n label_list = list(map(lambda x: x[1], neighbors))\n image[i][j] = min(label_list)\n for _ in neighbors:\n equil_relationship[_[1]] = set(list(equil_relationship[_[1]]) + label_list)\n else:\n new_label = list(equil_relationship.keys())[-1] + 1\n equil_relationship[new_label] = []\n image[i][j] = new_label\n if display:\n visualizer.display(state, title='Labeling...', wait=1)\n\n for key, values in equil_relationship.items():\n for value in values:\n 
equil_relationship[key] = equil_relationship[key].union(equil_relationship[value])\n \n state['iter'] += 1\n for i in range(1, len(image)-1):\n for j in range(1, len(image[i])-1):\n state['step'] += 1\n state['pos'] = (i, j)\n current_pixel = image[i][j]\n if current_pixel == 0:\n continue\n labels = equil_relationship[image[i][j]]\n if len(labels) > 0:\n assign_label = min(labels)\n image[i][j] = assign_label\n if display: \n visualizer.display(state, title='Labeling...', wait=1)\n state['finished'] = True\n visualizer.display(state, title='RESULT', wait=0)\n\ndef bfs(image, display=False):\n def pad_with(vector, pad_width, iaxis, kwargs):\n pad_value = kwargs.get('padder', 0)\n vector[:pad_width[0]] = pad_value\n vector[-pad_width[1]:] = pad_value\n image = np.pad(image, 1, pad_with)\n # label = np.zeros(image.shape, dtype=np.int)\n image[image > 0] = np.arange(1, np.sum(image > 0) + 1)\n visualizer = HaralickVisualizer(index_image=image,\n canvas_size=(512, 512),\n output_dir=Path('./output'))\n\n\n state = {\n 'iter': 0,\n 'finished': False,\n 'mode': 'search',\n 'pos': (0, 0),\n 'step': 0\n }\n visited = np.zeros(image.shape, dtype=np.int)\n count = 1\n state['iter'] += 1\n for i in range(1, len(image)-1):\n for j in range(1, len(image[i])-1):\n current_pixel = image[i][j]\n state['pos'] = (i, j)\n # if display:\n # visualizer.display(state, title='Labeling...', wait=1)\n if current_pixel == 0 or visited[i][j] == 1:\n continue\n queue = []\n queue.append((i, j))\n # visited[i][j] = 1\n # image[i][j] = count\n while queue:\n current_x, current_y = queue.pop(0)\n state['step'] += 1\n state['pos'] = (current_x, current_y)\n if display:\n visualizer.display(state, title='Labeling...', wait=1)\n if visited[current_x][current_y] == 1:\n continue\n visited[current_x][current_y] = 1\n image[current_x][current_y] = count\n neighbors_ = [(image[current_x][current_y-1], (current_x, current_y-1)),\n (image[current_x-1][current_y], (current_x-1, current_y)),\n (image[current_x-1][current_y-1], (current_x-1, current_y-1)),\n (image[current_x-1][current_y+1], (current_x-1, current_y+1)),\n (image[current_x][current_y+1], (current_x, current_y+1)),\n (image[current_x+1][current_y], (current_x+1, current_y)),\n (image[current_x+1][current_y-1], (current_x+1, current_y-1)),\n (image[current_x+1][current_y+1], (current_x+1, current_y+1))]\n neighbors = list(filter(lambda x: x[0]!=0, neighbors_))\n neighbors = list(map(lambda x: x[1], neighbors))\n for neighbor in neighbors:\n neighbor_x, neighbor_y = neighbor\n state['step'] += 1\n state['pos'] = (neighbor_x, neighbor_y)\n if visited[neighbor_x][neighbor_y] == 0 and (neighbor_x, neighbor_y) not in queue:\n queue.append(neighbor)\n # visited[neighbor_x][neighbor_y] = 1\n # image[neighbor_x][neighbor_y] = count\n # if display:\n # visualizer.display(state, title='Labeling...', wait=1)\n count += 1\n state['finished'] = True\n visualizer.display(state, title='RESULT', wait=0)\n\ndef dfs(image, display=False):\n def pad_with(vector, pad_width, iaxis, kwargs):\n pad_value = kwargs.get('padder', 0)\n vector[:pad_width[0]] = pad_value\n vector[-pad_width[1]:] = pad_value\n image = np.pad(image, 1, pad_with)\n # label = np.zeros(image.shape, dtype=np.int)\n image[image > 0] = np.arange(1, np.sum(image > 0) + 1)\n visualizer = HaralickVisualizer(index_image=image,\n canvas_size=(512, 512),\n output_dir=Path('./output'))\n\n\n state = {\n 'iter': 0,\n 'finished': False,\n 'mode': 'search',\n 'pos': (0, 0),\n 'step': 0\n }\n visited = np.zeros(image.shape, 
dtype=np.int)\n count = 1\n state['iter'] += 1\n for i in range(1, len(image)-1):\n for j in range(1, len(image[i])-1):\n state['pos'] = (i, j)\n # if display:\n # visualizer.display(state, title='Labeling...', wait=1)\n current_pixel = image[i][j]\n if current_pixel == 0 or visited[i][j] == 1:\n continue\n stack = []\n stack.append((i, j))\n \n while stack:\n current_x, current_y = stack.pop()\n state['pos'] = (current_x, current_y)\n state['step'] += 1\n if display:\n visualizer.display(state, title='Labeling...', wait=1)\n\n if visited[current_x][current_y] == 1:\n continue\n visited[current_x][current_y] = 1\n image[current_x][current_y] = count\n neighbors_ = [(image[current_x][current_y-1], (current_x, current_y-1)),\n (image[current_x-1][current_y], (current_x-1, current_y)),\n (image[current_x-1][current_y-1], (current_x-1, current_y-1)),\n (image[current_x-1][current_y+1], (current_x-1, current_y+1)),\n (image[current_x][current_y+1], (current_x, current_y+1)),\n (image[current_x+1][current_y], (current_x+1, current_y)),\n (image[current_x+1][current_y-1], (current_x+1, current_y-1)),\n (image[current_x+1][current_y+1], (current_x+1, current_y+1))]\n neighbors = list(filter(lambda x: x[0]!=0, neighbors_))\n neighbors = list(map(lambda x: x[1], neighbors))\n for neighbor in neighbors:\n neighbor_x, neighbor_y = neighbor\n if visited[neighbor_x][neighbor_y] == 0 and (neighbor_x, neighbor_y) not in stack:\n stack.append(neighbor)\n \n count += 1\n state['finished'] = True\n visualizer.display(state, title='RESULT', wait=0)\n\n\n\n\n\n \n" ]
[ [ "numpy.sum", "numpy.pad", "numpy.zeros" ] ]
fengzhang427/HEP
[ "c0188bb3c69f2d5f8842f6ee2987b6fa5eb46241" ]
[ "models/LUM_model.py" ]
[ "from abc import ABC\n\nfrom torch import nn\nimport torch\nfrom models.NDM_model import Conv2dBlock\ntry:\n from itertools import izip as zip\nexcept ImportError:\n pass\n\n\nclass DecomNet(nn.Module, ABC):\n def __init__(self, params):\n super(DecomNet, self).__init__()\n self.norm = params['norm']\n self.activ = params['activ']\n self.pad_type = params['pad_type']\n #\n self.conv0 = Conv2dBlock(4, 32, 3, 1, 1, norm=self.norm, activation=self.activ, pad_type=self.pad_type)\n self.conv1 = Conv2dBlock(4, 64, 9, 1, 4, norm=self.norm, activation='none', pad_type=self.pad_type)\n self.conv2 = Conv2dBlock(64, 64, 3, 1, 1, norm=self.norm, activation=self.activ, pad_type=self.pad_type)\n self.conv3 = Conv2dBlock(64, 128, 3, 2, 1, norm=self.norm, activation=self.activ, pad_type=self.pad_type)\n self.conv4 = Conv2dBlock(128, 128, 3, 1, 1, norm=self.norm, activation=self.activ, pad_type=self.pad_type)\n self.conv5 = nn.ConvTranspose2d(128, 64, 3, 2, 1, 1)\n self.activation = nn.ReLU(inplace=True)\n self.conv6 = Conv2dBlock(128, 64, 3, 1, 1, norm=self.norm, activation=self.activ, pad_type=self.pad_type)\n self.conv7 = Conv2dBlock(96, 64, 3, 1, 1, norm=self.norm, activation='none', pad_type=self.pad_type)\n self.conv8 = Conv2dBlock(64, 4, 3, 1, 1, norm=self.norm, activation='none', pad_type=self.pad_type)\n\n def forward(self, input_im):\n input_max = torch.max(input_im, dim=1, keepdim=True)[0]\n image = torch.cat((input_max, input_im), dim=1)\n # Refelectance\n x0 = self.conv0(image)\n # print('x0:', x0.shape)\n x1 = self.conv1(image)\n # print('x1:', x1.shape)\n x2 = self.conv2(x1)\n # print('x2:', x2.shape)\n x3 = self.conv3(x2)\n # print('x3:', x3.shape)\n x4 = self.conv4(x3)\n # print('x4:', x4.shape)\n x5 = self.conv5(x4)\n x5 = self.activation(x5)\n # print('x5:', x5.shape)\n cat5 = torch.cat((x5, x2), dim=1)\n x6 = self.conv6(cat5)\n # print('x6:', x6.shape)\n cat6 = torch.cat((x6, x0), dim=1)\n x7 = self.conv7(cat6)\n # print('x7:', x7.shape)\n x8 = self.conv8(x7)\n # print('x8:', x8.shape)\n # Outputs\n R = torch.sigmoid(x8[:, 0:3, :, :])\n L = torch.sigmoid(x8[:, 3:4, :, :])\n return R, L\n" ]
[ [ "torch.sigmoid", "torch.cat", "torch.max", "torch.nn.ConvTranspose2d", "torch.nn.ReLU" ] ]
jonaths/dqn-grid
[ "c7a23a482046ac82042bff7126cb373eb57bec8d" ]
[ "tiny_dqn.py" ]
[ "from __future__ import division, print_function, unicode_literals\n\n# Handle arguments (before slow imports so --help can be fast)\nimport argparse\n\nparser = argparse.ArgumentParser(\n description=\"Train a DQN net to play MsMacman.\")\nparser.add_argument(\"-n\", \"--number-steps\", type=int, default=4000000,\n help=\"total number of training steps\")\nparser.add_argument(\"-l\", \"--learn-iterations\", type=int, default=4,\n help=\"number of game iterations between each training step\")\nparser.add_argument(\"-s\", \"--save-steps\", type=int, default=1000,\n help=\"number of training steps between saving checkpoints\")\nparser.add_argument(\"-c\", \"--copy-steps\", type=int, default=10000,\n help=\"number of training steps between copies of online DQN to target DQN\")\nparser.add_argument(\"-r\", \"--render\", action=\"store_true\", default=False,\n help=\"render the game during training or testing\")\nparser.add_argument(\"-p\", \"--path\", default=\"model/my_dqn.ckpt\",\n help=\"path of the checkpoint file\")\nparser.add_argument(\"-t\", \"--test\", action=\"store_true\", default=False,\n help=\"test (no learning and minimal epsilon)\")\nparser.add_argument(\"-v\", \"--verbosity\", action=\"count\", default=0,\n help=\"increase output verbosity\")\nargs = parser.parse_args()\n\nfrom collections import deque\nimport gym\nimport numpy as np\nimport os\nimport tensorflow as tf\nfrom networks.qnetworks import conv_network\nfrom summaries.summaries import variable_summaries\n\nenv = gym.make(\"MsPacman-v0\")\ndone = True # env needs to be reset\n\n# First let's build the two DQNs (online & target)\ninput_height = 88\ninput_width = 80\ninput_channels = 1\nn_outputs = env.action_space.n # 9 discrete actions are available\n\nX_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width, input_channels])\nonline_q_values, online_vars = conv_network(X_state, n_outputs=n_outputs, name=\"q_networks/online\")\ntarget_q_values, target_vars = conv_network(X_state, n_outputs=n_outputs, name=\"q_networks/target\")\n\n# We need an operation to copy the online DQN to the target DQN\ncopy_ops = [target_var.assign(online_vars[var_name])\n for var_name, target_var in target_vars.items()]\ncopy_online_to_target = tf.group(*copy_ops)\n\n# Now for the training operations\nlearning_rate = 0.001\nmomentum = 0.95\n\nwith tf.variable_scope(\"train\"):\n X_action = tf.placeholder(tf.int32, shape=[None])\n y = tf.placeholder(tf.float32, shape=[None, 1])\n q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),\n axis=1, keep_dims=True)\n error = tf.abs(y - q_value)\n clipped_error = tf.clip_by_value(error, 0.0, 1.0)\n linear_error = 2 * (error - clipped_error)\n loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)\n\n global_step = tf.Variable(0, trainable=False, name='global_step')\n optimizer = tf.train.MomentumOptimizer(\n learning_rate, momentum, use_nesterov=True)\n training_op = optimizer.minimize(loss, global_step=global_step)\n\n# agrupa los summaries en el grafo para que no aparezcan por todos lados\nwith tf.name_scope('summaries'):\n variable_summaries(linear_error, 'linear_error')\n variable_summaries(loss, 'loss')\n\n# evita agregar al grafo los summaries uno por uno\nmerged = tf.summary.merge_all()\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\n\n# Let's implement a simple replay memory\nreplay_memory_size = 20000\nreplay_memory = deque([], maxlen=replay_memory_size)\n\n\ndef sample_memories(batch_size):\n indices = 
np.random.permutation(len(replay_memory))[:batch_size]\n cols = [[], [], [], [], []] # state, action, reward, next_state, continue\n for idx in indices:\n memory = replay_memory[idx]\n for col, value in zip(cols, memory):\n col.append(value)\n cols = [np.array(col) for col in cols]\n return (cols[0], cols[1], cols[2].reshape(-1, 1), cols[3],\n cols[4].reshape(-1, 1))\n\n\n# And on to the epsilon-greedy policy with decaying epsilon\neps_min = 0.1\neps_max = 1.0 if not args.test else eps_min\neps_decay_steps = args.number_steps // 2\n\n\ndef epsilon_greedy(q_values, step):\n epsilon = max(eps_min, eps_max - (eps_max - eps_min) * step / eps_decay_steps)\n if np.random.rand() < epsilon:\n return np.random.randint(n_outputs) # random action\n else:\n return np.argmax(q_values) # optimal action\n\n\n# We need to preprocess the images to speed up training\nmspacman_color = np.array([210, 164, 74]).mean()\n\n\ndef preprocess_observation(obs):\n img = obs[1:176:2, ::2] # crop and downsize\n img = img.mean(axis=2) # to greyscale\n img[img == mspacman_color] = 0 # Improve contrast\n img = (img - 128) / 128 - 1 # normalize from -1. to 1.\n return img.reshape(88, 80, 1)\n\n\n# TensorFlow - Execution phase\ntraining_start = 10000 # start training after 10,000 game iterations\ndiscount_rate = 0.99\nskip_start = 90 # Skip the start of every game (it's just waiting time).\nbatch_size = 50\niteration = 0 # game iterations\ndone = True # env needs to be reset\n\n# We will keep track of the max Q-Value over time and compute the mean per game\nloss_val = np.infty\ngame_length = 0\ntotal_max_q = 0\nmean_max_q = 0.0\n\nwith tf.Session() as sess:\n if os.path.isfile(args.path + \".index\"):\n saver.restore(sess, args.path)\n else:\n init.run()\n copy_online_to_target.run()\n\n writer = tf.summary.FileWriter(\"output\", sess.graph)\n\n while True:\n step = global_step.eval()\n if step >= args.number_steps:\n break\n iteration += 1\n if args.verbosity > 0:\n print(\"\\rIteration {} Training step {}/{} ({:.1f})% \"\n \"Loss {:5f} Mean Max-Q {:5f} \".format(\n iteration, step, args.number_steps, step * 100 / args.number_steps,\n loss_val, mean_max_q), end=\"\")\n if done: # game over, start again\n obs = env.reset()\n for skip in range(skip_start): # skip the start of each game\n obs, reward, done, info = env.step(0)\n state = preprocess_observation(obs)\n\n if args.render:\n env.render()\n\n # Online DQN evaluates what to do\n q_values = online_q_values.eval(feed_dict={X_state: [state]})\n action = epsilon_greedy(q_values, step)\n\n # Online DQN plays\n obs, reward, done, info = env.step(action)\n next_state = preprocess_observation(obs)\n\n # Let's memorize what happened\n replay_memory.append((state, action, reward, next_state, 1.0 - done))\n state = next_state\n\n if args.test:\n continue\n\n # Compute statistics for tracking progress (not shown in the book)\n total_max_q += q_values.max()\n game_length += 1\n if done:\n mean_max_q = total_max_q / game_length\n total_max_q = 0.0\n game_length = 0\n\n if iteration < training_start or iteration % args.learn_iterations != 0:\n continue # only train after warmup period and at regular intervals\n\n # Sample memories and use the target DQN to produce the target Q-Value\n X_state_val, X_action_val, rewards, X_next_state_val, continues = (sample_memories(batch_size))\n next_q_values = target_q_values.eval(feed_dict={X_state: X_next_state_val})\n max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)\n y_val = rewards + continues * discount_rate * 
max_next_q_values\n\n # Train the online DQN\n _, loss_val, summary = sess.run([training_op, loss, merged],\n feed_dict={X_state: X_state_val, X_action: X_action_val, y: y_val})\n\n if step % 10 == 0:\n writer.add_summary(summary, step)\n\n # Regularly copy the online DQN to the target DQN\n if step % args.copy_steps == 0:\n copy_online_to_target.run()\n\n # And save regularly\n if step % args.save_steps == 0:\n saver.save(sess, args.path)\n" ]
[ [ "numpy.random.rand", "tensorflow.group", "tensorflow.clip_by_value", "tensorflow.one_hot", "tensorflow.global_variables_initializer", "numpy.max", "tensorflow.train.Saver", "tensorflow.Variable", "tensorflow.variable_scope", "numpy.random.randint", "numpy.argmax", "tensorflow.abs", "numpy.array", "tensorflow.Session", "tensorflow.placeholder", "tensorflow.name_scope", "tensorflow.summary.merge_all", "tensorflow.train.MomentumOptimizer", "tensorflow.summary.FileWriter", "tensorflow.square" ] ]
gusseppe/pymach
[ "0266d5608702b8c951f4782966fcca8f2b4fe67c" ]
[ "pymach/core/fselect.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Author: Gusseppe Bravo <[email protected]>\n# License: BSD 3 clause\n\"\"\"\nThis module provides a few of useful functions (actually, methods)\nfor feature selection the dataset which is to be studied.\n\n\"\"\"\nfrom __future__ import print_function\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nfrom sklearn.feature_selection import f_regression\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.decomposition import PCA\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n# __all__ = [\n# 'pipeline']\n\n\nclass Select:\n \"\"\" A class for feature selection \"\"\"\n\n # data = None\n\n def __init__(self, definer):\n self.definer = definer\n self.problem_type = definer.problem_type\n self.infer_algorithm = definer.infer_algorithm\n # self.response = definer.response\n # self.data_path = definer.data_path\n self.n_features = definer.n_features\n\n def pipeline(self):\n \"\"\" This function chooses the best way to find features\"\"\"\n\n transformers = []\n\n custom = self.CustomFeature()\n #transformers.append(('custom', custom))\n n_features = int(self.n_features/2)\n\n #kbest = SelectKBest(score_func=chi2, k=n_features)\n #transformers.append(('kbest', kbest))\n\n # pca = PCA(n_components=n_features, svd_solver='randomized', whiten=True)\n # transformers.append(('pca', pca))\n\n if self.definer.problem_type == 'classification':\n extraTC = SelectFromModel(ExtraTreesClassifier(criterion='entropy'))\n else:\n extraTC = SelectFromModel(ExtraTreesRegressor())\n\n transformers.append(('extraTC', extraTC))\n\n #scaler = StandardScaler()\n #transformers.append(('scaler', scaler))\n #binarizer = Binarizer()\n return FeatureUnion(transformers)\n\n class CustomFeature(TransformerMixin):\n \"\"\" A custome class for featuring \"\"\"\n\n def transform(self, X, **transform_params):\n #X = pd.DataFrame(X)\n return X\n\n def fit(self, X, y=None, **fit_params):\n return self\n" ]
[ [ "sklearn.ensemble.ExtraTreesRegressor", "sklearn.ensemble.ExtraTreesClassifier", "sklearn.pipeline.FeatureUnion" ] ]
TuanNguyen27/numpyro
[ "b4ba9f1dd7e1e2d12ee67bebd4e5872fad69f770" ]
[ "test/contrib/test_tfp.py" ]
[ "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport inspect\n\nfrom numpy.testing import assert_allclose\nimport pytest\n\nfrom jax import random\nimport jax.numpy as jnp\n\nimport numpyro\nimport numpyro.distributions as dist\nfrom numpyro.infer import MCMC, NUTS\n\n\n# XXX: for some reasons, pytest raises ImportWarning when we import tfp\[email protected](\"ignore:can't resolve package\")\ndef test_api_consistent():\n from numpyro.contrib.tfp import distributions as tfd\n\n for name in tfd.__all__:\n if name in numpyro.distributions.__all__:\n tfp_dist = getattr(tfd, name)\n numpyro_dist = getattr(numpyro.distributions, name)\n if type(numpyro_dist).__name__ == \"function\":\n numpyro_dist = getattr(numpyro.distributions, name + \"Logits\")\n for p in tfp_dist.arg_constraints:\n assert p in dict(inspect.signature(tfp_dist).parameters)\n\n\[email protected](\"ignore:can't resolve package\")\ndef test_independent():\n from numpyro.contrib.tfp import distributions as tfd\n\n d = tfd.Independent(tfd.Normal(jnp.zeros(10), jnp.ones(10)), reinterpreted_batch_ndims=1)\n assert d.event_shape == (10,)\n assert d.batch_shape == ()\n\n\[email protected](\"ignore:can't resolve package\")\ndef test_transformed_distributions():\n from tensorflow_probability.substrates.jax import bijectors as tfb\n from numpyro.contrib.tfp import distributions as tfd\n\n d = dist.TransformedDistribution(dist.Normal(0, 1), dist.transforms.ExpTransform())\n d1 = tfd.TransformedDistribution(tfd.Normal(0, 1), tfb.Exp())\n d2 = dist.TransformedDistribution(dist.Normal(0, 1), tfd.BijectorTransform(tfb.Exp()))\n x = random.normal(random.PRNGKey(0), (1000,))\n d_x = d.log_prob(x).sum()\n d1_x = d1.log_prob(x).sum()\n d2_x = d2.log_prob(x).sum()\n assert_allclose(d_x, d1_x)\n assert_allclose(d_x, d2_x)\n\n\[email protected](\"ignore:can't resolve package\")\ndef test_logistic_regression():\n from numpyro.contrib.tfp import distributions as dist\n\n N, dim = 3000, 3\n num_warmup, num_samples = (1000, 1000)\n data = random.normal(random.PRNGKey(0), (N, dim))\n true_coefs = jnp.arange(1., dim + 1.)\n logits = jnp.sum(true_coefs * data, axis=-1)\n labels = dist.Bernoulli(logits=logits)(rng_key=random.PRNGKey(1))\n\n def model(labels):\n coefs = numpyro.sample('coefs', dist.Normal(jnp.zeros(dim), jnp.ones(dim)))\n logits = numpyro.deterministic('logits', jnp.sum(coefs * data, axis=-1))\n return numpyro.sample('obs', dist.Bernoulli(logits=logits), obs=labels)\n\n kernel = NUTS(model)\n mcmc = MCMC(kernel, num_warmup, num_samples)\n mcmc.run(random.PRNGKey(2), labels)\n mcmc.print_summary()\n samples = mcmc.get_samples()\n assert samples['logits'].shape == (num_samples, N)\n assert_allclose(jnp.mean(samples['coefs'], 0), true_coefs, atol=0.22)\n\n\[email protected](\"ignore:can't resolve package\")\n# TODO: remove after https://github.com/tensorflow/probability/issues/1072 is resolved\[email protected](\"ignore:Explicitly requested dtype\")\ndef test_beta_bernoulli():\n from numpyro.contrib.tfp import distributions as dist\n\n warmup_steps, num_samples = (500, 2000)\n\n def model(data):\n alpha = jnp.array([1.1, 1.1])\n beta = jnp.array([1.1, 1.1])\n p_latent = numpyro.sample('p_latent', dist.Beta(alpha, beta))\n numpyro.sample('obs', dist.Bernoulli(p_latent), obs=data)\n return p_latent\n\n true_probs = jnp.array([0.9, 0.1])\n data = dist.Bernoulli(true_probs)(rng_key=random.PRNGKey(1), sample_shape=(1000, 2))\n kernel = NUTS(model=model, trajectory_length=0.1)\n mcmc = MCMC(kernel, 
num_warmup=warmup_steps, num_samples=num_samples)\n mcmc.run(random.PRNGKey(2), data)\n mcmc.print_summary()\n samples = mcmc.get_samples()\n assert_allclose(jnp.mean(samples['p_latent'], 0), true_probs, atol=0.05)\n" ]
[ [ "numpy.testing.assert_allclose" ] ]
Photon26/wrs-main_0614
[ "c0d0e38deac9785e9c382305f65f3ac5f221787d" ]
[ "motion/trajectory/trapezoid.py" ]
[ "import math\nimport numpy as np\n\n\nclass TrajTrap(object):\n\n def __init__(self):\n self.fit = self._fit_max_acc\n self.predict = self._predict_max_acc\n\n def _fit_max_acc(self, conf0, spd0, conf1, spd1):\n # print(conf0, conf1)\n # assume max speed and check if it is needed in the given time interval\n self.avg_spd = self._max_spd * (np.sign((conf0 + conf1)) + ((conf0 + conf1) == 0))\n self.acc_begin = np.sign(self.avg_spd - spd0) * self._max_acc\n self.acc_end = np.sign(spd1 - self.avg_spd) * self._max_acc\n self.t_begin = abs(self.avg_spd - spd0) / self._max_acc\n self.t_end = abs(self.avg_spd - spd1) / self._max_acc\n begin_movement = spd0 * self.t_begin + (self.acc_begin * self.t_begin ** 2) / 2\n end_movement = self.avg_spd * self.t_end + (self.acc_end * self.t_end ** 2) / 2\n self.t_middle = (conf1 - conf0 - begin_movement - end_movement) / self.avg_spd\n # print(self.t_middle, self.avg_spd, self._interval_time, self.t_begin, self.t_end)\n # # for those do not need the max speed, only consider acceleration\n # slctn = self.t_begin + self.t_end >= self._interval_time\n # self.avg_spd[slctn] = ((-self.acc_begin * self.acc_end * self._interval_time -\n # spd0 * self.acc_end + spd1 * self.acc_begin) /\n # (self.acc_begin - self.acc_end))[slctn]\n # self.t_begin[slctn] = (abs(self.avg_spd - spd0) / self._max_acc)[slctn]\n # self.t_end[slctn] = (abs(self.avg_spd - spd1) / self._max_acc)[slctn]\n # self.t_middle[slctn] = 0\n # print(self._interval_time, self.t_begin, self.t_end)\n # print(self.t_middle, (self._interval_time - self.t_begin - self.t_end), self.t_begin, self.t_end)\n if np.any(np.logical_and(self.t_middle > self._interval_time - self.t_begin - self.t_end, self.t_middle > 0)):\n # for those need that max speed, check if the max speed is fast enough to finish the given motion\n raise ValueError(\"The required time interval is too short!\")\n # also check if a lower max speed works\n # print(abs(self.t_middle - (self._interval_time - self.t_begin - self.t_end)), self.t_middle)\n init_avg_spd = self.avg_spd\n cnter = np.ones_like(self.avg_spd)\n cnter_last = np.zeros_like(self.avg_spd, dtype=bool)\n while True:\n slctn = np.logical_or(abs(self.t_middle - (self._interval_time - self.t_begin - self.t_end)) > .001,\n self.t_middle < 1e-6)\n if np.any(np.logical_and(abs(self._interval_time - self.t_begin - self.t_end)<1e-6, self.t_middle>0)):\n raise ValueError(\"The required time interval is too short!\")\n # print(slctn)\n if np.any(slctn):\n # print(self.t_middle)\n sign = -np.ones_like(self.avg_spd)\n loc_slctn = np.logical_and(self.t_middle > 1e-6, self._interval_time - self.t_begin - self.t_end > 0)\n loc_slctn = np.logical_and(loc_slctn, self.t_middle > self._interval_time - self.t_begin - self.t_end)\n # print(loc_slctn)\n if np.any(loc_slctn):\n sign[loc_slctn] = 1\n cnter[np.logical_and(loc_slctn, not cnter_last[loc_slctn])] += 1\n cnter_last[loc_slctn] = True\n not_loc_slctn = np.logical_not(loc_slctn)\n cnter[np.logical_and(not_loc_slctn, cnter_last[not_loc_slctn])] += 1\n cnter_last[not_loc_slctn] = False\n self.avg_spd[slctn] += (sign * init_avg_spd / np.exp2(cnter))[slctn]\n # if any(abs(self.t_middle - (self._interval_time - self.t_begin - self.t_end)) < .001):\n # print(self.t_middle, self.t_begin, self.t_end, self.avg_spd)\n self.acc_begin[slctn] = (np.sign(self.avg_spd - spd0) * self._max_acc)[slctn]\n self.acc_end[slctn] = (np.sign(spd1 - self.avg_spd) * self._max_acc)[slctn]\n self.t_begin[slctn] = (abs(self.avg_spd - spd0) / self._max_acc)[slctn]\n 
self.t_end[slctn] = (abs(self.avg_spd - spd1) / self._max_acc)[slctn]\n begin_movement = spd0 * self.t_begin + (self.acc_begin * self.t_begin ** 2) / 2\n end_movement = self.avg_spd * self.t_end + (self.acc_end * self.t_end ** 2) / 2\n self.t_middle = (conf1 - conf0 - begin_movement - end_movement) / self.avg_spd\n # print(self.t_middle, self.t_begin, self.t_end, self.avg_spd)\n # print(self.t_middle, (self._interval_time - self.t_begin - self.t_end))\n # print(\"xxxx\")\n # print(self.acc_begin)\n # print(self.acc_end)\n # print(self.t_begin)\n # print(self.t_end)\n # print(self.avg_spd)\n # print(self._interval_time)\n # print(slctn)\n else:\n break\n self.conf0 = conf0\n self.conf1 = conf1\n self.spd0 = spd0\n self.spd1 = spd1\n\n def _predict_max_acc(self, step):\n local_interpolated_confs = np.zeros_like(step)\n local_interplated_spds = np.zeros_like(step)\n local_accs = np.zeros_like(step)\n slctn = step <= self.t_begin\n local_interpolated_confs[slctn] = (self.conf0 + self.spd0 * step + (self.acc_begin * step ** 2) / 2)[slctn]\n local_interplated_spds[slctn] = (self.spd0 + self.acc_begin * step)[slctn]\n local_accs[slctn] = self.acc_begin\n slctn = np.logical_and(self.t_begin < step, step <= self.t_begin + self.t_middle)\n t_left = step - self.t_begin\n local_interpolated_confs[slctn] = (self.conf0 + self.spd0 * self.t_begin + (\n self.acc_begin * self.t_begin ** 2) / 2 + self.avg_spd * t_left)[slctn]\n local_interplated_spds[slctn] = (self.avg_spd + local_interplated_spds)[slctn]\n local_accs[slctn] = 0\n slctn = self.t_begin + self.t_middle < step\n t_left = step - self.t_begin - self.t_middle\n local_interpolated_confs[slctn] = (self.conf0 + self.spd0 * self.t_begin + (\n self.acc_begin * self.t_begin ** 2) / 2 + self.avg_spd * self.t_middle + self.avg_spd * t_left + (\n self.acc_end * t_left ** 2) / 2)[slctn]\n local_interplated_spds[slctn] = (self.avg_spd + self.acc_end * t_left)[slctn]\n local_accs[slctn] = self.acc_end\n return local_interpolated_confs, local_interplated_spds, local_accs\n\n def piecewise_interpolation(self, path, control_frequency=.005, interval_time=2.0, max_acc=math.pi / 6,\n max_spd=math.pi * 2):\n \"\"\"\n :param path: a 1d array of configurations\n :param control_frequency: the program will sample time_interval/control_frequency confs\n :param max_acc, max_spd\n :return:\n author: weiwei\n date: 20200328\n \"\"\"\n self._max_acc = max_acc\n self._max_spd = max_spd\n self._interval_time = interval_time\n path = np.array(path)\n passing_conf_list = []\n passing_spd_list = []\n for id, jntconf in enumerate(path[:-1]):\n passing_conf_list.append(jntconf)\n if id == 0:\n passing_spd_list.append(np.zeros_like(jntconf))\n else:\n pre_conf = path[id - 1]\n nxt_conf = path[id + 1]\n pre_avg_spd = (jntconf - pre_conf) / self._interval_time\n nxt_avg_spd = (nxt_conf - jntconf) / self._interval_time\n # set to 0 if signs are different -> reduces overshoot\n zero_id = np.where((np.sign(pre_avg_spd) + np.sign(nxt_avg_spd)) == 0)\n pass_spd = (pre_avg_spd + nxt_avg_spd) / 2.0\n pass_spd[zero_id] = 0.0\n passing_spd_list.append(pass_spd)\n passing_conf_list.append(path[-1])\n passing_spd_list.append(np.zeros_like(path[-1]))\n interpolated_confs = []\n interpolated_spds = []\n interpolated_accs = []\n for id, passing_conf in enumerate(passing_conf_list):\n if id == 0:\n continue\n pre_passing_conf = passing_conf_list[id - 1]\n pre_passing_spd = passing_spd_list[id - 1]\n passing_spd = passing_spd_list[id]\n self.fit(pre_passing_conf, pre_passing_spd, passing_conf, 
passing_spd)\n samples = np.linspace(0, self._interval_time, math.floor(interval_time / control_frequency))\n local_interpolated_confs, local_interplated_spds, local_accs = self.predict(samples)\n interpolated_confs += local_interpolated_confs.tolist()\n interpolated_spds += local_interplated_spds.tolist()\n interpolated_accs += local_accs.tolist()\n return interpolated_confs, interpolated_spds, interpolated_accs\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n\n # y = [[0],[3]]\n # control_frequency = .0051\n # time_interval = 15.0\n # y = [[0],[math.pi*3]]\n y = [[math.pi / 6], [math.pi/2]]\n control_frequency = .005\n interval_time = 3\n traj = TrajTrap()\n interpolated_confs, interpolated_spds, local_accs = traj.piecewise_interpolation(y,\n control_frequency=control_frequency,\n interval_time=interval_time)\n # print(interpolated_spds)\n # interpolated_spds=np.array(interpolated_spds)\n # print(interpolated_confs)\n x = np.linspace(0, interval_time * (len(y) - 1), (len(y) - 1) * math.floor(interval_time / control_frequency))\n fig, axs = plt.subplots(3, figsize=(3.5,4.75))\n fig.tight_layout(pad=.7)\n axs[0].plot(x, interpolated_confs)\n axs[0].plot(range(0, interval_time * (len(y)), interval_time), y, '--o',color='tab:blue')\n axs[1].plot(x, interpolated_spds)\n axs[2].plot(x, local_accs)\n # plt.quiver(x, interpolated_confs, x, interpolated_spds, width=.001)\n # plt.plot(y)\n plt.show()\n" ]
[ [ "numpy.logical_not", "numpy.zeros_like", "numpy.ones_like", "numpy.array", "numpy.exp2", "numpy.logical_and", "matplotlib.pyplot.subplots", "numpy.sign", "numpy.any", "matplotlib.pyplot.show" ] ]
m-szalay/Cirq
[ "1bd083a87fdf49212f347d88f15713e90cc72f8f" ]
[ "cirq/ops/pauli_string_test.py" ]
[ "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport math\nfrom typing import List, cast\n\nimport numpy as np\nimport pytest\nimport sympy\n\nimport cirq\nimport cirq.testing\n\n\ndef _make_qubits(n):\n return [cirq.NamedQubit('q{}'.format(i)) for i in range(n)]\n\n\ndef _sample_qubit_pauli_maps():\n \"\"\" All combinations of having a Pauli or nothing on 3 qubits.\n Yields 64 qubit pauli maps\n \"\"\"\n qubits = _make_qubits(3)\n paulis_or_none = (None, cirq.X, cirq.Y, cirq.Z)\n for paulis in itertools.product(paulis_or_none, repeat=len(qubits)):\n yield {\n qubit: pauli\n for qubit, pauli in zip(qubits, paulis)\n if pauli is not None\n }\n\n\ndef _small_sample_qubit_pauli_maps():\n \"\"\" A few representative samples of qubit maps.\n\n Only tests 10 combinations of Paulis to speed up testing.\n \"\"\"\n qubits = _make_qubits(3)\n yield {}\n yield {qubits[0]: cirq.X}\n yield {qubits[1]: cirq.X}\n yield {qubits[2]: cirq.X}\n yield {qubits[1]: cirq.Z}\n\n yield {qubits[0]: cirq.Y, qubits[1]: cirq.Z}\n yield {qubits[1]: cirq.Z, qubits[2]: cirq.X}\n yield {qubits[0]: cirq.X, qubits[1]: cirq.X, qubits[2]: cirq.X}\n yield {qubits[0]: cirq.X, qubits[1]: cirq.Y, qubits[2]: cirq.Z}\n yield {qubits[0]: cirq.Z, qubits[1]: cirq.X, qubits[2]: cirq.Y}\n\n\ndef test_eq_ne_hash():\n q0, q1, q2 = _make_qubits(3)\n eq = cirq.testing.EqualsTester()\n eq.make_equality_group(\n lambda: cirq.PauliString(), lambda: cirq.PauliString(qubit_pauli_map={\n }), lambda: cirq.PauliString(qubit_pauli_map={}, coefficient=+1))\n eq.add_equality_group(cirq.PauliString(qubit_pauli_map={}, coefficient=-1))\n for q, pauli in itertools.product((q0, q1), (cirq.X, cirq.Y, cirq.Z)):\n eq.add_equality_group(\n cirq.PauliString(qubit_pauli_map={q: pauli}, coefficient=+1))\n eq.add_equality_group(\n cirq.PauliString(qubit_pauli_map={q: pauli}, coefficient=-1))\n for q, p0, p1 in itertools.product((q0, q1), (cirq.X, cirq.Y, cirq.Z),\n (cirq.X, cirq.Y, cirq.Z)):\n eq.add_equality_group(\n cirq.PauliString(qubit_pauli_map={\n q: p0,\n q2: p1\n }, coefficient=+1))\n\n\ndef test_equal_up_to_coefficient():\n q0, = _make_qubits(1)\n assert cirq.PauliString({}, +1).equal_up_to_coefficient(\n cirq.PauliString({}, +1))\n assert cirq.PauliString({}, -1).equal_up_to_coefficient(\n cirq.PauliString({}, -1))\n assert cirq.PauliString({}, +1).equal_up_to_coefficient(\n cirq.PauliString({}, -1))\n assert cirq.PauliString({}, +1).equal_up_to_coefficient(\n cirq.PauliString({}, 2j))\n\n assert cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(\n cirq.PauliString({q0: cirq.X}, +1))\n assert cirq.PauliString({q0: cirq.X}, -1).equal_up_to_coefficient(\n cirq.PauliString({q0: cirq.X}, -1))\n assert cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(\n cirq.PauliString({q0: cirq.X}, -1))\n\n assert not cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(\n cirq.PauliString({q0: cirq.Y}, +1))\n assert not cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(\n 
cirq.PauliString({q0: cirq.Y}, 1j))\n assert not cirq.PauliString({q0: cirq.X}, -1).equal_up_to_coefficient(\n cirq.PauliString({q0: cirq.Y}, -1))\n assert not cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(\n cirq.PauliString({q0: cirq.Y}, -1))\n\n assert not cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(\n cirq.PauliString({}, +1))\n assert not cirq.PauliString({q0: cirq.X}, -1).equal_up_to_coefficient(\n cirq.PauliString({}, -1))\n assert not cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(\n cirq.PauliString({}, -1))\n\n\ndef test_exponentiation_as_exponent():\n a, b = cirq.LineQubit.range(2)\n p = cirq.PauliString({a: cirq.X, b: cirq.Y})\n\n with pytest.raises(NotImplementedError, match='non-Hermitian'):\n _ = math.e**(math.pi * p)\n\n with pytest.raises(TypeError, match='unsupported'):\n _ = 'test'**p\n\n assert cirq.approx_eq(\n math.e**(-0.5j * math.pi * p),\n cirq.PauliStringPhasor(p, exponent_neg=0.5, exponent_pos=-0.5))\n\n assert cirq.approx_eq(\n math.e**(0.25j * math.pi * p),\n cirq.PauliStringPhasor(p, exponent_neg=-0.25, exponent_pos=0.25))\n\n assert cirq.approx_eq(\n 2**(0.25j * math.pi * p),\n cirq.PauliStringPhasor(p,\n exponent_neg=-0.25 * math.log(2),\n exponent_pos=0.25 * math.log(2)))\n\n assert cirq.approx_eq(\n np.exp(0.25j * math.pi * p),\n cirq.PauliStringPhasor(p, exponent_neg=-0.25, exponent_pos=0.25))\n\n\ndef test_exponentiate_single_value_as_exponent():\n q = cirq.LineQubit(0)\n\n assert cirq.approx_eq(math.e**(-0.125j * math.pi * cirq.X(q)),\n cirq.rx(0.25 * math.pi).on(q))\n\n assert cirq.approx_eq(math.e**(-0.125j * math.pi * cirq.Y(q)),\n cirq.ry(0.25 * math.pi).on(q))\n\n assert cirq.approx_eq(math.e**(-0.125j * math.pi * cirq.Z(q)),\n cirq.rz(0.25 * math.pi).on(q))\n\n assert cirq.approx_eq(np.exp(-0.15j * math.pi * cirq.X(q)),\n cirq.rx(0.3 * math.pi).on(q))\n\n assert cirq.approx_eq(cirq.X(q)**0.5, cirq.XPowGate(exponent=0.5).on(q))\n\n assert cirq.approx_eq(cirq.Y(q)**0.5, cirq.YPowGate(exponent=0.5).on(q))\n\n assert cirq.approx_eq(cirq.Z(q)**0.5, cirq.ZPowGate(exponent=0.5).on(q))\n\n\ndef test_exponentiation_as_base():\n a, b = cirq.LineQubit.range(2)\n p = cirq.PauliString({a: cirq.X, b: cirq.Y})\n\n with pytest.raises(TypeError, match='unsupported'):\n _ = (2 * p)**5\n\n with pytest.raises(TypeError, match='unsupported'):\n _ = p**'test'\n\n with pytest.raises(TypeError, match='unsupported'):\n _ = p**1j\n\n assert p**-1 == p\n\n assert cirq.approx_eq(\n p**0.5, cirq.PauliStringPhasor(p, exponent_neg=0.5, exponent_pos=0))\n\n assert cirq.approx_eq(\n p**-0.5, cirq.PauliStringPhasor(p, exponent_neg=-0.5, exponent_pos=0))\n\n assert cirq.approx_eq(\n math.e**(0.25j * math.pi * p),\n cirq.PauliStringPhasor(p, exponent_neg=-0.25, exponent_pos=0.25))\n\n assert cirq.approx_eq(\n 2**(0.25j * math.pi * p),\n cirq.PauliStringPhasor(p,\n exponent_neg=-0.25 * math.log(2),\n exponent_pos=0.25 * math.log(2)))\n\n assert cirq.approx_eq(\n np.exp(0.25j * math.pi * p),\n cirq.PauliStringPhasor(p, exponent_neg=-0.25, exponent_pos=0.25))\n\n np.testing.assert_allclose(\n cirq.unitary(np.exp(0.5j * math.pi * cirq.Z(a))),\n np.diag([np.exp(0.5j * math.pi),\n np.exp(-0.5j * math.pi)]),\n atol=1e-8)\n\n\[email protected]('pauli', (cirq.X, cirq.Y, cirq.Z))\ndef test_list_op_constructor_matches_mapping(pauli):\n q0, = _make_qubits(1)\n op = pauli.on(q0)\n assert cirq.PauliString([op]) == cirq.PauliString({q0: pauli})\n\n\ndef test_constructor_flexibility():\n a, b = cirq.LineQubit.range(2)\n with pytest.raises(TypeError, 
match='cirq.PAULI_STRING_LIKE'):\n _ = cirq.PauliString(cirq.CZ(a, b))\n with pytest.raises(TypeError, match='cirq.PAULI_STRING_LIKE'):\n _ = cirq.PauliString('test')\n with pytest.raises(TypeError, match='S is not a Pauli'):\n _ = cirq.PauliString(qubit_pauli_map={a: cirq.S})\n\n assert cirq.PauliString(\n cirq.X(a)) == cirq.PauliString(qubit_pauli_map={a: cirq.X})\n assert cirq.PauliString([cirq.X(a)\n ]) == cirq.PauliString(qubit_pauli_map={a: cirq.X})\n assert cirq.PauliString([[[cirq.X(a)]]\n ]) == cirq.PauliString(qubit_pauli_map={a: cirq.X})\n assert cirq.PauliString([[[cirq.I(a)]]]) == cirq.PauliString()\n\n assert cirq.PauliString(1, 2, 3, cirq.X(a), cirq.Y(a)) == cirq.PauliString(\n qubit_pauli_map={a: cirq.Z}, coefficient=6j)\n\n assert cirq.PauliString(cirq.X(a), cirq.X(a)) == cirq.PauliString()\n assert cirq.PauliString(cirq.X(a),\n cirq.X(b)) == cirq.PauliString(qubit_pauli_map={\n a: cirq.X,\n b: cirq.X\n })\n\n assert cirq.PauliString(0) == cirq.PauliString(coefficient=0)\n\n assert cirq.PauliString(1, 2, 3, {a: cirq.X},\n cirq.Y(a)) == cirq.PauliString(\n qubit_pauli_map={a: cirq.Z}, coefficient=6j)\n\n\[email protected]('qubit_pauli_map', _sample_qubit_pauli_maps())\ndef test_getitem(qubit_pauli_map):\n other = cirq.NamedQubit('other')\n pauli_string = cirq.PauliString(qubit_pauli_map=qubit_pauli_map)\n for key in qubit_pauli_map:\n assert qubit_pauli_map[key] == pauli_string[key]\n with pytest.raises(KeyError):\n _ = qubit_pauli_map[other]\n with pytest.raises(KeyError):\n _ = pauli_string[other]\n\n\[email protected]('qubit_pauli_map', _sample_qubit_pauli_maps())\ndef test_get(qubit_pauli_map):\n other = cirq.NamedQubit('other')\n pauli_string = cirq.PauliString(qubit_pauli_map)\n for key in qubit_pauli_map:\n assert qubit_pauli_map.get(key) == pauli_string.get(key)\n assert qubit_pauli_map.get(other) is None\n assert pauli_string.get(other) is None\n # pylint: disable=too-many-function-args\n assert qubit_pauli_map.get(other, 5) == pauli_string.get(other, 5) == 5\n # pylint: enable=too-many-function-args\n\n\[email protected]('qubit_pauli_map', _sample_qubit_pauli_maps())\ndef test_contains(qubit_pauli_map):\n other = cirq.NamedQubit('other')\n pauli_string = cirq.PauliString(qubit_pauli_map)\n for key in qubit_pauli_map:\n assert key in pauli_string\n assert other not in pauli_string\n\n\[email protected]('qubit_pauli_map', _sample_qubit_pauli_maps())\ndef test_basic_functionality(qubit_pauli_map):\n pauli_string = cirq.PauliString(qubit_pauli_map)\n # Test items\n assert len(qubit_pauli_map.items()) == len(pauli_string.items())\n assert set(qubit_pauli_map.items()) == set(pauli_string.items())\n\n # Test values\n assert len(qubit_pauli_map.values()) == len(pauli_string.values())\n assert set(qubit_pauli_map.values()) == set(pauli_string.values())\n\n # Test length\n assert len(qubit_pauli_map) == len(pauli_string)\n\n # Test keys\n assert (len(qubit_pauli_map.keys()) == len(pauli_string.keys()) == len(\n pauli_string.qubits))\n assert (set(qubit_pauli_map.keys()) == set(pauli_string.keys()) == set(\n pauli_string.qubits))\n\n # Test iteration\n assert len(tuple(qubit_pauli_map)) == len(tuple(pauli_string))\n assert set(tuple(qubit_pauli_map)) == set(tuple(pauli_string))\n\n\ndef test_repr():\n q0, q1, q2 = _make_qubits(3)\n pauli_string = cirq.PauliString({q2: cirq.X, q1: cirq.Y, q0: cirq.Z})\n cirq.testing.assert_equivalent_repr(pauli_string)\n cirq.testing.assert_equivalent_repr(-pauli_string)\n cirq.testing.assert_equivalent_repr(1j * pauli_string)\n 
cirq.testing.assert_equivalent_repr(2 * pauli_string)\n cirq.testing.assert_equivalent_repr(cirq.PauliString())\n\n\ndef test_str():\n q0, q1, q2 = _make_qubits(3)\n pauli_string = cirq.PauliString({q2: cirq.X, q1: cirq.Y, q0: cirq.Z})\n assert str(cirq.PauliString({})) == 'I'\n assert str(-cirq.PauliString({})) == '-I'\n assert str(pauli_string) == 'Z(q0)*Y(q1)*X(q2)'\n assert str(-pauli_string) == '-Z(q0)*Y(q1)*X(q2)'\n assert str(1j*pauli_string) == '1j*Z(q0)*Y(q1)*X(q2)'\n assert str(pauli_string*-1j) == '-1j*Z(q0)*Y(q1)*X(q2)'\n\n\[email protected]('map1,map2,out', (lambda q0, q1, q2: (\n ({}, {}, {}),\n ({q0: cirq.X}, {q0: cirq.Y}, {q0: (cirq.X, cirq.Y)}),\n ({q0: cirq.X}, {q1: cirq.X}, {}),\n ({q0: cirq.Y, q1: cirq.Z}, {q1: cirq.Y, q2: cirq.X},\n {q1: (cirq.Z, cirq.Y)}),\n ({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}, {}, {}),\n ({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}, {q0: cirq.Y, q1: cirq.Z},\n {q0: (cirq.X, cirq.Y), q1: (cirq.Y, cirq.Z)}),\n ))(*_make_qubits(3)))\ndef test_zip_items(map1, map2, out):\n ps1 = cirq.PauliString(map1)\n ps2 = cirq.PauliString(map2)\n out_actual = tuple(ps1.zip_items(ps2))\n assert len(out_actual) == len(out)\n assert dict(out_actual) == out\n\n\[email protected]('map1,map2,out', (lambda q0, q1, q2: (\n ({}, {}, ()),\n ({q0: cirq.X}, {q0: cirq.Y}, ((cirq.X, cirq.Y),)),\n ({q0: cirq.X}, {q1: cirq.X}, ()),\n ({q0: cirq.Y, q1: cirq.Z}, {q1: cirq.Y, q2: cirq.X},\n ((cirq.Z, cirq.Y),)),\n ({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}, {}, ()),\n ({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}, {q0: cirq.Y, q1: cirq.Z},\n # Order not necessary\n ((cirq.X, cirq.Y), (cirq.Y, cirq.Z)))\n ))(*_make_qubits(3)))\ndef test_zip_paulis(map1, map2, out):\n ps1 = cirq.PauliString(map1)\n ps2 = cirq.PauliString(map2)\n out_actual = tuple(ps1.zip_paulis(ps2))\n assert len(out_actual) == len(out)\n if len(out) <= 1:\n assert out_actual == out\n assert set(out_actual) == set(out) # Ignore output order\n\n\ndef test_commutes():\n qubits = _make_qubits(3)\n\n ps1 = cirq.PauliString([cirq.X(qubits[0])])\n with pytest.raises(TypeError):\n cirq.commutes(ps1, 'X')\n assert cirq.commutes(ps1, 'X', default='default') == 'default'\n for A, commutes in [(cirq.X, True), (cirq.Y, False)]:\n assert (cirq.commutes(ps1,\n cirq.PauliString([A(qubits[0])])) == commutes)\n assert cirq.commutes(ps1, cirq.PauliString([A(qubits[1])]))\n\n ps1 = cirq.PauliString(dict(zip(qubits, (cirq.X, cirq.Y))))\n\n for paulis, commutes in {\n (cirq.X, cirq.Y): True,\n (cirq.X, cirq.Z): False,\n (cirq.Y, cirq.X): True,\n (cirq.Y, cirq.Z): True,\n (cirq.X, cirq.Y, cirq.Z): True,\n (cirq.X, cirq.Z, cirq.Z): False,\n (cirq.Y, cirq.X, cirq.Z): True,\n (cirq.Y, cirq.Z, cirq.X): True,\n }.items():\n ps2 = cirq.PauliString(dict(zip(qubits, paulis)))\n assert cirq.commutes(ps1, ps2) == commutes\n\n for paulis, commutes in {\n (cirq.Y, cirq.X): True,\n (cirq.Z, cirq.X): False,\n (cirq.X, cirq.Y): False,\n (cirq.Z, cirq.Y): False,\n }.items():\n ps2 = cirq.PauliString(dict(zip(qubits[1:], paulis)))\n assert cirq.commutes(ps1, ps2) == commutes\n\n\ndef test_negate():\n q0, q1 = _make_qubits(2)\n qubit_pauli_map = {q0: cirq.X, q1: cirq.Y}\n ps1 = cirq.PauliString(qubit_pauli_map)\n ps2 = cirq.PauliString(qubit_pauli_map, -1)\n assert -ps1 == ps2\n assert ps1 == -ps2\n neg_ps1 = -ps1\n assert -neg_ps1 == ps1\n\n\ndef test_mul_scalar():\n a, b = cirq.LineQubit.range(2)\n p = cirq.PauliString({a: cirq.X, b: cirq.Y})\n assert -p == -1 * p == -1.0 * p == p * -1 == p * complex(-1)\n assert -p != 1j * p\n assert +p == 1 * p\n\n assert p * cirq.I(a) == 
p\n assert cirq.I(a) * p == p\n\n with pytest.raises(TypeError,\n match=\"sequence by non-int of type 'PauliString'\"):\n _ = p * 'test'\n with pytest.raises(TypeError,\n match=\"sequence by non-int of type 'PauliString'\"):\n _ = 'test' * p\n\n\ndef test_div_scalar():\n a, b = cirq.LineQubit.range(2)\n p = cirq.PauliString({a: cirq.X, b: cirq.Y})\n assert -p == p / -1 == p / -1.0 == p / (-1 + 0j)\n assert -p != p / 1j\n assert +p == p / 1\n assert p * 2 == p / 0.5\n with pytest.raises(TypeError):\n _ = p / 'test'\n with pytest.raises(TypeError):\n # noinspection PyUnresolvedReferences\n _ = 'test' / p\n\n\ndef test_mul_strings():\n a, b, c, d = cirq.LineQubit.range(4)\n p1 = cirq.PauliString({a: cirq.X, b: cirq.Y, c: cirq.Z})\n p2 = cirq.PauliString({b: cirq.X, c: cirq.Y, d: cirq.Z})\n assert p1 * p2 == -cirq.PauliString({\n a: cirq.X,\n b: cirq.Z,\n c: cirq.X,\n d: cirq.Z,\n })\n\n assert cirq.X(a) * cirq.PauliString({a: cirq.X}) == cirq.PauliString()\n assert cirq.PauliString({a: cirq.X}) * cirq.X(a) == cirq.PauliString()\n assert cirq.X(a) * cirq.X(a) == cirq.PauliString()\n assert -cirq.X(a) * -cirq.X(a) == cirq.PauliString()\n\n with pytest.raises(TypeError, match='unsupported'):\n _ = cirq.X(a) * object()\n with pytest.raises(TypeError, match='unsupported'):\n # noinspection PyUnresolvedReferences\n _ = object() * cirq.X(a)\n assert -cirq.X(a) == -cirq.PauliString({a: cirq.X})\n\n\ndef test_op_equivalence():\n a, b = cirq.LineQubit.range(2)\n various_x = [\n cirq.X(a),\n cirq.PauliString({a: cirq.X}),\n cirq.PauliString([cirq.X.on(a)]),\n cirq.SingleQubitPauliStringGateOperation(cirq.X, a),\n cirq.GateOperation(cirq.X, [a]),\n ]\n\n for x in various_x:\n cirq.testing.assert_equivalent_repr(x)\n\n eq = cirq.testing.EqualsTester()\n eq.add_equality_group(*various_x)\n eq.add_equality_group(cirq.Y(a), cirq.PauliString({a: cirq.Y}))\n eq.add_equality_group(-cirq.PauliString({a: cirq.X}))\n eq.add_equality_group(cirq.Z(a), cirq.PauliString({a: cirq.Z}))\n eq.add_equality_group(cirq.Z(b), cirq.PauliString({b: cirq.Z}))\n\n\ndef test_op_product():\n a, b = cirq.LineQubit.range(2)\n\n assert cirq.X(a) * cirq.X(b) == cirq.PauliString({a: cirq.X, b: cirq.X})\n assert cirq.X(a) * cirq.Y(b) == cirq.PauliString({a: cirq.X, b: cirq.Y})\n assert cirq.Z(a) * cirq.Y(b) == cirq.PauliString({a: cirq.Z, b: cirq.Y})\n\n assert cirq.X(a) * cirq.X(a) == cirq.PauliString()\n assert cirq.X(a) * cirq.Y(a) == 1j * cirq.PauliString({a: cirq.Z})\n assert cirq.Y(a) * cirq.Z(b) * cirq.X(a) == -1j * cirq.PauliString({\n a: cirq.Z,\n b: cirq.Z\n })\n\n\ndef test_pos():\n q0, q1 = _make_qubits(2)\n qubit_pauli_map = {q0: cirq.X, q1: cirq.Y}\n ps1 = cirq.PauliString(qubit_pauli_map)\n assert ps1 == +ps1\n\n\ndef test_pow():\n a, b = cirq.LineQubit.range(2)\n\n assert cirq.PauliString({a: cirq.X})**0.25 == cirq.X(a)**0.25\n assert cirq.PauliString({a: cirq.Y})**0.25 == cirq.Y(a)**0.25\n assert cirq.PauliString({a: cirq.Z})**0.25 == cirq.Z(a)**0.25\n\n p = cirq.PauliString({a: cirq.X, b: cirq.Y})\n assert p**1 == p\n assert p**-1 == p\n assert (-p)**1 == -p\n assert (-p)**-1 == -p\n assert (1j * p)**1 == 1j * p\n assert (1j * p)**-1 == -1j * p\n\n\ndef test_rpow():\n a, b = cirq.LineQubit.range(2)\n\n u = cirq.unitary(np.exp(1j * np.pi / 2 * cirq.Z(a) * cirq.Z(b)))\n np.testing.assert_allclose(u, np.diag([1j, -1j, -1j, 1j]), atol=1e-8)\n\n u = cirq.unitary(np.exp(-1j * np.pi / 4 * cirq.Z(a) * cirq.Z(b)))\n cirq.testing.assert_allclose_up_to_global_phase(u,\n np.diag([1, 1j, 1j, 1]),\n atol=1e-8)\n\n u = 
cirq.unitary(np.e**(1j * np.pi * cirq.Z(a) * cirq.Z(b)))\n np.testing.assert_allclose(u, np.diag([-1, -1, -1, -1]), atol=1e-8)\n\n\ndef test_numpy_ufunc():\n with pytest.raises(TypeError, match=\"returned NotImplemented\"):\n _ = np.sin(cirq.PauliString())\n with pytest.raises(NotImplementedError, match=\"non-Hermitian\"):\n _ = np.exp(cirq.PauliString())\n x = np.exp(1j * np.pi * cirq.PauliString())\n assert x is not None\n\n\ndef test_map_qubits():\n a, b = (cirq.NamedQubit(name) for name in 'ab')\n q0, q1 = _make_qubits(2)\n qubit_pauli_map1 = {a: cirq.X, b: cirq.Y}\n qubit_pauli_map2 = {q0: cirq.X, q1: cirq.Y}\n qubit_map = {a: q0, b: q1}\n ps1 = cirq.PauliString(qubit_pauli_map1)\n ps2 = cirq.PauliString(qubit_pauli_map2)\n assert ps1.map_qubits(qubit_map) == ps2\n\n\ndef test_to_z_basis_ops():\n x0 = np.array([1, 1]) / np.sqrt(2)\n x1 = np.array([1, -1]) / np.sqrt(2)\n y0 = np.array([1, 1j]) / np.sqrt(2)\n y1 = np.array([1, -1j]) / np.sqrt(2)\n z0 = np.array([1, 0])\n z1 = np.array([0, 1])\n\n q0, q1, q2, q3, q4, q5 = _make_qubits(6)\n pauli_string = cirq.PauliString({q0: cirq.X, q1: cirq.X,\n q2: cirq.Y, q3: cirq.Y,\n q4: cirq.Z, q5: cirq.Z})\n circuit = cirq.Circuit(pauli_string.to_z_basis_ops())\n\n initial_state = cirq.kron(x0, x1, y0, y1, z0, z1, shape_len=1)\n z_basis_state = circuit.final_state_vector(initial_state)\n\n expected_state = np.zeros(2 ** 6)\n expected_state[0b010101] = 1\n\n cirq.testing.assert_allclose_up_to_global_phase(\n z_basis_state, expected_state, rtol=1e-7, atol=1e-7)\n\n\ndef test_to_z_basis_ops_product_state():\n q0, q1, q2, q3, q4, q5 = _make_qubits(6)\n pauli_string = cirq.PauliString({\n q0: cirq.X,\n q1: cirq.X,\n q2: cirq.Y,\n q3: cirq.Y,\n q4: cirq.Z,\n q5: cirq.Z\n })\n circuit = cirq.Circuit(pauli_string.to_z_basis_ops())\n\n initial_state = cirq.KET_PLUS(q0) * cirq.KET_MINUS(q1) * cirq.KET_IMAG(\n q2) * cirq.KET_MINUS_IMAG(q3) * cirq.KET_ZERO(q4) * cirq.KET_ONE(q5)\n z_basis_state = circuit.final_state_vector(initial_state)\n\n expected_state = np.zeros(2**6)\n expected_state[0b010101] = 1\n\n cirq.testing.assert_allclose_up_to_global_phase(z_basis_state,\n expected_state,\n rtol=1e-7,\n atol=1e-7)\n\n\ndef _assert_pass_over(ops: List[cirq.Operation],\n before: cirq.PauliString,\n after: cirq.PauliString):\n assert before.pass_operations_over(ops[::-1]) == after\n assert after.pass_operations_over(ops, after_to_before=True) == before\n\n\[email protected]('shift,sign',\n itertools.product(range(3), (-1, +1)))\ndef test_pass_operations_over_single(shift: int, sign: int):\n q0, q1 = _make_qubits(2)\n X, Y, Z = (cirq.Pauli.by_relative_index(cast(cirq.Pauli, pauli), shift)\n for pauli in (cirq.X, cirq.Y, cirq.Z))\n\n op0 = cirq.SingleQubitCliffordGate.from_pauli(Y)(q1)\n ps_before: cirq.PauliString[cirq.Qid] = cirq.PauliString({q0: X}, sign)\n ps_after = ps_before\n _assert_pass_over([op0], ps_before, ps_after)\n\n op0 = cirq.SingleQubitCliffordGate.from_pauli(X)(q0)\n op1 = cirq.SingleQubitCliffordGate.from_pauli(Y)(q1)\n ps_before = cirq.PauliString({q0: X, q1: Y}, sign)\n ps_after = ps_before\n _assert_pass_over([op0, op1], ps_before, ps_after)\n\n op0 = cirq.SingleQubitCliffordGate.from_double_map({Z: (X, False),\n X: (Z, False)})(q0)\n ps_before = cirq.PauliString({q0: X, q1: Y}, sign)\n ps_after = cirq.PauliString({q0: Z, q1: Y}, sign)\n _assert_pass_over([op0], ps_before, ps_after)\n\n op1 = cirq.SingleQubitCliffordGate.from_pauli(X)(q1)\n ps_before = cirq.PauliString({q0: X, q1: Y}, sign)\n ps_after = -ps_before\n _assert_pass_over([op1], 
ps_before, ps_after)\n\n ps_after = cirq.PauliString({q0: Z, q1: Y}, -sign)\n _assert_pass_over([op0, op1], ps_before, ps_after)\n\n op0 = cirq.SingleQubitCliffordGate.from_pauli(Z, True)(q0)\n op1 = cirq.SingleQubitCliffordGate.from_pauli(X, True)(q0)\n ps_before = cirq.PauliString({q0: X}, sign)\n ps_after = cirq.PauliString({q0: Y}, -sign)\n _assert_pass_over([op0, op1], ps_before, ps_after)\n\n\[email protected]('shift,t_or_f1, t_or_f2,neg',\n itertools.product(range(3), *((True, False),)*3))\ndef test_pass_operations_over_double(shift: int, t_or_f1: bool, t_or_f2: bool,\n neg: bool):\n sign = -1 if neg else +1\n q0, q1, q2 = _make_qubits(3)\n X, Y, Z = (cirq.Pauli.by_relative_index(pauli, shift)\n for pauli in (cirq.X, cirq.Y, cirq.Z))\n\n op0 = cirq.PauliInteractionGate(Z, t_or_f1, X, t_or_f2)(q0, q1)\n ps_before = cirq.PauliString(qubit_pauli_map={\n q0: Z,\n q2: Y\n },\n coefficient=sign)\n ps_after = cirq.PauliString(qubit_pauli_map={\n q0: Z,\n q2: Y\n },\n coefficient=sign)\n _assert_pass_over([op0], ps_before, ps_after)\n\n op0 = cirq.PauliInteractionGate(Y, t_or_f1, X, t_or_f2)(q0, q1)\n ps_before = cirq.PauliString({q0: Z, q2: Y}, sign)\n ps_after = cirq.PauliString({q0: Z, q2: Y, q1: X}, sign)\n _assert_pass_over([op0], ps_before, ps_after)\n\n op0 = cirq.PauliInteractionGate(Z, t_or_f1, X, t_or_f2)(q0, q1)\n ps_before = cirq.PauliString({q0: Z, q1: Y}, sign)\n ps_after = cirq.PauliString({q1: Y}, sign)\n _assert_pass_over([op0], ps_before, ps_after)\n\n op0 = cirq.PauliInteractionGate(Y, t_or_f1, X, t_or_f2)(q0, q1)\n ps_before = cirq.PauliString({q0: Z, q1: Y}, sign)\n ps_after = cirq.PauliString({q0: X, q1: Z},\n -1 if neg ^ t_or_f1 ^ t_or_f2 else +1)\n _assert_pass_over([op0], ps_before, ps_after)\n\n op0 = cirq.PauliInteractionGate(X, t_or_f1, X, t_or_f2)(q0, q1)\n ps_before = cirq.PauliString({q0: Z, q1: Y}, sign)\n ps_after = cirq.PauliString({q0: Y, q1: Z},\n +1 if neg ^ t_or_f1 ^ t_or_f2 else -1)\n _assert_pass_over([op0], ps_before, ps_after)\n\n\ndef test_pass_operations_over_cz():\n q0, q1 = _make_qubits(2)\n op0 = cirq.CZ(q0, q1)\n ps_before = cirq.PauliString({q0: cirq.Z, q1: cirq.Y})\n ps_after = cirq.PauliString({q1: cirq.Y})\n _assert_pass_over([op0], ps_before, ps_after)\n\n\ndef test_pass_operations_over_no_common_qubits():\n class DummyGate(cirq.SingleQubitGate):\n pass\n\n q0, q1 = _make_qubits(2)\n op0 = DummyGate()(q1)\n ps_before = cirq.PauliString({q0: cirq.Z})\n ps_after = cirq.PauliString({q0: cirq.Z})\n _assert_pass_over([op0], ps_before, ps_after)\n\n\ndef test_pass_unsupported_operations_over():\n q0, = _make_qubits(1)\n pauli_string = cirq.PauliString({q0: cirq.X})\n with pytest.raises(TypeError, match='not a known Clifford'):\n pauli_string.pass_operations_over([cirq.T(q0)])\n\n\ndef test_with_qubits():\n old_qubits = cirq.LineQubit.range(9)\n new_qubits = cirq.LineQubit.range(9, 18)\n qubit_pauli_map = {q: cirq.Pauli.by_index(q.x) for q in old_qubits}\n pauli_string = cirq.PauliString(qubit_pauli_map, -1)\n new_pauli_string = pauli_string.with_qubits(*new_qubits)\n\n assert new_pauli_string.qubits == tuple(new_qubits)\n for q in new_qubits:\n assert new_pauli_string[q] == cirq.Pauli.by_index(q.x)\n assert new_pauli_string.coefficient == -1\n\n\ndef test_with_coefficient():\n qubits = cirq.LineQubit.range(4)\n qubit_pauli_map = {q: cirq.Pauli.by_index(q.x) for q in qubits}\n pauli_string = cirq.PauliString(qubit_pauli_map, 1.23)\n ps2 = pauli_string.with_coefficient(1.0)\n assert ps2.coefficient == 1.0\n assert 
ps2.equal_up_to_coefficient(pauli_string)\n assert pauli_string != ps2\n assert pauli_string.coefficient == 1.23\n\n\[email protected]('qubit_pauli_map', _small_sample_qubit_pauli_maps())\ndef test_consistency(qubit_pauli_map):\n pauli_string = cirq.PauliString(qubit_pauli_map)\n cirq.testing.assert_implements_consistent_protocols(pauli_string)\n\n\ndef test_scaled_unitary_consistency():\n a, b = cirq.LineQubit.range(2)\n cirq.testing.assert_implements_consistent_protocols(2 * cirq.X(a) *\n cirq.Y(b))\n cirq.testing.assert_implements_consistent_protocols(1j * cirq.X(a) *\n cirq.Y(b))\n\n\ndef test_bool():\n a = cirq.LineQubit(0)\n assert not bool(cirq.PauliString({}))\n assert bool(cirq.PauliString({a: cirq.X}))\n\n\ndef _pauli_string_matrix_cases():\n q0, q1, q2 = cirq.LineQubit.range(3)\n return (\n (cirq.X(q0) * 2, None, np.array([[0, 2], [2, 0]])),\n (cirq.X(q0) * cirq.Y(q1), (q0,), np.array([[0, 1], [1, 0]])),\n (cirq.X(q0) * cirq.Y(q1), (q1,), np.array([[0, -1j], [1j, 0]])),\n (cirq.X(q0) * cirq.Y(q1), None,\n np.array([[0, 0, 0, -1j], [0, 0, 1j, 0], [0, -1j, 0, 0], [1j, 0, 0,\n 0]])),\n (cirq.X(q0) * cirq.Y(q1), (q0, q1),\n np.array([[0, 0, 0, -1j], [0, 0, 1j, 0], [0, -1j, 0, 0], [1j, 0, 0,\n 0]])),\n (cirq.X(q0) * cirq.Y(q1), (q1, q0),\n np.array([[0, 0, 0, -1j], [0, 0, -1j, 0], [0, 1j, 0, 0], [1j, 0, 0,\n 0]])),\n (cirq.X(q0) * cirq.Y(q1), (q2,), np.eye(2)),\n (cirq.X(q0) * cirq.Y(q1), (q2, q1),\n np.array([[0, -1j, 0, 0], [1j, 0, 0, 0], [0, 0, 0, -1j], [0, 0, 1j,\n 0]])),\n (cirq.X(q0) * cirq.Y(q1), (q2, q0, q1),\n np.array([[0, 0, 0, -1j, 0, 0, 0, 0], [0, 0, 1j, 0, 0, 0, 0, 0],\n [0, -1j, 0, 0, 0, 0, 0, 0], [1j, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, -1j], [0, 0, 0, 0, 0, 0, 1j, 0],\n [0, 0, 0, 0, 0, -1j, 0, 0], [0, 0, 0, 0, 1j, 0, 0, 0]])),\n )\n\n\[email protected]('pauli_string, qubits, expected_matrix',\n _pauli_string_matrix_cases())\ndef test_matrix(pauli_string, qubits, expected_matrix):\n assert np.allclose(pauli_string.matrix(qubits), expected_matrix)\n\n\ndef test_unitary_matrix():\n a, b = cirq.LineQubit.range(2)\n assert not cirq.has_unitary(2 * cirq.X(a) * cirq.Z(b))\n assert cirq.unitary(2 * cirq.X(a) * cirq.Z(b), default=None) is None\n np.testing.assert_allclose(\n cirq.unitary(cirq.X(a) * cirq.Z(b)),\n np.array([\n [0, 0, 1, 0],\n [0, 0, 0, -1],\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n ]))\n np.testing.assert_allclose(\n cirq.unitary(1j * cirq.X(a) * cirq.Z(b)),\n np.array([\n [0, 0, 1j, 0],\n [0, 0, 0, -1j],\n [1j, 0, 0, 0],\n [0, -1j, 0, 0],\n ]))\n\n\ndef test_decompose():\n a, b = cirq.LineQubit.range(2)\n assert cirq.decompose_once(2 * cirq.X(a) * cirq.Z(b), default=None) is None\n assert cirq.decompose_once(1j * cirq.X(a) * cirq.Z(b)) == [\n cirq.GlobalPhaseOperation(1j),\n cirq.X(a), cirq.Z(b)\n ]\n assert cirq.decompose_once(cirq.Y(b) * cirq.Z(a)) == [cirq.Z(a), cirq.Y(b)]\n\n\ndef test_rejects_non_paulis():\n q = cirq.NamedQubit('q')\n with pytest.raises(TypeError):\n _ = cirq.PauliString({q: cirq.S})\n\n\ndef test_cannot_multiply_by_non_paulis():\n q = cirq.NamedQubit('q')\n with pytest.raises(TypeError):\n _ = cirq.X(q) * cirq.Z(q)**0.5\n with pytest.raises(TypeError):\n _ = cirq.Z(q)**0.5 * cirq.X(q)\n with pytest.raises(TypeError):\n _ = cirq.Y(q) * cirq.S(q)\n\n\ndef test_filters_identities():\n q1, q2 = cirq.LineQubit.range(2)\n assert cirq.PauliString({q1: cirq.I, q2: cirq.X}) == \\\n cirq.PauliString({q2: cirq.X})\n\n\ndef test_expectation_from_state_vector_invalid_input():\n q0, q1, q2, q3 = _make_qubits(4)\n ps = cirq.PauliString({q0: cirq.X, 
q1: cirq.Y})\n wf = np.array([1, 0, 0, 0], dtype=np.complex64)\n q_map = {q0: 0, q1: 1}\n\n im_ps = (1j + 1) * ps\n with pytest.raises(NotImplementedError, match='non-Hermitian'):\n im_ps.expectation_from_state_vector(wf, q_map)\n\n with pytest.raises(TypeError, match='dtype'):\n ps.expectation_from_state_vector(np.array([1, 0], dtype=np.int), q_map)\n\n with pytest.raises(TypeError, match='mapping'):\n # noinspection PyTypeChecker\n ps.expectation_from_state_vector(wf, \"bad type\")\n with pytest.raises(TypeError, match='mapping'):\n # noinspection PyTypeChecker\n ps.expectation_from_state_vector(wf, {\"bad key\": 1})\n with pytest.raises(TypeError, match='mapping'):\n # noinspection PyTypeChecker\n ps.expectation_from_state_vector(wf, {q0: \"bad value\"})\n with pytest.raises(ValueError, match='complete'):\n ps.expectation_from_state_vector(wf, {q0: 0})\n with pytest.raises(ValueError, match='complete'):\n ps.expectation_from_state_vector(wf, {q0: 0, q2: 2})\n with pytest.raises(ValueError, match='indices'):\n ps.expectation_from_state_vector(wf, {q0: -1, q1: 1})\n with pytest.raises(ValueError, match='indices'):\n ps.expectation_from_state_vector(wf, {q0: 0, q1: 3})\n with pytest.raises(ValueError, match='indices'):\n ps.expectation_from_state_vector(wf, {q0: 0, q1: 0})\n # Excess keys are ignored.\n _ = ps.expectation_from_state_vector(wf, {q0: 0, q1: 1, q2: 0})\n\n # Incorrectly shaped state_vector input.\n with pytest.raises(ValueError, match='7'):\n ps.expectation_from_state_vector(np.arange(7, dtype=np.complex64),\n q_map)\n q_map_2 = {q0: 0, q1: 1, q2: 2, q3: 3}\n with pytest.raises(ValueError, match='normalized'):\n ps.expectation_from_state_vector(np.arange(16, dtype=np.complex64),\n q_map_2)\n\n # The ambiguous case: Density matrices satisfying L2 normalization.\n rho_or_wf = 0.5 * np.ones((2, 2), dtype=np.complex64)\n _ = ps.expectation_from_state_vector(rho_or_wf, q_map)\n\n wf = np.arange(16, dtype=np.complex64) / np.linalg.norm(np.arange(16))\n with pytest.raises(ValueError, match='shape'):\n ps.expectation_from_state_vector(wf.reshape((16, 1)), q_map_2)\n with pytest.raises(ValueError, match='shape'):\n ps.expectation_from_state_vector(wf.reshape((4, 4, 1)), q_map_2)\n\n\ndef test_expectation_from_state_vector_check_preconditions():\n q0, q1, q2, q3 = _make_qubits(4)\n ps = cirq.PauliString({q0: cirq.X, q1: cirq.Y})\n q_map = {q0: 0, q1: 1, q2: 2, q3: 3}\n\n with pytest.raises(ValueError, match='normalized'):\n ps.expectation_from_state_vector(np.arange(16, dtype=np.complex64),\n q_map)\n\n _ = ps.expectation_from_state_vector(np.arange(16, dtype=np.complex64),\n q_map,\n check_preconditions=False)\n\n\ndef test_expectation_from_state_vector_basis_states():\n q0 = cirq.LineQubit(0)\n x0 = cirq.PauliString({q0: cirq.X})\n q_map = {q0: 0}\n\n np.testing.assert_allclose(x0.expectation_from_state_vector(\n np.array([1, 0], dtype=np.complex), q_map),\n 0,\n atol=1e-7)\n np.testing.assert_allclose(x0.expectation_from_state_vector(\n np.array([0, 1], dtype=np.complex), q_map),\n 0,\n atol=1e-7)\n np.testing.assert_allclose(x0.expectation_from_state_vector(\n np.array([1, 1], dtype=np.complex) / np.sqrt(2), q_map),\n 1,\n atol=1e-7)\n np.testing.assert_allclose(x0.expectation_from_state_vector(\n np.array([1, -1], dtype=np.complex) / np.sqrt(2), q_map),\n -1,\n atol=1e-7)\n\n y0 = cirq.PauliString({q0: cirq.Y})\n np.testing.assert_allclose(y0.expectation_from_state_vector(\n np.array([1, 1j], dtype=np.complex) / np.sqrt(2), q_map),\n 1,\n atol=1e-7)\n 
np.testing.assert_allclose(y0.expectation_from_state_vector(\n np.array([1, -1j], dtype=np.complex) / np.sqrt(2), q_map),\n -1,\n atol=1e-7)\n np.testing.assert_allclose(y0.expectation_from_state_vector(\n np.array([1, 1], dtype=np.complex) / np.sqrt(2), q_map),\n 0,\n atol=1e-7)\n np.testing.assert_allclose(y0.expectation_from_state_vector(\n np.array([1, -1], dtype=np.complex) / np.sqrt(2), q_map),\n 0,\n atol=1e-7)\n\n\ndef test_expectation_from_state_vector_entangled_states():\n q0, q1 = _make_qubits(2)\n z0z1_pauli_map = {q0: cirq.Z, q1: cirq.Z}\n z0z1 = cirq.PauliString(z0z1_pauli_map)\n x0x1_pauli_map = {q0: cirq.X, q1: cirq.X}\n x0x1 = cirq.PauliString(x0x1_pauli_map)\n q_map = {q0: 0, q1: 1}\n wf1 = np.array([0, 1, 1, 0], dtype=np.complex) / np.sqrt(2)\n for state in [wf1, wf1.reshape(2, 2)]:\n np.testing.assert_allclose(\n z0z1.expectation_from_state_vector(state, q_map), -1)\n np.testing.assert_allclose(\n x0x1.expectation_from_state_vector(state, q_map), 1)\n\n wf2 = np.array([1, 0, 0, 1], dtype=np.complex) / np.sqrt(2)\n for state in [wf2, wf2.reshape(2, 2)]:\n np.testing.assert_allclose(\n z0z1.expectation_from_state_vector(state, q_map), 1)\n np.testing.assert_allclose(\n x0x1.expectation_from_state_vector(state, q_map), 1)\n\n wf3 = np.array([1, 1, 1, 1], dtype=np.complex) / 2\n for state in [wf3, wf3.reshape(2, 2)]:\n np.testing.assert_allclose(\n z0z1.expectation_from_state_vector(state, q_map), 0)\n np.testing.assert_allclose(\n x0x1.expectation_from_state_vector(state, q_map), 1)\n\n\ndef test_expectation_from_state_vector_qubit_map():\n q0, q1, q2 = _make_qubits(3)\n z = cirq.PauliString({q0: cirq.Z})\n wf = np.array([0, 1, 0, 1, 0, 0, 0, 0], dtype=np.complex) / np.sqrt(2)\n for state in [wf, wf.reshape(2, 2, 2)]:\n np.testing.assert_allclose(\n z.expectation_from_state_vector(state, {\n q0: 0,\n q1: 1,\n q2: 2\n }), 1)\n np.testing.assert_allclose(\n z.expectation_from_state_vector(state, {\n q0: 0,\n q1: 2,\n q2: 1\n }), 1)\n np.testing.assert_allclose(\n z.expectation_from_state_vector(state, {\n q0: 1,\n q1: 0,\n q2: 2\n }), 0)\n np.testing.assert_allclose(\n z.expectation_from_state_vector(state, {\n q0: 1,\n q1: 2,\n q2: 0\n }), 0)\n np.testing.assert_allclose(\n z.expectation_from_state_vector(state, {\n q0: 2,\n q1: 0,\n q2: 1\n }), -1)\n np.testing.assert_allclose(\n z.expectation_from_state_vector(state, {\n q0: 2,\n q1: 1,\n q2: 0\n }), -1)\n\n\ndef test_pauli_string_expectation_from_state_vector_pure_state():\n qubits = cirq.LineQubit.range(4)\n q_map = {q: i for i, q in enumerate(qubits)}\n\n circuit = cirq.Circuit(\n cirq.X(qubits[1]),\n cirq.H(qubits[2]),\n cirq.X(qubits[3]),\n cirq.H(qubits[3]),\n )\n wf = circuit.final_state_vector(qubit_order=qubits)\n\n z0z1 = cirq.PauliString({qubits[0]: cirq.Z, qubits[1]: cirq.Z})\n z0z2 = cirq.PauliString({qubits[0]: cirq.Z, qubits[2]: cirq.Z})\n z0z3 = cirq.PauliString({qubits[0]: cirq.Z, qubits[3]: cirq.Z})\n z0x1 = cirq.PauliString({qubits[0]: cirq.Z, qubits[1]: cirq.X})\n z1x2 = cirq.PauliString({qubits[1]: cirq.Z, qubits[2]: cirq.X})\n x0z1 = cirq.PauliString({qubits[0]: cirq.X, qubits[1]: cirq.Z})\n x3 = cirq.PauliString({qubits[3]: cirq.X})\n\n for state in [wf, wf.reshape((2, 2, 2, 2))]:\n np.testing.assert_allclose(\n z0z1.expectation_from_state_vector(state, q_map), -1)\n np.testing.assert_allclose(\n z0z2.expectation_from_state_vector(state, q_map), 0)\n np.testing.assert_allclose(\n z0z3.expectation_from_state_vector(state, q_map), 0)\n np.testing.assert_allclose(\n 
z0x1.expectation_from_state_vector(state, q_map), 0)\n np.testing.assert_allclose(\n z1x2.expectation_from_state_vector(state, q_map), -1)\n np.testing.assert_allclose(\n x0z1.expectation_from_state_vector(state, q_map), 0)\n np.testing.assert_allclose(\n x3.expectation_from_state_vector(state, q_map), -1)\n\n\ndef test_pauli_string_expectation_from_state_vector_pure_state_with_coef():\n qs = cirq.LineQubit.range(4)\n q_map = {q: i for i, q in enumerate(qs)}\n\n circuit = cirq.Circuit(\n cirq.X(qs[1]),\n cirq.H(qs[2]),\n cirq.X(qs[3]),\n cirq.H(qs[3]),\n )\n wf = circuit.final_state_vector(qubit_order=qs)\n\n z0z1 = cirq.Z(qs[0]) * cirq.Z(qs[1]) * .123\n z0z2 = cirq.Z(qs[0]) * cirq.Z(qs[2]) * -1\n z1x2 = -cirq.Z(qs[1]) * cirq.X(qs[2])\n\n for state in [wf, wf.reshape((2, 2, 2, 2))]:\n np.testing.assert_allclose(\n z0z1.expectation_from_state_vector(state, q_map), -0.123)\n np.testing.assert_allclose(\n z0z2.expectation_from_state_vector(state, q_map), 0)\n np.testing.assert_allclose(\n z1x2.expectation_from_state_vector(state, q_map), 1)\n\n\ndef test_expectation_from_density_matrix_invalid_input():\n q0, q1, q2, q3 = _make_qubits(4)\n ps = cirq.PauliString({q0: cirq.X, q1: cirq.Y})\n wf = cirq.testing.random_superposition(4)\n rho = np.kron(wf.conjugate().T, wf).reshape(4, 4)\n q_map = {q0: 0, q1: 1}\n\n im_ps = (1j + 1) * ps\n with pytest.raises(NotImplementedError, match='non-Hermitian'):\n im_ps.expectation_from_density_matrix(rho, q_map)\n\n with pytest.raises(TypeError, match='dtype'):\n ps.expectation_from_density_matrix(0.5 * np.eye(2, dtype=np.int), q_map)\n\n with pytest.raises(TypeError, match='mapping'):\n # noinspection PyTypeChecker\n ps.expectation_from_density_matrix(rho, \"bad type\")\n with pytest.raises(TypeError, match='mapping'):\n # noinspection PyTypeChecker\n ps.expectation_from_density_matrix(rho, {\"bad key\": 1})\n with pytest.raises(TypeError, match='mapping'):\n # noinspection PyTypeChecker\n ps.expectation_from_density_matrix(rho, {q0: \"bad value\"})\n with pytest.raises(ValueError, match='complete'):\n ps.expectation_from_density_matrix(rho, {q0: 0})\n with pytest.raises(ValueError, match='complete'):\n ps.expectation_from_density_matrix(rho, {q0: 0, q2: 2})\n with pytest.raises(ValueError, match='indices'):\n ps.expectation_from_density_matrix(rho, {q0: -1, q1: 1})\n with pytest.raises(ValueError, match='indices'):\n ps.expectation_from_density_matrix(rho, {q0: 0, q1: 3})\n with pytest.raises(ValueError, match='indices'):\n ps.expectation_from_density_matrix(rho, {q0: 0, q1: 0})\n # Excess keys are ignored.\n _ = ps.expectation_from_density_matrix(rho, {q0: 0, q1: 1, q2: 0})\n\n with pytest.raises(ValueError, match='hermitian'):\n ps.expectation_from_density_matrix(1j * np.eye(4), q_map)\n with pytest.raises(ValueError, match='trace'):\n ps.expectation_from_density_matrix(np.eye(4, dtype=np.complex64), q_map)\n with pytest.raises(ValueError, match='semidefinite'):\n ps.expectation_from_density_matrix(\n np.array(\n [[1.1, 0, 0, 0], [0, -.1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n dtype=np.complex64), q_map)\n\n # Incorrectly shaped density matrix input.\n with pytest.raises(ValueError, match='shape'):\n ps.expectation_from_density_matrix(np.ones((4, 5), dtype=np.complex64),\n q_map)\n q_map_2 = {q0: 0, q1: 1, q2: 2, q3: 3}\n with pytest.raises(ValueError, match='shape'):\n ps.expectation_from_density_matrix(rho.reshape((4, 4, 1)), q_map_2)\n with pytest.raises(ValueError, match='shape'):\n ps.expectation_from_density_matrix(rho.reshape((-1)), q_map_2)\n\n # 
Correctly shaped state_vectors.\n with pytest.raises(ValueError, match='shape'):\n ps.expectation_from_density_matrix(np.array([1, 0], dtype=np.complex64),\n q_map)\n with pytest.raises(ValueError, match='shape'):\n ps.expectation_from_density_matrix(wf, q_map)\n\n # The ambiguous cases: state_vectors satisfying trace normalization.\n # This also throws an unrelated warning, which is a bug. See #2041.\n rho_or_wf = 0.25 * np.ones((4, 4), dtype=np.complex64)\n _ = ps.expectation_from_density_matrix(rho_or_wf, q_map)\n\n\ndef test_expectation_from_density_matrix_check_preconditions():\n q0, q1 = _make_qubits(2)\n ps = cirq.PauliString({q0: cirq.X, q1: cirq.Y})\n q_map = {q0: 0, q1: 1}\n\n with pytest.raises(ValueError, match='semidefinite'):\n ps.expectation_from_density_matrix(\n np.array(\n [[1.1, 0, 0, 0], [0, -.1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n dtype=np.complex64), q_map)\n\n _ = ps.expectation_from_density_matrix(np.array(\n [[1.1, 0, 0, 0], [0, -.1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n dtype=np.complex64),\n q_map,\n check_preconditions=False)\n\n\ndef test_expectation_from_density_matrix_basis_states():\n q0 = cirq.LineQubit(0)\n x0_pauli_map = {q0: cirq.X}\n x0 = cirq.PauliString(x0_pauli_map)\n q_map = {q0: 0}\n np.testing.assert_allclose(\n x0.expectation_from_density_matrix(\n np.array([[1, 0], [0, 0]], dtype=np.complex), q_map), 0)\n np.testing.assert_allclose(\n x0.expectation_from_density_matrix(\n np.array([[0, 0], [0, 1]], dtype=np.complex), q_map), 0)\n np.testing.assert_allclose(\n x0.expectation_from_density_matrix(\n np.array([[1, 1], [1, 1]], dtype=np.complex) / 2, q_map), 1)\n np.testing.assert_allclose(\n x0.expectation_from_density_matrix(\n np.array([[1, -1], [-1, 1]], dtype=np.complex) / 2, q_map), -1)\n\n\ndef test_expectation_from_density_matrix_entangled_states():\n q0, q1 = _make_qubits(2)\n z0z1_pauli_map = {q0: cirq.Z, q1: cirq.Z}\n z0z1 = cirq.PauliString(z0z1_pauli_map)\n x0x1_pauli_map = {q0: cirq.X, q1: cirq.X}\n x0x1 = cirq.PauliString(x0x1_pauli_map)\n q_map = {q0: 0, q1: 1}\n\n wf1 = np.array([0, 1, 1, 0], dtype=np.complex) / np.sqrt(2)\n rho1 = np.kron(wf1, wf1).reshape(4, 4)\n for state in [rho1, rho1.reshape(2, 2, 2, 2)]:\n np.testing.assert_allclose(\n z0z1.expectation_from_density_matrix(state, q_map), -1)\n np.testing.assert_allclose(\n x0x1.expectation_from_density_matrix(state, q_map), 1)\n\n wf2 = np.array([1, 0, 0, 1], dtype=np.complex) / np.sqrt(2)\n rho2 = np.kron(wf2, wf2).reshape(4, 4)\n for state in [rho2, rho2.reshape(2, 2, 2, 2)]:\n np.testing.assert_allclose(\n z0z1.expectation_from_density_matrix(state, q_map), 1)\n np.testing.assert_allclose(\n x0x1.expectation_from_density_matrix(state, q_map), 1)\n\n wf3 = np.array([1, 1, 1, 1], dtype=np.complex) / 2\n rho3 = np.kron(wf3, wf3).reshape(4, 4)\n for state in [rho3, rho3.reshape(2, 2, 2, 2)]:\n np.testing.assert_allclose(\n z0z1.expectation_from_density_matrix(state, q_map), 0)\n np.testing.assert_allclose(\n x0x1.expectation_from_density_matrix(state, q_map), 1)\n\n\ndef test_expectation_from_density_matrix_qubit_map():\n q0, q1, q2 = _make_qubits(3)\n z = cirq.PauliString({q0: cirq.Z})\n wf = np.array([0, 1, 0, 1, 0, 0, 0, 0], dtype=np.complex) / np.sqrt(2)\n rho = np.kron(wf, wf).reshape(8, 8)\n\n for state in [rho, rho.reshape(2, 2, 2, 2, 2, 2)]:\n np.testing.assert_allclose(\n z.expectation_from_density_matrix(state, {\n q0: 0,\n q1: 1,\n q2: 2\n }), 1)\n np.testing.assert_allclose(\n z.expectation_from_density_matrix(state, {\n q0: 0,\n q1: 2,\n q2: 1\n }), 1)\n 
np.testing.assert_allclose(\n z.expectation_from_density_matrix(state, {\n q0: 1,\n q1: 0,\n q2: 2\n }), 0)\n np.testing.assert_allclose(\n z.expectation_from_density_matrix(state, {\n q0: 1,\n q1: 2,\n q2: 0\n }), 0)\n np.testing.assert_allclose(\n z.expectation_from_density_matrix(state, {\n q0: 2,\n q1: 0,\n q2: 1\n }), -1)\n np.testing.assert_allclose(\n z.expectation_from_density_matrix(state, {\n q0: 2,\n q1: 1,\n q2: 0\n }), -1)\n\n\ndef test_pauli_string_expectation_from_density_matrix_pure_state():\n qubits = cirq.LineQubit.range(4)\n q_map = {q: i for i, q in enumerate(qubits)}\n\n circuit = cirq.Circuit(\n cirq.X(qubits[1]),\n cirq.H(qubits[2]),\n cirq.X(qubits[3]),\n cirq.H(qubits[3]),\n )\n state_vector = circuit.final_state_vector(qubit_order=qubits)\n rho = np.outer(state_vector, np.conj(state_vector))\n\n z0z1 = cirq.PauliString({qubits[0]: cirq.Z, qubits[1]: cirq.Z})\n z0z2 = cirq.PauliString({qubits[0]: cirq.Z, qubits[2]: cirq.Z})\n z0z3 = cirq.PauliString({qubits[0]: cirq.Z, qubits[3]: cirq.Z})\n z0x1 = cirq.PauliString({qubits[0]: cirq.Z, qubits[1]: cirq.X})\n z1x2 = cirq.PauliString({qubits[1]: cirq.Z, qubits[2]: cirq.X})\n x0z1 = cirq.PauliString({qubits[0]: cirq.X, qubits[1]: cirq.Z})\n x3 = cirq.PauliString({qubits[3]: cirq.X})\n\n for state in [rho, rho.reshape((2, 2, 2, 2, 2, 2, 2, 2))]:\n np.testing.assert_allclose(\n z0z1.expectation_from_density_matrix(state, q_map), -1)\n np.testing.assert_allclose(\n z0z2.expectation_from_density_matrix(state, q_map), 0)\n np.testing.assert_allclose(\n z0z3.expectation_from_density_matrix(state, q_map), 0)\n np.testing.assert_allclose(\n z0x1.expectation_from_density_matrix(state, q_map), 0)\n np.testing.assert_allclose(\n z1x2.expectation_from_density_matrix(state, q_map), -1)\n np.testing.assert_allclose(\n x0z1.expectation_from_density_matrix(state, q_map), 0)\n np.testing.assert_allclose(\n x3.expectation_from_density_matrix(state, q_map), -1)\n\n\ndef test_pauli_string_expectation_from_density_matrix_pure_state_with_coef():\n qs = cirq.LineQubit.range(4)\n q_map = {q: i for i, q in enumerate(qs)}\n\n circuit = cirq.Circuit(\n cirq.X(qs[1]),\n cirq.H(qs[2]),\n cirq.X(qs[3]),\n cirq.H(qs[3]),\n )\n state_vector = circuit.final_state_vector(qubit_order=qs)\n rho = np.outer(state_vector, np.conj(state_vector))\n\n z0z1 = cirq.Z(qs[0]) * cirq.Z(qs[1]) * .123\n z0z2 = cirq.Z(qs[0]) * cirq.Z(qs[2]) * -1\n z1x2 = -cirq.Z(qs[1]) * cirq.X(qs[2])\n\n for state in [rho, rho.reshape(2, 2, 2, 2, 2, 2, 2, 2)]:\n np.testing.assert_allclose(\n z0z1.expectation_from_density_matrix(state, q_map), -0.123)\n np.testing.assert_allclose(\n z0z2.expectation_from_density_matrix(state, q_map), 0)\n np.testing.assert_allclose(\n z1x2.expectation_from_density_matrix(state, q_map), 1)\n\n\ndef test_pauli_string_expectation_from_state_vector_mixed_state_linearity():\n n_qubits = 6\n\n state_vector1 = cirq.testing.random_superposition(2**n_qubits)\n state_vector2 = cirq.testing.random_superposition(2**n_qubits)\n rho1 = np.outer(state_vector1, np.conj(state_vector1))\n rho2 = np.outer(state_vector2, np.conj(state_vector2))\n density_matrix = rho1 / 2 + rho2 / 2\n\n qubits = cirq.LineQubit.range(n_qubits)\n q_map = {q: i for i, q in enumerate(qubits)}\n paulis = [cirq.X, cirq.Y, cirq.Z]\n pauli_string = cirq.PauliString(\n {q: np.random.choice(paulis) for q in qubits})\n\n a = pauli_string.expectation_from_state_vector(state_vector1, q_map)\n b = pauli_string.expectation_from_state_vector(state_vector2, q_map)\n c = 
pauli_string.expectation_from_density_matrix(density_matrix, q_map)\n np.testing.assert_allclose(0.5 * (a + b), c)\n\n\ndef test_conjugated_by_normal_gates():\n a = cirq.LineQubit(0)\n\n assert cirq.X(a).conjugated_by(cirq.H(a)) == cirq.Z(a)\n assert cirq.Y(a).conjugated_by(cirq.H(a)) == -cirq.Y(a)\n assert cirq.Z(a).conjugated_by(cirq.H(a)) == cirq.X(a)\n\n assert cirq.X(a).conjugated_by(cirq.S(a)) == -cirq.Y(a)\n assert cirq.Y(a).conjugated_by(cirq.S(a)) == cirq.X(a)\n assert cirq.Z(a).conjugated_by(cirq.S(a)) == cirq.Z(a)\n\n\ndef test_dense():\n a, b, c, d, e = cirq.LineQubit.range(5)\n p = cirq.PauliString([cirq.X(a), cirq.Y(b), cirq.Z(c)])\n assert p.dense([a, b, c, d]) == cirq.DensePauliString('XYZI')\n assert p.dense([d, e, a, b, c]) == cirq.DensePauliString('IIXYZ')\n assert -p.dense([a, b, c, d]) == -cirq.DensePauliString('XYZI')\n\n with pytest.raises(ValueError, match=r'not self.keys\\(\\) <= set\\(qubits\\)'):\n _ = p.dense([a, b])\n with pytest.raises(ValueError, match=r'not self.keys\\(\\) <= set\\(qubits\\)'):\n _ = p.dense([a, b, d])\n\n\ndef test_conjugated_by_incorrectly_powered_cliffords():\n a, b = cirq.LineQubit.range(2)\n p = cirq.PauliString([cirq.X(a), cirq.Z(b)])\n cliffords = [\n cirq.H(a),\n cirq.X(a),\n cirq.Y(a),\n cirq.Z(a),\n cirq.H(a),\n cirq.CNOT(a, b),\n cirq.CZ(a, b),\n cirq.SWAP(a, b),\n cirq.ISWAP(a, b),\n cirq.XX(a, b),\n cirq.YY(a, b),\n cirq.ZZ(a, b),\n ]\n for c in cliffords:\n with pytest.raises(TypeError, match='not a known Clifford'):\n _ = p.conjugated_by(c**0.1)\n with pytest.raises(TypeError, match='not a known Clifford'):\n _ = p.conjugated_by(c**sympy.Symbol('t'))\n\n\ndef test_conjugated_by_global_phase():\n a = cirq.LineQubit(0)\n assert cirq.X(a).conjugated_by(cirq.GlobalPhaseOperation(1j)) == cirq.X(a)\n assert cirq.Z(a).conjugated_by(cirq.GlobalPhaseOperation(\n np.exp(1.1j))) == cirq.Z(a)\n\n class DecomposeGlobal(cirq.Gate):\n\n def num_qubits(self):\n return 1\n\n def _decompose_(self, qubits):\n yield cirq.GlobalPhaseOperation(1j)\n\n assert cirq.X(a).conjugated_by(DecomposeGlobal().on(a)) == cirq.X(a)\n\n\ndef test_conjugated_by_composite_with_disjoint_sub_gates():\n a, b = cirq.LineQubit.range(2)\n\n class DecomposeDisjoint(cirq.Gate):\n\n def num_qubits(self):\n return 2\n\n def _decompose_(self, qubits):\n yield cirq.H(qubits[1])\n\n assert cirq.X(a).conjugated_by(DecomposeDisjoint().on(a, b)) == cirq.X(a)\n assert cirq.X(a).pass_operations_over([DecomposeDisjoint().on(a, b)\n ]) == cirq.X(a)\n\n\ndef test_conjugated_by_clifford_composite():\n\n class UnknownGate(cirq.Gate):\n\n def num_qubits(self) -> int:\n return 4\n\n def _decompose_(self, qubits):\n # Involved.\n yield cirq.SWAP(qubits[0], qubits[1])\n # Uninvolved.\n yield cirq.SWAP(qubits[2], qubits[3])\n\n a, b, c, d = cirq.LineQubit.range(4)\n p = cirq.X(a) * cirq.Z(b)\n u = UnknownGate()\n assert p.conjugated_by(u(a, b, c, d)) == cirq.Z(a) * cirq.X(b)\n\n\ndef test_conjugated_by_move_into_uninvolved():\n a, b, c, d = cirq.LineQubit.range(4)\n p = cirq.X(a) * cirq.Z(b)\n assert p.conjugated_by([\n cirq.SWAP(c, d),\n cirq.SWAP(b, c),\n ]) == cirq.X(a) * cirq.Z(d)\n assert p.conjugated_by([\n cirq.SWAP(b, c),\n cirq.SWAP(c, d),\n ]) == cirq.X(a) * cirq.Z(c)\n\n\ndef test_conjugated_by_common_single_qubit_gates():\n a, b = cirq.LineQubit.range(2)\n\n base_single_qubit_gates = [\n cirq.I,\n cirq.X,\n cirq.Y,\n cirq.Z,\n cirq.X**-0.5,\n cirq.Y**-0.5,\n cirq.Z**-0.5,\n cirq.X**0.5,\n cirq.Y**0.5,\n cirq.Z**0.5,\n cirq.H,\n ]\n single_qubit_gates = [\n g**i for i in range(4) 
for g in base_single_qubit_gates\n ]\n for p in [cirq.X, cirq.Y, cirq.Z]:\n for g in single_qubit_gates:\n assert p.on(a).conjugated_by(g.on(b)) == p.on(a)\n\n actual = cirq.unitary(p.on(a).conjugated_by(g.on(a)))\n u = cirq.unitary(g)\n expected = (np.conj(u.T) @ cirq.unitary(p) @ u)\n assert cirq.allclose_up_to_global_phase(actual, expected, atol=1e-8)\n\n\ndef test_conjugated_by_common_two_qubit_gates():\n\n class OrderSensitiveGate(cirq.Gate):\n\n def num_qubits(self):\n return 2\n\n def _decompose_(self, qubits):\n return [cirq.Y(qubits[0])**-0.5, cirq.CNOT(*qubits)]\n\n a, b, c, d = cirq.LineQubit.range(4)\n two_qubit_gates = [\n cirq.CNOT,\n cirq.CZ,\n cirq.ISWAP,\n cirq.ISWAP**-1,\n cirq.SWAP,\n cirq.XX**0.5,\n cirq.YY**0.5,\n cirq.ZZ**0.5,\n cirq.XX,\n cirq.YY,\n cirq.ZZ,\n cirq.XX**-0.5,\n cirq.YY**-0.5,\n cirq.ZZ**-0.5,\n ]\n two_qubit_gates.extend([\n OrderSensitiveGate(),\n ])\n for p1 in [cirq.I, cirq.X, cirq.Y, cirq.Z]:\n for p2 in [cirq.I, cirq.X, cirq.Y, cirq.Z]:\n pd = cirq.DensePauliString([p1, p2])\n p = pd.sparse()\n for g in two_qubit_gates:\n assert p.conjugated_by(g.on(c, d)) == p\n\n actual = cirq.unitary(p.conjugated_by(g.on(a, b)).dense([a, b]))\n u = cirq.unitary(g)\n expected = (np.conj(u.T) @ cirq.unitary(pd) @ u)\n np.testing.assert_allclose(actual, expected, atol=1e-8)\n\n\ndef test_conjugated_by_ordering():\n\n class OrderSensitiveGate(cirq.Gate):\n\n def num_qubits(self):\n return 2\n\n def _decompose_(self, qubits):\n return [cirq.Y(qubits[0])**-0.5, cirq.CNOT(*qubits)]\n\n a, b = cirq.LineQubit.range(2)\n inp = cirq.Z(b)\n out1 = inp.conjugated_by(OrderSensitiveGate().on(a, b))\n out2 = inp.conjugated_by([cirq.H(a), cirq.CNOT(a, b)])\n out3 = inp.conjugated_by(cirq.CNOT(a, b)).conjugated_by(cirq.H(a))\n assert out1 == out2 == out3 == cirq.X(a) * cirq.Z(b)\n\n\ndef test_pass_operations_over_ordering():\n\n class OrderSensitiveGate(cirq.Gate):\n\n def num_qubits(self):\n return 2\n\n def _decompose_(self, qubits):\n return [cirq.Y(qubits[0])**-0.5, cirq.CNOT(*qubits)]\n\n a, b = cirq.LineQubit.range(2)\n inp = cirq.Z(b)\n out1 = inp.pass_operations_over([OrderSensitiveGate().on(a, b)])\n out2 = inp.pass_operations_over([cirq.CNOT(a, b), cirq.Y(a)**-0.5])\n out3 = inp.pass_operations_over([cirq.CNOT(a, b)\n ]).pass_operations_over([cirq.Y(a)**-0.5])\n assert out1 == out2 == out3 == cirq.X(a) * cirq.Z(b)\n\n\ndef test_pass_operations_over_ordering_reversed():\n\n class OrderSensitiveGate(cirq.Gate):\n\n def num_qubits(self):\n return 2\n\n def _decompose_(self, qubits):\n return [cirq.Y(qubits[0])**-0.5, cirq.CNOT(*qubits)]\n\n a, b = cirq.LineQubit.range(2)\n inp = cirq.X(a) * cirq.Z(b)\n out1 = inp.pass_operations_over([OrderSensitiveGate().on(a, b)],\n after_to_before=True)\n out2 = inp.pass_operations_over(\n [cirq.Y(a)**-0.5, cirq.CNOT(a, b)], after_to_before=True)\n out3 = inp.pass_operations_over([cirq.Y(a)**-0.5],\n after_to_before=True).pass_operations_over(\n [cirq.CNOT(a, b)], after_to_before=True)\n assert out1 == out2 == out3 == cirq.Z(b)\n\n\ndef test_pretty_print():\n a, b, c = cirq.LineQubit.range(3)\n result = cirq.PauliString({a: 'x', b: 'y', c: 'z'})\n\n # Test Jupyter console output from\n class FakePrinter:\n\n def __init__(self):\n self.text_pretty = ''\n\n def text(self, to_print):\n self.text_pretty += to_print\n\n p = FakePrinter()\n result._repr_pretty_(p, False)\n assert p.text_pretty == 'X(0)*Y(1)*Z(2)'\n\n # Test cycle handling\n p = FakePrinter()\n result._repr_pretty_(p, True)\n assert p.text_pretty == 
'cirq.PauliString(...)'\n\n\ndef test_deprecated():\n a = cirq.LineQubit(0)\n state_vector = np.array([1, 1], dtype=np.complex64) / np.sqrt(2)\n with cirq.testing.assert_logs('expectation_from_wavefunction',\n 'expectation_from_state_vector',\n 'deprecated'):\n _ = cirq.PauliString({\n a: 'x'\n }).expectation_from_wavefunction(state_vector, {a: 0})\n\n with cirq.testing.assert_logs('state', 'state_vector', 'deprecated'):\n # pylint: disable=unexpected-keyword-arg,no-value-for-parameter\n _ = cirq.PauliString({\n a: 'x'\n }).expectation_from_state_vector(state=state_vector, qubit_map={a: 0})\n\n\n# pylint: disable=line-too-long\ndef test_circuit_diagram_info():\n a, b, c = cirq.LineQubit.range(3)\n\n assert cirq.circuit_diagram_info(cirq.PauliString(), default=None) is None\n\n cirq.testing.assert_has_diagram(\n cirq.Circuit(\n cirq.PauliString({a: cirq.X}),\n -cirq.PauliString({a: cirq.X}),\n cirq.X(a) * cirq.Z(c),\n 1j * cirq.X(a) * cirq.Y(b),\n -1j * cirq.Y(b),\n 1j**0.5 * cirq.X(a) * cirq.Y(b),\n ),\n \"\"\"\n0: ───PauliString(+X)───PauliString(-X)───PauliString(+X)───PauliString(iX)──────────────────────PauliString((0.707+0.707i)*X)───\n │ │ │\n1: ───────────────────────────────────────┼─────────────────Y─────────────────PauliString(-iY)───Y───────────────────────────────\n │\n2: ───────────────────────────────────────Z──────────────────────────────────────────────────────────────────────────────────────\n \"\"\",\n )\n\n\n# pylint: enable=line-too-long\n\n\ndef test_mutable_pauli_string_equality():\n eq = cirq.testing.EqualsTester()\n a, b, c = cirq.LineQubit.range(3)\n\n eq.add_equality_group(\n cirq.MutablePauliString(),\n cirq.MutablePauliString(),\n cirq.MutablePauliString(1),\n cirq.MutablePauliString(-1, -1),\n cirq.MutablePauliString({a: 0}),\n cirq.MutablePauliString({a: \"I\"}),\n cirq.MutablePauliString({a: cirq.I}),\n cirq.MutablePauliString(cirq.I(a)),\n cirq.MutablePauliString(cirq.I(b)),\n )\n\n eq.add_equality_group(\n cirq.MutablePauliString({a: \"X\"}),\n cirq.MutablePauliString({a: 1}),\n cirq.MutablePauliString({a: cirq.X}),\n cirq.MutablePauliString(cirq.X(a)),\n )\n\n eq.add_equality_group(\n cirq.MutablePauliString({b: \"X\"}),\n cirq.MutablePauliString({b: 1}),\n cirq.MutablePauliString({b: cirq.X}),\n cirq.MutablePauliString(cirq.X(b)),\n cirq.MutablePauliString(-1j, cirq.Y(b), cirq.Z(b)),\n )\n\n eq.add_equality_group(\n cirq.MutablePauliString({\n a: \"X\",\n b: \"Y\",\n c: \"Z\"\n }),\n cirq.MutablePauliString({\n a: 1,\n b: 2,\n c: 3\n }),\n cirq.MutablePauliString({\n a: cirq.X,\n b: cirq.Y,\n c: cirq.Z\n }),\n cirq.MutablePauliString(cirq.X(a) * cirq.Y(b) * cirq.Z(c)),\n cirq.MutablePauliString(\n cirq.MutablePauliString(cirq.X(a) * cirq.Y(b) * cirq.Z(c))),\n cirq.MutablePauliString(\n cirq.MutablePauliString(cirq.X(a), cirq.Y(b), cirq.Z(c))),\n )\n\n # Cross-type equality. 
(Can't use tester because hashability differs.)\n p = cirq.X(a) * cirq.Y(b)\n assert p == cirq.MutablePauliString(p)\n\n with pytest.raises(TypeError, match=\"cirq.PAULI_STRING_LIKE\"):\n _ = cirq.MutablePauliString(\"test\")\n with pytest.raises(TypeError, match=\"cirq.PAULI_STRING_LIKE\"):\n # noinspection PyTypeChecker\n _ = cirq.MutablePauliString(object())\n\n\ndef test_mutable_pauli_string_inplace_multiplication():\n a, b, c = cirq.LineQubit.range(3)\n p = cirq.MutablePauliString()\n original = p\n\n # Support for *=.\n p *= cirq.X(a)\n assert p == cirq.X(a) and p is original\n\n # Bad operand.\n with pytest.raises(TypeError, match=\"cirq.PAULI_STRING_LIKE\"):\n p.inplace_left_multiply_by([cirq.X(a), cirq.CZ(a, b), cirq.Z(b)])\n with pytest.raises(TypeError, match=\"cirq.PAULI_STRING_LIKE\"):\n p.inplace_left_multiply_by(cirq.CZ(a, b))\n with pytest.raises(TypeError, match=\"cirq.PAULI_STRING_LIKE\"):\n p.inplace_right_multiply_by([cirq.X(a), cirq.CZ(a, b), cirq.Z(b)])\n with pytest.raises(TypeError, match=\"cirq.PAULI_STRING_LIKE\"):\n p.inplace_right_multiply_by(cirq.CZ(a, b))\n assert p == cirq.X(a) and p is original\n\n # Correct order of *=.\n p *= cirq.Y(a)\n assert p == -1j * cirq.Z(a) and p is original\n p *= cirq.Y(a)\n assert p == cirq.X(a) and p is original\n\n # Correct order of inplace_left_multiply_by.\n p.inplace_left_multiply_by(cirq.Y(a))\n assert p == 1j * cirq.Z(a) and p is original\n p.inplace_left_multiply_by(cirq.Y(a))\n assert p == cirq.X(a) and p is original\n\n # Correct order of inplace_right_multiply_by.\n p.inplace_right_multiply_by(cirq.Y(a))\n assert p == -1j * cirq.Z(a) and p is original\n p.inplace_right_multiply_by(cirq.Y(a))\n assert p == cirq.X(a) and p is original\n\n # Multi-qubit case.\n p *= -1 * cirq.X(a) * cirq.X(b)\n assert p == -cirq.X(b) and p is original\n\n # Support for PAULI_STRING_LIKE\n p.inplace_left_multiply_by({c: 'Z'})\n assert p == -cirq.X(b) * cirq.Z(c) and p is original\n p.inplace_right_multiply_by({c: 'Z'})\n assert p == -cirq.X(b) and p is original\n\n\ndef test_mutable_frozen_copy():\n a, b, c = cirq.LineQubit.range(3)\n p = -cirq.X(a) * cirq.Y(b) * cirq.Z(c)\n\n pf = p.frozen()\n pm = p.mutable_copy()\n pmm = pm.mutable_copy()\n pmf = pm.frozen()\n\n assert isinstance(p, cirq.PauliString)\n assert isinstance(pf, cirq.PauliString)\n assert isinstance(pm, cirq.MutablePauliString)\n assert isinstance(pmm, cirq.MutablePauliString)\n assert isinstance(pmf, cirq.PauliString)\n\n assert p is pf\n assert pm is not pmm\n assert p == pf == pm == pmm == pmf\n\n\ndef test_mutable_pauli_string_inplace_conjugate_by():\n a, b, c = cirq.LineQubit.range(3)\n p = cirq.MutablePauliString(cirq.X(a))\n\n class NoOp(cirq.Operation):\n\n def __init__(self, *qubits):\n self._qubits = qubits\n\n @property\n def qubits(self):\n # coverage: ignore\n return self._qubits\n\n def with_qubits(self, *new_qubits):\n raise NotImplementedError()\n\n def _decompose_(self):\n return []\n\n # No-ops\n p2 = p.inplace_after(cirq.GlobalPhaseOperation(1j))\n assert p2 is p and p == cirq.X(a)\n p2 = p.inplace_after(NoOp(a, b))\n assert p2 is p and p == cirq.X(a)\n\n # After H and back.\n p2 = p.inplace_after(cirq.H(a))\n assert p2 is p and p == cirq.Z(a)\n p2 = p.inplace_before(cirq.H(a))\n assert p2 is p and p == cirq.X(a)\n\n # After S and back.\n p2 = p.inplace_after(cirq.S(a))\n assert p2 is p and p == cirq.Y(a)\n p2 = p.inplace_before(cirq.S(a))\n assert p2 is p and p == cirq.X(a)\n\n # Before S and back.\n p2 = p.inplace_before(cirq.S(a))\n assert p2 is p and 
p == -cirq.Y(a)\n p2 = p.inplace_after(cirq.S(a))\n assert p2 is p and p == cirq.X(a)\n\n # After inverse S and back.\n p2 = p.inplace_after(cirq.S(a)**-1)\n assert p2 is p and p == -cirq.Y(a)\n p2 = p.inplace_before(cirq.S(a)**-1)\n assert p2 is p and p == cirq.X(a)\n\n # On other qubit.\n p2 = p.inplace_after(cirq.S(b))\n assert p2 is p and p == cirq.X(a)\n\n # Two qubit operation.\n p2 = p.inplace_after(cirq.CZ(a, b))\n assert p2 is p and p == cirq.X(a) * cirq.Z(b)\n p2 = p.inplace_after(cirq.CZ(a, c))\n assert p2 is p and p == cirq.X(a) * cirq.Z(b) * cirq.Z(c)\n p2 = p.inplace_after(cirq.H(b))\n assert p2 is p and p == cirq.X(a) * cirq.X(b) * cirq.Z(c)\n p2 = p.inplace_after(cirq.CNOT(b, c))\n assert p2 is p and p == -cirq.X(a) * cirq.Y(b) * cirq.Y(c)\n\n # Inverted interactions.\n p = cirq.MutablePauliString(cirq.X(a))\n p2 = p.inplace_after(\n cirq.PauliInteractionGate(cirq.Y, True, cirq.Z, False).on(a, b))\n assert p2 is p and p == cirq.X(a) * cirq.Z(b)\n p = cirq.MutablePauliString(cirq.X(a))\n p2 = p.inplace_after(\n cirq.PauliInteractionGate(cirq.X, False, cirq.Z, True).on(a, b))\n assert p2 is p and p == cirq.X(a)\n p = cirq.MutablePauliString(cirq.X(a))\n p2 = p.inplace_after(\n cirq.PauliInteractionGate(cirq.Y, False, cirq.Z, True).on(a, b))\n assert p2 is p and p == -cirq.X(a) * cirq.Z(b)\n p = cirq.MutablePauliString(cirq.X(a))\n p2 = p.inplace_after(\n cirq.PauliInteractionGate(cirq.Z, False, cirq.Y, True).on(a, b))\n assert p2 is p and p == -cirq.X(a) * cirq.Y(b)\n p = cirq.MutablePauliString(cirq.X(a))\n p2 = p.inplace_after(\n cirq.PauliInteractionGate(cirq.Z, True, cirq.X, False).on(a, b))\n assert p2 is p and p == cirq.X(a) * cirq.X(b)\n p = cirq.MutablePauliString(cirq.X(a))\n p2 = p.inplace_after(\n cirq.PauliInteractionGate(cirq.Z, True, cirq.Y, False).on(a, b))\n assert p2 is p and p == cirq.X(a) * cirq.Y(b)\n\n\ndef test_after_before_vs_conjugate_by():\n a, b, c = cirq.LineQubit.range(3)\n p = cirq.X(a) * cirq.Y(b) * cirq.Z(c)\n assert p.before(cirq.S(b)) == p.conjugated_by(cirq.S(b))\n assert p.after(cirq.S(b)**-1) == p.conjugated_by(cirq.S(b))\n assert p.before(cirq.CNOT(a, b)) == p.conjugated_by(cirq.CNOT(\n a, b)) == (p.after(cirq.CNOT(a, b)))\n\n\ndef test_mutable_pauli_string_dict_functionality():\n a, b, c = cirq.LineQubit.range(3)\n p = cirq.MutablePauliString()\n with pytest.raises(KeyError):\n _ = p[a]\n assert p.get(a) is None\n assert a not in p\n assert not bool(p)\n p[a] = cirq.X\n assert bool(p)\n assert a in p\n assert p[a] == cirq.X\n\n p[a] = \"Y\"\n assert p[a] == cirq.Y\n p[a] = 3\n assert p[a] == cirq.Z\n p[a] = \"I\"\n assert a not in p\n p[a] = 0\n assert a not in p\n\n assert len(p) == 0\n p[b] = \"Y\"\n p[a] = \"X\"\n p[c] = \"Z\"\n assert len(p) == 3\n assert list(iter(p)) == [b, a, c]\n assert list(p.values()) == [cirq.Y, cirq.X, cirq.Z]\n assert list(p.keys()) == [b, a, c]\n assert p.keys() == {a, b, c}\n assert p.keys() ^ {c} == {a, b}\n\n del p[b]\n assert b not in p\n\n\ndef test_mutable_pauli_string_text():\n p = cirq.MutablePauliString(\n cirq.X(cirq.LineQubit(0)) * cirq.Y(cirq.LineQubit(1)))\n assert str(cirq.MutablePauliString()) == \"mutable I\"\n assert str(p) == \"mutable X(0)*Y(1)\"\n cirq.testing.assert_equivalent_repr(p)\n\n\ndef test_mutable_pauli_string_mul():\n a, b = cirq.LineQubit.range(2)\n p = cirq.X(a).mutable_copy()\n q = cirq.Y(b).mutable_copy()\n pq = cirq.X(a) * cirq.Y(b)\n assert p * q == pq\n assert isinstance(p * q, cirq.PauliString)\n assert 2 * p == cirq.X(a) * 2 == p * 2\n assert isinstance(p * 2, 
cirq.PauliString)\n assert isinstance(2 * p, cirq.PauliString)\n\n\ndef test_mutable_can_override_mul():\n\n class LMul:\n\n def __mul__(self, other):\n return \"Yay!\"\n\n class RMul:\n\n def __rmul__(self, other):\n return \"Yay!\"\n\n assert cirq.MutablePauliString() * RMul() == \"Yay!\"\n assert LMul() * cirq.MutablePauliString() == \"Yay!\"\n\n\ndef test_coefficient_precision():\n qs = cirq.LineQubit.range(4 * 10**3)\n r = cirq.MutablePauliString({q: cirq.X for q in qs})\n r2 = cirq.MutablePauliString({q: cirq.Y for q in qs})\n r2 *= r\n assert r2.coefficient == 1\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array", "numpy.random.choice", "numpy.zeros", "numpy.ones", "numpy.exp", "numpy.eye", "numpy.arange", "numpy.conj", "numpy.sqrt", "numpy.diag", "numpy.kron" ] ]
katchke/FaceAI
[ "30d8217460217529224606106860489e9de47704" ]
[ "face_detect_multi_scale.py" ]
[ "__author__ = 'Rahul'\n# -*- coding: utf-8 -*-\n\n# Detect faces from an image and crop them\n\nimport numpy as np\n\nimport os\nimport cv2\nimport multiprocessing as mp\nimport urllib2\n\n\nclass Image(object):\n def __init__(self, path):\n # Uncomment if image path is on disk\n # self.filename = path.split(\"/\")[-1]\n # self.img = np.array(cv2.imread(path), dtype=np.uint8) # read image\n\n # If image path is a URL\n resource = urllib2.urlopen(path)\n self.img = cv2.imdecode(np.asarray(bytearray(resource.read()), dtype=np.uint8), -1)\n\n self.faces = []\n\n if self.img:\n self.size = self.img.shape # get image shape (height,width,channels)\n\n def detect_faces(self):\n face_detector = CascadedDetector(\n cascade_fn=os.path.dirname(\n os.path.dirname(__file__)) + '/data/OpenCV/data/haarcascades/haarcascade_frontalface_default.xml')\n faces = face_detector.detect(self.img)\n\n face_detector = CascadedDetector(\n cascade_fn=os.path.dirname(\n os.path.dirname(__file__)) + '/data/OpenCV/data/haarcascades/haarcascade_frontalface_alt.xml')\n\n if len(faces) == 0:\n faces = face_detector.detect(self.img)\n else:\n new_faces = []\n for each_face in face_detector.detect(self.img):\n face_found = False\n\n for each_existing_face in faces:\n if check_overlap(each_face, each_existing_face) or check_overlap(each_existing_face, each_face):\n face_found = True\n break\n\n if not face_found:\n new_faces.append(each_face)\n\n if len(new_faces):\n faces = np.append(faces, new_faces, 0)\n\n for f_num, face_dimensions in enumerate(faces):\n x0, y0, x1, y1 = face_dimensions\n face_img = self.img[y0 - 0.7 * y0:y1 + 0.1 * y1, x0 - 0.1 * x0:x1 + 0.1 * x1]\n face = Face(face_img, face_dimensions, f_num)\n self.faces.append(face)\n\n # Uncomment if image of face is to be saved on disk\n # face.save_image(self.filename)\n\n def show_image(self):\n cv2.imshow(\"img\", self.img)\n cv2.waitKey(0)\n\n\nclass Face(object):\n def __init__(self, img, dimensions, f_num=None):\n self.img = img\n self.dimensions = dimensions\n self.f_num = f_num\n\n def save_image(self, filename):\n extension = filename.split('.')[-1]\n\n if not os.path.exists(\"faces/{}\".format(filename)):\n os.makedirs(\"faces/{}\".format(filename))\n\n cv2.imwrite(\"faces/{}/{}.{}\".format(filename, str(self.f_num), extension), self.img)\n\n\nclass CascadedDetector(object):\n def __init__(self, cascade_fn=\"./cascades/haarcascade_frontalface_alt2.xml\", scale_factor=1.2, min_neighbors=5,\n min_size=(30, 30)):\n if not os.path.exists(cascade_fn):\n raise IOError(\"No valid cascade found for path {}\".format(cascade_fn))\n\n self.cascade = cv2.CascadeClassifier(cascade_fn)\n self.scale_factor = scale_factor\n self.min_neighbors = min_neighbors\n self.min_size = min_size\n\n def detect(self, src):\n if np.ndim(src) == 3:\n src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n\n src = cv2.equalizeHist(src)\n rects = self.cascade.detectMultiScale(src, scaleFactor=self.scale_factor, minNeighbors=self.min_neighbors,\n minSize=self.min_size)\n if not len(rects):\n return np.ndarray((0,))\n\n rects[:, 2:] += rects[:, :2]\n\n return rects\n\n\ndef check_overlap(face1, face2):\n # Check if two faces are overlapping\n a0, b0, a1, b1 = face1\n x0, y0, x1, y1 = face2\n overlap = False\n\n if x1 >= a0 >= x0 and y1 >= b0 >= y0:\n overlap = True\n elif x1 >= a1 >= x0 and y1 >= b1 >= y0:\n overlap = True\n elif x1 >= a0 >= x0 and y1 >= b1 >= y0:\n overlap = True\n elif x1 >= a1 >= x0 and y1 >= b0 >= y0:\n overlap = True\n\n return overlap\n\n\ndef start_detection(filename):\n 
image = Image('images/{}'.format(filename))\n image.detect_faces()\n\n\nif __name__ == '__main__':\n n_cores = 4\n\n # Resize dimensions\n resize_width = 150\n\n # Path to images\n images = os.listdir('images')\n\n if '.DS_Store' in images:\n images.remove('.DS_Store')\n \n pool = mp.Pool(processes=n_cores)\n pool.map(start_detection, images)\n" ]
[ [ "numpy.ndarray", "numpy.ndim", "numpy.append" ] ]
ianwonilkim/gct634-2020
[ "e671cb4949fa1d3944937e01ace9e4fdd2b914b0" ]
[ "hw3/train.py" ]
[ "import argparse\nfrom datetime import datetime\nfrom pathlib import Path\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch as th\nimport torch.nn as nn\nfrom torch.nn.utils import clip_grad_norm_\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom model import Transcriber, Transcriber_CRNN, Transcriber_ONF, Transcriber_RNN\nfrom dataset import MAESTRO_small, allocate_batch\nfrom evaluate import evaluate\nfrom constants import HOP_SIZE\n\n\ndef cycle(iterable):\n while True:\n for item in iterable:\n yield item\n\n\ndef train(model_type, logdir, batch_size, iterations, validation_interval, sequence_length, learning_rate, weight_decay, cnn_unit, fc_unit, debug=False, save_midi=False):\n if logdir is None:\n logdir = Path('runs') / ('exp_' + datetime.now().strftime('%y%m%d-%H%M%S'))\n Path(logdir).mkdir(parents=True, exist_ok=True)\n\n if sequence_length % HOP_SIZE != 0:\n adj_length = sequence_length // HOP_SIZE * HOP_SIZE\n print(f'sequence_length: {sequence_length} is not divide by {HOP_SIZE}.\\n \\\n adjusted into : {adj_length}')\n sequence_length = adj_length\n\n if debug:\n dataset = MAESTRO_small(groups=['debug'], sequence_length=sequence_length, hop_size=HOP_SIZE, random_sample=True)\n valid_dataset = dataset\n iterations = 100\n validation_interval = 10\n else:\n dataset = MAESTRO_small(groups=['train'], sequence_length=sequence_length, hop_size=HOP_SIZE, random_sample=True)\n valid_dataset = MAESTRO_small(groups=['validation'], sequence_length=sequence_length, hop_size=HOP_SIZE, random_sample=False)\n loader = DataLoader(dataset, batch_size, shuffle=True)\n\n device = th.device('cuda') if th.cuda.is_available() else th.device('cpu')\n\n if model_type == 'baseline':\n model = Transcriber(cnn_unit=cnn_unit, fc_unit=fc_unit)\n elif model_type == 'rnn':\n model = Transcriber_RNN(cnn_unit=cnn_unit, fc_unit=fc_unit)\n elif model_type == 'crnn':\n model = Transcriber_CRNN(cnn_unit=cnn_unit, fc_unit=fc_unit)\n elif model_type == 'ONF':\n model = Transcriber_ONF(cnn_unit=cnn_unit, fc_unit=fc_unit)\n optimizer = th.optim.Adam(model.parameters(), learning_rate, weight_decay=weight_decay)\n scheduler = StepLR(optimizer, step_size=1000, gamma=0.98)\n criterion = nn.BCEWithLogitsLoss()\n\n model = model.to(device)\n\n loop = tqdm(range(1, iterations+1))\n \n for step, batch in zip(loop, cycle(loader)):\n optimizer.zero_grad()\n batch = allocate_batch(batch, device)\n\n frame_logit, onset_logit = model(batch['audio'])\n frame_loss = criterion(frame_logit, batch['frame'])\n onset_loss = criterion(onset_logit, batch['onset'])\n loss = onset_loss + frame_loss\n\n loss.mean().backward()\n\n for parameter in model.parameters():\n clip_grad_norm_([parameter], 3.0)\n\n optimizer.step()\n scheduler.step()\n loop.set_postfix_str(\"loss: {:.3e}\".format(loss.mean()))\n\n if step % validation_interval == 0:\n model.eval()\n with th.no_grad():\n loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)\n metrics = defaultdict(list)\n for batch in loader:\n batch_results = evaluate(model, batch, device)\n \n for key, value in batch_results.items():\n metrics[key].extend(value)\n print('')\n for key, value in metrics.items():\n if key[-2:] == 'f1' or 'loss' in key:\n print(f'{key:27} : {np.mean(value):.4f}')\n model.train()\n\n th.save({'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'step' : step,\n 'cnn_unit' : cnn_unit,\n 'fc_unit' : fc_unit\n },\n 
Path(logdir) / f'model-{step}.pt')\n del dataset, valid_dataset\n \n test_dataset = MAESTRO_small(groups=['test'], hop_size=HOP_SIZE, random_sample=False)\n model.eval()\n with th.no_grad():\n loader = DataLoader(test_dataset, batch_size=1, shuffle=False)\n metrics = defaultdict(list)\n for batch in loader:\n batch_results = evaluate(model, batch, device, save=save_midi, save_path=logdir)\n for key, value in batch_results.items():\n metrics[key].extend(value)\n print('')\n for key, value in metrics.items():\n if key[-2:] == 'f1' or 'loss' in key:\n print(f'{key} : {np.mean(value)}')\n\n with open(Path(logdir) / 'results.txt', 'w') as f:\n for key, values in metrics.items():\n _, category, name = key.split('/')\n metric_string = f'{category:>32} {name:26}: {np.mean(values):.3f} +- {np.std(values):.3f}'\n print(metric_string)\n f.write(metric_string + '\\n')\n \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_type', default='baseline', type=str)\n parser.add_argument('--logdir', default=None, type=str)\n parser.add_argument('-v', '--sequence_length', default=102400, type=int)\n parser.add_argument('-lr', '--learning_rate', default=6e-4, type=float)\n parser.add_argument('-b', '--batch_size', default=16, type=int)\n parser.add_argument('-i', '--iterations', default=10000, type=int)\n parser.add_argument('-vi', '--validation_interval', default=1000, type=int)\n parser.add_argument('-wd', '--weight_decay', default=0)\n parser.add_argument('-cnn', '--cnn_unit', default=48, type=int)\n parser.add_argument('-fc', '--fc_unit', default=256, type=int)\n parser.add_argument('--save_midi', action='store_true')\n parser.add_argument('--debug', action='store_true')\n args = parser.parse_args()\n\n train(**vars(parser.parse_args()))" ]
[ [ "torch.device", "torch.optim.lr_scheduler.StepLR", "torch.nn.utils.clip_grad_norm_", "torch.no_grad", "numpy.mean", "numpy.std", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.nn.BCEWithLogitsLoss" ] ]
AriJordan/IML_project
[ "5f303931a2c2c4de907b84dfe1047afe6e0cf221" ]
[ "iml2020/examples/utilities/plot_helpers.py" ]
[ "import numpy as np\nfrom scipy import linalg\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n# from plt import cm\nfrom matplotlib import rc\nimport time\nimport IPython\n\n# rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})\n# rc('text', usetex=True)\n\n\ndef make_meshgrid(x, y, h=.02):\n \"\"\"Create a mesh of points to plot in\n\n Parameters\n ----------\n x: data to base x-axis meshgrid on\n y: data to base y-axis meshgrid on\n h: stepsize for meshgrid, optional\n\n Returns\n -------\n xx, yy : ndarray\n \"\"\"\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy\n\n\ndef plot_ellipse(mean, covar, color='red', ax=None):\n if ax is None:\n fig, ax = plt.subplots(1, 1)\n\n v, w = linalg.eigh(covar)\n v = 2. * np.sqrt(2.) * np.sqrt(v)\n u = w[0] / linalg.norm(w[0])\n\n # Plot an ellipse to show the Gaussian component\n angle = np.arctan(u[1] / u[0])\n angle = 180. * angle / np.pi # convert to degrees\n\n ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)\n ell.set_clip_box(ax.bbox)\n ell.set_alpha(0.5)\n ax.add_artist(ell)\n\n\ndef process_plot(fig, options=dict()):\n if 'x_label' in options.keys():\n fig.set_xlabel(options['x_label'])\n if 'y_label' in options.keys():\n fig.set_ylabel(options['y_label'])\n if 'x_lim' in options.keys():\n fig.set_ylim(options['x_lim'])\n if 'y_lim' in options.keys():\n fig.set_ylim(options['y_lim'])\n if 'title' in options.keys():\n fig.set_title(options['title'])\n if 'legend' in options.keys():\n if options['legend']:\n fig.legend(loc=options.get('legend_loc', 'best'))\n\n\ndef plot_data(X, Y, fig=None, options=dict()):\n # fig_data = plt.figure()\n if fig is None:\n fig = plt.subplot(111)\n fig.plot(X, Y, options.get('marker', 'b*'), \n label=options.get('label', 'Raw data'),\n linewidth=options.get('linewidth', 2),\n fillstyle=options.get('fillstyle', 'full'),\n ms=options.get('size', 8))\n process_plot(fig, options)\n\n\ndef plot_fit(X, w, fig=None, options=dict()):\n if fig is None:\n fig = plt.subplot(111)\n\n x_min = np.min(X[:, -2])\n x_max = np.max(X[:, -2])\n dim = w.size - 1\n x_plot = np.reshape(np.linspace(x_min, x_max, 100), [-1, 1])\n x1_plot = np.ones_like(x_plot)\n for d in range(dim):\n x1_plot = np.concatenate((np.power(x_plot, 1 + d), x1_plot), axis=1)\n\n y_plot = np.dot(x1_plot, w)\n fig.plot(x_plot, y_plot, 'r-', label=options.get('label', 'Regression fit'))\n process_plot(fig, options)\n\n\ndef plot_contours(ax, clf, xx, yy, **params):\n \"\"\"Plot the decision boundaries for a classifier.\n\n Parameters\n ----------\n ax: matplotlib axes object\n clf: a classifier\n xx: meshgrid ndarray\n yy: meshgrid ndarray\n params: dictionary of params to pass to contourf, optional\n \"\"\"\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n out = ax.contourf(xx, yy, Z, **params)\n return out\n\n \ndef plot_contour(X, Y, w_trajectory, func, fig=None, options=dict()):\n if fig is None:\n fig = plt.subplot(111)\n\n w_min = np.min(w_trajectory, axis=0)\n w_max = np.max(w_trajectory, axis=0)\n w_range = np.abs(w_max - w_min)\n w_range[w_range == 0] = 3\n w_end = w_trajectory[-1, :]\n\n [xg, yg] = np.meshgrid(np.linspace(w_end[0] - 2 * w_range[0], w_end[0] + 2 * w_range[0], 100),\n np.linspace(w_end[1] - 2 * w_range[1], w_end[1] + 2 * w_range[1], 100))\n wg = np.concatenate((xg.reshape([-1, 1]), yg.reshape([-1, 1])), axis=1)\n zg = 
np.zeros((wg.shape[0], 1))\n j = 0\n for wj in wg:\n zg[j] = func(wj, X, Y)\n j += 1\n zg = np.reshape(zg, newshape=xg.shape)\n fig.contour(xg, yg, zg)\n\n process_plot(fig, options)\n\n\ndef plot_arrow(w_old, w_new, fig=None, options=dict()):\n if fig is None:\n fig = plt.subplot(111)\n length = w_new - w_old\n arrow = fig.arrow(w_old[0], w_old[1], length[0], length[1],\n width=max(0.01, min(0.1, np.sqrt(length[0] ** 2 + length[1] ** 2))), length_includes_head=False,\n fc='r')\n return arrow\n\n\ndef plot_trajectory(w_trajectory, fig=None, options=dict()):\n if fig is None:\n fig = plt.subplot(111)\n traj_plot, = fig.plot(w_trajectory[:, 0], w_trajectory[:, 1], 'r.-', alpha=0.5)\n return traj_plot\n\n\ndef linear_regression_progression(X, Y, w_trajectory, index_trajectory, func, contourplot=None, dataplot=None, options=dict()):\n # Plot Contour\n if contourplot is not None:\n contour_opts = options.get('contour_opts', dict())\n plot_contour(X, Y, w_trajectory, func, fig=contourplot, options=contour_opts)\n\n # Plot raw Data\n if dataplot is not None:\n data_opts = options.get('data_opts', dict())\n plot_data(X[:, -2], Y, fig=dataplot, options=data_opts)\n\n for idx in range(len(w_trajectory) - 1):\n if 'traj_plot' in locals():\n traj_plot.remove() # Remove previous trajectory.\n if 'arrow' in locals():\n arrow.remove() # Remove previous arrow.\n if dataplot is not None:\n while len(dataplot.lines) > 1:\n dataplot.lines.pop(-1) # Remove previous line.\n\n if contourplot is not None:\n # Plot gradient arrow.\n contour_opts = options.get('contour_opts', dict())\n arrow = plot_arrow(w_trajectory[idx, :], w_trajectory[idx + 1, :], fig=contourplot, options=contour_opts)\n\n # Plot weight trajectory.\n traj_plot = plot_trajectory(w_trajectory[:(idx + 1), :], fig=contourplot, options=contour_opts)\n\n if dataplot is not None:\n # Plot best fit line. \n data_opts = options.get('data_opts', dict())\n x_idx = index_trajectory[idx]\n #if x_idx.size == 1:\n if data_opts.get('sgd_point', False):\n opt = {'marker': 'mX', 'label': 'Current SGD point', 'size': 15}\n plot_data(X[x_idx, -2], Y[x_idx], fig=dataplot, options=opt)\n\n plot_fit(X, w_trajectory[idx, :], fig=dataplot, options=data_opts)\n\n IPython.display.clear_output(wait=True)\n IPython.display.display(plt.gcf())\n\n # Wait for human visualization. \n time.sleep(0.5)\n # input(\"Press Enter to continue...\")\n\n plt.close()\n\n\ndef kernelized_regression_progression(X, Xtr, Ytr, alpha_trayectory, index_trajectory, regressor, fig=None, options=dict()):\n if fig is None:\n fig = plt.subplot(111)\n\n n_iter = index_trajectory.shape[0]\n for it in range(n_iter):\n alpha = alpha_trayectory[it, :]\n while len(fig.lines) > 2:\n fig.lines.pop(-1)\n\n x_idx = index_trajectory[it]\n if options.get('sgd_point', False):\n opt = {'marker': 'mX', 'label': 'Current SGD point', 'size': 15}\n plot_data(Xtr[x_idx], Ytr[x_idx], fig=fig, options=opt)\n\n regressor.set_weights(alpha)\n Yhat = regressor.predict(X)\n fig.plot(X, Yhat, options.get('marker', 'g-'), label=options.get('label', 'Kernel'))\n IPython.display.clear_output(wait=True)\n IPython.display.display(plt.gcf())\n\n process_plot(fig, options)\n # Wait for human visualization. 
\n time.sleep(0.1)\n\n plt.close()\n\n\ndef classification_progression(X, Y, w_trajectory, index_trajectory, classifier, contour_plot=None, error_plot=None, options=dict()):\n if contour_plot is not None:\n contour_opts = options.get('contour_opts', dict())\n\n if error_plot is not None:\n error_opts = options.get('error_opts', dict())\n current_error_line, = error_plot.plot([], [], error_opts.get('marker', 'g*-'), label='Current Loss') \n train_error_line, = error_plot.plot([], [], error_opts.get('marker', 'r*-'), label='Train Loss') \n test_error_line, = error_plot.plot([], [], error_opts.get('marker', 'b*-'), label='Test Loss')\n\n process_plot(contour_plot, contour_opts)\n\n min_x = np.min(X[:, 0])\n max_x = np.max(X[:, 0])\n min_y = np.min(X[:, 1])\n max_y = np.max(X[:, 1])\n n_points = options.get('n_points', 20)\n [xg, yg] = np.meshgrid(np.linspace(min_x, max_x, n_points),\n np.linspace(min_y, max_y, n_points))\n\n x1g = np.concatenate((xg.reshape([-1, 1]),\n yg.reshape([-1, 1]),\n np.ones_like(xg).reshape([-1, 1])),\n axis=1)\n\n n_iter = index_trajectory.shape[0]\n\n current_idx = []\n current_loss = []\n\n train_loss = []\n test_loss = []\n test_idx = []\n\n # error_plot.set_xlim([0, n_iter])\n # error_plot.\n for it in range(n_iter):\n if contour_plot is not None:\n while len(contour_plot.lines) > contour_opts.get('n_classes', 2):\n contour_plot.lines.pop(-1)\n\n if 'contour' in locals():\n for c in contour.collections:\n c.remove()\n\n x_idx = index_trajectory[it]\n if contour_opts.get('sgd_point', False):\n opt = {'marker': 'mX', 'label': 'Current SGD point', 'size': 15}\n plot_data(classifier._Xtr[x_idx, 0], classifier._Xtr[x_idx, 1], fig=contour_plot, options=opt)\n\n w = w_trajectory[it, :]\n classifier.set_weights(w)\n zg = classifier.predict(x1g) # Replace this by func call\n contour = contour_plot.contourf(xg, yg, np.reshape(zg, newshape=xg.shape), alpha=0.3,\n cmap=mpl.cm.jet, vmin=0, vmax=1) # colors=('blue', 'red'))\n contour_plot.set_yticklabels([])\n if it == 0 and not classifier.structured: \n m = plt.cm.ScalarMappable(cmap=mpl.cm.jet)\n m.set_array(zg)\n# m.set_alpha(0.3)\n m.set_clim(0., 1.)\n cbar = plt.colorbar(m, ax=contour_plot, boundaries=np.linspace(0, 1, 6), alpha=0.3)\n cbar.set_label('P(y=1)', rotation=270, labelpad=20)\n\n if error_plot is not None:\n w = w_trajectory[it, :] \n current_idx.append(it)\n current_loss.append(classifier.loss(w, index_trajectory[it]))\n current_error_line.set_data(current_idx, current_loss)\n\n if (it % error_opts.get('epoch', 1)) == 0:\n test_idx.append(it)\n test_loss.append(classifier.test_loss(w))\n train_loss.append(classifier.loss(w))\n\n test_error_line.set_data(test_idx, test_loss)\n train_error_line.set_data(test_idx, train_loss)\n \n error_plot.relim()\n error_plot.autoscale()\n error_plot.legend(loc='upper right')\n error_plot.set_ylabel('Loss')\n error_plot.set_xlabel('Num Iter')\n\n plt.draw()\n\n IPython.display.clear_output(wait=True)\n IPython.display.display(plt.gcf())\n\n time.sleep(0.1)\n\n plt.close()\n\n\ndef plot_classification_boundaries(X, classifier, fig=None, options=dict()):\n if fig is None:\n fig = plt.subplot(111)\n min_x = np.min(X[:, 0])\n max_x = np.max(X[:, 0])\n min_y = np.min(X[:, 1])\n max_y = np.max(X[:, 1])\n n_points = options.get('n_points', 20)\n [xg, yg] = np.meshgrid(np.linspace(min_x, max_x, n_points),\n np.linspace(min_y, max_y, n_points))\n\n x1g = np.concatenate((xg.reshape([-1, 1]),\n yg.reshape([-1, 1]),\n np.ones_like(xg).reshape([-1, 1])),\n axis=1)\n\n Zg = 
classifier.predict(x1g)\n contour = fig.contourf(xg, yg, np.reshape(Zg, newshape=xg.shape), alpha=0.3,\n cmap=mpl.cm.jet) # colors=('blue', 'red'))\n cb = plt.colorbar(contour)\n" ]
[ [ "numpy.ones_like", "numpy.dot", "scipy.linalg.eigh", "numpy.min", "matplotlib.patches.Ellipse", "matplotlib.pyplot.draw", "matplotlib.pyplot.gcf", "numpy.max", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.subplots", "numpy.arange", "numpy.sqrt", "matplotlib.pyplot.subplot", "numpy.reshape", "numpy.zeros", "matplotlib.pyplot.close", "numpy.arctan", "numpy.power", "matplotlib.pyplot.cm.ScalarMappable", "scipy.linalg.norm", "numpy.abs", "numpy.linspace" ] ]
sophiakrix/bio2bel
[ "0db93f0f130f6d746692cef2caa36b5c63185a7a" ]
[ "src/bio2bel/sources/biogrid.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"This script downloads and parses BioGRID data and maps the interaction types to BEL.\"\"\"\n\nimport os\nimport pandas as pd\n\nfrom bio2bel.utils import ensure_path\nimport pybel.dsl\nfrom pybel import BELGraph\n\nSEP = '\\t'\n\nBIOGRID2BEL_MAPPER = {\n # increases\n 'synthetic genetic interaction defined by inequality': 'increases',\n 'additive genetic interaction defined by inequality': 'increases',\n\n # decreases\n 'suppressive genetic interaction defined by inequality': 'decreases',\n\n # association\n 'direct interaction': 'association',\n 'physical association': 'association',\n 'colocalization': 'association',\n 'association': 'association',\n}\n\nBIOGRID2BEL_FUNCTION_MAPPER = {\n 'direct interaction': '',\n 'suppressive genetic interaction defined by inequality': 'geneAbundance',\n 'physical association': '',\n 'colocalization': 'location',\n 'synthetic genetic interaction defined by inequality': 'geneAbundance',\n 'association': '',\n 'additive genetic interaction defined by inequality': 'geneAbundance'\n}\n\nMODULE_NAME = 'biogrid'\nURL = 'https://downloads.thebiogrid.org/File/BioGRID/Release-Archive/BIOGRID-3.5.183/BIOGRID-ALL-3.5.183.mitab.zip'\n\n\ndef _load_file(module_name: str = MODULE_NAME, url: str = URL) -> str:\n \"\"\"Load the file from the URL and place it into the bio2bel_sophia directory.\n\n :param module_name: name of module (database)\n :param url: URL to file from database\n :return: path of saved database file\n \"\"\"\n\n return ensure_path(prefix=module_name, url=url)\n\n\ndef _get_my_df() -> pd.DataFrame:\n \"\"\"Get my dataframe.\"\"\"\n path = _load_file()\n df = pd.read_csv(path)\n return df\n\ndef _get_sample_df() -> pd.DataFrame:\n \"\"\"Get sample dataframe of biogrid.\n\n :return:\n \"\"\"\n\n\ndef get_bel() -> BELGraph:\n df = _get_my_df()\n graph = BELGraph(name='intact')\n for _, row in df.iterrows():\n _add_my_row(graph, row)\n return graph\n\n\nfrom protmapper.uniprot_client import get_mnemonic\n\n\ndef _add_my_row(graph: BELGraph, row) -> None:\n relation = row['relation']\n source_uniprot_id = row['source']\n target_uniprot_id = row['target']\n\n pubmed_ids = row['pubmed_ids']\n pubmed_ids = pubmed_ids.split('|')\n\n source = pybel.dsl.Protein(\n namespace='uniprot',\n identifier=source_uniprot_id,\n name=get_mnemonic(source_uniprot_id),\n )\n target = pybel.dsl.Protein(\n namespace='uniprot',\n identifier=target_uniprot_id,\n name=get_mnemonic(target_uniprot_id),\n )\n\n for pubmed_id in pubmed_ids:\n if relation == 'deubiquitination':\n target_ub = target.with_variants(\n pybel.dsl.ProteinModification('Ub')\n )\n graph.add_decreases(\n source,\n target_ub,\n citation=pubmed_id,\n evidence='From intact',\n )\n elif relation == 'ubiqutination':\n target_ub = target.with_variants(\n pybel.dsl.ProteinModification('Ub')\n )\n graph.add_increases(\n source,\n target_ub,\n citation=...,\n evidence='From intact',\n )\n\n elif relation == 'degratation':\n graph.add_decreases(\n source,\n target,\n citation=...,\n evidence='From intact',\n )\n\n elif relation == 'activates':\n graph.add_increases(\n source,\n target,\n ...,\n object_modifier=pybel.dsl.activity(),\n )\n elif relation == 'co-expressed':\n graph.add_correlation(\n pybel.dsl.Rna(\n namespace='uniprot',\n identifier=source_uniprot_id,\n name=get_mnemonic(source_uniprot_id),\n ),\n pybel.dsl.Rna(\n namespace='uniprot',\n identifier=target_uniprot_id,\n name=get_mnemonic(target_uniprot_id),\n ),\n annotations=dict(\n cell_line={'HEK2': True}\n ),\n 
)\n\ndef preprocess_biogrid():\n _load_file(module_name=MODULE_NAME, url=URL)\n\n" ]
[ [ "pandas.read_csv" ] ]
imec-int/federated-learning-lib
[ "1b2dc964de01cd23f357edbce7527ec1bcfc2cd3" ]
[ "lib/ibmfl/aggregator/fusion/spahm_fusion_handler.py" ]
[ "\"\"\"\nLicensed Materials - Property of IBM\nRestricted Materials of IBM\n20190891\n© Copyright IBM Corp. 2021 All Rights Reserved.\n\"\"\"\n\"\"\"\nModule to where fusion algorithms are implemented.\n\"\"\"\nimport logging\nimport numpy as np\nfrom scipy.optimize import linear_sum_assignment\n\nfrom ibmfl.model.model_update import ModelUpdate\nfrom ibmfl.aggregator.fusion.fusion_handler import FusionHandler\nfrom ibmfl.exceptions import GlobalTrainingException\n\nlogger = logging.getLogger(__name__)\n\n\nclass SPAHMFusionHandler(FusionHandler):\n \"\"\"\n Class for SPAHM aggregation of exponential family models.\n The method is described here: https://arxiv.org/abs/1911.00218\n\n This method supports any model of the exponential family class\n \"\"\"\n\n def __init__(self, hyperparams, protocol_handler,\n fl_model=None, data_handler=None, **kwargs):\n \"\"\"\n Initializes an SPAHMFusionHandler object with provided fl_model,\n data_handler and hyperparams.\n\n :param hyperparams: Hyperparameters used for SPAHM training. \\\n The five hyperparameters used are: \\\n 1. sigma: `float` (default 1.0) Determines how far the local \\\n model neurons are allowed from the global model. A bigger value \\\n results in more matching and hence a smaller global model. \\\n 2. sigma0: `float` (default 1.0) Defines the standard-deviation \\\n of the global network neurons. Acts as a regularizer. \\\n 3. gamma: `float` (default 1.0) Indian Buffet Process parameter \\\n controlling the expected number of features present in each \\\n observation. \\\n 4. iters: `int` (default 10) How many iterations of randomly \\\n initialized matching-unmatching procedure is to be performed \\\n before finalizing the solution \\\n 5. optimize_hyperparams: `bool` (default: True) Should SPAHM \\\n optimize the provided hyperparameters or not?\n :type hyperparams: `dict`\n :param protocol_handler: Protocol handler used for handling learning \\\n algorithm's request for communication.\n :type protocol_handler: `ProtoHandler`\n :param fl_model: model to be trained\n :type fl_model: `model.FLModel`\n :param data_handler: data handler that will be used to obtain data\n :type data_handler: `DataHandler`\n :param kwargs: Additional arguments to initialize a fusion handler.\n :type kwargs: `dict\n \"\"\"\n super().__init__(hyperparams, protocol_handler,\n data_handler, fl_model, **kwargs)\n\n self.name = \"SPAHM\"\n self.params_global = hyperparams.get('global', {})\n self.params_local = hyperparams.get('local', None)\n\n if self.perc_quorum != 1.:\n raise GlobalTrainingException('Quorum specified is less than required value of 1') \n\n self.rounds = self.params_global.get('rounds', 1)\n self.termination_accuracy = \\\n self.params_global.get('termination_accuracy', float(\"inf\"))\n self.sigma = self.params_global.get('sigma', 1.0)\n self.sigma0 = self.params_global.get('sigma0', 1.0)\n self.gamma = self.params_global.get('gamma', 1.0)\n self.iters = self.params_global.get('iters', 10)\n self.optimize_hyperparams = \\\n self.params_global.get('optimize_hyperparams', True)\n\n self._local_weights = None\n self.curr_round = 0\n self.score = -1\n if fl_model is None:\n self.model_update = None\n else:\n self.model_update = fl_model.get_model_update()\n\n def start_global_training(self):\n \"\"\"\n Starts global federated learning training process.\n \"\"\"\n self.curr_round = 0\n\n while not self.reach_termination_criteria(self.curr_round):\n\n lst_parties = self.get_registered_parties()\n lst_payload = 
self.__prepare_payload__(lst_parties)\n\n lst_replies = self.query_parties(lst_payload, lst_parties)\n\n # Collect all model updates for fusion:\n global_weights = self.fusion_collected_responses(lst_replies)\n self.model_update = ModelUpdate(weights=global_weights)\n\n # Update model if we are maintaining one\n if self.fl_model is not None:\n self.fl_model.update_model(self.model_update)\n\n self.curr_round += 1\n self.save_current_state()\n\n def __prepare_payload__(self, lst_parties):\n \"\"\"\n Prepares payload for each individual local model\n\n :return: payload for each client\n :rtype: `list`\n \"\"\"\n\n lst_payload = []\n\n # Create custom payload for each local model\n for i in range(len(lst_parties)):\n party_model_update = ModelUpdate(weights=self._local_weights[i]) \\\n if self._local_weights is not None else self.model_update\n\n payload = {\n 'hyperparams': {'local': self.params_local},\n 'model_update': party_model_update\n }\n lst_payload.append(payload)\n\n return lst_payload\n\n def reach_termination_criteria(self, curr_round):\n \"\"\"\n Returns True when termination criteria has been reached, otherwise\n returns False.\n Termination criteria is reached when the number of rounds run reaches\n the one provided as global rounds hyperparameter.\n If a `DataHandler` has been provided and a targeted accuracy has been\n given in the list of hyperparameters, early termination is verified.\n\n :param curr_round: Number of global rounds that already run\n :type curr_round: `int`\n :return: boolean\n :rtype: `boolean`\n \"\"\"\n \n if curr_round >= self.rounds:\n logger.info('Reached maximum global rounds. Finish training :) ')\n return True\n\n return self.terminate_with_metrics(curr_round)\n\n def get_global_model(self):\n \"\"\"\n Returns last model_update\n\n :return: model_update\n :rtype: `ModelUpdate`\n \"\"\"\n return self.model_update\n\n def fusion_collected_responses(self, lst_model_updates):\n \"\"\"\n Receives a list of model updates, where a model update is of the type\n `ModelUpdate`, using the weighs included in each model_update, it\n finds the mean.\n The averaged weights is stored in self.model_update\n as type `ModelUpdate`.\n\n :param lst_model_updates: List of model updates of type `ModelUpdate` to be averaged.\n :type lst_model_updates: `list`\n :return: None\n \"\"\"\n\n n_models = len(lst_model_updates)\n weights = [\n np.array(update.get('weights')) for update in lst_model_updates\n ]\n\n if n_models == 0:\n raise GlobalTrainingException('No weights available for SPAHM')\n\n for weight in weights:\n if None in weight or weight is None:\n return\n\n if n_models == 1:\n global_weights = weights[0]\n self._local_weights = weights\n else:\n global_weights, _, _, assignment = self.match_local_atoms(\n local_atoms=weights, sigma=self.sigma, sigma0=self.sigma0,\n gamma=self.gamma, it=self.iters,\n optimize_hyper=self.optimize_hyperparams)\n\n self._local_weights = [\n self.__build_init__(global_weights, assignment, j)\n for j in range(n_models)\n ]\n\n global_weights = np.array(global_weights).tolist()\n return global_weights\n\n @staticmethod\n def compute_cost(global_atoms, atoms_j, sigma, sigma0, mu0,\n popularity_counts, gamma, J):\n \"\"\"\n Computes the full cost to be used by Hungarian algorithm.\n Refer equation (9) in the paper\n \"\"\"\n\n Lj = atoms_j.shape[0]\n counts = np.array(popularity_counts)\n sigma_ratio = sigma0 / sigma\n denum_match = np.outer(counts + 1, sigma0) + sigma\n param_cost = []\n for l in range(Lj):\n cost_match = ((sigma_ratio * 
(atoms_j[l] + global_atoms) ** 2 +\n 2 * mu0 * (atoms_j[l] + global_atoms)\n ) / denum_match).sum(axis=1)\n param_cost.append(cost_match)\n\n denum_no_match = np.outer(counts, sigma0) + sigma\n cost_no_match = (\n (sigma_ratio * global_atoms ** 2 + 2 * mu0 * global_atoms) /\n denum_no_match).sum(axis=1)\n\n sigma_cost = (np.log(denum_no_match) - np.log(denum_match)).sum(axis=1)\n mu_cost = (\n np.outer(counts, mu0 ** 2) / denum_no_match -\n np.outer(counts + 1, mu0 ** 2) / denum_match\n ).sum(axis=1)\n counts = np.minimum(counts, 10) # truncation of prior counts influence\n param_cost = (\n np.array(param_cost) - cost_no_match + sigma_cost +\n mu_cost + 2 * np.log(counts / (J - counts))\n )\n\n # Nonparametric cost\n L = global_atoms.shape[0]\n max_added = min(Lj, max(700 - L, 1))\n # max_added = Lj\n nonparam_cost = (\n (sigma_ratio * atoms_j ** 2 + 2 * mu0 * atoms_j - mu0 ** 2) /\n (sigma0 + sigma)).sum(axis=1)\n nonparam_cost = np.outer(nonparam_cost, np.ones(max_added))\n cost_pois = 2 * np.log(np.arange(1, max_added + 1))\n nonparam_cost -= cost_pois\n nonparam_cost += 2 * np.log(gamma / J)\n\n # sigma penalty\n nonparam_cost += np.log(sigma).sum() - np.log(sigma0 + sigma).sum()\n\n full_cost = np.hstack((param_cost, nonparam_cost))\n return full_cost\n\n def matching_upd_j(self, atoms_j, global_atoms, global_atoms_squared,\n sigma, sigma0, mu0, popularity_counts, gamma, J):\n \"\"\"\n Computes cost [Equation 9] and solves the linear assignment problem\n using hungarian algorithm\n \"\"\"\n\n L = global_atoms.shape[0]\n\n full_cost = self.compute_cost(global_atoms, atoms_j, sigma, sigma0,\n mu0, popularity_counts, gamma, J)\n\n row_ind, col_ind = linear_sum_assignment(-full_cost)\n assignment_j = []\n new_L = L\n\n for l, i in zip(row_ind, col_ind):\n if i < L:\n popularity_counts[i] += 1\n assignment_j.append(i)\n global_atoms[i] += atoms_j[l]\n global_atoms_squared[i] += atoms_j[l] ** 2\n else: # new neuron\n popularity_counts += [1]\n assignment_j.append(new_L)\n new_L += 1\n global_atoms = np.vstack((global_atoms, atoms_j[l]))\n global_atoms_squared = np.vstack(\n (global_atoms_squared, atoms_j[l] ** 2)\n )\n\n return (\n global_atoms, global_atoms_squared,\n popularity_counts, assignment_j\n )\n\n @staticmethod\n def objective(global_atoms, popularity_counts, sigma, sigma0, mu0):\n \"\"\" Computes the full objective function \"\"\"\n\n popularity_counts = np.copy(popularity_counts)\n obj_denum = np.outer(popularity_counts, sigma0) + sigma\n obj_num = ((sigma0 / sigma) * global_atoms ** 2 +\n 2 * mu0 * global_atoms -\n np.outer(popularity_counts, mu0 ** 2))\n obj = (obj_num / obj_denum - np.log(obj_denum)).sum()\n return obj\n\n @staticmethod\n def hyperparameters(global_atoms, global_atoms_squared, popularity_counts):\n \"\"\"\n Estimates the hyperparameters mu0, sigma, and sigma0\n \"\"\"\n\n popularity_counts = np.copy(popularity_counts)\n mean_atoms = global_atoms / popularity_counts.reshape(-1, 1)\n mu0 = mean_atoms.mean(axis=0)\n sigma = (\n global_atoms_squared -\n (global_atoms ** 2) / popularity_counts.reshape(-1, 1)\n )\n sigma = sigma.sum(axis=0) / (\n popularity_counts.sum() - len(popularity_counts))\n sigma = np.maximum(sigma, 1e-10)\n sigma0 = ((mean_atoms - mu0) ** 2).mean(axis=0)\n sigma0 = (sigma0 - sigma * ((1 / popularity_counts).sum()) /\n len(popularity_counts))\n sigma0 = np.maximum(sigma0, 1e-10)\n return mu0, sigma, sigma0\n\n def match_local_atoms(self, local_atoms, sigma, sigma0,\n gamma, it, optimize_hyper=True):\n \"\"\"\n Estimates the global atoms 
given the local atoms along with the\n hyperparameters.\n \"\"\"\n\n J = len(local_atoms)\n D = local_atoms[0].shape[1]\n group_order = sorted(range(J), key=lambda x: -local_atoms[x].shape[0])\n\n sigma = np.ones(D) * sigma\n sigma0 = np.ones(D) * sigma0\n total_atoms = sum([atoms_j.shape[0] for atoms_j in local_atoms])\n mu0 = sum(\n [atoms_j.sum(axis=0) for atoms_j in local_atoms]\n ) / total_atoms\n logger.info(f'SPAHM: Init mu0 estimate mean is {mu0.mean()}')\n\n global_atoms = np.copy(local_atoms[group_order[0]])\n global_atoms_squared = np.copy(local_atoms[group_order[0]] ** 2)\n\n popularity_counts = [1] * global_atoms.shape[0]\n\n assignment = [[] for _ in range(J)]\n\n assignment[group_order[0]] = list(range(global_atoms.shape[0]))\n\n # Initialize\n for j in group_order[1:]:\n (\n global_atoms, global_atoms_squared,\n popularity_counts, assignment_j) = self.matching_upd_j(\n local_atoms[j], global_atoms, global_atoms_squared, sigma,\n sigma0, mu0, popularity_counts, gamma, J)\n assignment[j] = assignment_j\n\n if optimize_hyper:\n mu0, sigma, sigma0 = self.hyperparameters(global_atoms,\n global_atoms_squared,\n popularity_counts)\n logger.info(f'SPAHM: Init Sigma mean estimate is {sigma.mean()}; '\n f'sigma0 is {sigma0.mean()}; '\n f'mu0 is {mu0.mean()}')\n\n logger.info('Init objective (without prior) is %f; '\n 'number of global atoms is %d' %\n (self.objective(global_atoms, popularity_counts, sigma,\n sigma0, mu0), global_atoms.shape[0]))\n\n # Iterate over groups\n for iteration in range(it):\n random_order = np.random.permutation(J)\n for j in random_order: # random_order:\n to_delete = []\n # Remove j\n Lj = len(assignment[j])\n for l, i in sorted(\n zip(range(Lj), assignment[j]), key=lambda x: -x[1]):\n\n popularity_counts[i] -= 1\n if popularity_counts[i] == 0:\n del popularity_counts[i]\n to_delete.append(i)\n for j_clean in range(J):\n for idx, l_ind in enumerate(assignment[j_clean]):\n if i < l_ind and j_clean != j:\n assignment[j_clean][idx] -= 1\n elif i == l_ind and j_clean != j:\n logger.warning('SPAHM : weird unmatching')\n else:\n global_atoms[i] = global_atoms[i] - local_atoms[j][l]\n global_atoms_squared[i] = global_atoms_squared[i] - \\\n local_atoms[j][l] ** 2\n\n global_atoms = np.delete(global_atoms, to_delete, axis=0)\n global_atoms_squared = np.delete(global_atoms_squared,\n to_delete, axis=0)\n\n # Match j\n (\n global_atoms, global_atoms_squared,\n popularity_counts, assignment_j) = self.matching_upd_j(\n local_atoms[j], global_atoms, global_atoms_squared,\n sigma, sigma0, mu0, popularity_counts, gamma, J)\n\n assignment[j] = assignment_j\n\n if optimize_hyper:\n mu0, sigma, sigma0 = self.hyperparameters(global_atoms,\n global_atoms_squared,\n popularity_counts)\n logger.info(f'Sigma mean estimate is {sigma.mean()}; '\n f'sigma0 is {sigma0.mean()}; '\n f'mu0 is {mu0.mean()}')\n\n logger.info('Matching iteration %d' % iteration)\n logger.info('Objective (without prior) at iteration %d is %f; '\n 'number of global atoms is %d' %\n (iteration,\n self.objective(global_atoms, popularity_counts,\n sigma, sigma0, mu0),\n global_atoms.shape[0]))\n\n logger.info(f'Number of global atoms is {global_atoms.shape[0]}, '\n f'gamma {gamma}')\n\n map_out = (mu0 * sigma + global_atoms * sigma0) / (\n np.outer(popularity_counts, sigma0) + sigma)\n return map_out, popularity_counts, (\n mu0.mean(), sigma.mean(), sigma0.mean()), assignment\n\n @staticmethod\n def __build_init__(hungarian_weights, assignments, j):\n \"\"\"\n Create local weights from the matched global 
weights\n\n :param hungarian_weights: Global network weights\n :param assignments: Assignment matrix mapping local to global neurons\n :param j: Network index for which updated local weights are required\n\n :return: local network weights\n :rtype: list of list\n \"\"\"\n\n global_to_local_asgnmt = assignments[j]\n local_params = [hungarian_weights[k] for k in global_to_local_asgnmt]\n return local_params\n\n def get_current_metrics(self):\n \"\"\"Returns metrics pertaining to current state of fusion handler\n\n :return: metrics\n :rtype: `dict`\n \"\"\"\n fh_metrics = dict()\n fh_metrics['rounds'] = self.rounds\n fh_metrics['curr_round'] = self.curr_round\n fh_metrics['score'] = self.score \n #fh_metrics['model_update'] = self.model_update\n return fh_metrics\n" ]
[ [ "numpy.array", "numpy.delete", "numpy.log", "numpy.minimum", "numpy.copy", "numpy.ones", "numpy.random.permutation", "scipy.optimize.linear_sum_assignment", "numpy.arange", "numpy.outer", "numpy.hstack", "numpy.vstack", "numpy.maximum" ] ]
tyang92/TextGAN-PyTorch
[ "8d6de6e67af13899c00d3ac4e00591718be58f04" ]
[ "instructor/real_data/relgan_instructor.py" ]
[ "# -*- coding: utf-8 -*-\n# @Author : William\n# @Project : TextGAN-william\n# @FileName : relgan_instructor.py\n# @Time : Created at 2019-04-25\n# @Blog : http://zhiweil.ml/\n# @Description : \n# Copyrights (C) 2018. All Rights Reserved.\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom tqdm import tqdm\n\nimport config as cfg\nfrom instructor.real_data.instructor import BasicInstructor\nfrom models.RelGAN_D import RelGAN_D\nfrom models.RelGAN_G import RelGAN_G\nfrom utils.helpers import get_fixed_temperature, get_losses\n\n\nclass RelGANInstructor(BasicInstructor):\n def __init__(self, opt):\n super(RelGANInstructor, self).__init__(opt)\n\n # generator, discriminator\n self.gen = RelGAN_G(cfg.mem_slots, cfg.num_heads, cfg.head_size, cfg.gen_embed_dim, cfg.gen_hidden_dim,\n cfg.vocab_size, cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA)\n self.dis = RelGAN_D(cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.vocab_size, cfg.padding_idx,\n gpu=cfg.CUDA)\n self.init_model()\n\n # Optimizer\n self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)\n self.gen_adv_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_adv_lr)\n self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)\n\n def _run(self):\n # ===PRE-TRAINING (GENERATOR)===\n if not cfg.gen_pretrain:\n self.log.info('Starting Generator MLE Training...')\n self.pretrain_generator(cfg.MLE_train_epoch)\n if cfg.if_save and not cfg.if_test:\n torch.save(self.gen.state_dict(), cfg.pretrained_gen_path)\n print('Save pretrain_generator: {}'.format(cfg.pretrained_gen_path))\n\n # # ===ADVERSARIAL TRAINING===\n self.log.info('Starting Adversarial Training...')\n progress = tqdm(range(cfg.ADV_train_epoch))\n for adv_epoch in progress:\n self.sig.update()\n if self.sig.adv_sig:\n g_loss = self.adv_train_generator(cfg.ADV_g_step) # Generator\n d_loss = self.adv_train_discriminator(cfg.ADV_d_step) # Discriminator\n self.update_temperature(adv_epoch, cfg.ADV_train_epoch) # update temperature\n\n progress.set_description(\n 'g_loss: %.4f, d_loss: %.4f, temperature: %.4f' % (g_loss, d_loss, self.gen.temperature))\n\n # TEST\n if adv_epoch % cfg.adv_log_step == 0:\n self.log.info('[ADV] epoch %d: g_loss: %.4f, d_loss: %.4f, %s' % (\n adv_epoch, g_loss, d_loss, self.cal_metrics(fmt_str=True)))\n\n if cfg.if_save and not cfg.if_test:\n self._save('ADV', adv_epoch)\n else:\n self.log.info('>>> Stop by adv_signal! 
Finishing adversarial training...')\n progress.close()\n break\n\n def _test(self):\n print('>>> Begin test...')\n\n self._run()\n pass\n\n def pretrain_generator(self, epochs):\n \"\"\"\n Max Likelihood Pre-training for the generator\n \"\"\"\n for epoch in range(epochs):\n self.sig.update()\n if self.sig.pre_sig:\n # ===Train===\n pre_loss = self.train_gen_epoch(self.gen, self.train_data.loader, self.mle_criterion, self.gen_opt)\n\n # ===Test===\n if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:\n self.log.info('[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (\n epoch, pre_loss, self.cal_metrics(fmt_str=True)))\n\n if cfg.if_save and not cfg.if_test:\n self._save('MLE', epoch)\n else:\n self.log.info('>>> Stop by pre signal, skip to adversarial training...')\n break\n\n def adv_train_generator(self, g_step):\n total_loss = 0\n for step in range(g_step):\n real_samples = self.train_data.random_batch()['target']\n gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)\n if cfg.CUDA:\n real_samples, gen_samples = real_samples.cuda(), gen_samples.cuda()\n real_samples = F.one_hot(real_samples, cfg.vocab_size).float()\n\n # ===Train===\n d_out_real = self.dis(real_samples)\n d_out_fake = self.dis(gen_samples)\n g_loss, _ = get_losses(d_out_real, d_out_fake, cfg.loss_type)\n\n self.optimize(self.gen_adv_opt, g_loss, self.gen)\n total_loss += g_loss.item()\n\n return total_loss / g_step if g_step != 0 else 0\n\n def adv_train_discriminator(self, d_step):\n total_loss = 0\n for step in range(d_step):\n real_samples = self.train_data.random_batch()['target']\n gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)\n if cfg.CUDA:\n real_samples, gen_samples = real_samples.cuda(), gen_samples.cuda()\n real_samples = F.one_hot(real_samples, cfg.vocab_size).float()\n\n # ===Train===\n d_out_real = self.dis(real_samples)\n d_out_fake = self.dis(gen_samples)\n _, d_loss = get_losses(d_out_real, d_out_fake, cfg.loss_type)\n\n self.optimize(self.dis_opt, d_loss, self.dis)\n total_loss += d_loss.item()\n\n return total_loss / d_step if d_step != 0 else 0\n\n def update_temperature(self, i, N):\n self.gen.temperature = get_fixed_temperature(cfg.temperature, i, N, cfg.temp_adpt)\n\n @staticmethod\n def optimize(opt, loss, model=None, retain_graph=False):\n opt.zero_grad()\n loss.backward(retain_graph=retain_graph)\n if model is not None:\n torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip_norm)\n opt.step()\n" ]
[ [ "torch.nn.functional.one_hot" ] ]
HiroIshida/PyMCubes
[ "8308f654dc48d5f9451158cbcf01fa0d2419c656" ]
[ "test_smoothing.py" ]
[ "\n\nimport pytest\n\nimport numpy as np\n\nimport mcubes\n\n\ndef test_sphere():\n\n # Create sphere with radius 25 centered at (50, 50, 50)\n x, y, z = np.mgrid[:100, :100, :100]\n levelset = np.sqrt((x - 50)**2 + (y - 50)**2 + (z - 50)**2) - 25\n\n # vertices, triangles = mcubes.marching_cubes(levelset, 0)\n # mcubes.export_obj(vertices, triangles, 'sphere1.obj')\n\n binary_levelset = levelset > 0\n smoothed_levelset = mcubes.smooth(\n binary_levelset,\n method='constrained',\n max_iters=500,\n rel_tol=1e-4\n )\n\n vertices, _ = mcubes.marching_cubes(smoothed_levelset, 0.0)\n\n # Check all vertices have same distance to (50, 50, 50)\n dist = np.sqrt(np.sum((vertices - [50, 50, 50])**2, axis=1))\n\n assert dist.min() > 24.5 and dist.max() < 25.5\n assert np.all(np.abs(smoothed_levelset - levelset) < 1)\n assert np.all((smoothed_levelset > 0) == binary_levelset)\n\n\ndef test_gaussian_smoothing():\n\n # Create sphere with radius 25 centered at (50, 50, 50)\n x, y, z = np.mgrid[:100, :100, :100]\n levelset = np.sqrt((x - 50)**2 + (y - 50)**2 + (z - 50)**2) - 25\n\n binary_levelset = levelset > 0\n smoothed_levelset = mcubes.smooth(\n binary_levelset,\n method='gaussian',\n sigma=3\n )\n\n vertices, _ = mcubes.marching_cubes(smoothed_levelset, 0.0)\n\n # Check all vertices have same distance to (50, 50, 50)\n dist = np.sqrt(np.sum((vertices - [50, 50, 50])**2, axis=1))\n assert dist.min() > 24 and dist.max() < 25\n\n\ndef test_wrong_ndim():\n\n binary_levelset = np.random.uniform(size=(10)) < 0.5\n\n with pytest.raises(ValueError):\n mcubes.smooth(\n binary_levelset,\n method='constrained',\n max_iters=500,\n rel_tol=1e-4\n )\n\n binary_levelset = np.random.uniform(size=(10, 10, 10, 10)) < 0.5\n\n with pytest.raises(ValueError):\n mcubes.smooth(\n binary_levelset,\n method='constrained',\n max_iters=500,\n rel_tol=1e-4\n )\n\n\ndef test_wrong_method():\n\n with pytest.raises(ValueError):\n mcubes.smooth(\n np.zeros((10, 10), dtype=np.bool_),\n method='wrong',\n max_iters=500,\n rel_tol=1e-4\n )\n\n\ndef test_circle():\n\n x, y = np.mgrid[:100, :100]\n levelset = np.sqrt((x - 50)**2 + (y - 50)**2) - 25\n binary_levelset = levelset > 0\n\n smoothed_levelset = mcubes.smooth(\n binary_levelset,\n max_iters=500,\n rel_tol=1e-4\n )\n\n assert np.all(np.abs(smoothed_levelset - levelset) < 1)\n assert np.all((smoothed_levelset > 0) == binary_levelset)\n\n\n# if __name__ == '__main__':\n# # logging.basicConfig(level=logging.DEBUG)\n# test_circle()\n# test_sphere()\n# test_large_sphere()\n" ]
[ [ "numpy.zeros", "numpy.sum", "numpy.random.uniform", "numpy.abs", "numpy.sqrt", "numpy.all" ] ]
frostinassiky/2D-TAN
[ "9eb5aec0007dbf5de4d78347cf2ccfc523638637" ]
[ "lib/datasets/tacos.py" ]
[ "\"\"\" Dataset loader for the TACoS dataset \"\"\"\nimport os\nimport json\n\nimport h5py\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport torch.utils.data as data\nimport torchtext\n\nfrom . import average_to_fixed_length\nfrom core.eval import iou\nfrom core.config import config\n\nclass TACoS(data.Dataset):\n\n vocab = torchtext.vocab.pretrained_aliases[\"glove.6B.300d\"]()\n vocab.itos.extend(['<unk>'])\n vocab.stoi['<unk>'] = vocab.vectors.shape[0]\n vocab.vectors = torch.cat([vocab.vectors, torch.zeros(1, vocab.dim)], dim=0)\n word_embedding = nn.Embedding.from_pretrained(vocab.vectors)\n\n def __init__(self, split):\n super(TACoS, self).__init__()\n\n self.vis_input_type = config.DATASET.VIS_INPUT_TYPE\n self.data_dir = config.DATA_DIR\n self.split = split\n\n # val_1.json is renamed as val.json, val_2.json is renamed as test.json\n with open(os.path.join(self.data_dir, '{}.json'.format(split)),'r') as f:\n annotations = json.load(f)\n anno_pairs = []\n for vid, video_anno in annotations.items():\n duration = video_anno['num_frames']/video_anno['fps']\n for timestamp, sentence in zip(video_anno['timestamps'], video_anno['sentences']):\n if timestamp[0] < timestamp[1]:\n anno_pairs.append(\n {\n 'video': vid,\n 'duration': duration,\n 'times':[max(timestamp[0]/video_anno['fps'],0),min(timestamp[1]/video_anno['fps'],duration)],\n 'description':sentence,\n }\n )\n self.annotations = anno_pairs\n\n def __getitem__(self, index):\n video_id = self.annotations[index]['video']\n gt_s_time, gt_e_time = self.annotations[index]['times']\n sentence = self.annotations[index]['description']\n duration = self.annotations[index]['duration']\n\n word_idxs = torch.tensor([self.vocab.stoi.get(w.lower(), 400000) for w in sentence.split()], dtype=torch.long)\n word_vectors = self.word_embedding(word_idxs)\n\n visual_input, visual_mask = self.get_video_features(video_id)\n\n # visual_input = sample_to_fixed_length(visual_input, random_sampling=config.DATASET.RANDOM_SAMPLING)\n visual_input = average_to_fixed_length(visual_input)\n num_clips = config.DATASET.NUM_SAMPLE_CLIPS//config.DATASET.TARGET_STRIDE\n s_times = torch.arange(0,num_clips).float()*duration/num_clips\n e_times = torch.arange(1,num_clips+1).float()*duration/num_clips\n overlaps = iou(torch.stack([s_times[:,None].expand(-1,num_clips),\n e_times[None,:].expand(num_clips,-1)],dim=2).view(-1,2).tolist(),\n torch.tensor([gt_s_time, gt_e_time]).tolist()).reshape(num_clips,num_clips)\n\n item = {\n 'visual_input': visual_input,\n 'vis_mask': visual_mask,\n 'anno_idx': index,\n 'word_vectors': word_vectors,\n 'duration': duration,\n 'txt_mask': torch.ones(word_vectors.shape[0], 1),\n 'map_gt': torch.from_numpy(overlaps),\n }\n\n return item\n\n def __len__(self):\n return len(self.annotations)\n\n def get_video_features(self, vid):\n assert config.DATASET.VIS_INPUT_TYPE == 'c3d'\n with h5py.File(os.path.join(self.data_dir, 'tall_c3d_features.hdf5'), 'r') as f:\n features = torch.from_numpy(f[vid][:])\n if config.DATASET.NORMALIZE:\n features = F.normalize(features,dim=1)\n vis_mask = torch.ones((features.shape[0], 1))\n return features, vis_mask" ]
[ [ "torch.zeros", "torch.nn.functional.normalize", "torch.nn.Embedding.from_pretrained", "torch.arange", "torch.from_numpy", "torch.ones", "torch.tensor" ] ]
omry/dynamicslearn
[ "fdc19728da95ba9566722b28da40f32f5c4d250e" ]
[ "learn/trainer.py" ]
[ "import os\nimport sys\n\nsys.path.append(os.getcwd())\n\n# Our infrastucture files\n# from utils_data import * \n# from utils_nn import *\nfrom learn.utils.data import *\nfrom learn.utils.nn import *\n\n# neural nets\nfrom learn.models.model_general_nn import GeneralNN\nfrom learn.models.model_ensemble_nn import EnsembleNN\n\n# Torch Packages\nimport torch\n\n# timing etc\nimport os\nimport hydra\n\n# Plotting\nimport matplotlib.pyplot as plt\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\ndef save_file(object, filename):\n path = os.path.join(os.getcwd(), filename)\n log.info(f\"Saving File: {filename}\")\n torch.save(object, path)\n\n\ndef create_model_params(df, model_cfg):\n # only take targets from robot.yaml\n target_keys = []\n for typ in model_cfg.delta_state_targets:\n target_keys.append(typ + '_0dx')\n for typ in model_cfg.true_state_targets:\n target_keys.append(typ + '_1fx')\n\n # grab variables\n history_states = df.filter(regex='tx')\n history_actions = df.filter(regex='tu')\n\n # add extra inputs like objective function\n extra_inputs = []\n if model_cfg.extra_inputs:\n for extra in model_cfg.extra_inputs:\n extra_inputs.append(extra)\n\n # trim past states to be what we want\n history = int(history_states.columns[-1][-3])\n if history > model_cfg.history:\n for i in range(history, model_cfg.history, -1):\n str_remove = str(i) + 't'\n for state in history_states.columns:\n if str_remove in state:\n history_states.drop(columns=state, inplace=True)\n for action in history_actions.columns:\n if str_remove in action:\n history_actions.drop(columns=action, inplace=True)\n\n # ignore states not helpful to prediction\n for ignore in model_cfg.ignore_in:\n for state in history_states.columns:\n if ignore in state:\n history_states.drop(columns=state, inplace=True)\n\n params = dict()\n params['targets'] = df.loc[:, target_keys]\n params['states'] = history_states\n params['inputs'] = history_actions\n # TODO add extra inputs to these parameters\n\n return params\n\n\ndef params_to_training(data):\n X = data['states'].values\n U = data['inputs'].values\n dX = data['targets'].values\n return X, U, dX\n\n\ndef train_model(X, U, dX, model_cfg):\n log.info(\"Training Model\")\n dx = np.shape(X)[1]\n du = np.shape(U)[1]\n dt = np.shape(dX)[1]\n\n # if set dimensions, double check them here\n if model_cfg.training.dx != -1:\n assert model_cfg.training.dx == dx, \"model dimensions in cfg do not match data given\"\n if model_cfg.training.du != -1:\n assert model_cfg.training.dx == du, \"model dimensions in cfg do not match data given\"\n if model_cfg.training.dt != -1:\n assert model_cfg.training.dx == dt, \"model dimensions in cfg do not match data given\"\n\n train_log = dict()\n nn_params = { # all should be pretty self-explanatory\n 'dx': dx,\n 'du': du,\n 'dt': dt,\n 'hid_width': model_cfg.training.hid_width,\n 'hid_depth': model_cfg.training.hid_depth,\n 'bayesian_flag': model_cfg.training.probl,\n 'activation': Swish(), # TODO use hydra.utils.instantiate\n 'dropout': model_cfg.training.extra.dropout,\n 'split_flag': False,\n 'ensemble': model_cfg.ensemble\n }\n\n train_params = {\n 'epochs': model_cfg.optimizer.epochs,\n 'batch_size': model_cfg.optimizer.batch,\n 'optim': model_cfg.optimizer.name,\n 'split': model_cfg.optimizer.split,\n 'lr': model_cfg.optimizer.lr, # bayesian .00175, mse: .0001\n 'lr_schedule': model_cfg.optimizer.lr_schedule,\n 'test_loss_fnc': [],\n 'preprocess': model_cfg.optimizer.preprocess,\n }\n\n train_log['nn_params'] = nn_params\n 
train_log['train_params'] = train_params\n\n if model_cfg.ensemble:\n newNN = EnsembleNN(nn_params, model_cfg.training.E)\n acctest, acctrain = newNN.train_cust((X, U, dX), train_params)\n\n else:\n newNN = GeneralNN(nn_params)\n newNN.init_weights_orth()\n if nn_params['bayesian_flag']: newNN.init_loss_fnc(dX, l_mean=1, l_cov=1) # data for std,\n acctest, acctrain = newNN.train_cust((X, U, dX), train_params)\n\n if model_cfg.ensemble:\n min_err = np.min(acctrain, 0)\n min_err_test = np.min(acctest, 0)\n else:\n min_err = np.min(acctrain)\n min_err_test = np.min(acctest)\n\n train_log['testerror'] = acctest\n train_log['trainerror'] = acctrain\n train_log['min_trainerror'] = min_err\n train_log['min_testerror'] = min_err_test\n\n return newNN, train_log\n\n\n######################################################################\[email protected](config_path='conf/trainer.yaml')\ndef trainer(cfg):\n log.info(\"============= Configuration =============\")\n log.info(f\"Config:\\n{cfg.pretty()}\")\n log.info(\"=========================================\")\n\n ######################################################################\n log.info('Training a new model')\n\n data_dir = cfg.load.base_dir\n\n avail_data = os.path.join(os.getcwd()[:os.getcwd().rfind('outputs')-1]+f\"/ex_data/SAS/{cfg.robot}.csv\")\n if os.path.isfile(avail_data):\n df = pd.read_csv(avail_data)\n log.info(f\"Loaded preprocessed data from {avail_data}\")\n else:\n if cfg.robot =='iono':\n df, log_load = preprocess_iono(data_dir, cfg.load)\n else:\n df, log_load = preprocess_cf(data_dir, cfg.load)\n msg = f\"Loading Data\"\n if 'dir' in log_load is not None:\n msg += f\", dir={log_load['dir']}\"\n if 'num_files' in log_load is not None:\n msg += f\", num_files={log_load['num_files']}\"\n if 'datapoints' in log_load:\n msg += f\", datapoints={log_load['datapoints']}\"\n log.info(msg)\n\n data = create_model_params(df, cfg.model)\n\n X, U, dX = params_to_training(data)\n\n model, train_log = train_model(X, U, dX, cfg.model)\n model.store_training_lists(list(data['states'].columns),\n list(data['inputs'].columns),\n list(data['targets'].columns))\n\n msg = \"Trained Model...\"\n msg += \"Prediction List\" + str(list(data['targets'].columns)) + \"\\n\"\n msg += \"Min test error: \" + str(train_log['min_testerror']) + \"\\n\"\n msg += \"Mean Min test error: \" + str(np.mean(train_log['min_testerror'])) + \"\\n\"\n msg += \"Min train error: \" + str(train_log['min_trainerror']) + \"\\n\"\n log.info(msg)\n\n if cfg.model.training.plot_loss:\n ax1 = plt.subplot(211)\n ax1.plot(train_log['testerror'], label='Test Loss')\n plt.title('Test Loss')\n ax2 = plt.subplot(212)\n ax2.plot(train_log['trainerror'], label='Train Loss')\n plt.title('Training Loss')\n ax1.legend()\n # plt.show()\n plt.savefig(os.path.join(os.getcwd() + '/modeltraining.pdf'))\n\n # Saves NN params\n if cfg.save:\n save_file(model, cfg.model.name + '.pth')\n\n normX, normU, normdX = model.getNormScalers()\n save_file((normX, normU, normdX), cfg.model.name + \"_normparams.pkl\")\n\n # Saves data file\n save_file(data, cfg.model.name + \"_data.pkl\")\n\n\nif __name__ == '__main__':\n sys.exit(trainer())\n" ]
[ [ "torch.save", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot" ] ]
kungfumas/bahasa-alami
[ "24ad2cb0c537ece11a0f7af376ef778a3c2839ca" ]
[ "Section 7 - Text Classification/Text Classification Part 8 - Testing model performance.py" ]
[ "# Text Classifiation using NLP\n\n# Importing the libraries\nimport numpy as np\nimport re\nimport pickle \nimport nltk\nfrom nltk.corpus import stopwords\nfrom sklearn.datasets import load_files\nnltk.download('stopwords')\n\n\n# Importing the dataset\nreviews = load_files('txt_sentoken/')\nX,y = reviews.data,reviews.target\n\n\n# Pickling the dataset\nwith open('X.pickle','wb') as f:\n pickle.dump(X,f)\n \nwith open('y.pickle','wb') as f:\n pickle.dump(y,f)\n\n# Unpickling dataset\nX_in = open('X.pickle','rb')\ny_in = open('y.pickle','rb')\nX = pickle.load(X_in)\ny = pickle.load(y_in)\n\n\n# Creating the corpus\ncorpus = []\nfor i in range(0, 2000):\n review = re.sub(r'\\W', ' ', str(X[i]))\n review = review.lower()\n review = re.sub(r'^br$', ' ', review)\n review = re.sub(r'\\s+br\\s+',' ',review)\n review = re.sub(r'\\s+[a-z]\\s+', ' ',review)\n review = re.sub(r'^b\\s+', '', review)\n review = re.sub(r'\\s+', ' ', review)\n corpus.append(review) \n \n\n# Creating the BOW model\nfrom sklearn.feature_extraction.text import CountVectorizer\nvectorizer = CountVectorizer(max_features = 2000, min_df = 3, max_df = 0.6, stop_words = stopwords.words('english'))\nX = vectorizer.fit_transform(corpus).toarray()\n\n\n# Creating the Tf-Idf Model\nfrom sklearn.feature_extraction.text import TfidfTransformer\ntransformer = TfidfTransformer()\nX = transformer.fit_transform(X).toarray()\n\n\n# Creating the Tf-Idf model directly\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nvectorizer = TfidfVectorizer(max_features = 2000, min_df = 3, max_df = 0.6, stop_words = stopwords.words('english'))\nX = vectorizer.fit_transform(corpus).toarray()\n\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\ntext_train, text_test, sent_train, sent_test = train_test_split(X, y, test_size = 0.20, random_state = 0)\n\n\n# Training the classifier\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression()\nclassifier.fit(text_train,sent_train)\n\n\n# Testing model performance\nsent_pred = classifier.predict(text_test)\n\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(sent_test, sent_pred)" ]
[ [ "sklearn.metrics.confusion_matrix", "sklearn.model_selection.train_test_split", "sklearn.linear_model.LogisticRegression", "sklearn.datasets.load_files", "sklearn.feature_extraction.text.TfidfTransformer" ] ]
tydcg/pythonlearn
[ "05da4ad0c81b919a6ca55e0b926af3a01475d861" ]
[ "other3d.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D, art3d\nfrom matplotlib.animation import FuncAnimation\n\n\ndef init_faces(N):\n f = []\n for r in range(N - 1):\n for c in range(N - 1):\n v0 = r * N + c\n f.append([v0, v0 + 1, v0 + N + 1, v0 + N])\n return np.array(f)\n\n\ndef init_vert(N):\n v = np.meshgrid(range(N), range(N), [1.0])\n return np.dstack(v).reshape(-1, 3)\n\n\ndef set_amplitude(v, A):\n v[:, 2] = A * (np.sin(np.pi * v[:, 0] / (N - 1)) * np.sin(np.pi * v[:, 1] / (N - 1)))\n return v\n\n\nN = 10\nf = init_faces(N)\nv = init_vert(N)\n\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1, projection=\"3d\")\n\npc = art3d.Poly3DCollection(v[f])\n# pc.set_animated(True) # Is this required? Why?\n\nax.add_collection(pc)\n\ndef init_fig():\n ax.set_xlim([0, N])\n ax.set_ylim([0, N])\n ax.set_zlim([0, 5])\n return pc,\n\n\ndef update_fig(frame):\n A = np.sin(frame)\n new_v = set_amplitude(v, A)\n pc.set_verts(new_v[f])\n return pc\n\ndef gen():\n while True:\n yield np.linspace(0, 2 * np.pi, 128)\n\nani = FuncAnimation(fig, update_fig, frames=np.linspace(0, 2 * np.pi, 128),\n init_func=init_fig, blit=False, repeat=True)\n\n# ani = FuncAnimation(fig, update_fig, gen,\n# init_func=init_fig, blit=False, interval=150)\n\nplt.show()\n" ]
[ [ "numpy.array", "numpy.sin", "matplotlib.pyplot.figure", "numpy.dstack", "matplotlib.pyplot.show", "numpy.linspace" ] ]
bkornpob/kbastroutils
[ "89bdf8f395234a22a8b1386f028892b07559022e" ]
[ "kbastroutils/grismapcorr.py" ]
[ "import numpy as np\nfrom scipy.interpolate import interp2d\nimport copy\n\nclass GrismApCorr:\n def __init__(self):\n TABLE = {'HST-WFC3-IR-G102': \n {'ref': 'ISR WFC3-2011-05'\n ,'filter': 'G102'\n ,'scale': 0.13\n ,'scaleunit': 'arcsec/pix'\n ,'type': 'diameter'\n ,'row': 'apsize'\n ,'col': 'wave'\n ,'apunit': 'arcsec'\n ,'apsize': np.array((0.128,0.385,0.641\n ,0.898,1.154,1.411\n ,1.667,1.924,3.719\n ,7.567,12.954,25.779\n ))\n ,'waveunit': 'A'\n ,'wave': np.array((8850.,9350.,9850.,10350.,10850.,11350.))\n ,'value' : np.array(((0.459,0.391,0.414,0.464,0.416,0.369)\n ,(0.825,0.809,0.808,0.811,0.794,0.792)\n ,(0.890,0.889,0.887,0.880,0.875,0.888)\n ,(0.920,0.917,0.916,0.909,0.904,0.916)\n ,(0.939,0.937,0.936,0.930,0.925,0.936)\n ,(0.952,0.950,0.950,0.943,0.940,0.949)\n ,(0.962,0.961,0.961,0.954,0.951,0.958)\n ,(0.969,0.968,0.969,0.962,0.959,0.965)\n ,(0.985,0.984,0.986,0.982,0.980,0.983)\n ,(0.995,0.995,0.996,0.991,0.990,0.992)\n ,(0.999,0.999,0.999,0.997,0.996,0.995)\n ,(1.000,1.000,1.000,1.000,1.000,1.000)\n ))\n ,'model': None\n }\n ,'HST-WFC3-IR-G141':\n {'ref': 'ISR WFC3-2011-05'\n ,'filter': 'G141'\n ,'scale': 0.13\n ,'scaleunit': 'arcsec/pix'\n ,'type': 'diameter'\n ,'row': 'apsize'\n ,'col': 'wave'\n ,'apunit': 'arcsec'\n ,'apsize': np.array((0.128,0.385,0.641\n ,0.898,1.154,1.411\n ,1.667,1.924,3.719\n ,7.567,12.954,25.779\n )) \n ,'waveunit': 'A'\n ,'wave': np.array((11300.,12300.,13300.,14300.,15300.,16300.))\n ,'value': np.array(((0.442,0.444,0.395,0.344,0.342,0.376)\n ,(0.805,0.792,0.764,0.747,0.732,0.732)\n ,(0.866,0.877,0.865,0.863,0.850,0.859)\n ,(0.912,0.901,0.893,0.894,0.884,0.898)\n ,(0.933,0.924,0.914,0.913,0.903,0.913)\n ,(0.947,0.940,0.931,0.932,0.921,0.932)\n ,(0.958,0.950,0.942,0.944,0.934,0.945)\n ,(0.966,0.959,0.951,0.953,0.944,0.954)\n ,(0.985,0.984,0.981,0.985,0.980,0.985)\n ,(0.993,0.995,0.992,0.997,0.992,0.996)\n ,(0.996,0.998,0.997,1.000,0.997,1.000)\n ,(1.000,1.000,1.000,1.000,1.000,1.000)\n ))\n ,'model': None\n }\n ,'HST-ACS-WFC-G800L':\n {'ref': 'ISR WFC3-2011-05'\n ,'filter': 'G102'\n ,'scale': 0.13\n ,'scaleunit': 'arcsec/pix'\n ,'type': 'diameter'\n ,'row': 'apsize'\n ,'col': 'wave'\n ,'apunit': 'arcsec'\n ,'apsize': np.array((0.128,0.385,0.641\n ,0.898,1.154,1.411\n ,1.667,1.924,3.719\n ,7.567,12.954,25.779\n ))\n ,'waveunit': 'A'\n ,'wave': np.array((8850.,9350.,9850.,10350.,10850.,11350.))\n ,'value' : np.array(((0.459,0.391,0.414,0.464,0.416,0.369)\n ,(0.825,0.809,0.808,0.811,0.794,0.792)\n ,(0.890,0.889,0.887,0.880,0.875,0.888)\n ,(0.920,0.917,0.916,0.909,0.904,0.916)\n ,(0.939,0.937,0.936,0.930,0.925,0.936)\n ,(0.952,0.950,0.950,0.943,0.940,0.949)\n ,(0.962,0.961,0.961,0.954,0.951,0.958)\n ,(0.969,0.968,0.969,0.962,0.959,0.965)\n ,(0.985,0.984,0.986,0.982,0.980,0.983)\n ,(0.995,0.995,0.996,0.991,0.990,0.992)\n ,(0.999,0.999,0.999,0.997,0.996,0.995)\n ,(1.000,1.000,1.000,1.000,1.000,1.000)\n ))\n ,'model': None\n }\n }\n self.table = TABLE\n self.instrument = list(TABLE.keys())\n self.make_model()\n def make_model(self):\n for i in self.instrument:\n apsize = 0.5 * np.copy(self.table[i]['apsize'])\n wave = np.copy(self.table[i]['wave'])\n value = np.copy(self.table[i]['value'])\n model = interp2d(wave,apsize,value,kind='linear',copy=True\n ,bounds_error=False,fill_value=np.nan\n )\n self.table[i]['model'] = copy.deepcopy(model)\n def make_apcorr(self,instrument,wave,apsize,apunit='pix'\n ,replace='median'\n ):\n apunittab = self.table[instrument]['apunit']\n model = self.table[instrument]['model']\n apsize2 = None\n value = None\n if 
(apunittab=='arcsec') & (apunit=='pix'):\n apsize2 = self.pix2arcsec(instrument,apsize)\n elif (apunittab=='pix') & (apunit=='arcsec'):\n apsize2 = self.arcsec2pix(instrument,apsize)\n value = model(wave,apsize2)\n if replace=='median':\n median = np.median(value[np.where(np.isfinite(value))])\n value[np.where(~np.isfinite(value))] = median\n value[np.where(value <= 0.)] = 0.\n value[np.where(value >= 1.)] = 1. \n return value\n def pix2arcsec(self,instrument=None,pixsize=None):\n out = None\n if not instrument:\n print('Error: instrument is required. Set to None')\n return\n if not pixsize:\n print('Error: pixsize is required. Set to None')\n return\n scale = self.table[instrument]['scale']\n scaleunit = self.table[instrument]['scaleunit']\n if scaleunit=='arcsec/pix':\n out = pixsize * scale\n elif scaleunit=='pix/arcsec':\n out = pixsize.astype(float) / scaleunit\n else:\n print('Error: invalid scaleunit. Set to None')\n return out\n def arcsec2pix(self,instrument=None,arcsec=None):\n out = None\n if not instrument:\n print('Error: instrument is required. Set to None')\n return\n if not arcsec:\n print('Error: arcsec is required. Set to None')\n scale = self.table[instrument]['scale']\n scaleunit = self.table[instrument]['scaleunit']\n if scaleunit=='arcsec/pix':\n out = arcsec.astype(float) / scale\n elif scaleunit=='pix/arcsec':\n out = arcsec.astype(float) * scale\n else:\n print('Error: invalid scaleunit. Set to None')\n return out\n" ]
[ [ "numpy.array", "numpy.copy", "numpy.where", "numpy.isfinite", "scipy.interpolate.interp2d" ] ]
sjleake/bcdi
[ "bf071ad085a11622158e1e651857a8a172c51cf1" ]
[ "tests/utils/test_utilities.py" ]
[ "# -*- coding: utf-8 -*-\n\n# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data\n# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP\n# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE\n# (c) 06/2021-present : DESY CFEL\n# authors:\n# Jerome Carnis, [email protected]\n\nimport numpy as np\nimport unittest\nimport bcdi.utils.utilities as util\n\n\ndef run_tests(test_class):\n suite = unittest.TestLoader().loadTestsFromTestCase(test_class)\n runner = unittest.TextTestRunner(verbosity=2)\n return runner.run(suite)\n\n\nclass TestInRange(unittest.TestCase):\n \"\"\"Tests on the function utilities.in_range.\"\"\"\n\n def setUp(self):\n # executed before each test\n self.extent = (-10, 99, -20, 89, 10, 119)\n\n # def tearDown(self):\n # executed after each test\n\n def test_inrange_in_range(self):\n self.assertTrue(util.in_range(point=(0, 0, 20), extent=self.extent))\n\n def test_inrange_not_in_range_low_z(self):\n self.assertFalse(util.in_range(point=(-11, 0, 20), extent=self.extent))\n\n def test_inrange_not_in_range_high_z(self):\n self.assertFalse(util.in_range(point=(100, 0, 20), extent=self.extent))\n\n def test_inrange_not_in_range_low_y(self):\n self.assertFalse(util.in_range(point=(0, -21, 20), extent=self.extent))\n\n def test_inrange_not_in_range_high_y(self):\n self.assertFalse(util.in_range(point=(0, 90, 20), extent=self.extent))\n\n def test_inrange_not_in_range_low_x(self):\n self.assertFalse(util.in_range(point=(0, 0, 9), extent=self.extent))\n\n def test_inrange_not_in_range_high_x(self):\n self.assertFalse(util.in_range(point=(0, 0, 120), extent=self.extent))\n\n def test_inrange_lower_edge_z(self):\n self.assertTrue(util.in_range(point=(-10, 0, 20), extent=self.extent))\n\n def test_inrange_larger_edge_z(self):\n self.assertTrue(util.in_range(point=(99, 0, 20), extent=self.extent))\n\n def test_inrange_lower_edge_y(self):\n self.assertTrue(util.in_range(point=(0, -20, 20), extent=self.extent))\n\n def test_inrange_larger_edge_y(self):\n self.assertTrue(util.in_range(point=(0, 89, 20), extent=self.extent))\n\n def test_inrange_lower_edge_x(self):\n self.assertTrue(util.in_range(point=(0, 0, 10), extent=self.extent))\n\n def test_inrange_larger_edge_x(self):\n self.assertTrue(util.in_range(point=(0, 0, 119), extent=self.extent))\n\n\nclass TestIsFloat(unittest.TestCase):\n \"\"\"\n Tests on the function utilities.is_float.\n\n def is_float(string)\n \"\"\"\n\n def test_string_float(self):\n self.assertTrue(util.is_float(\"12.0\"))\n\n def test_string_int(self):\n self.assertTrue(util.is_float(\"12\"))\n\n def test_string_complex(self):\n self.assertFalse(util.is_float(\"12 + 1j\"))\n\n def test_string_none(self):\n self.assertFalse(util.is_float(\"None\"))\n\n def test_string_not_numeric(self):\n self.assertFalse(util.is_float(\"abc\"))\n\n def test_none(self):\n with self.assertRaises(TypeError):\n util.is_float(None)\n\n def test_array(self):\n with self.assertRaises(TypeError):\n util.is_float(np.ones(3))\n\n\nif __name__ == \"__main__\":\n run_tests(TestInRange)\n" ]
[ [ "numpy.ones" ] ]
aharker619/ena_experiment_analysis
[ "40c80f16e131cabe92af453926fa6a5051bdb78c" ]
[ "in_vivo/filo_const_analysis.py" ]
[ "# Compare different filopodia density depending on categorization values for filopodia\n# Change length/width parameters for classifying filopodia, see if result changes\n\nimport os\nimport scipy.io as sio\nfrom scipy import stats\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndef average_filo_per_perimeter(filepath):\n \"\"\"\n Given a matlab result file return average number of filopodia/perimeter\n \"\"\"\n matdata = sio.loadmat(filepath)\n y_single = matdata['N'][0][0] / matdata['Per'][0][0]\n return y_single\n \ndef get_data(path2, files):\n \"\"\"\n Given path and list of files for each file get y and x data and return for plot\n \"\"\"\n y_singleframe = []\n unit_x = []\n for file in files:\n if file != \".DS_Store\":\n y_single = average_filo_per_perimeter(''.join([path2, file]))\n y_singleframe.append(y_single)\n unit_x.append(file)\n y_series = pd.Series(y_singleframe)\n \n return unit_x, y_series\n\n# Get all files in path directory, change to correct folder\npath = \"/Test_Constants/\"\n# These are length_width (opposite of Filotrack order)\nconstant_folders = ['6_7/', '6_6/', '6_75/', '55_7/', '7_7/', '7_6/']\nall_data = pd.DataFrame()\nfolders = ['ControlAO/','ActinOnly/', 'Dimer/', 'Trimer/', 'Tetramer/']\nfor c_folder in constant_folders:\n const = c_folder.split('/')[0]\n # Go through each folder and get data to calulcate mean and stdev\n df_single = pd.DataFrame()\n for folder in folders:\n path2 = ''.join([path, c_folder, folder])\n folder_files = os.listdir(path2)\n x, y_series = get_data(path2, folder_files)\n name = folder.split('/')[0]\n data = pd.DataFrame({'constants': np.repeat(const, len(y_series)), \n 'cells': np.repeat(name, len(y_series)), \n 'F/P': y_series})\n df_single = df_single.append(data)\n all_data = all_data.append(df_single)\n\ngrouped = all_data.groupby('constants')\nfig = plt.figure()\nfig.subplots_adjust(hspace = 0.4, wspace = 0.4)\nfor (title, group), i in zip(grouped, range(1, 7)):\n ax = plt.subplot(2, 3, i) \n ax = sns.boxplot(x = 'cells', y = 'F/P', data = group, palette = \"Spectral\")\n ax.set_ylim([0, 0.07])\n plt.xticks(rotation = 90) \n plt.xlabel(title)\n # if you want specific data points use swarmplot\n #ax = sns.swarmplot(x = 'cells', y = 'F/P', data = group, color = \"black\", size = 4)\n\nfig.set_size_inches(10, 8)\nfig.savefig('/Test_Constants/diff_const.png')\nfig.show()\n\n\ngrouped2 = all_data.groupby('cells')\nfig2 = plt.figure()\n#fig2.subplots(sharey = True)\nfig2.subplots_adjust(hspace = 0.4, wspace = 0.4)\nfor (title, group), i in zip(grouped2, range(1, 7)):\n ax = plt.subplot(2, 3, i) \n ax = sns.boxplot(x = 'constants', y = 'F/P', data = group, palette = \"Spectral\")\n ax.set_ylim([0, 0.07])\n plt.xticks(rotation = 90)\n plt.xlabel(title)\nfig2.set_size_inches(10, 8)\nfig2.savefig('/Test_Constants/diff_cells.png')\n\nstat_data = []\nsig_data = []\nfor title, group in grouped:\n ao = [group[group['cells'] == 'ActinOnly']['F/P']]\n cao = [group[group['cells'] == 'ControlAO']['F/P']]\n rd = [group[group['cells'] == 'Dimer']['F/P']]\n rtr = [group[group['cells'] == 'Trimer']['F/P']]\n rtet = [group[group['cells'] == 'Tetramer']['F/P']]\n all_cells = [('ao', ao), ('cao', cao), ('rd', rd), ('rtr', rtr), ('rtet', rtet)]\n update_cells = [('ao', ao), ('cao', cao), ('rd', rd), ('rtr', rtr), ('rtet', rtet)]\n for name1, cell1 in all_cells:\n for name2, cell2 in update_cells:\n if name1 != name2:\n stat = stats.ttest_ind(cell1[0], cell2[0], axis = 0, equal_var = False)\n 
stat_data.append((title, name1, name2, stat[1]))\n if stat[1] < 0.05:\n sig_data.append((title, name1, name2, stat[1]))\n update_cells = update_cells[1:]\n \n \n\n" ]
[ [ "pandas.DataFrame", "matplotlib.pyplot.xlabel", "scipy.io.loadmat", "scipy.stats.ttest_ind", "matplotlib.pyplot.figure", "pandas.Series", "matplotlib.pyplot.xticks", "matplotlib.pyplot.subplot" ] ]
vgolfier/Dashboard_Uber_Prize
[ "ff25a571a618edfef4300ba222549c19f8bb4495" ]
[ "Dashboard_Uber_Prize/submission.py" ]
[ "import pdb\nimport math\nimport numpy as np \nfrom os import listdir\nfrom os.path import dirname, join\nimport pandas as pd \n# import seaborn as sns \n\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.palettes import Dark2, Category10, Category20, Plasma256, YlOrRd\n\nHOURS = [str(h) for h in range(24)]\nROUTE_IDS = ['1340', '1341', '1342', '1343', '1344', '1345', '1346', '1347', '1348', '1349', '1350', '1351']\nBUSES_LIST = ['BUS-DEFAULT', 'BUS-SMALL-HD', 'BUS-STD-HD', 'BUS-STD-ART']\nAGENCY_IDS = [217]\nTRANSIT_SCALE_FACTOR = 0.1\n\ndef reset_index(df):\n '''Returns DataFrame with index as columns'''\n index_df = df.index.to_frame(index=False)\n df = df.reset_index(drop=True)\n # In merge is important the order in which you pass the dataframes\n # if the index contains a Categorical. \n # pd.merge(df, index_df, left_index=True, right_index=True) does not work\n return pd.merge(index_df, df, left_index=True, right_index=True)\n\ndef calc_ridership_perc(row):\n if row['numPassengers'] > row['seatingCapacity']:\n return 100.0 + (row['numPassengers'] - row['seatingCapacity']) * 100.0 / row['standingRoomCapacity']\n else:\n return row['numPassengers'] * 100.0 / row['seatingCapacity']\n\nclass Submission():\n\n def __init__(self, name, scenario):\n \"\"\"\n Initialize class object.\n\n Parameters\n ----------\n name : str\n dfs : list of pd.DataFrame\n\n Returns\n -------\n None\n \"\"\"\n self.name = name\n self.scenario = scenario\n self.modes = ['OnDemand_ride', 'car', 'drive_transit', 'walk', 'walk_transit']\n self.submissions_dir = join(dirname(__file__), 'data/submissions/{}/{}'.format(self.scenario, self.name))\n self.reference_dir = join(dirname(__file__), 'data/sioux_faux_bus_lines')\n self.get_data(from_csv=True)\n self.make_data_sources()\n\n def get_data(self, from_csv=False):\n\n if from_csv:\n self.frequency_df = pd.read_csv(join(self.submissions_dir, 'competition/submission-inputs/FrequencyAdjustment.csv'))\n self.fares_df = pd.read_csv(join(self.submissions_dir, 'competition/submission-inputs/MassTransitFares.csv'))\n self.incentives_df = pd.read_csv(join(self.submissions_dir, 'competition/submission-inputs/ModeIncentives.csv'))\n self.fleet_df = pd.read_csv(join(self.submissions_dir, 'competition/submission-inputs/VehicleFleetMix.csv'))\n\n self.scores_df = pd.read_csv(join(self.submissions_dir, 'competition/submissionScores.csv'))\n\n self.activities_df = pd.read_csv(join(self.submissions_dir, 'activities_dataframe.csv'))\n self.households_df = pd.read_csv(join(self.submissions_dir, 'households_dataframe.csv'))\n self.legs_df = pd.read_csv(join(self.submissions_dir, 'legs_dataframe.csv'))\n self.paths_df = pd.read_csv(join(self.submissions_dir, 'path_traversals_dataframe.csv'))\n self.persons_df = pd.read_csv(join(self.submissions_dir, 'persons_dataframe.csv'))\n self.trips_df = pd.read_csv(join(self.submissions_dir, 'trips_dataframe.csv'))\n self.mode_choice_df = pd.read_csv(join(self.submissions_dir, 'modeChoice.csv'))\n self.realized_mode_choice_df = pd.read_csv(join(self.submissions_dir, 'realizedModeChoice.csv'))\n\n path = join(self.submissions_dir, 'ITERS')\n iter_num = max([int(file.split('.')[1]) for file in listdir(path) if file != '.DS_Store'])\n path = join(path, 'it.{}'.format(iter_num))\n self.mode_choice_hourly_df = pd.read_csv(join(path, '{}.modeChoice.csv'.format(iter_num)), index_col=0).T\n self.travel_times_df = pd.read_csv(join(path, '{}.averageTravelTimes.csv'.format(iter_num)))\n\n self.seating_capacities = pd.read_csv(join(self.reference_dir, 
\"availableVehicleTypes.csv\"))[[\n \"vehicleTypeId\", \"seatingCapacity\"]].set_index(\"vehicleTypeId\", drop=True).T.to_dict(\"records\")[0]\n self.standing_room_capacities = pd.read_csv(join(self.reference_dir, \"availableVehicleTypes.csv\"))[[\n \"vehicleTypeId\", \"standingRoomCapacity\"]].set_index(\"vehicleTypeId\", drop=True).T.to_dict(\"records\")[0]\n self.trip_to_route = pd.read_csv(join(self.reference_dir, \"gtfs_data/trips.txt\"))[[\n \"trip_id\", \"route_id\"]].set_index(\"trip_id\", drop=True).T.to_dict('records')[0]\n self.operational_costs = pd.read_csv(join(self.reference_dir, \"vehicleCosts.csv\"))[[\n \"vehicleTypeId\", \"opAndMaintCost\"]].set_index(\"vehicleTypeId\", drop=True).T.to_dict(\"records\")[0]\n else:\n pass\n\n def make_data_sources(self):\n\n self.modeinc_input_data = self.make_modeinc_input_data()\n self.fleetmix_input_data = self.make_fleetmix_input_data()\n self.fares_input_data = self.make_fares_input_data()\n self.routesched_input_line_data, self.routesched_input_start_data, self.routesched_input_end_data = self.make_routesched_input_data()\n\n self.normalized_scores_data = self.make_normalized_scores_data()\n\n self.mode_planned_pie_chart_data = self.make_mode_pie_chart_data(self.mode_choice_df.copy())\n self.mode_realized_pie_chart_data = self.make_mode_pie_chart_data(self.realized_mode_choice_df.copy())\n self.mode_choice_by_time_data = self.make_mode_choice_by_time_data()\n self.mode_choice_by_age_group_data = self.make_mode_choice_by_age_group_data()\n self.mode_choice_by_income_group_data = self.make_mode_choice_by_income_group_data()\n self.mode_choice_by_distance_data = self.make_mode_choice_by_distance_data()\n\n self.congestion_travel_time_by_mode_data = self.make_congestion_travel_time_by_mode_data()\n self.congestion_travel_time_per_passenger_trip_data = self.make_congestion_travel_time_per_passenger_trip_data()\n self.congestion_miles_traveled_per_mode_data = self.make_congestion_miles_traveled_per_mode_data()\n self.congestion_bus_vmt_by_ridership_data = self.make_congestion_bus_vmt_by_ridership_data()\n self.congestion_on_demand_vmt_by_phases_data = self.make_congestion_on_demand_vmt_by_phases_data()\n self.congestion_travel_speed_data = self.make_congestion_travel_speed_data()\n\n self.los_travel_expenditure_data = self.make_los_travel_expenditure_data()\n self.los_crowding_data = self.make_los_crowding_data()\n\n self.transit_cb_costs_data, self.transit_cb_benefits_data = self.make_transit_cb_data()\n self.transit_inc_by_mode_data = self.make_transit_inc_by_mode_data()\n \n self.sustainability_25pm_per_mode_data = self.make_sustainability_25pm_per_mode_data()\n\n def splitting_min_max(self, df, name_column):\n \"\"\" Parsing and splitting the ranges in the \"age\" (or \"income\") columns into two new columns:\n \"min_age\" (or \"min_income\") with the bottom value of the range and \"max_age\" (or \"max_income\") with the top value\n of the range. 
Ex: [0:120] --> 0, 120\n\n Parameters\n ----------\n df: pandas dataframe\n ModeIncentives.csv or MassTransitFares.csv input file\n\n name_column: str\n Column containing the range values to parse\n\n Returns\n -------\n df: pandas dataframe\n New input dataframe with two \"min\" and \"max\" columns with floats int values instead of ranges values\n\n \"\"\"\n # Parsing the ranges and creating two new columns with the min and max values of the range\n if df.empty:\n df[\"min_{0}\".format(name_column)] = [0]\n df[\"max_{0}\".format(name_column)] = [0]\n else:\n min_max = df[name_column].str.replace(\"[\", \"\").str.replace(\"]\", \"\").str.replace(\"(\", \"\").str.replace(\")\", \"\")\\\n .str.split(\":\", expand=True)\n df[\"min_{0}\".format(name_column)] = min_max.iloc[:, 0].astype(int)\n df[\"max_{0}\".format(name_column)] = min_max.iloc[:, 1].astype(int)\n\n return df\n\n def make_normalized_scores_data(self):\n scores = self.scores_df\n scores = scores.loc[:,[\"Component Name\", \"Weighted Score\"]]\n scores.set_index(\"Component Name\", inplace=True)\n scores.reset_index(inplace=True)\n\n scores.loc[:, \"Component Name\"] = scores[\"Component Name\"].astype('category')#.cat.reorder_categories(CATEGORIES)\n\n scores = scores.sort_values(by=\"Component Name\")\n\n scores.loc[:, 'color'] = \"#4682b4\"\n scores.loc[scores[\"Component Name\"] == 'Submission Score', 'color'] = \"#000080\"\n\n # min_score = min(scores['Weighted Score'].min(), 0.0) * 1.1\n # max_score = max(scores['Weighted Score'].max(), 1.0) * 1.1\n\n data = scores.to_dict(orient='list')\n return data\n\n def make_fleetmix_input_data(self):\n\n fleet_mix = self.fleet_df\n\n if fleet_mix.empty:\n fleet_mix = pd.DataFrame(\n [[agency_id, \"{}\".format(route_id), \"BUS-DEFAULT\"] for route_id in ROUTE_IDS for agency_id in AGENCY_IDS],\n columns=[\"agencyId\", \"routeId\", \"vehicleTypeId\"])\n\n df = pd.DataFrame([AGENCY_IDS[0], '1', BUSES_LIST[0]]).T\n df.columns = [\"agencyId\", \"routeId\", \"vehicleTypeId\"]\n\n # Adding the missing bus types in the dataframe so that they appear in the plot\n for bus in BUSES_LIST:\n if bus not in fleet_mix[\"vehicleTypeId\"].values:\n df.loc[0, \"vehicleTypeId\"] = bus\n fleet_mix = fleet_mix.append(df, ignore_index=True, sort=False)\n\n # Adding the missing bus routes in the dataframe so that they appear in the plot\n fleet_mix.loc[:, \"routeId\"] = fleet_mix[\"routeId\"].astype(str)\n\n df = pd.DataFrame([AGENCY_IDS[0], \"\", BUSES_LIST[0]]).T\n df.columns = [\"agencyId\", \"routeId\", \"vehicleTypeId\"]\n\n for route in ROUTE_IDS:\n if route not in fleet_mix[\"routeId\"].values:\n df.loc[0, \"routeId\"] = route\n fleet_mix = fleet_mix.append(df, ignore_index=True, sort=False)\n\n # Reodering bus types starting by \"BUS-DEFAULT\" and then by ascending bus size order\n fleet_mix.loc[:, \"vehicleTypeId\"] = fleet_mix[\"vehicleTypeId\"].astype('category').cat.reorder_categories(\n BUSES_LIST)\n\n fleet_mix = fleet_mix.drop(labels=\"agencyId\", axis=1)\n fleet_mix.sort_values(by=\"vehicleTypeId\", inplace=True)\n fleet_mix.reset_index(inplace=True, drop=True)\n\n data = fleet_mix.to_dict(orient='list')\n return data \n\n def make_routesched_input_data(self):\n\n frequency = self.frequency_df\n frequency.loc[:, \"route_id\"] = frequency[\"route_id\"].astype(str)\n\n # Add all missing routes (the ones that were not changed) in the DF so that they appear int he plot\n df = pd.DataFrame([0, 0, 24*3600, 10800]).T\n df.columns = [\"route_id\", \"start_time\", \"end_time\", \"headway_secs\"]\n\n 
for route in ROUTE_IDS:\n if route not in frequency[\"route_id\"].values:\n df.loc[0, \"route_id\"] = route\n frequency = frequency.append(df, ignore_index=True, sort=False)\n\n frequency.loc[:, \"start_time\"] = (frequency[\"start_time\"].astype(int) / 3600).round(1)\n frequency.loc[:, \"end_time\"] = (frequency[\"end_time\"].astype(int) / 3600).round(1)\n frequency.loc[:, \"headway_secs\"] = (frequency[\"headway_secs\"].astype(int) / 3600).round(1)\n\n frequency = frequency.sort_values(by=\"route_id\").set_index(\"route_id\")\n\n palette_dict = dict(zip(ROUTE_IDS, (Category20[20][::2] + Category20[20][1::2])[:len(ROUTE_IDS)]))\n\n line_data=dict( \n xs=[[f_row['start_time'], f_row['end_time']] for i, f_row in frequency.iterrows()], \n ys=[[f_row['headway_secs'], f_row['headway_secs']] for i, f_row in frequency.iterrows()],\n color=[palette_dict[i] for i, f_row in frequency.iterrows()],\n name=[i for i, f_row in frequency.iterrows()]\n )\n start_data=dict( \n xs=[f_row['start_time'] for i, f_row in frequency.iterrows()], \n ys=[f_row['headway_secs'] for i, f_row in frequency.iterrows()],\n color=[palette_dict[i] for i, f_row in frequency.iterrows()]\n )\n end_data=dict( \n xs=[f_row['end_time'] for i, f_row in frequency.iterrows()], \n ys=[f_row['headway_secs'] for i, f_row in frequency.iterrows()],\n color=[palette_dict[i] for i, f_row in frequency.iterrows()]\n )\n return line_data, start_data, end_data\n\n def make_fares_input_data(self, max_fare=10, max_age=120):\n fares = self.fares_df\n\n fares.loc[:, \"age\"] = fares[\"age\"].astype(str)\n fares.loc[:, \"routeId\"] = fares[\"routeId\"].astype(str)\n\n df = pd.DataFrame(columns=[\"agencyId\", \"routeId\", \"age\", \"amount\"])\n\n # Replace RouteId = NaN values by all bus lines (12 rows)\n for i, fare in fares.iterrows():\n if fare['routeId'] == 'nan':\n df1 = pd.DataFrame(\n [[fare['agencyId'], route, fare['age'], fare['amount']] for route in ROUTE_IDS],\n columns=[\"agencyId\", \"routeId\", \"age\", \"amount\"])\n df = df.append(df1, ignore_index=True, sort=False)\n\n else:\n df = df.append(fare, ignore_index=True, sort=False)\n\n # Splitting age ranges into 2 columns (min_age and max_age)\n fares = self.splitting_min_max(df, \"age\")\n fares.loc[:, \"routeId\"] = fares[\"routeId\"].astype(str)\n fares.loc[:, \"amount\"] = fares[\"amount\"].astype(float)\n\n fares = fares.drop(labels=[\"age\"], axis=1)\n fares = fares.sort_values(by=[\"amount\", \"routeId\"])\n data = fares.to_dict(orient='list')\n return data \n\n def make_modeinc_input_data(self, max_incentive=50, max_age=120, max_income=150000):\n\n incentives = self.incentives_df\n incentives.loc[:, \"amount\"] = incentives[\"amount\"].astype(float)\n\n # Completing the dataframe with the missing subsidized modes (so that they appear in the plot)\n df = pd.DataFrame([\"\", \"(0:{})\".format(max_age), \"(0:{})\".format(max_income), 0.00]).T\n df.columns = [\"mode\", \"age\", \"income\", \"amount\"]\n\n modes = [\"OnDemand_ride\", \"drive_transit\", \"walk_transit\"]\n for mode in modes:\n df.loc[0, \"mode\"] = mode\n incentives = incentives.append(df, ignore_index=True, sort=False)\n incentives = incentives[incentives[\"mode\"].isin(modes)].drop_duplicates()\n\n # Splitting age and income columns\n incentives = self.splitting_min_max(incentives, \"age\")\n incentives = self.splitting_min_max(incentives, \"income\")\n\n incentives = incentives.drop(labels=[\"age\", \"income\"], axis=1)\n\n # Changing the type of the \"mode\" column to 'category' to reorder the modes\n 
incentives.loc[:, \"mode\"] = incentives[\"mode\"].astype('category').cat.reorder_categories(modes)\n\n incentives = incentives.sort_values(by=[\"amount\", \"mode\"])\n data = incentives.to_dict(orient='list')\n return data\n\n def make_mode_pie_chart_data(self, mode_choice):\n\n # Select columns w/ modes\n mode_choice = mode_choice.iloc[-1].drop('iterations').reset_index(name='value').rename(columns={'index': 'Mode'})\n \n mode_choice.loc[:, 'perc'] = mode_choice['value']/mode_choice['value'].sum() * 100.0\n mode_choice.loc[:, 'angle'] = mode_choice['value']/mode_choice['value'].sum() * 2*math.pi\n mode_choice = mode_choice.sort_values('angle', ascending=False)\n cumangle = 0.0\n scale = 3.0\n for i, row in mode_choice.iterrows():\n mode_choice.loc[i, 'start_angle'] = cumangle\n cumangle += row['angle'] / 2.0\n mode_choice.loc[i, 'x_loc'] = math.cos(cumangle) * 0.2\n mode_choice.loc[i, 'y_loc'] = 1 + (math.sin(cumangle) * 0.2 * scale)\n cumangle += row['angle'] / 2.0\n mode_choice.loc[i, 'end_angle'] = cumangle\n \n sorterIndex = dict(zip(self.modes + ['others'], range(len(self.modes + ['others']))))\n mode_choice.loc[:, 'Mode'].replace(to_replace='ride_hail', value='OnDemand_ride', inplace=True)\n mode_choice.loc[:, 'Mode_order'] = mode_choice['Mode'].map(sorterIndex)\n mode_choice = mode_choice.sort_values('Mode_order')\n\n mode_choice.loc[:, 'color'] = Dark2[len(mode_choice)]\n \n mode_choice.loc[:, \"label\"] = mode_choice.apply(lambda x: '{}%'.format(round(x['perc'], 1)) if x['perc']\n >= 2.0 else '', axis=1)\n mode_choice.loc[:, \"label\"] = mode_choice[\"label\"].str.pad(30, side = \"left\")\n data = mode_choice.to_dict(orient='list')\n return data\n\n def make_mode_choice_by_time_data(self):\n \n mode_choice_by_hour = self.mode_choice_hourly_df.reset_index().dropna()\n \n mode_choice_by_hour.loc[:, \"hours\"] = mode_choice_by_hour[\"index\"].apply(lambda x: x.split(\"_\")[1])\n mode_choice_by_hour.rename(columns={\"ride_hail\": \"OnDemand_ride\"}, inplace=True)\n mode_choice_by_hour = mode_choice_by_hour.drop(labels=\"index\", axis=1)\n\n max_hour = max([int(h) for h in mode_choice_by_hour['hours']])\n if max_hour > 23:\n hours = [str(h) for h in range(max_hour + 1)]\n else:\n hours = HOURS\n max_hour = 23\n\n # Completing the dataframe with the missing ridership bins (so that they appear in the plot)\n df = pd.DataFrame([0, 0.0, 0.0, 0.0, 0.0, 0.0]).T\n df.columns = [\"hours\"] + self.modes\n\n for hour in range(max_hour + 1):\n if str(hour) not in mode_choice_by_hour[\"hours\"].values:\n df.loc[0, \"hours\"] = str(hour)\n mode_choice_by_hour = mode_choice_by_hour.append(df, ignore_index=True, sort=False)\n\n mode_choice_by_hour = mode_choice_by_hour.set_index('hours')\n\n # max_choice = mode_choice_by_hour.sum(axis=1).max() * 1.1\n\n data = mode_choice_by_hour.reset_index().to_dict(orient='list')\n return data \n\n def make_mode_choice_by_income_group_data(self):\n\n persons_cols = ['PID', 'income']\n trips_cols = ['PID', 'realizedTripMode']\n people_income_mode = self.persons_df[persons_cols].merge(self.trips_df[trips_cols], on=['PID'])\n edges = [0, 10000, 25000, 50000, 75000, 100000, float('inf')]\n bins = ['[$0, $10k)', '[$10k, $25k)', '[$25k, $50k)', '[$50k, $75k)', '[$75k, $100k)', '[$100k, inf)']\n people_income_mode.loc[:, 'income_group'] = pd.cut(people_income_mode['income'],\n bins=edges,\n labels=bins,\n right=False).astype(str)\n grouped = people_income_mode.groupby(by=['realizedTripMode', 'income_group']).agg('count').reset_index()\n # ymax = grouped['PID'].max() * 
1.1\n\n grouped = grouped.pivot(\n index='realizedTripMode', \n columns='income_group', \n values='PID').reset_index().rename(columns={'index':'realizedTripMode'})\n data = grouped.to_dict(orient='list')\n\n return data \n\n def make_mode_choice_by_age_group_data(self):\n\n persons_cols = ['PID', 'Age']\n trips_cols = ['PID', 'realizedTripMode']\n people_age_mode = self.persons_df[persons_cols].merge(self.trips_df[trips_cols], on=['PID'])\n edges = [0, 18, 30, 40, 50, 60, float('inf')]\n bins = ['[{}, {})'.format(edges[i], edges[i+1]) for i in range(len(edges)-1)]\n people_age_mode.loc[:, 'age_group'] = pd.cut(people_age_mode['Age'],\n bins=edges,\n labels=bins,\n right=False).astype(str)\n grouped = people_age_mode.groupby(by=['realizedTripMode', 'age_group']).agg('count').reset_index()\n # ymax = grouped['PID'].max() * 1.1\n\n grouped = grouped.pivot(\n index='realizedTripMode', \n columns='age_group', \n values='PID').reset_index().rename(columns={'index':'realizedTripMode'})\n data = grouped.to_dict(orient='list')\n return data \n\n def make_mode_choice_by_distance_data(self):\n \n mode_df = self.trips_df[['Trip_ID', 'Distance_m', 'realizedTripMode']].copy()\n mode_df.loc[:,'Distance_miles'] = mode_df['Distance_m'] * 0.000621371\n\n edges = [0, .5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 7.5, 10, 40]\n bins = ['[{}, {})'.format(edges[i], edges[i+1]) for i in range(len(edges)-1)]\n mode_df.loc[:,'Trip Distance (miles)'] = pd.cut(mode_df['Distance_miles'],\n bins=edges,\n labels=bins,\n right=False).astype(str)\n\n mode_df_grouped = mode_df.groupby(by=['realizedTripMode', 'Trip Distance (miles)']).agg('count').reset_index()\n\n # rename df column to num_people due to grouping\n mode_df_grouped = mode_df_grouped.rename(index=str, columns={'Trip_ID': 'num_trips'})\n\n for_plot = mode_df_grouped[['realizedTripMode', 'Trip Distance (miles)', 'num_trips']]\n # max_trips = for_plot.groupby('Trip Distance (miles)')['num_trips'].sum().max() * 1.1\n\n for_plot = for_plot.rename(columns={'realizedTripMode': 'Trip Mode'})\n for_plot = for_plot.pivot(index='Trip Distance (miles)', columns='Trip Mode', values='num_trips').reset_index()\n \n # colors = Dark2[len(self.modes)]\n\n data = for_plot.to_dict(orient='list')\n return data \n\n def make_congestion_travel_time_by_mode_data(self):\n\n travel_time = pd.DataFrame(self.travel_times_df.set_index(\"TravelTimeMode\\Hour\").mean(axis=1)).T\n\n travel_time.rename(columns={'ride_hail': 'OnDemand_ride'}, inplace=True)\n # del travel_time['others']\n\n # max_time = travel_time.max(axis=1)[0] * 1.1\n\n palette = Dark2[len(self.modes)]\n\n data=dict( \n x=self.modes,\n y=[travel_time[mode] for mode in self.modes],\n color=palette,\n )\n return data\n\n def make_congestion_travel_time_per_passenger_trip_data(self):\n\n travel_time = self.travel_times_df.set_index(\"TravelTimeMode\\Hour\").T.reset_index()\n \n travel_time.rename(columns={\"ride_hail\": \"OnDemand_ride\"}, inplace=True)\n # del travel_time['others']\n\n # max_hour = max([int(h) for h in travel_time['index']])\n # if max_hour > 23:\n # hours = [str(h) for h in range(max_hour + 1)]\n # else:\n # hours = HOURS\n # max_hour = 23\n\n # max_time = travel_time.max().max() * 1.1 \n\n data = travel_time.to_dict(orient='list')\n return data\n\n def make_congestion_miles_traveled_per_mode_data(self):\n\n # get_vmt_dataframe:\n vmt_walk = round(\n self.paths_df[self.paths_df[\"mode\"] == \"walk\"][\"length\"].apply(lambda x: x * 0.000621371).sum(), 0)\n vmt_bus = round(\n self.paths_df[self.paths_df[\"mode\"] == 
\"bus\"][\"length\"].apply(lambda x: x * 0.000621371).sum(), 0)\n vmt_on_demand = round(\n self.paths_df[self.paths_df[\"vehicle\"].str.contains(\"rideHailVehicle\")][\"length\"].apply(lambda x: x * 0.000621371).sum(), 0)\n vmt_car = round(self.legs_df[self.legs_df[\"Mode\"] == \"car\"][\"Distance_m\"].apply(lambda x: x * 0.000621371).sum(), 0)\n vmt = pd.DataFrame({\"bus\": [vmt_bus], \"car\": [vmt_car], \"OnDemand_ride\": [vmt_on_demand], \"walk\" : [vmt_walk]})\n\n modes = ['OnDemand_ride', 'car', 'walk', 'bus']\n vmt = pd.melt(vmt, value_vars=modes)\n\n # max_vmt = vmt['value'].max() * 1.1\n\n palette = Dark2[5]\n data = dict(modes=modes, vmt=[vmt_on_demand, vmt_car, vmt_walk, vmt_bus], \n color=[palette[0], palette[1], palette[3], palette[4]])\n return data\n\n def make_congestion_bus_vmt_by_ridership_data(self):\n columns = [\"numPassengers\", \"vehicleType\", \"length\", \"departureTime\", \"arrivalTime\"]\n vmt_bus_ridership = self.paths_df[self.paths_df[\"mode\"] == \"bus\"][columns]\n vmt_bus_ridership.loc[:, 'seatingCapacity'] = vmt_bus_ridership['vehicleType'].apply(\n lambda x: self.seating_capacities[x])\n vmt_bus_ridership.loc[:, 'standingRoomCapacity'] = vmt_bus_ridership['vehicleType'].apply(\n lambda x: self.standing_room_capacities[x])\n\n vmt_bus_ridership.loc[:, 'ridershipPerc'] = vmt_bus_ridership.apply(lambda x: calc_ridership_perc(x), axis=1)\n\n # Split the travels by hour of the day\n edges = range(0,25*3600,3600)\n vmt_bus_ridership.loc[:, \"Hour\"] = pd.cut(vmt_bus_ridership[\"departureTime\"], \n bins=edges,\n labels=HOURS,\n include_lowest=True)\n\n # Group by hours of the day and number of passengers in the bus\n vmt_bus_ridership = vmt_bus_ridership.groupby(by=[\"Hour\", \"ridershipPerc\"])['length'].sum().reset_index()\n edges = [0, 0.01, 50, 100, 150.0, 200.0]\n bins = [\n 'empty\\n(0 passengers)', \n 'low ridership\\n(< 50% seating capacity)', \n 'medium ridership\\n(< seating capacity)', \n 'high ridership\\n(< 50% standing capacity)',\n 'crowded\\n(<= standing capacity)'\n ]\n vmt_bus_ridership.loc[:, \"ridership\"] = pd.cut(vmt_bus_ridership[\"ridershipPerc\"], \n bins=edges,\n labels=bins,\n include_lowest=True)\n vmt_bus_ridership.replace(np.nan, 0, inplace=True)\n vmt_bus_ridership.loc[:, \"Hour\"] = vmt_bus_ridership[\"Hour\"].astype(\"int\")\n # del vmt_bus_ridership['numPassengers']\n # del vmt_bus_ridership['departureTime']\n # del vmt_bus_ridership['arrivalTime']\n # Completing the dataframe with the missing ridership bins (so that they appear in the plot)\n df = pd.DataFrame([0, 0.0, \"\"]).T\n df.columns = [\"Hour\", \"length\", \"ridership\"]\n\n for ridership in bins:\n for hour in range(24):\n if len(vmt_bus_ridership[(vmt_bus_ridership['Hour'] == hour) & (vmt_bus_ridership['ridership'] == ridership)].index) == 0:\n df.loc[0, \"ridership\"] = ridership\n df.loc[0, \"Hour\"] = hour\n vmt_bus_ridership = vmt_bus_ridership.append(df, ignore_index=True, sort=False)\n vmt_bus_ridership = vmt_bus_ridership.groupby(['Hour', 'ridership'])['length'].sum().reset_index().pivot(\n index='Hour',\n columns='ridership', \n values='length')\n # ymax = vmt_bus_ridership.sum(axis=1).max()*1.1\n\n # colors = Dark2[len(bins)]\n\n data = vmt_bus_ridership.reset_index().to_dict(orient='list')\n return data \n\n def make_congestion_on_demand_vmt_by_phases_data(self):\n\n columns = [\"numPassengers\", \"departureTime\", \"length\"]\n vmt_on_demand = self.paths_df[self.paths_df[\"vehicle\"].str.contains(\"rideHailVehicle\")].copy()[columns]\n # Split the travels 
by hour of the day\n edges = range(0,25*3600,3600)\n vmt_on_demand.loc[:, \"Hour\"] = pd.cut(vmt_on_demand[\"departureTime\"],\n bins=edges,\n labels=HOURS,\n right=False)\n driving_states = [\"fetch\", \"fare\"]\n vmt_on_demand.loc[:, \"drivingState\"] = pd.cut(vmt_on_demand[\"numPassengers\"], \n bins=[0, 1, 2], \n labels=driving_states,\n right=False)\n\n vmt_on_demand = vmt_on_demand.groupby(by=[\"Hour\", \"drivingState\"])['length'].sum().reset_index()\n vmt_on_demand.replace(np.nan, 0, inplace=True)\n vmt_on_demand.loc[:, \"Hour\"] = vmt_on_demand[\"Hour\"].astype(\"int\")\n vmt_on_demand = vmt_on_demand.pivot(\n index='Hour', \n columns='drivingState',\n values='length')\n # ymax = vmt_on_demand.sum(axis=1).max()*1.1\n\n # colors = Dark2[3][:len(driving_states)]\n\n data = vmt_on_demand.reset_index().to_dict(orient='list')\n return data \n\n def make_congestion_travel_speed_data(self):\n\n trips = self.trips_df[self.trips_df['Duration_sec'] > 0].copy()\n \n trips.loc[:, 'average speed (meters/sec)'] = trips['Distance_m'] / trips['Duration_sec']\n trips.loc[:, 'Average Speed (miles/hour)'] = 2.23694 * trips['average speed (meters/sec)']\n trips.loc[:, 'Start_time_hour'] = trips['Start_time'] / 3600\n \n edges = [6, 8, 10, 12, 14, 16, 18, 20, 22, 24]\n bins = ['[{}, {})'.format(edges[i], edges[i+1]) for i in range(len(edges)-1)]\n trips.loc[:, 'time_interval'] = pd.cut(trips['Start_time_hour'],\n bins=edges,\n labels=bins,\n right=False).astype(str)\n\n trips = trips.rename(index=str, columns={\"time_interval\": \"Start time interval (hour)\"})\n\n grouped = trips.groupby(by=['Start time interval (hour)', 'realizedTripMode'])['Average Speed (miles/hour)'].mean().reset_index()\n # max_speed = grouped['Average Speed (miles/hour)'].max() * 1.2\n\n grouped = grouped.pivot(\n index='Start time interval (hour)', \n columns='realizedTripMode', \n values='Average Speed (miles/hour)')\n grouped = grouped.reset_index().rename(columns={'index':'Start time interval (hour)'})\n\n data = grouped.to_dict(orient='list')\n return data \n\n def make_los_travel_expenditure_data(self):\n\n trips = self.trips_df.copy()\n trips.loc[:, 'trip_cost'] = np.zeros(trips.shape[0])\n\n trips.loc[trips['realizedTripMode'] == 'car', 'trip_cost'] = \\\n trips[trips['realizedTripMode'] == 'car']['FuelCost'].values\n\n fare_modes = ['walk_transit', 'drive_transit', 'OnDemand_ride']\n trips.loc[trips['realizedTripMode'].isin(fare_modes), 'trip_cost'] = \\\n trips[trips['realizedTripMode'].isin(fare_modes)]['Fare'].values - \\\n trips[trips['realizedTripMode'].isin(fare_modes)]['Incentive'].values\n\n trips.loc[trips['realizedTripMode'] == 'drive_transit', 'trip_cost'] = \\\n trips[trips['realizedTripMode'] == 'drive_transit']['trip_cost'].values + \\\n trips[trips['realizedTripMode'] == 'drive_transit']['FuelCost'].values\n\n trips.loc[trips['trip_cost'] < 0,:] = 0\n trips.loc[:, \"hour_of_day\"] = np.floor(trips.Start_time/3600)\n\n grouped = trips.groupby(by=[\"realizedTripMode\", \"hour_of_day\"])[\"trip_cost\"].mean().reset_index()\n grouped = grouped[grouped['realizedTripMode'] != 0]\n # max_cost = grouped['trip_cost'].max() * 1.1\n\n grouped = grouped.pivot(\n index='hour_of_day', \n columns='realizedTripMode', \n values='trip_cost')\n grouped = grouped.reset_index().rename(columns={'index':'hour_of_day'})\n\n data = grouped.to_dict(orient='list')\n return data \n\n def make_los_crowding_data(self):\n\n columns = [\"vehicle\", \"numPassengers\", \"departureTime\", \"arrivalTime\", \"vehicleType\"]\n bus_slice_df = 
self.paths_df[self.paths_df[\"mode\"] == \"bus\"].copy()[columns]\n\n bus_slice_df.loc[:, \"route_id\"] = bus_slice_df['vehicle'].apply(lambda x: self.trip_to_route[x.split(\":\")[1].split('-')[0]])\n bus_slice_df.loc[:, \"serviceTime\"] = (bus_slice_df['arrivalTime'] - bus_slice_df['departureTime']) / 3600\n bus_slice_df.loc[:, \"seatingCapacity\"] = bus_slice_df['vehicleType'].apply(\n lambda x: TRANSIT_SCALE_FACTOR * self.seating_capacities[x])\n bus_slice_df.loc[:, \"passengerOverflow\"] = bus_slice_df['numPassengers'] > bus_slice_df['seatingCapacity']\n # AM peak = 7am-10am, PM Peak = 5pm-8pm, Early Morning, Midday, Late Evening = in between\n bins = [0, 25200, 36000, 61200, 72000, 86400]\n labels = [\"Early Morning (12a-7a)\", \"AM Peak (7a-10a)\", \"Midday (10a-5p)\", \"PM Peak (5p-8p)\", \"Late Evening (8p-12a)\"]\n bus_slice_df.loc[:, \"servicePeriod\"] = pd.cut(bus_slice_df['departureTime'],\n bins=bins,\n labels=labels)\n grouped_data = bus_slice_df[bus_slice_df['passengerOverflow']].groupby([\n \"route_id\", \"servicePeriod\"])[\"serviceTime\"].sum().fillna(0).reset_index()\n # max_crowding = grouped_data['serviceTime'].max() * 1.1\n\n grouped_data = reset_index(grouped_data.pivot(\n index='route_id', \n columns='servicePeriod', \n values='serviceTime')).rename(columns={'index':'route_id'})\n\n # Completing the dataframe with the missing service periods and route_ids (so that they appear in the plot)\n for label in labels:\n if label not in grouped_data.columns:\n grouped_data.loc[:, label] = 0.0\n \n df = pd.DataFrame(['', 0.0, 0.0, 0.0, 0.0, 0.0]).T\n df.columns = [\"route_id\"] + labels\n\n for route_id in ROUTE_IDS:\n if route_id not in set(grouped_data['route_id']):\n df.loc[0, \"route_id\"] = route_id\n grouped_data = grouped_data.append(df, ignore_index=True, sort=False)\n\n grouped_data.loc[:, 'route_id'] = grouped_data.loc[:, 'route_id'].astype(str)\n data = grouped_data.to_dict(orient='list')\n return data \n\n def make_transit_cb_data(self):\n\n columns = [\"vehicle\", \"numPassengers\", \"departureTime\", \"arrivalTime\", \"FuelCost\", \"vehicleType\"]\n bus_slice_df = self.paths_df.loc[self.paths_df[\"mode\"] == \"bus\"].copy()[columns]\n\n bus_slice_df.loc[:, \"route_id\"] = bus_slice_df['vehicle'].apply(lambda x: self.trip_to_route[x.split(\":\")[-1].split('-')[0]])\n bus_slice_df.loc[:, \"operational_costs_per_bus\"] = bus_slice_df['vehicleType'].apply(lambda x: self.operational_costs[x])\n bus_slice_df.loc[:, \"serviceTime\"] = (bus_slice_df['arrivalTime'] - bus_slice_df['departureTime']) / 3600\n bus_slice_df.loc[:, \"OperationalCosts\"] = bus_slice_df['operational_costs_per_bus'] * bus_slice_df['serviceTime']\n\n columns = [\"Veh\", \"Fare\"]\n bus_fare_df = self.legs_df[self.legs_df[\"Mode\"] == \"bus\"].copy()[columns]\n\n bus_fare_df.loc[:, \"route_id\"] = bus_fare_df['Veh'].apply(\n lambda x: self.trip_to_route[x.split(\":\")[-1].split('-')[0].split('-')[0]])\n \n merged_df = pd.merge(bus_slice_df, bus_fare_df, on=[\"route_id\"])\n\n labels = [\"OperationalCosts\", \"FuelCost\", \"Fare\"]\n costs_labels = labels[:2]\n benefits_labels = [\"Fare\"]\n\n grouped_data = merged_df.groupby(by=\"route_id\")[labels].sum()\n\n # max_cost = grouped_data.sum(axis=1).max() * 1.1\n grouped_data.reset_index(inplace=True)\n\n # Completing the dataframe with the missing route_ids (so that they appear in the plot)\n df = pd.DataFrame(['', 0.0, 0.0, 0.0]).T\n df.columns = [\"route_id\"] + labels\n\n for route_id in ROUTE_IDS:\n if int(route_id) not in 
grouped_data['route_id'].values:\n df.loc[0, \"route_id\"] = int(route_id)\n grouped_data = grouped_data.append(df, ignore_index=True, sort=False)\n grouped_data.sort_values('route_id', inplace=True)\n\n grouped_data.loc[:, 'route_id'] = grouped_data.loc[:, 'route_id'].astype(str)\n grouped_data.loc[:, 'OperationalCosts'] *= -1\n grouped_data.loc[:, 'FuelCost'] *= -1\n\n # colors = Dark2[len(labels)]\n\n costs_data = grouped_data[['route_id'] + costs_labels].to_dict(orient='list')\n benefits_data = grouped_data[['route_id'] + benefits_labels].to_dict(orient='list')\n return costs_data, benefits_data\n\n def make_transit_inc_by_mode_data(self):\n \n columns = ['FuelCost', 'Fare', 'Start_time', 'realizedTripMode', 'Incentive']\n trips = self.trips_df.copy()[columns]\n\n trips.loc[:, 'trip_cost'] = np.zeros(trips.shape[0])\n trips.loc[:, 'ride_expenditure'] = trips['Fare'] - trips['Incentive']\n ride_modes = set(['walk_transit', 'drive_transit', 'OnDemand_ride'])\n\n trips.loc[trips['realizedTripMode'] == 'car', 'trip_cost'] = trips[trips['realizedTripMode'] == 'car']['FuelCost'].values\n trips.loc[trips['realizedTripMode'].isin(ride_modes), 'trip_cost'] = trips[trips['realizedTripMode'].isin(ride_modes)]['ride_expenditure'].values\n trips.loc[trips['realizedTripMode'] == 'drive_transit', 'trip_cost'] += trips[trips['realizedTripMode'] == 'drive_transit']['FuelCost'].values\n\n trips.loc[:, 'Incentives distributed'] = trips['Incentive'].values\n trips.loc[trips['trip_cost'] < 0, 'Incentives distributed'] -= trips[trips['trip_cost'] < 0]['trip_cost'].values\n\n trips.loc[:, \"hour_of_day\"] = np.floor(trips['Start_time'] / 3600).astype(int)\n grouped = trips.groupby(by=[\"realizedTripMode\", \"hour_of_day\"])[\"Incentives distributed\"].sum().reset_index()\n\n # max_incentives = grouped['Incentives distributed'].max() * 1.1\n # if max_incentives == 0:\n # max_incentives = 100\n\n grouped = grouped.pivot(\n index='hour_of_day', \n columns='realizedTripMode', \n values='Incentives distributed')\n grouped = grouped.reset_index().rename(columns={'index':'hour_of_day'})\n\n data = grouped.to_dict(orient='list')\n return data \n\n def make_sustainability_25pm_per_mode_data(self):\n \n columns = [\"vehicle\", \"mode\", \"length\", \"departureTime\"]\n vmt = self.paths_df.copy()[columns]\n\n # emissions for each mode\n emissions_bus = round(\n vmt[vmt[\"mode\"] == \"bus\"][\"length\"].apply(lambda x: x * 0.000621371 * 0.259366648).sum(), 0)\n emissions_on_demand = round(\n vmt[vmt[\"vehicle\"].str.contains(\"rideHailVehicle\")][\"length\"].apply(\n lambda x: x * 0.000621371 * 0.001716086).sum(), 0)\n emissions_car = round(\n self.legs_df[self.legs_df[\"Mode\"] == \"car\"][\"Distance_m\"].apply(lambda x: x * 0.000621371 * 0.001716086).sum(), 0)\n\n emissions = pd.DataFrame({\"bus\": [emissions_bus], \"car\": [emissions_car], \"OnDemand_ride\": [emissions_on_demand]})\n\n modes = ['OnDemand_ride', 'car', 'bus']\n emissions = pd.melt(emissions, value_vars=modes)\n\n # max_emissions = emissions['value'].max() * 1.1\n \n palette = Dark2[len(modes)]\n data=dict(modes=modes, emissions=[emissions_on_demand, emissions_car, emissions_bus], color=palette)\n return data\n" ]
[ [ "pandas.cut", "numpy.zeros", "pandas.merge", "pandas.DataFrame", "pandas.melt", "numpy.floor" ] ]
Gnostikoi/active-semi-supervised-clustering
[ "429e19a6fe5f8cd1005c0bcbed0bc4ca18c32064" ]
[ "active_semi_clustering/active/pairwise_constraints/min_max.py" ]
[ "import numpy as np\n\nfrom ...exceptions import MaximumQueriesExceeded\nfrom .explore_consolidate import ExploreConsolidate\n\n\nclass MinMax(ExploreConsolidate):\n def _consolidate(self, neighborhoods, X, oracle):\n n = X.shape[0]\n\n skeleton = set()\n for neighborhood in neighborhoods:\n for i in neighborhood:\n skeleton.add(i)\n\n remaining = set()\n for i in range(n):\n if i not in skeleton:\n remaining.add(i)\n\n distances = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n distances[i, j] = np.sqrt(((X[i] - X[j]) ** 2).sum())\n\n kernel_width = np.percentile(distances, 20)\n\n while True:\n try:\n max_similarities = np.full(n, fill_value=float('+inf'))\n for x_i in remaining:\n max_similarities[x_i] = np.max([similarity(X[x_i], X[x_j], kernel_width) for x_j in skeleton])\n\n q_i = max_similarities.argmin()\n\n sorted_neighborhoods = reversed(sorted(neighborhoods, key=lambda neighborhood: np.max([similarity(X[q_i], X[n_i], kernel_width) for n_i in neighborhood])))\n\n for neighborhood in sorted_neighborhoods:\n if oracle.query(q_i, neighborhood[0]):\n neighborhood.append(q_i)\n break\n\n skeleton.add(q_i)\n remaining.remove(q_i)\n\n except MaximumQueriesExceeded:\n break\n\n return neighborhoods\n\n\ndef similarity(x, y, kernel_width):\n return np.exp(-((x - y) ** 2).sum() / (2 * (kernel_width ** 2)))\n" ]
[ [ "numpy.percentile", "numpy.zeros" ] ]
luca-morreale/neural_surface_maps
[ "08275190fc8b2b2bf17c5be695c125f1806ff649" ]
[ "utils/show.py" ]
[ "\nimport trimesh\n\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nfrom matplotlib.tri import Triangulation\n\nmpl.rcParams['agg.path.chunksize'] = 10000\n\n\ndef show_mesh(filename, source, out, faces, pp_loss=None):\n np_uvs = source.cpu().numpy()\n np_verts = out.cpu().numpy()\n np_faces = faces.cpu().numpy()\n\n mesh = trimesh.Trimesh(\n vertices=np_verts,\n faces=np_faces,\n vertex_attributes={\n 'texture_u': np_uvs[:,0], # for meshlab\n 'texture_v': np_uvs[:,1], # for meshlab\n 's': np_uvs[:,0], # for blender\n 't': np_uvs[:,1], # for blender\n },\n process=False) # no data reordering\n\n if pp_loss is not None:\n mesh.vertex_attributes['error'] = pp_loss.cpu().numpy()\n\n mesh.export(filename)\n\n\ndef show_mesh_2D(filename, uv_points, triangles, landmarks=None):\n uv_points = uv_points.detach().cpu().numpy()\n triangles = triangles.detach().cpu().numpy()\n\n # draw image of conformal points given faces\n plt.figure(figsize=(10, 10), dpi=90)\n plt.title('Mesh layout')\n\n plt.triplot(uv_points[:,0], uv_points[:,1], triangles, linewidth=0.5, c='k')\n\n plt.axis('equal')\n plt.savefig(filename)\n plt.close()\n\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.pyplot.triplot", "matplotlib.pyplot.axis" ] ]
chamberm/Detect
[ "a0381f98116346a1135560ad7192ec2dac741b69" ]
[ "Detect/models/PCA.py" ]
[ "from __future__ import division, print_function, absolute_import\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport streamlit as st\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nimport seaborn as sns\n\ndef save(model):\n pass\n\ndef covar_matrix(data, verbose=False):\n covariance_matrix = np.cov(data, rowvar=False)\n if is_pos_def(covariance_matrix):\n inv_covariance_matrix = np.linalg.inv(covariance_matrix)\n if is_pos_def(inv_covariance_matrix):\n return covariance_matrix, inv_covariance_matrix\n else:\n print(\"Error: Inverse of Covariance Matrix is not positive definite!\")\n else:\n print(\"Error: Covariance Matrix is not positive definite!\")\n \ndef MahalanobisDist(inv_cov_matrix, mean_distr, data, verbose=False):\n inv_covariance_matrix = inv_cov_matrix\n vars_mean = mean_distr\n diff = data - vars_mean\n md = np.zeros(len(diff))\n for i in range(len(diff)):\n md[i] = np.sqrt(diff[i].dot(inv_covariance_matrix).dot(diff[i]))\n return md\n\ndef MD_detectOutliers(dist, extreme=False, verbose=False):\n k = 3. if extreme else 2.\n threshold = np.mean(dist) * k\n outliers = []\n for i in range(len(dist)):\n if dist[i] >= threshold:\n outliers.append(i) # index of the outlier\n return np.array(outliers)\n\ndef MD_threshold(dist, extreme=False, verbose=False):\n k = 3. if extreme else 2.\n threshold = np.mean(dist) * k\n return threshold\n\ndef is_pos_def(A):\n if np.allclose(A, A.T):\n try:\n np.linalg.cholesky(A)\n return True\n except np.linalg.LinAlgError:\n return False\n else:\n return False\n \ndef run(model):\n X_train = model.get_train()\n X_test = model.get_test()\n pca = PCA(0.85, svd_solver= 'full')\n \n X_train_PCA = pca.fit_transform(X_train)\n X_test_PCA = pca.transform(X_test)\n #fig, ax = plt.subplots(1,1,figsize=(12, 8))\n #ax.plot(np.cumsum(pca.explained_variance_ratio_))\n st.write(\"Explained variance:\", pca.explained_variance_ratio_)\n \n X_train_PCA = pd.DataFrame(X_train_PCA)\n X_train_PCA.index = X_train.index\n data_train = X_train_PCA.values\n \n X_test_PCA = pd.DataFrame(X_test_PCA)\n X_test_PCA.index = X_test.index\n data_test = X_test_PCA.values\n \n cov_matrix, inv_cov_matrix = covar_matrix(data_train)\n mean_distr = np.mean(data_train, axis=0)\n \n Mob_test = MahalanobisDist(inv_cov_matrix, mean_distr, data_test, verbose=False)\n M_test = np.squeeze(pd.DataFrame(data=Mob_test, index=X_test.index))\n \n Mob_train = MahalanobisDist(inv_cov_matrix, mean_distr, data_train, verbose=False)\n M_train = np.squeeze(pd.DataFrame(data=Mob_train, index=X_train.index))\n\n return M_train, M_test\n " ]
[ [ "numpy.array", "numpy.cov", "pandas.DataFrame", "numpy.linalg.inv", "numpy.mean", "numpy.allclose", "numpy.linalg.cholesky", "sklearn.decomposition.PCA" ] ]
TommyLike/mindspore
[ "930a1fb0a8fa9432025442c4f4732058bb7af592", "930a1fb0a8fa9432025442c4f4732058bb7af592", "930a1fb0a8fa9432025442c4f4732058bb7af592", "930a1fb0a8fa9432025442c4f4732058bb7af592" ]
[ "tests/ut/python/communication/test_data_parallel_resnet.py", "tests/vm_impl/vm_me.py", "tests/ut/python/pynative_mode/test_cell_bprop.py", "tests/ut/python/parallel/test_one_weight_parameter.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nresnet50 example\n\"\"\"\nimport numpy as np\n\nfrom mindspore import Tensor, Model, ParallelMode\nfrom mindspore.ops.operations import TensorAdd\nimport mindspore.nn as nn\nimport mindspore.context as context\nfrom mindspore.nn.optim import Momentum\nfrom ....dataset_mock import MindData\n\n\ndef conv3x3(in_channels, out_channels, stride=1, padding=1, pad_mode='pad'):\n \"\"\"3x3 convolution \"\"\"\n return nn.Conv2d(in_channels, out_channels,\n kernel_size=3, stride=stride, padding=padding, pad_mode=pad_mode)\n\n\ndef conv1x1(in_channels, out_channels, stride=1, padding=0, pad_mode='pad'):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_channels, out_channels,\n kernel_size=1, stride=stride, padding=padding, pad_mode=pad_mode)\n\n\nclass ResidualBlock(nn.Cell):\n \"\"\"\n residual Block\n \"\"\"\n expansion = 4\n\n def __init__(self,\n in_channels,\n out_channels,\n stride=1,\n down_sample=False):\n super(ResidualBlock, self).__init__()\n\n out_chls = out_channels // self.expansion\n self.conv1 = conv1x1(in_channels, out_chls, stride=1, padding=0)\n self.bn1 = nn.BatchNorm2d(out_chls)\n\n self.conv2 = conv3x3(out_chls, out_chls, stride=stride, padding=1)\n self.bn2 = nn.BatchNorm2d(out_chls)\n\n self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)\n self.bn3 = nn.BatchNorm2d(out_channels)\n\n self.relu = nn.ReLU()\n self.downsample = down_sample\n\n self.conv_down_sample = conv1x1(in_channels, out_channels,\n stride=stride, padding=0)\n self.bn_down_sample = nn.BatchNorm2d(out_channels)\n self.add = TensorAdd()\n\n def construct(self, x):\n \"\"\"\n :param x:\n :return:\n \"\"\"\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample:\n identity = self.conv_down_sample(identity)\n identity = self.bn_down_sample(identity)\n\n out = self.add(out, identity)\n out = self.relu(out)\n\n return out\n\n\nclass ResNet18(nn.Cell):\n \"\"\"\n resnet nn.Cell\n \"\"\"\n\n def __init__(self, block, num_classes=100):\n super(ResNet18, self).__init__()\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, pad_mode='pad')\n\n self.layer1 = self.MakeLayer(\n block, 2, in_channels=64, out_channels=256, stride=1)\n self.layer2 = self.MakeLayer(\n block, 2, in_channels=256, out_channels=512, stride=2)\n self.layer3 = self.MakeLayer(\n block, 2, in_channels=512, out_channels=1024, stride=2)\n self.layer4 = self.MakeLayer(\n block, 2, in_channels=1024, out_channels=2048, stride=2)\n\n self.avgpool = nn.AvgPool2d(7, 1)\n self.flatten = nn.Flatten()\n self.fc = nn.Dense(512 * block.expansion, num_classes)\n\n def MakeLayer(self, block, layer_num, in_channels, 
out_channels, stride):\n \"\"\"\n make block layer\n :param block:\n :param layer_num:\n :param in_channels:\n :param out_channels:\n :param stride:\n :return:\n \"\"\"\n layers = []\n resblk = block(in_channels, out_channels,\n stride=stride, down_sample=True)\n layers.append(resblk)\n\n for _ in range(1, layer_num):\n resblk = block(out_channels, out_channels, stride=1)\n layers.append(resblk)\n\n return nn.SequentialCell(layers)\n\n def construct(self, x):\n \"\"\"\n :param x:\n :return:\n \"\"\"\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = self.flatten(x)\n x = self.fc(x)\n\n return x\n\n\nclass ResNet9(nn.Cell):\n \"\"\"\n resnet nn.Cell\n \"\"\"\n\n def __init__(self, block, num_classes=100):\n super(ResNet9, self).__init__()\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, pad_mode='same')\n\n self.layer1 = self.MakeLayer(\n block, 1, in_channels=64, out_channels=256, stride=1)\n self.layer2 = self.MakeLayer(\n block, 1, in_channels=256, out_channels=512, stride=2)\n self.layer3 = self.MakeLayer(\n block, 1, in_channels=512, out_channels=1024, stride=2)\n self.layer4 = self.MakeLayer(\n block, 1, in_channels=1024, out_channels=2048, stride=2)\n\n self.avgpool = nn.AvgPool2d(7, 1)\n self.flatten = nn.Flatten()\n self.fc = nn.Dense(512 * block.expansion, num_classes)\n\n def MakeLayer(self, block, layer_num, in_channels, out_channels, stride):\n \"\"\"\n make block layer\n :param block:\n :param layer_num:\n :param in_channels:\n :param out_channels:\n :param stride:\n :return:\n \"\"\"\n layers = []\n resblk = block(in_channels, out_channels,\n stride=stride, down_sample=True)\n layers.append(resblk)\n\n for _ in range(1, layer_num):\n resblk = block(out_channels, out_channels, stride=1)\n layers.append(resblk)\n\n return nn.SequentialCell(layers)\n\n def construct(self, x):\n \"\"\"\n :param x:\n :return:\n \"\"\"\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = self.flatten(x)\n x = self.fc(x)\n\n return x\n\n\ndef resnet9(classnum):\n return ResNet9(ResidualBlock, classnum)\n\n\nclass DatasetLenet(MindData):\n \"\"\"DatasetLenet definition\"\"\"\n\n def __init__(self, predict, label, length=3, size=None, batch_size=None,\n np_types=None, output_shapes=None, input_indexs=()):\n super(DatasetLenet, self).__init__(size=size, batch_size=batch_size,\n np_types=np_types, output_shapes=output_shapes,\n input_indexs=input_indexs)\n self.predict = predict\n self.label = label\n self.index = 0\n self.length = length\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.length:\n raise StopIteration\n self.index += 1\n return self.predict, self.label\n\n def reset(self):\n self.index = 0\n\n\ndef test_resnet_train_tensor():\n \"\"\"test_resnet_train_tensor\"\"\"\n batch_size = 1\n size = 2\n context.set_context(mode=context.GRAPH_MODE)\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, device_num=size,\n parameter_broadcast=True)\n one_hot_len = 10\n dataset_types = (np.float32, np.float32)\n dataset_shapes = [[batch_size, 3, 224, 224], 
[batch_size, one_hot_len]]\n predict = Tensor(np.ones([batch_size, 3, 224, 224]).astype(np.float32) * 0.01)\n label = Tensor(np.zeros([batch_size, one_hot_len]).astype(np.float32))\n dataset = DatasetLenet(predict, label, 2,\n size=2, batch_size=2,\n np_types=dataset_types,\n output_shapes=dataset_shapes,\n input_indexs=(0, 1))\n dataset.reset()\n network = resnet9(one_hot_len)\n network.set_train()\n loss_fn = nn.SoftmaxCrossEntropyWithLogits()\n optimizer = Momentum(filter(lambda x: x.requires_grad, network.get_parameters()), learning_rate=0.1, momentum=0.9)\n model = Model(network=network, loss_fn=loss_fn, optimizer=optimizer)\n model.train(epoch=2, train_dataset=dataset, dataset_sink_mode=False)\n context.set_context(mode=context.GRAPH_MODE)\n context.reset_auto_parallel_context()\n\n\nclass_num = 10\n\n\ndef get_dataset():\n dataset_types = (np.float32, np.float32)\n dataset_shapes = ((32, 3, 224, 224), (32, class_num))\n\n dataset = MindData(size=2, batch_size=1,\n np_types=dataset_types,\n output_shapes=dataset_shapes,\n input_indexs=(0, 1))\n return dataset\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"VM implementations based on numpy.\"\"\"\n\nimport numpy as np\nfrom mindspore._checkparam import Rel\nfrom mindspore._checkparam import ParamValidator as validator\n\n\ndef avg_pooling(x, pool_h, pool_w, stride, pad):\n \"\"\"\n Applies average pooling over an input array.\n\n Args:\n x (numpy.ndarray): The input array to be average pooled.\n pool_h (int): Height of the pooling window.\n pool_w (int): Width of the pooling window.\n stride (int): The stride of the sliding window.\n pad (int): Padding to be added on height and width.\n\n Returns:\n numpy.ndarray, an output array after applying average pooling on input array.\n \"\"\"\n validator.check_integer(\"stride\", stride, 0, Rel.GT)\n num, channel, height, width = x.shape\n out_h = (height + 2*pad - pool_h)//stride + 1\n out_w = (width + 2*pad - pool_w)//stride + 1\n\n col = im2col(x, pool_h, pool_w, stride, pad)\n col = col.reshape(-1, pool_h*pool_w)\n\n out = np.mean(col, axis=1)\n out = out.reshape(num, out_h, out_w, channel).transpose(0, 3, 1, 2)\n\n return out\n\n\ndef avg_pool_grad(dout, origin_shape, pool_h, pool_w, stride, pad):\n \"\"\"\n Gets grad of average pooling.\n\n Args:\n x (numpy.ndarray): The input array to be average pooled.\n dout (numpy.ndarray): The grad of pre-layer.\n pool_h (int): Height of the pooling window.\n pool_w (int): Width of the pooling window.\n stride (int): The stride of the sliding window.\n pad (int): Padding to be added on height and width.\n\n Returns:\n numpy.ndarray, grad of avgerage pooling.\n \"\"\"\n # pylint: disable=unused-argument\n _, _, height, width = dout.shape\n dx = np.zeros(origin_shape)\n for i in range(height):\n for j in range(width):\n dx[:, :, i:(i+pool_h), j:(j+pool_w)] += np.ones((pool_h, pool_w))\n return dx\n\n\ndef 
_batch_norm(x, scale, shift, running_mean=None, running_var=None,\n eps=1e-05, momentum=0.1, is_training=True):\n \"\"\"Batch normalization over an array.\"\"\"\n _, c_h_w = x.shape\n # Handle running_mean and running_var are not None\n # if running_mean is None:\n # running_mean = np.zeros(c_h_w)\n # running_var = np.zeros(c_h_w)\n running_mean = np.zeros(c_h_w)\n running_var = np.zeros(c_h_w)\n if np.ndim(scale) > 0:\n scale = scale.mean()\n if np.ndim(shift) > 0:\n shift = shift.mean()\n\n if is_training:\n x_mean = np.mean(x, axis=0)\n x_var = np.var(x, axis=0)\n\n # Normalization followed by Affine transformation\n x_norm = (x - x_mean)/np.sqrt(x_var + eps)\n\n # Estimate running average of mean and variance to use at test time\n running_mean = momentum * running_mean + (1 - momentum) * x_mean\n running_var = momentum * running_var + (1 - momentum) * x_var\n else:\n # normalize using running average\n x_norm = (x - running_mean)/np.sqrt(running_var + eps)\n x_mean = running_mean\n x_var = running_var\n\n out = scale * x_norm + shift\n\n return out, x_mean, x_var, running_mean, running_var\n\n\ndef batch_norm(x, scale=1, shift=0, mean=None, variance=None,\n eps=1e-05, momentum=0.1, is_training=True):\n \"\"\"Batch normalization over an array.\"\"\"\n input_shape = x.shape\n if x.ndim != 2:\n batch_num = x.shape[0]\n x = x.reshape(batch_num, -1)\n\n out, _, _, running_mean, running_var = _batch_norm(x, scale, shift, mean, variance, \\\n eps, momentum, is_training)\n\n return out.reshape(*input_shape), np.array(scale), np.array(shift), running_mean, running_var\n\n\ndef _batch_norm_grad(dout, x, scale, save_mean, save_inv_variance, \\\n eps=1e-05, momentum=0.1, is_training=True):\n \"\"\"Batch normalization over an array.\"\"\"\n if x.ndim != 2:\n batch_num = x.shape[0]\n x = x.reshape(batch_num, -1)\n if np.ndim(scale) > 0:\n scale = scale.mean()\n x_norm, x_mean, x_var, _, _ = _batch_norm(x, scale, shift=0, running_mean=save_mean, \\\n running_var=save_inv_variance, \\\n eps=eps, momentum=momentum, is_training=is_training)\n batch_size = x.shape[0]\n dx_norm = scale * dout\n dvar = np.sum(dx_norm*(x - x_mean)*((x_var + eps)**(-3.0/2))*(-1.0/2), axis=0)\n dmean = np.sum(dx_norm*(-1.0/np.sqrt(x_var + eps)), axis=0) \\\n + dvar*(np.sum(-2*(x - x_mean), axis=0)*(1.0/batch_size))\n dx = dx_norm*(1.0/np.sqrt(x_var + eps)) + dvar*(2.0*(x - x_mean)/batch_size) + dmean*(1.0/batch_size)\n dgamma = np.sum(dout*x_norm, axis=0)\n dbeta = np.sum(dout, axis=0)\n return dx, dgamma, dbeta\n\n\ndef batch_norm_grad(dy, x, scale, save_mean, save_inv_variance):\n \"\"\"Batch normalization over an array.\"\"\"\n if dy.ndim != 2:\n batch_size = dy.shape[0]\n dy = dy.reshape(batch_size, -1)\n\n dx, dgamma, dbeta = _batch_norm_grad(dy, x, scale, save_mean, save_inv_variance)\n input_shape = x.shape\n dx = dx.reshape(*input_shape)\n return dx, dgamma, dbeta\n\n\ndef col2im(col, input_shape, filter_h, filter_w, stride=1, pad=0):\n \"\"\"Rearranges a row vector to an image.\"\"\"\n validator.check_integer(\"stride\", stride, 0, Rel.GT)\n batch_num, channel, height, width = input_shape\n out_h = (height + 2*pad - filter_h)//stride + 1\n out_w = (width + 2*pad - filter_w)//stride + 1\n col = col.reshape(batch_num, out_h, out_w, channel, filter_h, filter_w) \\\n .transpose(0, 3, 4, 5, 1, 2)\n\n img = np.zeros((batch_num,\n channel,\n height + 2*pad + stride - 1,\n width + 2*pad + stride - 1)) \\\n .astype(col.dtype)\n for y in range(filter_h):\n y_max = y + stride*out_h\n for x in range(filter_w):\n x_max = x + 
stride*out_w\n img[:, :, y:y_max:stride, x:x_max:stride] += col[:, :, y, x, :, :]\n\n return img[:, :, pad:height + pad, pad:width + pad]\n\n\ndef convolve(x, w, b=None, pad_mode=\"valid\"):\n \"\"\"\n Gets the discrete, linear convolution of two one-dimensional sequences.\n\n Args:\n x (numpy.ndarray): One-dimensional input array.\n w (numpy.ndarray): One-dimensional input array.\n b (numpy.ndarray): One-dimensional input array. Default: None.\n pad_mode (str): Padding mode which can be: \"full\" means returns the\n convolution at each point of overlap, with an output shape\n of (N+M-1,); \"same\" means returns output of length max(M, N);\n Amd \"valid\" means returns output of length max(M, N) - min(M, N)\n + 1. Default: \"valid\".\n\n Returns:\n numpy.ndarray, discrete, linear convolution of x and w, then plus b.\n \"\"\"\n if pad_mode not in {\"same\", \"valid\"}:\n pad_mode = \"full\"\n y = np.convolve(x, w, pad_mode)\n if b:\n y += b\n return y\n\n\ndef conv2d(x, weight, bias=None, stride=1, pad=0,\n dilation=1, groups=1, padding_mode='zeros'):\n \"\"\"Convolution 2D.\"\"\"\n # pylint: disable=unused-argument\n validator.check_integer(\"stride\", stride, 0, Rel.GT)\n batch_num, _, x_h, x_w = x.shape\n filter_num, _, filter_h, filter_w = weight.shape\n out_h = 1 + int((x_h + 2 * pad - filter_h - (filter_h - 1) * (dilation - 1)) / stride)\n out_w = 1 + int((x_w + 2 * pad - filter_w - (filter_w - 1) * (dilation - 1)) / stride)\n col = im2col(x, filter_h, filter_w, stride, pad, dilation)\n col_w = np.reshape(weight, (filter_num, -1)).T\n out = np.dot(col, col_w)\n out = out.reshape(batch_num, out_h, out_w, -1).transpose(0, 3, 1, 2)\n if bias is not None:\n out += bias\n return out\n\n\ndef conv2d_backprop_filter(dout, x, w_size, stride=1, pad=0):\n \"\"\"Backpropagation filter for conv2d.\"\"\"\n filter_num, channel, filter_height, filter_width = w_size\n dout = dout.transpose(0, 2, 3, 1).reshape(-1, filter_num)\n col = im2col(x, filter_height, filter_width, stride, pad)\n dw = np.dot(col.T, dout)\n dw = dw.transpose(1, 0).reshape(filter_num, channel, filter_height, filter_width)\n return dw\n\n\ndef conv2d_backprop_input(dout, x_size, weight, stride=1, pad=0):\n \"\"\"Backpropagation input for conv2d.\"\"\"\n filter_num, _, filter_h, filter_w = weight.shape\n dout = dout.transpose(0, 2, 3, 1).reshape(-1, filter_num)\n col_w = weight.reshape(filter_num, -1).T\n dcol = np.dot(dout, col_w.T)\n dx = col2im(dcol, x_size, filter_h, filter_w, stride, pad)\n return dx\n\n\ndef flatten(x):\n \"\"\"\n Flattens an array to one dimension.\n\n Args:\n x (numpy.ndarray): An array to be flattened.\n\n Returns:\n numpy.ndarray, a flattened array in one dimension.\n \"\"\"\n return x.flatten()\n\n\ndef flatten2(x):\n \"\"\"\n Flattens an array to one dimension by reshape.\n\n Args:\n x (numpy.ndarray): An array to be flattened.\n\n Returns:\n numpy.ndarray, a flattened array in one dimension.\n \"\"\"\n return x.reshape(1, -1)\n\n\ndef flatten_batch(x):\n \"\"\"\n Flattens a batch of arrays to one dimension.\n\n Args:\n x (numpy.ndarray): A batch of arrays to be flattened.\n\n Returns:\n numpy.ndarray, a flattened one dimension array.\n \"\"\"\n return x.reshape(x.shape[0], -1)\n\n\ndef flatten_grad(dout, x):\n \"\"\"Grad of flatten.\"\"\"\n dout = np.reshape(dout, x)\n return dout\n\n\ndef im2col(img, filter_h, filter_w, stride=1, pad=0, dilation=1):\n \"\"\"Rearranges an image to row vector.\"\"\"\n validator.check_integer(\"stride\", stride, 0, Rel.GT)\n batch_num, channel, height, width = 
img.shape\n out_h = (height + 2*pad - filter_h- (filter_h - 1) * (dilation - 1))//stride + 1\n out_w = (width + 2*pad - filter_w- (filter_w - 1) * (dilation - 1))//stride + 1\n\n img = np.pad(img, [(0, 0), (0, 0), (pad, pad), (pad, pad)], 'constant')\n col = np.zeros((batch_num, channel, filter_h, filter_w, out_h, out_w)).astype(img.dtype)\n\n for y in range(filter_h):\n y_max = y + stride*out_h\n for x in range(filter_w):\n x_max = x + stride*out_w\n col[:, :, y, x, :, :] = img[:, :, y:y_max:stride, x:x_max:stride]\n\n col = col.transpose(0, 4, 5, 1, 2, 3).reshape(batch_num*out_h*out_w, -1)\n return col\n\n\ndef matmul(x, w, b=None):\n \"\"\"\n Dot product of array x and w, then plus array b if b is not None.\n\n Args:\n x (numpy.ndarray): Represents the input array.\n w (numpy.ndarray): Represents weights array.\n b (numpy.ndarray): Represents bias array which has the same shape as x. Default: None.\n\n Returns:\n numpy.ndarray, the result of (x*w + b).\n \"\"\"\n y = np.dot(x, w)\n if b:\n y += b\n return y\n\n\ndef max_pooling(x, pool_h, pool_w, stride, pad):\n \"\"\"Max pooling.\"\"\"\n validator.check_integer(\"stride\", stride, 0, Rel.GT)\n num, channel, height, width = x.shape\n out_h = (height + 2*pad - pool_h)//stride + 1\n out_w = (width + 2*pad - pool_w)//stride + 1\n\n col = im2col(x, pool_h, pool_w, stride, pad)\n col = col.reshape(-1, pool_h*pool_w)\n\n out = np.max(col, axis=1)\n out = out.reshape(num, out_h, out_w, channel).transpose(0, 3, 1, 2)\n\n return out\n\n\ndef max_pool_grad(x, dout, pool_h, pool_w, stride, pad):\n \"\"\"Grad of max pooling.\"\"\"\n dout = dout.transpose(0, 2, 3, 1)\n pool_size = pool_h * pool_w\n dmax = np.zeros((dout.size, pool_size))\n col = im2col(x, pool_h, pool_w, stride, pad)\n col = col.reshape(-1, pool_h*pool_w)\n arg_max = np.argmax(col, axis=1)\n dmax[np.arange(arg_max.size), arg_max.flatten()] = dout.flatten()\n dmax = dmax.reshape(dout.shape + (pool_size,))\n dcol = dmax.reshape(dmax.shape[0]*dmax.shape[1]*dmax.shape[2], -1)\n dx = col2im(dcol, x.shape, pool_h, pool_w, stride, pad)\n return dx\n\n\ndef max_pool_grad_with_argmax(x, arg_max, dout, pool_h, pool_w, stride, pad):\n \"\"\"Grad of max pooling with argmax.\"\"\"\n dout = dout.transpose(0, 2, 3, 1)\n pool_size = pool_h * pool_w\n dmax = np.zeros((dout.size, pool_size))\n dmax[np.arange(arg_max.size), arg_max.flatten()] = dout.flatten()\n dmax = dmax.reshape(dout.shape + (pool_size,))\n dcol = dmax.reshape(dmax.shape[0]*dmax.shape[1]*dmax.shape[2], -1)\n dx = col2im(dcol, x.shape, pool_h, pool_w, stride, pad)\n return dx\n\n\ndef max_pool_with_argmax(x, pool_h, pool_w, stride, pad):\n \"\"\"Max pooling with argmax.\"\"\"\n validator.check_integer(\"stride\", stride, 0, Rel.GT)\n num, channel, height, width = x.shape\n out_h = (height + 2*pad - pool_h)//stride + 1\n out_w = (width + 2*pad - pool_w)//stride + 1\n col = im2col(x, pool_h, pool_w, stride, pad)\n col = col.reshape(-1, pool_h*pool_w)\n out = np.max(col, axis=1)\n out_argmax = np.argmax(col, axis=1)\n out = out.reshape(num, out_h, out_w, channel).transpose(0, 3, 1, 2)\n out_argmax = out_argmax.reshape(num, out_h, out_w, channel).transpose(0, 3, 1, 2)\n return out, out_argmax\n\n\ndef relu(x):\n \"\"\"\n Rectified linear unit.\n\n Args:\n x (numpy.ndarray): The input array.\n\n Returns:\n numpy.ndarray, the array applied relu.\n \"\"\"\n return x * (x > 0)\n\n\ndef relu_grad(y):\n \"\"\"\n Grad of relu.\n\n Args:\n y (numpy.ndarray): The input array.\n\n Returns:\n numpy.ndarray, the array applied grad of relu.\n 
\"\"\"\n y[y <= 0] = 0\n y[y > 0] = 1\n return y\n\n\ndef sigmoid(x):\n \"\"\"\n Sigmoid activation function.\n\n Args:\n x (numpy.ndarray): The input array.\n\n Returns:\n numpy.ndarray, the array applied sigmoid.\n \"\"\"\n return 1 / (1 + np.exp(x * -1))\n\n\ndef tanh(x):\n \"\"\"\n Computes hyperbolic tangent element-wise.\n\n Args:\n x (numpy.ndarray): The input array.\n\n Returns:\n numpy.ndarray, the array applied tanh.\n \"\"\"\n a = np.exp(x) - np.exp(x * -1)\n b = np.exp(x) + np.exp(x * -1)\n return a / b\n\n\ndef softmax(x, axis=None):\n \"\"\"\n Softmax function which is `softmax(x) = np.exp(x)/sum(np.exp(x))`.\n\n Args:\n x (numpy.ndarray): Input array.\n axis (Union[int, tuple[int]]): Axis to compute values along. Default: None.\n\n Returns:\n numpy.ndarray, has the same shape as x.\n \"\"\"\n from scipy.special import softmax as scipy_softmax\n return scipy_softmax(x, axis)\n\n\ndef softmax_cross_entropy_with_logits(logits, labels):\n sample_num = labels.shape[0]\n prob = softmax(logits)\n log_likelihood = -np.log(prob[range(sample_num)]) * labels\n #loss = np.sum(log_likelihood)\n loss = log_likelihood\n\n dx = prob.copy()\n dx[range(sample_num)] -= labels\n return loss, dx\n\n\ndef shape(x):\n \"\"\"\n Gets the array's dimensions.\n\n Args:\n x (numpy.ndarray): Input array.\n\n Returns:\n tuple, the shape/dimensions of the input array.\n \"\"\"\n return np.array(np.shape(x))\n\n\ndef expand_dims(x, axis):\n \"\"\"\n Expands the shape of an array.\n\n Args:\n x (numpy.ndarray): Input array.\n axis (int): Position in the expanded axes where the new axis is placed.\n\n Returns:\n numpy.ndarray, view of input array with the number of dimensions increased by one.\n \"\"\"\n return np.expand_dims(x, axis)\n\n\ndef squeeze(x, axis):\n \"\"\"\n Removes single-dimensional entries from the shape of an array.\n\n Args:\n x (numpy.ndarray): Input array.\n axis (Union[int, tuple[int]]): Selected subset of the single-dimensional entries in the shape.\n\n Returns:\n numpy.ndarray, the input numpy.ndarray, but with all or a subset of the dimensions of length\n 1 removed.\n \"\"\"\n return np.squeeze(x, tuple(axis))\n\n\ndef reshape(x, shp):\n \"\"\"\n Applies a new shape to an array without changing its data.\n\n Args:\n x (numpy.ndarray): Input array.\n shp (tuple[int]): New shape to apply to x.\n\n Returns:\n numpy.ndarray, a new view object or a copy of input array.\n \"\"\"\n return np.reshape(x, tuple(shp))\n\n\ndef rank(x):\n \"\"\"\n Gets number of array dimensions.\n\n Args:\n x (numpy.ndarray): Input array.\n\n Returns:\n int, number of input array dimensions.\n \"\"\"\n return np.array(np.ndim(x))\n\n\ndef logsoftmax(x):\n \"\"\"\n Log softmax function.\n\n Args:\n x (numpy.ndarray): Input array.\n\n Returns:\n numpy.ndarray, the result of applying log softmax on the input array.\n \"\"\"\n return np.array(np.log(softmax(x)))\n\n\ndef transpose(x, axes=None):\n \"\"\"\n Transposes an input array according to axes.\n\n Args:\n x (numpy.ndarray): Input array.\n axes (list): The axes to be transposed. 
Default: None.\n\n Returns:\n numpy.ndarray, transposed array.\n \"\"\"\n return np.transpose(x, axes)\n\n\ndef invert_permutation(x):\n \"\"\"\n Gets the inverse permutation of an array.\n\n Args:\n x (numpy.ndarray): Input array.\n\n Returns:\n tuple, the inverse permutation of the input array.\n \"\"\"\n x = np.array(x)\n y = np.argsort(x)\n return tuple(y)\n\n\ndef select(cond, x, y):\n \"\"\"\n Gets elements from x or y depending on cond.\n\n Args:\n cond (bool): Where True, yield x, otherwise yield y.\n x (numpy.ndarray): Values from which to choose.\n y (numpy.ndarray): Values from which to choose.\n\n Returns:\n numpy.ndarray, elements from x where condition is True, and elements from y elsewhere.\n \"\"\"\n return np.where(cond, x, y)\n\n\ndef sum_by_axis(x, axis):\n \"\"\"\n Sum of array elements over a given axis.\n\n Args:\n x (numpy.ndarray): Input array.\n axis (Union[int, tuple[int]]): Axis or axes along which a sum is performed.\n\n Returns:\n numpy.ndarray, has the same shape as input array with the specified axis removed.\n \"\"\"\n return np.sum(x, axis)\n\n\ndef equal(x, y):\n \"\"\"\n Gets (x == y) element-wise.\n\n Args:\n x (numpy.ndarray): Input array.\n y (numpy.ndarray): Input array.\n\n Returns:\n numpy.ndarray, element-wise comparison of x and y.\n \"\"\"\n return np.equal(x, y)\n\n\ndef not_equal(x, y):\n \"\"\"\n Gets (x != y) element-wise.\n\n Args:\n x (numpy.ndarray): Input array.\n y (numpy.ndarray): Input array.\n\n Returns:\n numpy.ndarray, element-wise comparison of x and y.\n \"\"\"\n return np.not_equal(x, y)\n\n\ndef greater(x, y):\n \"\"\"\n Get the truth value of (x > y) element-wise.\n\n Args:\n x (numpy.ndarray): Input array.\n y (numpy.ndarray): Input array.\n\n Returns:\n numpy.ndarray, element-wise comparison of x and y.\n \"\"\"\n return np.greater(x, y)\n\ndef less(x, y):\n \"\"\"\n Get the truth value of (x < y) element-wise.\n\n Args:\n x (numpy.ndarray): Input array.\n y (numpy.ndarray): Input array.\n\n Returns:\n Array, element-wise comparison of x and y.\n \"\"\"\n return np.less(x, y)\n\n\n\n\ndef logical_not(x):\n \"\"\"\n Gets the truth value of NOT x element-wise.\n\n Args:\n x (numpy.ndarray): Input array.\n\n Returns:\n bool, have the same shape as x of the NOT operation on elements of x.\n \"\"\"\n return np.logical_not(x)\n\n\ndef sqrt(x):\n \"\"\"\n Gets the non-negative square-root of an numpy.ndarray, element-wise.\n\n Args:\n x (numpy.ndarray): Input array.\n\n Returns:\n numpy.ndarray, has the same shape as x, containing the positive square-root of each\n element in x.\n \"\"\"\n return np.sqrt(x)\n\n\ndef power(x, y):\n \"\"\"\n First array elements raised to powers from second numpy.ndarray, element-wise.\n\n Args:\n x (numpy.ndarray): The bases array.\n y (numpy.ndarray): The exponents array.\n\n Returns:\n numpy.ndarray, the bases in x raised to the exponents in y.\n \"\"\"\n return np.power(x, y)\n\n\ndef exp(x):\n \"\"\"\n Gets the exponential of all elements in the input array.\n\n Args:\n x (numpy.ndarray): Input array.\n\n Returns:\n numpy.ndarray, element-wise exponential of x.\n \"\"\"\n return np.exp(x)\n\n\ndef maximum(x, y):\n \"\"\"\n Gets the max of x and y element-wise.\n\n If x > y, return x. 
Otherwise, return y.\n\n Args:\n x (numpy.ndarray): First input array.\n y (numpy.ndarray): Second input array ave the same type as x.\n\n Returns:\n numpy.ndarray, has the same type as x.\n \"\"\"\n return np.maximum(x, y)\n\n\ndef minimum(x, y):\n \"\"\"\n Gets the min of x and y element-wise.\n\n If x < y, return x. Otherwise, return y.\n\n Args:\n x (numpy.ndarray): First input array.\n y (numpy.ndarray): Second input array have the same type as x.\n\n Returns:\n numpy.ndarray, has the same type as x.\n \"\"\"\n return np.minimum(x, y)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test_cell_bprop \"\"\"\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore.ops import composite as C\nfrom mindspore.ops import operations as P\nfrom mindspore import Parameter\nfrom mindspore.common.tensor import Tensor\nimport mindspore.common.dtype as mstype\nfrom mindspore.common.initializer import initializer\nfrom mindspore import context\nfrom ....mindspore_test_framework.utils.bprop_util import bprop\n\nimport pytest\n\n\ndef setup_module(module):\n context.set_context(mode=context.PYNATIVE_MODE)\n\n\nclass MulAdd(nn.Cell):\n def __init__(self):\n super(MulAdd, self).__init__()\n\n def construct(self, x, y):\n return 2 * x + y\n\n def bprop(self, x, y, out, dout):\n # In this test case, The user defined bprop is wrong defined purposely to distinguish from ad result\n return 2 * dout, 2 * y\n\ndef test_grad_mul_add():\n mul_add = MulAdd()\n assert C.grad_all(mul_add)(1, 2) == (2, 4)\n\n\nclass InlineMulADD(nn.Cell):\n def __init__(self):\n super(InlineMulADD, self).__init__()\n self.mul_add = MulAdd()\n self.param = Parameter(2, 'param')\n\n def construct(self, x, y):\n return self.mul_add(x, y) + x + self.param * y\n\ndef test_grad_inline_mul_add():\n inline_mul_add = InlineMulADD()\n assert C.grad_all(inline_mul_add)(1, 2) == (3, 6)\n\n\nclass WithParameter(nn.Cell):\n def __init__(self):\n super(WithParameter, self).__init__()\n self.param = Parameter(2, 'param')\n\n def construct(self, x, y):\n return self.param * x + y\n\n def bprop(self, x, y, out, dout):\n # In this test case, The user defined bprop is wrong defined purposely to distinguish from ad result\n return self.param * dout, 2 * y\n\ndef test_with_param():\n with_param = WithParameter()\n with pytest.raises(RuntimeError):\n C.grad_all(with_param)(1, 2)\n\nclass WithNoBprop(nn.Cell):\n def __init__(self):\n super(WithNoBprop, self).__init__()\n\n def construct(self, x, y):\n return 2 * x + y\n\ndef test_with_no_bprop():\n with_no_bprop = WithNoBprop()\n C.grad_all(with_no_bprop)(1, 2) == (2, 1)\n\ndef test_grad_in_bprop_1():\n class GradInBprop_1(nn.Cell):\n def __init__(self):\n super(GradInBprop_1, self).__init__()\n self.relu = P.ReLU()\n def construct(self, x, y):\n return self.relu(x)\n class GradInBprop_2(nn.Cell):\n def __init__(self):\n super(GradInBprop_2, self).__init__()\n 
self.f = GradInBprop_1()\n def construct(self, x, y):\n return self.f(x, y), C.grad_all(self.f)(x, y)\n def bprop(self, x, y, out, dout):\n grads = C.grad_all(self.f)(x, y)\n return out[1][0], grads[1]\n class GradInBprop_3(nn.Cell):\n def __init__(self):\n super(GradInBprop_3, self).__init__()\n self.f = GradInBprop_2()\n def construct(self, x, y):\n return self.f(x, y)\n grad_in_bprop = GradInBprop_3()\n grads = C.grad_all(grad_in_bprop)(Tensor(np.ones([2, 2]).astype(np.float32)),\n Tensor(np.ones([2, 2]).astype(np.float32)))\n assert (grads[0].asnumpy() == np.ones([2, 2]).astype(np.float32)).all()\n assert (grads[1].asnumpy() == np.zeros([2, 2]).astype(np.float32)).all()\n\ndef test_grad_in_bprop_2():\n class GradInBprop_1(nn.Cell):\n def __init__(self):\n super(GradInBprop_1, self).__init__()\n self.relu = P.ReLU()\n def construct(self, x, y):\n return self.relu(x)\n def bprop(self, x, y, out, dout):\n return x * y, y + x\n class GradInBprop_2(nn.Cell):\n def __init__(self):\n super(GradInBprop_2, self).__init__()\n self.f = GradInBprop_1()\n def construct(self, x, y):\n return self.f(x, y), C.grad_all(self.f)(x, y)\n def bprop(self, x, y, out, dout):\n grads = C.grad_all(self.f)(x, y)\n return out[1][0], grads[1]\n class GradInBprop_3(nn.Cell):\n def __init__(self):\n super(GradInBprop_3, self).__init__()\n self.f = GradInBprop_2()\n def construct(self, x, y):\n return self.f(x, y)\n grad_in_bprop = GradInBprop_3()\n grads = C.grad_all(grad_in_bprop)(Tensor(np.ones([2, 2]).astype(np.float32)),\n Tensor(np.ones([2, 2]).astype(np.float32)))\n assert (grads[0].asnumpy() == np.ones([2, 2]).astype(np.float32)).all()\n assert (grads[1].asnumpy() == np.array([[2, 2], [2, 2]]).astype(np.float32)).all()\n\ndef test_grad_in_bprop_3():\n class GradInBprop_1(nn.Cell):\n def __init__(self):\n super(GradInBprop_1, self).__init__()\n self.relu = P.ReLU()\n def construct(self, x, y):\n return self.relu(x)\n class GradInBprop_2(nn.Cell):\n def __init__(self):\n super(GradInBprop_2, self).__init__()\n self.f = GradInBprop_1()\n def construct(self, x, y):\n return self.f(x, y), C.grad_all(self.f)(x, y)\n def bprop(self, x, y, out, dout):\n grads = C.grad_all(self.f)(x, y)\n return out[1][0], grads[1]\n class GradInBprop_3(nn.Cell):\n def __init__(self):\n super(GradInBprop_3, self).__init__()\n self.f = GradInBprop_2()\n def construct(self, x, y):\n return self.f(x, y)\n def bprop(self, x, y, out, dout):\n return x + y + y + out[0], x + x + y + y + dout[0]\n grad_in_bprop = GradInBprop_3()\n grads = C.grad_all(grad_in_bprop)(Tensor(np.ones([2, 2]).astype(np.float32)),\n Tensor(np.ones([2, 2]).astype(np.float32)))\n assert (grads[0].asnumpy() == np.array([[4, 4], [4, 4]]).astype(np.float32)).all()\n assert (grads[1].asnumpy() == np.array([[5, 5], [5, 5]]).astype(np.float32)).all()\n\nclass OneInputBprop(nn.Cell):\n def __init__(self):\n super().__init__()\n self.op = P.ReLU()\n def construct(self, x):\n return self.op(x)\n def bprop(self, x, out, dout):\n return 5 * x,\n\ndef test_grad_one_input_bprop():\n net = OneInputBprop()\n input = Tensor(np.ones([2, 2]).astype(np.float32))\n grad = C.grad_all(net)(input)\n assert (grad[0].asnumpy() == np.array([5, 5]).astype(np.float32)).all()\n\n\nclass TwoInput(nn.Cell):\n def __init__(self):\n super().__init__()\n def construct(self, x, y):\n return x * y\n\nclass InlineBpropTwoInput(nn.Cell):\n def __init__(self):\n super().__init__()\n self.f = TwoInput()\n def construct(self, x, y):\n return self.f(x, y), C.grad_all(self.f)(x, y)\n def bprop(self, x, y, out, 
dout):\n grads = C.grad_all(self.f)(x, y)\n return grads[0] * 2, grads[1] * 2\n\ndef test_grad_inline_bprop_two_input():\n net = InlineBpropTwoInput()\n input1 = Tensor(np.ones([2, 2]).astype(np.float32))\n input2 = Tensor(np.ones([2, 2]).astype(np.float32))\n grads = C.grad_all(net)(input1, input2)\n assert (grads[0].asnumpy() == np.array([2, 2]).astype(np.float32)).all()\n assert (grads[1].asnumpy() == np.array([2, 2]).astype(np.float32)).all()\n assert (len(grads) == 2)\n\n\nclass TwoInputBprop(nn.Cell):\n def __init__(self):\n super().__init__()\n self.op = P.Mul()\n def construct(self, x, y):\n return self.op(x, y)\n def bprop(self, x, y, out, dout):\n return 5 * x, 8 * y\n\nclass TwoInput(nn.Cell):\n def __init__(self):\n super().__init__()\n self.op = P.Mul()\n def construct(self, x, y):\n return self.op(x, y)\n\nclass TwoInputWithParameter(nn.Cell):\n def __init__(self):\n super().__init__()\n self.op = P.Mul()\n self.inputdata = Parameter(initializer(1, (2,2), mstype.float32),name=\"global_step\")\n def construct(self, x, y):\n x = self.inputdata + x\n return self.op(x, y)\n\nclass TwoInputWithOnlyInitParameterBprop(nn.Cell):\n def __init__(self):\n super().__init__()\n self.op = P.Mul()\n self.inputdata = Parameter(initializer(1, (2,2), mstype.float32),name=\"global_step\")\n def construct(self, x, y):\n return self.op(x, y)\n def bprop(self, x, y, out, dout):\n return 5*x, 8*y\n\nclass InlineMutilTwoInputParameterCell(nn.Cell):\n def __init__(self):\n super().__init__()\n self.f1 = TwoInputBprop()\n self.f2 = TwoInput()\n self.f3 = TwoInputWithParameter()\n self.f4 = TwoInputWithOnlyInitParameterBprop()\n def construct(self, x, y):\n output = self.f1(x,y)+self.f2(x,y)+self.f3(x,y)+self.f4(x,y)\n return output\n\ndef test_grad_inline_bprop_multi_input():\n net = InlineMutilTwoInputParameterCell()\n input1 = Tensor(np.ones([2, 2]).astype(np.float32))\n input2 = Tensor(np.ones([2, 2]).astype(np.float32))\n grads = C.grad_all(net)(input1, input2)\n assert (grads[0].asnumpy() == np.array([[12, 12], [12, 12]]).astype(np.float32)).all()\n assert (grads[1].asnumpy() == np.array([[19, 19], [19, 19]]).astype(np.float32)).all()\n assert (len(grads) == 2)\n\nclass MulAddWithParam(nn.Cell):\n def __init__(self):\n super(MulAddWithParam, self).__init__()\n self.mul_add = MulAdd()\n self.param = Parameter(Tensor(np.array([[3, 2]], np.float32)), 'param')\n def construct(self, x):\n return self.mul_add(self.param, x)\n\n\ndef test_refkey_bprop():\n net = MulAddWithParam()\n input_data = Tensor(np.array([2, 2], np.float32))\n grads = bprop(net, input_data,\n grads_wrt_outputs=(Tensor(np.ones([1, 2]).astype(np.float32))),\n wrt=['params', 'inputs'],\n params=net.trainable_params())\n assert (grads[0][0].asnumpy() == np.array([4, 4]).astype(np.float32)).all()\n assert (grads[1][0].asnumpy() == np.array([2, 2]).astype(np.float32)).all()\n\n\nclass MulAddWithWrongOutputNum(nn.Cell):\n def __init__(self):\n super(MulAddWithWrongOutputNum, self).__init__()\n def construct(self, x, y):\n return 2 * x + y\n def bprop(self, x, y, out, dout):\n return 2 * dout, 2 * y, out\n\ndef test_grad_mul_add_with_wrong_output_num():\n mul_add = MulAddWithWrongOutputNum()\n with pytest.raises(RuntimeError):\n C.grad_all(mul_add)(1, 2)\n", "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# 
Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom mindspore import context\nimport mindspore.nn as nn\nfrom mindspore.ops import operations as P\nfrom mindspore import Tensor, Parameter, ParameterTuple\nimport mindspore as ms\nfrom mindspore.common.api import _executor\nfrom mindspore.ops import composite as C\nfrom mindspore.ops import functional as F\n\nclass NetWithLoss(nn.Cell):\n def __init__(self, network, strategy3):\n super(NetWithLoss, self).__init__()\n self.loss = P.SoftmaxCrossEntropyWithLogits().set_strategy(strategy3)\n self.network = network\n\n def construct(self, x, b):\n predict = self.network(x)\n return self.loss(predict, b)[0]\n\nclass OneStepCell(nn.Cell):\n def __init__(self, network):\n super(OneStepCell, self).__init__(auto_prefix=False)\n self.network = network\n self.weights = ParameterTuple(network.network.trainable_params())\n\n def construct(self, data, label):\n weights = self.weights\n grads = C.grad_by_list(self.network, weights)(data, label)\n return grads\n\ndef test_one_weight_parameter():\n class Net(nn.Cell):\n def __init__(self, strategy1, weight):\n super().__init__()\n self.weight = Parameter(weight, \"w1\", requires_grad=True)\n self.matmul = P.MatMul().set_strategy(strategy1)\n\n def construct(self, x):\n out = self.matmul(x, self.weight)\n return out\n\n context.set_auto_parallel_context(device_num=8, global_rank=0)\n strategy1 = ((4, 1), (1, 2))\n strategy3 = ((8, 1), (8, 1))\n\n x = Tensor(np.ones([64, 32]), dtype=ms.float32)\n weight = Tensor(np.ones([32, 64]), dtype=ms.float32)\n b = Tensor(np.ones([64, 64]), dtype=ms.float32)\n\n net = Net(strategy1, weight)\n print (\"======================================dict\", net.__dict__)\n\n net_with_loss = NetWithLoss(net, strategy3)\n\n train_net = OneStepCell(net_with_loss)\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\")\n\n _executor.compile(train_net, x, b)\n" ]
[ [ "numpy.ones", "numpy.zeros" ], [ "numpy.dot", "numpy.minimum", "numpy.not_equal", "numpy.exp", "numpy.mean", "numpy.where", "numpy.max", "scipy.special.softmax", "numpy.less", "numpy.argmax", "numpy.transpose", "numpy.sqrt", "numpy.greater", "numpy.ndim", "numpy.arange", "numpy.expand_dims", "numpy.convolve", "numpy.equal", "numpy.pad", "numpy.array", "numpy.reshape", "numpy.zeros", "numpy.shape", "numpy.power", "numpy.argsort", "numpy.logical_not", "numpy.sum", "numpy.ones", "numpy.var", "numpy.maximum" ], [ "numpy.array", "numpy.ones", "numpy.zeros" ], [ "numpy.ones" ] ]
skoltech-nlp/coqas
[ "bf0d8d3d1fffa0039114d7d64bacc020f5085c66" ]
[ "src/layers/layer_char_embeddings.py" ]
[ "\"\"\"class implements character-level embeddings\"\"\"\nimport string\nimport torch\nimport torch.nn as nn\nfrom src.layers.layer_base import LayerBase\nfrom src.seq_indexers.seq_indexer_char import SeqIndexerBaseChar\n\n\nclass LayerCharEmbeddings(LayerBase):\n \"\"\"LayerCharEmbeddings implements character-level embeddings.\"\"\"\n def __init__(self, gpu, char_embeddings_dim, freeze_char_embeddings=False, word_len=20, unique_characters_list=None):\n super(LayerCharEmbeddings, self).__init__(gpu)\n self.gpu = gpu\n self.char_embeddings_dim = char_embeddings_dim\n self.freeze_char_embeddings = freeze_char_embeddings\n self.word_len = word_len # standard len to pad\n # Init character sequences indexer\n self.char_seq_indexer = SeqIndexerBaseChar(gpu=gpu)\n if unique_characters_list is None:\n unique_characters_list = list(string.printable)\n for c in unique_characters_list:\n self.char_seq_indexer.add_char(c)\n # Init character embedding\n self.embeddings = nn.Embedding(num_embeddings=self.char_seq_indexer.get_items_count(),\n embedding_dim=char_embeddings_dim,\n padding_idx=0)\n # nn.init.uniform_(self.embeddings.weight, -0.5, 0.5) # Option: Ma, 2016\n\n def is_cuda(self):\n return self.embeddings.weight.is_cuda\n\n def forward(self, word_sequences):\n batch_num = len(word_sequences)\n max_seq_len = max([len(word_seq) for word_seq in word_sequences])\n char_sequences = [[[c for c in word] for word in word_seq] for word_seq in word_sequences]\n input_tensor = self.tensor_ensure_gpu(torch.zeros(batch_num, max_seq_len, self.word_len, dtype=torch.long))\n for n, curr_char_seq in enumerate(char_sequences):\n curr_seq_len = len(curr_char_seq)\n curr_char_seq_tensor = self.char_seq_indexer.get_char_tensor(curr_char_seq, self.word_len) # curr_seq_len x word_len \n input_tensor[n, :curr_seq_len, :] = curr_char_seq_tensor\n char_embeddings_feature = self.embeddings(input_tensor)\n return char_embeddings_feature.permute(0, 1, 3, 2) # shape: batch_num x max_seq_len x char_embeddings_dim x word_len\n" ]
[ [ "torch.zeros" ] ]
phanxuanphucnd/conformer
[ "a14562ef118c7539ebeade469d0e164ffb5f57a1", "a14562ef118c7539ebeade469d0e164ffb5f57a1" ]
[ "conformer/models/conformer/encoder.py", "conformer/decode/ensemble.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\n\nfrom torch import Tensor\nfrom typing import Tuple\n\nfrom arizona_asr.models.encoder import TransducerEncoder\nfrom arizona_asr.models.convolutional import Conv2dSubsampling\nfrom arizona_asr.models.modules import ResidualConnectionModule, Linear\nfrom arizona_asr.models.conformer.modules import (\n FeedForwardModule,\n MultiHeadedSelfAttentionModule,\n ConformerConvModule\n)\n\nclass ConformerBlock(nn.Module):\n \"\"\"\n Conformer block contains two Feed Forward modules sandwiching the Multi-Headed Self-Attention module\n and the Convolution module. This sandwich structure is inspired by Macaron-Net, which proposes replacing\n the original feed-forward layer in the Transformer block into two half-step feed-forward layers,\n one before the attention layer and one after.\n Args:\n encoder_dim (int, optional): Dimension of conformer encoder\n num_attention_heads (int, optional): Number of attention heads\n feed_forward_expansion_factor (int, optional): Expansion factor of feed forward module\n conv_expansion_factor (int, optional): Expansion factor of conformer convolution module\n feed_forward_dropout_p (float, optional): Probability of feed forward module dropout\n attention_dropout_p (float, optional): Probability of attention module dropout\n conv_dropout_p (float, optional): Probability of conformer convolution module dropout\n conv_kernel_size (int or tuple, optional): Size of the convolving kernel\n half_step_residual (bool): Flag indication whether to use half step residual or not\n device (torch.device): torch device (cuda or cpu)\n Inputs: inputs\n - **inputs** (batch, time, dim): Tensor containing input vector\n Returns: outputs\n - **outputs** (batch, time, dim): Tensor produces by conformer block.\n \"\"\"\n def __init__(\n self,\n encoder_dim: int = 512,\n num_attention_heads: int = 8,\n feed_forward_expansion_factor: int = 4,\n conv_expansion_factor: int = 2,\n feed_forward_dropout_p: float = 0.1,\n attention_dropout_p: float = 0.1,\n conv_dropout_p: float = 0.1,\n conv_kernel_size: int = 31,\n half_step_residual: bool = True,\n device: torch.device = 'cuda',\n ):\n super(ConformerBlock, self).__init__()\n \n self.device = device\n if half_step_residual:\n self.feed_forward_residual_factor = 0.5\n else:\n self.feed_forward_residual_factor = 1\n\n self.sequential = nn.Sequential(\n ResidualConnectionModule(\n module=FeedForwardModule(\n encoder_dim=encoder_dim,\n expansion_factor=feed_forward_expansion_factor,\n dropout_p=feed_forward_dropout_p,\n device=device,\n ),\n module_factor=self.feed_forward_residual_factor,\n ),\n ResidualConnectionModule(\n module=MultiHeadedSelfAttentionModule(\n d_model=encoder_dim,\n num_heads=num_attention_heads,\n dropout_p=attention_dropout_p,\n device=device,\n ),\n ),\n ResidualConnectionModule(\n module=ConformerConvModule(\n in_channels=encoder_dim,\n kernel_size=conv_kernel_size,\n expansion_factor=conv_expansion_factor,\n dropout_p=conv_dropout_p,\n device=device,\n ),\n ),\n ResidualConnectionModule(\n module=FeedForwardModule(\n encoder_dim=encoder_dim,\n expansion_factor=feed_forward_expansion_factor,\n dropout_p=feed_forward_dropout_p,\n device=device,\n ),\n module_factor=self.feed_forward_residual_factor,\n ),\n nn.LayerNorm(encoder_dim),\n )\n\n def forward(self, inputs: Tensor) -> Tensor:\n return self.sequential(inputs.to(self.device))\n\n\nclass ConformerEncoder(TransducerEncoder):\n \"\"\"\n Conformer encoder first processes the input with a convolution 
subsampling layer and then\n with a number of conformer blocks.\n Args:\n input_dim (int, optional): Dimension of input vector\n encoder_dim (int, optional): Dimension of conformer encoder\n num_layers (int, optional): Number of conformer blocks\n num_attention_heads (int, optional): Number of attention heads\n feed_forward_expansion_factor (int, optional): Expansion factor of feed forward module\n conv_expansion_factor (int, optional): Expansion factor of conformer convolution module\n feed_forward_dropout_p (float, optional): Probability of feed forward module dropout\n attention_dropout_p (float, optional): Probability of attention module dropout\n conv_dropout_p (float, optional): Probability of conformer convolution module dropout\n conv_kernel_size (int or tuple, optional): Size of the convolving kernel\n half_step_residual (bool): Flag indication whether to use half step residual or not\n device (torch.device): torch device (cuda or cpu)\n Inputs: inputs, input_lengths\n - **inputs** (batch, time, dim): Tensor containing input vector\n - **input_lengths** (batch): list of sequence input lengths\n Returns: outputs, output_lengths\n - **outputs** (batch, out_channels, time): Tensor produces by conformer encoder.\n - **output_lengths** (batch): list of sequence output lengths\n \"\"\"\n def __init__(\n self,\n input_dim: int = 80,\n encoder_dim: int = 512,\n num_layers: int = 17,\n num_attention_heads: int = 8,\n feed_forward_expansion_factor: int = 4,\n conv_expansion_factor: int = 2,\n input_dropout_p: float = 0.1,\n feed_forward_dropout_p: float = 0.1,\n attention_dropout_p: float = 0.1,\n conv_dropout_p: float = 0.1,\n conv_kernel_size: int = 31,\n half_step_residual: bool = True,\n device: torch.device = 'cuda',\n ):\n super(ConformerEncoder, self).__init__()\n \n self.conv_subsample = Conv2dSubsampling(input_dim, in_channels=1, out_channels=encoder_dim)\n self.input_projection = nn.Sequential(\n Linear(self.conv_subsample.get_output_dim(), encoder_dim),\n nn.Dropout(p=input_dropout_p),\n )\n self.layers = nn.ModuleList([\n ConformerBlock(\n encoder_dim=encoder_dim,\n num_attention_heads=num_attention_heads,\n feed_forward_expansion_factor=feed_forward_expansion_factor,\n conv_expansion_factor=conv_expansion_factor,\n feed_forward_dropout_p=feed_forward_dropout_p,\n attention_dropout_p=attention_dropout_p,\n conv_dropout_p=conv_dropout_p,\n conv_kernel_size=conv_kernel_size,\n half_step_residual=half_step_residual,\n device=device,\n ).to(device) for _ in range(num_layers)\n ])\n\n def forward(self, inputs: Tensor, input_lengths: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"\n Forward propagate a `inputs` for encoder training.\n Args:\n inputs (torch.FloatTensor): A input sequence passed to encoder. Typically for inputs this will be a padded\n `FloatTensor` of size ``(batch, seq_length, dimension)``.\n input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``\n Returns:\n (Tensor, Tensor)\n * outputs (torch.FloatTensor): A output sequence of encoder. `FloatTensor` of size\n ``(batch, seq_length, dimension)``\n * output_lengths (torch.LongTensor): The length of output tensor. 
``(batch)``\n \"\"\"\n outputs, output_lengths = self.conv_subsample(inputs, input_lengths)\n outputs = self.input_projection(outputs)\n\n for layer in self.layers:\n outputs = layer(outputs)\n\n return outputs, output_lengths", "import torch\nimport torch.nn as nn\n\n\nclass Ensemble(nn.Module):\n \"\"\"\n Ensemble decoding.\n Decodes using multiple models simultaneously,\n Note:\n Do not use this class directly, use one of the sub classes.\n \"\"\"\n def __init__(self, models):\n super(Ensemble, self).__init__()\n self.models = models\n self.num_models = len(models)\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n\nclass BasicEnsemble(Ensemble):\n \"\"\"\n Basic ensemble decoding.\n Decodes using multiple models simultaneously,\n combining their prediction distributions by adding.\n All models in the ensemble must share a target characters.\n \"\"\"\n def __init__(self, models):\n super(BasicEnsemble, self).__init__(models)\n\n def forward(self, inputs, input_lengths):\n y_hats = None\n\n with torch.no_grad():\n for model in self.models:\n if y_hats is None:\n y_hats = model(inputs, input_lengths, teacher_forcing_ratio=0.0)\n else:\n y_hats += model(inputs, input_lengths, teacher_forcing_ratio=0.0)\n\n return y_hats\n\n\nclass WeightedEnsemble(Ensemble):\n \"\"\"\n Weighted ensemble decoding.\n Decodes using multiple models simultaneously,\n combining their prediction distributions by weighted sum.\n All models in the ensemble must share a target characters.\n \"\"\"\n def __init__(self, models, dim=128):\n super(WeightedEnsemble, self).__init__(models)\n self.meta_classifier = nn.Sequential(\n nn.Linear(self.num_models, dim),\n nn.ELU(inplace=True),\n nn.Linear(dim, self.num_models)\n )\n\n def forward(self, inputs, input_lengths):\n y_hats, outputs = None, list()\n weights = torch.FloatTensor([1.] * self.num_models)\n\n # model`s parameters are fixed\n with torch.no_grad():\n for model in self.models:\n outputs.append(model(inputs, input_lengths, teacher_forcing_ratio=0.0))\n\n weights = self.meta_classifier(weights)\n\n for (output, weight) in zip(outputs, weights):\n if y_hats is None:\n y_hats = output * weight\n else:\n y_hats += output * weight\n\n return y_hats\n" ]
[ [ "torch.nn.Dropout", "torch.nn.LayerNorm" ], [ "torch.nn.Linear", "torch.FloatTensor", "torch.no_grad", "torch.nn.ELU" ] ]
lixin940207/bert4keras
[ "91e4b6e710fb54ce6e762cfe71c716a23b28e8fd" ]
[ "examples/task_sentiment_albert.py" ]
[ "#! -*- coding:utf-8 -*-\n# 情感分析例子,加载albert_zh权重(https://github.com/brightmart/albert_zh)\n\nimport json\nimport numpy as np\nfrom random import choice\nimport re, os, codecs\nfrom bert4keras.backend import set_gelu\nfrom bert4keras.utils import Tokenizer, load_vocab\nfrom bert4keras.bert import build_bert_model\nfrom bert4keras.train import PiecewiseLinearLearningRate\nfrom keras.layers import *\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.callbacks import Callback\n\nset_gelu('tanh') # 切换gelu版本\n\n\nmaxlen = 128\nconfig_path = '/root/kg/bert/albert_small_zh_google/albert_config.json'\ncheckpoint_path = '/root/kg/bert/albert_small_zh_google/albert_model.ckpt'\ndict_path = '/root/kg/bert/albert_small_zh_google/vocab.txt'\n\n\ndef load_data(filename):\n D = []\n with codecs.open(filename, encoding='utf-8') as f:\n for l in f:\n text, label = l.strip().split('\\t')\n D.append((text, int(label)))\n return D\n\n\n# 加载数据集\ntrain_data = load_data('datasets/sentiment/sentiment.train.data')\nvalid_data = load_data('datasets/sentiment/sentiment.valid.data')\ntest_data = load_data('datasets/sentiment/sentiment.test.data')\n\n# 建立分词器\ntokenizer = Tokenizer(dict_path)\n\n\ndef seq_padding(X, padding=0):\n L = [len(x) for x in X]\n ML = max(L)\n return np.array([\n np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x\n for x in X\n ])\n\n\nclass data_generator:\n def __init__(self, data, batch_size=32):\n self.data = data\n self.batch_size = batch_size\n self.steps = len(self.data) // self.batch_size\n if len(self.data) % self.batch_size != 0:\n self.steps += 1\n def __len__(self):\n return self.steps\n def __iter__(self, random=False):\n idxs = list(range(len(self.data)))\n if random:\n np.random.shuffle(idxs)\n batch_token_ids, batch_segment_ids, batch_labels = [], [], []\n for i in idxs:\n text, label = self.data[i]\n token_ids, segment_ids = tokenizer.encode(text, max_length=maxlen)\n batch_token_ids.append(token_ids)\n batch_segment_ids.append(segment_ids)\n batch_labels.append([label])\n if len(batch_token_ids) == self.batch_size or i == idxs[-1]:\n batch_token_ids = seq_padding(batch_token_ids)\n batch_segment_ids = seq_padding(batch_segment_ids)\n batch_labels = seq_padding(batch_labels)\n yield [batch_token_ids, batch_segment_ids], batch_labels\n batch_token_ids, batch_segment_ids, batch_labels = [], [], []\n def forfit(self):\n while True:\n for d in self.__iter__(True):\n yield d\n\n\n# 加载预训练模型\nbert = build_bert_model(\n config_path=config_path,\n checkpoint_path=checkpoint_path,\n with_pool=True,\n albert=True,\n return_keras_model=False,\n)\n\noutput = Dropout(rate=0.1)(bert.model.output)\noutput = Dense(units=2,\n activation='softmax',\n kernel_initializer=bert.initializer)(output)\n\nmodel = Model(bert.model.input, output)\nmodel.summary()\n\nmodel.compile(\n loss='sparse_categorical_crossentropy',\n # optimizer=Adam(1e-5), # 用足够小的学习率\n optimizer=PiecewiseLinearLearningRate(Adam(1e-4), {1000: 1, 2000: 0.1}),\n metrics=['accuracy'],\n)\n\n\n# 转换数据集\ntrain_generator = data_generator(train_data)\nvalid_generator = data_generator(valid_data)\ntest_generator = data_generator(test_data)\n\n\ndef evaluate(data):\n total, right = 0., 0.\n for x_true, y_true in data:\n y_pred = model.predict(x_true).argmax(axis=1)\n y_true = y_true[:, 0]\n total += len(y_true)\n right += (y_true == y_pred).sum()\n return right / total\n\n\nclass Evaluator(Callback):\n def __init__(self):\n self.best_val_acc = 0.\n def on_epoch_end(self, epoch, logs=None):\n 
val_acc = evaluate(valid_generator)\n if val_acc > self.best_val_acc:\n self.best_val_acc = val_acc\n model.save_weights('best_model.weights')\n test_acc = evaluate(test_generator)\n print(u'val_acc: %05f, best_val_acc: %05f, test_acc: %05f\\n'\n % (val_acc, self.best_val_acc, test_acc))\n\n\nevaluator = Evaluator()\nmodel.fit_generator(train_generator.forfit(),\n steps_per_epoch=len(train_generator),\n epochs=10,\n callbacks=[evaluator])\n\nmodel.load_weights('best_model.weights')\nprint(u'final test acc: %05f\\n' % (evaluate(test_generator)))\n" ]
[ [ "numpy.random.shuffle" ] ]
KPF-newstrust/ntrust-lab
[ "14162f96013c11109e983b2330bbcd986dff538e" ]
[ "utils/clustering.py" ]
[ "import sys, os\nimport re, csv\n\nimport tensorflow as tf\nfrom tensorflow.contrib.tensorboard.plugins import projector\nimport numpy as np\nimport pickle\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\nfrom konlpy.tag import Mecab\n\n\ndef visualize(sess, varname, X, meta_file):\n model_path = os.path.dirname(meta_file)\n\n tf.logging.info('visualize count {}'.format(X.shape))\n Ws = tf.Variable(X, trainable=True, name=varname)\n sess.run(Ws.initializer)\n\n # save weights\n saver = tf.train.Saver()\n saver.save(sess, os.path.join(model_path, '%s.ckpt' % varname))\n\n # associate meta & embeddings\n conf = projector.ProjectorConfig()\n embedding = conf.embeddings.add()\n embedding.tensor_name = varname\n embedding.metadata_path = meta_file\n\n writer = tf.summary.FileWriter(model_path)\n projector.visualize_embeddings(writer, conf)\n\n tf.logging.info('Run `tensorboard --logdir={}` to run visualize result on tensorboard'.format(model_path))\n\n\nclass DBScan:\n def __init__(self, \n model_path=None, \n unit=100,\n eps=0.5): \n\n if model_path:\n self.model_file = os.path.join(model_path, 'dbscan.model')\n self.meta_file = os.path.join(model_path, 'dbscan.meta')\n self.X_file = os.path.join(model_path, 'dbscan_x.npy')\n\n if self.model_file and os.path.exists(self.model_file):\n try:\n with open(self.model_file, 'rb') as f:\n self.model = pickle.load(f)\n except:\n tf.logging.info('fail to load dbscan: {}'.format(sys.exc_info()))\n tf.logging.info('dbscan loaded')\n else:\n self.model = DBSCAN(\n eps=eps, # neighborhood로 인정되는 최대 거리\n min_samples=2, # core point size\n metric='euclidean',\n n_jobs=-1)\n\n self.X = np.zeros([0, unit], dtype=np.float32)\n try:\n if self.X_file and os.path.exists(self.X_file):\n self.X = np.load(self.X_file, mmap_mode='r+')\n except:\n tf.logging.info('fail to load X from file {} {}'.format(self.X_file, sys.exc_info()))\n \n\n def save(self, tags=[]):\n if self.model_file:\n try:\n with open(self.model_file, 'wb') as f:\n pickle.dump(\n self.model, f, protocol=pickle.HIGHEST_PROTOCOL)\n except:\n tf.logging.info(\n 'fail to save dbscan: {}'.format(sys.exc_info()))\n\n if self.meta_file and tags:\n with open(self.meta_file, 'ab') as f:\n for tag, label in zip(tags, self.model.labels_):\n f.write(('[%03x] %s' % (label, tag)).encode('utf-8') + b'\\n')\n\n if self.X_file:\n np.save(self.X_file, self.X) \n\n def fit(self, X):\n self.X = np.concatenate((self.X, np.array(X)), axis=0)\n # return [nsamples]\n return self.model.fit_predict(X)\n\n def labels(self):\n # return: 각 sample(doc)의 label\n return self.model.labels_\n\n def n_clusters(self):\n L = self.labels()\n return len(set(L)) - (1 if -1 in L else 0)\n\n def core_samples(self):\n return self.model.core_sample_indices_\n\n def core_tags(self, tags):\n labels = self.model.labels_\n cores = self.model.core_sample_indices_\n\n clu_tags = []\n for _ in range(self.n_clusters()):\n clu_tags.append([])\n\n for i in cores:\n clu = labels[i]\n if clu < 0: continue\n\n tag = tags[i] if len(tags) > i else ''\n clu_tags[clu].append(tag)\n return clu_tags\n\n def eval(labels, cls_labels, X):\n nclusters = len(set(labels)) - (1 if -1 in labels else 0)\n return dict(\n n_clusters=nclusters,\n homogeneity=\"%0.3f\" % metrics.homogeneity_score(cls_labels, labels),\n completeness=\"%0.3f\" % metrics.completeness_score(cls_labels, labels),\n v_measure=\"%0.3f\" % metrics.v_measure_score(cls_labels, labels),\n adjusted_rand_index=\"%0.3f\" % metrics.adjusted_rand_score(cls_labels, labels),\n 
adjusted_mutual_info=\"%0.3f\" % metrics.adjusted_mutual_info_score(cls_labels, labels),\n silhouette=\"%0.3f\" % metrics.silhouette_score(X, labels))\n\n\nSEP = re.compile(r\"[\\.!\\?]\\s+|$\", re.M)\n\ndef to_sentences(doc):\n sts = []\n start_sentence=0\n for sep in SEP.finditer(doc):\n st= doc[start_sentence:sep.start(0)]\n start_sentence = sep.end(0)\n if len(st) < 10: continue\n sts.append(st)\n return sts\n\n\ndef clear_str(str):\n str = re.sub(r'\\[[^\\]]+\\]', '', str)\n str = re.sub(r'\\([^\\)]+\\)', '', str)\n str = re.sub(r'\\<[^\\>]+\\>', '', str)\n str = re.sub(r'[^\\.\\?!\\uAC00-\\uD7AF]', ' ', str)\n str = re.sub(r'\\s{2,}', ' ', str)\n return str\n\n\ndef parse(sentence, mecab, allowed_morps=None):\n \"\"\"문장을 형태소로 변환\"\"\"\n\n return [pos[0]for pos in mecab.pos(sentence) \\\n if not allowed_morps or pos[1] in allowed_morps]\n\n\n" ]
[ [ "numpy.array", "numpy.zeros", "sklearn.metrics.adjusted_mutual_info_score", "tensorflow.Variable", "tensorflow.train.Saver", "tensorflow.logging.info", "numpy.save", "numpy.load", "tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings", "sklearn.metrics.silhouette_score", "sklearn.cluster.DBSCAN", "tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig", "sklearn.metrics.completeness_score", "sklearn.metrics.homogeneity_score", "sklearn.metrics.adjusted_rand_score", "tensorflow.summary.FileWriter", "sklearn.metrics.v_measure_score" ] ]
softwaresaved/jamie
[ "b854e22966cc03e2ff316d1d25b636cdcce233d9" ]
[ "jamie/types.py" ]
[ "# Custom types and enums for use in Jamie\nimport re\nimport json\nimport numpy as np\nimport pandas as pd\nfrom bson.json_util import loads\nimport datetime\nfrom enum import Enum, auto\nfrom typing import Optional, List\nfrom dataclasses import dataclass, asdict\n\n\ndef _get_mongo_date(record, key):\n if key in record and record[key] is not None and \"$date\" in record[key]:\n return loads(json.dumps(record[key])).date()\n return None\n\n\nclass Alert(Enum):\n \"Alert levels for reporting\"\n High = 3\n Medium = 2\n Low = 1\n\n @staticmethod\n def level(n):\n if n > 0.80:\n return Alert.High\n elif n > 0.60:\n return Alert.Medium\n else:\n return Alert.Low\n\n @property\n def tag(self):\n return self.name.lower()\n\n\nclass PrecisionRecall(Enum):\n\n High = \"\"\"Both precision and recall are high. The model captures most\nof the target job type as well as being precise and avoiding false\nnegatives. The reported estimates can be considered a good estimate for the\ntarget job type.\"\"\"\n\n Low = \"\"\"Both precision and recall are low or average. The model can neither\ncorrectly classify most of the positives, nor is it precise. The reported estimates\nare unreliable for the target job type.\"\"\"\n\n HighLow = \"\"\"Precision is high while recall is low or average. The model is\nconservative; the target job type is precisely identified with few\nfalse positives, but in doing so, the model fails to identify many\njobs. The reported estimates should be considered an underestimate for\nthe target job type.\"\"\"\n\n LowHigh = \"\"\"\"Precision is low or average while recall is high. The model is\noverpredicting, that is predicting more jobs in the target job type than\nactual, thus leading to low precision; while recall is high because of\nthe overprediction. The reported estimates should be considered an overestimate\nfor the target job type.\"\"\"\n\n @staticmethod\n def get(precision, recall):\n return_map = {\n (True, True): PrecisionRecall.High,\n (False, False): PrecisionRecall.Low,\n (True, False): PrecisionRecall.HighLow,\n (False, True): PrecisionRecall.LowHigh,\n }\n return return_map[precision == Alert.High, recall == Alert.High]\n\n @property\n def alert(self):\n if self.name == \"High\":\n return Alert.High\n elif self.name == \"Low\":\n return Alert.Low\n else:\n return Alert.Medium\n\n\nclass Contract(Enum):\n \"Contract type: Fixed Term or Permanent\"\n FixedTerm = auto()\n Permanent = auto()\n\n\nclass JobType(Enum):\n rse = {\n \"title\": \"Research Software\",\n \"search_keywords\": [\"research\", \"software\"],\n }\n\n @property\n def title(self):\n return self.value[\"title\"]\n\n @property\n def search_keywords(self):\n return self.value[\"search_keywords\"]\n\n\n@dataclass\nclass JobPrediction:\n \"\"\"Represents prediction for a single job\n\n Attributes\n ----------\n jobid : str\n JobID from jobs.ac.uk\n job_title : str\n Job title\n snapshot : str\n Model snapshot used for prediction\n closes : datetime.date\n Close date for job\n contract : Contract\n Contract type\n department : str\n Department of the academic institution that\n the job is associated with\n employer : str\n Job employer\n date : datetime.date\n Date of the job. This is usually the same as the posted date,\n but if that is not available, defaults to the date of job applications\n closing, or the earliest date found in the job description. 
This\n attribute should be used for computing timeseries.\n posted : datetime.date\n Date job was posted\n extra_location : str\n Broad geographical location of job position\n salary_min : Optional[int]\n Minimum salary associated with the job. Sometimes\n jobs have a range of salaries depending on the experience\n of the applicant.\n salary_max : Optional[int]\n Maximum salary associated with the job. Sometimes\n jobs have a range of salaries depending on the experience\n of the applicant.\n salary_median : Optional[int]\n Median salary associated with the job.\n probability : float\n Probability that the job is classified in the positive class\n probability_lower : float\n Lower confidence interval of the probability\n probability_upper : float\n Upper confidence interval of the probability\n\n Parameters\n ----------\n prediction : dict\n Dictionary representing a single prediction from the JSONL file\n generated by :class:`Predict`\n \"\"\"\n\n jobid: str\n snapshot: str\n contract: str\n employer: str\n hours: List[str]\n job_title: str\n date: datetime.date\n posted: datetime.date\n extra_location: str\n probability: float\n probability_lower: float\n probability_upper: float\n department: Optional[str] = None\n location: Optional[str] = None\n salary_max: Optional[float] = None\n salary_min: Optional[float] = None\n salary_median: Optional[float] = None\n\n def __init__(self, prediction):\n self.jobid = prediction[\"jobid\"]\n self.snapshot = prediction[\"snapshot\"]\n if \"contract\" in prediction:\n self.contract = (\n Contract.Permanent\n if prediction[\"contract\"] == \"Permanent\"\n else Contract.FixedTerm\n )\n else:\n self.contract = None\n self.employer = prediction.get(\"employer\", None)\n self.hours = prediction.get(\"hours\", None)\n self.job_title = prediction[\"job_title\"]\n self.department = prediction.get(\"department\", None)\n self.location = prediction.get(\"location\", None)\n self.extra_location = prediction.get(\"extra_location\", None)\n self.salary_max = prediction.get(\"salary_max\", None)\n self.salary_min = prediction.get(\"salary_min\", None)\n self.salary_median = prediction.get(\"salary_median\", None)\n for p in [\"probability\", \"lower_ci\", \"upper_ci\"]:\n if not 0 <= prediction[p] <= 1:\n raise ValueError(\n \"Tried reading invalid {}={}.\".format(p, prediction[p])\n )\n self.probability = prediction[\"probability\"]\n self.probability_lower = prediction[\"lower_ci\"]\n self.probability_upper = prediction[\"upper_ci\"]\n self.posted = _get_mongo_date(prediction, \"placed_on\")\n self.date = _get_mongo_date(prediction, \"date\")\n closes = _get_mongo_date(prediction, \"closes\")\n self.date = self.date or self.posted or closes # select first non-null\n\n def to_dict(self):\n return asdict(self)\n\n\n@dataclass\nclass TrainingData:\n \"\"\"Schema for the training dataset.\n\n Required columns for model training are 'description', 'job_title',\n 'aggregate_tags'. 
The attribute 'placed_on' is required for timeseries\n graphs of the training data.\n\n Parameters\n ----------\n description : str\n Job description\n job_title : str\n Job title\n aggregate_tags : int\n Integer equals 0 or 1\n placed_on : datetime.date\n Date job was placed on\n jobid : str\n Unique jobid given by jobs.ac.uk\n job_ref : str\n Job reference, possibly used internally by the employer\n contract : str\n Contract type, fixed term or permanent, full-time or part-time\n department : str\n Department of the employer\n duration_ad_days : int\n Duration of job advertisment in days from placed_on to closes.\n employer : str\n Employer name\n enhanced : str\n HTML content can be \"enhanced\" or \"normal\", which alters the parsing\n extra_location : str\n Region of UK where job is from\n final_bool : int\n *Unknown* boolean type\n funding_amount : Optional[str]\n Funding amount text if for a PhD position\n funding_for: Optional[str]\n Specifies whether funding is for UK, EU, international or self-funded students\n hours : str\n Specifies whether job is full time or part time\n in_uk : bool\n Specifies whether job is actually in the UK. Some jobs are by UK institutions\n but located overseas\n invalid_code : Optional[List[str]]\n List of job attributes that could not be parsed\n json : Optional[str]\n JSON representation of job\n location : str\n City where job is located\n not_student : bool\n Whether job is a PhD level position\n original : int\n *Unknown* boolean type\n original_proba : float\n *Unknown* probability\n qualification_type : str\n Type of qualification required for the job in term of education level\n reference : str\n Unknown field\n region : str\n Unknown field, possibly country of the UK where job is\n run_tag : str\n Whether job was classified in first or second run\n salary : str\n Text fragment which has information on salary\n salary_max : Optional[float]\n If a salary range is specified, higher end of the salary range, otherwise same\n as median salary\n salary_min : Optional[float]\n If a salary range is specified, lower end of the salary range, otherwise same\n as median salary\n salary_median : Optional[float]\n Median of salary_min, salary_max if both are present, otherwise equals\n the salary value\n subject_area : List[str]\n List of academic fields for the job\n tags : List[str]\n List of tags (labels) given by coders to the job\n tags_1, tags_2 : str\n Label given to job given by coder 1 and 2 respectively,\n one of {'No', 'Some', 'Insufficient Evidence', 'Most'} when answering the question\n \"How much time would be spent in this job developing software?\"\n tags_3 : Optional[str]\n Label given to job by coder 3 when coder 1 and coder 2 disagreed\n tag_count : int\n Number of coders who classified the job\n agg_tags : float\n Aggregate score from coders\n aggregate_tags : int\n Classification of whether the job is in the target class or not\n (1 indicating it is, 0 otherwise)\n multi_agg_tags : str\n Unknown field\n consensus_tags : str\n [tentative] Consensus of tags_1 and tags_2\n diff_consensus_tags : str\n Unknown field\n \"\"\"\n\n description: str\n job_title: str\n aggregate_tags: int\n placed_on: datetime.date\n jobid: str\n job_ref: str\n contract: str\n department: str\n duration_ad_days: int\n employer: str\n enhanced: str\n extra_location: str\n final_bool: int\n funding_amount: Optional[str]\n funding_for: Optional[str]\n hours: str\n in_uk: bool\n invalid_code: Optional[List[str]]\n json: Optional[str]\n location: str\n not_student: 
bool\n original: int\n original_proba: float\n qualification_type: str\n reference: str\n region: str\n run_tag: str\n salary: str\n salary_max: Optional[float]\n salary_min: Optional[float]\n salary_median: Optional[float]\n subject_area: List[str]\n tags: List[str]\n tags_1: str\n tags_2: str\n tags_3: Optional[str]\n tag_count: int\n agg_tags: float\n aggregate_tags: int\n multi_agg_tags: str\n consensus_tags: str\n diff_consensus_tags: str\n\n def validate(self):\n \"Validates a single row of training set data\"\n contract_re = re.compile(\".*(Fixed-Term|Permanent|Temporary).*\", re.IGNORECASE)\n allowed_tags = [\"No\", \"Some\", \"Insufficient Evidence\", \"Most\"]\n return all(\n (\n self.tags_1 in allowed_tags and self.tags_2 in allowed_tags,\n self.enhanced in [\"normal\", \"enhanced\"],\n contract_re.match(self.contract),\n )\n )\n\n @staticmethod\n def reliability(data, coders=3):\n \"Returns DataFrame which can be used to compute reliability\"\n if not isinstance(data, pd.DataFrame):\n data = pd.read_csv(data)\n ordinal = {\"No\": 1, \"Some\": 2, \"Most\": 3, \"Insufficient Evidence\": np.nan}\n for i in range(1, coders + 1):\n data[\"coder%d\" % i] = data[\"tags_%d\" % i].map(ordinal.get)\n data = data.set_index(\"jobid\")\n return data[[\"coder%d\" % i for i in range(1, coders + 1)]]\n" ]
[ [ "pandas.read_csv" ] ]
zhykoties/Hierarchical-Actor-Critc-HAC-
[ "622bdfddeac964110496668164c949b2dee1dbf9" ]
[ "ant_environments/ant_reacher_3_levels/agent.py" ]
[ "import numpy as np\nfrom layer import Layer\nfrom environment import Environment\nimport pickle as cpickle\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport os\nimport pickle as cpickle\n\n# Below class instantiates an agent\nclass Agent():\n def __init__(self,FLAGS, env, agent_params):\n\n self.FLAGS = FLAGS\n self.sess = tf.Session()\n\n # Set subgoal testing ratio each layer will use\n self.subgoal_test_perc = agent_params[\"subgoal_test_perc\"]\n\n # Create agent with number of levels specified by user\n self.layers = [Layer(i,FLAGS,env,self.sess,agent_params) for i in range(FLAGS.layers)]\n\n # Below attributes will be used help save network parameters\n self.saver = None\n self.model_dir = None\n self.model_loc = None\n\n # Initialize actor/critic networks. Load saved parameters if not retraining\n self.initialize_networks()\n\n # goal_array will store goal for each layer of agent.\n self.goal_array = [None for i in range(FLAGS.layers)]\n\n self.current_state = None\n\n # Track number of low-level actions executed\n self.steps_taken = 0\n\n # Below hyperparameter specifies number of Q-value updates made after each episode\n self.num_updates = 40\n\n # Below parameters will be used to store performance results\n self.performance_log = []\n\n self.other_params = agent_params\n\n\n # Determine whether or not each layer's goal was achieved. Also, if applicable, return the highest level whose goal was achieved.\n def check_goals(self,env):\n\n # goal_status is vector showing status of whether a layer's goal has been achieved\n goal_status = [False for i in range(self.FLAGS.layers)]\n\n max_lay_achieved = None\n\n # Project current state onto the subgoal and end goal spaces\n proj_subgoal = env.project_state_to_subgoal(env.sim, self.current_state)\n proj_end_goal = env.project_state_to_end_goal(env.sim, self.current_state)\n\n for i in range(self.FLAGS.layers):\n\n goal_achieved = True\n\n # If at highest layer, compare to end goal thresholds\n if i == self.FLAGS.layers - 1:\n\n # Check dimensions are appropriate\n assert len(proj_end_goal) == len(self.goal_array[i]) == len(env.end_goal_thresholds), \"Projected end goal, actual end goal, and end goal thresholds should have same dimensions\"\n\n # Check whether layer i's goal was achieved by checking whether projected state is within the goal achievement threshold\n for j in range(len(proj_end_goal)):\n if np.absolute(self.goal_array[i][j] - proj_end_goal[j]) > env.end_goal_thresholds[j]:\n goal_achieved = False\n break\n\n # If not highest layer, compare to subgoal thresholds\n else:\n\n # Check that dimensions are appropriate\n assert len(proj_subgoal) == len(self.goal_array[i]) == len(env.subgoal_thresholds), \"Projected subgoal, actual subgoal, and subgoal thresholds should have same dimensions\"\n\n # Check whether layer i's goal was achieved by checking whether projected state is within the goal achievement threshold\n for j in range(len(proj_subgoal)):\n if np.absolute(self.goal_array[i][j] - proj_subgoal[j]) > env.subgoal_thresholds[j]:\n goal_achieved = False\n break\n\n # If projected state within threshold of goal, mark as achieved\n if goal_achieved:\n goal_status[i] = True\n max_lay_achieved = i\n else:\n goal_status[i] = False\n\n\n return goal_status, max_lay_achieved\n\n\n def initialize_networks(self):\n\n model_vars = tf.trainable_variables()\n self.saver = tf.train.Saver(model_vars)\n\n # Set up directory for saving models\n self.model_dir = os.getcwd() + '/models'\n self.model_loc = self.model_dir + 
'/HAC.ckpt'\n\n if not os.path.exists(self.model_dir):\n os.makedirs(self.model_dir)\n\n # Initialize actor/critic networks\n self.sess.run(tf.global_variables_initializer())\n\n # If not retraining, restore weights\n # if we are not retraining from scratch, just restore weights\n if self.FLAGS.retrain == False:\n self.saver.restore(self.sess, tf.train.latest_checkpoint(self.model_dir))\n\n\n # Save neural network parameters\n def save_model(self, episode):\n self.saver.save(self.sess, self.model_loc, global_step=episode)\n\n\n # Update actor and critic networks for each layer\n def learn(self):\n\n for i in range(len(self.layers)):\n self.layers[i].learn(self.num_updates)\n\n\n # Train agent for an episode\n def train(self,env, episode_num,total_episodes):\n\n # Select final goal from final goal space, defined in \"design_agent_and_env.py\"\n self.goal_array[self.FLAGS.layers - 1] = env.get_next_goal(self.FLAGS.test)\n print(\"Next End Goal: \", self.goal_array[self.FLAGS.layers - 1])\n\n # Select initial state from in initial state space, defined in environment.py\n self.current_state = env.reset_sim(self.goal_array[self.FLAGS.layers - 1])\n if env.name == \"ant_reacher.xml\":\n print(\"Initial Ant Position: \", self.current_state[:3])\n # print(\"Initial State: \", self.current_state)\n\n # Reset step counter\n self.steps_taken = 0\n\n # Train for an episode\n goal_status, max_lay_achieved = self.layers[self.FLAGS.layers-1].train(self,env, episode_num = episode_num)\n\n # Update actor/critic networks if not testing\n if not self.FLAGS.test and total_episodes > self.other_params[\"num_pre_training_episodes\"]:\n self.learn()\n\n # Return whether end goal was achieved\n return goal_status[self.FLAGS.layers-1]\n\n\n # Save performance evaluations\n def log_performance(self, success_rate):\n\n # Add latest success_rate to list\n self.performance_log.append(success_rate)\n\n # Save log\n cpickle.dump(self.performance_log,open(\"performance_log.p\",\"wb\"))\n" ]
[ [ "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.disable_v2_behavior", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.trainable_variables", "numpy.absolute", "tensorflow.compat.v1.train.latest_checkpoint" ] ]
leozz37/makani
[ "c94d5c2b600b98002f932e80a313a06b9285cc1b", "c94d5c2b600b98002f932e80a313a06b9285cc1b" ]
[ "analysis/control/actuator_util.py", "analysis/force_balance_loop/tools/fbl_load_csim_database.py" ]
[ "# Copyright 2020 Makani Technologies LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Functions for working with actuator models.\"\"\"\n\nimport ctypes\n\nfrom makani.control import actuator_util\nfrom makani.lib.python import ctype_util\nfrom makani.lib.python.autogen import autogen_util\nimport numpy as np\n\n\n_thrust_moment_keys = ['thrust', 'moment']\n\n\ndef _PythonToCtype(data, c_type):\n \"\"\"Populate a ctypes data type with a Python structure.\"\"\"\n if c_type is actuator_util.Vec3:\n # Handle Vec3.\n assert len(data) == 3\n c_data = c_type()\n c_data.x = data[0]\n c_data.y = data[1]\n c_data.z = data[2]\n return c_data\n elif hasattr(c_type, '_length_'):\n # Handle arrays.\n length = getattr(c_type, '_length_')\n assert len(data) == length\n\n c_data = c_type()\n for i in range(length):\n c_data[i] = _PythonToCtype(data[i], getattr(c_type, '_type_'))\n\n elif hasattr(c_type, '_fields_'):\n # Handle structures.\n fields = autogen_util.GetCFields(c_type)\n assert set(data.keys()) == {field for field, _ in fields}\n\n c_data = c_type()\n for field, field_type in fields:\n setattr(c_data, field, _PythonToCtype(data[field], field_type))\n\n else:\n c_data = c_type(data)\n\n return c_data\n\n\ndef _ThrustMomentToArray(thrust_moment):\n \"\"\"Convert a ThrustMoment dictionary into an array of thrust and moments.\n\n Args:\n thrust_moment: A ThrustMoment dictionary to be converted.\n\n Returns:\n A 4-by-1 numpy.matrix version of thrust_moment in array form.\n \"\"\"\n assert thrust_moment.keys() == _thrust_moment_keys\n\n return np.matrix([thrust_moment['thrust'],\n thrust_moment['moment'][0],\n thrust_moment['moment'][1],\n thrust_moment['moment'][2]])\n\n\ndef _ArrayToThrustMoment(array):\n \"\"\"Convert a 4-by-1 array into a ThrustMoment dictionary.\n\n Args:\n array: A 4-by-1 numpy.matrix to be converted.\n\n Returns:\n A ThrustMoment dictionary.\n \"\"\"\n assert np.size(array) == 4\n\n return {'thrust': array[0],\n 'moment': [array[1], array[2], array[3]]}\n\n\ndef _AddThrustMoment(thrust_moment_0, thrust_moment_1):\n \"\"\"Add two ThrustMoment dictionaries.\"\"\"\n\n assert thrust_moment_0.keys() == _thrust_moment_keys\n assert thrust_moment_1.keys() == _thrust_moment_keys\n\n thrust_moment = {}\n for k in _thrust_moment_keys:\n thrust_moment[k] = (np.asarray(thrust_moment_0[k])\n + np.asarray(thrust_moment_1[k]))\n\n return thrust_moment\n\n\ndef MixRotors(thrust_moment, weights,\n v_app, pqr, stacking_state,\n hover_flight_mode, air_density,\n rotor_params,\n rotor_control_params):\n \"\"\"Wrapper around MixRotors function.\n\n See MixRotors in control/actuator_util.c.\n\n Args:\n thrust_moment: Dict with keys 'thrust', whose value is a float, and\n 'moment', whose value is an array of three floats.\n weights: Dict with keys 'thrust', whose value is a float, and\n 'moment', whose value is an array of three floats.\n v_app: Float storing the airspeed [m/s].\n pqr: Array of 3 floats representing the body rates [rad/s].\n stacking_state: Integer (see the enum 
StackingState).\n hover_flight_mode: Bool indicating if we are in a hover flight mode.\n air_density: Float storing the air density [kg/m^3].\n rotor_params: Array of kNumMotors dicts storing the contents of RotorParams\n structures.\n rotor_control_params: Dict storing the contents of the RotorControlParams\n structure.\n\n Returns:\n An 8-by-1 np.matrix containing the rotor speeds [rad/s].\n \"\"\"\n assert len(rotor_params) == actuator_util.kNumMotors\n c_rotor_params = [\n _PythonToCtype(r, actuator_util.RotorParams) for r in rotor_params\n ]\n c_rotor_params_pointers = (\n ctypes.POINTER(actuator_util.RotorParams) * len(rotor_params))()\n for i, c_r in enumerate(c_rotor_params):\n c_rotor_params_pointers[i] = ctypes.pointer(c_r)\n\n c_rotors = (ctypes.c_double * actuator_util.kNumMotors)()\n c_available_thrust_moment = actuator_util.ThrustMoment()\n\n c_v_app_locals = (ctypes.c_double * actuator_util.kNumMotors)()\n\n actuator_util.MixRotors(\n ctypes.pointer(_PythonToCtype(thrust_moment, actuator_util.ThrustMoment)),\n ctypes.pointer(_PythonToCtype(weights, actuator_util.ThrustMoment)),\n v_app,\n ctypes.pointer(_PythonToCtype(pqr, actuator_util.Vec3)),\n stacking_state,\n hover_flight_mode,\n air_density,\n ctype_util.SizelessArray(c_rotor_params_pointers),\n ctypes.pointer(_PythonToCtype(rotor_control_params,\n actuator_util.RotorControlParams)),\n c_rotors,\n ctypes.pointer(c_available_thrust_moment),\n c_v_app_locals)\n\n return np.matrix([[c_rotors[i]] for i in range(actuator_util.kNumMotors)])\n\n\ndef LinearizeMixRotors(thrust_moment, params, h=1e-6):\n \"\"\"Calculate a Jacobian matrix for the MixRotors function.\n\n Produces a linearized model:\n\n MixRotors(thrust_moment + delta_thrust_moment)\n ~ MixRotors(thrust_moment) + A * delta_thrust_moment\n\n Args:\n thrust_moment: A ThrustMoment dictionary around which to linearize.\n params: A parameters structure from mconfig.\n h: Step-size used in finite difference.\n\n Returns:\n A numpy.matrix of Jacobian values of units rad/s/N and rad/s/(N-m).\n \"\"\"\n num_inputs = 4\n num_outputs = len(params['system']['rotors'])\n dfdu = np.matrix(np.zeros((num_outputs, num_inputs)))\n\n for i in range(num_inputs):\n e = np.zeros(num_inputs)\n e[i] = h\n delta_thrust_moment = _ArrayToThrustMoment(e)\n\n dfdu[:, i] = (\n MixRotors(\n _AddThrustMoment(thrust_moment, delta_thrust_moment),\n params['control']['hover']['output']['weights'],\n 0.0,\n [0.0, 0.0, 0.0],\n actuator_util.kStackingStateNormal,\n True,\n params['system']['phys']['rho'],\n params['system']['rotors'],\n params['control']['rotor_control'])\n - MixRotors(\n thrust_moment,\n params['control']['hover']['output']['weights'],\n 0.0,\n [0.0, 0.0, 0.0],\n actuator_util.kStackingStateNormal,\n True,\n params['system']['phys']['rho'],\n params['system']['rotors'],\n params['control']['rotor_control'])) / (2.0 * h)\n\n return dfdu\n", "# Copyright 2020 Makani Technologies LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provides a Python interface to C-Sim 
databases.\n\nThis code was converted from:\nhttps://codesearch.corp.google.com/makani/analysis/aero/load_database.py?dr=C&g=0&l=1\n\nNote that the angles are all given in degrees instead of the standard C-Sim unit\nof rad. This is because the databases are currently written in terms of degrees.\n\"\"\"\n\nfrom __future__ import absolute_import\nimport json\nimport numpy as np\nfrom scipy.interpolate import RectBivariateSpline as rbsf\n\n\ndef UseRBSF(alphas, betas, data, alphad_order=2, betad_order=2):\n return rbsf(alphas, betas, data, kx=alphad_order, ky=betad_order, s=0)\n\n\nclass CSimDatabase(object):\n \"\"\"Loads an aero database from a .json file.\"\"\"\n\n def __init__(self, filename):\n with open(filename) as f:\n database = json.load(f)\n\n # Used for the sizing and interpolation purposes later on.\n self._num_alphads = database['num_alphas']\n self._num_betads = database['num_betas']\n self._alpha_limits = {\n 'alpha_max': np.max(np.asarray(database['alphads'][0])),\n 'alpha_min': np.min(np.asarray(database['alphads'][0]))\n }\n self._beta_limits = {\n 'beta_max': np.max(np.asarray(database['betads'][0])),\n 'beta_min': np.min(np.asarray(database['betads'][0]))\n }\n\n # alpha and beta must comprise a rectangular grid.\n self._alphads = np.asarray(database['alphads'])\n self._betads = np.asarray(database['betads'])\n\n # databases are generated about a particular omega_hat (normalized pqr)\n self._bomega_hat_0 = np.asarray(database['omega_hat'])\n\n # Load the database force coefficients that FBL may or may not utilize.\n self._bf_coef = {\n 'cx': np.asarray(database['CXtot'][0]),\n 'cy': np.asarray(database['CYtot'][0]),\n 'cz': np.asarray(database['CZtot'][0])\n }\n\n # All database objects are allowed to modifiy force coefficients depending\n # on the body angular rates.\n self._bdf_coef_dpqr = {\n 'cxp': np.asarray(database['CXp'][0]),\n 'cxq': np.asarray(database['CXq'][0]),\n 'cxr': np.asarray(database['CXr'][0]),\n 'cyp': np.asarray(database['CYp'][0]),\n 'cyq': np.asarray(database['CYq'][0]),\n 'cyr': np.asarray(database['CYr'][0]),\n 'czp': np.asarray(database['CZp'][0]),\n 'czq': np.asarray(database['CZq'][0]),\n 'czr': np.asarray(database['CZr'][0])\n }\n\n # Load the database moment coefficients that FBL may or may not utilize.\n self._bm_coef = {\n 'cl': np.asarray(database['Cltot'][0]),\n 'cm': np.asarray(database['Cmtot'][0]),\n 'cn': np.asarray(database['Cntot'][0])\n }\n\n # All database objects are allowed to modifiy force coefficients depending\n # on the body angular rates.\n self._bdm_coef_dpqr = {\n 'clp': np.asarray(database['Clp'][0]),\n 'clq': np.asarray(database['Clq'][0]),\n 'clr': np.asarray(database['Clr'][0]),\n 'cmp': np.asarray(database['Cmp'][0]),\n 'cmq': np.asarray(database['Cmq'][0]),\n 'cmr': np.asarray(database['Cmr'][0]),\n 'cnp': np.asarray(database['Cnp'][0]),\n 'cnq': np.asarray(database['Cnq'][0]),\n 'cnr': np.asarray(database['Cnr'][0])\n }\n\n # Track any constant database component offsets.\n self._coef_offsets = {\n 'cx_0': 0., 'cy_0': 0., 'cz_0': 0., 'cl_0': 0., 'cm_0': 0., 'cn_0': 0.}\n\n def OffsetBodyForceCoeffs(self, offset_dict):\n \"\"\"Offsets body force coefficients by specified amounts.\n\n Args:\n offset_dict: dictionary containing force coefficient offsets.\n \"\"\"\n #TODO: C-Sim accounts for the matrix body rotation rates for each\n # alpha and beta value. Currently this is just a constant offset, which is\n # a discrepancy. 
Next step is a full body rotation accounting.\n self._coef_offsets['cx_0'] = offset_dict['CX_0']\n self._coef_offsets['cy_0'] = offset_dict['CY_0']\n self._coef_offsets['cz_0'] = offset_dict['CZ_0']\n self._bf_coef['cx'] += offset_dict['CX_0']\n self._bf_coef['cy'] += offset_dict['CY_0']\n self._bf_coef['cz'] += offset_dict['CZ_0']\n\n def OffsetBodyMomentCoeffs(self, offset_dict):\n \"\"\"Offset body moment coefficients by specified amounts.\n\n Args:\n offset_dict: dictionary containing moment coefficient offsets.\n \"\"\"\n #TODO: C-Sim accounts for the matrix body rotation rates for each\n # alpha and beta value. Currently this is just a constant offset, which is\n # a discrepancy. Next step is a full body rotation accounting.\n self._coef_offsets['cl_0'] = offset_dict['CL_0']\n self._coef_offsets['cm_0'] = offset_dict['CM_0']\n self._coef_offsets['cn_0'] = offset_dict['CN_0']\n self._bf_coef['cl'] += offset_dict['CL_0']\n self._bf_coef['cm'] += offset_dict['CM_0']\n self._bf_coef['cn'] += offset_dict['CN_0']\n\n def GetConstantCoeffOffsets(self):\n return self._coef_offsets\n\n\nclass FittedDatabase(CSimDatabase):\n \"\"\"Uses Bivariate polynomial fits to model the imported C-Sim database.\n\n Note that when this database instantiates, it does not automatically generate\n fits for the moment coefficients or the roll moment contributions due to body\n rates.\n \"\"\"\n\n def __init__(self, filename, fit_method='bivariate'):\n super(FittedDatabase, self).__init__(filename)\n\n alphas = self._alphads\n betas = self._betads\n\n if fit_method == 'bivariate':\n self._fitfunc = UseRBSF\n else:\n assert False, 'FittedDatabase fit_method not found'\n\n self.FitBodyForceCoefficients(alphas, betas)\n self.FitBodyRateForceDerivativeTerms(alphas, betas)\n self.FitBodyMomentCoefficients(alphas, betas)\n self.FitBodyRateMomentDerivativeTerms(alphas, betas)\n\n def FitBodyForceCoefficients(self, alphas, betas):\n self._bf_coef_fit = {\n 'cx': self._fitfunc(alphas, betas, self._bf_coef['cx']),\n 'cy': self._fitfunc(alphas, betas, self._bf_coef['cy']),\n 'cz': self._fitfunc(alphas, betas, self._bf_coef['cz'])\n }\n\n def FitBodyRateForceDerivativeTerms(self, alphas, betas):\n self._bdf_coef_dpqr_fit = {\n 'cxp': self._fitfunc(alphas, betas, self._bdf_coef_dpqr['cxp']),\n 'cxq': self._fitfunc(alphas, betas, self._bdf_coef_dpqr['cxq']),\n 'cxr': self._fitfunc(alphas, betas, self._bdf_coef_dpqr['cxr']),\n 'cyp': self._fitfunc(alphas, betas, self._bdf_coef_dpqr['cyp']),\n 'cyq': self._fitfunc(alphas, betas, self._bdf_coef_dpqr['cyq']),\n 'cyr': self._fitfunc(alphas, betas, self._bdf_coef_dpqr['cyr']),\n 'czp': self._fitfunc(alphas, betas, self._bdf_coef_dpqr['czp']),\n 'czq': self._fitfunc(alphas, betas, self._bdf_coef_dpqr['czq']),\n 'czr': self._fitfunc(alphas, betas, self._bdf_coef_dpqr['czr'])\n }\n\n def FitBodyMomentCoefficients(self, alphas, betas):\n self._bm_coef_fit = {\n 'cl': self._fitfunc(alphas, betas, self._bm_coef['cl']),\n 'cm': self._fitfunc(alphas, betas, self._bm_coef['cm']),\n 'cn': self._fitfunc(alphas, betas, self._bm_coef['cn'])\n }\n\n def FitBodyRateMomentDerivativeTerms(self, alphas, betas):\n self._bdm_coef_dpqr_fit = {\n 'clp': self._fitfunc(alphas, betas, self._bdm_coef_dpqr['clp']),\n 'clq': self._fitfunc(alphas, betas, self._bdm_coef_dpqr['clq']),\n 'clr': self._fitfunc(alphas, betas, self._bdm_coef_dpqr['clr']),\n 'cmp': self._fitfunc(alphas, betas, self._bdm_coef_dpqr['cmp']),\n 'cmq': self._fitfunc(alphas, betas, self._bdm_coef_dpqr['cmq']),\n 'cmr': self._fitfunc(alphas, 
betas, self._bdm_coef_dpqr['cmr']),\n 'cnp': self._fitfunc(alphas, betas, self._bdm_coef_dpqr['cnp']),\n 'cnq': self._fitfunc(alphas, betas, self._bdm_coef_dpqr['cnq']),\n 'cnr': self._fitfunc(alphas, betas, self._bdm_coef_dpqr['cnr'])\n }\n\n def CalcForceCoeffs(self, alphad, betad, omega_hat=np.zeros((3, ))):\n \"\"\"Computes the body force and moment coefficients from a C-Sim database.\n\n Args:\n alphad: Angle of attack [deg].\n betad: Side-slip angle [deg].\n pqr_body: Array of 3-by-1 *dimensional* body rates [#].\n\n Returns:\n A dict containing computed force coefficients.\n \"\"\"\n cx = self._bf_coef_fit['cx'].ev(alphad, betad)\n cy = self._bf_coef_fit['cy'].ev(alphad, betad)\n cz = self._bf_coef_fit['cz'].ev(alphad, betad)\n cx += self.CalcCXFromOmegaHat(alphad, betad, omega_hat)\n cy += self.CalcCYFromOmegaHat(alphad, betad, omega_hat)\n cz += self.CalcCZFromOmegaHat(alphad, betad, omega_hat)\n\n return {'cx': cx, 'cy': cy, 'cz': cz}\n\n def CalcCXFromOmegaHat(self, alphad, betad, omega_hat):\n domh_x = omega_hat[0] - self._bomega_hat_0[0]\n domh_y = omega_hat[1] - self._bomega_hat_0[1]\n domh_z = omega_hat[2] - self._bomega_hat_0[2]\n cx_from_omh = self._bdf_coef_dpqr_fit['cxp'].ev(alphad, betad) * domh_x\n cx_from_omh += (self._bdf_coef_dpqr_fit['cxq'].ev(alphad, betad) * domh_y)\n cx_from_omh += (self._bdf_coef_dpqr_fit['cxr'].ev(alphad, betad) * domh_z)\n return cx_from_omh\n\n def CalcCYFromOmegaHat(self, alphad, betad, omega_hat):\n domh_x = omega_hat[0] - self._bomega_hat_0[0]\n domh_y = omega_hat[1] - self._bomega_hat_0[1]\n domh_z = omega_hat[2] - self._bomega_hat_0[2]\n cy_from_omh = self._bdf_coef_dpqr_fit['cyp'].ev(alphad, betad) * domh_x\n cy_from_omh += (self._bdf_coef_dpqr_fit['cyq'].ev(alphad, betad) * domh_y)\n cy_from_omh += (self._bdf_coef_dpqr_fit['cyr'].ev(alphad, betad) * domh_z)\n return cy_from_omh\n\n def CalcCZFromOmegaHat(self, alphad, betad, omega_hat):\n domh_x = omega_hat[0] - self._bomega_hat_0[0]\n domh_y = omega_hat[1] - self._bomega_hat_0[1]\n domh_z = omega_hat[2] - self._bomega_hat_0[2]\n cz_from_omh = self._bdf_coef_dpqr_fit['czp'].ev(alphad, betad) * domh_x\n cz_from_omh += (self._bdf_coef_dpqr_fit['czq'].ev(alphad, betad) * domh_y)\n cz_from_omh += (self._bdf_coef_dpqr_fit['czr'].ev(alphad, betad) * domh_z)\n return cz_from_omh\n\n def OffsetBodyForceCoeffs(self, offset_dict):\n super(FittedDatabase, self).OffsetBodyForceCoeffs(offset_dict)\n self.FitBodyForceCoefficients(self._alphads, self._betads)\n\n def OffsetBodyMomentCoeffs(self, offset_dict):\n super(FittedDatabase, self).OffsetBodyMomentCoefs(offset_dict)\n self.FitBodyMomentCoefficients(self._alphads, self._betads)\n\n def CalcMomentCoeffs(self, alphad, betad, pqr_body=np.zeros((3,))):\n \"\"\"Calculates the body force and moment coefficients from a C-Sim database.\n\n Args:\n alphad: Angle of attack [deg].\n betad: Side-slip angle [deg].\n pqr_body: Array of 3-by-1 dimensional body rates [#].\n\n Returns:\n A dict containing computed moment coefficients.\n\n \"\"\"\n cl = self._bm_coef_fit['cl'].ev(alphad, betad)\n cm = self._bm_coef_fit['cm'].ev(alphad, betad)\n cn = self._bm_coef_fit['cn'].ev(alphad, betad)\n cl += self.CalcCLFromOmegaHat(alphad, betad, pqr_body)\n cm += self.CalcCMFromOmegaHat(alphad, betad, pqr_body)\n cn += self.CalcCNFromOmegaHat(alphad, betad, pqr_body)\n\n return {'cl': cl, 'cm': cm, 'cn': cn}\n\n def CalcCLFromOmegaHat(self, alphad, betad, omega_hat):\n domh_x = omega_hat[0] - self._bomega_hat_0[0]\n domh_y = omega_hat[1] - self._bomega_hat_0[1]\n domh_z 
= omega_hat[2] - self._bomega_hat_0[2]\n cl_from_omh = self._bdm_coef_dpqr_fit['clp'].ev(alphad, betad) * domh_x\n cl_from_omh += (self._bdm_coef_dpqr_fit['clq'].ev(alphad, betad) * domh_y)\n cl_from_omh += (self._bdm_coef_dpqr_fit['clr'].ev(alphad, betad) * domh_z)\n return cl_from_omh\n\n def CalcCMFromOmegaHat(self, alphad, betad, omega_hat):\n domh_x = omega_hat[0] - self._bomega_hat_0[0]\n domh_y = omega_hat[1] - self._bomega_hat_0[1]\n domh_z = omega_hat[2] - self._bomega_hat_0[2]\n cm_from_omh = self._bdm_coef_dpqr_fit['cmp'].ev(alphad, betad) * domh_x\n cm_from_omh += (self._bdm_coef_dpqr_fit['cmq'].ev(alphad, betad) * domh_y)\n cm_from_omh += (self._bdm_coef_dpqr_fit['cmr'].ev(alphad, betad) * domh_z)\n return cm_from_omh\n\n def CalcCNFromOmegaHat(self, alphad, betad, omega_hat):\n domh_x = omega_hat[0] - self._bomega_hat_0[0]\n domh_y = omega_hat[1] - self._bomega_hat_0[1]\n domh_z = omega_hat[2] - self._bomega_hat_0[2]\n cn_from_omh = self._bdm_coef_dpqr_fit['cnp'].ev(alphad, betad) * domh_x\n cn_from_omh += (self._bdm_coef_dpqr_fit['cnq'].ev(alphad, betad) * domh_y)\n cn_from_omh += (self._bdm_coef_dpqr_fit['cnr'].ev(alphad, betad) * domh_z)\n return cn_from_omh\n\n\n\n" ]
[ [ "numpy.matrix", "numpy.asarray", "numpy.zeros", "numpy.size" ], [ "scipy.interpolate.RectBivariateSpline", "numpy.asarray", "numpy.zeros" ] ]
alucardxh/zvt
[ "02a2c64828146f4e15e702150f26a5ab6647a91c" ]
[ "zvt/recorders/em/actor/em_stock_top_ten_recorder.py" ]
[ "# -*- coding: utf-8 -*-\nfrom typing import List\n\nimport pandas as pd\n\nfrom zvt.api.utils import to_report_period_type, value_to_pct\nfrom zvt.contract import ActorType\nfrom zvt.contract.api import df_to_db\nfrom zvt.contract.recorder import TimestampsDataRecorder\nfrom zvt.domain import Stock, ActorMeta\nfrom zvt.domain.actor.stock_actor import StockTopTenHolder, StockInstitutionalInvestorHolder\nfrom zvt.recorders.em.em_api import get_holder_report_dates, get_holders\nfrom zvt.utils import to_pd_timestamp, to_time_str\n\n\nclass EMStockTopTenRecorder(TimestampsDataRecorder):\n entity_provider = 'joinquant'\n entity_schema = Stock\n\n provider = 'em'\n data_schema = StockTopTenHolder\n\n def init_timestamps(self, entity_item) -> List[pd.Timestamp]:\n result = get_holder_report_dates(code=entity_item.code)\n if result:\n return [to_pd_timestamp(item['END_DATE']) for item in result]\n\n def on_finish_entity(self, entity):\n super().on_finish_entity(entity)\n super().on_finish_entity(entity)\n holders = StockTopTenHolder.query_data(entity_id=entity.id,\n filters=[StockTopTenHolder.holding_values == None],\n session=self.session, return_type='domain')\n for holder in holders:\n ii = StockInstitutionalInvestorHolder.query_data(entity_id=entity.id,\n filters=[\n StockInstitutionalInvestorHolder.holding_values > 1,\n StockInstitutionalInvestorHolder.holding_ratio > 0.01,\n StockInstitutionalInvestorHolder.timestamp == holder.timestamp],\n limit=1, return_type='domain')\n if ii:\n holder.holding_values = holder.holding_ratio * ii[0].holding_values / ii[0].holding_ratio\n self.session.commit()\n\n def record(self, entity, start, end, size, timestamps):\n for timestamp in timestamps:\n the_date = to_time_str(timestamp)\n result = get_holders(code=entity.code, end_date=the_date)\n if result:\n holders = []\n new_actors = []\n for item in result:\n # 机构\n if item['IS_HOLDORG'] == '1':\n domains: List[ActorMeta] = ActorMeta.query_data(filters=[ActorMeta.code == item['HOLDER_CODE']],\n return_type='domain')\n if not domains:\n actor_type = ActorType.corporation.value\n actor = ActorMeta(entity_id=f'{actor_type}_cn_{item[\"HOLDER_CODE\"]}',\n id=f'{actor_type}_cn_{item[\"HOLDER_CODE\"]}',\n entity_type=actor_type,\n exchange='cn',\n code=item[\"HOLDER_CODE\"],\n name=item[\"HOLDER_NAME\"])\n else:\n actor = domains[0]\n else:\n actor_type = ActorType.individual.value\n actor = ActorMeta(entity_id=f'{actor_type}_cn_{item[\"HOLDER_NAME\"]}',\n id=f'{actor_type}_cn_{item[\"HOLDER_NAME\"]}',\n entity_type=actor_type,\n exchange='cn',\n code=item[\"HOLDER_NAME\"],\n name=item[\"HOLDER_NAME\"])\n new_actors.append(actor.__dict__)\n holder = {'id': f'{entity.entity_id}_{the_date}_{actor.entity_id}',\n 'entity_id': entity.entity_id,\n 'timestamp': timestamp,\n 'code': entity.code,\n 'name': entity.name,\n\n 'actor_id': actor.entity_id,\n 'actor_type': actor.entity_type,\n 'actor_code': actor.code,\n 'actor_name': actor.name,\n\n 'report_date': timestamp,\n 'report_period': to_report_period_type(timestamp),\n\n 'holding_numbers': item['HOLD_NUM'],\n 'holding_ratio': value_to_pct(item['HOLD_NUM_RATIO'], default=0)}\n holders.append(holder)\n if holders:\n df = pd.DataFrame.from_records(holders)\n df_to_db(data_schema=self.data_schema, df=df, provider=self.provider,\n force_update=True)\n if new_actors:\n df = pd.DataFrame.from_records(new_actors)\n df_to_db(data_schema=ActorMeta, df=df, provider=self.provider,\n force_update=False)\n\n\nif __name__ == '__main__':\n 
EMStockTopTenRecorder(codes=['000002']).run()\n# the __all__ is generated\n__all__ = ['EMStockTopTenRecorder']" ]
[ [ "pandas.DataFrame.from_records" ] ]
KazuhisaFujita/AlphaDDA
[ "664742567883cf3e08c2c53b3bce3112b8cc0560" ]
[ "AlphaDDA1/Othello/AlphaZero_mcts.py" ]
[ "#---------------------------------------\n#Since : 2019/04/10\n#Update: 2021/11/16\n# -*- coding: utf-8 -*-\n#---------------------------------------\nimport numpy as np\nfrom copy import deepcopy\nimport random\nimport math\nfrom nn import NNetWrapper as nnet\nfrom parameters import Parameters\nfrom Othello_bitboard import Othello\n\nclass Node():\n def __init__(self, board, states, player, move = None, psa = 0, terminal = False, winner = 0, parent = None, depth = 0):\n self.nsa = 0\n self.wsa = 0\n self.qsa = 0\n self.psa = psa\n self.player = player\n self.move = move\n self.board = board\n self.states = states\n self.children = []\n self.parent = parent\n self.terminal = terminal\n self.winner = winner\n\n def Get_states(self):\n return deepcopy(self.states)\n\n def Get_board(self):\n return deepcopy(self.board)\n\n def Get_player(self):\n return deepcopy(self.player)\n\n def Get_winner(self):\n return deepcopy(self.winner)\n\n def Add_child(self, board, states, player, move, psa, terminal, winner, parent):\n child = Node(board = board, states = states, player = player, move = move, psa = psa, terminal = terminal, winner = winner, parent = parent)\n self.children.append(child)\n\nclass A_MCTS:\n def __init__(self, game, net = None, params = Parameters()):\n self.num_moves = None\n\n g = game\n self.player = g.current_player\n if net == None:\n self.nn = nnet()\n else:\n self.nn = net\n\n self.root = Node(board = g.Get_board(), states = g.Get_states(), player = g.current_player)\n self.params = params\n\n def softmax(self, x):\n x = np.exp(x / self.params.Temp)\n return x/np.sum(x)\n\n def Expand_node(self, node, psa_vector):\n temp_g = Othello()\n temp_g.board = node.Get_board()\n temp_g.current_player = node.Get_player()\n valid_moves = temp_g.Get_valid_moves()\n for m in valid_moves:\n temp_g.Ini_board()\n temp_g.board = node.Get_board()\n temp_g.state = node.Get_states()\n temp_g.current_player = node.Get_player()\n temp_g.Play_action(m)\n psa = psa_vector[m[0] * self.params.board_x + m[1]]\n board = temp_g.Get_board()\n player = temp_g.current_player\n states = temp_g.Get_states()\n terminal = temp_g.Check_game_end()\n winner = temp_g.Get_winner()\n node.Add_child(board = board, states = states , player = player, move = m, psa = psa, terminal = terminal, winner = winner, parent = node)\n\n def Run(self):\n temp_g = Othello()\n\n for _ in range(self.params.num_mcts_sims):\n node = self.root\n\n # search a leaf node\n while len(node.children) != 0:\n node = self.Search(node)\n #Here, the node is a leaf node.\n\n v = 0\n if node.terminal:\n v = node.Get_winner()\n else:\n psa_vector, v = self.nn.predict(node.Get_states())\n\n #calculate psa\n temp_g.Ini_board()\n temp_g.board = node.Get_board()\n temp_g.current_player = node.Get_player()\n valid_moves = temp_g.Get_valid_moves()\n\n # normalize probability\n psa_vector /= np.sum(np.array([psa_vector[i[0] * self.params.board_x + i[1]] for i in valid_moves])) + 1e-7\n\n self.Expand_node(node, psa_vector)\n\n self.Back_prop(node, v)\n\n return self.Decide_move()\n\n def Decide_move(self):\n if self.num_moves > self.params.opening:\n return self.root.children[np.argmax(np.array([i.nsa for i in self.root.children]))].move\n else:\n pi = self.softmax(np.array([i.nsa for i in self.root.children]))\n best_child = self.root.children[np.random.choice(len(self.root.children), p = pi.tolist())]\n return best_child.move\n\n def Search(self, node):\n if node.parent != None:\n N = np.sum(np.array([i.nsa for i in node.children]))\n best_child = 
node.children[np.argmax(np.array([self.l(i.qsa, i.nsa, i.psa, N) for i in node.children], dtype=\"float\"))]\n else:\n if np.random.rand() > self.params.rnd_rate:\n N = np.sum(np.array([i.nsa for i in node.children]))\n best_child = node.children[np.argmax(np.array([self.l(i.qsa, i.nsa, i.psa, N) for i in node.children], dtype=\"float\"))]\n else:\n best_child = random.choice(node.children)\n\n return best_child\n\n def l(self, qsa, nsa, psa, N):\n return qsa + self.params.cpuct * psa * math.sqrt(N) / (nsa + 1)\n\n def Back_prop(self, node, v):\n while node != self.root:\n node.nsa += 1\n node.wsa += v * ( - node.player)\n node.qsa = node.wsa / node.nsa\n node = node.parent\n\n def Get_prob(self):\n prob = np.zeros(self.params.action_size)\n for i in self.root.children:\n prob[i.move[0] * self.params.board_x + i.move[1]] += i.nsa\n\n prob /= np.sum(prob)\n return(prob)\n" ]
[ [ "numpy.array", "numpy.random.rand", "numpy.zeros", "numpy.sum", "numpy.exp" ] ]
urialon/lingvo
[ "0819730882bfaa68d2eeb702e13d4c943172d5ff", "0819730882bfaa68d2eeb702e13d4c943172d5ff" ]
[ "lingvo/core/base_model.py", "lingvo/core/sendrecv.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Base model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport six\nfrom six.moves import range\nimport tensorflow as tf\n\nfrom lingvo.core import base_input_generator\nfrom lingvo.core import base_layer\nfrom lingvo.core import build_data\nfrom lingvo.core import cluster_factory\nfrom lingvo.core import early_stop\nfrom lingvo.core import hyperparams\nfrom lingvo.core import lr_schedule\nfrom lingvo.core import optimizer\nfrom lingvo.core import py_utils\nfrom lingvo.core import summary_utils\nfrom lingvo.core import task_scheduler\n\n\ndef CreateTaskGlobalStep(task_name):\n \"\"\"Create if needed and return the global_step.\"\"\"\n with tf.name_scope(None), tf.variable_scope(py_utils.global_variable_scope):\n graph_collections = [tf.GraphKeys.GLOBAL_VARIABLES, 'TASK_GLOBAL_STEP']\n _, v = py_utils.CreateVariable(\n name=task_name + '_global_step',\n params=py_utils.WeightParams([], py_utils.WeightInit.Constant(0),\n tf.int64),\n trainable=False,\n collections=graph_collections)\n summary_utils.scalar(v.name, v)\n return v\n\n\nclass StatsCounter(object):\n \"\"\"A single counter in TF.\"\"\"\n\n def __init__(self, name):\n self._name = name\n _, self._var = py_utils.CreateVariable(\n name=name,\n params=py_utils.WeightParams([], py_utils.WeightInit.Constant(0),\n tf.int64),\n trainable=False)\n self._value = self._var.value() + 0 # Makes a copy.\n\n def Value(self):\n \"\"\"Returns the current counter value.\"\"\"\n return self._value\n\n def IncBy(self, params, delta):\n \"\"\"Increment the counter by delta and return the new value.\"\"\"\n # NOTE: We must ensure _value is computed (_var + 0) before\n # updating _var with delta.\n delta = tf.to_int64(delta)\n with tf.control_dependencies([self._value]):\n summary_utils.scalar(self._name, self._value)\n return tf.identity(tf.assign_add(self._var, delta))\n\n\nclass BaseTask(base_layer.BaseLayer):\n \"\"\"A single encoder/decoder task.\n\n One task usually consists of one InputGenerator, one train_op,\n a list of eval_metrics, etc.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super(BaseTask, cls).Params()\n p.Define('input', None, 'Input generator Params.')\n p.Define('encoder', None, 'Encoder Params.')\n p.Define('online_encoder', None, 'Online Encoder Params.')\n p.Define('decoder', None, 'Decoder Params.')\n p.Define('train', hyperparams.Params(),\n 'Params to control how this task should be trained.')\n\n tp = p.train\n tp.Define(\n 'start_up_delay_steps', 200, 'i-th replica starts training after '\n 'i*(i+1)/2*start_up_delay_steps steps')\n tp.Define('max_steps', 4 * 10**6, 'Maximum number of training steps.')\n tp.Define('tpu_steps_per_loop', 100, 'The number of training steps per '\n 'training loop for TPUs.')\n tp.Define(\n 
'vn_start_step', 200000000,\n 'Step starting from which variational noise is added to '\n 'params values during training.')\n tp.Define('vn_std', 0.0, 'Std of the variational noise.')\n tp.Define(\n 'l2_regularizer_weight', None,\n 'If not None, L2 regularization to apply to the weights. '\n 'Otherwise, disable L2 regularization.')\n tp.Define(\n 'l1_regularizer_weight', None,\n 'If not None, L1 regularization to apply to the weights. '\n 'Otherwise, disable L1 regularization.')\n tp.Define('learning_rate', 0.0, 'learning rate to use.')\n tp.Define('clip_gradient_norm_to_value', 0.0,\n 'Clip gradient norm to this value.')\n tp.Define('grad_norm_to_clip_to_zero', 0.0,\n 'Clip gradient to 0 if its norm exceeds this value.')\n tp.Define('grad_norm_tracker', None, 'Params for GradNormTracker.')\n tp.Define('optimizer', optimizer.Adam.Params(), 'Params for the optimizer.')\n tp.Define('lr_schedule',\n lr_schedule.ContinuousLearningRateSchedule.Params(),\n 'Learning rate decay schedule.')\n tp.Define('early_stop', early_stop.EarlyStop.Params(),\n 'Early stopping based on dev-set performance.')\n tp.Define(\n 'ema_decay', 0.0,\n 'If > 0, enable ExponentialMovingAverage during training '\n 'with the give decay. '\n 'Must be < 1. Disabled if <= 0.')\n tp.Define(\n 'bprop_variable_filter', None,\n 'If set, only backprop variables whose names partially match '\n 'this regexp (re.search).')\n tp.Define(\n 'init_from_checkpoint_rules', {},\n 'If not None, a dictionary with keys corresponding to a checkpoint '\n 'path and values corresponding to variable loading rules is expected. '\n 'Each key is expected to be a path to a checkpoint from which to '\n 'initialize part of the model. Variables are only loaded form this '\n 'path during initialization and will override values provided by '\n 'initialization.'\n 'The corresponding values (loading_rules) are expected to be a tuple '\n 'consisting of two list: loading rules, and ignore rules, respectively.'\n 'The first list (loading rules) contains the list of variables '\n 'which should be initialized from the checkpoint: each element in the '\n 'list is a pair of strings. The first element is a regex and the '\n 'second is a python format string. If a variable in the model matches '\n 'a regex, we rename using the format string to determine the '\n 'corresponding var in the checkpoint. Note that, it is an error if a '\n 'model variable matches multiple loading rules, for the same '\n 'checkpoint or across checkpoints.'\n 'The second list (ignore rules) is a list of regexes which specify '\n 'variables in the model which should not be initialized using the '\n 'loading rules. Thus, if a variable in the model to be trained matches '\n 'one of the rules in the loading rules, as well as one of the regular '\n 'expressions in the ignore rules, the variable will not be initialized '\n 'from the checkpoint, but will instead be initialized from the '\n 'variable initalizer defined in the graph.'\n 'Examples:'\n '{\"checkpoint_path\": ([(\"(.*)\", \"%s\")], [])} will initialize all the '\n 'model parameters from the checkpoint_path.')\n tp.Define(\n 'pruning_hparams_dict', None, 'Pruning related hyperparameters. A dict '\n 'with hyperparameter: value pairs. 
See tf.contrib.model_pruning.')\n tp.Define('save_interval_seconds', 60 * 10,\n 'Generates a checkpoint roughly once every this many seconds.')\n tp.Define('summary_interval_steps', 100,\n 'Generates a checkpoint roughly once every this many steps.')\n\n p.Define('eval', hyperparams.Params(),\n 'Params to control how this task should be evaled.')\n ep = p.eval\n ep.Define(\n 'samples_per_summary', 1000,\n 'If > 0, generates one summary after this many samples, at most. '\n 'If == 0 or the dataset has fewer examples, evaluate the whole set.')\n ep.Define(\n 'decoder_samples_per_summary', 0,\n 'If > 0, each decoder summary will contain at most this many samples. '\n 'If == 0, defaults to `samples_per_summary` for '\n 'backwards compatibility.')\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n assert issubclass(params.cls, BaseTask)\n super(BaseTask, self).__init__(params)\n\n p = self.params\n\n if p.input:\n # TODO(zhifengc): Consider a simpler way to ensure the input\n # generator stops after one epoch.\n if p.is_eval and p.eval:\n seq_inp = issubclass(p.input.cls,\n base_input_generator.BaseInputGeneratorFromFiles)\n if p.input.num_samples == 0:\n # Dataset size is unknown. Computes eval summary based on num_samples.\n assert p.eval.samples_per_summary > 0\n elif (p.eval.samples_per_summary == 0) or (p.input.num_samples <\n p.eval.samples_per_summary):\n # If we know the dataset size and we want to evaluate the full\n # set, we need to coordinate the input generator to flush out\n # all samples so the evaler and decoder compute metrics on the\n # whole set for each summary step.\n if seq_inp:\n p.input.flush_every_n = p.input.num_samples\n p.eval.samples_per_summary = p.input.num_samples\n if seq_inp and p.input.num_batcher_threads > 1:\n tf.logging.warning('input.num_batcher_threads > 1 inside eval mode. 
'\n 'The input generator may not iterate over exactly '\n 'one epoch per run')\n\n with tf.device(\n self.cluster.input_device), py_utils.outside_all_rewrites():\n self.CreateChild('input', p.input)\n\n self._var_grads = None\n self._encoder = None\n self._online_encoder = None\n self._decoder = None\n\n self._total_examples = None\n self._total_nans_and_infs = None\n self._loss = None\n self._num_predictions = None\n self._train_op = None\n self._eval_metrics = {}\n self._per_example = {}\n self._trainer_verbose_tensors = {}\n\n # Create the gradient mask,\n self._per_input_gradient_mask = None\n task_global_step_list = tf.get_collection('TASK_GLOBAL_STEP',\n '^%s_global_step' % p.name)\n if len(task_global_step_list) > 1:\n raise ValueError('Found multiple task_global_step for task %s' % p.name)\n self._global_step = (\n task_global_step_list[0] if len(task_global_step_list) == 1 else\n py_utils.GetOrCreateGlobalStep())\n tp = p.train\n # p.train can be None if this task is the teacher/student task in a\n # DistillationTask.\n if tp and self.cluster.job in ('worker', 'trainer', 'trainer_client',\n 'controller'):\n if tp.grad_norm_tracker:\n with tf.variable_scope(p.name):\n self.CreateChild('grad_norm_tracker', tp.grad_norm_tracker)\n\n self.CreateChild('lr_schedule', tp.lr_schedule)\n self.CreateChild('optimizer', tp.optimizer)\n self._UpdateVnConfig()\n\n def ComputePredictions(self, theta, input_batch):\n \"\"\"Computes predictions for `input_batch`.\n\n The output can be in the form of probablistic distributions, e.g., softmax\n logits for discrete outputs, mixture of logistics for continuous values, or\n regression values.\n\n For training/evaluation, the output will be used for computing loss and\n gradient updates, including comparing predicted distributions between\n teacher and student for distillation. During inference the output can be\n used to compute final outputs, perhaps with sampling.\n\n Args:\n theta: A `.NestedMap` object containing variable values of this task.\n input_batch: A `.NestedMap` object containing input tensors to this tower.\n\n Returns:\n Predictions, either a single Tensor, a `.NestedMap`, or a namedtuple.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n def ComputeLoss(self, theta, input_batch, predictions):\n \"\"\"Computes loss and other metrics for the given predictions.\n\n Args:\n theta: A `.NestedMap` object containing variable values of this task.\n input_batch: A `.NestedMap` object containing input tensors to this tower.\n predictions: The output of `ComputePredictions`.\n\n Returns:\n Two dicts:\n A dict containing str keys and (metric, weight) pairs as values, where\n one of the keys is expected to be 'loss'.\n A dict containing arbitrary tensors describing something about each\n training example, where the first dimension of each tensor is the batch\n index.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n def FilterPerExampleTensors(self, per_example):\n \"\"\"Return the per-example tensors ProcessFPropResults needs.\n\n By default we don't send any per-example tensors to ProcessFPropResults\n because some may be expensive to compute. 
Implement this method to let\n some of them pass through.\n\n Args:\n per_example: A dict of tensors returned as per-example tensors from FProp.\n\n Returns:\n A dict containing a subset of the key/value pairs in per_example.\n \"\"\"\n return {}\n\n def ProcessFPropResults(self, sess, global_step, metrics, per_example):\n \"\"\"Called once for each train loop.\n\n BaseModel.ProcessFPropResults is also called on each loop, so you\n can put your implementation wherever it is most convenient for you.\n\n Args:\n sess: a session.\n global_step: approximate number of model training steps.\n metrics: the metrics dict returned by FPropTower.\n per_example: the per_example dict returned by FPropTower.\n \"\"\"\n pass\n\n def FPropTower(self, theta, input_batch):\n \"\"\"Forward propagation through one tower of the model.\n\n Args:\n theta: A `.NestedMap` object containing variable values of this task\n copied to this tower's devices.\n input_batch: A `.NestedMap` object containing input tensors to this tower.\n\n Returns:\n Two dicts:\n A dict containing str keys and (metric, weight) pairs as values, where\n one of the keys is expected to be 'loss'.\n A dict containing arbitrary tensors describing something about each\n training example, where the first dimension of each tensor is the batch\n index.\n \"\"\"\n predicted = self.ComputePredictions(theta, input_batch)\n return self.ComputeLoss(theta, input_batch, predicted)\n\n def FProp(self, theta, input_batch):\n \"\"\"Forward propagation.\n\n This default `FProp` implementation here supports batch splitting in\n synchronous and asynchronous training when sub-classes implement\n `FPropTower`.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n input_batch: The input batch. A `NestedMap` of tensors. 
Or, if input batch\n spiltting is used, a list of `NestedMap`, one for each split.\n\n Returns:\n Two dicts:\n A dict containing str keys and (metric, weight) pairs as values, where\n one of the keys is expected to be 'loss'.\n A dict containing arbitrary tensors describing something about each\n training example, where the first dimension of each tensor is the batch\n index.\n \"\"\"\n p = self.params\n with tf.name_scope('fprop'), tf.name_scope(p.name):\n # Always reset step seed at the start of a new global_step.\n py_utils.ResetStepSeed()\n if py_utils.use_tpu():\n metrics, per_example = self._FPropTpu(theta, input_batch)\n else:\n metrics, per_example = self._FPropSplitInputBatch(theta, input_batch)\n self._FPropResult(metrics, per_example)\n return metrics, per_example\n\n def _FPropTpu(self, theta, input_batch):\n p = self.params\n with tf.name_scope('fprop'), tf.name_scope(p.name):\n with tf.name_scope('tower_0_0'):\n metrics, per_example = self.FPropTower(theta, input_batch)\n metrics = py_utils.WeightedAvgOfMetrics([metrics])\n return metrics, per_example\n\n def _FPropSplitInputBatch(self, theta, input_batch):\n \"\"\"Splits the input batch on the input device.\"\"\"\n cluster = self.cluster\n num_splits = cluster.num_splits_per_client\n\n if not isinstance(input_batch, list):\n input_batch = [input_batch]\n\n assert len(input_batch) == num_splits, (len(input_batch), num_splits)\n\n # dev_list_per_replica[i][j] is the i-th worker's j-th device.\n dev_list_per_replica = cluster.available_devices.tolist()\n\n # Asserts invariant of the total number of splits w.r.t.,\n # splits per worker.\n splits_per_replica = cluster.num_splits_per_replica\n assert num_splits == splits_per_replica * len(dev_list_per_replica), (\n num_splits, splits_per_replica, len(dev_list_per_replica))\n\n all_metrics = []\n all_per_example_tensors = []\n for w_id, w_devs in enumerate(dev_list_per_replica):\n # Make local copy of the vars, shard on devices for this worker.\n theta_local = py_utils.CreateLocalTheta(\n theta, w_devs, label='worker %d' % w_id)\n\n for s_id in range(splits_per_replica):\n # s_id-th split for the w_id-th worker.\n split_id = splits_per_replica * w_id + s_id\n with py_utils.ModelSplit(split_id):\n with tf.device(cluster.WorkerDeviceInModelSplit(0)):\n with tf.name_scope('tower_%d_%d' % (w_id, s_id)):\n batch = self.input_generator.PreprocessInputBatch(\n input_batch[split_id])\n metrics, per_example = self.FPropTower(theta_local, batch)\n all_metrics.append(metrics)\n all_per_example_tensors.append(per_example)\n\n return py_utils.WeightedAvgOfMetrics(\n all_metrics), py_utils.ConcatPerExampleTensors(all_per_example_tensors)\n\n def _FPropResult(self, metrics, per_example):\n # Adds stats about the input batch.\n metrics['num_samples_in_batch'] = (tf.convert_to_tensor(\n self.input_generator.InputBatchSize()), tf.constant(1.0))\n # Generates summaries.\n for name, (value, weight) in six.iteritems(metrics):\n self.AddEvalMetric(name, value, weight)\n per_example = self.FilterPerExampleTensors(per_example)\n for name, value in six.iteritems(per_example):\n self.AddPerExampleTensor(name, value)\n # Loss.\n self._loss, self._num_predictions = metrics['loss']\n self._loss = py_utils.CheckNumerics(self._loss)\n summary_utils.scalar('num_predictions', self._num_predictions)\n\n def GetInputBatch(self):\n \"\"\"Returns input batch from input_generator.\"\"\"\n if py_utils.use_tpu():\n return self.input_generator.CreateTpuFeeds()\n else:\n cluster = self.cluster\n num_splits = 
cluster.num_splits_per_client\n with tf.device(cluster.input_device):\n return self.input_generator.SplitInputBatch(num_splits)\n\n def FPropDefaultTheta(self, input_batch=None):\n \"\"\"Calls `FProp` with this layer's parameters.\"\"\"\n if input_batch is None:\n input_batch = self.GetInputBatch()\n return self.FProp(self.theta, input_batch)\n\n def GetVarGrads(self):\n return self._var_grads\n\n def AdjustGradients(self, vars_gradients):\n \"\"\"Allow for custom gradient manipulation prior to clipping.\"\"\"\n return vars_gradients\n\n def BProp(self):\n \"\"\"Constructs the backward graph.\"\"\"\n p = self.params\n vs = self.vars\n bprop_variable_filters = self.input_generator.GetBpropVariableFilters()\n # Only compute the mask if the variable filters are not empty.\n if bprop_variable_filters != [''] * len(bprop_variable_filters):\n self._ComputeGradientMask(bprop_variable_filters)\n if p.train.bprop_variable_filter:\n\n def VariableFilter(v):\n if re.search(p.train.bprop_variable_filter, v.name):\n return True\n tf.logging.info('bprop disabled by bprop_variable_filter: %s', v.name)\n return False\n\n vs = vs.Filter(VariableFilter)\n tf.logging.info('Filtered bprop variables: %s', vs)\n self._BPropForVariables(vs)\n\n def _ComputeGradientMask(self, bprop_variable_filters):\n \"\"\"Compute gradient mask for each variable and bprop_variable_filters.\n\n Note that per_input_gradient_mask[var][i] will be 1 if var matches\n bprop_variable_filter[i], 0 otherwise.\n\n Args:\n bprop_variable_filters: A list of regex bprop_variable_filters for each\n file pattern.\n \"\"\"\n self._per_input_gradient_mask = py_utils.NestedMap()\n all_vars = set(self.vars.Flatten())\n for var in all_vars:\n self._per_input_gradient_mask[var.name] = (\n tf.zeros(len(bprop_variable_filters), dtype=tf.float32))\n for i in range(len(bprop_variable_filters)):\n if re.search(bprop_variable_filters[i], var.name):\n self._per_input_gradient_mask[var.name] += (\n tf.one_hot(i, len(bprop_variable_filters), dtype=tf.float32))\n\n def _HasNanOrInf(self, var_grads):\n \"\"\"Returns a bool tensor to indicate if `var_grads` contains NaNs or Infs.\n\n Args:\n var_grads: A `.NestedMap` with (var, grad) tuple as the map value.\n\n Returns:\n A bool scalar tensor to indicate if the `var_grads` contains NaNs or Infs.\n \"\"\"\n\n def HasNanOrInf(x):\n with tf.device(x.device):\n if x.dtype.is_complex:\n return tf.reduce_any(\n [HasNanOrInf(tf.real(x)),\n HasNanOrInf(tf.imag(x))])\n return tf.reduce_any(tf.logical_or(tf.is_nan(x), tf.is_inf(x)))\n\n return tf.reduce_any([(HasNanOrInf(g.values) if isinstance(\n g, tf.IndexedSlices) else HasNanOrInf(g))\n for (_, g) in var_grads.Flatten()])\n\n def ScaleGradients(self, var_grads):\n \"\"\"Scales gradients according to training params.\n\n Args:\n var_grads: a `.NestedMap` whose values are (var, grad) pairs.\n\n Returns:\n (has_nan_or_inf, grad_scale, final_var_grads).\n\n - has_nan_or_inf: a scalar of 0 or 1, indicating whether there is any NaN\n or Inf in input gradients.\n - grad_scale: the gradient scale. 0 if gradient updates should be skipped\n for the step.\n - final_var_grads: a `.NestedMap` whose values are (var, grad) pairs,\n where gradients have already been scaled.\n \"\"\"\n p = self.params\n tp = p.train\n\n # Computes gradients' norm and adds their summaries. 
Note that all_grad_norm\n # may be nan, which may cause grad_scale to be nan.\n for name, vg in var_grads.FlattenItems():\n summary_utils.AddNormSummary(name, py_utils.NestedMap(s=vg))\n _, all_grad_norm = summary_utils.AddNormSummary('all', var_grads)\n grad_norm_is_nan_or_inf = tf.logical_or(\n tf.is_nan(all_grad_norm), tf.is_inf(all_grad_norm))\n\n # Optional gradient adjustment. Note that this happens after computing\n # all_grad_norm.\n var_grads = self.AdjustGradients(var_grads)\n\n # Handles NaN/Inf gradients.\n has_nan_or_inf = self._HasNanOrInf(var_grads)\n # Grad norm can still be inf even if none of the individual grad is inf.\n has_nan_or_inf = tf.logical_or(has_nan_or_inf, grad_norm_is_nan_or_inf)\n\n # Computes gradient's scale.\n grad_scale = tf.constant(1.0)\n if tp.clip_gradient_norm_to_value:\n # If all_grad_norm > tp.clip_gradient_norm_to_value, scales\n # all_grads so that the norm is 1.0.\n grad_scale = tf.minimum(1.0,\n tp.clip_gradient_norm_to_value / all_grad_norm)\n\n if tp.grad_norm_to_clip_to_zero:\n # If all_grad_norm > tp.grad_norm_to_clip_to_zero, treats\n # grad_scale as 0. This way, we ignore this step.\n grad_scale *= tf.cast(all_grad_norm < tp.grad_norm_to_clip_to_zero,\n p.dtype)\n\n if tp.grad_norm_tracker:\n grad_scale *= self.grad_norm_tracker.FPropDefaultTheta(\n all_grad_norm, has_nan_or_inf)\n\n # Force grad_scale to be 0 if there is any NaN or Inf in gradients.\n grad_scale = tf.where(has_nan_or_inf, 0.0, grad_scale)\n\n summary_utils.scalar('grad_scale_all', grad_scale)\n final_var_grads = py_utils.ApplyGradMultiplier(var_grads, grad_scale)\n return has_nan_or_inf, grad_scale, final_var_grads\n\n def _BPropForVariables(self, vmap):\n \"\"\"Constructs the backward graph for the given variables.\n\n Args:\n vmap: a `.NestedMap` of variables.\n \"\"\"\n p = self.params\n tp = p.train\n\n # Compute gradients.\n self._var_grads = py_utils.ComputeGradients(self.loss, vmap)\n\n # L2 regularizer.\n if tp.l2_regularizer_weight is not None:\n l2_loss, self._var_grads = py_utils.AdjustGradientsWithLpLoss(\n self._var_grads, tp.l2_regularizer_weight, p=2.0)\n summary_utils.scalar('l2_loss', l2_loss)\n\n # L1 regularizer.\n if tp.l1_regularizer_weight is not None:\n l1_loss, self._var_grads = py_utils.AdjustGradientsWithLpLoss(\n self._var_grads, tp.l1_regularizer_weight, p=1.0)\n summary_utils.scalar('l1_loss', l1_loss)\n\n # Mask gradients only if the mask is set.\n if self._per_input_gradient_mask:\n bprop_onehot = self.input_generator.GetInputSourceOneHot()\n self._var_grads = py_utils.MaskGradients(\n self._var_grads, self._per_input_gradient_mask, bprop_onehot)\n\n # Apply gradient clipping.\n has_nan_or_inf, _, self._var_grads = self.ScaleGradients(self._var_grads)\n\n # Histogram summary.\n summary_utils.CollectVarHistogram(self._var_grads)\n\n lrs = self.lr_schedule.Value(self._global_step)\n summary_utils.scalar('lr_schedule', lrs)\n lr = tp.learning_rate * lrs\n\n var_update_op = self.optimizer.Apply(lr, self._var_grads)\n\n relevant_bn_updates, _ = py_utils.FindRelevantBatchNormUpdates(\n self.loss, tf.get_collection(py_utils.BATCH_NORM_UPDATES))\n batch_norm_updates = tf.group(*relevant_bn_updates)\n\n # Update stats.\n stats_updates = tf.group(\n self.IncrementTotalSamples(),\n self.IncrementTotalNans(tf.to_int32(has_nan_or_inf)))\n\n # Post training step update.\n post_training_step_updates = self.PostTrainingStepUpdate(self._global_step)\n\n # Get the op to update the weight masks and thresholds\n mask_update_op = self._GetMaskUpdateOp()\n\n # 
TODO(rpang): try to structure _train_op as:\n # tf.cond(skip_step, <only update skip stats>, <all updates>)\n # so that we skip all other updates when a step is skipped.\n train_ops = [\n var_update_op, batch_norm_updates, stats_updates,\n post_training_step_updates, mask_update_op\n ]\n with tf.control_dependencies(train_ops):\n true_global_step = py_utils.GetOrCreateGlobalStep()\n with tf.colocate_with(true_global_step):\n increment_global_steps = tf.assign_add(true_global_step, 1)\n if self._global_step != true_global_step:\n with tf.colocate_with(self._global_step):\n increment_global_steps = tf.group(increment_global_steps,\n tf.assign_add(self._global_step, 1))\n\n train_ops.append(increment_global_steps)\n self._train_op = tf.group(*train_ops, name='train')\n\n def ApplyExponentialMovingAverage(self, ema):\n \"\"\"Wraps `self.train_op` with an op updating exponential moving average.\"\"\"\n # We need to apply EMA to trainable and moving average variable of this\n # Task, not just bprop vars, so that we create a shadow\n # '/ExponentialMovingAverage' variable for every trainable and moving\n # average variable.\n all_vars = set(tf.trainable_variables()) | set(\n tf.moving_average_variables())\n all_vars &= set(self.vars.Flatten())\n for var in all_vars:\n tf.logging.debug('ApplyExponentialMovingAverage: %s', var.name)\n with tf.control_dependencies(\n [self._train_op]), tf.name_scope('moving_average'):\n self._train_op = ema.apply(all_vars)\n\n def Decode(self, input_batch):\n \"\"\"Constructs the inference graph for eval decoding.\n\n Args:\n input_batch: The input batch. A `NestedMap` of tensors. Or, if input batch\n spiltting is used, a list of `NestedMap`, one for each split.\n\n Returns:\n a dict of Tensors as decoder output.\n \"\"\"\n return {}\n\n def Inference(self):\n \"\"\"Constructs the inference graph.\n\n Each subgraph represents a public API for a part of the graph which can\n be operated independently. By convention, the subgraph named 'default'\n should perform end to end inference via the input generator.\n\n Note that having distinct subgraphs (e.g. 'encoder', 'decoder') is\n not just a space optimization: when driving the graph externally in an\n online fashion, evaluation often needs to be broken into pieces. In this\n case, the graph will be constructed with only those pieces.\n\n Returns:\n An `inference_graph_pb2.InferenceGraph` message.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n def CreateDecoderMetrics(self):\n \"\"\"Creates a dict of decoder metrics for `PostProcessDecodeOut` to update.\n\n Returns a dict mapping from string keys to `.BaseMetric` objects.\n \"\"\"\n pass\n\n def PostProcessDecodeOut(self, decode_out_dict, decode_metrics_dict):\n \"\"\"Post-processes decoder out and updates contents of `decode_metrics_dict`.\n\n Args:\n decode_out_dict: A dictionary of Tensors fetched.\n decode_metrics_dict: A dict mapping from string key to `.BaseMetric`\n object as created by `CreateDecoderMetrics`.\n\n Returns:\n output_key_value_pairs - a list of (key, value) pairs that can be saved\n (i.e. of type str, bytes, or unicode).\n \"\"\"\n pass\n\n @property\n def loss(self):\n assert self._loss is not None, ('No loss is defined. Call FProp first.')\n return self._loss\n\n @property\n def train_op(self):\n assert self._train_op is not None, (\n 'No train op is defined. 
Call BProp first.')\n return self._train_op\n\n @property\n def global_step(self):\n assert self._global_step is not None, ('No global_step is defined.')\n return self._global_step\n\n @property\n def input_generator(self):\n return self.input\n\n @property\n def eval_metrics(self):\n \"\"\"Returns the evaluation metrics.\n\n Returns:\n A map from metric name (a python string) to a tuple (value, weight).\n Both value and weight are scalar Tensors.\n \"\"\"\n return self._eval_metrics\n\n @property\n def per_example_tensors(self):\n \"\"\"Returns per-example outputs.\n\n Returns:\n A map from tensor name (a python string) to a tensor, where the\n first dimension is the batch index of the training example corresponding\n to this output.\n \"\"\"\n return self._per_example\n\n def AddEvalMetric(self, name, value, weight):\n \"\"\"Adds a metric to the eval metrics.\n\n Args:\n name: A python string. The name of the metric.\n value: A scalar Tensor.\n weight: A scalar Tensor.\n\n Raises:\n ValueError: if `name` is already defined.\n\n \"\"\"\n if name in self._eval_metrics:\n raise ValueError('Metric %s has already been defined.' % name)\n self._eval_metrics[name] = (value, weight)\n\n def AddPerExampleTensor(self, name, value):\n if name in self._per_example:\n raise ValueError('Metric %s has already been defined.' % name)\n self._per_example[name] = value\n\n @property\n def total_examples(self):\n \"\"\"Returns the total number of training examples processed so far.\"\"\"\n return self._total_examples.Value()\n\n @property\n def trainer_verbose_tensors(self):\n \"\"\"Return the dict of verbose tensors to eval in the training loop.\"\"\"\n return self._trainer_verbose_tensors\n\n def AddTrainerVerboseTensor(self, name, target):\n \"\"\"Add a (set of) tensors to be evaluated in the training loop.\n\n Args:\n name: A python string. The name of the target(s).\n target: A Tensor or a list or dict of Tensors.\n\n Raises:\n ValueError: if `name` is already defined.\n\n \"\"\"\n if name in self._trainer_verbose_tensors:\n raise ValueError('Verbose target %s has already been defined.' 
% name)\n self._trainer_verbose_tensors[name] = target\n\n def IncrementTotalSamples(self, value=None):\n \"\"\"Updates the total number of training examples with the batch size.\"\"\"\n p = self.params\n if self._total_examples is None:\n with tf.variable_scope(p.name):\n self._total_examples = StatsCounter('total_samples')\n if value is None:\n assert self.input_generator is not None, ('No input generator defined')\n value = self.input_generator.InputBatchSize()\n return self._total_examples.IncBy(p, value)\n\n def IncrementTotalNans(self, value):\n \"\"\"Updates the total number of NaN/Inf gradients by `value`.\"\"\"\n if self._total_nans_and_infs is None:\n with tf.variable_scope(\n py_utils.global_variable_scope, reuse=tf.AUTO_REUSE):\n self._total_nans_and_infs = StatsCounter('total_nan_gradients')\n return self._total_nans_and_infs.IncBy(self.params, value)\n\n def _UpdateVnConfig(self):\n \"\"\"Update vn config from the various vn flags.\"\"\"\n p = self.params\n tp = p.train\n if tp:\n vn_enabled = ((tp.vn_std > 0) and p.vn and\n (p.vn.global_vn or p.vn.per_step_vn))\n if p.is_eval or (not vn_enabled):\n p.vn = py_utils.VariationalNoiseParams(None, False, False)\n else:\n # vn.scale is dependent on global_step.\n p.vn.scale = tf.cast(self._global_step > tp.vn_start_step,\n py_utils.FPropDtype(p)) * tp.vn_std\n\n def _GetMaskUpdateOp(self):\n \"\"\"Returns op to update masks and threshold variables for model pruning.\"\"\"\n p = self.params\n tp = p.train\n mask_update_op = tf.no_op()\n if tp.pruning_hparams_dict:\n assert isinstance(tp.pruning_hparams_dict, dict)\n pruning_hparams = tf.contrib.model_pruning.get_pruning_hparams(\n ).override_from_dict(tp.pruning_hparams_dict)\n pruning_obj = tf.contrib.model_pruning.Pruning(\n pruning_hparams, global_step=self._global_step)\n pruning_obj.add_pruning_summaries()\n mask_update_op = pruning_obj.conditional_mask_update_op()\n return mask_update_op\n\n\nclass DistillationTask(BaseTask):\n \"\"\"A task to distill knowledge from a teacher task to a student task.\n\n The training parameters (e.g., learning rate) are determined only by\n `DistillationTask.params.train`. Teacher and student task's training and eval\n parameters must be set to None.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super(DistillationTask, cls).Params()\n p.Define('teacher', None, 'The teacher task params.')\n p.Define('student', None, 'The student task params.')\n p.Define(\n 'distillation_loss_weight',\n # Only uses distillation loss by default.\n lr_schedule.ConstantOne.Params(),\n 'A schedule of distillation loss weight. '\n 'The weight determines the fraction of total loss contributed by '\n 'distillation loss, while the rest loss will be computed against '\n 'the ground truth. '\n 'A weight of 0 means to only use ground-truth and ignore teacher '\n 'predictions, while a weight 1 means to only use teacher '\n 'predictions and ignore ground truth. '\n 'The weight is specified as a schedule to allow it to change '\n 'during training.')\n p.Define(\n 'teacher_target_type', 'truth', 'The target type for the teacher. '\n 'Choices are: '\n ' \"truth\": using the ground-truth target labels '\n ' \"beam\": using the 1-best hypothesis from the beam search.')\n p.Define(\n 'beam_search_temperature', 1.0, 'The temperature to scale the'\n 'log-prob of each beam search hypothesis. 
This is used in '\n 'training only')\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n assert issubclass(params.cls, DistillationTask)\n super(DistillationTask, self).__init__(params)\n\n p = self.params\n # While student does not need its own input generator for training, it\n # needs an input generator for inference graphs.\n p.student.input = p.input\n # Teacher also might need an input generator, eg. for waveform_processor.\n p.teacher.input = p.input\n with tf.variable_scope(p.name):\n for child in ('teacher', 'student'):\n child_p = getattr(p, child)\n assert issubclass(child_p.cls, BaseTask)\n assert child_p.train is None\n assert child_p.eval is None\n # In theory it's ok for teacher to be a DistillationTask. In practice\n # it probably won't happen.\n assert not issubclass(child_p.cls, DistillationTask)\n child_p.name = child\n self.CreateChild(child, child_p)\n self.CreateChild('distillation_loss_weight', p.distillation_loss_weight)\n\n def ComputePredictions(self, theta, input_batch):\n p = self.params\n with tf.name_scope(p.name):\n if p.teacher_target_type == 'truth':\n teacher_predictions = self.teacher.ComputePredictions(\n theta.teacher, input_batch)\n student_predictions = self.student.ComputePredictions(\n theta.student, input_batch)\n return py_utils.NestedMap(\n teacher=teacher_predictions, student=student_predictions)\n elif p.teacher_target_type == 'beam':\n (teacher_predictions, teacher_input_batch,\n teacher_beam_prob) = self.teacher.ComputeBeamPredictions(\n theta.teacher, input_batch, p.beam_search_temperature)\n # We use 'teacher_input_batch' instead of 'input_batch' for 'student'\n # because the training of student network uses target transcripts for\n # the \"teacher forcing\" mode and here the target transcripts should come\n # from the teacher's beam search.\n student_predictions = self.student.ComputePredictions(\n theta.student, teacher_input_batch)\n return py_utils.NestedMap(\n teacher=teacher_predictions,\n student=student_predictions,\n teacher_beam_prob=teacher_beam_prob)\n else:\n raise ValueError('teacher target type not defined properly: %s' %\n self.p.teacher_target_type)\n\n def ComputeLoss(self, theta, input_batch, predictions):\n per_example = {}\n with tf.name_scope('groundtruth_loss'):\n groundtruth_loss, groundtruth_per_example = self.student.ComputeLoss(\n theta.student, input_batch, predictions.student)\n groundtruth_loss['groundtruth_loss'] = groundtruth_loss['loss']\n per_example.update(groundtruth_per_example)\n\n with tf.name_scope('distillation_loss'):\n distillation_loss, distill_per_example = self.ComputeDistillationLoss(\n theta, input_batch, predictions)\n distillation_loss['distillation_loss'] = distillation_loss['loss']\n per_example.update(distill_per_example)\n\n distillation_loss_weight = self.distillation_loss_weight.FProp(\n theta.distillation_loss_weight, self._global_step)\n metrics = py_utils.CombineMetrics([\n (groundtruth_loss, 1 - distillation_loss_weight),\n (distillation_loss, distillation_loss_weight),\n ])\n return metrics, per_example\n\n def ComputeDistillationLoss(self, theta, input_batch, predictions):\n raise NotImplementedError('Abstract method')\n\n def BProp(self):\n # Only bprop on student variables.\n self._BPropForVariables(self.student.vars)\n\n def Decode(self, input_batch):\n return self.student.Decode(input_batch)\n\n def Inference(self):\n return self.student.Inference()\n\n def CreateDecoderMetrics(self):\n return self.student.CreateDecoderMetrics()\n\n def 
PostProcessDecodeOut(self, dec_out_dict, dec_metrics_dict):\n return self.student.PostProcessDecodeOut(dec_out_dict, dec_metrics_dict)\n\n\nclass BaseModel(base_layer.BaseLayer):\n \"\"\"The abstract model class. All models are sub-class of this class.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super(BaseModel, cls).Params()\n p.Define(\n 'model', None, 'Which python function generates the param. It includes '\n 'the file name and lineno where the function is defined.')\n p.Define(\n 'cluster', cluster_factory.Cluster.Params(),\n 'The training cluster. Individual layer may config differently'\n ' based on training cluster it is running under.')\n p.Define('input', None, 'Input generator Params.')\n p.Define('build_data', build_data.BuildData(), 'Build data of this binary.')\n p.Define('train', hyperparams.Params(),\n 'Params to control how this model should be trained.')\n tp = p.train\n tp.Define(\n 'start_up_delay_steps', 200, 'i-th replica starts training after '\n 'i*(i+1)/2*start_up_delay_steps steps')\n tp.Define('max_steps', 4 * 10**6, 'Training max of 4M steps.')\n tp.Define('tpu_steps_per_loop', 100, 'The number of training steps per '\n 'training loop for TPUs.')\n tp.Define(\n 'ema_decay', 0.0,\n 'If > 0, enable ExponentialMovingAverage during training '\n 'with the give decay. '\n 'Must be < 1. Disabled if <= 0.')\n tp.Define('init_from_checkpoint_rules', {},\n 'See BaseTask documentation for details.')\n tp.Define('early_stop', None,\n 'Early stopping based on dev-set performance.')\n tp.Define('save_interval_seconds', 60 * 10,\n 'Generates a checkpoint roughly once every this many seconds.')\n tp.Define('summary_interval_steps', 100,\n 'Generates a checkpoint roughly once every this many steps.')\n\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n \"\"\"Initializes this Model.\"\"\"\n assert issubclass(params.cls, BaseModel)\n super(BaseModel, self).__init__(params)\n self._global_step = py_utils.GetOrCreateGlobalStep()\n # tasks are not yet instantiated.\n self._total_examples_sum = None\n\n self._ema = None\n tp = self.params.train\n tf.logging.info('Training parameters for %s: %s', params.cls, tp)\n if tp.ema_decay > 0:\n assert tp.ema_decay < 1.0\n self._ema = tf.train.ExponentialMovingAverage(\n decay=tp.ema_decay, num_updates=self._global_step)\n\n @property\n def global_step(self):\n assert self._global_step is not None, ('No global_step is defined.')\n return self._global_step\n\n @property\n def ema(self):\n return self._ema\n\n def ConstructFPropBPropGraph(self):\n raise NotImplementedError('Abstract method')\n\n def ConstructFPropGraph(self):\n raise NotImplementedError('Abstract method')\n\n @property\n def tasks(self):\n \"\"\"Returns a list of all tasks.\"\"\"\n raise NotImplementedError('Abstract method')\n\n def GetTask(self, task_name):\n \"\"\"Return the task associated with 'task_name'.\n\n Args:\n task_name: string, the name of the model task to be returned.\n\n Returns:\n An instance of `BaseTask`.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n @property\n def total_examples(self):\n \"\"\"Returns the total number of training examples processed so far.\"\"\"\n if self._total_examples_sum is None:\n self._total_examples_sum = tf.reduce_sum(\n [task.total_examples for task in self.tasks])\n return self._total_examples_sum\n\n def ProcessFPropResults(self, sess, global_step, metrics, per_example):\n \"\"\"Called once for each train loop.\n\n BaseTask.ProcessFPropResults is also called on each loop, so you\n can put your 
implementation wherever it is most convenient for you.\n\n Be sure to implement BaseTask.FilterPerExampleTensors if you plan to use any\n per-example tensors in this method.\n\n Args:\n sess: a session.\n global_step: approximate number of model training steps.\n metrics: the metrics dict returned by FPropTower.\n per_example: the per_example dict returned by FPropTower.\n \"\"\"\n pass\n\n\nclass SingleTaskModel(BaseModel):\n \"\"\"Model that consists of a single task.\"\"\"\n\n @classmethod\n def Params(cls, task_params=None):\n p = super(SingleTaskModel, cls).Params()\n p.Define('task', None, 'Task Params.')\n\n if task_params is not None:\n # Copy over model parameters from the task parameters.\n p.task = task_params\n base_layer.BaseLayer.CopyBaseParams(p.task, p)\n tp = p.train\n tp.start_up_delay_steps = p.task.train.start_up_delay_steps\n tp.max_steps = p.task.train.max_steps\n tp.tpu_steps_per_loop = p.task.train.tpu_steps_per_loop\n tp.ema_decay = p.task.train.ema_decay\n # init_from_checkpoint_rules does not need to be copied.\n tp.early_stop = p.task.train.early_stop\n tp.save_interval_seconds = p.task.train.save_interval_seconds\n tp.summary_interval_steps = p.task.train.summary_interval_steps\n\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n assert issubclass(params.cls, SingleTaskModel)\n assert params.task\n p = params.Copy() # Make a copy to avoid modifying the input.\n p.name = p.name or p.task.name\n p.task.name = p.task.name or p.name\n if p.input:\n assert not p.task.input\n p.task.input = p.input\n else:\n assert p.task.input\n p.input = p.task.input\n\n super(SingleTaskModel, self).__init__(p)\n\n p = self.params\n self.CreateChild('_task', p.task)\n\n @property\n def tasks(self):\n return [self._task]\n\n def GetTask(self, task_name=None):\n assert not task_name, 'Must not specify >task_name< for single-task model.'\n return self._task\n\n def SampleTask(self, global_step):\n return self._task\n\n def ConstructFPropBPropGraph(self):\n self._task.FPropDefaultTheta()\n self._task.BProp()\n if self.ema:\n tf.logging.info('ApplyExponentialMovingAverage on %s', self._task)\n self._task.ApplyExponentialMovingAverage(self.ema)\n\n def ConstructFPropGraph(self):\n self._task.FPropDefaultTheta()\n\n\nclass MultiTaskModel(BaseModel):\n \"\"\"Model that consists of multiple tasks.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super(MultiTaskModel, cls).Params()\n p.Define('task_params', hyperparams.Params(),\n 'Params object mapping task name to task Params.')\n p.Define(\n 'task_probs', hyperparams.Params(),\n 'Params object mapping task name to the relative likelihood the '\n 'task will be sampled during training.')\n p.Define('task_schedule', None, 'Task schedule.')\n p.Define(\n 'task_global_step', False,\n 'Whether or not to use task-specific global steps, which causes each '\n 'task to use its own global_step instead of the true global_step.')\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n assert issubclass(params.cls, MultiTaskModel)\n super(MultiTaskModel, self).__init__(params)\n p = self.params\n assert len(p.task_params) > 1\n\n # Pass input params to tasks.\n assert isinstance(p.input, hyperparams.Params)\n assert set(dir(p.input)) == set(dir(p.task_params))\n for k, v in p.task_params.IterParams():\n assert isinstance(v, hyperparams.Params)\n assert not v.input\n v.input = p.input.Get(k)\n\n # For compatibility with older API (with p.task_probs)\n if p.task_schedule is None:\n p.task_schedule = 
task_scheduler.ConstantScheduler.Params()\n p.task_schedule.task_probs = sorted(list(p.task_probs.IterParams()))\n\n # CreateChild copies over global configs in p to individual task params,\n # which then gets propagated down to all sub-layers during\n # BaseTask._PropagateDownGlobalConfigs(), or through sub-sequent CreateChild\n # or CreateChildren calls.\n with tf.name_scope(p.name):\n sorted_task_params = sorted(\n (task_name, task_params)\n for task_name, task_params in p.task_params.IterParams())\n for task_name, task_params in sorted_task_params:\n if p.task_global_step:\n assert task_name == task_params.name\n CreateTaskGlobalStep(task_name)\n # Make sure each task is under its own variable scope.\n with tf.variable_scope(task_name):\n self.CreateChild(task_name, task_params)\n self.CreateChild('task_schedule', p.task_schedule)\n\n @property\n def task_names(self):\n sorted_task_names = sorted(\n task_name for task_name, _ in self.params.task_params.IterParams())\n return sorted_task_names\n\n @property\n def tasks(self):\n return [self.children[name] for name in self.task_names]\n\n def GetTask(self, task_name):\n assert task_name, 'Must specify >task_name< for multi-task model.'\n return self.children[task_name]\n\n def SampleTask(self, global_step):\n \"\"\"Sample a task according self.task_schedule.\n\n `self.task_schedule.cur_probs` will also be updated.\n\n Args:\n global_step: int. Current time step.\n \"\"\"\n sampled_task = self.task_schedule.Sample(global_step)\n tf.logging.info('Sampled task: %s', sampled_task)\n return self.children[sampled_task]\n\n def ConstructFPropBPropGraph(self):\n for task_name in self.task_names:\n with tf.name_scope(task_name):\n task = self.GetTask(task_name)\n task.FPropDefaultTheta()\n task.BProp()\n if self.ema:\n task.ApplyExponentialMovingAverage(self.ema)\n\n def ConstructFPropGraph(self):\n for task_name in self.task_names:\n with tf.name_scope(task_name):\n task = self.GetTask(task_name)\n task.FPropDefaultTheta()\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Send/Recv ops.\n\nThe following _Send()/_Recv() are adapted from python op wrappers\ngenerated by python_op_gen_main. 
python_op_gen_main.cc's\nPrintAllPythonOps needs to be updated to export internal ops.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom google.protobuf import text_format as _text_format\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\nfrom tensorflow.python.framework import op_def_library as _op_def_library\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\n\ndef _Recv(tensor_type, tensor_name, send_device, recv_device, name=None):\n r\"\"\"Receives the named tensor from send_device on recv_device.\n\n Args:\n tensor_type: A `tf.DType`.\n tensor_name: A `string`. The name of the tensor to receive.\n send_device: A `string`. The name of the device sending the tensor.\n recv_device: A `string`. The name of the device receiving the tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `tensor_type`. The tensor to receive.\n \"\"\"\n result = _op_def_lib.apply_op(\n \"_Recv\",\n tensor_type=tensor_type,\n tensor_name=tensor_name,\n send_device=send_device,\n send_device_incarnation=0,\n recv_device=recv_device,\n client_terminated=False,\n name=name if name else \"Recv\")\n return result\n\n\n_ops.RegisterShape(\"_Recv\")(None)\n\n\ndef _Send(tensor, tensor_name, send_device, recv_device, name=None):\n r\"\"\"Sends the named tensor from send_device to recv_device.\n\n Args:\n tensor: A `Tensor`. The tensor to send.\n tensor_name: A `string`. The name of the tensor to send.\n send_device: A `string`. The name of the device sending the tensor.\n recv_device: A `string`. The name of the device receiving the tensor.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n \"\"\"\n result = _op_def_lib.apply_op(\n \"_Send\",\n tensor=tensor,\n tensor_name=tensor_name,\n send_device=send_device,\n send_device_incarnation=0,\n recv_device=recv_device,\n client_terminated=False,\n name=name if name else \"Send\")\n return result\n\n\n_ops.RegisterShape(\"_Send\")(None)\n\n\ndef _XlaSend(tensor, tensor_name, name=None):\n r\"\"\"Sends the named tensor from send_device to recv_device.\n\n Args:\n tensor: A `Tensor`. The tensor to send.\n tensor_name: A `string`. The name of the tensor to send.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n \"\"\"\n result = _op_def_lib.apply_op(\n \"XlaSend\",\n tensor=tensor,\n tensor_name=tensor_name,\n name=name if name else \"XlaSend\")\n return result\n\n\ndef _XlaRecv(dtype, tensor_name, shape, name=None):\n r\"\"\"Sends the named tensor from send_device to recv_device.\n\n Args:\n dtype: A `tf.DType`.\n tensor_name: A `string`. The name of the tensor to receive.\n shape: A `tf.TensorShape` or list of `ints`. 
The shape of the input tensor.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n \"\"\"\n result = _op_def_lib.apply_op(\n \"XlaRecv\",\n dtype=dtype,\n shape=shape,\n tensor_name=tensor_name,\n name=name if name else \"XlaRecv\")\n return result\n\n\ndef _InitOpDefLibrary():\n op_list = _op_def_pb2.OpList()\n _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n\n\n_InitOpDefLibrary.op_list_ascii = \"\"\"op {\n name: \"_Recv\"\n output_arg {\n name: \"tensor\"\n type_attr: \"tensor_type\"\n }\n attr {\n name: \"tensor_type\"\n type: \"type\"\n }\n attr {\n name: \"tensor_name\"\n type: \"string\"\n }\n attr {\n name: \"send_device\"\n type: \"string\"\n }\n attr {\n name: \"send_device_incarnation\"\n type: \"int\"\n }\n attr {\n name: \"recv_device\"\n type: \"string\"\n }\n attr {\n name: \"client_terminated\"\n type: \"bool\"\n default_value {\n b: false\n }\n }\n is_stateful: true\n}\nop {\n name: \"_Send\"\n input_arg {\n name: \"tensor\"\n type_attr: \"T\"\n }\n attr {\n name: \"T\"\n type: \"type\"\n }\n attr {\n name: \"tensor_name\"\n type: \"string\"\n }\n attr {\n name: \"send_device\"\n type: \"string\"\n }\n attr {\n name: \"send_device_incarnation\"\n type: \"int\"\n }\n attr {\n name: \"recv_device\"\n type: \"string\"\n }\n attr {\n name: \"client_terminated\"\n type: \"bool\"\n default_value {\n b: false\n }\n }\n is_stateful: true\n}\nop {\n name: \"XlaRecv\"\n output_arg {\n name: \"tensor\"\n type_attr: \"dtype\"\n }\n attr {\n name: \"dtype\"\n type: \"type\"\n }\n attr {\n name: \"tensor_name\"\n type: \"string\"\n }\n attr {\n name: \"shape\"\n type: \"shape\"\n }\n is_stateful: true\n}\nop {\n name: \"XlaSend\"\n input_arg {\n name: \"tensor\"\n type_attr: \"T\"\n }\n attr {\n name: \"T\"\n type: \"type\"\n }\n attr {\n name: \"tensor_name\"\n type: \"string\"\n }\n is_stateful: true\n}\n\"\"\"\n\n_op_def_lib = _InitOpDefLibrary()\n\n\ndef _TpuCore(device):\n \"\"\"Returns the TPU core represented by <device>, or -1 if not TPU.\"\"\"\n prefix = \"device:TPU_REPLICATED_CORE:\"\n if prefix in device:\n return int(device[len(prefix):])\n return -1\n\n\nclass Channel(object):\n \"\"\"A communication channel to transfer tensors in order.\"\"\"\n\n def __init__(self, dtype, shape, send_device, recv_device, name=None):\n \"\"\"Construct a channel.\n\n Args:\n dtype: The dtype of tensors sent through the channel.\n shape: The shape of tensors sent through the channel. 
Must be a fully\n defined shape for TPUs.\n send_device: A fully-specified tensorflow device.\n recv_device: A fully-specified tensorflow device.\n name: A name for the channel (optional).\n \"\"\"\n current_graph = _ops.get_default_graph()\n assert current_graph, \"A channel is scoped within a tf.Graph\"\n self._dtype = dtype\n self._send_device = send_device\n self._recv_device = recv_device\n self._name = current_graph.unique_name(name if name else \"channel\")\n\n assert shape is not None\n shape = _tensor_shape.TensorShape(shape)\n\n self._shape = shape\n self._send_tpu_core = _TpuCore(send_device)\n self._recv_tpu_core = _TpuCore(recv_device)\n self._send_called = False\n self._recv_op = None\n assert ((self._send_tpu_core == -1) == (self._recv_tpu_core == -1)), (\n \"Mixing TPU and non-TPU: %s and %s\" % (send_device, recv_device))\n if self._send_tpu_core >= 0:\n assert self._shape.is_fully_defined(), (\n \"TPU channel must have fully defined shape. Name: %s, shape: %s\" %\n (self._name, self._shape))\n assert self._send_tpu_core != self._recv_tpu_core, (\n \"TPU send/recv must be cross-core: %s and %s\" %\n (send_device, recv_device))\n\n def Send(self, tensor):\n \"\"\"Sends a tensor through the channel.\"\"\"\n assert tensor.dtype == self._dtype\n assert not self._send_called, (\n \"Send called multiple times for %s\" % self._name)\n self._send_called = True\n if self._send_tpu_core == -1:\n return _Send(tensor, self._name, self._send_device, self._recv_device)\n else:\n with _ops.device(self._send_device):\n return _XlaSend(\n tensor, tensor_name=self._name, name=\"Send_\" + self._name)\n\n def Recv(self):\n \"\"\"Receives a tensor from the channel.\"\"\"\n if self._send_tpu_core == -1:\n return _Recv(self._dtype, self._name,\n self._send_device, self._recv_device)\n else:\n with _ops.device(self._recv_device):\n return _XlaRecv(\n self._dtype,\n tensor_name=self._name,\n shape=self._shape,\n name=\"Recv_\" + self._name)\n" ]
[ [ "tensorflow.group", "tensorflow.assign_add", "tensorflow.control_dependencies", "tensorflow.logging.warning", "tensorflow.cast", "tensorflow.no_op", "tensorflow.trainable_variables", "tensorflow.contrib.model_pruning.Pruning", "tensorflow.logging.info", "tensorflow.colocate_with", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.imag", "tensorflow.moving_average_variables", "tensorflow.get_collection", "tensorflow.minimum", "tensorflow.where", "tensorflow.to_int64", "tensorflow.contrib.model_pruning.get_pruning_hparams", "tensorflow.name_scope", "tensorflow.reduce_sum", "tensorflow.to_int32", "tensorflow.is_inf", "tensorflow.real", "tensorflow.is_nan", "tensorflow.logical_or", "tensorflow.train.ExponentialMovingAverage", "tensorflow.device", "tensorflow.logging.debug" ], [ "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.framework.ops.RegisterShape", "tensorflow.core.framework.op_def_pb2.OpList", "tensorflow.python.framework.op_def_library.OpDefLibrary", "tensorflow.python.framework.op_def_registry.register_op_list", "tensorflow.python.framework.ops.device", "tensorflow.python.framework.ops.get_default_graph" ] ]
hdoupe/Tax-Cruncher
[ "e263bcf8643d747d85855bfc2af2faba519a9ace" ]
[ "cs-config/cs_config/functions.py" ]
[ "import os\nimport json\nimport traceback\nimport paramtools\nimport pandas as pd\nimport inspect\nfrom .outputs import credit_plot, rate_plot, liability_plot\nfrom .constants import MetaParameters\nfrom . import inputs\nfrom bokeh.models import ColumnDataSource\nfrom taxcrunch.cruncher import Cruncher, CruncherParams\nfrom taxcrunch.multi_cruncher import Batch\nimport taxcrunch\nfrom taxcalc import Policy\nfrom collections import OrderedDict\n\nTCPATH = inspect.getfile(Policy)\nTCDIR = os.path.dirname(TCPATH)\n\nwith open(os.path.join(TCDIR, \"policy_current_law.json\"), \"r\") as f:\n pcl = json.loads(f.read())\n\n\ndef fix_checkbox(params):\n \"\"\"\n Replace param_checkbox with param-indexed.\n \"\"\"\n pol_params = {}\n # drop checkbox parameters.\n for param, data in params.items():\n if param.endswith(\"checkbox\"):\n base_param = param.split(\"_checkbox\")[0]\n pol_params[f\"{base_param}-indexed\"] = data\n else:\n pol_params[param] = data\n\n return pol_params\n\ndef get_version():\n version = taxcrunch.__version__\n return f\"Tax-Cruncher v{version}\"\n\n\ndef get_inputs(meta_params_dict):\n \"\"\"\n Return default parameters from Tax-Cruncher\n \"\"\"\n metaparams = MetaParameters()\n metaparams.adjust(meta_params_dict)\n\n params = CruncherParams()\n policy_params = Policy()\n\n policy_params.set_state(\n year=metaparams.year.tolist())\n\n policy_params.array_first = False\n # Hack to work smoothly with convert_policy_defaults since\n # it expects a data_source attribute.\n metaparams.data_source = \"CPS\"\n filtered_pol_params = inputs.convert_policy_defaults(metaparams, policy_params)\n\n keep = [\n \"mstat\",\n \"page\",\n \"sage\",\n \"dep13\",\n \"dep17\",\n \"dep18\",\n \"otherdep\",\n \"pwages\",\n \"swages\",\n \"dividends\",\n \"intrec\",\n \"stcg\",\n \"ltcg\",\n \"otherprop\",\n \"nonprop\",\n \"pensions\",\n \"gssi\",\n \"ui\",\n \"proptax\",\n \"otheritem\",\n \"childcare\",\n \"mortgage\",\n \"businc\",\n \"sstb\",\n \"w2paid\",\n \"qualprop\",\n \"mtr_options\",\n \"schema\"\n ]\n cruncher_dict = params.dump()\n\n default_params = {\n \"Tax Information\": {k: v for k, v in cruncher_dict.items() if k in keep},\n \"Policy\": filtered_pol_params\n }\n\n meta = metaparams.dump()\n\n return {\"meta_parameters\": meta, \"model_parameters\": default_params}\n\n\ndef validate_inputs(meta_params_dict, adjustment, errors_warnings):\n params = CruncherParams()\n params.adjust(adjustment[\"Tax Information\"], raise_errors=False)\n errors_warnings[\"Tax Information\"][\"errors\"].update(params.errors)\n\n policy_adj = inputs.convert_policy_adjustment(adjustment[\"Policy\"])\n\n policy_params = Policy()\n policy_params.adjust(policy_adj, raise_errors=False, ignore_warnings=True)\n errors_warnings[\"Policy\"][\"errors\"].update(policy_params.errors)\n\n return {\"errors_warnings\": errors_warnings}\n\n\ndef run_model(meta_params_dict, adjustment):\n meta_params = MetaParameters()\n meta_params.adjust(meta_params_dict)\n\n adjustment[\"Tax Information\"][\"year\"] = meta_params.year\n params = CruncherParams()\n params.adjust(adjustment[\"Tax Information\"], raise_errors=False)\n newvals = params.specification()\n\n policy_mods = inputs.convert_policy_adjustment(adjustment[\"Policy\"])\n\n crunch = Cruncher(inputs=newvals, custom_reform=policy_mods)\n\n # make dataset for bokeh plots\n ivar = crunch.batch_ivar\n _, mtr_opt, _ = crunch.taxsim_inputs()\n df = pd.concat([ivar] * 10000, ignore_index=True)\n increments = pd.DataFrame(list(range(0, 2000000, 200)))\n\n # use 
Calculation Option to determine what var to increment\n if mtr_opt == 'Taxpayer Earnings':\n span = int(ivar[9])\n df[9] = increments\n elif mtr_opt == 'Spouse Earnings':\n span = int(ivar[10])\n df[10] = increments\n elif mtr_opt == 'Qualified Dividends':\n span = int(ivar[11])\n df[11] = increments\n elif mtr_opt == 'Interest Received':\n span = int(ivar[12])\n df[12] = increments\n elif mtr_opt == 'Short Term Gains':\n span = int(ivar[13])\n df[13] = increments\n elif mtr_opt == 'Long Term Gains':\n span = int(ivar[14])\n df[14] = increments\n elif mtr_opt == 'Business Income':\n span = int(ivar[15])\n df[15] = increments\n elif mtr_opt == 'Pensions':\n span = int(ivar[21])\n df[21] = increments\n elif mtr_opt == 'Gross Social Security Benefits':\n span = int(ivar[22])\n df[22] = increments\n elif mtr_opt == 'Real Estate Taxes Paid':\n span = int(ivar[24])\n df[24] = increments\n elif mtr_opt == 'Mortgage':\n span = int(ivar[27])\n df[27] = increments\n\n\n b = Batch(df)\n df_base = b.create_table()\n df_reform = b.create_table(policy_mods)\n\n # compute average tax rates\n df_base['IATR'] = df_base['Individual Income Tax'] / df_base['AGI']\n df_base['PATR'] = df_base['Payroll Tax'] / df_base['AGI']\n df_reform['IATR'] = df_reform['Individual Income Tax'] / df_reform['AGI']\n df_reform['PATR'] = df_reform['Payroll Tax'] / df_reform['AGI']\n df_base['Axis'] = increments\n df_reform['Axis'] = increments\n\n return comp_output(crunch, df_base, df_reform, span, mtr_opt)\n\n\ndef comp_output(crunch, df_base, df_reform, span, mtr_opt):\n\n liabilities = liability_plot(df_base, df_reform, span, mtr_opt)\n rates = rate_plot(df_base, df_reform, span, mtr_opt)\n credits = credit_plot(df_base, df_reform, span, mtr_opt)\n\n basic = crunch.basic_table()\n detail = crunch.calc_table()\n\n table_basic = basic.to_html(\n classes=\"table table-striped table-hover text-right\"\n )\n table_detail = detail.to_html(\n classes=\"table table-striped table-hover text-right\"\n )\n\n comp_dict = {\n \"renderable\": [\n {\"media_type\": \"table\", \"title\": \"Basic Liabilities\", \"data\": table_basic},\n liabilities, rates, credits,\n {\n \"media_type\": \"table\",\n \"title\": \"Calculation of Liabilities\",\n \"data\": table_detail,\n },\n ],\n \"downloadable\": [\n {\n \"media_type\": \"CSV\",\n \"title\": \"basic_table\",\n \"data\": basic.to_csv(),\n },\n {\n \"media_type\": \"CSV\",\n \"title\": \"calculation_table\",\n \"data\": detail.to_csv(),\n },\n ],\n }\n return comp_dict\n" ]
[ [ "pandas.concat" ] ]
shijun18/TMLI-PLAN
[ "0097d5674852eba75487b153600fc1cd5518b2a8" ]
[ "model/att_unet.py" ]
[ "import torch.nn as nn\nfrom typing import Optional, Union, List\nfrom .model_config import MODEL_CONFIG\nfrom .decoder.att_unet import AttUnetDecoder\nfrom .get_encoder import build_encoder\nfrom .base_model import SegmentationModel\nfrom .lib import SynchronizedBatchNorm2d\nBatchNorm2d = SynchronizedBatchNorm2d\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.shape[0], -1)\n\n\nclass AttUnet(SegmentationModel):\n \"\"\"AttUnet is a fully convolution neural network for image semantic segmentation. Consist of *encoder* \n and *decoder* parts connected with *skip connections*. Encoder extract features of different spatial \n resolution (skip connections) which are used by decoder to define accurate segmentation mask. Use *concatenation*\n for fusing decoder blocks with skip connections.\n Args:\n in_channels: A number of input channels for the model, default is 3 (RGB images)\n encoder_name: Name of the classification model that will be used as an encoder (a.k.a backbone)\n to extract features of different spatial resolution\n encoder_weights: One of **None** (random initialization), **\"imagenet\"** (pre-training on ImageNet) and \n other pretrained weights (see table with available weights for each encoder_name)\n encoder_depth: A number of stages used in encoder in range [3, 5]. Each stage generate features \n two times smaller in spatial dimensions than previous one (e.g. for depth 0 we will have features\n with shapes [(N, C, H, W),], for depth 1 - [(N, C, H, W), (N, C, H // 2, W // 2)] and so on).\n Default is 5\n encoder_channels: List of integers which specify **out_channels** parameter for convolutions used in encoder.\n Length of the list should be the same as **encoder_depth**\n decoder_use_batchnorm: If **True**, BatchNormalization layer between Conv2D and Activation layers is used.\n Available options are **True, False**.\n decoder_attention_type: Attention module used in decoder of the model. 
Available options are **None** and **scse**.\n SCSE paper - https://arxiv.org/abs/1808.08127\n decoder_channels: List of integers which specify **in_channels** parameter for convolutions used in decoder.\n Length of the list should be the same as **encoder_depth**\n upsampling: Int number of upsampling factor for segmentation head, default=1 \n classes: A number of classes for output mask (or you can think as a number of channels of output mask)\n aux_classifier: If **True**, add a classification branch based the last feature of the encoder.\n Available options are **True, False**.\n Returns:\n ``torch.nn.Module``: AttUnet\n \"\"\"\n\n def __init__(\n self,\n in_channels: int = 3,\n encoder_name: str = \"simplenet\",\n encoder_weights: Optional[str] = None,\n encoder_depth: int = 5,\n encoder_channels: List[int] = [32,64,128,256,512],\n decoder_use_batchnorm: bool = True,\n decoder_attention_type: Optional[str] = None,\n decoder_channels: List[int] = (256,128,64,32),\n upsampling: int = 1,\n classes: int = 1,\n aux_classifier: bool = False,\n ):\n super().__init__()\n\n self.encoder_depth = encoder_depth\n self.encoder_channels = encoder_channels\n\n self.encoder = build_encoder(\n encoder_name,\n weights=encoder_weights,\n n_channels=in_channels\n )\n\n self.decoder = AttUnetDecoder(\n encoder_channels=self.encoder_channels,\n decoder_channels=decoder_channels,\n n_blocks=self.encoder_depth - 1, # the number of decoder block, = encoder_depth - 1 \n use_batchnorm=decoder_use_batchnorm,\n norm_layer=BatchNorm2d,\n center=False,\n attention_type=decoder_attention_type\n )\n\n self.segmentation_head = nn.Sequential(\n nn.UpsamplingBilinear2d(scale_factor=upsampling) if upsampling > 1 else nn.Identity(),\n nn.Conv2d(decoder_channels[-1], classes, kernel_size=3, padding=1)\n )\n\n if aux_classifier:\n self.classification_head = nn.Sequential(\n nn.AdaptiveAvgPool2d(1),\n Flatten(),\n nn.Dropout(p=0.2, inplace=True),\n nn.Linear(self.encoder_channels[-1], classes - 1, bias=True)\n )\n else:\n self.classification_head = None\n\n self.name = \"u-{}\".format(encoder_name)\n self.initialize()\n\n\n\n\ndef att_unet(model_name,encoder_name,**kwargs):\n params = MODEL_CONFIG[model_name][encoder_name]\n dynamic_params = kwargs\n for key in dynamic_params:\n if key in params:\n params[key] = dynamic_params[key]\n\n net = AttUnet(**params)\n return net\n\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.Identity", "torch.nn.UpsamplingBilinear2d", "torch.nn.Conv2d", "torch.nn.AdaptiveAvgPool2d" ] ]
missakaherath/RoBERTaABSA
[ "6772fdd892684d8d044a6a1cf815ae5d4a8dcd89" ]
[ "ASGCN/models/ascnn.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom layers.dynamic_rnn import DynamicLSTM\n\nclass ASCNN(nn.Module):\n def __init__(self, embedding_matrix, opt):\n super(ASCNN, self).__init__()\n self.opt = opt\n self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))\n self.text_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)\n self.conv1 = nn.Conv1d(2*opt.hidden_dim, 2*opt.hidden_dim, 3, padding=1)\n self.conv2 = nn.Conv1d(2*opt.hidden_dim, 2*opt.hidden_dim, 3, padding=1)\n self.fc = nn.Linear(2*opt.hidden_dim, opt.polarities_dim)\n self.text_embed_dropout = nn.Dropout(0.3)\n\n def position_weight(self, x, aspect_double_idx, text_len, aspect_len):\n batch_size = x.shape[0]\n seq_len = x.shape[1]\n aspect_double_idx = aspect_double_idx.cpu().numpy()\n text_len = text_len.cpu().numpy()\n aspect_len = aspect_len.cpu().numpy()\n weight = [[] for i in range(batch_size)]\n for i in range(batch_size):\n context_len = text_len[i] - aspect_len[i]\n for j in range(aspect_double_idx[i,0]):\n weight[i].append(1-(aspect_double_idx[i,0]-j)/context_len)\n for j in range(aspect_double_idx[i,0], aspect_double_idx[i,1]+1):\n weight[i].append(0)\n for j in range(aspect_double_idx[i,1]+1, text_len[i]):\n weight[i].append(1-(j-aspect_double_idx[i,1])/context_len)\n for j in range(text_len[i], seq_len):\n weight[i].append(0)\n weight = torch.tensor(weight).unsqueeze(2).to(self.opt.device)\n return weight*x\n\n def mask(self, x, aspect_double_idx):\n batch_size, seq_len = x.shape[0], x.shape[1]\n aspect_double_idx = aspect_double_idx.cpu().numpy()\n mask = [[] for i in range(batch_size)]\n for i in range(batch_size):\n for j in range(aspect_double_idx[i,0]):\n mask[i].append(0)\n for j in range(aspect_double_idx[i,0], aspect_double_idx[i,1]+1):\n mask[i].append(1)\n for j in range(aspect_double_idx[i,1]+1, seq_len):\n mask[i].append(0)\n mask = torch.tensor(mask).unsqueeze(2).float().to(self.opt.device)\n return mask*x\n\n def forward(self, inputs):\n text_indices, aspect_indices, left_indices = inputs\n text_len = torch.sum(text_indices != 0, dim=1)\n aspect_len = torch.sum(aspect_indices != 0, dim=-1)\n left_len = torch.sum(left_indices != 0, dim=-1)\n aspect_double_idx = torch.cat([left_len.unsqueeze(1), (left_len+aspect_len-1).unsqueeze(1)], dim=1)\n text = self.embed(text_indices)\n text = self.text_embed_dropout(text)\n text_out, (_, _) = self.text_lstm(text, text_len)\n x = F.relu(self.conv1(self.position_weight(text_out, aspect_double_idx, text_len, aspect_len).transpose(1,2)))\n x = F.relu(self.conv2(self.position_weight(x.transpose(1,2), aspect_double_idx, text_len, aspect_len).transpose(1,2)))\n x = self.mask(x.transpose(1,2), aspect_double_idx)\n alpha_mat = torch.matmul(x, text_out.transpose(1, 2))\n alpha = F.softmax(alpha_mat.sum(1, keepdim=True), dim=2)\n x = torch.matmul(alpha, text_out).squeeze(1) # batch_size x 2*hidden_dim\n output = self.fc(x)\n return output\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.Conv1d", "torch.tensor", "torch.matmul", "torch.sum" ] ]
anubhakabra/MoH-Hate-Speech-Detection
[ "042e98fef3035b515cf8ab9c772ff39bd240a3a3" ]
[ "model/model.py" ]
[ "import datetime\n\nimport keras\nimport numpy as np\nimport tokenization\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\nfrom config import *\n\n\ndef model_train(model_type, train, test, is_training=False):\n\n if model_type == \"bert\":\n bert_layer = hub.KerasLayer(mBERT_MODULE_URL, trainable=True)\n else:\n bert_layer = hub.KerasLayer(MuRIL_MODULE_URL, trainable=True)\n vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()\n do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()\n tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)\n\n test_input = bert_encode(test.text.values, tokenizer, max_len=MAX_SEQ_LENGTH)\n label_list = list(range(len(train[\"label\"].unique())))\n model = build_model(bert_layer, num_classes=len(label_list))\n\n if is_training:\n train_input = bert_encode(train.text.values, tokenizer, max_len=MAX_SEQ_LENGTH)\n train_labels = keras.utils.to_categorical(\n train.label.values, num_classes=len(label_list)\n )\n\n checkpoint = tf.keras.callbacks.ModelCheckpoint(\n f\"{model_type}_model_{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}.h5\",\n monitor=\"val_accuracy\",\n save_best_only=True,\n verbose=1,\n )\n earlystopping = tf.keras.callbacks.EarlyStopping(\n monitor=\"val_accuracy\", patience=5, verbose=1\n )\n\n model.fit(\n train_input,\n train_labels,\n epochs=NUM_TRAIN_EPOCHS,\n callbacks=[checkpoint, earlystopping],\n batch_size=BATCH_SIZE,\n verbose=1,\n )\n else:\n model.load_weights(f\"{model_type}_model.h5\")\n\n return model, test_input\n\n\ndef bert_encode(texts, tokenizer, max_len=512):\n all_tokens = []\n all_masks = []\n all_segments = []\n\n for text in texts:\n text = tokenizer.tokenize(text)\n\n text = text[: max_len - 2]\n input_sequence = [\"[CLS]\"] + text + [\"[SEP]\"]\n pad_len = max_len - len(input_sequence)\n\n tokens = tokenizer.convert_tokens_to_ids(input_sequence) + [0] * pad_len\n pad_masks = [1] * len(input_sequence) + [0] * pad_len\n segment_ids = [0] * max_len\n\n all_tokens.append(tokens)\n all_masks.append(pad_masks)\n all_segments.append(segment_ids)\n\n return np.array(all_tokens), np.array(all_masks), np.array(all_segments)\n\n\ndef build_model(bert_layer, num_classes):\n\n if num_classes == 2:\n loss = \"binary_crossentropy\"\n else:\n loss = \"categorical_crossentropy\"\n\n inputs = dict(\n input_word_ids=tf.keras.layers.Input(shape=(MAX_SEQ_LENGTH,), dtype=tf.int32),\n input_mask=tf.keras.layers.Input(shape=(MAX_SEQ_LENGTH,), dtype=tf.int32),\n input_type_ids=tf.keras.layers.Input(shape=(MAX_SEQ_LENGTH,), dtype=tf.int32),\n )\n\n output = bert_layer(inputs)\n clf_output = output[\"sequence_output\"][:, 0, :]\n net = tf.keras.layers.Dense(64, activation=\"relu\")(clf_output)\n net = tf.keras.layers.Dropout(0.2)(net)\n net = tf.keras.layers.Dense(BATCH_SIZE, activation=\"relu\")(net)\n net = tf.keras.layers.Dropout(0.2)(net)\n out = tf.keras.layers.Dense(num_classes, activation=\"softmax\")(net)\n\n model = tf.keras.models.Model(inputs=inputs, outputs=out)\n model.compile(\n tf.keras.optimizers.Adam(lr=LEARNING_RATE),\n loss=loss,\n metrics=[\"accuracy\"],\n )\n\n return model\n" ]
[ [ "numpy.array", "tensorflow.keras.layers.Input", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Dropout", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.callbacks.EarlyStopping" ] ]
TrueNobility303/rainbow
[ "b976f1629fd30a28aaae68f7a453e4d5f2b5b59d" ]
[ "6_dist.py" ]
[ "import os\nfrom typing import Dict, List, Tuple\nimport gym\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom net.replay import ReplayBuffer \nfrom net.utils import initial_seed\n\ninitial_seed(42)\n\n#REF:https://github.com/Curt-Park/rainbow-is-all-you-need/\n\n#网络计算分布而非期望值\nclass Network(nn.Module):\n def __init__(\n self, \n in_dim: int, \n out_dim: int, \n atom_size: int, \n support: torch.Tensor\n ):\n super(Network, self).__init__()\n\n self.support = support\n self.out_dim = out_dim\n self.atom_size = atom_size\n \n self.layers = nn.Sequential(\n nn.Linear(in_dim, 128), \n nn.ReLU(),\n nn.Linear(128, 128), \n nn.ReLU(), \n nn.Linear(128, out_dim * atom_size)\n )\n\n #根据分布返回期望值\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n dist = self.dist(x)\n q = torch.sum(dist * self.support, dim=2)\n \n return q\n \n #返回分布\n def dist(self, x: torch.Tensor) -> torch.Tensor:\n q_atoms = self.layers(x).view(-1, self.out_dim, self.atom_size)\n dist = F.softmax(q_atoms, dim=-1)\n dist = dist.clamp(min=1e-3) # for avoiding nans\n \n return dist\n\nclass DQNAgent:\n def __init__(\n self, \n env: gym.Env,\n memory_size: int,\n batch_size: int,\n target_update: int,\n epsilon_decay: float,\n max_epsilon: float = 1.0,\n min_epsilon: float = 0.1,\n gamma: float = 0.99,\n ):\n obs_dim = env.observation_space.shape[0]\n action_dim = env.action_space.n\n \n self.env = env\n self.memory = ReplayBuffer(obs_dim, memory_size, batch_size)\n self.batch_size = batch_size\n self.epsilon = max_epsilon\n self.epsilon_decay = epsilon_decay\n self.max_epsilon = max_epsilon\n self.min_epsilon = min_epsilon\n self.target_update = target_update\n self.gamma = gamma\n \n self.device = torch.device('cuda:0')\n\n # Categorical DQN parameters:分布相关参数\n\n self.v_min = 0\n self.v_max = 200\n self.atom_size = 51\n self.support = torch.linspace(\n self.v_min, self.v_max, self.atom_size\n ).to(self.device)\n\n # networks: dqn, dqn_target\n\n #DQN网络:根据输入的state输出每个动作的Qvalue,从而选择该动作\n self.dqn = Network(obs_dim, action_dim,self.atom_size, self.support).to(self.device)\n self.dqn_target = Network(obs_dim, action_dim,self.atom_size, self.support).to(self.device)\n self.dqn_target.load_state_dict(self.dqn.state_dict())\n self.dqn_target.eval()\n \n # optimizer\n self.optimizer = optim.Adam(self.dqn.parameters())\n\n # transition to store in memory\n self.transition = list()\n \n # mode: train / test\n self.is_test = False\n\n #使用DQN,根据输入state,依据epsilon greegy策略选择action\n def select_action(self, state: np.ndarray) -> np.ndarray:\n # epsilon greedy policy\n if self.epsilon > np.random.random():\n selected_action = self.env.action_space.sample()\n else:\n selected_action = self.dqn(\n torch.FloatTensor(state).to(self.device)\n ).argmax()\n selected_action = selected_action.detach().cpu().numpy()\n \n #训练阶段,将state,action对放入内存\n if not self.is_test:\n self.transition = [state, selected_action]\n \n return selected_action\n\n #根据action返回env的response\n def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:\n next_state, reward, done, _ = self.env.step(action)\n\n #训练阶段将reward,next_state,done记录进内存并且放入回放记忆中\n if not self.is_test:\n self.transition += [reward, next_state, done]\n self.memory.store(*self.transition)\n \n return next_state, reward, done\n\n #使用Adam优化器更新DQN\n def update_model(self) -> torch.Tensor:\n samples = self.memory.sample_batch()\n loss = self._compute_dqn_loss(samples)\n\n 
self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n return loss.item()\n \n def train(self, num_frames: int, plotting_interval: int = 200):\n self.is_test = False\n \n state = self.env.reset()\n update_cnt = 0\n epsilons = []\n losses = []\n scores = []\n score = 0\n\n life_cnt = 0\n for frame_idx in range(1, num_frames + 1):\n #使用DQN进行action和step并且获取reward等response信息\n action = self.select_action(state)\n next_state, reward, done = self.step(action)\n\n state = next_state\n score += reward\n\n # if episode ends\n if done:\n life_cnt += 1\n print('life',life_cnt,'score',score)\n state = self.env.reset()\n\n scores.append(score)\n score = 0\n\n # if training is ready\n if len(self.memory) >= self.batch_size:\n #更新DQN\n loss = self.update_model()\n losses.append(loss)\n update_cnt += 1\n \n # linearly decrease epsilon\n self.epsilon = max(\n self.min_epsilon, self.epsilon - (\n self.max_epsilon - self.min_epsilon\n ) * self.epsilon_decay\n )\n epsilons.append(self.epsilon)\n \n # if hard update is needed\n if update_cnt % self.target_update == 0:\n self._target_hard_update()\n\n plt.figure()\n plt.plot(scores)\n mean_score = np.mean(scores) \n mean_score = np.around(mean_score,2)\n plt.title('score: ' + str(mean_score)) \n plt.savefig('results/6_dist.png')\n self.env.close()\n \n def test(self) -> List[np.ndarray]:\n self.is_test = True\n \n state = self.env.reset()\n done = False\n score = 0\n \n frames = []\n while not done:\n frames.append(self.env.render(mode=\"rgb_array\"))\n action = self.select_action(state)\n next_state, reward, done = self.step(action)\n\n state = next_state\n score += reward\n \n #print(\"score: \", score)\n self.env.close()\n \n return frames\n\n #根据分布计算损失\n def _compute_dqn_loss(self, samples: Dict[str, np.ndarray]) -> torch.Tensor:\n device = self.device # for shortening the following lines\n state = torch.FloatTensor(samples[\"obs\"]).to(device)\n next_state = torch.FloatTensor(samples[\"next_obs\"]).to(device)\n action = torch.LongTensor(samples[\"acts\"]).to(device)\n reward = torch.FloatTensor(samples[\"rews\"].reshape(-1, 1)).to(device)\n done = torch.FloatTensor(samples[\"done\"].reshape(-1, 1)).to(device)\n \n # Categorical DQN algorithm\n delta_z = float(self.v_max - self.v_min) / (self.atom_size - 1)\n\n with torch.no_grad():\n next_action = self.dqn_target(next_state).argmax(1)\n next_dist = self.dqn_target.dist(next_state)\n next_dist = next_dist[range(self.batch_size), next_action]\n\n #将每个reward分散到每个分布上\n t_z = reward + (1 - done) * self.gamma * self.support\n t_z = t_z.clamp(min=self.v_min, max=self.v_max)\n b = (t_z - self.v_min) / delta_z\n l = b.floor().long()\n u = b.ceil().long()\n\n offset = (\n torch.linspace(\n 0, (self.batch_size - 1) * self.atom_size, self.batch_size\n ).long()\n .unsqueeze(1)\n .expand(self.batch_size, self.atom_size)\n .to(self.device)\n )\n\n proj_dist = torch.zeros(next_dist.size(), device=self.device)\n proj_dist.view(-1).index_add_(\n 0, (l + offset).view(-1), (next_dist * (u.float() - b)).view(-1)\n )\n proj_dist.view(-1).index_add_(\n 0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1)\n )\n\n dist = self.dqn.dist(state)\n log_p = torch.log(dist[range(self.batch_size), action])\n\n loss = -(proj_dist * log_p).sum(1).mean()\n\n return loss\n\n #每隔一段时间,将DQN的状态复制给target DQN\n def _target_hard_update(self):\n self.dqn_target.load_state_dict(self.dqn.state_dict())\n \n def _plot(\n self, \n frame_idx: int, \n scores: List[float], \n losses: List[float], \n epsilons: List[float],\n ):\n 
\"\"\"Plot the training progresses.\"\"\"\n clear_output(True)\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('frame %s. score: %s' % (frame_idx, np.mean(scores[-10:])))\n plt.plot(scores)\n plt.subplot(132)\n plt.title('loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('epsilons')\n plt.plot(epsilons)\n plt.show()\n\nenv_id = \"CartPole-v0\"\nenv = gym.make(env_id)\n\n# parameters\nnum_frames = 20000\nmemory_size = 2000\nbatch_size = 32\ntarget_update = 200\nepsilon_decay = 1 / 2000\n\nagent = DQNAgent(env, memory_size, batch_size, target_update, epsilon_decay)\nagent.train(num_frames)\n#frames = agent.test()\n" ]
[ [ "torch.nn.Linear", "torch.device", "matplotlib.pyplot.subplot", "matplotlib.pyplot.savefig", "torch.no_grad", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "torch.linspace", "numpy.mean", "matplotlib.pyplot.figure", "torch.nn.ReLU", "torch.FloatTensor", "matplotlib.pyplot.show", "torch.LongTensor", "torch.nn.functional.softmax", "numpy.around", "numpy.random.random", "torch.sum" ] ]
bitwhys/mining-social-web
[ "8d84c85a415d63bd53b8eb441a4258dc914f4d9f" ]
[ "packt-social-media-mining/Chap04/facebook_top_posts_plot.py" ]
[ "# Chap04/facebook_top_posts_plot.py\nimport json\nfrom argparse import ArgumentParser\nimport numpy as np\nimport pandas as pd\nimport dateutil.parser\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\n\ndef get_parser():\n parser = ArgumentParser()\n parser.add_argument('--page')\n return parser\n\n\nif __name__ == '__main__':\n parser = get_parser()\n args = parser.parse_args()\n\n fname = \"posts_{}.jsonl\".format(args.page)\n\n all_posts = []\n n_likes = []\n n_shares = []\n n_comments = []\n n_all = []\n with open(fname) as f:\n for line in f:\n post = json.loads(line)\n created_time = dateutil.parser.parse(post['created_time'])\n n_likes.append(post['likes']['summary']['total_count'])\n n_comments.append(post['comments']['summary']['total_count'])\n try:\n n_shares.append(post['shares']['count'])\n except KeyError:\n n_shares.append(0)\n n_all.append(n_likes[-1] + n_shares[-1] + n_comments[-1])\n all_posts.append(created_time.strftime('%H:%M:%S'))\n\n idx = pd.DatetimeIndex(all_posts)\n data = {\n 'likes': n_likes,\n 'comments': n_comments,\n 'shares': n_shares,\n 'all': n_all\n }\n my_series = pd.DataFrame(data=data, index=idx)\n\n # Resampling into 1-hour buckets\n per_hour = my_series.resample('1h', how='sum').fillna(0) \n \n # Plotting\n fig, ax = plt.subplots()\n ax.grid(True)\n ax.set_title(\"Interaction Frequencies\")\n width = 0.8\n ind = np.arange(len(per_hour['all']))\n plt.bar(ind, per_hour['all'])\n tick_pos = ind + width / 2\n labels = []\n for i in range(24):\n d = datetime.now().replace(hour=i, minute=0)\n labels.append(d.strftime('%H:%M'))\n plt.xticks(tick_pos, labels, rotation=90)\n plt.savefig('interactions_per_hour.png')\n" ]
[ [ "pandas.DatetimeIndex", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "matplotlib.pyplot.bar", "matplotlib.pyplot.xticks" ] ]
VedPatwardhan/pytorch-lightning
[ "623dc974f56505cfdb6a7c62ad75780229e101de" ]
[ "tests/checkpointing/test_model_checkpoint.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport math\nimport os\nimport pickle\nimport re\nimport time\nfrom argparse import Namespace\nfrom datetime import timedelta\nfrom logging import INFO\nfrom pathlib import Path\nfrom typing import Union\nfrom unittest import mock\nfrom unittest.mock import call, MagicMock, Mock, patch\n\nimport cloudpickle\nimport pytest\nimport torch\nimport yaml\nfrom torch import optim\n\nimport pytorch_lightning as pl\nimport tests.helpers.utils as tutils\nfrom pytorch_lightning import seed_everything, Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.imports import _OMEGACONF_AVAILABLE\nfrom tests.helpers import BoringModel\nfrom tests.helpers.runif import RunIf\n\nif _OMEGACONF_AVAILABLE:\n from omegaconf import Container, OmegaConf\n\n\ndef test_model_checkpoint_state_key():\n early_stopping = ModelCheckpoint(monitor=\"val_loss\")\n expected_id = (\n \"ModelCheckpoint{'monitor': 'val_loss', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,\"\n \" 'train_time_interval': None, 'save_on_train_epoch_end': None}\"\n )\n assert early_stopping.state_key == expected_id\n\n\nclass LogInTwoMethods(BoringModel):\n def training_step(self, batch, batch_idx):\n out = super().training_step(batch, batch_idx)\n self.log(\"early_stop_on\", out[\"loss\"])\n return out\n\n def validation_epoch_end(self, outputs):\n outs = torch.stack([x[\"x\"] for x in outputs]).mean()\n self.log(\"val_acc\", outs)\n\n\ndef mock_training_epoch_loop(trainer):\n # do not use `unittest.Mock` because we need to store the return value\n calls = {}\n old_get_monitor_value = trainer.fit_loop.epoch_loop._get_monitor_value\n\n def mock(key):\n value = old_get_monitor_value(key)\n calls[trainer.current_epoch] = {key: value}\n return value\n\n trainer.fit_loop.epoch_loop._get_monitor_value = mock\n return calls\n\n\[email protected](\n \"validation_step_none,val_dataloaders_none,monitor\",\n [(False, False, \"val_log\"), (True, False, \"train_log_epoch\"), (False, True, \"val_log\")],\n)\[email protected](\"reduce_lr_on_plateau\", [False, True])\ndef test_model_checkpoint_score_and_ckpt(\n tmpdir, validation_step_none: bool, val_dataloaders_none: bool, monitor: str, reduce_lr_on_plateau: bool\n):\n \"\"\"Test that when a model checkpoint is saved, it saves with the correct score appended to ckpt_path and\n checkpoint data.\"\"\"\n max_epochs = 3\n limit_train_batches = 5\n limit_val_batches = 7\n lr, gamma = 1e-1, 2\n\n class CustomBoringModel(BoringModel):\n def __init__(self):\n super().__init__()\n self.train_log_epochs = torch.randn(max_epochs, limit_train_batches)\n self.val_logs = torch.randn(max_epochs, limit_val_batches)\n self.scores = []\n\n def training_step(self, batch, 
batch_idx):\n log_value = self.train_log_epochs[self.current_epoch, batch_idx]\n self.log(\"train_log\", log_value, on_epoch=True)\n return super().training_step(batch, batch_idx)\n\n def validation_step(self, batch, batch_idx):\n log_value = self.val_logs[self.current_epoch, batch_idx]\n self.log(\"val_log\", log_value)\n self.log(\"epoch\", self.current_epoch, on_epoch=True)\n return super().validation_step(batch, batch_idx)\n\n def configure_optimizers(self):\n optimizer = optim.SGD(self.parameters(), lr=lr)\n\n if reduce_lr_on_plateau:\n lr_scheduler = {\n \"scheduler\": optim.lr_scheduler.ReduceLROnPlateau(optimizer),\n \"monitor\": monitor,\n \"strict\": True,\n }\n else:\n lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=gamma)\n\n return [optimizer], [lr_scheduler]\n\n def on_train_epoch_end(self):\n if \"train\" in monitor:\n self.scores.append(self.trainer.logged_metrics[monitor])\n\n def on_validation_epoch_end(self):\n if not self.trainer.sanity_checking and \"val\" in monitor:\n self.scores.append(self.trainer.logged_metrics[monitor])\n\n filename = \"{\" + f\"{monitor}\" + \":.4f}-{epoch}\"\n checkpoint = ModelCheckpoint(dirpath=tmpdir, filename=filename, monitor=monitor, save_top_k=-1)\n\n model = CustomBoringModel()\n\n if validation_step_none:\n model.validation_step = None\n if val_dataloaders_none:\n model.val_dataloaders = None\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n callbacks=[checkpoint],\n limit_train_batches=limit_train_batches,\n limit_val_batches=limit_val_batches,\n max_epochs=max_epochs,\n enable_progress_bar=False,\n )\n calls = mock_training_epoch_loop(trainer)\n trainer.fit(model)\n\n ckpt_files = list(Path(tmpdir).glob(\"*.ckpt\"))\n assert len(ckpt_files) == len(model.scores) == max_epochs\n\n for epoch in range(max_epochs):\n score = model.scores[epoch]\n expected_score = getattr(model, f\"{monitor}s\")[epoch].mean().item()\n expected_filename = f\"{monitor}={score:.4f}-epoch={epoch}.ckpt\"\n assert math.isclose(score, expected_score, rel_tol=1e-4)\n\n chk = pl_load(os.path.join(checkpoint.dirpath, expected_filename))\n assert chk[\"epoch\"] == epoch + 1\n assert chk[\"global_step\"] == limit_train_batches * (epoch + 1)\n\n mc_specific_data = chk[\"callbacks\"][\n f\"ModelCheckpoint{{'monitor': '{monitor}', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,\"\n \" 'train_time_interval': None, 'save_on_train_epoch_end': True}\"\n ]\n assert mc_specific_data[\"dirpath\"] == checkpoint.dirpath\n assert mc_specific_data[\"monitor\"] == monitor\n assert mc_specific_data[\"current_score\"] == score\n\n if not reduce_lr_on_plateau:\n actual_step_count = chk[\"lr_schedulers\"][0][\"_step_count\"]\n actual_lr = chk[\"lr_schedulers\"][0][\"_last_lr\"][0]\n # checkpoint is saved after updating lr_scheduler states\n assert actual_step_count == epoch + 2 # step_count starts at 1\n assert actual_lr == lr * gamma ** (epoch + 1)\n else:\n assert calls[epoch] == {monitor: score}\n\n\[email protected](\n \"val_check_interval,reduce_lr_on_plateau,epoch_aligned\",\n [(0.25, True, True), (0.25, False, True), (0.42, False, False)],\n)\ndef test_model_checkpoint_score_and_ckpt_val_check_interval(\n tmpdir, val_check_interval, reduce_lr_on_plateau, epoch_aligned\n):\n \"\"\"Test that when a model checkpoint is saved, it saves with the correct score appended to ckpt_path and\n checkpoint data with val_check_interval.\"\"\"\n seed_everything(0)\n max_epochs = 3\n limit_train_batches = 12\n limit_val_batches = 7\n lr, gamma = 1e-1, 2\n 
monitor = \"val_log\"\n per_val_train_batches = int(limit_train_batches * val_check_interval)\n per_epoch_val_checks, leftover_train_batches = divmod(limit_train_batches, per_val_train_batches)\n\n class CustomBoringModel(BoringModel):\n def __init__(self):\n super().__init__()\n self.val_logs = torch.randn(per_epoch_val_checks * max_epochs, limit_val_batches)\n self.val_loop_count = 0\n self.scores = []\n\n def validation_step(self, batch, batch_idx):\n log_value = self.val_logs[self.val_loop_count, batch_idx]\n self.log(\"val_log\", log_value)\n return super().validation_step(batch, batch_idx)\n\n def validation_epoch_end(self, outputs):\n self.val_loop_count += 1\n super().validation_epoch_end(outputs)\n self.scores.append(self.trainer.logged_metrics[monitor])\n\n def configure_optimizers(self):\n optimizer = optim.SGD(self.parameters(), lr=lr)\n\n if reduce_lr_on_plateau:\n lr_scheduler = {\n \"scheduler\": optim.lr_scheduler.ReduceLROnPlateau(optimizer),\n \"monitor\": monitor,\n \"strict\": True,\n }\n else:\n lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=gamma)\n\n return [optimizer], [lr_scheduler]\n\n filename = \"{\" + f\"{monitor}\" + \":.4f}-{epoch}\"\n checkpoint = ModelCheckpoint(dirpath=tmpdir, filename=filename, monitor=monitor, save_top_k=-1)\n\n model = CustomBoringModel()\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n callbacks=[checkpoint],\n limit_train_batches=limit_train_batches,\n limit_val_batches=limit_val_batches,\n max_epochs=max_epochs,\n val_check_interval=val_check_interval,\n enable_progress_bar=False,\n num_sanity_val_steps=0,\n )\n calls = mock_training_epoch_loop(trainer)\n trainer.fit(model)\n\n def _make_assertions(epoch, ix):\n global_ix = ix + per_epoch_val_checks * epoch\n\n # checkpoint saved at the end of training epoch will have updated lr_scheduler states\n epoch_end_checkpoint = epoch_aligned and ix == (per_epoch_val_checks - 1)\n\n score = model.scores[global_ix]\n expected_score = getattr(model, f\"{monitor}s\")[global_ix].mean().item()\n expected_filename = f\"{monitor}={score:.4f}-epoch={epoch}.ckpt\"\n assert math.isclose(score, expected_score, rel_tol=1e-4)\n\n chk = pl_load(os.path.join(checkpoint.dirpath, expected_filename))\n assert chk[\"epoch\"] == epoch + 1\n expected_global_step = per_val_train_batches * (global_ix + 1) + (leftover_train_batches * epoch)\n assert chk[\"global_step\"] == expected_global_step\n\n mc_specific_data = chk[\"callbacks\"][\n f\"ModelCheckpoint{{'monitor': '{monitor}', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,\"\n \" 'train_time_interval': None, 'save_on_train_epoch_end': False}\"\n ]\n assert mc_specific_data[\"dirpath\"] == checkpoint.dirpath\n assert mc_specific_data[\"monitor\"] == monitor\n assert mc_specific_data[\"current_score\"] == score\n\n if not reduce_lr_on_plateau:\n actual_step_count = chk[\"lr_schedulers\"][0][\"_step_count\"]\n actual_lr = chk[\"lr_schedulers\"][0][\"_last_lr\"][0]\n assert actual_step_count == epoch + 1 + epoch_end_checkpoint\n assert actual_lr == lr * gamma ** (epoch + epoch_end_checkpoint)\n\n return score\n\n ckpt_files = list(Path(tmpdir).glob(\"*.ckpt\"))\n assert len(ckpt_files) == len(model.scores) == per_epoch_val_checks * max_epochs\n\n for epoch in range(max_epochs):\n for i in range(per_epoch_val_checks):\n score = _make_assertions(epoch, i)\n\n if reduce_lr_on_plateau:\n assert calls[epoch] == {monitor: score}\n\n\[email protected](\"save_top_k\", [-1, 0, 1, 2])\ndef 
test_model_checkpoint_with_non_string_input(tmpdir, save_top_k: int):\n \"\"\"Test that dirpath=None in checkpoint callback is valid and that ckpt_path is set correctly.\"\"\"\n tutils.reset_seed()\n model = LogInTwoMethods()\n\n checkpoint = ModelCheckpoint(monitor=\"early_stop_on\", dirpath=None, filename=\"{epoch}\", save_top_k=save_top_k)\n max_epochs = 2\n trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint], overfit_batches=0.20, max_epochs=max_epochs)\n trainer.fit(model)\n assert checkpoint.dirpath == tmpdir / trainer.logger.name / \"version_0\" / \"checkpoints\"\n\n if save_top_k == -1:\n ckpt_files = os.listdir(checkpoint.dirpath)\n expected_ckpt_files = [f\"epoch={i}.ckpt\" for i in range(max_epochs)]\n assert len(ckpt_files) == len(expected_ckpt_files) == max_epochs\n assert set(ckpt_files) == set(expected_ckpt_files)\n\n\[email protected](\"save_top_k\", [-1, 0, 1, 2])\ndef test_model_checkpoint_to_yaml(tmpdir, save_top_k: int):\n \"\"\"Test that None in checkpoint callback is valid and that chkp_path is set correctly.\"\"\"\n tutils.reset_seed()\n model = LogInTwoMethods()\n\n checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor=\"early_stop_on\", save_top_k=save_top_k)\n\n trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint], overfit_batches=0.20, max_epochs=2)\n trainer.fit(model)\n\n path_yaml = os.path.join(tmpdir, \"best_k_models.yaml\")\n checkpoint.to_yaml(path_yaml)\n d = yaml.full_load(open(path_yaml))\n best_k = dict(checkpoint.best_k_models.items())\n assert d == best_k\n\n\[email protected](\"logger_version,expected\", [(None, \"version_0\"), (1, \"version_1\"), (\"awesome\", \"awesome\")])\ndef test_model_checkpoint_path(tmpdir, logger_version: Union[None, int, str], expected: str):\n \"\"\"Test that \"version_\" prefix is only added when logger's version is an integer.\"\"\"\n tutils.reset_seed()\n model = LogInTwoMethods()\n logger = TensorBoardLogger(str(tmpdir), version=logger_version)\n\n trainer = Trainer(default_root_dir=tmpdir, overfit_batches=0.2, max_epochs=2, logger=logger)\n trainer.fit(model)\n\n ckpt_version = Path(trainer.checkpoint_callback.dirpath).parent.name\n assert ckpt_version == expected\n\n\ndef test_pickling(tmpdir):\n ckpt = ModelCheckpoint(dirpath=tmpdir)\n\n ckpt_pickled = pickle.dumps(ckpt)\n ckpt_loaded = pickle.loads(ckpt_pickled)\n assert vars(ckpt) == vars(ckpt_loaded)\n\n ckpt_pickled = cloudpickle.dumps(ckpt)\n ckpt_loaded = cloudpickle.loads(ckpt_pickled)\n assert vars(ckpt) == vars(ckpt_loaded)\n\n\nclass ModelCheckpointTestInvocations(ModelCheckpoint):\n # this class has to be defined outside the test function, otherwise we get pickle error\n # due to the way ddp process is launched\n\n def __init__(self, expected_count, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.expected_count = expected_count\n self.on_save_checkpoint_count = 0\n\n def on_train_start(self, trainer, pl_module):\n torch.save = Mock(wraps=torch.save)\n\n def on_save_checkpoint(self, trainer, pl_module, checkpoint):\n # only rank 0 will call ``torch.save``\n super().on_save_checkpoint(trainer, pl_module, checkpoint)\n self.on_save_checkpoint_count += 1\n\n def on_train_end(self, trainer, pl_module):\n super().on_train_end(trainer, pl_module)\n assert self.best_model_path\n assert self.best_model_score\n assert self.on_save_checkpoint_count == self.expected_count\n if trainer.is_global_zero:\n assert torch.save.call_count == self.expected_count\n else:\n assert torch.save.call_count == 0\n\n\n@RunIf(skip_windows=True, 
skip_49370=True)\ndef test_model_checkpoint_no_extraneous_invocations(tmpdir):\n \"\"\"Test to ensure that the model callback saves the checkpoints only once in distributed mode.\"\"\"\n model = LogInTwoMethods()\n num_epochs = 4\n model_checkpoint = ModelCheckpointTestInvocations(monitor=\"early_stop_on\", expected_count=num_epochs, save_top_k=-1)\n trainer = Trainer(\n strategy=\"ddp_spawn\",\n accelerator=\"cpu\",\n devices=2,\n default_root_dir=tmpdir,\n callbacks=[model_checkpoint],\n max_epochs=num_epochs,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n\ndef test_model_checkpoint_format_checkpoint_name(tmpdir):\n # empty filename:\n ckpt_name = ModelCheckpoint._format_checkpoint_name(\"\", {\"epoch\": 3, \"step\": 2})\n assert ckpt_name == \"epoch=3-step=2\"\n\n ckpt_name = ModelCheckpoint._format_checkpoint_name(None, {\"epoch\": 3, \"step\": 2}, prefix=\"test\")\n assert ckpt_name == \"test-epoch=3-step=2\"\n\n # no groups case:\n ckpt_name = ModelCheckpoint._format_checkpoint_name(\"ckpt\", {}, prefix=\"test\")\n assert ckpt_name == \"test-ckpt\"\n\n # no prefix\n ckpt_name = ModelCheckpoint._format_checkpoint_name(\"{epoch:03d}-{acc}\", {\"epoch\": 3, \"acc\": 0.03})\n assert ckpt_name == \"epoch=003-acc=0.03\"\n\n # prefix\n char_org = ModelCheckpoint.CHECKPOINT_JOIN_CHAR\n ModelCheckpoint.CHECKPOINT_JOIN_CHAR = \"@\"\n ckpt_name = ModelCheckpoint._format_checkpoint_name(\"{epoch},{acc:.5f}\", {\"epoch\": 3, \"acc\": 0.03}, prefix=\"test\")\n assert ckpt_name == \"test@epoch=3,acc=0.03000\"\n ModelCheckpoint.CHECKPOINT_JOIN_CHAR = char_org\n\n # no dirpath set\n ckpt_name = ModelCheckpoint(monitor=\"early_stop_on\", dirpath=None).format_checkpoint_name({\"epoch\": 3, \"step\": 2})\n assert ckpt_name == \"epoch=3-step=2.ckpt\"\n ckpt_name = ModelCheckpoint(monitor=\"early_stop_on\", dirpath=\"\").format_checkpoint_name({\"epoch\": 5, \"step\": 4})\n assert ckpt_name == \"epoch=5-step=4.ckpt\"\n\n # CWD\n ckpt_name = ModelCheckpoint(monitor=\"early_stop_on\", dirpath=\".\").format_checkpoint_name({\"epoch\": 3, \"step\": 4})\n assert ckpt_name == str(Path(\".\").resolve() / \"epoch=3-step=4.ckpt\")\n\n # with version\n ckpt = ModelCheckpoint(monitor=\"early_stop_on\", dirpath=tmpdir, filename=\"name\")\n ckpt_name = ckpt.format_checkpoint_name({}, ver=3)\n assert ckpt_name == tmpdir / \"name-v3.ckpt\"\n\n # using slashes\n ckpt = ModelCheckpoint(monitor=\"early_stop_on\", dirpath=None, filename=\"{epoch}_{val/loss:.5f}\")\n ckpt_name = ckpt.format_checkpoint_name({\"epoch\": 4, \"val/loss\": 0.03})\n assert ckpt_name == \"epoch=4_val/loss=0.03000.ckpt\"\n\n # auto_insert_metric_name=False\n ckpt_name = ModelCheckpoint._format_checkpoint_name(\n \"epoch={epoch:03d}-val_acc={val/acc}\", {\"epoch\": 3, \"val/acc\": 0.03}, auto_insert_metric_name=False\n )\n assert ckpt_name == \"epoch=003-val_acc=0.03\"\n\n\nclass ModelCheckpointExtensionTest(ModelCheckpoint):\n FILE_EXTENSION = \".tpkc\"\n\n\ndef test_model_checkpoint_file_extension(tmpdir):\n \"\"\"Test ModelCheckpoint with different file extension.\"\"\"\n\n model = LogInTwoMethods()\n model_checkpoint = ModelCheckpointExtensionTest(\n monitor=\"early_stop_on\", dirpath=tmpdir, save_top_k=1, save_last=True\n )\n trainer = Trainer(default_root_dir=tmpdir, callbacks=[model_checkpoint], max_steps=1, logger=False)\n trainer.fit(model)\n\n expected = [\"epoch=0-step=0.tpkc\", \"last.tpkc\"]\n assert set(expected) == set(os.listdir(tmpdir))\n\n\ndef 
test_model_checkpoint_save_last(tmpdir):\n \"\"\"Tests that save_last produces only one last checkpoint.\"\"\"\n seed_everything()\n model = LogInTwoMethods()\n epochs = 3\n ModelCheckpoint.CHECKPOINT_NAME_LAST = \"last-{epoch}\"\n model_checkpoint = ModelCheckpoint(monitor=\"early_stop_on\", dirpath=tmpdir, save_top_k=-1, save_last=True)\n trainer = Trainer(\n default_root_dir=tmpdir,\n callbacks=[model_checkpoint],\n max_epochs=epochs,\n limit_train_batches=10,\n limit_val_batches=10,\n logger=False,\n )\n trainer.fit(model)\n last_filename = model_checkpoint._format_checkpoint_name(\n ModelCheckpoint.CHECKPOINT_NAME_LAST, {\"epoch\": trainer.current_epoch}\n )\n last_filename = last_filename + \".ckpt\"\n assert str(tmpdir / last_filename) == model_checkpoint.last_model_path\n assert set(os.listdir(tmpdir)) == set(\n [f\"epoch={i}-step={j}.ckpt\" for i, j in zip(range(epochs), [9, 19, 29])] + [last_filename]\n )\n\n ModelCheckpoint.CHECKPOINT_NAME_LAST = \"last\"\n\n\ndef test_invalid_top_k(tmpdir):\n \"\"\"Make sure that a MisconfigurationException is raised for a negative save_top_k argument.\"\"\"\n with pytest.raises(MisconfigurationException, match=r\".*Must be >= -1\"):\n ModelCheckpoint(dirpath=tmpdir, save_top_k=-3)\n\n\ndef test_none_monitor_top_k(tmpdir):\n \"\"\"Test that a warning appears for positive top_k with monitor=None.\"\"\"\n with pytest.raises(\n MisconfigurationException, match=r\"ModelCheckpoint\\(save_top_k=3, monitor=None\\) is not a valid*\"\n ):\n ModelCheckpoint(dirpath=tmpdir, save_top_k=3)\n # These should not fail\n ModelCheckpoint(dirpath=tmpdir, save_top_k=-1)\n ModelCheckpoint(dirpath=tmpdir, save_top_k=0)\n ModelCheckpoint(dirpath=tmpdir, save_top_k=1)\n\n\ndef test_invalid_every_n_epochs(tmpdir):\n \"\"\"Make sure that a MisconfigurationException is raised for a negative every_n_epochs argument.\"\"\"\n with pytest.raises(MisconfigurationException, match=r\".*Must be >= 0\"):\n ModelCheckpoint(dirpath=tmpdir, every_n_epochs=-3)\n # These should not fail\n ModelCheckpoint(dirpath=tmpdir, every_n_epochs=0)\n ModelCheckpoint(dirpath=tmpdir, every_n_epochs=1)\n ModelCheckpoint(dirpath=tmpdir, every_n_epochs=2)\n\n\ndef test_invalid_every_n_train_steps(tmpdir):\n \"\"\"Make sure that a MisconfigurationException is raised for a negative every_n_epochs argument.\"\"\"\n with pytest.raises(MisconfigurationException, match=r\".*Must be >= 0\"):\n ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=-3)\n # These should not fail\n ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=0)\n ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=1)\n ModelCheckpoint(dirpath=tmpdir, every_n_epochs=2)\n\n\ndef test_invalid_trigger_combination(tmpdir):\n \"\"\"Test that a MisconfigurationException is raised if more than one of every_n_epochs, every_n_train_steps, and\n train_time_interval are enabled together.\"\"\"\n with pytest.raises(MisconfigurationException, match=r\".*Combination of parameters every_n_train_steps\"):\n ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=1, every_n_epochs=2)\n with pytest.raises(MisconfigurationException, match=r\".*Combination of parameters every_n_train_steps\"):\n ModelCheckpoint(train_time_interval=timedelta(minutes=1), every_n_epochs=2)\n with pytest.raises(MisconfigurationException, match=r\".*Combination of parameters every_n_train_steps\"):\n ModelCheckpoint(train_time_interval=timedelta(minutes=1), every_n_train_steps=2)\n\n # These should not fail\n ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=0, every_n_epochs=3)\n 
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=4, every_n_epochs=0)\n ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=0, every_n_epochs=0, train_time_interval=timedelta(minutes=1))\n\n\ndef test_none_every_n_train_steps_val_epochs(tmpdir):\n checkpoint_callback = ModelCheckpoint(dirpath=tmpdir)\n assert checkpoint_callback.every_n_epochs == 1\n assert checkpoint_callback._every_n_train_steps == 0\n\n\ndef test_model_checkpoint_save_last_none_monitor(tmpdir, caplog):\n \"\"\"Test that it is possible to save all checkpoints when monitor=None.\"\"\"\n seed_everything()\n model = LogInTwoMethods()\n\n epochs = 2\n checkpoint_callback = ModelCheckpoint(monitor=None, dirpath=tmpdir, save_top_k=-1, save_last=True)\n trainer = Trainer(\n default_root_dir=tmpdir,\n callbacks=[checkpoint_callback],\n limit_train_batches=10,\n limit_val_batches=10,\n max_epochs=epochs,\n logger=False,\n )\n\n with caplog.at_level(INFO):\n trainer.fit(model)\n assert \"will duplicate the last checkpoint saved\" in caplog.text\n\n # these should not be set if monitor is None\n assert checkpoint_callback.monitor is None\n assert checkpoint_callback.best_model_path == tmpdir / \"epoch=1-step=19.ckpt\"\n assert checkpoint_callback.last_model_path == tmpdir / \"last.ckpt\"\n assert checkpoint_callback.best_model_score is None\n assert checkpoint_callback.best_k_models == {}\n assert checkpoint_callback.kth_best_model_path == \"\"\n\n # check that the correct ckpts were created\n expected = [f\"epoch={i}-step={j}.ckpt\" for i, j in zip(range(epochs), [9, 19])]\n expected.append(\"last.ckpt\")\n assert set(os.listdir(tmpdir)) == set(expected)\n\n\[email protected](\"every_n_epochs\", list(range(4)))\ndef test_model_checkpoint_every_n_epochs(tmpdir, every_n_epochs):\n model = LogInTwoMethods()\n epochs = 5\n checkpoint_callback = ModelCheckpoint(\n dirpath=tmpdir, filename=\"{epoch}\", save_top_k=-1, every_n_epochs=every_n_epochs\n )\n trainer = Trainer(\n default_root_dir=tmpdir,\n callbacks=[checkpoint_callback],\n max_epochs=epochs,\n limit_train_batches=1,\n limit_val_batches=1,\n logger=False,\n )\n trainer.fit(model)\n\n # check that the correct ckpts were created\n expected = [f\"epoch={e}.ckpt\" for e in range(epochs) if not (e + 1) % every_n_epochs] if every_n_epochs > 0 else []\n assert set(os.listdir(tmpdir)) == set(expected)\n\n\ndef test_ckpt_every_n_train_steps(tmpdir):\n \"\"\"Tests that the checkpoints are saved every n training steps.\"\"\"\n\n model = LogInTwoMethods()\n every_n_train_steps = 16\n max_epochs = 2\n epoch_length = 64\n checkpoint_callback = ModelCheckpoint(\n filename=\"{step}\",\n every_n_epochs=0,\n every_n_train_steps=every_n_train_steps,\n dirpath=tmpdir,\n save_top_k=-1,\n save_last=False,\n )\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=2,\n enable_progress_bar=False,\n callbacks=[checkpoint_callback],\n logger=False,\n )\n\n trainer.fit(model)\n expected = [\n f\"step={i}.ckpt\" for i in range(every_n_train_steps - 1, max_epochs * epoch_length, every_n_train_steps)\n ]\n assert set(os.listdir(tmpdir)) == set(expected)\n\n\[email protected](\"pytorch_lightning.callbacks.model_checkpoint.time\")\ndef test_model_checkpoint_train_time_interval(mock_datetime, tmpdir) -> None:\n \"\"\"Tests that the checkpoints are saved at the specified time interval.\"\"\"\n seconds_per_batch = 7\n start_time = time.monotonic()\n batches_per_epoch = 64\n num_epochs = 2\n max_batches = batches_per_epoch * num_epochs + 1\n mock_datetime.monotonic.side_effect = [start_time + 
seconds_per_batch * i for i in range(max_batches)]\n\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n min_epochs=num_epochs,\n max_epochs=num_epochs,\n enable_progress_bar=False,\n callbacks=[\n ModelCheckpoint(\n filename=\"{epoch}-{step}\",\n dirpath=tmpdir,\n train_time_interval=timedelta(minutes=1),\n save_top_k=-1,\n save_last=False,\n )\n ],\n logger=False,\n )\n\n trainer.fit(model)\n # Each batch takes 7 sec and we checkpoint every minute. There are 64\n # batches per epoch, so total time to run is 7*64*2 = 896 sec < 14.96 minutes,\n # so we should have 14 checkpoints.\n assert len(os.listdir(tmpdir)) == 14\n\n\ndef test_model_checkpoint_topk_zero(tmpdir):\n \"\"\"Test that no checkpoints are saved when save_top_k=0.\"\"\"\n model = LogInTwoMethods()\n checkpoint_callback = ModelCheckpoint(dirpath=tmpdir, save_top_k=0, save_last=True)\n trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint_callback], max_epochs=2, logger=False)\n trainer.fit(model)\n # these should not be set if monitor is None\n assert checkpoint_callback.monitor is None\n assert checkpoint_callback.best_model_path == \"\"\n assert checkpoint_callback.best_model_score is None\n assert checkpoint_callback.best_k_models == {}\n assert checkpoint_callback.kth_best_model_path == \"\"\n # check that only the last ckpt was created\n assert os.listdir(tmpdir) == [\"last.ckpt\"]\n assert checkpoint_callback.last_model_path == tmpdir / \"last.ckpt\"\n\n\ndef test_model_checkpoint_topk_all(tmpdir):\n \"\"\"Test that save_top_k=-1 tracks the best models when monitor key is provided.\"\"\"\n seed_everything(1000)\n epochs = 3\n\n model = BoringModel()\n checkpoint_callback = ModelCheckpoint(\n dirpath=tmpdir, filename=\"{epoch}\", monitor=\"epoch\", mode=\"max\", save_top_k=-1\n )\n trainer = Trainer(\n default_root_dir=tmpdir,\n callbacks=[checkpoint_callback],\n max_epochs=epochs,\n logger=False,\n val_check_interval=1.0,\n )\n trainer.fit(model)\n\n assert checkpoint_callback.monitor == \"epoch\"\n assert checkpoint_callback.best_model_path == tmpdir / \"epoch=2.ckpt\"\n assert checkpoint_callback.best_model_score == epochs - 1\n assert len(os.listdir(tmpdir)) == len(checkpoint_callback.best_k_models) == epochs\n assert set(checkpoint_callback.best_k_models.keys()) == {str(tmpdir / f\"epoch={i}.ckpt\") for i in range(epochs)}\n assert checkpoint_callback.kth_best_model_path == tmpdir / \"epoch=0.ckpt\"\n\n\ndef test_ckpt_metric_names(tmpdir):\n model = LogInTwoMethods()\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n gradient_clip_val=1.0,\n overfit_batches=0.20,\n enable_progress_bar=False,\n limit_train_batches=0.01,\n limit_val_batches=0.01,\n callbacks=[ModelCheckpoint(monitor=\"early_stop_on\", dirpath=tmpdir, filename=\"{val_loss:.2f}\")],\n )\n\n trainer.fit(model)\n\n # make sure the checkpoint we saved has the metric in the name\n ckpts = os.listdir(tmpdir)\n ckpts = [x for x in ckpts if \"val_loss\" in x]\n assert len(ckpts) == 1\n val = re.sub(\"[^0-9.]\", \"\", ckpts[0])\n assert len(val) > 3\n\n\ndef test_default_checkpoint_behavior(tmpdir):\n seed_everything(1234)\n\n model = LogInTwoMethods()\n trainer = Trainer(\n default_root_dir=tmpdir, max_epochs=3, enable_progress_bar=False, limit_train_batches=5, limit_val_batches=5\n )\n\n with patch.object(trainer, \"save_checkpoint\", wraps=trainer.save_checkpoint) as save_mock:\n trainer.fit(model)\n results = trainer.test()\n\n assert len(results) == 1\n save_dir = tmpdir / \"lightning_logs\" / \"version_0\" 
/ \"checkpoints\"\n save_weights_only = trainer.checkpoint_callback.save_weights_only\n save_mock.assert_has_calls(\n [\n call(save_dir / \"epoch=0-step=4.ckpt\", save_weights_only),\n call(save_dir / \"epoch=1-step=9.ckpt\", save_weights_only),\n call(save_dir / \"epoch=2-step=14.ckpt\", save_weights_only),\n ]\n )\n ckpts = os.listdir(save_dir)\n assert len(ckpts) == 1\n assert ckpts[0] == \"epoch=2-step=14.ckpt\"\n\n\[email protected](\"max_epochs\", [1, 2])\[email protected](\"should_validate\", [True, False])\[email protected](\"save_last\", [True, False])\[email protected](\"verbose\", [True, False])\ndef test_model_checkpoint_save_last_warning(\n tmpdir, caplog, max_epochs: int, should_validate: bool, save_last: bool, verbose: bool\n):\n \"\"\"Tests 'Saving latest checkpoint...' log.\"\"\"\n model = LogInTwoMethods()\n if not should_validate:\n model.validation_step = None\n ckpt = ModelCheckpoint(monitor=\"early_stop_on\", dirpath=tmpdir, save_top_k=0, save_last=save_last, verbose=verbose)\n trainer = Trainer(\n default_root_dir=tmpdir, callbacks=[ckpt], max_epochs=max_epochs, limit_train_batches=1, limit_val_batches=1\n )\n with caplog.at_level(logging.INFO):\n trainer.fit(model)\n assert caplog.messages.count(\"Saving latest checkpoint...\") == (verbose and save_last)\n\n\ndef test_model_checkpoint_save_last_checkpoint_contents(tmpdir):\n \"\"\"Tests that the save_last checkpoint contains the latest information.\"\"\"\n seed_everything(100)\n model = LogInTwoMethods()\n num_epochs = 3\n model_checkpoint = ModelCheckpoint(\n monitor=\"early_stop_on\", dirpath=tmpdir, filename=\"{epoch}\", save_top_k=num_epochs, save_last=True\n )\n trainer = Trainer(\n default_root_dir=tmpdir,\n callbacks=[model_checkpoint],\n max_epochs=num_epochs,\n limit_train_batches=2,\n limit_val_batches=2,\n )\n trainer.fit(model)\n\n path_last_epoch = str(tmpdir / f\"epoch={num_epochs - 1}.ckpt\")\n path_last = str(tmpdir / \"last.ckpt\")\n assert path_last == model_checkpoint.last_model_path\n assert os.path.isfile(path_last_epoch)\n\n ckpt_last_epoch = torch.load(path_last_epoch)\n ckpt_last = torch.load(path_last)\n\n assert ckpt_last_epoch[\"epoch\"] == ckpt_last[\"epoch\"]\n assert ckpt_last_epoch[\"global_step\"] == ckpt_last[\"global_step\"]\n\n ckpt_id = (\n \"ModelCheckpoint{'monitor': 'early_stop_on', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,\"\n \" 'train_time_interval': None, 'save_on_train_epoch_end': True}\"\n )\n assert ckpt_last[\"callbacks\"][ckpt_id] == ckpt_last_epoch[\"callbacks\"][ckpt_id]\n\n # it is easier to load the model objects than to iterate over the raw dict of tensors\n model_last_epoch = LogInTwoMethods.load_from_checkpoint(path_last_epoch)\n model_last = LogInTwoMethods.load_from_checkpoint(model_checkpoint.last_model_path)\n for w0, w1 in zip(model_last_epoch.parameters(), model_last.parameters()):\n assert w0.eq(w1).all()\n\n\[email protected](\"mode\", [\"min\", \"max\"])\ndef test_checkpointing_with_nan_as_first(tmpdir, mode):\n monitor = [float(\"nan\")]\n monitor += [5, 7, 8] if mode == \"max\" else [8, 7, 5]\n\n class CurrentModel(LogInTwoMethods):\n def validation_epoch_end(self, outputs):\n val_loss = monitor[self.current_epoch]\n self.log(\"abc\", val_loss)\n\n model = CurrentModel()\n\n callback = ModelCheckpoint(monitor=\"abc\", mode=mode, save_top_k=1, dirpath=tmpdir)\n\n trainer = Trainer(\n callbacks=[callback],\n default_root_dir=tmpdir,\n val_check_interval=1.0,\n max_epochs=len(monitor),\n )\n trainer.save_checkpoint = MagicMock()\n\n 
trainer.fit(model)\n\n # check that last one is also the best one\n assert trainer.save_checkpoint.call_count == len(monitor)\n assert mode == \"min\" and callback.best_model_score == 5 or mode == \"max\" and callback.best_model_score == 8\n\n\ndef test_checkpoint_repeated_strategy(tmpdir):\n \"\"\"This test validates checkpoint can be called several times without increasing internally its global step if\n nothing run.\"\"\"\n checkpoint_callback = ModelCheckpoint(monitor=\"val_loss\", dirpath=tmpdir, filename=\"{epoch:02d}\")\n\n class ExtendedBoringModel(BoringModel):\n def validation_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n self.log(\"val_loss\", loss)\n\n model = ExtendedBoringModel()\n model.validation_epoch_end = None\n trainer_kwargs = {\n \"max_epochs\": 1,\n \"limit_train_batches\": 2,\n \"limit_val_batches\": 2,\n \"limit_test_batches\": 2,\n \"enable_progress_bar\": False,\n \"enable_model_summary\": False,\n }\n trainer = Trainer(**trainer_kwargs, callbacks=[checkpoint_callback])\n trainer.fit(model)\n assert os.listdir(tmpdir) == [\"epoch=00.ckpt\"]\n\n for idx in range(4):\n # load from checkpoint\n trainer = pl.Trainer(**trainer_kwargs, default_root_dir=tmpdir)\n trainer.fit(model, ckpt_path=checkpoint_callback.best_model_path)\n trainer.test(ckpt_path=checkpoint_callback.best_model_path, verbose=False)\n assert set(os.listdir(tmpdir)) == {\"epoch=00.ckpt\", \"lightning_logs\"}\n assert set(os.listdir(tmpdir / \"lightning_logs\")) == {f\"version_{i}\" for i in range(4)}\n\n\ndef test_checkpoint_repeated_strategy_extended(tmpdir):\n \"\"\"This test validates checkpoint can be called several times without increasing internally its global step if\n nothing run.\"\"\"\n\n class ExtendedBoringModel(BoringModel):\n def validation_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"val_loss\": loss}\n\n def validation_epoch_end(self, *_):\n ...\n\n def assert_trainer_init(trainer):\n assert trainer.global_step == 0\n assert trainer.current_epoch == 0\n\n def get_last_checkpoint(ckpt_dir):\n last = ckpt_dir.listdir(sort=True)[-1]\n return str(last)\n\n def assert_checkpoint_content(ckpt_dir):\n chk = pl_load(get_last_checkpoint(ckpt_dir))\n assert chk[\"epoch\"] == epochs\n assert chk[\"global_step\"] == 4\n\n def assert_checkpoint_log_dir(idx):\n lightning_logs = tmpdir / \"lightning_logs\"\n actual = [d.basename for d in lightning_logs.listdir(sort=True)]\n assert actual == [f\"version_{i}\" for i in range(idx + 1)]\n actual = [d.basename for d in ckpt_dir.listdir()]\n assert len(actual) == epochs, actual\n\n ckpt_dir = tmpdir / \"checkpoints\"\n checkpoint_cb = ModelCheckpoint(dirpath=ckpt_dir, save_top_k=-1)\n epochs = 2\n limit_train_batches = 2\n trainer_config = dict(\n default_root_dir=tmpdir,\n max_epochs=epochs,\n limit_train_batches=limit_train_batches,\n limit_val_batches=3,\n limit_test_batches=4,\n callbacks=[checkpoint_cb],\n )\n trainer = pl.Trainer(**trainer_config)\n assert_trainer_init(trainer)\n\n model = ExtendedBoringModel()\n trainer.fit(model)\n assert trainer.global_step == epochs * limit_train_batches\n assert trainer.current_epoch == epochs - 1\n assert_checkpoint_log_dir(0)\n assert_checkpoint_content(ckpt_dir)\n\n trainer.validate(model)\n assert trainer.current_epoch == epochs - 1\n\n trainer.test(model)\n assert trainer.current_epoch == epochs - 1\n\n for idx in range(1, 5):\n chk = get_last_checkpoint(ckpt_dir)\n 
assert_checkpoint_content(ckpt_dir)\n\n # load from checkpoint\n trainer_config[\"callbacks\"] = [ModelCheckpoint(dirpath=ckpt_dir, save_top_k=-1)]\n trainer = pl.Trainer(**trainer_config)\n assert_trainer_init(trainer)\n\n model = ExtendedBoringModel()\n\n trainer.test(model)\n assert trainer.global_step == 0\n assert trainer.current_epoch == 0\n\n trainer.fit(model, ckpt_path=chk)\n assert trainer.global_step == epochs * limit_train_batches\n assert trainer.current_epoch == epochs\n\n trainer.validate(model)\n assert trainer.global_step == epochs * limit_train_batches\n assert trainer.current_epoch == epochs\n\n trainer.fit(model)\n assert trainer.global_step == epochs * limit_train_batches\n assert trainer.current_epoch == epochs\n assert_checkpoint_log_dir(idx)\n\n\ndef test_configure_model_checkpoint(tmpdir):\n \"\"\"Test all valid and invalid ways a checkpoint callback can be passed to the Trainer.\"\"\"\n kwargs = dict(default_root_dir=tmpdir)\n callback1 = ModelCheckpoint()\n callback2 = ModelCheckpoint()\n\n # no callbacks\n trainer = Trainer(enable_checkpointing=False, callbacks=[], **kwargs)\n assert not any(isinstance(c, ModelCheckpoint) for c in trainer.callbacks)\n assert trainer.checkpoint_callback is None\n\n # default configuration\n trainer = Trainer(callbacks=[], **kwargs)\n assert sum(1 for c in trainer.callbacks if isinstance(c, ModelCheckpoint)) == 1\n assert isinstance(trainer.checkpoint_callback, ModelCheckpoint)\n\n # custom callback passed to callbacks list, enable_checkpointing=True is ignored\n trainer = Trainer(enable_checkpointing=True, callbacks=[callback1], **kwargs)\n assert [c for c in trainer.callbacks if isinstance(c, ModelCheckpoint)] == [callback1]\n assert trainer.checkpoint_callback == callback1\n\n # multiple checkpoint callbacks\n trainer = Trainer(callbacks=[callback1, callback2], **kwargs)\n assert trainer.checkpoint_callback == callback1\n assert trainer.checkpoint_callbacks == [callback1, callback2]\n\n with pytest.raises(MisconfigurationException, match=\"`enable_checkpointing=False` but found `ModelCheckpoint`\"):\n Trainer(enable_checkpointing=False, callbacks=[callback1], **kwargs)\n\n\ndef test_val_check_interval_checkpoint_files(tmpdir):\n \"\"\"Test correct checkpoint naming when validating/checkpointing multiple times per epoch.\"\"\"\n model = LogInTwoMethods()\n model_checkpoint = ModelCheckpoint(dirpath=tmpdir, save_top_k=-1, monitor=\"val_acc\", mode=\"max\")\n trainer = Trainer(\n default_root_dir=tmpdir,\n val_check_interval=0.2,\n max_epochs=1,\n limit_train_batches=10,\n callbacks=[model_checkpoint],\n logger=False,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(model)\n files = {p.basename for p in tmpdir.listdir()}\n assert files == {f\"epoch=0-step={s}.ckpt\" for s in [1, 3, 5, 7, 9]}\n\n\ndef test_current_score(tmpdir):\n \"\"\"Check that the current_score value is correct and was saved.\"\"\"\n\n class TestModel(BoringModel):\n def training_step(self, *args):\n self.log(\"foo\", (self.current_epoch + 1) / 10)\n return super().training_step(*args)\n\n model_checkpoint = ModelCheckpoint(dirpath=tmpdir, save_top_k=3, monitor=\"foo\", mode=\"min\")\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=3,\n limit_train_batches=1,\n limit_val_batches=1,\n callbacks=[model_checkpoint],\n logger=False,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(TestModel())\n assert model_checkpoint.current_score == 0.3\n ckpts = [torch.load(str(ckpt)) for ckpt in 
tmpdir.listdir()]\n ckpts = [\n ckpt[\"callbacks\"][\n \"ModelCheckpoint{'monitor': 'foo', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,\"\n \" 'train_time_interval': None, 'save_on_train_epoch_end': True}\"\n ]\n for ckpt in ckpts\n ]\n assert sorted(ckpt[\"current_score\"] for ckpt in ckpts) == [0.1, 0.2, 0.3]\n\n\[email protected](\"mode\", [\"min\", \"max\"])\ndef test_current_score_when_nan(tmpdir, mode: str):\n \"\"\"Check that ModelCheckpoint handles NaN values correctly.\"\"\"\n\n class TestModel(BoringModel):\n def training_step(self, *args):\n self.log(\"foo\", float(\"nan\"))\n return super().training_step(*args)\n\n model_checkpoint = ModelCheckpoint(dirpath=tmpdir, save_top_k=1, monitor=\"foo\", mode=mode)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=1,\n limit_val_batches=1,\n callbacks=[model_checkpoint],\n logger=False,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(TestModel())\n expected = float(\"inf\" if mode == \"min\" else \"-inf\")\n assert model_checkpoint.best_model_score == expected\n assert model_checkpoint.current_score == expected\n\n\[email protected](\"use_omegaconf\", [False, pytest.param(True, marks=RunIf(omegaconf=True))])\ndef test_hparams_type(tmpdir, use_omegaconf):\n class TestModel(BoringModel):\n def __init__(self, hparams):\n super().__init__()\n self.save_hyperparameters(hparams)\n\n model_checkpoint = ModelCheckpoint(dirpath=tmpdir, save_top_k=1, monitor=\"foo\")\n trainer = Trainer(\n max_epochs=1,\n default_root_dir=tmpdir,\n limit_train_batches=1,\n limit_val_batches=1,\n callbacks=[model_checkpoint],\n logger=False,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n hp = {\"test_hp_0\": 1, \"test_hp_1\": 2}\n hp = OmegaConf.create(hp) if use_omegaconf else Namespace(**hp)\n model = TestModel(hp)\n trainer.fit(model)\n ckpt = trainer.checkpoint_connector.dump_checkpoint()\n if use_omegaconf:\n assert isinstance(ckpt[model.CHECKPOINT_HYPER_PARAMS_KEY], Container)\n else:\n # make sure it's not AttributeDict\n assert type(ckpt[model.CHECKPOINT_HYPER_PARAMS_KEY]) is dict\n\n\ndef test_ckpt_version_after_rerun_new_trainer(tmpdir):\n \"\"\"Check that previous checkpoints are renamed to have the correct version suffix when new trainer instances\n are used.\"\"\"\n epochs = 2\n for i in range(epochs):\n mc = ModelCheckpoint(dirpath=tmpdir, save_top_k=-1, monitor=\"epoch\", filename=\"{epoch}\")\n trainer = Trainer(\n max_epochs=epochs,\n limit_train_batches=1,\n limit_val_batches=1,\n default_root_dir=tmpdir,\n callbacks=[mc],\n logger=False,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(BoringModel())\n\n # check best_k_models state\n expected = {\"epoch=0-v1.ckpt\", \"epoch=1-v1.ckpt\"} if i else {\"epoch=0.ckpt\", \"epoch=1.ckpt\"}\n assert {Path(f).name for f in mc.best_k_models} == expected\n\n # check created ckpts\n actual = {f.basename for f in tmpdir.listdir()}\n assert actual == {\"epoch=0.ckpt\", \"epoch=1.ckpt\", \"epoch=0-v1.ckpt\", \"epoch=1-v1.ckpt\"}\n\n\ndef test_ckpt_version_after_rerun_same_trainer(tmpdir):\n \"\"\"Check that previous checkpoints are renamed to have the correct version suffix when the same trainer\n instance is used.\"\"\"\n mc = ModelCheckpoint(dirpath=tmpdir, save_top_k=-1, monitor=\"epoch\", filename=\"test\")\n mc.STARTING_VERSION = 9\n trainer = Trainer(\n max_epochs=2,\n limit_train_batches=1,\n limit_val_batches=1,\n default_root_dir=tmpdir,\n callbacks=[mc],\n logger=False,\n 
enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(BoringModel())\n trainer.fit_loop.max_epochs = 4\n trainer.fit(BoringModel())\n\n ckpt_range = range(mc.STARTING_VERSION, trainer.max_epochs + mc.STARTING_VERSION)\n expected = {\"test.ckpt\", *(f\"test-v{i}.ckpt\" for i in ckpt_range)}\n # check best_k_models state\n assert {Path(f).name for f in mc.best_k_models} == expected\n # check created ckpts\n assert set(os.listdir(tmpdir)) == expected\n\n\ndef test_model_checkpoint_mode_options():\n with pytest.raises(MisconfigurationException, match=\"`mode` can be .* but got unknown_option\"):\n ModelCheckpoint(mode=\"unknown_option\")\n\n\ndef test_check_val_every_n_epochs_top_k_integration(tmpdir):\n model = BoringModel()\n mc = ModelCheckpoint(dirpath=tmpdir, monitor=\"epoch\", save_top_k=-1, filename=\"{epoch}\")\n trainer = Trainer(\n default_root_dir=tmpdir,\n limit_train_batches=1,\n limit_val_batches=1,\n num_sanity_val_steps=0,\n max_epochs=5,\n check_val_every_n_epoch=2,\n callbacks=mc,\n enable_model_summary=False,\n logger=False,\n )\n trainer.fit(model)\n assert set(os.listdir(tmpdir)) == {\"epoch=1.ckpt\", \"epoch=3.ckpt\"}\n\n\ndef test_model_checkpoint_saveload_ckpt(tmpdir):\n ckpt = {\n \"monitor\": \"random_value\",\n \"best_model_path\": \"epoch=10-step=1436.ckpt\",\n \"best_model_score\": torch.tensor(2.246),\n \"current_score\": torch.tensor(1.5),\n \"dirpath\": tmpdir,\n \"best_k_models\": {\"epoch=10-step=1436.ckpt\": torch.tensor(2.246)},\n \"kth_best_model_path\": \"epoch=10-step=1436.ckpt\",\n \"kth_value\": torch.tensor(2.246),\n \"last_model_path\": \"last2245.ckpt\",\n }\n\n # test on_save_checkpoint\n cb_write = ModelCheckpoint(dirpath=tmpdir, monitor=\"random_value\", save_top_k=-1, save_last=True)\n for key, val in ckpt.items():\n setattr(cb_write, key, val)\n written_ckpt = cb_write.on_save_checkpoint(\"\", \"\", \"\")\n for state in ckpt:\n assert ckpt[state] == written_ckpt[state]\n\n # test on_load_checkpoint\n # Note: \"current_score\", \"dirpath\" and \"monitor\" are currently not restored by on_load_checkpoint.\n # We therefore set \"dirpath\" and \"monitor\" to something different than for ckpt/cb_write so we can assert them.\n # \"current_score\" is left as initialized, i.e. None, and can therefore also be asserted\n cb_restore = ModelCheckpoint(dirpath=tmpdir + \"restore\", monitor=None, save_top_k=-1, save_last=True)\n cb_restore.on_load_checkpoint(\"\", \"\", written_ckpt)\n for key, val in written_ckpt.items():\n if key not in (\"current_score\", \"dirpath\", \"monitor\"):\n assert getattr(cb_restore, key) == val\n else:\n assert getattr(cb_restore, key) != val\n" ]
[ [ "torch.stack", "torch.optim.lr_scheduler.StepLR", "torch.tensor", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.load", "torch.randn" ] ]
JDACS4C-IMPROVE/TGSA
[ "cdd9903b889112b04325bec9f61935d05d9e9179" ]
[ "heterogeneous_graph.py" ]
[ "import os\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n\nimport torch\nimport numpy as np\nimport pickle\nfrom models.TGDRP import TGDRP\nfrom utils import *\nfrom rdkit import DataStructs,Chem\nfrom rdkit.Chem import AllChem\nfrom scipy.stats import pearsonr\nimport argparse\n\ndir = './data/similarity_augment/'\ndict_dir = './data/similarity_augment/dict/'\nwith open(dict_dir + \"cell_id2idx_dict\", 'rb') as f:\n cell_id2idx_dict = pickle.load(f)\nwith open(dict_dir + \"drug_name_cell_id_ic50\", 'rb') as f:\n drug_name_cell_id_ic50 = pickle.load(f)\nwith open(dict_dir + \"drug_idx_cell_idx_ic50\", 'rb') as f:\n drug_idx_cell_idx_ic50 = pickle.load(f)\nwith open(dict_dir + \"drug_name2smiles_dict\", 'rb') as f:\n drug_name2smiles_dict = pickle.load(f)\nwith open(dict_dir + \"drug_idx2smiles_dict\", 'rb') as f:\n drug_idx2smiles_dict = pickle.load(f)\nwith open(dict_dir + \"drug_name2idx_dict\", 'rb') as f:\n drug_name2idx_dict = pickle.load(f)\nwith open(dict_dir + \"cell_idx2id_dict\", 'rb') as f:\n cell_idx2id_dict = pickle.load(f)\nwith open(dict_dir + \"drug_idx2name_dict\", 'rb') as f:\n drug_idx2name_dict = pickle.load(f)\nwith open(dict_dir + \"cell_feature_normalized\", 'rb') as f:\n cell_feature_normalized = pickle.load(f)\nwith open(dict_dir + \"cell_feature\", 'rb') as f:\n cell_feature = pickle.load(f)\n\ndef arg_parse():\n parser = argparse.ArgumentParser()\n parser.add_argument('--seed', type=int, default=42,\n help='random seed (default: 42)')\n parser.add_argument('--device', type=str, default='cuda:7',\n help='device')\n parser.add_argument('--knn', type=int, default=5,\n help='knn')\n parser.add_argument('--batch_size', type=int, default=128,\n help='batch size (default: 128)')\n parser.add_argument('--lr', type=float, default=0.0001,\n help='learning rate (default: 0.0001)')\n parser.add_argument('--layer_drug', type=int, default=3, help='layer for drug')\n parser.add_argument('--dim_drug', type=int, default=128, help='hidden dim for drug')\n parser.add_argument('--layer', type=int, default=2, help='number of GNN layer')\n parser.add_argument('--hidden_dim', type=int, default=8, help='hidden dim for cell')\n parser.add_argument('--weight_decay', type=float, default=0,\n help='weight decay')\n parser.add_argument('--dropout_ratio', type=float, default=0.2,\n help='dropout ratio')\n parser.add_argument('--epochs', type=int, default=300,\n help='maximum number of epochs (default: 300)')\n parser.add_argument('--patience', type=int, default=10,\n help='patience for earlystopping (default: 10)')\n parser.add_argument('--edge', type=str, default='PPI_0.95', help='edge for gene graph')\n parser.add_argument('--mode', type=str, default='train', help='train or test')\n parser.add_argument('--pretrain', type=int, default=0, help='pretrain')\n parser.add_argument('--weight_path', type=str, default='',\n help='filepath for pretrained weights')\n\n return parser.parse_args(args=[])\n\n\n \ndef computing_sim_matrix():\n if os.path.exists(dict_dir + \"cell_sim_matrix\") and os.path.exists(dict_dir + \"drug_sim_matrix\"):\n with open(dict_dir+ \"cell_sim_matrix\", 'rb') as f:\n cell_sim_matrix = pickle.load(f)\n with open(dict_dir+ \"drug_sim_matrix\", 'rb') as f:\n drug_sim_matrix = pickle.load(f)\n return drug_sim_matrix, cell_sim_matrix\n drug_sim_matrix = np.zeros((len(drug_name2idx_dict), len(drug_name2idx_dict)))\n mi = [Chem.MolFromSmiles(drug_idx2smiles_dict[i]) for i in range(len(drug_name2idx_dict))]\n fps = [AllChem.GetMorganFingerprint(x, 4) for x in mi]\n for i 
in range(len(drug_name2idx_dict)):\n for j in range(len(drug_name2idx_dict)):\n if i != j:\n drug_sim_matrix[i][j] = DataStructs.DiceSimilarity(fps[i],fps[j])\n\n cell_sim_matrix = np.zeros((len(cell_id2idx_dict), len(cell_id2idx_dict)))\n for i in range(len(cell_id2idx_dict)):\n for j in range(len(cell_id2idx_dict)):\n if i != j:\n cell_sim_matrix[i][j], _ = pearsonr(cell_feature_normalized[i], cell_feature_normalized[j])\n if cell_sim_matrix[i][j] < 0:\n cell_sim_matrix[i][j] = 0\n with open(dict_dir+ \"cell_sim_matrix\", 'wb') as f:\n pickle.dump(cell_sim_matrix, f)\n with open(dict_dir+ \"drug_sim_matrix\", 'wb') as f:\n pickle.dump(drug_sim_matrix, f)\n return drug_sim_matrix, cell_sim_matrix\n\ndef computing_knn(k):\n drug_sim_matrix, cell_sim_matrix = computing_sim_matrix()\n cell_sim_matrix_new = np.zeros_like(cell_sim_matrix)\n for u in range(len(cell_id2idx_dict)):\n v = cell_sim_matrix[u].argsort()[-6:-1]\n cell_sim_matrix_new[u][v] = cell_sim_matrix[u][v]\n drug_sim_matrix_new = np.zeros_like(drug_sim_matrix)\n for u in range(len(drug_name2idx_dict)):\n v = drug_sim_matrix[u].argsort()[-6:-1]\n drug_sim_matrix_new[u][v] = drug_sim_matrix[u][v]\n drug_edges = np.argwhere(drug_sim_matrix_new > 0)\n cell_edges = np.argwhere(cell_sim_matrix_new > 0)\n with open(dir + \"edge/drug_cell_edges_{}_knn\".format(k), 'wb') as f:\n pickle.dump((drug_edges, cell_edges), f)\n\n\nif __name__ == '__main__':\n computing_knn(5)" ]
[ [ "numpy.zeros_like", "scipy.stats.pearsonr", "numpy.argwhere" ] ]
hellloxiaotian/DudeNet
[ "ec46e6a6bd8f3cdefdb1aeb9eb74e3a961bd0266" ]
[ "gray/dataset.py" ]
[ "import os\nimport os.path\nimport numpy as np\nimport random\nimport h5py\nimport torch\nimport cv2\nimport glob\nimport torch.utils.data as udata\nfrom utils import data_augmentation\n\ndef normalize(data):\n return data/255.\n\ndef Im2Patch(img, win, stride=1):\n k = 0\n endc = img.shape[0]\n endw = img.shape[1]\n endh = img.shape[2]\n patch = img[:, 0:endw-win+0+1:stride, 0:endh-win+0+1:stride]\n TotalPatNum = patch.shape[1] * patch.shape[2]\n Y = np.zeros([endc, win*win,TotalPatNum], np.float32)\n for i in range(win):\n for j in range(win):\n patch = img[:,i:endw-win+i+1:stride,j:endh-win+j+1:stride]\n Y[:,k,:] = np.array(patch[:]).reshape(endc, TotalPatNum)\n k = k + 1\n return Y.reshape([endc, win, win, TotalPatNum])\n\ndef prepare_data(data_path, patch_size, stride, aug_times=1):\n # train\n print('process training data')\n scales = [1, 0.9, 0.8, 0.7]\n #files = glob.glob(os.path.join(data_path, 'train', '*'))\n files = glob.glob(os.path.join(data_path, 'train', '*'))\n files.sort()\n h5f = h5py.File('train.h5', 'w')\n train_num = 0\n for i in range(len(files)):\n img = cv2.imread(files[i])\n h, w, c = img.shape\n for k in range(len(scales)):\n Img = cv2.resize(img, (int(h*scales[k]), int(w*scales[k])), interpolation=cv2.INTER_CUBIC)\n Img = np.expand_dims(Img[:,:,0].copy(), 0)\n Img = np.float32(normalize(Img))\n patches = Im2Patch(Img, win=patch_size, stride=stride)\n print(\"file: %s scale %.1f # samples: %d\" % (files[i], scales[k], patches.shape[3]*aug_times))\n for n in range(patches.shape[3]):\n data = patches[:,:,:,n].copy()\n h5f.create_dataset(str(train_num), data=data)\n train_num += 1\n for m in range(aug_times-1):\n data_aug = data_augmentation(data, np.random.randint(1,8))\n h5f.create_dataset(str(train_num)+\"_aug_%d\" % (m+1), data=data_aug)\n train_num += 1\n h5f.close()\n # val\n print('\\nprocess validation data')\n #files.clear()\n files = glob.glob(os.path.join(data_path, 'Set68', '*.png'))\n files.sort()\n h5f = h5py.File('val.h5', 'w')\n val_num = 0\n for i in range(len(files)):\n print(\"file: %s\" % files[i])\n img = cv2.imread(files[i])\n img = np.expand_dims(img[:,:,0], 0)\n img = np.float32(normalize(img))\n h5f.create_dataset(str(val_num), data=img)\n val_num += 1\n h5f.close()\n print('training set, # samples %d\\n' % train_num)\n print('val set, # samples %d\\n' % val_num)\n\nclass Dataset(udata.Dataset):\n def __init__(self, train=True):\n super(Dataset, self).__init__()\n self.train = train\n if self.train:\n h5f = h5py.File('train.h5', 'r')\n else:\n h5f = h5py.File('val.h5', 'r')\n self.keys = list(h5f.keys())\n random.shuffle(self.keys)\n h5f.close()\n def __len__(self):\n return len(self.keys)\n def __getitem__(self, index):\n if self.train:\n h5f = h5py.File('train.h5', 'r')\n else:\n h5f = h5py.File('val.h5', 'r')\n key = self.keys[index]\n data = np.array(h5f[key])\n h5f.close()\n return torch.Tensor(data)\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.random.randint", "torch.Tensor", "numpy.expand_dims" ] ]
WipadaChan/AzureMachineLearningGallery
[ "43b089074fc7090ab6449c1f29e29522af5ecdde" ]
[ "components/naive_bayes/naive_bayes_score_eval/naive_bayes_score_eval.py" ]
[ "import os\nimport sys\nimport argparse\nimport pandas as pd\nimport numpy as np\nfrom pathlib import Path\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import Pipeline\nfrom azureml.studio.core.io.data_frame_directory import load_data_frame_from_directory, save_data_frame_to_directory\nimport pickle\nfrom sklearn import metrics\n#import xgboost as xgb\n\n## Parse args\nparser = argparse.ArgumentParser(\"MultinomialNBEvaluation\")\nparser.add_argument(\"--Evaluation_Data\", type=str, help=\"Evaluation dataset.\")\nparser.add_argument(\"--Lable_Col\", type=str, default='None', help=\"Lable column in the evaluation dataset.\")\nparser.add_argument(\"--Action_Type\", type=str, default='Score And Evaluate', help=\"Select action type\")\nparser.add_argument(\"--Model_Path\", type=str, help=\"Path where contains model file.\")\nparser.add_argument(\"--Model_FileName\", type=str, help=\"Name of the model file.\")\nparser.add_argument(\"--Evaluation_Output\", type=str, help=\"Evaluation result\")\nargs = parser.parse_args()\n\n## Load data from DataFrameDirectory to Pandas DataFrame\nevaluation_df = load_data_frame_from_directory(args.Evaluation_Data).data\n\n## Prepare evaluation data\nevaluation_df_features = evaluation_df[[c for c in evaluation_df.columns if c!=args.Lable_Col]]\nresult = pd.Series() if evaluation_df_features.empty else evaluation_df_features.iloc[:,0]\n\n#vect = CountVectorizer()\n#vect.fit(result)\n#X_eval_dtm = vect.transform(result)\n\n## Load model\nos.makedirs(args.Model_Path, exist_ok=True)\nf = open(args.Model_Path + \"/\" + args.Model_FileName, 'rb')\npipe= pickle.load(f)\n\n\nif args.Action_Type == 'Score And Evaluate':\n\t## Evaluation\n\tevaluation_df_lable = evaluation_df[args.Lable_Col].squeeze()\n\tpreds = pipe.predict(result)\n\tprint(\"Accuracy Metric is \",metrics.accuracy_score(evaluation_df_lable, preds))\n\tprint(\"Confusion Matrix: \\n\",metrics.confusion_matrix(evaluation_df_lable, preds))\n\t## Output evaluation result\n\tevaluation_df_features['Predict Result'] = pd.DataFrame(preds.T)\nelse: \n\tpreds = pipe.predict(result)\n\tevaluation_df_features['Predict Result'] = pd.DataFrame(preds.T)\n\nos.makedirs(args.Evaluation_Output, exist_ok=True)\nsave_data_frame_to_directory(args.Evaluation_Output, evaluation_df_features)\n\n\n\n" ]
[ [ "pandas.DataFrame", "sklearn.metrics.confusion_matrix", "sklearn.metrics.accuracy_score", "pandas.Series" ] ]
waiting-gy/Caltech_Pedestrian
[ "bd57a85a5fd4965616fe52f20a990abe8e28dda8" ]
[ "data/coco.py" ]
[ "from .config import HOME\nimport os\nimport os.path as osp\nimport sys\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport cv2\nimport numpy as np\n\n#COCO_ROOT = osp.join(HOME, 'data/coco/')\n#COCO_ROOT = \"/mnt/Younggao/coco/\"\nCOCO_ROOT = \"/kaggle/input/CaltechPedestrian/coco\"\nIMAGES = 'images'\nANNOTATIONS = 'annotations'\nCOCO_API = 'PythonAPI'\nINSTANCES_SET = 'instances_{}.json'\nCOCO_CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n 'train', 'truck', 'boat', 'traffic light', 'fire', 'hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',\n 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',\n 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n 'keyboard', 'cell phone', 'microwave oven', 'toaster', 'sink',\n 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n 'teddy bear', 'hair drier', 'toothbrush')\n\n\ndef get_label_map(label_file):\n label_map = {}\n #labels = open(label_file, 'r')\n #labels = open(\"/mnt/Younggao/Caltech_DataDetection/data/coco_labels.txt\", 'r')\n labels = open(\"/kaggle/working/Caltech_Pedestrian/data/coco_labels.txt\", 'r')\n for line in labels:\n ids = line.split(',')\n label_map[int(ids[0])] = int(ids[1])\n return label_map\n\n\nclass COCOAnnotationTransform(object):\n \"\"\"Transforms a COCO annotation into a Tensor of bbox coords and label index\n Initilized with a dictionary lookup of classnames to indexes\n \"\"\"\n def __init__(self):\n self.label_map = get_label_map(osp.join(COCO_ROOT, 'coco_labels.txt'))\n\n def __call__(self, target, width, height):\n \"\"\"\n Args:\n target (dict): COCO target json annotation as a python dict\n height (int): height\n width (int): width\n Returns:\n a list containing lists of bounding boxes [bbox coords, class idx]\n \"\"\"\n scale = np.array([width, height, width, height])\n res = []\n for obj in target:\n if 'bbox' in obj:\n bbox = obj['bbox']\n bbox[2] += bbox[0]\n bbox[3] += bbox[1]\n label_idx = self.label_map[obj['category_id']] - 1\n final_box = list(np.array(bbox)/scale)\n final_box.append(label_idx)\n res += [final_box] # [xmin, ymin, xmax, ymax, label_idx]\n else:\n print(\"no bbox problem!\")\n\n return res # [[xmin, ymin, xmax, ymax, label_idx], ... 
]\n\n\nclass COCODetection(data.Dataset):\n \"\"\"`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.\n Args:\n root (string): Root directory where images are downloaded to.\n set_name (string): Name of the specific set of COCO images.\n transform (callable, optional): A function/transform that augments the\n raw images`\n target_transform (callable, optional): A function/transform that takes\n in the target (bbox) and transforms it.\n \"\"\"\n\n def __init__(self, root, image_set='trainval35k', transform=None,\n target_transform=COCOAnnotationTransform(), dataset_name='MS COCO'):\n sys.path.append(osp.join(root, COCO_API))\n from pycocotools.coco import COCO\n self.root = osp.join(root, IMAGES, image_set)\n self.coco = COCO(osp.join(root, ANNOTATIONS,\n INSTANCES_SET.format(image_set)))\n self.ids = list(self.coco.imgToAnns.keys())\n self.transform = transform\n self.target_transform = target_transform\n self.name = dataset_name\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, target).\n target is the object returned by ``coco.loadAnns``.\n \"\"\"\n im, gt, h, w = self.pull_item(index)\n return im, gt\n\n def __len__(self):\n return len(self.ids)\n\n def pull_item(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, target, height, width).\n target is the object returned by ``coco.loadAnns``.\n \"\"\"\n img_id = self.ids[index]\n target = self.coco.imgToAnns[img_id]\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\n\n target = self.coco.loadAnns(ann_ids)\n path = osp.join(self.root, self.coco.loadImgs(img_id)[0]['file_name'])\n assert osp.exists(path), 'Image path does not exist: {}'.format(path)\n img = cv2.imread(osp.join(self.root, path))\n height, width, _ = img.shape\n if self.target_transform is not None:\n target = self.target_transform(target, width, height)\n if self.transform is not None:\n target = np.array(target)\n img, boxes, labels = self.transform(img, target[:, :4],\n target[:, 4])\n # to rgb\n img = img[:, :, (2, 1, 0)]\n\n target = np.hstack((boxes, np.expand_dims(labels, axis=1)))\n return torch.from_numpy(img).permute(2, 0, 1), target, height, width\n\n def pull_image(self, index):\n '''Returns the original image object at index in PIL form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n cv2 img\n '''\n img_id = self.ids[index]\n path = self.coco.loadImgs(img_id)[0]['file_name']\n return cv2.imread(osp.join(self.root, path), cv2.IMREAD_COLOR)\n\n def pull_anno(self, index):\n '''Returns the original annotation of image at index\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to get annotation of\n Return:\n list: [img_id, [(label, bbox coords),...]]\n eg: ('001718', [('dog', (96, 13, 438, 332))])\n '''\n img_id = self.ids[index]\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\n return self.coco.loadAnns(ann_ids)\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(tmp, 
self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n" ]
[ [ "numpy.array", "numpy.expand_dims", "torch.from_numpy" ] ]
tejasurya/Text_Classification_using_Neural_Networks
[ "d4852780e6c86843aee768d306d19428c8cb9c7f" ]
[ "Simple Text classifiers/Text Classification on Brown Corpus/B_classifier-Seq1.py" ]
[ "from numpy import asarray\nfrom numpy import zeros\nimport pandas as pd\nimport os\nfrom keras.datasets import reuters\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras import backend as K\nfrom keras.utils.generic_utils import get_custom_objects\nfrom keras.models import Sequential,Model\nfrom keras.layers import Dense\nfrom keras.layers import Flatten,Input\nfrom keras.layers import Dropout\nfrom keras.layers import GRU,CuDNNGRU,Reshape,maximum\nfrom keras.layers import Bidirectional,Concatenate\nfrom keras.layers import Conv1D\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers import MaxPooling1D\nfrom keras.layers import MaxPool2D\nfrom keras.layers import Embedding\nfrom keras.layers.merge import concatenate\nfrom collections import defaultdict\nfrom nltk.corpus import brown,stopwords\nimport random\nimport nltk\nimport numpy as np\nfrom sklearn.datasets import fetch_20newsgroups\n\n#Custom Activation function\nfrom keras import backend as K\nfrom keras.utils.generic_utils import get_custom_objects\nimport math as m\nfrom keras.callbacks import Callback\nfrom sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score\nclass Metrics(Callback):\n def on_train_begin(self, logs={}):\n self.val_f1s = []\n self.val_recalls = []\n self.val_precisions = []\n \n def on_epoch_end(self, epoch, logs={}):\n val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()\n val_targ = self.validation_data[1]\n _val_f1 = f1_score(val_targ, val_predict, average='micro')\n _val_recall = recall_score(val_targ, val_predict, average='micro')\n _val_precision = precision_score(val_targ, val_predict, average='micro')\n self.val_f1s.append(_val_f1)\n self.val_recalls.append(_val_recall)\n self.val_precisions.append(_val_precision)\n print (\" — val_f1: %f — val_precision: %f — val_recall %f\" %(_val_f1, _val_precision, _val_recall))\n return\n \nmetriczs = Metrics()\n\nbatch_size=32\nembedding_size=128\nnclass=15\n\n# Convolution\nkernel_size = 5\nfilters1 = 64\nfilters2 =128\nfilters3=256\nfilters4=512\nfilters5=1024\npool_size = 4\n\n# GRU\ngru_output_size = 70\n#LSTM\nlstm_output_size = 70\n\ntrim_len=200\nsample_cnt=500\n\ndataset = [] # 500 samples\n\nfor category in brown.categories():\n for fileid in brown.fileids(category):\n dataset.append((brown.words(fileids = fileid),category))\n\ndataset = [([w.lower() for w in text],category) for text,category in dataset]\n\nlabels=[]\nfor sample in dataset:\n labels.append(sample[1])\n\ninputset=[]\nfor sample in dataset:\n inputset.append(' '.join(sample[0]))\nip=inputset\ncateg=brown.categories()\n\nlabel_class=[]\nfor x in labels:\n label_class.append(categ.index(x))\n\nlen_finder=[]\nfor dat in inputset:\n len_finder.append(len(dat))\n\n\n#Splitting train and test\n \ninput_train=[]\ninput_test=[]\ninput_valid=[]\nj=0;\nfor zz in ip:\n j=j+1\n if (j%5 is 0):\n input_test.append(zz)\n elif(j%5 is 1):\n input_valid.append(zz)\n else:\n input_train.append(zz)\n\n \nlabel_train=[]\nlabel_test=[]\nlabel_valid=[]\nj=0;\nfor zz in label_class:\n j=j+1\n if (j%5 is 0):\n label_test.append(zz)\n elif(j%5 is 1):\n label_valid.append(zz)\n else:\n label_train.append(zz)\n \n \n#one hot encoding\n\ni=0\ny=np.zeros((len(label_class),max(label_class)+1))\nfor x in label_class:\n y[i][x]=1\n i=i+1\n\n\ni=0\ny_train=np.zeros((len(label_train),max(label_train)+1))\nfor x in label_train:\n y_train[i][x]=1\n 
i=i+1\n\ni=0\ny_test=np.zeros((len(label_test),max(label_test)+1))\nfor x in label_test:\n y_test[i][x]=1\n i=i+1\n\ni=0\ny_valid=np.zeros((len(label_valid),max(label_valid)+1))\nfor x in label_valid:\n y_valid[i][x]=1\n i=i+1\n\nt = Tokenizer()\nt.fit_on_texts(input_train)\nvocab_size = len(t.word_index) + 1\n# integer encode the documents\nencoded_docs = t.texts_to_sequences(input_train)\n#print(encoded_docs)\n# pad documents to a max length of 4 words\nmax_length = max(len_finder)\npadded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')\n#print(padded_docs)\n# load the whole embedding into memory\nembeddings_index = dict()\nf = open(\"G:\\\\NLP\\\\Dataset\\\\GloVe\\\\glove.6B.100d.txt\", encoding=\"utf8\")\nfor line in f:\n values = line.split()\n word = values[0]\n coefs = asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\nf.close()\n#print('Loaded %s word vectors.' % len(embeddings_index))\n# create a weight matrix for words in training docs\nembedding_matrix = zeros((vocab_size, 100))\nfor word, i in t.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n\n#Validating the model\nvt = Tokenizer()\nvt.fit_on_texts(input_valid)\nvvocab_size = len(vt.word_index) + 1\n# integer encode the documents\nvencoded_docs = vt.texts_to_sequences(input_valid)\n#print(encoded_docs)\n# pad documents to a max length of 4 words\nvpadded_docs = pad_sequences(vencoded_docs, maxlen=max_length, padding='post')\n#print(padded_docs)\n\n\n\n#Testing the model\ntt = Tokenizer()\ntt.fit_on_texts(input_test)\ntvocab_size = len(tt.word_index) + 1\n# integer encode the documents\ntencoded_docs = tt.texts_to_sequences(input_test)\n#print(encoded_docs)\n# pad documents to a max length of 4 words\ntpadded_docs = pad_sequences(tencoded_docs, maxlen=max_length, padding='post')\n#print(padded_docs)\n\n\n# define model \nmodel = Sequential()\ne = Embedding(vocab_size, 100, weights=[embedding_matrix], input_length=max_length, trainable=False)\nmodel.add(e)\nmodel.add(Conv1D(64,kernel_size,padding='valid',activation='relu',strides=1))\nmodel.add(MaxPooling1D(pool_size=pool_size))\nmodel.add(Conv1D(512,kernel_size,padding='valid',activation='relu',strides=1))\nmodel.add(MaxPooling1D(pool_size=pool_size))\nmodel.add(Bidirectional(GRU(gru_output_size, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)))\nmodel.add(Bidirectional(LSTM(gru_output_size)))\nmodel.add(Dense(nclass, activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['top_k_categorical_accuracy'])\nprint(model.summary())\nmodel.fit(padded_docs,y_train, nb_epoch=10, batch_size=64, validation_data=(vpadded_docs, y_valid), callbacks=[metriczs])\nprint('Model built successfully...Please wait.....Evaluating......')\n# Final evaluation of the model\nscores = model.evaluate(tpadded_docs, y_test)\nprint(\"Loss: %.2f%%\" % (scores[0]*100))\nprint(\"Accuracy: %.2f%%\" % (scores[1]*100))\n" ]
[ [ "numpy.asarray", "numpy.zeros", "sklearn.metrics.precision_score", "sklearn.metrics.f1_score", "sklearn.metrics.recall_score" ] ]
thieu1995/mealpy
[ "7694c18e1514909f6727163a3e0899dd36822867" ]
[ "mealpy/math_based/SCA.py" ]
[ "#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 17:44, 18/03/2020 %\n# %\n# Email: [email protected] %\n# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %\n# Github: https://github.com/thieu1995 %\n#-------------------------------------------------------------------------------------------------------%\n\nimport concurrent.futures as parallel\nfrom functools import partial\nimport numpy as np\nfrom mealpy.optimizer import Optimizer\n\n\nclass BaseSCA(Optimizer):\n \"\"\"\n The original version of: Sine Cosine Algorithm (SCA)\n A Sine Cosine Algorithm for solving optimization problems\n Link:\n https://doi.org/10.1016/j.knosys.2015.12.022\n https://www.mathworks.com/matlabcentral/fileexchange/54948-sca-a-sine-cosine-algorithm\n Notes:\n + I changed the flow as well as the equations.\n + Removed third loop for faster computational time\n + Batch size ideas\n \"\"\"\n\n def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):\n \"\"\"\n Args:\n epoch (int): maximum number of iterations, default = 10000\n pop_size (int): number of population size, default = 100\n \"\"\"\n super().__init__(problem, kwargs)\n self.nfe_per_epoch = pop_size\n self.sort_flag = True\n\n self.epoch = epoch\n self.pop_size = pop_size\n\n def create_child(self, idx, pop, g_best, epoch):\n # Eq 3.4, r1 decreases linearly from a to 0\n a = 2.0\n r1 = a - (epoch + 1) * (a / self.epoch)\n # Update r2, r3, and r4 for Eq. (3.3), remove third loop here\n r2 = 2 * np.pi * np.random.uniform(0, 1, self.problem.n_dims)\n r3 = 2 * np.random.uniform(0, 1, self.problem.n_dims)\n # Eq. 3.3, 3.1 and 3.2\n pos_new1 = pop[idx][self.ID_POS] + r1 * np.sin(r2) * abs(r3 * g_best[self.ID_POS] - pop[idx][self.ID_POS])\n pos_new2 = pop[idx][self.ID_POS] + r1 * np.cos(r2) * abs(r3 * g_best[self.ID_POS] - pop[idx][self.ID_POS])\n pos_new = np.where(np.random.uniform(0, 1, self.problem.n_dims) < 0.5, pos_new1, pos_new2)\n # Check the bound\n pos_new = self.amend_position_random(pos_new)\n fit_new = self.get_fitness_position(pos_new)\n if self.compare_agent([pos_new, fit_new], pop[idx]):\n return [pos_new, fit_new]\n return pop[idx].copy()\n\n # ## Update the global best\n # if self.batch_idea:\n # if (i + 1) % self.batch_size == 0:\n # g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n # else:\n # if (i + 1) * self.pop_size == 0:\n # g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n #\n\n def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):\n \"\"\"\n Args:\n mode (str): 'sequential', 'thread', 'process'\n + 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)\n + 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)\n + 'process': recommended for hard and big task (> 2 minutes for calculating objective)\n\n Returns:\n [position, fitness value]\n \"\"\"\n pop_copy = pop.copy()\n pop_idx = np.array(range(0, self.pop_size))\n\n if mode == \"thread\":\n with parallel.ThreadPoolExecutor() as executor:\n pop_child = executor.map(partial(self.create_child, pop=pop_copy, g_best=g_best, epoch=epoch), pop_idx)\n pop_new = [x for x in pop_child]\n elif mode == \"process\":\n with parallel.ProcessPoolExecutor() as executor:\n pop_child = executor.map(partial(self.create_child, pop=pop_copy, g_best=g_best, epoch=epoch), pop_idx)\n pop_new = [x for x in 
pop_child]\n else:\n pop_new = [self.create_child(idx, pop_copy, g_best, epoch) for idx in pop_idx]\n return pop_new\n\n\nclass OriginalSCA(BaseSCA):\n \"\"\"\n Original version of: Sine Cosine Algorithm (SCA)\n A Sine Cosine Algorithm for solving optimization problems\n Link:\n https://doi.org/10.1016/j.knosys.2015.12.022\n https://www.mathworks.com/matlabcentral/fileexchange/54948-sca-a-sine-cosine-algorithm\n \"\"\"\n\n def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):\n \"\"\"\n Args:\n epoch (int): maximum number of iterations, default = 10000\n pop_size (int): number of population size, default = 100\n \"\"\"\n super().__init__(problem, epoch, pop_size, **kwargs)\n self.nfe_per_epoch = pop_size\n self.sort_flag = False\n\n def create_child(self, idx, pop, g_best, epoch):\n # Eq 3.4, r1 decreases linearly from a to 0\n a = 2.0\n r1 = a - (epoch + 1) * (a / self.epoch)\n pos_new = pop[idx][self.ID_POS].copy()\n for j in range(self.problem.n_dims): # j-th dimension\n # Update r2, r3, and r4 for Eq. (3.3)\n r2 = 2 * np.pi * np.random.uniform()\n r3 = 2 * np.random.uniform()\n r4 = np.random.uniform()\n # Eq. 3.3, 3.1 and 3.2\n if r4 < 0.5:\n pos_new[j] = pos_new[j] + r1 * np.sin(r2) * abs(r3 * g_best[self.ID_POS][j] - pos_new[j])\n else:\n pos_new[j] = pos_new[j] + r1 * np.cos(r2) * abs(r3 * g_best[self.ID_POS][j] - pos_new[j])\n # Check the bound\n pos_new = self.amend_position_random(pos_new)\n fit_new = self.get_fitness_position(pos_new)\n return [pos_new, fit_new]\n\n#\n# class FasterSCA(BaseSCA):\n# \"\"\"\n# A Sine Cosine Algorithm for solving optimization problems (SCA)\n# Link:\n# https://doi.org/10.1016/j.knosys.2015.12.022\n# https://www.mathworks.com/matlabcentral/fileexchange/54948-sca-a-sine-cosine-algorithm\n#\n# This is my version of SCA. The original version of SCA is not working. So I changed the flow of algorithm\n# \"\"\"\n# def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):\n# \"\"\"\n# Args:\n# epoch (int): maximum number of iterations, default = 10000\n# pop_size (int): number of population size, default = 100\n# \"\"\"\n# super().__init__(problem, epoch, pop_size, **kwargs)\n# self.nfe_per_epoch = pop_size\n# self.sort_flag = False\n#\n# def create_solution(self, minmax=0):\n# position = uniform(self.lb, self.ub)\n# fitness = self.get_fitness_position(position=position, minmax=minmax)\n# return {0: position, 1: fitness}\n#\n# def train(self):\n# pop = {i: self.create_solution() for i in range(self.pop_size)}\n# pop_sorted = {k: v for k, v in sorted(pop.items(), key=lambda encoded: encoded[1][self.ID_FIT])}\n# g_best = next(iter(pop_sorted.values()))\n#\n# for epoch in range(self.epoch):\n# # Update the position of solutions with respect to destination\n# for i, (idx, item) in enumerate(pop.items()): # i-th position\n# # Eq 3.4, r1 decreases linearly from a to 0\n# a = 2.0\n# r1 = a - (epoch + 1) * (a / self.epoch)\n# # Update r2, r3, and r4 for Eq. (3.3), remove third loop here\n# r2 = 2 * pi * uniform(0, 1, self.problem_size)\n# r3 = 2 * uniform(0, 1, self.problem_size)\n# # Eq. 
3.3, 3.1 and 3.2\n# pos_new1 = pop[i][self.ID_POS] + r1 * sin(r2) * abs(r3 * g_best[self.ID_POS] - pop[i][self.ID_POS])\n# pos_new2 = pop[i][self.ID_POS] + r1 * cos(r2) * abs(r3 * g_best[self.ID_POS] - pop[i][self.ID_POS])\n# pos_new = where(uniform(0, 1, self.problem_size) < 0.5, pos_new1, pos_new2)\n# # Check the bound\n# pos_new = self.amend_position_random(pos_new)\n# fit = self.get_fitness_position(pos_new)\n# if fit < item[self.ID_FIT]: # My improved part\n# pop[idx] = {0: pos_new, 1: fit}\n#\n# ## Update the global best\n# if self.batch_idea:\n# if (i + 1) % self.batch_size == 0:\n# pop_sorted = {k: v for k, v in sorted(pop.items(), key=lambda encoded: encoded[1][self.ID_FIT])}\n# current_best = next(iter(pop_sorted.values()))\n# if current_best[self.ID_FIT] < g_best[self.ID_FIT]:\n# g_best = deepcopy(current_best)\n# else:\n# if (i + 1) * self.pop_size == 0:\n# pop_sorted = {k: v for k, v in sorted(pop.items(), key=lambda encoded: encoded[1][self.ID_FIT])}\n# current_best = next(iter(pop_sorted.values()))\n# if current_best[self.ID_FIT] < g_best[self.ID_FIT]:\n# g_best = deepcopy(current_best)\n#\n# self.loss_train.append(g_best[self.ID_FIT])\n# if self.verbose:\n# print(\"> Epoch: {}, Best fit: {}\".format(epoch + 1, g_best[self.ID_FIT]))\n# self.solution = g_best\n# return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train\n#\n#\n# class FastestSCA(Root):\n# \"\"\"\n# A Sine Cosine Algorithm for solving optimization problems (SCA)\n# Link:\n# https://doi.org/10.1016/j.knosys.2015.12.022\n# https://www.mathworks.com/matlabcentral/fileexchange/54948-sca-a-sine-cosine-algorithm\n#\n# This is my version of SCA. The original version of SCA is not working. So I changed the flow of algorithm\n# \"\"\"\n#\n# def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, **kwargs):\n# super().__init__(obj_func, lb, ub, verbose, kwargs)\n# self.epoch = epoch\n# self.pop_size = pop_size\n#\n# def create_solution(self, minmax=0):\n# position = uniform(self.lb, self.ub)\n# fitness = self.get_fitness_position(position=position, minmax=minmax)\n# return {0: position, 1: fitness}\n#\n# def train(self):\n# pop = [self.create_solution() for i in range(self.pop_size)]\n# g_best = self.get_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)\n#\n# for epoch in range(self.epoch):\n# # Update the position of solutions with respect to destination\n# for i in range(self.pop_size): # i-th position\n# # Eq 3.4, r1 decreases linearly from a to 0\n# a = 2.0\n# r1 = a - (epoch + 1) * (a / self.epoch)\n# # Update r2, r3, and r4 for Eq. (3.3), remove third loop here\n# r2 = 2 * pi * uniform(0, 1, self.problem_size)\n# r3 = 2 * uniform(0, 1, self.problem_size)\n# # Eq. 
3.3, 3.1 and 3.2\n# pos_new1 = pop[i][self.ID_POS] + r1 * sin(r2) * abs(r3 * g_best[self.ID_POS] - pop[i][self.ID_POS])\n# pos_new2 = pop[i][self.ID_POS] + r1 * cos(r2) * abs(r3 * g_best[self.ID_POS] - pop[i][self.ID_POS])\n# pos_new = where(uniform(0, 1, self.problem_size) < 0.5, pos_new1, pos_new2)\n# # Check the bound\n# pos_new = self.amend_position_random(pos_new)\n# fit = self.get_fitness_position(pos_new)\n# if fit < pop[i][self.ID_FIT]: # My improved part\n# pop[i] = {0: pos_new, 1: fit}\n#\n# ## Update the global best\n# if self.batch_idea:\n# if (i + 1) % self.batch_size == 0:\n# g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n# else:\n# if (i + 1) * self.pop_size == 0:\n# g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)\n#\n# self.loss_train.append(g_best[self.ID_FIT])\n# if self.verbose:\n# print(\"> Epoch: {}, Best fit: {}\".format(epoch + 1, g_best[self.ID_FIT]))\n# self.solution = g_best\n# return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train\n#\n" ]
[ [ "numpy.sin", "numpy.random.uniform", "numpy.cos" ] ]
liyang990803/CSCI-103
[ "6f84fbc242be90f7a9c3a58bdcc6f54352e4ae5a" ]
[ "CS231n - Convolutional Neural Networks for Visual Recognition/Assignments/assignment1/cs231n/classifiers/linear_svm.py" ]
[ "import numpy as np\nfrom random import shuffle\n\ndef svm_loss_naive(W, X, y, reg):\n \"\"\"\n Structured SVM loss function, naive implementation (with loops).\n\n Inputs have dimension D, there are C classes, and we operate on minibatches\n of N examples.\n\n Inputs:\n - W: A numpy array of shape (D, C) containing weights.\n - X: A numpy array of shape (N, D) containing a minibatch of data.\n - y: A numpy array of shape (N,) containing training labels; y[i] = c means\n that X[i] has label c, where 0 <= c < C.\n - reg: (float) regularization strength\n\n Returns a tuple of:\n - loss as single float\n - gradient with respect to weights W; an array of same shape as W\n \"\"\"\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n num_pixels = X.shape[1]\n loss = 0.0\n for i in range(num_train):\n # compute the score for ith training example \n # by multiplying each pixel with its corresponding weight \n # so that scores: A numpy array of shape (C, ) \n scores = X[i].dot(W)\n # as y[i] is the correct label, scores[y[i]] is the correct class's score\n correct_class_score = scores[y[i]]\n for j in range(num_classes):\n if j == y[i]:\n continue\n # if j is not the correct class, we compute the margin according to formula\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n # we add loss += max(0, margin)\n if margin > 0:\n loss += margin\n # reference from: https://cs231n.github.io/optimization-1/#analytic\n \n # sum over all training examples \n dW[:,y[i]] -= X[i,:].T\n dW[:,j] += X[i,:].T\n \n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n\n # Add regularization to the loss.\n loss += reg * np.sum(W * W)\n\n # divide the gradient of weights by the number of training examples \n dW /= num_train\n\n # add the regularization term to the gradient of weights \n dW += reg*W\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. #\n #############################################################################\n\n\n return loss, dW\n\n\ndef svm_loss_vectorized(W, X, y, reg):\n \"\"\"\n Structured SVM loss function, vectorized implementation.\n\n Inputs and outputs are the same as svm_loss_naive.\n \"\"\"\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. 
#\n #############################################################################\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n num_train = X.shape[0]\n # scores: a numpy array of shape (N, C) containing scores for all training examples \n scores = X.dot(W) \n \n # correct_class_score: a numpy array of shape (N, ) mapping from ith training example to ith correct training label's score \n # np.arange(num_train): array([0, 1, 2, ..., num_train])\n # y: a numpy array of shape (N, ) containing correct training labels \n correct_class_score = scores[np.arange(num_train), y].reshape(-1, 1)\n\n # calculate the margins' score according to formula \n margins = np.maximum(0, scores - correct_class_score + 1)\n \n # set correct training labels' margins to 0\n margins[np.arange(num_train), y] = 0 \n \n # compute the average so we divide by num_train \n loss /= num_train\n\n # add regularization term to the loss\n loss += reg * np.sum(W * W)\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n # \n margins[margins > 0] = 1\n \n margins[np.arange(num_train), y] = -np.sum(margins, axis=1)\n\n dW = X.T.dot(margins)\n \n # divide the number of training examples \n dW /= num_train\n \n # add regularization term to the gradient\n dW += reg*W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW\n" ]
[ [ "numpy.sum", "numpy.arange", "numpy.zeros", "numpy.maximum" ] ]
lbechberger/LearningPsychologicalSpaces
[ "24fbf266f70874df394e08dfa9c53abecca19c00" ]
[ "code/mds/similarity_spaces/create_baseline_spaces.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreate random baseline spaces for later comparison with MDS results.\n\nCreated on Mon Sep 7 14:20:40 2020\n\n@author: lbechberger\n\"\"\"\n\nimport pickle, argparse\nimport numpy as np\nfrom itertools import zip_longest\nfrom code.util import normalize_vectors\n\nparser = argparse.ArgumentParser(description='Creating random baseline spaces')\nparser.add_argument('input_pickle_file', help = 'path to the input pickle file containing the preprocessed individual ratings')\nparser.add_argument('output_pickle_file', help = 'path to the output pickle file')\nparser.add_argument('n_spaces', type = int, help = 'number of example spaces to generate')\nparser.add_argument('max_dims', type = int, help = 'maximum number of dimensions to consider')\nparser.add_argument('-n', '--normal', action = 'store_true', help = 'use normal distribution')\nparser.add_argument('-u', '--uniform', action = 'store_true', help = 'use uniform distribution')\nparser.add_argument('-m', '--shuffled', nargs = '*', help = 'list of pickle files for obtaining shuffled coordinates of MDS spaces')\nparser.add_argument('-s', '--seed', type = int, help = 'seed for random number generator', default = None)\nargs = parser.parse_args()\n\nb_shuffle = False if args.shuffled is None else len(args.shuffled) > 0\n\nif sum([args.normal, args.uniform, b_shuffle]) == 0:\n raise Exception(\"At least one distribution type must be selected!\")\n\n# grab list of items\nwith open(args.input_pickle_file, 'rb') as f_in:\n input_data = pickle.load(f_in)\n items = sorted(input_data['items'].keys())\n\noutput = {}\n\n# first: normally distributed points\nif args.normal:\n\n # set seed for reproducibility\n if args.seed is not None:\n np.random.seed(args.seed)\n\n space_map = {}\n for n_dim in range(1, args.max_dims + 1):\n space_list = []\n for n_space in range(args.n_spaces):\n space = {}\n coordinates = np.random.normal(size=(len(items), n_dim, 1))\n coordinates = normalize_vectors(coordinates)\n\n for idx, item in enumerate(items):\n space[item] = coordinates[idx]\n space_list.append(space)\n \n space_map[n_dim] = space_list\n\n output['normal'] = space_map\n\n# second: uniformly distributed points\nif args.uniform:\n\n # set seed for reproducibility\n if args.seed is not None:\n np.random.seed(args.seed)\n\n space_map = {}\n for n_dim in range(1, args.max_dims + 1):\n space_list = []\n for n_space in range(args.n_spaces):\n space = {}\n coordinates = np.random.rand(len(items), n_dim, 1)\n coordinates = normalize_vectors(coordinates)\n\n for idx, item in enumerate(items):\n space[item] = coordinates[idx]\n space_list.append(space)\n \n space_map[n_dim] = space_list\n\n output['uniform'] = space_map\n\n\n# thirdly: shuffled versions of actual MDS vectors\nif b_shuffle:\n \n def grouper(n, iterable, fillvalue=None):\n \"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx\"\n args = [iter(iterable)] * n\n return zip_longest(fillvalue=fillvalue, *args) \n\n for name, file_name in grouper(2, args.shuffled):\n\n # set seed for reproducibility\n if args.seed is not None:\n np.random.seed(args.seed)\n \n with open(file_name, 'rb') as f_in:\n mds_input = pickle.load(f_in)\n \n space_map = {}\n for n_dim in range(1, args.max_dims + 1):\n space_list = []\n original_coordinates = np.array([mds_input[n_dim][item] for item in items])\n for n_space in range(args.n_spaces):\n space = {}\n coordinates = np.copy(original_coordinates)\n np.random.shuffle(coordinates)\n \n for idx, item in enumerate(items):\n space[item] = coordinates[idx]\n 
space_list.append(space)\n \n space_map[n_dim] = space_list\n \n output['shuffled_{0}'.format(name)] = space_map\n \n# dump the result in a pickle file\nwith open(args.output_pickle_file, 'wb') as f_out:\n pickle.dump(output, f_out)" ]
[ [ "numpy.random.seed", "numpy.array", "numpy.copy", "numpy.random.shuffle" ] ]
jgericardo/pytorch-playground
[ "49609ec10f024f4d871a531b53968dc2b3167f93" ]
[ "scripts/train_ann.py" ]
[ "# PyTorch Playground\n# Copyright (C) 2021 Justin Gerard Ricardo\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n\"\"\"Script for training a 1-hidden layer ANN model\"\"\"\nimport time\nimport argparse\n\nimport torch\n\nfrom ptpg.utils import (\n load_dataset_from_file,\n set_global_seed,\n torch_data_loader,\n torch_acc_score,\n send_message\n)\nfrom ptpg.models import ANN\n\n\ndef arg_parse() -> None:\n \"\"\"\n Parses arguments passed from a terminal execution.\n \"\"\"\n parser = argparse.ArgumentParser()\n args = {\n \"--dataset\": {\n \"type\": str,\n \"help\": \"The dataset you want to train the WISHNET on.\",\n \"default\": \"linear\"\n },\n \"--hidden_nodes\": {\n \"type\": int,\n \"help\": \"The number of nodes in the hidden layer.\",\n \"default\": 10\n },\n \"--learning_rate\": {\n \"type\": float,\n \"help\": \"The learning rate (alpha).\",\n \"default\": 0.01\n },\n \"--epochs\": {\n \"type\": int,\n \"help\": \"The number of epochs to train the WISHNET.\",\n \"default\": 1\n },\n \"--script_dir\": {\n \"type\": str,\n \"help\": \"The script's directory.\",\n \"default\": \"\"\n }\n }\n\n for key in args.keys():\n parser.add_argument(\n key,\n type=args[key][\"type\"],\n help=args[key][\"help\"],\n default=args[key][\"default\"]\n )\n\n parsed_args = parser.parse_args()\n\n return parsed_args\n\n\ndef main(args):\n set_global_seed(1024)\n\n abs_prefix = args.script_dir.replace('\\\\', '/')+'/scripts/'\n dataset_paths = {\n \"linear\": abs_prefix+\"data/synthetic/linear_dataset-500.csv\",\n \"square_root\": abs_prefix+\"data/synthetic/square_root_dataset-500.csv\",\n \"hyperbola\": abs_prefix+\"data/synthetic/hyperbola_dataset-500.csv\",\n \"circle\": abs_prefix+\"data/synthetic/circle_dataset-500.csv\",\n \"checkered_2x2\": abs_prefix+\"data/synthetic/checkered_2x2_dataset-500.csv\",\n \"checkered_4x4\": abs_prefix+\"data/synthetic/checkered_4x4_dataset-500.csv\"\n }\n\n X_train, y_train, X_test, y_test = load_dataset_from_file(\n dataset_path=dataset_paths[args.dataset],\n random_seed=1024,\n feature_range=None, lbl_encoding=(-1, 1)\n )\n\n train_data_loader = torch_data_loader(\n features=X_train, labels=y_train,\n batch_size=1, shuffle=False, num_workers=0\n )\n\n model = ANN(\n input_size=X_train.shape[1],\n hidden_size=args.hidden_nodes,\n output_size=y_train.shape[1],\n learning_rate=args.learning_rate,\n )\n\n init_data = {\n \"epochs\": args.epochs,\n \"epoch_steps\": X_train.shape[0],\n \"start_time\": time.time(),\n \"model\": str(model),\n \"W1\": model.model[0].weight.detach().numpy().tolist(),\n \"W2\": model.model[2].weight.detach().numpy().tolist()\n }\n send_message(\"init_data\", init_data)\n\n model.fit(\n train_data_loader=train_data_loader,\n X_test=torch.from_numpy(X_test),\n epochs=args.epochs,\n verbose=False,\n X_train=torch.from_numpy(X_train),\n y_train=torch.from_numpy(y_train),\n y_test=torch.from_numpy(y_test)\n )\n\n pred_train = 
model.predict(features=X_train)\n pred_test = model.predict(features=X_test)\n\n acc_train = torch_acc_score(y_pred=pred_train, y_true=y_train)\n acc_test = torch_acc_score(y_pred=pred_test, y_true=y_test)\n\n print(\"Train Accuracy: {:.6f}\".format(acc_train))\n print(\"Test Accuracy: {:.6f}\".format(acc_test))\n\n\nif __name__ == \"__main__\":\n args = arg_parse()\n main(args)\n" ]
[ [ "torch.from_numpy" ] ]
SamueleFerracin/pennylane
[ "67835d8dbd99a320165c76e7c25d24e3af3a266b" ]
[ "tests/templates/test_layers/test_simplified_twodesign.py" ]
[ "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUnit tests for the SimplifiedTwoDesign template.\n\"\"\"\nimport pytest\nimport numpy as np\nimport pennylane as qml\nfrom pennylane import numpy as pnp\n\n\nclass TestDecomposition:\n \"\"\"Tests that the template defines the correct decomposition.\"\"\"\n\n QUEUES = [\n (1, (1,), [\"RY\"], [[0]]),\n (2, (1, 1, 2), [\"RY\", \"RY\", \"CZ\", \"RY\", \"RY\"], [[0], [1], [0, 1], [0], [1]]),\n (\n 2,\n (2, 1, 2),\n [\"RY\", \"RY\", \"CZ\", \"RY\", \"RY\", \"CZ\", \"RY\", \"RY\"],\n [[0], [1], [0, 1], [0], [1], [0, 1], [0], [1]],\n ),\n (\n 3,\n (1, 2, 2),\n [\"RY\", \"RY\", \"RY\", \"CZ\", \"RY\", \"RY\", \"CZ\", \"RY\", \"RY\"],\n [[0], [1], [2], [0, 1], [0], [1], [1, 2], [1], [2]],\n ),\n ]\n\n @pytest.mark.parametrize(\"n_wires, weight_shape, expected_names, expected_wires\", QUEUES)\n def test_expansion(self, n_wires, weight_shape, expected_names, expected_wires):\n \"\"\"Checks the queue for the default settings.\"\"\"\n\n weights = np.random.random(size=weight_shape)\n initial_layer = np.random.randn(n_wires)\n\n op = qml.templates.SimplifiedTwoDesign(initial_layer, weights, wires=range(n_wires))\n queue = op.expand().operations\n\n for i, gate in enumerate(queue):\n assert gate.name == expected_names[i]\n assert gate.wires.labels == tuple(expected_wires[i])\n\n @pytest.mark.parametrize(\n \"n_wires, n_layers, shape_weights\",\n [(1, 2, (0,)), (2, 2, (2, 1, 2)), (3, 2, (2, 2, 2)), (4, 2, (2, 3, 2))],\n )\n def test_circuit_parameters(self, n_wires, n_layers, shape_weights):\n \"\"\"Tests the parameter values in the circuit.\"\"\"\n\n initial_layer = np.random.randn(n_wires)\n weights = np.random.randn(*shape_weights)\n\n op = qml.templates.SimplifiedTwoDesign(initial_layer, weights, wires=range(n_wires))\n queue = op.expand().operations\n\n # test the device parameters\n for l in range(n_layers):\n # only select the rotation gates\n ops = [gate for gate in queue if isinstance(gate, qml.RY)]\n\n # check each initial_layer gate parameters\n for n in range(n_wires):\n res_param = ops[n].parameters[0]\n exp_param = initial_layer[n]\n assert res_param == exp_param\n\n # check layer gate parameters\n ops = ops[n_wires:]\n exp_params = weights.flatten()\n for o, exp_param in zip(ops, exp_params):\n res_param = o.parameters[0]\n assert res_param == exp_param\n\n @pytest.mark.parametrize(\n \"initial_layer_weights, weights, n_wires, target\",\n [\n ([np.pi], [], 1, [-1]),\n ([np.pi] * 2, [[[np.pi] * 2]], 2, [1, 1]),\n ([np.pi] * 3, [[[np.pi] * 2] * 2], 3, [1, -1, 1]),\n ([np.pi] * 4, [[[np.pi] * 2] * 3], 4, [1, -1, -1, 1]),\n ],\n )\n def test_correct_target_output(self, initial_layer_weights, weights, n_wires, target, tol):\n \"\"\"Tests the result of the template for simple cases.\"\"\"\n dev = qml.device(\"default.qubit\", wires=n_wires)\n\n @qml.qnode(dev)\n def circuit(initial_layer, weights):\n qml.templates.SimplifiedTwoDesign(\n initial_layer_weights=initial_layer, 
weights=weights, wires=range(n_wires)\n )\n return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_wires)]\n\n expectations = circuit(initial_layer_weights, weights)\n for exp, target_exp in zip(expectations, target):\n assert np.allclose(exp, target_exp, atol=tol, rtol=0)\n\n def test_custom_wire_labels(self, tol):\n \"\"\"Test that template can deal with non-numeric, nonconsecutive wire labels.\"\"\"\n weights = np.random.random(size=(1, 2, 2))\n initial_layer = np.random.randn(3)\n\n dev = qml.device(\"default.qubit\", wires=3)\n dev2 = qml.device(\"default.qubit\", wires=[\"z\", \"a\", \"k\"])\n\n @qml.qnode(dev)\n def circuit():\n qml.templates.SimplifiedTwoDesign(initial_layer, weights, wires=range(3))\n return qml.expval(qml.Identity(0))\n\n @qml.qnode(dev2)\n def circuit2():\n qml.templates.SimplifiedTwoDesign(initial_layer, weights, wires=[\"z\", \"a\", \"k\"])\n return qml.expval(qml.Identity(\"z\"))\n\n circuit()\n circuit2()\n\n assert np.allclose(dev.state, dev2.state, atol=tol, rtol=0)\n\n\nclass TestInputs:\n \"\"\"Test inputs and pre-processing.\"\"\"\n\n def test_exception_wrong_dim(self):\n \"\"\"Verifies that exception is raised if the\n number of dimensions of features is incorrect.\"\"\"\n\n dev = qml.device(\"default.qubit\", wires=4)\n initial_layer = np.random.randn(2)\n\n @qml.qnode(dev)\n def circuit(initial_layer, weights):\n qml.templates.SimplifiedTwoDesign(initial_layer, weights, wires=range(2))\n return qml.expval(qml.PauliZ(0))\n\n with pytest.raises(ValueError, match=\"Weights tensor must have second dimension\"):\n weights = np.random.randn(2, 2, 2)\n circuit(initial_layer, weights)\n\n with pytest.raises(ValueError, match=\"Weights tensor must have third dimension\"):\n weights = np.random.randn(2, 1, 3)\n circuit(initial_layer, weights)\n\n with pytest.raises(ValueError, match=\"Initial layer weights must be of shape\"):\n initial_layer = np.random.randn(3)\n weights = np.random.randn(2, 1, 2)\n circuit(initial_layer, weights)\n\n def test_id(self):\n \"\"\"Tests that the id attribute can be set.\"\"\"\n weights = np.random.random(size=(1, 2, 2))\n initial_layer = np.random.randn(3)\n template = qml.templates.SimplifiedTwoDesign(initial_layer, weights, wires=range(3), id=\"a\")\n assert template.id == \"a\"\n\n\nclass TestAttributes:\n \"\"\"Tests additional methods and attributes\"\"\"\n\n @pytest.mark.parametrize(\n \"n_layers, n_wires, expected_shape\",\n [\n (2, 3, [(3,), (2, 2, 2)]),\n (2, 1, [(1,), (2,)]),\n (2, 2, [(2,), (2, 1, 2)]),\n ],\n )\n def test_shape(self, n_layers, n_wires, expected_shape):\n \"\"\"Test that the shape method returns the correct shape of the weights tensor\"\"\"\n\n shape = qml.templates.SimplifiedTwoDesign.shape(n_layers, n_wires)\n assert shape == expected_shape\n\n\ndef circuit_template(initial_weights, weights):\n qml.templates.SimplifiedTwoDesign(initial_weights, weights, range(3))\n return qml.expval(qml.PauliZ(0))\n\n\ndef circuit_decomposed(initial_weights, weights):\n qml.RY(initial_weights[0], wires=0)\n qml.RY(initial_weights[1], wires=1)\n qml.RY(initial_weights[2], wires=2)\n\n qml.CZ(wires=[0, 1])\n qml.RY(weights[0, 0, 0], wires=0)\n qml.RY(weights[0, 0, 1], wires=1)\n\n qml.CZ(wires=[1, 2])\n qml.RY(weights[0, 1, 0], wires=1)\n qml.RY(weights[0, 1, 1], wires=2)\n\n return qml.expval(qml.PauliZ(0))\n\n\nclass TestInterfaces:\n \"\"\"Tests that the template is compatible with all interfaces, including the computation\n of gradients.\"\"\"\n\n def test_list_and_tuples(self, tol):\n \"\"\"Tests common 
iterables as inputs.\"\"\"\n\n weights = [[[0.1, -1.1], [0.2, 0.1]]]\n initial_weights = [0.1, 0.2, 0.3]\n\n dev = qml.device(\"default.qubit\", wires=3)\n\n circuit = qml.QNode(circuit_template, dev)\n circuit2 = qml.QNode(circuit_decomposed, dev)\n\n res = circuit(initial_weights, weights)\n res2 = circuit2(initial_weights, weights)\n assert qml.math.allclose(res, res2, atol=tol, rtol=0)\n\n weights_tuple = [[tuple(weights[0][0]), tuple(weights[0][1])]]\n init_weights_tuple = tuple(initial_weights)\n res = circuit(init_weights_tuple, weights_tuple)\n res2 = circuit2(init_weights_tuple, weights_tuple)\n assert qml.math.allclose(res, res2, atol=tol, rtol=0)\n\n def test_autograd(self, tol):\n \"\"\"Tests the autograd interface.\"\"\"\n\n weights = np.random.random(size=(1, 2, 2))\n weights = pnp.array(weights, requires_grad=True)\n initial_weights = np.random.random(size=(3,))\n initial_weights = pnp.array(initial_weights, requires_grad=True)\n\n dev = qml.device(\"default.qubit\", wires=3)\n\n circuit = qml.QNode(circuit_template, dev)\n circuit2 = qml.QNode(circuit_decomposed, dev)\n\n res = circuit(initial_weights, weights)\n res2 = circuit2(initial_weights, weights)\n assert qml.math.allclose(res, res2, atol=tol, rtol=0)\n\n grad_fn = qml.grad(circuit)\n grads = grad_fn(initial_weights, weights)\n\n grad_fn2 = qml.grad(circuit2)\n grads2 = grad_fn2(initial_weights, weights)\n\n assert np.allclose(grads[0], grads2[0], atol=tol, rtol=0)\n assert np.allclose(grads[1], grads2[1], atol=tol, rtol=0)\n\n def test_jax(self, tol):\n \"\"\"Tests the jax interface.\"\"\"\n\n jax = pytest.importorskip(\"jax\")\n import jax.numpy as jnp\n\n weights = jnp.array(np.random.random(size=(1, 2, 2)))\n initial_weights = jnp.array(np.random.random(size=(3,)))\n\n dev = qml.device(\"default.qubit\", wires=3)\n\n circuit = qml.QNode(circuit_template, dev, interface=\"jax\")\n circuit2 = qml.QNode(circuit_decomposed, dev, interface=\"jax\")\n\n res = circuit(initial_weights, weights)\n res2 = circuit2(initial_weights, weights)\n assert qml.math.allclose(res, res2, atol=tol, rtol=0)\n\n grad_fn = jax.grad(circuit)\n grads = grad_fn(initial_weights, weights)\n\n grad_fn2 = jax.grad(circuit2)\n grads2 = grad_fn2(initial_weights, weights)\n\n assert np.allclose(grads[0], grads2[0], atol=tol, rtol=0)\n assert np.allclose(grads[1], grads2[1], atol=tol, rtol=0)\n\n def test_tf(self, tol):\n \"\"\"Tests the tf interface.\"\"\"\n\n tf = pytest.importorskip(\"tensorflow\")\n\n weights = tf.Variable(np.random.random(size=(1, 2, 2)))\n initial_weights = tf.Variable(np.random.random(size=(3,)))\n\n dev = qml.device(\"default.qubit\", wires=3)\n\n circuit = qml.QNode(circuit_template, dev, interface=\"tf\")\n circuit2 = qml.QNode(circuit_decomposed, dev, interface=\"tf\")\n\n res = circuit(initial_weights, weights)\n res2 = circuit2(initial_weights, weights)\n assert qml.math.allclose(res, res2, atol=tol, rtol=0)\n\n with tf.GradientTape() as tape:\n res = circuit(initial_weights, weights)\n grads = tape.gradient(res, [initial_weights, weights])\n\n with tf.GradientTape() as tape2:\n res2 = circuit2(initial_weights, weights)\n grads2 = tape2.gradient(res2, [initial_weights, weights])\n\n assert np.allclose(grads[0], grads2[0], atol=tol, rtol=0)\n assert np.allclose(grads[1], grads2[1], atol=tol, rtol=0)\n\n def test_torch(self, tol):\n \"\"\"Tests the torch interface.\"\"\"\n\n torch = pytest.importorskip(\"torch\")\n\n weights = torch.tensor(np.random.random(size=(1, 2, 2)), requires_grad=True)\n initial_weights = 
torch.tensor(np.random.random(size=(3,)), requires_grad=True)\n\n dev = qml.device(\"default.qubit\", wires=3)\n\n circuit = qml.QNode(circuit_template, dev, interface=\"torch\")\n circuit2 = qml.QNode(circuit_decomposed, dev, interface=\"torch\")\n\n res = circuit(initial_weights, weights)\n res2 = circuit2(initial_weights, weights)\n assert qml.math.allclose(res, res2, atol=tol, rtol=0)\n\n res = circuit(initial_weights, weights)\n res.backward()\n grads = [weights.grad, initial_weights.grad]\n\n res2 = circuit2(initial_weights, weights)\n res2.backward()\n grads2 = [weights.grad, initial_weights.grad]\n\n assert np.allclose(grads[0], grads2[0], atol=tol, rtol=0)\n assert np.allclose(grads[1], grads2[1], atol=tol, rtol=0)\n" ]
[ [ "numpy.allclose", "numpy.random.random", "numpy.random.randn" ] ]
PauloJPS/ComputationalVision
[ "31bdfdf9222417a3ccd92b8d30e845f0b35f1ced" ]
[ "edgeDetection/LOG.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport os \n\nclass filters():\n def __init__(self, image, sigma):\n self.sigma = sigma \n self.n = image.size[0]\n self.image = image\n image = image.convert('L')\n image = image.resize((256, 256), Image.ANTIALIAS)\n self.imageMat = np.asarray(image.getdata()).reshape((self.n, self.n))\n\n\n def fftImage(self):\n return np.fft.fft2(self.imageMat)\n\n def gauss(self, x, y):\n return 1/(2*np.pi*self.sigma) * np.exp(-(x**2 + y**2)/(2*self.sigma))\n \n def fftLaplacian(self):\n mat = np.zeros((self.n, self.n))\n for i in range(self.n):\n for j in range(self.n):\n mat[i-int(self.n/2)][j-int(self.n/2)] = (i-self.n/2)**2 + (j-self.n/2)**2\n return mat\n\n def fftGaussian(self):\n mat = np.zeros((self.n, self.n))\n for i in range(self.n):\n for j in range(self.n):\n mat[i-int(self.n/2)][j-int(self.n/2)] = self.gauss(i-int(self.n/2), j-int(self.n/2))\n return mat \n\n def gaussianFilter(self):\n return np.fft.ifft2(self.fftGaussian() * self.fftImage()).real\n\n def laplacianFilter(self):\n return np.fft.ifft2(self.fftLaplacian() * self.fftImage()).real\n\n def LOGFilter(self):\n return np.fft.ifft2(self.fftLaplacian() * self.fftImage() * self.fftGaussian()).real\n\n def thresholding(self):\n mat = self.LOGFilter()\n newMat = np.zeros((self.n, self.n))\n for i in range(self.n):\n for j in range(self.n):\n if mat[i][j] > 1: newMat[i][j] = 1\n else : newMat[i][j] = 0\n return newMat\n\n def zeroCrossing(self):\n mat = self.thresholding()\n newMat = np.zeros((self.n, self.n))\n for i in range(1, self.n-1):\n for j in range(1, self.n-1):\n if mat[i][j] == 1:\n if (mat[i+1][j] == 0 or mat[i-1][j] == 0\n or mat[i][j-1] == 0 or mat[i][j+1] == 0):\n newMat[i][j] = 1\n else: newMat[i][j] = 0 \n return newMat\n\n\n @staticmethod\n def gaussian(img, sigma):\n f = filters(img, sigma)\n return f.gaussianFilter()\n\n @staticmethod\n def laplacian(img):\n f = filters(img, sigma=1)\n return f.laplacianFilter()\n\n @staticmethod\n def LOG(img, sigma):\n f = filters(img, sigma)\n return f.LOGFilter()\n\n\n\n\n\n\n \n\ndef imageWork():\n mats = []\n for root, dirs, files in os.walk(\".\"):\n for filenames in files:\n if filenames[:4] != 'gray':\n pass\n else:\n img = Image.open(root + '/' + filenames).convert('L')\n mats.append(np.asarray(img.getdata()).reshape((256, 256)))\n return mats[0]\n \n\n\n" ]
[ [ "numpy.exp", "numpy.fft.fft2", "numpy.zeros" ] ]
synthesized-io/fairlens
[ "cd6dd0d38226a39a7d1b626d6bb86481c0b84f0f" ]
[ "tests/test_significance.py" ]
[ "import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom fairlens.metrics.distance import BinomialDistance, MeanDistance\nfrom fairlens.metrics.significance import binominal_proportion_p_value as bin_prop\nfrom fairlens.metrics.significance import bootstrap_binned_statistic as bootstrap_binned\nfrom fairlens.metrics.significance import bootstrap_statistic as bootstrap\nfrom fairlens.metrics.significance import brunner_munzel_test as bm_test\nfrom fairlens.metrics.significance import permutation_statistic as perm_stat\nfrom fairlens.metrics.significance import resampling_interval, resampling_p_value\n\nepsilon = 1e-5\ndf = pd.read_csv(\"datasets/compas.csv\")\ntarget_attr = \"RawScore\"\ngroup1 = {\"Ethnicity\": [\"Caucasian\"]}\ngroup2 = {\"Ethnicity\": [\"African-American\"]}\n\n\ndef test_binomial():\n assert abs(bin_prop(0.2, 0.1, 10) - (1 - (0.9 ** 10 + 0.9 ** 9))) < epsilon\n assert BinomialDistance().p_value(pd.Series([1, 1]), pd.Series([0, 0])) == 0\n assert BinomialDistance().p_value(pd.Series([1, 0]), pd.Series([1, 0])) == 1\n assert BinomialDistance().p_value(pd.Series([1, 0, 1, 1]), pd.Series([1, 0, 1, 0])) == 0.625\n\n\ndef test_bootstrap():\n assert bootstrap(pd.Series([1]), pd.Series([0]), MeanDistance().distance, 100).min() == 1\n assert bootstrap(pd.Series(range(2)), pd.Series(range(2, 4)), MeanDistance().distance, 1000).max() == 3\n\n\ndef test_bootstrap_binned():\n def distance(h_x, h_y):\n return np.linalg.norm(h_x - h_y, ord=1)\n\n assert bootstrap_binned(pd.Series([1, 3, 0]), pd.Series([1, 4, 3]), distance, 10000).max() == 12\n\n\ndef test_permutation():\n assert perm_stat(pd.Series([1]), pd.Series([0]), MeanDistance().distance, 100).min() == 1\n assert perm_stat(pd.Series([1, 1]), pd.Series([0, 0]), MeanDistance().distance, 1000).min() == 0\n assert perm_stat(pd.Series(range(5)), pd.Series(range(5, 10)), MeanDistance().distance, 1000).max() == 5\n\n\ndef test_resampled_pvalue():\n assert resampling_p_value(12, pd.Series([13, 11]), \"two-sided\") == 0.5\n assert resampling_p_value(12, pd.Series([13, 11]), \"greater\") == 0.5\n assert resampling_p_value(12, pd.Series([13, 11]), \"less\") == 0.5\n\n assert resampling_p_value(12, pd.Series([15, 14, 13, 11]), \"two-sided\") == 0.75\n assert resampling_p_value(12, pd.Series([15, 14, 13, 11]), \"greater\") == 0.75\n assert resampling_p_value(12, pd.Series([15, 14, 13, 11]), \"less\") == 0.25\n\n assert resampling_p_value(0, pd.Series([-2, -1, 0, 1]), \"two-sided\") == 1\n assert resampling_p_value(0, pd.Series([-2, -1, 0, 1]), \"greater\") == 0.5\n assert resampling_p_value(0, pd.Series([-2, -1, 0, 1]), \"less\") == 0.5\n\n\ndef test_resampled_interval():\n assert resampling_interval(3, pd.Series([1, 4, 2, 3, 5]), cl=0.5) == (2.0, 4.0)\n assert resampling_interval(50, pd.Series(np.arange(101)), cl=0.8) == (10, 90)\n\n\ndef test_brunner_munzel():\n data = [\n [10, \"Caucasian\", \"24/4/1999\", \"Rejected\"],\n [25, \"African-American\", \"19/7/1997\", \"Accepted\"],\n [15, \"Hispanic\", \"31/12/2001\", \"Accepted\"],\n [34, \"Other\", \"20/2/1998\", \"Rejected\"],\n [35, \"Caucasian\", \"2/3/2002\", \"Accepted\"],\n [56, \"Hispanic\", \"6/6/1997\", \"Accepted\"],\n [80, \"African-American\", \"4/5/2000\", \"Accepted\"],\n [100, \"African-American\", \"3/1/1996\", \"Accepted\"],\n [134, \"Caucasian\", \"24/4/1999\", \"Rejected\"],\n [21, \"African-American\", \"19/7/1997\", \"Rejected\"],\n [14, \"Hispanic\", \"31/12/2001\", \"Rejected\"],\n [98, \"Other\", \"20/2/1998\", \"Rejected\"],\n [76, \"Caucasian\", 
\"2/3/2002\", \"Accepted\"],\n [51, \"Hispanic\", \"6/6/1997\", \"Accepted\"],\n [82, \"African-American\", \"4/5/2000\", \"Rejected\"],\n [145, \"African-American\", \"3/1/1996\", \"Accepted\"],\n ]\n df = pd.DataFrame(data=data, columns=[\"score\", \"race\", \"date\", \"status\"])\n group1 = {\"race\": [\"African-American\"]}\n group2 = {\"race\": [\"Caucasian\"]}\n res = bm_test(df, target_attr=\"score\", group1=group1, group2=group2)\n assert res[0] == pytest.approx(-0.5883, rel=1e-3)\n assert res[1] == pytest.approx(0.5777, rel=1e-3)\n" ]
[ [ "numpy.linalg.norm", "pandas.DataFrame", "numpy.arange", "pandas.Series", "pandas.read_csv" ] ]
tommyvsfu1/ADL2019_ReinforcementLearning
[ "45e1e3bdef129efd85ed1856f88a255c129f571e" ]
[ "mario_env.py" ]
[ "\"\"\"\n\n### NOTICE ###\nYou DO NOT need to upload this file\n\n\"\"\"\n\nimport numpy as np\nfrom collections import deque\nimport gym\nfrom gym import spaces\nfrom PIL import Image\nimport cv2\n\n#from nes_py.wrappers import BinarySpaceToDiscreteSpaceEnv\nfrom nes_py.wrappers import JoypadSpace\nimport gym_super_mario_bros\nfrom gym_super_mario_bros.actions import COMPLEX_MOVEMENT\n\n\ndef _process_frame_mario(frame):\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n frame = cv2.resize(frame, (84, 84), interpolation=cv2.INTER_AREA)\n frame = np.expand_dims(frame, 0)\n return frame.astype(np.float32)\n\nclass LazyFrames(object):\n def __init__(self, frames):\n \"\"\"This object ensures that common frames between the observations are only stored once.\n It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay\n buffers.\n \n This object should only be converted to numpy array before being passed to the model.\n\n You'd not believe how complex the previous solution was.\"\"\"\n self._frames = frames\n self._out = None\n\n def _force(self):\n if self._out is None:\n self._out = np.concatenate(self._frames, axis=0)\n self._frames = None\n return self._out\n\n def __array__(self, dtype=None):\n out = self._force()\n if dtype is not None:\n out = out.astype(dtype)\n return out\n\n def __len__(self):\n return len(self._force())\n\n def __getitem__(self, i):\n return self._force()[i]\n\nclass ProcessFrameMario(gym.Wrapper):\n def __init__(self, env=None):\n super(ProcessFrameMario, self).__init__(env)\n self.observation_space = gym.spaces.Box(low=0, high=255, shape=(1, 84, 84), dtype=np.float32)\n self.status_order = {'small': 0, 'tall': 1, 'fireball': 2}\n self.prev_time = self.env.unwrapped._time\n self.prev_stat = self.status_order[self.env.unwrapped._player_status]\n self.prev_score = self.env.unwrapped._score\n self.prev_dist = self.env.unwrapped._x_position\n\n def step(self, action):\n ''' \n Implementing custom rewards\n Time = -0.1\n Distance = +1 or 0 \n Player Status = +/- 5\n Score = 2.5 x [Increase in Score]\n Done = +50 [Game Completed] or -50 [Game Incomplete]\n '''\n obs, reward, done, info = self.env.step(action)\n\n\n reward = min(max((info['x_pos'] - self.prev_dist), 0), 2)\n self.prev_dist = info['x_pos']\n \n reward += (self.prev_time - info['time']) * -0.1\n self.prev_time = info['time']\n \n reward += (self.status_order[info['status']] - self.prev_stat) * 5\n self.prev_stat = self.status_order[info['status']]\n\n reward += (info['score'] - self.prev_score) * 0.025\n self.prev_score = info['score']\n\n if done:\n if info['life'] != 255:\n reward += 50\n else:\n reward -= 50\n\n return _process_frame_mario(obs), reward, done, info\n\n def reset(self):\n obs = _process_frame_mario(self.env.reset())\n self.prev_time = self.env.unwrapped._time\n self.prev_stat = self.status_order[self.env.unwrapped._player_status]\n self.prev_score = self.env.unwrapped._score\n self.prev_dist = self.env.unwrapped._x_position\n return obs\n\n def change_level(self, level):\n self.env.change_level(level)\n\n\nclass BufferSkipFrames(gym.Wrapper):\n def __init__(self, env=None, skip=4, shape=(84, 84)):\n super(BufferSkipFrames, self).__init__(env)\n self.counter = 0\n self.observation_space = gym.spaces.Box(low=0, high=255, shape=(4, 84, 84), dtype=np.float32)\n self.skip = skip\n self.buffer = deque(maxlen=self.skip)\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n counter = 1\n total_reward = reward\n self.buffer.append(obs)\n\n 
for i in range(self.skip - 1): \n if not done:\n obs, reward, done, info = self.env.step(action)\n total_reward += reward\n counter +=1\n self.buffer.append(obs)\n else:\n self.buffer.append(obs)\n frame = LazyFrames(list(self.buffer))\n #frame = np.stack(self.buffer, axis=0)\n #frame = np.reshape(frame, (4, 84, 84))\n return frame, total_reward, done, info\n\n def reset(self):\n self.buffer.clear()\n obs = self.env.reset()\n for i in range(self.skip):\n self.buffer.append(obs)\n\n frame = np.stack(self.buffer, axis=0)\n frame = np.reshape(frame, (4, 84, 84))\n return frame\n \n def change_level(self, level):\n self.env.change_level(level)\n\n\nclass NormalizedEnv(gym.ObservationWrapper):\n def __init__(self, env=None):\n super(NormalizedEnv, self).__init__(env)\n self.state_mean = 0\n self.state_std = 0\n self.alpha = 0.9999\n self.num_steps = 0\n\n def observation(self, observation):\n if observation is not None: # for future meta implementation\n self.num_steps += 1\n self.state_mean = self.state_mean * self.alpha + \\\n observation.mean() * (1 - self.alpha)\n self.state_std = self.state_std * self.alpha + \\\n observation.std() * (1 - self.alpha)\n\n unbiased_mean = self.state_mean / (1 - pow(self.alpha, self.num_steps))\n unbiased_std = self.state_std / (1 - pow(self.alpha, self.num_steps))\n\n return (observation - unbiased_mean) / (unbiased_std + 1e-8)\n \n else:\n return observation\n\n def change_level(self, level):\n self.env.change_level(level)\n\ndef wrap_mario(env):\n env = ProcessFrameMario(env)\n env = NormalizedEnv(env)\n env = BufferSkipFrames(env)\n return env\n\ndef create_mario_env(env_id):\n env = gym_super_mario_bros.make(env_id)\n #env = BinarySpaceToDiscreteSpaceEnv(env, COMPLEX_MOVEMENT)\n env = JoypadSpace(env,COMPLEX_MOVEMENT)\n env = wrap_mario(env)\n return env\n" ]
[ [ "numpy.concatenate", "numpy.stack", "numpy.reshape", "numpy.expand_dims" ] ]
tddschn/Easy-Graph
[ "2cdecbde0b67a0c106e2bcf82d13c6fcd0b4c5d8" ]
[ "easygraph/functions/graph_embedding/line.py" ]
[ "from easygraph.utils.alias import create_alias_table, alias_sample\nfrom easygraph.utils.index_of_node import get_relation_of_index_and_node\nfrom easygraph.utils import *\nimport math\nimport random\n\nimport numpy as np\n\n\ndef line_loss(y_true, y_pred):\n import tensorflow as tf\n from tensorflow.python.keras import backend as K\n\n y = K.sigmoid(y_true * y_pred)\n # Avoid Nan in the result of 'K.log'\n return -K.mean(K.log(tf.clip_by_value(y, 1e-8, tf.reduce_max(y))))\n\n\ndef create_model(numNodes, embedding_size, order='second'):\n import tensorflow as tf\n from tensorflow.python.keras.layers import Embedding, Input, Lambda\n from tensorflow.python.keras.models import Model\n\n v_i = Input(shape=(1, ))\n v_j = Input(shape=(1, ))\n\n first_emb = Embedding(numNodes, embedding_size, name='first_emb')\n second_emb = Embedding(numNodes, embedding_size, name='second_emb')\n context_emb = Embedding(numNodes, embedding_size, name='context_emb')\n\n v_i_emb = first_emb(v_i)\n v_j_emb = first_emb(v_j)\n\n v_i_emb_second = second_emb(v_i)\n v_j_context_emb = context_emb(v_j)\n\n first = Lambda(\n lambda x: tf.reduce_sum(x[0] * x[1], axis=-1, keepdims=False),\n name='first_order')([v_i_emb, v_j_emb])\n second = Lambda(\n lambda x: tf.reduce_sum(x[0] * x[1], axis=-1, keepdims=False),\n name='second_order')([v_i_emb_second, v_j_context_emb])\n\n if order == 'first':\n output_list = [first]\n elif order == 'second':\n output_list = [second]\n else:\n output_list = [first, second]\n\n model = Model(inputs=[v_i, v_j], outputs=output_list)\n\n return model, {'first': first_emb, 'second': second_emb}\n\n\nclass LINE:\n\n @not_implemented_for(\"multigraph\")\n def __init__(\n self,\n graph,\n embedding_size=8,\n negative_ratio=5,\n order='all',\n ):\n \"\"\"Graph embedding via SDNE.\n\n Parameters\n ----------\n graph : easygraph.Graph or easygraph.DiGraph\n\n embedding_size : int, optional (default : 8)\n\n negative_ratio : int, optional (default : 5)\n\n order : string, optional (default : 'all')\n 'first','second','all'\n\n Examples\n --------\n\n >>> model = LINE(G,\n ... embedding_size=16,\n ... order='all') # The order of model LINE. 'first','second' or 'all'.\n >>> model.train(batch_size=1024, epochs=1, verbose=2)\n >>> embeddings = model.get_embeddings() # Returns the graph embedding results.\n\n References\n ----------\n .. [1] Tang J, Qu M, Wang M, et al. \n Line: Large-scale information network embedding[C]\n //Proceedings of the 24th international conference on World Wide Web. 
2015: 1067-1077 \n\n \"\"\"\n if order not in ['first', 'second', 'all']:\n raise ValueError('mode must be first,second,or all')\n\n self.graph = graph\n self.idx2node, self.node2idx = get_relation_of_index_and_node(graph)\n self.use_alias = True\n\n self.rep_size = embedding_size\n self.order = order\n\n self._embeddings = {}\n self.negative_ratio = negative_ratio\n self.order = order\n\n self.node_size = graph.number_of_nodes()\n self.edge_size = graph.number_of_edges()\n self.samples_per_epoch = self.edge_size * (1 + negative_ratio)\n\n self._gen_sampling_table()\n self.reset_model()\n\n def reset_training_config(self, batch_size, times):\n self.batch_size = batch_size\n self.steps_per_epoch = (\n (self.samples_per_epoch - 1) // self.batch_size + 1) * times\n\n def reset_model(self, opt='adam'):\n\n self.model, self.embedding_dict = create_model(self.node_size,\n self.rep_size,\n self.order)\n self.model.compile(opt, line_loss)\n self.batch_it = self.batch_iter(self.node2idx)\n\n def _gen_sampling_table(self):\n\n # create sampling table for vertex\n power = 0.75\n numNodes = self.node_size\n node_degree = np.zeros(numNodes) # out degree\n node2idx = self.node2idx\n\n for edge in self.graph.edges:\n node_degree[node2idx[edge[0]]] += self.graph[edge[0]][edge[1]].get(\n 'weight', 1.0)\n\n total_sum = sum(\n [math.pow(node_degree[i], power) for i in range(numNodes)])\n norm_prob = [\n float(math.pow(node_degree[j], power)) / total_sum\n for j in range(numNodes)\n ]\n\n self.node_accept, self.node_alias = create_alias_table(norm_prob)\n\n # create sampling table for edge\n numEdges = self.graph.number_of_edges()\n total_sum = sum([\n self.graph[edge[0]][edge[1]].get('weight', 1.0)\n for edge in self.graph.edges\n ])\n norm_prob = [\n self.graph[edge[0]][edge[1]].get('weight', 1.0) * numEdges /\n total_sum for edge in self.graph.edges\n ]\n\n self.edge_accept, self.edge_alias = create_alias_table(norm_prob)\n\n def batch_iter(self, node2idx):\n\n edges = [(node2idx[x[0]], node2idx[x[1]]) for x in self.graph.edges]\n\n data_size = self.graph.number_of_edges()\n shuffle_indices = np.random.permutation(np.arange(data_size))\n # positive or negative mod\n mod = 0\n mod_size = 1 + self.negative_ratio\n h = []\n t = []\n sign = 0\n count = 0\n start_index = 0\n end_index = min(start_index + self.batch_size, data_size)\n while True:\n if mod == 0:\n\n h = []\n t = []\n for i in range(start_index, end_index):\n if random.random() >= self.edge_accept[shuffle_indices[i]]:\n shuffle_indices[i] = self.edge_alias[\n shuffle_indices[i]]\n cur_h = edges[shuffle_indices[i]][0]\n cur_t = edges[shuffle_indices[i]][1]\n h.append(cur_h)\n t.append(cur_t)\n sign = np.ones(len(h))\n else:\n sign = np.ones(len(h)) * -1\n t = []\n for i in range(len(h)):\n\n t.append(alias_sample(self.node_accept, self.node_alias))\n\n if self.order == 'all':\n yield ([np.array(h), np.array(t)], [sign, sign])\n else:\n yield ([np.array(h), np.array(t)], [sign])\n mod += 1\n mod %= mod_size\n if mod == 0:\n start_index = end_index\n end_index = min(start_index + self.batch_size, data_size)\n\n if start_index >= data_size:\n count += 1\n mod = 0\n h = []\n shuffle_indices = np.random.permutation(np.arange(data_size))\n start_index = 0\n end_index = min(start_index + self.batch_size, data_size)\n\n def get_embeddings(self, ):\n \"\"\"Returns the embedding of each node.\n\n Returns\n -------\n get_embeddings : dict\n The graph embedding result of each node.\n\n \"\"\"\n self._embeddings = {}\n if self.order == 'first':\n embeddings = 
self.embedding_dict['first'].get_weights()[0]\n elif self.order == 'second':\n embeddings = self.embedding_dict['second'].get_weights()[0]\n else:\n embeddings = np.hstack(\n (self.embedding_dict['first'].get_weights()[0],\n self.embedding_dict['second'].get_weights()[0]))\n idx2node = self.idx2node\n for i, embedding in enumerate(embeddings):\n self._embeddings[idx2node[i]] = embedding\n\n return self._embeddings\n\n def train(self,\n batch_size=1024,\n epochs=2,\n initial_epoch=0,\n verbose=1,\n times=1):\n \"\"\"Train LINE model.\n\n Parameters\n ----------\n batch_size : int, optional (default : 1024)\n\n epochs : int, optional (default : 2)\n\n initial_epoch : int, optional (default : 0)\n\n verbose : int, optional (default : 1)\n\n times : int, optional (default : 1)\n\n \"\"\"\n self.reset_training_config(batch_size, times)\n hist = self.model.fit(self.batch_it,\n epochs=epochs,\n initial_epoch=initial_epoch,\n steps_per_epoch=self.steps_per_epoch,\n verbose=verbose)\n return hist\n" ]
[ [ "numpy.array", "tensorflow.python.keras.backend.sigmoid", "tensorflow.python.keras.layers.Input", "numpy.zeros", "tensorflow.python.keras.layers.Embedding", "tensorflow.reduce_max", "numpy.arange", "tensorflow.reduce_sum", "tensorflow.python.keras.models.Model" ] ]
avcopan/mechdriver
[ "63069cfb21d6fdb6d0b091dfe204b1e09c8e10a1" ]
[ "routines/es/_routines/wells.py" ]
[ "\"\"\"\nFind a TS from the grid as well as associated vdW wells\n\"\"\"\n\nimport numpy\nimport automol\nimport elstruct\nimport autofile\nfrom routines.es._routines import _util as util\nfrom routines.es._routines import geom\nfrom routines.es import runner as es_runner\nfrom lib import filesys\nfrom lib.phydat import phycon\nfrom lib.submission import qchem_params\n\n\ndef kick_from_saddle():\n \"\"\" Find the wells from kicking off the saddle point by\n changingthe reaction coordinate some amount\n might have to be reaction class specific\n \"\"\"\n\n\ndef find_with_irc():\n \"\"\" Try and use the wells by reading a computed irc, or computing an irc\n first and then reading it\n \"\"\"\n\n\ndef find_vdw(ts_name, spc_dct, thy_info, ini_thy_info, vdw_params,\n nsamp_par, run_prefix, save_prefix,\n kickoff_size, kickoff_backward,\n overwrite):\n \"\"\" Find van der Waals structures for all the pairs of\n species in a reaction list.\n Fxn takes two species, performs a (random?) rotation,\n sticks them together and optimizes the combined geometry.\n Supposed to use the wells filesystem?\n \"\"\"\n new_vdws = []\n _, opt_script_str, _, opt_kwargs = qchem_params(\n *thy_info[:2])\n mul = spc_dct[ts_name]['low_mul']\n vdw_names_lst = []\n if vdw_params[0]:\n vdw_names_lst.append([sorted(spc_dct[ts_name]['reacs']), mul, 'r'])\n if vdw_params[1]:\n vdw_names_lst.append([sorted(spc_dct[ts_name]['prods']), mul, 'p'])\n\n for names, ts_mul, label in vdw_names_lst:\n if len(names) < 2:\n print('Cannot find van der Waals well for unimolecular',\n 'reactant or product')\n ichs = list(map(lambda name: spc_dct[name]['inchi'], names))\n chgs = list(map(lambda name: spc_dct[name]['chg'], names))\n muls = list(map(lambda name: spc_dct[name]['mul'], names))\n\n # theory\n prog = thy_info[0]\n method = thy_info[1]\n _, opt_script_str, _, opt_kwargs = es_runner.qchem_params(\n prog, method)\n\n geos = [(), ()]\n ntaudof = 0.\n for i, (nam, ich, chg, mul) in enumerate(zip(names, ichs, chgs, muls)):\n spc_info = [ich, chg, mul]\n orb_restr = filesys.inf.orbital_restriction(spc_info, ini_thy_info)\n ini_g = ini_thy_info[0:3]\n ini_g.append(orb_restr)\n orb_restr = filesys.inf.orbital_restriction(spc_info, thy_info)\n thy_info = thy_info[0:3]\n thy_info.append(orb_restr)\n spc_run_fs = autofile.fs.species(run_prefix)\n spc_run_fs[-1].create(spc_info)\n spc_run_path = spc_run_fs[-1].path(spc_info)\n spc_save_fs = autofile.fs.species(save_prefix)\n spc_save_fs[-1].create(spc_info)\n spc_save_path = spc_save_fs[-1].path(spc_info)\n\n thy_run_fs = autofile.fs.theory(spc_run_path)\n thy_run_fs[-1].create(thy_info[1:4])\n thy_run_path = thy_run_fs[-1].path(thy_info[1:4])\n thy_save_fs = autofile.fs.theory(spc_save_path)\n thy_save_fs[-1].create(thy_info[1:4])\n thy_save_path = thy_save_fs[-1].path(thy_info[1:4])\n run_fs = autofile.fs.run(thy_run_path)\n\n ini_thy_save_fs = autofile.fs.theory(spc_save_path)\n ini_thy_save_fs[-1].create(ini_thy_info[1:4])\n\n cnf_run_fs = autofile.fs.conformer(thy_run_path)\n cnf_save_fs = autofile.fs.conformer(thy_save_path)\n\n geo = geom.reference_geometry(\n spc_dct[nam], thy_info, ini_thy_info,\n thy_run_fs, thy_save_fs,\n ini_thy_save_fs,\n cnf_run_fs, cnf_save_fs,\n run_fs,\n kickoff_size=kickoff_size,\n kickoff_backward=kickoff_backward,\n overwrite=overwrite)\n geos[i] = geo\n gra = automol.geom.graph(geo)\n ntaudof += len(\n automol.graph.rotational_bond_keys(gra, with_h_rotors=False))\n nsamp = util.nsamp_init(nsamp_par, ntaudof)\n geo1, geo2 = geos\n geo1 = 
automol.geom.mass_centered(geo1)\n geo2 = automol.geom.mass_centered(geo2)\n min_ene = 0.\n for idx in range(int(nsamp)):\n print('Optimizing vdw geometry {}/{}'.format(idx+1, nsamp))\n angs1 = numpy.multiply(\n numpy.random.rand(3), [1*numpy.pi, 2*numpy.pi, 2*numpy.pi])\n angs2 = numpy.multiply(\n numpy.random.rand(3), [1*numpy.pi, 2*numpy.pi, 2*numpy.pi])\n angs12 = numpy.multiply(\n numpy.random.rand(2), [1*numpy.pi, 2*numpy.pi])\n geo1 = automol.geom.euler_rotate(geo1, *angs1)\n geo2 = automol.geom.euler_rotate(geo2, *angs2)\n dist_cutoff = 3.0 * phycon.ANG2BOHR\n\n geo = automol.geom.join(geo1, geo2, dist_cutoff, *angs12)\n print(\"Species: {}\".format('+'.join(names)))\n print('vdw starting geometry')\n print(automol.geom.xyz_string(geo))\n\n # Set up the filesystem\n ich = automol.inchi.recalculate(automol.inchi.join(ichs))\n chg = sum(chgs)\n mul = ts_mul\n spc_info = (ich, chg, mul)\n spc_run_fs = autofile.fs.species(run_prefix)\n spc_run_fs[-1].create(spc_info)\n spc_run_path = spc_run_fs[-1].path(spc_info)\n spc_save_fs = autofile.fs.species(save_prefix)\n spc_save_fs[-1].create(spc_info)\n spc_save_path = spc_save_fs[-1].path(spc_info)\n orb_restr = filesys.inf.orbital_restriction(spc_info, thy_info)\n thy_info = thy_info[0:3]\n thy_info.append(orb_restr)\n thy_run_fs = autofile.fs.theory(spc_run_path)\n thy_run_fs[-1].create(thy_info[1:4])\n thy_run_path = thy_run_fs[-1].path(thy_info[1:4])\n thy_save_fs = autofile.fs.theory(spc_save_path)\n thy_save_fs[-1].create(thy_info[1:4])\n thy_save_path = thy_save_fs[-1].path(thy_info[1:4])\n run_fs = autofile.fs.run(thy_run_path)\n\n # Generate reference geometry\n # Generate the z-matrix and sampling ranges\n es_runner.run_job(\n job=elstruct.Job.OPTIMIZATION,\n geom=geo,\n spc_info=spc_info,\n th_info=thy_info,\n run_fs=run_fs,\n script_str=opt_script_str,\n overwrite=overwrite,\n **opt_kwargs,\n )\n\n # Save info for the initial geometry (from ichi or fsave dir)\n success, ret = es_runner.read_job(\n job=elstruct.Job.OPTIMIZATION, run_fs=run_fs)\n if success:\n print('Saving reference geometry')\n print(\" - Save path: {}\".format(thy_save_path))\n\n inf_obj, inp_str, out_str = ret\n prog = inf_obj.prog\n method = inf_obj.method\n geo = elstruct.reader.opt_geometry(prog, out_str)\n print('vdw ending geometry')\n print(automol.geom.xyz_string(geo))\n thy_save_fs[-1].file.geometry.write(geo, thy_info[1:4])\n ene = elstruct.reader.energy(prog, method, out_str)\n if ene < min_ene:\n min_ene = ene\n print('ene test in vdw')\n print(ene)\n thy_save_fs[-1].file.energy.write(ene, thy_info[1:4])\n print('Saving reference geometry')\n print(\" - Save path: {}\".format(thy_save_path))\n vdw_name = label + ts_name.replace('ts', 'vdw')\n spc_dct[vdw_name] = spc_dct[ts_name].copy()\n spc_dct[vdw_name]['inchi'] = ich\n spc_dct[vdw_name]['mul'] = mul\n spc_dct[vdw_name]['chg'] = chg\n spc_dct[vdw_name]['dist_info'][1] = dist_cutoff\n\n # Make a fake conformer\n cnf_save_fs = autofile.fs.conformer(thy_save_path)\n cnf_run_fs = autofile.fs.conformer(thy_run_path)\n cnf_save_fs[0].create()\n cnf_run_fs[0].create()\n tors_range_dct = {}\n cinf_obj = autofile.schema.info_objects.conformer[0](\n 0, tors_range_dct)\n cinf_obj.nsamp = 1\n cnf_save_fs[0].file.info.write(cinf_obj)\n locs_lst = cnf_save_fs[-1].existing()\n if not locs_lst:\n cid = autofile.schema.generate_new_conformer_id()\n locs = [cid]\n else:\n locs = locs_lst[0]\n cnf_save_fs[-1].create(locs)\n cnf_run_fs[-1].create(locs)\n cnf_save_fs[-1].file.geometry_info.write(\n inf_obj, locs)\n 
cnf_save_fs[-1].file.geometry_input.write(\n inp_str, locs)\n cnf_save_fs[-1].file.energy.write(ene, locs)\n cnf_save_fs[-1].file.geometry.write(geo, locs)\n if min_ene:\n new_vdws.append(vdw_name)\n\n return new_vdws\n" ]
[ [ "numpy.random.rand" ] ]
failyang/tensorflow-examples
[ "48da6cd25138d448a4ddf7710e8abab0392c453c" ]
[ "c3d/data_gen.py" ]
[ "import numpy as np\nfrom tensorflow import keras\nimport cv2\nimport os\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom data_helper import calculateRGBdiff, readfile_to_dict\nfrom tensorflow.python.keras.utils.data_utils import Sequence\n\nclass DataGen(Sequence):\n 'Generates data for Keras'\n def __init__(self, filepath, batch_size=32, class_num=2, dim=(32,32), \n n_channels=1,n_sequence=4, preprocess_input=None, with_aug=True, shuffle=True, path_dataset=None,\n type_gen='train', option=None):\n 'Initialization'\n \n data_dict = readfile_to_dict(filepath)\n data_keys = list(data_dict.keys())\n \n self.dim = dim\n self.batch_size = batch_size\n self.class_num = class_num\n self.labels = data_dict\n self.list_IDs = data_keys\n self.n_channels = n_channels\n self.n_sequence = n_sequence # get n_sequence diff image\n self.shuffle = shuffle\n self.path_dataset = path_dataset\n self.type_gen = type_gen\n self.option = option\n self.aug_gen = ImageDataGenerator() \n self.steps_per_epoch = len(self.list_IDs) // self.batch_size\n print(\"all:\", len(self.list_IDs), \" batch per epoch\", self.steps_per_epoch)\n self.on_epoch_end()\n\n def __len__(self):\n 'Denotes the number of batches per epoch'\n return int(np.floor(len(self.list_IDs) / self.batch_size))\n\n def on_epoch_end(self):\n 'Updates indexes after each epoch' \n self.indexes = np.arange(len(self.list_IDs))\n if self.shuffle == True:\n np.random.shuffle(self.indexes)\n\n def __getitem__(self, index):\n 'Generate one batch of data'\n # Generate indexes of the batch\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n if self.type_gen == 'predict':\n return X\n else:\n return X, y\n\n def get_sampling_frame(self, len_frames): \n '''\n Sampling n_sequence frame from video file\n Input: \n len_frames -- number of frames that this video have\n Output: \n index_sampling -- n_sequence frame indexs from sampling algorithm \n ''' \n # Define maximum sampling rate\n random_sample_range = 9\n if random_sample_range*self.n_sequence > len_frames:\n random_sample_range = len_frames//self.n_sequence\n # Randomly choose sample interval and start frame\n# print(random_sample_range)\n sample_interval = np.random.randint(1, random_sample_range + 1)\n start_i = np.random.randint(0, len_frames - sample_interval * self.n_sequence + 1)\n \n # Get n_sequence index of frames\n index_sampling = []\n end_i = sample_interval * self.n_sequence + start_i\n for i in range(start_i, end_i, sample_interval):\n if len(index_sampling) < self.n_sequence:\n index_sampling.append(i)\n \n return index_sampling\n\n def sequence_augment(self, sequence):\n name_list = ['rotate','width_shift','height_shift',\n 'brightness','flip_horizontal','width_zoom',\n 'height_zoom']\n dictkey_list = ['theta','ty','tx',\n 'brightness','flip_horizontal','zy',\n 'zx']\n # dictkey_list = ['ty','tx','zy','zx']\n random_aug = np.random.randint(2, 5) # random 2-4 augmentation method\n pick_idx = np.random.choice(len(dictkey_list), random_aug, replace=False) #\n\n dict_input = {}\n for i in pick_idx:\n if dictkey_list[i] == 'theta':\n dict_input['theta'] = np.random.randint(-10, 10)\n\n elif dictkey_list[i] == 'ty': # width_shift\n dict_input['ty'] = np.random.randint(-30, 30)\n\n elif dictkey_list[i] == 'tx': # height_shift\n dict_input['tx'] = 
np.random.randint(-15, 15)\n\n elif dictkey_list[i] == 'brightness': \n dict_input['brightness'] = np.random.uniform(0.15,1)\n\n elif dictkey_list[i] == 'flip_horizontal': \n dict_input['flip_horizontal'] = True\n\n elif dictkey_list[i] == 'zy': # width_zoom\n dict_input['zy'] = np.random.uniform(0.5,1.5)\n\n elif dictkey_list[i] == 'zx': # height_zoom\n dict_input['zx'] = np.random.uniform(0.5,1.5)\n len_seq = sequence.shape[0]\n for i in range(len_seq):\n sequence[i] = self.aug_gen.apply_transform(sequence[i],dict_input)\n \n return sequence\n \n def __data_generation(self, list_IDs_temp):\n 'Generates data containing batch_size samples'\n # Initialization\n X = np.empty((self.batch_size, self.n_sequence, *self.dim, self.n_channels)) # X : (n_samples, *dim, n_channels)\n Y = np.zeros((self.batch_size, self.class_num))\n \n for i, ID in enumerate(list_IDs_temp): # ID is name of file\n path_file = os.path.join(self.path_dataset,ID) \n cap = cv2.VideoCapture(path_file)\n length_file = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) # get how many frames this video have\n# print(path_file,length_file)\n index_sampling = self.get_sampling_frame(length_file) # get sampling index\n for j, n_pic in enumerate(index_sampling):\n cap.set(cv2.CAP_PROP_POS_FRAMES, n_pic) # jump to that index\n ret, frame = cap.read()\n if ret is True:\n new_image = cv2.resize(frame, self.dim) \n X[i,j,:,:,:] = new_image\n else:\n print('read file ', path_file, 'error', length_file, n_pic)\n\n if self.type_gen =='train':\n X[i,] = self.sequence_augment(X[i,]) # apply the same rule\n else:\n X[i,] = X[i,]\n \n if self.option == 'RGBdiff':\n X[i,] = calculateRGBdiff(X[i,])\n\n Y[i][self.labels[ID]-1] = 1.0\n cap.release()\n\n return X, Y\n" ]
[ [ "tensorflow.keras.preprocessing.image.ImageDataGenerator", "numpy.empty", "numpy.zeros", "numpy.random.shuffle", "numpy.random.uniform", "numpy.random.randint" ] ]
loxacom123/AutoML
[ "8c5952467915f77d84705325b58b5dc9f3541d61" ]
[ "dataloaders/utils.py" ]
[ "import json\nfrom PIL import Image\nimport os\nfrom pycocotools.coco import COCO\nimport numpy as np\nimport cv2\n\n\ndef change_coco_image_sizes_with_annotations(path_to_dataset, min_side):\n os.chdir(path_to_dataset)\n\n folder_paths = ['train2017', 'val2017']\n for folder_path in folder_paths:\n images_folder_path = os.path.join(os.getcwd(), 'images', folder_path)\n annotations_path = os.path.join(os.getcwd(), 'annotations', 'instances_' + folder_path + '.json')\n coco = COCO(annotations_path)\n for img_key, item in coco.imgs.items():\n img_path = os.path.join(images_folder_path, item['file_name'])\n img = Image.open(img_path)\n width, height = img.size\n smallest_side = min(width, height)\n scale = min_side / smallest_side\n\n img = img.resize((int(scale * width), int(scale * height)), Image.ANTIALIAS)\n [x for x in coco.dataset['images'] if x['id'] == img_key][0]['width'] = int(scale * width)\n [x for x in coco.dataset['images'] if x['id'] == img_key][0]['height'] = int(scale * height)\n img.save(img_path)\n print(scale)\n anns = coco.imgToAnns[img_key]\n for ann in anns:\n bbox = ann['bbox']\n ann_id = ann['id']\n new_bbox = np.array(bbox) * scale\n [x for x in coco.dataset['annotations'] if x['id'] == ann_id][0]['bbox'] = new_bbox.tolist()\n print(bbox)\n print([x for x in coco.dataset['annotations'] if x['id'] == ann_id][0]['bbox'])\n\n new_annotations_path = os.path.join(os.getcwd(), 'annotations', 'instances_large_' + folder_path + '.json')\n\n with open(new_annotations_path, 'w') as fp:\n json.dump(coco.dataset, fp)\n\n\ndef draw_caption(image, box, caption):\n\n b = np.array(box).astype(int)\n cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (1, 1, 1), 2)\n\ndef draw_bbox(img, bbox, label):\n\n draw_caption(img, bbox, label)\n x1, y1, x2, y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])\n cv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 0, 1), thickness=2)\n" ]
[ [ "numpy.array" ] ]
Michael-Hsu/pytorch-tutorial-1
[ "caabba7636c2baeed064b12cf484aecb3f8d3551" ]
[ "tutorials/03-advanced/image_captioning/sample.py" ]
[ "import torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport pickle\nimport os\nfrom torchvision import transforms\nfrom build_vocab import Vocabulary\nfrom model import EncoderCNN, DecoderRNN\nfrom PIL import Image\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef load_image(image_path, transform=None):\n image = Image.open(image_path).convert('RGB')\n image = image.resize([224, 224], Image.LANCZOS)\n\n if transform is not None:\n image = transform(image).unsqueeze(0)\n\n return image\n\n\ndef main(args):\n # Image preprocessing\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),\n (0.229, 0.224, 0.225))])\n\n # Load vocabulary wrapper\n with open(args.vocab_path, 'rb') as f:\n vocab = pickle.load(f)\n\n # Build models\n encoder = EncoderCNN(args.embed_size).eval() # eval mode (batchnorm uses moving mean/variance)\n decoder = DecoderRNN(args.embed_size, args.hidden_size, len(vocab), args.num_layers)\n encoder = encoder.to(device)\n decoder = decoder.to(device)\n\n # Load the trained model parameters\n encoder.load_state_dict(torch.load(args.encoder_path))\n decoder.load_state_dict(torch.load(args.decoder_path))\n\n # Prepare an image\n image = load_image(args.image, transform)\n image_tensor = image.to(device)\n\n # Generate an caption from the image\n feature = encoder(image_tensor)\n sampled_ids = decoder.sample(feature)\n sampled_ids = sampled_ids[0].cpu().numpy() # (1, max_seq_length) -> (max_seq_length)\n\n # Convert word_ids to words\n sampled_caption = []\n for word_id in sampled_ids:\n word = vocab.idx2word[word_id]\n sampled_caption.append(word)\n if word == '<end>':\n break\n sentence = ' '.join(sampled_caption)\n\n # Print out the image and the generated caption\n print(sentence)\n image = Image.open(args.image)\n plt.imshow(np.asarray(image))\n plt.show()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--image', type=str, required=True, help='input image for generating caption')\n\n # 先使用作者预训练好的来做!\n parser.add_argument('--encoder_path', type=str, default='models/encoder-5-3000.pkl',\n help='path for trained encoder')\n parser.add_argument('--decoder_path', type=str, default='models/decoder-5-3000.pkl',\n help='path for trained decoder')\n # 这是已经训练好的神经网络模型\n # parser.add_argument('--encoder_path', type=str, default='models/encoder-2-1000.ckpt',\n # help='path for trained encoder')\n # parser.add_argument('--decoder_path', type=str, default='models/decoder-2-1000.ckpt',\n # help='path for trained decoder')\n\n parser.add_argument('--vocab_path', type=str, default='data/vocab.pkl', help='path for vocabulary wrapper')\n\n # Model parameters (should be same as paramters in train.py)\n parser.add_argument('--embed_size', type=int, default=256, help='dimension of word embedding vectors')\n parser.add_argument('--hidden_size', type=int, default=512, help='dimension of lstm hidden states')\n parser.add_argument('--num_layers', type=int, default=1, help='number of layers in lstm')\n args = parser.parse_args()\n main(args)\n" ]
[ [ "matplotlib.pyplot.show", "torch.cuda.is_available", "numpy.asarray", "torch.load" ] ]
esalesky/NMTGMinor
[ "b6eafff21f5aabb874720e6df30cd6b91c339a7c" ]
[ "onmt/modules/LSTMLM/Models.py" ]
[ "import numpy as np\nimport torch, math\nimport torch.nn as nn\nfrom onmt.modules.Transformer.Models import TransformerDecodingState\nfrom onmt.modules.BaseModel import NMTModel, Reconstructor, DecoderState\nimport onmt\nfrom onmt.modules.WordDrop import embedded_dropout\n#~ from onmt.modules.Checkpoint import checkpoint\nfrom torch.utils.checkpoint import checkpoint\nfrom collections import defaultdict\nfrom onmt.modules.Transformer.Layers import PositionalEncoding, PrePostProcessing\nfrom onmt.modules.TransformerLM.Layers import LMDecoderLayer\n\n\ndef custom_layer(module):\n def custom_forward(*args):\n output = module(*args)\n return output\n return custom_forward\n\nclass LSTMLMDecoder(nn.Module):\n \"\"\"Encoder in 'Attention is all you need'\n\n Args:\n opt\n dicts\n \"\"\"\n\n def __init__(self, opt, dicts):\n\n super().__init__()\n\n self.model_size = opt.model_size\n self.n_heads = opt.n_heads\n self.inner_size = opt.inner_size\n self.layers = opt.layers\n self.dropout = opt.dropout\n self.word_dropout = opt.word_dropout\n self.attn_dropout = opt.attn_dropout\n self.emb_dropout = opt.emb_dropout\n self.time = opt.time\n self.encoder_type = opt.encoder_type\n\n self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)\n\n self.word_lut = nn.Embedding(dicts.size(),\n self.model_size,\n padding_idx=onmt.Constants.PAD)\n\n\n self.rnn = nn.LSTM(self.model_size, self.model_size, num_layers=3, dropout=self.dropout)\n\n self.postprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)\n\n self.h = None\n self.c = None\n\n def renew_buffer(self, new_len):\n\n return\n\n def forward(self, input, **kwargs):\n \"\"\"\n Inputs Shapes:\n input: (Variable) len_tgt x batch_size\n Outputs Shapes:\n out: len_tgt x batch_size x d_model\n \"\"\"\n\n emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)\n\n emb = self.preprocess_layer(emb)\n\n if self.h is None:\n lstm_mem = None\n else:\n lstm_mem = (self.h.detach(), self.c.detach())\n\n output, (h, c) = self.rnn(emb, lstm_mem)\n\n output = self.postprocess_layer(output)\n\n output_dict = defaultdict(lambda: None)\n output_dict['hidden'] = output\n output_dict['lstm_mem'] = (h, c)\n\n self.h = h\n self.c = c\n\n return output_dict\n\n def step(self, input, decoder_state):\n \"\"\"\n Inputs Shapes:\n input: (Variable) batch_size x len_tgt (wanna tranpose)\n context: (Variable) batch_size x len_src x d_model\n mask_src (Tensor) batch_size x len_src\n buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing\n Outputs Shapes:\n out: batch_size x len_tgt x d_model\n coverage: batch_size x len_tgt x len_src\n\n \"\"\"\n buffers = decoder_state.attention_buffers\n\n if decoder_state.input_seq is None:\n decoder_state.input_seq = input\n else:\n # concatenate the last input to the previous input sequence\n decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)\n input = decoder_state.input_seq.transpose(0, 1)\n input_ = input[:,-1].unsqueeze(1)\n\n # output_buffer = list()\n\n # batch_size = input_.size(0)\n\n \"\"\" Embedding: batch_size x 1 x d_model \"\"\"\n emb = self.word_lut(input_)\n\n if isinstance(emb, tuple):\n emb = emb[0]\n\n # Preprocess layer: adding dropout\n emb = self.preprocess_layer(emb)\n\n emb = emb.transpose(0, 1)\n\n # batch_size x 1 x len_src\n\n\n len_tgt = input.size(1)\n mask_tgt = input.data.eq(onmt.Constants.PAD).unsqueeze(1) + 
self.mask[:len_tgt, :len_tgt]\n mask_tgt = torch.gt(mask_tgt, 0)\n mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)\n # print(mask_tgt)\n\n output = emb.contiguous()\n\n for i, layer in enumerate(self.layer_modules):\n\n buffer = buffers[i] if i in buffers else None\n assert(output.size(0) == 1)\n\n output, coverage, buffer = layer.step(output, mask_tgt,buffer=buffer)\n\n decoder_state.update_attention_buffer(buffer, i)\n\n # From Google T2T\n # if normalization is done in layer_preprocess, then it should also be done\n # on the output, since the output can grow very large, being the sum of\n # a whole stack of unnormalized layer outputs.\n output = self.postprocess_layer(output)\n\n return output, coverage\n\n\nclass LSTMLM(NMTModel):\n \"\"\"Main model in 'Attention is all you need' \"\"\"\n\n def __init__(self, encoder, decoder, generator=None):\n super().__init__( encoder, decoder, generator)\n self.model_size = self.decoder.model_size\n\n def forward(self, batch):\n \"\"\"\n Inputs Shapes:\n src: len_src x batch_size\n tgt: len_tgt x batch_size\n\n Outputs Shapes:\n out: batch_size*len_tgt x model_size\n\n\n \"\"\"\n # we only need target for language model\n tgt = batch.get('target_input') # T x B\n tgt_out = batch.get('target_output') # T x B\n\n decoder_output = self.decoder(tgt)\n\n output_dict = defaultdict(lambda: None)\n output_dict['hidden'] = decoder_output['hidden']\n\n return output_dict\n\n def reset_states(self):\n\n self.decoder.h = None\n self.decoder.c = None\n\n def step(self, input_t, decoder_state):\n \"\"\"\n Decoding function:\n generate new decoder output based on the current input and current decoder state\n the decoder state is updated in the process\n :param input_t: the input word index at time t\n :param decoder_state: object DecoderState containing the buffers required for decoding\n :return: a dictionary containing: log-prob output and the attention coverage\n \"\"\"\n\n hidden, coverage = self.decoder.step(input_t, decoder_state)\n\n log_prob = self.generator[0](hidden.squeeze(0))\n\n output_dict = defaultdict(lambda: None)\n\n output_dict['log_prob'] = log_prob\n\n return output_dict\n\n # print a sample\n def sample(self):\n\n pass\n\n\n def create_decoder_state(self, batch, beam_size=1):\n\n return LSTMDecodingState(None, None, beam_size=beam_size, model_size=self.model_size)\n\n\nclass LSTMDecodingState(TransformerDecodingState):\n\n def __init__(self, src, context, beam_size=1, model_size=512):\n\n # if audio only take one dimension since only used for mask\n\n self.beam_size = beam_size\n\n self.input_seq = None\n self.h = None\n self.c = None\n self.model_size = model_size\n\n\n def update_beam(self, beam, b, remaining_sents, idx):\n\n for tensor in [self.src, self.input_seq] :\n\n if tensor is None:\n continue\n\n t_, br = tensor.size()\n sent_states = tensor.view(t_, self.beam_size, remaining_sents)[:, :, idx]\n\n sent_states.copy_(sent_states.index_select(\n 1, beam[b].getCurrentOrigin()))\n\n for l in self.attention_buffers:\n buffer_ = self.attention_buffers[l]\n\n if buffer_ is None:\n continue\n\n for k in buffer_:\n t_, br_, d_ = buffer_[k].size()\n sent_states = buffer_[k].view(t_, self.beam_size, remaining_sents, d_)[:, :, idx, :]\n\n sent_states.data.copy_(sent_states.data.index_select(\n 1, beam[b].getCurrentOrigin()))\n\n # in this section, the sentences that are still active are\n # compacted so that the decoder is not run on completed sentences\n def prune_complete_beam(self, active_idx, remaining_sents):\n\n model_size = 
self.model_size\n\n def update_active(t):\n if t is None:\n return t\n # select only the remaining active sentences\n view = t.data.view(-1, remaining_sents, model_size)\n new_size = list(t.size())\n new_size[-2] = new_size[-2] * len(active_idx) // remaining_sents\n return view.index_select(1, active_idx).view(*new_size)\n\n def update_active_2d(t):\n if t is None:\n return t\n view = t.view(-1, remaining_sents)\n new_size = list(t.size())\n new_size[-1] = new_size[-1] * len(active_idx) // remaining_sents\n new_t = view.index_select(1, active_idx).view(*new_size)\n return new_t\n\n self.context = update_active(self.context)\n\n self.input_seq = update_active_2d(self.input_seq)\n\n self.src = update_active_2d(self.src)\n\n for l in self.attention_buffers:\n buffer_ = self.attention_buffers[l]\n\n for k in buffer_:\n buffer_[k] = update_active(buffer_[k])\n" ]
[ [ "torch.cat", "torch.nn.LSTM", "torch.gt" ] ]
ayushgupta9198/photo_video_upscale_to_any_size
[ "467a8e1c6e3000952df0b0c6478594427db0ac06" ]
[ "infer-video.py" ]
[ "from argparse import ArgumentParser\nfrom tensorflow import keras\nimport numpy as np\nimport cv2\nimport os\n\nparser = ArgumentParser()\n\n\ndef main():\n args = parser.parse_args()\n\n model = keras.models.load_model('models/generator.h5')\n inputs = keras.Input((None, None, 3))\n output = model(inputs)\n model = keras.models.Model(inputs, output)\n\n #for videos\n\n cap = cv2.VideoCapture('/home/ayush-ai/Music/talking_head/talking-head-anime-demo/input_video/3.mp4')\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n output_vid = cv2.VideoWriter('./outputs/srgan-output.mp4', fourcc, 25, (1024,1024))\n while True: \n there_is_frame, frame = cap.read()\n if not there_is_frame:\n return\n\n \n low_res = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n low_res = low_res / 255.0\n\n sr = model.predict(np.expand_dims(low_res, axis=0))[0]\n\n sr = ((sr + 1) / 2.) * 255\n\n sr = cv2.cvtColor(sr, cv2.COLOR_RGB2BGR)\n # print(sr.shape)\n # print(sr.dtype)\n res = sr.astype(np.uint8)\n output_vid.write(res)\n\n output_vid.release()\n cap.release()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.expand_dims", "tensorflow.keras.models.load_model", "tensorflow.keras.Input", "tensorflow.keras.models.Model" ] ]
shikashyam/BigDataSystemsCoursework
[ "d7f9cabbfb18b0e3303292b65af1ffd530e24ccc" ]
[ "Assignment3/src/data/catalog_search.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 10 14:24:15 2022\n\n@author: shshyam\n\"\"\"\n\nfrom importlib.resources import path\nimport h5py\nimport boto3\nfrom botocore.handlers import disable_signing\nfrom os import walk\nimport os\nimport pandas as pd\nfrom geopy import distance\nfrom geopy import Point\n\ndef searchgeocoordinates(approxlat,approxlong,distlimit):\n catalog = pd.read_csv(\"https://raw.githubusercontent.com/MIT-AI-Accelerator/eie-sevir/master/CATALOG.csv\")\n catalog['lat']=(catalog.llcrnrlat+catalog.urcrnrlat)/2\n catalog['long']=(catalog.llcrnrlon+catalog.urcrnrlon)/2\n myloc=Point(approxlat,approxlong)\n catalog['distance']=catalog.apply(lambda row: distancer(row,myloc), axis=1)\n catalog=catalog[catalog[\"distance\"] < int(distlimit)]\n\n if catalog.empty:\n return None,None\n else:\n catalog=catalog.sort_values(by='distance')\n lat=catalog.iloc[0]['llcrnrlat']\n long=catalog.iloc[0]['llcrnrlon']\n \n return lat,long\n\ndef distancer(row,myloc):\n coords_1 = myloc\n coords_2 = (row['lat'], row['long'])\n return distance.distance(coords_1, coords_2).miles\n\n\ndef searchcataloglatlong(lat, long):\n filename=None\n event_id,date=get_event_id(lat,long)\n print(event_id)\n if(event_id!='None'):\n filename,fileindex,catalog=get_filename_index(event_id) \n print(filename,fileindex)\n catalog.to_csv('/Users/sairaghavendraviravalli/Desktop/Projects/neurips-2020-sevir-master-3/src/data/CATALOG.csv')\n return filename,fileindex[0]\n else:\n return None,None\n \n #Filter catalog to include only that event\n \ndef searchcatalogdatetime(date,time,city,state):\n stormdetails = pd.read_csv('/Users/sairaghavendraviravalli/Desktop/Projects/neurips-2020-sevir-master-3/src/data/StormEvents_details-ftp_v1.0_d2019_c20220214.csv')\n date=date.replace('-','')\n yrmonth=date[0:6]\n day=date[6:8]\n time=time.replace(':','')\n event_id = stormdetails[(stormdetails['BEGIN_YEARMONTH'] == int(yrmonth)) & (stormdetails['BEGIN_DAY']==int(day))& (stormdetails['BEGIN_TIME']==int(time)) & (stormdetails['CZ_NAME']==city)& (stormdetails['STATE']==state)]['EVENT_ID'].unique()[0] \n print(event_id)\n if(event_id):\n filename,fileindex,catalog=get_filename_index(event_id) \n print(filename,fileindex)\n catalog.to_csv('/Users/sairaghavendraviravalli/Desktop/Projects/neurips-2020-sevir-master-3/src/data/CATALOG.csv')\n return filename,fileindex[0]\n else:\n return None,None\ndef get_event_id(lat,lon):\n df1 = pd.read_csv(\"https://raw.githubusercontent.com/MIT-AI-Accelerator/eie-sevir/master/CATALOG.csv\")\n df1= df1.round({'llcrnrlat':6,'llcrnrlon':6})\n \n try:\n date = df1[(df1['llcrnrlon']== lon) & ( df1['llcrnrlat']==lat)]['time_utc'].unique()[0]\n event_id = df1[(df1['llcrnrlon']== lon) & ( df1['llcrnrlat']==lat)]['event_id'].unique()[0]\n \n except:\n print('Lat and long not found')\n date= 'None'\n event_id = 'None'\n return event_id,date\n\ndef get_filename_index(event_id):\n catlog = pd.read_csv(\"https://raw.githubusercontent.com/MIT-AI-Accelerator/eie-sevir/master/CATALOG.csv\")\n filtered = pd.DataFrame()\n filtered = pd.concat([filtered,catlog[(catlog[\"event_id\"] == int(event_id))]])\n allfilenames = filtered['file_name'].unique()\n \n vilpd=catlog[(catlog[\"event_id\"] == int(event_id)) & (catlog['img_type']=='vil')]\n filename=vilpd['file_name'].unique()\n fileindex = vilpd['file_index'].to_list()\n catalog = pd.read_csv(\"https://raw.githubusercontent.com/MIT-AI-Accelerator/eie-sevir/master/CATALOG.csv\")\n 
newcatalog=catalog[(catalog['file_name'].isin(allfilenames))]\n print(newcatalog.shape)\n print(newcatalog.head())\n print(\"We have got the locations, Lets Download the files\") \n return filename, fileindex,newcatalog\n \ndef download_hf(filename):\n resource = boto3.resource('s3')\n resource.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)\n bucket=resource.Bucket('sevir')\n \n for i in range(len(filename)):\n filename1 = \"data/\" + filename[i]\n print(\"Downloading\",filename1) \n os.mkdir('/Users/sairaghavendraviravalli/Desktop/Projects/neurips-2020-sevir-master-3/data/vil/'+filename[i].split('/')[1])\n bucket.download_file(filename1 , '/Users/sairaghavendraviravalli/Desktop/Projects/neurips-2020-sevir-master-3/data/'+filename[i]) \n return '/Users/sairaghavendraviravalli/Desktop/Projects/neurips-2020-sevir-master-3/data/'+filename[i]\n \ndef One_Sample_HF(directory,fileindex,filenames):\n newfilepath=''\n for i in range(len(filenames)):\n print(directory+filenames[i])\n with h5py.File(directory+filenames[i],'r') as hf:\n print(directory+\"/\"+filenames[i])\n image_type = filenames[i].split('_')[1]\n \n if image_type == \"VIL\":\n VIL = hf['vil'][int(fileindex[0])]\n os.mkdir('/Users/sairaghavendraviravalli/Desktop/Projects/neurips-2020-sevir-master-3/data/newh5')\n os.mkdir('/Users/sairaghavendraviravalli/Desktop/Projects/neurips-2020-sevir-master-3/data/newh5/vil')\n os.mkdir('/Users/sairaghavendraviravalli/Desktop/Projects/neurips-2020-sevir-master-3/data/newh5/vil/'+filenames[i].split('/')[1])\n hf2 = h5py.File('/Users/sairaghavendraviravalli/Desktop/Projects/neurips-2020-sevir-master-3/data/newh5/'+filenames[i], 'w')\n hf2.create_dataset('vil', data=VIL)\n newfilepath='/Users/sairaghavendraviravalli/Desktop/Projects/neurips-2020-sevir-master-3/data/vil/'+filenames[i].split('/')[1]+filenames[i].split('/')[2] \n \n return newfilepath \n\n \n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
fabrien/aye-saac
[ "3416786caee874d4632bf98366b5a83fe4838e0b" ]
[ "ayesaac/services/object_detection/main.py" ]
[ "from pprint import pprint\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ayesaac.services.common import QueueManager\nfrom ayesaac.services.common.crypter import decode\nfrom ayesaac.utils.config import Config\nfrom ayesaac.utils.logger import get_logger\n\nfrom .coco_category_index import coco_category_index\nfrom .epic_kitchens_category_index import epic_kitchens_category_index\nfrom .plot_bounding_boxes import draw_bounding_boxes\nfrom .utils import calculate_iou\n\nlogger = get_logger(__file__)\nconfig = Config()\n\n\nclass ObjectDetection(object):\n \"\"\"\n The class ObjectDetection purpose is to detect every object in the given pictures.\n \"\"\"\n \n # define constants\n # confidence threshold for retaining object detection\n CONFIDENCE_THRESHOLD = 0.5\n # IoU threshold for determining whether detections are overlapping\n IOU_THRESHOLD = 0.5\n # list of model preferences for selecting detection\n MODEL_PREFS = [\"coco\", \"epic-kitchens\"]\n \n def __init__(self):\n self.queue_manager = QueueManager(\n [\n self.__class__.__name__,\n \"Interpreter\",\n \"ColourDetection\",\n \"PositionDetection\",\n ]\n )\n \n self.models = [\n {\n \"name\": \"coco\",\n \"model_path\": config.directory.data.joinpath(\"coco_resnet\"),\n \"category_index\": coco_category_index\n },\n {\n \"name\": \"epic-kitchens\",\n \"model_path\": config.directory.data.joinpath(\"epic_kitchens\"),\n \"category_index\": epic_kitchens_category_index\n }\n ]\n \n for model in self.models:\n tf_model = tf.saved_model.load(str(model[\"model_path\"]))\n model[\"model\"] = tf_model.signatures[\"serving_default\"]\n \n logger.info(f\"{self.__class__.__name__} ready\")\n\n def run_inference_for_single_image(self, image, model):\n input_tensor = tf.convert_to_tensor(image)\n input_tensor = input_tensor[tf.newaxis, ...]\n output_dict = model(input_tensor)\n\n num_detections = int(output_dict.pop(\"num_detections\"))\n output_dict = {\n key: value[0, :num_detections].numpy() for key, value in output_dict.items()\n }\n output_dict[\"num_detections\"] = num_detections\n output_dict[\"detection_classes\"] = output_dict[\"detection_classes\"].astype(\n np.int32\n )\n return output_dict\n \n def filter_objects(self, objects, img_height, img_width):\n '''Method to filter duplicate detections from the output'''\n retained_objects = []\n for obj in objects:\n retain = True\n # duplicates are of the same class and have very high IoU\n for other_obj in objects:\n # ignore self\n if obj == other_obj:\n continue\n else:\n # calculate the IoU\n iou = calculate_iou(obj[\"bbox\"], other_obj[\"bbox\"], img_height, img_width)\n # check if IoU is greater than threshold\n if iou >= ObjectDetection.IOU_THRESHOLD:\n # we have a duplicate, don't retain the object if the model preference is lower\n if ObjectDetection.MODEL_PREFS.index(obj[\"model\"]) > ObjectDetection.MODEL_PREFS.index(other_obj[\"model\"]):\n retain = False\n break\n \n # append the object if it's okay\n if retain:\n retained_objects.append(obj)\n \n return retained_objects\n \n def callback(self, body, **_):\n all_objects = []\n for picture in body[\"pictures\"]:\n objects = []\n image = decode(picture[\"data\"], picture[\"shape\"], np.uint8)\n img_height = picture[\"shape\"][0]\n img_width = picture[\"shape\"][1]\n\n # iterate through the models, performing object detection\n for model in self.models:\n output = self.run_inference_for_single_image(image, model[\"model\"])\n for i in range(output[\"num_detections\"]):\n if float(output[\"detection_scores\"][i]) >= 
ObjectDetection.CONFIDENCE_THRESHOLD:\n bbox = output[\"detection_boxes\"][i].tolist()\n objects.append(\n {\n \"name\": model[\"category_index\"][output[\"detection_classes\"][i]][\n \"name\"\n ],\n \"confidence\": float(output[\"detection_scores\"][i]),\n \"bbox\": bbox,\n \"from\": picture[\"from\"],\n \"model\": model[\"name\"],\n \"img_height\": img_height,\n \"img_width\": img_width\n }\n )\n \n bboxes = [obj[\"bbox\"] for obj in objects]\n class_names = [obj[\"name\"] for obj in objects]\n scores = [obj[\"confidence\"] for obj in objects]\n models = [obj[\"model\"] for obj in objects]\n \n # draw the bounding boxes\n # (outputs image to docker/volumes/aye-saac_output_data/_data/bbox_[timestamp].png)\n draw_bounding_boxes(image, bboxes, class_names, scores, models, prefix=\"bbox\")\n \n # need to filter the results to remove massively overlapping object detections\n # (this can arise when different models identify the same object for example)\n objects = self.filter_objects(objects, img_height, img_width)\n \n bboxes = [obj[\"bbox\"] for obj in objects]\n class_names = [obj[\"name\"] for obj in objects]\n scores = [obj[\"confidence\"] for obj in objects]\n models = [obj[\"model\"] for obj in objects]\n\n # draw the bounding boxes\n # (outputs image to docker/volumes/aye-saac_output_data/_data/bbox_[timestamp].png)\n draw_bounding_boxes(image, bboxes, class_names, scores, models, prefix=\"bbox_filtered\")\n \n # append the objects to all_objects\n all_objects.extend(objects)\n \n # pprint(objects)\n body[\"objects\"] = all_objects\n body[\"path_done\"].append(self.__class__.__name__)\n\n if \"ColourDetection\" not in body[\"vision_path\"]:\n del body[\"pictures\"]\n\n next_service = body[\"vision_path\"].pop(0)\n self.queue_manager.publish(next_service, body)\n\n def run(self):\n self.queue_manager.start_consuming(self.__class__.__name__, self.callback)\n\n\ndef main():\n obj_detection = ObjectDetection()\n obj_detection.run()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "tensorflow.convert_to_tensor" ] ]
cnyeh/pyscf
[ "95d852c9d18099a55207bb8da0155902c824c2aa" ]
[ "pyscf/mcscf/casci.py" ]
[ "#!/usr/bin/env python\n# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\nimport sys\n\nfrom functools import reduce\nimport numpy\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf import gto\nfrom pyscf import scf\nfrom pyscf import ao2mo\nfrom pyscf import fci\nfrom pyscf.mcscf import addons\nfrom pyscf import __config__\n\nWITH_META_LOWDIN = getattr(__config__, 'mcscf_analyze_with_meta_lowdin', True)\nLARGE_CI_TOL = getattr(__config__, 'mcscf_analyze_large_ci_tol', 0.1)\nPENALTY = getattr(__config__, 'mcscf_casci_CASCI_fix_spin_shift', 0.2)\n\nif sys.version_info < (3,):\n RANGE_TYPE = list\nelse:\n RANGE_TYPE = range\n\n\ndef h1e_for_cas(casci, mo_coeff=None, ncas=None, ncore=None):\n '''CAS sapce one-electron hamiltonian\n\n Args:\n casci : a CASSCF/CASCI object or RHF object\n\n Returns:\n A tuple, the first is the effective one-electron hamiltonian defined in CAS space,\n the second is the electronic energy from core.\n '''\n if mo_coeff is None: mo_coeff = casci.mo_coeff\n if ncas is None: ncas = casci.ncas\n if ncore is None: ncore = casci.ncore\n mo_core = mo_coeff[:,:ncore]\n mo_cas = mo_coeff[:,ncore:ncore+ncas]\n\n hcore = casci.get_hcore()\n energy_core = casci.energy_nuc()\n if mo_core.size == 0:\n corevhf = 0\n else:\n core_dm = numpy.dot(mo_core, mo_core.conj().T) * 2\n corevhf = casci.get_veff(casci.mol, core_dm)\n energy_core += numpy.einsum('ij,ji', core_dm, hcore).real\n energy_core += numpy.einsum('ij,ji', core_dm, corevhf).real * .5\n h1eff = reduce(numpy.dot, (mo_cas.conj().T, hcore+corevhf, mo_cas))\n return h1eff, energy_core\n\ndef analyze(casscf, mo_coeff=None, ci=None, verbose=None,\n large_ci_tol=LARGE_CI_TOL, with_meta_lowdin=WITH_META_LOWDIN,\n **kwargs):\n from pyscf.lo import orth\n from pyscf.tools import dump_mat\n from pyscf.mcscf import addons\n log = logger.new_logger(casscf, verbose)\n\n if mo_coeff is None: mo_coeff = casscf.mo_coeff\n if ci is None: ci = casscf.ci\n nelecas = casscf.nelecas\n ncas = casscf.ncas\n ncore = casscf.ncore\n nocc = ncore + ncas\n mocore = mo_coeff[:,:ncore]\n mocas = mo_coeff[:,ncore:nocc]\n\n label = casscf.mol.ao_labels()\n if (isinstance(ci, (list, tuple, RANGE_TYPE)) and\n not isinstance(casscf.fcisolver, addons.StateAverageFCISolver)):\n log.warn('Mulitple states found in CASCI/CASSCF solver. 
Density '\n 'matrix of the first state is generated in .analyze() function.')\n civec = ci[0]\n else:\n civec = ci\n if getattr(casscf.fcisolver, 'make_rdm1s', None):\n casdm1a, casdm1b = casscf.fcisolver.make_rdm1s(civec, ncas, nelecas)\n casdm1 = casdm1a + casdm1b\n dm1b = numpy.dot(mocore, mocore.conj().T)\n dm1a = dm1b + reduce(numpy.dot, (mocas, casdm1a, mocas.conj().T))\n dm1b += reduce(numpy.dot, (mocas, casdm1b, mocas.conj().T))\n dm1 = dm1a + dm1b\n if log.verbose >= logger.DEBUG2:\n log.info('alpha density matrix (on AO)')\n dump_mat.dump_tri(log.stdout, dm1a, label, **kwargs)\n log.info('beta density matrix (on AO)')\n dump_mat.dump_tri(log.stdout, dm1b, label, **kwargs)\n else:\n casdm1 = casscf.fcisolver.make_rdm1(civec, ncas, nelecas)\n dm1a = (numpy.dot(mocore, mocore.conj().T) * 2 +\n reduce(numpy.dot, (mocas, casdm1, mocas.conj().T)))\n dm1b = None\n dm1 = dm1a\n\n if log.verbose >= logger.INFO:\n ovlp_ao = casscf._scf.get_ovlp()\n # note the last two args of ._eig for mc1step_symm\n occ, ucas = casscf._eig(-casdm1, ncore, nocc)\n log.info('Natural occ %s', str(-occ))\n mocas = numpy.dot(mocas, ucas)\n if with_meta_lowdin:\n log.info('Natural orbital (expansion on meta-Lowdin AOs) in CAS space')\n orth_coeff = orth.orth_ao(casscf.mol, 'meta_lowdin', s=ovlp_ao)\n mocas = reduce(numpy.dot, (orth_coeff.conj().T, ovlp_ao, mocas))\n else:\n log.info('Natural orbital (expansion on AOs) in CAS space')\n dump_mat.dump_rec(log.stdout, mocas, label, start=1, **kwargs)\n if log.verbose >= logger.DEBUG2:\n if not casscf.natorb:\n log.debug2('NOTE: mc.mo_coeff in active space is different to '\n 'the natural orbital coefficients printed in above.')\n if with_meta_lowdin:\n c = reduce(numpy.dot, (orth_coeff.conj().T, ovlp_ao, mo_coeff))\n log.debug2('MCSCF orbital (expansion on meta-Lowdin AOs)')\n else:\n c = mo_coeff\n log.debug2('MCSCF orbital (expansion on AOs)')\n dump_mat.dump_rec(log.stdout, c, label, start=1, **kwargs)\n\n if casscf._scf.mo_coeff is not None:\n addons.map2hf(casscf, casscf._scf.mo_coeff)\n\n if (ci is not None and\n (getattr(casscf.fcisolver, 'large_ci', None) or\n getattr(casscf.fcisolver, 'states_large_ci', None))):\n log.info('** Largest CI components **')\n if isinstance(ci, (list, tuple, RANGE_TYPE)):\n if hasattr(casscf.fcisolver, 'states_large_ci'):\n # defined in state_average_mix_ mcscf object\n res = casscf.fcisolver.states_large_ci(ci, casscf.ncas, casscf.nelecas,\n large_ci_tol, return_strs=False)\n else:\n res = [casscf.fcisolver.large_ci(civec, casscf.ncas, casscf.nelecas,\n large_ci_tol, return_strs=False)\n for civec in ci]\n for i, civec in enumerate(ci):\n log.info(' [alpha occ-orbitals] [beta occ-orbitals] state %-3d CI coefficient', i)\n for c,ia,ib in res[i]:\n log.info(' %-20s %-30s %.12f', ia, ib, c)\n else:\n log.info(' [alpha occ-orbitals] [beta occ-orbitals] CI coefficient')\n res = casscf.fcisolver.large_ci(ci, casscf.ncas, casscf.nelecas,\n large_ci_tol, return_strs=False)\n for c,ia,ib in res:\n log.info(' %-20s %-30s %.12f', ia, ib, c)\n\n if with_meta_lowdin:\n casscf._scf.mulliken_meta(casscf.mol, dm1, s=ovlp_ao, verbose=log)\n else:\n casscf._scf.mulliken_pop(casscf.mol, dm1, s=ovlp_ao, verbose=log)\n return dm1a, dm1b\n\ndef get_fock(mc, mo_coeff=None, ci=None, eris=None, casdm1=None, verbose=None):\n r'''\n Effective one-electron Fock matrix in AO representation\n f = \\sum_{pq} E_{pq} F_{pq}\n F_{pq} = h_{pq} + \\sum_{rs} [(pq|rs)-(ps|rq)] DM_{sr}\n\n Ref.\n Theor. Chim. Acta., 91, 31\n Chem. Phys. 
48, 157\n\n For state-average CASCI/CASSCF object, the effective fock matrix is based\n on the state-average density matrix. To obtain Fock matrix of a specific\n state in the state-average calculations, you can pass \"casdm1\" of the\n specific state to this function.\n\n Args:\n mc: a CASSCF/CASCI object or RHF object\n\n Kwargs:\n mo_coeff (ndarray): orbitals that span the core, active and external\n space.\n ci (ndarray): CI coefficients (or objects to represent the CI\n wavefunctions in DMRG/QMC-MCSCF calculations).\n eris: Integrals for the MCSCF object. Input this object to reduce the\n overhead of computing integrals. It can be generated by\n :func:`mc.ao2mo` method.\n casdm1 (ndarray): 1-particle density matrix in active space. Without\n input casdm1, the density matrix is computed with the input ci\n coefficients/object. If neither ci nor casdm1 were given, density\n matrix is computed by :func:`mc.fcisolver.make_rdm1` method. For\n state-average CASCI/CASCF calculation, this results in the\n effective Fock matrix based on the state-average density matrix.\n To obtain the effective Fock matrix for one particular state, you\n can assign the density matrix of that state to the kwarg casdm1.\n\n Returns:\n Fock matrix\n '''\n\n if ci is None: ci = mc.ci\n if mo_coeff is None: mo_coeff = mc.mo_coeff\n nmo = mo_coeff.shape[1]\n ncore = mc.ncore\n ncas = mc.ncas\n nocc = ncore + ncas\n nelecas = mc.nelecas\n\n if casdm1 is None:\n casdm1 = mc.fcisolver.make_rdm1(ci, ncas, nelecas)\n if getattr(eris, 'ppaa', None) is not None:\n vj = numpy.empty((nmo,nmo))\n vk = numpy.empty((nmo,nmo))\n for i in range(nmo):\n vj[i] = numpy.einsum('ij,qij->q', casdm1, eris.ppaa[i])\n vk[i] = numpy.einsum('ij,iqj->q', casdm1, eris.papa[i])\n mo_inv = numpy.dot(mo_coeff.conj().T, mc._scf.get_ovlp())\n fock = (mc.get_hcore() +\n reduce(numpy.dot, (mo_inv.conj().T, eris.vhf_c+vj-vk*.5, mo_inv)))\n else:\n dm_core = numpy.dot(mo_coeff[:,:ncore]*2, mo_coeff[:,:ncore].conj().T)\n mocas = mo_coeff[:,ncore:nocc]\n dm = dm_core + reduce(numpy.dot, (mocas, casdm1, mocas.conj().T))\n vj, vk = mc._scf.get_jk(mc.mol, dm)\n fock = mc.get_hcore() + vj-vk*.5\n return fock\n\ndef cas_natorb(mc, mo_coeff=None, ci=None, eris=None, sort=False,\n casdm1=None, verbose=None, with_meta_lowdin=WITH_META_LOWDIN):\n '''Transform active orbitals to natrual orbitals, and update the CI wfn\n accordingly\n\n Args:\n mc : a CASSCF/CASCI object or RHF object\n\n Kwargs:\n sort : bool\n Sort natural orbitals wrt the occupancy.\n\n Returns:\n A tuple, the first item is natural orbitals, the second is updated CI\n coefficients, the third is the natural occupancy associated to the\n natural orbitals.\n '''\n from pyscf.lo import orth\n from pyscf.tools import dump_mat\n from pyscf.tools.mo_mapping import mo_1to1map\n if mo_coeff is None: mo_coeff = mc.mo_coeff\n if ci is None: ci = mc.ci\n log = logger.new_logger(mc, verbose)\n ncore = mc.ncore\n ncas = mc.ncas\n nocc = ncore + ncas\n nelecas = mc.nelecas\n if casdm1 is None:\n casdm1 = mc.fcisolver.make_rdm1(ci, ncas, nelecas)\n # orbital symmetry is reserved in this _eig call\n occ, ucas = mc._eig(-casdm1, ncore, nocc)\n if sort:\n casorb_idx = numpy.argsort(occ.round(9), kind='mergesort')\n occ = occ[casorb_idx]\n ucas = ucas[:,casorb_idx]\n\n occ = -occ\n mo_occ = numpy.zeros(mo_coeff.shape[1])\n mo_occ[:ncore] = 2\n mo_occ[ncore:nocc] = occ\n\n mo_coeff1 = mo_coeff.copy()\n mo_coeff1[:,ncore:nocc] = numpy.dot(mo_coeff[:,ncore:nocc], ucas)\n if getattr(mo_coeff, 'orbsym', None) is not 
None:\n orbsym = numpy.copy(mo_coeff.orbsym)\n if sort:\n orbsym[ncore:nocc] = orbsym[ncore:nocc][casorb_idx]\n mo_coeff1 = lib.tag_array(mo_coeff1, orbsym=orbsym)\n\n fcivec = None\n if getattr(mc.fcisolver, 'transform_ci_for_orbital_rotation', None):\n if isinstance(ci, numpy.ndarray):\n fcivec = mc.fcisolver.transform_ci_for_orbital_rotation(ci, ncas, nelecas, ucas)\n elif (isinstance(ci, (list, tuple)) and\n all(isinstance(x[0], numpy.ndarray) for x in ci)):\n fcivec = [mc.fcisolver.transform_ci_for_orbital_rotation(x, ncas, nelecas, ucas)\n for x in ci]\n elif getattr(mc.fcisolver, 'states_transform_ci_for_orbital_rotation', None):\n fcivec = mc.fcisolver.states_transform_ci_for_orbital_rotation(ci, ncas, nelecas, ucas)\n\n # Rerun fcisolver to get wavefunction if it cannot be transformed from\n # existed one.\n if fcivec is None:\n log.info('FCI vector not available, call CASCI to update wavefunction')\n mocas = mo_coeff1[:,ncore:nocc]\n hcore = mc.get_hcore()\n dm_core = numpy.dot(mo_coeff1[:,:ncore]*2, mo_coeff1[:,:ncore].conj().T)\n ecore = mc.energy_nuc()\n ecore+= numpy.einsum('ij,ji', hcore, dm_core)\n h1eff = reduce(numpy.dot, (mocas.conj().T, hcore, mocas))\n if getattr(eris, 'ppaa', None) is not None:\n ecore += eris.vhf_c[:ncore,:ncore].trace()\n h1eff += reduce(numpy.dot, (ucas.conj().T, eris.vhf_c[ncore:nocc,ncore:nocc], ucas))\n aaaa = ao2mo.restore(4, eris.ppaa[ncore:nocc,ncore:nocc,:,:], ncas)\n aaaa = ao2mo.incore.full(aaaa, ucas)\n else:\n if getattr(mc, 'with_df', None):\n aaaa = mc.with_df.ao2mo(mocas)\n else:\n aaaa = ao2mo.kernel(mc.mol, mocas)\n corevhf = mc.get_veff(mc.mol, dm_core)\n ecore += numpy.einsum('ij,ji', dm_core, corevhf) * .5\n h1eff += reduce(numpy.dot, (mocas.conj().T, corevhf, mocas))\n\n\n # See label_symmetry_ function in casci_symm.py which initialize the\n # orbital symmetry information in fcisolver. 
This orbital symmetry\n # labels should be reordered to match the sorted active space orbitals.\n if sort and getattr(mo_coeff1, 'orbsym', None) is not None:\n mc.fcisolver.orbsym = mo_coeff1.orbsym[ncore:nocc]\n\n max_memory = max(400, mc.max_memory-lib.current_memory()[0])\n e, fcivec = mc.fcisolver.kernel(h1eff, aaaa, ncas, nelecas, ecore=ecore,\n max_memory=max_memory, verbose=log)\n log.debug('In Natural orbital, CASCI energy = %s', e)\n\n if log.verbose >= logger.INFO:\n ovlp_ao = mc._scf.get_ovlp()\n # where_natorb gives the new locations of the natural orbitals\n where_natorb = mo_1to1map(ucas)\n log.debug('where_natorb %s', str(where_natorb))\n log.info('Natural occ %s', str(occ))\n if with_meta_lowdin:\n log.info('Natural orbital (expansion on meta-Lowdin AOs) in CAS space')\n label = mc.mol.ao_labels()\n orth_coeff = orth.orth_ao(mc.mol, 'meta_lowdin', s=ovlp_ao)\n mo_cas = reduce(numpy.dot, (orth_coeff.conj().T, ovlp_ao, mo_coeff1[:,ncore:nocc]))\n else:\n log.info('Natural orbital (expansion on AOs) in CAS space')\n label = mc.mol.ao_labels()\n mo_cas = mo_coeff1[:,ncore:nocc]\n dump_mat.dump_rec(log.stdout, mo_cas, label, start=1)\n\n if mc._scf.mo_coeff is not None:\n s = reduce(numpy.dot, (mo_coeff1[:,ncore:nocc].conj().T,\n mc._scf.get_ovlp(), mc._scf.mo_coeff))\n idx = numpy.argwhere(abs(s)>.4)\n for i,j in idx:\n log.info('<CAS-nat-orb|mo-hf> %d %d %12.8f',\n ncore+i+1, j+1, s[i,j])\n return mo_coeff1, fcivec, mo_occ\n\ndef canonicalize(mc, mo_coeff=None, ci=None, eris=None, sort=False,\n cas_natorb=False, casdm1=None, verbose=logger.NOTE,\n with_meta_lowdin=WITH_META_LOWDIN):\n '''Canonicalized CASCI/CASSCF orbitals of effecitive Fock matrix and\n update CI coefficients accordingly.\n\n Effective Fock matrix is built with one-particle density matrix (see\n also :func:`mcscf.casci.get_fock`). For state-average CASCI/CASSCF object,\n the canonicalized orbitals are based on the state-average density matrix.\n To obtain canonicalized orbitals for an individual state, you need to pass\n \"casdm1\" of the specific state to this function.\n\n Args:\n mc: a CASSCF/CASCI object or RHF object\n\n Kwargs:\n mo_coeff (ndarray): orbitals that span the core, active and external\n space.\n ci (ndarray): CI coefficients (or objects to represent the CI\n wavefunctions in DMRG/QMC-MCSCF calculations).\n eris: Integrals for the MCSCF object. Input this object to reduce the\n overhead of computing integrals. It can be generated by\n :func:`mc.ao2mo` method.\n sort (bool): Whether the canonicalized orbitals are sorted based on\n the orbital energy (diagonal part of the effective Fock matrix)\n within each subspace (core, active, external). If point group\n symmetry is not available in the system, orbitals are always\n sorted. When point group symmetry is available, sort=False will\n preserve the symmetry label of input orbitals and only sort the\n orbitals in each symmetry sector. sort=True will reorder all\n orbitals over all symmetry sectors in each subspace and the\n symmetry labels may be changed.\n cas_natorb (bool): Whether to transform active orbitals to natual\n orbitals. If enabled, the output orbitals in active space are\n transformed to natural orbitals and CI coefficients are updated\n accordingly.\n casdm1 (ndarray): 1-particle density matrix in active space. This\n density matrix is used to build effective fock matrix. Without\n input casdm1, the density matrix is computed with the input ci\n coefficients/object. 
If neither ci nor casdm1 were given, density\n matrix is computed by :func:`mc.fcisolver.make_rdm1` method. For\n state-average CASCI/CASCF calculation, this results in a set of\n canonicalized orbitals of state-average effective Fock matrix.\n To canonicalize the orbitals for one particular state, you can\n assign the density matrix of that state to the kwarg casdm1.\n\n Returns:\n A tuple, (natural orbitals, CI coefficients, orbital energies)\n The orbital energies are the diagonal terms of effective Fock matrix.\n '''\n from pyscf.mcscf import addons\n log = logger.new_logger(mc, verbose)\n\n if mo_coeff is None: mo_coeff = mc.mo_coeff\n if ci is None: ci = mc.ci\n if casdm1 is None:\n if (isinstance(ci, (list, tuple, RANGE_TYPE)) and\n not isinstance(mc.fcisolver, addons.StateAverageFCISolver)):\n log.warn('Mulitple states found in CASCI solver. First state is '\n 'used to compute the natural orbitals in active space.')\n casdm1 = mc.fcisolver.make_rdm1(ci[0], mc.ncas, mc.nelecas)\n else:\n casdm1 = mc.fcisolver.make_rdm1(ci, mc.ncas, mc.nelecas)\n\n ncore = mc.ncore\n nocc = ncore + mc.ncas\n nmo = mo_coeff.shape[1]\n fock_ao = mc.get_fock(mo_coeff, ci, eris, casdm1, verbose)\n if cas_natorb:\n mo_coeff1, ci, mc.mo_occ = mc.cas_natorb(mo_coeff, ci, eris, sort, casdm1,\n verbose, with_meta_lowdin)\n else:\n # Keep the active space unchanged by default. The rotation in active space\n # may cause problem for external CI solver eg DMRG.\n mo_coeff1 = mo_coeff.copy()\n log.info('Density matrix diagonal elements %s', casdm1.diagonal())\n\n fock = reduce(numpy.dot, (mo_coeff1.conj().T, fock_ao, mo_coeff1))\n mo_energy = fock.diagonal().copy()\n\n mask = numpy.ones(nmo, dtype=bool)\n frozen = getattr(mc, 'frozen', None)\n if frozen is not None:\n if isinstance(frozen, (int, numpy.integer)):\n mask[:frozen] = False\n else:\n mask[frozen] = False\n core_idx = numpy.where(mask[:ncore])[0]\n vir_idx = numpy.where(mask[nocc:])[0] + nocc\n\n if getattr(mo_coeff, 'orbsym', None) is not None:\n orbsym = mo_coeff.orbsym\n else:\n orbsym = numpy.zeros(nmo, dtype=int)\n\n if len(core_idx) > 0:\n # note the last two args of ._eig for mc1step_symm\n # mc._eig function is called to handle symmetry adapated fock\n w, c1 = mc._eig(fock[core_idx[:,None],core_idx], 0, ncore,\n orbsym[core_idx])\n if sort:\n idx = numpy.argsort(w.round(9), kind='mergesort')\n w = w[idx]\n c1 = c1[:,idx]\n orbsym[core_idx] = orbsym[core_idx][idx]\n mo_coeff1[:,core_idx] = numpy.dot(mo_coeff1[:,core_idx], c1)\n mo_energy[core_idx] = w\n\n if len(vir_idx) > 0:\n w, c1 = mc._eig(fock[vir_idx[:,None],vir_idx], nocc, nmo,\n orbsym[vir_idx])\n if sort:\n idx = numpy.argsort(w.round(9), kind='mergesort')\n w = w[idx]\n c1 = c1[:,idx]\n orbsym[vir_idx] = orbsym[vir_idx][idx]\n mo_coeff1[:,vir_idx] = numpy.dot(mo_coeff1[:,vir_idx], c1)\n mo_energy[vir_idx] = w\n\n if getattr(mo_coeff, 'orbsym', None) is not None:\n mo_coeff1 = lib.tag_array(mo_coeff1, orbsym=orbsym)\n\n if log.verbose >= logger.DEBUG:\n for i in range(nmo):\n log.debug('i = %d <i|F|i> = %12.8f', i+1, mo_energy[i])\n# still return ci coefficients, in case the canonicalization funciton changed\n# cas orbitals, the ci coefficients should also be updated.\n return mo_coeff1, ci, mo_energy\n\n\ndef kernel(casci, mo_coeff=None, ci0=None, verbose=logger.NOTE):\n '''CASCI solver\n '''\n if mo_coeff is None: mo_coeff = casci.mo_coeff\n log = logger.new_logger(casci, verbose)\n t0 = (logger.process_clock(), logger.perf_counter())\n log.debug('Start CASCI')\n\n ncas = casci.ncas\n 
nelecas = casci.nelecas\n\n # 2e\n eri_cas = casci.get_h2eff(mo_coeff)\n t1 = log.timer('integral transformation to CAS space', *t0)\n\n # 1e\n h1eff, energy_core = casci.get_h1eff(mo_coeff)\n log.debug('core energy = %.15g', energy_core)\n t1 = log.timer('effective h1e in CAS space', *t1)\n\n if h1eff.shape[0] != ncas:\n raise RuntimeError('Active space size error. nmo=%d ncore=%d ncas=%d' %\n (mo_coeff.shape[1], casci.ncore, ncas))\n\n # FCI\n max_memory = max(400, casci.max_memory-lib.current_memory()[0])\n e_tot, fcivec = casci.fcisolver.kernel(h1eff, eri_cas, ncas, nelecas,\n ci0=ci0, verbose=log,\n max_memory=max_memory,\n ecore=energy_core)\n\n t1 = log.timer('FCI solver', *t1)\n e_cas = e_tot - energy_core\n return e_tot, e_cas, fcivec\n\n\ndef as_scanner(mc):\n '''Generating a scanner for CASCI PES.\n\n The returned solver is a function. This function requires one argument\n \"mol\" as input and returns total CASCI energy.\n\n The solver will automatically use the results of last calculation as the\n initial guess of the new calculation. All parameters of MCSCF object\n are automatically applied in the solver.\n\n Note scanner has side effects. It may change many underlying objects\n (_scf, with_df, with_x2c, ...) during calculation.\n\n Examples:\n\n >>> from pyscf import gto, scf, mcscf\n >>> mf = scf.RHF(gto.Mole().set(verbose=0))\n >>> mc_scanner = mcscf.CASCI(mf, 4, 4).as_scanner()\n >>> mc_scanner(gto.M(atom='N 0 0 0; N 0 0 1.1'))\n >>> mc_scanner(gto.M(atom='N 0 0 0; N 0 0 1.5'))\n '''\n if isinstance(mc, lib.SinglePointScanner):\n return mc\n\n logger.info(mc, 'Create scanner for %s', mc.__class__)\n\n class CASCI_Scanner(mc.__class__, lib.SinglePointScanner):\n def __init__(self, mc):\n self.__dict__.update(mc.__dict__)\n self._scf = mc._scf.as_scanner()\n\n def __call__(self, mol_or_geom, mo_coeff=None, ci0=None):\n if isinstance(mol_or_geom, gto.Mole):\n mol = mol_or_geom\n else:\n mol = self.mol.set_geom_(mol_or_geom, inplace=False)\n\n # These properties can be updated when calling mf_scanner(mol) if\n # they are shared with mc._scf. In certain scenario the properties\n # may be created for mc separately, e.g. when mcscf.approx_hessian is\n # called. For safety, the code below explicitly resets these\n # properties.\n for key in ('with_df', 'with_x2c', 'with_solvent', 'with_dftd3'):\n sub_mod = getattr(self, key, None)\n if sub_mod:\n sub_mod.reset(mol)\n\n if mo_coeff is None:\n mf_scanner = self._scf\n mf_scanner(mol)\n mo_coeff = mf_scanner.mo_coeff\n if ci0 is None:\n ci0 = self.ci\n self.mol = mol\n e_tot = self.kernel(mo_coeff, ci0)[0]\n return e_tot\n return CASCI_Scanner(mc)\n\n\nclass CASCI(lib.StreamObject):\n '''CASCI\n\n Args:\n mf_or_mol : SCF object or Mole object\n SCF or Mole to define the problem size.\n ncas : int\n Number of active orbitals.\n nelecas : int or a pair of int\n Number of electrons in active space.\n\n Kwargs:\n ncore : int\n Number of doubly occupied core orbitals. If not presented, this\n parameter can be automatically determined.\n\n Attributes:\n verbose : int\n Print level. Default value equals to :class:`Mole.verbose`.\n max_memory : float or int\n Allowed memory in MB. Default value equals to :class:`Mole.max_memory`.\n ncas : int\n Active space size.\n nelecas : tuple of int\n Active (nelec_alpha, nelec_beta)\n ncore : int or tuple of int\n Core electron number. 
In UHF-CASSCF, it's a tuple to indicate the different core eletron numbers.\n natorb : bool\n Whether to transform natural orbitals in active space.\n Note: when CASCI/CASSCF are combined with DMRG solver or selected\n CI solver, enabling this parameter may slightly change the total energy.\n False by default.\n canonicalization : bool\n Whether to canonicalize orbitals in core and external space\n against the general Fock matrix.\n The orbitals in active space are NOT transformed by default. To\n get the natural orbitals in active space, the attribute .natorb\n needs to be enabled.\n True by default.\n sorting_mo_energy : bool\n Whether to sort the orbitals based on the diagonal elements of the\n general Fock matrix. Default is False.\n fcisolver : an instance of :class:`FCISolver`\n The pyscf.fci module provides several FCISolver for different scenario. Generally,\n fci.direct_spin1.FCISolver can be used for all RHF-CASSCF. However, a proper FCISolver\n can provide better performance and better numerical stability. One can either use\n :func:`fci.solver` function to pick the FCISolver by the program or manually assigen\n the FCISolver to this attribute, e.g.\n\n >>> from pyscf import fci\n >>> mc = mcscf.CASSCF(mf, 4, 4)\n >>> mc.fcisolver = fci.solver(mol, singlet=True)\n >>> mc.fcisolver = fci.direct_spin1.FCISolver(mol)\n\n You can control FCISolver by setting e.g.::\n\n >>> mc.fcisolver.max_cycle = 30\n >>> mc.fcisolver.conv_tol = 1e-7\n\n For more details of the parameter for FCISolver, See :mod:`fci`.\n\n Saved results\n\n e_tot : float\n Total MCSCF energy (electronic energy plus nuclear repulsion)\n e_cas : float\n CAS space FCI energy\n ci : ndarray\n CAS space FCI coefficients\n mo_coeff : ndarray\n When canonicalization is specified, the orbitals are canonical\n orbitals which make the general Fock matrix (Fock operator on top\n of MCSCF 1-particle density matrix) diagonalized within each\n subspace (core, active, external). 
If natorb (natural orbitals in\n active space) is specified, the active segment of the mo_coeff is\n natural orbitls.\n mo_energy : ndarray\n Diagonal elements of general Fock matrix (in mo_coeff\n representation).\n mo_occ : ndarray\n Occupation numbers of natural orbitals if natorb is specified.\n\n Examples:\n\n >>> from pyscf import gto, scf, mcscf\n >>> mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='ccpvdz', verbose=0)\n >>> mf = scf.RHF(mol)\n >>> mf.scf()\n >>> mc = mcscf.CASCI(mf, 6, 6)\n >>> mc.kernel()[0]\n -108.980200816243354\n '''\n\n natorb = getattr(__config__, 'mcscf_casci_CASCI_natorb', False)\n canonicalization = getattr(__config__, 'mcscf_casci_CASCI_canonicalization', True)\n sorting_mo_energy = getattr(__config__, 'mcscf_casci_CASCI_sorting_mo_energy', False)\n\n def __init__(self, mf_or_mol, ncas, nelecas, ncore=None):\n if isinstance(mf_or_mol, gto.Mole):\n mf = scf.RHF(mf_or_mol)\n else:\n mf = mf_or_mol\n\n mol = mf.mol\n self.mol = mol\n self._scf = mf\n self.verbose = mol.verbose\n self.stdout = mol.stdout\n self.max_memory = mf.max_memory\n self.ncas = ncas\n if isinstance(nelecas, (int, numpy.integer)):\n nelecb = (nelecas-mol.spin)//2\n neleca = nelecas - nelecb\n self.nelecas = (neleca, nelecb)\n else:\n self.nelecas = (nelecas[0],nelecas[1])\n self.ncore = ncore\n singlet = (getattr(__config__, 'mcscf_casci_CASCI_fcisolver_direct_spin0', False)\n and self.nelecas[0] == self.nelecas[1]) # leads to direct_spin1\n self.fcisolver = fci.solver(mol, singlet, symm=False)\n# CI solver parameters are set in fcisolver object\n self.fcisolver.lindep = getattr(__config__,\n 'mcscf_casci_CASCI_fcisolver_lindep', 1e-10)\n self.fcisolver.max_cycle = getattr(__config__,\n 'mcscf_casci_CASCI_fcisolver_max_cycle', 200)\n self.fcisolver.conv_tol = getattr(__config__,\n 'mcscf_casci_CASCI_fcisolver_conv_tol', 1e-8)\n\n##################################################\n# don't modify the following attributes, they are not input options\n self.e_tot = 0\n self.e_cas = None\n self.ci = None\n self.mo_coeff = mf.mo_coeff\n self.mo_energy = mf.mo_energy\n self.mo_occ = None\n self.converged = False\n\n keys = set(('natorb', 'canonicalization', 'sorting_mo_energy'))\n self._keys = set(self.__dict__.keys()).union(keys)\n\n @property\n def ncore(self):\n if self._ncore is None:\n ncorelec = self.mol.nelectron - sum(self.nelecas)\n assert(ncorelec % 2 == 0)\n return ncorelec // 2\n else:\n return self._ncore\n @ncore.setter\n def ncore(self, x):\n assert(x is None or isinstance(x, (int, numpy.integer)))\n self._ncore = x\n\n def dump_flags(self, verbose=None):\n log = logger.new_logger(self, verbose)\n log.info('')\n log.info('******** CASCI flags ********')\n ncore = self.ncore\n ncas = self.ncas\n nvir = self.mo_coeff.shape[1] - ncore - ncas\n log.info('CAS (%de+%de, %do), ncore = %d, nvir = %d',\n self.nelecas[0], self.nelecas[1], ncas, ncore, nvir)\n assert(self.ncas > 0)\n log.info('natorb = %s', self.natorb)\n log.info('canonicalization = %s', self.canonicalization)\n log.info('sorting_mo_energy = %s', self.sorting_mo_energy)\n log.info('max_memory %d (MB)', self.max_memory)\n if getattr(self.fcisolver, 'dump_flags', None):\n self.fcisolver.dump_flags(log.verbose)\n if self.mo_coeff is None:\n log.error('Orbitals for CASCI are not specified. 
The relevant SCF '\n 'object may not be initialized.')\n\n if (getattr(self._scf, 'with_solvent', None) and\n not getattr(self, 'with_solvent', None)):\n log.warn('''Solvent model %s was found at SCF level but not applied to the CASCI object.\nThe SCF solvent model will not be applied to the current CASCI calculation.\nTo enable the solvent model for CASCI, the following code needs to be called\n from pyscf import solvent\n mc = mcscf.CASCI(...)\n mc = solvent.ddCOSMO(mc)\n''',\n self._scf.with_solvent.__class__)\n return self\n\n def reset(self, mol=None):\n if mol is not None:\n self.mol = mol\n self.fcisolver.mol = mol\n self._scf.reset(mol)\n return self\n\n def energy_nuc(self):\n return self._scf.energy_nuc()\n\n def get_hcore(self, mol=None):\n return self._scf.get_hcore(mol)\n\n @lib.with_doc(scf.hf.get_jk.__doc__)\n def get_jk(self, mol, dm, hermi=1, with_j=True, with_k=True, omega=None):\n return self._scf.get_jk(mol, dm, hermi,\n with_j=with_j, with_k=with_k, omega=omega)\n\n @lib.with_doc(scf.hf.get_veff.__doc__)\n def get_veff(self, mol=None, dm=None, hermi=1):\n if mol is None: mol = self.mol\n if dm is None:\n mocore = self.mo_coeff[:,:self.ncore]\n dm = numpy.dot(mocore, mocore.conj().T) * 2\n# don't call self._scf.get_veff because _scf might be DFT object\n vj, vk = self.get_jk(mol, dm, hermi)\n return vj - vk * .5\n\n def _eig(self, h, *args):\n return scf.hf.eig(h, None)\n\n def get_h2cas(self, mo_coeff=None):\n '''Compute the active space two-particle Hamiltonian.\n\n Note It is different to get_h2eff when df.approx_hessian is applied,\n in which get_h2eff function returns the DF integrals while get_h2cas\n returns the regular 2-electron integrals.\n '''\n return self.ao2mo(mo_coeff)\n\n def get_h2eff(self, mo_coeff=None):\n '''Compute the active space two-particle Hamiltonian.\n\n Note It is different to get_h2cas when df.approx_hessian is applied.\n in which get_h2eff function returns the DF integrals while get_h2cas\n returns the regular 2-electron integrals.\n '''\n return self.ao2mo(mo_coeff)\n\n def ao2mo(self, mo_coeff=None):\n '''Compute the active space two-particle Hamiltonian.\n '''\n ncore = self.ncore\n ncas = self.ncas\n nocc = ncore + ncas\n if mo_coeff is None:\n ncore = self.ncore\n mo_coeff = self.mo_coeff[:,ncore:nocc]\n elif mo_coeff.shape[1] != ncas:\n mo_coeff = mo_coeff[:,ncore:nocc]\n\n if self._scf._eri is not None:\n eri = ao2mo.full(self._scf._eri, mo_coeff,\n max_memory=self.max_memory)\n else:\n eri = ao2mo.full(self.mol, mo_coeff, verbose=self.verbose,\n max_memory=self.max_memory)\n return eri\n\n get_h1cas = h1e_for_cas = h1e_for_cas\n\n def get_h1eff(self, mo_coeff=None, ncas=None, ncore=None):\n return self.h1e_for_cas(mo_coeff, ncas, ncore)\n get_h1eff.__doc__ = h1e_for_cas.__doc__\n\n def casci(self, mo_coeff=None, ci0=None, verbose=None):\n return self.kernel(mo_coeff, ci0, verbose)\n def kernel(self, mo_coeff=None, ci0=None, verbose=None):\n '''\n Returns:\n Five elements, they are\n total energy,\n active space CI energy,\n the active space FCI wavefunction coefficients or DMRG wavefunction ID,\n the MCSCF canonical orbital coefficients,\n the MCSCF canonical orbital coefficients.\n\n They are attributes of mcscf object, which can be accessed by\n .e_tot, .e_cas, .ci, .mo_coeff, .mo_energy\n '''\n if mo_coeff is None:\n mo_coeff = self.mo_coeff\n else:\n self.mo_coeff = mo_coeff\n if ci0 is None:\n ci0 = self.ci\n log = logger.new_logger(self, verbose)\n\n if self.verbose >= logger.WARN:\n self.check_sanity()\n 
self.dump_flags(log)\n\n self.e_tot, self.e_cas, self.ci = \\\n kernel(self, mo_coeff, ci0=ci0, verbose=log)\n\n if self.canonicalization:\n self.canonicalize_(mo_coeff, self.ci,\n sort=self.sorting_mo_energy,\n cas_natorb=self.natorb, verbose=log)\n elif self.natorb:\n # FIXME (pyscf-2.0): Whether to transform natural orbitals in\n # active space when this flag is enabled?\n log.warn('The attribute .natorb of mcscf object affects only the '\n 'orbital canonicalization.\\n'\n 'If you would like to get natural orbitals in active space '\n 'without touching core and external orbitals, an explicit '\n 'call to mc.cas_natorb_() is required')\n\n if getattr(self.fcisolver, 'converged', None) is not None:\n self.converged = numpy.all(self.fcisolver.converged)\n if self.converged:\n log.info('CASCI converged')\n else:\n log.info('CASCI not converged')\n else:\n self.converged = True\n self._finalize()\n return self.e_tot, self.e_cas, self.ci, self.mo_coeff, self.mo_energy\n\n def _finalize(self):\n log = logger.Logger(self.stdout, self.verbose)\n if log.verbose >= logger.NOTE and getattr(self.fcisolver, 'spin_square', None):\n if isinstance(self.e_cas, (float, numpy.number)):\n ss = self.fcisolver.spin_square(self.ci, self.ncas, self.nelecas)\n log.note('CASCI E = %.15g E(CI) = %.15g S^2 = %.7f',\n self.e_tot, self.e_cas, ss[0])\n else:\n for i, e in enumerate(self.e_cas):\n ss = self.fcisolver.spin_square(self.ci[i], self.ncas, self.nelecas)\n log.note('CASCI state %d E = %.15g E(CI) = %.15g S^2 = %.7f',\n i, self.e_tot[i], e, ss[0])\n else:\n if isinstance(self.e_cas, (float, numpy.number)):\n log.note('CASCI E = %.15g E(CI) = %.15g', self.e_tot, self.e_cas)\n else:\n for i, e in enumerate(self.e_cas):\n log.note('CASCI state %d E = %.15g E(CI) = %.15g',\n i, self.e_tot[i], e)\n return self\n\n as_scanner = as_scanner\n\n @lib.with_doc(cas_natorb.__doc__)\n def cas_natorb(self, mo_coeff=None, ci=None, eris=None, sort=False,\n casdm1=None, verbose=None, with_meta_lowdin=WITH_META_LOWDIN):\n return cas_natorb(self, mo_coeff, ci, eris, sort, casdm1, verbose,\n with_meta_lowdin)\n @lib.with_doc(cas_natorb.__doc__)\n def cas_natorb_(self, mo_coeff=None, ci=None, eris=None, sort=False,\n casdm1=None, verbose=None, with_meta_lowdin=WITH_META_LOWDIN):\n self.mo_coeff, self.ci, self.mo_occ = cas_natorb(self, mo_coeff, ci, eris,\n sort, casdm1, verbose)\n return self.mo_coeff, self.ci, self.mo_occ\n\n def get_fock(self, mo_coeff=None, ci=None, eris=None, casdm1=None,\n verbose=None):\n return get_fock(self, mo_coeff, ci, eris, casdm1, verbose)\n\n canonicalize = canonicalize\n\n @lib.with_doc(canonicalize.__doc__)\n def canonicalize_(self, mo_coeff=None, ci=None, eris=None, sort=False,\n cas_natorb=False, casdm1=None, verbose=None,\n with_meta_lowdin=WITH_META_LOWDIN):\n self.mo_coeff, ci, self.mo_energy = \\\n canonicalize(self, mo_coeff, ci, eris,\n sort, cas_natorb, casdm1, verbose, with_meta_lowdin)\n if cas_natorb: # When active space is changed, the ci solution needs to be updated\n self.ci = ci\n return self.mo_coeff, ci, self.mo_energy\n\n analyze = analyze\n\n @lib.with_doc(addons.sort_mo.__doc__)\n def sort_mo(self, caslst, mo_coeff=None, base=1):\n if mo_coeff is None: mo_coeff = self.mo_coeff\n return addons.sort_mo(self, mo_coeff, caslst, base)\n\n @lib.with_doc(addons.state_average.__doc__)\n def state_average_(self, weights=(0.5,0.5)):\n addons.state_average_(self, weights)\n return self\n @lib.with_doc(addons.state_average.__doc__)\n def state_average(self, weights=(0.5,0.5)):\n return 
addons.state_average(self, weights)\n\n @lib.with_doc(addons.state_specific_.__doc__)\n def state_specific_(self, state=1):\n addons.state_specific(self, state)\n return self\n\n def make_rdm1s(self, mo_coeff=None, ci=None, ncas=None, nelecas=None,\n ncore=None, **kwargs):\n '''One-particle density matrices for alpha and beta spin on AO basis\n '''\n if mo_coeff is None: mo_coeff = self.mo_coeff\n if ci is None: ci = self.ci\n if ncas is None: ncas = self.ncas\n if nelecas is None: nelecas = self.nelecas\n if ncore is None: ncore = self.ncore\n\n casdm1a, casdm1b = self.fcisolver.make_rdm1s(ci, ncas, nelecas)\n mocore = mo_coeff[:,:ncore]\n mocas = mo_coeff[:,ncore:ncore+ncas]\n dm1b = numpy.dot(mocore, mocore.conj().T)\n dm1a = dm1b + reduce(numpy.dot, (mocas, casdm1a, mocas.conj().T))\n dm1b += reduce(numpy.dot, (mocas, casdm1b, mocas.conj().T))\n return dm1a, dm1b\n\n def make_rdm1(self, mo_coeff=None, ci=None, ncas=None, nelecas=None,\n ncore=None, **kwargs):\n '''One-particle density matrix in AO representation\n '''\n if mo_coeff is None: mo_coeff = self.mo_coeff\n if ci is None: ci = self.ci\n if ncas is None: ncas = self.ncas\n if nelecas is None: nelecas = self.nelecas\n if ncore is None: ncore = self.ncore\n\n casdm1 = self.fcisolver.make_rdm1(ci, ncas, nelecas)\n mocore = mo_coeff[:,:ncore]\n mocas = mo_coeff[:,ncore:ncore+ncas]\n dm1 = numpy.dot(mocore, mocore.conj().T) * 2\n dm1 = dm1 + reduce(numpy.dot, (mocas, casdm1, mocas.conj().T))\n return dm1\n\n def fix_spin_(self, shift=PENALTY, ss=None):\n r'''Use level shift to control FCI solver spin.\n\n .. math::\n\n (H + shift*S^2) |\\Psi\\rangle = E |\\Psi\\rangle\n\n Kwargs:\n shift : float\n Energy penalty for states which have wrong spin\n ss : number\n S^2 expection value == s*(s+1)\n '''\n fci.addons.fix_spin_(self.fcisolver, shift, ss)\n return self\n fix_spin = fix_spin_\n\n def density_fit(self, auxbasis=None, with_df=None):\n from pyscf.mcscf import df\n return df.density_fit(self, auxbasis, with_df)\n\n def sfx2c1e(self):\n from pyscf.x2c import sfx2c1e\n self._scf = sfx2c1e.sfx2c1e(self._scf).run()\n self.mo_coeff = self._scf.mo_coeff\n self.mo_energy = self._scf.mo_energy\n return self\n x2c = x2c1e = sfx2c1e\n\n def nuc_grad_method(self):\n from pyscf.grad import casci\n return casci.Gradients(self)\n\nscf.hf.RHF.CASCI = scf.rohf.ROHF.CASCI = lib.class_as_method(CASCI)\nscf.uhf.UHF.CASCI = None\n\ndel(WITH_META_LOWDIN, LARGE_CI_TOL, PENALTY)\n\n\nif __name__ == '__main__':\n from pyscf import mcscf\n mol = gto.Mole()\n mol.verbose = 0\n mol.output = None#\"out_h2o\"\n mol.atom = [\n ['O', ( 0., 0. , 0. 
)],\n ['H', ( 0., -0.757, 0.587)],\n ['H', ( 0., 0.757 , 0.587)],]\n\n mol.basis = {'H': 'sto-3g',\n 'O': '6-31g',}\n mol.build()\n\n m = scf.RHF(mol)\n ehf = m.scf()\n mc = mcscf.CASCI(m, 4, 4)\n mc.fcisolver = fci.solver(mol)\n mc.natorb = 1\n emc = mc.kernel()[0]\n print(ehf, emc, emc-ehf)\n #-75.9577817425 -75.9624554777 -0.00467373522233\n print(emc+75.9624554777)\n\n# mc = CASCI(m, 4, (3,1))\n# #mc.fcisolver = fci.direct_spin1\n# mc.fcisolver = fci.solver(mol, False)\n# emc = mc.casci()[0]\n# print(emc - -75.439016172976)\n#\n# mol = gto.Mole()\n# mol.verbose = 0\n# mol.output = \"out_casci\"\n# mol.atom = [\n# [\"C\", (-0.65830719, 0.61123287, -0.00800148)],\n# [\"C\", ( 0.73685281, 0.61123287, -0.00800148)],\n# [\"C\", ( 1.43439081, 1.81898387, -0.00800148)],\n# [\"C\", ( 0.73673681, 3.02749287, -0.00920048)],\n# [\"C\", (-0.65808819, 3.02741487, -0.00967948)],\n# [\"C\", (-1.35568919, 1.81920887, -0.00868348)],\n# [\"H\", (-1.20806619, -0.34108413, -0.00755148)],\n# [\"H\", ( 1.28636081, -0.34128013, -0.00668648)],\n# [\"H\", ( 2.53407081, 1.81906387, -0.00736748)],\n# [\"H\", ( 1.28693681, 3.97963587, -0.00925948)],\n# [\"H\", (-1.20821019, 3.97969587, -0.01063248)],\n# [\"H\", (-2.45529319, 1.81939187, -0.00886348)],]\n#\n# mol.basis = {'H': 'sto-3g',\n# 'C': 'sto-3g',}\n# mol.build()\n#\n# m = scf.RHF(mol)\n# ehf = m.scf()\n# mc = CASCI(m, 9, 8)\n# mc.fcisolver = fci.solver(mol)\n# emc = mc.casci()[0]\n# print(ehf, emc, emc-ehf)\n# print(emc - -227.948912536)\n#\n# mc = CASCI(m, 9, (5,3))\n# #mc.fcisolver = fci.direct_spin1\n# mc.fcisolver = fci.solver(mol, False)\n# mc.fcisolver.nroots = 3\n# emc = mc.casci()[0]\n# print(emc[0] - -227.7674519720)\n" ]
[ [ "numpy.dot", "numpy.empty", "numpy.zeros", "numpy.copy", "numpy.ones", "numpy.where", "numpy.einsum", "numpy.all" ] ]
epimap/covid19_datasets
[ "c58e7e2083d1432517b6504d3fad6212f4263d27" ]
[ "covid19_datasets/uk_area_stats.py" ]
[ "import pandas as pd\nimport numpy as np\nimport datetime\nfrom .constants import DATE_COLUMN_NAME\n\nimport logging\n_log = logging.getLogger(__name__)\n\n\nUK_CASES_PATH = 'https://api.coronavirus.data.gov.uk/v2/data?areaType=TOREPLACE&metric=cumCasesBySpecimenDate&metric=newCasesBySpecimenDate&metric=cumCasesBySpecimenDateRate&format=csv' # New link as of 29/4/21\nENGLAND_DEATHS_PATH = 'https://c19downloads.azureedge.net/downloads/csv/coronavirus-deaths_latest.csv' # TODO: This has been deprecated, update to new dashboard source\n\nSCOTLAND_PATH = 'https://raw.githubusercontent.com/DataScienceScotland/COVID-19-Management-Information/master/export/health-boards/cumulative-cases.csv'\n\ndef _backfill_missing_data(df):\n \"\"\"\n Datasets might have some dates missing if there were no cases reported on these dates\n Backfill them with 0\n \"\"\"\n # if there are NaNs, replace them with 0\n df = df.fillna(0.0)\n\n # Some dates are missing as there were no numbers reported\n # backfill them with 0\n all_days = pd.date_range(df.columns.min(), df.columns.max(), freq='D')\n missing_days = np.setdiff1d(all_days, df.columns)\n for missing_day in missing_days:\n df[missing_day] = 0.0\n\n df = df[np.sort(df.columns)]\n\n return df\n\n\ndef _load_cases_dataset(area_type, country=\"England\"):\n _log.info(f\"Loading {country} dataset from \" + UK_CASES_PATH)\n df = pd.read_csv(UK_CASES_PATH.replace(\"TOREPLACE\", area_type))\n _log.info(\"Loaded\")\n df = df[df[\"areaCode\"].str.startswith(country[0])] \n df[DATE_COLUMN_NAME] = pd.to_datetime(df[\"date\"].astype(str))\n\n # Convert so that\n # Each row corresponds to an area\n # Each column corresponds to a date\n df['Daily lab-confirmed cases'] = df['newCasesBySpecimenDate'].astype('float')#\n df[\"Area name\"] = df[\"areaName\"]\n df['Country'] = country\n\n df = df.pivot_table(index=['Country', 'Area name'], columns=DATE_COLUMN_NAME,\n values='Daily lab-confirmed cases')\n df = _backfill_missing_data(df)\n\n return df\n\n\ndef _load_scotland_cases_dataset():\n _log.info(\"Loading dataset from \" + SCOTLAND_PATH)\n df = pd.read_csv(SCOTLAND_PATH, error_bad_lines=False)\n _log.info(\"Loaded\")\n\n # downloaded file is (dates x areas), and we want the opposite\n df = df.transpose()\n\n # turn first row into a header\n new_header = df.iloc[0]\n df = df[1:]\n df.columns = pd.to_datetime(new_header.astype(str))\n df.columns.name = None\n\n df = df.replace('*', 0.0).astype(float)\n\n # original has cumulative data, and we want new cases per day\n for i in range(len(df.columns) - 1, 1, -1):\n df.iloc[:, i] = df.iloc[:, i] - df.iloc[:, i-1]\n\n # set multi index by country and area\n df['Country'] = 'Scotland'\n df = df.reset_index().rename(columns={'index': 'Area name'}).set_index(['Country', 'Area name'])\n\n return df\n\n\nclass UKCovid19Data:\n \"\"\"\n Provides COVID-19 data for various parts of the UK\n \"\"\"\n \n england_cases_data = None\n wales_cases_data = None\n wales_tests_data = None\n scotland_cases_data = None\n ENGLAND_UPPER_TIER_AUTHORITY = 'utla'\n ENGLAND_LOWER_TIER_AUTHORITY = 'ltla'\n\n def __init__(self, force_load=False, england_area_type=ENGLAND_UPPER_TIER_AUTHORITY):\n \"\"\"\n Loads datasets and store them in memory.\n Further instances of this class will reuse the same data\n\n :param force_load: If true, forces download of the dataset, even if it was loaded already\n \"\"\"\n if UKCovid19Data.england_cases_data is None or force_load or UKCovid19Data.england_area_type != england_area_type:\n UKCovid19Data.england_area_type = 
england_area_type\n UKCovid19Data.england_cases_data = _load_cases_dataset(england_area_type)\n\n if UKCovid19Data.wales_cases_data is None or UKCovid19Data.wales_tests_data is None or force_load:\n UKCovid19Data.wales_cases_data = _load_cases_dataset(england_area_type, \"Wales\")\n\n if UKCovid19Data.scotland_cases_data is None or force_load:\n UKCovid19Data.scotland_cases_data = _load_scotland_cases_dataset()\n\n def get_cases_data(self):\n \"\"\"\n Returns the dataset as Pandas dataframe\n\n Format:\n - Row index: Country (England, Wales or Scotland), Area name\n - Columns: Dates\n - Each cell value is a number of new cases registered on that day\n\n Note: Scotland provides data by NHS Board, not by county\n \"\"\"\n df = pd.concat([UKCovid19Data.england_cases_data, UKCovid19Data.wales_cases_data, UKCovid19Data.scotland_cases_data])\n # in case they have uneven number of columns\n df = df.fillna(0.0)\n\n return df\n\n" ]
[ [ "pandas.read_csv", "numpy.sort", "numpy.setdiff1d", "pandas.concat" ] ]
jasoncin/dewarp-deskew-stn
[ "050170fadceb9d98508aec6a855ea36b9b06cb74" ]
[ "ops.py" ]
[ "import tensorflow as tf\nimport tensorflow.contrib as tf_contrib\nimport numpy as np\nfrom transformer import spatial_transformer_network as stn\n\n# Xavier : tf_contrib.layers.xavier_initializer()\n# He : tf_contrib.layers.variance_scaling_initializer()\n# Normal : tf.random_normal_initializer(mean=0.0, stddev=0.02)\n# l2_decay : tf_contrib.layers.l2_regularizer(0.0001)\n\nweight_init = tf_contrib.layers.variance_scaling_initializer()\nweight_regularizer = tf_contrib.layers.l2_regularizer(0.0001)\n\n\n##################################################################################\n# Layer\n##################################################################################\n\ndef conv(x, channels, kernel=4, stride=2, padding='SAME', use_bias=True, scope='conv_0'):\n\n with tf.variable_scope(scope):\n x = tf.layers.conv2d(inputs=x, filters=channels,\n kernel_size=kernel, kernel_initializer=weight_init,\n kernel_regularizer=weight_regularizer,\n strides=stride, use_bias=use_bias, padding=padding)\n\n return x\n\ndef fully_conneted(x, units, use_bias=True, scope='fully_0'):\n with tf.variable_scope(scope):\n x = flatten(x)\n x = tf.layers.dense(x, units=units, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer, use_bias=use_bias)\n\n return x\n\ndef spatial_transformer_layer(name_scope,\n input_tensor,\n img_size,\n kernel_size,\n pooling=None,\n strides=[1, 1, 1, 1],\n pool_strides=[1, 1, 1, 1],\n activation=tf.nn.relu,\n use_bn=False,\n use_mvn=False,\n is_training=False,\n use_lrn=False,\n keep_prob=1.0,\n dropout_maps=False,\n init_opt=0,\n bias_init=0.1):\n \"\"\"\n Define spatial transformer network layer\n Args:\n scope_or_name: `string` or `VariableScope`, the scope to open.\n inputs: `4-D Tensor`, it is assumed that `inputs` is shaped `[batch_size, Y, X, Z]`.\n kernel: `4-D Tensor`, [kernel_height, kernel_width, in_channels, out_channels] kernel.\n img_size: 2D array, [image_width. image_height]\n bias: `1-D Tensor`, [out_channels] bias.\n strides: list of `ints`, length 4, the stride of the sliding window for each dimension of `inputs`.\n activation: activation function to be used (default: `tf.nn.relu`).\n use_bn: `bool`, whether or not to include batch normalization in the layer.\n is_training: `bool`, whether or not the layer is in training mode. This is only used if `use_bn` == True.\n use_lrn: `bool`, whether or not to include local response normalization in the layer.\n keep_prob: `double`, dropout keep prob.\n dropout_maps: `bool`, If true whole maps are dropped or not, otherwise single elements.\n padding: `string` from 'SAME', 'VALID'. 
The type of padding algorithm used in the convolution.\n Returns:\n `4-D Tensor`, has the same type `inputs`.\n \"\"\"\n\n img_height = img_size[0]\n img_width = img_size[1]\n\n with tf.variable_scope(name_scope):\n if init_opt == 0:\n stddev = np.sqrt(2 / (kernel_size[0] * kernel_size[1] * kernel_size[2] * kernel_size[3]))\n\n elif init_opt == 1:\n stddev = 5e-2\n\n elif init_opt == 2:\n stddev = min(np.sqrt(2.0 / (kernel_size[0] * kernel_size[1] * kernel_size[2])), 5e-2)\n\n kernel = tf.get_variable('weights', kernel_size,\n initializer=tf.random_normal_initializer(stddev=stddev))\n\n conv = tf.nn.conv2d(input_tensor, kernel, strides, padding='SAME', name='conv')\n\n bias = tf.get_variable('bias', kernel_size[3],\n initializer=tf.constant_initializer(value=bias_init))\n\n output_tensor = tf.nn.bias_add(conv, bias, name='pre_activation')\n\n if activation:\n output_tensor = activation(output_tensor, name='activation')\n\n if use_lrn:\n output_tensor = tf.nn.local_response_normalization(output_tensor, name='local_responsive_normalization')\n\n if dropout_maps:\n conv_shape = tf.shape(output_tensor)\n n_shape = tf.stack([conv_shape[0], 1, 1, conv_shape[3]])\n output_tensor = tf.nn.dropout(output_tensor, keep_prob=keep_prob, noise_shape=n_shape)\n else:\n output_tensor = tf.nn.dropout(output_tensor, keep_prob=keep_prob)\n\n if pooling:\n output_tensor = tf.nn.max_pool(output_tensor, ksize=pooling, strides=pool_strides, padding='VALID')\n\n output_tensor = tf.contrib.layers.flatten(output_tensor)\n\n output_tensor = tf.contrib.layers.fully_connected(output_tensor, 64, scope='fully_connected_layer_1')\n output_tensor = tf.nn.tanh(output_tensor)\n\n output_tensor = tf.contrib.layers.fully_connected(output_tensor, 6, scope='fully_connected_layer_2')\n output_tensor = tf.nn.tanh(output_tensor)\n\n stn_output = stn(input_fmap=input_tensor, theta=output_tensor, out_dims=(img_height, img_width))\n\n return stn_output, output_tensor\n\ndef resblock(x_init, channels, is_training=True, use_bias=True, downsample=False, scope='resblock') :\n with tf.variable_scope(scope) :\n\n x = batch_norm(x_init, is_training, scope='batch_norm_0')\n x = relu(x)\n\n\n if downsample :\n x = conv(x, channels, kernel=3, stride=2, use_bias=use_bias, scope='conv_0')\n x_init = conv(x_init, channels, kernel=1, stride=2, use_bias=use_bias, scope='conv_init')\n\n else :\n x = conv(x, channels, kernel=3, stride=1, use_bias=use_bias, scope='conv_0')\n\n x = batch_norm(x, is_training, scope='batch_norm_1')\n x = relu(x)\n x = conv(x, channels, kernel=3, stride=1, use_bias=use_bias, scope='conv_1')\n\n return x + x_init\n\ndef bottle_resblock(x_init, channels, is_training=True, use_bias=True, downsample=False, scope='bottle_resblock') :\n with tf.variable_scope(scope) :\n x = batch_norm(x_init, is_training, scope='batch_norm_1x1_front')\n shortcut = relu(x)\n\n x = conv(shortcut, channels, kernel=1, stride=1, use_bias=use_bias, scope='conv_1x1_front')\n x = batch_norm(x, is_training, scope='batch_norm_3x3')\n x = relu(x)\n\n if downsample :\n x = conv(x, channels, kernel=3, stride=2, use_bias=use_bias, scope='conv_0')\n shortcut = conv(shortcut, channels*4, kernel=1, stride=2, use_bias=use_bias, scope='conv_init')\n\n else :\n x = conv(x, channels, kernel=3, stride=1, use_bias=use_bias, scope='conv_0')\n shortcut = conv(shortcut, channels * 4, kernel=1, stride=1, use_bias=use_bias, scope='conv_init')\n\n x = batch_norm(x, is_training, scope='batch_norm_1x1_back')\n x = relu(x)\n x = conv(x, channels*4, kernel=1, stride=1, 
use_bias=use_bias, scope='conv_1x1_back')\n\n return x + shortcut\n\ndef get_residual_layer(res_n) :\n x = []\n\n if res_n == 18 :\n x = [2, 2, 2, 2]\n\n if res_n == 34 :\n x = [3, 4, 6, 3]\n\n if res_n == 50 :\n x = [3, 4, 6, 3]\n\n if res_n == 101 :\n x = [3, 4, 23, 3]\n\n if res_n == 152 :\n x = [3, 8, 36, 3]\n\n return x\n\n##################################################################################\n# Sampling\n##################################################################################\n\ndef flatten(x) :\n return tf.layers.flatten(x)\n\ndef global_avg_pooling(x):\n gap = tf.reduce_mean(x, axis=[1, 2], keepdims=True)\n return gap\n\ndef avg_pooling(x) :\n return tf.layers.average_pooling2d(x, pool_size=2, strides=2, padding='SAME')\n\n##################################################################################\n# Activation function\n##################################################################################\n\n\ndef relu(x):\n return tf.nn.relu(x)\n\n##################################################################################\n# Normalization function\n##################################################################################\n\ndef batch_norm(x, is_training=True, scope='batch_norm'):\n return tf_contrib.layers.batch_norm(x,\n decay=0.9, epsilon=1e-05,\n center=True, scale=True, updates_collections=None,\n is_training=is_training, scope=scope)\n\n##################################################################################\n# Loss function\n##################################################################################\n\ndef focal_loss_(labels, theta, org , gamma=2.0, alpha=4.0):\n logits = stn(org, theta)\n # logits = (0.5 > logits).float() * 1\n logits = tf.cast(logits + 0.5, tf.float32)\n # logits = tf.one_hot(logits, depth=2)\n\n epsilon = 1.e-9\n\n labels = tf.convert_to_tensor(labels, tf.float32)\n logits = tf.convert_to_tensor(logits, tf.float32)\n\n logits = tf.nn.softmax(logits, dim=-1)\n model_out = tf.add(logits, epsilon)\n\n ce = tf.multiply(labels, -tf.log(model_out))\n weight = tf.multiply(labels, tf.pow(tf.subtract(1., model_out), gamma))\n fl = tf.multiply(alpha, tf.multiply(weight, ce))\n reduced_fl = tf.reduce_max(fl, axis=1)\n return reduced_fl\n\nfrom tensorflow.python.ops import array_ops\n\n\ndef focal_loss(target_tensor, theta, org, weights=None, alpha=0.25, gamma=2):\n r\"\"\"Compute focal loss for predictions.\n Multi-labels Focal loss formula:\n FL = -alpha * (z-p)^gamma * log(p) -(1-alpha) * p^gamma * log(1-p)\n ,which alpha = 0.25, gamma = 2, p = sigmoid(x), z = target_tensor.\n Args:\n prediction_tensor: A float tensor of shape [batch_size, num_anchors,\n num_classes] representing the predicted logits for each class\n target_tensor: A float tensor of shape [batch_size, num_anchors,\n num_classes] representing one-hot encoded classification targets\n weights: A float tensor of shape [batch_size, num_anchors]\n alpha: A scalar tensor for focal loss alpha hyper-parameter\n gamma: A scalar tensor for focal loss gamma hyper-parameter\n Returns:\n loss: A (scalar) tensor representing the value of the loss function\n \"\"\"\n prediction_tensor = stn(org, theta)\n prediction_tensor = tf.to_int32(prediction_tensor > 0.5)\n prediction_tensor = tf.one_hot(prediction_tensor, depth=2)\n prediction_tensor = tf.dtypes.cast(prediction_tensor, tf.float32)\n\n # target_tensor = tf.convert_to_tensor(target_tensor, tf.int32)\n target_tensor = tf.dtypes.cast(target_tensor, tf.int32)\n target_tensor = tf.one_hot(target_tensor, 
depth=2)\n target_tensor = tf.dtypes.cast(target_tensor, tf.float32)\n\n prediction_tensor = tf.convert_to_tensor(prediction_tensor, tf.float32)\n target_tensor = tf.convert_to_tensor(target_tensor, tf.float32)\n\n print(\"Target tensor shape\", target_tensor.get_shape().as_list())\n print(\"Prediction tensor shape\", prediction_tensor.get_shape().as_list())\n sigmoid_p = tf.nn.sigmoid(prediction_tensor)\n\n zeros = array_ops.zeros_like(sigmoid_p, dtype=sigmoid_p.dtype)\n\n # For poitive prediction, only need consider front part loss, back part is 0;\n # target_tensor > zeros <=> z=1, so poitive coefficient = z - p.\n pos_p_sub = array_ops.where(target_tensor > zeros, target_tensor - sigmoid_p, zeros)\n\n # For negative prediction, only need consider back part loss, front part is 0;\n # target_tensor > zeros <=> z=1, so negative coefficient = 0.\n neg_p_sub = array_ops.where(target_tensor > zeros, zeros, sigmoid_p)\n per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(sigmoid_p, 1e-8, 1.0)) \\\n - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - sigmoid_p, 1e-8, 1.0))\n return tf.reduce_sum(per_entry_cross_ent)\n\ndef classification_loss(labels, theta, org) :\n logits = stn(org, theta)\n\n n_class = 1\n flat_logits = tf.reshape(logits, [-1])\n flat_labels = tf.reshape(labels, [-1])\n\n # print(tf.shape(flat_logits))\n # print(tf.shape(flat_labels))\n\n loss = tf.losses.mean_squared_error(flat_labels, flat_logits)\n\n # flat_logits = tf.multiply(flat_logits, 255.0)\n # flat_labels = tf.multiply(flat_labels, 255.0)\n\n # flat_logits = tf.dtypes.cast(flat_logits, dtype=tf.int32)\n # flat_labels = tf.dtypes.cast(flat_labels, dtype=tf.int32)\n\n # accuracy, update_op = tf.metrics.accuracy(labels=flat_labels[0],\n # predictions=flat_logits[0])\n\n # return loss, accuracy\n return loss\n\n\n\n" ]
[ [ "tensorflow.contrib.layers.batch_norm", "tensorflow.constant_initializer", "tensorflow.nn.conv2d", "tensorflow.contrib.layers.fully_connected", "tensorflow.python.ops.array_ops.where", "tensorflow.reshape", "tensorflow.clip_by_value", "tensorflow.losses.mean_squared_error", "tensorflow.stack", "tensorflow.nn.softmax", "tensorflow.nn.tanh", "tensorflow.one_hot", "tensorflow.contrib.layers.flatten", "tensorflow.cast", "tensorflow.random_normal_initializer", "tensorflow.shape", "tensorflow.layers.flatten", "tensorflow.contrib.layers.variance_scaling_initializer", "tensorflow.subtract", "tensorflow.variable_scope", "numpy.sqrt", "tensorflow.layers.dense", "tensorflow.nn.sigmoid", "tensorflow.add", "tensorflow.nn.bias_add", "tensorflow.nn.dropout", "tensorflow.nn.max_pool", "tensorflow.nn.relu", "tensorflow.log", "tensorflow.layers.conv2d", "tensorflow.reduce_sum", "tensorflow.to_int32", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.layers.average_pooling2d", "tensorflow.dtypes.cast", "tensorflow.nn.local_response_normalization", "tensorflow.convert_to_tensor", "tensorflow.multiply", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.reduce_max", "tensorflow.reduce_mean" ] ]
eytanadler/OpenMDAO
[ "c9d2eb2444241d6551ba9a1853436de5dd673687", "c9d2eb2444241d6551ba9a1853436de5dd673687" ]
[ "openmdao/core/problem.py", "openmdao/utils/array_utils.py" ]
[ "\"\"\"Define the Problem class and a FakeComm class for non-MPI users.\"\"\"\n\nimport sys\nimport pprint\nimport os\nimport logging\nimport weakref\nimport time\n\nfrom collections import defaultdict, namedtuple, OrderedDict\nfrom fnmatch import fnmatchcase\nfrom itertools import product\n\nfrom io import StringIO\n\nimport numpy as np\nimport scipy.sparse as sparse\n\nfrom openmdao.core.component import Component\nfrom openmdao.jacobians.dictionary_jacobian import DictionaryJacobian, _CheckingJacobian\nfrom openmdao.core.driver import Driver, record_iteration\nfrom openmdao.core.explicitcomponent import ExplicitComponent\nfrom openmdao.core.group import Group, System\nfrom openmdao.core.total_jac import _TotalJacInfo\nfrom openmdao.core.constants import _DEFAULT_OUT_STREAM, _UNDEFINED\nfrom openmdao.approximation_schemes.complex_step import ComplexStep\nfrom openmdao.approximation_schemes.finite_difference import FiniteDifference\nfrom openmdao.solvers.solver import SolverInfo\nfrom openmdao.error_checking.check_config import _default_checks, _all_checks\nfrom openmdao.recorders.recording_iteration_stack import _RecIteration\nfrom openmdao.recorders.recording_manager import RecordingManager, record_viewer_data, \\\n record_model_options\nfrom openmdao.utils.record_util import create_local_meta\nfrom openmdao.utils.general_utils import ContainsAll, pad_name, _is_slicer_op, _slice_indices\nfrom openmdao.utils.mpi import FakeComm\nfrom openmdao.utils.mpi import MPI\nfrom openmdao.utils.name_maps import name2abs_names\nfrom openmdao.utils.options_dictionary import OptionsDictionary\nfrom openmdao.utils.units import simplify_unit\nfrom openmdao.core.constants import _SetupStatus\nfrom openmdao.utils.name_maps import abs_key2rel_key\nfrom openmdao.vectors.vector import _full_slice\nfrom openmdao.vectors.default_vector import DefaultVector\nfrom openmdao.utils.logger_utils import get_logger, TestLogger\nimport openmdao.utils.coloring as coloring_mod\nfrom openmdao.utils.hooks import _setup_hooks\nfrom openmdao.utils.om_warnings import issue_warning, DerivativesWarning, warn_deprecation, \\\n OMInvalidCheckDerivativesOptionsWarning\n\ntry:\n from openmdao.vectors.petsc_vector import PETScVector\nexcept ImportError:\n PETScVector = None\n\nfrom openmdao.utils.name_maps import rel_key2abs_key, rel_name2abs_name\n\n\nErrorTuple = namedtuple('ErrorTuple', ['forward', 'reverse', 'forward_reverse'])\nMagnitudeTuple = namedtuple('MagnitudeTuple', ['forward', 'reverse', 'fd'])\n\n_contains_all = ContainsAll()\n\n\nCITATION = \"\"\"@article{openmdao_2019,\n Author={Justin S. Gray and John T. Hwang and Joaquim R. R. A.\n Martins and Kenneth T. Moore and Bret A. Naylor},\n Title=\"{OpenMDAO: An Open-Source Framework for Multidisciplinary\n Design, Analysis, and Optimization}\",\n Journal=\"{Structural and Multidisciplinary Optimization}\",\n Year={2019},\n Publisher={Springer},\n pdf={http://openmdao.org/pubs/openmdao_overview_2019.pdf},\n note= {In Press}\n }\"\"\"\n\n\nclass Problem(object):\n \"\"\"\n Top-level container for the systems and drivers.\n\n Attributes\n ----------\n model : <System>\n Pointer to the top-level <System> object (root node in the tree).\n comm : MPI.Comm or <FakeComm>\n The global communicator.\n driver : <Driver>\n Slot for the driver. 
The default driver is `Driver`, which just runs\n the model once.\n _mode : 'fwd' or 'rev'\n Derivatives calculation mode, 'fwd' for forward, and 'rev' for\n reverse (adjoint).\n _orig_mode : 'fwd', 'rev', or 'auto'\n Derivatives calculation mode assigned by the user. If set to 'auto', _mode will be\n automatically assigned to 'fwd' or 'rev' based on relative sizes of design variables vs.\n responses.\n _initial_condition_cache : dict\n Any initial conditions that are set at the problem level via setitem are cached here\n until they can be processed.\n cite : str\n Listing of relevant citations that should be referenced when\n publishing work that uses this class.\n options : <OptionsDictionary>\n Dictionary with general options for the problem.\n recording_options : <OptionsDictionary>\n Dictionary with problem recording options.\n _rec_mgr : <RecordingManager>\n Object that manages all recorders added to this problem.\n _check : bool\n If True, call check_config at the end of final_setup.\n _filtered_vars_to_record : dict\n Dictionary of lists of design vars, constraints, etc. to record.\n _logger : object or None\n Object for logging config checks if _check is True.\n _name : str\n Problem name.\n _system_options_recorded : bool\n A flag to indicate whether the system options for all the systems have been recorded\n _metadata : dict\n Problem level metadata.\n _run_counter : int\n The number of times run_driver or run_model has been called.\n _warned : bool\n Bool to check if `value` deprecation warning has occured yet\n \"\"\"\n\n def __init__(self, model=None, driver=None, comm=None, name=None, **options):\n \"\"\"\n Initialize attributes.\n\n Parameters\n ----------\n model : <System> or None\n The top-level <System>. If not specified, an empty <Group> will be created.\n driver : <Driver> or None\n The driver for the problem. If not specified, a simple \"Run Once\" driver will be used.\n comm : MPI.Comm or <FakeComm> or None\n The global communicator.\n name : str\n Problem name. 
Can be used to specify a Problem instance when multiple Problems\n exist.\n **options : named args\n All remaining named args are converted to options.\n \"\"\"\n self.cite = CITATION\n self._name = name\n self._warned = False\n\n if comm is None:\n try:\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n except ImportError:\n comm = FakeComm()\n\n if model is None:\n self.model = Group()\n elif isinstance(model, System):\n self.model = model\n else:\n raise TypeError(self.msginfo +\n \": The value provided for 'model' is not a valid System.\")\n\n if driver is None:\n self.driver = Driver()\n elif isinstance(driver, Driver):\n self.driver = driver\n else:\n raise TypeError(self.msginfo +\n \": The value provided for 'driver' is not a valid Driver.\")\n\n self.comm = comm\n\n self._mode = None # mode is assigned in setup()\n\n self._initial_condition_cache = {}\n\n self._metadata = None\n self._run_counter = -1\n self._system_options_recorded = False\n self._rec_mgr = RecordingManager()\n\n # General options\n self.options = OptionsDictionary(parent_name=type(self).__name__)\n self.options.declare('coloring_dir', types=str,\n default=os.path.join(os.getcwd(), 'coloring_files'),\n desc='Directory containing coloring files (if any) for this Problem.')\n self.options.update(options)\n\n # Case recording options\n self.recording_options = OptionsDictionary(parent_name=type(self).__name__)\n\n self.recording_options.declare('record_desvars', types=bool, default=True,\n desc='Set to True to record design variables at the '\n 'problem level')\n self.recording_options.declare('record_objectives', types=bool, default=True,\n desc='Set to True to record objectives at the problem level')\n self.recording_options.declare('record_constraints', types=bool, default=True,\n desc='Set to True to record constraints at the '\n 'problem level')\n self.recording_options.declare('record_responses', types=bool, default=False,\n desc='Set True to record constraints and objectives at the '\n 'problem level.')\n self.recording_options.declare('record_inputs', types=bool, default=False,\n desc='Set True to record inputs at the '\n 'problem level.')\n self.recording_options.declare('record_outputs', types=bool, default=True,\n desc='Set True to record outputs at the '\n 'problem level.')\n self.recording_options.declare('record_residuals', types=bool, default=False,\n desc='Set True to record residuals at the '\n 'problem level.')\n self.recording_options.declare('record_derivatives', types=bool, default=False,\n desc='Set to True to record derivatives for the problem '\n 'level')\n self.recording_options.declare('record_abs_error', types=bool, default=True,\n desc='Set to True to record absolute error of '\n 'model nonlinear solver')\n self.recording_options.declare('record_rel_error', types=bool, default=True,\n desc='Set to True to record relative error of model \\\n nonlinear solver')\n self.recording_options.declare('includes', types=list, default=['*'],\n desc='Patterns for variables to include in recording. \\\n Uses fnmatch wildcards')\n self.recording_options.declare('excludes', types=list, default=[],\n desc='Patterns for vars to exclude in recording '\n '(processed post-includes). 
Uses fnmatch wildcards')\n\n _setup_hooks(self)\n\n def _get_var_abs_name(self, name):\n if name in self.model._var_allprocs_abs2meta:\n return name\n elif name in self.model._var_allprocs_prom2abs_list['output']:\n return self.model._var_allprocs_prom2abs_list['output'][name][0]\n elif name in self.model._var_allprocs_prom2abs_list['input']:\n abs_names = self.model._var_allprocs_prom2abs_list['input'][name]\n if len(abs_names) == 1:\n return abs_names[0]\n else:\n raise KeyError(\"{}: Using promoted name `{}' is ambiguous and matches unconnected \"\n \"inputs %s. Use absolute name to disambiguate.\".format(self.msginfo,\n name,\n abs_names))\n\n raise KeyError('{}: Variable \"{}\" not found.'.format(self.msginfo, name))\n\n @property\n def msginfo(self):\n \"\"\"\n Return info to prepend to messages.\n\n Returns\n -------\n str\n Info to prepend to messages.\n \"\"\"\n if self._name is None:\n return type(self).__name__\n return '{} {}'.format(type(self).__name__, self._name)\n\n def _get_inst_id(self):\n return self._name\n\n def is_local(self, name):\n \"\"\"\n Return True if the named variable or system is local to the current process.\n\n Parameters\n ----------\n name : str\n Name of a variable or system.\n\n Returns\n -------\n bool\n True if the named system or variable is local to this process.\n \"\"\"\n if self._metadata is None:\n raise RuntimeError(\"{}: is_local('{}') was called before setup() \"\n \"completed.\".format(self.msginfo, name))\n\n try:\n abs_name = self._get_var_abs_name(name)\n except KeyError:\n sub = self.model._get_subsystem(name)\n return sub is not None and sub._is_local\n\n # variable exists, but may be remote\n return abs_name in self.model._var_abs2meta['input'] or \\\n abs_name in self.model._var_abs2meta['output']\n\n def _get_cached_val(self, name, get_remote=False):\n # We have set and cached already\n if name in self._initial_condition_cache:\n return self._initial_condition_cache[name]\n\n # Vector not setup, so we need to pull values from saved metadata request.\n else:\n proms = self.model._var_allprocs_prom2abs_list\n meta = self.model._var_abs2meta\n try:\n conns = self.model._conn_abs_in2out\n except AttributeError:\n conns = {}\n\n abs_names = name2abs_names(self.model, name)\n if not abs_names:\n raise KeyError('{}: Variable \"{}\" not found.'.format(self.model.msginfo, name))\n\n abs_name = abs_names[0]\n vars_to_gather = self._metadata['vars_to_gather']\n\n io = 'output' if abs_name in meta['output'] else 'input'\n if abs_name in meta[io]:\n if abs_name in conns:\n val = meta['output'][conns[abs_name]]['val']\n else:\n val = meta[io][abs_name]['val']\n\n if get_remote and abs_name in vars_to_gather:\n owner = vars_to_gather[abs_name]\n if self.model.comm.rank == owner:\n self.model.comm.bcast(val, root=owner)\n else:\n val = self.model.comm.bcast(None, root=owner)\n\n if val is not _UNDEFINED:\n # Need to cache the \"get\" in case the user calls in-place numpy operations.\n self._initial_condition_cache[name] = val\n\n return val\n\n @property\n def _recording_iter(self):\n return self._metadata['recording_iter']\n\n def __getitem__(self, name):\n \"\"\"\n Get an output/input variable.\n\n Parameters\n ----------\n name : str\n Promoted or relative variable name in the root system's namespace.\n\n Returns\n -------\n float or ndarray or any python object\n the requested output/input variable.\n \"\"\"\n return self.get_val(name, get_remote=None)\n\n def get_val(self, name, units=None, indices=None, get_remote=False):\n \"\"\"\n Get an 
output/input variable.\n\n Function is used if you want to specify display units.\n\n Parameters\n ----------\n name : str\n Promoted or relative variable name in the root system's namespace.\n units : str, optional\n Units to convert to before return.\n indices : int or list of ints or tuple of ints or int ndarray or Iterable or None, optional\n Indices or slice to return.\n get_remote : bool or None\n If True, retrieve the value even if it is on a remote process. Note that if the\n variable is remote on ANY process, this function must be called on EVERY process\n in the Problem's MPI communicator.\n If False, only retrieve the value if it is on the current process, or only the part\n of the value that's on the current process for a distributed variable.\n If None and the variable is remote or distributed, a RuntimeError will be raised.\n\n Returns\n -------\n object\n The value of the requested output/input variable.\n \"\"\"\n if self._metadata['setup_status'] == _SetupStatus.POST_SETUP:\n val = self._get_cached_val(name, get_remote=get_remote)\n if val is not _UNDEFINED:\n if indices is not None:\n val = val[indices]\n if units is not None:\n val = self.model.convert2units(name, val, simplify_unit(units))\n else:\n val = self.model.get_val(name, units=units, indices=indices, get_remote=get_remote,\n from_src=True)\n\n if val is _UNDEFINED:\n if get_remote:\n raise KeyError('{}: Variable name \"{}\" not found.'.format(self.msginfo, name))\n else:\n raise RuntimeError(f\"{self.model.msginfo}: Variable '{name}' is not local to \"\n f\"rank {self.comm.rank}. You can retrieve values from \"\n \"other processes using `get_val(<name>, get_remote=True)`.\")\n\n return val\n\n def __setitem__(self, name, value):\n \"\"\"\n Set an output/input variable.\n\n Parameters\n ----------\n name : str\n Promoted or relative variable name in the root system's namespace.\n value : float or ndarray or any python object\n value to set this variable to.\n \"\"\"\n self.set_val(name, value)\n\n def set_val(self, name, val=None, units=None, indices=None, **kwargs):\n \"\"\"\n Set an output/input variable.\n\n Function is used if you want to set a value using a different unit.\n\n Parameters\n ----------\n name : str\n Promoted or relative variable name in the root system's namespace.\n units : str, optional\n Units that value is defined in.\n indices : int or list of ints or tuple of ints or int ndarray or Iterable or None, optional\n Indices or slice to set to specified value.\n val : float or ndarray or list or None\n Value to set this variable to.\n **kwargs : dict\n Additional keyword argument for deprecated `value` arg.\n \"\"\"\n if 'value' not in kwargs:\n value = None\n elif 'value' in kwargs:\n value = kwargs['value']\n\n if value is not None and not self._warned:\n self._warned = True\n warn_deprecation(f\"{self.msginfo} 'value' will be deprecated in 4.0. 
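# Illustrative sketch of reading values out of a Problem.  The ExecComp
# expression, units, and names are hypothetical; it assumes the standard
# `openmdao.api` import.  get_val() converts to the requested display units,
# while prob['name'] (__getitem__) returns the value in its declared units.
def _example_get_val():
    import openmdao.api as om

    prob = om.Problem()
    prob.model.add_subsystem('comp',
                             om.ExecComp('y = 2.0*x',
                                         x={'units': 'm'}, y={'units': 'm'}),
                             promotes=['*'])
    prob.setup()
    prob.run_model()

    y_m = prob['y']                       # declared units (m)
    y_km = prob.get_val('y', units='km')  # converted copy
    return y_m, y_km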
Please use 'val' \"\n \"in the future.\")\n elif val is not None:\n self._warned = True\n value = val\n\n model = self.model\n if self._metadata is not None:\n conns = model._conn_global_abs_in2out\n else:\n raise RuntimeError(f\"{self.msginfo}: '{name}' Cannot call set_val before setup.\")\n\n all_meta = model._var_allprocs_abs2meta\n loc_meta = model._var_abs2meta\n n_proms = 0 # if nonzero, name given was promoted input name w/o a matching prom output\n\n try:\n ginputs = model._group_inputs\n except AttributeError:\n ginputs = {} # could happen if top level system is not a Group\n\n abs_names = name2abs_names(model, name)\n if abs_names:\n n_proms = len(abs_names) # for output this will never be > 1\n if n_proms > 1 and name in ginputs:\n abs_name = ginputs[name][0].get('use_tgt', abs_names[0])\n else:\n abs_name = abs_names[0]\n else:\n raise KeyError(f'{model.msginfo}: Variable \"{name}\" not found.')\n\n if abs_name in conns:\n src = conns[abs_name]\n if abs_name not in model._var_allprocs_discrete['input']:\n value = np.asarray(value)\n tmeta = all_meta['input'][abs_name]\n tunits = tmeta['units']\n sunits = all_meta['output'][src]['units']\n if abs_name in loc_meta['input']:\n tlocmeta = loc_meta['input'][abs_name]\n else:\n tlocmeta = None\n\n gunits = ginputs[name][0].get('units') if name in ginputs else None\n if n_proms > 1: # promoted input name was used\n if gunits is None:\n tunit_list = [all_meta['input'][n]['units'] for n in abs_names]\n tu0 = tunit_list[0]\n for tu in tunit_list:\n if tu != tu0:\n model._show_ambiguity_msg(name, ('units',), abs_names)\n\n if units is None:\n # avoids double unit conversion\n if self._metadata['setup_status'] > _SetupStatus.POST_SETUP:\n ivalue = value\n if sunits is not None:\n if gunits is not None and gunits != tunits:\n value = model.convert_from_units(src, value, gunits)\n else:\n value = model.convert_from_units(src, value, tunits)\n else:\n if gunits is None:\n ivalue = model.convert_from_units(abs_name, value, units)\n else:\n ivalue = model.convert_units(name, value, units, gunits)\n if self._metadata['setup_status'] == _SetupStatus.POST_SETUP:\n value = ivalue\n else:\n value = model.convert_from_units(src, value, units)\n else:\n src = abs_name\n if units is not None:\n value = model.convert_from_units(abs_name, value, units)\n\n # Caching only needed if vectors aren't allocated yet.\n if self._metadata['setup_status'] == _SetupStatus.POST_SETUP:\n if indices is not None:\n self._get_cached_val(name)\n try:\n if _is_slicer_op(indices):\n self._initial_condition_cache[name] = value[indices]\n else:\n self._initial_condition_cache[name][indices] = value\n except IndexError:\n self._initial_condition_cache[name][indices] = value\n except Exception as err:\n raise RuntimeError(f\"Failed to set value of '{name}': {str(err)}.\")\n else:\n self._initial_condition_cache[name] = value\n else:\n myrank = model.comm.rank\n\n if indices is None:\n indices = _full_slice\n\n if model._outputs._contains_abs(abs_name):\n model._outputs.set_var(abs_name, value, indices)\n elif abs_name in conns: # input name given. 
Set value into output\n if model._outputs._contains_abs(src): # src is local\n if (model._outputs._abs_get_val(src).size == 0 and\n src.rsplit('.', 1)[0] == '_auto_ivc' and\n all_meta['output'][src]['distributed']):\n pass # special case, auto_ivc dist var with 0 local size\n elif tmeta['has_src_indices']:\n if tlocmeta: # target is local\n src_indices = tlocmeta['src_indices']\n flat = False\n if name in model._var_prom2inds:\n sshape, inds, flat = model._var_prom2inds[name]\n if inds is not None:\n if _is_slicer_op(inds):\n inds = _slice_indices(inds, np.prod(sshape), sshape)\n flat = True\n src_indices = inds\n if src_indices is None:\n model._outputs.set_var(src, value, None, flat)\n else:\n if flat:\n src_indices = src_indices.ravel()\n if tmeta['distributed']:\n ssizes = model._var_sizes['output']\n sidx = model._var_allprocs_abs2idx[src]\n ssize = ssizes[myrank, sidx]\n start = np.sum(ssizes[:myrank, sidx])\n end = start + ssize\n if np.any(src_indices < start) or np.any(src_indices >= end):\n raise RuntimeError(f\"{model.msginfo}: Can't set {name}: \"\n \"src_indices refer \"\n \"to out-of-process array entries.\")\n if start > 0:\n src_indices = src_indices - start\n model._outputs.set_var(src, value, src_indices[indices], flat)\n else:\n raise RuntimeError(f\"{model.msginfo}: Can't set {abs_name}: remote\"\n \" connected inputs with src_indices currently not\"\n \" supported.\")\n else:\n value = np.asarray(value)\n model._outputs.set_var(src, value, indices)\n elif src in model._discrete_outputs:\n model._discrete_outputs[src] = value\n # also set the input\n # TODO: maybe remove this if inputs are removed from case recording\n if n_proms < 2:\n if model._inputs._contains_abs(abs_name):\n model._inputs.set_var(abs_name, ivalue, indices)\n elif abs_name in model._discrete_inputs:\n model._discrete_inputs[abs_name] = value\n else:\n # must be a remote var. so, just do nothing on this proc. We can't get here\n # unless abs_name is found in connections, so the variable must exist.\n if abs_name in model._var_allprocs_abs2meta:\n print(f\"Variable '{name}' is remote on rank {self.comm.rank}. 
\"\n \"Local assignment ignored.\")\n elif abs_name in model._discrete_outputs:\n model._discrete_outputs[abs_name] = value\n elif model._inputs._contains_abs(abs_name): # could happen if model is a component\n model._inputs.set_var(abs_name, value, indices)\n elif abs_name in model._discrete_inputs: # could happen if model is a component\n model._discrete_inputs[abs_name] = value\n\n def _set_initial_conditions(self):\n \"\"\"\n Set all initial conditions that have been saved in cache after setup.\n \"\"\"\n for name, value in self._initial_condition_cache.items():\n self.set_val(name, value)\n\n # Clean up cache\n self._initial_condition_cache = OrderedDict()\n\n def run_model(self, case_prefix=None, reset_iter_counts=True):\n \"\"\"\n Run the model by calling the root system's solve_nonlinear.\n\n Parameters\n ----------\n case_prefix : str or None\n Prefix to prepend to coordinates when recording.\n\n reset_iter_counts : bool\n If True and model has been run previously, reset all iteration counters.\n \"\"\"\n if self._mode is None:\n raise RuntimeError(self.msginfo +\n \": The `setup` method must be called before `run_model`.\")\n\n if case_prefix:\n if not isinstance(case_prefix, str):\n raise TypeError(self.msginfo + \": The 'case_prefix' argument should be a string.\")\n self._recording_iter.prefix = case_prefix\n else:\n self._recording_iter.prefix = None\n\n if self.model.iter_count > 0 and reset_iter_counts:\n self.driver.iter_count = 0\n self.model._reset_iter_counts()\n\n self.final_setup()\n\n self._run_counter += 1\n record_model_options(self, self._run_counter)\n\n self.model._clear_iprint()\n self.model.run_solve_nonlinear()\n\n def run_driver(self, case_prefix=None, reset_iter_counts=True):\n \"\"\"\n Run the driver on the model.\n\n Parameters\n ----------\n case_prefix : str or None\n Prefix to prepend to coordinates when recording.\n\n reset_iter_counts : bool\n If True and model has been run previously, reset all iteration counters.\n\n Returns\n -------\n boolean\n Failure flag; True if failed to converge, False is successful.\n \"\"\"\n if self._mode is None:\n raise RuntimeError(self.msginfo +\n \": The `setup` method must be called before `run_driver`.\")\n\n if case_prefix:\n if not isinstance(case_prefix, str):\n raise TypeError(self.msginfo + \": The 'case_prefix' argument should be a string.\")\n self._recording_iter.prefix = case_prefix\n else:\n self._recording_iter.prefix = None\n\n if self.model.iter_count > 0 and reset_iter_counts:\n self.driver.iter_count = 0\n self.model._reset_iter_counts()\n\n self.final_setup()\n\n self._run_counter += 1\n record_model_options(self, self._run_counter)\n\n self.model._clear_iprint()\n return self.driver.run()\n\n def compute_jacvec_product(self, of, wrt, mode, seed):\n \"\"\"\n Given a seed and 'of' and 'wrt' variables, compute the total jacobian vector product.\n\n Parameters\n ----------\n of : list of str\n Variables whose derivatives will be computed.\n wrt : list of str\n Derivatives will be computed with respect to these variables.\n mode : str\n Derivative direction ('fwd' or 'rev').\n seed : dict or list\n Either a dict keyed by 'wrt' varnames (fwd) or 'of' varnames (rev), containing\n dresidual (fwd) or doutput (rev) values, OR a list of dresidual or doutput\n values that matches the corresponding 'wrt' (fwd) or 'of' (rev) varname list.\n\n Returns\n -------\n dict\n The total jacobian vector product, keyed by variable name.\n \"\"\"\n if mode == 'fwd':\n if len(wrt) != len(seed):\n raise 
RuntimeError(self.msginfo +\n \": seed and 'wrt' list must be the same length in fwd mode.\")\n lnames, rnames = of, wrt\n lkind, rkind = 'output', 'residual'\n else: # rev\n if len(of) != len(seed):\n raise RuntimeError(self.msginfo +\n \": seed and 'of' list must be the same length in rev mode.\")\n lnames, rnames = wrt, of\n lkind, rkind = 'residual', 'output'\n\n rvec = self.model._vectors[rkind]['linear']\n lvec = self.model._vectors[lkind]['linear']\n\n rvec.set_val(0.)\n\n conns = self.model._conn_global_abs_in2out\n\n # set seed values into dresids (fwd) or doutputs (rev)\n # seed may have keys that are inputs and must be converted into auto_ivcs\n try:\n seed[rnames[0]]\n except (IndexError, TypeError):\n for i, name in enumerate(rnames):\n if name in conns:\n rvec[conns[name]] = seed[i]\n else:\n rvec[name] = seed[i]\n else:\n for name in rnames:\n if name in conns:\n rvec[conns[name]] = seed[name]\n else:\n rvec[name] = seed[name]\n\n # We apply a -1 here because the derivative of the output is minus the derivative of\n # the residual in openmdao.\n data = rvec.asarray()\n data *= -1.\n\n self.model.run_solve_linear(mode)\n\n if mode == 'fwd':\n return {n: lvec[n].copy() for n in lnames}\n else:\n # may need to convert some lnames to auto_ivc names\n return {n: lvec[conns[n] if n in conns else n].copy() for n in lnames}\n\n def _setup_recording(self):\n \"\"\"\n Set up case recording.\n \"\"\"\n self._filtered_vars_to_record = self.driver._get_vars_to_record(self.recording_options)\n self._rec_mgr.startup(self)\n\n def add_recorder(self, recorder):\n \"\"\"\n Add a recorder to the problem.\n\n Parameters\n ----------\n recorder : CaseRecorder\n A recorder instance.\n \"\"\"\n self._rec_mgr.append(recorder)\n\n def cleanup(self):\n \"\"\"\n Clean up resources prior to exit.\n \"\"\"\n # shut down all recorders\n self._rec_mgr.shutdown()\n\n # clean up driver and model resources\n self.driver.cleanup()\n for system in self.model.system_iter(include_self=True, recurse=True):\n system.cleanup()\n\n def record(self, case_name):\n \"\"\"\n Record the variables at the Problem level.\n\n Must be called after `final_setup` has been called. This can either\n happen automatically through `run_driver` or `run_model`, or it can be\n called manually.\n\n Parameters\n ----------\n case_name : str\n Name used to identify this Problem case.\n \"\"\"\n if self._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:\n raise RuntimeError(f\"{self.msginfo}: Problem.record() cannot be called before \"\n \"`Problem.run_model()`, `Problem.run_driver()`, or \"\n \"`Problem.final_setup()`.\")\n else:\n record_iteration(self, self, case_name)\n\n def record_iteration(self, case_name):\n \"\"\"\n Record the variables at the Problem level.\n\n Parameters\n ----------\n case_name : str\n Name used to identify this Problem case.\n \"\"\"\n warn_deprecation(\"'Problem.record_iteration' has been deprecated. 
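# Illustrative sketch of problem-level case recording with the add_recorder(),
# record(), and cleanup() methods above.  The recorder filename and case name
# are hypothetical; it assumes the standard `openmdao.api` import.
def _example_problem_recording():
    import openmdao.api as om

    prob = om.Problem()
    prob.model.add_subsystem('comp', om.ExecComp('y = 2.0*x'), promotes=['*'])
    prob.add_recorder(om.SqliteRecorder('cases.sql'))

    prob.setup()
    prob.run_model()          # triggers final_setup, which record() requires
    prob.record('baseline')
    prob.cleanup()            # shut down recorders before reading the file

    return om.CaseReader('cases.sql').get_case('baseline')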
\"\n \"Use 'Problem.record' instead.\")\n\n record_iteration(self, self, case_name)\n\n def _get_recorder_metadata(self, case_name):\n \"\"\"\n Return metadata from the latest iteration for use in the recorder.\n\n Parameters\n ----------\n case_name : str\n Name of current case.\n\n Returns\n -------\n dict\n Metadata dictionary for the recorder.\n \"\"\"\n return create_local_meta(case_name)\n\n def setup(self, check=False, logger=None, mode='auto', force_alloc_complex=False,\n distributed_vector_class=PETScVector, local_vector_class=DefaultVector,\n derivatives=True):\n \"\"\"\n Set up the model hierarchy.\n\n When `setup` is called, the model hierarchy is assembled, the processors are allocated\n (for MPI), and variables and connections are all assigned. This method traverses down\n the model hierarchy to call `setup` on each subsystem, and then traverses up the model\n hierarchy to call `configure` on each subsystem.\n\n Parameters\n ----------\n check : None, boolean, list of strings, or the string ‘all’\n Determines what config checks, if any, are run after setup is complete.\n If None or False, no checks are run\n If True, the default checks ('out_of_order', 'system', 'solvers', 'dup_inputs',\n 'missing_recorders', 'comp_has_no_outputs', 'auto_ivc_warnings') are run\n If list of str, run those config checks\n If ‘all’, all the checks ('auto_ivc_warnings', 'comp_has_no_outputs', 'cycles',\n 'dup_inputs', 'missing_recorders', 'out_of_order', 'promotions', 'solvers',\n 'system', 'unconnected_inputs') are run\n logger : object\n Object for logging config checks if check is True.\n mode : string\n Derivatives calculation mode, 'fwd' for forward, and 'rev' for\n reverse (adjoint). Default is 'auto', which will pick 'fwd' or 'rev' based on\n the direction resulting in the smallest number of linear solves required to\n compute derivatives.\n force_alloc_complex : bool\n Force allocation of imaginary part in nonlinear vectors. OpenMDAO can generally\n detect when you need to do this, but in some cases (e.g., complex step is used\n after a reconfiguration) you may need to set this to True.\n distributed_vector_class : type\n Reference to the <Vector> class or factory function used to instantiate vectors\n and associated transfers involved in interprocess communication.\n local_vector_class : type\n Reference to the <Vector> class or factory function used to instantiate vectors\n and associated transfers involved in intraprocess communication.\n derivatives : bool\n If True, perform any memory allocations necessary for derivative computation.\n\n Returns\n -------\n self : <Problem>\n this enables the user to instantiate and setup in one line.\n \"\"\"\n model = self.model\n comm = self.comm\n\n # A distributed vector type is required for MPI\n if comm.size > 1:\n if distributed_vector_class is PETScVector and PETScVector is None:\n raise ValueError(self.msginfo +\n \": Attempting to run in parallel under MPI but PETScVector \"\n \"could not be imported.\")\n elif not distributed_vector_class.distributed:\n raise ValueError(\"%s: The `distributed_vector_class` argument must be a \"\n \"distributed vector class like `PETScVector` when running in \"\n \"parallel under MPI but '%s' was specified which is not \"\n \"distributed.\" % (self.msginfo, distributed_vector_class.__name__))\n\n if mode not in ['fwd', 'rev', 'auto']:\n msg = \"%s: Unsupported mode: '%s'. 
Use either 'fwd' or 'rev'.\" % (self.msginfo, mode)\n raise ValueError(msg)\n\n self._mode = self._orig_mode = mode\n\n model_comm = self.driver._setup_comm(comm)\n\n # this metadata will be shared by all Systems/Solvers in the system tree\n self._metadata = {\n 'coloring_dir': self.options['coloring_dir'], # directory for coloring files\n 'recording_iter': _RecIteration(), # manager of recorder iterations\n 'local_vector_class': local_vector_class,\n 'distributed_vector_class': distributed_vector_class,\n 'solver_info': SolverInfo(),\n 'use_derivatives': derivatives,\n 'force_alloc_complex': force_alloc_complex, # forces allocation of complex vectors\n 'vars_to_gather': {}, # vars that are remote somewhere. does not include distrib vars\n 'prom2abs': {'input': {}, 'output': {}}, # includes ALL promotes including buried ones\n 'static_mode': False, # used to determine where various 'static'\n # and 'dynamic' data structures are stored.\n # Dynamic ones are added during System\n # setup/configure. They are wiped out and re-created during\n # each Problem setup. Static ones are added outside of\n # Problem setup and they are never wiped out or re-created.\n 'config_info': None, # used during config to determine if additional updates required\n 'parallel_groups': [], # list of pathnames of parallel groups in this model (all procs)\n 'setup_status': _SetupStatus.PRE_SETUP,\n 'vec_names': None, # names of all nonlinear and linear vectors\n 'model_ref': weakref.ref(model), # ref to the model (needed to get out-of-scope\n # src data for inputs)\n 'using_par_deriv_color': False, # True if parallel derivative coloring is being used\n }\n model._setup(model_comm, mode, self._metadata)\n\n # set static mode back to True in all systems in this Problem\n self._metadata['static_mode'] = True\n\n # Cache all args for final setup.\n self._check = check\n self._logger = logger\n\n self._metadata['setup_status'] = _SetupStatus.POST_SETUP\n\n return self\n\n def final_setup(self):\n \"\"\"\n Perform final setup phase on problem in preparation for run.\n\n This is the second phase of setup, and is done automatically at the start of `run_driver`\n and `run_model`. At the beginning of final_setup, we have a model hierarchy with defined\n variables, solvers, case_recorders, and derivative settings. 
During this phase, the vectors\n are created and populated, the drivers and solvers are initialized, and the recorders are\n started, and the rest of the framework is prepared for execution.\n \"\"\"\n driver = self.driver\n\n response_size, desvar_size = driver._update_voi_meta(self.model)\n\n # update mode if it's been set to 'auto'\n if self._orig_mode == 'auto':\n mode = 'rev' if response_size < desvar_size else 'fwd'\n self._mode = mode\n else:\n mode = self._orig_mode\n\n if self._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:\n self.model._final_setup(self.comm)\n\n driver._setup_driver(self)\n\n info = driver._coloring_info\n coloring = info['coloring']\n if coloring is None and info['static'] is not None:\n coloring = driver._get_static_coloring()\n\n if coloring and coloring_mod._use_total_sparsity:\n # if we're using simultaneous total derivatives then our effective size is less\n # than the full size\n if coloring._fwd and coloring._rev:\n pass # we're doing both!\n elif mode == 'fwd' and coloring._fwd:\n desvar_size = coloring.total_solves()\n elif mode == 'rev' and coloring._rev:\n response_size = coloring.total_solves()\n\n if ((mode == 'fwd' and desvar_size > response_size) or\n (mode == 'rev' and response_size > desvar_size)):\n issue_warning(f\"Inefficient choice of derivative mode. You chose '{mode}' for a \"\n f\"problem with {desvar_size} design variables and {response_size} \"\n \"response variables (objectives and nonlinear constraints).\",\n category=DerivativesWarning)\n\n if self._metadata['setup_status'] == _SetupStatus.PRE_SETUP and \\\n hasattr(self.model, '_order_set') and self.model._order_set:\n raise RuntimeError(\"%s: Cannot call set_order without calling \"\n \"setup after\" % (self.msginfo))\n\n # set up recording, including any new recorders since last setup\n if self._metadata['setup_status'] >= _SetupStatus.POST_SETUP:\n driver._setup_recording()\n self._setup_recording()\n record_viewer_data(self)\n\n if self._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:\n self._metadata['setup_status'] = _SetupStatus.POST_FINAL_SETUP\n self._set_initial_conditions()\n\n if self._check:\n if self._check is True:\n checks = _default_checks\n else:\n checks = self._check\n if self.comm.rank == 0:\n logger = self._logger\n else:\n logger = TestLogger()\n self.check_config(logger, checks=checks)\n\n def check_partials(self, out_stream=_DEFAULT_OUT_STREAM, includes=None, excludes=None,\n compact_print=False, abs_err_tol=1e-6, rel_err_tol=1e-6,\n method='fd', step=None, form='forward', step_calc='abs',\n force_dense=True, show_only_incorrect=False):\n \"\"\"\n Check partial derivatives comprehensively for all components in your model.\n\n Parameters\n ----------\n out_stream : file-like object\n Where to send human readable output. By default it goes to stdout.\n Set to None to suppress.\n includes : None or list_like\n List of glob patterns for pathnames to include in the check. Default is None, which\n includes all components in the model.\n excludes : None or list_like\n List of glob patterns for pathnames to exclude from the check. Default is None, which\n excludes nothing.\n compact_print : bool\n Set to True to just print the essentials, one line per input-output pair.\n abs_err_tol : float\n Threshold value for absolute error. Errors about this value will have a '*' displayed\n next to them in output, making them easy to search for. Default is 1.0E-6.\n rel_err_tol : float\n Threshold value for relative error. 
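# Illustrative sketch of the two-phase setup described above.  final_setup() is
# normally triggered by run_model()/run_driver(); calling it explicitly here
# just shows that cached set_val() values are transferred into the vectors at
# that point.  Names and values are hypothetical.
def _example_two_phase_setup():
    import openmdao.api as om

    prob = om.Problem()
    prob.model.add_subsystem('comp', om.ExecComp('y = 2.0*x'), promotes=['*'])

    prob.setup(check=True, mode='auto', force_alloc_complex=True)
    prob.set_val('x', 4.0)    # cached; vectors do not exist yet
    prob.final_setup()        # vectors allocated, cached values applied
    return prob.get_val('x')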
Errors about this value will have a '*' displayed\n next to them in output, making them easy to search for. Note at times there may be a\n significant relative error due to a minor absolute error. Default is 1.0E-6.\n method : str\n Method, 'fd' for finite difference or 'cs' for complex step. Default is 'fd'.\n step : float\n Step size for approximation. Default is None, which means 1e-6 for 'fd' and 1e-40 for\n 'cs'.\n form : string\n Form for finite difference, can be 'forward', 'backward', or 'central'. Default\n 'forward'.\n step_calc : string\n Step type for finite difference, can be 'abs' for absolute', or 'rel' for relative.\n Default is 'abs'.\n force_dense : bool\n If True, analytic derivatives will be coerced into arrays. Default is True.\n show_only_incorrect : bool, optional\n Set to True if output should print only the subjacs found to be incorrect.\n\n Returns\n -------\n dict of dicts of dicts\n First key:\n is the component name;\n Second key:\n is the (output, input) tuple of strings;\n Third key:\n is one of ['rel error', 'abs error', 'magnitude', 'J_fd', 'J_fwd', 'J_rev'];\n\n For 'rel error', 'abs error', 'magnitude' the value is: A tuple containing norms for\n forward - fd, adjoint - fd, forward - adjoint.\n For 'J_fd', 'J_fwd', 'J_rev' the value is: A numpy array representing the computed\n Jacobian for the three different methods of computation.\n \"\"\"\n if self._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:\n self.final_setup()\n\n model = self.model\n\n if not model._use_derivatives:\n raise RuntimeError(self.msginfo +\n \": Can't check partials. Derivative support has been turned off.\")\n\n # TODO: Once we're tracking iteration counts, run the model if it has not been run before.\n\n includes = [includes] if isinstance(includes, str) else includes\n excludes = [excludes] if isinstance(excludes, str) else excludes\n\n comps = []\n for comp in model.system_iter(typ=Component, include_self=True):\n if comp._no_check_partials:\n continue\n\n name = comp.pathname\n\n # Process includes\n if includes is not None:\n for pattern in includes:\n if fnmatchcase(name, pattern):\n break\n else:\n continue\n\n # Process excludes\n if excludes is not None:\n match = False\n for pattern in excludes:\n if fnmatchcase(name, pattern):\n match = True\n break\n if match:\n continue\n\n comps.append(comp)\n\n # Check to make sure the method and settings used for checking\n # is different from the method used to calc the derivatives\n # Could do this later in this method but at that point some computations could have been\n # made and it would just waste time before the user is told there is an error and the\n # program errs out\n requested_method = method\n alloc_complex = model._outputs._alloc_complex\n\n for comp in comps:\n local_opts = comp._get_check_partial_options()\n\n for key, meta in comp._declared_partials.items():\n\n # Get the complete set of options, including defaults\n # for the computing of the derivs for this component\n if 'method' not in meta:\n meta_with_defaults = {}\n meta_with_defaults['method'] = 'exact'\n elif meta['method'] == 'cs':\n meta_with_defaults = ComplexStep.DEFAULT_OPTIONS.copy()\n else:\n meta_with_defaults = FiniteDifference.DEFAULT_OPTIONS.copy()\n for name in meta:\n meta_with_defaults[name] = meta[name]\n\n # For each of the partials, check to see if the\n # check partials options are different than the options used to compute\n # the partials\n pattern_matches = comp._find_partial_matches(*key)\n wrt_vars = pattern_matches[1]\n 
for wrt_var in wrt_vars:\n _, vars = wrt_var\n for var in vars:\n # we now have individual vars like 'x'\n # get the options for checking partials\n fd_options, _ = _get_fd_options(var, requested_method, local_opts, step,\n form, step_calc, alloc_complex)\n # compare the compute options to the check options\n if fd_options['method'] != meta_with_defaults['method']:\n all_same = False\n else:\n all_same = True\n if fd_options['method'] == 'fd':\n option_names = ['form', 'step', 'step_calc', 'directional']\n else:\n option_names = ['step', 'directional']\n for name in option_names:\n if fd_options[name] != meta_with_defaults[name]:\n all_same = False\n break\n if all_same:\n msg = f\"Checking partials with respect \" \\\n f\"to variable '{var}' in component \" \\\n f\"'{comp.pathname}' using the same \" \\\n \"method and options as are used to compute the \" \\\n \"component's derivatives \" \\\n \"will not provide any relevant information on the \" \\\n \"accuracy.\\n\" \\\n \"To correct this, change the options to do the \\n\" \\\n \"check_partials using either:\\n\" \\\n \" - arguments to Problem.check_partials. \\n\" \\\n \" - arguments to Component.set_check_partial_options\"\n\n issue_warning(msg, prefix=self.msginfo,\n category=OMInvalidCheckDerivativesOptionsWarning)\n\n self.set_solver_print(level=0)\n\n # This is a defaultdict of (defaultdict of dicts).\n partials_data = defaultdict(lambda: defaultdict(dict))\n\n # Caching current point to restore after setups.\n input_cache = model._inputs.asarray(copy=True)\n output_cache = model._outputs.asarray(copy=True)\n\n # Keep track of derivative keys that are declared dependent so that we don't print them\n # unless they are in error.\n indep_key = {}\n\n # Directional derivative directions for matrix free comps.\n mfree_directions = {}\n\n # Analytic Jacobians\n print_reverse = False\n for mode in ('fwd', 'rev'):\n model._inputs.set_val(input_cache)\n model._outputs.set_val(output_cache)\n # Make sure we're in a valid state\n model.run_apply_nonlinear()\n\n jac_key = 'J_' + mode\n\n for comp in comps:\n\n # Only really need to linearize once.\n if mode == 'fwd':\n comp.run_linearize(sub_do_ln=False)\n\n explicit = isinstance(comp, ExplicitComponent)\n matrix_free = comp.matrix_free\n c_name = comp.pathname\n if mode == 'fwd':\n indep_key[c_name] = set()\n\n with comp._unscaled_context():\n\n of_list, wrt_list = comp._get_partials_varlists()\n\n # Matrix-free components need to calculate their Jacobian by matrix-vector\n # product.\n if matrix_free:\n print_reverse = True\n local_opts = comp._get_check_partial_options()\n\n dstate = comp._vectors['output']['linear']\n if mode == 'fwd':\n dinputs = comp._vectors['input']['linear']\n doutputs = comp._vectors['residual']['linear']\n in_list = wrt_list\n out_list = of_list\n else:\n dinputs = comp._vectors['residual']['linear']\n doutputs = comp._vectors['input']['linear']\n in_list = of_list\n out_list = wrt_list\n\n for inp in in_list:\n inp_abs = rel_name2abs_name(comp, inp)\n if mode == 'fwd':\n directional = inp in local_opts and local_opts[inp]['directional']\n else:\n directional = c_name in mfree_directions\n\n try:\n flat_view = dinputs._abs_get_val(inp_abs)\n except KeyError:\n # Implicit state\n flat_view = dstate._abs_get_val(inp_abs)\n\n if directional:\n n_in = 1\n if c_name not in mfree_directions:\n mfree_directions[c_name] = {}\n\n if inp in mfree_directions[c_name]:\n perturb = mfree_directions[c_name][inp]\n else:\n perturb = 2.0 * np.random.random(len(flat_view)) - 1.0\n 
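                                    # The random direction generated above is cached in
                                    # mfree_directions so the reverse pass reuses the same
                                    # perturbation; the fwd/rev dot-product check later in
                                    # this loop relies on both passes seeing an identical
                                    # seed vector.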
mfree_directions[c_name][inp] = perturb\n\n else:\n n_in = len(flat_view)\n perturb = 1.0\n\n for idx in range(n_in):\n\n dinputs.set_val(0.0)\n dstate.set_val(0.0)\n\n # Dictionary access returns a scalar for 1d input, and we\n # need a vector for clean code, so use _views_flat.\n if directional:\n flat_view[:] = perturb\n else:\n flat_view[idx] = perturb\n\n # Matrix Vector Product\n comp._apply_linear(None, _contains_all, mode)\n\n for out in out_list:\n out_abs = rel_name2abs_name(comp, out)\n\n try:\n derivs = doutputs._abs_get_val(out_abs)\n except KeyError:\n # Implicit state\n derivs = dstate._abs_get_val(out_abs)\n\n if mode == 'fwd':\n key = out, inp\n deriv = partials_data[c_name][key]\n\n # Allocate first time\n if jac_key not in deriv:\n shape = (len(derivs), n_in)\n deriv[jac_key] = np.zeros(shape)\n\n deriv[jac_key][:, idx] = derivs\n\n else:\n key = inp, out\n deriv = partials_data[c_name][key]\n\n if directional:\n # Dot product test for adjoint validity.\n m = mfree_directions[c_name][out]\n d = mfree_directions[c_name][inp]\n mhat = derivs\n dhat = partials_data[c_name][inp, out]['J_fwd'][:, idx]\n\n deriv['directional_fwd_rev'] = mhat.dot(m) - dhat.dot(d)\n\n # Allocate first time\n if jac_key not in deriv:\n shape = (n_in, len(derivs))\n deriv[jac_key] = np.zeros(shape)\n\n deriv[jac_key][idx, :] = derivs\n\n # These components already have a Jacobian with calculated derivatives.\n else:\n\n if mode == 'rev':\n # Skip reverse mode because it is not different than forward.\n continue\n\n subjacs = comp._jacobian._subjacs_info\n\n for rel_key in product(of_list, wrt_list):\n abs_key = rel_key2abs_key(comp, rel_key)\n of, wrt = abs_key\n\n # No need to calculate partials; they are already stored\n try:\n deriv_value = subjacs[abs_key]['val']\n rows = subjacs[abs_key]['rows']\n except KeyError:\n deriv_value = rows = None\n\n # Testing for pairs that are not dependent so that we suppress printing\n # them unless the fd is non zero. 
Note: subjacs_info is empty for\n # undeclared partials, which is the default behavior now.\n try:\n if not subjacs[abs_key]['dependent']:\n indep_key[c_name].add(rel_key)\n except KeyError:\n indep_key[c_name].add(rel_key)\n\n if wrt in comp._var_abs2meta['input']:\n wrt_meta = comp._var_abs2meta['input'][wrt]\n else:\n wrt_meta = comp._var_abs2meta['output'][wrt]\n\n if deriv_value is None:\n # Missing derivatives are assumed 0.\n in_size = wrt_meta['size']\n out_size = comp._var_abs2meta['output'][of]['size']\n deriv_value = np.zeros((out_size, in_size))\n\n if force_dense:\n if rows is not None:\n try:\n in_size = wrt_meta['size']\n except KeyError:\n in_size = wrt_meta['size']\n out_size = comp._var_abs2meta['output'][of]['size']\n tmp_value = np.zeros((out_size, in_size))\n # if a scalar value is provided (in declare_partials),\n # expand to the correct size array value for zipping\n if deriv_value.size == 1:\n deriv_value *= np.ones(rows.size)\n for i, j, val in zip(rows, subjacs[abs_key]['cols'],\n deriv_value):\n tmp_value[i, j] += val\n deriv_value = tmp_value\n\n elif sparse.issparse(deriv_value):\n deriv_value = deriv_value.todense()\n\n partials_data[c_name][rel_key][jac_key] = deriv_value.copy()\n\n model._inputs.set_val(input_cache)\n model._outputs.set_val(output_cache)\n model.run_apply_nonlinear()\n\n # Finite Difference to calculate Jacobian\n alloc_complex = model._outputs._alloc_complex\n all_fd_options = {}\n comps_could_not_cs = set()\n requested_method = method\n for comp in comps:\n\n c_name = comp.pathname\n all_fd_options[c_name] = {}\n\n approximations = {'fd': FiniteDifference(),\n 'cs': ComplexStep()}\n added_wrts = set()\n\n of, wrt = comp._get_partials_varlists()\n\n # Load up approximation objects with the requested settings.\n\n local_opts = comp._get_check_partial_options()\n for rel_key in product(of, wrt):\n abs_key = rel_key2abs_key(comp, rel_key)\n local_wrt = rel_key[1]\n\n fd_options, could_not_cs = _get_fd_options(local_wrt, requested_method, local_opts,\n step, form, step_calc, alloc_complex)\n\n if could_not_cs:\n comps_could_not_cs.add(c_name)\n\n # Determine if fd or cs.\n method = requested_method\n\n all_fd_options[c_name][local_wrt] = fd_options\n if c_name in mfree_directions:\n vector = mfree_directions[c_name].get(local_wrt)\n else:\n vector = None\n\n # prevent adding multiple approxs with same wrt (and confusing users with warnings)\n if abs_key[1] not in added_wrts:\n approximations[fd_options['method']].add_approximation(abs_key, self.model,\n fd_options,\n vector=vector)\n added_wrts.add(abs_key[1])\n\n approx_jac = _CheckingJacobian(comp)\n for approximation in approximations.values():\n # Perform the FD here.\n approximation.compute_approximations(comp, jac=approx_jac)\n\n for abs_key, partial in approx_jac.items():\n rel_key = abs_key2rel_key(comp, abs_key)\n partials_data[c_name][rel_key]['J_fd'] = partial\n\n # If this is a directional derivative, convert the analytic to a directional one.\n wrt = rel_key[1]\n if wrt in local_opts and local_opts[wrt]['directional']:\n deriv = partials_data[c_name][rel_key]\n deriv['J_fwd'] = np.atleast_2d(np.sum(deriv['J_fwd'], axis=1)).T\n if comp.matrix_free:\n deriv['J_rev'] = np.atleast_2d(np.sum(deriv['J_rev'], axis=0)).T\n\n # Dot product test for adjoint validity.\n m = mfree_directions[c_name][rel_key[0]].flatten()\n d = mfree_directions[c_name][wrt].flatten()\n mhat = partial.flatten()\n dhat = deriv['J_rev'].flatten()\n\n deriv['directional_fd_rev'] = dhat.dot(d) - mhat.dot(m)\n\n # 
Conversion of defaultdict to dicts\n partials_data = {comp_name: dict(outer) for comp_name, outer in partials_data.items()}\n\n if out_stream == _DEFAULT_OUT_STREAM:\n out_stream = sys.stdout\n\n if len(comps_could_not_cs) > 0:\n msg = \"The following components requested complex step, but force_alloc_complex \" + \\\n \"has not been set to True, so finite difference was used: \"\n msg += str(list(comps_could_not_cs))\n msg += \"\\nTo enable complex step, specify 'force_alloc_complex=True' when calling \" + \\\n \"setup on the problem, e.g. 'problem.setup(force_alloc_complex=True)'\"\n issue_warning(msg, category=DerivativesWarning)\n\n _assemble_derivative_data(partials_data, rel_err_tol, abs_err_tol, out_stream,\n compact_print, comps, all_fd_options, indep_key=indep_key,\n print_reverse=print_reverse,\n show_only_incorrect=show_only_incorrect)\n\n return partials_data\n\n def check_totals(self, of=None, wrt=None, out_stream=_DEFAULT_OUT_STREAM, compact_print=False,\n driver_scaling=False, abs_err_tol=1e-6, rel_err_tol=1e-6,\n method='fd', step=None, form=None, step_calc='abs', show_progress=False):\n \"\"\"\n Check total derivatives for the model vs. finite difference.\n\n Parameters\n ----------\n of : list of variable name strings or None\n Variables whose derivatives will be computed. Default is None, which\n uses the driver's objectives and constraints.\n wrt : list of variable name strings or None\n Variables with respect to which the derivatives will be computed.\n Default is None, which uses the driver's desvars.\n out_stream : file-like object\n Where to send human readable output. By default it goes to stdout.\n Set to None to suppress.\n compact_print : bool\n Set to True to just print the essentials, one line per input-output pair.\n driver_scaling : bool\n When True, return derivatives that are scaled according to either the adder and scaler\n or the ref and ref0 values that were specified when add_design_var, add_objective, and\n add_constraint were called on the model. Default is False, which is unscaled.\n abs_err_tol : float\n Threshold value for absolute error. Errors about this value will have a '*' displayed\n next to them in output, making them easy to search for. Default is 1.0E-6.\n rel_err_tol : float\n Threshold value for relative error. Errors about this value will have a '*' displayed\n next to them in output, making them easy to search for. Note at times there may be a\n significant relative error due to a minor absolute error. Default is 1.0E-6.\n method : str\n Method, 'fd' for finite difference or 'cs' for complex step. Default is 'fd'\n step : float\n Step size for approximation. Default is None, which means 1e-6 for 'fd' and 1e-40 for\n 'cs'.\n form : string\n Form for finite difference, can be 'forward', 'backward', or 'central'. 
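# Illustrative sketch of calling check_partials() and indexing into the nested
# dict it returns.  The component pathname 'comp' and the ('y', 'x') pair are
# hypothetical; it assumes the standard `openmdao.api` import.
def _example_check_partials():
    import openmdao.api as om

    prob = om.Problem()
    prob.model.add_subsystem('comp', om.ExecComp('y = 3.0*x**2'), promotes=['*'])
    prob.setup()
    prob.run_model()

    data = prob.check_partials(method='fd', compact_print=True,
                               show_only_incorrect=False)

    # data[<component pathname>][(of, wrt)] holds 'abs error', 'rel error',
    # 'magnitude', and the raw 'J_fwd'/'J_fd' arrays.
    return data['comp'][('y', 'x')]['abs error']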
Default\n None, which defaults to 'forward' for FD.\n step_calc : string\n Step type for finite difference, can be 'abs' for absolute', or 'rel' for relative.\n Default is 'abs'.\n show_progress : bool\n True to show progress of check_totals\n\n Returns\n -------\n Dict of Dicts of Tuples of Floats\n\n First key:\n is the (output, input) tuple of strings;\n Second key:\n is one of ['rel error', 'abs error', 'magnitude', 'fdstep'];\n\n For 'rel error', 'abs error', 'magnitude' the value is: A tuple containing norms for\n forward - fd, adjoint - fd, forward - adjoint.\n \"\"\"\n if out_stream == _DEFAULT_OUT_STREAM:\n out_stream = sys.stdout\n\n # Check to see if approximation options are the same as that used to compute totals\n # If yes, issue an warning\n if self.model._owns_approx_jac and method in self.model._approx_schemes:\n scheme = self.model._get_approx_scheme(method)\n\n # get approx options. Fill in with defaults, as needed\n approx_options = scheme.DEFAULT_OPTIONS.copy()\n approx_options.update(self.model._owns_approx_jac_meta)\n\n # get check options. Fill in with defaults, as needed\n check_options = scheme.DEFAULT_OPTIONS.copy()\n if step:\n check_options['step'] = step\n if method == 'fd':\n if form:\n check_options['form'] = form\n if step_calc:\n check_options['step_calc'] = step_calc\n\n # Compare the approx and check options\n all_same = True\n if approx_options['step'] != check_options['step']:\n all_same = False\n elif method == 'fd':\n if approx_options['form'] != check_options['form']:\n all_same = False\n if approx_options['step_calc'] != check_options['step_calc']:\n all_same = False\n\n if all_same:\n msg = \"Checking totals using the same \" \\\n \"method and options as are used to compute the \" \\\n \"totals will not provide any relevant \" \\\n \"information on the \" \\\n \"accuracy.\\n\" \\\n \"To correct this, change the options to do the \" \\\n \"check_totals or on the call to approx_totals \" \\\n \"for the model.\"\n\n issue_warning(msg, prefix=self.msginfo,\n category=OMInvalidCheckDerivativesOptionsWarning)\n\n if self._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:\n raise RuntimeError(self.msginfo + \": run_model must be called before total \"\n \"derivatives can be checked.\")\n\n model = self.model\n lcons = []\n\n if method == 'cs' and not model._outputs._alloc_complex:\n msg = \"\\n\" + self.msginfo + \": To enable complex step, specify \"\\\n \"'force_alloc_complex=True' when calling \" + \\\n \"setup on the problem, e.g. 
'problem.setup(force_alloc_complex=True)'\"\n raise RuntimeError(msg)\n\n # TODO: Once we're tracking iteration counts, run the model if it has not been run before.\n\n if wrt is None:\n wrt = list(self.driver._designvars)\n if not wrt:\n raise RuntimeError(\"Driver is not providing any design variables \"\n \"for compute_totals.\")\n\n if of is None:\n of = list(self.driver._objs)\n of.extend(self.driver._cons)\n if not of:\n raise RuntimeError(\"Driver is not providing any response variables \"\n \"for compute_totals.\")\n lcons = [n for n, meta in self.driver._cons.items()\n if ('linear' in meta and meta['linear'])]\n\n # Calculate Total Derivatives\n if model._owns_approx_jac:\n # Support this, even though it is a bit silly (though you could compare fd with cs.)\n total_info = _TotalJacInfo(self, of, wrt, False, return_format='flat_dict',\n approx=True, driver_scaling=driver_scaling)\n Jcalc = total_info.compute_totals_approx(initialize=True)\n\n else:\n total_info = _TotalJacInfo(self, of, wrt, False, return_format='flat_dict',\n driver_scaling=driver_scaling)\n Jcalc = total_info.compute_totals()\n\n if step is None:\n if method == 'cs':\n step = ComplexStep.DEFAULT_OPTIONS['step']\n else:\n step = FiniteDifference.DEFAULT_OPTIONS['step']\n\n # Approximate FD\n fd_args = {\n 'step': step,\n 'form': form,\n 'step_calc': step_calc,\n }\n approx = model._owns_approx_jac\n approx_of = model._owns_approx_of\n approx_wrt = model._owns_approx_wrt\n old_jac = model._jacobian\n old_subjacs = model._subjacs_info.copy()\n\n model.approx_totals(method=method, step=step, form=form,\n step_calc=step_calc if method == 'fd' else None)\n total_info = _TotalJacInfo(self, of, wrt, False, return_format='flat_dict', approx=True,\n driver_scaling=driver_scaling)\n if show_progress:\n Jfd = total_info.compute_totals_approx(initialize=True, progress_out_stream=out_stream)\n else:\n Jfd = total_info.compute_totals_approx(initialize=True)\n # reset the _owns_approx_jac flag after approximation is complete.\n if not approx:\n model._jacobian = old_jac\n model._owns_approx_jac = False\n model._owns_approx_of = approx_of\n model._owns_approx_wrt = approx_wrt\n model._subjacs_info = old_subjacs\n\n # Assemble and Return all metrics.\n data = {}\n data[''] = {}\n resp = self.driver._responses\n # TODO key should not be fwd when exact computed in rev mode or auto\n for key, val in Jcalc.items():\n data[''][key] = {}\n data[''][key]['J_fwd'] = val\n data[''][key]['J_fd'] = Jfd[key]\n\n # Display whether indices were declared when response was added.\n of = key[0]\n if of in resp and resp[of]['indices'] is not None:\n data[''][key]['indices'] = len(resp[of]['indices'])\n\n fd_args['method'] = method\n\n if out_stream == _DEFAULT_OUT_STREAM:\n out_stream = sys.stdout\n\n _assemble_derivative_data(data, rel_err_tol, abs_err_tol, out_stream, compact_print,\n [model], {'': fd_args}, totals=True, lcons=lcons)\n return data['']\n\n def compute_totals(self, of=None, wrt=None, return_format='flat_dict', debug_print=False,\n driver_scaling=False, use_abs_names=False, get_remote=True):\n \"\"\"\n Compute derivatives of desired quantities with respect to desired inputs.\n\n Parameters\n ----------\n of : list of variable name strings or None\n Variables whose derivatives will be computed. 
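# Illustrative sketch of verifying total derivatives with check_totals().  The
# variable names are hypothetical; method='cs' requires
# setup(force_alloc_complex=True), as enforced above, and the model must be
# run first.
def _example_check_totals():
    import openmdao.api as om

    prob = om.Problem()
    prob.model.add_subsystem('comp', om.ExecComp('obj = (x - 3.0)**2'),
                             promotes=['*'])
    prob.setup(force_alloc_complex=True)
    prob.run_model()

    totals = prob.check_totals(of=['obj'], wrt=['x'], method='cs',
                               compact_print=True)
    return totals[('obj', 'x')]['abs error']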
Default is None, which\n uses the driver's objectives and constraints.\n wrt : list of variable name strings or None\n Variables with respect to which the derivatives will be computed.\n Default is None, which uses the driver's desvars.\n return_format : string\n Format to return the derivatives. Can be 'dict', 'flat_dict', or 'array'.\n Default is a 'flat_dict', which returns them in a dictionary whose keys are\n tuples of form (of, wrt).\n debug_print : bool\n Set to True to print out some debug information during linear solve.\n driver_scaling : bool\n When True, return derivatives that are scaled according to either the adder and scaler\n or the ref and ref0 values that were specified when add_design_var, add_objective, and\n add_constraint were called on the model. Default is False, which is unscaled.\n use_abs_names : bool\n Set to True when passing in absolute names to skip some translation steps.\n get_remote : bool\n If True, the default, the full distributed total jacobian will be retrieved.\n\n Returns\n -------\n derivs : object\n Derivatives in form requested by 'return_format'.\n \"\"\"\n if self._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:\n self.final_setup()\n\n if wrt is None:\n wrt = list(self.driver._designvars)\n if not wrt:\n raise RuntimeError(\"Driver is not providing any design variables \"\n \"for compute_totals.\")\n\n if of is None:\n of = list(self.driver._objs)\n of.extend(self.driver._cons)\n if not of:\n raise RuntimeError(\"Driver is not providing any response variables \"\n \"for compute_totals.\")\n\n if self.model._owns_approx_jac:\n total_info = _TotalJacInfo(self, of, wrt, use_abs_names, return_format,\n approx=True, driver_scaling=driver_scaling)\n return total_info.compute_totals_approx(initialize=True)\n else:\n total_info = _TotalJacInfo(self, of, wrt, use_abs_names, return_format,\n debug_print=debug_print, driver_scaling=driver_scaling,\n get_remote=get_remote)\n return total_info.compute_totals()\n\n def set_solver_print(self, level=2, depth=1e99, type_='all'):\n \"\"\"\n Control printing for solvers and subsolvers in the model.\n\n Parameters\n ----------\n level : int\n iprint level. Set to 2 to print residuals each iteration; set to 1\n to print just the iteration totals; set to 0 to disable all printing\n except for failures, and set to -1 to disable all printing including failures.\n depth : int\n How deep to recurse. For example, you can set this to 0 if you only want\n to print the top level linear and nonlinear solver messages. Default\n prints everything.\n type_ : str\n Type of solver to set: 'LN' for linear, 'NL' for nonlinear, or 'all' for all.\n \"\"\"\n self.model.set_solver_print(level=level, depth=depth, type_=type_)\n\n def list_problem_vars(self,\n show_promoted_name=True,\n print_arrays=False,\n driver_scaling=True,\n desvar_opts=[],\n cons_opts=[],\n objs_opts=[],\n ):\n \"\"\"\n Print all design variables and responses (objectives and constraints).\n\n Parameters\n ----------\n show_promoted_name : bool\n If True, then show the promoted names of the variables.\n print_arrays : bool, optional\n When False, in the columnar display, just display norm of any ndarrays with size > 1.\n The norm is surrounded by vertical bars to indicate that it is a norm.\n When True, also display full values of the ndarray below the row. 
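# Illustrative sketch of compute_totals() with explicit of/wrt lists and the
# default 'flat_dict' return format, which is keyed by (of, wrt) tuples.  The
# variable names are hypothetical; it assumes the standard `openmdao.api`
# import.
def _example_compute_totals():
    import openmdao.api as om

    prob = om.Problem()
    prob.model.add_subsystem('comp', om.ExecComp('f = x**2 + 2.0*y'),
                             promotes=['*'])
    prob.setup(mode='rev')
    prob.run_model()

    derivs = prob.compute_totals(of=['f'], wrt=['x', 'y'],
                                 return_format='flat_dict')
    return derivs[('f', 'x')], derivs[('f', 'y')]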
Format is affected\n by the values set with numpy.set_printoptions\n Default is False.\n driver_scaling : bool, optional\n When True, return values that are scaled according to either the adder and scaler or\n the ref and ref0 values that were specified when add_design_var, add_objective, and\n add_constraint were called on the model. Default is True.\n desvar_opts : list of str\n List of optional columns to be displayed in the desvars table.\n Allowed values are:\n ['lower', 'upper', 'ref', 'ref0', 'indices', 'adder', 'scaler', 'parallel_deriv_color',\n 'cache_linear_solution', 'units']\n cons_opts : list of str\n List of optional columns to be displayed in the cons table.\n Allowed values are:\n ['lower', 'upper', 'equals', 'ref', 'ref0', 'indices', 'index', 'adder', 'scaler',\n 'linear', 'parallel_deriv_color',\n 'cache_linear_solution', 'units']\n objs_opts : list of str\n List of optional columns to be displayed in the objs table.\n Allowed values are:\n ['ref', 'ref0', 'indices', 'adder', 'scaler', 'units',\n 'parallel_deriv_color', 'cache_linear_solution']\n\n \"\"\"\n default_col_names = ['name', 'val', 'size']\n\n # Design vars\n desvars = self.driver._designvars\n vals = self.driver.get_design_var_values(get_remote=True, driver_scaling=driver_scaling)\n header = \"Design Variables\"\n col_names = default_col_names + desvar_opts\n self._write_var_info_table(header, col_names, desvars, vals,\n show_promoted_name=show_promoted_name,\n print_arrays=print_arrays,\n col_spacing=2)\n\n # Constraints\n cons = self.driver._cons\n vals = self.driver.get_constraint_values(driver_scaling=driver_scaling)\n header = \"Constraints\"\n col_names = default_col_names + cons_opts\n self._write_var_info_table(header, col_names, cons, vals,\n show_promoted_name=show_promoted_name,\n print_arrays=print_arrays,\n col_spacing=2)\n\n objs = self.driver._objs\n vals = self.driver.get_objective_values(driver_scaling=driver_scaling)\n header = \"Objectives\"\n col_names = default_col_names + objs_opts\n self._write_var_info_table(header, col_names, objs, vals,\n show_promoted_name=show_promoted_name,\n print_arrays=print_arrays,\n col_spacing=2)\n\n def _write_var_info_table(self, header, col_names, meta, vals, print_arrays=False,\n show_promoted_name=True, col_spacing=1):\n \"\"\"\n Write a table of information for the problem variable in meta and vals.\n\n Parameters\n ----------\n header : str\n The header line for the table.\n col_names : list of str\n List of column labels.\n meta : OrderedDict\n Dictionary of metadata for each problem variable.\n vals : OrderedDict\n Dictionary of values for each problem variable.\n print_arrays : bool, optional\n When False, in the columnar display, just display norm of any ndarrays with size > 1.\n The norm is surrounded by vertical bars to indicate that it is a norm.\n When True, also display full values of the ndarray below the row. 
Format is affected\n by the values set with numpy.set_printoptions\n Default is False.\n show_promoted_name : bool\n If True, then show the promoted names of the variables.\n col_spacing : int\n Number of spaces between columns in the table.\n \"\"\"\n abs2prom = self.model._var_abs2prom\n\n # Get the values for all the elements in the tables\n rows = []\n for name, meta in meta.items():\n\n row = {}\n for col_name in col_names:\n if col_name == 'name':\n if show_promoted_name:\n row[col_name] = name\n else:\n if name in abs2prom['input']:\n row[col_name] = abs2prom['input'][name]\n elif name in abs2prom['output']:\n row[col_name] = abs2prom['output'][name]\n else:\n # Promoted auto_ivc name. Keep it promoted\n row[col_name] = name\n\n elif col_name == 'val':\n row[col_name] = vals[name]\n else:\n row[col_name] = meta[col_name]\n rows.append(row)\n\n col_space = ' ' * col_spacing\n print(\"-\" * len(header))\n print(header)\n print(\"-\" * len(header))\n\n # loop through the rows finding the max widths\n max_width = {}\n for col_name in col_names:\n max_width[col_name] = len(col_name)\n for row in rows:\n for col_name in col_names:\n cell = row[col_name]\n if isinstance(cell, np.ndarray) and cell.size > 1:\n out = '|{}|'.format(str(np.linalg.norm(cell)))\n else:\n out = str(cell)\n max_width[col_name] = max(len(out), max_width[col_name])\n\n # print col headers\n header_div = ''\n header_col_names = ''\n for col_name in col_names:\n header_div += '-' * max_width[col_name] + col_space\n header_col_names += pad_name(col_name, max_width[col_name], quotes=False) + col_space\n print(header_col_names)\n print(header_div[:-1])\n\n # print rows with var info\n for row in rows:\n have_array_values = [] # keep track of which values are arrays\n row_string = ''\n for col_name in col_names:\n cell = row[col_name]\n if isinstance(cell, np.ndarray) and cell.size > 1:\n out = '|{}|'.format(str(np.linalg.norm(cell)))\n have_array_values.append(col_name)\n else:\n out = str(cell)\n row_string += pad_name(out, max_width[col_name], quotes=False) + col_space\n print(row_string)\n\n if print_arrays:\n left_column_width = max_width['name']\n for col_name in have_array_values:\n print(\"{}{}:\".format((left_column_width + col_spacing) * ' ', col_name))\n cell = row[col_name]\n out_str = pprint.pformat(cell)\n indented_lines = [(left_column_width + col_spacing) * ' ' +\n s for s in out_str.splitlines()]\n print('\\n'.join(indented_lines) + '\\n')\n\n print()\n\n def load_case(self, case):\n \"\"\"\n Pull all input and output variables from a case into the model.\n\n Parameters\n ----------\n case : Case object\n A Case from a CaseRecorder file.\n \"\"\"\n inputs = case.inputs if case.inputs is not None else None\n if inputs:\n for name in inputs.absolute_names():\n if name not in self.model._var_abs2prom['input']:\n raise KeyError(\"{}: Input variable, '{}', recorded in the case is not \"\n \"found in the model\".format(self.msginfo, name))\n self[name] = inputs[name]\n\n outputs = case.outputs if case.outputs is not None else None\n if outputs:\n for name in outputs.absolute_names():\n if name not in self.model._var_abs2prom['output']:\n raise KeyError(\"{}: Output variable, '{}', recorded in the case is not \"\n \"found in the model\".format(self.msginfo, name))\n self[name] = outputs[name]\n\n def check_config(self, logger=None, checks=_default_checks, out_file='openmdao_checks.out'):\n \"\"\"\n Perform optional error checks on a Problem.\n\n Parameters\n ----------\n logger : object\n Logging object.\n checks 
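# Illustrative sketch of the reporting and case-reloading helpers above.  It
# assumes a case was previously recorded to the hypothetical file 'cases.sql'
# under the hypothetical name 'baseline'; the optional column names come from
# the allowed values listed in the list_problem_vars docstring.
def _example_report_and_reload(prob):
    import openmdao.api as om

    prob.list_problem_vars(desvar_opts=['lower', 'upper', 'units'],
                           cons_opts=['equals', 'linear'],
                           print_arrays=True)

    # Pull a previously recorded case back into the model and re-run it.
    case = om.CaseReader('cases.sql').get_case('baseline')
    prob.load_case(case)
    prob.run_model()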
: list of str or None or the string 'all'\n Determines what config checks are run.\n If None, no checks are run\n If list of str, run those config checks\n If ‘all’, all the checks ('auto_ivc_warnings', 'comp_has_no_outputs', 'cycles',\n 'dup_inputs', 'missing_recorders', 'out_of_order', 'promotions', 'solvers',\n 'system', 'unconnected_inputs') are run\n out_file : str or None\n If not None, output will be written to this file in addition to stdout.\n \"\"\"\n if checks is None:\n return\n\n if logger is None:\n logger = get_logger('check_config', out_file=out_file, use_format=True)\n\n if checks == 'all':\n checks = sorted(_all_checks)\n\n for c in checks:\n if c not in _all_checks:\n print(\"WARNING: '%s' is not a recognized check. Available checks are: %s\" %\n (c, sorted(_all_checks)))\n continue\n logger.info('checking %s' % c)\n _all_checks[c](self, logger)\n\n def set_complex_step_mode(self, active):\n \"\"\"\n Turn on or off complex stepping mode.\n\n Parameters\n ----------\n active : bool\n Complex mode flag; set to True prior to commencing complex step.\n \"\"\"\n if self._metadata is None or \\\n self._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:\n raise RuntimeError(f\"{self.msginfo}: set_complex_step_mode cannot be called before \"\n \"`Problem.run_model()`, `Problem.run_driver()`, or \"\n \"`Problem.final_setup()`.\")\n\n if active and not self._metadata['force_alloc_complex']:\n raise RuntimeError(f\"{self.msginfo}: To enable complex step, specify \"\n \"'force_alloc_complex=True' when calling setup on the problem, \"\n \"e.g. 'problem.setup(force_alloc_complex=True)'\")\n\n self.model._set_complex_step_mode(active)\n\n\ndef _assemble_derivative_data(derivative_data, rel_error_tol, abs_error_tol, out_stream,\n compact_print, system_list, global_options, totals=False,\n indep_key=None, print_reverse=False,\n show_only_incorrect=False, lcons=None):\n \"\"\"\n Compute the relative and absolute errors in the given derivatives and print to the out_stream.\n\n Parameters\n ----------\n derivative_data : dict\n Dictionary containing derivative information keyed by system name.\n rel_error_tol : float\n Relative error tolerance.\n abs_error_tol : float\n Absolute error tolerance.\n out_stream : file-like object\n Where to send human readable output.\n Set to None to suppress.\n compact_print : bool\n If results should be printed verbosely or in a table.\n system_list : iterable\n The systems (in the proper order) that were checked.0\n global_options : dict\n Dictionary containing the options for the approximation.\n totals : bool\n Set to True if we are doing check_totals to skip a bunch of stuff.\n indep_key : dict of sets, optional\n Keyed by component name, contains the of/wrt keys that are declared not dependent.\n print_reverse : bool, optional\n Set to True if compact_print results need to include columns for reverse mode.\n show_only_incorrect : bool, optional\n Set to True if output should print only the subjacs found to be incorrect.\n lcons : list or None\n For total derivatives only, list of outputs that are actually linear constraints.\n \"\"\"\n nan = float('nan')\n suppress_output = out_stream is None\n\n if compact_print:\n if print_reverse:\n deriv_line = \"{0} wrt {1} | {2:.4e} | {3} | {4:.4e} | {5:.4e} | {6} | {7}\" \\\n \" | {8:.4e} | {9} | {10}\"\n else:\n deriv_line = \"{0} wrt {1} | {2:.4e} | {3:.4e} | {4:.4e} | {5:.4e}\"\n\n # Keep track of the worst subjac in terms of relative error for fwd and rev\n if not suppress_output and compact_print and 
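# Illustrative sketch of manually complex-stepping through a model with
# set_complex_step_mode(); as enforced above, this requires
# setup(force_alloc_complex=True) and a completed final_setup.  The step size
# and names are hypothetical.
def _example_complex_step_mode():
    import openmdao.api as om

    prob = om.Problem()
    prob.model.add_subsystem('comp', om.ExecComp('y = x**2'), promotes=['*'])
    prob.setup(force_alloc_complex=True)
    prob.run_model()

    prob.set_complex_step_mode(True)
    prob.set_val('x', 3.0 + 1.0e-40j)
    prob.run_model()
    dy_dx = prob.get_val('y').imag / 1.0e-40   # expect ~2*x = 6.0
    prob.set_complex_step_mode(False)
    return dy_dx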
not totals:\n worst_subjac_rel_err = 0.0\n worst_subjac = None\n\n if not suppress_output and not totals and show_only_incorrect:\n out_stream.write('\\n** Only writing information about components with '\n 'incorrect Jacobians **\\n\\n')\n\n for system in system_list:\n\n sys_name = system.pathname\n sys_class_name = type(system).__name__\n matrix_free = system.matrix_free\n\n # Match header to appropriate type.\n if isinstance(system, Component):\n sys_type = 'Component'\n elif isinstance(system, Group):\n sys_type = 'Group'\n else:\n sys_type = type(system).__name__\n\n if sys_name not in derivative_data:\n msg = \"No derivative data found for %s '%s'.\" % (sys_type, sys_name)\n issue_warning(msg, category=DerivativesWarning)\n continue\n\n derivatives = derivative_data[sys_name]\n\n if totals:\n sys_name = 'Full Model'\n\n # Sorted keys ensures deterministic ordering\n sorted_keys = sorted(derivatives.keys())\n num_bad_jacs = 0 # Keep track of number of bad derivative values for each component\n\n if not suppress_output:\n # Need to capture the output of a component's derivative\n # info so that it can be used if that component is the\n # worst subjac. That info is printed at the bottom of all the output\n out_buffer = StringIO()\n if out_stream:\n header_str = '-' * (len(sys_name) + len(sys_type) + len(sys_class_name) + 5) + '\\n'\n out_buffer.write(header_str)\n out_buffer.write(\"{}: {} '{}'\".format(sys_type, sys_class_name, sys_name) + '\\n')\n out_buffer.write(header_str)\n\n if compact_print:\n # Error Header\n if totals:\n header = \"{0} wrt {1} | {2} | {3} | {4} | {5}\"\\\n .format(\n pad_name('<output>', 30, quotes=True),\n pad_name('<variable>', 30, quotes=True),\n pad_name('calc mag.'),\n pad_name('check mag.'),\n pad_name('a(cal-chk)'),\n pad_name('r(cal-chk)'),\n )\n else:\n max_width_of = len(\"'<output>'\")\n max_width_wrt = len(\"'<variable>'\")\n for of, wrt in sorted_keys:\n max_width_of = max(max_width_of, len(of) + 2) # 2 to include quotes\n max_width_wrt = max(max_width_wrt, len(wrt) + 2)\n\n if print_reverse:\n header = \\\n \"{0} wrt {1} | {2} | {3} | {4} | {5} | {6} | {7} | {8} | {9} | {10}\" \\\n .format(\n pad_name('<output>', max_width_of, quotes=True),\n pad_name('<variable>', max_width_wrt, quotes=True),\n pad_name('fwd mag.'),\n pad_name('rev mag.'),\n pad_name('check mag.'),\n pad_name('a(fwd-chk)'),\n pad_name('a(rev-chk)'),\n pad_name('a(fwd-rev)'),\n pad_name('r(fwd-chk)'),\n pad_name('r(rev-chk)'),\n pad_name('r(fwd-rev)')\n )\n else:\n header = \"{0} wrt {1} | {2} | {3} | {4} | {5}\"\\\n .format(\n pad_name('<output>', max_width_of, quotes=True),\n pad_name('<variable>', max_width_wrt, quotes=True),\n pad_name('calc mag.'),\n pad_name('check mag.'),\n pad_name('a(cal-chk)'),\n pad_name('r(cal-chk)'),\n )\n\n if out_stream:\n out_buffer.write(header + '\\n')\n out_buffer.write('-' * len(header) + '\\n' + '\\n')\n\n def safe_norm(arr):\n return 0. 
if arr is None or arr.size == 0 else np.linalg.norm(arr)\n\n for of, wrt in sorted_keys:\n\n if totals:\n fd_opts = global_options['']\n else:\n fd_opts = global_options[sys_name][wrt]\n\n directional = fd_opts.get('directional')\n do_rev = not totals and matrix_free and not directional\n do_rev_dp = not totals and matrix_free and directional\n\n derivative_info = derivatives[of, wrt]\n # TODO total derivs may have been computed in rev mode, not fwd\n forward = derivative_info['J_fwd']\n try:\n fd = derivative_info['J_fd']\n except KeyError:\n # this can happen when a partial is not declared, which means it should be zero\n fd = np.zeros(forward.shape)\n\n if do_rev:\n reverse = derivative_info.get('J_rev')\n\n fwd_error = safe_norm(forward - fd)\n if do_rev_dp:\n fwd_rev_error = derivative_info['directional_fwd_rev']\n rev_error = derivative_info['directional_fd_rev']\n elif do_rev:\n rev_error = safe_norm(reverse - fd)\n fwd_rev_error = safe_norm(forward - reverse)\n else:\n rev_error = fwd_rev_error = None\n\n fwd_norm = safe_norm(forward)\n fd_norm = safe_norm(fd)\n if do_rev:\n rev_norm = safe_norm(reverse)\n else:\n rev_norm = None\n\n derivative_info['abs error'] = abs_err = ErrorTuple(fwd_error, rev_error, fwd_rev_error)\n derivative_info['magnitude'] = magnitude = MagnitudeTuple(fwd_norm, rev_norm, fd_norm)\n\n if fd_norm == 0.:\n if fwd_norm == 0.:\n derivative_info['rel error'] = rel_err = ErrorTuple(nan, nan, nan)\n\n else:\n # If fd_norm is zero, let's use fwd_norm as the divisor for relative\n # check. That way we don't accidentally squelch a legitimate problem.\n if do_rev or do_rev_dp:\n rel_err = ErrorTuple(fwd_error / fwd_norm,\n rev_error / fwd_norm,\n fwd_rev_error / fwd_norm)\n derivative_info['rel error'] = rel_err\n else:\n derivative_info['rel error'] = rel_err = ErrorTuple(fwd_error / fwd_norm,\n None,\n None)\n\n else:\n if do_rev or do_rev_dp:\n derivative_info['rel error'] = rel_err = ErrorTuple(fwd_error / fd_norm,\n rev_error / fd_norm,\n fwd_rev_error / fd_norm)\n else:\n derivative_info['rel error'] = rel_err = ErrorTuple(fwd_error / fd_norm,\n None,\n None)\n\n # Skip printing the dependent keys if the derivatives are fine.\n if not compact_print and indep_key is not None:\n rel_key = (of, wrt)\n if rel_key in indep_key[sys_name] and fd_norm < abs_error_tol:\n del derivative_data[sys_name][rel_key]\n continue\n\n # Informative output for responses that were declared with an index.\n indices = derivative_info.get('indices')\n if indices is not None:\n of = '{} (index size: {})'.format(of, indices)\n\n if not suppress_output:\n\n if compact_print:\n if totals:\n if out_stream:\n out_buffer.write(deriv_line.format(\n pad_name(of, 30, quotes=True),\n pad_name(wrt, 30, quotes=True),\n magnitude.forward,\n magnitude.fd,\n abs_err.forward,\n rel_err.forward,\n ) + '\\n')\n else:\n error_string = ''\n for error in abs_err:\n if error is None:\n continue\n if not np.isnan(error) and error >= abs_error_tol:\n error_string += ' >ABS_TOL'\n break\n\n # See if this component has the greater\n # error in the derivative computation\n # compared to the other components so far\n is_worst_subjac = False\n for i, error in enumerate(rel_err):\n if error is None:\n continue\n if not np.isnan(error):\n # only 1st and 2d errs\n if i < 2 and error > worst_subjac_rel_err:\n worst_subjac_rel_err = error\n worst_subjac = (sys_type, sys_class_name, sys_name)\n is_worst_subjac = True\n if not np.isnan(error) and error >= rel_error_tol:\n error_string += ' >REL_TOL'\n break\n\n if 
error_string: # Any error string indicates that at least one of the\n # derivative calcs is greater than the rel tolerance\n num_bad_jacs += 1\n\n if out_stream:\n if directional:\n wrt = \"(d)'%s'\" % wrt\n wrt_padded = pad_name(wrt, max_width_wrt, quotes=False)\n else:\n wrt_padded = pad_name(wrt, max_width_wrt, quotes=True)\n if print_reverse:\n deriv_info_line = \\\n deriv_line.format(\n pad_name(of, max_width_of, quotes=True),\n wrt_padded,\n magnitude.forward,\n _format_if_not_matrix_free(matrix_free and not directional,\n magnitude.reverse),\n magnitude.fd,\n abs_err.forward,\n _format_if_not_matrix_free(matrix_free,\n abs_err.reverse),\n _format_if_not_matrix_free(matrix_free,\n abs_err.forward_reverse),\n rel_err.forward,\n _format_if_not_matrix_free(matrix_free,\n rel_err.reverse),\n _format_if_not_matrix_free(matrix_free,\n rel_err.forward_reverse),\n )\n else:\n deriv_info_line = \\\n deriv_line.format(\n pad_name(of, max_width_of, quotes=True),\n wrt_padded,\n magnitude.forward,\n magnitude.fd,\n abs_err.forward,\n rel_err.forward,\n )\n if not show_only_incorrect or error_string:\n out_buffer.write(deriv_info_line + error_string + '\\n')\n\n if is_worst_subjac:\n worst_subjac_line = deriv_info_line\n else: # not compact print\n\n fd_desc = \"{}:{}\".format(fd_opts['method'], fd_opts['form'])\n\n # Magnitudes\n if out_stream:\n if directional:\n out_buffer.write(f\" {sys_name}: '{of}' wrt (d)'{wrt}'\")\n else:\n out_buffer.write(f\" {sys_name}: '{of}' wrt '{wrt}'\")\n if lcons and of in lcons:\n out_buffer.write(\" (Linear constraint)\")\n\n out_buffer.write('\\n')\n if do_rev or do_rev_dp:\n out_buffer.write(' Forward')\n else:\n out_buffer.write(' Analytic')\n out_buffer.write(' Magnitude: {:.6e}\\n'.format(magnitude.forward))\n if do_rev:\n txt = ' Reverse Magnitude: {:.6e}'\n if out_stream:\n out_buffer.write(txt.format(magnitude.reverse) + '\\n')\n if out_stream:\n out_buffer.write(' Fd Magnitude: {:.6e} ({})\\n'.format(\n magnitude.fd, fd_desc))\n\n # Absolute Errors\n if do_rev:\n error_descs = ('(Jfor - Jfd) ', '(Jrev - Jfd) ', '(Jfor - Jrev)')\n elif do_rev_dp:\n error_descs = ('(Jfor - Jfd) ', '(Jrev - Jfd Dot Product Test) ',\n '(Jrev - Jfor Dot Product Test) ')\n else:\n error_descs = ('(Jan - Jfd) ', )\n\n for error, desc in zip(abs_err, error_descs):\n error_str = _format_error(error, abs_error_tol)\n if error_str.endswith('*'):\n num_bad_jacs += 1\n if out_stream:\n out_buffer.write(' Absolute Error {}: {}\\n'.format(desc, error_str))\n if out_stream:\n out_buffer.write('\\n')\n\n # Relative Errors\n if do_rev:\n if fd_norm == 0.:\n error_descs = ('(Jfor - Jfd) / Jfor ', '(Jrev - Jfd) / Jfor ',\n '(Jfor - Jrev) / Jfor')\n else:\n error_descs = ('(Jfor - Jfd) / Jfd ', '(Jrev - Jfd) / Jfd ',\n '(Jfor - Jrev) / Jfd')\n elif do_rev_dp:\n if fd_norm == 0.:\n error_descs = ('(Jfor - Jfd) / Jfor ',\n '(Jrev - Jfd Dot Product Test) / Jfor ',\n '(Jrev - Jfor Dot Product Test) / Jfor ')\n else:\n error_descs = ('(Jfor - Jfd) / Jfd ',\n '(Jrev - Jfd Dot Product Test) / Jfd ',\n '(Jrev - Jfor Dot Product Test) / Jfd ')\n else:\n if fd_norm == 0.:\n error_descs = ('(Jan - Jfd) / Jan ', )\n else:\n error_descs = ('(Jan - Jfd) / Jfd ', )\n\n for error, desc in zip(rel_err, error_descs):\n error_str = _format_error(error, rel_error_tol)\n if error_str.endswith('*'):\n num_bad_jacs += 1\n if out_stream:\n out_buffer.write(' Relative Error {}: {}\\n'.format(desc, error_str))\n\n if out_stream:\n if MPI and MPI.COMM_WORLD.size > 1:\n out_buffer.write(' MPI Rank 
{}\\n'.format(MPI.COMM_WORLD.rank))\n out_buffer.write('\\n')\n\n # Raw Derivatives\n if out_stream:\n if do_rev_dp:\n out_buffer.write(' Directional Forward Derivative (Jfor)\\n')\n else:\n if not totals and matrix_free:\n out_buffer.write(' Raw Forward')\n else:\n out_buffer.write(' Raw Analytic')\n out_buffer.write(' Derivative (Jfor)\\n')\n out_buffer.write(str(forward) + '\\n')\n out_buffer.write('\\n')\n\n if not totals and matrix_free:\n if out_stream:\n if not directional:\n out_buffer.write(' Raw Reverse Derivative (Jrev)\\n')\n out_buffer.write(str(reverse) + '\\n')\n out_buffer.write('\\n')\n\n if out_stream:\n if directional:\n out_buffer.write(' Directional FD Derivative (Jfd)\\n')\n else:\n out_buffer.write(' Raw FD Derivative (Jfd)\\n')\n out_buffer.write(str(fd) + '\\n')\n out_buffer.write('\\n')\n\n if out_stream:\n out_buffer.write(' -' * 30 + '\\n')\n\n # End of if compact print if/else\n # End of if not suppress_output\n # End of for of, wrt in sorted_keys\n\n if not show_only_incorrect or num_bad_jacs:\n if out_stream and not suppress_output:\n out_stream.write(out_buffer.getvalue())\n\n # End of for system in system_list\n\n if not suppress_output and compact_print and not totals:\n if worst_subjac:\n worst_subjac_header = \\\n \"Sub Jacobian with Largest Relative Error: {1} '{2}'\".format(*worst_subjac)\n out_stream.write('\\n' + '#' * len(worst_subjac_header) + '\\n')\n out_stream.write(\"{}\\n\".format(worst_subjac_header))\n out_stream.write('#' * len(worst_subjac_header) + '\\n')\n out_stream.write(header + '\\n')\n out_stream.write('-' * len(header) + '\\n')\n out_stream.write(worst_subjac_line + '\\n')\n\n\ndef _format_if_not_matrix_free(matrix_free, val):\n \"\"\"\n Return string to represent deriv check value in compact display.\n\n Parameters\n ----------\n matrix_free : bool\n If True, then the associated Component is matrix-free.\n val : float\n The deriv check value.\n\n Returns\n -------\n str\n String which is the actual value if matrix-free, otherwise 'n/a'\n \"\"\"\n if matrix_free:\n return '{0:.4e}'.format(val)\n else:\n return pad_name('n/a')\n\n\ndef _format_error(error, tol):\n \"\"\"\n Format the error, flagging if necessary.\n\n Parameters\n ----------\n error : float\n The absolute or relative error.\n tol : float\n Tolerance above which errors are flagged\n\n Returns\n -------\n str\n Formatted and possibly flagged error.\n \"\"\"\n if np.isnan(error) or error < tol:\n return '{:.6e}'.format(error)\n return '{:.6e} *'.format(error)\n\n\ndef _get_fd_options(var, global_method, local_opts, global_step, global_form, global_step_calc,\n alloc_complex):\n local_wrt = var\n\n # Determine if fd or cs.\n method = global_method\n if local_wrt in local_opts:\n local_method = local_opts[local_wrt]['method']\n if local_method:\n method = local_method\n\n # We can't use CS if we haven't allocated a complex vector, so we fall back on fd.\n if method == 'cs' and not alloc_complex:\n method = 'fd'\n could_not_cs = True\n else:\n could_not_cs = False\n\n fd_options = {'order': None,\n 'method': method}\n\n if method == 'cs':\n defaults = ComplexStep.DEFAULT_OPTIONS\n\n fd_options['form'] = None\n fd_options['step_calc'] = None\n\n elif method == 'fd':\n defaults = FiniteDifference.DEFAULT_OPTIONS\n\n fd_options['form'] = global_form\n fd_options['step_calc'] = global_step_calc\n\n if global_step and global_method == method:\n fd_options['step'] = global_step\n else:\n fd_options['step'] = defaults['step']\n\n fd_options['directional'] = 
defaults['directional']\n\n # Precedence: component options > global options > defaults\n if local_wrt in local_opts:\n for name in ['form', 'step', 'step_calc', 'directional']:\n value = local_opts[local_wrt][name]\n if value is not None:\n fd_options[name] = value\n\n return fd_options, could_not_cs\n", "\"\"\"\nUtils for dealing with arrays.\n\"\"\"\nimport sys\nfrom itertools import product\nfrom copy import copy\n\nimport numpy as np\nfrom scipy.sparse import coo_matrix\n\nfrom openmdao.core.constants import INT_DTYPE\n\n\ndef shape_to_len(shape):\n \"\"\"\n Compute length given a shape tuple.\n\n For realistic-dimension arrays, looping over the shape tuple is much faster than np.prod.\n\n Parameters\n ----------\n shape : tuple\n Numpy shape tuple.\n\n Returns\n -------\n int\n Length of multidimensional array.\n \"\"\"\n if shape is None:\n return None\n\n length = 1\n for dim in shape:\n length *= dim\n\n return length\n\n\ndef evenly_distrib_idxs(num_divisions, arr_size):\n \"\"\"\n Return evenly distributed entries for the given array size.\n\n Given a number of divisions and the size of an array, chop the array up\n into pieces according to number of divisions, keeping the distribution\n of entries as even as possible.\n\n Parameters\n ----------\n num_divisions : int\n Number of parts to divide the array into.\n arr_size : int\n Number of entries in the array.\n\n Returns\n -------\n tuple\n a tuple of (sizes, offsets), where sizes and offsets contain values for all\n divisions.\n \"\"\"\n base, leftover = divmod(arr_size, num_divisions)\n sizes = np.full(num_divisions, base, dtype=INT_DTYPE)\n\n # evenly distribute the remainder across size-leftover procs,\n # instead of giving the whole remainder to one proc\n sizes[:leftover] += 1\n\n offsets = np.zeros(num_divisions, dtype=INT_DTYPE)\n offsets[1:] = np.cumsum(sizes)[:-1]\n\n return sizes, offsets\n\n\ndef take_nth(rank, size, seq):\n \"\"\"\n Iterate returning every nth value.\n\n Return an iterator over the sequence that returns every\n nth element of seq based on the given rank within a group of\n the given size. 
For example, if size = 2, a rank of 0 returns\n even indexed elements and a rank of 1 returns odd indexed elements.\n\n Parameters\n ----------\n rank : int\n MPI rank of this process.\n size : int\n Size of the array we're taking nth entries from.\n seq : iter\n Iterator containing the values being returned.\n \"\"\"\n assert(rank < size)\n it = iter(seq)\n while True:\n for proc in range(size):\n if rank == proc:\n try:\n yield next(it)\n except StopIteration:\n return\n else:\n try:\n next(it)\n except StopIteration:\n return\n\n\ndef convert_neg(arr, size):\n \"\"\"\n Convert any negative indices into their positive equivalent.\n\n This only works for a 1D array.\n\n Parameters\n ----------\n arr : ndarray\n Array having negative indices converted.\n size : int\n Dimension of the array.\n\n Returns\n -------\n ndarray\n The converted array.\n \"\"\"\n arr[arr < 0] += size\n return arr\n\n\ndef array_viz(arr, prob=None, of=None, wrt=None, stream=sys.stdout):\n \"\"\"\n Display the structure of a boolean array in a compact form.\n\n If prob, of, and wrt are supplied, print the name of the response alongside\n each row and print the names of the design vars, aligned with each column, at\n the bottom.\n\n Parameters\n ----------\n arr : ndarray\n Array being visualized.\n prob : Problem or None\n Problem object.\n of : list of str or None\n Names of response variables used in derivative calculation.\n wrt : list of str or None\n Names of design variables used in derivative calculation.\n stream : file-like\n Stream where output will be written.\n \"\"\"\n if len(arr.shape) != 2:\n raise RuntimeError(\"array_viz only works for 2d arrays.\")\n\n if prob is not None:\n if of is None:\n of = prob.driver._get_ordered_nl_responses()\n if wrt is None:\n wrt = list(prob.driver._designvars)\n\n if prob is None or of is None or wrt is None:\n for r in range(arr.shape[0]):\n for c in range(arr.shape[1]):\n if arr[r, c]:\n stream.write('x')\n else:\n stream.write('.')\n stream.write(' %d\\n' % r)\n else:\n\n row = 0\n for res in of:\n for r in range(row, row + prob.driver._responses[res]['size']):\n col = 0\n for dv in wrt:\n for c in range(col, col + prob.driver._designvars[dv]['size']):\n if arr[r, c]:\n stream.write('x')\n else:\n stream.write('.')\n col = c + 1\n stream.write(' %d %s\\n' % (r, res))\n row = r + 1\n\n start = 0\n for name in wrt:\n tab = ' ' * start\n stream.write('%s|%s\\n' % (tab, name))\n start += prob.driver._designvars[name]['size']\n\n\ndef array_connection_compatible(shape1, shape2):\n \"\"\"\n Return True if the two arrays shapes are compatible.\n\n Array shapes are compatible if the underlying data has the same size and is\n stored in the same contiguous order for the two shapes.\n\n Parameters\n ----------\n shape1 : tuple of int\n Shape of the first array.\n shape2 : tuple of int\n Shape of the second array.\n\n Returns\n -------\n bool\n True if the two shapes are compatible for connection, else False.\n \"\"\"\n ashape1 = np.asarray(shape1, dtype=INT_DTYPE)\n ashape2 = np.asarray(shape2, dtype=INT_DTYPE)\n\n size1 = shape_to_len(ashape1)\n size2 = shape_to_len(ashape2)\n\n # Shapes are not connection-compatible if size is different\n if size1 != size2:\n return False\n\n nz1 = np.where(ashape1 > 1)[0]\n nz2 = np.where(ashape2 > 1)[0]\n\n if len(nz1) > 0:\n fundamental_shape1 = ashape1[np.min(nz1): np.max(nz1) + 1]\n else:\n fundamental_shape1 = np.ones((1,))\n\n if len(nz2) > 0:\n fundamental_shape2 = ashape2[np.min(nz2): np.max(nz2) + 1]\n else:\n fundamental_shape2 = 
np.ones((1,))\n\n return np.all(fundamental_shape1 == fundamental_shape2)\n\n\ndef tile_sparse_jac(data, rows, cols, nrow, ncol, num_nodes):\n \"\"\"\n Assemble arrays necessary to define a COO sparse jacobian for a vectorized component.\n\n These arrays can also be passed to csc_matrix or csr_matrix to create CSC and CSR sparse\n matrices.\n\n Parameters\n ----------\n data : ndarray\n Array of values\n rows : index array\n Array of row indices.\n cols : index array\n Array of column indices.\n nrow : int\n Number of rows in sub jacobian.\n ncol : int\n Number of columns in sub jacobian.\n num_nodes : int\n Number of vectorized copies to tile.\n\n Returns\n -------\n ndarray, ndarray, ndarray\n Arrays to define a COO sparse jacobian of size num_nodes*nrow by num_nodes*ncol\n \"\"\"\n nnz = len(rows)\n\n if np.isscalar(data):\n data = data * np.ones(nnz)\n\n if not np.isscalar(nrow):\n nrow = shape_to_len(nrow)\n\n if not np.isscalar(ncol):\n ncol = shape_to_len(ncol)\n\n repeat_arr = np.repeat(np.arange(num_nodes), nnz)\n\n data = np.tile(data, num_nodes)\n rows = np.tile(rows, num_nodes) + repeat_arr * nrow\n cols = np.tile(cols, num_nodes) + repeat_arr * ncol\n\n return data, rows, cols\n\n\ndef _global2local_offsets(global_offsets):\n \"\"\"\n Given existing global offsets, return a copy with offsets localized to each process.\n\n Parameters\n ----------\n global_offsets : dict\n Arrays of global offsets keyed by vec_name and deriv direction.\n\n Returns\n -------\n dict\n Arrays of local offsets keyed by vec_name and deriv direction.\n \"\"\"\n offsets = {}\n for type_ in global_offsets:\n goff = global_offsets[type_]\n offsets[type_] = goff.copy()\n if goff[0].size > 0:\n # adjust offsets to be local in each process\n offsets[type_] -= goff[:, 0].reshape((goff.shape[0], 1))\n\n return offsets\n\n\ndef get_input_idx_split(full_idxs, inputs, outputs, use_full_cols, is_total):\n \"\"\"\n Split an array of indices into vec outs + ins into two arrays of indices into outs and ins.\n\n Parameters\n ----------\n full_idxs : ndarray\n Indices into the full array (which could be outs + ins or just ins)\n inputs : Vector\n Inputs vector.\n outputs : Vector\n Outputs vector.\n use_full_cols : bool\n If True, full idxs are into the full outs + ins vector.\n is_total : bool\n If True, total derivatives are being computed and wrt vector is the outputs vector.\n\n Returns\n -------\n list of tuples\n Each tuple is of the form (array, idxs).\n \"\"\"\n assert len(full_idxs) > 0, \"Empty index array passed to get_input_idx_split.\"\n full_idxs = np.asarray(full_idxs)\n if use_full_cols:\n out_size = len(outputs)\n out_idxs = full_idxs[full_idxs < out_size]\n in_idxs = full_idxs[full_idxs >= out_size] - out_size\n full = [(outputs, out_idxs), (inputs, in_idxs)]\n return [(vec, inds) for vec, inds in full if inds.size > 0]\n elif is_total:\n return [(outputs, full_idxs)]\n else:\n return [(inputs, full_idxs)]\n\n\ndef _flatten_src_indices(src_indices, shape_in, shape_out, size_out):\n \"\"\"\n Convert src_indices into a flat, non-negative form.\n\n Parameters\n ----------\n src_indices : ndarray\n Array of src_indices. 
Can be flat or multi-dimensional.\n shape_in : tuple\n Shape of the input variable.\n shape_out : tuple\n Shape of the output variable.\n size_out : int\n Size of the output variable.\n\n Returns\n -------\n ndarray\n The flattened src_indices.\n \"\"\"\n if len(shape_out) == 1 or shape_in == src_indices.shape:\n return convert_neg(src_indices.ravel(), size_out)\n\n entries = [list(range(x)) for x in shape_in]\n cols = np.vstack([src_indices[i] for i in product(*entries)])\n dimidxs = [convert_neg(cols[:, i], shape_out[i]) for i in range(cols.shape[1])]\n return np.ravel_multi_index(dimidxs, shape_out)\n\n\ndef sizes2offsets(size_array):\n \"\"\"\n For a given array of sizes, return an array of offsets.\n\n Offsets will be computed using a flattened version of size_array and then\n reshaped to match the shape of size_array.\n\n Parameters\n ----------\n size_array : ndarray\n Array of sizes.\n\n Returns\n -------\n ndarray\n Array of offsets.\n \"\"\"\n offsets = np.zeros(size_array.size, dtype=size_array.dtype)\n offsets[1:] = np.cumsum(size_array.flat)[:-1]\n return offsets.reshape(size_array.shape)\n\n\ndef abs_complex(x):\n \"\"\"\n Compute the absolute value of a complex-stepped vector.\n\n Rather than taking a Euclidian norm, simply negate the values that are less than zero.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n\n Returns\n -------\n ndarray\n Complex-step absolute value of the array.\n \"\"\"\n idx_neg = np.where(x < 0)\n x[idx_neg] = -x[idx_neg]\n return x\n\n\ndef dv_abs_complex(x, x_deriv):\n \"\"\"\n Compute the complex-step derivative of the absolute value function and its derivative.\n\n Parameters\n ----------\n x : ndarray\n Input array, used for determining which elements to negate.\n x_deriv : ndarray\n Incominng partial derivative array, may have one additional dimension.\n\n Returns\n -------\n ndarray\n Absolute value applied to x.\n ndarray\n Absolute value applied to x_deriv.\n \"\"\"\n idx_neg = np.where(x < 0)\n\n # Special case when x is (1, ) and x_deriv is (1, n).\n if len(x_deriv.shape) == 1:\n if idx_neg[0].size != 0:\n return -x, -x_deriv\n\n x[idx_neg] = -x[idx_neg]\n x_deriv[idx_neg] = -x_deriv[idx_neg]\n\n return x, x_deriv\n\n\ndef rand_sparsity(shape, density_ratio, dtype=bool):\n \"\"\"\n Return a random boolean COO matrix of the given shape with given percent density.\n\n Row and column indices are generated using random integers so some duplication\n is possible, resulting in a matrix with somewhat lower density than specified.\n\n Parameters\n ----------\n shape : tuple\n Desired shape of the matrix.\n density_ratio : float\n Approximate ratio of nonzero to zero entries in the desired matrix.\n dtype : type\n Specifies type of the values in the returned matrix.\n\n Returns\n -------\n coo_matrix\n A COO matrix with approximately the nonzero density desired.\n\n \"\"\"\n assert len(shape) == 2, f\"shape must be a size 2 tuple but {shape} was given\"\n\n nrows, ncols = shape\n\n nnz = int(nrows * ncols * density_ratio)\n\n data = np.ones(nnz, dtype=dtype)\n rows = np.random.randint(0, nrows, nnz)\n cols = np.random.randint(0, ncols, nnz)\n\n coo = coo_matrix((data, (rows, cols)), shape=shape)\n\n # get rid of dup rows/cols\n coo.sum_duplicates()\n\n return coo\n\n\ndef sparse_subinds(orig, inds):\n \"\"\"\n Compute new rows or cols resulting from applying inds on top of an existing sparsity pattern.\n\n This only comes into play when we have an approx total jacobian where some dv/resp have\n indices.\n\n Parameters\n ----------\n 
orig : ndarray\n Either row or col indices (part of a subjac sparsity pattern).\n inds : ndarray or list\n Sub-indices introduced when adding a desvar or response.\n\n Returns\n -------\n ndarray\n New compressed rows or cols.\n ndarray\n Mask array that can be used to update subjac value and corresponding index array to orig.\n \"\"\"\n mask = np.zeros(orig.size, dtype=bool)\n for i in inds:\n mask |= orig == i\n newsp = orig[mask]\n\n # replace the index with the 'compressed' index after we've masked out entries\n for r, i in enumerate(np.sort(inds)):\n newsp[newsp == i] = r\n\n return newsp, mask\n" ]
[ [ "scipy.sparse.issparse", "numpy.isnan", "numpy.linalg.norm", "numpy.asarray", "numpy.zeros", "numpy.sum", "numpy.ones", "numpy.any", "numpy.prod" ], [ "numpy.max", "numpy.full", "scipy.sparse.coo_matrix", "numpy.asarray", "numpy.zeros", "numpy.ones", "numpy.tile", "numpy.min", "numpy.where", "numpy.isscalar", "numpy.random.randint", "numpy.arange", "numpy.cumsum", "numpy.all", "numpy.sort", "numpy.ravel_multi_index" ] ]
tamiresdatascience/napari
[ "deec4259e8e92bd6b42e11107e8f3dd6b756ad46" ]
[ "napari/_tests/test_viewer.py" ]
[ "import os\n\nimport numpy as np\nimport pytest\n\nfrom napari import Viewer, layers\nfrom napari._tests.utils import (\n add_layer_by_type,\n check_view_transform_consistency,\n check_viewer_functioning,\n layer_test_data,\n)\nfrom napari.utils._tests.test_naming import eval_with_filename\n\n\ndef _get_all_keybinding_methods(type_):\n obj_methods = set(super(type_, type_).class_keymap.values())\n obj_methods.update(type_.class_keymap.values())\n return obj_methods\n\n\nviewer_methods = _get_all_keybinding_methods(Viewer)\nEXPECTED_NUMBER_OF_VIEWER_METHODS = 19\n\n\ndef test_len_methods_viewer():\n \"\"\"\n Make sure we do find all the methods attached to a viewer via keybindings\n \"\"\"\n assert len(viewer_methods) == EXPECTED_NUMBER_OF_VIEWER_METHODS\n\n\[email protected]\ndef test_non_existing_bindings():\n \"\"\"\n Those are condition tested in next unittest; but do not exists; this is\n likely due to an oversight somewhere.\n \"\"\"\n assert 'play' in [x.__name__ for x in viewer_methods]\n assert 'toggle_fullscreen' in [x.__name__ for x in viewer_methods]\n\n\[email protected]('func', viewer_methods)\ndef test_viewer_methods(make_napari_viewer, func):\n \"\"\"Test instantiating viewer.\"\"\"\n viewer = make_napari_viewer()\n\n if func.__name__ == 'toggle_fullscreen' and not os.getenv(\"CI\"):\n pytest.skip(\"Fullscreen cannot be tested in CI\")\n if func.__name__ == 'play':\n pytest.skip(\"Play cannot be tested with Pytest\")\n func(viewer)\n\n\ndef test_viewer(make_napari_viewer):\n \"\"\"Test instantiating viewer.\"\"\"\n viewer = make_napari_viewer()\n view = viewer.window.qt_viewer\n\n assert viewer.title == 'napari'\n assert view.viewer == viewer\n\n assert len(viewer.layers) == 0\n assert view.layers.model().rowCount() == 0\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n # Switch to 3D rendering mode and back to 2D rendering mode\n viewer.dims.ndisplay = 3\n assert viewer.dims.ndisplay == 3\n viewer.dims.ndisplay = 2\n assert viewer.dims.ndisplay == 2\n\n\nEXPECTED_NUMBER_OF_LAYER_METHODS = {\n 'Image': 0,\n 'Vectors': 0,\n 'Surface': 0,\n 'Tracks': 0,\n 'Points': 8,\n 'Labels': 14,\n 'Shapes': 17,\n}\n\n\n# We unroll the layer data, with the all the methods of the layer that we are\n# going to test, so that if one method fails we know which one, as well as\n# remove potential issues that would be triggered by calling methods after each\n# other.\n\n\nunrolled_layer_data = []\nfor layer_class, data, ndim in layer_test_data:\n methods = _get_all_keybinding_methods(layer_class)\n for func in methods:\n unrolled_layer_data.append(\n (layer_class, data, ndim, func, len(methods))\n )\n\n\[email protected](\n 'layer_class, data, ndim, func, Nmeth', unrolled_layer_data\n)\[email protected]('visible', [True, False])\ndef test_add_layer(\n make_napari_viewer, layer_class, data, ndim, func, Nmeth, visible\n):\n viewer = make_napari_viewer()\n layer = add_layer_by_type(viewer, layer_class, data, visible=visible)\n check_viewer_functioning(viewer, viewer.window.qt_viewer, data, ndim)\n\n func(layer)\n\n assert Nmeth == EXPECTED_NUMBER_OF_LAYER_METHODS[layer_class.__name__]\n\n\[email protected]('layer_class, a_unique_name, ndim', layer_test_data)\ndef test_add_layer_magic_name(\n make_napari_viewer, layer_class, a_unique_name, ndim\n):\n \"\"\"Test magic_name works when using add_* for layers\"\"\"\n # Tests for issue #1709\n viewer = make_napari_viewer() # noqa: F841\n layer = eval_with_filename(\n 
\"add_layer_by_type(viewer, layer_class, a_unique_name)\",\n \"somefile.py\",\n )\n assert layer.name == \"a_unique_name\"\n\n\ndef test_screenshot(make_napari_viewer):\n \"\"\"Test taking a screenshot.\"\"\"\n viewer = make_napari_viewer()\n\n np.random.seed(0)\n # Add image\n data = np.random.random((10, 15))\n viewer.add_image(data)\n\n # Add labels\n data = np.random.randint(20, size=(10, 15))\n viewer.add_labels(data)\n\n # Add points\n data = 20 * np.random.random((10, 2))\n viewer.add_points(data)\n\n # Add vectors\n data = 20 * np.random.random((10, 2, 2))\n viewer.add_vectors(data)\n\n # Add shapes\n data = 20 * np.random.random((10, 4, 2))\n viewer.add_shapes(data)\n\n # Take screenshot of the image canvas only\n screenshot = viewer.screenshot(canvas_only=True)\n assert screenshot.ndim == 3\n\n # Take screenshot with the viewer included\n screenshot = viewer.screenshot(canvas_only=False)\n assert screenshot.ndim == 3\n\n\ndef test_changing_theme(make_napari_viewer):\n \"\"\"Test changing the theme updates the full window.\"\"\"\n viewer = make_napari_viewer(show=False)\n viewer.window.qt_viewer.set_welcome_visible(False)\n viewer.add_points(data=None)\n size = viewer.window.qt_viewer.size()\n viewer.window.qt_viewer.setFixedSize(size)\n\n assert viewer.theme == 'dark'\n screenshot_dark = viewer.screenshot(canvas_only=False)\n\n viewer.theme = 'light'\n assert viewer.theme == 'light'\n screenshot_light = viewer.screenshot(canvas_only=False)\n\n equal = (screenshot_dark == screenshot_light).min(-1)\n\n # more than 99.5% of the pixels have changed\n assert (np.count_nonzero(equal) / equal.size) < 0.05, \"Themes too similar\"\n\n with pytest.raises(ValueError):\n viewer.theme = 'nonexistent_theme'\n\n\[email protected]('layer_class, data, ndim', layer_test_data)\ndef test_roll_traspose_update(make_napari_viewer, layer_class, data, ndim):\n \"\"\"Check that transpose and roll preserve correct transform sequence.\"\"\"\n\n viewer = make_napari_viewer()\n\n np.random.seed(0)\n\n layer = add_layer_by_type(viewer, layer_class, data)\n\n # Set translations and scalings (match type of visual layer storing):\n transf_dict = {\n 'translate': np.random.randint(0, 10, ndim).astype(np.float32),\n 'scale': np.random.rand(ndim).astype(np.float32),\n }\n for k, val in transf_dict.items():\n setattr(layer, k, val)\n\n if layer_class in [layers.Image, layers.Labels]:\n transf_dict['translate'] -= transf_dict['scale'] / 2\n\n # Check consistency:\n check_view_transform_consistency(layer, viewer, transf_dict)\n\n # Roll dims and check again:\n viewer.dims._roll()\n check_view_transform_consistency(layer, viewer, transf_dict)\n\n # Transpose and check again:\n viewer.dims._transpose()\n check_view_transform_consistency(layer, viewer, transf_dict)\n\n\ndef test_toggling_axes(make_napari_viewer):\n \"\"\"Test toggling axes.\"\"\"\n viewer = make_napari_viewer()\n\n # Check axes are not visible\n assert not viewer.axes.visible\n\n # Make axes visible\n viewer.axes.visible = True\n assert viewer.axes.visible\n\n # Enter 3D rendering and check axes still visible\n viewer.dims.ndisplay = 3\n assert viewer.axes.visible\n\n # Make axes not visible\n viewer.axes.visible = False\n assert not viewer.axes.visible\n\n\ndef test_toggling_scale_bar(make_napari_viewer):\n \"\"\"Test toggling scale bar.\"\"\"\n viewer = make_napari_viewer()\n\n # Check scale bar is not visible\n assert not viewer.scale_bar.visible\n\n # Make scale bar visible\n viewer.scale_bar.visible = True\n assert viewer.scale_bar.visible\n\n # 
Enter 3D rendering and check scale bar is still visible\n viewer.dims.ndisplay = 3\n assert viewer.scale_bar.visible\n\n # Make scale bar not visible\n viewer.scale_bar.visible = False\n assert not viewer.scale_bar.visible\n\n\ndef test_removing_points_data(make_napari_viewer):\n viewer = make_napari_viewer()\n points = np.random.random((4, 2)) * 4\n\n pts_layer = viewer.add_points(points)\n pts_layer.data = np.zeros([0, 2])\n\n assert len(pts_layer.data) == 0\n\n\ndef test_deleting_points(make_napari_viewer):\n viewer = make_napari_viewer()\n points = np.random.random((4, 2)) * 4\n\n pts_layer = viewer.add_points(points)\n pts_layer.selected_data = {0}\n pts_layer.remove_selected()\n\n assert len(pts_layer.data) == 3\n" ]
[ [ "numpy.count_nonzero", "numpy.random.rand", "numpy.zeros", "numpy.random.seed", "numpy.sum", "numpy.random.randint", "numpy.random.random" ] ]
rskene/phat
[ "84a946e1e638642f36ce5fd81dc85aa89f7b66f0" ]
[ "src/phat/learn/dists.py" ]
[ "\"\"\"\nTensorflow does not support scipy, therefore, all distributions in \nphat/dists.py must be replicated via tensorflow_probability \n\"\"\"\n\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport tensorflow_probability as tfp\nfrom tensorflow_probability import distributions as tfd\n\nfrom tensorflow_probability.python.internal import reparameterization, dtype_util, \\\n tensor_util, parameter_properties\nfrom tensorflow_probability.python.bijectors import softplus as softplus_bijector, identity as identity_bijector\n\n### DEBUG UTILS ###\[email protected]\ndef find_nans(val):\n cond = tf.math.is_nan(val)\n idx = tf.where(cond)\n return idx\n\[email protected]\ndef find_zeros(val):\n cond = tf.math.equal(val, 0)\n idx = tf.where(cond)\n return idx\n\[email protected]\ndef if_not_empty(x, idx):\n if tf.not_equal(tf.size(idx), 0):\n return x[idx[0,0]]\n else:\n return tf.constant(0., dtype=np.float64)\n\n# idx = find_nans(####)\n# tf.print('loc', self.loc[0], self.scale[0], self.mean[0], self.std[0], self.shape[0])\n# tf.print('idx', if_not_empty(x, idx), -if_not_empty(x, idx) + 10**-1, idx)\n\ndef gptf_prob(\n gptf,\n x,\n model\n ):\n loc = tf.convert_to_tensor(gptf.loc)\n scale = tf.convert_to_tensor(gptf.scale)\n concentration = tf.convert_to_tensor(gptf.concentration)\n conc_zero = tf.equal(concentration, 0)\n conc = tf.where(conc_zero, tf.constant(1, gptf.dtype), concentration)\n conc_neg = tf.math.less(conc, 0)\n neg_max = loc - (scale / conc)\n\n z = gptf._z(x, scale)\n base = tf.ones_like(z, gptf.dtype) + z*conc\n pow_ = -1 / conc - tf.ones_like(z, gptf.dtype)\n # tf.print('\\n****RIGHT TAIL???', model.rtail)\n # tf.print('\\nXXXXXXX', x)\n # tf.print('\\nLOC', loc)\n # tf.print('\\nSCALE', scale)\n # tf.print('\\nZZZZZZZ', z)\n # tf.print('\\nCONC', conc)\n # tf.print('\\nBASE', base)\n # tf.print('POW_', pow_)\n \n base_pow_ = tf.math.pow(base, pow_) / gptf.scale\n\n # idx3 = find_nans(base_pow_)\n # tf.print('BASE_POW_', if_not_empty(base_pow_, idx3), idx3)\n # tf.print(base_pow_)\n \n # if tf.not_equal(tf.size(idx3), 0):\n # tf.print('RTail?', model.rtail)\n # tf.print('BASE_POW_', tf.gather_nd(base_pow_, idx3))\n # tf.print (tf.shape(conc), tf.shape(idx3))\n \n # tf.print('X', tf.shape(x), x[idx3[0,0]])\n \n # tf.print('Loc', loc[idx3[0,0]], tf.unique(loc))\n # tf.print(x<loc[0])\n # tf.print('X<loc', tf.shape(x[x<loc[0]]))\n # tf.print('Scale', tf.shape(gptf.scale), tf.unique(gptf.scale))\n # tf.print('Z', tf.shape(z), tf.unique(z[idx3[0,0]]))\n # tf.print('CONC_', conc[idx3[0,0]])\n # tf.print('BASE', tf.shape(base[idx3[0,0]]), tf.unique(base[idx3[0,0]]))\n \n # inval_conc = conc[idx3[0,0]]\n # inval_z = z[idx3[0,0]][0]\n # inval_scale = gptf.scale[0]\n # inval_base = 1 + inval_z*inval_conc\n # inval_pow = -1 / inval_conc - 1\n \n # tf.print('inval conc', inval_conc, 'inval base', inval_base, 'inval pow', inval_pow)\n # inval_base_pow = tf.math.pow(inval_base, inval_pow) / inval_scale\n # tf.print('inval base pow, PRE SCALE', tf.math.pow(inval_base, inval_pow))\n # tf.print('inval base pow', inval_base_pow)\n\n # def base_pow_(x, scale, conc, gptf):\n # return base_pow_\n\n # valid_x = tf.greater_equal(x, loc[0])\n # w_valid_x = tf.where(valid_x, calc_prob(x), tf.constant(0, gptf.dtype))\n\n w_negconc = tf.where((x>=loc) & (x<=neg_max), base_pow_, 0+10**-10)\n w_posconc = tf.where(x >= loc, base_pow_, 0+10**-10)\n \n return tf.where(\n conc_zero, z, tf.where(conc_neg, w_negconc, w_posconc)\n 
)\n\nclass CarbenBase4TF(tfd.Distribution): \n def __init__(self, mean, std, shape,\n validate_args=False, \n allow_nan_stats=True,\n name='CarbenBase'\n ):\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n dtype = tf.float64\n self._mean = tensor_util.convert_nonref_to_tensor(mean, dtype=dtype, name='mean')\n self._std = tensor_util.convert_nonref_to_tensor(std, dtype=dtype, name='std')\n self._shape = tensor_util.convert_nonref_to_tensor(shape, dtype=dtype, name='shape')\n self._loc = self._calc_loc()\n self._scale = self._calc_scale()\n self._body = tfd.Normal(self._mean, self._std)\n super(CarbenBase4TF, self).__init__(\n dtype=dtype,\n reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name\n )\n\n @classmethod\n def _parameter_properties(cls, dtype, num_classes=None):\n # pylint: disable=g-long-lambda\n return dict(\n mean=parameter_properties.ParameterProperties(),\n std=parameter_properties.ParameterProperties(\n default_constraining_bijector_fn=(\n lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))\n ),\n shape=parameter_properties.ParameterProperties()\n )\n # pylint: enable=g-long-lambda\n \n @property\n def mean(self):\n return self._mean\n \n @property\n def std(self):\n return self._std\n \n @property\n def shape(self):\n return self._shape\n\n @property\n def loc(self):\n return self._loc\n\n @property\n def scale(self):\n return self._scale\n\n @property\n def parameters(self):\n params = super().parameters\n params['loc'] = self.loc\n params['scale'] = self.scale\n return params\n\n def _z_for_W(self):\n \"\"\"\n in Carreau (2008), z defined as:\n\n (1 + shape)**2 / 2*pi\n \"\"\"\n num = (1 + self.shape)**2\n denom = 2*np.pi\n return num / denom \n\n def _W_z(self):\n \"\"\"\n in Carreau (2008), W(z) can be calculated directly as:\n \n std**2 * (1 + shape)**2 / scale**2\n\n scale is not a free parameter, however, so we must calculate W(z) via\n the Lambert function on z, which is defined only in free parameters.\n \"\"\"\n\n return tfp.math.lambertw(self._z_for_W())\n\n @property\n def gamma(self):\n val = tf.math.sqrt(self._W_z() / 2)\n return 1 + .5*(1 + tf.math.erf(val))\n \n def _calc_scale(self):\n num = self._std*(1 + self._shape)\n denom = tf.math.sqrt(self._W_z())\n return num / denom\n\n @property\n def body(self):\n if hasattr(self, '_body'):\n return self._body\n else:\n raise NotImplementedError\n\n @property\n def tail(self):\n if hasattr(self, '_tail'):\n return self._tail\n else:\n raise NotImplementedError\n\n def _log_prob(self, x):\n prob = self._prob(x)\n return tf.math.log(prob)\n\n def _event_shape_tensor(self):\n return tf.constant([], dtype=tf.int32)\n\n def _event_shape(self):\n return tf.TensorShape([])\n \n def _default_event_space_bijector(self):\n return identity_bijector.Identity(validate_args=self.validate_args)\n\nclass CarbenRight4TF(CarbenBase4TF):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._tail = tfd.GeneralizedPareto(loc=self._loc, scale=self._scale, concentration=self._shape)\n \n def _calc_loc(self):\n return self.mean + self.std*tf.math.sqrt(self._W_z())\n\n def _prob(self, x):\n \"\"\"\n Right tail only.\n where x>a,\n f(x) = f_t(x) / gamma\n \"\"\"\n x_tail = x > self.loc\n # tf.print('is _prob: any tail????', x_tail)\n # tf.print('Rtail?', self.rtail, 'does this work?', tf.shape(x[x>self.loc[0]]))\n prob = tf.where(\n x_tail,\n gptf_prob(self.tail, 
tf.where(x_tail, x, self.loc), self),\n self.body.prob(x) + 10**-10,\n )\n return prob / self.gamma\n\nclass CarbenLeft4TF(CarbenBase4TF):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._tail = tfd.GeneralizedPareto(loc=-self._loc, scale=self._scale, concentration=self._shape)\n \n def _calc_loc(self):\n return self.mean - self.std*tf.math.sqrt(self._W_z())\n\n def _prob(self, x):\n \"\"\"\n Left tail only.\n where x>a,\n f(x) = f_t(x) / gamma\n \"\"\"\n x_tail = x < self.loc\n # tf.print ('in _prob: right taill?????', self.rtail)\n # tf.print('in _prob: xxxxxx', x)\n # tf.print('in _prob: LOC', self.loc)\n # tf.print('is _prob: any tail????', x_tail)\n prob = tf.where(\n x < self.loc,\n gptf_prob(self.tail, tf.where(x_tail, -x, -self.loc), self),\n self.body.prob(x) + 10**-10,\n )\n return prob / self.gamma\n\nclass CarbenHybrid4TF:\n def __new__(cls, *args, **kwargs):\n args = list(args)\n shape_is_arg = len(args) >= 2\n shape = args[2] if shape_is_arg else kwargs['shape']\n \n rtail_is_arg = len(args) >= 4\n if rtail_is_arg:\n rtail = args.pop(3)\n else:\n rtail = kwargs.pop('rtail') if 'rtail' in kwargs else True\n\n if isinstance(shape, float):\n if shape < 0 and not rtail:\n txt = 'If you provide a negative shape parameter,'\n txt += ' do not provide `rtail`'\n txt += ' as a left-tailed Carben is computed automatically.'\n raise ValueError(txt) \n elif shape < 0:\n rtail = False\n if shape_is_arg:\n args[2] = -shape\n else:\n kwargs['shape'] = -shape\n \n if rtail:\n kwargs['name'] = 'CarbenRight'\n obj = CarbenRight4TF(*args, **kwargs)\n obj.rtail = True\n else:\n kwargs['name'] = 'CarbenLeft' \n obj = CarbenLeft4TF(*args, **kwargs)\n obj.rtail = False\n \n return obj\n\nclass PhatMixture(tfd.Mixture):\n def __repr__(self):\n prepend = '<Phat4TF (inherits from tfp.distributions.Mixture) '\n append = ' '.join(super().__repr__().split(' ')[2:])\n return prepend + append\n\n def __str__(self):\n return str(self.__repr__())\n\nclass Phat4TF:\n def __new__(cls, mean, std, shape_l, shape_r, mix=.5):\n m = tf.shape(mean)[0]\n mix = tf.constant([mix, 1-mix], dtype=tf.float64)\n p = tf.ones([m, 1], dtype=tf.float64) * mix\n \n c1 = CarbenHybrid4TF(mean, std, shape_l, rtail=False)\n c2 = CarbenHybrid4TF(mean, std, shape_r)\n \n obj = PhatMixture(\n cat=tfd.Categorical(probs=p),\n components=[c1,c2]\n )\n obj.left = obj.components[0]\n obj.right = obj.components[1]\n return obj\n\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.size", "tensorflow.shape", "tensorflow.where", "tensorflow.equal", "tensorflow.math.less", "tensorflow.ones_like", "tensorflow.math.log", "tensorflow.TensorShape", "tensorflow.ones", "tensorflow.constant", "tensorflow.math.pow", "tensorflow.math.erf", "tensorflow.math.is_nan", "tensorflow.name_scope", "tensorflow.math.equal" ] ]
Drizzy3D/OCROnWebpages
[ "35a5faea5f682f669adf2016a7e28d3329c72da8" ]
[ "evaluation/visualise.py" ]
[ "from optparse import OptionParser\nfrom pathlib import Path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport json\nimport re\nimport csv\nfrom matplotlib import rcParams\nimport collections\n\nrcParams['font.family'] = 'serif'\nrcParams['font.sans-serif'] = ['Palatino']\nrcParams['font.serif'] = ['Palatino']\nrcParams[\"font.size\"] = \"10\"\nrcParams['text.usetex'] ='false'\nrcParams[\"font.weight\"] = \"normal\" # does not work :/\nrcParams[\"axes.labelweight\"] = \"normal\" # does not work :/\nrcParams['figure.dpi'] = \"300\" \n\ndef main():\n parser = OptionParser()\n parser.add_option( '-i',\n '--input',\n dest = 'input',\n metavar = 'FILE' )\n parser.add_option( '-o',\n '--output',\n dest = 'output',\n metavar = 'FOLDER' )\n (options, _) = parser.parse_args()\n\n in_path = str(Path(options.input))\n out_path = str(Path(options.output))\n\n visualise_crawl(in_path, out_path)\n # visualise_evaluation(in_path, out_path)\n\ndef visualise_crawl(in_path, out_path):\n with open(in_path, 'r') as f:\n log = json.load(f)\n\n for dic in log.keys():\n if dic == 'succeeded' or dic == 'failed':\n continue\n\n # BAR\n xs = [i * 100 for i in list(log[dic].values())[:10]]\n ys = list(log[dic].keys())[:10]\n\n plt.barh(ys, xs, color='b')\n\n # plt.title(dic)\n plt.xlabel('Occurences in %')\n plt.ylabel('Attributes')\n\n plt.tight_layout()\n\n save_path: str = str(Path(out_path).joinpath('bar').joinpath(dic)) + '.pdf'\n Path(save_path).parent.mkdir(parents=True, exist_ok=True)\n plt.savefig(save_path, bbox_inches='tight')\n \n plt.clf()\n\n # Dots\n if dic == 'font_weight_dict' or dic == 'font_size_dict':\n temp = {}\n\n for i, (k, v) in enumerate(log[dic].items()):\n if i >= 10:\n break\n temp[k] = v\n od = collections.OrderedDict(sorted(temp.items()))\n\n xs = [i * 100 for i in list(od.values())[:10]]\n ys = list(od.keys())[:10]\n\n plt.plot(ys, xs, 'bo')\n\n # plt.title(dic)\n plt.xlabel('Attributes')\n plt.ylabel('Occurences in %')\n\n plt.tight_layout()\n\n save_path: str = str(Path(out_path).joinpath('dot').joinpath(dic)) + '.pdf'\n Path(save_path).parent.mkdir(parents=True, exist_ok=True)\n plt.savefig(save_path, bbox_inches='tight')\n\n plt.clf()\n\n # PIE\n # plt.pie(labels=list(log[dic].keys())[:10], x=list(log[dic].values())[:10])\n plt.pie(labels=list(log[dic].keys()), x=list(log[dic].values()))\n\n # plt.title(dic)\n plt.tight_layout()\n\n save_path: str = str(Path(out_path).joinpath('pie').joinpath(dic)) + '.pdf'\n Path(save_path).parent.mkdir(parents=True, exist_ok=True)\n plt.savefig(save_path, bbox_inches='tight')\n plt.clf()\n # plt.show()\n\ndef visualise_evaluation(in_path, out_path):\n # path,tp_l,fp_l,fn_l,tp_d,fp_d,fn_d,time_l,time_d,time_c\n\n results: [{str: str}] = []\n cp_reg = r'cp(\\d+)'\n lp_reg = r'lp(\\d+)'\n for path in sorted(Path(in_path).rglob('*.csv')):\n cp: str = re.search(cp_reg, path.name).groups()[0]\n cp = cp[:1] + '.' + cp[1:]\n lp: str = re.search(lp_reg, path.name).groups()[0]\n lp = lp[:1] + '.' 
+ lp[1:]\n\n tp_l_complete: int = 0\n fp_l_complete: int = 0\n fn_l_complete: int = 0\n t_d_complete: int = 0\n f_d_complete: int = 0\n time_l_complete: int = 0\n time_d_complete: int = 0\n time_c_complete: int = 0\n entries: int = 0\n\n with open(path) as f:\n reader = csv.reader(f)\n for i, l in enumerate(reader):\n if i > 0:\n tp_l_complete += int(l[1])\n fp_l_complete += int(l[2])\n fn_l_complete += int(l[3])\n t_d_complete += int(l[4])\n f_d_complete += int(l[5])\n time_l_complete += int(l[6])\n time_d_complete += int(l[7])\n time_c_complete += int(l[8])\n entries += 1\n\n # LOCALISATION\n accuracy_l: float = -1.0\n precision_l: float = -1.0\n recall_l: float = -1.0\n fone_score_l: float = -1.0\n try:\n accuracy_l = (tp_l_complete) / (tp_l_complete + fp_l_complete + fn_l_complete)\n except:\n pass\n try:\n precision_l = (tp_l_complete) / (tp_l_complete + fp_l_complete)\n except:\n pass\n try:\n recall_l = (tp_l_complete) / (tp_l_complete + fn_l_complete)\n except:\n pass\n try:\n fone_score_l = 2 * (precision_l * recall_l) / (precision_l + recall_l)\n except:\n pass\n\n # DETERMINATION\n precision_d: float = -1.0\n try:\n precision_d = (t_d_complete) / (t_d_complete + f_d_complete)\n except:\n pass\n\n results.append({'cp': cp, 'lp': lp, 'accuracy_l': accuracy_l, 'precision_l': precision_l, 'recall_l': recall_l, 'fone_score_l': fone_score_l, 'precision_d': precision_d, 'time_l_complete': time_l_complete, 'time_d_complete': time_d_complete, 'entries': entries})\n\n\n cps: [str] = sorted(list(set([r['cp'] for r in results])))\n lps: [str] = sorted(list(set([r['lp'] for r in results])))\n # Localisation\n accuracy_ls = np.zeros((len(cps),len(lps)))\n precision_ls = np.zeros((len(cps),len(lps)))\n recall_ls = np.zeros((len(cps),len(lps)))\n fone_score_ls = np.zeros((len(cps),len(lps)))\n\n accuracy_ll = np.zeros(len(cps))\n precision_ll = np.zeros(len(cps))\n recall_ll = np.zeros(len(cps))\n fone_score_ll = np.zeros(len(cps))\n\n time_l_complete = 0\n time_d_complete = 0\n entries_complete = 0\n\n for r in results:\n time_l_complete += r['time_l_complete']\n time_d_complete += r['time_d_complete']\n entries_complete += r['entries']\n time_c_complete = time_l_complete + time_d_complete\n\n mean_time_l = (time_l_complete / entries_complete) / 1000 / 1000\n mean_time_d = (time_d_complete / entries_complete) / 1000 / 1000\n mean_time_c = (time_c_complete / entries_complete) / 1000 / 1000\n\n print('mean_time_l (in s): ' + str(mean_time_l))\n print('mean_time_d (in s): ' + str(mean_time_d))\n print('mean_time_c (in s): ' + str(mean_time_c))\n\n # Determination\n precision_ds = np.zeros((len(cps),len(lps)))\n for i, cp in enumerate(cps):\n for j, lp in enumerate(lps):\n for d in results:\n if d['cp'] == cp and d['lp'] == lp:\n accuracy_ls[i, j] = d['accuracy_l'] * 100\n precision_ls[i, j] = d['precision_l'] * 100\n recall_ls[i, j] = d['recall_l'] * 100\n fone_score_ls[i, j] = d['fone_score_l'] * 100\n precision_ds[i, j] = d['precision_d'] * 100\n\n accuracy_ll[i] = d['accuracy_l'] * 100\n precision_ll[i] = d['precision_l'] * 100\n recall_ll[i] = d['recall_l'] * 100\n fone_score_ll[i] = d['fone_score_l'] * 100\n break\n\n # save_hm(accuracy_ls, 'accuracy_ls', cps, lps, out_path)\n # save_hm(precision_ls, 'precision_ls', cps, lps, out_path)\n # save_hm(recall_ls, 'recall_ls', cps, lps, out_path)\n # save_hm(fone_score_ls, 'fone_score_ls', cps, lps, out_path)\n\n a_plot, = create_dots(accuracy_ll, 'accuracy_ls', cps, 'r', out_path)\n p_plot, = create_dots(precision_ll, 'precision_ls', cps, 'b', 
out_path)\n r_plot, = create_dots(recall_ll, 'recall_ls', cps, 'g', out_path)\n f_plot, = create_dots(fone_score_ll, 'fone_score_ls', cps, 'y', out_path)\n\n plt.legend([a_plot, p_plot, r_plot, f_plot], ['Accuracy', 'Precision', 'Recall', 'F1 Score'])\n\n save_path = str(Path(out_path).joinpath('determination')) + '.pdf'\n Path(save_path).parent.mkdir(parents=True, exist_ok=True)\n plt.savefig(save_path, bbox_inches='tight')\n plt.clf()\n\n save_hm(precision_ds, 'precision_ds', cps, lps, out_path)\n\n\ndef create_dots(ys, label, xs, color, out_path):\n\n # plt.axis(ys)\n\n plt.xlabel('cp')\n plt.ylabel('Result')\n return plt.plot(xs, ys, color + 'o')\n\n\ndef show_hm(values, label, cps, lps):\n fig, ax = plt.subplots()\n\n im, cbar = heatmap(values, cps, lps, ax=ax,\n cmap=\"YlGn\", cbarlabel=(label + ' in %'))\n texts = annotate_heatmap(im, valfmt=\"{x:.2f}%\")\n\n fig.tight_layout()\n plt.show()\n # plt.clf()\n\ndef save_hm(values, label, cps, lps, out_path):\n fig, ax = plt.subplots()\n plt.xlabel(\"lp\")\n plt.ylabel(\"cp\")\n\n im, cbar = heatmap(values, cps, lps, ax=ax,\n cmap=\"YlGn\", cbarlabel=(label + ' in %'))\n texts = annotate_heatmap(im, valfmt=\"{x:.2f}%\")\n\n fig.tight_layout()\n\n save_path = str(Path(out_path).joinpath(label)) + '.pdf'\n Path(save_path).parent.mkdir(parents=True, exist_ok=True)\n plt.savefig(save_path, bbox_inches='tight')\n\ndef heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n \"\"\"\n Create a heatmap from a numpy array and two lists of labels.\n\n Parameters\n ----------\n data\n A 2D numpy array of shape (N, M).\n row_labels\n A list or array of length N with the labels for the rows.\n col_labels\n A list or array of length M with the labels for the columns.\n ax\n A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If\n not provided, use current axes or create a new one. Optional.\n cbar_kw\n A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.\n cbarlabel\n The label for the colorbar. Optional.\n **kwargs\n All other arguments are forwarded to `imshow`.\n \"\"\"\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on bottom.\n ax.tick_params(top=False, bottom=True,\n labeltop=False, labelbottom=True)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar\n\ndef annotate_heatmap(im, data=None, valfmt=\"{x:.2f}\",\n textcolors=[\"black\", \"white\"],\n threshold=None, **textkw):\n \"\"\"\n A function to annotate a heatmap.\n\n Parameters\n ----------\n im\n The AxesImage to be labeled.\n data\n Data used to annotate. If None, the image's data is used. 
Optional.\n valfmt\n The format of the annotations inside the heatmap. This should either\n use the string format method, e.g. \"$ {x:.2f}\", or be a\n `matplotlib.ticker.Formatter`. Optional.\n textcolors\n A list or array of two color specifications. The first is used for\n values below a threshold, the second for those above. Optional.\n threshold\n Value in data units according to which the colors from textcolors are\n applied. If None (the default) uses the middle of the colormap as\n separation. Optional.\n **kwargs\n All other arguments are forwarded to each call to `text` used to create\n the text labels.\n \"\"\"\n\n if not isinstance(data, (list, np.ndarray)):\n data = im.get_array()\n\n # Normalize the threshold to the images color range.\n if threshold is not None:\n threshold = im.norm(threshold)\n else:\n threshold = im.norm(data.max())/2.\n\n # Set default alignment to center, but allow it to be\n # overwritten by textkw.\n kw = dict(horizontalalignment=\"center\",\n verticalalignment=\"center\")\n kw.update(textkw)\n\n # Get the formatter in case a string is supplied\n if isinstance(valfmt, str):\n valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)\n\n # Loop over the data and create a `Text` for each \"pixel\".\n # Change the text's color depending on the data.\n texts = []\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])\n text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)\n texts.append(text)\n\n return texts\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "matplotlib.pyplot.barh", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.tight_layout", "numpy.arange", "matplotlib.pyplot.clf", "matplotlib.pyplot.gca", "matplotlib.ticker.StrMethodFormatter" ] ]
itruonghai/mmaction2
[ "ea94bfd9d2e43289a123276d3ec11d0deb887357" ]
[ "mmaction/models/recognizers/base.py" ]
[ "from abc import ABCMeta, abstractmethod\nfrom collections import OrderedDict\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.runner import auto_fp16\n\nfrom .. import builder\n\n\nclass BaseRecognizer(nn.Module, metaclass=ABCMeta):\n \"\"\"Base class for recognizers.\n\n All recognizers should subclass it.\n All subclass should overwrite:\n\n - Methods:``forward_train``, supporting to forward when training.\n - Methods:``forward_test``, supporting to forward when testing.\n\n Args:\n backbone (dict): Backbone modules to extract feature.\n cls_head (dict): Classification head to process feature.\n train_cfg (dict): Config for training. Default: None.\n test_cfg (dict): Config for testing. Default: None.\n \"\"\"\n\n def __init__(self,\n backbone,\n cls_head,\n neck=None,\n train_cfg=None,\n test_cfg=None):\n super().__init__()\n self.backbone = builder.build_backbone(backbone)\n if neck is not None:\n self.neck = builder.build_neck(neck)\n self.cls_head = builder.build_head(cls_head)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n # aux_info is the list of tensor names beyond 'imgs' and 'label' which\n # will be used in train_step and val_step, data_batch should contain\n # these tensors\n self.aux_info = []\n if train_cfg is not None and 'aux_info' in train_cfg:\n self.aux_info = train_cfg['aux_info']\n\n self.init_weights()\n\n self.fp16_enabled = False\n\n def init_weights(self):\n \"\"\"Initialize the model network weights.\"\"\"\n self.backbone.init_weights()\n self.cls_head.init_weights()\n if hasattr(self, 'neck'):\n self.neck.init_weights()\n\n @auto_fp16()\n def extract_feat(self, imgs):\n \"\"\"Extract features through a backbone.\n\n Args:\n imgs (torch.Tensor): The input images.\n\n Returns:\n torch.tensor: The extracted features.\n \"\"\"\n x = self.backbone(imgs)\n return x\n\n def average_clip(self, cls_score, num_segs=1):\n \"\"\"Averaging class score over multiple clips.\n\n Using different averaging types ('score' or 'prob' or None,\n which defined in test_cfg) to computed the final averaged\n class score.\n\n Args:\n cls_score (torch.Tensor): Class score to be averaged.\n\n return:\n torch.Tensor: Averaged class score.\n \"\"\"\n if 'average_clips' not in self.test_cfg.keys():\n raise KeyError('\"average_clips\" must defined in test_cfg\\'s keys')\n\n average_clips = self.test_cfg['average_clips']\n if average_clips not in ['score', 'prob', None]:\n raise ValueError(f'{average_clips} is not supported. 
'\n f'Currently supported ones are '\n f'[\"score\", \"prob\", None]')\n\n if average_clips is None:\n return cls_score\n\n batch_size = cls_score.shape[0]\n cls_score = cls_score.view(batch_size // num_segs, num_segs, -1)\n\n if average_clips == 'prob':\n cls_score = F.softmax(cls_score, dim=2).mean(dim=1)\n elif average_clips == 'score':\n cls_score = cls_score.mean(dim=1)\n\n return cls_score\n\n @abstractmethod\n def forward_train(self, imgs, labels, **kwargs):\n \"\"\"Defines the computation performed at every call when training.\"\"\"\n pass\n\n @abstractmethod\n def forward_test(self, imgs):\n \"\"\"Defines the computation performed at every call when evaluation and\n testing.\"\"\"\n pass\n\n @staticmethod\n def _parse_losses(losses):\n \"\"\"Parse the raw outputs (losses) of the network.\n\n Args:\n losses (dict): Raw output of the network, which usually contain\n losses and other necessary information.\n\n Returns:\n tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor\n which may be a weighted sum of all losses, log_vars contains\n all the variables to be sent to the logger.\n \"\"\"\n log_vars = OrderedDict()\n for loss_name, loss_value in losses.items():\n if isinstance(loss_value, torch.Tensor):\n log_vars[loss_name] = loss_value.mean()\n elif isinstance(loss_value, list):\n log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)\n else:\n raise TypeError(\n f'{loss_name} is not a tensor or list of tensors')\n\n loss = sum(_value for _key, _value in log_vars.items()\n if 'loss' in _key)\n\n log_vars['loss'] = loss\n for loss_name, loss_value in log_vars.items():\n # reduce loss when distributed training\n if dist.is_available() and dist.is_initialized():\n loss_value = loss_value.data.clone()\n dist.all_reduce(loss_value.div_(dist.get_world_size()))\n log_vars[loss_name] = loss_value.item()\n\n return loss, log_vars\n\n def forward(self, imgs, label=None, return_loss=True, **kwargs):\n \"\"\"Define the computation performed at every call.\"\"\"\n if return_loss:\n if label is None:\n raise ValueError('Label should not be None.')\n return self.forward_train(imgs, label, **kwargs)\n else:\n return self.forward_test(imgs, **kwargs)\n\n def train_step(self, data_batch, optimizer, **kwargs):\n \"\"\"The iteration step during training.\n\n This method defines an iteration step during training, except for the\n back propagation and optimizer updating, which are done in an optimizer\n hook. Note that in some complicated cases or models, the whole process\n including back propagation and optimizer updating is also defined in\n this method, such as GAN.\n\n Args:\n data_batch (dict): The output of dataloader.\n optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of\n runner is passed to ``train_step()``. 
This argument is unused\n and reserved.\n\n Returns:\n dict: It should contain at least 3 keys: ``loss``, ``log_vars``,\n ``num_samples``.\n ``loss`` is a tensor for back propagation, which can be a\n weighted sum of multiple losses.\n ``log_vars`` contains all the variables to be sent to the\n logger.\n ``num_samples`` indicates the batch size (when the model is\n DDP, it means the batch size on each GPU), which is used for\n averaging the logs.\n \"\"\"\n imgs = data_batch['imgs']\n label = data_batch['label']\n\n aux_info = {}\n for item in self.aux_info:\n assert item in data_batch\n aux_info[item] = data_batch[item]\n\n losses = self(imgs, label, return_loss=True, **aux_info)\n\n loss, log_vars = self._parse_losses(losses)\n\n outputs = dict(\n loss=loss,\n log_vars=log_vars,\n num_samples=len(next(iter(data_batch.values()))))\n\n return outputs\n\n def val_step(self, data_batch, optimizer, **kwargs):\n \"\"\"The iteration step during validation.\n\n This method shares the same signature as :func:`train_step`, but used\n during val epochs. Note that the evaluation after training epochs is\n not implemented with this method, but an evaluation hook.\n \"\"\"\n imgs = data_batch['imgs']\n label = data_batch['label']\n\n aux_info = {}\n for item in self.aux_info:\n aux_info[item] = data_batch[item]\n\n losses = self(imgs, label, return_loss=True, **aux_info)\n\n loss, log_vars = self._parse_losses(losses)\n\n outputs = dict(\n loss=loss,\n log_vars=log_vars,\n num_samples=len(next(iter(data_batch.values()))))\n\n return outputs\n" ]
[ [ "torch.distributed.is_available", "torch.distributed.is_initialized", "torch.distributed.get_world_size", "torch.nn.functional.softmax" ] ]
economicnetwork/archon
[ "2090d0568f198e08c0c6f0ae535ab06630e1db9b" ]
[ "examples/bitmex/pandas.py" ]
[ "from numpy import array\nimport archon.broker as broker\nimport archon.exchange.exchanges as exc\nimport archon.exchange.bitmex.bitmex as mex\nimport archon.exchange.exchanges as exc\nimport archon.exchange.bitmex.bitmex as mex\nimport datetime\nfrom archon.brokerservice.brokerservice import Brokerservice\nfrom util import *\n\nfrom archon.util import *\n\nimport pandas as pd\nimport numpy\nimport matplotlib.pyplot as plt\nfrom arctic import Arctic\n\nimport argparse\nimport json\nimport csv\nimport sys\nimport time\n\n\nfrom arctic import Arctic\nimport quandl\n\nbroker = Brokerservice()\n\ndef setup_broker():\n user_id = parse_toml(\"conf.toml\")[\"user_id\"]\n broker.set_apikeys_fromfile(user_id)\n broker.activate_session(user_id)\n broker.set_client(exc.BITMEX) \n return broker\n\ncandles = client.trades_candle(\"XBTUSD\", mex.candle_1d)\ncandles.reverse()\ncloses = list()\nCOL_CLOSE = 'close'\nCOL_VOLUME = 'volume'\n\ncloses = [float(z[COL_CLOSE]) for z in candles]\nvolumes = [float(z[COL_VOLUME]) for z in candles]\ndates = [z['timestamp'] for z in candles]\n\nraw_data = {'close': closes, 'volume': volumes}\n\ndf = pd.DataFrame(raw_data, index=dates, columns = ['close', 'volume'])" ]
[ [ "pandas.DataFrame" ] ]
tsetimmy/kusanagi
[ "868b301b6064af7d21f0f716aa4390e402f2e2ec", "868b301b6064af7d21f0f716aa4390e402f2e2ec" ]
[ "kusanagi/utils/distributions.py", "kusanagi/base/ExperienceDataset.py" ]
[ "import numpy as np\n\n\nclass Distribution(object):\n '''\n Base class for distribution. Useful for estimating and sampling\n initial state distributions\n '''\n def fit(data):\n raise NotImplementedError\n\n def sample(self, n_samples=1):\n raise NotImplementedError\n\n @property\n def dim(self):\n return self.__dim\n\n @dim.setter\n def dim(self, dim):\n self.__dim = dim\n\n\nclass Delta(Distribution):\n def __init__(self, a):\n self.a = a\n\n def sample(self, n_samples=1):\n return np.tile(self.a, (n_samples, 1))\n\n\nclass Gaussian(Distribution):\n def __init__(self, mean, cov):\n self.mean = np.array(mean)\n self.cov = np.array(cov)\n self.dim = self.mean.size\n\n @property\n def cov(self):\n return self.__cov\n\n @cov.setter\n def cov(self, cov):\n self.__cov = cov\n if cov is not None:\n assert cov.shape[0] == cov.shape[1]\n self.cov_chol = np.linalg.cholesky(cov)\n\n def sample(self, n_samples=1):\n return self.mean + np.random.randn(\n n_samples, self.mean.size).dot(self.cov_chol)\n\n def __call__(self, mean=None, cov=None, n_samples=1):\n if mean is not None:\n self.mean = mean\n if cov is not None:\n self.cov = cov\n return self.sample(n_samples)\n", "import theano\nimport numpy as np\nfrom kusanagi import utils\nfrom kusanagi.base.Loadable import Loadable\n\n\nclass ExperienceDataset(Loadable):\n ''' Class used to store data from runs with a learning agent'''\n def __init__(self, name='Experience', filename_prefix=None, filename=None):\n self.name = name\n self.time_stamps = []\n self.states = []\n self.actions = []\n self.costs = []\n self.info = []\n self.policy_parameters = []\n self.curr_episode = -1\n self.state_changed = True\n if filename is not None:\n self.filename = filename\n else:\n self.filename = (self.name+'_dataset'\n if filename_prefix is None\n else filename_prefix+'_dataset')\n utils.print_with_stamp(\n 'Initialising new experience dataset', self.name)\n\n Loadable.__init__(self, name=name, filename=self.filename)\n\n # if a filename was passed, try loading it\n if filename is not None:\n self.load()\n\n self.register_types([list])\n self.register(['curr_episode'])\n\n def load(self, output_folder=None, output_filename=None):\n ''' Loads the state from file, and initializes additional variables'''\n # load state\n ret = super(ExperienceDataset, self).load(\n output_folder, output_filename)\n\n # if the policy parameters were saved as shared variables\n for i in range(len(self.policy_parameters)):\n pi = self.policy_parameters[i]\n for j in range(len(pi)):\n pij = self.policy_parameters[i][j]\n if isinstance(pij, theano.tensor.sharedvar.SharedVariable):\n self.policy_parameters[i][j] = pij.get_value()\n return ret\n\n def add_sample(self, x_t=None, u_t=None, c_t=None, info=None, t=None):\n '''\n Adds new set of observations to the current episode\n '''\n curr_episode = self.curr_episode\n if curr_episode < 0:\n self.new_episode()\n self.states[curr_episode].append(x_t)\n self.actions[curr_episode].append(u_t)\n self.costs[curr_episode].append(c_t)\n self.info[curr_episode].append(info)\n self.time_stamps[curr_episode].append(t)\n self.state_changed = True\n\n def new_episode(self, policy_params=None):\n '''\n Adds new episode to the experience dataset\n '''\n self.time_stamps.append([])\n self.states.append([])\n self.actions.append([])\n self.costs.append([])\n self.info.append([])\n if policy_params:\n self.policy_parameters.append(policy_params)\n else:\n self.policy_parameters.append([])\n\n self.curr_episode += 1\n self.state_changed = True\n\n def 
append_episode(self, states, actions, costs,\n infos=None, policy_params=None, ts=None):\n if policy_params is not None:\n self.policy_parameters.append(policy_params)\n if infos is not None:\n self.info.append(infos)\n if ts is not None:\n self.time_stamps.append(ts)\n self.states.append(states)\n self.actions.append(actions)\n self.costs.append(costs)\n\n def n_samples(self):\n ''' Returns the total number of samples in this dataset '''\n return sum([len(s) for s in self.states])\n\n def n_episodes(self):\n ''' Returns the total number of episodes in this dataset '''\n return len(self.states)\n\n def reset(self):\n ''' Empties the internal data structures'''\n fmt = 'Resetting experience dataset'\n fmt += '(WARNING: data from %s will be overwritten)'\n utils.print_with_stamp(fmt % (self.filename), self.name)\n self.time_stamps = []\n self.states = []\n self.actions = []\n self.costs = []\n self.info = []\n self.policy_parameters = []\n self.curr_episode = -1\n # Let's give people a last chance of recovering their data. Also, we\n # don't want to save an empty experience dataset\n self.state_changed = False\n\n def truncate(self, episode):\n ''' Resets the experience to start from the given episode number'''\n if episode <= self.curr_episode and episode > 0:\n # Let's give people a last chance of recovering their data. Also,\n # we don't want to save an empty experience dataset\n fmt = 'Resetting experience dataset to episode %d'\n fmt += ' (WARNING: data from %s will be overwritten)'\n utils.print_with_stamp(fmt % (episode, self.filename), self.name)\n self.curr_episode = episode\n self.time_stamps = self.time_stamps[:episode]\n self.states = self.states[:episode]\n self.actions = self.actions[:episode]\n self.costs = self.costs[:episode]\n self.info = self.info[:episode]\n self.policy_parameters = self.policy_parameters[:episode]\n self.state_changed = True\n\n def get_dynmodel_dataset(self, deltas=True, filter_episodes=None,\n angle_dims=None, x_steps=1,\n u_steps=1, output_steps=1, return_costs=False,\n stack=False):\n '''\n Returns a dataset where the inputs are state_actions and the outputs\n are next steps.\n Parameters:\n -----------\n deltas: wheter to return changes in state\n (x_t - x_{t-1}, x_{t-1} - x_{t-2}, ...)\n or future states\n (x_t, x_{t-1}, x_{t-2}, ...), in the output\n filter_episodes: list containing episode indices to extract from\n which to extract data.\n if list empty or undefined ( equal to None ),\n extracts data from all episodes\n angle_dims: indices of input state dimensions to linearize, by\n converting to complex\n representation \\theta => (sin(\\theta), cos(\\theta))\n x_steps: how many steps in the past to concatenate as input\n u_steps: how many steps in the past to concatenate as input\n output_steps: how many steps in the future to concatenate as output\n return_costs: whether to append the cost feedback to the output\n stack: whether to stack or concatenate the multi step data\n Returns:\n --------\n X: if stack is False X is a numpy array of shape\n [n, x_steps*D + u_steps*U], where n is the number of data samples,\n D the input state dimensions.abs if stack is True, the shape of X\n is [n, x_steps, D + U]\n '''\n filter_episodes = filter_episodes or []\n angle_dims = angle_dims or []\n inputs, targets = [], []\n join = np.stack if stack else np.concatenate\n if stack:\n # ignore the u_steps parameter\n u_steps = x_steps\n # output steps\n output_steps = x_steps + output_steps - 1\n\n if not isinstance(filter_episodes, list):\n filter_episodes = 
[filter_episodes]\n if len(filter_episodes) < 1:\n # use all data\n filter_episodes = list(range(self.n_episodes()))\n for epi in filter_episodes:\n if len(self.states[epi]) == 0:\n continue\n # get state action pairs for current episode\n states, actions = np.array(\n self.states[epi]), np.array(self.actions[epi])\n # convert input angle dimensions to complex representation\n states_ = utils.gTrig_np(np.array(states), angle_dims)\n # pad with initial state for the first x_steps timesteps\n states_ = np.concatenate([states_[[0]*(x_steps-1)], states_])\n # get input states up to x_steps in the past.\n\n states_ = join(\n [states_[i:i-x_steps-(output_steps-1), :]\n for i in range(x_steps)],\n axis=1)\n # same for actions (u_steps in the past, pad with zeros for the\n # first u_steps)\n actions_ = np.concatenate(\n [np.zeros((u_steps-1, actions.shape[1])), actions])\n actions_ = join(\n [actions_[i:i-u_steps-(output_steps-1), :]\n for i in range(u_steps)],\n axis=1)\n\n # create input vector\n inp = np.concatenate([states_, actions_], axis=-1)\n\n # get output states up to output_steps in the future\n H = states.shape[0]\n ostates = join(\n [states[i:H-(output_steps-i-1), :]\n for i in range(output_steps)],\n axis=1)\n\n # create output vector\n tgt = (ostates[1:, :] - ostates[:-1, :]\n if deltas else ostates[1:, :])\n\n # append costs if requested\n if return_costs:\n costs = np.array(self.costs[epi])\n ocosts = join(\n [costs[i:H-(output_steps-i-1), :]\n for i in range(output_steps)],\n axis=1)\n\n tgt = np.concatenate([tgt, ocosts[:-1, :]], axis=-1)\n\n inputs.append(inp)\n targets.append(tgt)\n\n ret = np.concatenate(inputs), np.concatenate(targets)\n return ret\n\n def sample_states(self, n_samples=1, timestep=0):\n # collect initial states\n x0 = [ep[timestep] for ep in self.states]\n # sample indices\n idx = np.random.choice(range(len(x0)), n_samples)\n return np.array(x0)[idx]\n" ]
[ [ "numpy.array", "numpy.random.randn", "numpy.tile", "numpy.linalg.cholesky" ], [ "numpy.concatenate", "numpy.array", "numpy.zeros" ] ]
zhh2005757/slambook2_in_Docker
[ "f0e71327d196cdad3b3c10d96eacdf95240d528b" ]
[ "slambook2/3rdparty/Pangolin/pyexamples/SimpleVideo.py" ]
[ "import sys\r\nfrom pathlib import Path\r\nimport os\r\nfrom matplotlib import pyplot as plt\r\nimport argparse\r\nimport numpy as np\r\nimport time\r\n\r\n\r\n# add pangolin to PYTHONPATH\r\nhomeDir = str(Path.home())\r\nsys.path.append(os.path.join(homeDir, 'ws/Pangolin/build/src'))\r\n\r\n# import pypangolin\r\nimport pypangolin as pango\r\n\r\ndef main(flags):\r\n\tvid_uri = flags.pango\r\n\tvout_uri = flags.pangoOut\r\n\tstream = flags.stream\r\n\tif vout_uri is None:\r\n\t\tvout_uri = \"pango://demo.pango\"\r\n\r\n\tvid = pango.VideoInput(vid_uri)\r\n\tvout = pango.VideoOutput(vout_uri)\r\n\r\n # print metadata\r\n\tprint(\"Opened video uri: '{}' with {} x {} dimensions\".format(\r\n\t\tvid_uri,vid.Width(),vid.Height()))\r\n\tfmt = vid.PixFormat()\r\n\tprint(\"format: channels: {}, channel_bits: {}, planar: {}\".format(\r\n\t\tfmt.channels, fmt.bpp, fmt.planar))\r\n\r\n # initialize display\r\n\tallStreams = vid.Grab()\t\r\n\tnumstreams = len(allStreams)\r\n\r\n\tstreams = [stream] if stream else list(range(numstreams))\r\n\tassert streams[-1] < numstreams, 'specified stream {} is out of bnd'.format(stream)\r\n\r\n\tfig, axes = plt.subplots(len(streams), 1, figsize=(12, 12*len(streams)), squeeze=False)\r\n\tfig.show()\r\n\r\n # show each frame\r\n\tframeCounter = 0\r\n\twhile (allStreams):\r\n\t\t# if frameCounter > 20:\r\n\t\t# \tbreak\r\n\t\tvout.WriteStreams(allStreams);\r\n\t\tfor i, s in enumerate(streams):\r\n\t\t\tarr = allStreams[s]\r\n\t\t\t# print(arr.shape)\r\n\t\t\taxes[i,0].cla()\r\n\t\t\tif arr.shape[-1] == 1:\r\n\t\t\t\taxes[i,0].imshow(np.squeeze(arr), cmap='Greys')\r\n\t\t\telse:\r\n\t\t\t\taxes[i,0].imshow(arr)\r\n\r\n\t\t# grab the next frame\r\n\t\tallStreams = vid.Grab()\r\n\t\tframeCounter += 1\r\n\r\n\t\t# update figures\r\n\t\tfig.canvas.draw()\r\n\r\n\t\t# printing\r\n\t\tif frameCounter % 10 == 0:\r\n\t\t\tprint('frame: {}'.format(frameCounter))\r\n\r\nif __name__ == \"__main__\":\r\n # input flags\r\n parser = argparse.ArgumentParser(\r\n 'Read a .pango file frame by frame.')\r\n parser.add_argument(\r\n '--pango', type=str,\r\n help='path to the input pango file.')\r\n parser.add_argument(\r\n '--pangoOut', type=str, default=None,\r\n help='path to the output pango file.')\r\n parser.add_argument(\r\n '--stream', type=int, default=None,\r\n help='stream to open.')\r\n FLAGS = parser.parse_args()\r\n\r\n # main function\r\n main(FLAGS)\r\n" ]
[ [ "numpy.squeeze" ] ]
beer-asr/gsm
[ "5722d9271b57f119ed5477895c8fc94c92e46b57" ]
[ "plotting.py" ]
[ "\nimport numpy as np\n\ndef plot_shaded_area(fig, xy1, xy2, **kwargs):\n upper_band = np.append(xy1[:,0], xy2[:, 0][::-1])\n lower_band = np.append(xy1[:,1], xy2[:, 1][::-1])\n fig.patch(upper_band, lower_band, **kwargs)\n \ndef create_upper_semicircle(radius, npoints):\n angles = np.linspace(0, np.pi, npoints)\n x, y = radius * np.cos(angles), radius * np.sin(angles)\n return np.c_[x, y]\n\ndef create_lower_semicircle(radius, npoints):\n xy = create_upper_semicircle(radius, npoints)\n return np.c_[xy[:, 0], -xy[:, 1]]\n\ndef plot_circle(fig, radius, npoints, tensor_metric=np.eye(2), transform=None, \n **kwargs):\n xy1 = create_upper_semicircle(radius, npoints // 2) @ tensor_metric\n xy2 = create_lower_semicircle(radius, npoints // 2) @ tensor_metric\n if transform is not None:\n xy1 = transform(xy1)\n xy2 = transform(xy2)\n plot_shaded_area(fig, xy1, xy2, **kwargs)\n \ndef plot_covariance(fig, covariance, n_std_dev=2, npoints=100, transform=None, \n **kwargs):\n tensor_metric = np.linalg.cholesky(covariance)\n for std_dev in range(n_std_dev, 0, -1):\n plot_circle(fig, std_dev, npoints, tensor_metric=tensor_metric.T, \n transform=transform, **kwargs)\n\ndef plot_normal(fig, mean, cov, n_std_dev=2, npoints=100, transform=None, **kwargs):\n 'Plot a Normal density'\n \n if transform is None:\n transform = lambda x: x\n def new_transform(xy):\n return mean + transform(xy)\n plot_covariance(fig, cov, n_std_dev, npoints, transform=new_transform,\n **kwargs)\n\n\ndef plot_gmm(fig, gmm, n_std_dev=2, npoints=100, alpha=1., colors=None, **kwargs):\n 'Plot a Normal density'\n if colors is None:\n colors = ['blue'] * len(gmm.modelset)\n for weight, comp, color in zip(gmm.weights, gmm.modelset, colors):\n kwargs['color'] = color\n plot_normal(fig, comp.mean.numpy(), comp.cov.numpy(),\n n_std_dev, npoints, alpha=alpha * weight.numpy(), **kwargs)\n\ndef plot_hmm(fig, hmm, n_std_dev=2, npoints=100, **kwargs):\n 'Plot a Normal density'\n for comp in hmm.modelset:\n plot_normal(fig, comp.mean.numpy(), comp.cov.numpy(),\n n_std_dev, npoints, **kwargs)\n " ]
[ [ "numpy.append", "numpy.sin", "numpy.eye", "numpy.cos", "numpy.linalg.cholesky", "numpy.linspace" ] ]
eltoto1219/multimodal-pretraining
[ "c5ca38c4f22901b6a94280b973f4a49dce4a828f" ]
[ "src/tasks/gqa_model.py" ]
[ "# coding=utf-8\n# Copyleft 2019 project LXRT.\n\nimport torch.nn as nn\n\nfrom param import args\nfrom lxrt.entry import LXRTEncoder\nfrom lxrt.modeling import BertLayerNorm, GeLU\n\n# Max length including <bos> and <eos>\nMAX_GQA_LENGTH = 20\n\n\nclass GQAModel(nn.Module):\n def __init__(self, num_answers):\n super().__init__()\n self.lxrt_encoder = LXRTEncoder(\n args,\n max_seq_length=MAX_GQA_LENGTH\n )\n hid_dim = self.lxrt_encoder.dim\n self.logit_fc = nn.Sequential(\n nn.Linear(hid_dim, hid_dim * 2),\n GeLU(),\n BertLayerNorm(hid_dim * 2, eps=1e-12),\n nn.Linear(hid_dim * 2, num_answers)\n )\n self.logit_fc.apply(self.lxrt_encoder.model.init_bert_weights)\n\n def forward(self, feat, pos, sent):\n \"\"\"\n b -- batch_size, o -- object_number, f -- visual_feature_size\n\n :param feat: (b, o, f)\n :param pos: (b, o, 4)\n :param sent: (b,) Type -- list of string\n :param leng: (b,) Type -- int numpy array\n :return: (b, num_answer) The logit of each answers.\n \"\"\"\n x = self.lxrt_encoder(sent, (feat, pos))\n logit = self.logit_fc(x)\n\n return logit\n\n\n" ]
[ [ "torch.nn.Linear" ] ]
nagnath001/reconcile-a-report-using-pandas
[ "f5bd3b62650b9c4976bb0814cd2a2255a09e5d7b" ]
[ "code.py" ]
[ "# --------------\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Code starts here\ndf=pd.read_csv(path)\ndf['state']=df['state'].str.lower()\ndf['total']=df['Jan']+df['Feb']+df['Mar']\nsum_row=df[[\"Jan\",\"Feb\",\"Mar\",\"total\"]].sum()\ndf_final=df.append(sum_row,ignore_index=True)\nprint(df.columns)\n\n\n# Code ends here\n\n\n# --------------\nimport requests\n\n# Code starts here\nurl='https://en.wikipedia.org/wiki/List_of_U.S._state_abbreviations '\n\nresponse=requests.get(url)\n\ndf1=pd.read_html(response.content)[0]\ndf1=df1.iloc[11:,:]\ndf1=df1.rename(columns=df1.iloc[0,:]).iloc[1:,:]\ndf1['United States of America']=df1['United States of America'].apply(lambda x:x.replace(\" \",\"\")).astype(object)\n# Code ends here\n\n\n# --------------\n# Load the dataframe\r\nscraped = pd.read_csv(path1)\r\ndf1['United States of America'] = df1['United States of America'].astype(str).apply(lambda x: x.lower())\r\ndf1['US'] = df1['US'].astype(str)\r\n# Mapping\r\nmapping = df1.set_index('United States of America')['US'].to_dict()\r\ndf_final.insert(6, 'abbr', np.nan)\r\ndf_final['abbr'] = df_final['state'].map(mapping)\n\n\n# --------------\n# Code stars here\n# replace missing values \ndf_mississipi = df_final[df_final['state'] == 'mississipi'].replace(np.nan, 'MS')\ndf_tenessee = df_final[df_final['state'] == 'tenessee'].replace(np.nan, 'TN')\n\n# replace the final_df\ndf_final.replace(df_final.iloc[6], df_mississipi, inplace=True)\ndf_final.replace(df_final.iloc[10], df_tenessee, inplace=True)\n\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n# Calculate the total amount\ndf_sub=df_final[[\"abbr\", \"Jan\", \"Feb\", \"Mar\", \"total\"]].groupby(\"abbr\").sum()\n\n# Add the $ symbol\nformatted_df = df_sub.applymap(lambda x: \"${:,.0f}\".format(x))\n\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n# Calculate the sum\nsum_row = df_sub[[\"Jan\", \"Feb\", \"Mar\", \"total\"]].sum()\ndf_sub_sum = pd.DataFrame(data=sum_row).T\n\n#apply $ to the sum \ndf_sub_sum = df_sub_sum.applymap(lambda x: \"${:,.0f}\".format(x))\n\n# append the sum\nfinal_table = formatted_df.append(df_sub_sum)\n# rename the index\nfinal_table = final_table.rename(index={0: \"Total\"})\n\n\n# Code ends here\n\n\n# --------------\n# Code starts here\ndf_sub['total']=df_sub['Jan'] + df_sub['Feb'] + df_sub['Mar']\ndf_sub['total'].plot(kind='pie')\n\n# Code ends here\n\n\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.read_html" ] ]
uro1012/MIALab
[ "49c68613fa2f7b3f349df63f973f1d6f8549ef9b" ]
[ "exercise/exercise_rf.py" ]
[ "\"\"\"A decision forest toy example.\n\nTrains and evaluates a decision forest classifier on a 2-D point cloud.\n\"\"\"\n\nimport argparse\nimport datetime\nimport os\nimport sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nimport sklearn.ensemble as sk_ensemble\nfrom sklearn.datasets import make_moons\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\nsys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..')) # append the MIALab root directory to Python path\n# fixes the ModuleNotFoundError when executing main.py in the console after code changes (e.g. git pull)\n# somehow pip install does not keep track of packages\n\n\ndef main(savefig: bool, result_dir: str, numtrees: int, treedepth: int):\n \"\"\"Trains a decision forest classifier on a the iris dataset.\"\"\"\n\n # generate result directory\n os.makedirs(result_dir, exist_ok=True)\n\n # load iris data\n\n data = make_moons(n_samples=1500, noise=0.23, random_state=None)\n features, labels = data[0], data[1]\n\n # split into training and testing data\n feat_train, feat_test, labels_train, labels_test = train_test_split(features, labels, test_size=0.5,\n random_state=42)\n\n # initialize the forest\n forest = sk_ensemble.RandomForestClassifier(max_features=feat_train.shape[1],\n n_estimators=args.numtrees,\n max_depth=args.treedepth)\n\n # train the forest\n print('Decision forest training...')\n forest.fit(feat_train, labels_train)\n\n # apply the forest to test data\n print('Decision forest testing...')\n predictions_test = forest.predict(feat_test)\n predictions_train = forest.predict(feat_train)\n\n # let's have a look at the feature importance\n print('Feature importances:')\n print(forest.feature_importances_)\n\n # calculate training and testing accuracies\n train_acc = accuracy_score(labels_train, predictions_train)\n test_acc = accuracy_score(labels_test, predictions_test)\n\n print(\"Training accuracy: {0:.2%}\".format(train_acc))\n print(\"Testing accuracy: {0:.2%}\".format(test_acc))\n\n # plot the result\n h = .02 # step size in the mesh\n # set font for text in figure\n font = {'family': 'sans-serif',\n 'color': 'black',\n 'weight': 'normal',\n 'size': 16,\n }\n\n figure = plt.figure(figsize=(10, 10))\n x_min, x_max = features[:, 0].min() - .5, features[:, 0].max() + .5\n y_min, y_max = features[:, 1].min() - .5, features[:, 1].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n # just plot the dataset first\n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n # ax = plt.subplot(len(datasets), len(classifiers) + 1, i)\n\n # Plot the training points\n plt.scatter(feat_train[:, 0], feat_train[:, 1], c=labels_train, cmap=cm_bright,\n edgecolors='k', label=\"Train\")\n # and testing points\n plt.scatter(feat_test[:, 0], feat_test[:, 1], c=labels_test, cmap=cm_bright, alpha=0.6,\n edgecolors='k', marker=\"P\", label=\"Test\")\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n plt.title(\"Random Forest Classification Exercise\")\n plt.xlabel(\"Feature 1\")\n plt.ylabel(\"Feature 2\")\n\n # Plot the decision boundary. 
For that, we will assign a color to each\n # point in the mesh [x_min, x_max]x[y_min, y_max].\n prob_boundary = forest.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]\n\n # Put the result into a color plot\n prob_boundary = prob_boundary.reshape(xx.shape)\n plt.contourf(xx, yy, prob_boundary, cmap=cm, alpha=.8)\n\n # add model information\n plt.text(x_min + 0.2, y_max - 0.2, \"Number of trees: {0:d}\".format(numtrees), fontdict=font)\n plt.text(x_min + 0.2, y_max - 0.3, \"Max. tree depth: {0:d}\".format(treedepth), fontdict=font)\n\n # add accuracy information to plot\n plt.text(x_max - 2, y_max - 0.2, \"Training accuracy: {0:.2%}\".format(train_acc), fontdict=font)\n plt.text(x_max - 2, y_max - 0.3, \"Testing accuracy: {0:.2%}\".format(test_acc), fontdict=font)\n\n # add legend\n plt.legend(loc='lower left')\n\n plt.show()\n\n # save figure if flag is set\n if savefig:\n t = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n figure.savefig(os.path.join(result_dir, 'rfplot_{}.png'.format(t)))\n print('Plot saved as ' + os.path.join(result_dir, 'rfplot_{}.png'.format(t)))\n\n\nif __name__ == \"__main__\":\n \"\"\"The program's entry point.\"\"\"\n\n script_dir = os.path.dirname(sys.argv[0])\n\n parser = argparse.ArgumentParser(description='2-dimensional point classification with decision forests')\n\n parser.add_argument(\n '--savefig',\n type=bool,\n default=True,\n help='Set to True to save plot to result_dir.'\n )\n\n parser.add_argument(\n '--result_dir',\n type=str,\n default=os.path.normpath(os.path.join(script_dir, 'randomforest_plots')),\n help='Directory for results.'\n )\n\n parser.add_argument(\n '--numtrees',\n type=int,\n default=5,\n help='Number of trees in the random forest classifier.'\n )\n\n parser.add_argument(\n '--treedepth',\n type=int,\n default=6,\n help='Maximum depth of the trees in the random forest classifier.'\n )\n\n args = parser.parse_args()\n main(args.savefig, args.result_dir, args.numtrees, args.treedepth)\n" ]
[ [ "matplotlib.pyplot.contourf", "sklearn.ensemble.RandomForestClassifier", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "sklearn.metrics.accuracy_score", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "numpy.arange", "matplotlib.pyplot.ylabel", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.scatter", "matplotlib.colors.ListedColormap", "sklearn.datasets.make_moons" ] ]
nurshafa/uaspemkom
[ "f370621ba81f21fc31c139f9ab0ac5dbb75c384e" ]
[ "main_streamlit.py" ]
[ "#Nur Shafa Erinda / 12220150 / UAS\r\n\r\n#import data dan modul yang diperlukan\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport json, requests\r\nimport streamlit as st\r\nfrom PIL import Image\r\n\r\nst.set_page_config(layout=\"wide\")\r\nst.set_option('deprecation.showPyplotGlobalUse', False)\r\nst.markdown(\r\n \"\"\"\r\n <style>\r\n .reportview-container {\r\n background: url(\"https://i.ibb.co/SNHBPTr/206101.png\")\r\n }\r\n .sidebar .sidebar-content {\r\n background: url(\"https://dyrdkqpaj50j2.cloudfront.net/media/catalog/product/g/s/gsa-101-white_31.jpg\")\r\n }\r\n </style>\r\n \"\"\",\r\n unsafe_allow_html=True\r\n)\r\n\r\ndf = pd.read_csv(\"https://raw.githubusercontent.com/nurshafa/uaspemkom/main/produksi_minyak_mentah.csv\")\r\nurl = 'https://raw.githubusercontent.com/nurshafa/uaspemkom/main/kode_negara_lengkap.json'\r\nresp = requests.get(url)\r\ndatauas = json.loads(resp.text)\r\n\r\n#membuat list yang relevan\r\nlistorganisasi = list()\r\nlistkodecsv0 = list()\r\nlistkodecsv = list()\r\nlistkodejson = list()\r\nlistkodefix = list()\r\nlisttahunfix = list()\r\nlistnegarafix = list()\r\n\r\nfor i in datauas:\r\n listkodejson.append(i['alpha-3'])\r\n\r\nlistkodecsv0 = df['kode_negara'].tolist()\r\n\r\nfor j in listkodecsv0 :\r\n if j not in listkodecsv :\r\n listkodecsv.append(j)\r\n\r\nfor k in listkodecsv :\r\n if k not in listkodejson :\r\n listorganisasi.append(k) #list organisasi\r\n\r\nfor k in listkodecsv :\r\n if k in listkodejson :\r\n listkodefix.append(k) #list kode\r\n\r\nlistkodefix = list(dict.fromkeys(listkodefix))\r\n\r\n#Menghapus organisasi pada file csv\r\nfor line in listorganisasi :\r\n df = df[df.kode_negara != line]\r\n\r\n#membuat list lainnya yang diperlukan\r\nlisttahunfix = df['tahun'].tolist()\r\nlisttahunfix = list(dict.fromkeys(listtahunfix))\r\n\r\nfor line3 in listkodefix:\r\n dictionarynegarax = next(x for x in datauas if x[\"alpha-3\"] == line3)\r\n listnegarafix.append(dictionarynegarax['name'])\r\n\r\nlistnegarafix = list(dict.fromkeys(listnegarafix))\r\n\r\n#------------------------------------TITLE-------------------------------------#\r\nst.title(\"Statistik Produksi Minyak Mentah\")\r\nst.markdown(\"*Dibuat oleh Nur Shafa Erinda (12220150)*\")\r\nst.subheader(\" \")\r\n\r\n#------------------------------------SIDE BAR------------------------------------#\r\nimage = Image.open('itb.png')\r\nst.sidebar.image(image, width=150)\r\nst.sidebar.title(\"Main Menu\")\r\nst.sidebar.markdown(\"Pilih salah satu menu berikut: \")\r\n\r\nlist_userchoice = ['Grafik jumlah produksi minyak mentah terhadap waktu suatu negara', 'Grafik negara dengan produksi terbesar pada suatu tahun', 'Grafik negara dengan produksi kumulatif terbesar', 'Summary produksi minyak mentah']\r\nuserchoice = st.sidebar.radio('Menu: ',list_userchoice)\r\nif userchoice == list_userchoice[0]:\r\n userchoice = 1\r\nelif userchoice == list_userchoice[1]:\r\n userchoice = 2\r\nelif userchoice == list_userchoice[2]:\r\n userchoice = 3\r\nelif userchoice == list_userchoice[3]:\r\n userchoice = 4\r\n\r\n#---------------------------------MAIN PAGE----------------------------------------#\r\n#SOAL A, B, C, D\r\nif userchoice==1 : #SOAL A\r\n #input nama negara\r\n inputnegara1 = st.selectbox(\"Nama negara\", listnegarafix)\r\n\r\n #cari nama negara tersebut di data\r\n dictionarynegara1 = (next(x for x in datauas if x[\"name\"] == inputnegara1))\r\n codenegara1 = dictionarynegara1['alpha-3']\r\n\r\n #dari kode negara, bikin data frame tersendiri khusus negara 
itu\r\n kumpulandata1 = df.loc[df[\"kode_negara\"] == codenegara1]\r\n\r\n #buat grafiknya\r\n st.header(f\"Produksi Minyak Mentah {inputnegara1}\")\r\n kumpulandata1.plot(kind=\"line\", x=\"tahun\", y=\"produksi\", title=\"Grafik Produksi Minyak\", xlabel=\"tahun\", ylabel='jumlah produksi', color='indigo', marker='o',markerfacecolor='darkorchid', markersize=9)\r\n a = plt.show()\r\n plota = st.pyplot(a)\r\n\r\nelif userchoice==2 : #SOAL B\r\n st.markdown(\"*Disarankan untuk tidak memilih lebih dari 45 besar supaya grafik masih bisa jelas terlihat*\")\r\n\r\n #input B,T\r\n col1, col2 = st.columns(2)\r\n inputbesar2 = col1.slider('Jumlah negara yang ditampilkan: ', 1, 137)\r\n inputbesar2 = int(inputbesar2)\r\n inputtahun2 = col2.selectbox('Tahun: ', listtahunfix)\r\n\r\n st.header(f\"Produksi Minyak Mentah Terbesar Tahun {inputtahun2}\")\r\n #dari tahun produksi yang diinput, bikin data frame tersendiri khusus tahun itu\r\n kumpulandata2 = df.loc[df[\"tahun\"] == inputtahun2]\r\n kumpulandata22 = (kumpulandata2.sort_values([\"produksi\"], ascending=False).head(inputbesar2))\r\n\r\n listnegarasoalb = list()\r\n namanegarasoalb = list()\r\n listnegarasoalb = kumpulandata22['kode_negara'].tolist()\r\n\r\n for line2 in listnegarasoalb:\r\n dictionarynegarab = next(x for x in datauas if x[\"alpha-3\"] == line2)\r\n namanegarasoalb.append(dictionarynegarab['name'])\r\n\r\n kumpulandata22.insert(1, \"nama_negara\", namanegarasoalb, True)\r\n\r\n #print dataframe yang telah dibuat dan plotnya\r\n st.subheader(\"Data\")\r\n st.dataframe(kumpulandata22)\r\n st.subheader(\"Grafik\")\r\n kumpulandata22.plot(kind=\"bar\", x=\"kode_negara\", y=\"produksi\", title=\"Grafik Produksi Minyak Terbesar Pada Tahun Input User\", xlabel=\"negara\", ylabel='produksi',color='rebeccapurple')\r\n b = plt.show()\r\n plota = st.pyplot(b)\r\n\r\nelif userchoice == 3 : #SOAL C\r\n st.header('Produksi Minyak Mentah Kumulatif Terbesar')\r\n st.markdown(\"*Disarankan untuk tidak memilih lebih dari 45 besar supaya grafik masih bisa jelas terlihat*\")\r\n #input user\r\n inputbesar3 = st.slider('Jumlah negara yang ditampilkan: ', 1, 137)\r\n inputbesar3 = int(inputbesar3)\r\n\r\n #buat dataframe sesuai keperluan\r\n kumpulandata3 = df.groupby(\"kode_negara\")[\"produksi\"].sum().to_frame(name = 'produksi_kumulatif').reset_index()\r\n kumpulandata33 = kumpulandata3.sort_values([\"produksi_kumulatif\"], ascending=False).head(inputbesar3)\r\n\r\n listnegarasoalc = list()\r\n namanegarasoalc = list()\r\n listnegarasoalc = kumpulandata33['kode_negara'].tolist()\r\n\r\n for line2 in listnegarasoalc:\r\n dictionarynegarac = next(x for x in datauas if x[\"alpha-3\"] == line2)\r\n namanegarasoalc.append(dictionarynegarac['name'])\r\n\r\n kumpulandata33.insert(1, \"nama_negara\", namanegarasoalc, True)\r\n\r\n #print dataframe yang sudah dibuat dan plotnya\r\n st.subheader(\"Data\")\r\n st.dataframe(kumpulandata33)\r\n st.subheader(\"Grafik\")\r\n kumpulandata33.plot(kind=\"bar\", x=\"kode_negara\", y=\"produksi_kumulatif\", title=\"Grafik Produksi Minyak Kumulatif Terbesar\", xlabel=\"negara\", ylabel='produksi kumulatif',color='mediumvioletred')\r\n c = plt.show()\r\n plota = st.pyplot(c)\r\n\r\nelif userchoice == 4 : #SOAL D\r\n st.header('Summary Produksi Minyak Mentah')\r\n #input T\r\n inputtahun4 = st.selectbox('Tahun: ', listtahunfix)\r\n\r\n #-----------------------------------tabel 1-----------------------------------\r\n st.subheader(\"**Produksi Terbesar**\")\r\n col1, col2 = st.columns(2)\r\n #~~~~~kumulatif~~~~~\r\n 
kumpulandatad1 = df.groupby(\"kode_negara\")[\"produksi\"].sum().to_frame(name='produksi_kumulatif').reset_index()\r\n imax1 = kumpulandatad1[\"produksi_kumulatif\"].idxmax()\r\n imax1 = int(imax1)\r\n koded1 = kumpulandatad1.loc[imax1,\"kode_negara\"]\r\n prodd1 = kumpulandatad1.loc[imax1, \"produksi_kumulatif\"]\r\n\r\n col2.markdown(f\"**Kode negara (kumulatif)** : {koded1}\")\r\n col2.markdown(f\"**Jumlah produksi** : {prodd1}\")\r\n\r\n dictionarynegarad1 = (next(x for x in datauas if x[\"alpha-3\"] == kumpulandatad1.loc[imax1,\"kode_negara\"]))\r\n namanegarad1 = dictionarynegarad1['name']\r\n namaregiond1 = dictionarynegarad1['region']\r\n namasubregiond1 = dictionarynegarad1['sub-region']\r\n\r\n col2.markdown(f\"**Nama negara** : {namanegarad1}\")\r\n col2.markdown(f\"**Region** : {namaregiond1}\")\r\n col2.markdown(f\"**Sub-Region** : {namasubregiond1}\")\r\n col2.markdown(\" \")\r\n\r\n #~~~~~tahun T~~~~~\r\n kumpulandatad1t = df.loc[df[\"tahun\"] == inputtahun4]\r\n imax2 = kumpulandatad1t[\"produksi\"].idxmax()\r\n imax2 = int(imax2)\r\n koded2 = kumpulandatad1t.loc[imax2, \"kode_negara\"]\r\n prodd2 = kumpulandatad1t.loc[imax2, \"produksi\"]\r\n\r\n col1.markdown(f\"**Kode negara (tahun {inputtahun4})** : {koded2}\")\r\n col1.markdown(f\"**Jumlah produksi** : {prodd2}\" )\r\n\r\n dictionarynegarad1t = (next(x for x in datauas if x[\"alpha-3\"] == kumpulandatad1t.loc[imax2, \"kode_negara\"]))\r\n namanegarad1t = dictionarynegarad1t['name']\r\n namaregiond1t = dictionarynegarad1t['region']\r\n namasubregiond1t = dictionarynegarad1t['sub-region']\r\n\r\n col1.markdown(f\"**Nama negara** : {namanegarad1t}\")\r\n col1.markdown(f\"**Region** : {namaregiond1t}\")\r\n col1.markdown(f\"**Sub-Region** {namasubregiond1t}\")\r\n col1.markdown(\" \")\r\n\r\n #-----------------------------------tabel 2-----------------------------------\r\n st.subheader(\"**Produksi Terkecil**\")\r\n col1, col2 = st.columns(2)\r\n #~~~~~kumulatif~~~~~\r\n #menghilangkan yang value-nya 0 terlebih dahulu\r\n kumpulandatad2 = kumpulandatad1[kumpulandatad1.produksi_kumulatif != 0]\r\n imin1 = kumpulandatad2[\"produksi_kumulatif\"].idxmin()\r\n imin1 = int(imin1)\r\n koded3 = kumpulandatad2.loc[imin1, \"kode_negara\"]\r\n prodd3 = kumpulandatad2.loc[imin1, \"produksi_kumulatif\"]\r\n\r\n col2.markdown(f\"**Kode negara (kumulatif)** : {koded3} \" )\r\n col2.markdown(f\"**Jumlah produksi** : {prodd3}\")\r\n\r\n dictionarynegarad2 = (next(x for x in datauas if x[\"alpha-3\"] == kumpulandatad2.loc[imin1, \"kode_negara\"]))\r\n namanegarad2 = dictionarynegarad2['name']\r\n namaregiond2 = dictionarynegarad2['region']\r\n namasubregiond2 = dictionarynegarad2['sub-region']\r\n\r\n col2.markdown(f\"**Nama negara** : {namanegarad2}\")\r\n col2.markdown(f\"**Region** : {namaregiond2}\")\r\n col2.markdown(f\"**Sub-Region** : {namasubregiond2}\")\r\n col2.markdown(\" \")\r\n\r\n #~~~~~tahun T~~~~~\r\n #menghilangkan yang value-nya 0 terlebih dahulu\r\n kumpulandatad2t = kumpulandatad1t[kumpulandatad1t.produksi != 0]\r\n imin2 = kumpulandatad2t[\"produksi\"].idxmin()\r\n imin2 = int(imin2)\r\n koded4 = kumpulandatad2t.loc[imin2, \"kode_negara\"]\r\n prodd4 = kumpulandatad2t.loc[imin2, \"produksi\"]\r\n\r\n col1.markdown(f\"**Kode negara ({inputtahun4}) ** : {koded4}\")\r\n col1.markdown(f\"**Jumlah produksi** : {prodd4}\")\r\n\r\n dictionarynegarad2t = (next(x for x in datauas if x[\"alpha-3\"] == kumpulandatad2t.loc[imin2, \"kode_negara\"]))\r\n namanegarad2t = dictionarynegarad2t['name']\r\n namaregiond2t = 
dictionarynegarad2t['region']\r\n namasubregiond2t = dictionarynegarad2t['sub-region']\r\n\r\n col1.markdown(f\"**Nama negara** {namanegarad2t}\")\r\n col1.markdown(f\"**Region** : {namaregiond2t}\")\r\n col1.markdown(f\"**Sub-Region** : {namasubregiond2t}\")\r\n col1.markdown(\" \")\r\n\r\n #-----------------------------------tabel 3-----------------------------------\r\n st.subheader(\"**Negara yang Tidak Melakukan Produksi Minyak**\")\r\n #~~~~~tahun T~~~~~\r\n # membuat data frame berisi data yang hanya berisikan produksi 0 di tahun T\r\n st.markdown(f\"**Data produksi minyak pada tahun {inputtahun4} berjumlah 0:**\")\r\n kumpulandatad3t = kumpulandatad1t[kumpulandatad1t.produksi == 0]\r\n\r\n # membuat list untuk kolom nama asli negara, region, dan sub-region\r\n listnegaranolt = list()\r\n namanegarad3t = list()\r\n namaregiond3t = list()\r\n namasubregiond3t = list()\r\n listnegaranolt = kumpulandatad3t['kode_negara'].tolist()\r\n\r\n for line2 in listnegaranolt:\r\n dictionarynegarad3t = next(x for x in datauas if x[\"alpha-3\"] == line2)\r\n namanegarad3t.append(dictionarynegarad3t['name'])\r\n namaregiond3t.append(dictionarynegarad3t['region'])\r\n namasubregiond3t.append(dictionarynegarad3t['sub-region'])\r\n\r\n kumpulandatad3t.insert(1, \"nama_negara\", namanegarad3t, True)\r\n kumpulandatad3t.insert(4, \"region\", namaregiond3t, True)\r\n kumpulandatad3t.insert(5, \"sub-region\", namasubregiond3t, True)\r\n kumpulandatad3t.drop(columns=\"produksi\",inplace=True)\r\n st.dataframe(kumpulandatad3t)\r\n\r\n #~~~~~kumulatif~~~~~\r\n #membuat data frame berisi data yang hanya berisikan produksi kumulatif 0\r\n st.markdown(\"**Data produksi minyak kumulatif berjumlah 0**:\")\r\n kumpulandatad3 = kumpulandatad1[kumpulandatad1.produksi_kumulatif == 0]\r\n\r\n #membuat list untuk kolom nama asli negara, region, dan sub-region\r\n listnegaranol = list()\r\n namanegarad3 = list()\r\n namaregiond3 = list()\r\n namasubregiond3 = list()\r\n listnegaranol = kumpulandatad3['kode_negara'].tolist()\r\n\r\n for line in listnegaranol :\r\n dictionarynegarad3 = next(x for x in datauas if x[\"alpha-3\"] == line)\r\n namanegarad3.append(dictionarynegarad3['name'])\r\n namaregiond3.append(dictionarynegarad3['region'])\r\n namasubregiond3.append(dictionarynegarad3['sub-region'])\r\n\r\n kumpulandatad3.insert(1, \"nama_negara\", namanegarad3, True)\r\n kumpulandatad3.insert(3, \"region\", namaregiond3, True)\r\n kumpulandatad3.insert(4, \"sub-region\", namasubregiond3, True)\r\n kumpulandatad3.drop(columns=\"produksi_kumulatif\",inplace=True)\r\n st.dataframe(kumpulandatad3)" ]
[ [ "matplotlib.pyplot.show", "pandas.read_csv" ] ]
hsteffens/image-recognition
[ "5c465f2a4812dd56b822bf20f37123357c4cf054" ]
[ "HOG/training.py" ]
[ "import cv2\nimport pandas as pd\nimport numpy\nimport os\n\n\ndef getdataset(items, path, size, classification):\n for x in range(1, size):\n image = cv2.imread(path + '{:03d}'.format(x) + '.bmp')\n if image is None:\n continue\n dim = (64, 128)\n image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\n hog = cv2.HOGDescriptor()\n vector = hog.compute(image)\n vector = numpy.append(vector, [classification])\n items.append(vector)\n return\n\ndef createDataFile(dataset, filename):\n outdir = './dir'\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n fullname = os.path.join(outdir, filename)\n\n dataset.to_csv(fullname)\n\n\ndef createdata():\n training = list()\n getdataset(training, '../Treinamento/bart', 81, 0)\n getdataset(training, '../Treinamento/homer', 63, 1)\n\n dataset = pd.DataFrame.from_records(training)\n createDataFile(dataset, 'hog.csv')\n\n dataTest = list()\n getdataset(dataTest, '../Teste/bart', 116, 0)\n getdataset(dataTest, '../Teste/homer', 88, 1)\n\n dataset = pd.DataFrame.from_records(dataTest)\n createDataFile(dataset, 'test_hog.csv')" ]
[ [ "pandas.DataFrame.from_records", "numpy.append" ] ]
conan7882/variational-autoencoder
[ "4960f252784a7dd2fbe203d7dad65938b57ee9c2" ]
[ "experiment/vae_mnist.py" ]
[ "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# File: vae_mnist.py\r\n# Author: Qian Ge <[email protected]>\r\n\r\nimport os\r\nimport sys\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport platform\r\nimport scipy.misc\r\nimport argparse\r\nimport matplotlib.pyplot as plt\r\n\r\nsys.path.append('../')\r\nfrom src.dataflow.mnist import MNISTData\r\nfrom src.models.vae import VAE\r\nfrom src.helper.trainer import Trainer\r\nfrom src.helper.generator import Generator\r\nfrom src.helper.visualizer import Visualizer\r\nimport src.models.distribution as distribution\r\n\r\nDATA_PATH = '/home/qge2/workspace/data/MNIST_data/'\r\nSAVE_PATH = '/home/qge2/workspace/data/out/vae/vae/'\r\n\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--train', action='store_true',\r\n help='Train the model')\r\n parser.add_argument('--generate', action='store_true',\r\n help='generate')\r\n parser.add_argument('--viz', action='store_true',\r\n help='visualize')\r\n parser.add_argument('--test', action='store_true',\r\n help='test')\r\n parser.add_argument('--load', type=int, default=99,\r\n help='Load step of pre-trained')\r\n parser.add_argument('--lr', type=float, default=1e-3,\r\n help='Init learning rate')\r\n parser.add_argument('--ncode', type=int, default=2,\r\n help='number of code')\r\n\r\n parser.add_argument('--bsize', type=int, default=128,\r\n help='Init learning rate')\r\n parser.add_argument('--maxepoch', type=int, default=100,\r\n help='Max iteration')\r\n\r\n \r\n return parser.parse_args()\r\n\r\n\r\ndef preprocess_im(im):\r\n im = im / 255.\r\n return im\r\n\r\ndef train():\r\n FLAGS = get_args()\r\n train_data = MNISTData('train',\r\n data_dir=DATA_PATH,\r\n shuffle=True,\r\n pf=preprocess_im,\r\n batch_dict_name=['im', 'label'])\r\n train_data.setup(epoch_val=0, batch_size=FLAGS.bsize)\r\n valid_data = MNISTData('test',\r\n data_dir=DATA_PATH,\r\n shuffle=True,\r\n pf=preprocess_im,\r\n batch_dict_name=['im', 'label'])\r\n valid_data.setup(epoch_val=0, batch_size=FLAGS.bsize)\r\n\r\n with tf.variable_scope('VAE') as scope:\r\n model = VAE(n_code=FLAGS.ncode, wd=0)\r\n model.create_train_model()\r\n\r\n with tf.variable_scope('VAE') as scope:\r\n scope.reuse_variables()\r\n valid_model = VAE(n_code=FLAGS.ncode, wd=0)\r\n valid_model.create_generate_model(b_size=400)\r\n\r\n trainer = Trainer(model, valid_model, train_data, init_lr=FLAGS.lr, save_path=SAVE_PATH)\r\n if FLAGS.ncode == 2:\r\n z = distribution.interpolate(plot_size=20)\r\n z = np.reshape(z, (400, 2))\r\n visualizer = Visualizer(model, save_path=SAVE_PATH)\r\n else:\r\n z = None\r\n generator = Generator(generate_model=valid_model, save_path=SAVE_PATH)\r\n\r\n sessconfig = tf.ConfigProto()\r\n sessconfig.gpu_options.allow_growth = True\r\n with tf.Session(config=sessconfig) as sess:\r\n writer = tf.summary.FileWriter(SAVE_PATH)\r\n saver = tf.train.Saver()\r\n sess.run(tf.global_variables_initializer())\r\n writer.add_graph(sess.graph)\r\n\r\n for epoch_id in range(FLAGS.maxepoch):\r\n trainer.train_epoch(sess, summary_writer=writer)\r\n trainer.valid_epoch(sess, summary_writer=writer)\r\n if epoch_id % 10 == 0:\r\n saver.save(sess, '{}vae-epoch-{}'.format(SAVE_PATH, epoch_id))\r\n if FLAGS.ncode == 2:\r\n generator.generate_samples(sess, plot_size=20, z=z, file_id=epoch_id)\r\n visualizer.viz_2Dlatent_variable(sess, valid_data, file_id=epoch_id)\r\n\r\ndef generate():\r\n FLAGS = get_args()\r\n plot_size = 20\r\n\r\n with tf.variable_scope('VAE') as scope:\r\n # 
scope.reuse_variables()\r\n generate_model = VAE(n_code=FLAGS.ncode, wd=0)\r\n generate_model.create_generate_model(b_size=plot_size*plot_size)\r\n\r\n generator = Generator(generate_model=generate_model, save_path=SAVE_PATH)\r\n\r\n sessconfig = tf.ConfigProto()\r\n sessconfig.gpu_options.allow_growth = True\r\n with tf.Session(config=sessconfig) as sess:\r\n saver = tf.train.Saver()\r\n sess.run(tf.global_variables_initializer())\r\n saver.restore(sess, '{}vae-epoch-{}'.format(SAVE_PATH, FLAGS.load))\r\n generator.generate_samples(sess, plot_size=plot_size, z=None)\r\n\r\ndef visualize():\r\n FLAGS = get_args()\r\n plot_size = 20\r\n\r\n valid_data = MNISTData('test',\r\n data_dir=DATA_PATH,\r\n shuffle=True,\r\n pf=preprocess_im,\r\n batch_dict_name=['im', 'label'])\r\n valid_data.setup(epoch_val=0, batch_size=FLAGS.bsize)\r\n\r\n with tf.variable_scope('VAE') as scope:\r\n model = VAE(n_code=FLAGS.ncode, wd=0)\r\n model.create_train_model()\r\n\r\n with tf.variable_scope('VAE') as scope:\r\n scope.reuse_variables()\r\n valid_model = VAE(n_code=FLAGS.ncode, wd=0)\r\n valid_model.create_generate_model(b_size=400)\r\n\r\n visualizer = Visualizer(model, save_path=SAVE_PATH)\r\n generator = Generator(generate_model=valid_model, save_path=SAVE_PATH)\r\n\r\n z = distribution.interpolate(plot_size=plot_size)\r\n z = np.reshape(z, (plot_size*plot_size, 2))\r\n\r\n sessconfig = tf.ConfigProto()\r\n sessconfig.gpu_options.allow_growth = True\r\n with tf.Session(config=sessconfig) as sess:\r\n saver = tf.train.Saver()\r\n sess.run(tf.global_variables_initializer())\r\n saver.restore(sess, '{}vae-epoch-{}'.format(SAVE_PATH, FLAGS.load))\r\n visualizer.viz_2Dlatent_variable(sess, valid_data)\r\n generator.generate_samples(sess, plot_size=plot_size, z=z)\r\n\r\ndef test():\r\n valid_data = MNISTData('test',\r\n data_dir=DATA_PATH,\r\n shuffle=True,\r\n pf=preprocess_im,\r\n batch_dict_name=['im', 'label'])\r\n batch_data = valid_data.next_batch_dict()\r\n plt.figure()\r\n plt.imshow(np.squeeze(batch_data['im'][0]))\r\n plt.show()\r\n print(batch_data['label'])\r\n\r\nif __name__ == '__main__':\r\n FLAGS = get_args()\r\n\r\n if FLAGS.train:\r\n train()\r\n elif FLAGS.generate:\r\n generate()\r\n elif FLAGS.viz:\r\n visualize()\r\n elif FLAGS.test:\r\n test()\r\n\r\n" ]
[ [ "tensorflow.summary.FileWriter", "numpy.reshape", "tensorflow.Session", "tensorflow.train.Saver", "matplotlib.pyplot.figure", "tensorflow.ConfigProto", "tensorflow.variable_scope", "matplotlib.pyplot.show", "tensorflow.global_variables_initializer", "numpy.squeeze" ] ]
kennetms/Accenture_Hybrid_Guided_VAE
[ "aae3c02d614b2ea7ceeebc9e70ef4f7a4b69ee2c" ]
[ "decolle/base_model.py" ]
[ "#!/bin/python\n# -----------------------------------------------------------------------------\n# -----------------------------------------------------------------------------\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch\nimport numpy as np\nfrom itertools import chain\nfrom collections import namedtuple, OrderedDict\nimport warnings\nfrom decolle.utils import train, test, accuracy, load_model_from_checkpoint, save_checkpoint, write_stats, get_output_shape, state_detach\n\ndtype = torch.float32\n\nsigmoid = nn.Sigmoid()\nrelu = nn.ReLU()\n\n\n## from snntorch\nclass FastSigmoid(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input_):\n ctx.save_for_backward(input_)\n #return input_/(1+slope * torch.abs(input_))\n return (input_>0).type(input_.dtype)\n\n @staticmethod\n def backward(ctx, grad_output):\n (input_,) = ctx.saved_tensors\n grad_input = grad_output.clone()\n #10 here is the slope\n return grad_input / (10 * torch.abs(input_) + 1.0) ** 2\n\nclass SmoothStep(torch.autograd.Function):\n '''\n Modified from: https://pytorch.org/tutorials/beginner/examples_autograd/two_layer_net_custom_function.html\n '''\n\n @staticmethod\n def forward(aux, x):\n aux.save_for_backward(x)\n return (x >=0).type(x.dtype)\n\n def backward(aux, grad_output):\n # grad_input = grad_output.clone()\n input, = aux.saved_tensors\n grad_input = grad_output.clone()\n grad_input[input <= -.5] = 0\n grad_input[input > .5] = 0\n return grad_input\n \nclass SigmoidStep(torch.autograd.Function):\n @staticmethod\n def forward(aux, x):\n aux.save_for_backward(x)\n return (x >=0).type(x.dtype)\n\n def backward(aux, grad_output):\n # grad_input = grad_output.clone()\n input, = aux.saved_tensors\n res = torch.sigmoid(input)\n return res*(1-res)*grad_output\n\nsmooth_step = SmoothStep().apply\nsmooth_sigmoid = SigmoidStep().apply\nfast_sigmoid = FastSigmoid.apply\n\nclass LinearFAFunction(torch.autograd.Function):\n '''from https://github.com/L0SG/feedback-alignment-pytorch/'''\n @staticmethod\n # same as reference linear function, but with additional fa tensor for backward\n def forward(context, input, weight, weight_fa, bias=None):\n context.save_for_backward(input, weight, weight_fa, bias)\n output = input.mm(weight.t())\n if bias is not None:\n output += bias.unsqueeze(0).expand_as(output)\n return output\n\n @staticmethod\n def backward(context, grad_output):\n input, weight, weight_fa, bias = context.saved_tensors\n grad_input = grad_weight = grad_weight_fa = grad_bias = None\n\n if context.needs_input_grad[0]:\n # all of the logic of FA resides in this one line\n # calculate the gradient of input with fixed fa tensor, rather than the \"correct\" model weight\n grad_input = grad_output.mm(weight_fa)\n if context.needs_input_grad[1]:\n # grad for weight with FA'ed grad_output from downstream layer\n # it is same with original linear function\n grad_weight = grad_output.t().mm(input)\n if bias is not None and context.needs_input_grad[3]:\n grad_bias = grad_output.sum(0).squeeze(0)\n\n return grad_input, grad_weight, grad_weight_fa, grad_bias\n\n\n\nclass BaseLIFLayer(nn.Module):\n NeuronState = namedtuple('NeuronState', ['P', 'Q', 'R', 'S'])\n sg_function = smooth_step\n\n def __init__(self, layer, alpha=.9, alpharp=.65, wrp=1.0, beta=.85, deltat=1000, do_detach=True):\n '''\n deltat: timestep in microseconds (not milliseconds!)\n '''\n super(BaseLIFLayer, self).__init__()\n self.base_layer = layer\n self.deltat = deltat\n #self.dt = deltat/1e-6\n self.alpha = torch.tensor(alpha, 
requires_grad=False)\n self.beta = torch.tensor(beta, requires_grad=False)\n self.tau_m = torch.nn.Parameter(1. / (1 - self.alpha), requires_grad=False)\n self.tau_s = torch.nn.Parameter(1. / (1 - self.beta), requires_grad=False)\n self.alpharp = alpharp\n self.wrp = wrp\n self.state = None\n self.do_detach = do_detach\n\n def cuda(self, device=None):\n '''\n Handle the transfer of the neuron state to cuda\n '''\n self = super().cuda(device)\n self.state = None\n self.base_layer = self.base_layer.cuda()\n return self\n\n def cpu(self, device=None):\n '''\n Handle the transfer of the neuron state to cpu\n '''\n self = super().cpu(device)\n self.state = None\n self.base_layer = self.base_layer.cpu()\n return self\n\n @staticmethod\n def reset_parameters(layer):\n layer.reset_parameters()\n if hasattr(layer, 'out_channels'):\n n = layer.in_channels\n for k in layer.kernel_size:\n n *= k\n stdv = 1. / np.sqrt(n) / 250\n layer.weight.data.uniform_(-stdv * 1e-2, stdv * 1e-2)\n if layer.bias is not None: \n layer.bias.data.uniform_(-stdv, stdv)\n elif hasattr(layer, 'out_features'): \n layer.weight.data[:]*=0\n if layer.bias is not None:\n layer.bias.data.uniform_(-1e-3,1e-3)\n else:\n warnings.warn('Unhandled data type, not resetting parameters')\n \n @staticmethod\n def get_out_channels(layer):\n '''\n Wrapper for returning number of output channels in a LIFLayer\n '''\n if hasattr(layer, 'out_features'):\n return layer.out_features\n elif hasattr(layer, 'out_channels'): \n return layer.out_channels\n elif hasattr(layer, 'get_out_channels'): \n return layer.get_out_channels()\n else: \n raise Exception('Unhandled base layer type')\n \n @staticmethod\n def get_out_shape(layer, input_shape):\n if hasattr(layer, 'out_channels'):\n return get_output_shape(input_shape, \n kernel_size=layer.kernel_size,\n stride = layer.stride,\n padding = layer.padding,\n dilation = layer.dilation)\n elif hasattr(layer, 'out_features'): \n return []\n elif hasattr(layer, 'get_out_shape'): \n return layer.get_out_shape()\n else: \n raise Exception('Unhandled base layer type')\n\n def init_state(self, Sin_t):\n dtype = Sin_t.dtype\n device = self.base_layer.weight.device\n input_shape = list(Sin_t.shape)\n out_ch = self.get_out_channels(self.base_layer)\n out_shape = self.get_out_shape(self.base_layer, input_shape)\n self.state = self.NeuronState(P=torch.zeros(input_shape).type(dtype).to(device),\n Q=torch.zeros(input_shape).type(dtype).to(device),\n R=torch.zeros([input_shape[0], out_ch] + out_shape).type(dtype).to(device),\n S=torch.zeros([input_shape[0], out_ch] + out_shape).type(dtype).to(device))\n\n def init_parameters(self, Sin_a):\n self.reset_parameters(self.base_layer)\n\n def forward(self, Sin_t):\n if self.state is None:\n self.init_state(Sin_t)\n\n state = self.state\n Q = self.beta * state.Q + self.tau_s * Sin_t #Wrong dynamics, kept for backward compatibility\n P = self.alpha * state.P + self.tau_m * state.Q #Wrong dynamics, kept for backward compatibility \n R = self.alpharp * state.R - state.S * self.wrp\n U = self.base_layer(P) + R\n S = self.sg_function(U)\n self.state = self.NeuronState(P=P, Q=Q, R=R, S=S)\n if self.do_detach: \n state_detach(self.state)\n return S, U\n\n def get_output_shape(self, input_shape):\n layer = self.base_layer\n if hasattr(layer, 'out_channels'):\n im_height = input_shape[-2]\n im_width = input_shape[-1]\n height = int((im_height + 2 * layer.padding[0] - layer.dilation[0] *\n (layer.kernel_size[0] - 1) - 1) // layer.stride[0] + 1)\n weight = int((im_width + 2 * 
layer.padding[1] - layer.dilation[1] *\n (layer.kernel_size[1] - 1) - 1) // layer.stride[1] + 1)\n return [height, weight]\n else:\n return layer.out_features\n \n def get_device(self):\n return self.base_layer.weight.device\n\nclass LIFLayer(BaseLIFLayer):\n sg_function = FastSigmoid.apply\n\n def forward(self, Sin_t):\n if self.state is None:\n self.init_state(Sin_t)\n\n state = self.state\n Q = self.beta * state.Q + (1-self.beta)*Sin_t\n P = self.alpha * state.P + (1-self.alpha)*state.Q \n R = self.alpharp * state.R - state.S * self.wrp\n U = self.base_layer(P) #+ R\n S = self.sg_function(U)\n self.state = self.NeuronState(P=P, Q=Q, R=R, S=S)\n if self.do_detach: \n state_detach(self.state)\n return S, U\n\n def init_parameters(self, Sin_t, *args, **kwargs):\n self.reset_parameters(self.base_layer, *args, **kwargs)\n \n def reset_parameters(self, layer):\n layer.reset_parameters()\n if hasattr(layer, 'out_channels'):\n layer.bias.data = layer.bias.data*((1-self.alpha)*(1-self.beta))\n layer.weight.data[:] *= 1\n elif hasattr(layer, 'out_features'): \n layer.weight.data[:] *= 5e-2\n layer.bias.data[:] = layer.bias.data[:]*((1-self.alpha)*(1-self.beta))\n else:\n warnings.warn('Unhandled data type, not resetting parameters')\n \nclass LIFLayerNonorm(LIFLayer):\n sg_function = smooth_step\n\n def forward(self, Sin_t):\n if self.state is None:\n self.init_state(Sin_t)\n\n state = self.state\n Q = self.beta * state.Q + Sin_t\n P = self.alpha * state.P + state.Q \n R = self.alpharp * state.R - state.S * self.wrp\n U = self.base_layer(P) + R\n S = self.sg_function(U)\n self.state = self.NeuronState(P=P, Q=Q, R=R, S=S)\n if self.do_detach: \n state_detach(self.state)\n return S, U\n \n def reset_parameters(self, layer):\n layer.reset_parameters()\n if hasattr(layer, 'out_channels'): #its a convolution\n n = layer.in_channels\n for k in layer.kernel_size:\n n *= k\n stdv = 1. / np.sqrt(n) / 250\n layer.weight.data.uniform_(-stdv * 1e-2, stdv * 1e-2)\n if layer.bias is not None:\n layer.bias.data.uniform_(-stdv, stdv)\n elif hasattr(layer, 'out_features'): \n layer.weight.data[:]*=0\n if layer.bias is not None:\n layer.bias.data.uniform_(-1e-3,1e-3)\n else:\n warnings.warn('Unhandled data type, not resetting parameters')\n\nclass LIFLayerVariableTau(LIFLayer):\n def __init__(self, layer, alpha=.9, alpharp=.65, wrp=1.0, beta=.85, deltat=1000, random_tau=True, do_detach=True):\n super(LIFLayerVariableTau, self).__init__(layer, alpha, alpharp, wrp, beta, deltat)\n self.random_tau = random_tau\n self.alpha_mean = self.alpha\n self.beta_mean = self.beta\n self.do_detach = do_detach\n \n def randomize_tau(self, im_size, tau, std__mean = .25, tau_min = 5., tau_max = 200.):\n '''\n Returns a random (normally distributed) temporal constant of size im_size computed as\n `1 / Dt*tau where Dt is the temporal window, and tau is a random value expressed in microseconds\n between low and high.\n :param im_size: input shape\n :param mean__std: mean to standard deviation\n :return: 1/Dt*tau\n '''\n tau_v = torch.empty(im_size)\n tau_v.normal_(1, std__mean)\n tau_v.data[:] *= tau \n tau_v[tau_v<tau_min]=tau_min\n tau_v[tau_v>=tau_max]=tau_max\n #tau = np.broadcast_to(tau, (im_size[0], im_size[1], channels)).transpose(2, 0, 1)\n return torch.Tensor(1 - 1. 
/ tau_v) \n \n def init_parameters(self, Sin_t):\n device = self.get_device()\n input_shape = list(Sin_t.shape)\n if self.random_tau:\n tau_m = 1./(1-self.alpha_mean)\n tau_s = 1./(1-self.beta_mean)\n self.alpha = self.randomize_tau(input_shape[1:], tau_m).to(device)\n self.beta = self.randomize_tau(input_shape[1:], tau_s).to(device)\n else:\n tau_m = 1./(1-self.alpha_mean)\n tau_s = 1./(1-self.beta_mean)\n self.alpha = torch.ones(input_shape[1:]).to(device)*self.alpha_mean.to(device)\n self.beta = torch.ones(input_shape[1:]).to(device)*self.beta_mean.to(device)\n self.alpha = self.alpha.view(Sin_t.shape[1:])\n self.beta = self.beta.view(Sin_t.shape[1:])\n self.tau_m = torch.nn.Parameter(1. / (1 - self.alpha), requires_grad = False)\n self.tau_s = torch.nn.Parameter(1. / (1 - self.beta), requires_grad = False)\n self.reset_parameters(self.base_layer)\n\nclass DECOLLEBase(nn.Module):\n requires_init = True\n output_statenames = OrderedDict(zip(['s', 'r', 'u'],[0, 1, 2]))\n def __init__(self):\n\n self.burnin = 0\n super(DECOLLEBase, self).__init__()\n\n self.LIF_layers = nn.ModuleList()\n self.readout_layers = nn.ModuleList()\n\n def __len__(self):\n return len(self.LIF_layers)\n\n def step(self, data_batch):\n raise NotImplemented('')\n\n def forward(self, data_batch, doinit=True, return_sequence=False, readout_state = 'u', *args, **kwargs):\n '''\n Run network on *data_batch* sequence.\n *args*\n data_batch : Sequence has shape [batch_size, time]+[input_shape]\n doinit : Do an state init prior to running\n return_sequence : Return u of all layers and states\n '''\n if doinit: \n state_ = self.init(data_batch)\n t_sample = data_batch.shape[1]\n if return_sequence: \n out = [torch.empty((t_sample-self.burnin,)+state_[i].S.shape, dtype=state_[i].S.dtype) for i in range(len(self))]\n\n tidx = 0\n for t in (range(self.burnin,t_sample)):\n data_batch_t = data_batch[:,t]\n out_ = self.step(data_batch_t, *args, **kwargs)\n \n if return_sequence: \n for i in range(len(self)):\n out[i][tidx,:] = out_[self.output_statenames[readout_state]][i]\n tidx += 1\n\n if not return_sequence:\n ret = out_[self.output_statenames[readout_state]][-1], None\n else: \n ret = out_[self.output_statenames[readout_state]][-1], out\n\n \n return ret \n\n def name_param(self):\n return self.named_parameters()\n\n def get_trainable_parameters(self, layer=None):\n if layer is None:\n return chain(*[l.parameters() for l in self.LIF_layers])\n else:\n return self.LIF_layers[layer].parameters()\n\n def get_trainable_named_parameters(self, layer=None):\n if layer is None:\n params = dict()\n for k,p in self.named_parameters():\n if p.requires_grad:\n params[k]=p\n\n return params\n else:\n return self.LIF_layers[layer].named_parameters()\n\n def init(self, data_batch, burnin = None):\n '''\n Necessary to reset the state of the network whenever a new batch is presented\n '''\n if burnin is None:\n burnin = self.burnin\n if self.requires_init is False:\n return\n for l in self.LIF_layers:\n l.state = None\n with torch.no_grad():\n for t in (range(0,max(self.burnin,1))):\n data_batch_t = data_batch[:,t]\n out_ = self.step(data_batch_t)\n\n for l in self.LIF_layers: state_detach(l.state)\n\n return [l.state for l in self.LIF_layers]\n\n def init_parameters(self, data_batch):\n with torch.no_grad():\n Sin_t = data_batch[:, 0, :, :]\n s_out, r_out = self.step(Sin_t)[:2]\n ins = [self.LIF_layers[0].state.Q]+s_out\n for i,l in enumerate(self.LIF_layers):\n l.init_parameters(ins[i])\n\n def reset_lc_parameters(self, layer, lc_ampl):\n 
stdv = lc_ampl / np.sqrt(layer.weight.size(1))\n layer.weight.data.uniform_(-stdv, stdv)\n self.reset_lc_bias_parameters(layer,lc_ampl)\n\n def reset_lc_bias_parameters(self, layer, lc_ampl):\n stdv = lc_ampl / np.sqrt(layer.weight.size(1))\n if layer.bias is not None:\n layer.bias.data.uniform_(-stdv, stdv)\n \n def get_input_layer_device(self):\n if hasattr(self.LIF_layers[0], 'get_device'):\n return self.LIF_layers[0].get_device() \n else:\n return list(self.LIF_layers[0].parameters())[0].device\n\n def get_output_layer_device(self):\n return self.output_layer.weight.device \n\n def process_output(net, data_batch):\n '''\n Process the outputs of step run over entire sequence data_batch as a continugous array.\n *data_batch*: batch of inputs, same shape as for data_batch in step()\n '''\n with torch.no_grad():\n from decolle.utils import tonp\n net.init(data_batch)\n t = (data_batch.shape[1],)\n out_states = net.step(data_batch[:,0])\n readouts = [None for _ in net.output_statenames]\n for k,v in net.output_statenames.items():\n readouts[v] = [np.zeros(t+tonp(layer).shape ) for layer in out_states[v] if layer is not None]\n\n for t in range(data_batch.shape[1]):\n net.state = None\n out_states = net.step(data_batch[:,t])\n for i in range(len(net.LIF_layers)):\n for k,v in net.output_statenames.items():\n if out_states[v] is not None:\n if len(out_states[v])>0:\n if out_states[v][i] is not None: \n readouts[v][i][t,:] = [tonp(output) for output in out_states[v][i]]\n\n return readouts\n\n\n\nclass DECOLLELoss(object):\n def __init__(self, loss_fn, net, reg_l = None):\n self.loss_fn = loss_fn\n self.nlayers = len(net)\n self.num_losses = len([l for l in loss_fn if l is not None])\n self.loss_layer = [i for i,l in enumerate(loss_fn) if l is not None]\n if len(loss_fn)!=self.nlayers:\n warnings.warn(\"Mismatch is in number of loss functions and layers. You need to specify one loss function per layer\")\n self.reg_l = reg_l\n if self.reg_l is None: \n self.reg_l = [0 for _ in range(self.nlayers)]\n\n def __len__(self):\n return self.nlayers\n\n def __call__(self, s, r, u, target, mask=1, sum_=True):\n loss_tv = []\n for i,loss_layer in enumerate(self.loss_fn):\n if loss_layer is not None:\n loss_tv.append(loss_layer(r[i]*mask, target*mask))\n if self.reg_l[i]>0:\n uflat = u[i].reshape(u[i].shape[0],-1)\n reg1_loss = self.reg_l[i]*((relu(uflat+.01)*mask)).mean()\n reg2_loss = self.reg_l[i]*3e-3*relu((mask*(.1-sigmoid(uflat))).mean())\n loss_tv[-1] += reg1_loss + reg2_loss\n\n if sum_:\n return sum(loss_tv)\n else:\n return loss_tv\n\n\n" ]
[ [ "torch.zeros", "torch.sigmoid", "torch.nn.ModuleList", "torch.nn.Sigmoid", "torch.no_grad", "torch.nn.Parameter", "torch.nn.ReLU", "torch.abs", "torch.ones", "torch.tensor", "numpy.sqrt", "torch.empty", "torch.Tensor" ] ]
huan/addons
[ "2de92d51297c7cfa631512dea4eaa3cc32530164" ]
[ "tensorflow_addons/optimizers/average_wrapper.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport six\n\nimport tensorflow as tf\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass AveragedOptimizerWrapper(tf.keras.optimizers.Optimizer):\n def __init__(self,\n optimizer,\n sequential_update=True,\n name=\"AverageOptimizer\",\n **kwargs):\n super(AveragedOptimizerWrapper, self).__init__(name, **kwargs)\n\n if isinstance(optimizer, str):\n optimizer = tf.keras.optimizers.get(optimizer)\n\n if not isinstance(optimizer, tf.keras.optimizers.Optimizer):\n raise TypeError(\n 'optimizer is not an object of tf.keras.optimizers.Optimizer')\n\n if not isinstance(sequential_update, bool):\n raise TypeError(\"sequential_update must be of bool type\")\n\n self._optimizer = optimizer\n self._sequential_update = sequential_update\n\n def _create_slots(self, var_list):\n self._optimizer._create_slots(var_list=var_list) # pylint: disable=protected-access\n for var in var_list:\n self.add_slot(var, 'average')\n\n def _create_hypers(self):\n self._optimizer._create_hypers() # pylint: disable=protected-access\n\n def _prepare(self, var_list):\n return self._optimizer._prepare(var_list=var_list) # pylint: disable=protected-access\n\n def apply_gradients(self, grads_and_vars, name=None):\n self._optimizer._iterations = self.iterations # pylint: disable=protected-access\n return super(AveragedOptimizerWrapper, self).apply_gradients(\n grads_and_vars, name)\n\n @abc.abstractmethod\n def average_op(self, var, average_var):\n raise NotImplementedError\n\n def _apply_average_op(self, train_op, var):\n average_var = self.get_slot(var, 'average')\n if self._sequential_update:\n with tf.control_dependencies([train_op]):\n avg_op = self.average_op(var, average_var)\n else:\n avg_op = self.average_op(var, average_var)\n\n return avg_op\n\n def _resource_apply_dense(self, grad, var):\n train_op = self._optimizer._resource_apply_dense(grad, var) # pylint: disable=protected-access\n average_op = self._apply_average_op(train_op, var)\n return tf.group(train_op, average_op)\n\n def _resource_apply_sparse(self, grad, var, indices):\n train_op = self._optimizer._resource_apply_sparse( # pylint: disable=protected-access\n grad, var, indices)\n average_op = self._apply_average_op(train_op, var)\n return tf.group(train_op, average_op)\n\n def _resource_apply_sparse_duplicate_indices(self, grad, var, indices):\n train_op = self._optimizer._resource_apply_sparse_duplicate_indices( # pylint: disable=protected-access\n grad, var, indices)\n average_op = self._apply_average_op(train_op, var)\n return tf.group(train_op, average_op)\n\n def assign_average_vars(self, var_list):\n \"\"\"Assign variables in var_list with their respective averages.\n\n Args:\n var_list: List of model variables to be assigned to 
their average.\n\n Returns:\n assign_op: The op corresponding to the assignment operation of\n variables to their average.\n\n Example:\n ```python\n model = tf.Sequential([...])\n opt = tfa.optimizers.SWA(\n tf.keras.optimizers.SGD(lr=2.0), 100, 10)\n model.compile(opt, ...)\n model.fit(x, y, ...)\n\n # Update the weights to their mean before saving\n opt.assign_average_vars(model.variables)\n\n model.save('model.h5')\n ```\n \"\"\"\n assign_op = tf.group([\n var.assign(self.get_slot(var, 'average')) for var in var_list\n if var.trainable\n ])\n return assign_op\n\n def get_config(self):\n config = {\n 'optimizer': tf.keras.optimizers.serialize(self._optimizer),\n 'sequential_update': self._sequential_update\n }\n base_config = super(AveragedOptimizerWrapper, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n optimizer = tf.keras.optimizers.deserialize(\n config.pop('optimizer'),\n custom_objects=custom_objects,\n )\n return cls(optimizer, **config)\n\n @property\n def weights(self):\n return self._weights + self._optimizer.weights\n\n @property\n def lr(self):\n return self._optimizer._get_hyper('learning_rate') # pylint: disable=protected-access\n\n @lr.setter\n def lr(self, lr):\n self._optimizer._set_hyper('learning_rate', lr) # pylint: disable=protected-access\n\n @property\n def learning_rate(self):\n return self._optimizer._get_hyper('learning_rate') # pylint: disable=protected-access\n\n @learning_rate.setter\n def learning_rate(self, learning_rate):\n self._optimizer._set_hyper('learning_rate', learning_rate) # pylint: disable=protected-access\n" ]
[ [ "tensorflow.group", "tensorflow.keras.optimizers.get", "tensorflow.keras.optimizers.serialize", "tensorflow.control_dependencies" ] ]
stevemandala/botorch
[ "4c094b8fe46513f895ce7c4409ff40f34fa9ddd6" ]
[ "test/models/test_utils.py" ]
[ "#! /usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport warnings\n\nimport torch\nfrom botorch import settings\nfrom botorch.exceptions import InputDataError, InputDataWarning\nfrom botorch.models.utils import (\n add_output_dim,\n check_min_max_scaling,\n check_no_nans,\n check_standardization,\n multioutput_to_batch_mode_transform,\n validate_input_scaling,\n)\nfrom botorch.utils.testing import BotorchTestCase\n\n\nclass TestMultiOutputToBatchModeTransform(BotorchTestCase):\n def test_multioutput_to_batch_mode_transform(self):\n for dtype in (torch.float, torch.double):\n tkwargs = {\"device\": self.device, \"dtype\": dtype}\n n = 3\n num_outputs = 2\n train_X = torch.rand(n, 1, **tkwargs)\n train_Y = torch.rand(n, num_outputs, **tkwargs)\n train_Yvar = torch.rand(n, num_outputs, **tkwargs)\n X_out, Y_out, Yvar_out = multioutput_to_batch_mode_transform(\n train_X=train_X,\n train_Y=train_Y,\n num_outputs=num_outputs,\n train_Yvar=train_Yvar,\n )\n expected_X_out = train_X.unsqueeze(0).expand(num_outputs, -1, 1)\n self.assertTrue(torch.equal(X_out, expected_X_out))\n self.assertTrue(torch.equal(Y_out, train_Y.transpose(0, 1)))\n self.assertTrue(torch.equal(Yvar_out, train_Yvar.transpose(0, 1)))\n\n\nclass TestAddOutputDim(BotorchTestCase):\n def test_add_output_dim(self):\n for dtype in (torch.float, torch.double):\n tkwargs = {\"device\": self.device, \"dtype\": dtype}\n original_batch_shape = torch.Size([2])\n # check exception is raised when trailing batch dims do not line up\n X = torch.rand(2, 3, 2, 1, **tkwargs)\n with self.assertRaises(RuntimeError):\n add_output_dim(X=X, original_batch_shape=original_batch_shape)\n # test no new batch dims\n X = torch.rand(2, 2, 1, **tkwargs)\n X_out, output_dim_idx = add_output_dim(\n X=X, original_batch_shape=original_batch_shape\n )\n self.assertTrue(torch.equal(X_out, X.unsqueeze(1)))\n self.assertEqual(output_dim_idx, 1)\n # test new batch dims\n X = torch.rand(3, 2, 2, 1, **tkwargs)\n X_out, output_dim_idx = add_output_dim(\n X=X, original_batch_shape=original_batch_shape\n )\n self.assertTrue(torch.equal(X_out, X.unsqueeze(2)))\n self.assertEqual(output_dim_idx, 2)\n\n\nclass TestInputDataChecks(BotorchTestCase):\n def test_check_no_nans(self):\n check_no_nans(torch.tensor([1.0, 2.0]))\n with self.assertRaises(InputDataError):\n check_no_nans(torch.tensor([1.0, float(\"nan\")]))\n\n def test_check_min_max_scaling(self):\n with settings.debug(True):\n # check unscaled input in unit cube\n X = 0.1 + 0.8 * torch.rand(4, 2, 3)\n with warnings.catch_warnings(record=True) as ws:\n check_min_max_scaling(X=X)\n self.assertFalse(\n any(issubclass(w.category, InputDataWarning) for w in ws)\n )\n check_min_max_scaling(X=X, raise_on_fail=True)\n with warnings.catch_warnings(record=True) as ws:\n check_min_max_scaling(X=X, strict=True)\n self.assertTrue(\n any(issubclass(w.category, InputDataWarning) for w in ws)\n )\n self.assertTrue(any(\"not scaled\" in str(w.message) for w in ws))\n with self.assertRaises(InputDataError):\n check_min_max_scaling(X=X, strict=True, raise_on_fail=True)\n # check proper input\n Xmin, Xmax = X.min(dim=-1, keepdim=True)[0], X.max(dim=-1, keepdim=True)[0]\n Xstd = (X - Xmin) / (Xmax - Xmin)\n with warnings.catch_warnings(record=True) as ws:\n check_min_max_scaling(X=Xstd)\n self.assertFalse(\n any(issubclass(w.category, InputDataWarning) for w in ws)\n )\n 
check_min_max_scaling(X=Xstd, raise_on_fail=True)\n with warnings.catch_warnings(record=True) as ws:\n check_min_max_scaling(X=Xstd, strict=True)\n self.assertFalse(\n any(issubclass(w.category, InputDataWarning) for w in ws)\n )\n check_min_max_scaling(X=Xstd, strict=True, raise_on_fail=True)\n # check violation\n X[0, 0, 0] = 2\n with warnings.catch_warnings(record=True) as ws:\n check_min_max_scaling(X=X)\n self.assertTrue(\n any(issubclass(w.category, InputDataWarning) for w in ws)\n )\n self.assertTrue(any(\"not contained\" in str(w.message) for w in ws))\n with self.assertRaises(InputDataError):\n check_min_max_scaling(X=X, raise_on_fail=True)\n with warnings.catch_warnings(record=True) as ws:\n check_min_max_scaling(X=X, strict=True)\n self.assertTrue(\n any(issubclass(w.category, InputDataWarning) for w in ws)\n )\n self.assertTrue(any(\"not contained\" in str(w.message) for w in ws))\n with self.assertRaises(InputDataError):\n check_min_max_scaling(X=X, strict=True, raise_on_fail=True)\n\n def test_check_standardization(self):\n Y = torch.randn(3, 4, 2)\n # check standardized input\n Yst = (Y - Y.mean(dim=-2, keepdim=True)) / Y.std(dim=-2, keepdim=True)\n with settings.debug(True):\n with warnings.catch_warnings(record=True) as ws:\n check_standardization(Y=Yst)\n self.assertFalse(\n any(issubclass(w.category, InputDataWarning) for w in ws)\n )\n check_standardization(Y=Yst, raise_on_fail=True)\n # check nonzero mean\n with warnings.catch_warnings(record=True) as ws:\n check_standardization(Y=Yst + 1)\n self.assertTrue(\n any(issubclass(w.category, InputDataWarning) for w in ws)\n )\n self.assertTrue(any(\"not standardized\" in str(w.message) for w in ws))\n with self.assertRaises(InputDataError):\n check_standardization(Y=Yst + 1, raise_on_fail=True)\n # check non-unit variance\n with warnings.catch_warnings(record=True) as ws:\n check_standardization(Y=Yst * 2)\n self.assertTrue(\n any(issubclass(w.category, InputDataWarning) for w in ws)\n )\n self.assertTrue(any(\"not standardized\" in str(w.message) for w in ws))\n with self.assertRaises(InputDataError):\n check_standardization(Y=Yst * 2, raise_on_fail=True)\n\n def test_validate_input_scaling(self):\n train_X = 2 + torch.rand(3, 4, 3)\n train_Y = torch.randn(3, 4, 2)\n # check that nothing is being checked\n with settings.validate_input_scaling(False), settings.debug(True):\n with warnings.catch_warnings(record=True) as ws:\n validate_input_scaling(train_X=train_X, train_Y=train_Y)\n self.assertFalse(\n any(issubclass(w.category, InputDataWarning) for w in ws)\n )\n # check that warnings are being issued\n with settings.debug(True), warnings.catch_warnings(record=True) as ws:\n validate_input_scaling(train_X=train_X, train_Y=train_Y)\n self.assertTrue(any(issubclass(w.category, InputDataWarning) for w in ws))\n # check that errors are raised when requested\n with settings.debug(True):\n with self.assertRaises(InputDataError):\n validate_input_scaling(\n train_X=train_X, train_Y=train_Y, raise_on_fail=True\n )\n # check that no errors are being raised if everything is standardized\n train_X_min = train_X.min(dim=-1, keepdim=True)[0]\n train_X_max = train_X.max(dim=-1, keepdim=True)[0]\n train_X_std = (train_X - train_X_min) / (train_X_max - train_X_min)\n train_Y_std = (train_Y - train_Y.mean(dim=-2, keepdim=True)) / train_Y.std(\n dim=-2, keepdim=True\n )\n with settings.debug(True), warnings.catch_warnings(record=True) as ws:\n validate_input_scaling(train_X=train_X_std, train_Y=train_Y_std)\n 
self.assertFalse(any(issubclass(w.category, InputDataWarning) for w in ws))\n # test that negative variances raise an error\n train_Yvar = torch.rand_like(train_Y_std)\n train_Yvar[0, 0, 1] = -0.5\n with settings.debug(True):\n with self.assertRaises(InputDataError):\n validate_input_scaling(\n train_X=train_X_std, train_Y=train_Y_std, train_Yvar=train_Yvar\n )\n # check that NaNs raise errors\n train_X_std[0, 0, 0] = float(\"nan\")\n with settings.debug(True):\n with self.assertRaises(InputDataError):\n validate_input_scaling(train_X=train_X_std, train_Y=train_Y_std)\n" ]
[ [ "torch.Size", "torch.rand", "torch.rand_like", "torch.tensor", "torch.equal", "torch.randn" ] ]
alal9987/keras-deeplab-v3-plus
[ "bab1a66021ce446ad601ee6a0a89ca9a156151e6" ]
[ "metrics.py" ]
[ "import numpy as np\nimport settings\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom tensorflow.compat.v1 import to_int32\ntf.config.experimental_run_functions_eagerly(True)\n\n_IS_TF_2 = True\n\n\ndef Jaccard(y_true, y_pred):\n nb_classes = K.int_shape(y_pred)[-1]\n iou = []\n pred_pixels = K.argmax(y_pred, axis=-1)\n #print('***', K.shape(y_true))\n #print(pred_pixels[0,0,0])\n for i in range(0, nb_classes): # exclude first label (background) and last label (void)\n true_labels = K.equal(y_true[:,:,:,0], i)\n pred_labels = K.equal(pred_pixels, i)\n inter = to_int32(true_labels & pred_labels)\n union = to_int32(true_labels | pred_labels)\n legal_batches = K.sum(to_int32(true_labels), axis=1)>0\n ious = K.sum(inter, axis=1)/K.sum(union, axis=1)\n if _IS_TF_2:\n iou.append(K.mean(ious[legal_batches]))\n else:\n iou.append(K.mean(tf.gather(ious, indices=tf.where(legal_batches)))) # returns average IoU of the same objects\n iou = tf.stack(iou)\n legal_labels = ~tf.math.is_nan(iou) if _IS_TF_2 else ~tf.debugging.is_nan(iou)\n iou = iou[legal_labels] if _IS_TF_2 else tf.gather(iou, indices=tf.where(legal_labels))\n return K.mean(iou)\n\n\nclass MIOU(tf.keras.metrics.MeanIoU):\n # https://stackoverflow.com/questions/60507120/how-to-correctly-use-the-tensorflow-meaniou-metric\n def update_state(self, y_true, y_pred, sample_weight=None):\n return super().update_state(tf.argmax(y_true, axis=-1),\n tf.argmax(y_pred, axis=-1), sample_weight)\n" ]
[ [ "tensorflow.keras.backend.argmax", "tensorflow.keras.backend.int_shape", "tensorflow.keras.backend.sum", "tensorflow.where", "tensorflow.argmax", "tensorflow.config.experimental_run_functions_eagerly", "tensorflow.keras.backend.mean", "tensorflow.keras.backend.equal", "tensorflow.stack", "tensorflow.math.is_nan", "tensorflow.debugging.is_nan", "tensorflow.compat.v1.to_int32" ] ]
jyu00/qiskit-aer
[ "de5a91171c9e2111ad94fa564abbd9922e7303a0" ]
[ "test/terra/backends/qasm_simulator/qasm_snapshot.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nQasmSimulator Integration Tests for Snapshot instructions\n\"\"\"\n\nimport logging\nimport itertools as it\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, execute\nimport qiskit.quantum_info as qi\nfrom qiskit.compiler import assemble\nfrom qiskit.quantum_info import DensityMatrix, Pauli, Operator\nfrom qiskit.providers.aer import QasmSimulator\nfrom qiskit.providers.aer import AerError\nfrom qiskit.providers.aer.extensions import Snapshot\n\nfrom test.terra.reference.ref_snapshot_state import (\n snapshot_state_circuits_deterministic, snapshot_state_counts_deterministic,\n snapshot_state_pre_measure_statevector_deterministic,\n snapshot_state_post_measure_statevector_deterministic,\n snapshot_state_circuits_nondeterministic,\n snapshot_state_counts_nondeterministic,\n snapshot_state_pre_measure_statevector_nondeterministic,\n snapshot_state_post_measure_statevector_nondeterministic)\nfrom test.terra.reference.ref_snapshot_probabilities import (\n snapshot_probabilities_circuits, snapshot_probabilities_counts,\n snapshot_probabilities_labels_qubits,\n snapshot_probabilities_post_meas_probs,\n snapshot_probabilities_pre_meas_probs)\nfrom test.terra.reference.ref_snapshot_expval import (\n snapshot_expval_circuits, snapshot_expval_counts, snapshot_expval_labels,\n snapshot_expval_post_meas_values, snapshot_expval_pre_meas_values)\n\n\nclass QasmSnapshotStatevectorTests:\n \"\"\"QasmSimulator snapshot statevector tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',\n 'matrix_product_state'\n ]\n BACKEND_OPTS = {}\n\n def statevector_snapshots(self, data, label):\n \"\"\"Format snapshots as list of Numpy arrays\"\"\"\n snaps = data.get(\"snapshots\", {}).get(\"statevector\", {}).get(label, [])\n statevecs = []\n for snap in snaps:\n self.assertIsInstance(snap, np.ndarray)\n statevecs.append(snap)\n return statevecs\n\n def test_snapshot_statevector_pre_measure_det(self):\n \"\"\"Test snapshot statevector before deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'statevector',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.statevector_snapshots(data, label)\n self.assertTrue(len(snaps), 1)\n target = statevec_targets[j]\n value = snaps[0]\n 
self.assertTrue(np.allclose(value, target))\n\n def test_snapshot_statevector_pre_measure_nondet(self):\n \"\"\"Test snapshot statevector before non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'statevector',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.statevector_snapshots(data, label)\n self.assertTrue(len(snaps), 1)\n target = statevec_targets[j]\n value = snaps[0]\n self.assertTrue(np.allclose(value, target))\n\n def test_snapshot_statevector_post_measure_det(self):\n \"\"\"Test snapshot statevector after deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'statevector',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:\n logging.getLogger().setLevel(logging.CRITICAL)\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.statevector_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n target = statevec_targets[i].get(mem)\n self.assertTrue(np.allclose(snaps[j], target))\n\n def test_snapshot_statevector_post_measure_nondet(self):\n \"\"\"Test snapshot statevector after non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'statevector',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.statevector_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n target = statevec_targets[i].get(mem)\n 
self.assertTrue(np.allclose(snaps[j], target))\n\n\nclass QasmSnapshotStabilizerTests:\n \"\"\"QasmSimulator method snapshot stabilizer tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = ['automatic', 'stabilizer']\n BACKEND_OPTS = {}\n\n @staticmethod\n def stabilizer_snapshots(data, label):\n \"\"\"Get stabilizer snapshots\"\"\"\n return data.get(\"snapshots\", {}).get(\"stabilizer\", {}).get(label, [])\n\n @staticmethod\n def stabilizes_statevector(stabilizer, statevector):\n \"\"\"Return True if two stabilizer states are equal.\"\"\"\n # Get stabilizer and destabilizers and convert to sets\n for stab in stabilizer:\n if stab[0] == '-':\n pauli_mat = -1 * Pauli.from_label(stab[1:]).to_matrix()\n else:\n pauli_mat = Pauli.from_label(stab).to_matrix()\n val = statevector.conj().dot(pauli_mat.dot(statevector))\n if not np.isclose(val, 1):\n return False\n return True\n\n def test_snapshot_stabilizer_pre_measure_det(self):\n \"\"\"Test snapshot stabilizer before deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'stabilizer',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.stabilizer_snapshots(data, label)\n self.assertEqual(len(snaps), 1)\n statevec = statevec_targets[j]\n stabilizer = snaps[0]\n self.assertTrue(\n self.stabilizes_statevector(stabilizer, statevec))\n\n def test_snapshot_stabilizer_pre_measure_nondet(self):\n \"\"\"Test snapshot stabilizer before non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'stabilizer',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.stabilizer_snapshots(data, label)\n self.assertEqual(len(snaps), 1)\n statevec = statevec_targets[j]\n stabilizer = snaps[0]\n self.assertTrue(\n self.stabilizes_statevector(stabilizer, statevec))\n\n def test_snapshot_stabilizer_post_measure_det(self):\n \"\"\"Test snapshot stabilizer after deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = 
snapshot_state_post_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'stabilizer',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.stabilizer_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n statevec = statevec_targets[i].get(mem)\n stabilizer = snaps[j]\n self.assertTrue(\n self.stabilizes_statevector(stabilizer, statevec))\n\n def test_snapshot_stabilizer_post_measure_nondet(self):\n \"\"\"Test snapshot stabilizer after non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'stabilizer',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.stabilizer_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n statevec = statevec_targets[i].get(mem)\n stabilizer = snaps[j]\n self.assertTrue(\n self.stabilizes_statevector(stabilizer, statevec))\n\n\nclass QasmSnapshotDensityMatrixTests:\n \"\"\"QasmSimulator snapshot density matrix tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic',\n 'statevector',\n 'statevector_gpu',\n 'statevector_thrust',\n 'density_matrix',\n 'density_matrix_gpu',\n 'density_matrix_thrust'\n ]\n BACKEND_OPTS = {}\n\n def test_density_matrix_snapshot_ideal(self):\n seed = 500\n op = qi.random_unitary(8, seed=seed)\n circ = QuantumCircuit(3)\n circ.append(op, [0, 1, 2])\n method = self.BACKEND_OPTS.get('method', 'automatic')\n label = 'density_matrix'\n snap_qargs = [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0],\n [0, 1], [1, 0], [0, 2], [2, 0], [1, 2], [2, 1],\n [0], [1], [2]]\n evolve_qargs = [[0, 1, 2], [0, 2, 1], [1, 0, 2], [2, 0, 1], [1, 2, 0], [2, 1, 0],\n [0, 1, 2], [1, 0, 2], [0, 2, 1], [1, 2, 0], [2, 0, 1], [2, 1, 0],\n [0, 1, 2], [1, 0, 2], [2, 1, 0]]\n for squbits, equbits in zip(snap_qargs, evolve_qargs):\n with self.subTest(msg='qubits={}'.format(squbits)):\n num_qubits = len(squbits)\n tmp = circ.copy()\n tmp.append(Snapshot(label, 'density_matrix', num_qubits), squbits)\n result = execute(tmp, self.SIMULATOR,\n backend_options=self.BACKEND_OPTS).result()\n if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(result.success)\n else:\n self.assertSuccess(result)\n snapshots = 
result.data(0)['snapshots']['density_matrix']\n value = qi.DensityMatrix(snapshots[label][0]['value'])\n target = qi.DensityMatrix.from_label(3 * '0').evolve(circ, equbits)\n if num_qubits == 2:\n target = qi.partial_trace(target, [2])\n elif num_qubits == 1:\n target = qi.partial_trace(target, [1, 2])\n self.assertEqual(value, target)\n\n\nclass QasmSnapshotProbabilitiesTests:\n \"\"\"QasmSimulator snapshot probabilities tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic',\n 'statevector',\n 'statevector_gpu',\n 'statevector_thrust',\n 'stabilizer',\n 'density_matrix',\n 'density_matrix_gpu',\n 'density_matrix_thrust',\n 'matrix_product_state',\n ]\n BACKEND_OPTS = {}\n\n @staticmethod\n def probability_snapshots(data, labels):\n \"\"\"Format snapshots as nested dicts\"\"\"\n # Check snapshot entry exists in data\n output = {}\n for label in labels:\n snaps = data.get(\"snapshots\", {}).get(\"probabilities\",\n {}).get(label, [])\n output[label] = {\n snap_dict['memory']: snap_dict['value']\n for snap_dict in snaps\n }\n return output\n\n def test_snapshot_probabilities_pre_measure(self):\n \"\"\"Test snapshot probabilities before final measurement\"\"\"\n shots = 1000\n labels = list(snapshot_probabilities_labels_qubits().keys())\n counts_targets = snapshot_probabilities_counts(shots)\n prob_targets = snapshot_probabilities_pre_meas_probs()\n\n circuits = snapshot_probabilities_circuits(post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotProbabilitiesTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.probability_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = prob_targets[j].get(label, {}).get(memory, {})\n self.assertDictAlmostEqual(value, target, delta=1e-7)\n\n def test_snapshot_probabilities_post_measure(self):\n \"\"\"Test snapshot probabilities after final measurement\"\"\"\n shots = 1000\n labels = list(snapshot_probabilities_labels_qubits().keys())\n counts_targets = snapshot_probabilities_counts(shots)\n prob_targets = snapshot_probabilities_post_meas_probs()\n\n circuits = snapshot_probabilities_circuits(post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotProbabilitiesTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.probability_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n for memory, value in snaps.items():\n target = prob_targets[j].get(label, {}).get(memory, {})\n self.assertDictAlmostEqual(value, target, 
delta=1e-7)\n\n\nclass QasmSnapshotExpValPauliTests:\n \"\"\"QasmSimulator snapshot pauli expectation value tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',\n 'density_matrix', 'density_matrix_gpu', 'density_matrix_thrust',\n 'matrix_product_state', 'stabilizer'\n ]\n BACKEND_OPTS = {}\n\n @staticmethod\n def expval_snapshots(data, labels):\n \"\"\"Format snapshots as nested dicts\"\"\"\n # Check snapshot entry exists in data\n output = {}\n for label in labels:\n snaps = data.get(\"snapshots\", {}).get(\"expectation_value\",\n {}).get(label, [])\n # Convert list into dict\n inner = {}\n for snap_dict in snaps:\n val = snap_dict['value']\n inner[snap_dict['memory']] = val\n output[label] = inner\n return output\n\n def test_snapshot_expval_pauli_pre_measure(self):\n \"\"\"Test snapshot expectation value (pauli) before final measurement\"\"\"\n shots = 1000\n labels = snapshot_expval_labels()\n counts_targets = snapshot_expval_counts(shots)\n value_targets = snapshot_expval_pre_meas_values()\n\n circuits = snapshot_expval_circuits(pauli=True, post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotExpValPauliTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.expval_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = value_targets[j].get(label,\n {}).get(memory, {})\n self.assertAlmostEqual(value, target, delta=1e-7)\n\n def test_snapshot_expval_pauli_post_measure(self):\n \"\"\"Test snapshot expectation value (pauli) after final measurement\"\"\"\n shots = 1000\n labels = snapshot_expval_labels()\n counts_targets = snapshot_expval_counts(shots)\n value_targets = snapshot_expval_post_meas_values()\n\n circuits = snapshot_expval_circuits(pauli=True, post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotExpValPauliTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.expval_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = value_targets[j].get(label,\n {}).get(memory, {})\n self.assertAlmostEqual(value, target, delta=1e-7)\n\n\nclass QasmSnapshotExpValPauliNCTests:\n \"\"\"QasmSimulator snapshot pauli expectation value tests on random states.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',\n 'density_matrix', 'density_matrix_gpu', 
'density_matrix_thrust',\n 'matrix_product_state',\n ]\n BACKEND_OPTS = {}\n\n def general_test(self, pauli, num_qubits=None, seed=None):\n \"\"\"General test case\"\"\"\n pauli_qubits = list(range(len(pauli)))\n if num_qubits is None:\n num_qubits = len(pauli_qubits)\n\n # Prepare random N-qubit product input state\n # from seed\n rng = np.random.default_rng(seed)\n params = rng.uniform(-1, 1, size=(num_qubits, 3))\n init_circ = QuantumCircuit(num_qubits)\n for i, par in enumerate(params):\n init_circ.u3(*par, i)\n\n # Compute the target expectation value\n rho = DensityMatrix.from_instruction(init_circ)\n op = Operator.from_label(pauli)\n target = np.trace(Operator(rho).compose(op, pauli_qubits).data)\n\n # Simulate expectation value\n qc = init_circ.copy()\n qc.snapshot_expectation_value('final', [(1, pauli)], pauli_qubits)\n qobj = assemble(qc)\n result = self.SIMULATOR.run(\n qobj, backend_options=self.BACKEND_OPTS).result()\n self.assertSuccess(result)\n snapshots = result.data(0).get('snapshots', {})\n self.assertIn('expectation_value', snapshots)\n self.assertIn('final', snapshots['expectation_value'])\n expval = snapshots.get('expectation_value', {})['final'][0]['value']\n self.assertAlmostEqual(expval, target)\n\n def test_pauli1(self):\n \"\"\"Test all 1-qubit Pauli snapshots.\"\"\"\n seed = 100\n for tup in ['I', 'X', 'Y', 'Z']:\n pauli = ''.join(reversed(tup))\n with self.subTest(msg='Pauli {}'.format(pauli)):\n self.general_test(pauli, num_qubits=3, seed=seed)\n\n def test_pauli2(self):\n \"\"\"Test all 2-qubit Pauli snapshots.\"\"\"\n seed = 100\n for tup in it.product(['I', 'X', 'Y', 'Z'], repeat=2):\n pauli = ''.join(reversed(tup))\n with self.subTest(msg='Pauli {}'.format(pauli)):\n self.general_test(pauli, num_qubits=3, seed=seed)\n\n def test_pauli3(self):\n \"\"\"Test all 3-qubit Pauli snapshots.\"\"\"\n seed = 100\n for tup in it.product(['I', 'X', 'Y', 'Z'], repeat=3):\n pauli = ''.join(reversed(tup))\n with self.subTest(msg='Pauli {}'.format(pauli)):\n self.general_test(pauli, num_qubits=3, seed=seed)\n\n\nclass QasmSnapshotExpValMatrixTests:\n \"\"\"QasmSimulator snapshot pauli expectation value tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',\n 'matrix_product_state'\n ]\n BACKEND_OPTS = {}\n\n @staticmethod\n def expval_snapshots(data, labels):\n \"\"\"Format snapshots as nested dicts\"\"\"\n # Check snapshot entry exists in data\n output = {}\n for label in labels:\n snaps = data.get(\"snapshots\", {}).get(\"expectation_value\",\n {}).get(label, [])\n # Convert list into dict\n inner = {}\n for snap_dict in snaps:\n inner[snap_dict['memory']] = snap_dict['value']\n output[label] = inner\n return output\n\n def test_snapshot_expval_matrix_pre_measure(self):\n \"\"\"Test snapshot expectation value (matrix) before final measurement\"\"\"\n shots = 1000\n labels = snapshot_expval_labels()\n counts_targets = snapshot_expval_counts(shots)\n value_targets = snapshot_expval_pre_meas_values()\n\n circuits = snapshot_expval_circuits(pauli=False, post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotExpValMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n 
circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.expval_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = value_targets[j].get(label,\n {}).get(memory, {})\n self.assertAlmostEqual(value, target, delta=1e-7)\n\n def test_snapshot_expval_matrix_post_measure(self):\n \"\"\"Test snapshot expectation value (matrix) after final measurement\"\"\"\n shots = 1000\n labels = snapshot_expval_labels()\n counts_targets = snapshot_expval_counts(shots)\n value_targets = snapshot_expval_post_meas_values()\n\n circuits = snapshot_expval_circuits(pauli=False, post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotExpValMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.expval_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = value_targets[j].get(label,\n {}).get(memory, {})\n self.assertAlmostEqual(value, target, delta=1e-7)\n" ]
[ [ "numpy.allclose", "numpy.isclose", "numpy.random.default_rng" ] ]
charliezou/muzero-general
[ "342e35fcdb97cd200d8b4534b0546eee1da8c04e", "342e35fcdb97cd200d8b4534b0546eee1da8c04e" ]
[ "trainer.py", "games/lunarlander.py" ]
[ "import time\n\nimport numpy\nimport ray\nimport torch\n\nimport models\n\n\[email protected]\nclass Trainer:\n \"\"\"\n Class which run in a dedicated thread to train a neural network and save it\n in the shared storage.\n \"\"\"\n\n def __init__(self, initial_weights, config):\n self.config = config\n self.training_step = 0\n\n # Fix random generator seed\n numpy.random.seed(self.config.seed)\n torch.manual_seed(self.config.seed)\n\n # Initialize the network\n self.model = models.MuZeroNetwork(self.config)\n self.model.set_weights(initial_weights)\n self.model.to(torch.device(self.config.training_device))\n self.model.train()\n\n if \"cuda\" not in self.config.training_device:\n print(\"You are not training on GPU.\\n\")\n\n if self.config.optimizer == \"SGD\":\n self.optimizer = torch.optim.SGD(\n self.model.parameters(),\n lr=self.config.lr_init,\n momentum=self.config.momentum,\n weight_decay=self.config.weight_decay,\n )\n elif self.config.optimizer == \"Adam\":\n self.optimizer = torch.optim.Adam(\n self.model.parameters(),\n lr=self.config.lr_init,\n weight_decay=self.config.weight_decay,\n )\n else:\n raise NotImplementedError(\n \"{} is not implemented. You can change the optimizer manually in trainer.py.\"\n )\n\n def continuous_update_weights(self, replay_buffer, shared_storage_worker):\n # Wait for the replay buffer to be filled\n while ray.get(replay_buffer.get_info.remote())[\"num_played_games\"] < 1:\n time.sleep(0.1)\n\n # Training loop\n while self.training_step < self.config.training_steps:\n index_batch, batch = ray.get(\n replay_buffer.get_batch.remote(self.model.get_weights())\n )\n self.update_lr()\n (\n priorities,\n total_loss,\n value_loss,\n reward_loss,\n policy_loss,\n ) = self.update_weights(batch)\n\n if self.config.PER:\n # Save new priorities in the replay buffer (See https://arxiv.org/abs/1803.00933)\n replay_buffer.update_priorities.remote(priorities, index_batch)\n\n # Save to the shared storage\n if self.training_step % self.config.checkpoint_interval == 0:\n shared_storage_worker.set_weights.remote(self.model.get_weights())\n shared_storage_worker.set_info.remote(\"training_step\", self.training_step)\n shared_storage_worker.set_info.remote(\n \"lr\", self.optimizer.param_groups[0][\"lr\"]\n )\n shared_storage_worker.set_info.remote(\"total_loss\", total_loss)\n shared_storage_worker.set_info.remote(\"value_loss\", value_loss)\n shared_storage_worker.set_info.remote(\"reward_loss\", reward_loss)\n shared_storage_worker.set_info.remote(\"policy_loss\", policy_loss)\n\n # Managing the self-play / training ratio\n if self.config.training_delay:\n time.sleep(self.config.training_delay)\n if self.config.ratio:\n while (\n self.training_step\n / max(\n 1, ray.get(replay_buffer.get_info.remote())[\"num_played_steps\"]\n )\n > self.config.ratio\n and self.training_step < self.config.training_steps\n ):\n time.sleep(0.5)\n\n def update_weights(self, batch):\n \"\"\"\n Perform one training step.\n \"\"\"\n\n (\n observation_batch,\n action_batch,\n target_value,\n target_reward,\n target_policy,\n weight_batch,\n gradient_scale_batch,\n ) = batch\n\n # Keep values as scalars for calculating the priorities for the prioritized replay\n target_value_scalar = numpy.array(target_value, dtype=\"float32\")\n priorities = numpy.zeros_like(target_value_scalar)\n\n device = next(self.model.parameters()).device\n weight_batch = torch.tensor(weight_batch.copy()).float().to(device)\n observation_batch = torch.tensor(observation_batch).float().to(device)\n action_batch = 
torch.tensor(action_batch).float().to(device).unsqueeze(-1)\n target_value = torch.tensor(target_value).float().to(device)\n target_reward = torch.tensor(target_reward).float().to(device)\n target_policy = torch.tensor(target_policy).float().to(device)\n gradient_scale_batch = torch.tensor(gradient_scale_batch).float().to(device)\n # observation_batch: batch, channels, height, width\n # action_batch: batch, num_unroll_steps+1, 1 (unsqueeze)\n # target_value: batch, num_unroll_steps+1\n # target_reward: batch, num_unroll_steps+1\n # target_policy: batch, num_unroll_steps+1, len(action_space)\n # gradient_scale_batch: batch, num_unroll_steps+1\n\n target_value = models.scalar_to_support(target_value, self.config.support_size)\n target_reward = models.scalar_to_support(\n target_reward, self.config.support_size\n )\n # target_value: batch, num_unroll_steps+1, 2*support_size+1\n # target_reward: batch, num_unroll_steps+1, 2*support_size+1\n\n ## Generate predictions\n value, reward, policy_logits, hidden_state = self.model.initial_inference(\n observation_batch\n )\n predictions = [(value, reward, policy_logits)]\n for i in range(1, action_batch.shape[1]):\n value, reward, policy_logits, hidden_state = self.model.recurrent_inference(\n hidden_state, action_batch[:, i]\n )\n # Scale the gradient at the start of the dynamics function (See paper appendix Training)\n hidden_state.register_hook(lambda grad: grad * 0.5)\n predictions.append((value, reward, policy_logits))\n # predictions: num_unroll_steps+1, 3, batch, 2*support_size+1 | 2*support_size+1 | 9 (according to the 2nd dim)\n\n ## Compute losses\n value_loss, reward_loss, policy_loss = (0, 0, 0)\n value, reward, policy_logits = predictions[0]\n # Ignore reward loss for the first batch step\n current_value_loss, _, current_policy_loss = self.loss_function(\n value.squeeze(-1),\n reward.squeeze(-1),\n policy_logits,\n target_value[:, 0],\n target_reward[:, 0],\n target_policy[:, 0],\n )\n value_loss += current_value_loss\n policy_loss += current_policy_loss\n # Compute priorities for the prioritized replay (See paper appendix Training)\n pred_value_scalar = (\n models.support_to_scalar(value, self.config.support_size)\n .detach()\n .cpu()\n .numpy()\n .squeeze()\n )\n priorities[:, 0] = (\n numpy.abs(pred_value_scalar - target_value_scalar[:, 0])\n ** self.config.PER_alpha\n )\n\n for i in range(1, len(predictions)):\n value, reward, policy_logits = predictions[i]\n (\n current_value_loss,\n current_reward_loss,\n current_policy_loss,\n ) = self.loss_function(\n value.squeeze(-1),\n reward.squeeze(-1),\n policy_logits,\n target_value[:, i],\n target_reward[:, i],\n target_policy[:, i],\n )\n\n # Scale gradient by the number of unroll steps (See paper appendix Training)\n current_value_loss.register_hook(\n lambda grad: grad / gradient_scale_batch[:, i]\n )\n current_reward_loss.register_hook(\n lambda grad: grad / gradient_scale_batch[:, i]\n )\n current_policy_loss.register_hook(\n lambda grad: grad / gradient_scale_batch[:, i]\n )\n\n value_loss += current_value_loss\n reward_loss += current_reward_loss\n policy_loss += current_policy_loss\n\n # Compute priorities for the prioritized replay (See paper appendix Training)\n pred_value_scalar = (\n models.support_to_scalar(value, self.config.support_size)\n .detach()\n .cpu()\n .numpy()\n .squeeze()\n )\n priorities[:, i] = (\n numpy.abs(pred_value_scalar - target_value_scalar[:, i])\n ** self.config.PER_alpha\n )\n\n # Scale the value loss, paper recommends by 0.25 (See paper appendix 
Reanalyze)\n loss = value_loss * self.config.value_loss_weight + reward_loss + policy_loss\n if self.config.PER:\n # Correct PER bias by using importance-sampling (IS) weights\n loss *= weight_batch\n # Mean over batch dimension (pseudocode do a sum)\n loss = loss.mean()\n\n # Optimize\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.training_step += 1\n\n return (\n priorities,\n # For log purpose\n loss.item(),\n value_loss.mean().item(),\n reward_loss.mean().item(),\n policy_loss.mean().item(),\n )\n\n def update_lr(self):\n \"\"\"\n Update learning rate\n \"\"\"\n lr = self.config.lr_init * self.config.lr_decay_rate ** (\n self.training_step / self.config.lr_decay_steps\n )\n for param_group in self.optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n @staticmethod\n def loss_function(\n value, reward, policy_logits, target_value, target_reward, target_policy,\n ):\n # Cross-entropy seems to have a better convergence than MSE\n value_loss = (-target_value * torch.nn.LogSoftmax(dim=1)(value)).sum(1)\n reward_loss = (-target_reward * torch.nn.LogSoftmax(dim=1)(reward)).sum(1)\n policy_loss = (-target_policy * torch.nn.LogSoftmax(dim=1)(policy_logits)).sum(\n 1\n )\n return value_loss, reward_loss, policy_loss\n", "import datetime\nimport os\n\nimport gym\nimport numpy\nimport torch\n\nfrom .abstract_game import AbstractGame\n\n\nclass MuZeroConfig:\n def __init__(self):\n # More information is available here: https://github.com/werner-duvaud/muzero-general/wiki/Hyperparameter-Optimization\n\n self.seed = 0 # Seed for numpy, torch and the game\n\n\n\n ### Game\n self.observation_shape = (1, 1, 8) # Dimensions of the game observation, must be 3D (channel, height, width). For a 1D array, please reshape it to (1, 1, length of array)\n self.action_space = list(range(4)) # Fixed list of all possible actions. You should only edit the length\n self.players = list(range(1)) # List of players. You should only edit the length\n self.stacked_observations = 0 # Number of previous observations and previous actions to add to the current observation\n\n # Evaluate\n self.muzero_player = 0 # Turn Muzero begins to play (0: MuZero plays first, 1: MuZero plays second)\n self.opponent = None # Hard coded agent that MuZero faces to assess his progress in multiplayer games. It doesn't influence training. None, \"random\" or \"expert\" if implemented in the Game class\n\n\n\n ### Self-Play\n self.num_workers = 1 # Number of simultaneous threads/workers self-playing to feed the replay buffer\n self.selfplay_device = \"cpu\" # \"cpu\" / \"cuda\"\n self.selfplay_num_gpus = 0 # Number of GPUs per actor to use for the selfplay, it can be fractional, don't fortget to take the training worker, the test worker and the other selfplay workers into account. (ex: if you have 1 GPU and num_workers=1 -> selfplay_num_gpus=1/3 because 1/3 for the training, 1/3 for test worker selfplay and 1/3 for this selfplay worker)\n self.max_moves = 700 # Maximum number of moves if game is not finished before\n self.num_simulations = 50 # Number of future moves self-simulated\n self.discount = 0.999 # Chronological discount of the reward\n self.temperature_threshold = None # Number of moves before dropping the temperature given by visit_softmax_temperature_fn to 0 (ie selecting the best action). 
If None, visit_softmax_temperature_fn is used every time\n\n # Root prior exploration noise\n self.root_dirichlet_alpha = 0.25\n self.root_exploration_fraction = 0.25\n\n # UCB formula\n self.pb_c_base = 19652\n self.pb_c_init = 1.25\n\n\n\n ### Network\n self.network = \"fullyconnected\" # \"resnet\" / \"fullyconnected\"\n self.support_size = 10 # Value and reward are scaled (with almost sqrt) and encoded on a vector with a range of -support_size to support_size\n \n # Residual Network\n self.downsample = False # Downsample observations before representation network (See paper appendix Network Architecture)\n self.blocks = 2 # Number of blocks in the ResNet\n self.channels = 16 # Number of channels in the ResNet\n self.reduced_channels_reward = 16 # Number of channels in reward head\n self.reduced_channels_value = 16 # Number of channels in value head\n self.reduced_channels_policy = 16 # Number of channels in policy head\n self.resnet_fc_reward_layers = [] # Define the hidden layers in the reward head of the dynamic network\n self.resnet_fc_value_layers = [] # Define the hidden layers in the value head of the prediction network\n self.resnet_fc_policy_layers = [] # Define the hidden layers in the policy head of the prediction network\n\n # Fully Connected Network\n self.encoding_size = 10\n self.fc_representation_layers = [] # Define the hidden layers in the representation network\n self.fc_dynamics_layers = [64] # Define the hidden layers in the dynamics network\n self.fc_reward_layers = [64] # Define the hidden layers in the reward network\n self.fc_value_layers = [64] # Define the hidden layers in the value network\n self.fc_policy_layers = [64] # Define the hidden layers in the policy network\n \n\n\n ### Training\n self.results_path = os.path.join(os.path.dirname(__file__), \"../results\", os.path.basename(__file__)[:-3], datetime.datetime.now().strftime(\"%Y-%m-%d--%H-%M-%S\")) # Path to store the model weights and TensorBoard logs\n self.save_weights = False # Save the weights in results_path as model.weights\n self.training_steps = 200000 # Total number of training steps (ie weights update according to a batch)\n self.batch_size = 64 # Number of parts of games to train on at each training step\n self.checkpoint_interval = 10 # Number of training steps before using the model for self-playing\n self.value_loss_weight = 1 # Scale the value loss to avoid overfitting of the value function, paper recommends 0.25 (See paper appendix Reanalyze)\n self.training_device = \"cuda\" if torch.cuda.is_available() else \"cpu\" # Train on GPU if available. \"cpu\" / \"cuda\"\n self.training_num_gpus = 1 # Number of GPUs to use for the training, it can be fractional, don't fortget to take the test worker and the selfplay workers into account\n\n self.optimizer = \"Adam\" # \"Adam\" or \"SGD\". 
Paper uses SGD\n self.weight_decay = 1e-4 # L2 weights regularization\n self.momentum = 0.9 # Used only if optimizer is SGD\n\n # Exponential learning rate schedule\n self.lr_init = 0.005 # Initial learning rate\n self.lr_decay_rate = 1 # Set it to 1 to use a constant learning rate\n self.lr_decay_steps = 1000\n\n\n\n ### Replay Buffer\n self.window_size = 2000 # Number of self-play games to keep in the replay buffer\n self.num_unroll_steps = 10 # Number of game moves to keep for every batch element\n self.td_steps = 30 # Number of steps in the future to take into account for calculating the target value\n self.use_last_model_value = True # Use the last model to provide a fresher, stable n-step value (See paper appendix Reanalyze)\n\n # Prioritized Replay (See paper appendix Training)\n self.PER = True # Select in priority the elements in the replay buffer which are unexpected for the network\n self.use_max_priority = True # If False, use the n-step TD error as initial priority. Better for large replay buffer\n self.PER_alpha = 1.0 # How much prioritization is used, 0 corresponding to the uniform case, paper suggests 1\n self.PER_beta = 1.0\n\n\n\n # Best known ratio for deterministic version: 0.8 --> 0.4 in 250 self played game (self_play_delay = 25 on GTX 1050Ti Max-Q).\n ### Adjust the self play / training ratio to avoid over/underfitting\n self.self_play_delay = 0 # Number of seconds to wait after each played game\n self.training_delay = 0 # Number of seconds to wait after each training step\n self.ratio = None # Desired training steps per self played step ratio. Equivalent to a synchronous version, training can take much longer. Set it to None to disable it\n\n\n def visit_softmax_temperature_fn(self, trained_steps):\n \"\"\"\n Parameter to alter the visit count distribution to ensure that the action selection becomes greedier as training progresses.\n The smaller it is, the more likely the best action (ie with the highest visit count) is chosen.\n\n Returns:\n Positive float.\n \"\"\"\n return 0.35\n\n\nclass Game(AbstractGame):\n \"\"\"\n Game wrapper.\n \"\"\"\n\n def __init__(self, seed=None):\n self.env = DeterministicLunarLander()\n # self.env = gym.make(\"LunarLander-v2\")\n if seed is not None:\n self.env.seed(seed)\n\n def step(self, action):\n \"\"\"\n Apply action to the game.\n \n Args:\n action : action of the action_space to take.\n\n Returns:\n The new observation, the reward and a boolean if the game has ended.\n \"\"\"\n observation, reward, done, _ = self.env.step(action)\n return numpy.array([[observation]]), reward / 3, done\n\n def legal_actions(self):\n \"\"\"\n Should return the legal actions at each turn, if it is not available, it can return\n the whole action space. At each turn, the game have to be able to handle one of returned actions.\n \n For complex game where calculating legal moves is too long, the idea is to define the legal actions\n equal to the action space but to return a negative reward if the action is illegal. 
\n\n Returns:\n An array of integers, subset of the action space.\n \"\"\"\n return list(range(4))\n\n def reset(self):\n \"\"\"\n Reset the game for a new game.\n \n Returns:\n Initial observation of the game.\n \"\"\"\n return numpy.array([[self.env.reset()]])\n\n def close(self):\n \"\"\"\n Properly close the game.\n \"\"\"\n self.env.close()\n\n def render(self):\n \"\"\"\n Display the game observation.\n \"\"\"\n self.env.render()\n input(\"Press enter to take a step \")\n\n def action_to_string(self, action_number):\n \"\"\"\n Convert an action number to a string representing the action.\n\n Args:\n action_number: an integer from the action space.\n\n Returns:\n String representing the action.\n \"\"\"\n actions = {\n 0: \"Do nothing\",\n 1: \"Fire left orientation engine\",\n 2: \"Fire main engine\",\n 3: \"Fire right orientation engine\",\n }\n return f\"{action_number}. {actions[action_number]}\"\n\n\n# OpenAI lunarlander, but stochastic parts have been removed except for the lander initial position.\n\n\"\"\"\nRocket trajectory optimization is a classic topic in Optimal Control.\n\nAccording to Pontryagin's maximum principle it's optimal to fire engine full throttle or\nturn it off. That's the reason this environment is OK to have discreet actions (engine on or off).\n\nThe landing pad is always at coordinates (0,0). The coordinates are the first two numbers in the state vector.\nReward for moving from the top of the screen to the landing pad and zero speed is about 100..140 points.\nIf the lander moves away from the landing pad it loses reward. The episode finishes if the lander crashes or\ncomes to rest, receiving an additional -100 or +100 points. Each leg with ground contact is +10 points.\nFiring the main engine is -0.3 points each frame. Firing the side engine is -0.03 points each frame.\nSolved is 200 points.\n\nLanding outside the landing pad is possible. Fuel is infinite, so an agent can learn to fly and then land\non its first attempt. Please see the source code for details.\n\nCreated by Oleg Klimov. 
Licensed on the same terms as the rest of OpenAI Gym.\n\"\"\"\n\n\nimport sys, math\nimport numpy as np\n\nimport Box2D\nfrom Box2D.b2 import (\n edgeShape,\n circleShape,\n fixtureDef,\n polygonShape,\n revoluteJointDef,\n contactListener,\n)\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding, EzPickle\n\nFPS = 50\nSCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well\n\nMAIN_ENGINE_POWER = 13.0\nSIDE_ENGINE_POWER = 0.6\n\nINITIAL_RANDOM = 1000.0 # Set 1500 to make game harder\n\nLANDER_POLY = [(-14, +17), (-17, 0), (-17, -10), (+17, -10), (+17, 0), (+14, +17)]\nLEG_AWAY = 20\nLEG_DOWN = 18\nLEG_W, LEG_H = 2, 8\nLEG_SPRING_TORQUE = 40\n\nSIDE_ENGINE_HEIGHT = 14.0\nSIDE_ENGINE_AWAY = 12.0\n\nVIEWPORT_W = 600\nVIEWPORT_H = 400\n\n\nclass ContactDetector(contactListener):\n def __init__(self, env):\n contactListener.__init__(self)\n self.env = env\n\n def BeginContact(self, contact):\n if (\n self.env.lander == contact.fixtureA.body\n or self.env.lander == contact.fixtureB.body\n ):\n self.env.game_over = True\n for i in range(2):\n if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:\n self.env.legs[i].ground_contact = True\n\n def EndContact(self, contact):\n for i in range(2):\n if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:\n self.env.legs[i].ground_contact = False\n\n\nclass DeterministicLunarLander(gym.Env, EzPickle):\n metadata = {\"render.modes\": [\"human\", \"rgb_array\"], \"video.frames_per_second\": FPS}\n\n continuous = False\n\n def __init__(self):\n EzPickle.__init__(self)\n self.seed()\n self.viewer = None\n\n self.world = Box2D.b2World()\n self.moon = None\n self.lander = None\n self.particles = []\n\n self.prev_reward = None\n\n # useful range is -1 .. +1, but spikes can be higher\n self.observation_space = spaces.Box(\n -np.inf, np.inf, shape=(8,), dtype=np.float32\n )\n\n if self.continuous:\n # Action is two floats [main engine, left-right engines].\n # Main engine: -1..0 off, 0..+1 throttle from 50% to 100% power. 
Engine can't work with less than 50% power.\n # Left-right: -1.0..-0.5 fire left engine, +0.5..+1.0 fire right engine, -0.5..0.5 off\n self.action_space = spaces.Box(-1, +1, (2,), dtype=np.float32)\n else:\n # Nop, fire left engine, main engine, right engine\n self.action_space = spaces.Discrete(4)\n\n self.reset()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _destroy(self):\n if not self.moon:\n return\n self.world.contactListener = None\n self._clean_particles(True)\n self.world.DestroyBody(self.moon)\n self.moon = None\n self.world.DestroyBody(self.lander)\n self.lander = None\n self.world.DestroyBody(self.legs[0])\n self.world.DestroyBody(self.legs[1])\n\n def reset(self):\n self._destroy()\n self.world.contactListener_keepref = ContactDetector(self)\n self.world.contactListener = self.world.contactListener_keepref\n self.game_over = False\n self.prev_shaping = None\n\n W = VIEWPORT_W / SCALE\n H = VIEWPORT_H / SCALE\n\n # terrain\n CHUNKS = 11\n height = [H / 3] * (CHUNKS + 1)\n chunk_x = [W / (CHUNKS - 1) * i for i in range(CHUNKS)]\n self.helipad_x1 = chunk_x[CHUNKS // 2 - 1]\n self.helipad_x2 = chunk_x[CHUNKS // 2 + 1]\n self.helipad_y = H / 4\n height[CHUNKS // 2 - 2] = self.helipad_y\n height[CHUNKS // 2 - 1] = self.helipad_y\n height[CHUNKS // 2 + 0] = self.helipad_y\n height[CHUNKS // 2 + 1] = self.helipad_y\n height[CHUNKS // 2 + 2] = self.helipad_y\n smooth_y = [\n 0.33 * (height[i - 1] + height[i + 0] + height[i + 1])\n for i in range(CHUNKS)\n ]\n\n self.moon = self.world.CreateStaticBody(\n shapes=edgeShape(vertices=[(0, 0), (W, 0)])\n )\n self.sky_polys = []\n for i in range(CHUNKS - 1):\n p1 = (chunk_x[i], smooth_y[i])\n p2 = (chunk_x[i + 1], smooth_y[i + 1])\n self.moon.CreateEdgeFixture(vertices=[p1, p2], density=0, friction=0.1)\n self.sky_polys.append([p1, p2, (p2[0], H), (p1[0], H)])\n\n self.moon.color1 = (0.0, 0.0, 0.0)\n self.moon.color2 = (0.0, 0.0, 0.0)\n\n initial_y = VIEWPORT_H / SCALE\n self.lander = self.world.CreateDynamicBody(\n position=(VIEWPORT_W / SCALE / 2, initial_y),\n angle=0.0,\n fixtures=fixtureDef(\n shape=polygonShape(\n vertices=[(x / SCALE, y / SCALE) for x, y in LANDER_POLY]\n ),\n density=5.0,\n friction=0.1,\n categoryBits=0x0010,\n maskBits=0x001, # collide only with ground\n restitution=0.0,\n ), # 0.99 bouncy\n )\n self.lander.color1 = (0.5, 0.4, 0.9)\n self.lander.color2 = (0.3, 0.3, 0.5)\n self.lander.ApplyForceToCenter(\n (\n self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM),\n self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM),\n ),\n True,\n )\n\n self.legs = []\n for i in [-1, +1]:\n leg = self.world.CreateDynamicBody(\n position=(VIEWPORT_W / SCALE / 2 - i * LEG_AWAY / SCALE, initial_y),\n angle=(i * 0.05),\n fixtures=fixtureDef(\n shape=polygonShape(box=(LEG_W / SCALE, LEG_H / SCALE)),\n density=1.0,\n restitution=0.0,\n categoryBits=0x0020,\n maskBits=0x001,\n ),\n )\n leg.ground_contact = False\n leg.color1 = (0.5, 0.4, 0.9)\n leg.color2 = (0.3, 0.3, 0.5)\n rjd = revoluteJointDef(\n bodyA=self.lander,\n bodyB=leg,\n localAnchorA=(0, 0),\n localAnchorB=(i * LEG_AWAY / SCALE, LEG_DOWN / SCALE),\n enableMotor=True,\n enableLimit=True,\n maxMotorTorque=LEG_SPRING_TORQUE,\n motorSpeed=+0.3 * i, # low enough not to jump back into the sky\n )\n if i == -1:\n rjd.lowerAngle = (\n +0.9 - 0.5\n ) # The most esoteric numbers here, angled legs have freedom to travel within\n rjd.upperAngle = +0.9\n else:\n rjd.lowerAngle = -0.9\n rjd.upperAngle = -0.9 + 0.5\n leg.joint 
= self.world.CreateJoint(rjd)\n self.legs.append(leg)\n\n self.drawlist = [self.lander] + self.legs\n\n return self.step(np.array([0, 0]) if self.continuous else 0)[0]\n\n def _create_particle(self, mass, x, y, ttl):\n p = self.world.CreateDynamicBody(\n position=(x, y),\n angle=0.0,\n fixtures=fixtureDef(\n shape=circleShape(radius=2 / SCALE, pos=(0, 0)),\n density=mass,\n friction=0.1,\n categoryBits=0x0100,\n maskBits=0x001, # collide only with ground\n restitution=0.3,\n ),\n )\n p.ttl = ttl\n self.particles.append(p)\n self._clean_particles(False)\n return p\n\n def _clean_particles(self, all):\n while self.particles and (all or self.particles[0].ttl < 0):\n self.world.DestroyBody(self.particles.pop(0))\n\n def step(self, action):\n if self.continuous:\n action = np.clip(action, -1, +1).astype(np.float32)\n else:\n assert self.action_space.contains(action), \"%r (%s) invalid \" % (\n action,\n type(action),\n )\n\n # Engines\n tip = (math.sin(self.lander.angle), math.cos(self.lander.angle))\n side = (-tip[1], tip[0])\n dispersion = [0 for _ in range(2)]\n\n m_power = 0.0\n if (self.continuous and action[0] > 0.0) or (\n not self.continuous and action == 2\n ):\n # Main engine\n if self.continuous:\n m_power = (np.clip(action[0], 0.0, 1.0) + 1.0) * 0.5 # 0.5..1.0\n assert m_power >= 0.5 and m_power <= 1.0\n else:\n m_power = 1.0\n ox = (\n tip[0] * (4 / SCALE + 2 * dispersion[0]) + side[0] * dispersion[1]\n ) # 4 is move a bit downwards, +-2 for randomness\n oy = -tip[1] * (4 / SCALE + 2 * dispersion[0]) - side[1] * dispersion[1]\n impulse_pos = (self.lander.position[0] + ox, self.lander.position[1] + oy)\n p = self._create_particle(\n 3.5, # 3.5 is here to make particle speed adequate\n impulse_pos[0],\n impulse_pos[1],\n m_power,\n ) # particles are just a decoration\n p.ApplyLinearImpulse(\n (ox * MAIN_ENGINE_POWER * m_power, oy * MAIN_ENGINE_POWER * m_power),\n impulse_pos,\n True,\n )\n self.lander.ApplyLinearImpulse(\n (-ox * MAIN_ENGINE_POWER * m_power, -oy * MAIN_ENGINE_POWER * m_power),\n impulse_pos,\n True,\n )\n\n s_power = 0.0\n if (self.continuous and np.abs(action[1]) > 0.5) or (\n not self.continuous and action in [1, 3]\n ):\n # Orientation engines\n if self.continuous:\n direction = np.sign(action[1])\n s_power = np.clip(np.abs(action[1]), 0.5, 1.0)\n assert s_power >= 0.5 and s_power <= 1.0\n else:\n direction = action - 2\n s_power = 1.0\n ox = tip[0] * dispersion[0] + side[0] * (\n 3 * dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE\n )\n oy = -tip[1] * dispersion[0] - side[1] * (\n 3 * dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE\n )\n impulse_pos = (\n self.lander.position[0] + ox - tip[0] * 17 / SCALE,\n self.lander.position[1] + oy + tip[1] * SIDE_ENGINE_HEIGHT / SCALE,\n )\n p = self._create_particle(0.7, impulse_pos[0], impulse_pos[1], s_power)\n p.ApplyLinearImpulse(\n (ox * SIDE_ENGINE_POWER * s_power, oy * SIDE_ENGINE_POWER * s_power),\n impulse_pos,\n True,\n )\n self.lander.ApplyLinearImpulse(\n (-ox * SIDE_ENGINE_POWER * s_power, -oy * SIDE_ENGINE_POWER * s_power),\n impulse_pos,\n True,\n )\n\n self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)\n\n pos = self.lander.position\n vel = self.lander.linearVelocity\n state = [\n (pos.x - VIEWPORT_W / SCALE / 2) / (VIEWPORT_W / SCALE / 2),\n (pos.y - (self.helipad_y + LEG_DOWN / SCALE)) / (VIEWPORT_H / SCALE / 2),\n vel.x * (VIEWPORT_W / SCALE / 2) / FPS,\n vel.y * (VIEWPORT_H / SCALE / 2) / FPS,\n self.lander.angle,\n 20.0 * self.lander.angularVelocity / FPS,\n 1.0 if self.legs[0].ground_contact else 
0.0,\n 1.0 if self.legs[1].ground_contact else 0.0,\n ]\n assert len(state) == 8\n\n reward = 0\n shaping = (\n -100 * np.sqrt(state[0] * state[0] + state[1] * state[1])\n - 100 * np.sqrt(state[2] * state[2] + state[3] * state[3])\n - 100 * abs(state[4])\n + 10 * state[6]\n + 10 * state[7]\n ) # And ten points for legs contact, the idea is if you\n # lose contact again after landing, you get negative reward\n if self.prev_shaping is not None:\n reward = shaping - self.prev_shaping\n self.prev_shaping = shaping\n\n reward -= (\n m_power * 0.30\n ) # less fuel spent is better, about -30 for heuristic landing\n reward -= s_power * 0.03\n\n done = False\n if self.game_over or abs(state[0]) >= 1.0:\n done = True\n reward = -100\n if not self.lander.awake:\n done = True\n reward = +100\n return np.array(state, dtype=np.float32), reward, done, {}\n\n def render(self, mode=\"human\"):\n from gym.envs.classic_control import rendering\n\n if self.viewer is None:\n self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)\n self.viewer.set_bounds(0, VIEWPORT_W / SCALE, 0, VIEWPORT_H / SCALE)\n\n for obj in self.particles:\n obj.ttl -= 0.15\n obj.color1 = (\n max(0.2, 0.2 + obj.ttl),\n max(0.2, 0.5 * obj.ttl),\n max(0.2, 0.5 * obj.ttl),\n )\n obj.color2 = (\n max(0.2, 0.2 + obj.ttl),\n max(0.2, 0.5 * obj.ttl),\n max(0.2, 0.5 * obj.ttl),\n )\n\n self._clean_particles(False)\n\n for p in self.sky_polys:\n self.viewer.draw_polygon(p, color=(0, 0, 0))\n\n for obj in self.particles + self.drawlist:\n for f in obj.fixtures:\n trans = f.body.transform\n if type(f.shape) is circleShape:\n t = rendering.Transform(translation=trans * f.shape.pos)\n self.viewer.draw_circle(\n f.shape.radius, 20, color=obj.color1\n ).add_attr(t)\n self.viewer.draw_circle(\n f.shape.radius, 20, color=obj.color2, filled=False, linewidth=2\n ).add_attr(t)\n else:\n path = [trans * v for v in f.shape.vertices]\n self.viewer.draw_polygon(path, color=obj.color1)\n path.append(path[0])\n self.viewer.draw_polyline(path, color=obj.color2, linewidth=2)\n\n for x in [self.helipad_x1, self.helipad_x2]:\n flagy1 = self.helipad_y\n flagy2 = flagy1 + 50 / SCALE\n self.viewer.draw_polyline([(x, flagy1), (x, flagy2)], color=(1, 1, 1))\n self.viewer.draw_polygon(\n [\n (x, flagy2),\n (x, flagy2 - 10 / SCALE),\n (x + 25 / SCALE, flagy2 - 5 / SCALE),\n ],\n color=(0.8, 0.8, 0),\n )\n\n return self.viewer.render(return_rgb_array=mode == \"rgb_array\")\n\n def close(self):\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n" ]
[ [ "torch.nn.LogSoftmax", "torch.device", "numpy.array", "numpy.zeros_like", "numpy.random.seed", "torch.manual_seed", "torch.tensor", "numpy.abs" ], [ "numpy.array", "numpy.sign", "torch.cuda.is_available", "numpy.abs", "numpy.clip", "numpy.sqrt" ] ]
Osarez7/models
[ "841df8f136ea712d5505caf71db19fa5dee3ac1d" ]
[ "official/nlp/data/tagging_data_lib.py" ]
[ "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Library to process data for tagging task such as NER/POS.\"\"\"\nimport collections\nimport os\n\nfrom absl import logging\nimport tensorflow as tf\n\nfrom official.nlp.data import classifier_data_lib\n\n# A negative label id for the padding label, which will not contribute\n# to loss/metrics in training.\n_PADDING_LABEL_ID = -1\n\n# The special unknown token, used to substitute a word which has too many\n# subwords after tokenization.\n_UNK_TOKEN = \"[UNK]\"\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for token classification.\"\"\"\n\n def __init__(self, sentence_id, words=None, label_ids=None):\n \"\"\"Constructs an InputExample.\"\"\"\n self.sentence_id = sentence_id\n self.words = words if words else []\n self.label_ids = label_ids if label_ids else []\n\n def add_word_and_label_id(self, word, label_id):\n \"\"\"Adds word and label_id pair in the example.\"\"\"\n self.words.append(word)\n self.label_ids.append(label_id)\n\n\ndef _read_one_file(file_name, label_list):\n \"\"\"Reads one file and returns a list of `InputExample` instances.\"\"\"\n lines = tf.io.gfile.GFile(file_name, \"r\").readlines()\n examples = []\n label_id_map = {label: i for i, label in enumerate(label_list)}\n sentence_id = 0\n example = InputExample(sentence_id=0)\n for line in lines:\n line = line.strip(\"\\n\")\n if line:\n # The format is: <token>\\t<label> for train/dev set and <token> for test.\n items = line.split(\"\\t\")\n assert len(items) == 2 or len(items) == 1\n token = items[0].strip()\n\n # Assign a dummy label_id for test set\n label_id = label_id_map[items[1].strip()] if len(items) == 2 else 0\n example.add_word_and_label_id(token, label_id)\n else:\n # Empty line indicates a new sentence.\n if example.words:\n examples.append(example)\n sentence_id += 1\n example = InputExample(sentence_id=sentence_id)\n\n if example.words:\n examples.append(example)\n return examples\n\n\nclass PanxProcessor(classifier_data_lib.DataProcessor):\n \"\"\"Processor for the Panx data set.\"\"\"\n supported_languages = [\n \"ar\", \"he\", \"vi\", \"id\", \"jv\", \"ms\", \"tl\", \"eu\", \"ml\", \"ta\", \"te\", \"af\",\n \"nl\", \"en\", \"de\", \"el\", \"bn\", \"hi\", \"mr\", \"ur\", \"fa\", \"fr\", \"it\", \"pt\",\n \"es\", \"bg\", \"ru\", \"ja\", \"ka\", \"ko\", \"th\", \"sw\", \"yo\", \"my\", \"zh\", \"kk\",\n \"tr\", \"et\", \"fi\", \"hu\"\n ]\n\n def get_train_examples(self, data_dir):\n return _read_one_file(\n os.path.join(data_dir, \"train-en.tsv\"), self.get_labels())\n\n def get_dev_examples(self, data_dir):\n return _read_one_file(\n os.path.join(data_dir, \"dev-en.tsv\"), self.get_labels())\n\n def get_test_examples(self, data_dir):\n examples_dict = {}\n for language in self.supported_languages:\n examples_dict[language] = _read_one_file(\n os.path.join(data_dir, \"test-%s.tsv\" % 
language), self.get_labels())\n return examples_dict\n\n def get_labels(self):\n return [\"O\", \"B-PER\", \"I-PER\", \"B-LOC\", \"I-LOC\", \"B-ORG\", \"I-ORG\"]\n\n @staticmethod\n def get_processor_name():\n return \"panx\"\n\n\nclass UdposProcessor(classifier_data_lib.DataProcessor):\n \"\"\"Processor for the Udpos data set.\"\"\"\n supported_languages = [\n \"af\", \"ar\", \"bg\", \"de\", \"el\", \"en\", \"es\", \"et\", \"eu\", \"fa\", \"fi\", \"fr\",\n \"he\", \"hi\", \"hu\", \"id\", \"it\", \"ja\", \"kk\", \"ko\", \"mr\", \"nl\", \"pt\", \"ru\",\n \"ta\", \"te\", \"th\", \"tl\", \"tr\", \"ur\", \"vi\", \"yo\", \"zh\"\n ]\n\n def get_train_examples(self, data_dir):\n return _read_one_file(\n os.path.join(data_dir, \"train-en.tsv\"), self.get_labels())\n\n def get_dev_examples(self, data_dir):\n return _read_one_file(\n os.path.join(data_dir, \"dev-en.tsv\"), self.get_labels())\n\n def get_test_examples(self, data_dir):\n examples_dict = {}\n for language in self.supported_languages:\n examples_dict[language] = _read_one_file(\n os.path.join(data_dir, \"test-%s.tsv\" % language), self.get_labels())\n return examples_dict\n\n def get_labels(self):\n return [\n \"ADJ\", \"ADP\", \"ADV\", \"AUX\", \"CCONJ\", \"DET\", \"INTJ\", \"NOUN\", \"NUM\",\n \"PART\", \"PRON\", \"PROPN\", \"PUNCT\", \"SCONJ\", \"SYM\", \"VERB\", \"X\"\n ]\n\n @staticmethod\n def get_processor_name():\n return \"udpos\"\n\n\ndef _tokenize_example(example, max_length, tokenizer, text_preprocessing=None):\n \"\"\"Tokenizes words and breaks long example into short ones.\"\"\"\n # Needs additional [CLS] and [SEP] tokens.\n max_length = max_length - 2\n new_examples = []\n new_example = InputExample(sentence_id=example.sentence_id)\n for i, word in enumerate(example.words):\n if text_preprocessing:\n word = text_preprocessing(word)\n subwords = tokenizer.tokenize(word)\n if (not subwords or len(subwords) > max_length) and word:\n subwords = [_UNK_TOKEN]\n\n if len(subwords) + len(new_example.words) > max_length:\n # Start a new example.\n new_examples.append(new_example)\n new_example = InputExample(sentence_id=example.sentence_id)\n\n for j, subword in enumerate(subwords):\n # Use the real label for the first subword, and pad label for\n # the remainings.\n subword_label = example.label_ids[i] if j == 0 else _PADDING_LABEL_ID\n new_example.add_word_and_label_id(subword, subword_label)\n\n if new_example.words:\n new_examples.append(new_example)\n\n return new_examples\n\n\ndef _convert_single_example(example, max_seq_length, tokenizer):\n \"\"\"Converts an `InputExample` instance to a `tf.train.Example` instance.\"\"\"\n tokens = [\"[CLS]\"]\n tokens.extend(example.words)\n tokens.append(\"[SEP]\")\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n label_ids = [_PADDING_LABEL_ID]\n if any([x < 0 for x in example.label_ids]):\n raise ValueError(\"Unexpected negative label_id: %s\" % example.label_ids)\n\n label_ids.extend(example.label_ids)\n label_ids.append(_PADDING_LABEL_ID)\n\n segment_ids = [0] * len(input_ids)\n input_mask = [1] * len(input_ids)\n\n # Pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n label_ids.append(_PADDING_LABEL_ID)\n\n def create_int_feature(values):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n 
features[\"segment_ids\"] = create_int_feature(segment_ids)\n features[\"label_ids\"] = create_int_feature(label_ids)\n features[\"sentence_id\"] = create_int_feature([example.sentence_id])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n return tf_example\n\n\ndef write_example_to_file(examples,\n tokenizer,\n max_seq_length,\n output_file,\n text_preprocessing=None):\n \"\"\"Writes `InputExample`s into a tfrecord file with `tf.train.Example` protos.\n\n Note that the words inside each example will be tokenized and be applied by\n `text_preprocessing` if available. Also, if the length of sentence (plus\n special [CLS] and [SEP] tokens) exceeds `max_seq_length`, the long sentence\n will be broken into multiple short examples. For example:\n\n Example (text_preprocessing=lowercase, max_seq_length=5)\n words: [\"What\", \"a\", \"great\", \"weekend\"]\n labels: [ 7, 5, 9, 10]\n sentence_id: 0\n preprocessed: [\"what\", \"a\", \"great\", \"weekend\"]\n tokenized: [\"what\", \"a\", \"great\", \"week\", \"##end\"]\n\n will result in two tf.example protos:\n\n tokens: [\"[CLS]\", \"what\", \"a\", \"great\", \"[SEP]\"]\n label_ids: [-1, 7, 5, 9, -1]\n input_mask: [ 1, 1, 1, 1, 1]\n segment_ids: [ 0, 0, 0, 0, 0]\n input_ids: [ tokenizer.convert_tokens_to_ids(tokens) ]\n sentence_id: 0\n\n tokens: [\"[CLS]\", \"week\", \"##end\", \"[SEP]\", \"[PAD]\"]\n label_ids: [-1, 10, -1, -1, -1]\n input_mask: [ 1, 1, 1, 0, 0]\n segment_ids: [ 0, 0, 0, 0, 0]\n input_ids: [ tokenizer.convert_tokens_to_ids(tokens) ]\n sentence_id: 0\n\n Note the use of -1 in `label_ids` to indicate that a token should not be\n considered for classification (e.g., trailing ## wordpieces or special\n token). Token classification models should accordingly ignore these when\n calculating loss, metrics, etc...\n\n Args:\n examples: A list of `InputExample` instances.\n tokenizer: The tokenizer to be applied on the data.\n max_seq_length: Maximum length of generated sequences.\n output_file: The name of the output tfrecord file.\n text_preprocessing: optional preprocessing run on each word prior to\n tokenization.\n\n Returns:\n The total number of tf.train.Example proto written to file.\n \"\"\"\n tf.io.gfile.makedirs(os.path.dirname(output_file))\n writer = tf.io.TFRecordWriter(output_file)\n num_tokenized_examples = 0\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n logging.info(\"Writing example %d of %d to %s\", ex_index, len(examples),\n output_file)\n\n tokenized_examples = _tokenize_example(example, max_seq_length,\n tokenizer, text_preprocessing)\n num_tokenized_examples += len(tokenized_examples)\n for per_tokenized_example in tokenized_examples:\n tf_example = _convert_single_example(\n per_tokenized_example, max_seq_length, tokenizer)\n writer.write(tf_example.SerializeToString())\n\n writer.close()\n return num_tokenized_examples\n\n\ndef token_classification_meta_data(train_data_size,\n max_seq_length,\n num_labels,\n eval_data_size=None,\n test_data_size=None,\n label_list=None,\n processor_type=None):\n \"\"\"Creates metadata for tagging (token classification) datasets.\"\"\"\n meta_data = {\n \"train_data_size\": train_data_size,\n \"max_seq_length\": max_seq_length,\n \"num_labels\": num_labels,\n \"task_type\": \"tagging\",\n \"label_type\": \"int\",\n \"label_shape\": [max_seq_length],\n }\n if eval_data_size:\n meta_data[\"eval_data_size\"] = eval_data_size\n if test_data_size:\n meta_data[\"test_data_size\"] = test_data_size\n if label_list:\n 
meta_data[\"label_list\"] = label_list\n if processor_type:\n meta_data[\"processor_type\"] = processor_type\n\n return meta_data\n\n\ndef generate_tf_record_from_data_file(processor,\n data_dir,\n tokenizer,\n max_seq_length,\n train_data_output_path,\n eval_data_output_path,\n test_data_output_path,\n text_preprocessing):\n \"\"\"Generates tfrecord files from the raw data.\"\"\"\n common_kwargs = dict(tokenizer=tokenizer, max_seq_length=max_seq_length,\n text_preprocessing=text_preprocessing)\n train_examples = processor.get_train_examples(data_dir)\n train_data_size = write_example_to_file(\n train_examples, output_file=train_data_output_path, **common_kwargs)\n\n eval_examples = processor.get_dev_examples(data_dir)\n eval_data_size = write_example_to_file(\n eval_examples, output_file=eval_data_output_path, **common_kwargs)\n\n test_input_data_examples = processor.get_test_examples(data_dir)\n test_data_size = {}\n for language, examples in test_input_data_examples.items():\n test_data_size[language] = write_example_to_file(\n examples,\n output_file=test_data_output_path.format(language),\n **common_kwargs)\n\n labels = processor.get_labels()\n meta_data = token_classification_meta_data(\n train_data_size,\n max_seq_length,\n len(labels),\n eval_data_size,\n test_data_size,\n label_list=labels,\n processor_type=processor.get_processor_name())\n return meta_data\n" ]
[ [ "tensorflow.io.gfile.GFile", "tensorflow.train.Features", "tensorflow.io.TFRecordWriter" ] ]