Dataset schema (one record per repository):

  repo_name : string (6-130 characters), e.g. "grohith327/EasyGAN"
  hexsha    : list of commit SHAs, one per file
  file_path : list of file paths within the repository, parallel to hexsha
  code      : list of full source-file contents, parallel to file_path
  apis      : list of per-file lists of fully-qualified API calls
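Each record below stores these five columns in parallel. A minimal sketch of iterating over such records, assuming the dump is serialized as JSON Lines; the filename "code_apis.jsonl" is a hypothetical placeholder, not part of the original dataset:

import json

with open("code_apis.jsonl") as fh:          # hypothetical filename
    for line in fh:
        record = json.loads(line)
        repo = record["repo_name"]           # e.g. "grohith327/EasyGAN"
        # hexsha, file_path and code are parallel lists, one entry per file
        for sha, path, src in zip(record["hexsha"], record["file_path"], record["code"]):
            print(f"{repo} {path} ({sha[:8]}): {len(src)} chars")
        # apis holds one list of fully-qualified call names per file
        for api_list in record.get("apis", []):
            print("  ", len(api_list), "distinct API calls")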
grohith327/EasyGAN
[ "cbbff0283eda08f2d056215574f798e976d3ece0" ]
[ "simplegan/datasets/load_pix2pix_datasets.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport os\nimport cv2\nimport glob\nfrom tqdm import tqdm\n\n\"\"\"\nDatasets are retrieved from: https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/\n\"\"\"\n\n__all__ = [\"pix2pix_dataloader\"]\n\n\nclass pix2pix_dataloader:\n\n r\"\"\"A dataloader class for `Pix2Pix <https://github.com/grohith327/simplegan/blob/master/simplegan/gan/pix2pix.py>`_ network\n\n Args:\n dataset_name (str, optional): ``cityscapes`` ``edges2handbags`` ``edges2shoes`` ``facades`` ``maps``. Defaults to ``None``\n img_width (int, optional): width of the image. Defaults to ``256``\n img_height (int, optional): height of the image. Defaults to ``256``\n datadir (str, optional): Local directory to load data from. Defaults to ``None``\n \"\"\"\n\n def __init__(self, dataset_name=None, img_width=256, img_height=256, datadir=None):\n\n self.dataset_name = dataset_name\n self.img_width = img_width\n self.img_height = img_height\n self.datadir = datadir\n self.channels = 3\n\n def _load_path(self, dataset_name):\n\n URLs = {\n \"cityscapes\": \"https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/cityscapes.tar.gz\",\n \"edges2handbags\": \"https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/edges2handbags.tar.gz\",\n \"edges2shoes\": \"https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/edges2shoes.tar.gz\",\n \"facades\": \"https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/facades.tar.gz\",\n \"maps\": \"https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/maps.tar.gz\",\n }\n\n URL = URLs[dataset_name]\n\n filename = dataset_name + \".tar.gz\"\n\n path = tf.keras.utils.get_file(filename, origin=URL, extract=True)\n return os.path.join(os.path.dirname(path), dataset_name)\n\n def _load_image(self, filename):\n\n image = tf.io.read_file(filename)\n image = tf.image.decode_jpeg(image)\n\n w = tf.shape(image)[1]\n w = w // 2\n\n real_image = image[:, :w, :]\n input_image = image[:, w:, :]\n\n input_image = tf.cast(input_image, tf.float32)\n real_image = tf.cast(real_image, tf.float32)\n\n return input_image, real_image\n\n def _resize(self, input_image, real_image, height, width):\n\n input_image = tf.image.resize(\n input_image, [height, width], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR\n )\n\n real_image = tf.image.resize(\n real_image, [height, width], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR\n )\n\n return input_image, real_image\n\n def _random_crop(self, input_image, real_image):\n\n stacked_image = tf.stack([input_image, real_image], axis=0)\n cropped_image = tf.image.random_crop(\n stacked_image, size=[2, self.img_height, self.img_width, self.channels]\n )\n\n return cropped_image[0], cropped_image[1]\n\n def _normalize_image(self, input_image, real_image):\n\n input_image = (input_image / 127.5) - 1\n real_image = (real_image / 127.5) - 1\n\n return input_image, real_image\n\n @tf.function\n def _random_jitter(self, input_image, real_image):\n\n input_image, real_image = self._resize(input_image, real_image, 286, 286)\n input_image, real_image = self._random_crop(input_image, real_image)\n\n if tf.random.uniform(()) > 0.5:\n input_image = tf.image.flip_left_right(input_image)\n real_image = tf.image.flip_left_right(real_image)\n\n return input_image, real_image\n\n def _load_train_images(self, filename):\n\n input_image, real_image = self._load_image(filename)\n input_image, real_image = self._random_jitter(input_image, real_image)\n input_image, 
real_image = self._normalize_image(input_image, real_image)\n\n return input_image, real_image\n\n def _load_test_images(self, filename):\n\n input_image, real_image = self._load_image(filename)\n input_image, real_image = self._resize(\n input_image, real_image, self.img_height, self.img_width\n )\n input_image, real_image = self._normalize_image(input_image, real_image)\n\n return input_image, real_image\n\n def _load_pix2pix_data(self):\n\n train_data = tf.data.Dataset.list_files(\n self._load_path(self.dataset_name) + \"/train/*.jpg\"\n )\n train_ds = train_data.map(\n self._load_train_images, num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n try:\n test_data = tf.data.Dataset.list_files(\n self._load_path(self.dataset_name) + \"/test/*.jpg\"\n )\n except BaseException:\n test_data = tf.data.Dataset.list_files(\n self._load_path(self.dataset_name) + \"/val/*.jpg\"\n )\n\n test_ds = test_data.map(self._load_test_images)\n\n return train_ds, test_ds\n\n def _load_custom_data(self):\n\n error_message = \"train directory not found \\n Directory structure: \\n {} \\n {} -train \\n {} -*.jpg \\n {} -test \\n {} -*.jpg\".format(\n self.datadir, \" \" * 2, \" \" * 4, \" \" * 2, \" \" * 4\n )\n assert os.path.exists(os.path.join(self.datadir, \"train\")), error_message\n\n train_data = tf.data.Dataset.list_files(os.path.join(self.datadir, \"train/*.jpg\"))\n train_ds = train_data.map(\n self._load_train_images, num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n error_message = \"test directory not found \\n Directory structure: \\n {} \\n {} -train \\n {} -*.jpg \\n {} -test \\n {} -*.jpg\".format(\n self.datadir, \" \" * 2, \" \" * 4, \" \" * 2, \" \" * 4\n )\n assert os.path.exists(os.path.join(self.datadir, \"test\")), error_message\n\n try:\n test_data = tf.data.Dataset.list_files(os.path.join(self.datadir, \"test/*.jpg\"))\n\n except BaseException:\n test_data = tf.data.Dataset.list_files(os.path.join(self.datadir, \"val/*.jpg\"))\n\n test_ds = test_data.map(self._load_test_images)\n\n return train_ds, test_ds\n\n def load_dataset(self):\n\n r\"\"\"Loads the data according to given parameters\n\n Return:\n tensorflow dataset object for training and testing data\n \"\"\"\n\n assert (\n self.dataset_name is not None or self.datadir is not None\n ), \"Enter directory to load custom data or choose from existing datasets\"\n\n if self.dataset_name is not None:\n\n train_ds, test_ds = self._load_pix2pix_data()\n\n else:\n\n train_ds, test_ds = self._load_custom_data()\n\n return train_ds, test_ds\n" ]
[ [ "tensorflow.shape", "tensorflow.keras.utils.get_file", "tensorflow.io.read_file", "tensorflow.random.uniform", "tensorflow.image.random_crop", "tensorflow.image.flip_left_right", "tensorflow.stack", "tensorflow.image.resize", "tensorflow.image.decode_jpeg", "tensorflow.cast" ] ]
Annarien/GravitationalLenses
[ "c2606aacc62d2534fb199f5228dc21c0ea604251" ]
[ "ForThesis/Corrected Results/res/Exp1/20201104_run_01.py" ]
[ "\"\"\"\nThis is file performs the convolutional neural network algorithm, in which the k fold is performed as well.\nThe results were saved in a csv file.\n\"\"\"\n\nimport os\nimport sys\nimport random\nfrom datetime import datetime\nimport numpy as np\nimport tensorflow\nfrom astropy.io import fits\nfrom astropy.utils.data import get_pkg_data_filename\nfrom matplotlib import pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split, StratifiedKFold\nfrom sklearn.utils import shuffle\nfrom tensorflow.python.keras import Sequential\nfrom tensorflow.python.keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom tensorflow.python.keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom tensorflow.python.keras.layers.core import Dense, Dropout, Flatten\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.python.keras.utils.vis_utils import plot_model\n\n# added Adam opt for learning rate\nfrom tensorflow.python.keras.optimizers import Adam\n# from tensorflow.keras.optimizers import Adam\nfrom tensorflow.python.keras import backend as K\n\nfrom ExcelUtils import createExcelSheet, writeToFile\n\nprint(tensorflow.__version__)\n\nnow = datetime.now()\ndt_string = now.strftime(\"%d_%m_%Y_%H_%M_%S\")\nprint(dt_string)\nexcel_headers = []\nexcel_dictionary = []\nexcel_headers.append(\"Date and Time\")\nexcel_dictionary.append(dt_string)\n\n# Globals\nmakeNewCSVFile = True\nmax_num = sys.maxsize # Set to sys.maxsize when running entire data set\nmax_num_testing = sys.maxsize # Set to sys.maxsize when running entire data set\nmax_num_prediction = sys.maxsize # Set to sys.maxsize when running entire data set\nvalidation_split = 0.2 # A float value between 0 and 1 that determines what percentage of the training\n# data is used for validation.\nk_fold_num = 5 # A number between 2 and 10 that determines how many times the k-fold classifier\n# is trained.\nepochs = 50 # A number that dictates how many iterations should be run to train the classifier\nbatch_size = 128 # The number of items batched together during training.\nrun_k_fold_validation = True # Set this to True if you want to run K-Fold validation as well.\ninput_shape = (100, 100, 3) # The shape of the images being learned & evaluated.\naugmented_multiple = 2 # This uses data augmentation to generate x-many times as much data as there is on file.\nuse_augmented_data = True # Determines whether to use data augmentation or not.\npatience_num = 3 # Used in the early stopping to determine how quick/slow to react.\nuse_early_stopping = False # Determines whether to use early stopping or not.\nuse_model_checkpoint = True # Determines whether the classifiers keeps track of the most accurate iteration of itself.\nmonitor_early_stopping = 'val_loss'\nmonitor_model_checkpoint = 'val_acc'\nuse_shuffle = True\nlearning_rate = 0.0002\n\ntraining_positive_path = 'Training/PositiveAll'\n# training_positive_path = 'UnseenData/KnownLenses_training'\ntraining_negative_path = 'Training/Negative'\ntesting_positive_path = 'Testing/PositiveAll'\ntesting_negative_path = 'Testing/Negative'\n# unseen_known_file_path = 'UnseenData/Known131'\nunseen_known_file_path_select = 'UnseenData/SelectingSimilarLensesToPositiveSimulated'\nunseen_known_file_path_all = 'UnseenData/KnownLenses'\n\n# Adding global parameters to excel\nexcel_headers.append(\"Max Training 
Num\")\nexcel_dictionary.append(max_num)\nexcel_headers.append(\"Max Testing Num\")\nexcel_dictionary.append(max_num_testing)\nexcel_headers.append(\"Max Prediction Num\")\nexcel_dictionary.append(max_num_prediction)\nexcel_headers.append(\"Validation Split\")\nexcel_dictionary.append(validation_split)\nexcel_headers.append(\"K fold Num\")\nexcel_dictionary.append(k_fold_num)\nexcel_headers.append(\"Epochs\")\nexcel_dictionary.append(epochs)\nexcel_headers.append(\"Batch Size\")\nexcel_dictionary.append(batch_size)\nexcel_headers.append(\"Run K fold\")\nexcel_dictionary.append(run_k_fold_validation)\nexcel_headers.append(\"Input Shape\")\nexcel_dictionary.append(input_shape)\nexcel_headers.append(\"Augmented Multiple\")\nexcel_dictionary.append(augmented_multiple)\nexcel_headers.append(\"Use Augmented Data\")\nexcel_dictionary.append(use_augmented_data)\nexcel_headers.append(\"Patience\")\nexcel_dictionary.append(patience_num)\nexcel_headers.append(\"Use Early Stopping\")\nexcel_dictionary.append(use_early_stopping)\nexcel_headers.append(\"Use Model Checkpoint\")\nexcel_dictionary.append(use_model_checkpoint)\nexcel_headers.append(\"Monitor Early Stopping\")\nexcel_dictionary.append(monitor_early_stopping)\nexcel_headers.append(\"Monitor Model Checkpoint\")\nexcel_dictionary.append(monitor_model_checkpoint)\nexcel_headers.append(\"Use Shuffle\")\nexcel_dictionary.append(use_shuffle)\nexcel_headers.append(\"Learning Rate\")\nexcel_dictionary.append(learning_rate)\n\n\nif not os.path.exists('../Results/'):\n os.mkdir('../Results/%s/')\n\nif not os.path.exists('../Results/%s/' % dt_string):\n os.mkdir('../Results/%s/' % dt_string)\n\n\n# Helper methods\ndef getPositiveImages(images_dir, max_num, input_shape):\n \"\"\"\n This gets the positively simulated images in the g, r and i bands.\n Args:\n images_dir(string): This is the file path address of the positively simulated images.\n max_num(integer): This is the number of sources of the positively simulated images to be used.\n input_shape(tuple): This is the shape of the images.\n Returns:\n positive_images(numpy array): This is the numpy array of the positively simulated images with the shape of\n (num of images, input_shape[0], input_shape[1], input_shape[2]) =\n (num_of_images, 100, 100, 3).\n \"\"\"\n global g_img_path, r_img_path, i_img_path\n for root, dirs, _ in os.walk(images_dir):\n num_of_images = min(max_num, len(dirs))\n positive_images = np.zeros([num_of_images, 3, 100, 100])\n index = 0\n print('image_dir: ' + str(images_dir))\n for folder in dirs:\n if images_dir == 'Training/PositiveAll':\n g_img_path = get_pkg_data_filename('%s/%s_g_norm.fits' % (os.path.join(root, folder), folder))\n r_img_path = get_pkg_data_filename('%s/%s_r_norm.fits' % (os.path.join(root, folder), folder))\n i_img_path = get_pkg_data_filename('%s/%s_i_norm.fits' % (os.path.join(root, folder), folder))\n elif images_dir == 'UnseenData/KnownLenses_training':\n g_img_path = get_pkg_data_filename('%s/g_norm.fits' % (os.path.join(root, folder)))\n r_img_path = get_pkg_data_filename('%s/r_norm.fits' % (os.path.join(root, folder)))\n i_img_path = get_pkg_data_filename('%s/i_norm.fits' % (os.path.join(root, folder)))\n\n # print('g_img_path: ' + str(g_img_path))\n # print('r_img_path: ' + str(r_img_path))\n # print('i_img_path: ' + str(i_img_path))\n g_data = fits.open(g_img_path)[0].data[0:100, 0:100]\n r_data = fits.open(r_img_path)[0].data[0:100, 0:100]\n i_data = fits.open(i_img_path)[0].data[0:100, 0:100]\n\n img_data = [g_data, r_data, i_data]\n 
positive_images[index] = img_data\n index += 1\n\n if index >= num_of_images:\n break\n return positive_images.reshape(num_of_images, input_shape[0], input_shape[1], input_shape[2])\n\n\ndef getNegativeImages(images_dir, max_num, input_shape):\n \"\"\"\n This gets the negative images in the g, r and i bands.\n Args:\n images_dir(string): This is the file path address of the negative images.\n max_num(integer): This is the number of sources of the negative images to be used.\n input_shape(tuple): This is the shape of the images.\n Returns:\n negative_images(numpy array): This is the numpy array of the negative images with the shape of\n (num of images, input_shape[0], input_shape[1], input_shape[2]) =\n (num_of_images, 100, 100, 3).\n \"\"\"\n for root, dirs, _ in os.walk(images_dir):\n num_of_images = min(max_num, len(dirs))\n negative_images = np.zeros([num_of_images, 3, 100, 100])\n index = 0\n for folder in dirs:\n g_img_path = get_pkg_data_filename('%s/g_norm.fits' % (os.path.join(root, folder)))\n r_img_path = get_pkg_data_filename('%s/r_norm.fits' % (os.path.join(root, folder)))\n i_img_path = get_pkg_data_filename('%s/i_norm.fits' % (os.path.join(root, folder)))\n\n g_data = fits.open(g_img_path)[0].data[0:100, 0:100]\n r_data = fits.open(r_img_path)[0].data[0:100, 0:100]\n i_data = fits.open(i_img_path)[0].data[0:100, 0:100]\n\n img_data = [g_data, r_data, i_data]\n negative_images[index] = img_data\n index += 1\n\n if index >= num_of_images:\n break\n return negative_images.reshape(num_of_images, input_shape[0], input_shape[1], input_shape[2])\n\n\ndef getUnseenData(images_dir, max_num, input_shape):\n \"\"\"\n This gets the unseen images in the g, r and i bands containing the identified known lenses.\n Args:\n images_dir(string): This is the file path address of the unseen images.\n max_num(integer): This is the number of sources of the unseen images to be used.\n input_shape(tuple): This is the shape of the images.\n Returns:\n des_tiles(dictionary): This is the dictionary of the unseen images with the shape of\n (num of images, input_shape[0], input_shape[1], input_shape[2]) =\n (num_of_images, 100, 100, 3).\n \"\"\"\n\n des_tiles = {}\n\n for root, dirs, _ in os.walk(images_dir):\n num_of_images = min(max_num, len(dirs))\n index = 0\n for folder in dirs:\n g_img_path = get_pkg_data_filename('%s/g_norm.fits' % (os.path.join(root, folder)))\n r_img_path = get_pkg_data_filename('%s/r_norm.fits' % (os.path.join(root, folder)))\n i_img_path = get_pkg_data_filename('%s/i_norm.fits' % (os.path.join(root, folder)))\n\n # print(g_img_path)\n g_data = fits.open(g_img_path)[0].data[0:100, 0:100]\n # print(np.shape(g_data))\n r_data = fits.open(r_img_path)[0].data[0:100, 0:100]\n i_data = fits.open(i_img_path)[0].data[0:100, 0:100]\n\n img_data = np.array([g_data, r_data, i_data]).reshape(input_shape[0], input_shape[1], input_shape[2])\n des_tiles.update({folder: img_data})\n index += 1\n if index >= num_of_images:\n break\n\n return des_tiles\n\n\ndef makeImageSet(positive_images, negative_images=None, tile_names=None, shuffle_needed=use_shuffle):\n \"\"\"\n This is used to create data set of images and labels, in which the positive and negative images are all\n combined and shuffled.\n Args:\n positive_images(numpy array): This is the numpy array of the positively simulated images.\n negative_images(numpy array): This is the numpy array of the negative images, this is set to a\n default of None.\n tile_names(list): This is the dictionary of the unseen known lenses, this is set to a\n 
default of None.\n shuffle_needed(boolean): This is a boolean value to determine whether or not shuffling of the given data\n sets is required.\n Returns:\n image_set(numpy array): This is the image data set of numpy array of the combination positive\n and negative images.\n label_set(numpy array): This is the label data set of numpy array of the combination positive\n and negative label.\n des_names_set(numpy array): This is the des name data set of the known lenses and negative images used.\n \"\"\"\n\n image_set = []\n label_set = []\n tile_name_set = []\n\n if positive_images is not None:\n for index in range(0, len(positive_images)):\n image_set.append(positive_images[index])\n label_set.append(1)\n if tile_names is not None:\n tile_name_set.append(tile_names[index])\n\n if negative_images is not None:\n for index in range(0, len(negative_images)):\n image_set.append(negative_images[index])\n label_set.append(0)\n if tile_names is not None:\n tile_name_set.append(tile_names[index])\n\n # print(\"Label Set: \" + str(label_set))\n if shuffle_needed:\n if tile_names is not None:\n image_set, label_set, tile_name_set = shuffle(image_set, label_set, tile_name_set)\n else:\n image_set, label_set = shuffle(image_set, label_set)\n # print(\"Shuffled Label Set: \" + str(label_set))\n\n return np.array(image_set), np.array(label_set), np.array(tile_name_set)\n\n\ndef buildClassifier(input_shape=(100, 100, 3)):\n \"\"\"\n This creates the CNN algorithm.\n Args:\n input_shape(tuple): This is the image shape of (100,100,3)\n Returns:\n classifier(sequential): This is the sequential model.\n \"\"\"\n # Initialising the CNN\n opt = Adam(lr=learning_rate) # lr = learning rate\n classifier = Sequential()\n\n # JACOBS\n classifier.add(Conv2D(96, kernel_size=(2, 2), activation='relu', input_shape=input_shape)) # padding='same'\n classifier.add(MaxPooling2D(pool_size=(2, 2))) # padding='same'\n classifier.add(Conv2D(128, (2, 2), activation='relu')) # padding='same'\n classifier.add(MaxPooling2D(pool_size=(2, 2))) # padding='same'\n classifier.add(Conv2D(256, (2, 2), activation='relu')) # padding='same'\n classifier.add(MaxPooling2D(pool_size=(2, 2))) # padding='same' \n classifier.add(Conv2D(256, (2, 2), activation='relu')) # padding='same'\n classifier.add(Dropout(0.2))\n classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same')) # padding='same'\n classifier.add(Dropout(0.2))\n classifier.add(Flatten())\n classifier.add(Dense(units=1024, activation='relu')) # added new dense layer\n classifier.add(Dropout(0.2))\n classifier.add(Dense(units=1024, activation='relu')) # added new dense layer\n classifier.add(Dropout(0.2))\n classifier.add(Dense(units=1, activation='sigmoid'))\n classifier.summary()\n\n # Compiling the CNN\n classifier.compile(optimizer=opt,\n loss='binary_crossentropy',\n metrics=['accuracy'])\n plot_model(classifier, to_file='model_plot.png', show_shapes=True, show_layer_names=True)\n\n return classifier\n\n\ndef visualiseActivations(img_tensor, base_dir):\n \"\"\"\n This makes images of the activations, as the selected image passed through the model\n Args:\n img_tensor(numpy array): This is the numpy array of the selected image\n base_dir(string): This is the file path name\n Saves:\n This saves the activation images of the selected source.\n \"\"\"\n global predicted_class, size\n # Run prediction on that image\n predicted_class = classifier.predict_classes(img_tensor, batch_size=10)\n print(\"Predicted class is: \", predicted_class)\n # Visualize activations\n layer_outputs = 
[layer.output for layer in classifier.layers[:12]]\n activation_model = Model(inputs=classifier.input, outputs=layer_outputs)\n activations = activation_model.predict(img_tensor)\n layer_names = []\n for layer in classifier.layers[:12]:\n layer_names.append(layer.name)\n images_per_row = 3\n count = 0\n for layer_name, layer_activation in zip(layer_names, activations):\n number_of_features = layer_activation.shape[-1]\n size = layer_activation.shape[1]\n number_of_columns = number_of_features // images_per_row\n display_grid = np.zeros((size * number_of_columns, images_per_row * size))\n for col in range(number_of_columns):\n for row in range(images_per_row):\n channel_image = layer_activation[0, :, :, col * images_per_row + row]\n channel_image -= channel_image.mean()\n channel_image /= channel_image.std()\n channel_image *= 64\n channel_image += 128\n channel_image = np.clip(channel_image, 0, 255).astype('uint8')\n display_grid[col * size: (col + 1) * size, row * size: (row + 1) * size] = channel_image\n scale = 1. / size\n activations_figure = plt.figure(figsize=(scale * display_grid.shape[1],\n scale * display_grid.shape[0]))\n plt.title(layer_name)\n plt.grid(False)\n plt.imshow(display_grid, aspect='auto', cmap='viridis')\n activations_figure.savefig('%s/%s_Activation_%s.png' % (base_dir, count, layer_name))\n plt.close()\n\n count += 1\n\n\ndef usingCnnModel(training_data, training_labels, val_data, val_labels):\n \"\"\"\n This is using the CNN model and setting it up.\n Args:\n training_data(numpy arrays): This is the numpy array of the training data.\n training_labels(numpy arrays): This is the numpy array of the training labels.\n val_data(numpy arrays): This is the numpy array of the validation data.\n val_labels(numpy arrays): This is the numpy array of the validation labels.\n Returns:\n history(history): This is the history of the classifier.\n classifier(sequential): This is the cnn model classifier fitted to the training data and labels.\n \"\"\"\n model_checkpoint = ModelCheckpoint(filepath=\"best_weights.hdf5\",\n monitor=monitor_model_checkpoint,\n save_best_only=True)\n\n early_stopping = EarlyStopping(monitor=monitor_early_stopping, patience=patience_num) # original patience =3\n\n classifier = buildClassifier()\n callbacks_array = []\n if use_early_stopping:\n callbacks_array.append(early_stopping)\n if use_model_checkpoint:\n callbacks_array.append(model_checkpoint)\n\n print(len(training_data))\n history = classifier.fit(training_data,\n training_labels,\n epochs=epochs,\n validation_data=(val_data, val_labels),\n callbacks=callbacks_array,\n batch_size=batch_size\n # steps_per_epoch=int(len(training_data) / batch_size),\n )\n return history, classifier\n\n\ndef createAugmentedData(training_data, training_labels):\n \"\"\"\n This is creates the augmented data.\n Args:\n training_data(numpy arrays): This is the numpy array of the training data.\n training_labels(numpy arrays): This is the numpy array of the training labels.\n Returns:\n complete_training_data_set(numpy array): This is the numpy array of the total training data, which is has\n undergone augmentation.\n complete_training_labels_set(numpy array): This is the numpy array of the total training labels, which is has\n undergone augmentation.\n \"\"\"\n complete_training_data_set = []\n complete_training_labels_set = []\n\n for data in training_data:\n complete_training_data_set.append(data)\n print(\"Complete Training Data: \" + str(len(complete_training_data_set)))\n\n for label in training_labels:\n 
complete_training_labels_set.append(label)\n print(\"Complete Training Label: \" + str(len(complete_training_labels_set)))\n\n # create augmented data\n data_augmented = ImageDataGenerator(featurewise_center=True,\n featurewise_std_normalization=True,\n rotation_range=90,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True,\n vertical_flip=True)\n\n # data_augmented = ImageDataGenerator(featurewise_center=False,\n # featurewise_std_normalization=False,\n # rotation_range=90,\n # horizontal_flip=True,\n # vertical_flip=True)\n data_augmented.fit(training_data)\n\n training_data_size = training_data.shape[0]\n aug_counter = 0\n while aug_counter < (augmented_multiple - 1):\n iterator = data_augmented.flow(training_data, training_labels, batch_size=training_data_size)\n # iterator = data_augmented.flow(training_data, training_labels, batch_size=batch_size)\n augmented_data = iterator.next()\n for data in augmented_data[0]:\n complete_training_data_set.append(data)\n for label in augmented_data[1]:\n complete_training_labels_set.append(label)\n aug_counter += 1\n\n print(\"Size of All Training Data: \" + str(len(complete_training_data_set)))\n print(\"Size of All Training Labels: \" + str(len(complete_training_labels_set)))\n\n array_training_data = np.array(complete_training_data_set)\n array_training_labels = np.array(complete_training_labels_set)\n\n print(\"Shape of complete training data: \" + str(array_training_data.shape))\n print(\"Shape of complete training labels: \" + str(array_training_labels.shape))\n\n return np.array(complete_training_data_set), np.array(complete_training_labels_set)\n\n\ndef savePredictedLenses(des_names_array, predicted_class_probabilities, predicted_lenses_filepath, text_file_path):\n \"\"\"\n This saves the names of the predicted lenses in the respective textfiles.\n Args:\n des_names_array(numpy array): This is a list of the des names of the sources.\n predicted_class_probabilities(list): This is a list of the probabilities in which lenses are predicted by\n the algorithm.\n predicted_lenses_filepath(string): This is the string of the predicted lenses filepath, where this needs\n to be saved in the directory.\n text_file_path(string): This is the text file path address to which these images are saved.\n Saves:\n text_file(.txt file): This is the text file saved containing the predicted lenses DES names.\n \"\"\"\n predicted_lenses = []\n predicted_no_lenses = []\n if not os.path.exists(predicted_lenses_filepath):\n os.mkdir('%s/' % predicted_lenses_filepath)\n text_file = open('%s' % text_file_path, \"a+\")\n text_file.write('\\n')\n text_file.write('Predicted Lenses: \\n')\n for lens_index in range(len(predicted_class_probabilities)):\n if predicted_class_probabilities[lens_index] == 1:\n text_file.write(\"%s \\n \" % des_names_array[lens_index])\n predicted_lenses.append(des_names_array[lens_index])\n\n text_file.write('\\n')\n text_file.write('No Lenses Predicted: \\n')\n for lens_index in range(len(predicted_class_probabilities)):\n if predicted_class_probabilities[lens_index] == 0:\n text_file.write(\"%s \\n \" % des_names_array[lens_index])\n predicted_no_lenses.append(des_names_array[lens_index])\n text_file.close()\n\n return predicted_lenses, predicted_no_lenses\n\n\ndef gettingTrueFalsePositiveNegatives(testing_data, testing_labels, text_file_path,\n predicted_lenses_filepath, kf_counter=0):\n \"\"\"\n This is used to get the True/False Positive and Negative values gained from the CNN confusion matrix.\n Args:\n 
testing_data(numpy array): This is the unseen testing data numpy array.\n testing_labels(numpy array): This is the unseen testing label numpy array.\n text_file_path(string): This is the file path name of the text file in which the confusion\n matrix is saved.\n predicted_lenses_filepath(string): This is the file path in which the text file is saved.\n Saves:\n This saves a confusion matrix of the True/False Positive and Negative values.\n \"\"\"\n if not os.path.exists(predicted_lenses_filepath):\n os.mkdir('%s/' % predicted_lenses_filepath)\n\n predicted_data = classifier.predict_classes(testing_data)\n rounded_predicted_data = predicted_data.round()\n conf_matrix = confusion_matrix(testing_labels, rounded_predicted_data, labels=[0, 1])\n print(str(conf_matrix) + ' \\n ')\n true_negative, false_positive, false_negative, true_positive = conf_matrix.ravel()\n print(\"True Positive: %s \\n\" % true_positive)\n print(\"False Negative: %s \\n\" % false_negative)\n print(\"False Positive: %s \\n\" % false_positive)\n print(\"True Negative: %s \\n\" % true_negative)\n\n text_file = open('%s' % text_file_path, \"a+\")\n text_file.write('\\n')\n text_file.write('KFold Number: %s \\n' % str(kf_counter))\n text_file.write('Predicted vs True Matrix: \\n')\n text_file.write(str(conf_matrix) + \" \\n \")\n text_file.write(\"True Negative: %s \\n\" % str(true_negative))\n text_file.write(\"False Positive: %s \\n\" % str(false_positive))\n text_file.write(\"False Negative: %s \\n\" % str(false_negative))\n text_file.write(\"True Positive: %s \\n\" % str(true_positive))\n text_file.write(\"\\n\")\n text_file.close()\n\n confusion_matrix_array = [true_negative, false_positive, false_negative, true_positive]\n return confusion_matrix_array\n\n\ndef gettingKFoldConfusionMatrix(test_data, test_labels, unseen_images, unseen_labels, select_known_images,\n select_known_labels, kf_counter):\n test_confusion_matrix = gettingTrueFalsePositiveNegatives(test_data,\n test_labels,\n text_file_path='../Results/%s/TrainingTestingResults'\n '/KFold_PredictedMatrix.txt' % dt_string,\n predicted_lenses_filepath='../Results/%s'\n '/TrainingTestingResults '\n % dt_string,\n kf_counter=kf_counter)\n unseen_confusion_matrix = gettingTrueFalsePositiveNegatives(unseen_images,\n unseen_labels,\n text_file_path='../Results/%s/UnseenKnownLenses/'\n 'KFold_LensesPredicted.txt' % dt_string,\n predicted_lenses_filepath='../Results/%s'\n '/UnseenKnownLenses/ '\n % dt_string,\n kf_counter=kf_counter)\n\n select_confusion_matrix = gettingTrueFalsePositiveNegatives(select_known_images,\n select_known_labels,\n text_file_path='../Results/%s/UnseenKnownLensesSelect/'\n 'KFold_LensesPredicted.txt' % dt_string,\n predicted_lenses_filepath='../Results/%s'\n '/UnseenKnownLensesSelect/ '\n % dt_string,\n kf_counter=kf_counter)\n\n return test_confusion_matrix, unseen_confusion_matrix, select_confusion_matrix\n\n\ndef gettingRandomUnseenImage(filepath):\n g_img_path = get_pkg_data_filename('%s/g_norm.fits' % filepath)\n r_img_path = get_pkg_data_filename('%s/r_norm.fits' % filepath)\n i_img_path = get_pkg_data_filename('%s/i_norm.fits' % filepath)\n\n g_data = fits.open(g_img_path)[0].data[0:100, 0:100]\n r_data = fits.open(r_img_path)[0].data[0:100, 0:100]\n i_data = fits.open(i_img_path)[0].data[0:100, 0:100]\n\n img_data = np.array([g_data, r_data, i_data]).reshape(input_shape[0], input_shape[1], input_shape[2])\n return img_data\n\n\ndef executeKFoldValidation(train_data, train_labels, val_data, val_labels, testing_data, testing_labels,\n 
known_images, known_labels, known_des_names,\n select_known_images, select_known_labels):\n \"\"\"\n This does the k fold cross validation which is tested against the unseen testing and known lenses.\n Args:\n train_data(numpy arrays): This is the numpy array of the training data.\n train_labels(numpy arrays): This is the numpy array of the training labels.\n val_data(numpy arrays): This is the numpy array of the validation data.\n val_labels(numpy arrays): This is the numpy array of the validation labels.\n testing_data(numpy array): This is the numpy array of the unseen testing data.\n testing_labels(numpy array): This is the numpy array of the unseen testing label.\n images_47(numpy array): This is the numpy array of the unseen DES images data.\n labels_47(numpy array): This is the numpy array of the unseen DES images labels.\n images_84(numpy array): This is the numpy array of the unseen Jacobs images data.\n labels_84(numpy array): This is the numpy array of the unseen Jacobs images labels.\n all_unseen_images(numpy array): This is the numpy array of the unseen DES + Jacobs images data.\n all_unseen_labels(numpy array): This is the numpy array of the unseen DES + Jacobs images labels.\n\n Saves:\n This saves the scores, mean and std. of the unseen data that is evaluated in the k fold cross validation.\n \"\"\"\n if run_k_fold_validation:\n print(\"In executingKFoldValidation\")\n\n # this is doing it manually:\n kfold = StratifiedKFold(n_splits=k_fold_num, shuffle=True)\n\n test_scores_list = []\n test_loss_list = []\n unseen_scores_list = []\n unseen_loss_list = []\n select_unseen_scores_list = []\n select_unseen_loss_list = []\n test_matrix_list = []\n unseen_matrix_list = []\n select_matrix_list = []\n kf_counter = 0\n true_positives = {}\n false_negatives = {}\n\n for train, test in kfold.split(train_data, train_labels):\n kf_counter += 1\n print('KFold #:', kf_counter)\n\n model = buildClassifier()\n # fit the model\n model.fit(train_data[train],\n train_labels[train],\n epochs=epochs,\n validation_data=(val_data, val_labels),\n batch_size=batch_size)\n\n test_scores = model.evaluate(testing_data, testing_labels, batch_size=batch_size)\n test_scores_list.append(test_scores[1])\n test_loss_list.append(test_scores[0])\n print(\"Test Score: \" + str(test_scores_list))\n print(\"Test Loss: \" + str(test_loss_list))\n unseen_scores = model.evaluate(known_images, known_labels, batch_size=batch_size)\n unseen_scores_list.append(unseen_scores[1])\n unseen_loss_list.append(unseen_scores[0])\n print(\"Unseen Score: \" + str(unseen_scores_list))\n print(\"Unseen Loss: \" + str(unseen_loss_list))\n select_scores = model.evaluate(select_known_images, select_known_labels, batch_size=batch_size)\n select_unseen_scores_list.append(select_scores[1])\n select_unseen_loss_list.append((select_scores[0]))\n\n # show confusion matrix\n test_confusion_matrix, unseen_confusion_matrix, select_confusion_matrix = gettingKFoldConfusionMatrix(\n testing_data,\n testing_labels, known_images,\n known_labels, select_known_images, select_known_labels, kf_counter)\n\n probabilities_known_lenses = classifier.predict_classes(known_images, batch_size=batch_size)\n predicted_lens = np.count_nonzero(probabilities_known_lenses == 1)\n predicted_no_lens = np.count_nonzero(probabilities_known_lenses == 0)\n print(\"%s/%s known lenses predicted\" % (predicted_lens, len(known_images)))\n print(\"%s/%s non known lenses predicted\" % (predicted_no_lens, len(known_images)))\n\n predicted_lenses, predicted_no_lenses = 
savePredictedLenses(known_des_names,\n predicted_class_probabilities_known_lenses,\n text_file_path='../Results/%s'\n '/UnseenKnownLenses/'\n 'KFold_LensesPredicted.txt'\n % dt_string,\n predicted_lenses_filepath='../Results/%s/'\n 'UnseenKnownLenses'\n % dt_string)\n\n randomTP = None\n imageTP = None\n if predicted_lenses:\n randomTP = random.choice(predicted_lenses)\n filepathTP = unseen_known_file_path_all + '/%s' % randomTP\n imageTP = gettingRandomUnseenImage(filepathTP)\n true_positives[kf_counter] = (randomTP, imageTP)\n\n randomFN = None\n imageFN = None\n if predicted_no_lenses:\n randomFN = random.choice(predicted_no_lenses)\n filepathFN = unseen_known_file_path_all + '/%s' % randomFN\n imageFN = gettingRandomUnseenImage(filepathFN)\n false_negatives[kf_counter] = (randomFN, imageFN)\n\n # print(\"Lenses Predicted: \" + str(randomTP))\n # print(\"Lenses Not Predicted: \" + str(randomFN))\n\n test_matrix_list.append(test_confusion_matrix)\n unseen_matrix_list.append(unseen_confusion_matrix)\n select_matrix_list.append(select_confusion_matrix)\n\n test_scores_mean = np.mean(test_scores_list)\n test_loss_mean = np.mean(test_loss_list)\n test_scores_std = np.std(test_scores_list)\n unseen_scores_mean = np.mean(unseen_scores_list)\n unseen_loss_mean = np.mean(unseen_loss_list)\n unseen_scores_std = np.std(unseen_scores_list)\n select_scores_mean = np.mean(select_unseen_scores_list)\n select_loss_mean = np.mean(select_unseen_loss_list)\n select_scores_std = np.std(select_unseen_scores_list)\n\n print(\"Test Confusion Matrices: \" + str(test_matrix_list))\n print(\"Test Scores: \" + str(test_scores_list))\n print(\"Test Scores Mean: \" + str(test_scores_mean))\n print(\"Test Scores Std: \" + str(test_scores_std))\n print(\"Test Loss: \" + str(test_loss_list))\n print(\"Test Loss Mean: \" + str(test_loss_mean))\n print(\"Unseen Confusion Matrices: \" + str(unseen_matrix_list))\n print(\"Unseen Scores: \" + str(unseen_scores_list))\n print(\"Unseen Scores Mean: \" + str(unseen_scores_mean))\n print(\"Unseen Scores Std: \" + str(unseen_scores_std))\n print(\"Unseen Loss: \" + str(unseen_loss_list))\n print(\"Unseen Loss Mean: \" + str(unseen_loss_mean))\n print(\"Select Confusion Matrices: \" + str(select_matrix_list))\n print(\"Select Score: \" + str(select_unseen_scores_list))\n print(\"Select Scores Mean: \" + str(select_scores_mean))\n print(\"Select Unseen Scores Std: \" + str(select_scores_std))\n print(\"Select Loss: \" + str(select_unseen_loss_list))\n print(\"Unseen Loss Mean: \" + str(select_loss_mean))\n\n excel_headers.append(\"Test Loss Mean\")\n excel_dictionary.append(test_loss_mean)\n excel_headers.append(\"Test Scores Mean\")\n excel_dictionary.append(test_scores_mean)\n excel_headers.append(\"Test Scores Std\")\n excel_dictionary.append(test_scores_std)\n excel_headers.append(\"Unseen Loss Mean\")\n excel_dictionary.append(unseen_loss_mean)\n excel_headers.append(\"Unseen Known Lenses Mean\")\n excel_dictionary.append(unseen_scores_mean)\n excel_headers.append(\"Unseen Known Lenses Std\")\n excel_dictionary.append(unseen_scores_std)\n excel_headers.append(\"Select Loss Mean\")\n excel_dictionary.append(select_loss_mean)\n excel_headers.append(\"Select Scores Mean\")\n excel_dictionary.append(select_scores_mean)\n excel_headers.append(\"Select Std\")\n excel_dictionary.append(select_scores_std)\n\n plt.plot(test_scores_list, color='red', label='Testing Scores')\n plt.plot(unseen_scores_list, color='blue', label='Unseen Known Lenses Scores')\n 
plt.plot(select_unseen_scores_list, color='green', label=\"Selected Unseen Known Lenses Scores\")\n\n plt.xlabel('Folds')\n plt.ylabel('Accuracy')\n plt.legend()\n #plt.show()\n plt.savefig('../Results/%s/KFoldAccuracyScores.png' % dt_string)\n\n plotKFold(true_positives, false_negatives)\n\n\ndef viewActivationLayers():\n # make positive and negative directory\n if not os.path.exists('../Results/%s/PositiveResults/' % dt_string):\n os.mkdir('../Results/%s/PositiveResults/' % dt_string)\n\n if not os.path.exists('../Results/%s/NegativeResults/' % dt_string):\n os.mkdir('../Results/%s/NegativeResults/' % dt_string)\n\n # Plot original positive image\n img_positive_tensor = getPositiveImages('Training/PositiveAll', 1, input_shape=input_shape)\n positive_train_figure = plt.figure()\n plt.imshow(img_positive_tensor[0])\n # plt.show()\n print(img_positive_tensor.shape)\n positive_train_figure.savefig('../Results/%s/PositiveResults/PositiveTrainingFigure.png' % dt_string)\n plt.close()\n\n # Visualise Activations of positive image\n visualiseActivations(img_positive_tensor, base_dir='../Results/%s/PositiveResults/' % dt_string)\n\n # Plot original negative image\n img_negative_tensor = getNegativeImages('Training/Negative', 1, input_shape=input_shape)\n negative_train_figure = plt.figure()\n plt.imshow(img_negative_tensor[0])\n # plt.show()\n print(img_negative_tensor.shape)\n negative_train_figure.savefig('../Results/%s/NegativeResults/NegativeTrainingFigure.png' % dt_string)\n plt.close()\n\n # Visualise Activations of negative image\n visualiseActivations(img_negative_tensor, base_dir='../Results/%s/NegativeResults/' % dt_string)\n\n\ndef plotKFold(true_positives, false_negatives):\n # print('True Positives: ' + str(true_positives))\n # print('False Negatives: ' + str(false_negatives))\n fig, axs = plt.subplots(k_fold_num, 2)\n fig.tight_layout(pad=3.0)\n\n cols = ['True Positive', 'False Negative']\n\n for ax, col in zip(axs[0], cols):\n ax.set_title(col)\n\n # for ax, col in zip(axs[0], cols):\n # for i in range(len(cols)):\n # # axs[0, i].text(x=0.5, y=12, s=\"\", ha=\"center\", fontsize=12)\n # # axs[k_fold_num - 1, i].set_xlabel(cols[i])\n # axs[0, i].set_title(cols[i])\n # # ax.set_title(col)\n\n for i in range(0, k_fold_num):\n axs[i, 0].text(x=-0.8, y=5, s=\"\", rotation=90, va=\"center\")\n axs[i, 0].set_ylabel(\"k = %s\" % (i + 1))\n\n true_positive_tuple = true_positives[k_fold_num]\n if not true_positive_tuple[0] is None:\n axs[i, 0].set_xlabel(true_positive_tuple[0], fontsize=8)\n # axs[i, 0].set_title(true_positive_tuple[0], fontsize=6)\n axs[i, 0].imshow(true_positive_tuple[1])\n axs[i, 0].set_xticks([], [])\n axs[i, 0].set_yticks([], [])\n\n false_negative_tuple = false_negatives[k_fold_num]\n if not false_negative_tuple[0] is None:\n axs[i, 1].set_xlabel(false_negative_tuple[0], fontsize=8)\n # axs[i, 1].set_title(false_negative_tuple[0], fontsize=6)\n axs[i, 1].imshow(false_negative_tuple[1])\n axs[i, 1].set_xticks([], [])\n axs[i, 1].set_yticks([], [])\n\n fig.tight_layout()\n #plt.show()\n fig.savefig('../Results/%s/UnseenKnownLenses/KFoldImages.png' % dt_string)\n\n\n# __________________________________________________________________________\n# MAIN\n\n# Get positive training data\ntrain_pos = getPositiveImages(images_dir=training_positive_path, max_num=max_num, input_shape=input_shape)\nprint(\"Train Positive Shape: \" + str(train_pos.shape))\nexcel_headers.append(\"Train_Positive_Shape\")\nexcel_dictionary.append(train_pos.shape)\n\n# Get negative training 
data\ntrain_neg = getNegativeImages(images_dir=training_negative_path, max_num=max_num, input_shape=input_shape)\nprint(\"Train Negative Shape: \" + str(train_neg.shape))\nexcel_headers.append(\"Train_Negative_Shape\")\nexcel_dictionary.append(train_neg.shape)\n\nall_training_data, all_training_labels, _ = makeImageSet(train_pos, train_neg, shuffle_needed=use_shuffle)\nif use_augmented_data:\n all_training_data, all_training_labels = createAugmentedData(all_training_data, all_training_labels)\n\ntraining_data, val_data, training_labels, val_labels = train_test_split(all_training_data,\n all_training_labels,\n test_size=validation_split,\n shuffle=True)\nexcel_headers.append(\"All_Training_Data_Shape\")\nexcel_dictionary.append(all_training_labels.shape)\nexcel_headers.append(\"All_Training_Labels_Shape\")\nexcel_dictionary.append(all_training_labels.shape)\nexcel_headers.append(\"Training_Data_Shape\")\nexcel_dictionary.append(training_data.shape)\nexcel_headers.append(\"Validation_Data_Shape\")\nexcel_dictionary.append(val_data.shape)\nexcel_headers.append(\"Training_Labels_Shape\")\nexcel_dictionary.append(training_labels.shape)\nexcel_headers.append(\"Validation_Labels_Shape\")\nexcel_dictionary.append(val_labels.shape)\nexcel_headers.append(\"Validation_Split\")\nexcel_dictionary.append(validation_split)\n\nhistory, classifier = usingCnnModel(training_data,\n training_labels,\n val_data,\n val_labels)\n\nclassifier.load_weights('best_weights.hdf5')\nclassifier.save_weights('galaxies_cnn.h5')\n\nexcel_headers.append(\"Epochs\")\nexcel_dictionary.append(epochs)\nexcel_headers.append(\"Batch_size\")\nexcel_dictionary.append(batch_size)\n\n# Plot run metrics\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nnumber_of_completed_epochs = range(1, len(acc) + 1)\n\n# Accuracies\nprint(\"Training Acc: \" + str(acc))\nprint(\"Training Loss:\" + str(loss))\nprint(\"Validation Acc: \" + str(val_acc))\nprint(\"Validation Loss: \" + str(val_loss))\n\ntrain_mean_acc = np.mean(acc)\ntrain_std_acc = np.std(acc)\ntrain_mean_loss = np.mean(loss)\ntrain_std_loss = np.std(loss)\nval_mean_acc = np.mean(val_acc)\nval_std_acc = np.std(val_acc)\nval_mean_loss = np.mean(val_loss)\nval_std_loss = np.std(val_loss)\n\nexcel_headers.append(\"Training Mean Accuracy\")\nexcel_dictionary.append(train_mean_acc)\nexcel_headers.append(\"Training Std Accuracy\")\nexcel_dictionary.append(train_std_acc)\nexcel_headers.append(\"Training Mean Loss\")\nexcel_dictionary.append(train_mean_loss)\nexcel_headers.append(\"Training Std Loss\")\nexcel_dictionary.append(train_std_loss)\nexcel_headers.append(\"Validation Mean Accuracy\")\nexcel_dictionary.append(val_mean_acc)\nexcel_headers.append(\"Validation Std Accuracy\")\nexcel_dictionary.append(val_std_acc)\nexcel_headers.append(\"Validation Mean Loss\")\nexcel_dictionary.append(val_mean_loss)\nexcel_headers.append(\"Validation Std Loss\")\nexcel_dictionary.append(val_std_loss)\n\nprint(f\"Mean Training Acc: {np.mean(acc)} +\\- {np.std(acc)}\")\nprint(f\"Mean Training Loss: {np.mean(loss)} +\\- {np.std(loss)}\")\nprint(f\"Mean Validation Acc: {np.mean(val_acc)} +\\- {np.std(val_acc)}\")\nprint(f\"Mean Validation Loss: {np.mean(val_loss)} +\\- {np.std(val_loss)}\")\n\ntrain_val_accuracy_figure = plt.figure()\nplt.plot(number_of_completed_epochs, acc, label='Training acc')\nplt.plot(number_of_completed_epochs, val_acc, label='Validation acc')\nplt.title('Training and validation 
accuracy')\nplt.legend()\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Accuracy\")\n#plt.show()\ntrain_val_accuracy_figure.savefig('../Results/%s/TrainingValidationAccuracy.png' % dt_string)\nplt.close()\n\n# Losses\ntrain_val_loss_figure = plt.figure()\nplt.plot(number_of_completed_epochs, loss, label='Training loss')\nplt.plot(number_of_completed_epochs, val_loss, label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Loss\")\n#plt.show()\ntrain_val_loss_figure.savefig('../Results/%s/TrainingValidationLoss.png' % dt_string)\nplt.close()\n\n# make positive and negative results and plotting the activations of positive and negative images\n# viewActivationLayers()\n\n# Classifier evaluation\ntest_pos = getPositiveImages(images_dir=testing_positive_path, max_num=max_num_testing, input_shape=input_shape)\ntest_neg = getNegativeImages(images_dir=testing_negative_path, max_num=max_num_testing, input_shape=input_shape)\ntesting_data, testing_labels, _ = makeImageSet(test_pos, test_neg, shuffle_needed=True)\nprint(\"Testing Data Shape: \" + str(testing_data.shape))\nprint(\"Testing Labels Shape: \" + str(testing_labels.shape))\nprint(\"Got Unseen Testing data\")\n\nscores = classifier.evaluate(testing_data, testing_labels, batch_size=batch_size)\nloss = scores[0]\naccuracy = scores[1]\nprint(\"Test loss: %s\" % loss)\nprint(\"Test accuracy: %s\" % accuracy)\n\nexcel_headers.append(\"Test_Loss\")\nexcel_dictionary.append(loss)\nexcel_headers.append(\"Test_Accuracy\")\nexcel_dictionary.append(accuracy)\n\ngettingTrueFalsePositiveNegatives(testing_data,\n testing_labels,\n text_file_path='../Results/%s/TrainingTestingResults/PredictedMatrixBeforeKFOLD.txt'\n % dt_string,\n predicted_lenses_filepath='../Results/%s/TrainingTestingResults' % dt_string)\n\nunseen_known_images = getUnseenData(images_dir=unseen_known_file_path_all,\n max_num=max_num_prediction,\n input_shape=input_shape)\n\nknown_images, known_labels, known_des_names = makeImageSet(positive_images=list(unseen_known_images.values()),\n tile_names=list(unseen_known_images.keys()),\n shuffle_needed=True)\nprint(\"Unseen Known Images Shape: \" + str(known_images.shape))\nprint(\"Unseen Known Labels Shape: \" + str(known_labels.shape))\nprint(\"Got Unseen Known Lenses Data\")\n\nunseen_scores = classifier.evaluate(known_images, known_labels, batch_size=batch_size)\nunseen_loss_score = unseen_scores[0]\nunseen_accuracy_score = unseen_scores[1]\nprint(\"Unseen loss: %s\" % unseen_loss_score)\nprint(\"Unseen accuracy: %s\" % unseen_accuracy_score)\n\nexcel_headers.append(\"Unseen_Loss\")\nexcel_dictionary.append(unseen_loss_score)\nexcel_headers.append(\"Unseen_Accuracy\")\nexcel_dictionary.append(unseen_accuracy_score)\n\npredicted_class_probabilities_known_lenses = classifier.predict_classes(known_images, batch_size=batch_size)\nlens_predicted = np.count_nonzero(predicted_class_probabilities_known_lenses == 1)\nnon_lens_predicted = np.count_nonzero(predicted_class_probabilities_known_lenses == 0)\nprint(\"%s/%s known lenses predicted\" % (lens_predicted, len(known_images)))\nprint(\"%s/%s non known lenses predicted\" % (non_lens_predicted, len(known_images)))\n\ngettingTrueFalsePositiveNegatives(known_images, known_labels,\n text_file_path='../Results/%s/UnseenKnownLenses/PredictedMatrixBeforeKFOLD.txt' % dt_string,\n predicted_lenses_filepath='../Results/%s/UnseenKnownLenses' % dt_string)\n\npredicted_lenses, predicted_no_lenses = savePredictedLenses(known_des_names,\n 
predicted_class_probabilities_known_lenses,\n text_file_path='../Results/%s/UnseenKnownLenses/'\n 'PredictedMatrixBeforeKFOLD.txt' % dt_string,\n predicted_lenses_filepath='../Results/%s/UnseenKnownLenses'\n % dt_string)\n\n######################################################################################\nunseen_known_images_select = getUnseenData(images_dir=unseen_known_file_path_select,\n max_num=max_num_prediction,\n input_shape=input_shape)\n\nselect_known_images, select_known_labels, select_known_des_names = makeImageSet(\n positive_images=list(unseen_known_images_select.values()),\n tile_names=list(unseen_known_images_select.keys()),\n shuffle_needed=True)\nprint(\"Unseen Selected Known Images Shape: \" + str(select_known_images.shape))\nprint(\"Unseen Selected Known Labels Shape: \" + str(select_known_labels.shape))\nprint(\"Got Unseen Selected Known Lenses Data\")\n\nselect_unseen_scores = classifier.evaluate(select_known_images, select_known_labels, batch_size=batch_size)\nselect_unseen_loss_score = select_unseen_scores[0]\nselect_unseen_accuracy_score = select_unseen_scores[1]\nprint(\"Unseen Selected loss: %s\" % select_unseen_loss_score)\nprint(\"Unseen Selected accuracy: %s\" % select_unseen_accuracy_score)\n\nexcel_headers.append(\"Selected Unseen_Loss\")\nexcel_dictionary.append(select_unseen_loss_score)\nexcel_headers.append(\"Select Unseen_Accuracy\")\nexcel_dictionary.append(select_unseen_accuracy_score)\n\nselect_predicted_class_probabilities_known_lenses = classifier.predict_classes(select_known_images,\n batch_size=batch_size)\nselect_lens_predicted = np.count_nonzero(select_predicted_class_probabilities_known_lenses == 1)\nselect_non_lens_predicted = np.count_nonzero(select_predicted_class_probabilities_known_lenses == 0)\nprint(\"%s/%s known lenses predicted\" % (select_lens_predicted, len(select_known_images)))\nprint(\"%s/%s non known lenses predicted\" % (select_non_lens_predicted, len(select_known_images)))\n\ngettingTrueFalsePositiveNegatives(select_known_images, select_known_labels,\n text_file_path='../Results/%s/UnseenKnownLensesSelect/PredictedMatrixBeforeKFOLD.txt' % dt_string,\n predicted_lenses_filepath='../Results/%s/UnseenKnownLensesSelect' % dt_string)\n\nselect_predicted_lenses, select_predicted_no_lenses = savePredictedLenses(select_known_des_names,\n select_predicted_class_probabilities_known_lenses,\n text_file_path='../Results/%s'\n '/UnseenKnownLensesSelect/ '\n 'PredictedMatrixBeforeKFOLD'\n '.txt' % dt_string,\n predicted_lenses_filepath='../Results/%s'\n '/UnseenKnownLensesSelect'\n % dt_string)\n\nexcel_headers.append(\"Selected Unseen_Known_Lenses_Predicted\")\nexcel_dictionary.append(select_lens_predicted)\nexcel_headers.append(\"Selected Unseen_Known_Lenses_No_Lens_Predicted\")\nexcel_dictionary.append(select_non_lens_predicted)\n\n# K fold for training data\nexecuteKFoldValidation(training_data,\n training_labels,\n val_data,\n val_labels,\n testing_data,\n testing_labels,\n known_images,\n known_labels,\n known_des_names,\n select_known_images, select_known_labels)\n\nif makeNewCSVFile:\n createExcelSheet('../Results/Architecture_kerasCNN_Results.csv', excel_headers)\n writeToFile('../Results/Architecture_kerasCNN_Results.csv', excel_dictionary)\n createExcelSheet('../Results/%s/%s_KerasResults.csv' % (dt_string, dt_string), excel_headers)\n writeToFile('../Results/%s/%s_KerasResults.csv' % (dt_string, dt_string), excel_dictionary)\n\nelse:\n writeToFile('../Results/Architecture_kerasCNN_Results.csv', excel_dictionary)\n 
writeToFile('../Results/%s/%s_KerasResults.csv' % (dt_string, dt_string), excel_dictionary)\n" ]
[ [ "sklearn.metrics.confusion_matrix", "tensorflow.python.keras.layers.convolutional.Conv2D", "tensorflow.python.keras.callbacks.ModelCheckpoint", "numpy.mean", "tensorflow.python.keras.optimizers.Adam", "sklearn.utils.shuffle", "numpy.count_nonzero", "sklearn.model_selection.StratifiedKFold", "tensorflow.python.keras.layers.convolutional.MaxPooling2D", "matplotlib.pyplot.savefig", "tensorflow.python.keras.utils.vis_utils.plot_model", "tensorflow.python.keras.layers.core.Dropout", "matplotlib.pyplot.subplots", "tensorflow.python.keras.layers.core.Dense", "tensorflow.python.keras.Sequential", "tensorflow.python.keras.preprocessing.image.ImageDataGenerator", "tensorflow.python.keras.models.Model", "numpy.array", "numpy.zeros", "matplotlib.pyplot.title", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "numpy.std", "tensorflow.python.keras.layers.core.Flatten", "numpy.clip", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel", "tensorflow.python.keras.callbacks.EarlyStopping", "matplotlib.pyplot.imshow" ] ]
roger1993/classic_algorithm
[ "7b0cda39d6c48b76f2b12ad74a31ffb19776c0e7" ]
[ "jiaojian_temp_match/jj_temp.py" ]
[ "# import the necessary packages\nimport numpy as np\nimport argparse\nimport glob\nimport cv2\nfrom matplotlib import pyplot as plt\n \n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-t\", \"--template\", required=True, help=\"Path to template image\")\nap.add_argument(\"-i\", \"--images\", required=True,\n\thelp=\"Path to images where template will be matched\")\nap.add_argument(\"-v\", \"--visualize\",\n\thelp=\"Flag indicating whether or not to visualize each iteration\")\nargs = vars(ap.parse_args())\n \n# load the image image, convert it to grayscale, and detect edges\ntemplate = cv2.imread(args[\"template\"])\n#plt.title(\"template\")\n#plt.imshow(template)\n#plt.show()\ntemplate = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)\n#template = cv2.Canny(template, 50, 200)\n(h, w) = template.shape[:2]\n#plt.subplot(121)\n#plt.imshow(template, cmap = \"gray\", interpolation = 'bicubic')\n#plt.xticks([]), plt.yticks([])\n\nwith open (\"/Users/roger/Downloads/demo/输出/角件_output/result.txt\", \"w\") as f:\n# loop over the images to find the template in\n\tcounter = 1\n\tfor imagePath in glob.glob(args[\"images\"] + \"/*.*\"):\n\t# load the image, convert it to grayscale, and initialize the\n\t# bookkeeping variable to keep track of the matched region\n\t\timage = cv2.imread(imagePath)\n\t#image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\t\timage_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\t\timage_gray = cv2.Canny(image_gray, 50, 200)\n\t\tres = cv2.matchTemplate(image_gray, template, cv2.TM_CCOEFF_NORMED)\n\t\tthreshold = 0.6\n\t\tloc = np.where(res >= threshold)\n\t\t#print(len(res[loc]))\n\t\tif len(res[loc]) <= 5:\n\t\t\tf.write(\"{} is NG !!\\n\".format(imagePath.split(\"/\")[-1]))\n\t\t\t#print(\"{}存在缺陷!\".format(imagePath.split(\"/\")[-1]))\n\t\t\tmin_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\n\t\t\ttop_left = max_loc\n\t\t\tbottom_right = (top_left[0] + w, top_left[1] + h)\n\t\t\tcv2.putText(image, \"NG\", (100,0), cv2.FONT_HERSHEY_SIMPLEX, 2, 255)\n\t\t\tcv2.rectangle(image,top_left, bottom_right, (0,0,255), 2)\n\t\t\tcv2.imwrite(\"/Users/roger/Downloads/demo/输出/角件_output/\" + \"缺陷样本\" + str(counter) + \".bmp\", image)\n\t\t\tcounter += 1\n\t\telse:\n\t\t\tcounter += 1\n\t\t\tcontinue\n" ]
[ [ "numpy.where" ] ]
johndpope/finetune
[ "8cdc2a29104f3f2f6e032a9496b3c4e251ac028c", "8cdc2a29104f3f2f6e032a9496b3c4e251ac028c" ]
[ "tests/test_general_api.py", "finetune/lm_entailment.py" ]
[ "import os\nimport unittest\n\nfrom pathlib import Path\n\n# required for tensorflow logging control\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport tensorflow as tf\nimport pandas as pd\nimport enso\nfrom enso.download import generic_download\n\nfrom finetune import LanguageModelGeneralAPI\nimport numpy as np\n\nSST_FILENAME = \"SST-binary.csv\"\n\n\nclass TestLanguageModelClassifier(unittest.TestCase):\n n_sample = 100\n n_hidden = 768\n dataset_path = os.path.join(\n enso.config.DATA_DIRECTORY, 'Classify', 'SST-binary.csv'\n )\n\n @classmethod\n def _download_sst(cls):\n \"\"\"\n Download Stanford Sentiment Treebank to enso `data` directory\n \"\"\"\n path = Path(cls.dataset_path)\n if path.exists():\n return\n\n path.mkdir(parents=True, exist_ok=True)\n generic_download(\n url=\"https://s3.amazonaws.com/enso-data/SST-binary.csv\",\n text_column=\"Text\",\n target_column=\"Target\",\n filename=SST_FILENAME\n )\n\n @classmethod\n def setUpClass(cls):\n cls._download_sst()\n\n def setUp(self):\n save_file_autosave = 'tests/saved-models/autosave_path'\n self.save_file = 'tests/saved-models/test-save-load'\n self.model = LanguageModelGeneralAPI(verbose=False, autosave_path=save_file_autosave)\n\n self.dataset = pd.read_csv(self.dataset_path)\n train_sample = self.dataset.sample(n=self.n_sample)\n valid_sample = self.dataset.sample(n=self.n_sample)\n self.text_data_train = list(zip(train_sample.Text, train_sample.Text, train_sample.Text))\n self.text_data_valid = list(zip(valid_sample.Text, valid_sample.Text, valid_sample.Text))\n self.train_targets = train_sample.Target\n tf.reset_default_graph()\n\n def test_multifield_classify(self):\n \"\"\"\n Ensure fit predict works on classification with multi inputs\n Ensure saving + loading does not cause errors\n Ensure saving + loading does not change predictions\n \"\"\"\n self.model.fit(self.text_data_train, self.train_targets)\n self.assertTrue(self.model.is_classification)\n predictions = self.model.predict(self.text_data_valid)\n self.model.save(self.save_file)\n model = LanguageModelGeneralAPI.load(self.save_file)\n new_predictions = model.predict(self.text_data_valid)\n for new_pred, old_pred in zip(new_predictions, predictions):\n self.assertEqual(new_pred, old_pred)\n\n def test_multifield_regression(self):\n \"\"\" \n Ensure fit predict works with regression targets and multiple inputs.\n Ensure saving + loading does not cause errors \n Ensure saving + loading does not change predictions \n \"\"\"\n\n self.model.fit(self.text_data_train, [np.random.random() for _ in self.train_targets])\n self.assertTrue(not self.model.is_classification)\n predictions = self.model.predict(self.text_data_valid)\n self.model.save(self.save_file)\n model = LanguageModelGeneralAPI.load(self.save_file)\n new_predictions = model.predict(self.text_data_valid)\n for new_pred, old_pred in zip(new_predictions, predictions):\n self.assertEqual(new_pred, old_pred)\n", "import json\n\nfrom sklearn.model_selection import train_test_split\n\nfrom finetune.config import BATCH_SIZE\nfrom finetune.lm_base import LanguageModelBase\nfrom finetune.target_encoders import OrdinalClassificationEncoder\n\n\nclass LanguageModelEntailment(LanguageModelBase):\n\n def get_target_encoder(self):\n return OrdinalClassificationEncoder()\n\n def _text_to_ids(self, *Xs, max_length=None):\n max_length = max_length or self.max_length\n assert len(Xs) == 2, \"This implementation assumes 2 Xs\"\n\n question_answer_pairs = self.encoder.encode_for_entailment(*Xs, 
max_length=max_length)\n\n        tokens, mask = self._array_format(question_answer_pairs)\n        return tokens, mask
\n\n    def finetune(self, X_1, X_2, Y, batch_size=BATCH_SIZE, val_size=0.05, val_interval=150):\n        \"\"\"\n        :param X_1: list or array of text to embed as the queries.\n        :param X_2: list or array of text to embed as the answers.\n        :param Y: integer or string-valued class labels. It is necessary for the items of Y to be sortable.\n        :param batch_size: integer number of examples per batch. When N_GPUS > 1, this number\n            corresponds to the number of training examples provided to each GPU.\n        :param val_size: Float fraction or int number that represents the size of the validation set.\n        :param val_interval: The interval for which validation is performed, measured in number of steps.\n        \"\"\"\n        self.is_classification = True\n        return self._finetune(X_1, X_2, Y=Y, batch_size=batch_size, val_size=val_size, val_interval=val_interval)
\n\n    def predict(self, X_1, X_2, max_length=None):\n        \"\"\"\n        Produces a list of most likely class labels as determined by the fine-tuned model.\n\n        :param X_1: list or array of text to embed as the queries.\n        :param X_2: list or array of text to embed as the answers.\n        :param max_length: the number of tokens to be included in the document representation.\n            Providing more than `max_length` tokens as input will result in truncation.\n        :returns: list of class labels.\n        \"\"\"\n        return self.label_encoder.inverse_transform(self._predict_proba(X_1, X_2, max_length=max_length))
\n\n    def predict_proba(self, X_1, X_2, max_length=None):\n        \"\"\"\n        Produces a probability distribution over classes for each example.\n\n        :param X_1: list or array of text to embed as the queries.\n        :param X_2: list or array of text to embed as the answers.\n        :param max_length: the number of tokens to be included in the document representation.\n            Providing more than `max_length` tokens as input will result in truncation.\n        :returns: list of dictionaries. Each dictionary maps from a class label to its assigned class probability.\n        \"\"\"\n        return self._predict_proba(X_1, X_2, max_length=max_length)
\n\n    def featurize(self, X_1, X_2, max_length=None):\n        \"\"\"\n        Embeds inputs in learned feature space. 
Can be called before or after calling :meth:`finetune`.\n\n :param X_1: list or array of text to embed as the queries.\n :param X_2: list or array of text to embed as the answers.\n :param max_length: the number of tokens to be included in the document representation.\n Providing more than `max_length` tokens as input will result in truncation.\n :returns: np.array of features of shape (n_examples, embedding_size).\n \"\"\"\n return self._featurize(X_1, X_2, max_length=max_length)\n\n\nif __name__ == \"__main__\":\n\n with open(\"data/questions.json\", \"rt\") as fp:\n data = json.load(fp)\n\n scores = []\n questions = []\n answers = []\n save_path = 'saved-models/cola'\n model = LanguageModelEntailment(save_path)\n\n for item in data:\n row = data[item]\n scores.append(row[\"score\"])\n questions.append(row[\"question\"])\n answers.append(row[\"answers\"][0][\"answer\"])\n\n scores_train, scores_test, ques_train, ques_test, ans_train, ans_test = train_test_split(\n scores, questions, answers, test_size=0.33, random_state=5)\n\n #model.finetune(ques_train, ans_train, scores_train)\n\n model = LanguageModelEntailment.load(save_path)\n\n print(\"TRAIN EVAL\")\n predictions = model.predict(ques_train, ans_train)\n print(predictions)\n\n from scipy.stats import spearmanr\n\n print(spearmanr(predictions, scores_train))\n\n print(\"TEST EVAL\")\n predictions = model.predict(ques_test, ans_test)\n print(predictions)\n print(spearmanr(predictions, scores_test))\n" ]
[ [ "numpy.random.random", "pandas.read_csv", "tensorflow.reset_default_graph" ], [ "sklearn.model_selection.train_test_split", "scipy.stats.spearmanr" ] ]
zzc-tongji/gwu-csci-6554-computer-graphic-2
[ "dfbb16369878802f57a4c280e71cfd3ac28b41e7" ]
[ "lab4/window.py" ]
[ "from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nfrom scipy.interpolate import interp2d\nimport datetime\nimport numpy as np\n\nfrom aet import AET\nfrom display import Display\nfrom light import Light\nfrom shading import Shading\nfrom space import Space\nfrom texture import Texture\nfrom transform import *\nimport illumination\n\n\nclass Window(object):\n\n def __init__(self):\n # input\n self.__world_space = None\n self.__device_space = None\n self.__camera = None\n self.__lay = None\n self.__light = None\n self.__shading = None\n self.__display = None\n self.__texture = None\n # calculate\n self.__polygon_list_back_face = None\n self.__polygon_list_color = None\n self.__polygon_list_aet = None\n self.__vertex_list_back_face = None\n self.__vertex_list_color = None\n self.__z_buffer = None # coordinate z of the pixel\n self.__z_buffer = None # coordinate w of the pixel\n self.__p_buffer = None # the polygon which the pixel belongs to\n self.__i_buffer = None # the color (RGB) of the pixel\n # ready\n self.__is_ready = False\n\n def set(self, world_space, device_space, camera, display, light, shading, texture, lay):\n if not isinstance(world_space, Space):\n raise Exception('There is a type error in parameter `device_space`.')\n if not isinstance(device_space, Space):\n raise Exception('There is a type error in parameter `device_space`.')\n if not isinstance(camera, Camera) or not camera.is_ready:\n raise Exception('Camera is not ready.')\n if not isinstance(display, Display) or not display.is_ready:\n raise Exception('Display is not ready.')\n if not isinstance(light, Light):\n raise Exception('There is a type error in parameter `light`.')\n if not isinstance(shading, Shading):\n raise Exception('There is a type error in parameter `shading`.')\n if not isinstance(texture, Texture):\n raise Exception('There is a type error in parameter `texture`.')\n if not isinstance(lay, Lay):\n raise Exception('There is a type error in parameter `lay`.')\n self.__world_space = world_space\n self.__device_space = device_space\n self.__camera = camera\n self.__display = display\n self.__lay = lay\n self.__light = light\n self.__shading = shading\n self.__texture = texture\n self.__calculate()\n\n def __calculate(self):\n # polygon\n print('Calculating: polygon ...')\n start = datetime.datetime.now()\n len_polygon = len(self.__device_space.polygon_list_vertex_list)\n self.__polygon_list_back_face = [False] * len_polygon\n self.__polygon_list_color = [None] * len_polygon\n self.__polygon_list_aet = [None] * len_polygon\n for i in range(1, len_polygon):\n # back-face culling\n if self.__device_space.polygon_list_normal_vector[i][2][0] >= 0:\n # ignore back-face polygon\n self.__polygon_list_back_face[i] = True\n continue\n if self.__shading.shading_type != 0:\n if self.__shading.shading_type == 1:\n # generate color (RGB) => for constant shading\n self.__polygon_list_color[i] = illumination.calculate(\n self.__camera,\n self.__light,\n self.__world_space.geometry_list_material[self.__world_space.polygon_list_geometry_index[i]],\n mr4c1_to_v3(self.__world_space.polygon_list_normal_vector[i])\n )\n # generate active edge table (AET)\n self.__polygon_list_aet[i] = AET()\n self.__polygon_list_aet[i].set(\n self.__device_space.vertex_list_coordinate,\n self.__device_space.polygon_list_vertex_list[i],\n self.__device_space.polygon_list_edge_list_vertex_index[i]\n )\n cost = datetime.datetime.now() - start\n print('Finish. 
(cost = ' + str(cost) + ')\\n')\n if self.__shading.shading_type == 2:\n # vertex\n print('Calculating: vertex ...')\n start = datetime.datetime.now()\n len_vertex = len(self.__device_space.vertex_list_coordinate)\n self.__vertex_list_back_face = [True] * len_vertex\n self.__vertex_list_color = [None] * len_vertex\n for i in range(1, len_vertex):\n # back-face culling\n #\n # consider a vertex \"back-faced\" if ALL polygons which the vertex belongs to are back-face\n for j in range(0, len(self.__device_space.vertex_list_polygon_list[i])):\n if self.__polygon_list_back_face[self.__device_space.vertex_list_polygon_list[i][j]] == False:\n self.__vertex_list_back_face[i] = False\n break\n for i in range(1, len_vertex):\n if self.__vertex_list_back_face[i]:\n # ignore back-face vertex\n continue\n # generate color (RGB) => for Gouraud shading\n self.__vertex_list_color[i] = illumination.calculate(\n self.__camera,\n self.__light,\n self.__world_space.geometry_list_material[self.__world_space.vertex_list_geometry_index[i]],\n mr4c1_to_v3(self.__world_space.vertex_list_normal_vector[i])\n )\n cost = datetime.datetime.now() - start\n print('Finish. (cost = ' + str(cost) + ')\\n')\n if self.__shading.shading_type != 0:\n # pixel\n print('Calculating: pixel ...')\n start = datetime.datetime.now()\n self.__z_buffer = []\n self.__w_buffer = []\n self.__p_buffer = []\n self.__i_buffer = []\n for i in range(0, self.__display.pixel_number + 1):\n self.__z_buffer.append([])\n self.__w_buffer.append([])\n self.__p_buffer.append([])\n self.__i_buffer.append([])\n for j in range(0, self.__display.pixel_number + 1):\n self.__z_buffer[-1].append(float('Inf'))\n self.__w_buffer[-1].append(float('Inf'))\n self.__p_buffer[-1].append(0)\n self.__i_buffer[-1].append([0, 0, 0])\n # iterate each polygon\n for i in range(1, len(self.__polygon_list_aet)):\n if self.__polygon_list_back_face[i]:\n # ignore back-face polygon\n continue\n # set parameter of interpolator\n source_x = []\n source_y = []\n object_z = []\n object_w = []\n object_color_r = []\n object_color_g = []\n object_color_b = []\n object_normal_x = []\n object_normal_y = []\n object_normal_z = []\n for j in range(0, len(self.__device_space.polygon_list_vertex_list[i])):\n coordinate_mr4c1 = self.__device_space.vertex_list_coordinate[\n self.__device_space.polygon_list_vertex_list[i][j]\n ]\n source_x.append(coordinate_mr4c1[0][0])\n source_y.append(coordinate_mr4c1[1][0])\n object_z.append(coordinate_mr4c1[2][0])\n object_w.append(coordinate_mr4c1[3][0])\n if self.__shading.shading_type == 2:\n color_v3 = self.__vertex_list_color[\n self.__device_space.polygon_list_vertex_list[i][j]\n ]\n object_color_r.append(color_v3[0])\n object_color_g.append(color_v3[1])\n object_color_b.append(color_v3[2])\n elif self.__shading.shading_type == 3:\n normal_mr4c1 = self.__world_space.vertex_list_normal_vector[\n self.__device_space.polygon_list_vertex_list[i][j]\n ]\n object_normal_x.append(normal_mr4c1[0][0])\n object_normal_y.append(normal_mr4c1[1][0])\n object_normal_z.append(normal_mr4c1[2][0])\n # Each list should contain at least 4 elements, or function `interp2d` will throw error.\n if len(source_x) < 4:\n source_x.append(source_x[-1])\n source_y.append(source_y[-1])\n object_z.append(object_z[-1])\n object_w.append(object_z[-1])\n if self.__shading.shading_type == 2:\n object_color_r.append(object_color_g[-1])\n object_color_g.append(object_color_g[-1])\n object_color_b.append(object_color_b[-1])\n elif self.__shading.shading_type == 3:\n 
object_normal_x.append(object_normal_x[-1])\n object_normal_y.append(object_normal_y[-1])\n object_normal_z.append(object_normal_z[-1])\n # generate interpolator\n try:\n get_z = interp2d(source_x, source_y, object_z)\n get_w = interp2d(source_x, source_y, object_w)\n get_color_r = None\n get_color_g = None\n get_color_b = None\n get_normal_x = None\n get_normal_y = None\n get_normal_z = None\n if self.__shading.shading_type == 2:\n get_color_r = interp2d(source_x, source_y, object_color_r)\n get_color_g = interp2d(source_x, source_y, object_color_g)\n get_color_b = interp2d(source_x, source_y, object_color_b)\n elif self.__shading.shading_type == 3:\n get_normal_x = interp2d(source_x, source_y, object_normal_x)\n get_normal_y = interp2d(source_x, source_y, object_normal_y)\n get_normal_z = interp2d(source_x, source_y, object_normal_z)\n except:\n get_z = None\n # iterate each scan line\n for j in range(0, len(self.__polygon_list_aet[i].active_edge_table)):\n if self.__polygon_list_aet[i].active_edge_table[j] is not None:\n scan_line = self.__polygon_list_aet[i].active_edge_table[j]\n len_scan_line = len(scan_line)\n if len_scan_line % 2 != 0:\n # ignore the wrong situation (each scan line should contain even edges)\n len_scan_line -= 1\n for k in range(0, len_scan_line, 2):\n for l in range(int(scan_line[k].bottom_vertex_x), int(scan_line[k + 1].bottom_vertex_x)):\n x = l\n y = j + self.__polygon_list_aet[i].offset_y\n if 0 <= x <= self.__display.pixel_number and 0 <= y <= self.__display.pixel_number:\n if get_z is not None:\n z = get_z(x, y)[0]\n else:\n z = float('Inf')\n if z < self.__z_buffer[x][y]:\n self.__z_buffer[x][y] = z\n self.__w_buffer[x][y] = get_w(x, y)[0]\n self.__p_buffer[x][y] = i\n #\n if self.__shading.shading_type == 1:\n # constant shading\n self.__i_buffer[x][y] = self.__polygon_list_color[i]\n elif self.__shading.shading_type == 2:\n # Gouraud shading\n self.__i_buffer[x][y] = [\n get_color_r(x, y)[0], get_color_g(x, y)[0], get_color_b(x, y)[0]\n ]\n elif self.__shading.shading_type == 3:\n # Phone shading\n if self.__texture.enable:\n mr4c1_device = np.array([\n [x],\n [y],\n [self.__z_buffer[x][y]],\n [self.__w_buffer[x][y]]\n ])\n mr4c1_local = world_to_local(\n view_to_world(\n screen_to_view(\n device_to_screen(\n mr4c1_device,\n self.__display\n ),\n self.__camera\n ),\n self.__camera\n ),\n self.__lay\n )\n self.__i_buffer[x][y] = illumination.calculate(\n self.__camera,\n self.__light,\n self.__world_space.geometry_list_material[\n self.__world_space.polygon_list_geometry_index[i]\n ],\n np.array([\n get_normal_x(x, y)[0],\n get_normal_y(x, y)[0],\n get_normal_z(x, y)[0]\n ]),\n self.__texture.calculate(mr4c1_local)\n )\n else:\n self.__i_buffer[x][y] = illumination.calculate(\n self.__camera,\n self.__light,\n self.__world_space.geometry_list_material[\n self.__world_space.polygon_list_geometry_index[i]\n ],\n np.array([\n get_normal_x(x, y)[0],\n get_normal_y(x, y)[0],\n get_normal_z(x, y)[0]\n ])\n )\n cost = datetime.datetime.now() - start\n print('Finish. 
(cost = ' + str(cost) + ')\\n')\n # ready\n self.is_ready = True\n\n def __draw(self):\n print('Rendering ...')\n start = datetime.datetime.now()\n glClear(GL_COLOR_BUFFER_BIT)\n if self.__shading.shading_type == 0:\n # no shading (framework)\n for i in range(1, len(self.__device_space.polygon_list_vertex_list)):\n if not self.__polygon_list_back_face[i]:\n # draw line\n glBegin(GL_LINE_LOOP)\n for vertex_index in self.__device_space.polygon_list_vertex_list[i]:\n # draw point\n glVertex2f(\n self.__device_space.vertex_list_coordinate[vertex_index][0][0],\n self.__device_space.vertex_list_coordinate[vertex_index][1][0]\n )\n glEnd()\n else:\n # constant shading / Gouraud shading / Phone shading\n glBegin(GL_POINTS)\n for i in range(0, self.__display.pixel_number + 1):\n for j in range(0, self.__display.pixel_number + 1):\n if self.__p_buffer[i][j] == 0:\n # accelerate: ignore \"non-polygon\" pixels\n continue\n glColor3fv(self.__i_buffer[i][j])\n glVertex2i(i, j)\n glEnd()\n glFlush()\n render_cost = datetime.datetime.now() - start\n print('Finish. (cost = ' + str(render_cost) + ')\\n')\n\n def show(self):\n if not self.is_ready:\n raise Exception('Window is not ready.')\n glutInit()\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA)\n glutInitWindowSize(self.__display.pixel_number, self.__display.pixel_number)\n glutCreateWindow('')\n gluOrtho2D(0, self.__display.pixel_number, 0, self.__display.pixel_number)\n glutDisplayFunc(self.__draw)\n glutMainLoop()\n" ]
[ [ "numpy.array", "scipy.interpolate.interp2d" ] ]
dliangsta/metal
[ "49c568e33b36f5e0887bd977dca936b7def02ad7" ]
[ "tests/metal/end_model/test_end_model.py" ]
[ "import os\nimport unittest\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom metal.end_model import EndModel, LogisticRegression\nfrom metal.end_model.identity_module import IdentityModule\nfrom metal.metrics import METRICS\n\n\nclass EndModelTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n # Set seed\n np.random.seed(1)\n\n n = 2000\n\n X = np.random.random((n, 2)) * 2 - 1\n Y = (X[:, 0] > X[:, 1] + 0.25).astype(int) + 1\n\n X = torch.tensor(X, dtype=torch.float)\n Y = torch.tensor(Y, dtype=torch.long)\n\n Xs = [X[:1000], X[1000:1500], X[1500:]]\n Ys = [Y[:1000], Y[1000:1500], Y[1500:]]\n cls.single_problem = (Xs, Ys)\n\n def test_logreg(self):\n em = LogisticRegression(seed=1, input_dim=2, verbose=False)\n Xs, Ys = self.single_problem\n em.train_model(\n (Xs[0], Ys[0]), valid_data=(Xs[1], Ys[1]), n_epochs=5, checkpoint=False\n )\n score = em.score((Xs[2], Ys[2]), verbose=False)\n self.assertGreater(score, 0.95)\n\n def test_softmax(self):\n em = LogisticRegression(seed=1, input_dim=2, output_dim=3, verbose=False)\n Xs, _ = self.single_problem\n Ys = []\n for X in Xs:\n class1 = X[:, 0] < X[:, 1]\n class2 = X[:, 0] > X[:, 1] + 0.5\n class3 = X[:, 0] > X[:, 1]\n Y = torch.argmax(torch.stack([class1, class2, class3], dim=1), dim=1) + 1\n Ys.append(Y)\n em.train_model(\n (Xs[0], Ys[0]),\n valid_data=(Xs[1], Ys[1]),\n lr=0.1,\n n_epochs=10,\n checkpoint=False,\n )\n score = em.score((Xs[2], Ys[2]), verbose=False)\n self.assertGreater(score, 0.95)\n\n def test_singletask(self):\n \"\"\"Test basic single-task end model\"\"\"\n em = EndModel(\n seed=1,\n input_batchnorm=False,\n middle_batchnorm=False,\n input_dropout=0.0,\n middle_dropout=0.0,\n layer_out_dims=[2, 10, 2],\n verbose=False,\n )\n Xs, Ys = self.single_problem\n em.train_model(\n (Xs[0], Ys[0]), valid_data=(Xs[1], Ys[1]), n_epochs=5, checkpoint=False\n )\n score = em.score((Xs[2], Ys[2]), verbose=False)\n self.assertGreater(score, 0.95)\n\n def test_singletask_extras(self):\n \"\"\"Test batchnorm and dropout\"\"\"\n em = EndModel(\n seed=1,\n input_batchnorm=True,\n middle_batchnorm=True,\n input_dropout=0.01,\n middle_dropout=0.01,\n layer_out_dims=[2, 10, 2],\n verbose=False,\n )\n Xs, Ys = self.single_problem\n em.train_model(\n (Xs[0], Ys[0]), valid_data=(Xs[1], Ys[1]), n_epochs=5, checkpoint=False\n )\n score = em.score((Xs[2], Ys[2]), verbose=False)\n self.assertGreater(score, 0.95)\n\n def test_custom_modules(self):\n \"\"\"Test custom input/head modules\"\"\"\n input_module = nn.Sequential(IdentityModule(), nn.Linear(2, 10))\n middle_modules = [nn.Linear(10, 8), IdentityModule()]\n head_module = nn.Sequential(nn.Linear(8, 2), IdentityModule())\n em = EndModel(\n seed=1,\n input_module=input_module,\n middle_modules=middle_modules,\n head_module=head_module,\n layer_out_dims=[10, 8, 8],\n verbose=False,\n )\n Xs, Ys = self.single_problem\n em.train_model(\n (Xs[0], Ys[0]),\n valid_data=(Xs[1], Ys[1]),\n n_epochs=5,\n verbose=False,\n checkpoint=False,\n show_plots=False,\n )\n score = em.score((Xs[2], Ys[2]), verbose=False)\n self.assertGreater(score, 0.95)\n\n def test_scoring(self):\n \"\"\"Test the metrics whole way through\"\"\"\n em = EndModel(\n seed=1,\n batchnorm=False,\n dropout=0.0,\n layer_out_dims=[2, 10, 2],\n verbose=False,\n )\n Xs, Ys = self.single_problem\n em.train_model(\n (Xs[0], Ys[0]), valid_data=(Xs[1], Ys[1]), n_epochs=5, checkpoint=False\n )\n metrics = list(METRICS.keys())\n scores = em.score((Xs[2], Ys[2]), metric=metrics, verbose=False)\n for i, metric in 
enumerate(metrics):\n self.assertGreater(scores[i], 0.95)\n\n def test_determinism(self):\n \"\"\"Test whether training and scoring is deterministic given seed\"\"\"\n em = EndModel(\n seed=123,\n batchnorm=True,\n dropout=0.1,\n layer_out_dims=[2, 10, 2],\n verbose=False,\n )\n Xs, Ys = self.single_problem\n em.train_model(\n (Xs[0], Ys[0]), valid_data=(Xs[1], Ys[1]), n_epochs=1, checkpoint=False\n )\n score_1 = em.score((Xs[2], Ys[2]), verbose=False)\n\n # Test scoring determinism\n score_2 = em.score((Xs[2], Ys[2]), verbose=False)\n self.assertEqual(score_1, score_2)\n\n # Test training determinism\n em_2 = EndModel(\n seed=123,\n batchnorm=True,\n dropout=0.1,\n layer_out_dims=[2, 10, 2],\n verbose=False,\n )\n em_2.train_model(\n (Xs[0], Ys[0]), valid_data=(Xs[1], Ys[1]), n_epochs=1, checkpoint=False\n )\n score_3 = em_2.score((Xs[2], Ys[2]), verbose=False)\n self.assertEqual(score_1, score_3)\n\n def test_save_and_load(self):\n \"\"\"Test basic saving and loading\"\"\"\n em = EndModel(\n seed=1337,\n input_batchnorm=False,\n middle_batchnorm=False,\n input_dropout=0.0,\n middle_dropout=0.0,\n layer_out_dims=[2, 10, 2],\n verbose=False,\n )\n Xs, Ys = self.single_problem\n em.train_model(\n (Xs[0], Ys[0]), valid_data=(Xs[1], Ys[1]), n_epochs=3, checkpoint=False\n )\n score = em.score((Xs[2], Ys[2]), verbose=False)\n\n # Save model\n SAVE_PATH = \"test_save_model.pkl\"\n em.save(SAVE_PATH)\n\n # Reload and make sure (a) score and (b) non-buffer, non-Parameter\n # attributes are the same\n em_2 = EndModel.load(SAVE_PATH)\n self.assertEqual(em.seed, em_2.seed)\n score_2 = em_2.score((Xs[2], Ys[2]), verbose=False)\n self.assertEqual(score, score_2)\n\n # Clean up\n os.remove(SAVE_PATH)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.nn.Linear", "torch.stack", "numpy.random.seed", "torch.tensor", "numpy.random.random" ] ]
ryfeus/aws-inference-benchmark
[ "1a726ecbfffa931493c17ce73a81ec06db2f59ba" ]
[ "lambda-tflite/compileTFLiteInception.py" ]
[ "import tensorflow as tf\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3\nimport numpy as np\nfrom time import process_time\n\nmodel = InceptionV3(weights='imagenet')\nmodel.save(\"InceptionV3.h5\")\n\nconverter = tf.lite.TFLiteConverter.from_keras_model_file('InceptionV3.h5',\n\tinput_shapes={'input_1' : [1,299,299,3]}\n\t)\ntflite_model = converter.convert()\nopen(\"InceptionV3.tflite\", \"wb\").write(tflite_model)" ]
[ [ "tensorflow.keras.applications.inception_v3.InceptionV3", "tensorflow.lite.TFLiteConverter.from_keras_model_file" ] ]
mlbileschi/tensorflow
[ "8702c47666ffe3940d1171516f5feb47ccc625b1" ]
[ "tensorflow/python/ops/image_ops_impl.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implementation of image ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport numpy as np\n\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_image_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import sort_ops\nfrom tensorflow.python.ops import stateless_random_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util.tf_export import tf_export\n\nops.NotDifferentiable('RandomCrop')\n# TODO(b/31222613): This op may be differentiable, and there may be\n# latent bugs here.\nops.NotDifferentiable('HSVToRGB')\nops.NotDifferentiable('DrawBoundingBoxes')\nops.NotDifferentiable('SampleDistortedBoundingBox')\nops.NotDifferentiable('SampleDistortedBoundingBoxV2')\n# TODO(bsteiner): Implement the gradient function for extract_glimpse\n# TODO(b/31222613): This op may be differentiable, and there may be\n# latent bugs here.\nops.NotDifferentiable('ExtractGlimpse')\nops.NotDifferentiable('NonMaxSuppression')\nops.NotDifferentiable('NonMaxSuppressionV2')\nops.NotDifferentiable('NonMaxSuppressionWithOverlaps')\nops.NotDifferentiable('GenerateBoundingBoxProposals')\n\n\n# pylint: disable=invalid-name\ndef _assert(cond, ex_type, msg):\n \"\"\"A polymorphic assert, works with tensors and boolean expressions.\n\n If `cond` is not a tensor, behave like an ordinary assert statement, except\n that a empty list is returned. If `cond` is a tensor, return a list\n containing a single TensorFlow assert op.\n\n Args:\n cond: Something evaluates to a boolean value. 
May be a tensor.\n ex_type: The exception class to use.\n msg: The error message.\n\n Returns:\n A list, containing at most one assert op.\n \"\"\"\n if _is_tensor(cond):\n return [control_flow_ops.Assert(cond, [msg])]\n else:\n if not cond:\n raise ex_type(msg)\n else:\n return []\n\n\ndef _is_tensor(x):\n \"\"\"Returns `True` if `x` is a symbolic tensor-like object.\n\n Args:\n x: A python object to check.\n\n Returns:\n `True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`.\n \"\"\"\n return isinstance(x, (ops.Tensor, variables.Variable))\n\n\ndef _ImageDimensions(image, rank):\n \"\"\"Returns the dimensions of an image tensor.\n\n Args:\n image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.\n rank: The expected rank of the image\n\n Returns:\n A list of corresponding to the dimensions of the\n input image. Dimensions that are statically known are python integers,\n otherwise, they are integer scalar tensors.\n \"\"\"\n if image.get_shape().is_fully_defined():\n return image.get_shape().as_list()\n else:\n static_shape = image.get_shape().with_rank(rank).as_list()\n dynamic_shape = array_ops.unstack(array_ops.shape(image), rank)\n return [\n s if s is not None else d for s, d in zip(static_shape, dynamic_shape)\n ]\n\n\ndef _Check3DImage(image, require_static=True):\n \"\"\"Assert that we are working with a properly shaped image.\n\n Args:\n image: 3-D Tensor of shape [height, width, channels]\n require_static: If `True`, requires that all dimensions of `image` are known\n and non-zero.\n\n Raises:\n ValueError: if `image.shape` is not a 3-vector.\n\n Returns:\n An empty list, if `image` has fully defined dimensions. Otherwise, a list\n containing an assert op is returned.\n \"\"\"\n try:\n image_shape = image.get_shape().with_rank(3)\n except ValueError:\n raise ValueError(\"'image' (shape %s) must be three-dimensional.\" %\n image.shape)\n if require_static and not image_shape.is_fully_defined():\n raise ValueError(\"'image' (shape %s) must be fully defined.\" % image_shape)\n if any(x == 0 for x in image_shape):\n raise ValueError(\"all dims of 'image.shape' must be > 0: %s\" % image_shape)\n if not image_shape.is_fully_defined():\n return [\n check_ops.assert_positive(\n array_ops.shape(image),\n [\"all dims of 'image.shape' \"\n 'must be > 0.'])\n ]\n else:\n return []\n\n\ndef _Assert3DImage(image):\n \"\"\"Assert that we are working with a properly shaped image.\n\n Performs the check statically if possible (i.e. if the shape\n is statically known). Otherwise adds a control dependency\n to an assert op that checks the dynamic shape.\n\n Args:\n image: 3-D Tensor of shape [height, width, channels]\n\n Raises:\n ValueError: if `image.shape` is not a 3-vector.\n\n Returns:\n If the shape of `image` could be verified statically, `image` is\n returned unchanged, otherwise there will be a control dependency\n added that asserts the correct dynamic shape.\n \"\"\"\n return control_flow_ops.with_dependencies(\n _Check3DImage(image, require_static=False), image)\n\n\ndef _AssertAtLeast3DImage(image):\n \"\"\"Assert that we are working with a properly shaped image.\n\n Performs the check statically if possible (i.e. if the shape\n is statically known). 
Otherwise adds a control dependency\n to an assert op that checks the dynamic shape.\n\n Args:\n image: >= 3-D Tensor of size [*, height, width, depth]\n\n Raises:\n ValueError: if image.shape is not a [>= 3] vector.\n\n Returns:\n If the shape of `image` could be verified statically, `image` is\n returned unchanged, otherwise there will be a control dependency\n added that asserts the correct dynamic shape.\n \"\"\"\n return control_flow_ops.with_dependencies(\n _CheckAtLeast3DImage(image, require_static=False), image)\n\n\ndef _CheckAtLeast3DImage(image, require_static=True):\n \"\"\"Assert that we are working with a properly shaped image.\n\n Args:\n image: >= 3-D Tensor of size [*, height, width, depth]\n require_static: If `True`, requires that all dimensions of `image` are known\n and non-zero.\n\n Raises:\n ValueError: if image.shape is not a [>= 3] vector.\n\n Returns:\n An empty list, if `image` has fully defined dimensions. Otherwise, a list\n containing an assert op is returned.\n \"\"\"\n try:\n if image.get_shape().ndims is None:\n image_shape = image.get_shape().with_rank(3)\n else:\n image_shape = image.get_shape().with_rank_at_least(3)\n except ValueError:\n raise ValueError(\"'image' (shape %s) must be at least three-dimensional.\" %\n image.shape)\n if require_static and not image_shape.is_fully_defined():\n raise ValueError('\\'image\\' must be fully defined.')\n if any(x == 0 for x in image_shape[-3:]):\n raise ValueError('inner 3 dims of \\'image.shape\\' must be > 0: %s' %\n image_shape)\n if not image_shape[-3:].is_fully_defined():\n return [\n check_ops.assert_positive(\n array_ops.shape(image)[-3:],\n [\"inner 3 dims of 'image.shape' \"\n 'must be > 0.']),\n check_ops.assert_greater_equal(\n array_ops.rank(image),\n 3,\n message=\"'image' must be at least three-dimensional.\")\n ]\n else:\n return []\n\n\ndef _AssertGrayscaleImage(image):\n \"\"\"Assert that we are working with a properly shaped grayscale image.\n\n Performs the check statically if possible (i.e. if the shape\n is statically known). Otherwise adds a control dependency\n to an assert op that checks the dynamic shape.\n\n Args:\n image: >= 2-D Tensor of size [*, 1]\n\n Raises:\n ValueError: if image.shape is not a [>= 2] vector or if\n last dimension is not size 1.\n\n Returns:\n If the shape of `image` could be verified statically, `image` is\n returned unchanged, otherwise there will be a control dependency\n added that asserts the correct dynamic shape.\n \"\"\"\n return control_flow_ops.with_dependencies(\n _CheckGrayscaleImage(image, require_static=False), image)\n\n\ndef _CheckGrayscaleImage(image, require_static=True):\n \"\"\"Assert that we are working with properly shaped grayscale image.\n\n Args:\n image: >= 2-D Tensor of size [*, 1]\n require_static: Boolean, whether static shape is required.\n\n Raises:\n ValueError: if image.shape is not a [>= 2] vector or if\n last dimension is not size 1.\n\n Returns:\n An empty list, if `image` has fully defined dimensions. Otherwise, a list\n containing an assert op is returned.\n \"\"\"\n try:\n if image.get_shape().ndims is None:\n image_shape = image.get_shape().with_rank(2)\n else:\n image_shape = image.get_shape().with_rank_at_least(2)\n except ValueError:\n raise ValueError('A grayscale image (shape %s) must be at least '\n 'two-dimensional.' 
% image.shape)\n if require_static and not image_shape.is_fully_defined():\n raise ValueError('\\'image\\' must be fully defined.')\n if image_shape.is_fully_defined():\n if image_shape[-1] != 1:\n raise ValueError('Last dimension of a grayscale image should be size 1.')\n if not image_shape.is_fully_defined():\n return [\n check_ops.assert_equal(\n array_ops.shape(image)[-1],\n 1,\n message='Last dimension of a grayscale image should be size 1.'),\n check_ops.assert_greater_equal(\n array_ops.rank(image),\n 3,\n message='A grayscale image must be at least two-dimensional.')\n ]\n else:\n return []\n\n\ndef fix_image_flip_shape(image, result):\n \"\"\"Set the shape to 3 dimensional if we don't know anything else.\n\n Args:\n image: original image size\n result: flipped or transformed image\n\n Returns:\n An image whose shape is at least (None, None, None).\n \"\"\"\n\n image_shape = image.get_shape()\n if image_shape == tensor_shape.unknown_shape():\n result.set_shape([None, None, None])\n else:\n result.set_shape(image_shape)\n return result\n\n\n@tf_export('image.random_flip_up_down')\[email protected]_dispatch_support\ndef random_flip_up_down(image, seed=None):\n \"\"\"Randomly flips an image vertically (upside down).\n\n With a 1 in 2 chance, outputs the contents of `image` flipped along the first\n dimension, which is `height`. Otherwise, output the image as-is.\n When passing a batch of images, each image will be randomly flipped\n independent of other images.\n\n Example usage:\n\n >>> image = np.array([[[1], [2]], [[3], [4]]])\n >>> tf.image.random_flip_up_down(image, 3).numpy().tolist()\n [[[3], [4]], [[1], [2]]]\n\n Randomly flip multiple images.\n\n >>> images = np.array(\n ... [\n ... [[[1], [2]], [[3], [4]]],\n ... [[[5], [6]], [[7], [8]]]\n ... ])\n >>> tf.image.random_flip_up_down(images, 4).numpy().tolist()\n [[[[3], [4]], [[1], [2]]], [[[5], [6]], [[7], [8]]]]\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n seed: A Python integer. Used to create a random seed. See\n `tf.compat.v1.set_random_seed` for behavior.\n\n Returns:\n A tensor of the same type and shape as `image`.\n Raises:\n ValueError: if the shape of `image` not supported.\n \"\"\"\n random_func = functools.partial(random_ops.random_uniform, seed=seed)\n return _random_flip(image, 0, random_func, 'random_flip_up_down')\n\n\n@tf_export('image.random_flip_left_right')\[email protected]_dispatch_support\ndef random_flip_left_right(image, seed=None):\n \"\"\"Randomly flip an image horizontally (left to right).\n\n With a 1 in 2 chance, outputs the contents of `image` flipped along the\n second dimension, which is `width`. Otherwise output the image as-is.\n When passing a batch of images, each image will be randomly flipped\n independent of other images.\n\n Example usage:\n\n >>> image = np.array([[[1], [2]], [[3], [4]]])\n >>> tf.image.random_flip_left_right(image, 5).numpy().tolist()\n [[[2], [1]], [[4], [3]]]\n\n Randomly flip multiple images.\n\n >>> images = np.array(\n ... [\n ... [[[1], [2]], [[3], [4]]],\n ... [[[5], [6]], [[7], [8]]]\n ... ])\n >>> tf.image.random_flip_left_right(images, 6).numpy().tolist()\n [[[[2], [1]], [[4], [3]]], [[[5], [6]], [[7], [8]]]]\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n seed: A Python integer. Used to create a random seed. 
See\n `tf.compat.v1.set_random_seed` for behavior.\n\n Returns:\n A tensor of the same type and shape as `image`.\n\n Raises:\n ValueError: if the shape of `image` not supported.\n \"\"\"\n random_func = functools.partial(random_ops.random_uniform, seed=seed)\n return _random_flip(image, 1, random_func, 'random_flip_left_right')\n\n\n@tf_export('image.stateless_random_flip_left_right', v1=[])\[email protected]_dispatch_support\ndef stateless_random_flip_left_right(image, seed):\n \"\"\"Randomly flip an image horizontally (left to right) deterministically.\n\n Guarantees the same results given the same `seed` independent of how many\n times the function is called, and independent of global seed settings (e.g.\n `tf.random.set_seed`).\n\n Example usage:\n\n >>> image = np.array([[[1], [2]], [[3], [4]]])\n >>> seed = (2, 3)\n >>> tf.image.stateless_random_flip_left_right(image, seed).numpy().tolist()\n [[[2], [1]], [[4], [3]]]\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n seed: A shape [2] Tensor, the seed to the random number generator. Must have\n dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)\n\n Returns:\n A tensor of the same type and shape as `image`.\n \"\"\"\n random_func = functools.partial(\n stateless_random_ops.stateless_random_uniform, seed=seed)\n return _random_flip(\n image, 1, random_func, 'stateless_random_flip_left_right')\n\n\n@tf_export('image.stateless_random_flip_up_down', v1=[])\[email protected]_dispatch_support\ndef stateless_random_flip_up_down(image, seed):\n \"\"\"Randomly flip an image vertically (upside down) deterministically.\n\n Guarantees the same results given the same `seed` independent of how many\n times the function is called, and independent of global seed settings (e.g.\n `tf.random.set_seed`).\n\n Example usage:\n\n >>> image = np.array([[[1], [2]], [[3], [4]]])\n >>> seed = (2, 3)\n >>> tf.image.stateless_random_flip_up_down(image, seed).numpy().tolist()\n [[[3], [4]], [[1], [2]]]\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n seed: A shape [2] Tensor, the seed to the random number generator. Must have\n dtype `int32` or `int64`. 
(When using XLA, only `int32` is allowed.)\n\n Returns:\n A tensor of the same type and shape as `image`.\n \"\"\"\n random_func = functools.partial(\n stateless_random_ops.stateless_random_uniform, seed=seed)\n return _random_flip(\n image, 0, random_func, 'stateless_random_flip_up_down')\n\n\ndef _random_flip(image, flip_index, random_func, scope_name):\n \"\"\"Randomly (50% chance) flip an image along axis `flip_index`.\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n flip_index: Dimension along which to flip the image.\n Vertical is 0, Horizontal is 1.\n random_func: partial function for calling either stateful or stateless\n random ops with `seed` parameter specified.\n scope_name: Name of the scope in which the ops are added.\n\n Returns:\n A tensor of the same type and shape as `image`.\n\n Raises:\n ValueError: if the shape of `image` not supported.\n \"\"\"\n with ops.name_scope(None, scope_name, [image]) as scope:\n image = ops.convert_to_tensor(image, name='image')\n image = _AssertAtLeast3DImage(image)\n shape = image.get_shape()\n\n def f_rank3():\n uniform_random = random_func(shape=[], minval=0, maxval=1.0)\n mirror_cond = math_ops.less(uniform_random, .5)\n result = control_flow_ops.cond(\n mirror_cond,\n lambda: array_ops.reverse(image, [flip_index]),\n lambda: image,\n name=scope)\n return fix_image_flip_shape(image, result)\n\n def f_rank4():\n batch_size = array_ops.shape(image)[0]\n uniform_random = random_func(shape=[batch_size], minval=0, maxval=1.0)\n flips = math_ops.round(\n array_ops.reshape(uniform_random, [batch_size, 1, 1, 1]))\n flips = math_ops.cast(flips, image.dtype)\n flipped_input = array_ops.reverse(image, [flip_index + 1])\n return flips * flipped_input + (1 - flips) * image\n\n if shape.ndims is None:\n rank = array_ops.rank(image)\n return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)\n if shape.ndims == 3:\n return f_rank3()\n elif shape.ndims == 4:\n return f_rank4()\n else:\n raise ValueError(\n '\\'image\\' (shape %s) must have either 3 or 4 dimensions.' % shape)\n\n\n@tf_export('image.flip_left_right')\[email protected]_dispatch_support\ndef flip_left_right(image):\n \"\"\"Flip an image horizontally (left to right).\n\n Outputs the contents of `image` flipped along the width dimension.\n\n See also `reverse()`.\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... [4.0, 5.0, 6.0]],\n ... [[7.0, 8.0, 9.0],\n ... [10.0, 11.0, 12.0]]]\n >>> tf.image.flip_left_right(x)\n <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n array([[[ 4., 5., 6.],\n [ 1., 2., 3.]],\n [[10., 11., 12.],\n [ 7., 8., 9.]]], dtype=float32)>\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n\n Returns:\n A tensor of the same type and shape as `image`.\n\n Raises:\n ValueError: if the shape of `image` not supported.\n \"\"\"\n return _flip(image, 1, 'flip_left_right')\n\n\n@tf_export('image.flip_up_down')\[email protected]_dispatch_support\ndef flip_up_down(image):\n \"\"\"Flip an image vertically (upside down).\n\n Outputs the contents of `image` flipped along the height dimension.\n\n See also `reverse()`.\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... [4.0, 5.0, 6.0]],\n ... [[7.0, 8.0, 9.0],\n ... 
[10.0, 11.0, 12.0]]]\n >>> tf.image.flip_up_down(x)\n <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n array([[[ 7., 8., 9.],\n [10., 11., 12.]],\n [[ 1., 2., 3.],\n [ 4., 5., 6.]]], dtype=float32)>\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n\n Returns:\n A `Tensor` of the same type and shape as `image`.\n\n Raises:\n ValueError: if the shape of `image` not supported.\n \"\"\"\n return _flip(image, 0, 'flip_up_down')\n\n\ndef _flip(image, flip_index, scope_name):\n \"\"\"Flip an image either horizontally or vertically.\n\n Outputs the contents of `image` flipped along the dimension `flip_index`.\n\n See also `reverse()`.\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n flip_index: 0 For vertical, 1 for horizontal.\n scope_name: string, scope name.\n\n Returns:\n A `Tensor` of the same type and shape as `image`.\n\n Raises:\n ValueError: if the shape of `image` not supported.\n \"\"\"\n with ops.name_scope(None, scope_name, [image]):\n image = ops.convert_to_tensor(image, name='image')\n image = _AssertAtLeast3DImage(image)\n shape = image.get_shape()\n\n def f_rank3():\n return fix_image_flip_shape(image, array_ops.reverse(image, [flip_index]))\n\n def f_rank4():\n return array_ops.reverse(image, [flip_index + 1])\n\n if shape.ndims is None:\n rank = array_ops.rank(image)\n return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)\n elif shape.ndims == 3:\n return f_rank3()\n elif shape.ndims == 4:\n return f_rank4()\n else:\n raise ValueError(\n '\\'image\\' (shape %s)must have either 3 or 4 dimensions.' % shape)\n\n\n@tf_export('image.rot90')\[email protected]_dispatch_support\ndef rot90(image, k=1, name=None):\n \"\"\"Rotate image(s) counter-clockwise by 90 degrees.\n\n\n For example:\n\n >>> a=tf.constant([[[1],[2]],\n ... [[3],[4]]])\n >>> # rotating `a` counter clockwise by 90 degrees\n >>> a_rot=tf.image.rot90(a)\n >>> print(a_rot[...,0].numpy())\n [[2 4]\n [1 3]]\n >>> # rotating `a` counter clockwise by 270 degrees\n >>> a_rot=tf.image.rot90(a, k=3)\n >>> print(a_rot[...,0].numpy())\n [[3 1]\n [4 2]]\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n k: A scalar integer. The number of times the image is rotated by 90 degrees.\n name: A name for this operation (optional).\n\n Returns:\n A rotated tensor of the same type and shape as `image`.\n\n Raises:\n ValueError: if the shape of `image` not supported.\n \"\"\"\n with ops.name_scope(name, 'rot90', [image, k]) as scope:\n image = ops.convert_to_tensor(image, name='image')\n image = _AssertAtLeast3DImage(image)\n k = ops.convert_to_tensor(k, dtype=dtypes.int32, name='k')\n k.get_shape().assert_has_rank(0)\n k = math_ops.mod(k, 4)\n\n shape = image.get_shape()\n if shape.ndims is None:\n rank = array_ops.rank(image)\n\n def f_rank3():\n return _rot90_3D(image, k, scope)\n\n def f_rank4():\n return _rot90_4D(image, k, scope)\n\n return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)\n elif shape.ndims == 3:\n return _rot90_3D(image, k, scope)\n elif shape.ndims == 4:\n return _rot90_4D(image, k, scope)\n else:\n raise ValueError(\n '\\'image\\' (shape %s) must have either 3 or 4 dimensions.' 
% shape)\n\n\ndef _rot90_3D(image, k, name_scope):\n \"\"\"Rotate image counter-clockwise by 90 degrees `k` times.\n\n Args:\n image: 3-D Tensor of shape `[height, width, channels]`.\n k: A scalar integer. The number of times the image is rotated by 90 degrees.\n name_scope: A valid TensorFlow name scope.\n\n Returns:\n A 3-D tensor of the same type and shape as `image`.\n\n \"\"\"\n\n def _rot90():\n return array_ops.transpose(array_ops.reverse_v2(image, [1]), [1, 0, 2])\n\n def _rot180():\n return array_ops.reverse_v2(image, [0, 1])\n\n def _rot270():\n return array_ops.reverse_v2(array_ops.transpose(image, [1, 0, 2]), [1])\n\n cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180),\n (math_ops.equal(k, 3), _rot270)]\n\n result = control_flow_ops.case(\n cases, default=lambda: image, exclusive=True, name=name_scope)\n result.set_shape([None, None, image.get_shape()[2]])\n return result\n\n\ndef _rot90_4D(images, k, name_scope):\n \"\"\"Rotate batch of images counter-clockwise by 90 degrees `k` times.\n\n Args:\n images: 4-D Tensor of shape `[height, width, channels]`.\n k: A scalar integer. The number of times the images are rotated by 90\n degrees.\n name_scope: A valid TensorFlow name scope.\n\n Returns:\n A 4-D `Tensor` of the same type and shape as `images`.\n \"\"\"\n\n def _rot90():\n return array_ops.transpose(array_ops.reverse_v2(images, [2]), [0, 2, 1, 3])\n\n def _rot180():\n return array_ops.reverse_v2(images, [1, 2])\n\n def _rot270():\n return array_ops.reverse_v2(array_ops.transpose(images, [0, 2, 1, 3]), [2])\n\n cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180),\n (math_ops.equal(k, 3), _rot270)]\n\n result = control_flow_ops.case(\n cases, default=lambda: images, exclusive=True, name=name_scope)\n shape = result.get_shape()\n result.set_shape([shape[0], None, None, shape[3]])\n return result\n\n\n@tf_export('image.transpose', v1=['image.transpose', 'image.transpose_image'])\[email protected]_dispatch_support\ndef transpose(image, name=None):\n \"\"\"Transpose image(s) by swapping the height and width dimension.\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... [4.0, 5.0, 6.0]],\n ... [[7.0, 8.0, 9.0],\n ... [10.0, 11.0, 12.0]]]\n >>> tf.image.transpose(x)\n <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n array([[[ 1., 2., 3.],\n [ 7., 8., 9.]],\n [[ 4., 5., 6.],\n [10., 11., 12.]]], dtype=float32)>\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n name: A name for this operation (optional).\n\n Returns:\n If `image` was 4-D, a 4-D float Tensor of shape\n `[batch, width, height, channels]`\n If `image` was 3-D, a 3-D float Tensor of shape\n `[width, height, channels]`\n\n Raises:\n ValueError: if the shape of `image` not supported.\n\n Usage Example:\n\n >>> image = [[[1, 2], [3, 4]],\n ... [[5, 6], [7, 8]],\n ... 
[[9, 10], [11, 12]]]\n >>> image = tf.constant(image)\n >>> tf.image.transpose(image)\n <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=\n array([[[ 1, 2],\n [ 5, 6],\n [ 9, 10]],\n [[ 3, 4],\n [ 7, 8],\n [11, 12]]], dtype=int32)>\n \"\"\"\n with ops.name_scope(name, 'transpose', [image]):\n image = ops.convert_to_tensor(image, name='image')\n image = _AssertAtLeast3DImage(image)\n shape = image.get_shape()\n if shape.ndims is None:\n rank = array_ops.rank(image)\n\n def f_rank3():\n return array_ops.transpose(image, [1, 0, 2], name=name)\n\n def f_rank4():\n return array_ops.transpose(image, [0, 2, 1, 3], name=name)\n\n return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)\n elif shape.ndims == 3:\n return array_ops.transpose(image, [1, 0, 2], name=name)\n elif shape.ndims == 4:\n return array_ops.transpose(image, [0, 2, 1, 3], name=name)\n else:\n raise ValueError(\n '\\'image\\' (shape %s) must have either 3 or 4 dimensions.' % shape)\n\n\n@tf_export('image.central_crop')\[email protected]_dispatch_support\ndef central_crop(image, central_fraction):\n \"\"\"Crop the central region of the image(s).\n\n Remove the outer parts of an image but retain the central region of the image\n along each dimension. If we specify central_fraction = 0.5, this function\n returns the region marked with \"X\" in the below diagram.\n\n --------\n | |\n | XXXX |\n | XXXX |\n | | where \"X\" is the central 50% of the image.\n --------\n\n This function works on either a single image (`image` is a 3-D Tensor), or a\n batch of images (`image` is a 4-D Tensor).\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... [4.0, 5.0, 6.0],\n ... [7.0, 8.0, 9.0],\n ... [10.0, 11.0, 12.0]],\n ... [[13.0, 14.0, 15.0],\n ... [16.0, 17.0, 18.0],\n ... [19.0, 20.0, 21.0],\n ... [22.0, 23.0, 24.0]],\n ... [[25.0, 26.0, 27.0],\n ... [28.0, 29.0, 30.0],\n ... [31.0, 32.0, 33.0],\n ... [34.0, 35.0, 36.0]],\n ... [[37.0, 38.0, 39.0],\n ... [40.0, 41.0, 42.0],\n ... [43.0, 44.0, 45.0],\n ... [46.0, 47.0, 48.0]]]\n >>> tf.image.central_crop(x, 0.5)\n <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n array([[[16., 17., 18.],\n [19., 20., 21.]],\n [[28., 29., 30.],\n [31., 32., 33.]]], dtype=float32)>\n\n Args:\n image: Either a 3-D float Tensor of shape [height, width, depth], or a 4-D\n Tensor of shape [batch_size, height, width, depth].\n central_fraction: float (0, 1], fraction of size to crop\n\n Raises:\n ValueError: if central_crop_fraction is not within (0, 1].\n\n Returns:\n 3-D / 4-D float Tensor, as per the input.\n \"\"\"\n with ops.name_scope(None, 'central_crop', [image]):\n image = ops.convert_to_tensor(image, name='image')\n if central_fraction <= 0.0 or central_fraction > 1.0:\n raise ValueError('central_fraction must be within (0, 1]')\n if central_fraction == 1.0:\n return image\n\n _AssertAtLeast3DImage(image)\n rank = image.get_shape().ndims\n if rank != 3 and rank != 4:\n raise ValueError('`image` should either be a Tensor with rank = 3 or '\n 'rank = 4. 
Had rank = {}.'.format(rank))\n\n # Helper method to return the `idx`-th dimension of `tensor`, along with\n # a boolean signifying if the dimension is dynamic.\n def _get_dim(tensor, idx):\n static_shape = tensor.get_shape().dims[idx].value\n if static_shape is not None:\n return static_shape, False\n return array_ops.shape(tensor)[idx], True\n\n # Get the height, width, depth (and batch size, if the image is a 4-D\n # tensor).\n if rank == 3:\n img_h, dynamic_h = _get_dim(image, 0)\n img_w, dynamic_w = _get_dim(image, 1)\n img_d = image.get_shape()[2]\n else:\n img_bs = image.get_shape()[0]\n img_h, dynamic_h = _get_dim(image, 1)\n img_w, dynamic_w = _get_dim(image, 2)\n img_d = image.get_shape()[3]\n\n # Compute the bounding boxes for the crop. The type and value of the\n # bounding boxes depend on the `image` tensor's rank and whether / not the\n # dimensions are statically defined.\n if dynamic_h:\n img_hd = math_ops.cast(img_h, dtypes.float64)\n bbox_h_start = math_ops.cast((img_hd - img_hd * central_fraction) / 2,\n dtypes.int32)\n else:\n img_hd = float(img_h)\n bbox_h_start = int((img_hd - img_hd * central_fraction) / 2)\n\n if dynamic_w:\n img_wd = math_ops.cast(img_w, dtypes.float64)\n bbox_w_start = math_ops.cast((img_wd - img_wd * central_fraction) / 2,\n dtypes.int32)\n else:\n img_wd = float(img_w)\n bbox_w_start = int((img_wd - img_wd * central_fraction) / 2)\n\n bbox_h_size = img_h - bbox_h_start * 2\n bbox_w_size = img_w - bbox_w_start * 2\n\n if rank == 3:\n bbox_begin = array_ops.stack([bbox_h_start, bbox_w_start, 0])\n bbox_size = array_ops.stack([bbox_h_size, bbox_w_size, -1])\n else:\n bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0])\n bbox_size = array_ops.stack([-1, bbox_h_size, bbox_w_size, -1])\n\n image = array_ops.slice(image, bbox_begin, bbox_size)\n\n # Reshape the `image` tensor to the desired size.\n if rank == 3:\n image.set_shape([\n None if dynamic_h else bbox_h_size,\n None if dynamic_w else bbox_w_size, img_d\n ])\n else:\n image.set_shape([\n img_bs, None if dynamic_h else bbox_h_size,\n None if dynamic_w else bbox_w_size, img_d\n ])\n return image\n\n\n@tf_export('image.pad_to_bounding_box')\[email protected]_dispatch_support\ndef pad_to_bounding_box(image, offset_height, offset_width, target_height,\n target_width):\n \"\"\"Pad `image` with zeros to the specified `height` and `width`.\n\n Adds `offset_height` rows of zeros on top, `offset_width` columns of\n zeros on the left, and then pads the image on the bottom and right\n with zeros until it has dimensions `target_height`, `target_width`.\n\n This op does nothing if `offset_*` is zero and the image already has size\n `target_height` by `target_width`.\n\n Usage Example:\n\n >>> x = [[[1., 2., 3.],\n ... [4., 5., 6.]],\n ... [[7., 8., 9.],\n ... 
[10., 11., 12.]]]\n >>> padded_image = tf.image.pad_to_bounding_box(x, 1, 1, 4, 4)\n >>> padded_image\n <tf.Tensor: shape=(4, 4, 3), dtype=float32, numpy=\n array([[[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]],\n [[ 0., 0., 0.],\n [ 1., 2., 3.],\n [ 4., 5., 6.],\n [ 0., 0., 0.]],\n [[ 0., 0., 0.],\n [ 7., 8., 9.],\n [10., 11., 12.],\n [ 0., 0., 0.]],\n [[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]], dtype=float32)>\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n offset_height: Number of rows of zeros to add on top.\n offset_width: Number of columns of zeros to add on the left.\n target_height: Height of output image.\n target_width: Width of output image.\n\n Returns:\n If `image` was 4-D, a 4-D float Tensor of shape\n `[batch, target_height, target_width, channels]`\n If `image` was 3-D, a 3-D float Tensor of shape\n `[target_height, target_width, channels]`\n\n Raises:\n ValueError: If the shape of `image` is incompatible with the `offset_*` or\n `target_*` arguments, or either `offset_height` or `offset_width` is\n negative.\n \"\"\"\n with ops.name_scope(None, 'pad_to_bounding_box', [image]):\n image = ops.convert_to_tensor(image, name='image')\n\n is_batch = True\n image_shape = image.get_shape()\n if image_shape.ndims == 3:\n is_batch = False\n image = array_ops.expand_dims(image, 0)\n elif image_shape.ndims is None:\n is_batch = False\n image = array_ops.expand_dims(image, 0)\n image.set_shape([None] * 4)\n elif image_shape.ndims != 4:\n raise ValueError(\n '\\'image\\' (shape %s) must have either 3 or 4 dimensions.' %\n image_shape)\n\n assert_ops = _CheckAtLeast3DImage(image, require_static=False)\n batch, height, width, depth = _ImageDimensions(image, rank=4)\n\n after_padding_width = target_width - offset_width - width\n\n after_padding_height = target_height - offset_height - height\n\n assert_ops += _assert(offset_height >= 0, ValueError,\n 'offset_height must be >= 0')\n assert_ops += _assert(offset_width >= 0, ValueError,\n 'offset_width must be >= 0')\n assert_ops += _assert(after_padding_width >= 0, ValueError,\n 'width must be <= target - offset')\n assert_ops += _assert(after_padding_height >= 0, ValueError,\n 'height must be <= target - offset')\n image = control_flow_ops.with_dependencies(assert_ops, image)\n\n # Do not pad on the depth dimensions.\n paddings = array_ops.reshape(\n array_ops.stack([\n 0, 0, offset_height, after_padding_height, offset_width,\n after_padding_width, 0, 0\n ]), [4, 2])\n padded = array_ops.pad(image, paddings)\n\n padded_shape = [\n None if _is_tensor(i) else i\n for i in [batch, target_height, target_width, depth]\n ]\n padded.set_shape(padded_shape)\n\n if not is_batch:\n padded = array_ops.squeeze(padded, axis=[0])\n\n return padded\n\n\n@tf_export('image.crop_to_bounding_box')\[email protected]_dispatch_support\ndef crop_to_bounding_box(image, offset_height, offset_width, target_height,\n target_width):\n \"\"\"Crops an image to a specified bounding box.\n\n This op cuts a rectangular part out of `image`. 
The top-left corner of the\n returned image is at `offset_height, offset_width` in `image`, and its\n lower-right corner is at\n `offset_height + target_height, offset_width + target_width`.\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n offset_height: Vertical coordinate of the top-left corner of the result in\n the input.\n offset_width: Horizontal coordinate of the top-left corner of the result in\n the input.\n target_height: Height of the result.\n target_width: Width of the result.\n\n Returns:\n If `image` was 4-D, a 4-D float Tensor of shape\n `[batch, target_height, target_width, channels]`\n If `image` was 3-D, a 3-D float Tensor of shape\n `[target_height, target_width, channels]`\n\n Raises:\n ValueError: If the shape of `image` is incompatible with the `offset_*` or\n `target_*` arguments, or either `offset_height` or `offset_width` is\n negative, or either `target_height` or `target_width` is not positive.\n \"\"\"\n with ops.name_scope(None, 'crop_to_bounding_box', [image]):\n image = ops.convert_to_tensor(image, name='image')\n\n is_batch = True\n image_shape = image.get_shape()\n if image_shape.ndims == 3:\n is_batch = False\n image = array_ops.expand_dims(image, 0)\n elif image_shape.ndims is None:\n is_batch = False\n image = array_ops.expand_dims(image, 0)\n image.set_shape([None] * 4)\n elif image_shape.ndims != 4:\n raise ValueError(\n '\\'image\\' (shape %s) must have either 3 or 4 dimensions.' %\n image_shape)\n\n assert_ops = _CheckAtLeast3DImage(image, require_static=False)\n\n batch, height, width, depth = _ImageDimensions(image, rank=4)\n\n assert_ops += _assert(offset_width >= 0, ValueError,\n 'offset_width must be >= 0.')\n assert_ops += _assert(offset_height >= 0, ValueError,\n 'offset_height must be >= 0.')\n assert_ops += _assert(target_width > 0, ValueError,\n 'target_width must be > 0.')\n assert_ops += _assert(target_height > 0, ValueError,\n 'target_height must be > 0.')\n assert_ops += _assert(width >= (target_width + offset_width), ValueError,\n 'width must be >= target + offset.')\n assert_ops += _assert(height >= (target_height + offset_height), ValueError,\n 'height must be >= target + offset.')\n image = control_flow_ops.with_dependencies(assert_ops, image)\n\n cropped = array_ops.slice(\n image, array_ops.stack([0, offset_height, offset_width, 0]),\n array_ops.stack([-1, target_height, target_width, -1]))\n\n cropped_shape = [\n None if _is_tensor(i) else i\n for i in [batch, target_height, target_width, depth]\n ]\n cropped.set_shape(cropped_shape)\n\n if not is_batch:\n cropped = array_ops.squeeze(cropped, axis=[0])\n\n return cropped\n\n\n@tf_export(\n 'image.resize_with_crop_or_pad',\n v1=['image.resize_with_crop_or_pad', 'image.resize_image_with_crop_or_pad'])\[email protected]_dispatch_support\ndef resize_image_with_crop_or_pad(image, target_height, target_width):\n \"\"\"Crops and/or pads an image to a target width and height.\n\n Resizes an image to a target width and height by either centrally\n cropping the image or padding it evenly with zeros.\n\n If `width` or `height` is greater than the specified `target_width` or\n `target_height` respectively, this op centrally crops along that dimension.\n If `width` or `height` is smaller than the specified `target_width` or\n `target_height` respectively, this op centrally pads with 0 along that\n dimension.\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape 
`[height, width, channels]`.\n target_height: Target height.\n target_width: Target width.\n\n Raises:\n ValueError: if `target_height` or `target_width` are zero or negative.\n\n Returns:\n Cropped and/or padded image.\n If `images` was 4-D, a 4-D float Tensor of shape\n `[batch, new_height, new_width, channels]`.\n If `images` was 3-D, a 3-D float Tensor of shape\n `[new_height, new_width, channels]`.\n \"\"\"\n with ops.name_scope(None, 'resize_image_with_crop_or_pad', [image]):\n image = ops.convert_to_tensor(image, name='image')\n image_shape = image.get_shape()\n is_batch = True\n if image_shape.ndims == 3:\n is_batch = False\n image = array_ops.expand_dims(image, 0)\n elif image_shape.ndims is None:\n is_batch = False\n image = array_ops.expand_dims(image, 0)\n image.set_shape([None] * 4)\n elif image_shape.ndims != 4:\n raise ValueError(\n '\\'image\\' (shape %s) must have either 3 or 4 dimensions.' %\n image_shape)\n\n assert_ops = _CheckAtLeast3DImage(image, require_static=False)\n assert_ops += _assert(target_width > 0, ValueError,\n 'target_width must be > 0.')\n assert_ops += _assert(target_height > 0, ValueError,\n 'target_height must be > 0.')\n\n image = control_flow_ops.with_dependencies(assert_ops, image)\n # `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks.\n # Make sure our checks come first, so that error messages are clearer.\n if _is_tensor(target_height):\n target_height = control_flow_ops.with_dependencies(\n assert_ops, target_height)\n if _is_tensor(target_width):\n target_width = control_flow_ops.with_dependencies(assert_ops,\n target_width)\n\n def max_(x, y):\n if _is_tensor(x) or _is_tensor(y):\n return math_ops.maximum(x, y)\n else:\n return max(x, y)\n\n def min_(x, y):\n if _is_tensor(x) or _is_tensor(y):\n return math_ops.minimum(x, y)\n else:\n return min(x, y)\n\n def equal_(x, y):\n if _is_tensor(x) or _is_tensor(y):\n return math_ops.equal(x, y)\n else:\n return x == y\n\n _, height, width, _ = _ImageDimensions(image, rank=4)\n width_diff = target_width - width\n offset_crop_width = max_(-width_diff // 2, 0)\n offset_pad_width = max_(width_diff // 2, 0)\n\n height_diff = target_height - height\n offset_crop_height = max_(-height_diff // 2, 0)\n offset_pad_height = max_(height_diff // 2, 0)\n\n # Maybe crop if needed.\n cropped = crop_to_bounding_box(image, offset_crop_height, offset_crop_width,\n min_(target_height, height),\n min_(target_width, width))\n\n # Maybe pad if needed.\n resized = pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width,\n target_height, target_width)\n\n # In theory all the checks below are redundant.\n if resized.get_shape().ndims is None:\n raise ValueError('resized contains no shape.')\n\n _, resized_height, resized_width, _ = _ImageDimensions(resized, rank=4)\n\n assert_ops = []\n assert_ops += _assert(\n equal_(resized_height, target_height), ValueError,\n 'resized height is not correct.')\n assert_ops += _assert(\n equal_(resized_width, target_width), ValueError,\n 'resized width is not correct.')\n\n resized = control_flow_ops.with_dependencies(assert_ops, resized)\n\n if not is_batch:\n resized = array_ops.squeeze(resized, axis=[0])\n\n return resized\n\n\n@tf_export(v1=['image.ResizeMethod'])\nclass ResizeMethodV1(object):\n \"\"\"See `v1.image.resize` for details.\"\"\"\n BILINEAR = 0\n NEAREST_NEIGHBOR = 1\n BICUBIC = 2\n AREA = 3\n\n\n@tf_export('image.ResizeMethod', v1=[])\nclass ResizeMethod(object):\n \"\"\"See `tf.image.resize` for details.\"\"\"\n BILINEAR = 'bilinear'\n 
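  # Annotation (not in the original source): `tf.image.resize` accepts either
  # these enum attributes or their lowercase string values, so
  # method=ResizeMethod.BILINEAR and method='bilinear' are interchangeable.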
NEAREST_NEIGHBOR = 'nearest'\n BICUBIC = 'bicubic'\n AREA = 'area'\n LANCZOS3 = 'lanczos3'\n LANCZOS5 = 'lanczos5'\n GAUSSIAN = 'gaussian'\n MITCHELLCUBIC = 'mitchellcubic'\n\n\ndef _resize_images_common(images, resizer_fn, size, preserve_aspect_ratio, name,\n skip_resize_if_same):\n \"\"\"Core functionality for v1 and v2 resize functions.\"\"\"\n with ops.name_scope(name, 'resize', [images, size]):\n images = ops.convert_to_tensor(images, name='images')\n if images.get_shape().ndims is None:\n raise ValueError('\\'images\\' contains no shape.')\n # TODO(shlens): Migrate this functionality to the underlying Op's.\n is_batch = True\n if images.get_shape().ndims == 3:\n is_batch = False\n images = array_ops.expand_dims(images, 0)\n elif images.get_shape().ndims != 4:\n raise ValueError('\\'images\\' must have either 3 or 4 dimensions.')\n\n _, height, width, _ = images.get_shape().as_list()\n\n try:\n size = ops.convert_to_tensor(size, dtypes.int32, name='size')\n except (TypeError, ValueError):\n raise ValueError('\\'size\\' must be a 1-D int32 Tensor')\n if not size.get_shape().is_compatible_with([2]):\n raise ValueError('\\'size\\' must be a 1-D Tensor of 2 elements: '\n 'new_height, new_width')\n\n if preserve_aspect_ratio:\n # Get the current shapes of the image, even if dynamic.\n _, current_height, current_width, _ = _ImageDimensions(images, rank=4)\n\n # do the computation to find the right scale and height/width.\n scale_factor_height = (\n math_ops.cast(size[0], dtypes.float32) /\n math_ops.cast(current_height, dtypes.float32))\n scale_factor_width = (\n math_ops.cast(size[1], dtypes.float32) /\n math_ops.cast(current_width, dtypes.float32))\n scale_factor = math_ops.minimum(scale_factor_height, scale_factor_width)\n scaled_height_const = math_ops.cast(\n math_ops.round(scale_factor *\n math_ops.cast(current_height, dtypes.float32)),\n dtypes.int32)\n scaled_width_const = math_ops.cast(\n math_ops.round(scale_factor *\n math_ops.cast(current_width, dtypes.float32)),\n dtypes.int32)\n\n # NOTE: Reset the size and other constants used later.\n size = ops.convert_to_tensor([scaled_height_const, scaled_width_const],\n dtypes.int32,\n name='size')\n\n size_const_as_shape = tensor_util.constant_value_as_shape(size)\n new_height_const = tensor_shape.dimension_at_index(size_const_as_shape,\n 0).value\n new_width_const = tensor_shape.dimension_at_index(size_const_as_shape,\n 1).value\n\n # If we can determine that the height and width will be unmodified by this\n # transformation, we avoid performing the resize.\n if skip_resize_if_same and all(\n x is not None\n for x in [new_width_const, width, new_height_const, height]) and (\n width == new_width_const and height == new_height_const):\n if not is_batch:\n images = array_ops.squeeze(images, axis=[0])\n return images\n\n images = resizer_fn(images, size)\n\n # NOTE(mrry): The shape functions for the resize ops cannot unpack\n # the packed values in `new_size`, so set the shape here.\n images.set_shape([None, new_height_const, new_width_const, None])\n\n if not is_batch:\n images = array_ops.squeeze(images, axis=[0])\n return images\n\n\n@tf_export(v1=['image.resize_images', 'image.resize'])\[email protected]_dispatch_support\ndef resize_images(images,\n size,\n method=ResizeMethodV1.BILINEAR,\n align_corners=False,\n preserve_aspect_ratio=False,\n name=None):\n \"\"\"Resize `images` to `size` using the specified `method`.\n\n Resized images will be distorted if their original aspect ratio is not\n the same as `size`. 
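# --- Illustrative sketch (annotation, not in the original source): the
# preserve_aspect_ratio branch of _resize_images_common above picks the
# smaller of the two scale factors so the result fits inside `size`.
# Plain-Python mirror of that arithmetic:
height, width = 100, 200                          # current image size
size = (50, 50)                                   # requested (new_height, new_width)
scale = min(size[0] / height, size[1] / width)    # min(0.5, 0.25) = 0.25
new_size = (round(scale * height), round(scale * width))  # (25, 50)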
To avoid distortions see\n `tf.image.resize_with_pad` or `tf.image.resize_with_crop_or_pad`.\n\n The `method` can be one of:\n\n * <b>`tf.image.ResizeMethod.BILINEAR`</b>: [Bilinear interpolation.](\n https://en.wikipedia.org/wiki/Bilinear_interpolation)\n * <b>`tf.image.ResizeMethod.NEAREST_NEIGHBOR`</b>: [\n Nearest neighbor interpolation.](\n https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation)\n * <b>`tf.image.ResizeMethod.BICUBIC`</b>: [Bicubic interpolation.](\n https://en.wikipedia.org/wiki/Bicubic_interpolation)\n * <b>`tf.image.ResizeMethod.AREA`</b>: Area interpolation.\n\n The return value has the same type as `images` if `method` is\n `tf.image.ResizeMethod.NEAREST_NEIGHBOR`. It will also have the same type\n as `images` if the size of `images` can be statically determined to be the\n same as `size`, because `images` is returned in this case. Otherwise, the\n return value has type `float32`.\n\n Args:\n images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new\n size for the images.\n method: ResizeMethod. Defaults to `tf.image.ResizeMethod.BILINEAR`.\n align_corners: bool. If True, the centers of the 4 corner pixels of the\n input and output tensors are aligned, preserving the values at the corner\n pixels. Defaults to `False`.\n preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set,\n then `images` will be resized to a size that fits in `size` while\n preserving the aspect ratio of the original image. Scales up the image if\n `size` is bigger than the current size of the `image`. Defaults to False.\n name: A name for this operation (optional).\n\n Raises:\n ValueError: if the shape of `images` is incompatible with the\n shape arguments to this function\n ValueError: if `size` has invalid shape or type.\n ValueError: if an unsupported resize method is specified.\n\n Returns:\n If `images` was 4-D, a 4-D float Tensor of shape\n `[batch, new_height, new_width, channels]`.\n If `images` was 3-D, a 3-D float Tensor of shape\n `[new_height, new_width, channels]`.\n \"\"\"\n\n def resize_fn(images_t, new_size):\n \"\"\"Legacy resize core function, passed to _resize_images_common.\"\"\"\n if method == ResizeMethodV1.BILINEAR or method == ResizeMethod.BILINEAR:\n return gen_image_ops.resize_bilinear(\n images_t, new_size, align_corners=align_corners)\n elif (method == ResizeMethodV1.NEAREST_NEIGHBOR or\n method == ResizeMethod.NEAREST_NEIGHBOR):\n return gen_image_ops.resize_nearest_neighbor(\n images_t, new_size, align_corners=align_corners)\n elif method == ResizeMethodV1.BICUBIC or method == ResizeMethod.BICUBIC:\n return gen_image_ops.resize_bicubic(\n images_t, new_size, align_corners=align_corners)\n elif method == ResizeMethodV1.AREA or method == ResizeMethod.AREA:\n return gen_image_ops.resize_area(\n images_t, new_size, align_corners=align_corners)\n else:\n raise ValueError('Resize method is not implemented: {}'.format(method))\n\n return _resize_images_common(\n images,\n resize_fn,\n size,\n preserve_aspect_ratio=preserve_aspect_ratio,\n name=name,\n skip_resize_if_same=True)\n\n\n@tf_export('image.resize', v1=[])\[email protected]_dispatch_support\ndef resize_images_v2(images,\n size,\n method=ResizeMethod.BILINEAR,\n preserve_aspect_ratio=False,\n antialias=False,\n name=None):\n \"\"\"Resize `images` to `size` using the specified `method`.\n\n Resized images will be distorted if their original aspect ratio 
is not\n the same as `size`. To avoid distortions see\n `tf.image.resize_with_pad`.\n\n >>> image = tf.constant([\n ... [1,0,0,0,0],\n ... [0,1,0,0,0],\n ... [0,0,1,0,0],\n ... [0,0,0,1,0],\n ... [0,0,0,0,1],\n ... ])\n >>> # Add \"batch\" and \"channels\" dimensions\n >>> image = image[tf.newaxis, ..., tf.newaxis]\n >>> image.shape.as_list() # [batch, height, width, channels]\n [1, 5, 5, 1]\n >>> tf.image.resize(image, [3,5])[0,...,0].numpy()\n array([[0.6666667, 0.3333333, 0. , 0. , 0. ],\n [0. , 0. , 1. , 0. , 0. ],\n [0. , 0. , 0. , 0.3333335, 0.6666665]],\n dtype=float32)\n\n It works equally well with a single image instead of a batch of images:\n\n >>> tf.image.resize(image[0], [3,5]).shape.as_list()\n [3, 5, 1]\n\n When `antialias` is true, the sampling filter will anti-alias the input image\n as well as interpolate. When downsampling an image with [anti-aliasing](\n https://en.wikipedia.org/wiki/Spatial_anti-aliasing) the sampling filter\n kernel is scaled in order to properly anti-alias the input image signal.\n `antialias` has no effect when upsampling an image:\n\n >>> a = tf.image.resize(image, [5,10])\n >>> b = tf.image.resize(image, [5,10], antialias=True)\n >>> tf.reduce_max(abs(a - b)).numpy()\n 0.0\n\n The `method` argument expects an item from the `image.ResizeMethod` enum, or\n the string equivalent. The options are:\n\n * <b>`bilinear`</b>: [Bilinear interpolation.](\n https://en.wikipedia.org/wiki/Bilinear_interpolation) If `antialias` is\n true, becomes a hat/tent filter function with radius 1 when downsampling.\n * <b>`lanczos3`</b>: [Lanczos kernel](\n https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 3.\n High-quality practical filter but may have some ringing, especially on\n synthetic images.\n * <b>`lanczos5`</b>: [Lanczos kernel] (\n https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 5.\n Very-high-quality filter but may have stronger ringing.\n * <b>`bicubic`</b>: [Cubic interpolant](\n https://en.wikipedia.org/wiki/Bicubic_interpolation) of Keys. Equivalent to\n Catmull-Rom kernel. Reasonably good quality and faster than Lanczos3Kernel,\n particularly when upsampling.\n * <b>`gaussian`</b>: [Gaussian kernel](\n https://en.wikipedia.org/wiki/Gaussian_filter) with radius 3,\n sigma = 1.5 / 3.0.\n * <b>`nearest`</b>: [Nearest neighbor interpolation.](\n https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation)\n `antialias` has no effect when used with nearest neighbor interpolation.\n * <b>`area`</b>: Anti-aliased resampling with area interpolation.\n `antialias` has no effect when used with area interpolation; it\n always anti-aliases.\n * <b>`mitchellcubic`</b>: Mitchell-Netravali Cubic non-interpolating filter.\n For synthetic images (especially those lacking proper prefiltering), less\n ringing than Keys cubic kernel but less sharp.\n\n Note: Near image edges the filtering kernel may be partially outside the\n image boundaries. 
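# --- Illustrative sketch (annotation, not in the original source):
# `antialias` only changes the result when downsampling; the doctest above
# already shows it is a no-op when upsampling. Assumes TF 2.x eager execution.
import tensorflow as tf

img = tf.random.uniform([1, 64, 64, 3], seed=7)
plain = tf.image.resize(img, [16, 16])                   # no prefiltering
smooth = tf.image.resize(img, [16, 16], antialias=True)  # prefiltered kernel
diff = float(tf.reduce_max(tf.abs(plain - smooth)))      # > 0 in general when shrinking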
For these pixels, only input pixels inside the image will be\n included in the filter sum, and the output value will be appropriately\n normalized.\n\n The return value has type `float32`, unless the `method` is\n `ResizeMethod.NEAREST_NEIGHBOR`, then the return dtype is the dtype\n of `images`:\n\n >>> nn = tf.image.resize(image, [5,7], method='nearest')\n >>> nn[0,...,0].numpy()\n array([[1, 0, 0, 0, 0, 0, 0],\n [0, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 1]], dtype=int32)\n\n With `preserve_aspect_ratio=True`, the aspect ratio is preserved, so `size`\n is the maximum for each dimension:\n\n >>> max_10_20 = tf.image.resize(image, [10,20], preserve_aspect_ratio=True)\n >>> max_10_20.shape.as_list()\n [1, 10, 10, 1]\n\n Args:\n images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new\n size for the images.\n method: An `image.ResizeMethod`, or string equivalent. Defaults to\n `bilinear`.\n preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set,\n then `images` will be resized to a size that fits in `size` while\n preserving the aspect ratio of the original image. Scales up the image if\n `size` is bigger than the current size of the `image`. Defaults to False.\n antialias: Whether to use an anti-aliasing filter when downsampling an\n image.\n name: A name for this operation (optional).\n\n Raises:\n ValueError: if the shape of `images` is incompatible with the\n shape arguments to this function\n ValueError: if `size` has an invalid shape or type.\n ValueError: if an unsupported resize method is specified.\n\n Returns:\n If `images` was 4-D, a 4-D float Tensor of shape\n `[batch, new_height, new_width, channels]`.\n If `images` was 3-D, a 3-D float Tensor of shape\n `[new_height, new_width, channels]`.\n \"\"\"\n\n def resize_fn(images_t, new_size):\n \"\"\"Resize core function, passed to _resize_images_common.\"\"\"\n scale_and_translate_methods = [\n ResizeMethod.LANCZOS3, ResizeMethod.LANCZOS5, ResizeMethod.GAUSSIAN,\n ResizeMethod.MITCHELLCUBIC\n ]\n\n def resize_with_scale_and_translate(method):\n scale = (\n math_ops.cast(new_size, dtype=dtypes.float32) /\n math_ops.cast(array_ops.shape(images_t)[1:3], dtype=dtypes.float32))\n return gen_image_ops.scale_and_translate(\n images_t,\n new_size,\n scale,\n array_ops.zeros([2]),\n kernel_type=method,\n antialias=antialias)\n\n if method == ResizeMethod.BILINEAR:\n if antialias:\n return resize_with_scale_and_translate('triangle')\n else:\n return gen_image_ops.resize_bilinear(\n images_t, new_size, half_pixel_centers=True)\n elif method == ResizeMethod.NEAREST_NEIGHBOR:\n return gen_image_ops.resize_nearest_neighbor(\n images_t, new_size, half_pixel_centers=True)\n elif method == ResizeMethod.BICUBIC:\n if antialias:\n return resize_with_scale_and_translate('keyscubic')\n else:\n return gen_image_ops.resize_bicubic(\n images_t, new_size, half_pixel_centers=True)\n elif method == ResizeMethod.AREA:\n return gen_image_ops.resize_area(images_t, new_size)\n elif method in scale_and_translate_methods:\n return resize_with_scale_and_translate(method)\n else:\n raise ValueError('Resize method is not implemented: {}'.format(method))\n\n return _resize_images_common(\n images,\n resize_fn,\n size,\n preserve_aspect_ratio=preserve_aspect_ratio,\n name=name,\n skip_resize_if_same=False)\n\n\ndef _resize_image_with_pad_common(image, target_height, 
target_width,\n resize_fn):\n \"\"\"Core functionality for v1 and v2 resize_image_with_pad functions.\"\"\"\n with ops.name_scope(None, 'resize_image_with_pad', [image]):\n image = ops.convert_to_tensor(image, name='image')\n image_shape = image.get_shape()\n is_batch = True\n if image_shape.ndims == 3:\n is_batch = False\n image = array_ops.expand_dims(image, 0)\n elif image_shape.ndims is None:\n is_batch = False\n image = array_ops.expand_dims(image, 0)\n image.set_shape([None] * 4)\n elif image_shape.ndims != 4:\n raise ValueError(\n '\\'image\\' (shape %s) must have either 3 or 4 dimensions.' %\n image_shape)\n\n assert_ops = _CheckAtLeast3DImage(image, require_static=False)\n assert_ops += _assert(target_width > 0, ValueError,\n 'target_width must be > 0.')\n assert_ops += _assert(target_height > 0, ValueError,\n 'target_height must be > 0.')\n\n image = control_flow_ops.with_dependencies(assert_ops, image)\n\n def max_(x, y):\n if _is_tensor(x) or _is_tensor(y):\n return math_ops.maximum(x, y)\n else:\n return max(x, y)\n\n _, height, width, _ = _ImageDimensions(image, rank=4)\n\n # convert values to float, to ease divisions\n f_height = math_ops.cast(height, dtype=dtypes.float32)\n f_width = math_ops.cast(width, dtype=dtypes.float32)\n f_target_height = math_ops.cast(target_height, dtype=dtypes.float32)\n f_target_width = math_ops.cast(target_width, dtype=dtypes.float32)\n\n # Find the ratio by which the image must be adjusted\n # to fit within the target\n ratio = max_(f_width / f_target_width, f_height / f_target_height)\n resized_height_float = f_height / ratio\n resized_width_float = f_width / ratio\n resized_height = math_ops.cast(\n math_ops.floor(resized_height_float), dtype=dtypes.int32)\n resized_width = math_ops.cast(\n math_ops.floor(resized_width_float), dtype=dtypes.int32)\n\n padding_height = (f_target_height - resized_height_float) / 2\n padding_width = (f_target_width - resized_width_float) / 2\n f_padding_height = math_ops.floor(padding_height)\n f_padding_width = math_ops.floor(padding_width)\n p_height = max_(0, math_ops.cast(f_padding_height, dtype=dtypes.int32))\n p_width = max_(0, math_ops.cast(f_padding_width, dtype=dtypes.int32))\n\n # Resize first, then pad to meet requested dimensions\n resized = resize_fn(image, [resized_height, resized_width])\n\n padded = pad_to_bounding_box(resized, p_height, p_width, target_height,\n target_width)\n\n if padded.get_shape().ndims is None:\n raise ValueError('padded contains no shape.')\n\n _ImageDimensions(padded, rank=4)\n\n if not is_batch:\n padded = array_ops.squeeze(padded, axis=[0])\n\n return padded\n\n\n@tf_export(v1=['image.resize_image_with_pad'])\[email protected]_dispatch_support\ndef resize_image_with_pad_v1(image,\n target_height,\n target_width,\n method=ResizeMethodV1.BILINEAR,\n align_corners=False):\n \"\"\"Resizes and pads an image to a target width and height.\n\n Resizes an image to a target width and height by keeping\n the aspect ratio the same without distortion. If the target\n dimensions don't match the image dimensions, the image\n is resized and then padded with zeroes to match requested\n dimensions.\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n target_height: Target height.\n target_width: Target width.\n method: Method to use for resizing image. See `resize_images()`\n align_corners: bool. 
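# --- Illustrative sketch (annotation, not in the original source): the
# letterboxing arithmetic in _resize_image_with_pad_common above, in plain
# Python. The image shrinks by the larger overflow ratio, then is centered
# with zero padding.
h, w = 480, 640                           # input size
th, tw = 256, 256                         # target size
ratio = max(w / tw, h / th)               # 2.5
rh, rw = int(h / ratio), int(w / ratio)   # 192, 256 after resizing
pad_top = max(0, (th - rh) // 2)          # 32 rows of zeros above the image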
If True, the centers of the 4 corner pixels of the\n input and output tensors are aligned, preserving the values at the corner\n pixels. Defaults to `False`.\n\n Raises:\n ValueError: if `target_height` or `target_width` are zero or negative.\n\n Returns:\n Resized and padded image.\n If `images` was 4-D, a 4-D float Tensor of shape\n `[batch, new_height, new_width, channels]`.\n If `images` was 3-D, a 3-D float Tensor of shape\n `[new_height, new_width, channels]`.\n \"\"\"\n\n def _resize_fn(im, new_size):\n return resize_images(im, new_size, method, align_corners=align_corners)\n\n return _resize_image_with_pad_common(image, target_height, target_width,\n _resize_fn)\n\n\n@tf_export('image.resize_with_pad', v1=[])\[email protected]_dispatch_support\ndef resize_image_with_pad_v2(image,\n target_height,\n target_width,\n method=ResizeMethod.BILINEAR,\n antialias=False):\n \"\"\"Resizes and pads an image to a target width and height.\n\n Resizes an image to a target width and height by keeping\n the aspect ratio the same without distortion. If the target\n dimensions don't match the image dimensions, the image\n is resized and then padded with zeroes to match requested\n dimensions.\n\n Args:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n target_height: Target height.\n target_width: Target width.\n method: Method to use for resizing image. See `image.resize()`\n antialias: Whether to use anti-aliasing when resizing. See 'image.resize()'.\n\n Raises:\n ValueError: if `target_height` or `target_width` are zero or negative.\n\n Returns:\n Resized and padded image.\n If `images` was 4-D, a 4-D float Tensor of shape\n `[batch, new_height, new_width, channels]`.\n If `images` was 3-D, a 3-D float Tensor of shape\n `[new_height, new_width, channels]`.\n \"\"\"\n\n def _resize_fn(im, new_size):\n return resize_images_v2(im, new_size, method, antialias=antialias)\n\n return _resize_image_with_pad_common(image, target_height, target_width,\n _resize_fn)\n\n\n@tf_export('image.per_image_standardization')\[email protected]_dispatch_support\ndef per_image_standardization(image):\n \"\"\"Linearly scales each image in `image` to have mean 0 and variance 1.\n\n For each 3-D image `x` in `image`, computes `(x - mean) / adjusted_stddev`,\n where\n\n - `mean` is the average of all values in `x`\n - `adjusted_stddev = max(stddev, 1.0/sqrt(N))` is capped away from 0 to\n protect against division by 0 when handling uniform images\n - `N` is the number of elements in `x`\n - `stddev` is the standard deviation of all values in `x`\n\n Args:\n image: An n-D Tensor with at least 3 dimensions, the last 3 of which are the\n dimensions of each image.\n\n Returns:\n A `Tensor` with the same shape and dtype as `image`.\n\n Raises:\n ValueError: if the shape of 'image' is incompatible with this function.\n \"\"\"\n with ops.name_scope(None, 'per_image_standardization', [image]) as scope:\n image = ops.convert_to_tensor(image, name='image')\n image = _AssertAtLeast3DImage(image)\n\n # Remember original dtype to so we can convert back if needed\n orig_dtype = image.dtype\n if orig_dtype not in [dtypes.float16, dtypes.float32]:\n image = convert_image_dtype(image, dtypes.float32)\n\n num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:])\n image_mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True)\n\n # Apply a minimum normalization that protects us against uniform images.\n stddev = math_ops.reduce_std(image, axis=[-1, -2, 
-3], keepdims=True)\n min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, image.dtype))\n adjusted_stddev = math_ops.maximum(stddev, min_stddev)\n\n image -= image_mean\n image = math_ops.divide(image, adjusted_stddev, name=scope)\n return convert_image_dtype(image, orig_dtype, saturate=True)\n\n\n@tf_export('image.random_brightness')\[email protected]_dispatch_support\ndef random_brightness(image, max_delta, seed=None):\n \"\"\"Adjust the brightness of images by a random factor.\n\n Equivalent to `adjust_brightness()` using a `delta` randomly picked in the\n interval `[-max_delta, max_delta)`.\n\n Args:\n image: An image or images to adjust.\n max_delta: float, must be non-negative.\n seed: A Python integer. Used to create a random seed. See\n `tf.compat.v1.set_random_seed` for behavior.\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... [4.0, 5.0, 6.0]],\n ... [[7.0, 8.0, 9.0],\n ... [10.0, 11.0, 12.0]]]\n >>> tf.image.random_brightness(x, 0.2)\n <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>\n\n Returns:\n The brightness-adjusted image(s).\n\n Raises:\n ValueError: if `max_delta` is negative.\n \"\"\"\n if max_delta < 0:\n raise ValueError('max_delta must be non-negative.')\n\n delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed)\n return adjust_brightness(image, delta)\n\n\n@tf_export('image.stateless_random_brightness', v1=[])\[email protected]_dispatch_support\ndef stateless_random_brightness(image, max_delta, seed):\n \"\"\"Adjust the brightness of images by a random factor deterministically.\n\n Equivalent to `adjust_brightness()` using a `delta` randomly picked in the\n interval `[-max_delta, max_delta)`.\n\n Guarantees the same results given the same `seed` independent of how many\n times the function is called, and independent of global seed settings (e.g.\n `tf.random.set_seed`).\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... [4.0, 5.0, 6.0]],\n ... [[7.0, 8.0, 9.0],\n ... [10.0, 11.0, 12.0]]]\n >>> seed = (1, 2)\n >>> tf.image.stateless_random_brightness(x, 0.2, seed)\n <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n array([[[ 1.1376241, 2.1376243, 3.1376243],\n [ 4.1376243, 5.1376243, 6.1376243]],\n [[ 7.1376243, 8.137624 , 9.137624 ],\n [10.137624 , 11.137624 , 12.137624 ]]], dtype=float32)>\n\n Args:\n image: An image or images to adjust.\n max_delta: float, must be non-negative.\n seed: A shape [2] Tensor, the seed to the random number generator. Must have\n dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)\n\n Returns:\n The brightness-adjusted image(s).\n\n Raises:\n ValueError: if `max_delta` is negative.\n \"\"\"\n if max_delta < 0:\n raise ValueError('max_delta must be non-negative.')\n\n delta = stateless_random_ops.stateless_random_uniform(\n shape=[], minval=-max_delta, maxval=max_delta, seed=seed)\n return adjust_brightness(image, delta)\n\n\n@tf_export('image.random_contrast')\[email protected]_dispatch_support\ndef random_contrast(image, lower, upper, seed=None):\n \"\"\"Adjust the contrast of an image or images by a random factor.\n\n Equivalent to `adjust_contrast()` but uses a `contrast_factor` randomly\n picked in the interval `[lower, upper)`.\n\n Args:\n image: An image tensor with 3 or more dimensions.\n lower: float. Lower bound for the random contrast factor.\n upper: float. Upper bound for the random contrast factor.\n seed: A Python integer. Used to create a random seed. See\n `tf.compat.v1.set_random_seed` for behavior.\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... 
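# --- Illustrative sketch (annotation, not in the original source): the
# per_image_standardization formula written out with public TF2 ops, for a
# single 3-D float image.
import tensorflow as tf

x = tf.random.uniform([4, 4, 3], seed=3)
n = tf.cast(tf.size(x), tf.float32)
mean = tf.reduce_mean(x)
adjusted_std = tf.maximum(tf.math.reduce_std(x), tf.math.rsqrt(n))
manual = (x - mean) / adjusted_std
builtin = tf.image.per_image_standardization(x)  # agrees up to float rounding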
[4.0, 5.0, 6.0]],\n ... [[7.0, 8.0, 9.0],\n ... [10.0, 11.0, 12.0]]]\n >>> tf.image.random_contrast(x, 0.2, 0.5)\n <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>\n\n Returns:\n The contrast-adjusted image(s).\n\n Raises:\n ValueError: if `upper <= lower` or if `lower < 0`.\n \"\"\"\n if upper <= lower:\n raise ValueError('upper must be > lower.')\n\n if lower < 0:\n raise ValueError('lower must be non-negative.')\n\n contrast_factor = random_ops.random_uniform([], lower, upper, seed=seed)\n return adjust_contrast(image, contrast_factor)\n\n\n@tf_export('image.stateless_random_contrast', v1=[])\[email protected]_dispatch_support\ndef stateless_random_contrast(image, lower, upper, seed):\n \"\"\"Adjust the contrast of images by a random factor deterministically.\n\n Guarantees the same results given the same `seed` independent of how many\n times the function is called, and independent of global seed settings (e.g.\n `tf.random.set_seed`).\n\n Args:\n image: An image tensor with 3 or more dimensions.\n lower: float. Lower bound for the random contrast factor.\n upper: float. Upper bound for the random contrast factor.\n seed: A shape [2] Tensor, the seed to the random number generator. Must have\n dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... [4.0, 5.0, 6.0]],\n ... [[7.0, 8.0, 9.0],\n ... [10.0, 11.0, 12.0]]]\n >>> seed = (1, 2)\n >>> tf.image.stateless_random_contrast(x, 0.2, 0.5, seed)\n <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n array([[[3.4605184, 4.4605184, 5.4605184],\n [4.820173 , 5.820173 , 6.820173 ]],\n [[6.179827 , 7.179827 , 8.179828 ],\n [7.5394816, 8.539482 , 9.539482 ]]], dtype=float32)>\n\n Returns:\n The contrast-adjusted image(s).\n\n Raises:\n ValueError: if `upper <= lower` or if `lower < 0`.\n \"\"\"\n if upper <= lower:\n raise ValueError('upper must be > lower.')\n\n if lower < 0:\n raise ValueError('lower must be non-negative.')\n\n contrast_factor = stateless_random_ops.stateless_random_uniform(\n shape=[], minval=lower, maxval=upper, seed=seed)\n return adjust_contrast(image, contrast_factor)\n\n\n@tf_export('image.adjust_brightness')\[email protected]_dispatch_support\ndef adjust_brightness(image, delta):\n \"\"\"Adjust the brightness of RGB or Grayscale images.\n\n This is a convenience method that converts RGB images to float\n representation, adjusts their brightness, and then converts them back to the\n original data type. If several adjustments are chained, it is advisable to\n minimize the number of redundant conversions.\n\n The value `delta` is added to all components of the tensor `image`. `image` is\n converted to `float` and scaled appropriately if it is in fixed-point\n representation, and `delta` is converted to the same data type. For regular\n images, `delta` should be in the range `(-1,1)`, as it is added to the image\n in floating point representation, where pixel values are in the `[0,1)` range.\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... [4.0, 5.0, 6.0]],\n ... [[7.0, 8.0, 9.0],\n ... [10.0, 11.0, 12.0]]]\n >>> tf.image.adjust_brightness(x, delta=0.1)\n <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n array([[[ 1.1, 2.1, 3.1],\n [ 4.1, 5.1, 6.1]],\n [[ 7.1, 8.1, 9.1],\n [10.1, 11.1, 12.1]]], dtype=float32)>\n\n Args:\n image: RGB image or images to adjust.\n delta: A scalar. 
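# --- Illustrative sketch (annotation, not in the original source): unlike the
# seed-influenced random_* ops, the stateless_* variants above are pure
# functions of (input, seed) and repeat exactly.
import tensorflow as tf

x = tf.ones([2, 2, 3])
a = tf.image.stateless_random_brightness(x, max_delta=0.2, seed=(1, 2))
b = tf.image.stateless_random_brightness(x, max_delta=0.2, seed=(1, 2))
assert bool(tf.reduce_all(a == b))  # identical on every call with this seed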
Amount to add to the pixel values.\n\n  Returns:\n    A brightness-adjusted tensor of the same shape and type as `image`.\n  \"\"\"\n  with ops.name_scope(None, 'adjust_brightness', [image, delta]) as name:\n    image = ops.convert_to_tensor(image, name='image')\n    # Remember the original dtype so we can convert back if needed\n    orig_dtype = image.dtype\n\n    if orig_dtype in [dtypes.float16, dtypes.float32]:\n      flt_image = image\n    else:\n      flt_image = convert_image_dtype(image, dtypes.float32)\n\n    adjusted = math_ops.add(\n        flt_image, math_ops.cast(delta, flt_image.dtype), name=name)\n\n    return convert_image_dtype(adjusted, orig_dtype, saturate=True)\n\n\n@tf_export('image.adjust_contrast')\[email protected]_dispatch_support\ndef adjust_contrast(images, contrast_factor):\n  \"\"\"Adjust contrast of RGB or grayscale images.\n\n  This is a convenience method that converts RGB images to float\n  representation, adjusts their contrast, and then converts them back to the\n  original data type. If several adjustments are chained, it is advisable to\n  minimize the number of redundant conversions.\n\n  `images` is a tensor of at least 3 dimensions. The last 3 dimensions are\n  interpreted as `[height, width, channels]`. The other dimensions only\n  represent a collection of images, such as `[batch, height, width, channels].`\n\n  Contrast is adjusted independently for each channel of each image.\n\n  For each channel, this Op computes the mean of the image pixels in the\n  channel and then adjusts each component `x` of each pixel to\n  `(x - mean) * contrast_factor + mean`.\n\n  Usage Example:\n\n  >>> x = [[[1.0, 2.0, 3.0],\n  ...       [4.0, 5.0, 6.0]],\n  ...     [[7.0, 8.0, 9.0],\n  ...       [10.0, 11.0, 12.0]]]\n  >>> tf.image.adjust_contrast(x, 2)\n  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n  array([[[-3.5, -2.5, -1.5],\n          [ 2.5,  3.5,  4.5]],\n         [[ 8.5,  9.5, 10.5],\n          [14.5, 15.5, 16.5]]], dtype=float32)>\n\n  Args:\n    images: Images to adjust.  At least 3-D.\n    contrast_factor: A float multiplier for adjusting contrast.\n\n  Returns:\n    The contrast-adjusted image or images.\n  \"\"\"\n  with ops.name_scope(None, 'adjust_contrast',\n                      [images, contrast_factor]) as name:\n    images = ops.convert_to_tensor(images, name='images')\n    # Remember the original dtype so we can convert back if needed\n    orig_dtype = images.dtype\n\n    if orig_dtype in (dtypes.float16, dtypes.float32):\n      flt_images = images\n    else:\n      flt_images = convert_image_dtype(images, dtypes.float32)\n\n    adjusted = gen_image_ops.adjust_contrastv2(\n        flt_images, contrast_factor=contrast_factor, name=name)\n\n    return convert_image_dtype(adjusted, orig_dtype, saturate=True)\n\n\n@tf_export('image.adjust_gamma')\[email protected]_dispatch_support\ndef adjust_gamma(image, gamma=1, gain=1):\n  \"\"\"Performs [Gamma Correction](http://en.wikipedia.org/wiki/Gamma_correction)\n  on the input image.\n\n  Also known as Power Law Transform. This function first converts the\n  input images to float representation, then transforms them\n  pixelwise according to the equation `Out = gain * In**gamma`,\n  and then converts them back to the original data type.\n\n  Usage Example:\n\n  >>> x = [[[1.0, 2.0, 3.0],\n  ...       [4.0, 5.0, 6.0]],\n  ...     [[7.0, 8.0, 9.0],\n  ...       [10.0, 11.0, 12.0]]]\n  >>> tf.image.adjust_gamma(x, 0.2)\n  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n  array([[[1. 
, 1.1486983, 1.2457309],\n [1.319508 , 1.3797297, 1.4309691]],\n [[1.4757731, 1.5157166, 1.5518456],\n [1.5848932, 1.6153942, 1.6437519]]], dtype=float32)>\n\n Args:\n image : RGB image or images to adjust.\n gamma : A scalar or tensor. Non-negative real number.\n gain : A scalar or tensor. The constant multiplier.\n\n Returns:\n A Tensor. A Gamma-adjusted tensor of the same shape and type as `image`.\n\n Raises:\n ValueError: If gamma is negative.\n Notes:\n For gamma greater than 1, the histogram will shift towards left and\n the output image will be darker than the input image.\n For gamma less than 1, the histogram will shift towards right and\n the output image will be brighter than the input image.\n References:\n [Wikipedia](http://en.wikipedia.org/wiki/Gamma_correction)\n \"\"\"\n\n with ops.name_scope(None, 'adjust_gamma', [image, gamma, gain]) as name:\n image = ops.convert_to_tensor(image, name='image')\n # Remember original dtype to so we can convert back if needed\n orig_dtype = image.dtype\n\n if orig_dtype in [dtypes.float16, dtypes.float32]:\n flt_image = image\n else:\n flt_image = convert_image_dtype(image, dtypes.float32)\n\n assert_op = _assert(gamma >= 0, ValueError,\n 'Gamma should be a non-negative real number.')\n if assert_op:\n gamma = control_flow_ops.with_dependencies(assert_op, gamma)\n\n # According to the definition of gamma correction.\n adjusted_img = gain * flt_image**gamma\n\n return convert_image_dtype(adjusted_img, orig_dtype, saturate=True)\n\n\n@tf_export('image.convert_image_dtype')\[email protected]_dispatch_support\ndef convert_image_dtype(image, dtype, saturate=False, name=None):\n \"\"\"Convert `image` to `dtype`, scaling its values if needed.\n\n Images that are represented using floating point values are expected to have\n values in the range [0,1). Image data stored in integer data types are\n expected to have values in the range `[0,MAX]`, where `MAX` is the largest\n positive representable number for the data type.\n\n This op converts between data types, scaling the values appropriately before\n casting.\n\n Note that converting from floating point inputs to integer types may lead to\n over/underflow problems. Set saturate to `True` to avoid such problem in\n problematic conversions. If enabled, saturation will clip the output into the\n allowed range before performing a potentially dangerous cast (and only before\n performing such a cast, i.e., when casting from a floating point to an integer\n type, and when casting from a signed to an unsigned type; `saturate` has no\n effect on casts between floats, or on casts that increase the type's range).\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... [4.0, 5.0, 6.0]],\n ... [[7.0, 8.0, 9.0],\n ... 
[10.0, 11.0, 12.0]]]\n >>> tf.image.convert_image_dtype(x, dtype=tf.float16, saturate=False)\n <tf.Tensor: shape=(2, 2, 3), dtype=float16, numpy=\n array([[[ 1., 2., 3.],\n [ 4., 5., 6.]],\n [[ 7., 8., 9.],\n [10., 11., 12.]]], dtype=float16)>\n\n Args:\n image: An image.\n dtype: A `DType` to convert `image` to.\n saturate: If `True`, clip the input before casting (if necessary).\n name: A name for this operation (optional).\n\n Returns:\n `image`, converted to `dtype`.\n\n Raises:\n AttributeError: Raises an attribute error when dtype is neither\n float nor integer\n \"\"\"\n image = ops.convert_to_tensor(image, name='image')\n dtype = dtypes.as_dtype(dtype)\n if not dtype.is_floating and not dtype.is_integer:\n raise AttributeError('dtype must be either floating point or integer')\n if dtype == image.dtype:\n return array_ops.identity(image, name=name)\n\n with ops.name_scope(name, 'convert_image', [image]) as name:\n # Both integer: use integer multiplication in the larger range\n if image.dtype.is_integer and dtype.is_integer:\n scale_in = image.dtype.max\n scale_out = dtype.max\n if scale_in > scale_out:\n # Scaling down, scale first, then cast. The scaling factor will\n # cause in.max to be mapped to above out.max but below out.max+1,\n # so that the output is safely in the supported range.\n scale = (scale_in + 1) // (scale_out + 1)\n scaled = math_ops.floordiv(image, scale)\n\n if saturate:\n return math_ops.saturate_cast(scaled, dtype, name=name)\n else:\n return math_ops.cast(scaled, dtype, name=name)\n else:\n # Scaling up, cast first, then scale. The scale will not map in.max to\n # out.max, but converting back and forth should result in no change.\n if saturate:\n cast = math_ops.saturate_cast(image, dtype)\n else:\n cast = math_ops.cast(image, dtype)\n scale = (scale_out + 1) // (scale_in + 1)\n return math_ops.multiply(cast, scale, name=name)\n elif image.dtype.is_floating and dtype.is_floating:\n # Both float: Just cast, no possible overflows in the allowed ranges.\n # Note: We're ignoring float overflows. If your image dynamic range\n # exceeds float range, you're on your own.\n return math_ops.cast(image, dtype, name=name)\n else:\n if image.dtype.is_integer:\n # Converting to float: first cast, then scale. No saturation possible.\n cast = math_ops.cast(image, dtype)\n scale = 1. / image.dtype.max\n return math_ops.multiply(cast, scale, name=name)\n else:\n # Converting from float: first scale, then cast\n scale = dtype.max + 0.5 # avoid rounding problems in the cast\n scaled = math_ops.multiply(image, scale)\n if saturate:\n return math_ops.saturate_cast(scaled, dtype, name=name)\n else:\n return math_ops.cast(scaled, dtype, name=name)\n\n\n@tf_export('image.rgb_to_grayscale')\[email protected]_dispatch_support\ndef rgb_to_grayscale(images, name=None):\n \"\"\"Converts one or more images from RGB to Grayscale.\n\n Outputs a tensor of the same `DType` and rank as `images`. The size of the\n last dimension of the output is 1, containing the Grayscale value of the\n pixels.\n\n >>> original = tf.constant([[[1.0, 2.0, 3.0]]])\n >>> converted = tf.image.rgb_to_grayscale(original)\n >>> print(converted.numpy())\n [[[1.81...]]]\n\n Args:\n images: The RGB tensor to convert. 
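# --- Illustrative sketch (annotation, not in the original source): the
# integer<->float scaling performed by convert_image_dtype above.
import tensorflow as tf

u = tf.constant([[[0, 128, 255]]], dtype=tf.uint8)
f = tf.image.convert_image_dtype(u, tf.float32)   # [0.0, ~0.502, 1.0]
back = tf.image.convert_image_dtype(f, tf.uint8)  # [0, 128, 255] again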
The last dimension must have size 3 and\n      should contain RGB values.\n    name: A name for the operation (optional).\n\n  Returns:\n    The converted grayscale image(s).\n  \"\"\"\n  with ops.name_scope(name, 'rgb_to_grayscale', [images]) as name:\n    images = ops.convert_to_tensor(images, name='images')\n    # Remember the original dtype so we can convert back if needed\n    orig_dtype = images.dtype\n    flt_image = convert_image_dtype(images, dtypes.float32)\n\n    # Reference for converting between RGB and grayscale.\n    # https://en.wikipedia.org/wiki/Luma_%28video%29\n    rgb_weights = [0.2989, 0.5870, 0.1140]\n    gray_float = math_ops.tensordot(flt_image, rgb_weights, [-1, -1])\n    gray_float = array_ops.expand_dims(gray_float, -1)\n    return convert_image_dtype(gray_float, orig_dtype, name=name)\n\n\n@tf_export('image.grayscale_to_rgb')\[email protected]_dispatch_support\ndef grayscale_to_rgb(images, name=None):\n  \"\"\"Converts one or more images from Grayscale to RGB.\n\n  Outputs a tensor of the same `DType` and rank as `images`. The size of the\n  last dimension of the output is 3, containing the RGB value of the pixels.\n  The input images' last dimension must be size 1.\n\n  >>> original = tf.constant([[[1.0], [2.0], [3.0]]])\n  >>> converted = tf.image.grayscale_to_rgb(original)\n  >>> print(converted.numpy())\n  [[[1. 1. 1.]\n    [2. 2. 2.]\n    [3. 3. 3.]]]\n\n  Args:\n    images: The Grayscale tensor to convert. The last dimension must be size 1.\n    name: A name for the operation (optional).\n\n  Returns:\n    The converted RGB image(s).\n  \"\"\"\n  with ops.name_scope(name, 'grayscale_to_rgb', [images]) as name:\n    images = _AssertGrayscaleImage(images)\n\n    images = ops.convert_to_tensor(images, name='images')\n    rank_1 = array_ops.expand_dims(array_ops.rank(images) - 1, 0)\n    shape_list = ([array_ops.ones(rank_1, dtype=dtypes.int32)] +\n                  [array_ops.expand_dims(3, 0)])\n    multiples = array_ops.concat(shape_list, 0)\n    rgb = array_ops.tile(images, multiples, name=name)\n    rgb.set_shape(images.get_shape()[:-1].concatenate([3]))\n    return rgb\n\n\n# pylint: disable=invalid-name\n@tf_export('image.random_hue')\[email protected]_dispatch_support\ndef random_hue(image, max_delta, seed=None):\n  \"\"\"Adjust the hue of RGB images by a random factor.\n\n  Equivalent to `adjust_hue()` but uses a `delta` randomly\n  picked in the interval `[-max_delta, max_delta)`.\n\n  `max_delta` must be in the interval `[0, 0.5]`.\n\n  Usage Example:\n\n  >>> x = [[[1.0, 2.0, 3.0],\n  ...       [4.0, 5.0, 6.0]],\n  ...     [[7.0, 8.0, 9.0],\n  ...       [10.0, 11.0, 12.0]]]\n  >>> tf.image.random_hue(x, 0.2)\n  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>\n\n  Args:\n    image: RGB image or images. The size of the last dimension must be 3.\n    max_delta: float. The maximum value for the random delta.\n    seed: An operation-specific seed. It will be used in conjunction with the\n      graph-level seed to determine the real seeds that will be used in this\n      operation. 
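# --- Illustrative sketch (annotation, not in the original source): the
# Rec. 601 luma weights quoted in rgb_to_grayscale above, applied by hand.
import tensorflow as tf

rgb = tf.constant([[[0.2, 0.4, 0.6]]])
by_hand = 0.2989 * 0.2 + 0.5870 * 0.4 + 0.1140 * 0.6
gray = tf.image.rgb_to_grayscale(rgb)    # shape (1, 1, 1), value == by_hand
three = tf.image.grayscale_to_rgb(gray)  # the single channel copied 3x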
Please see the documentation of set_random_seed for its\n interaction with the graph-level random seed.\n\n Returns:\n Adjusted image(s), same shape and DType as `image`.\n\n Raises:\n ValueError: if `max_delta` is invalid.\n \"\"\"\n if max_delta > 0.5:\n raise ValueError('max_delta must be <= 0.5.')\n\n if max_delta < 0:\n raise ValueError('max_delta must be non-negative.')\n\n delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed)\n return adjust_hue(image, delta)\n\n\n@tf_export('image.stateless_random_hue', v1=[])\[email protected]_dispatch_support\ndef stateless_random_hue(image, max_delta, seed):\n \"\"\"Adjust the hue of RGB images by a random factor deterministically.\n\n Equivalent to `adjust_hue()` but uses a `delta` randomly picked in the\n interval `[-max_delta, max_delta)`.\n\n Guarantees the same results given the same `seed` independent of how many\n times the function is called, and independent of global seed settings (e.g.\n `tf.random.set_seed`).\n\n `max_delta` must be in the interval `[0, 0.5]`.\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... [4.0, 5.0, 6.0]],\n ... [[7.0, 8.0, 9.0],\n ... [10.0, 11.0, 12.0]]]\n >>> seed = (1, 2)\n >>> tf.image.stateless_random_hue(x, 0.2, seed)\n <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n array([[[ 1.6514902, 1. , 3. ],\n [ 4.65149 , 4. , 6. ]],\n [[ 7.65149 , 7. , 9. ],\n [10.65149 , 10. , 12. ]]], dtype=float32)>\n\n Args:\n image: RGB image or images. The size of the last dimension must be 3.\n max_delta: float. The maximum value for the random delta.\n seed: A shape [2] Tensor, the seed to the random number generator. Must have\n dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)\n\n Returns:\n Adjusted image(s), same shape and DType as `image`.\n\n Raises:\n ValueError: if `max_delta` is invalid.\n \"\"\"\n if max_delta > 0.5:\n raise ValueError('max_delta must be <= 0.5.')\n\n if max_delta < 0:\n raise ValueError('max_delta must be non-negative.')\n\n delta = stateless_random_ops.stateless_random_uniform(\n shape=[], minval=-max_delta, maxval=max_delta, seed=seed)\n return adjust_hue(image, delta)\n\n\n@tf_export('image.adjust_hue')\[email protected]_dispatch_support\ndef adjust_hue(image, delta, name=None):\n \"\"\"Adjust hue of RGB images.\n\n This is a convenience method that converts an RGB image to float\n representation, converts it to HSV, adds an offset to the\n hue channel, converts back to RGB and then back to the original\n data type. If several adjustments are chained it is advisable to minimize\n the number of redundant conversions.\n\n `image` is an RGB image. The image hue is adjusted by converting the\n image(s) to HSV and rotating the hue channel (H) by\n `delta`. The image is then converted back to RGB.\n\n `delta` must be in the interval `[-1, 1]`.\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... [4.0, 5.0, 6.0]],\n ... [[7.0, 8.0, 9.0],\n ... [10.0, 11.0, 12.0]]]\n >>> tf.image.adjust_hue(x, 0.2)\n <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n array([[[ 2.3999996, 1. , 3. ],\n [ 5.3999996, 4. , 6. ]],\n [[ 8.4 , 7. , 9. ],\n [11.4 , 10. , 12. ]]], dtype=float32)>\n\n Args:\n image: RGB image or images. The size of the last dimension must be 3.\n delta: float. How much to add to the hue channel.\n name: A name for this operation (optional).\n\n Returns:\n Adjusted image(s), same shape and DType as `image`.\n\n Usage Example:\n\n >>> image = [[[1, 2, 3], [4, 5, 6]],\n ... [[7, 8, 9], [10, 11, 12]],\n ... 
[[13, 14, 15], [16, 17, 18]]]\n  >>> image = tf.constant(image)\n  >>> tf.image.adjust_hue(image, 0.2)\n  <tf.Tensor: shape=(3, 2, 3), dtype=int32, numpy=\n  array([[[ 2,  1,  3],\n          [ 5,  4,  6]],\n         [[ 8,  7,  9],\n          [11, 10, 12]],\n         [[14, 13, 15],\n          [17, 16, 18]]], dtype=int32)>\n  \"\"\"\n  with ops.name_scope(name, 'adjust_hue', [image]) as name:\n    image = ops.convert_to_tensor(image, name='image')\n    # Remember the original dtype so we can convert back if needed\n    orig_dtype = image.dtype\n    if orig_dtype in (dtypes.float16, dtypes.float32):\n      flt_image = image\n    else:\n      flt_image = convert_image_dtype(image, dtypes.float32)\n\n    rgb_altered = gen_image_ops.adjust_hue(flt_image, delta)\n\n    return convert_image_dtype(rgb_altered, orig_dtype)\n\n\n# pylint: disable=invalid-name\n@tf_export('image.random_jpeg_quality')\[email protected]_dispatch_support\ndef random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed=None):\n  \"\"\"Randomly changes jpeg encoding quality for inducing jpeg noise.\n\n  `min_jpeg_quality` must be in the interval `[0, 100]` and less than\n  `max_jpeg_quality`.\n  `max_jpeg_quality` must be in the interval `[0, 100]`.\n\n  Usage Example:\n\n  >>> x = [[[1.0, 2.0, 3.0],\n  ...       [4.0, 5.0, 6.0]],\n  ...     [[7.0, 8.0, 9.0],\n  ...       [10.0, 11.0, 12.0]]]\n  >>> tf.image.random_jpeg_quality(x, 75, 95)\n  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>\n\n  Args:\n    image: 3D image. Size of the last dimension must be 1 or 3.\n    min_jpeg_quality: Minimum jpeg encoding quality to use.\n    max_jpeg_quality: Maximum jpeg encoding quality to use.\n    seed: An operation-specific seed. It will be used in conjunction with the\n      graph-level seed to determine the real seeds that will be used in this\n      operation. Please see the documentation of set_random_seed for its\n      interaction with the graph-level random seed.\n\n  Returns:\n    Adjusted image(s), same shape and DType as `image`.\n\n  Raises:\n    ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid.\n  \"\"\"\n  if (min_jpeg_quality < 0 or max_jpeg_quality < 0 or min_jpeg_quality > 100 or\n      max_jpeg_quality > 100):\n    raise ValueError('jpeg encoding range must be between 0 and 100.')\n\n  if min_jpeg_quality >= max_jpeg_quality:\n    raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.')\n\n  jpeg_quality = random_ops.random_uniform([],\n                                           min_jpeg_quality,\n                                           max_jpeg_quality,\n                                           seed=seed,\n                                           dtype=dtypes.int32)\n  return adjust_jpeg_quality(image, jpeg_quality)\n\n\n@tf_export('image.stateless_random_jpeg_quality', v1=[])\[email protected]_dispatch_support\ndef stateless_random_jpeg_quality(image,\n                                  min_jpeg_quality,\n                                  max_jpeg_quality,\n                                  seed):\n  \"\"\"Deterministically randomize jpeg encoding quality for inducing jpeg noise.\n\n  Guarantees the same results given the same `seed` independent of how many\n  times the function is called, and independent of global seed settings (e.g.\n  `tf.random.set_seed`).\n\n  `min_jpeg_quality` must be in the interval `[0, 100]` and less than\n  `max_jpeg_quality`.\n  `max_jpeg_quality` must be in the interval `[0, 100]`.\n\n  Usage Example:\n\n  >>> x = [[[1, 2, 3],\n  ...       [4, 5, 6]],\n  ...     [[7, 8, 9],\n  ...       [10, 11, 12]]]\n  >>> x_uint8 = tf.cast(x, tf.uint8)\n  >>> seed = (1, 2)\n  >>> tf.image.stateless_random_jpeg_quality(x_uint8, 75, 95, seed)\n  <tf.Tensor: shape=(2, 2, 3), dtype=uint8, numpy=\n  array([[[ 0,  4,  5],\n          [ 1,  5,  6]],\n         [[ 5,  9, 10],\n          [ 5,  9, 10]]], dtype=uint8)>\n\n  Args:\n    image: 3D image. 
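# --- Illustrative sketch (annotation, not in the original source): a hue
# rotation of half a turn (delta=0.5) in adjust_hue above maps pure red to
# its complement, cyan.
import tensorflow as tf

red = tf.constant([[[1.0, 0.0, 0.0]]])
cyan = tf.image.adjust_hue(red, 0.5)  # ~[[[0., 1., 1.]]]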
Size of the last dimension must be 1 or 3.\n    min_jpeg_quality: Minimum jpeg encoding quality to use.\n    max_jpeg_quality: Maximum jpeg encoding quality to use.\n    seed: A shape [2] Tensor, the seed to the random number generator. Must have\n      dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)\n\n  Returns:\n    Adjusted image(s), same shape and DType as `image`.\n\n  Raises:\n    ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid.\n  \"\"\"\n  if (min_jpeg_quality < 0 or max_jpeg_quality < 0 or min_jpeg_quality > 100 or\n      max_jpeg_quality > 100):\n    raise ValueError('jpeg encoding range must be between 0 and 100.')\n\n  if min_jpeg_quality >= max_jpeg_quality:\n    raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.')\n\n  jpeg_quality = stateless_random_ops.stateless_random_uniform(\n      shape=[], minval=min_jpeg_quality, maxval=max_jpeg_quality, seed=seed,\n      dtype=dtypes.int32)\n  return adjust_jpeg_quality(image, jpeg_quality)\n\n\n@tf_export('image.adjust_jpeg_quality')\[email protected]_dispatch_support\ndef adjust_jpeg_quality(image, jpeg_quality, name=None):\n  \"\"\"Adjust jpeg encoding quality of an image.\n\n  This is a convenience method that converts an image to uint8 representation,\n  encodes it to jpeg with `jpeg_quality`, decodes it, and then converts back\n  to the original data type.\n\n  `jpeg_quality` must be in the interval `[0, 100]`.\n\n  Usage Example:\n\n  >>> x = [[[1.0, 2.0, 3.0],\n  ...       [4.0, 5.0, 6.0]],\n  ...     [[7.0, 8.0, 9.0],\n  ...       [10.0, 11.0, 12.0]]]\n  >>> tf.image.adjust_jpeg_quality(x, 75)\n  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n  array([[[1., 1., 1.],\n          [1., 1., 1.]],\n         [[1., 1., 1.],\n          [1., 1., 1.]]], dtype=float32)>\n\n  Args:\n    image: 3D image. The size of the last dimension must be None, 1 or 3.\n    jpeg_quality: Python int or Tensor of type int32. jpeg encoding quality.\n    name: A name for this operation (optional).\n\n  Returns:\n    Adjusted image, same shape and DType as `image`.\n\n  Raises:\n    InvalidArgumentError: quality must be in [0,100]\n    InvalidArgumentError: image must have 1 or 3 channels\n  \"\"\"\n  with ops.name_scope(name, 'adjust_jpeg_quality', [image]):\n    image = ops.convert_to_tensor(image, name='image')\n    channels = image.shape.as_list()[-1]\n    # Remember the original dtype so we can convert back if needed\n    orig_dtype = image.dtype\n    image = convert_image_dtype(image, dtypes.uint8, saturate=True)\n    if not _is_tensor(jpeg_quality):\n      # If jpeg_quality is an int (not a tensor).\n      jpeg_quality = ops.convert_to_tensor(jpeg_quality, dtype=dtypes.int32)\n    image = gen_image_ops.encode_jpeg_variable_quality(image, jpeg_quality)\n\n    image = gen_image_ops.decode_jpeg(image, channels=channels)\n    return convert_image_dtype(image, orig_dtype, saturate=True)\n\n\n@tf_export('image.random_saturation')\[email protected]_dispatch_support\ndef random_saturation(image, lower, upper, seed=None):\n  \"\"\"Adjust the saturation of RGB images by a random factor.\n\n  Equivalent to `adjust_saturation()` but uses a `saturation_factor` randomly\n  picked in the interval `[lower, upper)`.\n\n  Usage Example:\n\n  >>> x = [[[1.0, 2.0, 3.0],\n  ...       [4.0, 5.0, 6.0]],\n  ...     [[7.0, 8.0, 9.0],\n  ...       [10.0, 11.0, 12.0]]]\n  >>> tf.image.random_saturation(x, 5, 10)\n  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n  array([[[ 0. ,  1.5,  3. ],\n          [ 0. ,  3. ,  6. ]],\n         [[ 0. ,  4.5,  9. ],\n          [ 0. ,  6. , 12. ]]], dtype=float32)>\n\n  Args:\n    image: RGB image or images. The size of the last dimension must be 3.\n    lower: float. 
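# --- Illustrative sketch (annotation, not in the original source):
# adjust_jpeg_quality above round-trips through uint8 JPEG bytes, so it is
# lossy even for float inputs, while shape and dtype are preserved.
import tensorflow as tf

img = tf.random.uniform([16, 16, 3], seed=5)
degraded = tf.image.adjust_jpeg_quality(img, 25)  # visible block artifacts
assert degraded.shape == img.shape and degraded.dtype == img.dtype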
Lower bound for the random saturation factor.\n upper: float. Upper bound for the random saturation factor.\n seed: An operation-specific seed. It will be used in conjunction with the\n graph-level seed to determine the real seeds that will be used in this\n operation. Please see the documentation of set_random_seed for its\n interaction with the graph-level random seed.\n\n Returns:\n Adjusted image(s), same shape and DType as `image`.\n\n Raises:\n ValueError: if `upper <= lower` or if `lower < 0`.\n \"\"\"\n if upper <= lower:\n raise ValueError('upper must be > lower.')\n\n if lower < 0:\n raise ValueError('lower must be non-negative.')\n\n saturation_factor = random_ops.random_uniform([], lower, upper, seed=seed)\n return adjust_saturation(image, saturation_factor)\n\n\n@tf_export('image.stateless_random_saturation', v1=[])\[email protected]_dispatch_support\ndef stateless_random_saturation(image, lower, upper, seed=None):\n \"\"\"Adjust the saturation of RGB images by a random factor deterministically.\n\n Equivalent to `adjust_saturation()` but uses a `saturation_factor` randomly\n picked in the interval `[lower, upper)`.\n\n Guarantees the same results given the same `seed` independent of how many\n times the function is called, and independent of global seed settings (e.g.\n `tf.random.set_seed`).\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... [4.0, 5.0, 6.0]],\n ... [[7.0, 8.0, 9.0],\n ... [10.0, 11.0, 12.0]]]\n >>> seed = (1, 2)\n >>> tf.image.stateless_random_saturation(x, 0.5, 1.0, seed)\n <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n array([[[ 1.1559395, 2.0779698, 3. ],\n [ 4.1559396, 5.07797 , 6. ]],\n [[ 7.1559396, 8.07797 , 9. ],\n [10.155939 , 11.07797 , 12. ]]], dtype=float32)>\n\n Args:\n image: RGB image or images. The size of the last dimension must be 3.\n lower: float. Lower bound for the random saturation factor.\n upper: float. Upper bound for the random saturation factor.\n seed: A shape [2] Tensor, the seed to the random number generator. Must have\n dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)\n\n Returns:\n Adjusted image(s), same shape and DType as `image`.\n\n Raises:\n ValueError: if `upper <= lower` or if `lower < 0`.\n \"\"\"\n if upper <= lower:\n raise ValueError('upper must be > lower.')\n\n if lower < 0:\n raise ValueError('lower must be non-negative.')\n\n saturation_factor = stateless_random_ops.stateless_random_uniform(\n shape=[], minval=lower, maxval=upper, seed=seed)\n return adjust_saturation(image, saturation_factor)\n\n\n@tf_export('image.adjust_saturation')\[email protected]_dispatch_support\ndef adjust_saturation(image, saturation_factor, name=None):\n \"\"\"Adjust saturation of RGB images.\n\n This is a convenience method that converts RGB images to float\n representation, converts them to HSV, adds an offset to the\n saturation channel, converts back to RGB and then back to the original\n data type. If several adjustments are chained it is advisable to minimize\n the number of redundant conversions.\n\n `image` is an RGB image or images. The image saturation is adjusted by\n converting the images to HSV and multiplying the saturation (S) channel by\n `saturation_factor` and clipping. The images are then converted back to RGB.\n\n Usage Example:\n\n >>> x = [[[1.0, 2.0, 3.0],\n ... [4.0, 5.0, 6.0]],\n ... [[7.0, 8.0, 9.0],\n ... [10.0, 11.0, 12.0]]]\n >>> tf.image.adjust_saturation(x, 0.5)\n <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=\n array([[[ 2. , 2.5, 3. ],\n [ 5. , 5.5, 6. ]],\n [[ 8. 
, 8.5, 9. ],\n [11. , 11.5, 12. ]]], dtype=float32)>\n\n Args:\n image: RGB image or images. The size of the last dimension must be 3.\n saturation_factor: float. Factor to multiply the saturation by.\n name: A name for this operation (optional).\n\n Returns:\n Adjusted image(s), same shape and DType as `image`.\n\n Raises:\n InvalidArgumentError: input must have 3 channels\n \"\"\"\n with ops.name_scope(name, 'adjust_saturation', [image]) as name:\n image = ops.convert_to_tensor(image, name='image')\n # Remember the original dtype so we can convert back if needed.\n orig_dtype = image.dtype\n if orig_dtype in (dtypes.float16, dtypes.float32):\n flt_image = image\n else:\n flt_image = convert_image_dtype(image, dtypes.float32)\n\n adjusted = gen_image_ops.adjust_saturation(flt_image, saturation_factor)\n\n return convert_image_dtype(adjusted, orig_dtype)\n\n\n@tf_export('io.is_jpeg', 'image.is_jpeg', v1=['io.is_jpeg', 'image.is_jpeg'])\ndef is_jpeg(contents, name=None):\n r\"\"\"Convenience function to check if the 'contents' encodes a JPEG image.\n\n Args:\n contents: 0-D `string`. The encoded image bytes.\n name: A name for the operation (optional)\n\n Returns:\n A scalar boolean tensor indicating if 'contents' may be a JPEG image.\n is_jpeg is susceptible to false positives.\n \"\"\"\n # Normal JPEGs start with \\xff\\xd8\\xff\\xe0\n # JPEG with EXIF starts with \\xff\\xd8\\xff\\xe1\n # Use \\xff\\xd8\\xff to cover both.\n with ops.name_scope(name, 'is_jpeg'):\n substr = string_ops.substr(contents, 0, 3)\n return math_ops.equal(substr, b'\\xff\\xd8\\xff', name=name)\n\n\ndef _is_png(contents, name=None):\n r\"\"\"Convenience function to check if the 'contents' encodes a PNG image.\n\n Args:\n contents: 0-D `string`. The encoded image bytes.\n name: A name for the operation (optional)\n\n Returns:\n A scalar boolean tensor indicating if 'contents' may be a PNG image.\n is_png is susceptible to false positives.\n \"\"\"\n with ops.name_scope(name, 'is_png'):\n substr = string_ops.substr(contents, 0, 3)\n return math_ops.equal(substr, b'\\211PN', name=name)\n\n\ntf_export(\n 'io.decode_and_crop_jpeg',\n 'image.decode_and_crop_jpeg',\n v1=['io.decode_and_crop_jpeg', 'image.decode_and_crop_jpeg'])(\n dispatch.add_dispatch_support(gen_image_ops.decode_and_crop_jpeg))\n\ntf_export(\n 'io.decode_bmp',\n 'image.decode_bmp',\n v1=['io.decode_bmp', 'image.decode_bmp'])(\n dispatch.add_dispatch_support(gen_image_ops.decode_bmp))\ntf_export(\n 'io.decode_gif',\n 'image.decode_gif',\n v1=['io.decode_gif', 'image.decode_gif'])(\n dispatch.add_dispatch_support(gen_image_ops.decode_gif))\ntf_export(\n 'io.decode_jpeg',\n 'image.decode_jpeg',\n v1=['io.decode_jpeg', 'image.decode_jpeg'])(\n dispatch.add_dispatch_support(gen_image_ops.decode_jpeg))\ntf_export(\n 'io.decode_png',\n 'image.decode_png',\n v1=['io.decode_png', 'image.decode_png'])(\n dispatch.add_dispatch_support(gen_image_ops.decode_png))\n\ntf_export(\n 'io.encode_jpeg',\n 'image.encode_jpeg',\n v1=['io.encode_jpeg', 'image.encode_jpeg'])(\n dispatch.add_dispatch_support(gen_image_ops.encode_jpeg))\ntf_export(\n 'io.extract_jpeg_shape',\n 'image.extract_jpeg_shape',\n v1=['io.extract_jpeg_shape', 'image.extract_jpeg_shape'])(\n dispatch.add_dispatch_support(gen_image_ops.extract_jpeg_shape))\n\n\n@tf_export('io.encode_png', 'image.encode_png')\[email protected]_dispatch_support\ndef encode_png(image, compression=-1, name=None):\n r\"\"\"PNG-encode an image.\n\n `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`\n where `channels` is:\n\n * 1: for grayscale.\n * 2: for grayscale + alpha.\n * 3: for RGB.\n * 4: for RGBA.\n\n The ZLIB compression level, `compression`, can be -1 for the PNG-encoder\n default or a value from 0 to 9. 9 is the highest compression level,\n generating the smallest output, but is slower.\n
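\n For example, a minimal round-trip sketch (the file paths are hypothetical):\n\n ```python\n image = tf.io.decode_png(tf.io.read_file('path/to/input.png'))\n png_bytes = tf.io.encode_png(image)\n tf.io.write_file('path/to/output.png', png_bytes)\n ```\n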
\n Args:\n image: A `Tensor`. Must be one of the following types: `uint8`, `uint16`.\n 3-D with shape `[height, width, channels]`.\n compression: An optional `int`. Defaults to `-1`. Compression level.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `string`.\n \"\"\"\n return gen_image_ops.encode_png(\n ops.convert_to_tensor(image), compression, name)\n\n\n@tf_export(\n 'io.decode_image',\n 'image.decode_image',\n v1=['io.decode_image', 'image.decode_image'])\[email protected]_dispatch_support\ndef decode_image(contents,\n channels=None,\n dtype=dtypes.uint8,\n name=None,\n expand_animations=True):\n \"\"\"Function for `decode_bmp`, `decode_gif`, `decode_jpeg`, and `decode_png`.\n\n Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the\n appropriate operation to convert the input bytes `string` into a `Tensor`\n of type `dtype`.\n\n Note: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`, as\n opposed to `decode_bmp`, `decode_jpeg` and `decode_png`, which return 3-D\n arrays `[height, width, num_channels]`. Make sure to take this into account\n when constructing your graph if you are intermixing GIF files with BMP, JPEG,\n and/or PNG files. Alternately, set the `expand_animations` argument of this\n function to `False`, in which case the op will return 3-dimensional tensors\n and will truncate animated GIF files to the first frame.\n
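\n For example, a minimal sketch (the file path is hypothetical):\n\n ```python\n contents = tf.io.read_file('path/to/image.png')\n # A 3-D uint8 tensor of shape [height, width, 3]; an animated GIF would be\n # truncated to its first frame since expand_animations=False.\n image = tf.io.decode_image(contents, channels=3, expand_animations=False)\n ```\n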
\n Args:\n contents: 0-D `string`. The encoded image bytes.\n channels: An optional `int`. Defaults to `None`, which is treated as `0`.\n Number of color channels for the decoded image.\n dtype: The desired DType of the returned `Tensor`.\n name: A name for the operation (optional)\n expand_animations: Controls the shape of the returned op's output. If\n `True`, the returned op will produce a 3-D tensor for PNG, JPEG, and BMP\n files; and a 4-D tensor for all GIFs, whether animated or not. If\n `False`, the returned op will produce a 3-D tensor for all file types and\n will truncate animated GIFs to the first frame.\n\n Returns:\n `Tensor` with type `dtype` and a 3- or 4-dimensional shape, depending on\n the file type and the value of the `expand_animations` parameter.\n\n Raises:\n ValueError: On incorrect number of channels.\n \"\"\"\n with ops.name_scope(name, 'decode_image'):\n if compat.forward_compatible(2020, 8, 14):\n channels = 0 if channels is None else channels\n if dtype not in [dtypes.float32, dtypes.uint8, dtypes.uint16]:\n dest_dtype = dtype\n dtype = dtypes.uint16\n return convert_image_dtype(gen_image_ops.decode_image(\n contents=contents,\n channels=channels,\n expand_animations=expand_animations,\n dtype=dtype), dest_dtype)\n else:\n return gen_image_ops.decode_image(\n contents=contents,\n channels=channels,\n expand_animations=expand_animations,\n dtype=dtype)\n\n if channels not in (None, 0, 1, 3, 4):\n raise ValueError('channels must be in (None, 0, 1, 3, 4)')\n substr = string_ops.substr(contents, 0, 3)\n\n def _bmp():\n \"\"\"Decodes a BMP image.\"\"\"\n signature = string_ops.substr(contents, 0, 2)\n # Create assert op to check that bytes are BMP decodable\n is_bmp = math_ops.equal(signature, 'BM', name='is_bmp')\n decode_msg = 'Unable to decode bytes as JPEG, PNG, GIF, or BMP'\n assert_decode = control_flow_ops.Assert(is_bmp, [decode_msg])\n bmp_channels = 0 if channels is None else channels\n good_channels = math_ops.not_equal(bmp_channels, 1, name='check_channels')\n channels_msg = ('Channels must be in (None, 0, 3, 4) when decoding BMP '\n 'images')\n assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])\n with ops.control_dependencies([assert_decode, assert_channels]):\n return convert_image_dtype(\n gen_image_ops.decode_bmp(contents, channels=bmp_channels), dtype)\n\n def _gif():\n \"\"\"Decodes a GIF image.\"\"\"\n # Create assert to make sure that channels is not set to 1\n # Already checked above that channels is in (None, 0, 1, 3, 4)\n gif_channels = 0 if channels is None else channels\n good_channels = math_ops.logical_and(\n math_ops.not_equal(gif_channels, 1, name='check_gif_channels'),\n math_ops.not_equal(gif_channels, 4, name='check_gif_channels'))\n channels_msg = 'Channels must be in (None, 0, 3) when decoding GIF images'\n assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])\n with ops.control_dependencies([assert_channels]):\n result = convert_image_dtype(gen_image_ops.decode_gif(contents), dtype)\n if not expand_animations:\n # For now we decode animated GIFs fully and toss out all but the\n # first frame when expand_animations is False\n result = array_ops.gather(result, 0)\n return result\n\n def check_gif():\n # Create assert op to check that bytes are GIF decodable\n is_gif = math_ops.equal(substr, b'\\x47\\x49\\x46', name='is_gif')\n return control_flow_ops.cond(is_gif, _gif, _bmp, name='cond_gif')\n\n def _png():\n \"\"\"Decodes a PNG image.\"\"\"\n return convert_image_dtype(\n gen_image_ops.decode_png(\n contents,\n channels,\n dtype=dtypes.uint8 if dtype == dtypes.uint8 else dtypes.uint16),\n dtype)\n\n def check_png():\n \"\"\"Checks if an image is PNG.\"\"\"\n return control_flow_ops.cond(\n _is_png(contents), _png, check_gif, name='cond_png')\n\n def _jpeg():\n \"\"\"Decodes a jpeg image.\"\"\"\n jpeg_channels = 0 if channels is None else channels\n good_channels = math_ops.not_equal(\n jpeg_channels, 4, name='check_jpeg_channels')\n channels_msg = ('Channels must be in (None, 0, 
1, 3) when decoding JPEG '\n 'images')\n assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])\n with ops.control_dependencies([assert_channels]):\n return convert_image_dtype(\n gen_image_ops.decode_jpeg(contents, channels), dtype)\n\n # Decode normal JPEG images (start with \\xff\\xd8\\xff\\xe0)\n # as well as JPEG images with EXIF data (start with \\xff\\xd8\\xff\\xe1).\n return control_flow_ops.cond(\n is_jpeg(contents), _jpeg, check_png, name='cond_jpeg')\n\n\n@tf_export('image.total_variation')\[email protected]_dispatch_support\ndef total_variation(images, name=None):\n \"\"\"Calculate and return the total variation for one or more images.\n\n The total variation is the sum of the absolute differences for neighboring\n pixel-values in the input images. This measures how much noise is in the\n images.\n\n This can be used as a loss-function during optimization so as to suppress\n noise in images. If you have a batch of images, then you should calculate\n the scalar loss-value as the sum:\n `loss = tf.reduce_sum(tf.image.total_variation(images))`\n\n This implements the anisotropic 2-D version of the formula described here:\n\n https://en.wikipedia.org/wiki/Total_variation_denoising\n\n Args:\n images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n name: A name for the operation (optional).\n\n Raises:\n ValueError: if images.shape is not a 3-D or 4-D tensor.\n\n Returns:\n The total variation of `images`.\n\n If `images` was 4-D, return a 1-D float Tensor of shape `[batch]` with the\n total variation for each image in the batch.\n If `images` was 3-D, return a scalar float with the total variation for\n that image.\n \"\"\"\n\n with ops.name_scope(name, 'total_variation'):\n ndims = images.get_shape().ndims\n\n if ndims == 3:\n # The input is a single image with shape [height, width, channels].\n\n # Calculate the difference of neighboring pixel-values.\n # The images are shifted one pixel along the height and width by slicing.\n pixel_dif1 = images[1:, :, :] - images[:-1, :, :]\n pixel_dif2 = images[:, 1:, :] - images[:, :-1, :]\n
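\n # As a sketch of what is computed here for a 3-D image I with values\n # I[y, x, c]:\n #   TV(I) = sum over y, x, c of |I[y+1, x, c] - I[y, x, c]|\n #                              + |I[y, x+1, c] - I[y, x, c]|\n # i.e. the anisotropic total variation referenced in the docstring.\n\n # Sum over all axes (None is an alias for all axes).\n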
sum_axis = None\n elif ndims == 4:\n # The input is a batch of images with shape:\n # [batch, height, width, channels].\n\n # Calculate the difference of neighboring pixel-values.\n # The images are shifted one pixel along the height and width by slicing.\n pixel_dif1 = images[:, 1:, :, :] - images[:, :-1, :, :]\n pixel_dif2 = images[:, :, 1:, :] - images[:, :, :-1, :]\n\n # Only sum over the last 3 axes.\n # This results in a 1-D tensor with the total variation for each image.\n sum_axis = [1, 2, 3]\n else:\n raise ValueError('\\'images\\' must be either 3 or 4-dimensional.')\n\n # Calculate the total variation by taking the absolute value of the\n # pixel-differences and summing over the appropriate axis.\n tot_var = (\n math_ops.reduce_sum(math_ops.abs(pixel_dif1), axis=sum_axis) +\n math_ops.reduce_sum(math_ops.abs(pixel_dif2), axis=sum_axis))\n\n return tot_var\n\n\n@tf_export('image.sample_distorted_bounding_box', v1=[])\[email protected]_dispatch_support\ndef sample_distorted_bounding_box_v2(image_size,\n bounding_boxes,\n seed=0,\n min_object_covered=0.1,\n aspect_ratio_range=None,\n area_range=None,\n max_attempts=None,\n use_image_if_no_bounding_boxes=None,\n name=None):\n \"\"\"Generate a single randomly distorted bounding box for an image.\n\n Bounding box annotations are often supplied in addition to ground-truth labels\n in image recognition or object localization tasks. A common technique for\n training such a system is to randomly distort an image while preserving\n its content, i.e. *data augmentation*. This Op outputs a randomly distorted\n localization of an object, i.e. bounding box, given an `image_size`,\n `bounding_boxes` and a series of constraints.\n\n The output of this Op is a single bounding box that may be used to crop the\n original image. The output is returned as 3 tensors: `begin`, `size` and\n `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the\n image. The latter may be supplied to `tf.image.draw_bounding_boxes` to\n visualize what the bounding box looks like.\n\n Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`.\n The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width\n and the height of the underlying image.\n\n For example,\n\n ```python\n # Generate a single distorted bounding box.\n begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=bounding_boxes,\n min_object_covered=0.1)\n\n # Draw the bounding box in an image summary.\n image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),\n bbox_for_draw)\n tf.compat.v1.summary.image('images_with_box', image_with_box)\n\n # Employ the bounding box to distort the image.\n distorted_image = tf.slice(image, begin, size)\n ```\n\n Note that if no bounding box information is available, setting\n `use_image_if_no_bounding_boxes = true` will assume there is a single implicit\n bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is\n false and no bounding boxes are supplied, an error is raised.\n\n Args:\n image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`,\n `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`.\n bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]`\n describing the N bounding boxes associated with the image.\n seed: An optional `int`. Defaults to `0`. 
If `seed` is set to non-zero, the\n random number generator is seeded by the given `seed`. Otherwise, it is\n seeded by a random seed.\n min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The\n cropped area of the image must contain at least this fraction of any\n bounding box supplied. The value of this parameter should be non-negative.\n In the case of 0, the cropped area does not need to overlap any of the\n bounding boxes supplied.\n aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75,\n 1.33]`. The cropped area of the image must have an aspect `ratio = width /\n height` within this range.\n area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The\n cropped area of the image must contain a fraction of the supplied image\n within this range.\n max_attempts: An optional `int`. Defaults to `100`. Number of attempts at\n generating a cropped region of the image of the specified constraints.\n After `max_attempts` failures, return the entire image.\n use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`.\n Controls behavior if no bounding boxes supplied. If true, assume an\n implicit bounding box covering the whole input. If false, raise an error.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (begin, size, bboxes).\n\n begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing\n `[offset_height, offset_width, 0]`. Provide as input to\n `tf.slice`.\n size: A `Tensor`. Has the same type as `image_size`. 1-D, containing\n `[target_height, target_width, -1]`. Provide as input to\n `tf.slice`.\n bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing\n the distorted bounding box.\n Provide as input to `tf.image.draw_bounding_boxes`.\n \"\"\"\n seed1, seed2 = random_seed.get_seed(seed) if seed else (0, 0)\n with ops.name_scope(name, 'sample_distorted_bounding_box'):\n return gen_image_ops.sample_distorted_bounding_box_v2(\n image_size,\n bounding_boxes,\n seed=seed1,\n seed2=seed2,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes,\n name=name)\n\n\n@tf_export('image.stateless_sample_distorted_bounding_box', v1=[])\[email protected]_dispatch_support\ndef stateless_sample_distorted_bounding_box(image_size,\n bounding_boxes,\n seed,\n min_object_covered=0.1,\n aspect_ratio_range=None,\n area_range=None,\n max_attempts=None,\n use_image_if_no_bounding_boxes=None,\n name=None):\n \"\"\"Generate a randomly distorted bounding box for an image deterministically.\n\n Bounding box annotations are often supplied in addition to ground-truth labels\n in image recognition or object localization tasks. A common technique for\n training such a system is to randomly distort an image while preserving\n its content, i.e. *data augmentation*. This Op, given the same `seed`,\n deterministically outputs a randomly distorted localization of an object, i.e.\n bounding box, given an `image_size`, `bounding_boxes` and a series of\n constraints.\n\n The output of this Op is a single bounding box that may be used to crop the\n original image. The output is returned as 3 tensors: `begin`, `size` and\n `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the\n image. 
The latter may be supplied to `tf.image.draw_bounding_boxes` to\n visualize what the bounding box looks like.\n\n Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`.\n The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width\n and the height of the underlying image.\n\n The output of this Op is guaranteed to be the same given the same `seed` and\n is independent of how many times the function is called, and independent of\n global seed settings (e.g. `tf.random.set_seed`).\n\n Example usage:\n\n >>> image = np.array([[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]])\n >>> bbox = tf.constant(\n ... [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])\n >>> seed = (1, 2)\n >>> # Generate a single distorted bounding box.\n >>> bbox_begin, bbox_size, bbox_draw = (\n ... tf.image.stateless_sample_distorted_bounding_box(\n ... tf.shape(image), bounding_boxes=bbox, seed=seed))\n >>> # Employ the bounding box to distort the image.\n >>> tf.slice(image, bbox_begin, bbox_size)\n <tf.Tensor: shape=(2, 2, 1), dtype=int64, numpy=\n array([[[1],\n [2]],\n [[4],\n [5]]])>\n >>> # Draw the bounding box in an image summary.\n >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n >>> tf.image.draw_bounding_boxes(\n ... tf.expand_dims(tf.cast(image, tf.float32),0), bbox_draw, colors)\n <tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy=\n array([[[[1.],\n [1.],\n [3.]],\n [[1.],\n [1.],\n [6.]],\n [[7.],\n [8.],\n [9.]]]], dtype=float32)>\n\n Note that if no bounding box information is available, setting\n `use_image_if_no_bounding_boxes = true` will assume there is a single implicit\n bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is\n false and no bounding boxes are supplied, an error is raised.\n\n Args:\n image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`,\n `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`.\n bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]`\n describing the N bounding boxes associated with the image.\n seed: A shape [2] Tensor, the seed to the random number generator. Must have\n dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)\n min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The\n cropped area of the image must contain at least this fraction of any\n bounding box supplied. The value of this parameter should be non-negative.\n In the case of 0, the cropped area does not need to overlap any of the\n bounding boxes supplied.\n aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75,\n 1.33]`. The cropped area of the image must have an aspect `ratio = width /\n height` within this range.\n area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The\n cropped area of the image must contain a fraction of the supplied image\n within this range.\n max_attempts: An optional `int`. Defaults to `100`. Number of attempts at\n generating a cropped region of the image of the specified constraints.\n After `max_attempts` failures, return the entire image.\n use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`.\n Controls behavior if no bounding boxes supplied. If true, assume an\n implicit bounding box covering the whole input. If false, raise an error.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (begin, size, bboxes).\n\n begin: A `Tensor`. Has the same type as `image_size`. 
1-D, containing\n `[offset_height, offset_width, 0]`. Provide as input to\n `tf.slice`.\n size: A `Tensor`. Has the same type as `image_size`. 1-D, containing\n `[target_height, target_width, -1]`. Provide as input to\n `tf.slice`.\n bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing\n the distorted bounding box.\n Provide as input to `tf.image.draw_bounding_boxes`.\n \"\"\"\n with ops.name_scope(name, 'stateless_sample_distorted_bounding_box'):\n return gen_image_ops.stateless_sample_distorted_bounding_box(\n image_size=image_size,\n bounding_boxes=bounding_boxes,\n seed=seed,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes,\n name=name)\n\n\n@tf_export(v1=['image.sample_distorted_bounding_box'])\[email protected]_dispatch_support\[email protected](\n date=None,\n instructions='`seed2` arg is deprecated. '\n 'Use sample_distorted_bounding_box_v2 instead.')\ndef sample_distorted_bounding_box(image_size,\n bounding_boxes,\n seed=None,\n seed2=None,\n min_object_covered=0.1,\n aspect_ratio_range=None,\n area_range=None,\n max_attempts=None,\n use_image_if_no_bounding_boxes=None,\n name=None):\n \"\"\"Generate a single randomly distorted bounding box for an image.\n\n Bounding box annotations are often supplied in addition to ground-truth labels\n in image recognition or object localization tasks. A common technique for\n training such a system is to randomly distort an image while preserving\n its content, i.e. *data augmentation*. This Op outputs a randomly distorted\n localization of an object, i.e. bounding box, given an `image_size`,\n `bounding_boxes` and a series of constraints.\n\n The output of this Op is a single bounding box that may be used to crop the\n original image. The output is returned as 3 tensors: `begin`, `size` and\n `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the\n image. The latter may be supplied to `tf.image.draw_bounding_boxes` to\n visualize what the bounding box looks like.\n\n Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`.\n The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width\n and height of the underlying image.\n\n For example,\n\n ```python\n # Generate a single distorted bounding box.\n begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=bounding_boxes,\n min_object_covered=0.1)\n\n # Draw the bounding box in an image summary.\n image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),\n bbox_for_draw)\n tf.compat.v1.summary.image('images_with_box', image_with_box)\n\n # Employ the bounding box to distort the image.\n distorted_image = tf.slice(image, begin, size)\n ```\n\n Note that if no bounding box information is available, setting\n `use_image_if_no_bounding_boxes = True` will assume there is a single implicit\n bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is\n false and no bounding boxes are supplied, an error is raised.\n\n Args:\n image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`,\n `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`.\n bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]`\n describing the N bounding boxes associated with the image.\n seed: An optional `int`. Defaults to `0`. 
If either `seed` or `seed2` are\n set to non-zero, the random number generator is seeded by the given\n `seed`. Otherwise, it is seeded by a random seed.\n seed2: An optional `int`. Defaults to `0`. A second seed to avoid seed\n collision.\n min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The\n cropped area of the image must contain at least this fraction of any\n bounding box supplied. The value of this parameter should be non-negative.\n In the case of 0, the cropped area does not need to overlap any of the\n bounding boxes supplied.\n aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75,\n 1.33]`. The cropped area of the image must have an aspect ratio = width /\n height within this range.\n area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The\n cropped area of the image must contain a fraction of the supplied image\n within this range.\n max_attempts: An optional `int`. Defaults to `100`. Number of attempts at\n generating a cropped region of the image of the specified constraints.\n After `max_attempts` failures, return the entire image.\n use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`.\n Controls behavior if no bounding boxes supplied. If true, assume an\n implicit bounding box covering the whole input. If false, raise an error.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (begin, size, bboxes).\n\n begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing\n `[offset_height, offset_width, 0]`. Provide as input to\n `tf.slice`.\n size: A `Tensor`. Has the same type as `image_size`. 1-D, containing\n `[target_height, target_width, -1]`. Provide as input to\n `tf.slice`.\n bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing\n the distorted bounding box.\n Provide as input to `tf.image.draw_bounding_boxes`.\n \"\"\"\n with ops.name_scope(name, 'sample_distorted_bounding_box'):\n return gen_image_ops.sample_distorted_bounding_box_v2(\n image_size,\n bounding_boxes,\n seed=seed,\n seed2=seed2,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes,\n name=name)\n\n\n@tf_export('image.non_max_suppression')\[email protected]_dispatch_support\ndef non_max_suppression(boxes,\n scores,\n max_output_size,\n iou_threshold=0.5,\n score_threshold=float('-inf'),\n name=None):\n \"\"\"Greedily selects a subset of bounding boxes in descending order of score.\n\n Prunes away boxes that have high intersection-over-union (IOU) overlap\n with previously selected boxes. Bounding boxes are supplied as\n `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any\n diagonal pair of box corners and the coordinates can be provided as normalized\n (i.e., lying in the interval `[0, 1]`) or absolute. Note that this algorithm\n is agnostic to where the origin is in the coordinate system. Note that this\n algorithm is invariant to orthogonal transformations and translations\n of the coordinate system; thus translations or reflections of the coordinate\n system result in the same boxes being selected by the algorithm.\n The output of this operation is a set of integers indexing into the input\n collection of bounding boxes representing the selected boxes. The bounding\n box coordinates corresponding to the selected indices can then be obtained\n using the `tf.gather` operation. 
For example:\n ```python\n selected_indices = tf.image.non_max_suppression(\n boxes, scores, max_output_size, iou_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)\n ```\n\n Args:\n boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.\n scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single\n score corresponding to each box (each row of boxes).\n max_output_size: A scalar integer `Tensor` representing the maximum number\n of boxes to be selected by non-max suppression.\n iou_threshold: A float representing the threshold for deciding whether boxes\n overlap too much with respect to IOU.\n score_threshold: A float representing the threshold for deciding when to\n remove boxes based on score.\n name: A name for the operation (optional).\n\n Returns:\n selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the\n selected indices from the boxes tensor, where `M <= max_output_size`.\n \"\"\"\n with ops.name_scope(name, 'non_max_suppression'):\n iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold')\n score_threshold = ops.convert_to_tensor(\n score_threshold, name='score_threshold')\n return gen_image_ops.non_max_suppression_v3(boxes, scores, max_output_size,\n iou_threshold, score_threshold)\n\n\n@tf_export('image.non_max_suppression_with_scores')\[email protected]_dispatch_support\ndef non_max_suppression_with_scores(boxes,\n scores,\n max_output_size,\n iou_threshold=0.5,\n score_threshold=float('-inf'),\n soft_nms_sigma=0.0,\n name=None):\n \"\"\"Greedily selects a subset of bounding boxes in descending order of score.\n\n Prunes away boxes that have high intersection-over-union (IOU) overlap\n with previously selected boxes. Bounding boxes are supplied as\n `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any\n diagonal pair of box corners and the coordinates can be provided as normalized\n (i.e., lying in the interval `[0, 1]`) or absolute. Note that this algorithm\n is agnostic to where the origin is in the coordinate system. Note that this\n algorithm is invariant to orthogonal transformations and translations\n of the coordinate system; thus translations or reflections of the coordinate\n system result in the same boxes being selected by the algorithm.\n The output of this operation is a set of integers indexing into the input\n collection of bounding boxes representing the selected boxes. The bounding\n box coordinates corresponding to the selected indices can then be obtained\n using the `tf.gather` operation. For example:\n ```python\n selected_indices, selected_scores = tf.image.non_max_suppression_with_scores(\n boxes, scores, max_output_size, iou_threshold=1.0, score_threshold=0.1,\n soft_nms_sigma=0.5)\n selected_boxes = tf.gather(boxes, selected_indices)\n ```\n\n This function generalizes the `tf.image.non_max_suppression` op by also\n supporting a Soft-NMS (with Gaussian weighting) mode (c.f.\n Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score\n of other overlapping boxes instead of directly causing them to be pruned.\n Consequently, in contrast to `tf.image.non_max_suppression`,\n `tf.image.non_max_suppression_with_scores` returns the new scores of each\n input box in the second output, `selected_scores`.\n\n To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be\n larger than 0. 
When `soft_nms_sigma` equals 0, the behavior of\n `tf.image.non_max_suppression_with_scores` is identical to that of\n `tf.image.non_max_suppression` (except for the extra output) both in function\n and in running time.\n\n Args:\n boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.\n scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single\n score corresponding to each box (each row of boxes).\n max_output_size: A scalar integer `Tensor` representing the maximum number\n of boxes to be selected by non-max suppression.\n iou_threshold: A float representing the threshold for deciding whether boxes\n overlap too much with respect to IOU.\n score_threshold: A float representing the threshold for deciding when to\n remove boxes based on score.\n soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter;\n see Bodla et al (https://arxiv.org/abs/1704.04503). When\n `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard)\n NMS.\n name: A name for the operation (optional).\n\n Returns:\n selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the\n selected indices from the boxes tensor, where `M <= max_output_size`.\n selected_scores: A 1-D float tensor of shape `[M]` representing the\n corresponding scores for each selected box, where `M <= max_output_size`.\n Scores only differ from corresponding input scores when using Soft NMS\n (i.e. when `soft_nms_sigma>0`)\n \"\"\"\n with ops.name_scope(name, 'non_max_suppression_with_scores'):\n iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold')\n score_threshold = ops.convert_to_tensor(\n score_threshold, name='score_threshold')\n soft_nms_sigma = ops.convert_to_tensor(\n soft_nms_sigma, name='soft_nms_sigma')\n (selected_indices, selected_scores,\n _) = gen_image_ops.non_max_suppression_v5(\n boxes,\n scores,\n max_output_size,\n iou_threshold,\n score_threshold,\n soft_nms_sigma,\n pad_to_max_output_size=False)\n return selected_indices, selected_scores\n\n\n@tf_export('image.non_max_suppression_overlaps')\[email protected]_dispatch_support\ndef non_max_suppression_with_overlaps(overlaps,\n scores,\n max_output_size,\n overlap_threshold=0.5,\n score_threshold=float('-inf'),\n name=None):\n \"\"\"Greedily selects a subset of bounding boxes in descending order of score.\n\n Prunes away boxes that have high overlap with previously selected boxes.\n Overlap values are supplied as an N-by-N square matrix.\n The output of this operation is a set of integers indexing into the input\n collection of bounding boxes representing the selected boxes. The bounding\n box coordinates corresponding to the selected indices can then be obtained\n using the `tf.gather` operation. 
For example:\n ```python\n selected_indices = tf.image.non_max_suppression_overlaps(\n overlaps, scores, max_output_size, iou_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)\n ```\n\n Args:\n overlaps: A 2-D float `Tensor` of shape `[num_boxes, num_boxes]`.\n scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single\n score corresponding to each box (each row of boxes).\n max_output_size: A scalar integer `Tensor` representing the maximum number\n of boxes to be selected by non-max suppression.\n overlap_threshold: A float representing the threshold for deciding whether\n boxes overlap too much with respect to the provided overlap values.\n score_threshold: A float representing the threshold for deciding when to\n remove boxes based on score.\n name: A name for the operation (optional).\n\n Returns:\n selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the\n selected indices from the overlaps tensor, where `M <= max_output_size`.\n \"\"\"\n with ops.name_scope(name, 'non_max_suppression_overlaps'):\n overlap_threshold = ops.convert_to_tensor(\n overlap_threshold, name='overlap_threshold')\n # pylint: disable=protected-access\n return gen_image_ops.non_max_suppression_with_overlaps(\n overlaps, scores, max_output_size, overlap_threshold, score_threshold)\n # pylint: enable=protected-access\n\n\n_rgb_to_yiq_kernel = [[0.299, 0.59590059, 0.2115],\n [0.587, -0.27455667, -0.52273617],\n [0.114, -0.32134392, 0.31119955]]\n\n\n@tf_export('image.rgb_to_yiq')\[email protected]_dispatch_support\ndef rgb_to_yiq(images):\n \"\"\"Converts one or more images from RGB to YIQ.\n\n Outputs a tensor of the same shape as the `images` tensor, containing the YIQ\n value of the pixels.\n The output is only well defined if the value in images are in [0,1].\n\n Usage Example:\n\n >>> x = tf.constant([[[1.0, 2.0, 3.0]]])\n >>> tf.image.rgb_to_yiq(x)\n <tf.Tensor: shape=(1, 1, 3), dtype=float32,\n numpy=array([[[ 1.815 , -0.91724455, 0.09962624]]], dtype=float32)>\n\n Args:\n images: 2-D or higher rank. Image data to convert. Last dimension must be\n size 3.\n\n Returns:\n images: tensor with the same shape as `images`.\n \"\"\"\n images = ops.convert_to_tensor(images, name='images')\n kernel = ops.convert_to_tensor(\n _rgb_to_yiq_kernel, dtype=images.dtype, name='kernel')\n ndims = images.get_shape().ndims\n return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])\n\n\n_yiq_to_rgb_kernel = [[1, 1, 1], [0.95598634, -0.27201283, -1.10674021],\n [0.6208248, -0.64720424, 1.70423049]]\n\n\n@tf_export('image.yiq_to_rgb')\[email protected]_dispatch_support\ndef yiq_to_rgb(images):\n \"\"\"Converts one or more images from YIQ to RGB.\n\n Outputs a tensor of the same shape as the `images` tensor, containing the RGB\n value of the pixels.\n The output is only well defined if the Y value in images are in [0,1],\n I value are in [-0.5957,0.5957] and Q value are in [-0.5226,0.5226].\n\n Args:\n images: 2-D or higher rank. Image data to convert. 
\n Args:\n images: 2-D or higher rank. Image data to convert. Last dimension must be\n size 3.\n\n Returns:\n images: tensor with the same shape as `images`.\n \"\"\"\n images = ops.convert_to_tensor(images, name='images')\n kernel = ops.convert_to_tensor(\n _yiq_to_rgb_kernel, dtype=images.dtype, name='kernel')\n ndims = images.get_shape().ndims\n return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])\n\n\n_rgb_to_yuv_kernel = [[0.299, -0.14714119, 0.61497538],\n [0.587, -0.28886916, -0.51496512],\n [0.114, 0.43601035, -0.10001026]]\n\n\n@tf_export('image.rgb_to_yuv')\[email protected]_dispatch_support\ndef rgb_to_yuv(images):\n \"\"\"Converts one or more images from RGB to YUV.\n\n Outputs a tensor of the same shape as the `images` tensor, containing the YUV\n value of the pixels.\n The output is only well defined if the values in `images` are in [0, 1].\n There are two common ways of representing an image: with pixel values in the\n [0, 255] range or in the [0, 1] (float) range. Users need to convert the\n input image into a float [0, 1] range before calling this function.\n\n Args:\n images: 2-D or higher rank. Image data to convert. Last dimension must be\n size 3.\n\n Returns:\n images: tensor with the same shape as `images`.\n \"\"\"\n images = ops.convert_to_tensor(images, name='images')\n kernel = ops.convert_to_tensor(\n _rgb_to_yuv_kernel, dtype=images.dtype, name='kernel')\n ndims = images.get_shape().ndims\n return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])\n\n\n_yuv_to_rgb_kernel = [[1, 1, 1], [0, -0.394642334, 2.03206185],\n [1.13988303, -0.58062185, 0]]\n\n\n@tf_export('image.yuv_to_rgb')\[email protected]_dispatch_support\ndef yuv_to_rgb(images):\n \"\"\"Converts one or more images from YUV to RGB.\n\n Outputs a tensor of the same shape as the `images` tensor, containing the RGB\n value of the pixels.\n The output is only well defined if the Y values in `images` are in [0, 1] and\n the U and V values are in [-0.5, 0.5].\n\n As per the above description, you need to scale your YUV images if their\n pixel values are not in the required range. The example below illustrates\n preprocessing of each channel of images before feeding them to `yuv_to_rgb`.\n\n ```python\n yuv_images = tf.random.uniform(shape=[100, 64, 64, 3], maxval=255)\n last_dimension_axis = len(yuv_images.shape) - 1\n yuv_tensor_images = tf.truediv(\n tf.subtract(\n yuv_images,\n tf.reduce_min(yuv_images)\n ),\n tf.subtract(\n tf.reduce_max(yuv_images),\n tf.reduce_min(yuv_images)\n )\n )\n y, u, v = tf.split(yuv_tensor_images, 3, axis=last_dimension_axis)\n target_uv_min, target_uv_max = -0.5, 0.5\n u = u * (target_uv_max - target_uv_min) + target_uv_min\n v = v * (target_uv_max - target_uv_min) + target_uv_min\n preprocessed_yuv_images = tf.concat([y, u, v], axis=last_dimension_axis)\n rgb_tensor_images = tf.image.yuv_to_rgb(preprocessed_yuv_images)\n ```\n\n Args:\n images: 2-D or higher rank. Image data to convert. 
Last dimension must be\n size 3.\n\n Returns:\n images: tensor with the same shape as `images`.\n \"\"\"\n images = ops.convert_to_tensor(images, name='images')\n kernel = ops.convert_to_tensor(\n _yuv_to_rgb_kernel, dtype=images.dtype, name='kernel')\n ndims = images.get_shape().ndims\n return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])\n\n\ndef _verify_compatible_image_shapes(img1, img2):\n \"\"\"Checks if two image tensors are compatible for applying SSIM or PSNR.\n\n This function checks if two sets of images have ranks at least 3, and if the\n last three dimensions match.\n\n Args:\n img1: Tensor containing the first image batch.\n img2: Tensor containing the second image batch.\n\n Returns:\n A tuple containing: the first tensor shape, the second tensor shape, and a\n list of control_flow_ops.Assert() ops implementing the checks.\n\n Raises:\n ValueError: When static shape check fails.\n \"\"\"\n shape1 = img1.get_shape().with_rank_at_least(3)\n shape2 = img2.get_shape().with_rank_at_least(3)\n shape1[-3:].assert_is_compatible_with(shape2[-3:])\n\n if shape1.ndims is not None and shape2.ndims is not None:\n for dim1, dim2 in zip(\n reversed(shape1.dims[:-3]), reversed(shape2.dims[:-3])):\n if not (dim1 == 1 or dim2 == 1 or dim1.is_compatible_with(dim2)):\n raise ValueError('Two images are not compatible: %s and %s' %\n (shape1, shape2))\n\n # Now assign shape tensors.\n shape1, shape2 = array_ops.shape_n([img1, img2])\n\n # TODO(sjhwang): Check if shape1[:-3] and shape2[:-3] are broadcastable.\n checks = []\n checks.append(\n control_flow_ops.Assert(\n math_ops.greater_equal(array_ops.size(shape1), 3), [shape1, shape2],\n summarize=10))\n checks.append(\n control_flow_ops.Assert(\n math_ops.reduce_all(math_ops.equal(shape1[-3:], shape2[-3:])),\n [shape1, shape2],\n summarize=10))\n return shape1, shape2, checks\n\n\n@tf_export('image.psnr')\[email protected]_dispatch_support\ndef psnr(a, b, max_val, name=None):\n \"\"\"Returns the Peak Signal-to-Noise Ratio between a and b.\n\n This is intended to be used on signals (or images). Produces a PSNR value for\n each image in batch.\n\n The last three dimensions of input are expected to be [height, width, depth].\n\n Example:\n\n ```python\n # Read images from file.\n im1 = tf.image.decode_png(tf.io.read_file('path/to/im1.png'))\n im2 = tf.image.decode_png(tf.io.read_file('path/to/im2.png'))\n # Compute PSNR over tf.uint8 Tensors.\n psnr1 = tf.image.psnr(im1, im2, max_val=255)\n\n # Compute PSNR over tf.float32 Tensors.\n im1 = tf.image.convert_image_dtype(im1, tf.float32)\n im2 = tf.image.convert_image_dtype(im2, tf.float32)\n psnr2 = tf.image.psnr(im1, im2, max_val=1.0)\n # psnr1 and psnr2 both have type tf.float32 and are almost equal.\n ```\n\n Arguments:\n a: First set of images.\n b: Second set of images.\n max_val: The dynamic range of the images (i.e., the difference between the\n maximum and minimum allowed values).\n name: Namespace to embed the computation in.\n\n Returns:\n The scalar PSNR between a and b. The returned tensor has type `tf.float32`\n and shape [batch_size, 1].\n \"\"\"\n with ops.name_scope(name, 'PSNR', [a, b]):\n # Need to convert the images to float32. 
Scale max_val accordingly so that\n # PSNR is computed correctly.\n max_val = math_ops.cast(max_val, a.dtype)\n max_val = convert_image_dtype(max_val, dtypes.float32)\n a = convert_image_dtype(a, dtypes.float32)\n b = convert_image_dtype(b, dtypes.float32)\n mse = math_ops.reduce_mean(math_ops.squared_difference(a, b), [-3, -2, -1])\n psnr_val = math_ops.subtract(\n 20 * math_ops.log(max_val) / math_ops.log(10.0),\n np.float32(10 / np.log(10)) * math_ops.log(mse),\n name='psnr')\n\n _, _, checks = _verify_compatible_image_shapes(a, b)\n with ops.control_dependencies(checks):\n return array_ops.identity(psnr_val)\n\n\ndef _ssim_helper(x, y, reducer, max_val, compensation=1.0, k1=0.01, k2=0.03):\n r\"\"\"Helper function for computing SSIM.\n\n SSIM estimates covariances with weighted sums. The default parameters\n use a biased estimate of the covariance:\n Suppose `reducer` is a weighted sum, then the mean estimators are\n \\mu_x = \\sum_i w_i x_i,\n \\mu_y = \\sum_i w_i y_i,\n where w_i's are the weighted-sum weights, and covariance estimator is\n cov_{xy} = \\sum_i w_i (x_i - \\mu_x) (y_i - \\mu_y)\n with assumption \\sum_i w_i = 1. This covariance estimator is biased, since\n E[cov_{xy}] = (1 - \\sum_i w_i ^ 2) Cov(X, Y).\n For SSIM measure with unbiased covariance estimators, pass as `compensation`\n argument (1 - \\sum_i w_i ^ 2).\n\n Arguments:\n x: First set of images.\n y: Second set of images.\n reducer: Function that computes 'local' averages from the set of images. For\n non-convolutional version, this is usually tf.reduce_mean(x, [1, 2]), and\n for convolutional version, this is usually tf.nn.avg_pool2d or\n tf.nn.conv2d with weighted-sum kernel.\n max_val: The dynamic range (i.e., the difference between the maximum\n possible allowed value and the minimum allowed value).\n compensation: Compensation factor. 
See above.\n k1: Default value 0.01\n k2: Default value 0.03 (SSIM is less sensitive to K2 for lower values, so\n it would be better if we took the values in the range of 0 < K2 < 0.4).\n\n Returns:\n A pair containing the luminance measure, and the contrast-structure measure.\n \"\"\"\n\n c1 = (k1 * max_val)**2\n c2 = (k2 * max_val)**2\n\n # SSIM luminance measure is\n # (2 * mu_x * mu_y + c1) / (mu_x ** 2 + mu_y ** 2 + c1).\n mean0 = reducer(x)\n mean1 = reducer(y)\n num0 = mean0 * mean1 * 2.0\n den0 = math_ops.square(mean0) + math_ops.square(mean1)\n luminance = (num0 + c1) / (den0 + c1)\n\n # SSIM contrast-structure measure is\n # (2 * cov_{xy} + c2) / (cov_{xx} + cov_{yy} + c2).\n # Note that `reducer` is a weighted sum with weight w_k, \\sum_i w_i = 1, then\n # cov_{xy} = \\sum_i w_i (x_i - \\mu_x) (y_i - \\mu_y)\n # = \\sum_i w_i x_i y_i - (\\sum_i w_i x_i) (\\sum_j w_j y_j).\n num1 = reducer(x * y) * 2.0\n den1 = reducer(math_ops.square(x) + math_ops.square(y))\n c2 *= compensation\n cs = (num1 - num0 + c2) / (den1 - den0 + c2)\n\n # SSIM score is the product of the luminance and contrast-structure measures.\n return luminance, cs\n\n\ndef _fspecial_gauss(size, sigma):\n \"\"\"Function to mimic the 'fspecial' gaussian MATLAB function.\"\"\"\n size = ops.convert_to_tensor(size, dtypes.int32)\n sigma = ops.convert_to_tensor(sigma)\n\n coords = math_ops.cast(math_ops.range(size), sigma.dtype)\n coords -= math_ops.cast(size - 1, sigma.dtype) / 2.0\n\n g = math_ops.square(coords)\n g *= -0.5 / math_ops.square(sigma)\n\n g = array_ops.reshape(g, shape=[1, -1]) + array_ops.reshape(g, shape=[-1, 1])\n g = array_ops.reshape(g, shape=[1, -1]) # For tf.nn.softmax().\n g = nn_ops.softmax(g)\n return array_ops.reshape(g, shape=[size, size, 1, 1])\n\n\ndef _ssim_per_channel(img1,\n img2,\n max_val=1.0,\n filter_size=11,\n filter_sigma=1.5,\n k1=0.01,\n k2=0.03):\n \"\"\"Computes SSIM index between img1 and img2 per color channel.\n\n This function matches the standard SSIM implementation from:\n Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image\n quality assessment: from error visibility to structural similarity. IEEE\n transactions on image processing.\n\n Details:\n - 11x11 Gaussian filter of width 1.5 is used.\n - k1 = 0.01, k2 = 0.03 as in the original paper.\n\n Args:\n img1: First image batch.\n img2: Second image batch.\n max_val: The dynamic range of the images (i.e., the difference between the\n maximum and minimum allowed values).\n filter_size: Default value 11 (size of gaussian filter).\n filter_sigma: Default value 1.5 (width of gaussian filter).\n k1: Default value 0.01\n k2: Default value 0.03 (SSIM is less sensitive to K2 for lower values, so\n it would be better if we took the values in the range of 0 < K2 < 0.4).\n\n Returns:\n A pair of tensors containing the channel-wise SSIM and contrast-structure\n values. 
The shape is [..., channels].\n \"\"\"\n filter_size = constant_op.constant(filter_size, dtype=dtypes.int32)\n filter_sigma = constant_op.constant(filter_sigma, dtype=img1.dtype)\n\n shape1, shape2 = array_ops.shape_n([img1, img2])\n checks = [\n control_flow_ops.Assert(\n math_ops.reduce_all(\n math_ops.greater_equal(shape1[-3:-1], filter_size)),\n [shape1, filter_size],\n summarize=8),\n control_flow_ops.Assert(\n math_ops.reduce_all(\n math_ops.greater_equal(shape2[-3:-1], filter_size)),\n [shape2, filter_size],\n summarize=8)\n ]\n\n # Enforce the check to run before computation.\n with ops.control_dependencies(checks):\n img1 = array_ops.identity(img1)\n\n # TODO(sjhwang): Try to cache kernels and compensation factor.\n kernel = _fspecial_gauss(filter_size, filter_sigma)\n kernel = array_ops.tile(kernel, multiples=[1, 1, shape1[-1], 1])\n\n # The correct compensation factor is `1.0 - tf.reduce_sum(tf.square(kernel))`,\n # but to match MATLAB implementation of MS-SSIM, we use 1.0 instead.\n compensation = 1.0\n\n # TODO(sjhwang): Try FFT.\n # TODO(sjhwang): Gaussian kernel is separable in space. Consider applying\n # 1-by-n and n-by-1 Gaussian filters instead of an n-by-n filter.\n def reducer(x):\n shape = array_ops.shape(x)\n x = array_ops.reshape(x, shape=array_ops.concat([[-1], shape[-3:]], 0))\n y = nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')\n return array_ops.reshape(\n y, array_ops.concat([shape[:-3], array_ops.shape(y)[1:]], 0))\n\n luminance, cs = _ssim_helper(img1, img2, reducer, max_val, compensation, k1,\n k2)\n\n # Average over the second and the third from the last: height, width.\n axes = constant_op.constant([-3, -2], dtype=dtypes.int32)\n ssim_val = math_ops.reduce_mean(luminance * cs, axes)\n cs = math_ops.reduce_mean(cs, axes)\n return ssim_val, cs\n\n\n@tf_export('image.ssim')\[email protected]_dispatch_support\ndef ssim(img1,\n img2,\n max_val,\n filter_size=11,\n filter_sigma=1.5,\n k1=0.01,\n k2=0.03):\n \"\"\"Computes SSIM index between img1 and img2.\n\n This function is based on the standard SSIM implementation from:\n Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image\n quality assessment: from error visibility to structural similarity. IEEE\n transactions on image processing.\n\n Note: The true SSIM is only defined on grayscale. This function does not\n perform any colorspace transform. 
(If the input is already YUV, then it will\n compute YUV SSIM average.)\n\n Details:\n - 11x11 Gaussian filter of width 1.5 is used.\n - k1 = 0.01, k2 = 0.03 as in the original paper.\n\n The image sizes must be at least 11x11 because of the filter size.\n\n Example:\n\n ```python\n # Read images from file.\n im1 = tf.image.decode_png(tf.io.read_file('path/to/im1.png'))\n im2 = tf.image.decode_png(tf.io.read_file('path/to/im2.png'))\n # Compute SSIM over tf.uint8 Tensors.\n ssim1 = tf.image.ssim(im1, im2, max_val=255, filter_size=11,\n filter_sigma=1.5, k1=0.01, k2=0.03)\n\n # Compute SSIM over tf.float32 Tensors.\n im1 = tf.image.convert_image_dtype(im1, tf.float32)\n im2 = tf.image.convert_image_dtype(im2, tf.float32)\n ssim2 = tf.image.ssim(im1, im2, max_val=1.0, filter_size=11,\n filter_sigma=1.5, k1=0.01, k2=0.03)\n # ssim1 and ssim2 both have type tf.float32 and are almost equal.\n ```\n\n Args:\n img1: First image batch.\n img2: Second image batch.\n max_val: The dynamic range of the images (i.e., the difference between the\n maximum and minimum allowed values).\n filter_size: Default value 11 (size of gaussian filter).\n filter_sigma: Default value 1.5 (width of gaussian filter).\n k1: Default value 0.01\n k2: Default value 0.03 (SSIM is less sensitive to K2 for lower values, so\n it would be better if we took the values in the range of 0 < K2 < 0.4).\n\n Returns:\n A tensor containing an SSIM value for each image in batch. Returned SSIM\n values are in range (-1, 1], when pixel values are non-negative. Returns\n a tensor with shape: broadcast(img1.shape[:-3], img2.shape[:-3]).\n \"\"\"\n with ops.name_scope(None, 'SSIM', [img1, img2]):\n # Convert to tensor if needed.\n img1 = ops.convert_to_tensor(img1, name='img1')\n img2 = ops.convert_to_tensor(img2, name='img2')\n # Shape checking.\n _, _, checks = _verify_compatible_image_shapes(img1, img2)\n with ops.control_dependencies(checks):\n img1 = array_ops.identity(img1)\n\n # Need to convert the images to float32. Scale max_val accordingly so that\n # SSIM is computed correctly.\n max_val = math_ops.cast(max_val, img1.dtype)\n max_val = convert_image_dtype(max_val, dtypes.float32)\n img1 = convert_image_dtype(img1, dtypes.float32)\n img2 = convert_image_dtype(img2, dtypes.float32)\n ssim_per_channel, _ = _ssim_per_channel(img1, img2, max_val, filter_size,\n filter_sigma, k1, k2)\n # Compute average over color channels.\n return math_ops.reduce_mean(ssim_per_channel, [-1])\n\n\n# Default values obtained by Wang et al.\n_MSSSIM_WEIGHTS = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333)\n\n\n@tf_export('image.ssim_multiscale')\[email protected]_dispatch_support\ndef ssim_multiscale(img1,\n img2,\n max_val,\n power_factors=_MSSSIM_WEIGHTS,\n filter_size=11,\n filter_sigma=1.5,\n k1=0.01,\n k2=0.03):\n \"\"\"Computes the MS-SSIM between img1 and img2.\n\n This function assumes that `img1` and `img2` are image batches, i.e. the last\n three dimensions are [height, width, channels].\n\n Note: The true SSIM is only defined on grayscale. This function does not\n perform any colorspace transform. (If the input is already YUV, then it will\n compute YUV SSIM average.)\n\n Original paper: Wang, Zhou, Eero P. Simoncelli, and Alan C. Bovik. \"Multiscale\n structural similarity for image quality assessment.\" Signals, Systems and\n Computers, 2004.\n\n Arguments:\n img1: First image batch.\n img2: Second image batch. 
Must have the same rank as img1.\n max_val: The dynamic range of the images (i.e., the difference between the\n maximum and minimum allowed values).\n power_factors: Iterable of weights for each of the scales. The number of\n scales used is the length of the list. Index 0 is the unscaled\n resolution's weight and each increasing scale corresponds to the image\n being downsampled by 2. Defaults to (0.0448, 0.2856, 0.3001, 0.2363,\n 0.1333), which are the values obtained in the original paper.\n filter_size: Default value 11 (size of gaussian filter).\n filter_sigma: Default value 1.5 (width of gaussian filter).\n k1: Default value 0.01\n k2: Default value 0.03 (SSIM is less sensitive to K2 for lower values, so\n it would be better if we took the values in the range of 0 < K2 < 0.4).\n\n Returns:\n A tensor containing an MS-SSIM value for each image in batch. The values\n are in range [0, 1]. Returns a tensor with shape:\n broadcast(img1.shape[:-3], img2.shape[:-3]).\n \"\"\"\n with ops.name_scope(None, 'MS-SSIM', [img1, img2]):\n # Convert to tensor if needed.\n img1 = ops.convert_to_tensor(img1, name='img1')\n img2 = ops.convert_to_tensor(img2, name='img2')\n # Shape checking.\n shape1, shape2, checks = _verify_compatible_image_shapes(img1, img2)\n with ops.control_dependencies(checks):\n img1 = array_ops.identity(img1)\n\n # Need to convert the images to float32. Scale max_val accordingly so that\n # SSIM is computed correctly.\n max_val = math_ops.cast(max_val, img1.dtype)\n max_val = convert_image_dtype(max_val, dtypes.float32)\n img1 = convert_image_dtype(img1, dtypes.float32)\n img2 = convert_image_dtype(img2, dtypes.float32)\n\n imgs = [img1, img2]\n shapes = [shape1, shape2]\n\n # img1 and img2 are assumed to be a (multi-dimensional) batch of\n # 3-dimensional images (height, width, channels). `heads` contain the batch\n # dimensions, and `tails` contain the image dimensions.\n heads = [s[:-3] for s in shapes]\n tails = [s[-3:] for s in shapes]\n\n divisor = [1, 2, 2, 1]\n divisor_tensor = constant_op.constant(divisor[1:], dtype=dtypes.int32)\n\n def do_pad(images, remainder):\n padding = array_ops.expand_dims(remainder, -1)\n padding = array_ops.pad(padding, [[1, 0], [1, 0]])\n return [array_ops.pad(x, padding, mode='SYMMETRIC') for x in images]\n\n mcs = []\n for k in range(len(power_factors)):\n with ops.name_scope(None, 'Scale%d' % k, imgs):\n if k > 0:\n # Avg pool takes rank 4 tensors. Flatten leading dimensions.\n flat_imgs = [\n array_ops.reshape(x, array_ops.concat([[-1], t], 0))\n for x, t in zip(imgs, tails)\n ]\n\n remainder = tails[0] % divisor_tensor\n need_padding = math_ops.reduce_any(math_ops.not_equal(remainder, 0))\n # pylint: disable=cell-var-from-loop\n padded = control_flow_ops.cond(need_padding,\n lambda: do_pad(flat_imgs, remainder),\n lambda: flat_imgs)\n # pylint: enable=cell-var-from-loop\n\n downscaled = [\n nn_ops.avg_pool(\n x, ksize=divisor, strides=divisor, padding='VALID')\n for x in padded\n ]\n tails = [x[1:] for x in array_ops.shape_n(downscaled)]\n imgs = [\n array_ops.reshape(x, array_ops.concat([h, t], 0))\n for x, h, t in zip(downscaled, heads, tails)\n ]\n\n # Overwrite previous ssim value since we only need the last one.\n ssim_per_channel, cs = _ssim_per_channel(\n *imgs,\n max_val=max_val,\n filter_size=filter_size,\n filter_sigma=filter_sigma,\n k1=k1,\n k2=k2)\n mcs.append(nn_ops.relu(cs))\n\n # Remove the cs score for the last scale. In the MS-SSIM calculation,\n # we use the l(p) at the highest scale. 
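l(p) * cs(p) is ssim(p).\n # As a sketch of the computation below:\n #   MS-SSIM = ssim_M(p)^w_M * prod_{j=1..M-1} cs_j(p)^w_j\n # where M = len(power_factors), w_j = power_factors[j-1], and\n # ssim_M(p) = l_M(p) * cs_M(p) is the single-scale SSIM at the last scale.\n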
l(p) * cs(p) is ssim(p).\n mcs.pop() # Remove the cs score for the last scale.\n mcs_and_ssim = array_ops.stack(\n mcs + [nn_ops.relu(ssim_per_channel)], axis=-1)\n # Take weighted geometric mean across the scale axis.\n ms_ssim = math_ops.reduce_prod(\n math_ops.pow(mcs_and_ssim, power_factors), [-1])\n\n return math_ops.reduce_mean(ms_ssim, [-1]) # Avg over color channels.\n\n\n@tf_export('image.image_gradients')\n@dispatch.add_dispatch_support\ndef image_gradients(image):\n \"\"\"Returns image gradients (dy, dx) for each color channel.\n\n Both output tensors have the same shape as the input: [batch_size, h, w,\n d]. The gradient values are organized so that [I(x+1, y) - I(x, y)] is in\n location (x, y). That means that dy will always have zeros in the last row,\n and dx will always have zeros in the last column.\n\n Usage Example:\n ```python\n BATCH_SIZE = 1\n IMAGE_HEIGHT = 5\n IMAGE_WIDTH = 5\n CHANNELS = 1\n image = tf.reshape(tf.range(IMAGE_HEIGHT * IMAGE_WIDTH * CHANNELS,\n delta=1, dtype=tf.float32),\n shape=(BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS))\n dy, dx = tf.image.image_gradients(image)\n print(image[0, :,:,0])\n tf.Tensor(\n [[ 0. 1. 2. 3. 4.]\n [ 5. 6. 7. 8. 9.]\n [10. 11. 12. 13. 14.]\n [15. 16. 17. 18. 19.]\n [20. 21. 22. 23. 24.]], shape=(5, 5), dtype=float32)\n print(dy[0, :,:,0])\n tf.Tensor(\n [[5. 5. 5. 5. 5.]\n [5. 5. 5. 5. 5.]\n [5. 5. 5. 5. 5.]\n [5. 5. 5. 5. 5.]\n [0. 0. 0. 0. 0.]], shape=(5, 5), dtype=float32)\n print(dx[0, :,:,0])\n tf.Tensor(\n [[1. 1. 1. 1. 0.]\n [1. 1. 1. 1. 0.]\n [1. 1. 1. 1. 0.]\n [1. 1. 1. 1. 0.]\n [1. 1. 1. 1. 0.]], shape=(5, 5), dtype=float32)\n ```\n\n Arguments:\n image: Tensor with shape [batch_size, h, w, d].\n\n Returns:\n Pair of tensors (dy, dx) holding the vertical and horizontal image\n gradients (1-step finite difference).\n\n Raises:\n ValueError: If `image` is not a 4D tensor.\n \"\"\"\n if image.get_shape().ndims != 4:\n raise ValueError('image_gradients expects a 4D tensor '\n '[batch_size, h, w, d], not {}.'.format(image.get_shape()))\n image_shape = array_ops.shape(image)\n batch_size, height, width, depth = array_ops.unstack(image_shape)\n dy = image[:, 1:, :, :] - image[:, :-1, :, :]\n dx = image[:, :, 1:, :] - image[:, :, :-1, :]\n\n # Return tensors with same size as original image by concatenating\n # zeros. Place the gradient [I(x+1,y) - I(x,y)] on the base pixel (x, y).\n shape = array_ops.stack([batch_size, 1, width, depth])\n dy = array_ops.concat([dy, array_ops.zeros(shape, image.dtype)], 1)\n dy = array_ops.reshape(dy, image_shape)\n\n shape = array_ops.stack([batch_size, height, 1, depth])\n dx = array_ops.concat([dx, array_ops.zeros(shape, image.dtype)], 2)\n dx = array_ops.reshape(dx, image_shape)\n\n return dy, dx\n\n\n@tf_export('image.sobel_edges')\n@dispatch.add_dispatch_support\ndef sobel_edges(image):\n \"\"\"Returns a tensor holding Sobel edge maps.\n\n Arguments:\n image: Image tensor with shape [batch_size, h, w, d] and type float32 or\n float64. The image(s) must be 2x2 or larger.\n\n Returns:\n Tensor holding edge maps for each channel. 
Returns a tensor with shape\n [batch_size, h, w, d, 2] where the last two dimensions hold [[dy[0], dx[0]],\n [dy[1], dx[1]], ..., [dy[d-1], dx[d-1]]] calculated using the Sobel filter.\n \"\"\"\n # Define vertical and horizontal Sobel filters.\n static_image_shape = image.get_shape()\n image_shape = array_ops.shape(image)\n kernels = [[[-1, -2, -1], [0, 0, 0], [1, 2, 1]],\n [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]]\n num_kernels = len(kernels)\n kernels = np.transpose(np.asarray(kernels), (1, 2, 0))\n kernels = np.expand_dims(kernels, -2)\n kernels_tf = constant_op.constant(kernels, dtype=image.dtype)\n\n kernels_tf = array_ops.tile(\n kernels_tf, [1, 1, image_shape[-1], 1], name='sobel_filters')\n\n # Use depth-wise convolution to calculate edge maps per channel.\n pad_sizes = [[0, 0], [1, 1], [1, 1], [0, 0]]\n padded = array_ops.pad(image, pad_sizes, mode='REFLECT')\n\n # Output tensor has shape [batch_size, h, w, d * num_kernels].\n strides = [1, 1, 1, 1]\n output = nn.depthwise_conv2d(padded, kernels_tf, strides, 'VALID')\n\n # Reshape to [batch_size, h, w, d, num_kernels].\n shape = array_ops.concat([image_shape, [num_kernels]], 0)\n output = array_ops.reshape(output, shape=shape)\n output.set_shape(static_image_shape.concatenate([num_kernels]))\n return output\n\n\ndef resize_bicubic(images,\n size,\n align_corners=False,\n name=None,\n half_pixel_centers=False):\n return gen_image_ops.resize_bicubic(\n images=images,\n size=size,\n align_corners=align_corners,\n half_pixel_centers=half_pixel_centers,\n name=name)\n\n\ndef resize_bilinear(images,\n size,\n align_corners=False,\n name=None,\n half_pixel_centers=False):\n return gen_image_ops.resize_bilinear(\n images=images,\n size=size,\n align_corners=align_corners,\n half_pixel_centers=half_pixel_centers,\n name=name)\n\n\ndef resize_nearest_neighbor(images,\n size,\n align_corners=False,\n name=None,\n half_pixel_centers=False):\n return gen_image_ops.resize_nearest_neighbor(\n images=images,\n size=size,\n align_corners=align_corners,\n half_pixel_centers=half_pixel_centers,\n name=name)\n\n\nresize_area_deprecation = deprecation.deprecated(\n date=None,\n instructions=(\n 'Use `tf.image.resize(...method=ResizeMethod.AREA...)` instead.'))\ntf_export(v1=['image.resize_area'])(\n resize_area_deprecation(\n dispatch.add_dispatch_support(gen_image_ops.resize_area)))\n\nresize_bicubic_deprecation = deprecation.deprecated(\n date=None,\n instructions=(\n 'Use `tf.image.resize(...method=ResizeMethod.BICUBIC...)` instead.'))\ntf_export(v1=['image.resize_bicubic'])(\n dispatch.add_dispatch_support(resize_bicubic_deprecation(resize_bicubic)))\n\nresize_bilinear_deprecation = deprecation.deprecated(\n date=None,\n instructions=(\n 'Use `tf.image.resize(...method=ResizeMethod.BILINEAR...)` instead.'))\ntf_export(v1=['image.resize_bilinear'])(\n dispatch.add_dispatch_support(resize_bilinear_deprecation(resize_bilinear)))\n\nresize_nearest_neighbor_deprecation = deprecation.deprecated(\n date=None,\n instructions=(\n 'Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` '\n 'instead.'))\ntf_export(v1=['image.resize_nearest_neighbor'])(\n dispatch.add_dispatch_support(\n resize_nearest_neighbor_deprecation(resize_nearest_neighbor)))\n\n\n@tf_export('image.crop_and_resize', v1=[])\n@dispatch.add_dispatch_support\ndef crop_and_resize_v2(image,\n boxes,\n box_indices,\n crop_size,\n method='bilinear',\n extrapolation_value=0,\n name=None):\n \"\"\"Extracts crops from the input image tensor and resizes them.\n\n Extracts crops from the 
input image tensor and resizes them using bilinear\n sampling or nearest neighbor sampling (possibly with aspect ratio change) to a\n common output size specified by `crop_size`. This is more general than the\n `crop_to_bounding_box` op which extracts a fixed size slice from the input\n image and does not allow resizing or aspect ratio change.\n\n Returns a tensor with `crops` from the input `image` at positions defined at\n the bounding box locations in `boxes`. The cropped boxes are all resized (with\n bilinear or nearest neighbor interpolation) to a fixed\n `size = [crop_height, crop_width]`. The result is a 4-D tensor\n `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned.\n In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical\n results to using `tf.compat.v1.image.resize_bilinear()` or\n `tf.compat.v1.image.resize_nearest_neighbor()` (depending on the `method`\n argument) with\n `align_corners=True`.\n\n Args:\n image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\n Both `image_height` and `image_width` need to be positive.\n boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\n specifies the coordinates of a box in the `box_indices[i]` image and is\n specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized\n coordinate value of `y` is mapped to the image coordinate at `y *\n (image_height - 1)`, so the `[0, 1]` interval of normalized image\n height is mapped to `[0, image_height - 1]` in image height coordinates.\n We do allow `y1` > `y2`, in which case the sampled crop is an up-down\n flipped version of the original image. The width dimension is treated\n similarly. Normalized coordinates outside the `[0, 1]` range are allowed,\n in which case we use `extrapolation_value` to extrapolate the input image\n values.\n box_indices: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0,\n batch)`. The value of `box_indices[i]` specifies the image that the `i`-th\n box refers to.\n crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`.\n All cropped image patches are resized to this size. The aspect ratio of\n the image content is not preserved. Both `crop_height` and `crop_width`\n need to be positive.\n method: An optional string specifying the sampling method for resizing. It\n can be either `\"bilinear\"` or `\"nearest\"` and defaults to `\"bilinear\"`.\n Currently two sampling methods are supported: Bilinear and Nearest\n Neighbor.\n extrapolation_value: An optional `float`. Defaults to `0`. 
Value used for\n extrapolation, when applicable.\n name: A name for the operation (optional).\n\n Returns:\n A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.\n\n Example:\n\n ```python\n import tensorflow as tf\n BATCH_SIZE = 1\n NUM_BOXES = 5\n IMAGE_HEIGHT = 256\n IMAGE_WIDTH = 256\n CHANNELS = 3\n CROP_SIZE = (24, 24)\n\n image = tf.random.normal(shape=(BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH,\n CHANNELS) )\n boxes = tf.random.uniform(shape=(NUM_BOXES, 4))\n box_indices = tf.random.uniform(shape=(NUM_BOXES,), minval=0,\n maxval=BATCH_SIZE, dtype=tf.int32)\n output = tf.image.crop_and_resize(image, boxes, box_indices, CROP_SIZE)\n output.shape #=> (5, 24, 24, 3)\n ```\n \"\"\"\n return gen_image_ops.crop_and_resize(image, boxes, box_indices, crop_size,\n method, extrapolation_value, name)\n\n\n@tf_export(v1=['image.crop_and_resize'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_args(None,\n 'box_ind is deprecated, use box_indices instead',\n 'box_ind')\ndef crop_and_resize_v1( # pylint: disable=missing-docstring\n image,\n boxes,\n box_ind=None,\n crop_size=None,\n method='bilinear',\n extrapolation_value=0,\n name=None,\n box_indices=None):\n box_ind = deprecation.deprecated_argument_lookup('box_indices', box_indices,\n 'box_ind', box_ind)\n return gen_image_ops.crop_and_resize(image, boxes, box_ind, crop_size, method,\n extrapolation_value, name)\n\n\ncrop_and_resize_v1.__doc__ = gen_image_ops.crop_and_resize.__doc__\n\n\n@tf_export(v1=['image.extract_glimpse'])\n@dispatch.add_dispatch_support\ndef extract_glimpse(\n input, # pylint: disable=redefined-builtin\n size,\n offsets,\n centered=True,\n normalized=True,\n uniform_noise=True,\n name=None):\n \"\"\"Extracts a glimpse from the input tensor.\n\n Returns a set of windows called glimpses extracted at location\n `offsets` from the input tensor. If the windows only partially\n overlap the inputs, the non-overlapping areas will be filled with\n random noise.\n\n The result is a 4-D tensor of shape `[batch_size, glimpse_height,\n glimpse_width, channels]`. The channels and batch dimensions are the\n same as that of the input tensor. The height and width of the output\n windows are specified in the `size` parameter.\n\n The arguments `normalized` and `centered` control how the windows are built:\n\n * If the coordinates are normalized but not centered, 0.0 and 1.0\n correspond to the minimum and maximum of each height and width\n dimension.\n * If the coordinates are both normalized and centered, they range from\n -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper\n left corner, the lower right corner is located at (1.0, 1.0) and the\n center is at (0, 0).\n * If the coordinates are not normalized they are interpreted as\n numbers of pixels.\n\n Usage Example:\n\n >>> x = [[[[0.0],\n ... [1.0],\n ... [2.0]],\n ... [[3.0],\n ... [4.0],\n ... [5.0]],\n ... [[6.0],\n ... [7.0],\n ... [8.0]]]]\n >>> tf.compat.v1.image.extract_glimpse(x, size=(2, 2), offsets=[[1, 1]],\n ... centered=False, normalized=False)\n <tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy=\n array([[[[0.],\n [1.]],\n [[3.],\n [4.]]]], dtype=float32)>\n\n Args:\n input: A `Tensor` of type `float32`. A 4-D float tensor of shape\n `[batch_size, height, width, channels]`.\n size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the\n size of the glimpses to extract. The glimpse height must be specified\n first, followed by the glimpse width.\n offsets: A `Tensor` of type `float32`. 
A 2-D tensor of shape\n `[batch_size, 2]` containing the y, x locations of the center of each\n window.\n centered: An optional `bool`. Defaults to `True`. Indicates if the offset\n coordinates are centered relative to the image, in which case the (0, 0)\n offset is relative to the center of the input images. If false, the (0,0)\n offset corresponds to the upper left corner of the input images.\n normalized: An optional `bool`. Defaults to `True`. Indicates if the offset\n coordinates are normalized.\n uniform_noise: An optional `bool`. Defaults to `True`. Indicates if the\n noise should be generated using a uniform distribution or a Gaussian\n distribution.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32`.\n \"\"\"\n return gen_image_ops.extract_glimpse(\n input=input,\n size=size,\n offsets=offsets,\n centered=centered,\n normalized=normalized,\n uniform_noise=uniform_noise,\n name=name)\n\n\n@tf_export('image.extract_glimpse', v1=[])\n@dispatch.add_dispatch_support\ndef extract_glimpse_v2(\n input, # pylint: disable=redefined-builtin\n size,\n offsets,\n centered=True,\n normalized=True,\n noise='uniform',\n name=None):\n \"\"\"Extracts a glimpse from the input tensor.\n\n Returns a set of windows called glimpses extracted at location\n `offsets` from the input tensor. If the windows only partially\n overlap the inputs, the non-overlapping areas will be filled with\n random noise.\n\n The result is a 4-D tensor of shape `[batch_size, glimpse_height,\n glimpse_width, channels]`. The channels and batch dimensions are the\n same as that of the input tensor. The height and width of the output\n windows are specified in the `size` parameter.\n\n The arguments `normalized` and `centered` control how the windows are built:\n\n * If the coordinates are normalized but not centered, 0.0 and 1.0\n correspond to the minimum and maximum of each height and width\n dimension.\n * If the coordinates are both normalized and centered, they range from\n -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper\n left corner, the lower right corner is located at (1.0, 1.0) and the\n center is at (0, 0).\n * If the coordinates are not normalized they are interpreted as\n numbers of pixels.\n\n Usage Example:\n\n >>> x = [[[[0.0],\n ... [1.0],\n ... [2.0]],\n ... [[3.0],\n ... [4.0],\n ... [5.0]],\n ... [[6.0],\n ... [7.0],\n ... [8.0]]]]\n >>> tf.image.extract_glimpse(x, size=(2, 2), offsets=[[1, 1]],\n ... centered=False, normalized=False)\n <tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy=\n array([[[[4.],\n [5.]],\n [[7.],\n [8.]]]], dtype=float32)>\n\n Args:\n input: A `Tensor` of type `float32`. A 4-D float tensor of shape\n `[batch_size, height, width, channels]`.\n size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the\n size of the glimpses to extract. The glimpse height must be specified\n first, followed by the glimpse width.\n offsets: A `Tensor` of type `float32`. A 2-D tensor of shape\n `[batch_size, 2]` containing the y, x locations of the center of each\n window.\n centered: An optional `bool`. Defaults to `True`. Indicates if the offset\n coordinates are centered relative to the image, in which case the (0, 0)\n offset is relative to the center of the input images. If false, the (0,0)\n offset corresponds to the upper left corner of the input images.\n normalized: An optional `bool`. Defaults to `True`. Indicates if the offset\n coordinates are normalized.\n noise: An optional `string`. 
Defaults to `uniform`. Indicates if the noise\n should be `uniform` (uniform distribution), `gaussian` (gaussian\n distribution), or `zero` (zero padding).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32`.\n \"\"\"\n return gen_image_ops.extract_glimpse_v2(\n input=input,\n size=size,\n offsets=offsets,\n centered=centered,\n normalized=normalized,\n noise=noise,\n uniform_noise=False,\n name=name)\n\n\n@tf_export('image.combined_non_max_suppression')\n@dispatch.add_dispatch_support\ndef combined_non_max_suppression(boxes,\n scores,\n max_output_size_per_class,\n max_total_size,\n iou_threshold=0.5,\n score_threshold=float('-inf'),\n pad_per_class=False,\n clip_boxes=True,\n name=None):\n \"\"\"Greedily selects a subset of bounding boxes in descending order of score.\n\n This operation performs non_max_suppression on the inputs per batch, across\n all classes.\n Prunes away boxes that have high intersection-over-union (IOU) overlap\n with previously selected boxes. Bounding boxes are supplied as\n [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\n diagonal pair of box corners and the coordinates can be provided as normalized\n (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\n is agnostic to where the origin is in the coordinate system. Also note that\n this algorithm is invariant to orthogonal transformations and translations\n of the coordinate system; thus translations or reflections of the coordinate\n system result in the same boxes being selected by the algorithm.\n The output of this operation is the final boxes, scores and classes tensor\n returned after performing non_max_suppression.\n\n Args:\n boxes: A 4-D float `Tensor` of shape `[batch_size, num_boxes, q, 4]`. If `q`\n is 1 then the same boxes are used for all classes; otherwise, if `q` is\n equal to the number of classes, class-specific boxes are used.\n scores: A 3-D float `Tensor` of shape `[batch_size, num_boxes, num_classes]`\n representing a single score corresponding to each box (each row of boxes).\n max_output_size_per_class: A scalar integer `Tensor` representing the\n maximum number of boxes to be selected by non-max suppression per class.\n max_total_size: A scalar representing the maximum number of boxes retained\n over all classes.\n iou_threshold: A float representing the threshold for deciding whether boxes\n overlap too much with respect to IOU.\n score_threshold: A float representing the threshold for deciding when to\n remove boxes based on score.\n pad_per_class: If false, the output nmsed boxes, scores and classes are\n padded/clipped to `max_total_size`. If true, the output nmsed boxes,\n scores and classes are padded to be of length\n `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in\n which case it is clipped to `max_total_size`. Defaults to false.\n clip_boxes: If true, the coordinates of output nmsed boxes will be clipped\n to [0, 1]. If false, output the box coordinates as they are. Defaults to\n true.\n name: A name for the operation (optional).\n\n Returns:\n 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor\n containing the non-max suppressed boxes.\n 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing\n the scores for the boxes.\n 'nmsed_classes': A [batch_size, max_detections] float32 tensor\n containing the class for boxes.\n 'valid_detections': A [batch_size] int32 tensor indicating the number of\n valid detections per batch item. 
Only the top valid_detections[i] entries\n in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the\n entries are zero paddings.\n \"\"\"\n with ops.name_scope(name, 'combined_non_max_suppression'):\n iou_threshold = ops.convert_to_tensor(\n iou_threshold, dtype=dtypes.float32, name='iou_threshold')\n score_threshold = ops.convert_to_tensor(\n score_threshold, dtype=dtypes.float32, name='score_threshold')\n return gen_image_ops.combined_non_max_suppression(\n boxes, scores, max_output_size_per_class, max_total_size, iou_threshold,\n score_threshold, pad_per_class, clip_boxes)\n\n\ndef _bbox_overlap(boxes_a, boxes_b):\n \"\"\"Calculates the overlap (iou - intersection over union) between boxes_a and boxes_b.\n\n Args:\n boxes_a: a tensor with a shape of [batch_size, N, 4]. N is the number of\n boxes per image. The last dimension is the pixel coordinates in\n [ymin, xmin, ymax, xmax] form.\n boxes_b: a tensor with a shape of [batch_size, M, 4]. M is the number of\n boxes. The last dimension is the pixel coordinates in\n [ymin, xmin, ymax, xmax] form.\n Returns:\n intersection_over_union: a tensor with a shape of [batch_size, N, M],\n representing the ratio of intersection area over union area (IoU) between\n two boxes\n \"\"\"\n with ops.name_scope('bbox_overlap'):\n a_y_min, a_x_min, a_y_max, a_x_max = array_ops.split(\n value=boxes_a, num_or_size_splits=4, axis=2)\n b_y_min, b_x_min, b_y_max, b_x_max = array_ops.split(\n value=boxes_b, num_or_size_splits=4, axis=2)\n\n # Calculates the intersection area.\n i_xmin = math_ops.maximum(\n a_x_min, array_ops.transpose(b_x_min, [0, 2, 1]))\n i_xmax = math_ops.minimum(\n a_x_max, array_ops.transpose(b_x_max, [0, 2, 1]))\n i_ymin = math_ops.maximum(\n a_y_min, array_ops.transpose(b_y_min, [0, 2, 1]))\n i_ymax = math_ops.minimum(\n a_y_max, array_ops.transpose(b_y_max, [0, 2, 1]))\n i_area = math_ops.maximum(\n (i_xmax - i_xmin), 0) * math_ops.maximum((i_ymax - i_ymin), 0)\n\n # Calculates the union area.\n a_area = (a_y_max - a_y_min) * (a_x_max - a_x_min)\n b_area = (b_y_max - b_y_min) * (b_x_max - b_x_min)\n EPSILON = 1e-8\n # Adds a small epsilon to avoid divide-by-zero.\n u_area = a_area + array_ops.transpose(b_area, [0, 2, 1]) - i_area + EPSILON\n\n # Calculates IoU.\n intersection_over_union = i_area / u_area\n\n return intersection_over_union\n\n\ndef _self_suppression(iou, _, iou_sum, iou_threshold):\n \"\"\"Suppress boxes in the same tile.\n\n Compute boxes that cannot be suppressed by others (i.e.,\n can_suppress_others), and then use them to suppress boxes in the same tile.\n\n Args:\n iou: a tensor of shape [batch_size, num_boxes_with_padding] representing\n intersection over union.\n _: a placeholder for the loop-carried suppression flag; unused here.\n iou_sum: a scalar tensor.\n iou_threshold: a scalar tensor.\n\n Returns:\n iou_suppressed: a tensor of shape [batch_size, num_boxes_with_padding].\n iou_diff: a scalar tensor representing whether any box is suppressed in\n this step.\n iou_sum_new: a scalar tensor of shape [batch_size] that represents\n the iou sum after suppression.\n iou_threshold: a scalar tensor.\n \"\"\"\n batch_size = array_ops.shape(iou)[0]\n can_suppress_others = math_ops.cast(\n array_ops.reshape(\n math_ops.reduce_max(iou, 1) < iou_threshold, [batch_size, -1, 1]),\n iou.dtype)\n iou_after_suppression = array_ops.reshape(\n math_ops.cast(\n math_ops.reduce_max(can_suppress_others * iou, 1) < iou_threshold,\n iou.dtype),\n [batch_size, -1, 1]) * iou\n iou_sum_new = math_ops.reduce_sum(iou_after_suppression, [1, 2])\n return [\n iou_after_suppression,\n 
math_ops.reduce_any(iou_sum - iou_sum_new > iou_threshold), iou_sum_new,\n iou_threshold\n ]\n\n\ndef _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size):\n \"\"\"Suppress boxes between different tiles.\n\n Args:\n boxes: a tensor of shape [batch_size, num_boxes_with_padding, 4]\n box_slice: a tensor of shape [batch_size, tile_size, 4]\n iou_threshold: a scalar tensor\n inner_idx: a scalar tensor representing the tile index of the tile\n that is used to suppress box_slice\n tile_size: an integer representing the number of boxes in a tile\n\n Returns:\n boxes: unchanged boxes as input\n box_slice_after_suppression: box_slice after suppression\n iou_threshold: unchanged\n \"\"\"\n batch_size = array_ops.shape(boxes)[0]\n new_slice = array_ops.slice(\n boxes, [0, inner_idx * tile_size, 0],\n [batch_size, tile_size, 4])\n iou = _bbox_overlap(new_slice, box_slice)\n box_slice_after_suppression = array_ops.expand_dims(\n math_ops.cast(math_ops.reduce_all(iou < iou_threshold, [1]),\n box_slice.dtype),\n 2) * box_slice\n return boxes, box_slice_after_suppression, iou_threshold, inner_idx + 1\n\n\ndef _suppression_loop_body(boxes, iou_threshold, output_size, idx, tile_size):\n \"\"\"Process boxes in the range [idx*tile_size, (idx+1)*tile_size).\n\n Args:\n boxes: a tensor with a shape of [batch_size, anchors, 4].\n iou_threshold: a float representing the threshold for deciding whether boxes\n overlap too much with respect to IOU.\n output_size: an int32 tensor of size [batch_size], representing the number\n of selected boxes for each batch.\n idx: an integer scalar representing the induction variable.\n tile_size: an integer representing the number of boxes in a tile\n\n Returns:\n boxes: updated boxes.\n iou_threshold: pass down iou_threshold to the next iteration.\n output_size: the updated output_size.\n idx: the updated induction variable.\n \"\"\"\n with ops.name_scope('suppression_loop_body'):\n num_tiles = array_ops.shape(boxes)[1] // tile_size\n batch_size = array_ops.shape(boxes)[0]\n\n def cross_suppression_func(boxes, box_slice, iou_threshold, inner_idx):\n return _cross_suppression(boxes, box_slice, iou_threshold, inner_idx,\n tile_size)\n\n # Iterates over tiles that can possibly suppress the current tile.\n box_slice = array_ops.slice(boxes, [0, idx * tile_size, 0],\n [batch_size, tile_size, 4])\n _, box_slice, _, _ = control_flow_ops.while_loop(\n lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,\n cross_suppression_func,\n [boxes, box_slice, iou_threshold, constant_op.constant(0)])\n\n # Iterates over the current tile to compute self-suppression.\n iou = _bbox_overlap(box_slice, box_slice)\n mask = array_ops.expand_dims(\n array_ops.reshape(\n math_ops.range(tile_size), [1, -1]) > array_ops.reshape(\n math_ops.range(tile_size), [-1, 1]), 0)\n iou *= math_ops.cast(\n math_ops.logical_and(mask, iou >= iou_threshold), iou.dtype)\n suppressed_iou, _, _, _ = control_flow_ops.while_loop(\n lambda _iou, loop_condition, _iou_sum, _: loop_condition,\n _self_suppression,\n [iou, constant_op.constant(True), math_ops.reduce_sum(iou, [1, 2]),\n iou_threshold])\n suppressed_box = math_ops.reduce_sum(suppressed_iou, 1) > 0\n box_slice *= array_ops.expand_dims(\n 1.0 - math_ops.cast(suppressed_box, box_slice.dtype), 2)\n\n # Uses box_slice to update the input boxes.\n mask = array_ops.reshape(\n math_ops.cast(\n math_ops.equal(math_ops.range(num_tiles), idx), boxes.dtype),\n [1, -1, 1, 1])\n boxes = array_ops.tile(array_ops.expand_dims(\n box_slice, [1]), [1, 
num_tiles, 1, 1]) * mask + array_ops.reshape(\n boxes, [batch_size, num_tiles, tile_size, 4]) * (1 - mask)\n boxes = array_ops.reshape(boxes, [batch_size, -1, 4])\n\n # Updates output_size.\n output_size += math_ops.reduce_sum(\n math_ops.cast(\n math_ops.reduce_any(box_slice > 0, [2]), dtypes.int32), [1])\n return boxes, iou_threshold, output_size, idx + 1\n\n\n@tf_export('image.non_max_suppression_padded')\n@dispatch.add_dispatch_support\ndef non_max_suppression_padded(boxes,\n scores,\n max_output_size,\n iou_threshold=0.5,\n score_threshold=float('-inf'),\n pad_to_max_output_size=False,\n name=None,\n sorted_input=False,\n canonicalized_coordinates=False,\n tile_size=512):\n \"\"\"Greedily selects a subset of bounding boxes in descending order of score.\n\n Performs algorithmically equivalent operation to tf.image.non_max_suppression,\n with the addition of an optional parameter which zero-pads the output to\n be of size `max_output_size`.\n The output of this operation is a tuple containing the set of integers\n indexing into the input collection of bounding boxes representing the selected\n boxes and the number of valid indices in the index set. The bounding box\n coordinates corresponding to the selected indices can then be obtained using\n the `tf.slice` and `tf.gather` operations. For example:\n ```python\n selected_indices_padded, num_valid = tf.image.non_max_suppression_padded(\n boxes, scores, max_output_size, iou_threshold,\n score_threshold, pad_to_max_output_size=True)\n selected_indices = tf.slice(\n selected_indices_padded, tf.constant([0]), num_valid)\n selected_boxes = tf.gather(boxes, selected_indices)\n ```\n\n Args:\n boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4].\n Dimensions except the last two are batch dimensions.\n scores: a tensor of rank 1 or higher with a shape of [..., num_boxes].\n max_output_size: a scalar integer `Tensor` representing the maximum number\n of boxes to be selected by non max suppression.\n iou_threshold: a float representing the threshold for deciding whether boxes\n overlap too much with respect to IoU (intersection over union).\n score_threshold: a float representing the threshold for box scores. Boxes\n with a score that is not larger than this threshold will be suppressed.\n pad_to_max_output_size: whether to pad the output idx to max_output_size.\n Must be set to True when the input is a batch of images.\n name: name of operation.\n sorted_input: a boolean indicating whether the input boxes and scores\n are sorted in descending order by the score.\n canonicalized_coordinates: if box coordinates are given as\n `[y_min, x_min, y_max, x_max]`, setting to True eliminates redundant\n computation to canonicalize box coordinates.\n tile_size: an integer representing the number of boxes in a tile, i.e.,\n the maximum number of boxes per image that can be used to suppress other\n boxes in parallel; larger tile_size means larger parallelism and\n potentially more redundant work.\n Returns:\n idx: a tensor with a shape of [..., num_boxes] representing the\n indices selected by non-max suppression. The leading dimensions\n are the batch dimensions of the input boxes. All numbers are within\n [0, num_boxes). For each image (i.e., idx[i]), only the first num_valid[i]\n indices (i.e., idx[i][:num_valid[i]]) are valid.\n num_valid: a tensor of rank 0 or higher with a shape of [...]\n representing the number of valid indices in idx. 
Its dimensions are the\n batch dimensions of the input boxes.\n Raises:\n ValueError: When `pad_to_max_output_size` is False for batched input.\n \"\"\"\n # if no new arguments are used and no later than 2020/6/23, use the old\n # version to give us time to fix TFLite conversion after the TF 2.3 release.\n if (not sorted_input) and \\\n (not canonicalized_coordinates) and \\\n tile_size == 512 and not compat.forward_compatible(2020, 6, 23):\n return non_max_suppression_padded_v1(\n boxes, scores, max_output_size, iou_threshold, score_threshold,\n pad_to_max_output_size, name)\n else:\n with ops.name_scope(name, 'non_max_suppression_padded'):\n if not pad_to_max_output_size:\n # pad_to_max_output_size may be set to False only when the shape of\n # boxes is [num_boxes, 4], i.e., a single image. We make best effort to\n # detect violations at compile time. If `boxes` does not have a static\n # rank, the check allows computation to proceed.\n if boxes.get_shape().rank is not None and boxes.get_shape().rank > 2:\n raise ValueError(\n \"'pad_to_max_output_size' (value {}) must be True for \"\n 'batched input'.format(pad_to_max_output_size))\n if name is None:\n name = ''\n idx, num_valid = non_max_suppression_padded_v2(\n boxes, scores, max_output_size, iou_threshold, score_threshold,\n sorted_input, canonicalized_coordinates, tile_size)\n # def_function.function seems to lose shape information, so set it here.\n if not pad_to_max_output_size:\n idx = idx[0, :num_valid]\n else:\n batch_dims = array_ops.concat([\n array_ops.shape(boxes)[:-2],\n array_ops.expand_dims(max_output_size, 0)\n ], 0)\n idx = array_ops.reshape(idx, batch_dims)\n return idx, num_valid\n\n\n# TODO(b/158709815): Improve performance regression due to\n# def_function.function.\n@def_function.function(\n experimental_implements='non_max_suppression_padded_v2')\ndef non_max_suppression_padded_v2(boxes,\n scores,\n max_output_size,\n iou_threshold=0.5,\n score_threshold=float('-inf'),\n sorted_input=False,\n canonicalized_coordinates=False,\n tile_size=512):\n \"\"\"Non-maximum suppression.\n\n Prunes away boxes that have high intersection-over-union (IOU) overlap\n with previously selected boxes. Bounding boxes are supplied as\n `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any\n diagonal pair of box corners and the coordinates can be provided as normalized\n (i.e., lying in the interval `[0, 1]`) or absolute. The bounding box\n coordinates are canonicalized to `[y_min, x_min, y_max, x_max]`,\n where `(y_min, x_min)` and `(y_max, x_max)` are the coordinates of the lower\n left and upper right corner. Users may indicate the input box coordinates are\n already canonicalized to eliminate redundant work by setting\n canonicalized_coordinates to `True`. Note that this algorithm is agnostic to\n where the origin is in the coordinate system. Note that this algorithm is\n invariant to orthogonal transformations and translations of the coordinate\n system; thus translations or reflections of the coordinate system result in\n the same boxes being selected by the algorithm.\n\n Similar to tf.image.non_max_suppression, non_max_suppression_padded\n implements hard NMS but can operate on a batch of images and improves\n performance by tiling the bounding boxes. Non_max_suppression_padded should\n be preferred over tf.image.non_max_suppression when running on devices with\n abundant parallelism for higher computation speed. 
For soft NMS, refer to\n tf.image.non_max_suppression_with_scores.\n\n While a serial NMS algorithm iteratively uses the highest-scored unprocessed\n box to suppress boxes, this algorithm uses many boxes to suppress other boxes\n in parallel. The key idea is to partition boxes into tiles based on their\n score and suppress boxes tile by tile, thus achieving parallelism within a\n tile. The tile size determines the degree of parallelism.\n\n In cross suppression (using boxes of tile A to suppress boxes of tile B),\n all boxes in A can independently suppress boxes in B.\n\n Self suppression (suppressing boxes of the same tile) needs to be iteratively\n applied until there's no more suppression. In each iteration, boxes that\n cannot be suppressed are used to suppress boxes in the same tile.\n\n boxes = boxes.pad_to_multiple_of(tile_size)\n num_tiles = len(boxes) // tile_size\n output_boxes = []\n for i in range(num_tiles):\n box_tile = boxes[i*tile_size : (i+1)*tile_size]\n for j in range(i):\n # in parallel suppress boxes in box_tile using boxes from suppressing_tile\n suppressing_tile = boxes[j*tile_size : (j+1)*tile_size]\n iou = _bbox_overlap(box_tile, suppressing_tile)\n # if the box is suppressed in iou, clear it to zero\n box_tile *= _update_boxes(iou)\n # Iteratively handle the diagonal tile.\n iou = _bbox_overlap(box_tile, box_tile)\n iou_changed = True\n while iou_changed:\n # boxes that are not suppressed by anything else\n suppressing_boxes = _get_suppressing_boxes(iou)\n # boxes that are suppressed by suppressing_boxes\n suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes)\n # clear iou to 0 for boxes that are suppressed, as they cannot be used\n # to suppress other boxes any more\n new_iou = _clear_iou(iou, suppressed_boxes)\n iou_changed = (new_iou != iou)\n iou = new_iou\n # remaining boxes that can still suppress others are the selected boxes.\n output_boxes.append(_get_suppressing_boxes(iou))\n if len(output_boxes) >= max_output_size:\n break\n\n Args:\n boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4].\n Dimensions except the last two are batch dimensions. The last dimension\n represents box coordinates, given as [y_1, x_1, y_2, x_2]. The coordinates\n on each dimension can be given in any order\n (see also `canonicalized_coordinates`) but must describe a box with\n a positive area.\n scores: a tensor of rank 1 or higher with a shape of [..., num_boxes].\n max_output_size: a scalar integer `Tensor` representing the maximum number\n of boxes to be selected by non max suppression.\n iou_threshold: a float representing the threshold for deciding whether boxes\n overlap too much with respect to IoU (intersection over union).\n score_threshold: a float representing the threshold for box scores. Boxes\n with a score that is not larger than this threshold will be suppressed.\n sorted_input: a boolean indicating whether the input boxes and scores\n are sorted in descending order by the score.\n canonicalized_coordinates: if box coordinates are given as\n `[y_min, x_min, y_max, x_max]`, setting to True eliminates redundant\n computation to canonicalize box coordinates.\n tile_size: an integer representing the number of boxes in a tile, i.e.,\n the maximum number of boxes per image that can be used to suppress other\n boxes in parallel; larger tile_size means larger parallelism and\n potentially more redundant work.\n Returns:\n idx: a tensor with a shape of [..., num_boxes] representing the\n indices selected by non-max suppression. 
The leading dimensions\n are the batch dimensions of the input boxes. All numbers are within\n [0, num_boxes). For each image (i.e., idx[i]), only the first num_valid[i]\n indices (i.e., idx[i][:num_valid[i]]) are valid.\n num_valid: a tensor of rank 0 or higher with a shape of [...]\n representing the number of valid indices in idx. Its dimensions are the\n batch dimensions of the input boxes.\n Raises:\n ValueError: When `pad_to_max_output_size` is False for batched input.\n \"\"\"\n def _sort_scores_and_boxes(scores, boxes):\n \"\"\"Sort boxes based on their score from highest to lowest.\n\n Args:\n scores: a tensor with a shape of [batch_size, num_boxes] representing\n the scores of boxes.\n boxes: a tensor with a shape of [batch_size, num_boxes, 4] representing\n the boxes.\n Returns:\n sorted_scores: a tensor with a shape of [batch_size, num_boxes]\n representing the sorted scores.\n sorted_boxes: a tensor representing the sorted boxes.\n sorted_scores_indices: a tensor with a shape of [batch_size, num_boxes]\n representing the index of the scores in a sorted descending order.\n \"\"\"\n with ops.name_scope('sort_scores_and_boxes'):\n batch_size = array_ops.shape(boxes)[0]\n num_boxes = array_ops.shape(boxes)[1]\n sorted_scores_indices = sort_ops.argsort(\n scores, axis=1, direction='DESCENDING')\n index_offsets = math_ops.range(batch_size) * num_boxes\n indices = array_ops.reshape(\n sorted_scores_indices + array_ops.expand_dims(index_offsets, 1), [-1])\n sorted_scores = array_ops.reshape(\n array_ops.gather(array_ops.reshape(scores, [-1]), indices),\n [batch_size, -1])\n sorted_boxes = array_ops.reshape(\n array_ops.gather(array_ops.reshape(boxes, [-1, 4]), indices),\n [batch_size, -1, 4])\n return sorted_scores, sorted_boxes, sorted_scores_indices\n\n batch_dims = array_ops.shape(boxes)[:-2]\n num_boxes = array_ops.shape(boxes)[-2]\n boxes = array_ops.reshape(boxes, [-1, num_boxes, 4])\n scores = array_ops.reshape(scores, [-1, num_boxes])\n batch_size = array_ops.shape(boxes)[0]\n if score_threshold != float('-inf'):\n with ops.name_scope('filter_by_score'):\n score_mask = math_ops.cast(scores > score_threshold, scores.dtype)\n scores *= score_mask\n box_mask = array_ops.expand_dims(\n math_ops.cast(score_mask, boxes.dtype), 2)\n boxes *= box_mask\n\n if not canonicalized_coordinates:\n with ops.name_scope('canonicalize_coordinates'):\n y_1, x_1, y_2, x_2 = array_ops.split(\n value=boxes, num_or_size_splits=4, axis=2)\n y_1_is_min = math_ops.reduce_all(\n math_ops.less_equal(y_1[0, 0, 0], y_2[0, 0, 0]))\n y_min, y_max = control_flow_ops.cond(\n y_1_is_min, lambda: (y_1, y_2), lambda: (y_2, y_1))\n x_1_is_min = math_ops.reduce_all(\n math_ops.less_equal(x_1[0, 0, 0], x_2[0, 0, 0]))\n x_min, x_max = control_flow_ops.cond(\n x_1_is_min, lambda: (x_1, x_2), lambda: (x_2, x_1))\n boxes = array_ops.concat([y_min, x_min, y_max, x_max], axis=2)\n\n if not sorted_input:\n scores, boxes, sorted_indices = _sort_scores_and_boxes(scores, boxes)\n else:\n # Default value required for Autograph.\n sorted_indices = array_ops.zeros_like(scores, dtype=dtypes.int32)\n\n pad = math_ops.cast(\n math_ops.ceil(\n math_ops.cast(\n math_ops.maximum(num_boxes, max_output_size), dtypes.float32) /\n math_ops.cast(tile_size, dtypes.float32)),\n dtypes.int32) * tile_size - num_boxes\n boxes = array_ops.pad(\n math_ops.cast(boxes, dtypes.float32), [[0, 0], [0, pad], [0, 0]])\n scores = array_ops.pad(\n math_ops.cast(scores, dtypes.float32), [[0, 0], [0, pad]])\n num_boxes_after_padding = num_boxes + pad\n 
num_iterations = num_boxes_after_padding // tile_size\n def _loop_cond(unused_boxes, unused_threshold, output_size, idx):\n return math_ops.logical_and(\n math_ops.reduce_min(output_size) < max_output_size,\n idx < num_iterations)\n\n def suppression_loop_body(boxes, iou_threshold, output_size, idx):\n return _suppression_loop_body(\n boxes, iou_threshold, output_size, idx, tile_size)\n\n selected_boxes, _, output_size, _ = control_flow_ops.while_loop(\n _loop_cond,\n suppression_loop_body,\n [\n boxes, iou_threshold,\n array_ops.zeros([batch_size], dtypes.int32),\n constant_op.constant(0)\n ],\n shape_invariants=[\n tensor_shape.TensorShape([None, None, 4]),\n tensor_shape.TensorShape([]),\n tensor_shape.TensorShape([None]),\n tensor_shape.TensorShape([]),\n ],\n )\n num_valid = math_ops.minimum(output_size, max_output_size)\n idx = num_boxes_after_padding - math_ops.cast(\n nn_ops.top_k(\n math_ops.cast(math_ops.reduce_any(\n selected_boxes > 0, [2]), dtypes.int32) *\n array_ops.expand_dims(\n math_ops.range(num_boxes_after_padding, 0, -1), 0),\n max_output_size)[0], dtypes.int32)\n idx = math_ops.minimum(idx, num_boxes - 1)\n\n if not sorted_input:\n index_offsets = math_ops.range(batch_size) * num_boxes\n gather_idx = array_ops.reshape(\n idx + array_ops.expand_dims(index_offsets, 1), [-1])\n idx = array_ops.reshape(\n array_ops.gather(array_ops.reshape(sorted_indices, [-1]),\n gather_idx),\n [batch_size, -1])\n invalid_index = array_ops.fill([batch_size, max_output_size], 0)\n idx_index = array_ops.expand_dims(math_ops.range(max_output_size), 0)\n num_valid_expanded = array_ops.expand_dims(num_valid, 1)\n idx = array_ops.where(idx_index < num_valid_expanded,\n idx, invalid_index)\n\n num_valid = array_ops.reshape(num_valid, batch_dims)\n return idx, num_valid\n\n\ndef non_max_suppression_padded_v1(boxes,\n scores,\n max_output_size,\n iou_threshold=0.5,\n score_threshold=float('-inf'),\n pad_to_max_output_size=False,\n name=None):\n \"\"\"Greedily selects a subset of bounding boxes in descending order of score.\n\n Performs algorithmically equivalent operation to tf.image.non_max_suppression,\n with the addition of an optional parameter which zero-pads the output to\n be of size `max_output_size`.\n The output of this operation is a tuple containing the set of integers\n indexing into the input collection of bounding boxes representing the selected\n boxes and the number of valid indices in the index set. The bounding box\n coordinates corresponding to the selected indices can then be obtained using\n the `tf.slice` and `tf.gather` operations. For example:\n ```python\n selected_indices_padded, num_valid = tf.image.non_max_suppression_padded(\n boxes, scores, max_output_size, iou_threshold,\n score_threshold, pad_to_max_output_size=True)\n selected_indices = tf.slice(\n selected_indices_padded, tf.constant([0]), num_valid)\n selected_boxes = tf.gather(boxes, selected_indices)\n ```\n\n Args:\n boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.\n scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single\n score corresponding to each box (each row of boxes).\n max_output_size: A scalar integer `Tensor` representing the maximum number\n of boxes to be selected by non-max suppression.\n iou_threshold: A float representing the threshold for deciding whether boxes\n overlap too much with respect to IOU.\n score_threshold: A float representing the threshold for deciding when to\n remove boxes based on score.\n pad_to_max_output_size: bool. 
If True, size of `selected_indices` output is\n padded to `max_output_size`.\n name: A name for the operation (optional).\n\n Returns:\n selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the\n selected indices from the boxes tensor, where `M <= max_output_size`.\n valid_outputs: A scalar integer `Tensor` denoting how many elements in\n `selected_indices` are valid. Valid elements occur first, then padding.\n \"\"\"\n with ops.name_scope(name, 'non_max_suppression_padded'):\n iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold')\n score_threshold = ops.convert_to_tensor(\n score_threshold, name='score_threshold')\n return gen_image_ops.non_max_suppression_v4(boxes, scores, max_output_size,\n iou_threshold, score_threshold,\n pad_to_max_output_size)\n\n\n@tf_export('image.draw_bounding_boxes', v1=[])\n@dispatch.add_dispatch_support\ndef draw_bounding_boxes_v2(images, boxes, colors, name=None):\n \"\"\"Draw bounding boxes on a batch of images.\n\n Outputs a copy of `images` but draws on top of the pixels zero or more\n bounding boxes specified by the locations in `boxes`. The coordinates of\n each bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`.\n The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width\n and the height of the underlying image.\n\n For example, if an image is 100 x 200 pixels (height x width) and the bounding\n box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of\n the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).\n\n Parts of the bounding box may fall outside the image.\n\n Args:\n images: A `Tensor`. Must be one of the following types: `float32`, `half`.\n 4-D with shape `[batch, height, width, depth]`. A batch of images.\n boxes: A `Tensor` of type `float32`. 3-D with shape `[batch,\n num_bounding_boxes, 4]` containing bounding boxes.\n colors: A `Tensor` of type `float32`. 2-D. A list of RGBA colors to cycle\n through for the boxes.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `images`.\n\n Usage Example:\n\n >>> # create an empty image\n >>> img = tf.zeros([1, 3, 3, 3])\n >>> # draw a box around the image\n >>> box = np.array([0, 0, 1, 1])\n >>> boxes = box.reshape([1, 1, 4])\n >>> # alternate between red and blue\n >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n >>> tf.image.draw_bounding_boxes(img, boxes, colors)\n <tf.Tensor: shape=(1, 3, 3, 3), dtype=float32, numpy=\n array([[[[1., 0., 0.],\n [1., 0., 0.],\n [1., 0., 0.]],\n [[1., 0., 0.],\n [0., 0., 0.],\n [1., 0., 0.]],\n [[1., 0., 0.],\n [1., 0., 0.],\n [1., 0., 0.]]]], dtype=float32)>\n \"\"\"\n if colors is None:\n return gen_image_ops.draw_bounding_boxes(images, boxes, name)\n return gen_image_ops.draw_bounding_boxes_v2(images, boxes, colors, name)\n\n\n@tf_export(v1=['image.draw_bounding_boxes'])\n@dispatch.add_dispatch_support\ndef draw_bounding_boxes(images, boxes, name=None, colors=None):\n \"\"\"Draw bounding boxes on a batch of images.\n\n Outputs a copy of `images` but draws on top of the pixels zero or more\n bounding boxes specified by the locations in `boxes`. 
The coordinates of\n each bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`.\n The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width\n and the height of the underlying image.\n\n For example, if an image is 100 x 200 pixels (height x width) and the bounding\n box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of\n the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).\n\n Parts of the bounding box may fall outside the image.\n\n Args:\n images: A `Tensor`. Must be one of the following types: `float32`, `half`.\n 4-D with shape `[batch, height, width, depth]`. A batch of images.\n boxes: A `Tensor` of type `float32`. 3-D with shape `[batch,\n num_bounding_boxes, 4]` containing bounding boxes.\n name: A name for the operation (optional).\n colors: A `Tensor` of type `float32`. 2-D. A list of RGBA colors to cycle\n through for the boxes.\n\n Returns:\n A `Tensor`. Has the same type as `images`.\n\n Usage Example:\n\n >>> # create an empty image\n >>> img = tf.zeros([1, 3, 3, 3])\n >>> # draw a box around the image\n >>> box = np.array([0, 0, 1, 1])\n >>> boxes = box.reshape([1, 1, 4])\n >>> # alternate between red and blue\n >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n >>> tf.image.draw_bounding_boxes(img, boxes, colors)\n <tf.Tensor: shape=(1, 3, 3, 3), dtype=float32, numpy=\n array([[[[1., 0., 0.],\n [1., 0., 0.],\n [1., 0., 0.]],\n [[1., 0., 0.],\n [0., 0., 0.],\n [1., 0., 0.]],\n [[1., 0., 0.],\n [1., 0., 0.],\n [1., 0., 0.]]]], dtype=float32)>\n \"\"\"\n return draw_bounding_boxes_v2(images, boxes, colors, name)\n\n\n@tf_export('image.generate_bounding_box_proposals')\n@dispatch.add_dispatch_support\ndef generate_bounding_box_proposals(scores,\n bbox_deltas,\n image_info,\n anchors,\n nms_threshold=0.7,\n pre_nms_topn=6000,\n min_size=16,\n post_nms_topn=300,\n name=None):\n \"\"\"Generate bounding box proposals from encoded bounding boxes.\n\n Returns:\n rois: Region of interest boxes sorted by their scores.\n roi_probabilities: scores of the ROI boxes in the ROIs' tensor.\n \"\"\"\n return gen_image_ops.generate_bounding_box_proposals(\n scores=scores,\n bbox_deltas=bbox_deltas,\n image_info=image_info,\n anchors=anchors,\n nms_threshold=nms_threshold,\n pre_nms_topn=pre_nms_topn,\n min_size=min_size,\n post_nms_topn=post_nms_topn,\n name=name)\n" ]
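A minimal, self-contained usage sketch for a few of the public `tf.image` ops defined in the file above. It is illustrative only and belongs to neither record's code: the batch shape, image size, and the 0.05 brightness offset are arbitrary choices, picked so that the default five MS-SSIM scales fit (the smallest scale must still be at least 11x11, i.e. the input must be at least 11 * 2**4 = 176 pixels per side).

```python
import tensorflow as tf

# Two float image batches in [0, 1] with shape [batch, height, width, channels].
img1 = tf.random.uniform((2, 256, 256, 3))
img2 = tf.clip_by_value(img1 + 0.05, 0.0, 1.0)  # a slightly brightened copy

ssim = tf.image.ssim(img1, img2, max_val=1.0)               # per-image SSIM, shape (2,)
msssim = tf.image.ssim_multiscale(img1, img2, max_val=1.0)  # per-image MS-SSIM, shape (2,)
dy, dx = tf.image.image_gradients(img1)                     # gradients, same shape as img1
```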
[ [ "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.math_ops.less", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.ops.array_ops.unstack", "tensorflow.python.ops.math_ops.reduce_all", "tensorflow.python.ops.gen_image_ops.decode_image", "numpy.log", "tensorflow.python.ops.math_ops.tensordot", "tensorflow.python.ops.gen_image_ops.non_max_suppression_with_overlaps", "tensorflow.python.ops.array_ops.split", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.ops.array_ops.pad", "numpy.expand_dims", "tensorflow.python.ops.gen_image_ops.draw_bounding_boxes_v2", "tensorflow.python.ops.string_ops.substr", "tensorflow.python.ops.array_ops.rank", "tensorflow.python.ops.array_ops.tile", "tensorflow.python.ops.math_ops.reduce_any", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.ops.array_ops.shape_n", "tensorflow.python.ops.math_ops.reduce_std", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.math_ops.abs", "tensorflow.python.ops.math_ops.logical_and", "tensorflow.python.framework.tensor_shape.unknown_shape", "tensorflow.python.ops.math_ops.reduce_max", "tensorflow.python.framework.random_seed.get_seed", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.control_flow_ops.case", "tensorflow.python.ops.gen_image_ops.extract_glimpse", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.math_ops.multiply", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.math_ops.floordiv", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.ops.math_ops.less_equal", "tensorflow.python.ops.math_ops.floor", "tensorflow.python.ops.gen_image_ops.extract_glimpse_v2", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.ops.math_ops.squared_difference", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.gen_image_ops.resize_bilinear", "tensorflow.python.ops.math_ops.reduce_mean", "tensorflow.python.ops.gen_image_ops.stateless_sample_distorted_bounding_box", "tensorflow.python.ops.gen_image_ops.decode_jpeg", "tensorflow.python.ops.control_flow_ops.with_dependencies", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.ops.array_ops.reverse", "tensorflow.python.framework.tensor_shape.dimension_at_index", "tensorflow.python.ops.array_ops.size", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.math_ops.pow", "tensorflow.python.ops.gen_image_ops.adjust_saturation", "tensorflow.python.ops.math_ops.greater_equal", "tensorflow.python.ops.math_ops.minimum", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.gen_image_ops.non_max_suppression_v5", "tensorflow.python.ops.gen_image_ops.adjust_contrastv2", "tensorflow.python.ops.gen_image_ops.decode_bmp", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.util.dispatch.add_dispatch_support", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.gen_image_ops.non_max_suppression_v4", "tensorflow.python.framework.tensor_util.constant_value_as_shape", "tensorflow.python.ops.math_ops.maximum", "tensorflow.python.ops.gen_image_ops.crop_and_resize", "tensorflow.python.ops.gen_image_ops.encode_jpeg_variable_quality", "tensorflow.python.ops.array_ops.fill", "tensorflow.python.ops.array_ops.reverse_v2", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.ops.math_ops.range", 
"tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.math_ops.saturate_cast", "tensorflow.python.ops.gen_image_ops.non_max_suppression_v3", "tensorflow.python.ops.math_ops.mod", "numpy.asarray", "tensorflow.python.ops.math_ops.not_equal", "tensorflow.python.ops.control_flow_ops.cond", "tensorflow.python.ops.gen_image_ops.decode_png", "tensorflow.python.ops.gen_image_ops.decode_gif", "tensorflow.python.util.deprecation.deprecated_args", "tensorflow.python.ops.sort_ops.argsort", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.ops.math_ops.divide", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.ops.nn_ops.avg_pool", "tensorflow.python.ops.gen_image_ops.combined_non_max_suppression", "tensorflow.python.ops.gen_image_ops.generate_bounding_box_proposals", "tensorflow.python.ops.gen_image_ops.adjust_hue", "tensorflow.python.ops.nn.depthwise_conv2d", "tensorflow.python.compat.compat.forward_compatible", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.math_ops.log", "tensorflow.python.ops.gen_image_ops.draw_bounding_boxes", "tensorflow.python.ops.nn_ops.softmax", "tensorflow.python.ops.gen_image_ops.resize_nearest_neighbor", "tensorflow.python.ops.control_flow_ops.Assert", "tensorflow.python.framework.ops.NotDifferentiable", "tensorflow.python.ops.gen_image_ops.resize_bicubic", "tensorflow.python.ops.gen_image_ops.sample_distorted_bounding_box_v2", "tensorflow.python.eager.def_function.function", "tensorflow.python.ops.math_ops.square", "tensorflow.python.ops.gen_image_ops.resize_area", "tensorflow.python.util.deprecation.deprecated_argument_lookup", "tensorflow.python.ops.nn_ops.relu", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.math_ops.reduce_min", "tensorflow.python.ops.stateless_random_ops.stateless_random_uniform" ] ]
ocetintas/evolutionary_art
[ "2c63f68c2f5e8afcc19d3900d1aff01410c0583b" ]
[ "classes.py" ]
[ "import numpy as np\r\nimport cv2\r\nimport random\r\nfrom copy import deepcopy\r\nfrom operator import attrgetter\r\n\r\n\r\n# Check if the circle is in the boundary of the image frame\r\ndef CheckBoundaries(x, y, radius):\r\n # Case 1: x < 0\r\n if x < 0:\r\n if y < 0:\r\n return (x+radius >= 0) and (y+radius >= 0)\r\n elif 222 >= y >= 0:\r\n return x+radius >= 0\r\n else:\r\n return (x+radius >= 0) and (222 >= y-radius)\r\n\r\n # Case 2: x is inside the boundary of the Mona Lisa Image\r\n elif 149 >= x >= 0:\r\n if y < 0:\r\n return y+radius >= 0\r\n elif 222 >= y >= 0:\r\n return True\r\n else:\r\n return 222 >= y-radius\r\n\r\n # Case 3: x is greater than the boundary of the Mona Lisa Image\r\n else:\r\n if y < 0:\r\n return (149 >= x-radius) and (y+radius >= 0)\r\n elif 222 >= y >= 0:\r\n return 149 >= x-radius\r\n else:\r\n return (149 >= x-radius) and (222 >= y-radius)\r\n\r\n\r\n# Define the Gene as a class\r\nclass Gene:\r\n def __init__(self, x=0, y=0, radius=1, B=0, G=0, R=0, A=0, randomGene=False):\r\n self.coordinate = None\r\n self.radius = None\r\n self.color = None\r\n self.A = None\r\n self.geneArray = self.createGene(x, y, radius, B, G, R, A, randomGene)\r\n\r\n # Create a Gene with the assigned values or randomly\r\n def createGene(self, x, y, radius, B, G, R, A, randomGene):\r\n # Create the gene randomly if randomGene is selected or circle does not lie in the frame limits\r\n if randomGene or (not CheckBoundaries(x, y, radius)):\r\n self.coordinate = (random.randint(0, 149), random.randint(0, 222))\r\n self.radius = random.randint(1, 20)\r\n self.color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\r\n self.A = random.random()\r\n\r\n # Create the gene with the given values\r\n else:\r\n self.coordinate = (x, y)\r\n self.radius = radius\r\n self.color = (B, G, R)\r\n self.A = A\r\n\r\n return [self.coordinate[0], self.coordinate[1], self.radius, self.color[0], self.color[1], self.color[2],\r\n self.A]\r\n\r\n # Mutate the gene\r\n def mutate(self, mutationMethod=\"Guided\"):\r\n if mutationMethod == \"Unguided\":\r\n self.coordinate = (random.randint(0, 149), random.randint(0, 222))\r\n self.radius = random.randint(1, 100)\r\n self.color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\r\n self.A = random.random()\r\n elif mutationMethod == \"Guided\":\r\n mutationCoordinate = (random.randint(self.coordinate[0] - 37, self.coordinate[0] + 37),\r\n random.randint(self.coordinate[1] - 64, self.coordinate[1] + 64))\r\n mutationRadius = random.randint(max(1, self.radius-10), self.radius+10)\r\n # If the circle is not in the frame range, repeat the process until it is in range\r\n if not CheckBoundaries(mutationCoordinate[0], mutationCoordinate[1], mutationRadius):\r\n return self.mutate(mutationMethod)\r\n self.coordinate = deepcopy(mutationCoordinate)\r\n self.radius = deepcopy(mutationRadius)\r\n self.color = (random.randint(max(0, self.color[0] - 64), min(255, self.color[0] + 64)),\r\n random.randint(max(0, self.color[1] - 64), min(255, self.color[1] + 64)),\r\n random.randint(max(0, self.color[2] - 64), min(255, self.color[2] + 64)))\r\n self.A = random.uniform(max(0, self.A - 0.25), min(1, self.A + 0.25))\r\n self.geneArray = [self.coordinate[0], self.coordinate[1], self.radius, self.color[0], self.color[1],\r\n self.color[2], self.A]\r\n\r\n\r\n# Define the Individual as a class\r\nclass Individual:\r\n def __init__(self, num_genes=50):\r\n self.num_genes = num_genes\r\n self.fitness = None\r\n self.chromosome = 
self.initializeChromosome()\r\n\r\n # Initialize the chromosome with the number of genes\r\n def initializeChromosome(self):\r\n self.fitness = None\r\n return [Gene(randomGene=True) for i in range(self.num_genes)]\r\n #return sorted(unsortedChromosome, key=lambda x: x.radius, reverse=True)\r\n\r\n # Mutate the individual\r\n def mutateIndividual(self, mutation_prob=0.2, mutationMethod=\"Guided\"):\r\n if random.random() < mutation_prob:\r\n mutatedGeneIndex = random.randint(0, self.num_genes-1)\r\n self.chromosome[mutatedGeneIndex].mutate(mutationMethod)\r\n self.fitness = None\r\n #self.chromosome.sort(key=lambda x: x.radius, reverse=True)\r\n\r\n # Evaluate the individual's fitness\r\n def evaluateFitness(self, monaLisaImage):\r\n # Get the size of the Mona Lisa\r\n height = monaLisaImage.shape[0] # 223\r\n width = monaLisaImage.shape[1] # 150\r\n # Create the blank image\r\n img = np.full((height, width, 3), (255, 255, 255), np.uint8)\r\n # For each gene in the chromosome\r\n for gene in self.chromosome:\r\n # Overlay <- img\r\n overlay = deepcopy(img)\r\n # Draw the circle on overlay\r\n cv2.circle(overlay, gene.coordinate, gene.radius, gene.color, thickness=-1)\r\n # img <- overlay x alpha + img x (1 - alpha)\r\n img = np.add((overlay * gene.A), (img * (1 - gene.A)))\r\n # Calculate the fitness in a vectorized fashion to increase efficiency of the code\r\n self.fitness = -np.sum(np.square(np.subtract(monaLisaImage, img)))\r\n\r\n # Create offspring from parents\r\n def createOffspring(self, Parent0, Parent1, geneOwnerArray):\r\n for i in range(self.num_genes):\r\n # If gene comes from Parent0\r\n if geneOwnerArray[i] == 0:\r\n self.chromosome[i] = deepcopy(Parent0.chromosome[i])\r\n elif geneOwnerArray[i] == 1:\r\n self.chromosome[i] = deepcopy(Parent1.chromosome[i])\r\n #self.chromosome.sort(key=lambda x: x.radius, reverse=True)\r\n self.fitness = None\r\n\r\n def drawImage(self, monaLisaImage):\r\n # Get the size of the Mona Lisa\r\n height = monaLisaImage.shape[0] # 223\r\n width = monaLisaImage.shape[1] # 150\r\n # Create the blank image\r\n img = np.full((height, width, 3), (255, 255, 255), np.uint8)\r\n # For each gene in the chromosome\r\n for gene in self.chromosome:\r\n # Overlay <- img\r\n overlay = deepcopy(img)\r\n # Draw the circle on overlay\r\n cv2.circle(overlay, gene.coordinate, gene.radius, gene.color, thickness=-1)\r\n # img <- overlay x alpha + img x (1 - alpha)\r\n img = np.add((overlay * gene.A), (img * (1 - gene.A)))\r\n return img\r\n\r\n\r\n# Define the population as a class\r\nclass Population:\r\n def __init__(self, num_inds=20, num_genes=50, tm_size=5, frac_elites=0.2, frac_parents=0.6, mutation_prob=0.2,\r\n mutationMethod=\"Guided\"):\r\n # Characteristics of a population - They do not change from generation to generation\r\n self.num_inds = num_inds\r\n self.num_genes = num_genes\r\n self.tm_size = tm_size\r\n self.frac_elites = frac_elites\r\n self.frac_parents = frac_parents\r\n self.mutation_prob = mutation_prob\r\n self.mutationMethod = mutationMethod\r\n\r\n # These values may change from generation to generation\r\n self.members = self.initializePopulation()\r\n self.elites = []\r\n self.tournamentWinners = []\r\n self.parentTournamentWinners = []\r\n self.advancingTournamentWinners = []\r\n self.children = []\r\n # This list holds the members that will be in the next generation\r\n self.nextGeneration = []\r\n\r\n # Initialize the population with num_inds and num_genes\r\n def initializePopulation(self):\r\n return [Individual(self.num_genes) for 
i in range(self.num_inds)]\r\n\r\n    # Evaluate all members\r\n    def evaluatePopulation(self, monaLisaImage):\r\n        for member in self.members:\r\n            member.evaluateFitness(monaLisaImage)\r\n\r\n    # Find the elites\r\n    def findElites(self):\r\n        self.elites = sorted(self.members, key=lambda x: x.fitness, reverse=True)[0:int(self.num_inds*self.frac_elites)]\r\n\r\n    # Tournament Selection\r\n    def tournamentSelection(self):\r\n        for i in range(self.num_inds - int(self.num_inds*self.frac_elites)):\r\n            tournamentMembers = random.sample(self.members, self.tm_size)\r\n            self.tournamentWinners.append(max(tournamentMembers, key=attrgetter(\"fitness\")))\r\n        # If we want to choose the parents according to their fitnesses, activate the following line\r\n        #self.tournamentWinners.sort(key=lambda x: x.fitness, reverse=True)\r\n\r\n    # Selection of elites and tournament winners (both parentWinners and advancingWinners)\r\n    def selection(self):\r\n        self.findElites()\r\n        self.tournamentSelection()\r\n        # Choose parentWinners and advancingWinners - Best tournament winners will be parents\r\n        self.parentTournamentWinners = self.tournamentWinners[0:int(self.num_inds*self.frac_parents)]\r\n        self.advancingTournamentWinners = self.tournamentWinners[int(self.num_inds*self.frac_parents):]\r\n\r\n    # Create children with crossover\r\n    def crossover(self):\r\n        for i in range(0, len(self.parentTournamentWinners), 2):\r\n            # If the number of parents is odd, the single parent advances to the next generation since no pair is left\r\n            if i == len(self.parentTournamentWinners) - 1:\r\n                self.children.append(self.parentTournamentWinners[i])\r\n            else:\r\n                # Select the parents\r\n                Parent0 = self.parentTournamentWinners[i]\r\n                Parent1 = self.parentTournamentWinners[i+1]\r\n                # Initialize the children individuals\r\n                Children0 = Individual(self.num_genes)\r\n                Children1 = Individual(self.num_genes)\r\n                # geneOwnerArrays are binomial and each element describes which parent that gene comes from\r\n                geneOwnerArray0 = np.random.binomial(1, 0.5, self.num_genes)\r\n                # geneOwnerArray1 is binomial complement of geneOwnerArray0\r\n                geneOwnerArray1 = np.ones(self.num_genes, dtype=int) - geneOwnerArray0\r\n                # Create the children\r\n                Children0.createOffspring(Parent0, Parent1, geneOwnerArray0)\r\n                Children1.createOffspring(Parent0, Parent1, geneOwnerArray1)\r\n                # Add the children to the children list\r\n                self.children.append(Children0)\r\n                self.children.append(Children1)\r\n\r\n    # Next generation members are elites + children + tournament winners that didn't participate in crossover\r\n    def nextGenerationMembers(self):\r\n        self.nextGeneration = self.elites + self.children + self.advancingTournamentWinners\r\n\r\n    # Mutate the population\r\n    def mutatePopulation(self):\r\n        for member in self.nextGeneration:\r\n            if member not in self.elites:\r\n                member.mutateIndividual(self.mutation_prob, self.mutationMethod)\r\n\r\n    # Next generation becomes the current generation at the end of each epoch\r\n    def generationUpdate(self):\r\n        self.members = deepcopy(self.nextGeneration)\r\n        # Reset the lists\r\n        self.elites = []\r\n        self.tournamentWinners = []\r\n        self.parentTournamentWinners = []\r\n        self.advancingTournamentWinners = []\r\n        self.children = []\r\n        self.nextGeneration = []\r\n\r\n    # Find the best Individual of the population for the current generation\r\n    def bestIndividual(self):\r\n        bestIndividual = max(self.members, key=attrgetter(\"fitness\"))\r\n        return bestIndividual\r\n\r\n    # Find the total fitness of the population for the current generation\r\n    def totalFitness(self):\r\n        total = 0\r\n        for member in self.members:\r\n            total += member.fitness\r\n        return total\r\n\r\n    def sortAllChromosomes(self):\r\n        for member in self.members:\r\n            member.chromosome.sort(key=lambda x: x.radius, reverse=True)\r\n" ]
[ [ "numpy.full", "numpy.add", "numpy.random.binomial", "numpy.ones", "numpy.subtract" ] ]
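The fitness and drawing routines in the record above reduce to one core operation: alpha-compositing each gene's filled circle onto the canvas and scoring by squared error. A minimal standalone sketch of that step, using the same cv2/numpy calls; the canvas size matches the 150 x 223 Mona Lisa crop used above, while the gene values and target are illustrative:

import cv2
import numpy as np

# Blank white canvas (223 rows x 150 columns, BGR), as in evaluateFitness()/drawImage().
img = np.full((223, 150, 3), (255, 255, 255), np.uint8)

# One illustrative gene: centre (x, y), radius, BGR colour, alpha in [0, 1].
coordinate, radius, color, alpha = (75, 100), 30, (20, 40, 200), 0.5

overlay = img.copy()
cv2.circle(overlay, coordinate, radius, color, thickness=-1)

# img <- overlay * alpha + img * (1 - alpha); the result is float64 from here on.
img = np.add(overlay * alpha, img * (1 - alpha))

# Fitness is the negative sum of squared differences against the target image.
target = np.full((223, 150, 3), (128, 128, 128), np.uint8)
fitness = -np.sum(np.square(np.subtract(target.astype(np.float64), img)))
print(fitness)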
lagrassa/pipescrew
[ "6ec162f2f1e48fa0a21a3dd708ef4875c4cbd148" ]
[ "planorparam/env/nav_env.py" ]
[ "import pygame\nfrom obstacle import line_world_obstacles, Obstacle, two_openings_obstacles, Quicksand\nimport numpy as np\nfrom gym.spaces import Box\nfrom Box2D import *\npygame.init()\n\n# convert to Box2D\n\n\"\"\"\n2D gridworld. obstacles are represented as Obstacle\nThey need to be boxes. Origin is the top right\n300x400 grid\n\"\"\"\nLIGHT_GREEN = (172, 255, 192)\nLIGHT_BLUE = (158, 242, 254)\n\n\nclass NavEnv:\n def __init__(self, start: object, goal: object, slip: object = True, visualize=False,\n observation_space=None,\n autoencoder = None, shift: object = None,\n obstacles: object = two_openings_obstacles(),\n gridsize: object = np.array([300, 400])) -> object:\n self.slip = slip\n self.gridsize = gridsize\n self.autoencoder = autoencoder\n self.m = 1\n self.steps_taken = 0\n self.visualize = visualize\n self.metadata = {}\n self.mu = -1\n self.ppm = 100\n self.view_wid = 4\n self.reward_range = Box(low=np.array([-100]),high=np.array([0]))\n self.joint = None\n self.obs_width = 12\n obs_size = (2 * self.obs_width + 1) ** 2\n self.observation_space = Box(low=np.zeros(obs_size), high=np.ones(obs_size) * 255) if observation_space is None else observation_space\n self.observation_space = Box(low=np.zeros(4), high=np.ones(4) * 100)\n self.action_space = Box(low=np.array([0, -2]), high=np.array([0, 2]))\n self.ice_boundary_x = 150. / self.ppm\n\n self.quicksand_ring = Quicksand((0.4, 0.4), 0.00)\n self.goal = goal\n self.start = start\n self.max_force = 4\n self.mass = 1\n self.world = b2World(gravity=(0, 0), doSleep=True)\n self.robot_width = 0.01\n self.agent = self.world.CreateDynamicBody(\n position=self.start,\n fixtures=b2FixtureDef(\n shape=b2PolygonShape(box=(0.01, 0.01)),\n density=self.mass/(0.01*0.01)\n )\n )\n self.ground = self.world.CreateStaticBody(\n shapes=create_box2d_box(self.gridsize[1] / self.ppm, self.gridsize[0] / self.ppm)\n )\n self.check_and_set_mu(dirty_bit=True)\n self.pos_history = []\n self.desired_pos_history = []\n self.path_color = (1, 0, 0, 1)\n self.obstacles = obstacles\n for obstacle in self.obstacles:\n self.world.CreateStaticBody(\n position=obstacle.origin,\n shapes=create_box2d_box(obstacle.y, obstacle.x)\n )\n\n self.dt = 0.05\n self.visualize = visualize\n self.belief_screen = pygame.display.set_mode(self.gridsize)\n self.belief_screen.fill((255, 255, 255))\n self.render(belief_only=True, flip=True)\n if self.visualize:\n self.setup_visuals()\n self.render()\n\n def get_pos(self):\n return np.array(self.agent.position.tuple)\n\n def setup_visuals(self):\n self.world_screen = pygame.display.set_mode(self.gridsize)\n self.world_screen.fill((255, 255, 255))\n pygame.display.flip()\n\n def get_vel(self):\n return np.array(self.agent.GetLinearVelocityFromLocalPoint((0, 0)))\n\n def close(self):\n pass\n\n def check_and_set_mu(self, dirty_bit=False):\n old_pos = np.array(self.agent.position.tuple)\n no_ice_mu = 0.6\n self.ext_mu = no_ice_mu\n ice_mu = 0.02\n quicksand_mu = 3\n if old_pos[0] < self.ice_boundary_x:\n if self.mu != no_ice_mu:\n self.mu = no_ice_mu\n dirty_bit = True\n else:\n if self.mu != ice_mu:\n self.mu = ice_mu\n dirty_bit = True\n if self.quicksand_ring.in_collision(old_pos):\n self.mu = quicksand_mu\n dirty_bit = True\n if dirty_bit:\n if self.joint is not None:\n self.world.DestroyJoint(self.joint)\n self.joint = self.world.CreateFrictionJoint(bodyA=self.agent, bodyB=self.ground, maxForce=self.m * self.mu)\n\n '''\n Rolls dynamical system 1 dt according to x'', y''\n implements openai gym interface\n '''\n def step(self, 
action, dt = None, rl=True):\n        old_pos = np.array(self.agent.position.tuple)\n        if dt is None: dt = self.dt\n        self.pos_history.append(old_pos)\n        self.check_and_set_mu()\n        if np.linalg.norm(action) > self.max_force:\n            action = (action/ np.linalg.norm(action))*self.max_force\n        move = action\n        self.agent.ApplyForceToCenter(force=move.tolist(), wake=True)\n        self.world.Step(dt, 6, 2)\n        self.world.ClearForces()\n        self.steps_taken += 1\n        if self.visualize:\n            self.render()\n        done = (self.goal_distance() <= 0.01) or (self.goal_distance() > 0.15)\n        rew_scale = 3\n        if rl:\n            return self.rl_obs(), -rew_scale * self.goal_distance(), done, {}\n        else:\n            return self.get_obs_low_dim(), -rew_scale * self.goal_distance(), done, {}\n\n    def rl_obs(self):\n        #return np.hstack([self.autoencoder(self.get_obs()), self.get_pos(), self.get_vel()]).flatten()\n        return np.hstack([self.autoencoder(self.get_obs()), self.get_vel()]).flatten()\n    def get_state(self):\n        return np.hstack([self.get_pos(), self.get_vel()])\n    def plot_path(self, path):\n        self.render(flip=False)\n        pygame.draw.lines(self.world_screen, (0, 0, 255), False, self.ppm * path, 6)\n        pygame.display.flip()\n\n    def goal_distance(self):\n        return np.linalg.norm(self.get_pos() - self.goal)\n\n    def goal_condition_met(self):\n        ret = self.goal_distance() < 0.025\n        return ret\n\n    def reset(self):\n        assert(self.autoencoder is not None)\n        autoencoder = self.autoencoder\n        self.__init__(start=self.start, obstacles=self.obstacles,\n                      goal=self.goal, gridsize=self.gridsize, autoencoder = autoencoder,\n                      observation_space=self.observation_space,\n                      visualize=self.visualize)\n        assert(self.autoencoder is not None)\n        return self.rl_obs()\n\n    def set_autoencoder(self, fn):\n        assert fn is not None\n        self.autoencoder = fn\n        obs_shape = self.rl_obs()\n        self.observation_space = Box(low = -np.inf*np.ones(obs_shape.shape), high = np.inf*np.ones(obs_shape.shape))\n\n    def get_obs_low_dim(self):\n        return np.array([self.agent.position, self.agent.GetLinearVelocityFromLocalPoint((0, 0))]).flatten()\n\n    \"\"\"\n    2D occupancy grid @param width units in pix away from the agent\n    with the agent centered. \n    \n    \"\"\"\n    def get_obs(self):\n        self.belief_screen.fill((255,255,255))\n        self.render(belief_only=True, flip=True)\n        grid = np.zeros((2 * self.obs_width + 1, 2 * self.obs_width + 1))\n        i_range = range(int(self.ppm*(self.agent.position[0])) - self.obs_width, int(self.ppm * (self.agent.position[0])) + self.obs_width + 1)\n        j_range = range(int(self.ppm*(self.agent.position[1])) - self.obs_width, int(self.ppm * (self.agent.position[1])) + self.obs_width + 1)\n        for grid_i, i in zip(range(len(i_range)), i_range):\n            for grid_j, j in zip(range(len(j_range)),j_range):\n                i = np.clip(i, 0,self.gridsize[0]-1)\n                j = np.clip(j, 0,self.gridsize[1]-1)\n                grid[grid_i,grid_j] = np.mean(self.belief_screen.get_at((i,j))[0:3])\n\n        return grid.T #n\n    # .flatten()\n    #scalar = 1.0\n    #return scalar * np.array([self.agent.position, self.agent.GetLinearVelocityFromLocalPoint((0, 0))]).flatten()\n\n    # returns image of area around agent\n\n    def render_start_goal(self):\n        start_rect = pygame.Rect(self.ppm * self.start[0] - self.view_wid / 2,\n                                 self.ppm * self.start[1] - self.view_wid / 2, self.view_wid, self.view_wid)\n        goal_rect = pygame.Rect(self.ppm * self.goal[0] - self.view_wid / 2.,\n                                self.ppm * self.goal[1] - self.view_wid / 2., self.view_wid, self.view_wid)\n        pygame.draw.rect(self.world_screen, (170, 0, 0, 1), start_rect, 0)\n        pygame.draw.rect(self.world_screen, (0, 170, 0, 1), goal_rect, 0)\n\n    def render(self, flip=True, belief_only=False):\n        # draw green for normal, light blue for the ice\n        if not belief_only:\n            green_rect = pygame.Rect(0, 0, self.gridsize[0], self.ice_boundary_x * self.ppm)\n            ice_rect = pygame.Rect(0, self.ice_boundary_x * self.ppm, self.gridsize[0], self.gridsize[1])\n            pygame.draw.rect(self.world_screen, LIGHT_GREEN, green_rect, 0)\n            pygame.draw.rect(self.world_screen, LIGHT_BLUE, ice_rect, 0)\n            self.render_start_goal()\n\n        robot_rect = pygame.Rect(self.ppm * self.agent.position[0] - self.view_wid / 2.,\n                                 self.ppm * self.agent.position[1] - self.view_wid / 2., self.view_wid, self.view_wid)\n        pygame.draw.rect(self.belief_screen, (10, 0, 200, 1), robot_rect, 0)\n        if not belief_only:\n            pygame.draw.rect(self.world_screen, (10, 0, 200, 1), robot_rect, 0)\n        for obs in self.obstacles:\n            obs.render(self.world_screen, ppm=self.ppm)\n        for obs in self.obstacles:\n            obs.render(self.belief_screen, ppm=self.ppm)\n        if not belief_only:\n            self.quicksand_ring.render(self.world_screen, ppm=self.ppm)\n        for i in range(len(self.pos_history) - 1):\n            pygame.draw.line(self.world_screen, self.path_color, (self.ppm * self.pos_history[i]).astype(np.int32),\n                             (self.ppm * self.pos_history[i + 1]).astype(np.int32), 8)\n        for i in range(len(self.desired_pos_history) - 1):\n            pygame.draw.line(self.world_screen, (0, 100, 0, 1),\n                             (self.ppm * self.desired_pos_history[i]).astype(np.int32),\n                             (self.ppm * self.desired_pos_history[i + 1]).astype(np.int32), 2)\n        # if np.random.randint(2) == 2:\n        if flip:\n            pygame.display.flip()\n\n    def collision_fn(self, pt):\n        ret = not (not np.array([obs.get_particles_in_collision(pt) for obs in self.obstacles]).any() and not (\n                pt > self.gridsize / self.ppm).any()) or (pt < 0).any()\n        return ret\n\n\ndef create_box2d_box(h, w):\n    return b2PolygonShape(vertices=[(0, 0), (0, h), (w, h), (w, 0), (0, 0)])\n\n\nif __name__ == \"__main__\":\n    ne = NavEnv(np.array([0., 0.]), np.array([0.5, 0.5]))\n    ne.render()\n    ne.collision_fn((2, 0.1 + 150 / ne.ppm))\n    for i in range(50):\n        ne.step(np.array([0., 3.]))  # step() expects a force vector\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.zeros", "numpy.ones", "numpy.clip" ] ]
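NavEnv.step() in the record above rescales any commanded force whose Euclidean norm exceeds max_force before handing it to Box2D; a minimal standalone sketch of just that clipping rule (values are illustrative):

import numpy as np

def clip_force(action, max_force=4.0):
    # Rescale so the norm never exceeds max_force, mirroring the check in step().
    norm = np.linalg.norm(action)
    if norm > max_force:
        action = (action / norm) * max_force
    return action

print(clip_force(np.array([3.0, 4.0])))  # norm 5 -> rescaled to norm 4
print(clip_force(np.array([1.0, 1.0])))  # norm < 4 -> returned unchanged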
HCY123902/Bert-BiLSTM-CRF-pytorch
[ "427b76a17ffac7980742af38785d81bf18661307" ]
[ "utils.py" ]
[ "# -*- encoding: utf-8 -*-\n'''\n@File : utils.py\n@Time : 2019/11/07 22:11:33\n@Author : Cao Shuai\n@Version : 1.0\n@Contact : [email protected]\n@License : (C)Copyright 2018-2019, MILAB_SCU\n@Desc : None\n'''\n\nimport os\nimport numpy as np\nimport logging\nimport torch\nfrom torch.utils.data import Dataset\nfrom typing import Tuple, List\nfrom pytorch_pretrained_bert import BertTokenizer\n\nlogger = logging.getLogger(__name__)\n\nbert_model = '/root/workspace/qa_project/chinese_L-12_H-768_A-12'\ntokenizer = BertTokenizer.from_pretrained(bert_model)\n# VOCAB = ('<PAD>', 'O', 'I-LOC', 'B-PER', 'I-PER', 'I-ORG', 'B-LOC', 'B-ORG')\nVOCAB = ('<PAD>', '[CLS]', '[SEP]', 'O', 'B-INF', 'I-INF', 'B-PAT', 'I-PAT', 'B-OPS', \n         'I-OPS', 'B-DSE', 'I-DSE', 'B-DRG', 'I-DRG', 'B-LAB', 'I-LAB')\ntag2idx = {tag: idx for idx, tag in enumerate(VOCAB)}\nidx2tag = {idx: tag for idx, tag in enumerate(VOCAB)}\nMAX_LEN = 256 - 2\n\n\nclass NerDataset(Dataset):\n    def __init__(self, f_path):\n        with open(f_path, 'r', encoding='utf-8') as fr:\n            entries = fr.read().strip().split('\\n\\n')\n        sents, tags_li = [], [] # list of lists\n        for entry in entries:\n            words = [line.split()[0] for line in entry.splitlines()]\n            tags = ([line.split()[-1] for line in entry.splitlines()])\n            if len(words) > MAX_LEN:\n                # split over-long sequences at sentence-final periods first\n                word, tag = [], []\n                for char, t in zip(words, tags):\n                    \n                    if char != '。':\n                        if char != '\ue236': # this character appears in the test set\n                            word.append(char)\n                            tag.append(t)\n                    else:\n                        sents.append([\"[CLS]\"] + word[:MAX_LEN] + [\"[SEP]\"])\n                        tags_li.append(['[CLS]'] + tag[:MAX_LEN] + ['[SEP]'])\n                        word, tag = [], [] \n                # flush the final remainder\n                if len(word):\n                    sents.append([\"[CLS]\"] + word[:MAX_LEN] + [\"[SEP]\"])\n                    tags_li.append(['[CLS]'] + tag[:MAX_LEN] + ['[SEP]'])\n                    word, tag = [], []\n            else:\n                sents.append([\"[CLS]\"] + words[:MAX_LEN] + [\"[SEP]\"])\n                tags_li.append(['[CLS]'] + tags[:MAX_LEN] + ['[SEP]'])\n        self.sents, self.tags_li = sents, tags_li\n    \n\n    def __getitem__(self, idx):\n        words, tags = self.sents[idx], self.tags_li[idx]\n        x, y = [], []\n        is_heads = []\n        for w, t in zip(words, tags):\n            tokens = tokenizer.tokenize(w) if w not in (\"[CLS]\", \"[SEP]\") else [w]\n            xx = tokenizer.convert_tokens_to_ids(tokens)\n            # assert len(tokens) == len(xx), f\"len(tokens)={len(tokens)}, len(xx)={len(xx)}\"\n\n            # unlike English, Chinese tokens are not split into several wordpieces\n            is_head = [1] + [0]*(len(tokens) - 1)\n            t = [t] + ['<PAD>'] * (len(tokens) - 1)\n            yy = [tag2idx[each] for each in t] # (T,)\n\n            x.extend(xx)\n            is_heads.extend(is_head)\n            y.extend(yy)\n        assert len(x)==len(y)==len(is_heads), f\"len(x)={len(x)}, len(y)={len(y)}, len(is_heads)={len(is_heads)}\"\n\n        # seqlen\n        seqlen = len(y)\n\n        # to string\n        words = \" \".join(words)\n        tags = \" \".join(tags)\n        return words, x, is_heads, tags, y, seqlen\n\n\n    def __len__(self):\n        return len(self.sents)\n\n\ndef pad(batch):\n    '''Pads to the longest sample'''\n    f = lambda x: [sample[x] for sample in batch]\n    words = f(0)\n    is_heads = f(2)\n    tags = f(3)\n    seqlens = f(-1)\n    maxlen = np.array(seqlens).max()\n\n    f = lambda x, seqlen: [sample[x] + [0] * (seqlen - len(sample[x])) for sample in batch] # 0: <pad>\n    x = f(1, maxlen)\n    y = f(-2, maxlen)\n\n\n    f = torch.LongTensor\n\n    return words, f(x), is_heads, tags, f(y), seqlens\n    \n" ]
[ [ "numpy.array" ] ]
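pad() in the record above is used as a DataLoader collate_fn: it right-pads token ids and label ids with 0 (<PAD>) to the longest sample in the batch. A minimal sketch on two hand-built samples shaped like NerDataset.__getitem__ output; the ids are made up:

import numpy as np
import torch

# Each sample: (words, token_ids, is_heads, tags, label_ids, seqlen).
batch = [
    ("[CLS] x [SEP]", [101, 5, 102], [1, 1, 1], "[CLS] O [SEP]", [1, 3, 2], 3),
    ("[CLS] y z [SEP]", [101, 6, 7, 102], [1, 1, 1, 1], "[CLS] O O [SEP]", [1, 3, 3, 2], 4),
]

f = lambda x: [sample[x] for sample in batch]
maxlen = np.array(f(-1)).max()

# Right-pad token ids (index 1) and label ids (index -2) to the batch maximum.
g = lambda x: [sample[x] + [0] * (maxlen - len(sample[x])) for sample in batch]
x, y = torch.LongTensor(g(1)), torch.LongTensor(g(-2))
print(x.shape, y.shape)  # torch.Size([2, 4]) torch.Size([2, 4])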
gengoai/mono-repo
[ "50e95c16579aaa8a4ee0776582964868b5625415" ]
[ "python/apollo/blocks/base.py" ]
[ "from typing import Any, Union, List, Dict, Iterable\n\nimport numpy as np\n\n\n# ---------------------------------------------------------------------------------------------------\n# Base Block Types\n# ---------------------------------------------------------------------------------------------------\n\n\nclass Block:\n    \"\"\"\n    A block can describe a single layer or multiple layers.\n    \"\"\"\n\n    def reset(self) -> None:\n        \"\"\"\n        Resets any data-specific parameters to the Block\n        :return: None\n        \"\"\"\n        raise NotImplementedError()\n\n    def __call__(self, *args, **kwargs):\n        raise NotImplementedError()\n\n\nclass ArchitectureBlock(Block):\n    \"\"\"\n    An Architectural Block typically defines a combination of Keras Layers defining the main architecture of a neural\n    model (i.e. all the stuff between the input and output). Architectural blocks act similarly to Keras layers in\n    functional mode, e.g. arch(Tensor). Thus, implementations should implement the **__call__** method taking a\n    tensorflow Tensor as input and returning a Tensor as output. Optionally, the implementation can override\n    **returns_sequences** to indicate that the architecture generates sequences as output.\n\n    \"\"\"\n\n    def reset(self) -> None:\n        pass\n\n    def __call__(self,\n                 input_layer: 'tensorflow.python.framework.ops.Tensor') -> 'tensorflow.python.framework.ops.Tensor':\n        raise NotImplementedError()\n\n    def returns_sequences(self) -> bool:\n        \"\"\"\n        Indicates whether the Architectural block returns a sequence as output or not.\n        :return: bool - True if returns sequence, False otherwise\n        \"\"\"\n        return False\n\n\nclass InputBlock(Block):\n    \"\"\"\n    An InputBlock defines input(s) to a model. The input block will define an Input layer and Model layer where the\n    input layer is a **keras.layer.Input** and a model layer can either be the same input or a keras layer(s) fed via\n    the input (e.g. embedding).\n\n    === Input and Model Layers ===\n\n    The base input block class takes care of remembering the input and model layers so that they are only created once.\n    Implementing classes will implement the **_create_input_layers** and **_create_model_layers** methods to create the\n    actual layers (these methods should call **input_layers** or **model_layers()** when needing to reference the input\n    or model layers).\n\n    === Building the Block ===\n\n    In addition to defining the layers, input blocks should reshape the data as needed. This is done by treating\n    the InputBlock as a callable on a Dataset, e.g. input_block(dataset). The input block may also use this call to\n    record information about the data (e.g. dimension information).\n\n    \"\"\"\n\n    def __init__(self):\n        super().__init__()\n        self.__input_layers = None\n        self.__model_layers = None\n\n    def reset(self) -> None:\n        self.__input_layers = None\n        self.__model_layers = None\n\n    def input_layers(self) -> Dict[str, 'keras.layers.Layer']:\n        \"\"\"\n        Returns the input layers as a dict where keys are the name of the observation and values are the associated\n        Keras Input. Child classes should not override this method, but instead override **_create_input_layers**.\n        :return: dict of observation names to Keras Input\n        \"\"\"\n        if self.__input_layers is None:\n            self.__input_layers = self._create_input_layers()\n        return self.__input_layers\n\n    def _create_input_layers(self) -> Dict[str, 'keras.layers.Layer']:\n        \"\"\"\n        Returns the input layers as a dict where keys are the name of the observation and values are the associated\n        Keras Input.\n        :return: dict of observation names to Keras Input\n        \"\"\"\n        raise NotImplementedError()\n\n    def model_layers(self) -> Dict[str, 'keras.layers.Layer']:\n        \"\"\"\n        Returns the model layers as a dict where keys are the name of the observation and values are the associated\n        Keras Layer. Child classes should not override this method, but instead override **_create_model_layers**\n        :return: dict of observation names to Keras Layer\n        \"\"\"\n        if self.__model_layers is None:\n            self.__model_layers = self._create_model_layers()\n        return self.__model_layers\n\n    def _create_model_layers(self) -> Dict[str, 'keras.layers.Layer']:\n        \"\"\"\n        Returns the model layers as a dict where keys are the name of the observation and values are the associated\n        Keras Layer.\n        :return: dict of observation names to Keras Layer\n        \"\"\"\n        raise NotImplementedError()\n\n    def observations(self) -> Iterable[str]:\n        \"\"\"\n        Returns the input observation names.\n        :return: the input observation names.\n        \"\"\"\n        raise NotImplementedError()\n\n    def __call__(self, data: 'apollo.DataSet'):\n        \"\"\"\n        Process the dataset to modify the input observation data and record any data-specific information needed\n        to create the input and model layers.\n        :param data: the Apollo DataSet to process\n        :return: None\n        \"\"\"\n        raise NotImplementedError()\n\n\nclass OutputBlock(Block):\n    \"\"\"\n    An OutputBlock defines output(s) of a model. The output block will define one or more output layers that will be\n    fed by a Keras Layer (typically generated from ArchitectureBlock)\n\n    === Output Layers ===\n\n    The base output block class takes care of remembering the output layers so that they are only created once.\n    Implementing classes will implement the **_create_output_layers** to create the actual layers.\n\n    === Building the Block ===\n\n    In addition to defining the layers, output blocks should reshape the data as needed. This is done by treating\n    the OutputBlock as a callable on a Dataset, e.g. output_block(dataset). The output block may also use this call to\n    record information about the data (e.g. dimension information).\n    \"\"\"\n\n    def __init__(self):\n        super().__init__()\n        self.__output_layers = None\n\n    def reset(self) -> None:\n        self.__output_layers = None\n\n    def output_layers(self) -> Dict[str, 'keras.layers.Layer']:\n        \"\"\"\n        Returns the output layers as a dict where keys are the name of the observation and values are the associated\n        Keras Layer. Child classes should not override this method, but instead override **_create_output_layers**.\n        :return: dict of observation names to Keras Layer\n        \"\"\"\n        if self.__output_layers is None:\n            self.__output_layers = self._create_output_layers()\n        return self.__output_layers\n\n    def losses(self) -> Dict[str, Any]:\n        \"\"\"\n        Generates a dictionary mapping output observation names to their associated loss functions used for training.\n        :return: a dictionary mapping output observation names to their associated loss functions used for training.\n        \"\"\"\n        raise NotImplementedError()\n\n    def metrics(self) -> Dict[str, Any]:\n        \"\"\"\n        Generates a dictionary mapping output observation names to their associated metrics used for training.\n        :return: a dictionary mapping output observation names to their associated metrics used for training.\n        \"\"\"\n        raise NotImplementedError()\n\n    def _create_output_layers(self) -> Dict[str, 'keras.layers.Layer']:\n        \"\"\"\n        Returns the output layers as a dict where keys are the name of the observation and values are the associated\n        Keras Layer.\n        :return: dict of observation names to Keras Layer\n        \"\"\"\n        raise NotImplementedError()\n\n    def observations(self) -> Iterable[str]:\n        \"\"\"\n        Returns the output observation names.\n        :return: the output observation names.\n        \"\"\"\n        raise NotImplementedError()\n\n    def __call__(self, data: 'apollo.DataSet'):\n        \"\"\"\n        Process the dataset to modify the output observation data and record any data-specific information needed\n        to create the output layers.\n        :param data: the Apollo DataSet to process\n        :return: None\n        \"\"\"\n        raise NotImplementedError()\n\n\n# ---------------------------------------------------------------------------------------------------\n# Helper Functions\n# ---------------------------------------------------------------------------------------------------\n\ndef pad_list(l: Union[List[Any], np.ndarray],\n             max_length: int,\n             pad_value: Any) -> Union[List[Any], np.ndarray]:\n    \"\"\"\n    Pads lists or numpy arrays to a given maximum length with a given value (padding is done at the end of the list /\n    array).\n    :param l: the list or numpy array to pad\n    :param max_length: the maximum sequence length\n    :param pad_value: the value to use for padding\n    :return: the padded list or array\n    \"\"\"\n    if isinstance(l, np.ndarray):\n        return _pad_ndarray(l, max_length, pad_value)\n\n    return _pad_list(l, max_length, pad_value)\n\n\ndef _pad_list(l: List[Any], max_length: int, pad_value: List[Any]) -> List[Any]:\n    l = l[:max_length]\n    while len(l) < max_length:\n        l.append(pad_value)\n    return l\n\n\ndef _pad_ndarray(n: np.ndarray, max_length: int, pad_value: np.ndarray) -> np.ndarray:\n    if n.shape[0] == max_length:\n        return n\n    elif n.shape[0] > max_length:\n        return n[:max_length]\n    # pad at the end of the first axis with pad_value, per the docstring contract\n    return np.pad(n, (0, max_length - n.shape[0]), 'constant', constant_values=pad_value)\n" ]
[ [ "numpy.pad" ] ]
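pad_list in the record above truncates to max_length and pads at the tail, dispatching on list vs ndarray input. A short usage sketch; the import path is inferred from the record's file_path and is an assumption:

import numpy as np
from apollo.blocks.base import pad_list  # assumed import path

print(pad_list([1, 2, 3], 5, 0))               # [1, 2, 3, 0, 0] - padded at the end
print(pad_list([1, 2, 3, 4, 5, 6], 4, 0))      # [1, 2, 3, 4]    - truncated
print(pad_list(np.array([1.0, 2.0]), 4, 0.0))  # [1. 2. 0. 0.]   - ndarray branch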
ManuelMBaumann/elastic_benchmarks
[ "bf231661e9dc79cec5fa1e9603de03d01edff2e3" ]
[ "marmousi2.py" ]
[ "from __future__ import print_function, division\n\nimport matplotlib\nmatplotlib.use('agg')\n\nfrom nutils import *\nimport scipy\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import sparse\nimport scipy.sparse.linalg\nfrom scipy.io import mmwrite\nimport obspy\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n\n@log.title\ndef makeplots( domain, geom, verts_x, verts_z, value, name, title, ndigits=0, index=None, imgtype=None):\n    points, colors = domain.elem_eval( [ geom, value ], ischeme='bezier3', separate=True )\n\n    with plot.PyPlot( name, ndigits=ndigits, index=index, imgtype=imgtype ) as plt:\n\n        plt.mesh( points, colors, triangulate='bezier', edgecolors='none' )\n        \n        plt.title(title)\n        \n        plt.xlabel('x [m]', fontsize=10)\n        plt.xticks([0, max(verts_x)/2.0, max(verts_x)], ['0', '2000', '4000'], fontsize=10)\n        plt.ylabel('z [m]', fontsize=10)\n        plt.yticks([max(verts_z), min(verts_z)], ['0', '1850'], fontsize=10)\n        plt.ylim\n        \n        ax = plt.gca()\n        divider = make_axes_locatable(ax)\n        cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n        cb = plt.colorbar(cax=cax)\n        cb.ax.tick_params(labelsize=10) \n    \n    \ndef makespyplot( matrix, name, imgtype=None ):\n    if not scipy.sparse.isspmatrix( matrix ):\n        matrix = matrix.toscipy()\n\n    with plot.PyPlot( name, ndigits=0, imgtype=imgtype ) as plt:\n        plt.spy( matrix, markersize=0.8, color='black')\n        plt.title( name+', nnz = '+str(matrix.nnz) )\n\n\ndef makevtk(domain, geom, rho, lam, mu, cp, cs, sol, freq, vec_basis, name):\n    Nom = sol.shape[0]\n    vtk_geom, vtk_rho, vtk_lam, vtk_mu, vtk_cp, vtk_cs = domain.simplex.elem_eval( [ geom, rho, lam, mu, cp, cs ], ischeme='vtk', separate=True )\n    with plot.VTKFile( name ) as vtk:\n        vtk.unstructuredgrid( vtk_geom )\n        vtk.pointdataarray( 'rho', vtk_rho )\n        vtk.pointdataarray( 'lambda', vtk_lam )\n        vtk.pointdataarray( 'mu', vtk_mu )\n        vtk.pointdataarray( 'cp', vtk_cp )\n        vtk.pointdataarray( 'cs', vtk_cs )\n        for i in range(0,Nom):\n            disp = vec_basis.dot( sol[i,:] ).real\n            vtk_disp = domain.simplex.elem_eval( disp, ischeme='vtk', separate=True )\n            vtk.pointdataarray( 'disp_'+str(int(freq[i])), vtk_disp )\n    \n\ndef point_eval(func, domain, geom, point):\n    domain = domain[tuple(slice(0, p) if p > 0 else slice(None) for p in point)]\n    for p in point:\n        domain = domain.boundary['right' if p > 0 else 'left']\n    return numpy.asarray(domain.integrate( func, geometry=geom, ischeme='gauss2' ).toscipy().todense())\n\n\ndef elast_mat(rho, cp, cs, lam, mu, ndims, nx, ny, nz, vec_basis, domain, geom):\n    # define PDE\n    stress = lambda u: lam*u.div(geom)[:,_,_]*function.eye(ndims) + 2.0*mu*u.symgrad(geom)\n    elasticity = function.outer( stress(vec_basis), vec_basis.grad(geom) ).sum([2,3])\n    \n    w_mass = lambda u: rho*u\n    mass = function.outer( w_mass(vec_basis), vec_basis ).sum(-1)\n\n    # define BC\n    n = geom.normal()\n    t = np.eye(ndims)\n    t = t-(t*n[_,:]).sum(1)\n    B_bc = cp*n[:,_]*n[_,:]+cs*(t[:,:,_]*t[:,_,:]).sum(0)\n\n    bc_fun = lambda u: rho*(B_bc*u[:,_,:]).sum(-1)\n    sommerfeld = function.outer( bc_fun(vec_basis), vec_basis ).sum(-1)\n\n    # build matrices\n    K, M = domain.integrate( [elasticity, mass], geometry=geom, ischeme='gauss2' )\n    C = domain.boundary['left,right,bottom'].integrate( sommerfeld, geometry=geom, ischeme='gauss2' )\n    \n    # build RHS\n    source_position = nx//3, nz\n    rhs = point_eval(vec_basis, domain, geom, source_position)[:,-1]\n    #rhs = rhs+point_eval(vec_basis, domain, geom, source_position)[:,0]\n    \n    return K, C, M, rhs \n\ndef shrink_marmousi(segy1, segy2, segy3, n_course, x0, xe, ze):\n    # original problem dimensions\n    Lx = 17000.0\n    Lz = 3500.0\n    nx = len(segy1.traces)\n    nz = len(segy1.traces[0].data)\n    dx = Lx/(nx-1)\n    dz = Lz/(nz-1) \n\n    # cast segy data into arrays\n    rho_coeffs = np.zeros((nz, nx))\n    cp_coeffs = np.zeros((nz, nx))\n    cs_coeffs = np.zeros((nz, nx))\n\n    for i, tr in enumerate(segy1.traces):\n        rho_coeffs[:,i] = tr.data\n    for i, tr in enumerate(segy2.traces):\n        cp_coeffs[:,i] = tr.data\n    for i, tr in enumerate(segy3.traces):\n        cs_coeffs[:,i] = tr.data\n    rho_coeffs *= 1000.0\n\n    # Shrink computational domain\n    z0_ind = 370 # delete water layer\n    ze_ind = 0\n    x0_ind = 0\n    xe_ind = 0\n    \n    z0 = z0_ind*dz \n    ze = ze + z0\n    \n    for i in range(z0_ind,nz):\n        if (i*dz>ze):\n            ze_ind = i\n            break\n\n    for i in range(0,nx):\n        if (i*dx>x0):\n            x0_ind = i\n            break\n    \n    for i in range(x0_ind,nx):\n        if (i*dx>xe):\n            xe_ind = i\n            break\n    \n    rho_coeffs = rho_coeffs[z0_ind:ze_ind,x0_ind:xe_ind]\n    cp_coeffs = cp_coeffs[z0_ind:ze_ind,x0_ind:xe_ind]\n    cs_coeffs = cs_coeffs[z0_ind:ze_ind,x0_ind:xe_ind] \n    \n    rho_coeffs = rho_coeffs[::-n_course, ::n_course]\n    cp_coeffs = cp_coeffs[::-n_course, ::n_course]\n    cs_coeffs = cs_coeffs[::-n_course, ::n_course]\n    \n    nx = rho_coeffs.shape[1]\n    nz = rho_coeffs.shape[0]\n    Lx = xe-x0\n    Lz = ze-z0\n    \n    # Coarsen the data\n    dx *= n_course\n    dz *= n_course\n    verts_x = np.linspace( 0.0, Lx+dz, nx )\n    verts_z = np.linspace( -Lz-z0, -z0+dz, nz )\n    \n    rho_coeffs = rho_coeffs.T.ravel()\n    cp_coeffs = cp_coeffs.T.ravel()\n    cs_coeffs = cs_coeffs.T.ravel()\n\n    return verts_x, verts_z, rho_coeffs, cp_coeffs, cs_coeffs\n    \n\ndef main( n_course=16,    # coarsening of original problem\n          freq=[4.0],     # frequency in Hz \n          plots=True,     # plot of parameters and num. solution\n          spy=True,       # provide spy plot of matrices\n          storing=False,  # storing=True saves matrices in matrix market format (no solve) \n          degree=1 ):     # degree of FEM splines\n\n    \n    ndims = 2\n    freq = np.array(freq)\n    om = 2.0*np.pi*freq\n    Nom = len(om)\n\n    print(\"Reading Marmousi-II data...\\n\")\n    if obspy.__version__<'1.0':\n        segy1 = obspy.segy.core.readSEGY(\"data/MODEL_DENSITY_1.25m.segy\")\n        segy2 = obspy.segy.core.readSEGY(\"data/MODEL_P-WAVE_VELOCITY_1.25m.segy\")\n        segy3 = obspy.segy.core.readSEGY(\"data/MODEL_S-WAVE_VELOCITY_1.25m.segy\")\n    else:\n        segy1 = obspy.io.segy.core._read_segy(\"data/MODEL_DENSITY_1.25m.segy\")\n        segy2 = obspy.io.segy.core._read_segy(\"data/MODEL_P-WAVE_VELOCITY_1.25m.segy\")\n        segy3 = obspy.io.segy.core._read_segy(\"data/MODEL_S-WAVE_VELOCITY_1.25m.segy\")\n\n\n    # shrink domain to [0,4000] x [0,1850]\n    [verts_x, verts_z, rho_coeffs, cp_coeffs, cs_coeffs] = shrink_marmousi(segy1, segy2, segy3, n_course, 7500, 11500, 1850 )\n\n    mu_coeffs = cs_coeffs**2 * rho_coeffs\n    lam_coeffs = rho_coeffs * (cp_coeffs**2 - 2.0*cs_coeffs**2) \n\n    nx = len(verts_x)\n    nz = len(verts_z)\n    dx = verts_x[1]-verts_x[0]\n    dz = verts_z[1]-verts_z[0]\n\n    # define Cartesian grid\n    domain, geom = mesh.rectilinear( [verts_x, verts_z] ) \n    vec_basis = domain.splinefunc( degree=degree ).vector( ndims )\n    scal_basis = domain.splinefunc( degree=1 )\n\n    # problem summary\n    print( '--- MARMOUSI-II PROBLEM ---' )\n    print( 'problem size : '+str(nx-1+degree)+' x '+str(nz-1+degree) )\n    print( '# dofs       : '+str(len(vec_basis)) )\n    print( 'grid size    : '+str(round(dx,1))+' x '+str(round(dz,1)) )\n    ppw = 20.0 \n    print( 'max. frequency : '+str( round(min(np.amin(cp_coeffs),np.amin(cs_coeffs))/(ppw*max(dx,dz)),1) ) )\n    print( '----------------------------\\n' )\n    \n    # Create discretization matrices using nutils\n    mu = scal_basis.dot(mu_coeffs)\n    lam = scal_basis.dot(lam_coeffs)\n    rho = scal_basis.dot(rho_coeffs)\n    cp = scal_basis.dot(cp_coeffs)\n    cs = scal_basis.dot(cs_coeffs)\n    \n    K, C, M, rhs = elast_mat(rho, cp, cs, lam, mu, ndims, nx, 1, nz, vec_basis, domain, geom)\n\n    if storing:\n        mmwrite('matrix_io/K.mtx', K.toscipy())\n        mmwrite('matrix_io/C.mtx', C.toscipy())\n        mmwrite('matrix_io/M.mtx', M.toscipy())\n    else:\n        print('Use scipy sparse linear solver...')\n        sol = np.zeros((Nom, len(vec_basis)), dtype=complex)\n        for k in range(0,Nom):\n            matrix = K + 1j*om[k]*C - om[k]**2*M \n            sol[k,:] = scipy.sparse.linalg.spsolve( matrix.toscipy().tocsc(), rhs )\n            if spy:\n                makespyplot( matrix, 'spy_plot' )\n    \n    if plots:\n        makeplots( domain, geom, verts_x, verts_z, rho, 'rho', 'rho [kg/m**3]')\n        #makeplots( domain, geom, verts_x, verts_z, cp, 'cp', 'c_p [m/s]')\n        #makeplots( domain, geom, verts_x, verts_z, cs, 'cs', 'c_s [m/s]')\n        \n        for k in range(0,Nom):\n            disp = vec_basis.dot( sol[k,:] ) # FEM summation\n            disp_x = disp[0].real  # Plot Re(u_x) \n            disp_z = disp[-1].real # Plot Re(u_z)\n            makeplots( domain, geom, verts_x, verts_z, disp_x, 'disp_x'+str(k), 'u_x at {} Hz'.format(freq[k]) )\n            makeplots( domain, geom, verts_x, verts_z, disp_z, 'disp_z'+str(k), 'u_z at {} Hz'.format(freq[k]) )\n        makevtk(domain, geom, rho, lam, mu, cp, cs, sol, freq, vec_basis, 'marmousi2')\n    \nutil.run( main )\n" ]
[ [ "matplotlib.use", "scipy.sparse.isspmatrix", "numpy.array", "matplotlib.pyplot.colorbar", "numpy.zeros", "matplotlib.pyplot.spy", "matplotlib.pyplot.gca", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "numpy.eye", "matplotlib.pyplot.ylabel", "numpy.amin", "numpy.linspace", "matplotlib.pyplot.mesh" ] ]
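The solve loop in main() above assembles the shifted matrix K + 1j*om*C - om**2*M once per frequency and hands it to a sparse direct solver. A minimal sketch of the same shift-and-solve with small random scipy.sparse stand-ins for the FEM matrices (sizes and values are illustrative):

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

n = 50
# Stand-ins for the stiffness (K), Sommerfeld boundary (C) and mass (M) matrices.
K = sp.random(n, n, density=0.1, random_state=0) + sp.eye(n) * n
C = sp.eye(n) * 0.1
M = sp.eye(n)
b = np.random.default_rng(0).standard_normal(n)

for freq in [2.0, 4.0]:
    om = 2.0 * np.pi * freq
    A = (K + 1j * om * C - om**2 * M).tocsc()  # same frequency shift as in main()
    u = spla.spsolve(A, b)
    print(freq, np.linalg.norm(A @ u - b))     # residual should be ~ 0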
gianscarpe/vipm-project
[ "2d9384173a8741e4e56439a06f2fd837b5e1ce4e" ]
[ "matcher/train_classification_model.py" ]
[ "import torch\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import transforms\nfrom matcher.models import ClassificationNet\nfrom matcher.dataset import ClassificationDataset\nfrom torch.utils.data import DataLoader\nfrom torch.nn import CrossEntropyLoss\nimport time\nimport os\nimport numpy as np\n\n\ndef main():\n    config = {\n        \"save_every_freq\": False,\n        \"save_frequency\": 2,\n        \"save_best\": True,\n        \"labels\": [\"masterCategory\", \"subCategory\"],\n        \"model_name\": \"resnet18\",\n        \"batch_size\": 16,\n        \"lr\": 0.001,\n        \"num_epochs\": 50,\n        \"weight_decay\": 0.0001,\n        \"exp_base_dir\": \"data/exps\",\n        \"image_size\": [224, 224],\n        \"load_path\": None,\n    }\n\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    start_epoch = 1\n    normalize = transforms.Normalize(\n        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n    )\n\n    train_dataset = ClassificationDataset(\n        \"./data/images/\",\n        \"./data/small_train.csv\",\n        distinguish_class=config[\"labels\"],\n        load_path=None,\n        image_size=config[\"image_size\"],\n        transform=normalize,\n    )\n    train_loader = DataLoader(\n        train_dataset, batch_size=config[\"batch_size\"], shuffle=True\n    )\n\n    val_loader = DataLoader(\n        ClassificationDataset(\n            \"./data/images\",\n            \"./data/small_val.csv\",\n            distinguish_class=config[\"labels\"],\n            image_size=config[\"image_size\"],\n            transform=normalize,\n            label_encoder=train_dataset.les,\n        ),\n        batch_size=config[\"batch_size\"],\n        shuffle=True,\n    )\n\n    model = ClassificationNet(\n        image_size=config[\"image_size\"],\n        n_classes=train_loader.dataset.n_classes,\n        name=config[\"model_name\"],\n    ).to(device)\n\n    if config[\"load_path\"]:\n        print(\"Loading and evaluating model\")\n        start_epoch = int(config[\"load_path\"][-6:-3])\n        model = torch.load(config[\"load_path\"])\n        test(model, device, val_loader, len(config[\"labels\"]))\n\n    optimizer = optim.Adam(\n        model.parameters(), lr=config[\"lr\"], weight_decay=config[\"weight_decay\"]\n    )\n\n    best_accu = 0.0\n    for epoch in range(start_epoch, config[\"num_epochs\"] + 1):\n        train(\n            model,\n            device,\n            train_loader,\n            epoch,\n            optimizer,\n            config[\"batch_size\"],\n            len(config[\"labels\"]),\n        )\n        accuracies = test(model, device, val_loader, len(config[\"labels\"]))\n        if config[\"save_every_freq\"]:\n            if epoch % config[\"save_frequency\"] == 0:\n                torch.save(\n                    model,\n                    os.path.join(\n                        config[\"exp_base_dir\"],\n                        config[\"model_name\"] + \"_{:03}.pt\".format(epoch),\n                    ),\n                )\n        if config[\"save_best\"]:\n            accu = sum(accuracies) / len(config[\"labels\"])\n            if accu > best_accu:\n                print(\"* HOLY COW! SAVE BEST\")\n                best_accu = accu\n                torch.save(\n                    model,\n                    os.path.join(\n                        config[\"exp_base_dir\"], config[\"model_name\"] + \"_best.pt\"\n                    ),\n                )\n\n\ndef train(model, device, train_loader, epoch, optimizer, batch_size, n_label=3):\n    model.train()\n    t0 = time.time()\n    training_loss = []\n    criterions = [CrossEntropyLoss() for i in range(n_label)]\n    for batch_idx, (data, target) in enumerate(train_loader):\n        for i in range(len(data)):\n            data[i] = data[i].to(device)\n\n        optimizer.zero_grad()\n        output = model(data)\n        target = target.long()\n        loss = 0\n        for i in range(n_label):\n            loss = loss + criterions[i](torch.squeeze(output[i]), target[:, i])\n\n        loss.backward()\n        # loss_items = []\n        # for i in range(n_label):\n        #     loss_items.append(loss[i].item())\n        #     loss[i].backward()\n\n        training_loss.append(loss.item())\n        optimizer.step()\n        if batch_idx % 10 == 0:\n            print(\n                \"Train Epoch: {} [{}/{} ({:.0f}%)] \\tBatch Loss: ({})\".format(\n                    epoch,\n                    batch_idx * batch_size,\n                    len(train_loader.dataset),\n                    100.0 * batch_idx * batch_size / len(train_loader.dataset),\n                    \"{:.6f}\".format(loss.item()),\n                )\n            )\n    print(\n        \"Train Epoch: {}\\t time:{:.3f}s \\tMeanLoss: ({})\".format(\n            epoch, (time.time() - t0), \"{:.6f}\".format(np.average(training_loss))\n        )\n    )\n\n\ndef test(model, device, test_loader, n_label=3):\n    model.eval()\n\n    with torch.no_grad():\n        accurate_labels = 0\n        all_labels = 0\n        val_loss = []\n        accurate_labels = [0, 0, 0]\n        accuracies = [0, 0, 0]\n        for batch_idx, (data, target) in enumerate(test_loader):\n            for i in range(len(data)):\n                data[i] = data[i].to(device)\n\n            output = model(data)\n            target = target.long()\n            val_loss.append(\n                [\n                    F.cross_entropy(torch.squeeze(output[i]), target[:, i])\n                    for i in range(n_label)\n                ]\n            )\n\n            for i in range(n_label):\n                accurate_labels[i] += torch.sum(\n                    (torch.argmax(F.softmax(output[i], dim=1), dim=1) == target[:, i])\n                )\n\n            all_labels += len(target)\n\n        for i in range(n_label):\n            accuracies[i] = 100.0 * accurate_labels[i].item() / all_labels\n        print(\n            \"Test accuracy: ({})/{} ({}), Loss: ({})\".format(\n                \", \".join([str(accurate_labels[i].item()) for i in range(n_label)]),\n                all_labels,\n                \", \".join([\"{:.3f}%\".format(accuracies[i]) for i in range(n_label)]),\n                \", \".join(\n                    \"{:.6f}\".format(loss)\n                    for loss in torch.mean(torch.tensor(val_loss), dim=0).data.tolist()\n                ),\n            )\n        )\n    return accuracies\n\n\nif __name__ == \"__main__\":\n    main()\n" ]
[ [ "torch.no_grad", "torch.squeeze", "torch.cuda.is_available", "torch.tensor", "torch.utils.data.DataLoader", "torch.load", "torch.nn.functional.softmax", "numpy.average", "torch.nn.CrossEntropyLoss" ] ]
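train() in the record above forms the total loss as a sum of one CrossEntropyLoss per label head. A minimal sketch of that multi-head loss on dummy logits; two heads with 4 and 3 classes, all shapes illustrative:

import torch
from torch.nn import CrossEntropyLoss

batch = 8
# Dummy two-head classifier outputs, e.g. masterCategory (4-way) and subCategory (3-way).
output = [torch.randn(batch, 4), torch.randn(batch, 3)]
target = torch.stack([torch.randint(0, 4, (batch,)), torch.randint(0, 3, (batch,))], dim=1)

criterions = [CrossEntropyLoss() for _ in range(2)]
loss = 0
for i in range(2):
    loss = loss + criterions[i](output[i], target[:, i])  # one term per head
print(loss.item())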
CHEN-yongquan/Asteroid_CPO_seeker
[ "b180d08228e51a11b87d0a579023f7f09e332a7c" ]
[ "RL_lib/Policies/AWR/policy_awr2.py" ]
[ "\n\"\"\"\n\n Implements AWR PPO variants\n \n\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport rl_utils\nimport advantage_utils\nfrom time import time\nimport sklearn.utils\n \nclass Policy(object):\n \"\"\" NN-based policy approximation \"\"\"\n def __init__(self, net, pd, adv_func=None, kl_limit=0.1, \n epochs=20, init_func=rl_utils.default_init, \n test_mode=False, shuffle=True, shuffle_by_chunks=False, max_grad_norm=999, \n obs_key='observes', scale_vector_obs=True, scale_image_obs=False, verbose=False, rollout_limit=1):\n\n print('AWR Policy: ')\n net.apply(init_func)\n\n self.net = net\n self.pd = pd\n\n if adv_func is None:\n self.adv_func = advantage_utils.Adv_relu()\n else:\n self.adv_func = adv_func\n\n self.test_mode = test_mode\n self.shuffle = shuffle\n self.shuffle_by_chunks = shuffle_by_chunks\n\n if self.net.recurrent_steps > 1 and not self.shuffle_by_chunks:\n print('Policy: recurrent steps > 1, disabling shuffle')\n self.shuffle = False\n self.kl_limit = kl_limit\n self.epochs = epochs \n self.max_grad_norm = max_grad_norm\n self.obs_key = obs_key\n self.grad_monitor = rl_utils.Grad_monitor('Policy', net)\n self.vector_scaler = rl_utils.Scaler(net.obs_dim)\n self.image_scaler = rl_utils.Image_scaler(net.obs_dim)\n self.scale_image_obs = scale_image_obs\n self.scale_vector_obs = scale_vector_obs\n\n self.verbose = verbose \n self.rollout_limit = rollout_limit\n self.rollout_list = []\n\n if self.net.recurrent_steps > 1:\n self.use_padding = True\n else:\n self.use_padding = False\n\n self.calc_loss = self.calc_loss1\n self.optimizer = torch.optim.Adam(self.net.parameters(), self.net.lr)\n\n print('\\tTest Mode: ',self.test_mode)\n print('\\tShuffle : ',self.shuffle)\n print('\\tShuffle by Chunks: ',self.shuffle_by_chunks)\n print('\\tMax Grad Norm: ',self.max_grad_norm)\n print('\\tRecurrent Steps: ',self.net.recurrent_steps)\n print('\\tRollout Limit: ',self.rollout_limit)\n print('\\tAdvantage Func: ',self.adv_func)\n print('\\tAdvantage Norm: ',self.adv_func.normalizer)\n print('\\tPD: ',self.pd)\n\n def save_params(self,fname):\n fname = 'policy_' + fname + '.pt'\n param_dict = {}\n param_dict['image_scaler_u'] = self.image_scaler.means\n param_dict['image_scaler_var'] = self.image_scaler.vars\n param_dict['vector_scaler_u'] = self.vector_scaler.means\n param_dict['vector_scaler_var'] = self.vector_scaler.vars\n param_dict['net_state'] = self.net.state_dict()\n torch.save(param_dict, fname)\n\n def load_params(self,fname):\n fname = 'policy_' + fname + '.pt'\n param_dict = torch.load(fname)\n self.image_scaler.means = param_dict['image_scaler_u']\n self.image_scaler.vars = param_dict['image_scaler_var']\n self.vector_scaler.means = param_dict['vector_scaler_u']\n self.vector_scaler.vars = param_dict['vector_scaler_var']\n self.net.load_state_dict(param_dict['net_state'])\n\n\n def sample(self, image_obs, vector_obs, state):\n\n if self.scale_image_obs:\n image_obs = self.image_scaler.apply(image_obs)\n if self.scale_vector_obs:\n vector_obs = self.vector_scaler.apply(vector_obs)\n logits, log_vars, state = self.net.forward(image_obs, vector_obs, state, np.ones(1), np.zeros(1), return_tensor=False)\n action, env_action = self.pd.sample(logits, log_vars, self.test_mode)\n return action, env_action, state \n\n def update_scalers(self, rollouts):\n self.image_scaler.update(rollouts['image_observes'])\n self.vector_scaler.update(rollouts['vector_observes'])\n\n \n def update(self, rollouts, logger):\n if 
len(self.rollout_list) == self.rollout_limit:\n del self.rollout_list[0]\n self.rollout_list.append(rollouts)\n keys = self.rollout_list[0].keys()\n comb_rollouts = {}\n for k in keys:\n comb_rollouts[k] = np.concatenate([r[k] for r in self.rollout_list])\n self.update1(comb_rollouts, logger)\n \n def update1(self, rollouts, logger):\n \n if self.use_padding:\n key = 'padded_'\n else:\n key = '' \n image_observes = rollouts[key + 'image_observes']\n vector_observes = rollouts[key + 'vector_observes']\n\n actions = rollouts[key + 'actions']\n states = rollouts[key + 'policy_states']\n vtarg = rollouts[key + 'disc_sum_rew']\n vpred = rollouts[key + 'vpreds']\n masks = rollouts[key + 'masks']\n flags = rollouts[key + 'flags']\n\n if self.scale_vector_obs:\n vector_observes = self.vector_scaler.apply(vector_observes)\n if self.scale_image_obs:\n image_observes = self.image_scaler.apply(image_observes)\n \n vtarg_unp = rollouts['disc_sum_rew']\n vpred_unp = rollouts['vpreds']\n\n actions_pt = self.pd.from_numpy(actions)\n\n with torch.no_grad():\n old_logits_pt, log_vars_pt, _ = self.net.forward(image_observes, vector_observes, states, masks, flags)\n\n old_logp_pt = self.pd.logp(actions_pt, old_logits_pt, log_vars_pt) \n old_logp = old_logp_pt.detach().numpy() \n loss, kl, entropy = 0, 0, 0\n\n advantages_unp = vtarg_unp - vpred_unp\n advantages = vtarg - vpred\n new_masks = masks * (advantages > 0)\n idx = np.where(advantages_unp > 0)[0]\n print('ADVA: ', idx.shape, advantages_unp.shape, idx.shape[0] / advantages_unp.shape[0])\n print('ADV1: ', np.median(advantages), np.mean(advantages), np.std(advantages), np.max(advantages), np.min(advantages))\n advantages = self.adv_func.calc_adv(advantages_unp, advantages)\n #print(advantages)\n foo = rl_utils.unpad(advantages,masks)\n idx = np.where(foo > 0)[0]\n print('ADVB: ', idx.shape, foo.shape, idx.shape[0] / foo.shape[0])\n print('ADV2: ', np.median(foo), np.mean(foo), np.std(foo), np.max(foo), np.min(foo))\n\n t0 = time()\n for e in range(self.epochs):\n\n if self.shuffle:\n if self.shuffle_by_chunks:\n image_observes, vector_observes, actions, advantages, states, masks, new_masks, flags, old_logp = \\\n rl_utils.shuffle_list_by_chunks([image_observes, vector_observes, actions, advantages, states, masks, new_masks, flags, old_logp], self.net.recurrent_steps)\n else:\n image_observes, vector_observes, actions, advantages, states, masks, new_masks, flags, old_logp = \\\n sklearn.utils.shuffle(image_observes, vector_observes, actions, advantages, states, masks, new_masks, flags, old_logp)\n\n actions_pt = self.pd.from_numpy(actions)\n\n self.optimizer.zero_grad()\n logits_pt, log_vars_pt, _ = self.net.forward(image_observes, vector_observes, states, masks, flags, unroll=True)\n logp_pt = self.pd.logp(actions_pt, logits_pt, log_vars_pt)\n loss = self.calc_loss(logp_pt, torch.from_numpy(advantages).float(), new_masks)\n loss.backward()\n if self.max_grad_norm is not None:\n ng = nn.utils.clip_grad_norm_(self.net.parameters(), self.max_grad_norm)\n else:\n ng = None\n self.optimizer.step()\n self.grad_monitor.add(ng)\n\n kl = self.pd.kl(old_logp, logp_pt.detach().numpy(), log_vars_pt, masks)\n entropy = self.pd.entropy(logp_pt.detach().numpy(), log_vars_pt, masks) \n if kl > self.kl_limit: \n print(' *** BROKE *** ',e, kl)\n break \n\n t1 = time()\n \n\n #self.kl_stat = kl\n #self.entropy_stat = entropy\n self.grad_monitor.show()\n\n if self.verbose:\n print('POLICY ROLLOUT LIST: ',len(self.rollout_list))\n print('POLICY Update: 
',t1-t0,vector_observes.shape)\n            print('u_adv: ',np.mean(advantages))\n            print('std_adv: ',np.std(advantages))\n\n        logger.log({'PolicyLoss': loss,\n                    'Policy_SD' : np.mean(self.pd.sd(logits_pt, log_vars_pt)), \n                    'Policy_Entropy': entropy,\n                    'Policy_KL': kl})\n\n    def calc_loss0(self,logp, advantages, masks):\n        if self.use_padding:\n            logp, advantages = rl_utils.unpad_list([logp, advantages], masks)\n        loss = -torch.mean(advantages * logp)\n        return loss\n\n    def calc_loss1(self,logp, advantages, masks):\n        if self.use_padding:\n            logp, advantages = rl_utils.unpad_list([logp, advantages], masks)\n        loss = -torch.mean(advantages * logp) \n        return loss\n\n\n" ]
[ [ "numpy.concatenate", "numpy.max", "numpy.zeros", "numpy.median", "torch.save", "numpy.ones", "torch.no_grad", "numpy.min", "numpy.mean", "torch.from_numpy", "numpy.where", "numpy.std", "torch.load", "torch.mean" ] ]
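calc_loss1 in the record above is the advantage-weighted log-likelihood objective, applied after masking to positive advantages. A minimal tensor-level sketch; the relu-style weighting stands in for the unshown advantage_utils.Adv_relu and is an assumption:

import torch

logp = torch.randn(16)        # log pi(a|s) for a batch of transitions
advantages = torch.randn(16)  # disc_sum_rew - vpreds

# Keep only positive-advantage samples (assumed relu-style adv_func), then take
# the negative advantage-weighted mean log-likelihood, as in calc_loss1.
weights = torch.clamp(advantages, min=0.0)
loss = -torch.mean(weights * logp)
print(loss.item())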
fyabc/Toys
[ "3b2abd4fb2be986a8be6ed83897f7ba7fad6929e" ]
[ "LearnTensorFlow/alex_net.py" ]
[ "#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\n\nfrom libs.models.cnn.alex_net import inference\nfrom libs.utils.basic import time_tensorflow_run\nfrom libs.utils.constants import floatX\n\n__author__ = 'fyabc'\n\n\ndef run_benchmark():\n batch_size = 32\n num_batches = 100\n\n options = {\n 'lrn': False,\n }\n\n with tf.Graph().as_default():\n image_size = 224\n images = tf.Variable(\n tf.random_normal(\n [batch_size, image_size, image_size, 3],\n dtype=floatX,\n stddev=1e-1,\n )\n )\n\n fc3, parameters = inference(images, options)\n\n init = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n\n # Forward benchmark\n time_tensorflow_run(sess, fc3, 'Forward', num_batches)\n\n # Backward benchmark\n loss = tf.nn.l2_loss(fc3)\n grad = tf.gradients(loss, parameters)\n time_tensorflow_run(sess, grad, 'Forward-backward', num_batches)\n\n\ndef main():\n run_benchmark()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.Graph", "tensorflow.Session", "tensorflow.gradients", "tensorflow.nn.l2_loss", "tensorflow.global_variables_initializer", "tensorflow.random_normal" ] ]
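time_tensorflow_run is imported from the repo's libs.utils.basic, whose body is not part of this record; the following is a plausible stand-in matching the call sites above (session, target op, label, batch count) — its internals are an assumption:

import time
import numpy as np

def time_tensorflow_run(session, target, info_string, num_batches, num_warmup=10):
    # Hypothetical stand-in: run a few warm-up steps untimed, then report
    # mean and standard deviation of the per-batch wall time.
    durations = []
    for i in range(num_warmup + num_batches):
        start = time.time()
        session.run(target)
        if i >= num_warmup:
            durations.append(time.time() - start)
    durations = np.array(durations)
    print('%s: %.3f +/- %.3f sec / batch' % (info_string, durations.mean(), durations.std()))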
micahkwok/pypkgs_mk
[ "1dd6e846385c185055612b83148db5077cf3268f" ]
[ "tests/test_pypkgs_mk.py" ]
[ "from pypkgs_mk import __version__\nfrom pypkgs_mk import pypkgs_mk\n\nimport pandas as pd\n\ndef test_version():\n assert __version__ == '0.1.0'\n\ndef test_catbind():\n a = pd.Categorical([\"character\", \"hits\", \"your\", \"eyeballs\"])\n b = pd.Categorical([\"but\", \"integer\", \"where it\", \"counts\"])\n assert ((pypkgs_mk.catbind(a, b)).codes == [1, 4, 7, 3, 0, 5, 6, 2]).all()\n assert ((pypkgs_mk.catbind(a, b)).categories == [\"but\", \"character\",\n \"counts\", \"eyeballs\", \"hits\", \"integer\", \"where it\", \"your\"]).all()" ]
[ [ "pandas.Categorical" ] ]
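The test above pins down catbind's contract: concatenate two pandas Categoricals and re-infer the union of categories. The package source is not part of this record, so the following is a sketch consistent with the asserted codes and categories (the standard py-pkgs example), not necessarily the shipped implementation:

import pandas as pd

def catbind(a, b):
    # Concatenate the values of two Categoricals, then rebuild the Categorical
    # so the categories become the sorted union of both inputs.
    concatenated = pd.concat([pd.Series(a.astype("str")), pd.Series(b.astype("str"))])
    return pd.Categorical(concatenated)

a = pd.Categorical(["character", "hits", "your", "eyeballs"])
b = pd.Categorical(["but", "integer", "where it", "counts"])
print(catbind(a, b).codes)  # [1 4 7 3 0 5 6 2], matching the assertion above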
kcrumb/EfficientDet
[ "cdc2ac4099c28ee89c2dd77bbcd36b0a589799b4" ]
[ "generators/csv_.py" ]
[ "\"\"\"\nCopyright 2017-2018 yhenon (https://github.com/yhenon/)\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom generators.common import Generator\nimport cv2\nimport numpy as np\nfrom PIL import Image\nfrom six import raise_from\nimport csv\nimport sys\nimport os.path as osp\nfrom collections import OrderedDict\n\n\ndef _parse(value, function, fmt):\n \"\"\"\n Parse a string into a value, and format a nice ValueError if it fails.\n\n Returns `function(value)`.\n Any `ValueError` raised is catched and a new `ValueError` is raised\n with message `fmt.format(e)`, where `e` is the caught `ValueError`.\n \"\"\"\n try:\n return function(value)\n except ValueError as e:\n raise_from(ValueError(fmt.format(e)), None)\n\n\ndef _read_classes(csv_reader):\n \"\"\"\n Parse the classes file given by csv_reader.\n \"\"\"\n result = OrderedDict()\n for line, row in enumerate(csv_reader):\n line += 1\n\n try:\n class_name, class_id = row\n except ValueError:\n raise_from(ValueError('line {}: format should be \\'class_name,class_id\\''.format(line)), None)\n class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))\n\n if class_name in result:\n raise ValueError('line {}: duplicate class name: \\'{}\\''.format(line, class_name))\n result[class_name] = class_id\n return result\n\n\ndef _read_quadrangle_annotations(csv_reader, classes, detect_text=False):\n \"\"\"\n Read annotations from the csv_reader.\n Args:\n csv_reader: csv reader of args.annotations_path\n classes: list[str] all the class names read from args.classes_path\n\n Returns:\n result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,\n 'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name}]}\n\n \"\"\"\n result = OrderedDict()\n for line, row in enumerate(csv_reader, 1):\n try:\n img_file, x1, y1, x2, y2, x3, y3, x4, y4, class_name = row[:10]\n if img_file not in result:\n result[img_file] = []\n\n # If a row contains only an image path, it's an image without annotations.\n if (x1, y1, x2, y2, x3, y3, x4, y4, class_name) == ('', '', '', '', '', '', '', '', ''):\n continue\n\n x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))\n y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))\n x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))\n y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))\n x3 = _parse(x3, int, 'line {}: malformed x3: {{}}'.format(line))\n y3 = _parse(y3, int, 'line {}: malformed y3: {{}}'.format(line))\n x4 = _parse(x4, int, 'line {}: malformed x4: {{}}'.format(line))\n y4 = _parse(y4, int, 'line {}: malformed y4: {{}}'.format(line))\n\n # check if the current class name is correctly present\n if detect_text:\n if class_name == '###':\n continue\n else:\n class_name = 'text'\n\n if class_name not in classes:\n raise ValueError(f'line {line}: unknown class name: \\'{class_name}\\' (classes: {classes})')\n\n result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,\n 'x3': 
x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name})\n        except ValueError:\n            raise_from(ValueError(\n                f'line {line}: format should be \'img_file,x1,y1,x2,y2,x3,y3,x4,y4,class_name\' or \'img_file,,,,,,,,,\''),\n                None)\n\n    return result\n\n\ndef _read_annotations(csv_reader, classes):\n    \"\"\"\n    Read annotations from the csv_reader.\n    Args:\n        csv_reader: csv reader of args.annotations_path\n        classes: list[str] all the class names read from args.classes_path\n\n    Returns:\n        result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name}]}\n\n    \"\"\"\n    result = OrderedDict()\n    for line, row in enumerate(csv_reader, 1):\n        try:\n            img_file, x1, y1, x2, y2, class_name = row[:6]\n            if img_file not in result:\n                result[img_file] = []\n\n            # If a row contains only an image path, it's an image without annotations.\n            if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):\n                continue\n\n            x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))\n            y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))\n            x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))\n            y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))\n\n            if class_name not in classes:\n                raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')\n\n            result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name})\n        except ValueError:\n            raise_from(ValueError(\n                f'line {line}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''),\n                None)\n\n    return result\n\n\ndef _open_for_csv(path):\n    \"\"\"\n    Open a file with flags suitable for csv.reader.\n\n    For Python 2 this means mode 'rb'; for Python 3 it means 'r' with \"universal newlines\".\n    \"\"\"\n    if sys.version_info[0] < 3:\n        return open(path, 'rb')\n    else:\n        return open(path, 'r', newline='')\n\n\nclass CSVGenerator(Generator):\n    \"\"\"\n    Generate data for a custom CSV dataset.\n\n    See https://github.com/fizyr/keras-retinanet#csv-datasets for more information.\n    \"\"\"\n\n    def __init__(\n            self,\n            csv_data_file,\n            csv_class_file,\n            base_dir=None,\n            detect_quadrangle=False,\n            detect_text=False,\n            **kwargs\n    ):\n        \"\"\"\n        Initialize a CSV data generator.\n\n        Args\n            csv_data_file: Path to the CSV annotations file.\n            csv_class_file: Path to the CSV classes file.\n            detect_text: whether to perform text detection\n            base_dir: Directory w.r.t. 
where the files are to be searched (defaults to the directory containing the csv_data_file).\n \"\"\"\n self.image_names = []\n self.image_data = {}\n self.base_dir = base_dir\n self.detect_quadrangle = detect_quadrangle\n self.detect_text = detect_text\n\n # Take base_dir from annotations file if not explicitly specified.\n if self.base_dir is None:\n if not osp.exists(csv_data_file):\n self.base_dir = ''\n else:\n self.base_dir = osp.dirname(csv_data_file)\n\n # parse the provided class file\n try:\n with _open_for_csv(csv_class_file) as file:\n # class_name --> class_id\n self.classes = _read_classes(csv.reader(file, delimiter=','))\n except ValueError as e:\n raise_from(ValueError('invalid CSV class file: {}: {}'.format(csv_class_file, e)), None)\n\n self.labels = {}\n # class_id --> class_name\n for key, value in self.classes.items():\n self.labels[value] = key\n\n # csv with img_path, x1, y1, x2, y2, x3, y3, x4, y4, class_name\n try:\n with _open_for_csv(csv_data_file) as file:\n # {'img_path1':[{'x1':xx,'y1':xx,'x2':xx,'y2':xx,'x3':xx,'y3':xx,'x4':xx,'y4':xx, 'class':xx}...],...}\n if self.detect_quadrangle:\n self.image_data = _read_quadrangle_annotations(csv.reader(file, delimiter=','), self.classes,\n self.detect_text)\n else:\n self.image_data = _read_annotations(csv.reader(file, delimiter=','), self.classes)\n except ValueError as e:\n raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(csv_data_file, e)), None)\n self.image_names = list(self.image_data.keys())\n\n super(CSVGenerator, self).__init__(detect_text=detect_text, detect_quadrangle=detect_quadrangle, **kwargs)\n\n def size(self):\n \"\"\"\n Size of the dataset.\n \"\"\"\n return len(self.image_names)\n\n def num_classes(self):\n \"\"\"\n Number of classes in the dataset.\n \"\"\"\n return max(self.classes.values()) + 1\n\n def has_label(self, label):\n \"\"\"\n Return True if label is a known label.\n \"\"\"\n return label in self.labels\n\n def has_name(self, name):\n \"\"\"\n Returns True if name is a known class.\n \"\"\"\n return name in self.classes\n\n def name_to_label(self, name):\n \"\"\"\n Map name to label.\n \"\"\"\n return self.classes[name]\n\n def label_to_name(self, label):\n \"\"\"\n Map label to name.\n \"\"\"\n return self.labels[label]\n\n def image_path(self, image_index):\n \"\"\"\n Returns the image path for image_index.\n \"\"\"\n return osp.join(self.base_dir, self.image_names[image_index])\n\n def image_aspect_ratio(self, image_index):\n \"\"\"\n Compute the aspect ratio for an image with image_index.\n \"\"\"\n # PIL is fast for metadata\n image = Image.open(self.image_path(image_index))\n return float(image.width) / float(image.height)\n\n def load_image(self, image_index):\n \"\"\"\n Load an image at the image_index.\n \"\"\"\n image = cv2.imread(self.image_path(image_index))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image\n\n def load_annotations(self, image_index):\n \"\"\"\n Load annotations for an image_index.\n \"\"\"\n path = self.image_names[image_index]\n annotations = {'labels': np.empty((0,), dtype=np.int32),\n 'bboxes': np.empty((0, 4), dtype=np.float32),\n 'quadrangles': np.empty((0, 4, 2), dtype=np.float32),\n }\n\n for idx, annot in enumerate(self.image_data[path]):\n annotations['labels'] = np.concatenate((annotations['labels'], [self.name_to_label(annot['class'])]))\n if self.detect_quadrangle:\n quadrangle = np.array([[float(annot['x1']), float(annot['y1'])],\n [float(annot['x2']), float(annot['y2'])],\n [float(annot['x3']), 
float(annot['y3'])],\n [float(annot['x4']), float(annot['y4'])]])\n ordered_quadrangle = self.reorder_vertexes(quadrangle)\n annotations['quadrangles'] = np.concatenate((annotations['quadrangles'], ordered_quadrangle[None]))\n annotations['bboxes'] = np.concatenate((annotations['bboxes'], [[\n float(min(annot['x1'], annot['x2'], annot['x3'], annot['x4'])),\n float(min(annot['y1'], annot['y2'], annot['y3'], annot['y4'])),\n float(max(annot['x1'], annot['x2'], annot['x3'], annot['x4'])),\n float(max(annot['y1'], annot['y2'], annot['y3'], annot['y4'])),\n ]]))\n else:\n annotations['bboxes'] = np.concatenate((annotations['bboxes'], [[\n float(annot['x1']),\n float(annot['y1']),\n float(annot['x2']),\n float(annot['y2']),\n ]]))\n return annotations\n\n def reorder_vertexes(self, vertexes):\n \"\"\"\n reorder vertexes as the paper shows, (top, right, bottom, left)\n Args:\n vertexes:\n\n Returns:\n\n \"\"\"\n assert vertexes.shape == (4, 2)\n xmin, ymin = np.min(vertexes, axis=0)\n xmax, ymax = np.max(vertexes, axis=0)\n\n # determine the first point with the smallest y,\n # if two vertexes has same y, choose that with smaller x,\n ordered_idxes = np.argsort(vertexes, axis=0)\n ymin1_idx = ordered_idxes[0, 1]\n ymin2_idx = ordered_idxes[1, 1]\n if vertexes[ymin1_idx, 1] == vertexes[ymin2_idx, 1]:\n if vertexes[ymin1_idx, 0] <= vertexes[ymin2_idx, 0]:\n first_vertex_idx = ymin1_idx\n else:\n first_vertex_idx = ymin2_idx\n else:\n first_vertex_idx = ymin1_idx\n ordered_idxes = [(first_vertex_idx + i) % 4 for i in range(4)]\n ordered_vertexes = vertexes[ordered_idxes]\n # drag the point to the corresponding edge\n ordered_vertexes[0, 1] = ymin\n ordered_vertexes[1, 0] = xmax\n ordered_vertexes[2, 1] = ymax\n ordered_vertexes[3, 0] = xmin\n return ordered_vertexes\n" ]
[ [ "numpy.max", "numpy.concatenate", "numpy.empty", "numpy.min", "numpy.argsort" ] ]
sameerpawar/CarND-Advanced-lane-lines
[ "1ca61c5e1ee5d0d32c3bee0a3aec4cf3dc2dbe81" ]
[ "lane_lines_temp.py" ]
[ "'''\nChoices made\n1. histogram computation height: Think of robustness in a sense sometimes the lanes will not be in lower few pixel part.\n1. [also addressed in lecture 35 with convolution) leftx_base, rightx_base are taken at \"midpoint\" using \"max\". Both chocies can be thought again, \ne.g., instead of max per column take aggregation of few columns to account for line thickness or Sliding window in histogram \nof certain width (another hyper parameter) to determine a position where we accumaulate maximum pixels.\n1. Window size: vertical (nWindows) and horizontal (margin), \n1. minpix to be found in a window to re-center the x positions.\n1. When you reuse the polynomioal lane estimate from previous frame the margin can be different than the window margin in first frame. Also, you can have a minpix_2 threshold to determine \nif we need to re-do the sliding window again since the older estimate is less trust worthy.\n\n# Propagation of belief [not very clear how to proceed]\n1. Initial prior on the first layer of first frame is uniform.\n1. Histogram is an observation. Convolution on histogram is just ML boosting of SNR by assuming averaging imporves signal but not noise.\n1. Find the center of lane.\n1. Take a window around the center as first box of lane line.\n1. Take 3 windows (1.5 on each side of lane center) worth observations normalize to generate distribution of possible lane positions.\n\nIn new frame prior can be last frame posterier with some confidence.\nConfidence can be based on how many pixels are in the estimated zone. If it is clear that we have detected a strong lane line we can have a more concentrated posterior.\n\n1. Current code uses mean to find center within a selected box for next layer. We can try some other statistics such as median or other ways of eliminating outliers before averaging \n\n\n'''\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n# Assuming you have created a warped binary image called \"binary_warped\"\n# Take a histogram of the bottom half of the image\nhistogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)\n# Create an output image to draw on and visualize the result\nout_img = np.dstack((binary_warped, binary_warped, binary_warped))*255\n# Find the peak of the left and right halves of the histogram\n# These will be the starting point for the left and right lines\nmidpoint = np.int(histogram.shape[0]//2)\nleftx_base = np.argmax(histogram[:midpoint])\nrightx_base = np.argmax(histogram[midpoint:]) + midpoint\n\n# Choose the number of sliding windows\nnwindows = 9\n# Set height of windows\nwindow_height = np.int(binary_warped.shape[0]//nwindows)\n# Identify the x and y positions of all nonzero pixels in the image\nnonzero = binary_warped.nonzero()\nnonzeroy = np.array(nonzero[0])\nnonzerox = np.array(nonzero[1])\n# Current positions to be updated for each window\nleftx_current = leftx_base\nrightx_current = rightx_base\n# Set the width of the windows +/- margin\nmargin = 100\n# Set minimum number of pixels found to recenter window\nminpix = 50\n# Create empty lists to receive left and right lane pixel indices\nleft_lane_inds = []\nright_lane_inds = []\n\n# Step through the windows one by one\nfor window in range(nwindows):\n # Identify window boundaries in x and y (and right and left)\n win_y_low = binary_warped.shape[0] - (window+1)*window_height\n win_y_high = binary_warped.shape[0] - window*window_height\n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n 
win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n # Draw the windows on the visualization image\n cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),\n (0,255,0), 2) \n cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),\n (0,255,0), 2) \n # Identify the nonzero pixels in x and y within the window\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & \n (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & \n (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]\n # Append these indices to the lists\n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n # If you found > minpix pixels, recenter next window on their mean position\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix: \n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n\n# Concatenate the arrays of indices\nleft_lane_inds = np.concatenate(left_lane_inds)\nright_lane_inds = np.concatenate(right_lane_inds)\n\n# Extract left and right line pixel positions\nleftx = nonzerox[left_lane_inds]\nlefty = nonzeroy[left_lane_inds] \nrightx = nonzerox[right_lane_inds]\nrighty = nonzeroy[right_lane_inds] \n\n# Fit a second order polynomial to each\nleft_fit = np.polyfit(lefty, leftx, 2)\nright_fit = np.polyfit(righty, rightx, 2)" ]
[ [ "numpy.concatenate", "numpy.int", "numpy.array", "numpy.sum", "numpy.mean", "numpy.argmax", "numpy.polyfit", "numpy.dstack" ] ]
Huanghongru/fairseq
[ "d80ad54f75186adf9b597ef0bcef005c98381b9e" ]
[ "fairseq/trainer.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nTrain a network across multiple GPUs.\n\"\"\"\n\nfrom collections import OrderedDict\nimport contextlib\nfrom itertools import chain\nimport math\nimport os\nimport sys\n\nimport torch\n\nfrom fairseq import checkpoint_utils, distributed_utils, models, optim, utils\nfrom fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter\nfrom fairseq.optim import lr_scheduler\n\n\nclass Trainer(object):\n \"\"\"Main class for data parallel training.\n\n This class supports synchronous distributed data parallel training,\n where multiple workers each have a full model replica and gradients\n are accumulated across workers before each update. We use\n :class:`~torch.nn.parallel.DistributedDataParallel` to handle\n communication of the gradients across workers.\n \"\"\"\n\n def __init__(self, args, task, model, criterion, dummy_batch=None, oom_batch=None):\n self.args = args\n self.task = task\n\n # copy model and criterion to current device\n self._criterion = criterion\n self._model = model\n self.cuda = torch.cuda.is_available() and not args.cpu\n if args.fp16:\n self._criterion = self._criterion.half()\n self._model = self._model.half()\n if self.cuda:\n self._criterion = self._criterion.cuda()\n self._model = self._model.cuda()\n\n self._dummy_batch = dummy_batch\n self._oom_batch = oom_batch or dummy_batch\n\n self._lr_scheduler = None\n self._num_updates = 0\n self._optim_history = None\n self._optimizer = None\n self._prev_grad_norm = None\n self._wrapped_criterion = None\n self._wrapped_model = None\n\n # Fast stats sync avoids memcpy and is 7% faster when tested on 16 nodes.\n # It is less flexible and syncs only the default stats.\n self._all_reduce_list = [0.0] * 6\n self.fast_stat_sync = args.fast_stat_sync\n\n self.init_meters(args)\n\n def init_meters(self, args):\n self.meters = OrderedDict()\n self.meters['train_loss'] = AverageMeter()\n self.meters['train_nll_loss'] = AverageMeter()\n self.meters['valid_loss'] = AverageMeter()\n self.meters['valid_nll_loss'] = AverageMeter()\n self.meters['wps'] = TimeMeter() # words per second\n self.meters['ups'] = TimeMeter() # updates per second\n self.meters['wpb'] = AverageMeter() # words per batch\n self.meters['bsz'] = AverageMeter() # sentences per batch\n self.meters['gnorm'] = AverageMeter() # gradient norm\n self.meters['clip'] = AverageMeter() # % of updates clipped\n self.meters['oom'] = AverageMeter() # out of memory\n if args.fp16:\n self.meters['loss_scale'] = AverageMeter() # dynamic loss scale\n self.meters['wall'] = TimeMeter() # wall time in seconds\n self.meters['train_wall'] = StopwatchMeter() # train wall time in seconds\n\n @property\n def criterion(self):\n if self._wrapped_criterion is None:\n if (\n utils.has_parameters(self._criterion)\n and self.args.distributed_world_size > 1\n and not self.args.use_bmuf\n ):\n self._wrapped_criterion = models.DistributedFairseqModel(\n self.args, self._criterion\n )\n else:\n self._wrapped_criterion = self._criterion\n return self._wrapped_criterion\n\n @property\n def model(self):\n if self._wrapped_model is None:\n if self.args.distributed_world_size > 1 and not self.args.use_bmuf:\n self._wrapped_model = models.DistributedFairseqModel(\n self.args, self._model,\n )\n else:\n self._wrapped_model = self._model\n return self._wrapped_model\n\n @property\n def optimizer(self):\n if 
self._optimizer is None:\n self._build_optimizer()\n return self._optimizer\n\n @property\n def lr_scheduler(self):\n if self._lr_scheduler is None:\n self._build_optimizer() # this will initialize self._lr_scheduler\n return self._lr_scheduler\n\n def _build_optimizer(self):\n params = list(\n filter(\n lambda p: p.requires_grad,\n chain(self.model.parameters(), self.criterion.parameters()),\n )\n )\n\n if self.args.fp16:\n if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:\n print('| WARNING: your device does NOT support faster training with --fp16, '\n 'please switch to FP32 which is likely to be faster')\n if self.args.memory_efficient_fp16:\n self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(self.args, params)\n else:\n self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params)\n else:\n if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7:\n print('| NOTICE: your device may support faster training with --fp16')\n self._optimizer = optim.build_optimizer(self.args, params)\n\n if self.args.use_bmuf:\n self._optimizer = optim.FairseqBMUF(self.args, self._optimizer)\n\n # We should initialize the learning rate scheduler immediately after\n # building the optimizer, so that the initial learning rate is set.\n self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer)\n self._lr_scheduler.step_update(0)\n\n def save_checkpoint(self, filename, extra_state):\n \"\"\"Save all training state in a checkpoint file.\"\"\"\n if distributed_utils.is_master(self.args): # only save one checkpoint\n extra_state['train_meters'] = self.meters\n checkpoint_utils.save_state(\n filename, self.args, self.get_model().state_dict(), self.get_criterion(),\n self.optimizer, self.lr_scheduler, self.get_num_updates(),\n self._optim_history, extra_state,\n )\n\n def load_checkpoint(\n self,\n filename,\n reset_optimizer=False,\n reset_lr_scheduler=False,\n optimizer_overrides=None,\n reset_meters=False,\n ):\n \"\"\"Load all training state from a checkpoint file.\"\"\"\n extra_state, self._optim_history, last_optim_state = None, [], None\n\n try:\n from fairseq.fb_pathmgr import fb_pathmgr\n bexists = fb_pathmgr.isfile(filename)\n except Exception:\n bexists = os.path.exists(filename)\n\n if bexists:\n state = checkpoint_utils.load_checkpoint_to_cpu(filename)\n\n # load model parameters\n try:\n self.get_model().load_state_dict(state['model'], strict=True)\n if utils.has_parameters(self.get_criterion()):\n self.get_criterion().load_state_dict(state['criterion'], strict=True)\n except Exception:\n raise Exception(\n 'Cannot load model parameters from checkpoint {}; '\n 'please ensure that the architectures match.'.format(filename)\n )\n\n extra_state = state['extra_state']\n self._optim_history = state['optimizer_history']\n last_optim_state = state.get('last_optimizer_state', None)\n\n if last_optim_state is not None and not reset_optimizer:\n # rebuild optimizer after loading model, since params may have changed\n self._build_optimizer()\n\n # only reload optimizer and lr_scheduler if they match\n last_optim = self._optim_history[-1]\n assert last_optim['criterion_name'] == self.get_criterion().__class__.__name__, \\\n 'Criterion does not match; please reset the optimizer (--reset-optimizer).'\n assert last_optim['optimizer_name'] == self.optimizer.__class__.__name__, \\\n 'Optimizer does not match; please reset the optimizer (--reset-optimizer).'\n\n if not reset_lr_scheduler:\n 
self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state'])\n self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)\n\n self.set_num_updates(last_optim['num_updates'])\n\n if extra_state is not None:\n epoch = extra_state['train_iterator']['epoch']\n print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(\n filename, epoch, self.get_num_updates()))\n\n self.lr_step(epoch)\n\n if 'train_meters' in extra_state and not reset_meters:\n self.meters.update(extra_state['train_meters'])\n del extra_state['train_meters']\n\n # reset TimeMeters, since their start times don't make sense anymore\n for meter in self.meters.values():\n if isinstance(meter, TimeMeter):\n meter.reset()\n else:\n print('| no existing checkpoint found {}'.format(filename))\n\n return extra_state\n\n def get_train_iterator(self, epoch, combine=True, load_dataset=True, data_selector=None):\n \"\"\"Return an EpochBatchIterator over the training set for a given epoch.\"\"\"\n if load_dataset:\n print('| loading train data for epoch {}'.format(epoch))\n self.task.load_dataset(\n self.args.train_subset,\n epoch=epoch,\n combine=combine,\n data_selector=data_selector,\n )\n return self.task.get_batch_iterator(\n dataset=self.task.dataset(self.args.train_subset),\n max_tokens=self.args.max_tokens,\n max_sentences=self.args.max_sentences,\n max_positions=utils.resolve_max_positions(\n self.task.max_positions(),\n self.model.max_positions(),\n ),\n ignore_invalid_inputs=True,\n required_batch_size_multiple=self.args.required_batch_size_multiple,\n seed=self.args.seed,\n num_shards=self.args.distributed_world_size,\n shard_id=self.args.distributed_rank,\n num_workers=self.args.num_workers,\n epoch=epoch,\n )\n\n def train_step(self, samples, dummy_batch=False, raise_oom=False):\n \"\"\"Do forward, backward and parameter update.\"\"\"\n if self._dummy_batch is None:\n self._dummy_batch = samples[0]\n\n self._set_seed()\n self.model.train()\n self.criterion.train()\n self.zero_grad()\n\n if not dummy_batch:\n self.meters['train_wall'].start()\n\n # forward and backward pass\n logging_outputs, sample_sizes, ooms = [], [], 0\n for i, sample in enumerate(samples):\n sample = self._prepare_sample(sample)\n if sample is None:\n # when sample is None, run forward/backward on a dummy batch\n # and ignore the resulting gradients\n sample = self._prepare_sample(self._dummy_batch)\n ignore_grad = True\n else:\n ignore_grad = False\n\n def maybe_no_sync():\n \"\"\"\n Whenever *samples* contains more than one mini-batch, we\n want to accumulate gradients locally and only call\n all-reduce in the last backwards pass.\n \"\"\"\n if (\n self.args.distributed_world_size > 1\n and hasattr(self.model, 'no_sync')\n and i < len(samples) - 1\n ):\n return self.model.no_sync()\n else:\n return contextlib.ExitStack() # dummy contextmanager\n\n try:\n with maybe_no_sync():\n # forward and backward\n loss, sample_size, logging_output = self.task.train_step(\n sample, self.model, self.criterion, self.optimizer,\n ignore_grad\n )\n\n if not ignore_grad:\n logging_outputs.append(logging_output)\n sample_sizes.append(sample_size)\n\n if self.fast_stat_sync:\n self._all_reduce_list[0] += sample_size\n self._all_reduce_list[1] += logging_output.get('nsentences', 0.0)\n self._all_reduce_list[2] += logging_output.get('loss', 0.0)\n self._all_reduce_list[3] += logging_output.get('nll_loss', 0.0)\n self._all_reduce_list[4] += logging_output.get('ntokens', 0.0)\n except RuntimeError as e:\n if 'out of memory' in str(e):\n msg = (\n '| 
WARNING: ran out of memory with exception: '\n + '{};'.format(e)\n + '\\n Skipping batch'\n )\n # TODO: print should really go to logger, this print goes\n # to stderr, which is buffered, which in many cases is not\n # printed out if another exception happens.\n # NB(jerry): added a flush to mitigate this\n print(msg, file=sys.stderr)\n if torch.cuda.is_available() and hasattr(torch.cuda, \"memory_summary\"):\n for device_idx in range(torch.cuda.device_count()):\n print(torch.cuda.memory_summary(device=torch.cuda.device(device_idx)),\n file=sys.stderr)\n sys.stderr.flush()\n\n if raise_oom:\n raise ValueError(msg)\n ooms += 1\n self.zero_grad()\n else:\n raise e\n\n if self.fast_stat_sync:\n self._all_reduce_list[5] += ooms\n\n\n if ooms > 0 and self._oom_batch is not None:\n self.handle_ooms(ooms)\n\n if dummy_batch:\n return None\n\n # gather logging outputs from all replicas\n if self.fast_stat_sync:\n # rework all_gather_list\n all_reduce_list_tensor = torch.cuda.DoubleTensor(self._all_reduce_list)\n if self._sync_stats():\n torch.distributed.all_reduce(all_reduce_list_tensor)\n # Normalize loss and nll_loss by \"sample_size\"\n # and convert to log base 2\n all_reduce_list_tensor[2:4].div_(\n (\n all_reduce_list_tensor[0:1] *\n torch.log(torch.cuda.DoubleTensor([2]))\n )\n )\n self._all_reduce_list = all_reduce_list_tensor.tolist()\n logging_output = {}\n [\n sample_size,\n logging_output['nsentences'],\n logging_output['loss'],\n logging_output['nll_loss'],\n logging_output['ntokens'],\n ooms,\n ] = self._all_reduce_list\n elif self._sync_stats():\n logging_outputs, sample_sizes, ooms, prev_norms = \\\n zip(*distributed_utils.all_gather_list(\n [logging_outputs, sample_sizes, ooms, self._prev_grad_norm],\n ))\n logging_outputs = list(chain.from_iterable(logging_outputs))\n sample_sizes = list(chain.from_iterable(sample_sizes))\n ooms = sum(ooms)\n\n if not self.args.use_bmuf:\n assert (\n all(norm == prev_norms[0] for norm in prev_norms)\n or all(math.isnan(norm) or math.isinf(norm) for norm in prev_norms)\n ), 'Fatal error: gradients are inconsistent between workers'\n\n self.meters['oom'].update(ooms, len(samples))\n if ooms == self.args.distributed_world_size * len(samples):\n print('| WARNING: OOM in all workers, skipping update')\n self.zero_grad()\n return None\n\n if not self.fast_stat_sync:\n # aggregate logging outputs and sample sizes\n logging_output = self.task.aggregate_logging_outputs(\n logging_outputs, self.get_criterion()\n )\n sample_size = self.task.grad_denom(sample_sizes, self.get_criterion())\n\n if not all(k in logging_output for k in ['ntokens', 'nsentences']):\n raise Exception((\n 'Please update the {}.aggregate_logging_outputs() method to '\n 'return ntokens and nsentences'\n ).format(self.task.__class__.__name__))\n\n try:\n # normalize grads by sample size\n if sample_size > 0:\n self.optimizer.multiply_grads(self.args.distributed_world_size / float(sample_size))\n\n # clip grads\n grad_norm = self.optimizer.clip_grad_norm(self.args.clip_norm)\n self._prev_grad_norm = grad_norm\n\n # take an optimization step\n self.optimizer.step()\n self.set_num_updates(self.get_num_updates() + 1)\n\n # task specific update per step\n self.task.update_step(self._num_updates)\n\n # update meters\n ntokens = logging_output.get('ntokens', 0)\n nsentences = logging_output.get('nsentences', 0)\n self.meters['wps'].update(ntokens)\n self.meters['ups'].update(1.)\n self.meters['wpb'].update(ntokens)\n self.meters['bsz'].update(nsentences)\n 
self.meters['gnorm'].update(grad_norm)\n self.meters['clip'].update(\n 1. if grad_norm > self.args.clip_norm and self.args.clip_norm > 0 else 0.\n )\n self.meters['train_loss'].update(logging_output.get('loss', 0), sample_size)\n if 'train_acc' in self.meters:\n self.meters['train_acc'].update(\n logging_output.get('acc', 0), sample_size)\n\n if 'nll_loss' in logging_output:\n self.meters['train_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens)\n\n # clear CUDA cache to reduce memory fragmentation\n if (self.args.empty_cache_freq > 0 and\n ((self.get_num_updates() + self.args.empty_cache_freq - 1) %\n self.args.empty_cache_freq) == 0 and\n torch.cuda.is_available() and\n not self.args.cpu):\n torch.cuda.empty_cache()\n except OverflowError as e:\n print('| WARNING: overflow detected, ' + str(e))\n self.zero_grad()\n logging_output = None\n\n if self.args.fp16:\n self.meters['loss_scale'].reset()\n self.meters['loss_scale'].update(self.optimizer.scaler.loss_scale)\n\n self.clear_buffered_stats()\n self.meters['train_wall'].stop()\n\n return logging_output\n\n def valid_step(self, sample, raise_oom=False):\n \"\"\"Do forward pass in evaluation mode.\"\"\"\n with torch.no_grad():\n self.model.eval()\n self.criterion.eval()\n\n sample = self._prepare_sample(sample)\n if sample is None:\n sample = self._prepare_sample(self._dummy_batch)\n ignore_results = True\n else:\n ignore_results = False\n\n try:\n _loss, sample_size, logging_output = self.task.valid_step(\n sample, self.model, self.criterion\n )\n except RuntimeError as e:\n if 'out of memory' in str(e) and not raise_oom:\n print('| WARNING: ran out of memory, retrying batch')\n for p in self.model.parameters():\n if p.grad is not None:\n p.grad = None # free some memory\n if self.cuda:\n torch.cuda.empty_cache()\n return self.valid_step(sample, raise_oom=True)\n else:\n raise e\n\n if ignore_results:\n logging_output, sample_size = {}, 0\n\n # gather logging outputs from all replicas\n if self.args.distributed_world_size > 1:\n logging_output, sample_size = zip(*distributed_utils.all_gather_list(\n [logging_output, sample_size],\n ))\n logging_output = list(logging_output)\n sample_size = list(sample_size)\n else:\n logging_output = [logging_output]\n sample_size = [sample_size]\n\n # aggregate logging outputs and sample sizes\n logging_output = self.task.aggregate_logging_outputs(\n logging_output, self.get_criterion()\n )\n sample_size = self.task.grad_denom(\n sample_size, self.get_criterion()\n )\n\n # update meters for validation\n ntokens = logging_output.get('ntokens', 0)\n self.meters['valid_loss'].update(logging_output.get('loss', 0), sample_size)\n if 'valid_acc' in self.meters:\n self.meters['valid_acc'].update(\n logging_output.get('acc', 0), sample_size)\n\n if 'nll_loss' in logging_output:\n self.meters['valid_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens)\n\n return logging_output\n\n def dummy_train_step(self, dummy_batch):\n \"\"\"Dummy training step for warming caching allocator.\"\"\"\n self.train_step(dummy_batch, dummy_batch=True)\n self.zero_grad()\n\n def handle_ooms(self, number_of_ooms):\n \"\"\"\n c10d accumulates/syncs gradients between gpus during backward pass.\n In case of OOMs, gpus may fail to sync, so we manually iterate\n extra to make sure each gpu makes same number of iterations.\n \"\"\"\n for _ in range(number_of_ooms):\n self.train_step([self._oom_batch], True)\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def clear_buffered_stats(self):\n self._all_reduce_list 
= [0.0] * 6\n\n def lr_step(self, epoch, val_loss=None):\n \"\"\"Adjust the learning rate based on the validation loss.\"\"\"\n self.lr_scheduler.step(epoch, val_loss)\n # prefer updating the LR based on the number of steps\n return self.lr_step_update()\n\n def lr_step_update(self):\n \"\"\"Update the learning rate after each update.\"\"\"\n return self.lr_scheduler.step_update(self.get_num_updates())\n\n def get_lr(self):\n \"\"\"Get the current learning rate.\"\"\"\n return self.optimizer.get_lr()\n\n def get_model(self):\n \"\"\"Get the (non-wrapped) model instance.\"\"\"\n return self._model\n\n def get_criterion(self):\n \"\"\"Get the (non-wrapped) criterion instance.\"\"\"\n return self._criterion\n\n def get_meter(self, name):\n \"\"\"Get a specific meter by name.\"\"\"\n if name not in self.meters:\n return None\n return self.meters[name]\n\n def get_num_updates(self):\n \"\"\"Get the number of parameters updates.\"\"\"\n return self._num_updates\n\n def set_num_updates(self, num_updates):\n \"\"\"Set the number of parameters updates.\"\"\"\n self._num_updates = num_updates\n self.lr_step_update()\n\n def _prepare_sample(self, sample):\n if sample is None or len(sample) == 0:\n return None\n\n if self.cuda:\n sample = utils.move_to_cuda(sample)\n\n def apply_half(t):\n if t.dtype is torch.float32:\n return t.half()\n return t\n\n if self.args.fp16:\n sample = utils.apply_to_sample(apply_half, sample)\n\n return sample\n\n def _set_seed(self):\n # Set seed based on args.seed and the update number so that we get\n # reproducible results when resuming from checkpoints\n seed = self.args.seed + self.get_num_updates()\n torch.manual_seed(seed)\n if self.cuda:\n torch.cuda.manual_seed(seed)\n\n def _sync_stats(self):\n return (\n self.args.distributed_world_size > 1 and\n (\n (not self.args.use_bmuf) or\n (\n self.args.use_bmuf\n and (self.get_num_updates() + 1) % self.args.global_sync_iter == 0\n )\n )\n )\n" ]
[ [ "torch.cuda.manual_seed", "torch.no_grad", "torch.cuda.device", "torch.cuda.device_count", "torch.manual_seed", "torch.cuda.empty_cache", "torch.cuda.is_available", "torch.cuda.get_device_capability", "torch.distributed.all_reduce", "torch.cuda.DoubleTensor" ] ]
ShankarNara/shogun
[ "8ab196de16b8d8917e5c84770924c8d0f5a3d17c" ]
[ "applications/tapkee/samples/dm.py" ]
[ "import shogun as sg\nimport data\nimport numpy as np\n\n# load data\nfeature_matrix = data.swissroll()\n# create features instance\nfeatures = sg.RealFeatures(feature_matrix)\n\n# create Diffusion Maps converter instance\nconverter = sg.DiffusionMaps()\n\n# set target dimensionality\nconverter.set_target_dim(2)\n# set number of time-steps\nconverter.set_t(2)\n# set width of gaussian kernel\nconverter.set_width(10.0)\n\n# create euclidean distance instance\ndistance = sg.EuclideanDistance()\n# enable converter instance to use created distance instance\nconverter.set_distance(distance)\n\n# compute embedding with Diffusion Maps method\nembedding = converter.embed(features)\n\n# compute custom distance matrix\ndistance_matrix = np.exp(-np.dot(feature_matrix.T,feature_matrix))\n# create Custom Kernel instance\ncustom_distance = sg.CustomDistance(distance_matrix)\n# construct embedding based on created distance\ndistance_embedding = converter.embed_distance(custom_distance)\n" ]
[ [ "numpy.dot" ] ]
edwinjiang703/ruiqi_aiops
[ "ce10eae41cb15ee3d1f07692d5c92491a3fca9bf" ]
[ "time_series_detector/test.py" ]
[ "# -*- coding: utf-8 -*-\n# @Time : 2019/4/11 10:07 PM\n# @Author : Edwin\n# @File : test.py\n# @Software: PyCharm\n\n\nfrom time_series_detector import detect\nimport pandas as pd\nimport warnings\nimport matplotlib.pyplot as plt\nfrom ora_dual import models\nimport os,time,datetime\nimport pandas\n\nwarnings.filterwarnings('ignore')\npd.set_option('display.max_columns', 1000)\npd.set_option('display.width', 1000)\npd.set_option('display.max_colwidth', 1000)\npd.set_option('display.max_rows',1000)\n\n\n#\n# spacechange_metric_data = pd.read_csv('spacechange_metric_05_04_2019.csv')\n# spacechange_metric_data = spacechange_metric_data[\n# ['begin_time','DB Block Changes Per Sec','DIFF_KB']]\n#\n# total_block_change = spacechange_metric_data[['begin_time','DB Block Changes Per Sec']]\n# #print(total_block_change)\n# total_block_change.plot(x='begin_time',y='DB Block Changes Per Sec')\n# plt.show()\n#\n# # detect_data = spacechange_metric_data.loc[spacechange_metric_data['begin_time']=='2019-04-03 09']\n# # detect_data = list(detect_data['DB Block Changes Per Sec'])\n# # detect_data = [str(int(x)) for x in detect_data]detect_data\n# detect_data = [\"1000\"]\n# detect_data = \",\".join(detect_data)\n#\n#\n# datac = list(spacechange_metric_data['DB Block Changes Per Sec'][0:360])\n# datac.append(detect_data)\n# datac = [str(int(x)) for x in datac]\n# datac = \",\".join(datac)\n# #print(\"datac is\",datac)\n# # print(len(datac.split(',')))\n# #\n# datab = list(spacechange_metric_data['DB Block Changes Per Sec'][361:721])\n# datab.append(detect_data)\n# datab = [str(int(x)) for x in datab]\n# datab = \",\".join(datab)\n# #print(\"datab is\",datab)\n# #\n# dataa = list(spacechange_metric_data['DB Block Changes Per Sec'][722:902])\n# dataa.append(detect_data)\n# dataa = [str(int(x)) for x in dataa]\n# dataa = \",\".join(dataa)\n# #print(\"dataa is\",dataa)\n# #\n# #\n# detect_obj = detect.Detect()\n# data = {\"window\":180,\n# \"dataC\":datac,\n# \"dataB\":datab,\n# \"dataA\":dataa\n# }\n# # combined_data = data[\"dataC\"] + \",\" + data[\"dataB\"] + \",\" + data[\"dataA\"]\n# # time_series = map(int, combined_data.split(','))\n# print(detect_obj.value_predict(data))\n\nload_profile_per_hour = list(models.system_metric_period.objects.values('begin_time', 'metric_name', 'metric_average').distinct().order_by(\n 'begin_time'))\n\n# load_profile_columns = list(models.system_metric_period.objects.values('metric_name').distinct())\n# load_porfile_time = list(models.system_metric_period.objects.values('begin_time').distinct())\n\nload_profile_per_hour = pd.DataFrame(load_profile_per_hour)\n\nload_profile_per_hour_out = load_profile_per_hour.pivot(index='begin_time', columns='metric_name',values='metric_average')\n\n\nload_profile_per_hour_out.to_csv('spacechange_metric_' + time.strftime(\"%d_%m_%Y\") + '.csv', index=True, header=True,\n na_rep=0)\nload_profile_per_hour_out = pandas.read_csv('spacechange_metric_' + time.strftime(\"%d_%m_%Y\") + '.csv')\n\nload_profile_per_hour_out['begin_time'] = load_profile_per_hour_out['begin_time'].apply(\n lambda x: datetime.datetime.strftime(datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S'),'%Y-%m-%d %H'))\n\n\n" ]
[ [ "pandas.DataFrame", "pandas.set_option" ] ]
totti0223/deepstomata
[ "e4f5dd5d1a65232ed13f6bea6f4d1f02d1494558" ]
[ "deepstomata/stomata_model.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nfrom math import sqrt\nMOVING_AVERAGE_DECAY = 0.9999\n\n\ndef tf_inference(images, BATCH_SIZE, image_size, NUM_CLASSES):\n\n def _variable_with_weight_decay(name, shape, stddev, wd):\n var = tf.get_variable(name, shape=shape, initializer=tf.truncated_normal_initializer(stddev=stddev))\n if wd:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var\n\n def _activation_summary(x):\n tensor_name = x.op.name\n tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))\n\n with tf.variable_scope('conv1') as scope:\n kernel = tf.get_variable('weights', shape=[3, 3, 3, 32], initializer=tf.truncated_normal_initializer(stddev=0.1))\n conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable('biases', shape=[32], initializer=tf.constant_initializer(0.0))\n bias = tf.nn.bias_add(conv, biases)\n conv1 = tf.nn.relu(bias, name=scope.name)\n pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')\n\n with tf.variable_scope('conv2') as scope:\n kernel = tf.get_variable('weights', shape=[3, 3, 32, 64], initializer=tf.truncated_normal_initializer(stddev=0.1))\n conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable('biases', shape=[64], initializer=tf.constant_initializer(0.0))\n bias = tf.nn.bias_add(conv, biases)\n conv2 = tf.nn.relu(bias, name=scope.name)\n pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')\n\n with tf.variable_scope('conv3') as scope:\n kernel = tf.get_variable('weights', shape=[3, 3, 64, 128], initializer=tf.truncated_normal_initializer(stddev=0.1))\n conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable('biases', shape=[128], initializer=tf.constant_initializer(0.0))\n bias = tf.nn.bias_add(conv, biases)\n conv3 = tf.nn.relu(bias, name=scope.name)\n pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')\n\n with tf.variable_scope('conv4') as scope:\n kernel = tf.get_variable('weights', shape=[3, 3, 128, 256], initializer=tf.truncated_normal_initializer(stddev=0.1))\n conv = tf.nn.conv2d(pool3, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.get_variable('biases', shape=[256], initializer=tf.constant_initializer(0.0))\n bias = tf.nn.bias_add(conv, biases)\n conv4 = tf.nn.relu(bias, name=scope.name)\n pool4 = tf.nn.max_pool(conv4, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4')\n\n with tf.variable_scope('fc5') as scope:\n dim = 1\n for d in pool4.get_shape()[1:].as_list():\n dim *= d\n reshape = tf.reshape(pool4, [BATCH_SIZE, dim])\n weights = _variable_with_weight_decay('weights', shape=[dim, 1024], stddev=0.02, wd=0.005)\n biases = tf.get_variable('biases', shape=[1024], initializer=tf.constant_initializer(0.0))\n fc5 = tf.nn.relu(tf.nn.bias_add(tf.matmul(reshape, weights), biases), name=scope.name)\n\n with tf.variable_scope('fc6') as scope:\n weights = _variable_with_weight_decay('weights', shape=[1024, 256], stddev=0.02, wd=0.005)\n biases = tf.get_variable('biases', shape=[256], initializer=tf.constant_initializer(0.0))\n fc6 = tf.nn.relu(tf.nn.bias_add(tf.matmul(fc5, weights), biases), name=scope.name)\n\n with tf.variable_scope('fc7') as scope:\n weights = tf.get_variable('weights', shape=[256, NUM_CLASSES], 
initializer=tf.truncated_normal_initializer(stddev=0.02))\n biases = tf.get_variable('biases', shape=[NUM_CLASSES], initializer=tf.constant_initializer(0.0))\n fc7 = tf.nn.bias_add(tf.matmul(fc6, weights), biases, name=scope.name)\n\n return fc7" ]
[ [ "tensorflow.constant_initializer", "tensorflow.nn.relu", "tensorflow.nn.conv2d", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.nn.l2_loss", "tensorflow.variable_scope", "tensorflow.truncated_normal_initializer", "tensorflow.nn.bias_add", "tensorflow.nn.max_pool", "tensorflow.add_to_collection", "tensorflow.nn.zero_fraction" ] ]
RileyWClarke/flarubin
[ "eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a" ]
[ "rubin_sim/maf/metrics/cadenceMetrics.py" ]
[ "import numpy as np\nfrom .baseMetric import BaseMetric\n\n__all__ = ['TemplateExistsMetric', 'UniformityMetric',\n 'RapidRevisitUniformityMetric', 'RapidRevisitMetric','NRevisitsMetric', 'IntraNightGapsMetric',\n 'InterNightGapsMetric', 'VisitGapMetric']\n\n\nclass fSMetric(BaseMetric):\n \"\"\"Calculate the fS value (Nvisit-weighted delta(M5-M5srd)).\n \"\"\"\n def __init__(self, filterCol='filter', metricName='fS', **kwargs):\n self.filterCol = filterCol\n cols = [self.filterCol]\n super().__init__(cols=cols, metricName=metricName, units='fS', **kwargs)\n\n def run(self, dataSlice, slicePoint=None):\n \"\"\"\"Calculate the fS (reserve above/below the m5 values from the LSST throughputs)\n\n Parameters\n ----------\n dataSlice : numpy.array\n Numpy structured array containing the data related to the visits provided by the slicer.\n slicePoint : dict, optional\n Dictionary containing information about the slicepoint currently active in the slicer.\n\n Returns\n -------\n float\n The fS value.\n \"\"\"\n # We could import this from the m5_flat_sed values, but it makes sense to calculate the m5\n # directly from the throughputs. This is easy enough to do and will allow variation of\n # the throughput curves and readnoise and visit length, etc.\n pass\n\n\nclass TemplateExistsMetric(BaseMetric):\n \"\"\"Calculate the fraction of images with a previous template image of desired quality.\n \"\"\"\n def __init__(self, seeingCol='seeingFwhmGeom', observationStartMJDCol='observationStartMJD',\n metricName='TemplateExistsMetric', **kwargs):\n cols = [seeingCol, observationStartMJDCol]\n super(TemplateExistsMetric, self).__init__(col=cols, metricName=metricName,\n units='fraction', **kwargs)\n self.seeingCol = seeingCol\n self.observationStartMJDCol = observationStartMJDCol\n\n def run(self, dataSlice, slicePoint=None):\n \"\"\"\"Calculate the fraction of images with a previous template image of desired quality.\n\n Parameters\n ----------\n dataSlice : numpy.array\n Numpy structured array containing the data related to the visits provided by the slicer.\n slicePoint : dict, optional\n Dictionary containing information about the slicepoint currently active in the slicer.\n\n Returns\n -------\n float\n The fraction of images with a 'good' previous template image.\n \"\"\"\n # Check that data is sorted in observationStartMJD order\n dataSlice.sort(order=self.observationStartMJDCol)\n # Find the minimum seeing up to a given time\n seeing_mins = np.minimum.accumulate(dataSlice[self.seeingCol])\n # Find the difference between the seeing and the minimum seeing at the previous visit\n seeing_diff = dataSlice[self.seeingCol] - np.roll(seeing_mins, 1)\n # First image never has a template; check how many others do\n good = np.where(seeing_diff[1:] >= 0.)[0]\n frac = (good.size) / float(dataSlice[self.seeingCol].size)\n return frac\n\n\nclass UniformityMetric(BaseMetric):\n \"\"\"Calculate how uniformly the observations are spaced in time.\n Returns a value between -1 and 1.\n A value of zero means the observations are perfectly uniform.\n\n Parameters\n ----------\n surveyLength : float, optional\n The overall duration of the survey. 
Default 10.\n \"\"\"\n def __init__(self, mjdCol='observationStartMJD', units='',\n surveyLength=10., **kwargs):\n \"\"\"surveyLength = time span of survey (years) \"\"\"\n self.mjdCol = mjdCol\n super(UniformityMetric, self).__init__(col=self.mjdCol, units=units, **kwargs)\n self.surveyLength = surveyLength\n\n def run(self, dataSlice, slicePoint=None):\n \"\"\"\"Calculate the survey uniformity.\n\n This is based on how a KS-test works: look at the cumulative distribution of observation dates,\n and compare to a perfectly uniform cumulative distribution.\n Perfectly uniform observations = 0, perfectly non-uniform = 1.\n\n Parameters\n ----------\n dataSlice : numpy.array\n Numpy structured array containing the data related to the visits provided by the slicer.\n slicePoint : dict, optional\n Dictionary containing information about the slicepoint currently active in the slicer.\n\n Returns\n -------\n float\n Uniformity of 'observationStartMJDCol'.\n \"\"\"\n # If only one observation, there is no uniformity\n if dataSlice[self.mjdCol].size == 1:\n return 1\n # Scale dates to lie between 0 and 1, where 0 is the first observation date and 1 is surveyLength\n dates = (dataSlice[self.mjdCol] - dataSlice[self.mjdCol].min()) / \\\n (self.surveyLength * 365.25)\n dates.sort() # Just to be sure\n n_cum = np.arange(1, dates.size + 1) / float(dates.size)\n D_max = np.max(np.abs(n_cum - dates - dates[1]))\n return D_max\n\n\nclass RapidRevisitUniformityMetric(BaseMetric):\n \"\"\"Calculate uniformity of time between consecutive visits on short timescales (for RAV1).\n\n Parameters\n ----------\n mjdCol : str, optional\n The column containing the 'time' value. Default observationStartMJD.\n minNvisits : int, optional\n The minimum number of visits required within the time interval (dTmin to dTmax).\n Default 100.\n dTmin : float, optional\n The minimum dTime to consider (in days). Default 40 seconds.\n dTmax : float, optional\n The maximum dTime to consider (in days). 
Default 30 minutes.\n \"\"\"\n def __init__(self, mjdCol='observationStartMJD', minNvisits=100,\n dTmin=40.0 / 60.0 / 60.0 / 24.0, dTmax=30.0 / 60.0 / 24.0,\n metricName='RapidRevisitUniformity', **kwargs):\n self.mjdCol = mjdCol\n self.minNvisits = minNvisits\n self.dTmin = dTmin\n self.dTmax = dTmax\n super().__init__(col=self.mjdCol, metricName=metricName, **kwargs)\n # Update minNvisits, as 0 visits will crash algorithm and 1 is nonuniform by definition.\n if self.minNvisits <= 1:\n self.minNvisits = 2\n\n def run(self, dataSlice, slicePoint=None):\n \"\"\"Calculate the uniformity of visits within dTmin to dTmax.\n\n Uses a the same 'uniformity' calculation as the UniformityMetric, based on the KS-test.\n A value of 0 is perfectly uniform; a value of 1 is purely non-uniform.\n\n Parameters\n ----------\n dataSlice : numpy.array\n Numpy structured array containing the data related to the visits provided by the slicer.\n slicePoint : dict, optional\n Dictionary containing information about the slicepoint currently active in the slicer.\n\n Returns\n -------\n float\n The uniformity measurement of the visits within time interval dTmin to dTmax.\n \"\"\"\n # Calculate consecutive visit time intervals\n dtimes = np.diff(np.sort(dataSlice[self.mjdCol]))\n # Identify dtimes within interval from dTmin/dTmax.\n good = np.where((dtimes >= self.dTmin) & (dtimes <= self.dTmax))[0]\n # If there are not enough visits in this time range, return bad value.\n if good.size < self.minNvisits:\n return self.badval\n # Throw out dtimes outside desired range, and sort, then scale to 0-1.\n dtimes = np.sort(dtimes[good])\n dtimes = (dtimes - dtimes.min()) / float(self.dTmax - self.dTmin)\n # Set up a uniform distribution between 0-1 (to match dtimes).\n uniform_dtimes = np.arange(1, dtimes.size + 1, 1) / float(dtimes.size)\n # Look at the differences between our times and the uniform times.\n dmax = np.max(np.abs(uniform_dtimes - dtimes - dtimes[1]))\n return dmax\n\n\nclass RapidRevisitMetric(BaseMetric):\n def __init__(self, mjdCol='observationStartMJD', metricName='RapidRevisit',\n dTmin=40.0 / 60.0 / 60.0 / 24.0, dTpairs = 20.0 / 60.0 / 24.0,\n dTmax = 30.0 / 60.0 / 24.0, minN1 = 28, minN2 = 82, **kwargs):\n self.mjdCol = mjdCol\n self.dTmin = dTmin\n self.dTpairs = dTpairs\n self.dTmax = dTmax\n self.minN1 = minN1\n self.minN2 = minN2\n super().__init__(col=self.mjdCol, metricName=metricName, **kwargs)\n\n def run(self, dataSlice, slicePoint=None):\n dtimes = np.diff(np.sort(dataSlice[self.mjdCol]))\n N1 = len(np.where((dtimes >= self.dTmin) & (dtimes <= self.dTpairs))[0])\n N2 = len(np.where((dtimes >= self.dTmin) & (dtimes <= self.dTmax))[0])\n if (N1 >= self.minN1) and (N2 >= self.minN2):\n val = 1\n else:\n val = 0\n return val\n\n\nclass NRevisitsMetric(BaseMetric):\n \"\"\"Calculate the number of consecutive visits with time differences less than dT.\n\n Parameters\n ----------\n dT : float, optional\n The time interval to consider (in minutes). 
Default 30.\n normed : bool, optional\n Flag to indicate whether to return the total number of consecutive visits with time\n differences less than dT (False), or the fraction of overall visits (True).\n Note that we would expect (if all visits occur in pairs within dT) this fraction would be 0.5!\n \"\"\"\n def __init__(self, mjdCol='observationStartMJD', dT=30.0, normed=False, metricName=None, **kwargs):\n units = ''\n if metricName is None:\n if normed:\n metricName = 'Fraction of revisits faster than %.1f minutes' % (dT)\n else:\n metricName = 'Number of revisits faster than %.1f minutes' % (dT)\n units = '#'\n self.mjdCol = mjdCol\n self.dT = dT / 60. / 24. # convert to days\n self.normed = normed\n super(NRevisitsMetric, self).__init__(col=self.mjdCol, units=units, metricName=metricName, **kwargs)\n\n def run(self, dataSlice, slicePoint=None):\n \"\"\"Count the number of consecutive visits occuring within time intervals dT.\n\n Parameters\n ----------\n dataSlice : numpy.array\n Numpy structured array containing the data related to the visits provided by the slicer.\n slicePoint : dict, optional\n Dictionary containing information about the slicepoint currently active in the slicer.\n\n Returns\n -------\n float\n Either the total number of consecutive visits within dT or the fraction compared to overall visits.\n \"\"\"\n dtimes = np.diff(np.sort(dataSlice[self.mjdCol]))\n nFastRevisits = np.size(np.where(dtimes <= self.dT)[0])\n if self.normed:\n nFastRevisits = nFastRevisits / float(np.size(dataSlice[self.mjdCol]))\n return nFastRevisits\n\n\nclass IntraNightGapsMetric(BaseMetric):\n \"\"\"\n Calculate the gap between consecutive observations within a night, in hours.\n\n Parameters\n ----------\n reduceFunc : function, optional\n Function that can operate on array-like structures. Typically numpy function.\n Default np.median.\n \"\"\"\n\n def __init__(self, mjdCol='observationStartMJD', nightCol='night', reduceFunc=np.median,\n metricName='Median Intra-Night Gap', **kwargs):\n units = 'hours'\n self.mjdCol = mjdCol\n self.nightCol = nightCol\n self.reduceFunc = reduceFunc\n super(IntraNightGapsMetric, self).__init__(col=[self.mjdCol, self.nightCol],\n units=units, metricName=metricName, **kwargs)\n\n def run(self, dataSlice, slicePoint=None):\n \"\"\"Calculate the (reduceFunc) of the gap between consecutive obervations within a night.\n\n Parameters\n ----------\n dataSlice : numpy.array\n Numpy structured array containing the data related to the visits provided by the slicer.\n slicePoint : dict, optional\n Dictionary containing information about the slicepoint currently active in the slicer.\n\n Returns\n -------\n float\n The (reduceFunc) value of the gap, in hours.\n \"\"\"\n dataSlice.sort(order=self.mjdCol)\n dt = np.diff(dataSlice[self.mjdCol])\n dn = np.diff(dataSlice[self.nightCol])\n\n good = np.where(dn == 0)\n if np.size(good[0]) == 0:\n result = self.badval\n else:\n result = self.reduceFunc(dt[good]) * 24\n return result\n\n\nclass InterNightGapsMetric(BaseMetric):\n \"\"\"\n Calculate the gap between consecutive observations in different nights, in days.\n\n Parameters\n ----------\n reduceFunc : function, optional\n Function that can operate on array-like structures. 
Typically numpy function.\n Default np.median.\n \"\"\"\n def __init__(self, mjdCol='observationStartMJD', nightCol='night', reduceFunc=np.median,\n metricName='Median Inter-Night Gap', **kwargs):\n units = 'days'\n self.mjdCol = mjdCol\n self.nightCol = nightCol\n self.reduceFunc = reduceFunc\n super(InterNightGapsMetric, self).__init__(col=[self.mjdCol, self.nightCol],\n units=units, metricName=metricName, **kwargs)\n\n def run(self, dataSlice, slicePoint=None):\n \"\"\"Calculate the (reduceFunc) of the gap between consecutive nights of observations.\n Parameters\n ----------\n dataSlice : numpy.array\n Numpy structured array containing the data related to the visits provided by the slicer.\n slicePoint : dict, optional\n Dictionary containing information about the slicepoint currently active in the slicer.\n\n Returns\n -------\n float\n The (reduceFunc) of the gap between consecutive nights of observations, in days.\n \"\"\"\n dataSlice.sort(order=self.mjdCol)\n unights = np.unique(dataSlice[self.nightCol])\n if np.size(unights) < 2:\n result = self.badval\n else:\n # Find the first and last observation of each night\n firstOfNight = np.searchsorted(dataSlice[self.nightCol], unights)\n lastOfNight = np.searchsorted(dataSlice[self.nightCol], unights, side='right') - 1\n diff = dataSlice[self.mjdCol][firstOfNight[1:]] - dataSlice[self.mjdCol][lastOfNight[:-1]]\n result = self.reduceFunc(diff)\n return result\n\n\nclass VisitGapMetric(BaseMetric):\n \"\"\"\n Calculate the gap between any consecutive observations, in hours, regardless of night boundaries.\n\n Parameters\n ----------\n reduceFunc : function, optional\n Function that can operate on array-like structures. Typically numpy function.\n Default np.median.\n \"\"\"\n def __init__(self, mjdCol='observationStartMJD', nightCol='night', reduceFunc=np.median,\n metricName='VisitGap', **kwargs):\n units = 'hours'\n self.mjdCol = mjdCol\n self.nightCol = nightCol\n self.reduceFunc = reduceFunc\n super().__init__(col=[self.mjdCol, self.nightCol],\n units=units, metricName=metricName, **kwargs)\n\n def run(self, dataSlice, slicePoint=None):\n \"\"\"Calculate the (reduceFunc) of the gap between consecutive observations.\n\n Different from inter-night and intra-night gaps, between this is really just counting\n all of the times between consecutive observations (not time between nights or time within a night).\n\n Parameters\n ----------\n dataSlice : numpy.array\n Numpy structured array containing the data related to the visits provided by the slicer.\n slicePoint : dict, optional\n Dictionary containing information about the slicepoint currently active in the slicer.\n\n Returns\n -------\n float\n The (reduceFunc) of the time between consecutive observations, in hours.\n \"\"\"\n dataSlice.sort(order=self.mjdCol)\n diff = np.diff(dataSlice[self.mjdCol])\n result = self.reduceFunc(diff) * 24.\n return result\n" ]
[ [ "numpy.minimum.accumulate", "numpy.roll", "numpy.diff", "numpy.where", "numpy.arange", "numpy.sort", "numpy.abs", "numpy.size", "numpy.searchsorted", "numpy.unique" ] ]
eric-erki/datacube-core
[ "952fabcdda89d58479ef5962ad40ed85f9da7913" ]
[ "datacube_apps/pixeldrill.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nInteractive Pixel Drill for AGDCv2.\n\n\"\"\"\n# pylint: disable=import-error, wrong-import-position\n# Unavoidable with TK class hierarchy.\n# pylint: disable=too-many-ancestors, redefined-builtin\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport argparse\nimport os\nimport sys\nimport warnings\n\nimport matplotlib\nimport numpy as np\nimport pandas as pd\n\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as anim\nfrom matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg, ToolTip\n\nimport six\nfrom six.moves import tkinter_tkfiledialog, tkinter_messagebox\n\nimport datacube\n\ntry:\n import tkinter as tk\n from tkinter import tkk\n from tkinter import font\nexcept ImportError: # Python 2\n import Tkinter as tk\n import ttk\n import tkFont as font\n\n# pylint: disable=invalid-name, too-many-locals, global-variable-undefined, too-many-statements, redefined-outer-name\n# pylint: disable=broad-except\n\nFONT = (\"Helvetica\", 9)\n\n# Set our plot parameters\n\nplt.rcParams.update({\n 'legend.fontsize': 8,\n 'legend.handlelength': 3,\n 'axes.titlesize': 9,\n 'axes.labelsize': 9,\n 'xtick.labelsize': 9,\n 'ytick.labelsize': 9,\n 'font.family': 'sans'})\n\n\nclass Toolbar(NavigationToolbar2TkAgg):\n def __init__(self, canvas, parent):\n self.toolitems = (\n ('Unzoom', 'Reset original view', 'home', 'home'),\n ('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),\n ('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),\n (None, None, None, None),\n ('Save', 'Save', 'filesave', 'save_movie'),\n (None, None, None, None),\n ('Prev', 'Previous observation', 'back', 'backimg'),\n ('Next', 'Next observation', 'forward', 'fwdimg'),\n (None, None, None, None),\n )\n NavigationToolbar2TkAgg.__init__(self, canvas, parent)\n self._init_toolbar()\n self.configure(background='black')\n\n def _Button(self, text, file, command, extension='.gif'):\n b = tk.Button(master=self, text=text, padx=2, pady=2, command=command,\n relief=tk.FLAT, font=FONT, justify=tk.CENTER)\n b.pack(side=tk.LEFT)\n return b\n\n def _init_toolbar(self):\n xmin, xmax = self.canvas.figure.bbox.intervalx\n height, width = 40, xmax - xmin\n tk.Frame.__init__(self, master=self.window,\n width=int(width), height=int(height),\n borderwidth=2)\n self.update()\n\n for text, tooltip_text, image_file, callback in self.toolitems:\n if text is None:\n pass\n else:\n button = self._Button(text=text, file=image_file,\n command=getattr(self, callback))\n if tooltip_text is not None:\n ToolTip.createToolTip(button, tooltip_text)\n button.configure(background='black', foreground='white')\n\n self.message = tk.StringVar(master=self)\n self._label = tk.Label(master=self, textvariable=self.message,\n font=FONT)\n self._label.pack(side=tk.RIGHT)\n self.pack(side=tk.BOTTOM, fill=tk.X)\n self._label.configure(background='black', foreground='white')\n\n def mouse_move(self, event):\n self._set_cursor(event)\n if event.inaxes and event.inaxes.get_navigate():\n try:\n s = event.inaxes.format_coord(event.xdata, event.ydata)\n self.set_message(s)\n except (ValueError, OverflowError):\n pass\n\n def fwdimg(self, *args):\n fwdimg()\n\n def backimg(self, *args):\n backimg()\n\n def save_movie(self, *args):\n filetypes = self.canvas.get_supported_filetypes().copy()\n default_filetype = self.canvas.get_default_filetype()\n\n default_filetype_name = filetypes[default_filetype]\n del filetypes[default_filetype]\n\n sorted_filetypes 
= list(six.iteritems(filetypes))\n sorted_filetypes.sort()\n sorted_filetypes.insert(0, (default_filetype, default_filetype_name))\n\n defaultextension = ''\n initialdir = plt.rcParams.get('savefig.directory', '')\n initialdir = os.path.expanduser(initialdir)\n initialfile = 'movie.mp4'\n fname = tkinter_tkfiledialog.asksaveasfilename(\n master=self.window,\n title='Save the stack',\n filetypes=[('MPEG 4', '*.mp4')],\n defaultextension=defaultextension,\n initialdir=initialdir,\n initialfile=initialfile,\n )\n\n if fname == \"\" or fname == ():\n return\n else:\n if initialdir == '':\n plt.rcParams['savefig.directory'] = initialdir\n else:\n plt.rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))\n try:\n writer = anim.writers['ffmpeg']\n mwriter = writer(fps=1,\n bitrate=0,\n codec='h264',\n # extra_args=['-crf', '23', '-pix_fmt' 'yuv420p'],\n metadata={})\n with mwriter.saving(mainfig, fname, 140):\n print(' '.join(mwriter._args())) # pylint: disable=protected-access\n for i in range(ntime):\n changeimg(i)\n mwriter.grab_frame()\n except Exception as e:\n tkinter_messagebox.showerror(\"Error saving file\", str(e))\n\n\nclass DrillToolbar(NavigationToolbar2TkAgg):\n def __init__(self, canvas, parent):\n self.toolitems = (\n ('CSV', 'Save CSV', 'filesave', 'save_csv'),\n ('FIG', 'Save figure', 'filesave', 'save_figure'),\n )\n NavigationToolbar2TkAgg.__init__(self, canvas, parent)\n self._init_toolbar()\n self.configure(background='black')\n\n def _Button(self, text, file, command, extension='.gif'):\n b = tk.Button(master=self, text=text, padx=2, pady=2, command=command,\n relief=tk.FLAT, font=FONT)\n b.pack(side=tk.LEFT)\n return b\n\n def _init_toolbar(self):\n xmin, xmax = self.canvas.figure.bbox.intervalx\n height, width = 30, xmax - xmin\n tk.Frame.__init__(self, master=self.window,\n width=int(width), height=int(height),\n borderwidth=2)\n self.update()\n\n for text, tooltip_text, image_file, callback in self.toolitems:\n if text is None:\n # spacer, unhandled in Tk\n pass\n else:\n button = self._Button(text=text, file=image_file,\n command=getattr(self, callback))\n if tooltip_text is not None:\n ToolTip.createToolTip(button, tooltip_text)\n button.configure(background='black', foreground='white')\n\n self.message = tk.StringVar(master=self)\n\n def save_csv(self, *args):\n initialdir = plt.rcParams.get('savefig.directory', '')\n initialdir = os.path.expanduser(initialdir)\n fname = tkinter_tkfiledialog.asksaveasfilename(\n master=self.window,\n title='Save the pixel drill',\n filetypes=[('CSV', '*.csv')],\n defaultextension='',\n initialdir=initialdir,\n initialfile='pixeldrill.csv',\n )\n\n if fname == \"\" or fname == ():\n return\n else:\n if initialdir == '':\n plt.rcParams['savefig.directory'] = initialdir\n else:\n plt.rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))\n try:\n ds = pd.DataFrame(data=ts,\n index=times,\n columns=bands)\n ds.to_csv(fname)\n\n except Exception as e:\n tkinter_messagebox.showerror(\"Error saving file\", str(e))\n\n def save_figure(self, *args):\n filetypes = self.canvas.get_supported_filetypes().copy()\n default_filetype = self.canvas.get_default_filetype()\n\n default_filetype_name = filetypes[default_filetype]\n del filetypes[default_filetype]\n\n sorted_filetypes = list(six.iteritems(filetypes))\n sorted_filetypes.sort()\n sorted_filetypes.insert(0, (default_filetype, default_filetype_name))\n\n initialdir = plt.rcParams.get('savefig.directory', '')\n initialdir = os.path.expanduser(initialdir)\n 
initialfile = 'pixeldrill.pdf'\n fname = tkinter_tkfiledialog.asksaveasfilename(\n master=self.window,\n title='Save the pixel drill',\n filetypes=[('PNG', '*.png'), ('PDF', '*.pdf')],\n defaultextension='',\n initialdir=initialdir,\n initialfile=initialfile,\n )\n\n if fname == \"\" or fname == ():\n return\n else:\n if initialdir == '':\n plt.rcParams['savefig.directory'] = initialdir\n else:\n plt.rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))\n try:\n fig = plt.figure(figsize=(6, 4.5))\n\n ax3 = fig.add_subplot(211, xmargin=0, ymargin=0)\n ax3.set_xticks(range(nband))\n ax3.set_xticklabels(bands)\n ax3.set_title('Spectral profiles through time')\n ax3.set_xlim((-0.2, nband - 0.8))\n ax3.set_ylim((0, np.nanmax(data)))\n ax3.xaxis.grid(color='black', linestyle='dotted')\n\n box = ax3.get_position()\n ax3.set_position([box.x0, box.y0 + box.height * 0.1,\n box.width, box.height * 0.8])\n\n tindex = range(1, len(times) + 1)\n\n ax4 = fig.add_subplot(212, xmargin=0, ymargin=0)\n ax4.set_title('Band time series')\n\n ax4.set_xticks(tindex)\n ax4.set_xlim(0.9, tindex[-1] + 0.1)\n ax4.set_ylim((0, np.nanmax(data)))\n\n for i, p in enumerate(ts.T):\n ax3.plot(range(nband), p, c='k')\n\n for i in range(ts.shape[0]):\n tt = ts[i, :]\n ax4.plot(tindex, tt, lw=1,\n marker='.', linestyle='-', color=colors[i],\n label=bands[i])\n\n ax4.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2),\n labelspacing=0.8, handletextpad=0, handlelength=2,\n borderaxespad=0, ncol=nband, columnspacing=0.5)\n\n fig.savefig(fname, bbox_inches='tight')\n\n # plt.close(fig)\n\n except Exception as e:\n tkinter_messagebox.showerror(\"Error saving file\", str(e))\n\n\nclass Formatter(object):\n def __init__(self, vi, names, data):\n self.vi = vi\n self.names = names\n self.data = data\n\n def __call__(self, x, y):\n xi, yi = int(round(x, 0)), int(round(y, 0))\n values = ' '.join(['{}:{}'.format(n, d) for n, d in\n zip(self.names, self.data[yi, xi, :, vi])])\n return 'x:{} y:{}\\n{}'.format(xi, yi, values)\n\n\ndef dcmap(length, base_cmap=None):\n \"\"\"Create an length-bin discrete colormap from the specified input map.\"\"\"\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, length))\n cmap_name = base.name + str(length)\n return base.from_list(cmap_name, color_list, length)\n\n\ndef sizefmt(num, suffix='B'):\n for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, 'Yi', suffix)\n\n\ndef setfg(ax, color):\n \"\"\"Set the color of the frame, major ticks, tick labels, axis labels,\n title and legend.\"\"\"\n for tl in ax.get_xticklines() + ax.get_yticklines():\n tl.set_color(color)\n for spine in ax.spines:\n ax.spines[spine].set_edgecolor(color)\n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_color(color)\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_color(color)\n ax.axes.xaxis.label.set_color(color)\n ax.axes.yaxis.label.set_color(color)\n ax.axes.xaxis.get_offset_text().set_color(color)\n ax.axes.yaxis.get_offset_text().set_color(color)\n ax.axes.title.set_color(color)\n lh = ax.get_legend()\n if lh is not None:\n lh.get_title().set_color(color)\n lh.legendPatch.set_edgecolor('none')\n labels = lh.get_texts()\n for lab in labels:\n lab.set_color(color)\n for tl in ax.get_xticklabels():\n tl.set_color(color)\n for tl in ax.get_yticklabels():\n tl.set_color(color)\n\n\ndef setbg(ax, color):\n \"\"\"Set the background color of the current 
axes (and legend).\"\"\"\n ax.patch.set_facecolor(color)\n lh = ax.get_legend()\n if lh is not None:\n lh.legendPatch.set_facecolor(color)\n\n\ndef drill(x=0, y=0):\n \"\"\"Do the pixel drill.\"\"\"\n\n # Get slice\n\n global ts\n ts = data[y, x, :, :]\n\n # Plot spectral profile\n\n ax1.lines = []\n for i, p in enumerate(ts.T):\n ax1.plot(range(nband), p, c='w')\n # ax1.set_ylim((0, np.nanmax(ts)*1.2))\n\n # Plot time series\n\n ax2.lines = []\n for i in range(ts.shape[0]):\n tt = ts[i, :]\n ax2.plot(tindex, tt, lw=1,\n marker='.', linestyle='-', color=colors[i],\n label=bands[i])\n # ax2.set_xlim(-0.1, tindex[-1] + 0.1)\n # ax2.set_xticks(tindex)\n\n ax2.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2),\n labelspacing=0.8, handletextpad=0, handlelength=2,\n borderaxespad=0, ncol=nband, columnspacing=0.5)\n\n setfg(ax2, 'white')\n setbg(ax2, 'black')\n\n # Update figure\n\n drillfig.canvas.set_window_title('Pixel drill @ ({},{})'.format(x, y))\n drillfig.canvas.draw()\n\n\ndef changeimg(i):\n \"\"\"Change image shown.\"\"\"\n global vi\n\n if vi == i:\n return\n\n # Scale and fix image\n img = data[:, :, vbnds, i].copy()\n mask = (img > maxvalue).any(axis=2)\n img = img / maxvalue\n img[mask] = 1.0\n mask = np.isnan(img).any(axis=2)\n img[mask] = 0.0\n\n # Draw it\n mainimg.set_data(img)\n mainfig.canvas.set_window_title('[{}/{}] {}. Data mem usage: {}'.format(i + 1, ntime, times[i], memusage))\n mainfig.canvas.draw()\n\n vi = i\n\n\ndef onclick(event):\n \"\"\"Handle a click event on the main image.\"\"\"\n global lastclick\n try:\n x = int(round(event.xdata))\n y = int(round(event.ydata))\n b = int(event.button)\n if b in [2, 3]:\n lastclick = (x, y)\n drill(x, y)\n except TypeError:\n pass\n\n\ndef onclickpd(event):\n \"\"\"Handle a click event in the pixel drill.\"\"\"\n global vi\n vi = int(round(event.xdata))\n changeimg(vi)\n\n\ndef onpress(event):\n \"\"\"Handle a keyboard event.\"\"\"\n\n if event.key == 'right':\n fwdimg()\n return\n\n if event.key == 'left':\n backimg()\n return\n\n\ndef fwdimg():\n \"\"\"Show next observation.\"\"\"\n i = min(vi + 1, data.shape[3] - 1)\n changeimg(i)\n\n\ndef backimg():\n \"\"\"Show previous observation.\"\"\"\n i = max(0, vi - 1)\n changeimg(i)\n\n\ndef run(latrange=None, lonrange=None, timerange=None, measurements=None,\n valuemax=None, product=None, groupby=None, verbose=False):\n \"\"\"Do all the work.\"\"\"\n\n # Keep track of some variables globally instead of wrapping\n # everything in a big object\n\n global vi\n global lastclick\n global data\n global ax, ax1, ax2\n global nband, tindex, colors, bands, vbnds, ntime, times\n global drillfig, mainfig, mainimg\n global maxvalue, memusage\n\n # Try to get data\n\n try:\n print('loading data from the datacube...', end='')\n\n # Query the data\n\n dc = datacube.Datacube()\n dcdata = dc.load(product=product,\n measurements=measurements,\n time=timerange,\n latitude=latrange,\n longitude=lonrange,\n group_by=groupby)\n\n # Check that we have data returned\n\n if dcdata.data_vars == {}:\n print('loading data failed, no data in that range.')\n sys.exit(1)\n\n # Extract times and band information\n dcdata = dcdata.to_array(dim='band')\n\n times = dcdata.coords['time'].to_index().tolist()\n bands = dcdata.coords['band'].to_index().tolist()\n bcols = {b: i for i, b in enumerate(bands)}\n\n nband = len(bands)\n ntime = len(times)\n\n # Work out what to show for images\n\n visible = ['red', 'green', 'blue']\n if all([b in bands for b in visible]):\n vbnds = [bcols[b] for b in visible]\n elif 
len(bands) >= 3:\n vbnds = [bcols[b] for b in bands[:3]]\n else:\n vbnds = [0, 0, 0]\n\n print('done')\n\n except LookupError:\n print('failed')\n\n # Display a list of valid products\n\n if product is None:\n print('valid products are:')\n prods = dc.list_products()[['name', 'description']]\n print(prods.to_string(index=False,\n justify='left',\n header=False,\n formatters={'description': lambda s: '(' + s + ')'}))\n sys.exit(1)\n\n except Exception:\n print('failed')\n sys.exit(2)\n\n # Nasty but it has to be done\n\n data = dcdata.transpose('y', 'x', 'band', 'time').data.astype(np.float32)\n data[data == -999] = np.nan\n\n # Set variables\n\n vi = 0\n lastclick = (0, 0)\n memusage = sizefmt(data.nbytes)\n maxvalue = valuemax\n\n # Setup the main figure\n\n mainfig = plt.figure(figsize=(6, 6))\n mainfig.canvas.set_window_title('[{}/{}] {}. Data mem usage: {}'.format(1, ntime, times[0], memusage))\n mainfig.patch.set_facecolor('black')\n\n ax = plt.Axes(mainfig, [0., 0., 1., 1.])\n ax.format_coord = Formatter(vi, bands, data)\n ax.set_axis_off()\n ax.invert_yaxis()\n mainfig.add_axes(ax)\n\n # Surgery on the toolbar\n\n canvas = mainfig.canvas\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n window = mainfig.canvas.toolbar.window\n mainfig.canvas.toolbar.pack_forget()\n mainfig.canvas.toolbar = Toolbar(mainfig.canvas, window)\n mainfig.canvas.toolbar.update()\n mainfig.canvas.toolbar.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=0)\n canvas.show()\n\n # Scale and fix visible image\n\n img = data[:, :, vbnds, 0].copy()\n mask = (img > maxvalue).any(axis=2)\n img = img / maxvalue\n img[mask] = 1.0\n mask = np.isnan(img).any(axis=2)\n img[mask] = 0.0\n\n # Show the image\n\n mainimg = plt.imshow(img, interpolation='nearest', origin='upper', aspect='auto', vmin=0, vmax=1)\n\n # Setup the drill figure\n\n drillfig = plt.figure(figsize=(4, 3))\n drillfig.patch.set_facecolor('black')\n drillfig.canvas.toolbar.pack_forget()\n\n # Surgery on the toolbar\n\n canvas = drillfig.canvas\n window = drillfig.canvas.toolbar.window\n drillfig.canvas.toolbar.pack_forget()\n drillfig.canvas.toolbar = DrillToolbar(drillfig.canvas, window)\n drillfig.canvas.toolbar.update()\n drillfig.canvas.toolbar.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=0)\n canvas.show()\n\n # Spectral profile graph\n\n ax1 = drillfig.add_subplot(211, xmargin=0)\n ax1.set_xticks(range(nband))\n ax1.set_xticklabels(bands)\n ax1.set_title('Spectral profiles through time')\n ax1.set_xlim((-0.2, nband - 0.8))\n ax1.set_ylim((0, np.nanmax(data)))\n ax1.xaxis.grid(color='white', linestyle='dotted')\n\n setfg(ax1, 'white')\n setbg(ax1, 'black')\n\n box = ax1.get_position()\n ax1.set_position([box.x0, box.y0 + box.height * 0.2,\n box.width, box.height * 0.8])\n\n # Time series graph\n\n tindex = range(1, len(times) + 1)\n\n ax2 = drillfig.add_subplot(212, xmargin=0)\n ax2.set_title('Band time series')\n\n ax2.set_xticks(tindex)\n # ax2.set_xticklabels(times)\n ax2.set_xlim(0.9, tindex[-1] + 0.1)\n ax2.set_ylim((0, np.nanmax(data)))\n\n setfg(ax2, 'white')\n setbg(ax2, 'black')\n\n box = ax2.get_position()\n ax2.set_position([box.x0, box.y0 + box.height * 0.2,\n box.width, box.height * 0.8])\n\n # Work out colors for bands in time series\n\n colors = [m[0] for m in bands if m[0] in ['r', 'g', 'b']]\n ntoadd = max(0, len(bands) - len(colors))\n cmap = dcmap(ntoadd, 'spring')\n colors = colors + [cmap(i) for i in range(ntoadd)]\n\n drill(*lastclick)\n\n # Hook up the event handlers\n\n 
mainfig.canvas.mpl_connect('button_press_event', onclick)\n mainfig.canvas.mpl_connect('key_press_event', onpress)\n mainfig.canvas.mpl_connect('close_event', lambda x: plt.close())\n\n drillfig.canvas.mpl_connect('close_event', lambda x: plt.close())\n drillfig.canvas.mpl_connect('button_press_event', onclickpd)\n\n # Show it\n\n plt.show()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-latrange',\n help='latitude range',\n nargs=2,\n default=[-34.5, -35],\n required=False)\n\n parser.add_argument('-lonrange',\n help='longitude range',\n nargs=2,\n default=[148.5, 149],\n required=False)\n\n parser.add_argument('-timerange',\n help='time range',\n nargs=2,\n default=['2011-3-2', '2011-6-5'],\n type=str,\n required=False)\n\n parser.add_argument('-measurements',\n help='measurement',\n action='append',\n type=str,\n required=False)\n\n parser.add_argument('-product',\n help='product',\n required=False)\n\n parser.add_argument('-groupby',\n help='groupby',\n required=False)\n\n parser.add_argument('-valuemax',\n help='max value',\n type=float,\n default=4000,\n required=False)\n\n parser.add_argument('-verbose',\n help='verbose output',\n default=True,\n required=False)\n\n args = parser.parse_args()\n kwargs = vars(args)\n\n if not args.product:\n parser.print_help()\n print('\\n\\nValid choices for PRODUCT are:')\n dc = datacube.Datacube()\n prods = dc.list_products()['name']\n print(prods.to_string(index=False, header=False))\n parser.exit()\n\n if args.verbose:\n print(kwargs)\n\n run(**kwargs)\n\n\nif __name__ == '__main__':\n try:\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')\n main()\n except KeyboardInterrupt:\n pass\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.rcParams.get", "numpy.isnan", "numpy.nanmax", "pandas.DataFrame", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.backends.backend_tkagg.NavigationToolbar2TkAgg.__init__", "matplotlib.pyplot.Axes", "matplotlib.backends.backend_tkagg.ToolTip.createToolTip", "matplotlib.pyplot.show", "numpy.linspace", "matplotlib.pyplot.cm.get_cmap", "matplotlib.pyplot.imshow" ] ]
LaudateCorpus1/ml-equivariant-neural-rendering
[ "44e5bfc6ebf479ea2b7bf25dddf94e05637e4fc8" ]
[ "misc/quantitative_evaluation.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom misc.dataloaders import create_batch_from_data_list\n\n\ndef get_dataset_psnr(device, model, dataset, source_img_idx_shift=64,\n batch_size=10, max_num_scenes=None):\n \"\"\"Returns PSNR for each scene in a dataset by comparing the view predicted\n by a model and the ground truth view.\n\n Args:\n device (torch.device): Device to perform PSNR calculation on.\n model (models.neural_renderer.NeuralRenderer): Model to evaluate.\n dataset (misc.dataloaders.SceneRenderDataset): Dataset to evaluate model\n performance on. Should be one of \"chairs-test\" or \"cars-test\".\n source_img_idx_shift (int): Index of source image for each scene. For\n example if 00064.png is the source view, then\n source_img_idx_shift = 64.\n batch_size (int): Batch size to use when generating predictions. This\n should be a divisor of the number of images per scene.\n max_num_scenes (None or int): Optionally limit the maximum number of\n scenes to calculate PSNR for.\n\n Notes:\n This function should be used with the ShapeNet chairs and cars *test*\n sets.\n \"\"\"\n num_imgs_per_scene = dataset.num_imgs_per_scene\n # Set number of scenes to calculate\n num_scenes = dataset.num_scenes\n if max_num_scenes is not None:\n num_scenes = min(max_num_scenes, num_scenes)\n # Calculate number of batches per scene\n assert (num_imgs_per_scene - 1) % batch_size == 0, \"Batch size {} must divide number of images per scene {}.\"\n # Comparison are made against all images except the source image (and\n # therefore subtract 1 from total number of images) \n batches_per_scene = (num_imgs_per_scene - 1) // batch_size\n # Initialize psnr values\n psnrs = []\n for i in range(num_scenes):\n # Extract source view\n source_img_idx = i * num_imgs_per_scene + source_img_idx_shift\n img_source = dataset[source_img_idx][\"img\"].unsqueeze(0).repeat(batch_size, 1, 1, 1).to(device)\n render_params = dataset[source_img_idx][\"render_params\"]\n azimuth_source = torch.Tensor([render_params[\"azimuth\"]]).repeat(batch_size).to(device)\n elevation_source = torch.Tensor([render_params[\"elevation\"]]).repeat(batch_size).to(device)\n # Infer source scene\n scenes = model.inverse_render(img_source)\n\n # Iterate over all other views of scene\n num_points_in_batch = 0\n data_list = []\n scene_psnr = 0.\n for j in range(num_imgs_per_scene):\n if j == source_img_idx_shift:\n continue # Do not compare against same image\n # Add new image to list of images we want to compare to\n data_list.append(dataset[i * num_imgs_per_scene + j])\n num_points_in_batch += 1\n # If we have filled up a batch, make psnr calculation\n if num_points_in_batch == batch_size:\n # Create batch for target data\n img_target, azimuth_target, elevation_target = create_batch_from_data_list(data_list)\n img_target = img_target.to(device)\n azimuth_target = azimuth_target.to(device)\n elevation_target = elevation_target.to(device)\n # Rotate scene and render image\n rotated = model.rotate_source_to_target(scenes, azimuth_source,\n elevation_source, azimuth_target,\n elevation_target)\n img_predicted = model.render(rotated).detach()\n scene_psnr += get_psnr(img_predicted, img_target)\n data_list = []\n num_points_in_batch = 0\n\n psnrs.append(scene_psnr / batches_per_scene)\n\n print(\"{}/{}: Current - {:.3f}, Mean - {:.4f}\".format(i + 1,\n num_scenes,\n psnrs[-1],\n torch.mean(torch.Tensor(psnrs))))\n\n return psnrs\n\n\ndef get_psnr(prediction, target):\n \"\"\"Returns PSNR between a batch of predictions and a batch of 
targets.\n\n Args:\n prediction (torch.Tensor): Shape (batch_size, channels, height, width).\n target (torch.Tensor): Shape (batch_size, channels, height, width).\n \"\"\"\n batch_size = prediction.shape[0]\n mse_per_pixel = F.mse_loss(prediction, target, reduction='none')\n mse_per_img = mse_per_pixel.view(batch_size, -1).mean(dim=1)\n psnr = 10 * torch.log10(1 / mse_per_img)\n return torch.mean(psnr).item()\n" ]
[ [ "torch.nn.functional.mse_loss", "torch.Tensor", "torch.mean", "torch.log10" ] ]
LorenzoM1997/Multiclass-classifier
[ "06dd0d855282cc22389073cdad5968f7e3ac8a5d" ]
[ "TA classifier tensorflow.py" ]
[ "#personal best: 53.6424% accurancy\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\ndef get_data():\r\n \r\n X = np.array([[0,0,0]])\r\n y = np.array([[0,0,0]])\r\n \r\n # to work properly, you must have the file tae_datatset.txt in the same folder\r\n input_file = open(\"tae_dataset.txt\",\"r\")\r\n\r\n # there are 4 parameters in the dataset\r\n # NATIVE SPEAKER: 1 = English Speaker, 2 = non-English speaker\r\n # SUMMER: 1 = Summer, 2= Regular\r\n # CLASS SIZE: (numerical)\r\n # TA evaluation: 1 = Low, 2 = Medium, 3 = High\r\n for line in input_file:\r\n native_speaker = int(line.split(',')[0])-1\r\n summer = int(line.split(',')[1])-1\r\n class_size = 1/(float(line.split(',')[2]))\r\n pre_eval = int(line.split(',')[3])\r\n if pre_eval == 1: evaluation = [1,0,0]\r\n elif pre_eval == 2: evaluation = [0,1,0]\r\n else: evaluation = [0,0,1]\r\n X = np.append(X,[[native_speaker,summer,class_size]],axis=0)\r\n y = np.append(y,[evaluation],axis=0)\r\n\r\n X = np.delete(X, 0, 0)\r\n y = np.delete(y, 0, 0)\r\n\r\n input_file.close()\r\n\r\n return X,y\r\n\r\n# all variables that define the structure of the network \r\nx = tf.placeholder(tf.float32, [None, 3])\r\nW1 = tf.Variable(tf.random_uniform([3, 5]))\r\nW2 = tf.Variable(tf.random_uniform([5, 3]))\r\nb1 = tf.Variable(tf.zeros([5]))\r\nb2 = tf.Variable(tf.zeros([3]))\r\n\r\n# for the single hidden layer we are using I choose the sigmoid activation function\r\nh1 = tf.sigmoid(tf.matmul(x, W1) + b1)\r\np = tf.matmul(h1, W2) + b2\r\n\r\n#the expected output matrix\r\ny = tf.placeholder(tf.float32, [None, 3])\r\n\r\n#the learning rate of the Gradient Descent Optimizer\r\nlr = 0.5\r\n\r\n# we are going to use the cross entropy function to measure the loss of the neural network\r\ncross_entropy = tf.reduce_mean(\r\n tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=p))\r\ntrain_step = tf.train.GradientDescentOptimizer(lr).minimize(cross_entropy)\r\n\r\nsess = tf.InteractiveSession()\r\ntf.global_variables_initializer().run()\r\n\r\n# Train\r\n# we get the batch of data from the dataset file. In this case we use all the inputs together\r\nbatch_xs, batch_ys = get_data()\r\nfor i in range(50000):\r\n sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})\r\n if i%5000 == 0:\r\n # Test trained model\r\n print(\"loss = \", sess.run(cross_entropy, feed_dict={x: batch_xs,\r\n y: batch_ys}))\r\n correct_prediction = tf.equal(tf.argmax(p, 1), tf.argmax(y, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n print(\"accurancy = \", sess.run(accuracy, feed_dict={x: batch_xs,\r\n y: batch_ys}))\r\n\r\n" ]
[ [ "numpy.array", "numpy.delete", "tensorflow.zeros", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.argmax", "tensorflow.random_uniform", "tensorflow.matmul", "tensorflow.placeholder", "numpy.append", "tensorflow.train.GradientDescentOptimizer", "tensorflow.global_variables_initializer", "tensorflow.cast", "tensorflow.InteractiveSession" ] ]
rhjmoore/dask
[ "8943efd28a5106e4617a706062c2a3b05b0355e7" ]
[ "dask/dataframe/groupby.py" ]
[ "import collections\nimport itertools as it\nimport operator\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import M, derived_from, funcname, itemgetter\nfrom .core import (\n DataFrame,\n Series,\n _extract_meta,\n aca,\n map_partitions,\n new_dd_object,\n no_default,\n split_out_on_index,\n)\nfrom .methods import concat, drop_columns\nfrom .shuffle import shuffle\nfrom .utils import (\n PANDAS_GT_110,\n insert_meta_param_description,\n is_dataframe_like,\n is_series_like,\n make_meta,\n raise_on_meta_error,\n)\n\n# #############################################\n#\n# GroupBy implementation notes\n#\n# Dask groupby supports reductions, i.e., mean, sum and alike, and apply. The\n# former do not shuffle the data and are efficiently implemented as tree\n# reductions. The latter is implemented by shuffling the underlying partiitons\n# such that all items of a group can be found in the same parititon.\n#\n# The argument to ``.groupby``, the index, can be a ``str``, ``dd.DataFrame``,\n# ``dd.Series``, or a list thereof. In operations on the grouped object, the\n# divisions of the the grouped object and the items of index have to align.\n# Currently, there is no support to shuffle the index values as part of the\n# groupby operation. Therefore, the alignment has to be guaranteed by the\n# caller.\n#\n# To operate on matching partitions, most groupby operations exploit the\n# corresponding support in ``apply_concat_apply``. Specifically, this function\n# operates on matching partitions of frame-like objects passed as varargs.\n#\n# After the initial chunk step, the passed index is implicitly passed along to\n# subsequent operations as the index of the partitions. Groupby operations on\n# the individual partitions can then access the index via the ``levels``\n# parameter of the ``groupby`` function. The correct argument is determined by\n# the ``_determine_levels`` function.\n#\n# To minimize overhead, series in an index that were obtained by getitem on the\n# object to group are not passed as series to the various operations, but as\n# columnn keys. 
This transformation is implemented as ``_normalize_index``.\n#\n# #############################################\n\n\ndef _determine_levels(index):\n \"\"\"Determine the correct levels argument to groupby.\"\"\"\n if isinstance(index, (tuple, list)) and len(index) > 1:\n return list(range(len(index)))\n else:\n return 0\n\n\ndef _normalize_index(df, index):\n \"\"\"Replace series with column names in an index wherever possible.\"\"\"\n if not isinstance(df, DataFrame):\n return index\n\n elif isinstance(index, list):\n return [_normalize_index(df, col) for col in index]\n\n elif (\n is_series_like(index)\n and index.name in df.columns\n and index._name == df[index.name]._name\n ):\n return index.name\n\n elif (\n isinstance(index, DataFrame)\n and set(index.columns).issubset(df.columns)\n and index._name == df[index.columns]._name\n ):\n return list(index.columns)\n\n else:\n return index\n\n\ndef _maybe_slice(grouped, columns):\n \"\"\"\n Slice columns if grouped is pd.DataFrameGroupBy\n \"\"\"\n # FIXME: update with better groupby object detection (i.e.: ngroups, get_group)\n if \"groupby\" in type(grouped).__name__.lower():\n if columns is not None:\n if isinstance(columns, (tuple, list, set, pd.Index)):\n columns = list(columns)\n return grouped[columns]\n return grouped\n\n\ndef _is_aligned(df, by):\n \"\"\"Check if `df` and `by` have aligned indices\"\"\"\n if is_series_like(by) or is_dataframe_like(by):\n return df.index.equals(by.index)\n elif isinstance(by, (list, tuple)):\n return all(_is_aligned(df, i) for i in by)\n else:\n return True\n\n\ndef _groupby_raise_unaligned(df, **kwargs):\n \"\"\"Groupby, but raise if df and `by` key are unaligned.\n\n Pandas supports grouping by a column that doesn't align with the input\n frame/series/index. However, the reindexing does not seem to be\n threadsafe, and can result in incorrect results. Since grouping by an\n unaligned key is generally a bad idea, we just error loudly in dask.\n\n For more information see pandas GH issue #15244 and Dask GH issue #1876.\"\"\"\n by = kwargs.get(\"by\", None)\n if by is not None and not _is_aligned(df, by):\n msg = (\n \"Grouping by an unaligned index is unsafe and unsupported.\\n\"\n \"This can be caused by filtering only one of the object or\\n\"\n \"grouping key. 
For example, the following works in pandas,\\n\"\n \"but not in dask:\\n\"\n \"\\n\"\n \"df[df.foo < 0].groupby(df.bar)\\n\"\n \"\\n\"\n \"This can be avoided by either filtering beforehand, or\\n\"\n \"passing in the name of the column instead:\\n\"\n \"\\n\"\n \"df2 = df[df.foo < 0]\\n\"\n \"df2.groupby(df2.bar)\\n\"\n \"# or\\n\"\n \"df[df.foo < 0].groupby('bar')\\n\"\n \"\\n\"\n \"For more information see dask GH issue #1876.\"\n )\n raise ValueError(msg)\n elif by is not None and len(by):\n # since we're coming through apply, `by` will be a tuple.\n # Pandas treats tuples as a single key, and lists as multiple keys\n # We want multiple keys\n if isinstance(by, str):\n by = [by]\n kwargs.update(by=list(by))\n return df.groupby(**kwargs)\n\n\ndef _groupby_slice_apply(\n df, grouper, key, func, *args, group_keys=True, dropna=None, observed=None, **kwargs\n):\n # No need to use raise if unaligned here - this is only called after\n # shuffling, which makes everything aligned already\n dropna = {\"dropna\": dropna} if dropna is not None else {}\n observed = {\"observed\": observed} if observed is not None else {}\n g = df.groupby(grouper, group_keys=group_keys, **observed, **dropna)\n if key:\n g = g[key]\n return g.apply(func, *args, **kwargs)\n\n\ndef _groupby_slice_transform(\n df, grouper, key, func, *args, group_keys=True, dropna=None, observed=None, **kwargs\n):\n # No need to use raise if unaligned here - this is only called after\n # shuffling, which makes everything aligned already\n dropna = {\"dropna\": dropna} if dropna is not None else {}\n observed = {\"observed\": observed} if observed is not None else {}\n g = df.groupby(grouper, group_keys=group_keys, **observed, **dropna)\n if key:\n g = g[key]\n\n # Cannot call transform on an empty dataframe\n if len(df) == 0:\n return g.apply(func, *args, **kwargs)\n\n return g.transform(func, *args, **kwargs)\n\n\ndef _groupby_get_group(df, by_key, get_key, columns):\n # SeriesGroupBy may pass df which includes group key\n grouped = _groupby_raise_unaligned(df, by=by_key)\n\n if get_key in grouped.groups:\n if is_dataframe_like(df):\n grouped = grouped[columns]\n return grouped.get_group(get_key)\n\n else:\n # to create empty DataFrame/Series, which has the same\n # dtype as the original\n if is_dataframe_like(df):\n # may be SeriesGroupBy\n df = df[columns]\n return df.iloc[0:0]\n\n\n###############################################################\n# Aggregation\n###############################################################\n\n\nclass Aggregation:\n \"\"\"User defined groupby-aggregation.\n\n This class allows users to define their own custom aggregation in terms of\n operations on Pandas dataframes in a map-reduce style. You need to specify\n what operation to do on each chunk of data, how to combine those chunks of\n data together, and then how to finalize the result.\n\n See :ref:`dataframe.groupby.aggregate` for more.\n\n Parameters\n ----------\n name : str\n the name of the aggregation. It should be unique, since intermediate\n result will be identified by this name.\n chunk : callable\n a function that will be called with the grouped column of each\n partition. It can either return a single series or a tuple of series.\n The index has to be equal to the groups.\n agg : callable\n a function that will be called to aggregate the results of each chunk.\n Again the argument(s) will be grouped series. 
If ``chunk`` returned a\n tuple, ``agg`` will be called with all of them as individual positional\n arguments.\n finalize : callable\n an optional finalizer that will be called with the results from the\n aggregation.\n\n Examples\n --------\n We could implement ``sum`` as follows:\n\n >>> custom_sum = dd.Aggregation(\n ... name='custom_sum',\n ... chunk=lambda s: s.sum(),\n ... agg=lambda s0: s0.sum()\n ... ) # doctest: +SKIP\n >>> df.groupby('g').agg(custom_sum) # doctest: +SKIP\n\n We can implement ``mean`` as follows:\n\n >>> custom_mean = dd.Aggregation(\n ... name='custom_mean',\n ... chunk=lambda s: (s.count(), s.sum()),\n ... agg=lambda count, sum: (count.sum(), sum.sum()),\n ... finalize=lambda count, sum: sum / count,\n ... ) # doctest: +SKIP\n >>> df.groupby('g').agg(custom_mean) # doctest: +SKIP\n\n Though of course, both of these are built-in and so you don't need to\n implement them yourself.\n \"\"\"\n\n def __init__(self, name, chunk, agg, finalize=None):\n self.chunk = chunk\n self.agg = agg\n self.finalize = finalize\n self.__name__ = name\n\n\ndef _groupby_aggregate(\n df, aggfunc=None, levels=None, dropna=None, sort=False, observed=None, **kwargs\n):\n dropna = {\"dropna\": dropna} if dropna is not None else {}\n observed = {\"observed\": observed} if observed is not None else {}\n\n grouped = df.groupby(level=levels, sort=sort, **observed, **dropna)\n return aggfunc(grouped, **kwargs)\n\n\ndef _apply_chunk(df, *index, dropna=None, observed=None, **kwargs):\n func = kwargs.pop(\"chunk\")\n columns = kwargs.pop(\"columns\")\n dropna = {\"dropna\": dropna} if dropna is not None else {}\n observed = {\"observed\": observed} if observed is not None else {}\n\n g = _groupby_raise_unaligned(df, by=index, **observed, **dropna)\n if is_series_like(df) or columns is None:\n return func(g, **kwargs)\n else:\n if isinstance(columns, (tuple, list, set, pd.Index)):\n columns = list(columns)\n return func(g[columns], **kwargs)\n\n\ndef _var_chunk(df, *index):\n if is_series_like(df):\n df = df.to_frame()\n\n df = df.copy()\n\n g = _groupby_raise_unaligned(df, by=index)\n x = g.sum()\n\n n = g[x.columns].count().rename(columns=lambda c: (c, \"-count\"))\n\n cols = x.columns\n df[cols] = df[cols] ** 2\n\n g2 = _groupby_raise_unaligned(df, by=index)\n x2 = g2.sum().rename(columns=lambda c: (c, \"-x2\"))\n\n return concat([x, x2, n], axis=1)\n\n\ndef _var_combine(g, levels, sort=False):\n return g.groupby(level=levels, sort=sort).sum()\n\n\ndef _var_agg(g, levels, ddof, sort=False):\n g = g.groupby(level=levels, sort=sort).sum()\n nc = len(g.columns)\n x = g[g.columns[: nc // 3]]\n # chunks columns are tuples (value, name), so we just keep the value part\n x2 = g[g.columns[nc // 3 : 2 * nc // 3]].rename(columns=lambda c: c[0])\n n = g[g.columns[-nc // 3 :]].rename(columns=lambda c: c[0])\n\n # TODO: replace with _finalize_var?\n result = x2 - x ** 2 / n\n div = n - ddof\n div[div < 0] = 0\n result /= div\n result[(n - ddof) == 0] = np.nan\n assert is_dataframe_like(result)\n result[result < 0] = 0 # avoid rounding errors that take us to zero\n return result\n\n\ndef _cov_combine(g, levels):\n return g\n\n\ndef _cov_finalizer(df, cols, std=False):\n vals = []\n num_elements = len(list(it.product(cols, repeat=2)))\n num_cols = len(cols)\n vals = list(range(num_elements))\n col_idx_mapping = dict(zip(cols, range(num_cols)))\n for i, j in it.combinations_with_replacement(df[cols].columns, 2):\n x = col_idx_mapping[i]\n y = col_idx_mapping[j]\n idx = x + num_cols * y\n mul_col = \"%s%s\" % 
(i, j)\n ni = df[\"%s-count\" % i]\n nj = df[\"%s-count\" % j]\n\n n = np.sqrt(ni * nj)\n div = n - 1\n div[div < 0] = 0\n val = (df[mul_col] - df[i] * df[j] / n).values[0] / div.values[0]\n if std:\n ii = \"%s%s\" % (i, i)\n jj = \"%s%s\" % (j, j)\n std_val_i = (df[ii] - (df[i] ** 2) / ni).values[0] / div.values[0]\n std_val_j = (df[jj] - (df[j] ** 2) / nj).values[0] / div.values[0]\n val = val / np.sqrt(std_val_i * std_val_j)\n\n vals[idx] = val\n if i != j:\n idx = num_cols * x + y\n vals[idx] = val\n\n level_1 = cols\n index = pd.MultiIndex.from_product([level_1, level_1])\n return pd.Series(vals, index=index)\n\n\ndef _mul_cols(df, cols):\n \"\"\"Internal function to be used with apply to multiply\n each column in a dataframe by every other column\n\n a b c -> a*a, a*b, b*b, b*c, c*c\n \"\"\"\n _df = type(df)()\n for i, j in it.combinations_with_replacement(cols, 2):\n col = \"%s%s\" % (i, j)\n _df[col] = df[i] * df[j]\n return _df\n\n\ndef _cov_chunk(df, *index):\n \"\"\"Covariance Chunk Logic\n\n Parameters\n ----------\n df : Pandas.DataFrame\n std : bool, optional\n When std=True we are calculating with Correlation\n\n Returns\n -------\n tuple\n Processed X, Multiplied Cols,\n \"\"\"\n if is_series_like(df):\n df = df.to_frame()\n df = df.copy()\n\n # mapping columns to str(numerical) values allows us to easily handle\n # arbitrary column names (numbers, string, empty strings)\n col_mapping = collections.OrderedDict()\n for i, c in enumerate(df.columns):\n col_mapping[c] = str(i)\n df = df.rename(columns=col_mapping)\n cols = df._get_numeric_data().columns\n\n # when grouping by external series don't exclude columns\n is_mask = any(is_series_like(s) for s in index)\n if not is_mask:\n index = [col_mapping[k] for k in index]\n cols = cols.drop(np.array(index))\n\n g = _groupby_raise_unaligned(df, by=index)\n x = g.sum()\n\n level = len(index)\n mul = g.apply(_mul_cols, cols=cols).reset_index(level=level, drop=True)\n n = g[x.columns].count().rename(columns=lambda c: \"{}-count\".format(c))\n return (x, mul, n, col_mapping)\n\n\ndef _cov_agg(_t, levels, ddof, std=False, sort=False):\n sums = []\n muls = []\n counts = []\n\n # sometime we get a series back from concat combiner\n t = list(_t)\n\n cols = t[0][0].columns\n for x, mul, n, col_mapping in t:\n sums.append(x)\n muls.append(mul)\n counts.append(n)\n col_mapping = col_mapping\n\n total_sums = concat(sums).groupby(level=levels, sort=sort).sum()\n total_muls = concat(muls).groupby(level=levels, sort=sort).sum()\n total_counts = concat(counts).groupby(level=levels).sum()\n result = (\n concat([total_sums, total_muls, total_counts], axis=1)\n .groupby(level=levels)\n .apply(_cov_finalizer, cols=cols, std=std)\n )\n\n inv_col_mapping = {v: k for k, v in col_mapping.items()}\n idx_vals = result.index.names\n idx_mapping = list()\n\n # when index is None we probably have selected a particular column\n # df.groupby('a')[['b']].cov()\n if len(idx_vals) == 1 and all(n is None for n in idx_vals):\n idx_vals = list(inv_col_mapping.keys() - set(total_sums.columns))\n\n for idx, val in enumerate(idx_vals):\n idx_name = inv_col_mapping.get(val, val)\n idx_mapping.append(idx_name)\n\n if len(result.columns.levels[0]) < len(col_mapping):\n # removing index from col_mapping (produces incorrect multiindexes)\n try:\n col_mapping.pop(idx_name)\n except KeyError:\n # when slicing the col_map will not have the index\n pass\n\n keys = list(col_mapping.keys())\n for level in range(len(result.columns.levels)):\n result.columns = 
result.columns.set_levels(keys, level=level)\n\n result.index.set_names(idx_mapping, inplace=True)\n\n # stacking can lead to a sorted index\n s_result = result.stack(dropna=False)\n assert is_dataframe_like(s_result)\n return s_result\n\n\n###############################################################\n# nunique\n###############################################################\n\n\ndef _nunique_df_chunk(df, *index, **kwargs):\n levels = kwargs.pop(\"levels\")\n name = kwargs.pop(\"name\")\n\n g = _groupby_raise_unaligned(df, by=index)\n if len(df) > 0:\n grouped = g[[name]].apply(M.drop_duplicates)\n # we set the index here to force a possibly duplicate index\n # for our reduce step\n if isinstance(levels, list):\n grouped.index = pd.MultiIndex.from_arrays(\n [grouped.index.get_level_values(level=level) for level in levels]\n )\n else:\n grouped.index = grouped.index.get_level_values(level=levels)\n else:\n # Manually create empty version, since groupby-apply for empty frame\n # results in df with no columns\n grouped = g[[name]].nunique()\n grouped = grouped.astype(df.dtypes[grouped.columns].to_dict())\n\n return grouped\n\n\ndef _drop_duplicates_rename(df):\n # Avoid duplicate index labels in a groupby().apply() context\n # https://github.com/dask/dask/issues/3039\n # https://github.com/pandas-dev/pandas/pull/18882\n names = [None] * df.index.nlevels\n return df.drop_duplicates().rename_axis(names, copy=False)\n\n\ndef _nunique_df_combine(df, levels, sort=False):\n result = df.groupby(level=levels, sort=sort).apply(_drop_duplicates_rename)\n\n if isinstance(levels, list):\n result.index = pd.MultiIndex.from_arrays(\n [result.index.get_level_values(level=level) for level in levels]\n )\n else:\n result.index = result.index.get_level_values(level=levels)\n\n return result\n\n\ndef _nunique_df_aggregate(df, levels, name, sort=False):\n return df.groupby(level=levels, sort=sort)[name].nunique()\n\n\ndef _nunique_series_chunk(df, *index, **_ignored_):\n # convert series to data frame, then hand over to dataframe code path\n assert is_series_like(df)\n\n df = df.to_frame()\n kwargs = dict(name=df.columns[0], levels=_determine_levels(index))\n return _nunique_df_chunk(df, *index, **kwargs)\n\n\n###############################################################\n# Aggregate support\n#\n# Aggregate is implemented as:\n#\n# 1. group-by-aggregate all partitions into intermediate values\n# 2. collect all partitions into a single partition\n# 3. group-by-aggregate the result into intermediate values\n# 4. 
transform all intermediate values into the result\n#\n# In Step 1 and 3 the dataframe is grouped on the same columns.\n#\n###############################################################\ndef _make_agg_id(func, column):\n return \"{!s}-{!s}-{}\".format(func, column, tokenize(func, column))\n\n\ndef _normalize_spec(spec, non_group_columns):\n \"\"\"\n Return a list of ``(result_column, func, input_column)`` tuples.\n\n Spec can be\n\n - a function\n - a list of functions\n - a dictionary that maps input-columns to functions\n - a dictionary that maps input-columns to a lists of functions\n - a dictionary that maps input-columns to a dictionaries that map\n output-columns to functions.\n\n The non-group columns are a list of all column names that are not used in\n the groupby operation.\n\n Usually, the result columns are mutli-level names, returned as tuples.\n If only a single function is supplied or dictionary mapping columns\n to single functions, simple names are returned as strings (see the first\n two examples below).\n\n Examples\n --------\n >>> _normalize_spec('mean', ['a', 'b', 'c'])\n [('a', 'mean', 'a'), ('b', 'mean', 'b'), ('c', 'mean', 'c')]\n\n >>> spec = collections.OrderedDict([('a', 'mean'), ('b', 'count')])\n >>> _normalize_spec(spec, ['a', 'b', 'c'])\n [('a', 'mean', 'a'), ('b', 'count', 'b')]\n\n >>> _normalize_spec(['var', 'mean'], ['a', 'b', 'c'])\n ... # doctest: +NORMALIZE_WHITESPACE\n [(('a', 'var'), 'var', 'a'), (('a', 'mean'), 'mean', 'a'), \\\n (('b', 'var'), 'var', 'b'), (('b', 'mean'), 'mean', 'b'), \\\n (('c', 'var'), 'var', 'c'), (('c', 'mean'), 'mean', 'c')]\n\n >>> spec = collections.OrderedDict([('a', 'mean'), ('b', ['sum', 'count'])])\n >>> _normalize_spec(spec, ['a', 'b', 'c'])\n ... # doctest: +NORMALIZE_WHITESPACE\n [(('a', 'mean'), 'mean', 'a'), (('b', 'sum'), 'sum', 'b'), \\\n (('b', 'count'), 'count', 'b')]\n\n >>> spec = collections.OrderedDict()\n >>> spec['a'] = ['mean', 'size']\n >>> spec['b'] = collections.OrderedDict([('e', 'count'), ('f', 'var')])\n >>> _normalize_spec(spec, ['a', 'b', 'c'])\n ... 
# doctest: +NORMALIZE_WHITESPACE\n [(('a', 'mean'), 'mean', 'a'), (('a', 'size'), 'size', 'a'), \\\n (('b', 'e'), 'count', 'b'), (('b', 'f'), 'var', 'b')]\n \"\"\"\n if not isinstance(spec, dict):\n spec = collections.OrderedDict(zip(non_group_columns, it.repeat(spec)))\n\n res = []\n\n if isinstance(spec, dict):\n for input_column, subspec in spec.items():\n if isinstance(subspec, dict):\n res.extend(\n ((input_column, result_column), func, input_column)\n for result_column, func in subspec.items()\n )\n\n else:\n if not isinstance(subspec, list):\n subspec = [subspec]\n\n res.extend(\n ((input_column, funcname(func)), func, input_column)\n for func in subspec\n )\n\n else:\n raise ValueError(\"unsupported agg spec of type {}\".format(type(spec)))\n\n compounds = (list, tuple, dict)\n use_flat_columns = not any(\n isinstance(subspec, compounds) for subspec in spec.values()\n )\n\n if use_flat_columns:\n res = [(input_col, func, input_col) for (_, func, input_col) in res]\n\n return res\n\n\ndef _build_agg_args(spec):\n \"\"\"\n Create transformation functions for a normalized aggregate spec.\n\n Parameters\n ----------\n spec: a list of (result-column, aggregation-function, input-column) triples.\n To work with all argument forms understood by pandas use\n ``_normalize_spec`` to normalize the argment before passing it on to\n ``_build_agg_args``.\n\n Returns\n -------\n chunk_funcs: a list of (intermediate-column, function, keyword) triples\n that are applied on grouped chunks of the initial dataframe.\n\n agg_funcs: a list of (intermediate-column, functions, keyword) triples that\n are applied on the grouped concatination of the preprocessed chunks.\n\n finalizers: a list of (result-column, function, keyword) triples that are\n applied after the ``agg_funcs``. 
They are used to create final results\n from intermediate representations.\n \"\"\"\n known_np_funcs = {np.min: \"min\", np.max: \"max\"}\n\n # check that there are no name conflicts for a single input column\n by_name = {}\n for _, func, input_column in spec:\n key = funcname(known_np_funcs.get(func, func)), input_column\n by_name.setdefault(key, []).append((func, input_column))\n\n for funcs in by_name.values():\n if len(funcs) != 1:\n raise ValueError(\"conflicting aggregation functions: {}\".format(funcs))\n\n chunks = {}\n aggs = {}\n finalizers = []\n\n for (result_column, func, input_column) in spec:\n if not isinstance(func, Aggregation):\n func = funcname(known_np_funcs.get(func, func))\n\n impls = _build_agg_args_single(result_column, func, input_column)\n\n # overwrite existing result-columns, generate intermediates only once\n for spec in impls[\"chunk_funcs\"]:\n chunks[spec[0]] = spec\n for spec in impls[\"aggregate_funcs\"]:\n aggs[spec[0]] = spec\n\n finalizers.append(impls[\"finalizer\"])\n\n chunks = sorted(chunks.values())\n aggs = sorted(aggs.values())\n\n return chunks, aggs, finalizers\n\n\ndef _build_agg_args_single(result_column, func, input_column):\n simple_impl = {\n \"sum\": (M.sum, M.sum),\n \"min\": (M.min, M.min),\n \"max\": (M.max, M.max),\n \"count\": (M.count, M.sum),\n \"size\": (M.size, M.sum),\n \"first\": (M.first, M.first),\n \"last\": (M.last, M.last),\n \"prod\": (M.prod, M.prod),\n }\n\n if func in simple_impl.keys():\n return _build_agg_args_simple(\n result_column, func, input_column, simple_impl[func]\n )\n\n elif func == \"var\":\n return _build_agg_args_var(result_column, func, input_column)\n\n elif func == \"std\":\n return _build_agg_args_std(result_column, func, input_column)\n\n elif func == \"mean\":\n return _build_agg_args_mean(result_column, func, input_column)\n\n elif func == \"list\":\n return _build_agg_args_list(result_column, func, input_column)\n\n elif isinstance(func, Aggregation):\n return _build_agg_args_custom(result_column, func, input_column)\n\n else:\n raise ValueError(\"unknown aggregate {}\".format(func))\n\n\ndef _build_agg_args_simple(result_column, func, input_column, impl_pair):\n intermediate = _make_agg_id(func, input_column)\n chunk_impl, agg_impl = impl_pair\n\n return dict(\n chunk_funcs=[\n (\n intermediate,\n _apply_func_to_column,\n dict(column=input_column, func=chunk_impl),\n )\n ],\n aggregate_funcs=[\n (\n intermediate,\n _apply_func_to_column,\n dict(column=intermediate, func=agg_impl),\n )\n ],\n finalizer=(result_column, itemgetter(intermediate), dict()),\n )\n\n\ndef _build_agg_args_var(result_column, func, input_column):\n int_sum = _make_agg_id(\"sum\", input_column)\n int_sum2 = _make_agg_id(\"sum2\", input_column)\n int_count = _make_agg_id(\"count\", input_column)\n\n return dict(\n chunk_funcs=[\n (int_sum, _apply_func_to_column, dict(column=input_column, func=M.sum)),\n (int_count, _apply_func_to_column, dict(column=input_column, func=M.count)),\n (int_sum2, _compute_sum_of_squares, dict(column=input_column)),\n ],\n aggregate_funcs=[\n (col, _apply_func_to_column, dict(column=col, func=M.sum))\n for col in (int_sum, int_count, int_sum2)\n ],\n finalizer=(\n result_column,\n _finalize_var,\n dict(sum_column=int_sum, count_column=int_count, sum2_column=int_sum2),\n ),\n )\n\n\ndef _build_agg_args_std(result_column, func, input_column):\n impls = _build_agg_args_var(result_column, func, input_column)\n\n result_column, _, kwargs = impls[\"finalizer\"]\n impls[\"finalizer\"] = (result_column, 
_finalize_std, kwargs)\n\n return impls\n\n\ndef _build_agg_args_mean(result_column, func, input_column):\n int_sum = _make_agg_id(\"sum\", input_column)\n int_count = _make_agg_id(\"count\", input_column)\n\n return dict(\n chunk_funcs=[\n (int_sum, _apply_func_to_column, dict(column=input_column, func=M.sum)),\n (int_count, _apply_func_to_column, dict(column=input_column, func=M.count)),\n ],\n aggregate_funcs=[\n (col, _apply_func_to_column, dict(column=col, func=M.sum))\n for col in (int_sum, int_count)\n ],\n finalizer=(\n result_column,\n _finalize_mean,\n dict(sum_column=int_sum, count_column=int_count),\n ),\n )\n\n\ndef _build_agg_args_list(result_column, func, input_column):\n intermediate = _make_agg_id(\"list\", input_column)\n\n return dict(\n chunk_funcs=[\n (\n intermediate,\n _apply_func_to_column,\n dict(column=input_column, func=lambda s: s.apply(list)),\n )\n ],\n aggregate_funcs=[\n (\n intermediate,\n _apply_func_to_column,\n dict(\n column=intermediate,\n func=lambda s0: s0.apply(\n lambda chunks: list(it.chain.from_iterable(chunks))\n ),\n ),\n )\n ],\n finalizer=(result_column, itemgetter(intermediate), dict()),\n )\n\n\ndef _build_agg_args_custom(result_column, func, input_column):\n col = _make_agg_id(funcname(func), input_column)\n\n if func.finalize is None:\n finalizer = (result_column, operator.itemgetter(col), dict())\n\n else:\n finalizer = (\n result_column,\n _apply_func_to_columns,\n dict(func=func.finalize, prefix=col),\n )\n\n return dict(\n chunk_funcs=[\n (col, _apply_func_to_column, dict(func=func.chunk, column=input_column))\n ],\n aggregate_funcs=[\n (col, _apply_func_to_columns, dict(func=func.agg, prefix=col))\n ],\n finalizer=finalizer,\n )\n\n\ndef _groupby_apply_funcs(df, *index, **kwargs):\n \"\"\"\n Group a dataframe and apply multiple aggregation functions.\n\n Parameters\n ----------\n df: pandas.DataFrame\n The dataframe to work on.\n index: list of groupers\n If given, they are added to the keyword arguments as the ``by``\n argument.\n funcs: list of result-colum, function, keywordargument triples\n The list of functions that are applied on the grouped data frame.\n Has to be passed as a keyword argument.\n kwargs:\n All keyword arguments, but ``funcs``, are passed verbatim to the groupby\n operation of the dataframe\n\n Returns\n -------\n aggregated:\n the aggregated dataframe.\n \"\"\"\n if len(index):\n # since we're coming through apply, `by` will be a tuple.\n # Pandas treats tuples as a single key, and lists as multiple keys\n # We want multiple keys\n kwargs.update(by=list(index))\n\n funcs = kwargs.pop(\"funcs\")\n grouped = _groupby_raise_unaligned(df, **kwargs)\n\n result = collections.OrderedDict()\n for result_column, func, func_kwargs in funcs:\n r = func(grouped, **func_kwargs)\n\n if isinstance(r, tuple):\n for idx, s in enumerate(r):\n result[\"{}-{}\".format(result_column, idx)] = s\n\n else:\n result[result_column] = r\n\n if is_dataframe_like(df):\n return type(df)(result)\n else:\n # Get the DataFrame type of this Series object\n return type(df.head(0).to_frame())(result)\n\n\ndef _compute_sum_of_squares(grouped, column):\n # Note: CuDF cannot use `groupby.apply`.\n # Need to unpack groupby to compute sum of squares\n if hasattr(grouped, \"grouper\"):\n keys = grouped.grouper\n else:\n # Handle CuDF groupby object (different from pandas)\n keys = grouped.grouping.keys\n df = grouped.obj[column].pow(2) if column else grouped.obj.pow(2)\n return df.groupby(keys).sum()\n\n\ndef _agg_finalize(df, aggregate_funcs, 
finalize_funcs, level, sort=False, **kwargs):\n # finish the final aggregation level\n df = _groupby_apply_funcs(\n df, funcs=aggregate_funcs, level=level, sort=sort, **kwargs\n )\n\n # and finalize the result\n result = collections.OrderedDict()\n for result_column, func, finalize_kwargs in finalize_funcs:\n result[result_column] = func(df, **finalize_kwargs)\n\n return type(df)(result)\n\n\ndef _apply_func_to_column(df_like, column, func):\n if column is None:\n return func(df_like)\n\n return func(df_like[column])\n\n\ndef _apply_func_to_columns(df_like, prefix, func):\n if is_dataframe_like(df_like):\n columns = df_like.columns\n else:\n # handle GroupBy objects\n columns = df_like._selected_obj.columns\n\n columns = sorted(col for col in columns if col.startswith(prefix))\n\n columns = [df_like[col] for col in columns]\n return func(*columns)\n\n\ndef _finalize_mean(df, sum_column, count_column):\n return df[sum_column] / df[count_column]\n\n\ndef _finalize_var(df, count_column, sum_column, sum2_column, ddof=1):\n n = df[count_column]\n x = df[sum_column]\n x2 = df[sum2_column]\n\n result = x2 - x ** 2 / n\n div = n - ddof\n div[div < 0] = 0\n result /= div\n result[(n - ddof) == 0] = np.nan\n\n return result\n\n\ndef _finalize_std(df, count_column, sum_column, sum2_column, ddof=1):\n result = _finalize_var(df, count_column, sum_column, sum2_column, ddof)\n return np.sqrt(result)\n\n\ndef _cum_agg_aligned(part, cum_last, index, columns, func, initial):\n align = cum_last.reindex(part.set_index(index).index, fill_value=initial)\n align.index = part.index\n return func(part[columns], align)\n\n\ndef _cum_agg_filled(a, b, func, initial):\n union = a.index.union(b.index)\n return func(\n a.reindex(union, fill_value=initial),\n b.reindex(union, fill_value=initial),\n fill_value=initial,\n )\n\n\ndef _cumcount_aggregate(a, b, fill_value=None):\n return a.add(b, fill_value=fill_value) + 1\n\n\nclass _GroupBy:\n \"\"\"Superclass for DataFrameGroupBy and SeriesGroupBy\n\n Parameters\n ----------\n\n obj: DataFrame or Series\n DataFrame or Series to be grouped\n by: str, list or Series\n The key for grouping\n slice: str, list\n The slice keys applied to GroupBy result\n group_keys: bool\n Passed to pandas.DataFrame.groupby()\n dropna: bool\n Whether to drop null values from groupby index\n sort: bool, defult None\n Passed along to aggregation methods. 
If allowed,\n the output aggregation will have sorted keys.\n observed: bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n \"\"\"\n\n def __init__(\n self,\n df,\n by=None,\n slice=None,\n group_keys=True,\n dropna=None,\n sort=None,\n observed=None,\n ):\n\n by_ = by if isinstance(by, (tuple, list)) else [by]\n if any(isinstance(key, pd.Grouper) for key in by_):\n raise NotImplementedError(\"pd.Grouper is currently not supported by Dask.\")\n\n assert isinstance(df, (DataFrame, Series))\n self.group_keys = group_keys\n self.obj = df\n # grouping key passed via groupby method\n self.index = _normalize_index(df, by)\n self.sort = sort\n\n if isinstance(self.index, list):\n do_index_partition_align = all(\n item.npartitions == df.npartitions if isinstance(item, Series) else True\n for item in self.index\n )\n elif isinstance(self.index, Series):\n do_index_partition_align = df.npartitions == self.index.npartitions\n else:\n do_index_partition_align = True\n\n if not do_index_partition_align:\n raise NotImplementedError(\n \"The grouped object and index of the \"\n \"groupby must have the same divisions.\"\n )\n\n # slicing key applied to _GroupBy instance\n self._slice = slice\n\n if isinstance(self.index, list):\n index_meta = [\n item._meta if isinstance(item, Series) else item for item in self.index\n ]\n\n elif isinstance(self.index, Series):\n index_meta = self.index._meta\n\n else:\n index_meta = self.index\n\n self.dropna = {}\n if dropna is not None:\n self.dropna[\"dropna\"] = dropna\n\n # Hold off on setting observed by default: https://github.com/dask/dask/issues/6951\n self.observed = {}\n if observed is not None:\n self.observed[\"observed\"] = observed\n\n self._meta = self.obj._meta.groupby(\n index_meta, group_keys=group_keys, **self.observed, **self.dropna\n )\n\n @property\n def _meta_nonempty(self):\n \"\"\"\n Return a pd.DataFrameGroupBy / pd.SeriesGroupBy which contains sample data.\n \"\"\"\n sample = self.obj._meta_nonempty\n\n if isinstance(self.index, list):\n index_meta = [\n item._meta_nonempty if isinstance(item, Series) else item\n for item in self.index\n ]\n\n elif isinstance(self.index, Series):\n index_meta = self.index._meta_nonempty\n\n else:\n index_meta = self.index\n\n grouped = sample.groupby(\n index_meta,\n group_keys=self.group_keys,\n **self.observed,\n **self.dropna,\n )\n return _maybe_slice(grouped, self._slice)\n\n def _aca_agg(\n self,\n token,\n func,\n aggfunc=None,\n meta=None,\n split_every=None,\n split_out=1,\n chunk_kwargs={},\n aggregate_kwargs={},\n ):\n if aggfunc is None:\n aggfunc = func\n\n if meta is None:\n meta = func(self._meta_nonempty)\n\n columns = meta.name if is_series_like(meta) else meta.columns\n\n token = self._token_prefix + token\n levels = _determine_levels(self.index)\n\n return aca(\n [self.obj, self.index]\n if not isinstance(self.index, list)\n else [self.obj] + self.index,\n chunk=_apply_chunk,\n chunk_kwargs=dict(\n chunk=func,\n columns=columns,\n **self.observed,\n **chunk_kwargs,\n **self.dropna,\n ),\n aggregate=_groupby_aggregate,\n meta=meta,\n token=token,\n split_every=split_every,\n aggregate_kwargs=dict(\n aggfunc=aggfunc,\n levels=levels,\n **self.observed,\n **aggregate_kwargs,\n **self.dropna,\n ),\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )\n\n def _cum_agg(self, token, chunk, aggregate, initial):\n 
\"\"\"Wrapper for cumulative groupby operation\"\"\"\n meta = chunk(self._meta)\n columns = meta.name if is_series_like(meta) else meta.columns\n index = self.index if isinstance(self.index, list) else [self.index]\n\n name = self._token_prefix + token\n name_part = name + \"-map\"\n name_last = name + \"-take-last\"\n name_cum = name + \"-cum-last\"\n\n # cumulate each partitions\n cumpart_raw = map_partitions(\n _apply_chunk,\n self.obj,\n *index,\n chunk=chunk,\n columns=columns,\n token=name_part,\n meta=meta,\n **self.dropna,\n )\n\n cumpart_raw_frame = (\n cumpart_raw.to_frame() if is_series_like(meta) else cumpart_raw\n )\n\n cumpart_ext = cumpart_raw_frame.assign(\n **{\n i: self.obj[i]\n if np.isscalar(i) and i in self.obj.columns\n else self.obj.index\n for i in index\n }\n )\n\n # Use pd.Grouper objects to specify that we are grouping by columns.\n # Otherwise, pandas will throw an ambiguity warning if the\n # DataFrame's index (self.obj.index) was included in the grouping\n # specification (self.index). See pandas #14432\n index_groupers = [pd.Grouper(key=ind) for ind in index]\n cumlast = map_partitions(\n _apply_chunk,\n cumpart_ext,\n *index_groupers,\n columns=0 if columns is None else columns,\n chunk=M.last,\n meta=meta,\n token=name_last,\n **self.dropna,\n )\n\n # aggregate cumulated partitions and its previous last element\n _hash = tokenize(self, token, chunk, aggregate, initial)\n name += \"-\" + _hash\n name_cum += \"-\" + _hash\n dask = {}\n dask[(name, 0)] = (cumpart_raw._name, 0)\n\n for i in range(1, self.obj.npartitions):\n # store each cumulative step to graph to reduce computation\n if i == 1:\n dask[(name_cum, i)] = (cumlast._name, i - 1)\n else:\n # aggregate with previous cumulation results\n dask[(name_cum, i)] = (\n _cum_agg_filled,\n (name_cum, i - 1),\n (cumlast._name, i - 1),\n aggregate,\n initial,\n )\n dask[(name, i)] = (\n _cum_agg_aligned,\n (cumpart_ext._name, i),\n (name_cum, i),\n index,\n 0 if columns is None else columns,\n aggregate,\n initial,\n )\n graph = HighLevelGraph.from_collections(\n name, dask, dependencies=[cumpart_raw, cumpart_ext, cumlast]\n )\n return new_dd_object(graph, name, chunk(self._meta), self.obj.divisions)\n\n def _shuffle(self, meta):\n df = self.obj\n\n if isinstance(self.obj, Series):\n # Temporarily convert series to dataframe for shuffle\n df = df.to_frame(\"__series__\")\n convert_back_to_series = True\n else:\n convert_back_to_series = False\n\n if isinstance(self.index, DataFrame): # add index columns to dataframe\n df2 = df.assign(\n **{\"_index_\" + c: self.index[c] for c in self.index.columns}\n )\n index = self.index\n elif isinstance(self.index, Series):\n df2 = df.assign(_index=self.index)\n index = self.index\n else:\n df2 = df\n index = df._select_columns_or_index(self.index)\n\n df3 = shuffle(df2, index) # shuffle dataframe and index\n\n if isinstance(self.index, DataFrame):\n # extract index from dataframe\n cols = [\"_index_\" + c for c in self.index.columns]\n index2 = df3[cols]\n if is_dataframe_like(meta):\n df4 = df3.map_partitions(drop_columns, cols, meta.columns.dtype)\n else:\n df4 = df3.drop(cols, axis=1)\n elif isinstance(self.index, Series):\n index2 = df3[\"_index\"]\n index2.name = self.index.name\n if is_dataframe_like(meta):\n df4 = df3.map_partitions(drop_columns, \"_index\", meta.columns.dtype)\n else:\n df4 = df3.drop(\"_index\", axis=1)\n else:\n df4 = df3\n index2 = self.index\n\n if convert_back_to_series:\n df4 = df4[\"__series__\"].rename(self.obj.name)\n\n return df4, 
index2\n\n @derived_from(pd.core.groupby.GroupBy)\n def cumsum(self, axis=0):\n if axis:\n return self.obj.cumsum(axis=axis)\n else:\n return self._cum_agg(\"cumsum\", chunk=M.cumsum, aggregate=M.add, initial=0)\n\n @derived_from(pd.core.groupby.GroupBy)\n def cumprod(self, axis=0):\n if axis:\n return self.obj.cumprod(axis=axis)\n else:\n return self._cum_agg(\"cumprod\", chunk=M.cumprod, aggregate=M.mul, initial=1)\n\n @derived_from(pd.core.groupby.GroupBy)\n def cumcount(self, axis=None):\n return self._cum_agg(\n \"cumcount\", chunk=M.cumcount, aggregate=_cumcount_aggregate, initial=-1\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def sum(self, split_every=None, split_out=1, min_count=None):\n result = self._aca_agg(\n token=\"sum\", func=M.sum, split_every=split_every, split_out=split_out\n )\n if min_count:\n return result.where(self.count() >= min_count, other=np.NaN)\n else:\n return result\n\n @derived_from(pd.core.groupby.GroupBy)\n def prod(self, split_every=None, split_out=1, min_count=None):\n result = self._aca_agg(\n token=\"prod\", func=M.prod, split_every=split_every, split_out=split_out\n )\n if min_count:\n return result.where(self.count() >= min_count, other=np.NaN)\n else:\n return result\n\n @derived_from(pd.core.groupby.GroupBy)\n def min(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"min\", func=M.min, split_every=split_every, split_out=split_out\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def max(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"max\", func=M.max, split_every=split_every, split_out=split_out\n )\n\n @derived_from(pd.DataFrame)\n def idxmin(self, split_every=None, split_out=1, axis=None, skipna=True):\n return self._aca_agg(\n token=\"idxmin\",\n func=M.idxmin,\n aggfunc=M.first,\n split_every=split_every,\n split_out=split_out,\n chunk_kwargs=dict(skipna=skipna),\n )\n\n @derived_from(pd.DataFrame)\n def idxmax(self, split_every=None, split_out=1, axis=None, skipna=True):\n return self._aca_agg(\n token=\"idxmax\",\n func=M.idxmax,\n aggfunc=M.first,\n split_every=split_every,\n split_out=split_out,\n chunk_kwargs=dict(skipna=skipna),\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def count(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"count\",\n func=M.count,\n aggfunc=M.sum,\n split_every=split_every,\n split_out=split_out,\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def mean(self, split_every=None, split_out=1):\n s = self.sum(split_every=split_every, split_out=split_out)\n c = self.count(split_every=split_every, split_out=split_out)\n if is_dataframe_like(s):\n c = c[s.columns]\n return s / c\n\n @derived_from(pd.core.groupby.GroupBy)\n def size(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"size\",\n func=M.size,\n aggfunc=M.sum,\n split_every=split_every,\n split_out=split_out,\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def var(self, ddof=1, split_every=None, split_out=1):\n levels = _determine_levels(self.index)\n result = aca(\n [self.obj, self.index]\n if not isinstance(self.index, list)\n else [self.obj] + self.index,\n chunk=_var_chunk,\n aggregate=_var_agg,\n combine=_var_combine,\n token=self._token_prefix + \"var\",\n aggregate_kwargs={\"ddof\": ddof, \"levels\": levels},\n combine_kwargs={\"levels\": levels},\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )\n\n if isinstance(self.obj, Series):\n result = result[result.columns[0]]\n if self._slice:\n result 
= result[self._slice]\n\n return result\n\n @derived_from(pd.core.groupby.GroupBy)\n def std(self, ddof=1, split_every=None, split_out=1):\n v = self.var(ddof, split_every=split_every, split_out=split_out)\n result = map_partitions(np.sqrt, v, meta=v)\n return result\n\n @derived_from(pd.DataFrame)\n def corr(self, ddof=1, split_every=None, split_out=1):\n \"\"\"Groupby correlation:\n corr(X, Y) = cov(X, Y) / (std_x * std_y)\n \"\"\"\n return self.cov(split_every=split_every, split_out=split_out, std=True)\n\n @derived_from(pd.DataFrame)\n def cov(self, ddof=1, split_every=None, split_out=1, std=False):\n \"\"\"Groupby covariance is accomplished by\n\n 1. Computing intermediate values for sum, count, and the pairwise products of\n all columns: a b c -> a*a, a*b, a*c, b*b, b*c, c*c.\n\n 2. The values are then aggregated and the final covariance value is calculated:\n cov(X, Y) = E[X*Y] - E[X]*E[Y]\n\n When `std` is True, the correlation is calculated instead.\n \"\"\"\n\n levels = _determine_levels(self.index)\n\n is_mask = any(is_series_like(s) for s in self.index)\n if self._slice:\n if is_mask:\n self.obj = self.obj[self._slice]\n else:\n sliced_plus = list(self._slice) + list(self.index)\n self.obj = self.obj[sliced_plus]\n\n result = aca(\n [self.obj, self.index]\n if not isinstance(self.index, list)\n else [self.obj] + self.index,\n chunk=_cov_chunk,\n aggregate=_cov_agg,\n combine=_cov_combine,\n token=self._token_prefix + \"cov\",\n aggregate_kwargs={\"ddof\": ddof, \"levels\": levels, \"std\": std},\n combine_kwargs={\"levels\": levels},\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )\n\n if isinstance(self.obj, Series):\n result = result[result.columns[0]]\n if self._slice:\n result = result[self._slice]\n return result\n\n @derived_from(pd.core.groupby.GroupBy)\n def first(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"first\", func=M.first, split_every=split_every, split_out=split_out\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def last(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"last\", func=M.last, split_every=split_every, split_out=split_out\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def get_group(self, key):\n token = self._token_prefix + \"get_group\"\n\n meta = self._meta.obj\n if is_dataframe_like(meta) and self._slice is not None:\n meta = meta[self._slice]\n columns = meta.columns if is_dataframe_like(meta) else meta.name\n\n return map_partitions(\n _groupby_get_group,\n self.obj,\n self.index,\n key,\n columns,\n meta=meta,\n token=token,\n )\n\n def aggregate(self, arg, split_every, split_out=1):\n if isinstance(self.obj, DataFrame):\n if isinstance(self.index, tuple) or np.isscalar(self.index):\n group_columns = {self.index}\n\n elif isinstance(self.index, list):\n group_columns = {\n i for i in self.index if isinstance(i, tuple) or np.isscalar(i)\n }\n\n else:\n group_columns = set()\n\n if self._slice:\n # pandas doesn't exclude the grouping column in a SeriesGroupBy\n # like df.groupby('a')['a'].agg(...)\n non_group_columns = self._slice\n if not isinstance(non_group_columns, list):\n non_group_columns = [non_group_columns]\n else:\n # NOTE: this step relies on the index normalization to replace\n # series with their name in an index.\n non_group_columns = [\n col for col in self.obj.columns if col not in group_columns\n ]\n\n spec = _normalize_spec(arg, non_group_columns)\n\n elif isinstance(self.obj, Series):\n if isinstance(arg, (list, tuple, dict)):\n # implementation 
detail: if self.obj is a series, a pseudo column\n # None is used to denote the series itself. This pseudo column is\n # removed from the result columns before passing the spec along.\n spec = _normalize_spec({None: arg}, [])\n spec = [\n (result_column, func, input_column)\n for ((_, result_column), func, input_column) in spec\n ]\n\n else:\n spec = _normalize_spec({None: arg}, [])\n spec = [\n (self.obj.name, func, input_column)\n for (_, func, input_column) in spec\n ]\n\n else:\n raise ValueError(\"aggregate on unknown object {}\".format(self.obj))\n\n chunk_funcs, aggregate_funcs, finalizers = _build_agg_args(spec)\n\n if isinstance(self.index, (tuple, list)) and len(self.index) > 1:\n levels = list(range(len(self.index)))\n else:\n levels = 0\n\n if not isinstance(self.index, list):\n chunk_args = [self.obj, self.index]\n\n else:\n chunk_args = [self.obj] + self.index\n\n if not PANDAS_GT_110 and self.dropna:\n raise NotImplementedError(\n \"dropna is not a valid argument for dask.groupby.agg \"\n f\"if pandas < 1.1.0. Pandas version is {pd.__version__}\"\n )\n\n return aca(\n chunk_args,\n chunk=_groupby_apply_funcs,\n chunk_kwargs=dict(funcs=chunk_funcs, **self.observed, **self.dropna),\n combine=_groupby_apply_funcs,\n combine_kwargs=dict(\n funcs=aggregate_funcs, level=levels, **self.observed, **self.dropna\n ),\n aggregate=_agg_finalize,\n aggregate_kwargs=dict(\n aggregate_funcs=aggregate_funcs,\n finalize_funcs=finalizers,\n level=levels,\n **self.observed,\n **self.dropna,\n ),\n token=\"aggregate\",\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )\n\n @insert_meta_param_description(pad=12)\n def apply(self, func, *args, **kwargs):\n \"\"\"Parallel version of pandas GroupBy.apply\n\n This mimics the pandas version except for the following:\n\n 1. If the grouper does not align with the index then this causes a full\n shuffle. The order of rows within each group may not be preserved.\n 2. Dask's GroupBy.apply is not appropriate for aggregations. For custom\n aggregations, use :class:`dask.dataframe.groupby.Aggregation`.\n\n .. warning::\n\n Pandas' groupby-apply can be used to apply arbitrary functions,\n including aggregations that result in one row per group. Dask's\n groupby-apply will apply ``func`` once to each partition-group pair,\n so when ``func`` is a reduction you'll end up with one row per\n partition-group pair. To apply a custom aggregation with Dask,\n use :class:`dask.dataframe.groupby.Aggregation`.\n\n Parameters\n ----------\n func: function\n Function to apply\n args, kwargs : Scalar, Delayed or object\n Arguments and keywords to pass to the function.\n $META\n\n Returns\n -------\n applied : Series or DataFrame depending on columns keyword\n \"\"\"\n meta = kwargs.get(\"meta\", no_default)\n\n if meta is no_default:\n with raise_on_meta_error(\n \"groupby.apply({0})\".format(funcname(func)), udf=True\n ):\n meta_args, meta_kwargs = _extract_meta((args, kwargs), nonempty=True)\n meta = self._meta_nonempty.apply(func, *meta_args, **meta_kwargs)\n\n msg = (\n \"`meta` is not specified, inferred from partial data. 
\"\n \"Please provide `meta` if the result is unexpected.\\n\"\n \" Before: .apply(func)\\n\"\n \" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\\n\"\n \" or: .apply(func, meta=('x', 'f8')) for series result\"\n )\n warnings.warn(msg, stacklevel=2)\n\n meta = make_meta(meta, parent_meta=self._meta.obj)\n\n # Validate self.index\n if isinstance(self.index, list) and any(\n isinstance(item, Series) for item in self.index\n ):\n raise NotImplementedError(\n \"groupby-apply with a multiple Series is currently not supported\"\n )\n\n df = self.obj\n should_shuffle = not (\n df.known_divisions and df._contains_index_name(self.index)\n )\n\n if should_shuffle:\n df2, index = self._shuffle(meta)\n else:\n df2 = df\n index = self.index\n\n # Perform embarrassingly parallel groupby-apply\n kwargs[\"meta\"] = meta\n df3 = map_partitions(\n _groupby_slice_apply,\n df2,\n index,\n self._slice,\n func,\n token=funcname(func),\n *args,\n group_keys=self.group_keys,\n **self.observed,\n **self.dropna,\n **kwargs,\n )\n\n return df3\n\n @insert_meta_param_description(pad=12)\n def transform(self, func, *args, **kwargs):\n \"\"\"Parallel version of pandas GroupBy.transform\n\n This mimics the pandas version except for the following:\n\n 1. If the grouper does not align with the index then this causes a full\n shuffle. The order of rows within each group may not be preserved.\n 2. Dask's GroupBy.transform is not appropriate for aggregations. For custom\n aggregations, use :class:`dask.dataframe.groupby.Aggregation`.\n\n .. warning::\n\n Pandas' groupby-transform can be used to to apply arbitrary functions,\n including aggregations that result in one row per group. Dask's\n groupby-transform will apply ``func`` once to each partition-group pair,\n so when ``func`` is a reduction you'll end up with one row per\n partition-group pair. To apply a custom aggregation with Dask,\n use :class:`dask.dataframe.groupby.Aggregation`.\n\n Parameters\n ----------\n func: function\n Function to apply\n args, kwargs : Scalar, Delayed or object\n Arguments and keywords to pass to the function.\n $META\n\n Returns\n -------\n applied : Series or DataFrame depending on columns keyword\n \"\"\"\n meta = kwargs.get(\"meta\", no_default)\n\n if meta is no_default:\n with raise_on_meta_error(\n \"groupby.transform({0})\".format(funcname(func)), udf=True\n ):\n meta_args, meta_kwargs = _extract_meta((args, kwargs), nonempty=True)\n meta = self._meta_nonempty.transform(func, *meta_args, **meta_kwargs)\n\n msg = (\n \"`meta` is not specified, inferred from partial data. 
\"\n \"Please provide `meta` if the result is unexpected.\\n\"\n \" Before: .transform(func)\\n\"\n \" After: .transform(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\\n\"\n \" or: .transform(func, meta=('x', 'f8')) for series result\"\n )\n warnings.warn(msg, stacklevel=2)\n\n meta = make_meta(meta, parent_meta=self._meta.obj)\n\n # Validate self.index\n if isinstance(self.index, list) and any(\n isinstance(item, Series) for item in self.index\n ):\n raise NotImplementedError(\n \"groupby-transform with a multiple Series is currently not supported\"\n )\n\n df = self.obj\n should_shuffle = not (\n df.known_divisions and df._contains_index_name(self.index)\n )\n\n if should_shuffle:\n df2, index = self._shuffle(meta)\n else:\n df2 = df\n index = self.index\n\n # Perform embarrassingly parallel groupby-transform\n kwargs[\"meta\"] = meta\n df3 = map_partitions(\n _groupby_slice_transform,\n df2,\n index,\n self._slice,\n func,\n token=funcname(func),\n *args,\n group_keys=self.group_keys,\n **self.observed,\n **self.dropna,\n **kwargs,\n )\n\n return df3\n\n\nclass DataFrameGroupBy(_GroupBy):\n _token_prefix = \"dataframe-groupby-\"\n\n def __getitem__(self, key):\n if isinstance(key, list):\n g = DataFrameGroupBy(\n self.obj, by=self.index, slice=key, sort=self.sort, **self.dropna\n )\n else:\n g = SeriesGroupBy(\n self.obj, by=self.index, slice=key, sort=self.sort, **self.dropna\n )\n\n # error is raised from pandas\n g._meta = g._meta[key]\n return g\n\n def __dir__(self):\n return sorted(\n set(\n dir(type(self))\n + list(self.__dict__)\n + list(filter(M.isidentifier, self.obj.columns))\n )\n )\n\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError as e:\n raise AttributeError(e) from e\n\n @derived_from(pd.core.groupby.DataFrameGroupBy)\n def aggregate(self, arg, split_every=None, split_out=1):\n if arg == \"size\":\n return self.size()\n\n return super().aggregate(arg, split_every=split_every, split_out=split_out)\n\n @derived_from(pd.core.groupby.DataFrameGroupBy)\n def agg(self, arg, split_every=None, split_out=1):\n return self.aggregate(arg, split_every=split_every, split_out=split_out)\n\n\nclass SeriesGroupBy(_GroupBy):\n _token_prefix = \"series-groupby-\"\n\n def __init__(self, df, by=None, slice=None, observed=None, **kwargs):\n # for any non series object, raise pandas-compat error message\n # Hold off on setting observed by default: https://github.com/dask/dask/issues/6951\n observed = {\"observed\": observed} if observed is not None else {}\n\n if isinstance(df, Series):\n if isinstance(by, Series):\n pass\n elif isinstance(by, list):\n if len(by) == 0:\n raise ValueError(\"No group keys passed!\")\n\n non_series_items = [item for item in by if not isinstance(item, Series)]\n # raise error from pandas, if applicable\n\n df._meta.groupby(non_series_items, **observed)\n else:\n # raise error from pandas, if applicable\n df._meta.groupby(by, **observed)\n\n super().__init__(df, by=by, slice=slice, **observed, **kwargs)\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def nunique(self, split_every=None, split_out=1):\n \"\"\"\n Examples\n --------\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> d = {'col1': [1, 2, 3, 4], 'col2': [5, 6, 7, 8]}\n >>> df = pd.DataFrame(data=d)\n >>> ddf = dd.from_pandas(df, 2)\n >>> ddf.groupby(['col1']).col2.nunique().compute()\n \"\"\"\n name = self._meta.obj.name\n levels = _determine_levels(self.index)\n\n if isinstance(self.obj, DataFrame):\n chunk = _nunique_df_chunk\n\n else:\n chunk = 
_nunique_series_chunk\n\n return aca(\n [self.obj, self.index]\n if not isinstance(self.index, list)\n else [self.obj] + self.index,\n chunk=chunk,\n aggregate=_nunique_df_aggregate,\n combine=_nunique_df_combine,\n token=\"series-groupby-nunique\",\n chunk_kwargs={\"levels\": levels, \"name\": name},\n aggregate_kwargs={\"levels\": levels, \"name\": name},\n combine_kwargs={\"levels\": levels},\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def aggregate(self, arg, split_every=None, split_out=1):\n result = super().aggregate(arg, split_every=split_every, split_out=split_out)\n if self._slice:\n result = result[self._slice]\n\n if not isinstance(arg, (list, dict)) and isinstance(result, DataFrame):\n result = result[result.columns[0]]\n\n return result\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def agg(self, arg, split_every=None, split_out=1):\n return self.aggregate(arg, split_every=split_every, split_out=split_out)\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def value_counts(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"value_counts\",\n func=_value_counts,\n aggfunc=_value_counts_aggregate,\n split_every=split_every,\n split_out=split_out,\n )\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def unique(self, split_every=None, split_out=1):\n name = self._meta.obj.name\n return self._aca_agg(\n token=\"unique\",\n func=M.unique,\n aggfunc=_unique_aggregate,\n aggregate_kwargs={\"name\": name},\n split_every=split_every,\n split_out=split_out,\n )\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def tail(self, n=5, split_every=None, split_out=1):\n index_levels = len(self.index) if isinstance(self.index, list) else 1\n return self._aca_agg(\n token=\"tail\",\n func=_tail_chunk,\n aggfunc=_tail_aggregate,\n meta=M.tail(self._meta_nonempty),\n chunk_kwargs={\"n\": n},\n aggregate_kwargs={\"n\": n, \"index_levels\": index_levels},\n split_every=split_every,\n split_out=split_out,\n )\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def head(self, n=5, split_every=None, split_out=1):\n index_levels = len(self.index) if isinstance(self.index, list) else 1\n return self._aca_agg(\n token=\"head\",\n func=_head_chunk,\n aggfunc=_head_aggregate,\n meta=M.head(self._meta_nonempty),\n chunk_kwargs={\"n\": n},\n aggregate_kwargs={\"n\": n, \"index_levels\": index_levels},\n split_every=split_every,\n split_out=split_out,\n )\n\n\ndef _unique_aggregate(series_gb, name=None):\n ret = type(series_gb.obj)(\n {k: v.explode().unique() for k, v in series_gb}, name=name\n )\n ret.index.names = series_gb.obj.index.names\n return ret\n\n\ndef _value_counts(x, **kwargs):\n if len(x):\n return M.value_counts(x, **kwargs)\n else:\n return pd.Series(dtype=int)\n\n\ndef _value_counts_aggregate(series_gb):\n to_concat = {k: v.groupby(level=1).sum() for k, v in series_gb}\n names = list(series_gb.obj.index.names)\n return pd.Series(pd.concat(to_concat, names=names))\n\n\ndef _tail_chunk(series_gb, **kwargs):\n keys, groups = zip(*series_gb) if len(series_gb) else ((True,), (series_gb,))\n return pd.concat([group.tail(**kwargs) for group in groups], keys=keys)\n\n\ndef _tail_aggregate(series_gb, **kwargs):\n levels = kwargs.pop(\"index_levels\")\n return series_gb.tail(**kwargs).droplevel(list(range(levels)))\n\n\ndef _head_chunk(series_gb, **kwargs):\n keys, groups = zip(*series_gb) if len(series_gb) else ((True,), (series_gb,))\n return 
pd.concat([group.head(**kwargs) for group in groups], keys=keys)\n\n\ndef _head_aggregate(series_gb, **kwargs):\n levels = kwargs.pop(\"index_levels\")\n return series_gb.head(**kwargs).droplevel(list(range(levels)))\n" ]
[ [ "numpy.array", "pandas.Grouper", "numpy.isscalar", "pandas.MultiIndex.from_product", "numpy.sqrt", "pandas.concat", "pandas.Series" ] ]
philipco/benchmark-pytorch
[ "d75da8224279a2c468f0b03c4582e668ffe07d68" ]
[ "Trainer.py" ]
[ "\"\"\"\nCreated by Philippenko, 17th February 2022.\n\"\"\"\nfrom datetime import datetime\nimport random\n\nimport numpy as np\nimport torch\n\nimport torch.nn as nn\nimport torch.optim as optim\nfrom pympler import asizeof\nfrom torch.backends import cudnn\n\nfrom Timer import Timer\nfrom DeepLearningRunLogger import DeepLearningRunLogger\n\nNB_EPOCH = 100\nLEARNING_RATE = 0.01\nMOMENTUM = 0.9\n\nclass Training:\n\n def __init__(self, network, train_loader, full_train_loader, test_loader, id: str) -> None:\n super().__init__()\n self.seed_everything()\n self.id = id\n\n self.timer = Timer()\n self.timer.start()\n\n ############## Logs file ##############\n self.logs_file = \"logs.txt\"\n\n ############## Train/Test dataset loader ##############\n self.train_loader = train_loader\n self.test_loader = test_loader\n\n self.full_train_loader = full_train_loader\n\n ############## Device: GPU or CPU ##############\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n ############## Global model ##############\n self.global_model = network().to(self.device)\n\n ############## Settings for cuda ##############\n if self.device == 'cuda':\n self.global_model = torch.nn.DataParallel(self.global_model)\n cudnn.benchmark = True if torch.cuda.is_available() else False\n\n ############## Algorithm used for optimization ##############\n self.optimizer = optim.SGD(self.global_model.parameters(), lr=LEARNING_RATE, momentum=MOMENTUM)\n\n ############## Loss function ##############\n self.criterion = nn.CrossEntropyLoss().to(self.device)\n\n ############## Class that stores all train/test losses and the test accuracies ##############\n self.run_logger = DeepLearningRunLogger(id = self.id)\n self.timer.stop()\n\n with open(self.logs_file, 'a') as f:\n print(f\"============================= NEW RUN \" + datetime.now().strftime(\"%d/%m/%Y at %H:%M:%S\") +\n \" =============================\", file=f)\n print(\"learning_rate -> {0}, momentum -> {1}, model -> {2}\"\n .format(LEARNING_RATE, MOMENTUM, type(self.global_model).__name__), file=f)\n print(\"Device :\", self.device, file=f)\n print(\"Size of the global model: {:.2e} bits\".format(asizeof.asizeof(self.global_model)), file=f)\n print(\"Size of the optimizer: {:.2e} bits\".format(asizeof.asizeof(self.optimizer)), file=f)\n print(\"Time of initialization: {:.2e}s\".format(self.timer.time), file=f)\n\n def seed_everything(self):\n # Seed\n seed = 25\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n def __update_run__(self, train_loss, test_loss, test_acc):\n self.run_logger.update_run(train_loss, test_loss, test_acc)\n\n def run_training(self):\n\n ### Initialization of the loss/accuracy\n train_loss = self.__compute_train_loss__()\n test_loss, test_accuracy = self.__compute_test_accuracy_and_loss__()\n self.__update_run__(train_loss, test_loss, test_accuracy)\n\n for epoch in range(NB_EPOCH):\n self.timer.start()\n\n ### Updating the model and computing the train loss\n self.__run_one_epoch__()\n\n ### Computing the train loss on the full dataset with the new model\n train_loss = self.__compute_train_loss__()\n\n ### Computing the test loss/accuracy\n test_loss, test_accuracy = self.__compute_test_accuracy_and_loss__()\n\n ### Update the run with this new epoch\n self.__update_run__(train_loss, test_loss, test_accuracy)\n\n self.timer.stop()\n\n ### 
Save key information in the logs file\n with open(self.logs_file, 'a') as f:\n print(f'[Epoch {epoch + 1}] train loss: {train_loss :.3f}\\t test loss: {test_loss :.3f}\\t '\n f'test accuracy: {test_accuracy :.2f}% \\t time: {self.timer.time :.0f}s', file=f)\n\n return self.run_logger\n\n def __run_one_epoch__(self):\n running_loss = 0.0\n train_loader_iter = iter(self.train_loader)\n nb_inner_iterations = len(self.train_loader)\n for _ in range(int(nb_inner_iterations)):\n\n ### Getting the next batch and putting it on the right device.\n data, target = next(train_loader_iter)\n data, target = data.to(self.device), target.to(self.device)\n\n ### Set to zero the parameter gradients\n self.optimizer.zero_grad()\n\n ### Forward pass\n outputs = self.global_model(data)\n\n ### Compute the loss\n loss = self.criterion(outputs, target)\n\n ### Backward pass\n loss.backward()\n\n ### Optimizer step\n self.optimizer.step()\n\n ### Update running loss\n running_loss += loss.item()\n\n def __compute_train_loss__(self) -> float:\n \"\"\"Compute train loss on the full dataset using a batch of size 6000.\"\"\"\n train_loss = 0.0\n with torch.no_grad():\n for data in self.full_train_loader:\n data, target = data\n data, target = data.to(self.device), target.to(self.device)\n\n ### Calculate the output\n output = self.global_model(data)\n\n ### Computing the train loss\n loss = self.criterion(output, target)\n train_loss += loss.item()\n\n train_loss = train_loss / len(self.full_train_loader)\n return train_loss\n\n def __compute_test_accuracy_and_loss__(self) -> (float, float):\n \"\"\"Compute test loss/accuracy.\"\"\"\n correct = 0\n total = 0\n test_loss = 0.0\n with torch.no_grad():\n for data in self.test_loader:\n data, target = data\n data, target = data.to(self.device), target.to(self.device)\n\n ### Calculate the output\n output = self.global_model(data)\n\n ### Computing the test loss\n loss = self.criterion(output, target)\n test_loss += loss.item()\n\n ### Computing the test accuracy\n # (The class with the highest energy is what we choose as prediction)\n _, predicted = torch.max(output.data, 1)\n total += target.size(0)\n correct += (predicted == target).sum().item()\n\n test_accuracy = 100 * correct / total\n test_loss = test_loss / len(self.test_loader)\n return test_loss, test_accuracy" ]
[ [ "torch.cuda.manual_seed", "torch.cuda.manual_seed_all", "torch.max", "numpy.random.seed", "torch.no_grad", "torch.manual_seed", "torch.cuda.is_available", "torch.nn.CrossEntropyLoss", "torch.nn.DataParallel" ] ]
caoquan95/6D-pose-project
[ "98c3debc7e8ebcc0c78c54ebb37859fe5d9876db" ]
[ "lib/knn/__init__.py" ]
[ "import unittest\nimport gc\nimport operator as op\nimport functools\nimport torch\nfrom torch.autograd import Variable, Function\n# from lib.knn import knn_pytorch as knn_pytorch\nfrom lib.knn.knn_pytorch import knn_pytorch\n\nclass KNearestNeighbor(Function):\n \"\"\" Compute k nearest neighbors for each query point.\n \"\"\"\n def __init__(self, k):\n self.k = k\n\n def forward(self, ref, query):\n ref = ref.float().cuda()\n query = query.float().cuda()\n\n inds = torch.empty(query.shape[0], self.k, query.shape[2]).long().cuda()\n\n knn_pytorch.knn(ref, query, inds)\n\n return inds\n\n\nclass TestKNearestNeighbor(unittest.TestCase):\n\n def test_forward(self):\n knn = KNearestNeighbor(2)\n while(1):\n D, N, M = 128, 100, 1000\n ref = Variable(torch.rand(2, D, N))\n query = Variable(torch.rand(2, D, M))\n\n inds = knn(ref, query)\n for obj in gc.get_objects():\n if torch.is_tensor(obj):\n print(functools.reduce(op.mul, obj.size()) if len(obj.size()) > 0 else 0, type(obj), obj.size())\n #ref = ref.cpu()\n #query = query.cpu()\n print(inds)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.is_tensor", "torch.rand", "torch.empty" ] ]
butyuhao/SimCLR
[ "fa0d47f91f52d496634e34b57af47d725dd7290d" ]
[ "run.py" ]
[ "import argparse\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torchvision import models\nfrom data_aug.contrastive_learning_dataset import ContrastiveLearningDataset\nfrom models.resnet_simclr import ResNetSimCLR\nfrom simclr import SimCLR\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch SimCLR')\nparser.add_argument('-data', metavar='DIR', default='./datasets',\n help='path to dataset')\nparser.add_argument('-dataset-name', default='stl10',\n help='dataset name', choices=['stl10', 'cifar10'])\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet50)')\nparser.add_argument('-j', '--workers', default=12, type=int, metavar='N',\n help='number of data loading workers (default: 32)')\nparser.add_argument('--epochs', default=200, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.0003, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--disable-cuda', action='store_true',\n help='Disable CUDA')\nparser.add_argument('--fp16_precision', default=False, type=bool,\n help='Whether or not to use 16-bit precision GPU training.')\n\nparser.add_argument('--out_dim', default=128, type=int,\n help='feature dimension (default: 128)')\nparser.add_argument('--log-every-n-steps', default=100, type=int,\n help='Log every n steps')\nparser.add_argument('--temperature', default=0.07, type=float,\n help='softmax temperature (default: 0.07)')\nparser.add_argument('--n-views', default=2, type=int, metavar='N',\n help='Number of views for contrastive learning training.')\nparser.add_argument('--gpu-index', default=0, type=int, help='Gpu index.')\n\n\ndef main():\n args = parser.parse_args()\n assert args.n_views == 2, \"Only two view training is supported. 
Please use --n-views 2.\"\n # check if gpu training is available\n if not args.disable_cuda and torch.cuda.is_available():\n args.device = torch.device('cuda')\n cudnn.deterministic = True\n cudnn.benchmark = True\n else:\n args.device = torch.device('cpu')\n args.gpu_index = -1\n\n dataset = ContrastiveLearningDataset(args.data)\n\n train_dataset = dataset.get_dataset(args.dataset_name, args.n_views)\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True, drop_last=True)\n\n model = ResNetSimCLR(base_model=args.arch, out_dim=args.out_dim)\n\n optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)\n\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=len(train_loader), eta_min=0,\n last_epoch=-1)\n\n # It’s a no-op if the 'gpu_index' argument is a negative integer or None.\n with torch.cuda.device(args.gpu_index):\n simclr = SimCLR(model=model, optimizer=optimizer, scheduler=scheduler, args=args)\n simclr.train(train_loader)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.device", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.cuda.device" ] ]
isazi/kernel_tuner
[ "5571bbcd2b48f32b4af249f5dd48cf64afaac22b" ]
[ "test/test_util_functions.py" ]
[ "from __future__ import print_function\n\nfrom collections import OrderedDict\nimport os\nimport json\nimport warnings\n\nimport numpy as np\nimport pytest\nfrom pytest import raises\n\nfrom .context import skip_if_no_cuda, skip_if_no_opencl\n\nfrom kernel_tuner.interface import Options\nimport kernel_tuner.core as core\nimport kernel_tuner.pycuda as pycuda\nimport kernel_tuner.opencl as opencl\nfrom kernel_tuner.util import *\n\nblock_size_names = [\"block_size_x\", \"block_size_y\", \"block_size_z\"]\n\n\ndef test_get_grid_dimensions1():\n problem_size = (1024, 1024, 1)\n params = {\"block_x\": 41, \"block_y\": 37}\n\n grid_div = ([\"block_x\"], [\"block_y\"], None)\n\n grid = get_grid_dimensions(problem_size, params, grid_div, block_size_names)\n\n assert len(grid) == 3\n assert isinstance(grid[0], int)\n assert isinstance(grid[1], int)\n\n assert grid[0] == 25\n assert grid[1] == 28\n assert grid[2] == 1\n\n grid = get_grid_dimensions(problem_size, params, (grid_div[0], None, None), block_size_names)\n\n assert grid[0] == 25\n assert grid[1] == 1024\n assert grid[2] == 1\n\n grid = get_grid_dimensions(problem_size, params, (None, grid_div[1], None), block_size_names)\n\n assert grid[0] == 1024\n assert grid[1] == 28\n assert grid[2] == 1\n\n grid = get_grid_dimensions(problem_size, params,\n (None, lambda p: p[\"block_x\"], lambda p: p[\"block_y\"]*p[\"block_x\"]), block_size_names)\n\n assert grid[0] == 1024\n assert grid[1] == 25\n assert grid[2] == 1\n\n\ndef test_get_grid_dimensions2():\n problem_size = (1024, 1024, 1)\n params = {\"block_x\": 41, \"block_y\": 37}\n\n grid_div_x = [\"block_x*8\"]\n grid_div_y = [\"(block_y+2)/8\"]\n\n grid = get_grid_dimensions(problem_size, params, (grid_div_x, grid_div_y, None), block_size_names)\n\n assert grid[0] == 4\n assert grid[1] == 256\n\n\ndef test_get_grid_dimensions3():\n problem_size = (1024, 1024, 1)\n params = {\"block_x\": 41, \"block_y\": 37}\n\n grid_div_x = [\"block_x\", \"block_y\"]\n grid_div_y = [\"(block_y+2)/8\"]\n\n def assert_grid_dimensions(problem_size):\n grid = get_grid_dimensions(problem_size, params,\n (grid_div_x, grid_div_y, None), block_size_names)\n assert grid[0] == 1\n assert grid[1] == 256\n assert grid[2] == 1\n\n assert_grid_dimensions(problem_size)\n\n problem_size = (np.int32(1024), np.int64(1024), 1)\n assert_grid_dimensions(problem_size)\n\n\ndef test_get_problem_size1():\n problem_size = (\"num_blocks_x\", \"num_blocks_y*3\")\n params = {\"num_blocks_x\": 71, \"num_blocks_y\": 57}\n\n answer = get_problem_size(problem_size, params)\n assert answer[0] == 71\n assert answer[1] == 171\n assert answer[2] == 1\n\n\ndef test_get_problem_size2():\n problem_size = \"num_blocks_x\"\n params = {\"num_blocks_x\": 71}\n\n answer = get_problem_size(problem_size, params)\n assert answer[0] == 71\n assert answer[1] == 1\n assert answer[2] == 1\n\n\ndef test_get_problem_size3():\n with raises(TypeError):\n problem_size = (3.8, \"num_blocks_y*3\")\n params = {\"num_blocks_y\": 57}\n get_problem_size(problem_size, params)\n\n\ndef test_get_problem_size4():\n params = {\"num_blocks_x\": 71}\n\n answer = get_problem_size(lambda p: (p[\"num_blocks_x\"], 1, 13), params)\n assert answer[0] == 71\n assert answer[1] == 1\n assert answer[2] == 13\n\n\ndef test_get_thread_block_dimensions():\n\n params = {\"block_size_x\": 123, \"block_size_y\": 257}\n\n threads = get_thread_block_dimensions(params)\n assert len(threads) == 3\n assert isinstance(threads[0], int)\n assert isinstance(threads[1], int)\n assert 
isinstance(threads[2], int)\n\n assert threads[0] == 123\n assert threads[1] == 257\n assert threads[2] == 1\n\n\ndef test_prepare_kernel_string():\n kernel = \"this is a weird kernel\"\n params = dict()\n params[\"is\"] = 8\n\n _, output = prepare_kernel_string(\"this\", kernel, params, (3, 7), (1, 2, 3), block_size_names, \"\")\n expected = \"#define kernel_tuner 1\\n\" \\\n \"#define is 8\\n\" \\\n \"#define block_size_z 3\\n\" \\\n \"#define block_size_y 2\\n\" \\\n \"#define block_size_x 1\\n\" \\\n \"#define grid_size_y 7\\n\" \\\n \"#define grid_size_x 3\\n\" \\\n \"this is a weird kernel\"\n assert output == expected\n\n\ndef test_replace_param_occurrences():\n kernel = \"this is a weird kernel\"\n params = dict()\n params[\"is\"] = 8\n params[\"weird\"] = 14\n\n new_kernel = replace_param_occurrences(kernel, params)\n assert new_kernel == \"th8 8 a 14 kernel\"\n\n new_kernel = replace_param_occurrences(kernel, dict())\n assert kernel == new_kernel\n\n params = dict()\n params[\"blablabla\"] = 8\n new_kernel = replace_param_occurrences(kernel, params)\n assert kernel == new_kernel\n\n\ndef test_check_restrictions():\n params = {\"a\": 7, \"b\": 4, \"c\": 3}\n print(params.values())\n print(params.keys())\n restrictions = [[\"a==b+c\"], [\"a==b+c\", \"b==b\", \"a-b==c\"],\n [\"a==b+c\", \"b!=b\", \"a-b==c\"],\n lambda p:p[\"a\"] == p[\"b\"] + p[\"c\"]]\n expected = [True, True, False, True]\n # test the call returns expected\n for r, e in zip(restrictions, expected):\n answer = check_restrictions(r, params.values(), params.keys(), False)\n print(answer)\n assert answer == e\n\n\ndef test_detect_language1():\n kernel_string = \"__global__ void vector_add( ... );\"\n lang = detect_language(kernel_string)\n assert lang == \"CUDA\"\n\n\ndef test_detect_language2():\n kernel_string = \"__kernel void vector_add( ... 
);\"\n lang = detect_language(kernel_string)\n assert lang == \"OpenCL\"\n\n\ndef test_detect_language3():\n kernel_string = \"blabla\"\n lang = detect_language(kernel_string)\n assert lang == \"C\"\n\n\n@skip_if_no_cuda\ndef test_get_device_interface1():\n lang = \"CUDA\"\n with core.DeviceInterface(core.KernelSource(\"\", \"\", lang=lang)) as dev:\n assert isinstance(dev, core.DeviceInterface)\n assert isinstance(dev.dev, pycuda.PyCudaFunctions)\n\n\n@skip_if_no_opencl\ndef test_get_device_interface2():\n lang = \"OpenCL\"\n with core.DeviceInterface(core.KernelSource(\"\", \"\", lang=lang)) as dev:\n assert isinstance(dev, core.DeviceInterface)\n assert isinstance(dev.dev, opencl.OpenCLFunctions)\n\n\ndef test_get_device_interface3():\n with raises(Exception):\n lang = \"blabla\"\n with core.DeviceInterface(lang) as dev:\n pass\n\n\ndef assert_user_warning(f, args, substring=None):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n f(*args)\n assert len(w) == 1\n assert issubclass(w[-1].category, UserWarning)\n if substring:\n assert substring in str(w[-1].message)\n\n\ndef assert_no_user_warning(f, args):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n f(*args)\n assert len(w) == 0\n\n\ndef test_check_argument_list1():\n kernel_name = \"test_kernel\"\n kernel_string = \"\"\"__kernel void test_kernel(int number, char * message, int * numbers) {\n numbers[get_global_id(0)] = numbers[get_global_id(0)] * number;\n }\n \"\"\"\n args = [np.int32(5), 'blah', np.array([1, 2, 3])]\n try:\n check_argument_list(kernel_name, kernel_string, args)\n print(\"Expected a TypeError to be raised\")\n assert False\n except TypeError as e:\n print(str(e))\n assert \"at position 1\" in str(e)\n except Exception:\n print(\"Expected a TypeError to be raised\")\n assert False\n\n\ndef test_check_argument_list2():\n kernel_name = \"test_kernel\"\n kernel_string = \"\"\"__kernel void test_kernel\n (char number, double factors, int * numbers, const unsigned long * moreNumbers) {\n numbers[get_global_id(0)] = numbers[get_global_id(0)] * factors[get_global_id(0)] + number;\n }\n \"\"\"\n args = [np.byte(5), np.float64(4.6), np.int32([1, 2, 3]), np.uint64([3, 2, 111])]\n assert_no_user_warning(check_argument_list, [kernel_name, kernel_string, args])\n\n\ndef test_check_argument_list3():\n kernel_name = \"test_kernel\"\n kernel_string = \"\"\"__kernel void test_kernel (__global const ushort number, __global half * factors, __global long * numbers) {\n numbers[get_global_id(0)] = numbers[get_global_id(0)] * factors[get_global_id(0)] + number;\n }\n \"\"\"\n args = [np.uint16(42), np.float16([3, 4, 6]), np.int32([300])]\n assert_user_warning(check_argument_list, [kernel_name, kernel_string, args], \"at position 2\")\n\n\ndef test_check_argument_list4():\n kernel_name = \"test_kernel\"\n kernel_string = \"\"\"__kernel void test_kernel(__global const ushort number, __global half * factors, __global long * numbers) {\n numbers[get_global_id(0)] = numbers[get_global_id(0)] * factors[get_global_id(0)] + number;\n }\n \"\"\"\n args = [np.uint16(42), np.float16([3, 4, 6]), np.int64([300]), np.ubyte(32)]\n assert_user_warning(check_argument_list, [kernel_name, kernel_string, args], \"do not match in size\")\n\n\ndef test_check_argument_list5():\n kernel_name = \"my_test_kernel\"\n kernel_string = \"\"\" //more complicated test function(because I can)\n\n __device__ float some_lame_device_function(float *a) {\n return a[0];\n }\n\n __global__ void 
my_test_kernel(double *a,\n float *b, int c,\n int d) {\n\n a[threadIdx.x] = b[blockIdx.x]*c*d;\n }\n \"\"\"\n args = [np.array([1, 2, 3]).astype(np.float64),\n np.array([1, 2, 3]).astype(np.float32),\n np.int32(6), np.int32(7)]\n assert_no_user_warning(check_argument_list, [kernel_name, kernel_string, args])\n\n\ndef test_check_argument_list6():\n kernel_name = \"test_kernel\"\n kernel_string = \"\"\"// This is where we define test_kernel\n #define SUM(A, B) (A + B)\n __kernel void test_kernel\n (char number, double factors, int * numbers, const unsigned long * moreNumbers) {\n numbers[get_global_id(0)] = SUM(numbers[get_global_id(0)] * factors[get_global_id(0)], number);\n }\n // /test_kernel\n \"\"\"\n args = [np.byte(5), np.float64(4.6), np.int32([1, 2, 3]), np.uint64([3, 2, 111])]\n check_argument_list(kernel_name, kernel_string, args)\n # test that no exception is raised\n assert True\n\n\ndef test_check_argument_list7():\n kernel_name = \"test_kernel\"\n kernel_string = \"\"\"#define SUM(A, B) (A + B)\n // In this file we define test_kernel\n __kernel void another_kernel (char number, double factors, int * numbers, const unsigned long * moreNumbers)\n __kernel void test_kernel\n (double number, double factors, int * numbers, const unsigned long * moreNumbers) {\n numbers[get_global_id(0)] = SUM(numbers[get_global_id(0)] * factors[get_global_id(0)], number);\n }\n // /test_kernel\n \"\"\"\n args = [np.byte(5), np.float64(4.6), np.int32([1, 2, 3]), np.uint64([3, 2, 111])]\n assert_user_warning(check_argument_list, [kernel_name, kernel_string, args])\n\n\ndef test_check_tune_params_list():\n tune_params = dict(zip([\"one_thing\", \"led_to_another\", \"and_before_you_know_it\",\n \"grid_size_y\"], [1, 2, 3, 4]))\n try:\n check_tune_params_list(tune_params)\n print(\"Expected a ValueError to be raised\")\n assert False\n except ValueError as e:\n print(str(e))\n assert \"Tune parameter grid_size_y with value 4 has a forbidden name!\" == str(e)\n except Exception:\n print(\"Expected a ValueError to be raised\")\n assert False\n\n\ndef test_check_tune_params_list2():\n tune_params = dict(zip([\"rock\", \"paper\", \"scissors\"], [1, 2, 3]))\n check_tune_params_list(tune_params)\n # test that no exception is raised\n assert True\n\n\ndef test_check_block_size_params_names_list():\n def test_warnings(function, args, number, warning_type):\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n # Trigger a warning.\n function(*args)\n # Verify some things\n assert len(w) == number\n for warn in w:\n assert issubclass(warn.category, warning_type)\n\n # check warning triggers for both unused blocksize names\n block_size_names = [\"block_size_a\", \"block_size_b\"]\n tune_params = dict(zip([\"hyper\", \"ultra\", \"mega\", \"turbo\"], [1, 2, 3, 4]))\n test_warnings(check_block_size_params_names_list, [block_size_names, tune_params], 2, UserWarning)\n\n # check warning does not trigger when nondefault block size names are used correctly\n block_size_names = [\"block_size_a\", \"block_size_b\"]\n tune_params = dict(zip([\"block_size_a\", \"block_size_b\", \"many_other_things\"], [1, 2, 3]))\n test_warnings(check_block_size_params_names_list, [block_size_names, tune_params], 0, None)\n\n # check that a warning is issued when none of the default names are used and no alternative names are specified\n block_size_names = None\n tune_params = dict(zip([\"block_size_a\", \"block_size_b\", \"many_other_things\"], [1, 2, 3]))\n 
test_warnings(check_block_size_params_names_list, [block_size_names, tune_params], 1, UserWarning)\n\n # check that no error is raised when any of the default block size names is being used\n block_size_names = None\n tune_params = dict(zip([\"block_size_x\", \"several_other_things\"], [[1, 2, 3, 4], [2, 4]]))\n test_warnings(check_block_size_params_names_list, [block_size_names, tune_params], 0, None)\n\n\ndef test_get_kernel_string_func():\n # test whether passing a function instead of string works\n def gen_kernel(params):\n return \"__global__ void kernel_name() { %s }\" % params[\"block_size_x\"]\n params = {\"block_size_x\": \"//do that kernel thing!\"}\n expected = \"__global__ void kernel_name() { //do that kernel thing! }\"\n answer = get_kernel_string(gen_kernel, params)\n assert answer == expected\n\n\ndef test_get_kernel_string_filename_not_found():\n # when the string looks like a filename, but the file does not exist\n # assume the string is not a filename after all\n bogus_filename = \"filename_3456789.cu\"\n answer = get_kernel_string(bogus_filename)\n assert answer == bogus_filename\n\n\ndef test_looks_like_a_filename1():\n string = \"filename.c\"\n assert looks_like_a_filename(string)\n\n\ndef test_looks_like_a_filename2():\n string = \"__global__ void kernel_name() { //do that kernel thing! }\"\n assert not looks_like_a_filename(string)\n\n\ndef test_read_write_file():\n filename = get_temp_filename()\n\n my_string = \"this is the test string\"\n try:\n write_file(filename, my_string)\n with open(filename, 'r') as f:\n answer = f.read()\n assert my_string == answer\n answer2 = read_file(filename)\n assert my_string == answer2\n\n finally:\n delete_temp_file(filename)\n\n\ndef test_normalize_verify_function():\n assert normalize_verify_function(None) is None\n\n def verify1(answer, result_host):\n return True\n v = normalize_verify_function(verify1)\n assert v(1, 2, atol=3)\n\n def verify2(answer, result_host, atol):\n return True\n v = normalize_verify_function(verify2)\n assert v(1, 2, atol=3)\n\n v = normalize_verify_function(lambda a, b: True)\n assert v(1, 2, atol=3)\n\n v = normalize_verify_function(lambda a, b, atol: True)\n assert v(1, 2, atol=3)\n\n\ndef test_process_cache():\n\n def assert_open_cachefile_is_correctly_parsed(cache):\n with open(cache, \"r\") as cachefile:\n filestr = cachefile.read()\n if filestr[-1] == \",\":\n filestr = filestr[:-1]\n file_contents = filestr + \"}\\n}\"\n cache_object = json.loads(file_contents)\n assert cache_object[\"device_name\"] == \"test_device\"\n assert cache_object[\"kernel_name\"] == \"test_kernel\"\n\n # get temp filename, but remove the file\n cache = get_temp_filename(suffix=\".json\")\n delete_temp_file(cache)\n\n kernel_options = Options(kernel_name=\"test_kernel\")\n tuning_options = Options(cache=cache, tune_params=Options(x=[1, 2, 3, 4]), simulation_mode=False)\n runner = Options(dev=Options(name=\"test_device\"), simulation_mode=False)\n\n try:\n # call process_cache without pre-existing cache\n process_cache(cache, kernel_options, tuning_options, runner)\n\n # check if file has been created\n assert os.path.isfile(cache)\n assert_open_cachefile_is_correctly_parsed(cache)\n assert tuning_options.cachefile == cache\n assert isinstance(tuning_options.cache, dict)\n assert len(tuning_options.cache) == 0\n\n # store one entry in the cache\n params = {\"x\": 4, \"time\": np.float32(0.1234)}\n store_cache(\"4\", params, tuning_options)\n assert len(tuning_options.cache) == 1\n\n # close the cache\n 
close_cache(cache)\n\n # now test process cache with a pre-existing cache file\n process_cache(cache, kernel_options, tuning_options, runner)\n assert_open_cachefile_is_correctly_parsed(cache)\n\n assert tuning_options.cache[\"4\"][\"time\"] == params[\"time\"]\n\n # check that exceptions are raised when using a cache file for\n # a different kernel, device, or parameter set\n with pytest.raises(ValueError) as excep:\n kernel_options.kernel_name = \"wrong_kernel\"\n process_cache(cache, kernel_options, tuning_options, runner)\n assert \"kernel\" in str(excep.value)\n\n with pytest.raises(ValueError) as excep:\n runner.dev.name = \"wrong_device\"\n process_cache(cache, kernel_options, tuning_options, runner)\n assert \"device\" in str(excep.value)\n\n with pytest.raises(ValueError) as excep:\n tuning_options.tune_params[\"y\"] = [\"a\", \"b\"]\n process_cache(cache, kernel_options, tuning_options, runner)\n assert \"parameter\" in str(excep.value)\n\n finally:\n delete_temp_file(cache)\n # pass\n\n\ndef test_process_metrics():\n params = {\"x\": 15, \"b\": 12}\n metrics = OrderedDict()\n metrics[\"y\"] = lambda p: p[\"x\"]\n\n # test if lambda function is correctly evaluated\n params = process_metrics(params, metrics)\n assert params[\"y\"] == params[\"x\"]\n\n # test if we can do the same with a string\n params = {\"x\": 15, \"b\": 12}\n metrics[\"y\"] = \"x\"\n params = process_metrics(params, metrics)\n assert params[\"y\"] == params[\"x\"]\n\n # test if composability works correctly\n params = {\"x\": 15, \"b\": 12}\n metrics = OrderedDict()\n metrics[\"y\"] = \"x\"\n metrics[\"z\"] = \"y\"\n params = process_metrics(params, metrics)\n assert params[\"z\"] == params[\"x\"]\n\n # test ValueError is raised when metrics is not an OrderedDict\n with pytest.raises(ValueError):\n params = process_metrics(params, {})\n\n # test ValueError is raised when b already exists in params\n params = {\"x\": 15, \"b\": 12}\n metrics = OrderedDict()\n metrics[\"b\"] = \"x\"\n with pytest.raises(ValueError):\n params = process_metrics(params, metrics)\n" ]
[ [ "numpy.array", "numpy.float16", "numpy.uint16", "numpy.int64", "numpy.byte", "numpy.float64", "numpy.float32", "numpy.uint64", "numpy.int32", "numpy.ubyte" ] ]
AdityaTewari/first_expts
[ "f2cf97a58bd2443764f41651d746cc33c55632bd" ]
[ "sample.py" ]
[ "#!/usr/bin/env python \n\nfrom __future__ import print_function, division\n\nimport logging\nimport theano\nimport theano.tensor as T\nimport cPickle as pickle\n\nimport numpy as np\nimport os\n\nfrom PIL import Image\nfrom blocks.main_loop import MainLoop\nfrom blocks.model import Model\nfrom blocks.config import config\n\nFORMAT = '[%(asctime)s] %(name)-15s %(message)s'\nDATEFMT = \"%H:%M:%S\"\nlogging.basicConfig(format=FORMAT, datefmt=DATEFMT, level=logging.INFO)\n\ndef scale_norm(arr):\n arr = arr - arr.min()\n scale = (arr.max() - arr.min())\n return arr / scale\n\n# these aren't paramed yet in a generic way, but these values work\nROWS = 10\nCOLS = 20\n\ndef img_grid(arr, global_scale=True):\n N, channels, height, width = arr.shape\n\n global ROWS, COLS\n rows = ROWS\n cols = COLS\n # rows = int(np.sqrt(N))\n # cols = int(np.sqrt(N))\n\n # if rows*cols < N:\n # cols = cols + 1\n\n # if rows*cols < N:\n # rows = rows + 1\n\n total_height = rows * height + 9\n total_width = cols * width + 19\n\n if global_scale:\n arr = scale_norm(arr)\n\n I = np.zeros((channels, total_height, total_width))\n I.fill(1)\n\n for i in xrange(N):\n r = i // cols\n c = i % cols\n\n if global_scale:\n this = arr[i]\n else:\n this = scale_norm(arr[i])\n\n offset_y, offset_x = r*height+r, c*width+c\n I[0:channels, offset_y:(offset_y+height), offset_x:(offset_x+width)] = this\n \n I = (255*I).astype(np.uint8)\n if(channels == 1):\n out = I.reshape( (total_height, total_width) )\n else:\n out = np.dstack(I).astype(np.uint8)\n return Image.fromarray(out)\n\ndef generate_samples(p, subdir, output_size, channels):\n if isinstance(p, Model):\n model = p\n else:\n print(\"Don't know how to handle unpickled %s\" % type(p))\n return\n\n draw = model.get_top_bricks()[0]\n # reset the random generator\n del draw._theano_rng\n del draw._theano_seed\n draw.seed_rng = np.random.RandomState(config.default_seed)\n\n #------------------------------------------------------------\n logging.info(\"Compiling sample function...\")\n\n n_samples = T.iscalar(\"n_samples\")\n samples = draw.sample(n_samples)\n\n do_sample = theano.function([n_samples], outputs=samples, allow_input_downcast=True)\n\n #------------------------------------------------------------\n logging.info(\"Sampling and saving images...\")\n\n global ROWS, COLS\n samples = do_sample(ROWS*COLS)\n #samples = np.random.normal(size=(16, 100, 28*28))\n\n n_iter, N, D = samples.shape\n # logging.info(\"SHAPE IS: {}\".format(samples.shape))\n\n samples = samples.reshape( (n_iter, N, channels, output_size, output_size) )\n\n if(n_iter > 0):\n img = img_grid(samples[n_iter-1,:,:,:])\n img.save(\"{0}/sample.png\".format(subdir))\n\n for i in xrange(n_iter-1):\n img = img_grid(samples[i,:,:,:])\n img.save(\"{0}/time-{1:03d}.png\".format(subdir, i))\n\n #with open(\"centers.pkl\", \"wb\") as f:\n # pikle.dump(f, (center_y, center_x, delta))\n os.system(\"convert -delay 5 {0}/time-*.png -delay 300 {0}/sample.png {0}/sequence.gif\".format(subdir))\n\nif __name__ == \"__main__\":\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n parser.add_argument(\"model_file\", help=\"filename of a pickled DRAW model\")\n parser.add_argument(\"--channels\", type=int,\n default=1, help=\"number of channels\")\n parser.add_argument(\"--size\", type=int,\n default=28, help=\"Output image size (width and height)\")\n args = parser.parse_args()\n\n logging.info(\"Loading file %s...\" % args.model_file)\n with open(args.model_file, \"rb\") as f:\n p = pickle.load(f)\n\n 
subdir = \"sample\"\n if not os.path.exists(subdir):\n os.makedirs(subdir)\n\n generate_samples(p, subdir, args.size, args.channels)\n\n\n\n" ]
[ [ "numpy.dstack", "numpy.zeros", "numpy.random.RandomState" ] ]
Rishav1/PySyft
[ "f620ee12727b52b19a317f263789830b57ee2539" ]
[ "test/torch/tensors/test_gc.py" ]
[ "\"\"\"All the tests relative to garbage collection of all kinds of remote or local tensors\"\"\"\nimport time\n\nimport torch\n\nfrom syft.frameworks.torch.tensors.decorators.logging import LoggingTensor\nfrom syft.workers.websocket_server import WebsocketServerWorker\nfrom syft.workers.websocket_client import WebsocketClientWorker\n\n# TESTING POINTERS\n\n\ndef test_explicit_garbage_collect_pointer(workers):\n \"\"\"Tests whether deleting a PointerTensor garbage collects the remote object too\"\"\"\n bob = workers[\"bob\"]\n\n # create tensor\n x = torch.Tensor([1, 2])\n\n # send tensor to bob\n x_ptr = x.send(bob)\n\n # ensure bob has tensor\n assert x.id in bob._objects\n\n # delete pointer to tensor, which should\n # automatically garbage collect the remote\n # object on Bob's machine\n del x_ptr\n\n # ensure bob's object was garbage collected\n assert x.id not in bob._objects\n\n\ndef test_explicit_garbage_collect_double_pointer(workers):\n \"\"\"Tests whether deleting a pointer to a pointer garbage collects\n the remote object too\"\"\"\n\n alice, bob = workers[\"alice\"], workers[\"bob\"]\n\n # create tensor\n x = torch.Tensor([1, 2])\n\n # send tensor to bob and then pointer to alice\n x_ptr = x.send(bob)\n x_ptr_ptr = x_ptr.send(alice)\n\n # ensure bob has tensor\n assert x.id in bob._objects\n\n # delete pointer to pointer to tensor, which should automatically\n # garbage collect the remote object on Bob's machine\n del x_ptr_ptr\n\n # ensure bob's object was garbage collected\n assert x.id not in bob._objects\n # ensure alice's object was garbage collected\n assert x_ptr.id not in workers[\"alice\"]._objects\n\n # Chained version\n x = torch.Tensor([1, 2])\n x_id = x.id\n\n # send tensor to bob and then pointer to alice\n # overwriting variable names at sending in the test, is on purpose,\n # to be sure nothing weird happens when people do this\n x = x.send(bob).send(alice)\n\n # ensure bob has tensor\n assert x_id in bob._objects\n # delete pointer to pointer to tensor\n del x\n # ensure bob's object was garbage collected\n assert x_id not in bob._objects\n\n\ndef test_implicit_garbage_collection_pointer(workers):\n \"\"\"Tests whether GCing a PointerTensor GCs the remote object too.\"\"\"\n bob = workers[\"bob\"]\n\n # create tensor\n x = torch.Tensor([1, 2])\n\n # send tensor to bob\n x_ptr = x.send(bob)\n\n # ensure bob has tensor\n assert x.id in bob._objects\n\n # delete pointer to tensor, which should\n # automatically garbage collect the remote\n # object on Bob's machine\n x_ptr = \"asdf\"\n\n # ensure bob's object was garbage collected\n assert x.id not in bob._objects\n\n\ndef test_implicit_garbage_collect_double_pointer(workers):\n \"\"\"Tests whether GCing a pointer to a pointer garbage collects\n the remote object too\"\"\"\n\n alice, bob = workers[\"alice\"], workers[\"bob\"]\n\n # create tensor\n x = torch.Tensor([1, 2])\n\n # send tensor to bob and then pointer to alice\n x_ptr = x.send(bob)\n x_ptr_ptr = x_ptr.send(alice)\n\n # ensure bob has tensor\n assert x.id in bob._objects\n # ensure alice has tensor\n assert x_ptr.id in alice._objects\n\n # delete pointer to pointer to tensor, which should automatically\n # garbage collect the remote object on Bob's machine\n x_ptr_ptr = \"asdf\"\n\n # ensure bob's object was garbage collected\n assert x.id not in bob._objects\n # ensure alice's object was garbage collected\n assert x_ptr.id not in alice._objects\n\n # Chained version\n x = torch.Tensor([1, 2])\n x_id = x.id\n # send tensor to bob and then pointer to 
alice\n # overwriting variable names at sending in the test, is on purpose,\n # to be sure nothing weird happens when people do this\n x = x.send(bob).send(alice)\n\n # ensure bob has tensor\n assert x_id in bob._objects\n\n # delete pointer to pointer to tensor\n x = \"asdf\"\n\n # ensure bob's object was garbage collected\n assert x_id not in bob._objects\n\n\n# TESTING IN PLACE METHODS\n\n\ndef test_inplace_method_on_pointer(workers):\n bob = workers[\"bob\"]\n\n tensor = torch.tensor([[1.0, 2], [4.0, 2]])\n pointer = tensor.send(bob)\n pointer.add_(pointer)\n tensor_back = pointer.get()\n assert (tensor * 2 == tensor_back).all()\n\n\n# TESTING LOGGING TENSORS\n\n\ndef test_explicit_garbage_collect_logging_on_pointer(workers):\n \"\"\"\n Tests whether deleting a LoggingTensor on a PointerTensor\n garbage collects the remote object too\n \"\"\"\n bob = workers[\"bob\"]\n\n x = torch.Tensor([1, 2])\n x_id = x.id\n\n x = x.send(bob)\n x = LoggingTensor().on(x)\n assert x_id in bob._objects\n\n del x\n\n assert x_id not in bob._objects\n\n\ndef test_implicit_garbage_collect_logging_on_pointer(workers):\n \"\"\"\n Tests whether GCing a LoggingTensor on a PointerTensor\n garbage collects the remote object too\n \"\"\"\n bob = workers[\"bob\"]\n\n x = torch.Tensor([1, 2])\n x_id = x.id\n\n x = x.send(bob)\n x = LoggingTensor().on(x)\n assert x_id in bob._objects\n\n x = \"open-source\"\n assert x_id not in bob._objects\n\n\ndef test_websocket_garbage_collection(hook, start_remote_worker):\n server, remote_proxy = start_remote_worker(id=\"ws_gc\", hook=hook, port=8555)\n\n sample_data = torch.tensor([1, 2, 3, 4])\n sample_ptr = sample_data.send(remote_proxy)\n\n _ = sample_ptr.get()\n assert sample_data not in remote_proxy._objects\n\n remote_proxy.close()\n server.terminate()\n" ]
[ [ "torch.Tensor", "torch.tensor" ] ]
FasahatSiddiqui/Read-VGG-json-and-COCO-json-labeling-files
[ "b30ad19397018dbd4c9e33bb4733431086f128a9" ]
[ "json_data_labelling.py" ]
[ "import numpy as np\nimport cv2\nimport os\nfrom matplotlib import pyplot as plt\nimport json\nfrom skimage.draw import polygon\n\ndef json_mask(file_name,json_filename,file_extension, dir):\n img = cv2.imread(os.path.join(dir,'train_data',file_name),1)\n mask = np.zeros((img.shape[0],img.shape[1]))\n \n with open(os.path.join(dir,json_filename)) as f:\n data = json.load(f)\n\n ##-------------------------------------VGG json file\n if file_extension == 'VGG':\n for i in range(len(data['2.jpg']['regions'])):\n if data['2.jpg']['regions'][str(i)]['region_attributes']['label'] == 'leaf':\n x_points = data['2.jpg']['regions'][str(i)]['shape_attributes']['all_points_x']\n y_points = data['2.jpg']['regions'][str(i)]['shape_attributes']['all_points_y']\n x=np.round(np.array(x_points))\n y=np.round(np.array(y_points))\n rr, cc = polygon(y, x)\n mask[rr, cc] = 120\n if data['2.jpg']['regions'][str(i)]['region_attributes']['label'] == 'background':\n x_points = data['2.jpg']['regions'][str(i)]['shape_attributes']['all_points_x']\n y_points = data['2.jpg']['regions'][str(i)]['shape_attributes']['all_points_y']\n x=np.round(np.array(x_points))\n y=np.round(np.array(y_points))\n rr, cc = polygon(y, x)\n mask[rr, cc] = 255\n\n cv2.imshow('mask',mask)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n plt.imsave(dir+'/label_img_vgg.tiff',mask, cmap ='jet')\n \n ##--------------------------------COCO json file\n elif file_extension == 'COCO':\n from skimage.draw import polygon\n for i in range(len(data['annotations'])):\n if data['categories'][(data['annotations'][i]['category_id']-1)]['name'] == 'leaf':\n points = data['annotations'][i]['segmentation']\n points = np.array(points)\n points=np.transpose(points)\n points=np.round(points)\n x=[]\n y=[]\n for j in range (points.shape[0]):\n if j%2==0:\n x.append(points[j])\n else:\n y.append(points[j])\n rr, cc = polygon(y, x)\n mask[rr, cc] = 120\n if data['categories'][(data['annotations'][i]['category_id']-1)]['name'] == 'background':\n points = data['annotations'][i]['segmentation']\n points = np.array(points)\n points=np.transpose(points)\n points=np.round(points)\n x=[]\n y=[]\n for j in range (points.shape[0]):\n if j%2==0:\n x.append(points[j])\n else:\n y.append(points[j])\n rr, cc = polygon(y, x)\n mask[rr, cc] = 255\n\n cv2.imshow('mask',mask)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n plt.imsave(dir+'/label_img_coco.tiff',mask, cmap ='jet')\n\n\ndir=os.getcwd() \njson_mask('.jpg','.json','COCO', dir)" ]
[ [ "numpy.array", "numpy.zeros", "matplotlib.pyplot.imsave", "numpy.round", "numpy.transpose" ] ]
engeir/isr-spectrum
[ "7ac0562dd71c3d55bf5991c3fe8de3b5d8a55a02" ]
[ "src/isr_spectrum/plotting/reproduce.py" ]
[ "\"\"\"Reproduce the plots used in the thesis, and/or create new\n\"experiments\" based on the abstract base class `Reproduce`.\n\nRun from `main.py`.\n\"\"\"\n\nimport sys\nimport time\nfrom abc import ABC, abstractmethod\n\nimport matplotlib\nimport matplotlib.patheffects as PathEffects\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.constants as const\nfrom matplotlib import gridspec\n\n# from inputs import config as cf\n\n# Customize matplotlib\nmatplotlib.rcParams.update(\n {\n \"text.usetex\": True,\n \"font.family\": \"DejaVu Sans\",\n \"axes.unicode_minus\": False,\n \"pgf.texsystem\": \"pdflatex\",\n }\n)\n\nif __name__ != \"__main__\":\n from isr_spectrum.utils import spectrum_calculation as isr\n\n\nclass Reproduce(ABC):\n \"\"\"Abstract base class to reproduce figures.\n\n Arguments:\n ABC {class} -- abstract base class\n \"\"\"\n\n def __init__(self, p):\n self.f = np.ndarray([])\n self.data = []\n self.meta_data = []\n self.legend_txt = []\n self.ridge_txt = []\n self.p = p\n\n def create_it(self, *args, from_file=False):\n if not from_file:\n self.create_from_code()\n else:\n self.create_from_file(*args)\n\n @abstractmethod\n def create_from_code(self):\n \"\"\"Method that create needed data.\"\"\"\n\n def create_from_file(self, *args):\n \"\"\"Accepts zero, one or two arguments.\n\n If zero arguments are given, a default path is used to look for files.\n ::\n If one argument is given, it should include\n the full path (with or without file ending).\n ::\n If two arguments are given, the first should be the path to\n the directory where the file is located, and the second\n argument must be the name of the file.\n \"\"\"\n if len(args) != 0:\n if len(args) == 1:\n args = args[0]\n parts = args.split(\"/\")\n path = \"/\".join(parts[:-1]) + \"/\"\n name = parts[-1]\n elif len(args) == 2:\n path = args[0]\n name = args[1]\n else:\n path = \"../../figures/\"\n name = \"hello_kitty_2020_6_9_2--28--4.npz\"\n name = name.split(\".\")[0]\n try:\n f = np.load(path + name + \".npz\", allow_pickle=True)\n except Exception:\n sys.exit(print(f\"Could not open file {path + name}.npz\"))\n sorted(f)\n self.f, self.data, self.meta_data = (\n f[\"frequency\"],\n list(f[\"spectra\"]),\n list(f[\"meta\"]),\n )\n self.legend_txt, self.ridge_txt = list(f[\"legend_txt\"]), list(f[\"ridge_txt\"])\n\n if self.p.save in [\"y\", \"yes\"]:\n self.p.save_path = name\n\n @abstractmethod\n def plot_it(self):\n \"\"\"Method that plot relevant plots.\"\"\"\n\n\nclass PlotNumerical(Reproduce):\n \"\"\"Reproduce figure with a comparison between the semi-analytic\n and numerical implementation.\n\n In config, set\n ```\n 'F_MIN': - 2e6, 'F_MAX': 9e6\n ```\n Also, using\n ```\n F_N_POINTS = 1e3\n ```\n is sufficient.\n \"\"\"\n\n def create_from_code(self):\n F0 = 430e6\n K_RADAR = -2 * F0 * 2 * np.pi / const.c # Radar wavenumber\n sys_set = {\n \"K_RADAR\": K_RADAR,\n \"B\": 35000e-9,\n \"MI\": 16,\n \"NE\": 1e12,\n \"NU_E\": 100,\n \"NU_I\": 100,\n \"T_E\": 2000,\n \"T_I\": 1500,\n \"T_ES\": 90000,\n \"THETA\": 30 * np.pi / 180,\n \"Z\": 300,\n \"mat_file\": \"fe_zmuE-07.mat\",\n \"pitch_angle\": \"all\",\n }\n params = {\"kappa\": 3, \"vdf\": \"maxwell\", \"area\": False}\n\n ridge = []\n self.f, s1, meta_data = isr.isr_spectrum(\"maxwell\", sys_set, **params)\n ridge.append(s1)\n self.meta_data.append(meta_data)\n _, s2, _ = isr.isr_spectrum(\"a_vdf\", sys_set, **params)\n ridge.append(s2)\n self.data.append(ridge)\n\n ridge = []\n params[\"vdf\"] = \"kappa\"\n self.f, s1, meta_data = 
isr.isr_spectrum(\"kappa\", sys_set, **params)\n ridge.append(s1)\n self.meta_data.append(meta_data)\n _, s2, _ = isr.isr_spectrum(\"a_vdf\", sys_set, **params)\n ridge.append(s2)\n self.data.append(ridge)\n\n def plot_it(self):\n for maxwell, data in enumerate(self.data):\n self.plotter(maxwell, data)\n\n def plotter(self, maxwell, data):\n s1 = data[0]\n s2 = data[1]\n plot = plt.semilogy\n # xlim = [1e3, self.f[-1]]\n d = s1 - s2\n rd = d / s1\n plt.figure(figsize=(8, 5))\n plt.subplot(3, 1, 1)\n if maxwell == 0:\n plt.title(\"Maxwell\")\n else:\n plt.title(\"Kappa\")\n plot(self.f, s1, \"k\", label=\"Semi-analytic (SA)\")\n plot(self.f, s2, \"r--\", label=\"Numerical (N)\")\n plt.legend()\n # plt.xlim(xlim)\n plt.minorticks_on()\n plt.grid(True, which=\"both\", ls=\"-\", alpha=0.4)\n plt.subplot(3, 1, 2)\n plt.title(\"Difference (SA - N)\")\n plot(self.f, d, \"k\", label=\"Positive\")\n plot(self.f, -d, \"r\", label=\"Negative\")\n plt.legend()\n # plt.xlim(xlim)\n plt.minorticks_on()\n plt.grid(True, which=\"both\", ls=\"-\", alpha=0.4)\n plt.subplot(3, 1, 3)\n plt.title(\"Difference relative to semi-analytic [(SA - N) / SA]\")\n plot(self.f, rd, \"k\", label=\"Positive\")\n plot(self.f, -rd, \"r\", label=\"Negative\")\n plt.legend()\n # plt.xlim(xlim)\n plt.minorticks_on()\n plt.grid(True, which=\"both\", ls=\"-\", alpha=0.4)\n plt.yticks([1e-9, 1e-6, 1e-3, 1e0])\n\n plt.tight_layout()\n\n if self.p.save in [\"y\", \"yes\"]:\n self.p.pdffig.attach_note(\"numerical precision test\")\n plt.savefig(self.p.pdffig, bbox_inches=\"tight\", format=\"pdf\", dpi=600)\n plt.savefig(\n str(self.p.save_path) + f\"_page_{self.p.page}.pgf\", bbox_inches=\"tight\"\n )\n self.p.page += 1\n\n\nclass PlotTestDebye(Reproduce):\n \"\"\"Reproduce figure of IS spectra using two kappa\n dist with and without Debye length correction.\n\n In config, set\n ```\n 'F_MIN': - 2e6, 'F_MAX': 2e6\n ```\n Also, using\n ```\n F_N_POINTS = 5e5\n ```\n is sufficient.\n \"\"\"\n\n def create_from_code(self):\n F0 = 430e6\n K_RADAR = -2 * F0 * 2 * np.pi / const.c # Radar wavenumber\n self.legend_txt = [\n r\"$\\lambda_{\\mathrm{D}} = \\lambda_{\\mathrm{D},\\kappa}$\",\n r\"$\\lambda_{\\mathrm{D}} = \\lambda_{\\mathrm{D,M}}$\",\n ]\n sys_set = {\n \"K_RADAR\": K_RADAR,\n \"B\": 35000e-9,\n \"MI\": 29,\n \"NE\": 2e10,\n \"NU_E\": 0,\n \"NU_I\": 0,\n \"T_E\": 200,\n \"T_I\": 200,\n \"T_ES\": 90000,\n \"THETA\": 45 * np.pi / 180,\n \"Z\": 599,\n \"mat_file\": \"fe_zmuE-07.mat\",\n }\n params = {\"kappa\": 3, \"vdf\": \"real_data\", \"area\": False}\n self.f, s, meta_data = isr.isr_spectrum(\"kappa\", sys_set, **params)\n self.data.append(s)\n self.meta_data.append(meta_data)\n params[\"debye\"] = \"maxwell\"\n self.f, s, meta_data = isr.isr_spectrum(\"kappa\", sys_set, **params)\n self.data.append(s)\n self.meta_data.append(meta_data)\n\n def plot_it(self):\n self.p.plot_normal(self.f, self.data, \"semilogy\", self.legend_txt)\n\n\nclass PlotSpectra(Reproduce):\n \"\"\"Reproduce figure with ridge plot over different temperatures.\n\n In config, set\n ```\n 'F_MIN': - 2e6, 'F_MAX': 2e6\n ```\n Also, using\n ```\n F_N_POINTS = 1e5\n ```\n is sufficient.\n \"\"\"\n\n def create_from_code(self):\n F0 = 430e6\n K_RADAR = -2 * F0 * 2 * np.pi / const.c # Radar wavenumber\n self.legend_txt = [\n \"Maxwellian\",\n r\"$\\kappa = 20$\",\n r\"$\\kappa = 8$\",\n r\"$\\kappa = 3$\",\n ]\n kappa = [20, 8, 3]\n sys_set = {\n \"K_RADAR\": K_RADAR,\n \"B\": 35000e-9,\n \"MI\": 29,\n \"NE\": 2e10,\n \"NU_E\": 0,\n \"NU_I\": 0,\n \"T_E\": 200,\n 
\"T_I\": 200,\n \"T_ES\": 90000,\n \"THETA\": 45 * np.pi / 180,\n \"Z\": 599,\n \"mat_file\": \"fe_zmuE-07.mat\",\n }\n params = {\"kappa\": 20, \"vdf\": \"real_data\", \"area\": False}\n t0 = time.perf_counter()\n self.f, s, meta_data = isr.isr_spectrum(\"maxwell\", sys_set, **params)\n t1 = time.perf_counter()\n print(f\"Took {t1-t0:.2f} seconds.\")\n self.data.append(s)\n for k in kappa:\n params[\"kappa\"] = k\n t0 = time.perf_counter()\n self.f, s, meta_data = isr.isr_spectrum(\"kappa\", sys_set, **params)\n t1 = time.perf_counter()\n print(f\"Took {t1-t0:.2f} seconds.\")\n self.data.append(s)\n meta_data[\"version\"] = \"both\"\n self.meta_data.append(meta_data)\n\n def plot_it(self):\n self.p.plot_normal(self.f, self.data, \"semilogy\", self.legend_txt)\n\n\nclass PlotIonLine(Reproduce):\n \"\"\"Reproduce figure with ridge plot over different temperatures.\n\n In config, set\n ```\n 'F_MIN': - 3e3, 'F_MAX': 3e3\n ```\n Also, using\n ```\n F_N_POINTS = 1e3\n ```\n is sufficient.\n \"\"\"\n\n def create_from_code(self):\n F0 = 430e6\n K_RADAR = -2 * F0 * 2 * np.pi / const.c\n self.legend_txt = [\n \"Maxwellian\",\n r\"$\\kappa = 20$\",\n r\"$\\kappa = 8$\",\n r\"$\\kappa = 3$\",\n ]\n kappa = [20, 8, 3]\n sys_set = {\n \"K_RADAR\": K_RADAR,\n \"B\": 35000e-9,\n \"MI\": 29,\n \"NE\": 2e10,\n \"NU_E\": 0,\n \"NU_I\": 0,\n \"T_E\": 200,\n \"T_I\": 200,\n \"T_ES\": 90000,\n \"THETA\": 45 * np.pi / 180,\n \"Z\": 599,\n \"mat_file\": \"fe_zmuE-07.mat\",\n }\n params = {\"kappa\": 20, \"vdf\": \"real_data\", \"area\": False}\n self.f, s, meta_data = isr.isr_spectrum(\"maxwell\", sys_set, **params)\n self.data.append(s)\n for k in kappa:\n params[\"kappa\"] = k\n self.f, s, meta_data = isr.isr_spectrum(\"kappa\", sys_set, **params)\n self.data.append(s)\n meta_data[\"version\"] = \"both\"\n self.meta_data.append(meta_data)\n\n def plot_it(self):\n self.p.plot_normal(self.f, self.data, \"plot\", self.legend_txt)\n\n\nclass PlotPlasmaLine(Reproduce):\n \"\"\"Reproduce figure with ridge plot over different temperatures.\n\n In config, set\n ```\n 'F_MIN': 3.5e6, 'F_MAX': 7e6\n ```\n Also, using\n ```\n F_N_POINTS = 1e3\n ```\n is sufficient.\n \"\"\"\n\n def create_from_code(self):\n F0 = 933e6\n K_RADAR = -2 * F0 * 2 * np.pi / const.c\n self.legend_txt = [\n \"Maxwellian\",\n r\"$\\kappa = 20$\",\n r\"$\\kappa = 8$\",\n r\"$\\kappa = 3$\",\n ]\n kappa = [20, 8, 3]\n sys_set = {\n \"K_RADAR\": K_RADAR,\n \"B\": 50000e-9,\n \"MI\": 16,\n \"NE\": 2e11,\n \"NU_E\": 0,\n \"NU_I\": 0,\n \"T_E\": 5000,\n \"T_I\": 2000,\n \"T_ES\": 90000,\n \"THETA\": 0 * np.pi / 180,\n \"Z\": 599,\n \"mat_file\": \"fe_zmuE-07.mat\",\n }\n params = {\"kappa\": 20, \"vdf\": \"real_data\", \"area\": False}\n self.f, s, meta_data = isr.isr_spectrum(\"maxwell\", sys_set, **params)\n self.data.append(s)\n for k in kappa:\n params[\"kappa\"] = k\n self.f, s, meta_data = isr.isr_spectrum(\"kappa\", sys_set, **params)\n self.data.append(s)\n meta_data[\"version\"] = \"both\"\n self.meta_data.append(meta_data)\n\n def plot_it(self):\n self.p.plot_normal(self.f, self.data, \"plot\", self.legend_txt)\n\n\nclass PlotTemperature(Reproduce):\n \"\"\"Reproduce figure with ridge plot over different temperatures.\n\n In config, set\n ```\n 'F_MIN': 3.5e6, 'F_MAX': 7.5e6\n ```\n Also, using\n ```\n F_N_POINTS = 5e3\n ```\n is sufficient.\n \"\"\"\n\n def __init__(self, p):\n super(PlotTemperature, self).__init__(p)\n self.f_list = [[], [], []]\n\n def create_from_file(self, *args):\n \"\"\"Accepts zero, one or two arguments.\n\n If zero 
arguments are given,\n a default path is used to look for files.\n ::\n If one argument is given, it should include\n the full path (with or without file ending).\n ::\n If two arguments are given, the first should be the path to\n the directory where the file is located, and the second\n argument must be the name of the file.\n \"\"\"\n if len(args) != 0:\n if len(args) == 1:\n args = args[0]\n parts = args.split(\"/\")\n path = \"/\".join(parts[:-1]) + \"/\"\n name = parts[-1]\n elif len(args) == 2:\n path = args[0]\n name = args[1]\n else:\n path = \"../../figures/\"\n name = \"hello_kitty_2020_6_9_2--28--4.npz\"\n name = name.split(\".\")[0]\n try:\n f = np.load(path + name + \".npz\", allow_pickle=True)\n except Exception:\n sys.exit(print(f\"Could not open file {path + name}.npz\"))\n sorted(f)\n self.f, self.data, self.meta_data = (\n f[\"frequency\"],\n list(f[\"spectra\"]),\n list(f[\"meta\"]),\n )\n self.legend_txt, self.ridge_txt = list(f[\"legend_txt\"]), list(f[\"ridge_txt\"])\n\n for r in self.data:\n peak = int(np.argwhere(r[0] == np.max(r[0])))\n self.f_list[0].append(self.f[peak])\n peak = int(np.argwhere(r[1] == np.max(r[1])))\n self.f_list[1].append(self.f[peak])\n peak = int(np.argwhere(r[2] == np.max(r[2])))\n self.f_list[2].append(self.f[peak])\n\n if self.p.save in [\"y\", \"yes\"]:\n self.p.save_path = name\n\n def create_from_code(self):\n F0 = 933e6\n K_RADAR = -2 * F0 * 2 * np.pi / const.c\n T = [2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]\n self.ridge_txt = [r\"$T_{\\mathrm{e}} = %d \\mathrm{K}$\" % j for j in T]\n self.legend_txt = [\"Maxwellian\", r\"$\\kappa = 20$\", r\"$\\kappa = 3$\"]\n sys_set = {\n \"K_RADAR\": K_RADAR,\n \"B\": 50000e-9,\n \"MI\": 16,\n \"NE\": 2e11,\n \"NU_E\": 0,\n \"NU_I\": 0,\n \"T_E\": 2000,\n \"T_I\": 2000,\n \"T_ES\": 90000,\n \"THETA\": 0 * np.pi / 180,\n \"Z\": 599,\n \"mat_file\": \"fe_zmuE-07.mat\",\n }\n params = {\"kappa\": 8, \"vdf\": \"real_data\", \"area\": False}\n kappa = [20, 3]\n for t in T:\n ridge = []\n sys_set[\"T_E\"] = t\n self.f, s, meta_data = isr.isr_spectrum(\"maxwell\", sys_set, **params)\n ridge.append(s)\n for k in kappa:\n params[\"kappa\"] = k\n self.f, s, meta_data = isr.isr_spectrum(\"kappa\", sys_set, **params)\n ridge.append(s)\n self.data.append(ridge)\n self.meta_data.append(meta_data)\n\n for r in self.data:\n peak = int(np.argwhere(r[0] == np.max(r[0])))\n self.f_list[0].append(self.f[peak])\n peak = int(np.argwhere(r[1] == np.max(r[1])))\n self.f_list[1].append(self.f[peak])\n peak = int(np.argwhere(r[2] == np.max(r[2])))\n self.f_list[2].append(self.f[peak])\n\n def plot_it(self):\n self.p.plot_ridge(self.f, self.data, \"plot\", self.legend_txt, self.ridge_txt)\n\n T = [2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]\n plt.figure(figsize=(6, 3))\n plt.plot(T, self.f_list[0], \"k\", label=\"Maxwellian\")\n plt.plot(T, self.f_list[1], \"k--\", label=r\"$\\kappa = 20$\")\n plt.plot(T, self.f_list[2], \"k:\", label=r\"$\\kappa = 3$\")\n plt.legend()\n\n if self.p.save in [\"y\", \"yes\"]:\n self.p.pdffig.attach_note(\"freq change\")\n plt.savefig(self.p.pdffig, bbox_inches=\"tight\", format=\"pdf\", dpi=600)\n plt.savefig(\n str(self.p.save_path) + f\"_page_{self.p.page}.pgf\", bbox_inches=\"tight\"\n )\n self.p.page += 1\n\n\nclass PlotHKExtremes(Reproduce):\n \"\"\"Reproduce figure with ridge plot over the extremes from\n the Hello Kitty plot.\n\n In config, set\n ```\n 'F_MIN': 2.5e6, 'F_MAX': 9.5e6\n ```\n Also, using\n ```\n F_N_POINTS = 1e4\n ```\n is sufficient.\n 
\"\"\"\n\n def create_from_code(self):\n F0 = 430e6\n K_RADAR = -2 * F0 * 2 * np.pi / const.c # Radar wavenumber\n sys_set = {\n \"K_RADAR\": K_RADAR,\n \"B\": 35000e-9,\n \"MI\": 16,\n \"NE\": 1e11,\n \"NU_E\": 100,\n \"NU_I\": 100,\n \"T_E\": 2000,\n \"T_I\": 1500,\n \"T_ES\": 90000,\n \"THETA\": 30 * np.pi / 180,\n \"Z\": 599,\n \"mat_file\": \"fe_zmuE-07.mat\",\n \"pitch_angle\": list(range(10)),\n }\n params = {\"kappa\": 8, \"vdf\": \"real_data\", \"area\": False}\n # Ridge 1\n ridge = []\n # Line 1\n self.f, s, meta_data = isr.isr_spectrum(\"a_vdf\", sys_set, **params)\n ridge.append(s)\n self.meta_data.append(meta_data)\n # Line 2\n sys_set[\"NE\"] = 1e12\n self.f, s, meta_data = isr.isr_spectrum(\"a_vdf\", sys_set, **params)\n ridge.append(s)\n self.data.append(ridge)\n self.meta_data.append(meta_data)\n\n # Ridge 2\n ridge = []\n # Line 1\n sys_set[\"THETA\"] = 60 * np.pi / 180\n sys_set[\"NE\"] = 1e11\n self.f, s, meta_data = isr.isr_spectrum(\"a_vdf\", sys_set, **params)\n ridge.append(s)\n self.meta_data.append(meta_data)\n # Line 2\n sys_set[\"NE\"] = 1e12\n self.f, s, meta_data = isr.isr_spectrum(\"a_vdf\", sys_set, **params)\n ridge.append(s)\n self.data.append(ridge)\n self.meta_data.append(meta_data)\n\n self.legend_txt = [\"1e11\", \"1e12\"]\n self.ridge_txt = [\"30\", \"60\"]\n\n def plot_it(self):\n self.p.plot_ridge(\n self.f, self.data, \"semilogy\", self.legend_txt, self.ridge_txt\n )\n\n\nclass PlotHK:\n \"\"\"Reproduce the Hello Kitty figures from saved data.\"\"\"\n\n def __init__(self, *args):\n \"\"\"Accepts zero, one or two arguments.\n\n If zero arguments are given, a default path is used to look for files.\n ::\n If one argument is given, it should include\n the full path (with or without file ending).\n ::\n If two arguments are given, the first should be the path to\n the directory where the file is located, and the second\n argument must be the name of the file.\n \"\"\"\n if len(args) != 0:\n if len(args) == 1:\n args = args[0]\n parts = args.split(\"/\")\n path = \"/\".join(parts[:-1]) + \"/\"\n self.name = parts[-1]\n elif len(args) == 2:\n path = args[0]\n self.name = args[1]\n else:\n path = \"../../figures/\"\n # Old\n # self.name = 'hello_kitty_2020_6_9_2--28--4.npz'\n self.name = \"hello_kitty_2020_6_8_22--1--51.npz\"\n # New\n # self.name = 'hello_kitty_2020_6_15_22--27--16.npz'\n # self.name = 'hello_kitty_2020_6_15_15--50--18.npz'\n self.name = self.name.split(\".\")[0]\n try:\n self.file = np.load(path + self.name + \".npz\")\n except Exception:\n sys.exit(print(f\"Could not open file {path + self.name}\"))\n self.g = self.file[\"power\"]\n\n def shade(self):\n dots_x = []\n dots_y = []\n for i, d in enumerate(self.file[\"dots\"][1]):\n arg = np.argwhere(self.file[\"angle\"] == self.file[\"angle\"][int(d)])\n dots_x = np.r_[dots_x, arg[:1, 0]]\n dots_y = np.r_[dots_y, np.ones(len(arg[:1, 0])) * self.file[\"dots\"][2][i]]\n\n s = set(self.file[\"dots\"][0])\n for i in s:\n mask = np.argwhere(self.file[\"dots\"][0] == i)\n xs = []\n y_min = []\n y_max = []\n for x in range(30):\n arg = np.argwhere(dots_x[mask].flatten() == x)\n if bool(arg.any()):\n xs.append(x)\n y_min.append(np.min(dots_y[mask][arg]))\n y_max.append(np.max(dots_y[mask][arg]))\n plt.fill_between(xs, y_min, y_max, color=\"g\", alpha=0.8)\n x, y = xs[-1], (y_max[-1] + y_min[-1]) / 2\n txt = plt.text(\n x,\n y,\n r\"$\\mathrm{}$\".format(int(i)),\n color=\"k\",\n va=\"center\",\n ha=\"right\",\n fontsize=15,\n )\n txt.set_path_effects([PathEffects.withStroke(linewidth=1, 
foreground=\"w\")])\n\n def shade2p0(self, *args):\n \"\"\"Mark points on the plasma line power plot\n that map to any number of energy (eV) intervals.\n\n *args can be any number of lists\n or tuples of length 2 (E_min, E_max)\n \"\"\"\n l = const.c / 430e6\n deg = self.file[\"angle\"][: self.file[\"fr\"].shape[1]]\n E_plasma = (\n 0.5\n * const.m_e\n * (self.file[\"fr\"] * l / (2 * np.cos(deg * np.pi / 180) ** (1))) ** 2\n / const.eV\n )\n for a in args:\n try:\n if len(a) == 2:\n m = (a[0] < E_plasma) & (E_plasma < a[1])\n self.g[:, :30][m] = np.nan\n except Exception:\n pass\n\n def plot_it(self):\n # self.shade2p0([15.88, 18.72], [22.47, 23.75], [60, 64])\n # self.shade2p0([20.29, 21.99], [22.45, 23.82], (25.38, 27.03), [32.82, 34.33], [46, 47], [61.55, 65])\n f = plt.figure(figsize=(8, 5))\n gs = gridspec.GridSpec(2, 1, height_ratios=[4, 1])\n ax0 = plt.subplot(gs[0])\n im = ax0.imshow(\n self.g,\n extent=[\n 0,\n len(self.file[\"angle\"]) - 1,\n np.min(self.file[\"density\"]),\n np.max(self.file[\"density\"]),\n ],\n origin=\"lower\",\n aspect=\"auto\",\n cmap=\"gist_heat\",\n )\n current_cmap = im.get_cmap()\n current_cmap.set_bad(color=\"green\", alpha=0.6)\n self.shade()\n plt.ylabel(r\"Electron number density, $n_{\\mathrm{e}}$\")\n plt.tick_params(\n axis=\"x\", which=\"both\", bottom=False, top=False, labelbottom=False\n )\n ax1 = plt.subplot(gs[1])\n ax1.plot(180 - self.file[\"angle\"], \"k\")\n plt.xlim([0, len(self.file[\"angle\"]) - 1])\n plt.yticks([150, 135, 120])\n plt.ylabel(\"Aspect angle\")\n axs = []\n axs += [ax0]\n axs += [ax1]\n gs.update(hspace=0.05)\n f.colorbar(im, ax=axs).ax.set_ylabel(\"Echo power\")\n plt.tick_params(\n axis=\"x\", which=\"both\", bottom=False, top=False, labelbottom=False\n )\n plt.savefig(f\"{self.name}.pgf\", bbox_inches=\"tight\", transparent=True)\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n PlotHK().plot_it() # $\\label{lst:plotHK}$\n" ]
[ [ "numpy.load", "numpy.min", "numpy.cos", "matplotlib.pyplot.minorticks_on", "numpy.max", "matplotlib.pyplot.savefig", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.tight_layout", "matplotlib.patheffects.withStroke", "matplotlib.pyplot.subplot", "matplotlib.pyplot.title", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "numpy.ndarray", "matplotlib.rcParams.update", "numpy.argwhere", "matplotlib.pyplot.show", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.grid", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel" ] ]
renan-cunha/intelligent-systems-project
[ "66b31a2dd3ad19d13a357fb74ba524c5884ff5ea" ]
[ "server/test_api.py" ]
[ "import pytest\nfrom flask import json\nfrom api import app\nimport os\nimport cloudpickle as cp\nimport pandas as pd\nimport numpy as np\nfrom sklearn.pipeline import Pipeline\nfrom scipy.sparse import csr_matrix\nfrom sklearn.metrics import roc_auc_score\n\ncategorize_route = \"v1/categorize\"\nvalid_categories_set = {\"Lembrancinhas\", \"Bebê\", \"Decoração\", \"Outros\",\n \"Papel e Cia\", \"Bijuterias e Jóias\"}\n\nTEST_PRODUCTS_PATH = os.getenv(\"TEST_PRODUCTS_PATH\")\nTEST_PRODUCTS_CSV_PATH = os.path.join(\"../\", \"data\", \"test_products.csv\")\nwith open(TEST_PRODUCTS_PATH, \"r\") as json_file:\n test_json = json.load(json_file)\n\n\ndef categorize_request(input_data):\n return app.test_client().post(categorize_route,\n data=json.dumps(input_data),\n content_type=\"application/json\")\n\n\[email protected](\"input_data\", [\n None,\n \"\",\n {},\n {\"products\": []},\n {\"products\": [{\"title\": \"\"}]},\n {\"products\": [{\"concatenated_tags\": \"\"}]},\n {\"products\": [{\"other1\": \"\", \"other2\": \"\"}]}\n ])\ndef test_request_with_invalid_data(input_data):\n response = categorize_request(input_data)\n\n assert response.status_code == 400\n assert response.data == b\"(Bad Request)\"\n\n\[email protected](\"input_data\", [\n {\"products\": [{\"title\": None, \"concatenated_tags\": None}]},\n {\"products\": [{\"title\": \"\", \"concatenated_tags\": \"\"}]},\n {\"products\": [{\"title\": \"\", \"concatenated_tags\": \"\", \"other\": \"\"}]},\n {\"products\": [{\"title\": \"a\", \"concatenated_tags\": \"a\"},\n {\"title\": \"b\", \"concatenated_tags\": \"b\"}]},\n test_json])\ndef test_request_with_valid_data(input_data):\n response = categorize_request(input_data)\n\n assert response.status_code == 200\n assert len(response.json[\"categories\"]) == len(input_data['products'])\n assert set(response.json['categories']).issubset(valid_categories_set)\n\ndef load_model():\n with open(os.getenv(\"MODEL_PATH\"), \"rb\") as file:\n return cp.load(file)\n\ndef load_data():\n data = pd.read_csv(TEST_PRODUCTS_CSV_PATH)\n string_columns = data.select_dtypes(\"object\").columns.tolist()\n data.loc[:, string_columns] = data.loc[:, string_columns].fillna(\"\")\n return data\n\n\ndef test_check_columns(): \n data = load_data()\n expected = ['title', 'query', 'concatenated_tags']\n\n assert np.all(pd.Series(expected).isin(data.columns))\n\ndef test_load_pipeline_model():\n model = load_model()\n expected = Pipeline\n assert expected == model.__class__\n\ndef test_column_concatenation():\n data = load_data()\n model = load_model()\n\n expected = data[\"title\"] + \" \" + data[\"concatenated_tags\"]\n assert expected.equals(model[\"preprocessor\"][\"text_column_concatenation\"].transform(data))\n\ndef test_preprocessor_pipeline_output_class():\n data = load_data()\n model = load_model()\n\n expected = csr_matrix\n assert expected == model[\"preprocessor\"].transform(data).__class__\n\ndef test_pipeline_predict():\n data = load_data()\n model = load_model()\n labels = model.classes_\n\n y_true = data[\"category\"]\n y_proba = model.predict_proba(data)\n\n assert roc_auc_score(y_true, y_proba, multi_class=\"ovr\") > 0.97\n" ]
[ [ "pandas.read_csv", "sklearn.metrics.roc_auc_score", "pandas.Series" ] ]
AAmineRYT/C106
[ "23f96e82f580ce1cd859571e14f69be19dc47525" ]
[ "setup.py" ]
[ "import plotly.express as px\nimport csv\nimport numpy as np\n\ndef plotFigure(data_path):\n with open(data_path) as csv_file:\n df = csv.DictReader(csv_file)\n fig = px.scatter(df,x=\"Temperature\", y=\"Ice-cream Sales\")\n fig.show()\n\ndef getDataSource(data_path):\n ice_cream_sales = []\n cold_drink_sales = []\n with open(data_path) as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n ice_cream_sales.append(float(row[\"Temperature\"]))\n cold_drink_sales.append(float(row[\"Ice-cream Sales\"]))\n\n return {\"x\" : ice_cream_sales, \"y\": cold_drink_sales}\n\ndef findCorrelation(datasource):\n correlation = np.corrcoef(datasource[\"x\"], datasource[\"y\"])\n print(\"Correlation between Temperature vs Ice Cream Sales :- \\n--->\",correlation[0,1])\n\ndef setup():\n data_path = \"data/Ice-Cream vs Cold-Drink vs Temperature - Ice Cream Sale vs Temperature data.csv\"\n\n datasource = getDataSource(data_path)\n findCorrelation(datasource)\n plotFigure(data_path)\n\nsetup()\n" ]
[ [ "numpy.corrcoef" ] ]
omerferhatt/ml-on-genes
[ "ead86d9e8bcf83c3bbc3f15528c93eb766c637cd" ]
[ "models/decision_tree.py" ]
[ "import numpy as np\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import cross_val_score\n\n\ndef train_decision_tree(x_train, y_train, fold):\n dt = DecisionTreeClassifier()\n scores = cross_val_score(dt, x_train, y_train, cv=fold)\n print(f\"Decision Tree\\n\"\n f\"\\tAccuracy: %0.3f\\n\" % scores.mean())\n\n return scores.mean()\n" ]
[ [ "sklearn.model_selection.cross_val_score", "sklearn.tree.DecisionTreeClassifier" ] ]
Jeffrey0Liao/2020-2021-Final-Year-Project-Joint-Sentimental-Analysis-Based-on-Tree-topology
[ "7aa8fe1ce901a61c41bd8d51a22d6dc313b1740b" ]
[ "model.py" ]
[ "import torch as th\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nimport torch.nn.functional as F\nimport numpy as np\n\nimport dgl\nimport dgl.function as fn\n\nclass TreeLSTMCell(nn.Module):\n def __init__(self, x_size, h_size):\n super(TreeLSTMCell, self).__init__()\n self.W_iou = nn.Linear(x_size, 3 * h_size, bias=False)\n self.U_iou = nn.Linear(2 * h_size, 3 * h_size, bias=False)\n self.b_iou = nn.Parameter(th.zeros(1, 3 * h_size))\n self.U_f = nn.Linear(2 * h_size, 2 * h_size)\n\n def message_func(self, edges):\n return {'h': edges.src['h'], 'c': edges.src['c']}\n\n def reduce_func(self, nodes):\n # concatenate h_jl for equation (1), (2), (3), (4)\n h_cat = nodes.mailbox['h'].view(nodes.mailbox['h'].size(0), -1)\n # equation (2)\n f = th.sigmoid(self.U_f(h_cat)).view(*nodes.mailbox['h'].size())\n # second term of equation (5)\n c = th.sum(f * nodes.mailbox['c'], 1)\n return {'iou': self.U_iou(h_cat), 'c': c}\n\n def apply_node_func(self, nodes):\n # equation (1), (3), (4)\n iou = nodes.data['iou'] + self.b_iou\n i, o, u = th.chunk(iou, 3, 1)\n i, o, u = th.sigmoid(i), th.sigmoid(o), th.tanh(u)\n # equation (5)\n c = i * u + nodes.data['c']\n # equation (6)\n h = o * th.tanh(c)\n return {'h' : h, 'c' : c}\n\nclass Attention(nn.Module):\n def __init__(self, embed_size, feature_size):\n super(Attention, self).__init__()\n \n self.embed_size = embed_size\n self.feature_size = feature_size\n \n self.linear_in = nn.Linear(feature_size, embed_size, bias=False)\n self.linear_out = nn.Linear(embed_size+feature_size, embed_size)\n \n def forward(self, sent, img, mask):\n # sent: snetence_len * embed_size\n # img: num_region * feature_size\n snetence_len = sent.size(0)\n num_region = img.size(0)\n \n # img_in: num_region * embed_size\n img_in = self.linear_in(img)\n \n atten = th.mm(sent, img_in.transpose(0, 1))\n #atten.data.masked_fill(mask, -1e6)\n atten = F.softmax(atten, dim=1)\n # atten: snetence_len * num_region\n context = th.mm(atten, img)\n # context: snetence_len * feature_size\n output = th.cat((context, sent), dim=1) # output: snetence_len * (feature_size+embed_size)\n output = th.tanh(self.linear_out(output))\n # output: snetence_len * embed_size\n return output\n\nclass TreeLSTM(nn.Module):\n def __init__(self,\n num_vocabs,\n x_size,\n h_size,\n feature_size, \n num_classes,\n dropout,\n pretrained_emb=None):\n super(TreeLSTM, self).__init__()\n self.x_size = x_size\n self.embedding = nn.Embedding(num_vocabs, x_size)\n self.attention = Attention(x_size, feature_size)\n if pretrained_emb is not None:\n print('Using glove')\n self.embedding.from_pretrained(pretrained_emb)\n self.embedding.weight.requires_grad = True\n self.dropout = nn.Dropout(dropout)\n self.linear = nn.Linear(h_size, num_classes)\n self.cell = TreeLSTMCell(x_size, h_size)\n\n def forward(self, batch, h, c):\n \"\"\"Compute tree-lstm prediction given a batch.\n\n Parameters\n ----------\n batch : dgl.data.SSTBatch\n The data batch.\n h : Tensor\n Initial hidden state.\n c : Tensor\n Initial cell state.\n\n Returns\n -------\n logits : Tensor\n The prediction of each node.\n \"\"\"\n g = batch.graph\n # to heterogenous graph\n g = dgl.graph(g.edges())\n # feed embedding\n embeds = self.embedding(batch.wordid * batch.mask)\n attn_mask = batch.mask.expand(batch.image.shape[0], batch.wordid.shape[0]).T\n attn_embeds = self.attention(embeds, batch.image, attn_mask)\n g.ndata['iou'] = self.cell.W_iou(self.dropout(attn_embeds)) * 
batch.mask.float().unsqueeze(-1)\n g.ndata['h'] = h\n g.ndata['c'] = c\n # propagate\n dgl.prop_nodes_topo(g,\n message_func=self.cell.message_func,\n reduce_func=self.cell.reduce_func,\n apply_node_func=self.cell.apply_node_func)\n # compute logits\n h = self.dropout(g.ndata.pop('h'))\n logits = self.linear(h)\n return logits\n" ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.cat", "torch.nn.Dropout", "torch.sigmoid", "torch.mm", "torch.nn.functional.softmax", "torch.tanh", "torch.nn.Embedding", "torch.chunk", "torch.sum" ] ]
Anita1017/nlp-recipes
[ "a5cd2303187239799ae0b1597a7c16eb99a97108", "a5cd2303187239799ae0b1597a7c16eb99a97108" ]
[ "utils_nlp/eval/SentEval/senteval/utils.py", "utils_nlp/models/transformers/abstractive_summarization_bertsum.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport numpy as np\nimport re\nimport inspect\nfrom torch import optim\n\n\ndef create_dictionary(sentences):\n words = {}\n for s in sentences:\n for word in s:\n if word in words:\n words[word] += 1\n else:\n words[word] = 1\n words['<s>'] = 1e9 + 4\n words['</s>'] = 1e9 + 3\n words['<p>'] = 1e9 + 2\n # words['<UNK>'] = 1e9 + 1\n sorted_words = sorted(words.items(), key=lambda x: -x[1]) # inverse sort\n id2word = []\n word2id = {}\n for i, (w, _) in enumerate(sorted_words):\n id2word.append(w)\n word2id[w] = i\n\n return id2word, word2id\n\n\ndef cosine(u, v):\n return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))\n\n\nclass dotdict(dict):\n \"\"\" dot.notation access to dictionary attributes \"\"\"\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n\ndef get_optimizer(s):\n \"\"\"\n Parse optimizer parameters.\n Input should be of the form:\n - \"sgd,lr=0.01\"\n - \"adagrad,lr=0.1,lr_decay=0.05\"\n \"\"\"\n if \",\" in s:\n method = s[:s.find(',')]\n optim_params = {}\n for x in s[s.find(',') + 1:].split(','):\n split = x.split('=')\n assert len(split) == 2\n assert re.match(\"^[+-]?(\\d+(\\.\\d*)?|\\.\\d+)$\", split[1]) is not None\n optim_params[split[0]] = float(split[1])\n else:\n method = s\n optim_params = {}\n\n if method == 'adadelta':\n optim_fn = optim.Adadelta\n elif method == 'adagrad':\n optim_fn = optim.Adagrad\n elif method == 'adam':\n optim_fn = optim.Adam\n elif method == 'adamax':\n optim_fn = optim.Adamax\n elif method == 'asgd':\n optim_fn = optim.ASGD\n elif method == 'rmsprop':\n optim_fn = optim.RMSprop\n elif method == 'rprop':\n optim_fn = optim.Rprop\n elif method == 'sgd':\n optim_fn = optim.SGD\n assert 'lr' in optim_params\n else:\n raise Exception('Unknown optimization method: \"%s\"' % method)\n\n # check that we give good parameters to the optimizer\n expected_args = inspect.getfullargspec(optim_fn.__init__)[0]\n assert expected_args[:2] == ['self', 'params']\n if not all(k in expected_args[2:] for k in optim_params.keys()):\n raise Exception('Unexpected parameters: expected \"%s\", got \"%s\"' % (\n str(expected_args[2:]), str(optim_params.keys())))\n\n return optim_fn, optim_params\n", "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\n# This script reuses some code from https://github.com/nlpyang/Presumm\n# This script reuses some code from https://github.com/huggingface/transformers/\n# Add to noticefile\n\nimport logging\nimport os\nimport pickle\nfrom collections import namedtuple\n\nimport torch\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm\nfrom transformers import AutoTokenizer, BertModel\n\nfrom utils_nlp.common.pytorch_utils import (\n compute_training_steps,\n get_amp,\n get_device,\n move_model_to_device,\n parallelize_model,\n)\nfrom utils_nlp.eval import compute_rouge_python\nfrom utils_nlp.models.transformers.bertsum import model_builder\nfrom utils_nlp.models.transformers.bertsum.model_builder import AbsSummarizer\nfrom utils_nlp.models.transformers.bertsum.predictor import build_predictor\nfrom utils_nlp.models.transformers.common import Transformer\n\nMODEL_CLASS = 
{\"bert-base-uncased\": BertModel}\n\nlogger = logging.getLogger(__name__)\n\n\ndef fit_to_block_size(sequence, block_size, pad_token_id):\n \"\"\" Adapt the source and target sequences' lengths to the block size.\n If the sequence is shorter we append padding token to the right of the sequence.\n\n Args:\n sequence (list): sequence to be truncated to padded\n block_size (int): length of the output\n\n Returns:\n sequence (list): padded or shortend list\n\n \"\"\"\n if len(sequence) > block_size:\n return sequence[:block_size]\n else:\n sequence.extend([pad_token_id] * (block_size - len(sequence)))\n return sequence\n\n\ndef build_mask(sequence, pad_token_id):\n \"\"\" Builds the mask. The attention mechanism will only attend to positions\n with value 1.\n\n Args:\n sequence (list): sequences for which the mask is built for.\n pad_token_id (long): padding token id for which the mask is 0.\n\n Returns:\n mask (list): sequences of 1s and 0s.\n\n \"\"\"\n mask = torch.ones_like(sequence)\n idx_pad_tokens = sequence == pad_token_id\n mask[idx_pad_tokens] = 0\n return mask\n\n\ndef compute_token_type_ids(batch, separator_token_id):\n \"\"\" Segment embeddings as described in [1]\n The values {0,1} were found in the repository [2].\n\n Args:\n batch (torch.Tensor, size [batch_size, block_size]):\n Batch of input.\n separator_token_id: int\n The value of the token that separates the segments.\n\n Returns:\n torch.Tensor, size [batch_size, block_size]): segment embeddings.\n\n [1] Liu, Yang, and Mirella Lapata. \"Text summarization with pretrained encoders.\"\n arXiv preprint arXiv:1908.08345 (2019).\n [2] https://github.com/nlpyang/PreSumm (/src/prepro/data_builder.py, commit fac1217)\n \"\"\"\n batch_embeddings = []\n for sequence in batch:\n sentence_num = -1\n embeddings = []\n for s in sequence:\n if s == separator_token_id:\n sentence_num += 1\n embeddings.append(sentence_num % 2)\n batch_embeddings.append(embeddings)\n return torch.tensor(batch_embeddings)\n\n\nclass BertSumAbsProcessor:\n \"\"\"Class for preprocessing abstractive summarization data for\n BertSumAbs algorithm.\"\"\"\n\n def __init__(\n self,\n model_name=\"bert-base-uncased\",\n to_lower=True,\n cache_dir=\".\",\n max_src_len=640,\n max_tgt_len=140,\n ):\n \"\"\" Initialize the preprocessor.\n\n Args:\n model_name (str, optional): Transformer model name used in preprocessing.\n check MODEL_CLASS for supported models. Defaults to \"bert-base-cased\".\n to_lower (bool, optional): Whether to convert all letters to lower case\n during tokenization. This is determined by if a cased model is used.\n Defaults to True, which corresponds to a uncased model.\n cache_dir (str, optional): Directory to cache the tokenizer.\n Defaults to \".\".\n max_src_len (int, optional): Max number of tokens that be used\n as input. Defaults to 640.\n max_tgt_len (int, optional): Max number of tokens that be used\n as in target. 
Defaults to 140.\n\n \"\"\"\n self.model_name = model_name\n self.tokenizer = AutoTokenizer.from_pretrained(\n model_name,\n do_lower_case=to_lower,\n cache_dir=cache_dir,\n output_loading_info=False,\n )\n\n self.symbols = {\n \"BOS\": self.tokenizer.vocab[\"[unused0]\"],\n \"EOS\": self.tokenizer.vocab[\"[unused1]\"],\n \"PAD\": self.tokenizer.vocab[\"[PAD]\"],\n \"EOQ\": self.tokenizer.vocab[\"[unused2]\"],\n }\n\n self.sep_token = \"[SEP]\"\n self.cls_token = \"[CLS]\"\n self.pad_token = \"[PAD]\"\n self.tgt_bos = self.symbols[\"BOS\"]\n self.tgt_eos = self.symbols[\"EOS\"]\n\n self.max_src_len = max_src_len\n self.max_tgt_len = max_tgt_len\n\n @staticmethod\n def list_supported_models():\n return list(MODEL_CLASS)\n\n @property\n def model_name(self):\n return self._model_name\n\n @model_name.setter\n def model_name(self, value):\n if value not in self.list_supported_models():\n raise ValueError(\n \"Model name {} is not supported by BertSumAbsProcessor. \"\n \"Call 'BertSumAbsProcessor.list_supported_models()' to \"\n \"get all supported model names.\".format(value)\n )\n\n self._model_name = value\n\n @staticmethod\n def get_inputs(batch, device, model_name, train_mode=True):\n \"\"\"\n Creates an input dictionary given a model name.\n\n Args:\n batch (object): A Batch containing input ids, segment ids,\n masks for the input ids and source text. If train_mode is True, it\n also contains the target ids and the number of tokens\n in the target and target text.\n device (torch.device): A PyTorch device.\n model_name (bool): Model name used to format the inputs.\n train_mode (bool, optional): Training mode flag.\n Defaults to True.\n\n Returns:\n dict: Dictionary containing input ids, segment ids, sentence class ids,\n masks for the input ids. Target ids and number of tokens in the target are\n only returned when train_mode is True.\n \"\"\"\n\n if model_name.split(\"-\")[0] in [\"bert\"]:\n if train_mode:\n # labels must be the last\n\n return {\n \"src\": batch.src,\n \"segs\": batch.segs,\n \"mask_src\": batch.mask_src,\n \"tgt\": batch.tgt,\n \"tgt_num_tokens\": batch.tgt_num_tokens,\n }\n else:\n return {\n \"src\": batch.src,\n \"segs\": batch.segs,\n \"mask_src\": batch.mask_src,\n }\n else:\n raise ValueError(\"Model not supported: {}\".format(model_name))\n\n def collate(self, data, block_size, device, train_mode=True):\n \"\"\" Collate formats the data passed to the data loader.\n In particular we tokenize the data batch after batch to avoid keeping them\n all in memory.\n\n Args:\n data (list of (str, str)): input data to be loaded.\n block_size (long): size of the encoded data to be passed into the data loader\n device (torch.device): A PyTorch device.\n train_mode (bool, optional): Training mode flag.\n Defaults to True.\n\n Returns:\n namedtuple: a nametuple containing input ids, segment ids,\n masks for the input ids and source text. 
If train_mode is True, it\n also contains the target ids and the number of tokens\n in the target and target text.\n \"\"\"\n data = [x for x in data if not len(x[\"src\"]) == 0] # remove empty_files\n if len(data) == 0:\n return None\n stories = [\" \".join(d[\"src\"]) for d in data]\n if train_mode is True and \"tgt\" in data[0]:\n summaries = [\" \".join(d[\"tgt\"]) for d in data]\n encoded_text = [self.preprocess(d[\"src\"], d[\"tgt\"]) for d in data]\n else:\n encoded_text = [self.preprocess(d[\"src\"], None) for d in data]\n\n encoded_stories = torch.tensor(\n [\n fit_to_block_size(story, block_size, self.tokenizer.pad_token_id)\n for story, _ in encoded_text\n ]\n )\n encoder_token_type_ids = compute_token_type_ids(\n encoded_stories, self.tokenizer.cls_token_id\n )\n encoder_mask = build_mask(encoded_stories, self.tokenizer.pad_token_id)\n\n if train_mode and \"tgt\" in data[0]:\n encoded_summaries = torch.tensor(\n [\n [self.tgt_bos]\n + fit_to_block_size(\n summary, block_size - 2, self.tokenizer.pad_token_id\n )\n + [self.tgt_eos]\n for _, summary in encoded_text\n ]\n )\n summary_num_tokens = [\n encoded_summary.ne(self.tokenizer.pad_token_id).sum()\n for encoded_summary in encoded_summaries\n ]\n\n Batch = namedtuple(\n \"Batch\",\n [\n \"src\",\n \"segs\",\n \"mask_src\",\n \"tgt\",\n \"tgt_num_tokens\",\n \"src_str\",\n \"tgt_str\",\n ],\n )\n batch = Batch(\n src=encoded_stories.to(device),\n segs=encoder_token_type_ids.to(device),\n mask_src=encoder_mask.to(device),\n tgt_num_tokens=torch.stack(summary_num_tokens).to(device),\n tgt=encoded_summaries.to(device),\n src_str=stories,\n tgt_str=summaries,\n )\n else:\n Batch = namedtuple(\"Batch\", [\"src\", \"segs\", \"mask_src\"])\n batch = Batch(\n src=encoded_stories.to(device),\n segs=encoder_token_type_ids.to(device),\n mask_src=encoder_mask.to(device),\n )\n\n return batch\n\n def preprocess(self, story_lines, summary_lines=None):\n \"\"\"preprocess multiple data points\n\n Args:\n story_lines (list of strings): List of sentences.\n targets (list of strings, optional): List of sentences.\n Defaults to None, which means it doesn't include summary and is\n not training data.\n\n Returns:\n If summary_lines is None, return list of list of token ids. 
Otherwise,\n return a tuple of (list of list of token ids, list of list of token ids).\n\n \"\"\"\n story_lines_token_ids = []\n for line in story_lines:\n try:\n if len(line) <= 0:\n continue\n story_lines_token_ids.append(\n self.tokenizer.encode(line, max_length=self.max_src_len)\n )\n except:\n print(line)\n raise\n story_token_ids = [\n token for sentence in story_lines_token_ids for token in sentence\n ]\n if summary_lines:\n summary_lines_token_ids = []\n for line in summary_lines:\n try:\n if len(line) <= 0:\n continue\n summary_lines_token_ids.append(\n self.tokenizer.encode(line, max_length=self.max_tgt_len)\n )\n except:\n print(line)\n raise\n summary_token_ids = [\n token for sentence in summary_lines_token_ids for token in sentence\n ]\n return story_token_ids, summary_token_ids\n else:\n return story_token_ids, None\n\n\ndef validate(summarizer, validate_dataset):\n \"\"\" validation function to be used optionally in fine tuning.\n\n Args:\n summarizer(BertSumAbs): The summarizer under fine tuning.\n validate_dataset (SummarizationDataset): dataset for validation.\n\n Returns:\n string: A string which contains the rouge score on a subset of\n the validation dataset.\n\n \"\"\"\n TOP_N = 8\n shortened_dataset = validate_dataset.shorten(TOP_N)\n reference_summaries = [\n \" \".join(t).rstrip(\"\\n\") for t in shortened_dataset.get_target()\n ]\n generated_summaries = summarizer.predict(\n shortened_dataset, num_gpus=1, batch_size=4\n )\n assert len(generated_summaries) == len(reference_summaries)\n print(\"###################\")\n print(\"prediction is {}\".format(generated_summaries[0]))\n print(\"reference is {}\".format(reference_summaries[0]))\n\n rouge_score = compute_rouge_python(\n cand=generated_summaries, ref=reference_summaries\n )\n return \"rouge score: {}\".format(rouge_score)\n\n\nclass BertSumAbs(Transformer):\n \"\"\"class which performs abstractive summarization fine tuning and\n prediction based on BertSumAbs model \"\"\"\n\n def __init__(\n self,\n processor,\n model_name=\"bert-base-uncased\",\n finetune_bert=True,\n cache_dir=\".\",\n label_smoothing=0.1,\n test=False,\n max_pos_length=768,\n ):\n \"\"\"Initialize an object of BertSumAbs.\n\n Args:\n processor (BertSumAbsProcessor): A processor with symbols, tokenizers\n and collate functions that are used in finetuning and prediction.\n model_name (str, optional:) Name of the pretrained model which is used\n to initialize the encoder of the BertSumAbs model.\n check MODEL_CLASS for supported models. Defaults to \"bert-base-uncased\".\n finetune_bert (bool, option): Whether the bert model in the encoder is\n finetune or not. Defaults to True.\n cache_dir (str, optional): Directory to cache the tokenizer.\n Defaults to \".\".\n label_smoothing (float, optional): The amount of label smoothing.\n Value range is [0, 1]. Defaults to 0.1.\n test (bool, optional): Whether the class is initiated for test or not.\n It must be True if the class obj is only initialized to load a\n checkpoint for test/inferencing. Defaults to False.\n max_pos_length (int, optional): maximum postional embedding length for the\n input. Defaults to 768.\n \"\"\"\n model = MODEL_CLASS[model_name].from_pretrained(\n model_name, cache_dir=cache_dir, num_labels=0, output_loading_info=False\n )\n super().__init__(model_name=model_name, model=model, cache_dir=cache_dir)\n\n if model_name not in self.list_supported_models():\n raise ValueError(\n \"Model name {} is not supported by BertSumAbs. 
\"\n \"Call 'BertSumAbs.list_supported_models()' to get all supported model \"\n \"names.\".format(value)\n )\n\n self.model_class = MODEL_CLASS[model_name]\n self.cache_dir = cache_dir\n self.max_pos_length = max_pos_length\n\n self.model = AbsSummarizer(\n temp_dir=cache_dir,\n finetune_bert=finetune_bert,\n checkpoint=None,\n label_smoothing=label_smoothing,\n symbols=processor.symbols,\n test=test,\n max_pos=self.max_pos_length,\n )\n self.processor = processor\n self.optim_bert = None\n self.optim_dec = None\n\n @staticmethod\n def list_supported_models():\n return list(MODEL_CLASS.keys())\n\n def fit(\n self,\n train_dataset,\n num_gpus=None,\n gpu_ids=None,\n batch_size=4,\n local_rank=-1,\n max_steps=5e4,\n warmup_steps_bert=20000,\n warmup_steps_dec=10000,\n learning_rate_bert=0.002,\n learning_rate_dec=0.2,\n optimization_method=\"adam\",\n max_grad_norm=0,\n beta1=0.9,\n beta2=0.999,\n decay_method=\"noam\",\n gradient_accumulation_steps=1,\n report_every=10,\n save_every=1000,\n verbose=True,\n seed=None,\n fp16=False,\n fp16_opt_level=\"O2\",\n world_size=1,\n rank=0,\n validation_function=None,\n checkpoint=None,\n **kwargs,\n ):\n \"\"\"\n Fine-tune pre-trained transofmer models for extractive summarization.\n\n Args:\n train_dataset (SummarizationDataset): Training dataset.\n num_gpus (int, optional): The number of GPUs to use. If None, all\n available GPUs will be used. If set to 0 or GPUs are not available,\n CPU device will be used. Defaults to None.\n gpu_ids (list): List of GPU IDs to be used.\n If set to None, the first num_gpus GPUs will be used.\n Defaults to None.\n batch_size (int, optional): Maximum number of tokens in each batch.\n local_rank (int, optional): Local_rank for distributed training on GPUs.\n Local rank means the ranking of the current GPU device on the current\n node. Defaults to -1, which means non-distributed training.\n max_steps (int, optional): Maximum number of training steps. Defaults to 5e5.\n warmup_steps_bert (int, optional): Number of steps taken to increase\n learning rate from 0 to `learning_rate` for tuning the BERT encoder.\n Defaults to 2e4.\n warmup_steps_dec (int, optional): Number of steps taken to increase\n learning rate from 0 to `learning_rate` for tuning the decoder.\n Defaults to 1e4.\n learning_rate_bert (float, optional): Learning rate of the optimizer\n for the encoder. Defaults to 0.002.\n learning_rate_dec (float, optional): Learning rate of the optimizer\n for the decoder. Defaults to 0.2.\n optimization_method (string, optional): Optimization method used in fine\n tuning. Defaults to \"adam\".\n max_grad_norm (float, optional): Maximum gradient norm for gradient clipping.\n Defaults to 0.\n beta1 (float, optional): The exponential decay rate for the first moment\n estimates. Defaults to 0.9.\n beta2 (float, optional): The exponential decay rate for the second-moment\n estimates. This value should be set close to 1.0 on problems with\n a sparse gradient. Defaults to 0.99.\n decay_method (string, optional): learning rate decrease method.\n Default to 'noam'.\n gradient_accumulation_steps (int, optional): Number of batches to accumulate\n gradients on between each model parameter update. Defaults to 1.\n report_every (int, optional): The interval by steps to print out the\n training log. Defaults to 10.\n save_every (int, optional): The interval by steps to save the finetuned \n model. 
Defaults to 100.\n verbose (bool, optional): Whether to print out the training log.\n Defaults to True.\n seed (int, optional): Random seed used to improve reproducibility.\n Defaults to None.\n fp16 (bool, optional): Whether to use mixed precision training.\n Defaults to False.\n fp16_opt_level (str, optional): optimization level, refer to\n https://nvidia.github.io/apex/amp.html#opt-levels for details.\n Value choices are: \"O0\", \"O1\", \"O2\", \"O3\". Defaults to \"O2\".\n world_size (int, optional): Total number of GPUs that will be used.\n Defaults to 1.\n rank (int, optional): Global rank of the current GPU in distributed\n training. It's calculated with the rank of the current node in the\n cluster/world and the `local_rank` of the device in the current node.\n See an example in :file: `examples/text_summarization/\n abstractive_summarization_bertsum_cnndm_distributed_train.py`.\n Defaults to 0.\n validation_function (function, optional): function used in fitting to\n validate the performance. Default to None.\n checkpoint (str, optional): file path for a checkpoint based on which the\n training continues. Default to None.\n \"\"\"\n\n # get device\n device, num_gpus = get_device(\n num_gpus=num_gpus, gpu_ids=gpu_ids, local_rank=local_rank\n )\n # move model to devices\n print(\"device is {}\".format(device))\n if checkpoint:\n checkpoint = torch.load(checkpoint, map_location=\"cpu\")\n self.model.load_checkpoint(checkpoint[\"model\"])\n self.model = move_model_to_device(model=self.model, device=device)\n\n # init optimizer\n self.optim_bert = model_builder.build_optim_bert(\n self.model,\n optim=optimization_method,\n lr_bert=learning_rate_bert,\n warmup_steps_bert=warmup_steps_bert,\n max_grad_norm=max_grad_norm,\n beta1=beta1,\n beta2=beta2,\n )\n self.optim_dec = model_builder.build_optim_dec(\n self.model,\n optim=optimization_method,\n lr_dec=learning_rate_dec,\n warmup_steps_dec=warmup_steps_dec,\n max_grad_norm=max_grad_norm,\n beta1=beta1,\n beta2=beta2,\n )\n\n optimizers = [self.optim_bert, self.optim_dec]\n\n self.amp = get_amp(fp16)\n if self.amp:\n self.model, optim = self.amp.initialize(\n self.model, optimizers, opt_level=fp16_opt_level\n )\n\n global_step = 0\n if checkpoint:\n if checkpoint[\"optimizers\"]:\n for i in range(len(optimizers)):\n model_builder.load_optimizer_checkpoint(\n optimizers[i], checkpoint[\"optimizers\"][i]\n )\n if self.amp and \"amp\" in checkpoint and checkpoint[\"amp\"]:\n self.amp.load_state_dict(checkpoint[\"amp\"])\n if \"global_step\" in checkpoint and checkpoint[\"global_step\"]:\n global_step = checkpoint[\"global_step\"] / world_size\n print(\"global_step is {}\".format(global_step))\n\n self.model = parallelize_model(\n model=self.model,\n device=device,\n num_gpus=num_gpus,\n gpu_ids=gpu_ids,\n local_rank=local_rank,\n )\n\n if local_rank == -1:\n sampler = RandomSampler(train_dataset)\n else:\n sampler = DistributedSampler(\n train_dataset, num_replicas=world_size, rank=rank\n )\n\n def collate_fn(data):\n return self.processor.collate(\n data, block_size=self.max_pos_length, device=device\n )\n\n train_dataloader = DataLoader(\n train_dataset, sampler=sampler, batch_size=batch_size, collate_fn=collate_fn\n )\n\n # compute the max number of training steps\n max_steps = compute_training_steps(\n train_dataloader,\n max_steps=max_steps,\n gradient_accumulation_steps=gradient_accumulation_steps,\n )\n\n super().fine_tune(\n train_dataloader=train_dataloader,\n get_inputs=BertSumAbsProcessor.get_inputs,\n device=device,\n 
num_gpus=num_gpus,\n max_steps=max_steps,\n global_step=global_step,\n max_grad_norm=max_grad_norm,\n gradient_accumulation_steps=gradient_accumulation_steps,\n verbose=verbose,\n seed=seed,\n report_every=report_every,\n save_every=save_every,\n clip_grad_norm=False,\n optimizer=optimizers,\n scheduler=None,\n fp16=fp16,\n amp=self.amp,\n validation_function=validation_function,\n )\n\n # release GPU memories\n self.model.cpu()\n torch.cuda.empty_cache()\n\n self.save_model(max_steps)\n\n def predict(\n self,\n test_dataset,\n num_gpus=None,\n gpu_ids=None,\n local_rank=-1,\n batch_size=16,\n alpha=0.6,\n beam_size=5,\n min_length=15,\n max_length=150,\n fp16=False,\n verbose=True,\n ):\n \"\"\"\n Predict the summarization for the input data iterator.\n\n Args:\n test_dataset (SummarizationDataset): Dataset for which the summary\n to be predicted.\n num_gpus (int, optional): The number of GPUs used in prediction.\n Defaults to 1.\n gpu_ids (list): List of GPU IDs to be used.\n If set to None, the first num_gpus GPUs will be used.\n Defaults to None.\n local_rank (int, optional): Local rank of the device in distributed\n inferencing. Defaults to -1, which means non-distributed inferencing.\n batch_size (int, optional): The number of test examples in each batch.\n Defaults to 16.\n alpha (float, optional): Length penalty. Defaults to 0.6.\n beam_size (int, optional): Beam size of beam search. Defaults to 5.\n min_length (int, optional): Minimum number of tokens in the output sequence.\n Defaults to 15.\n max_length (int, optional): Maximum number of tokens in output\n sequence. Defaults to 150.\n fp16 (bool, optional): Whether to use half-precision model for prediction.\n Defaults to False.\n verbose (bool, optional): Whether to print out the training log.\n Defaults to True.\n\n Returns:\n List of strings which are the summaries\n\n \"\"\"\n device, num_gpus = get_device(\n num_gpus=num_gpus, gpu_ids=gpu_ids, local_rank=local_rank\n )\n\n # move model to devices\n def this_model_move_callback(model, device):\n model = move_model_to_device(model, device)\n return parallelize_model(\n model, device, num_gpus=num_gpus, gpu_ids=gpu_ids, local_rank=local_rank\n )\n\n if fp16:\n self.model = self.model.half()\n\n self.model = move_model_to_device(self.model, device)\n self.model.eval()\n\n predictor = build_predictor(\n self.processor.tokenizer,\n self.processor.symbols,\n self.model,\n alpha=alpha,\n beam_size=beam_size,\n min_length=min_length,\n max_length=max_length,\n )\n predictor = this_model_move_callback(predictor, device)\n self.model = parallelize_model(\n self.model,\n device,\n num_gpus=num_gpus,\n gpu_ids=gpu_ids,\n local_rank=local_rank,\n )\n\n test_sampler = SequentialSampler(test_dataset)\n\n def collate_fn(data):\n return self.processor.collate(\n data, self.max_pos_length, device, train_mode=False\n )\n\n test_dataloader = DataLoader(\n test_dataset,\n sampler=test_sampler,\n batch_size=batch_size,\n collate_fn=collate_fn,\n )\n print(\"dataset length is {}\".format(len(test_dataset)))\n\n def format_summary(translation):\n \"\"\" Transforms the output of the `from_batch` function\n into nicely formatted summaries.\n \"\"\"\n raw_summary = translation\n summary = (\n raw_summary.replace(\"[unused0]\", \"\")\n .replace(\"[unused3]\", \"\")\n .replace(\"[CLS]\", \"\")\n .replace(\"[SEP]\", \"\")\n .replace(\"[PAD]\", \"\")\n .replace(\"[unused1]\", \"\")\n .replace(r\" +\", \" \")\n .replace(\" [unused2] \", \".\")\n .replace(\"[unused2]\", \"\")\n .strip()\n )\n\n return 
summary\n\n def generate_summary_from_tokenid(preds, pred_score):\n batch_size = preds.size()[0] # batch.batch_size\n translations = []\n for b in range(batch_size):\n if len(preds[b]) < 1:\n pred_sents = \"\"\n else:\n pred_sents = self.processor.tokenizer.convert_ids_to_tokens(\n [int(n) for n in preds[b] if int(n) != 0]\n )\n pred_sents = \" \".join(pred_sents).replace(\" ##\", \"\")\n translations.append(pred_sents)\n return translations\n\n generated_summaries = []\n\n for batch in tqdm(\n test_dataloader, desc=\"Generating summary\", disable=not verbose\n ):\n input = self.processor.get_inputs(batch, device, \"bert\", train_mode=False)\n translations, scores = predictor(**input)\n\n translations_text = generate_summary_from_tokenid(translations, scores)\n summaries = [format_summary(t) for t in translations_text]\n generated_summaries.extend(summaries)\n\n # release GPU memories\n self.model.cpu()\n torch.cuda.empty_cache()\n\n return generated_summaries\n\n def save_model(self, global_step=None, full_name=None):\n \"\"\"\n save the trained model.\n\n Args:\n global_step (int, optional): The number of steps that the model has been\n finetuned for. Defaults to None.\n full_name (str, optional): File name to save the model's `state_dict()`.\n If it's None, the model is going to be saved under \"fine_tuned\" folder\n of the cached directory of the object. Defaults to None.\n \"\"\"\n model_to_save = (\n self.model.module if hasattr(self.model, \"module\") else self.model\n ) # Take care of distributed/parallel training\n\n if full_name is None:\n output_model_dir = os.path.join(self.cache_dir, \"fine_tuned\")\n os.makedirs(self.cache_dir, exist_ok=True)\n os.makedirs(output_model_dir, exist_ok=True)\n full_name = os.path.join(output_model_dir, \"bertsumabs.pt\")\n else:\n path, filename = os.path.split(full_name)\n print(path)\n os.makedirs(path, exist_ok=True)\n\n checkpoint = {\n \"optimizers\": [self.optim_bert.state_dict(), self.optim_dec.state_dict()],\n \"model\": model_to_save.state_dict(),\n \"amp\": self.amp.state_dict() if self.amp else None,\n \"global_step\": global_step,\n \"max_pos_length\": self.max_pos_length,\n }\n\n logger.info(\"Saving model checkpoint to %s\", full_name)\n try:\n print(\"saving through pytorch to {}\".format(full_name))\n torch.save(checkpoint, full_name)\n except OSError:\n try:\n print(\"saving as pickle\")\n pickle.dump(checkpoint, open(full_name, \"wb\"))\n except Exception:\n raise\n except Exception:\n raise\n" ]
[ [ "numpy.dot", "numpy.linalg.norm" ], [ "torch.utils.data.RandomSampler", "torch.stack", "torch.save", "torch.utils.data.SequentialSampler", "torch.cuda.empty_cache", "torch.tensor", "torch.utils.data.DataLoader", "torch.ones_like", "torch.load", "torch.utils.data.distributed.DistributedSampler" ] ]
terrifyzhao/information_extract
[ "d950c25911f5a8d9f7e10eb9ecfd53f897cef8d0" ]
[ "final/train.py" ]
[ "import json\nfrom final.model import model\nfrom keras.callbacks import Callback\nimport keras.backend as K\nfrom tqdm import tqdm\nimport numpy as np\nimport os\nfrom gensim.models import Word2Vec\nimport jieba\nimport ahocorasick\n\nos.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"TRUE\"\n\ntrain_model_path = 'out2/train_model.weights'\nsubject_model_path = 'out2/subject_model.weights'\nobject_model_path = 'out2/object_model.weights'\n\n# 读取数据\ntrain_data = json.load(open('data/train.json'))\ndev_data = json.load(open('data/dev.json'))[:500]\nid2predicate, predicate2id = json.load(open('data/schemas.json'))\nid2predicate = {int(i): j for i, j in id2predicate.items()}\nid2char, char2id = json.load(open('data/vocab.json'))\nnum_classes = len(id2predicate)\n\n# 词向量\nword2vec = Word2Vec.load('word2vec.model')\nid2word = {i + 1: j for i, j in enumerate(word2vec.wv.index2word)}\nword2id = {j: i for i, j in id2word.items()}\nword2vec = word2vec.wv.syn0\nword2vec = np.concatenate([np.zeros((1, word2vec.shape[1])), word2vec])\n\n# subject最大长度\nmax_s = 14\n# text最大长度\nmax_len = 140\n\ntrain_model, subject_model, object_model = model(len(char2id), max_len, len(predicate2id))\n\n\nclass SopSearch:\n def __init__(self):\n self.ac_s = ahocorasick.Automaton()\n self.ac_o = ahocorasick.Automaton()\n\n self.sop_dic = {}\n self.sop_total = {}\n for i, d in enumerate(tqdm(train_data, desc='build SOP search')):\n for s, p, o in d['spo_list']:\n self.ac_s.add_word(s, s)\n self.ac_o.add_word(o, o)\n if (s, o) not in self.sop_dic:\n self.sop_dic[(s, o)] = set()\n if (s, p, o) not in self.sop_total:\n self.sop_total[(s, p, o)] = set()\n self.sop_dic[(s, o)].add(p)\n self.sop_total[(s, p, o)].add(i)\n\n self.ac_s.make_automaton()\n self.ac_o.make_automaton()\n\n def find(self, text, i=None):\n spo = set()\n for s in self.ac_s.iter(text):\n for o in self.ac_o.iter(text):\n if (s[1], o[1]) in self.sop_dic.keys():\n for p in self.sop_dic.get((s[1], o[1])):\n if i is None:\n spo.add((s[1], p, o[1]))\n elif self.sop_total[(s[1], p, o[1])] - {i}:\n spo.add((s[1], p, o[1]))\n return list(spo)\n\n\nspo_search = SopSearch()\n\n\ndef seq_padding(X, padding=0):\n L = [len(x) for x in X]\n ML = max(L)\n return np.array([\n np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x for x in X\n ])\n\n\ndef sent2vec(S):\n V = []\n for s in S:\n V.append([])\n for w in s:\n for _ in w:\n V[-1].append(word2id.get(w, 0))\n V = seq_padding(V)\n V = word2vec[V]\n return V\n\n\nclass DataGenerator:\n def __init__(self, data, batch_size=64):\n self.data = data\n self.batch_size = batch_size\n self.steps = len(self.data) // self.batch_size\n if len(self.data) % self.batch_size != 0:\n self.steps += 1\n\n def __len__(self):\n return self.steps\n\n def __iter__(self):\n while True:\n char_index, word, s_indexs, s_stars, s_ends, po_stars, po_ends, pres_s, pres_po = [], [], [], [], [], [], [], [], []\n # star = time.time()\n for d_i, d in enumerate(self.data):\n text = d['text'][:max_len]\n s_star_v, s_end_v = np.zeros(len(text)), np.zeros(len(text))\n po_star_v, po_end_v = np.zeros((len(text), num_classes)), np.zeros((len(text), num_classes))\n pre_s = np.zeros((len(text), 2))\n pre_po = np.zeros((len(text), num_classes, 2))\n for sop in d['spo_list']:\n s_index = [char2id.get(c, 1) for c in sop[0]]\n s_index = s_index[:max_s]\n s_star = text.find(sop[0])\n po_star = text.find(sop[2])\n\n if s_star != -1 and po_star != -1:\n s_end = s_star + len(sop[0]) - 1\n po_end = po_star + len(sop[2]) - 1\n p_index = predicate2id[sop[1]]\n\n 
s_star_v[s_star] = 1\n s_end_v[s_end] = 1\n\n po_star_v[po_star][p_index] = 1\n po_end_v[po_end][p_index] = 1\n\n for s, p, o in spo_search.find(text, d_i):\n pre_s[text.find(s), 0] = 1\n pre_s[text.find(s) + len(s) - 1, 1] = 1\n pre_po[text.find(o), predicate2id[p], 0] = 1\n pre_po[text.find(o) + len(o) - 1, predicate2id[p], 1] = 1\n\n pre_po = pre_po.reshape(len(text), -1)\n\n char_index.append([char2id.get(c, 1) for c in text])\n word.append(list(jieba.cut(text)))\n s_indexs.append(s_index)\n s_stars.append(s_star_v)\n s_ends.append(s_end_v)\n po_stars.append(po_star_v)\n po_ends.append(po_end_v)\n pres_s.append(pre_s)\n pres_po.append(pre_po)\n\n if len(char_index) == self.batch_size or d == self.data[-1]:\n s_indexs = seq_padding(s_indexs)\n word = sent2vec(word)\n word = seq_padding(word)\n char_index = seq_padding(char_index)\n s_stars = seq_padding(s_stars)\n s_ends = seq_padding(s_ends)\n po_stars = seq_padding(po_stars, np.zeros(num_classes))\n po_ends = seq_padding(po_ends, np.zeros(num_classes))\n pres_s = seq_padding(pres_s, np.zeros(2))\n pres_po = seq_padding(pres_po, np.zeros(num_classes * 2))\n\n yield [char_index, word, s_indexs, s_stars, s_ends, po_stars, po_ends, pres_s, pres_po], None\n char_index, word, s_indexs, s_stars, s_ends, po_stars, po_ends, pres_s, pres_po = [], [], [], [], [], [], [], [], []\n\n\ndef extract_items(text):\n R = []\n text = text[:max_len]\n char_index = [char2id.get(c, 1) for c in text]\n pre_po = np.zeros((len(text), num_classes, 2))\n pre_s = np.zeros((len(text), 2))\n for s, p, o in spo_search.find(text):\n pre_s[text.find(s), 0] = 1\n pre_s[text.find(s) + len(s) - 1, 1] = 1\n pre_po[text.find(o), predicate2id[p], 0] = 1\n pre_po[text.find(o) + len(o) - 1, predicate2id[p], 1] = 1\n\n word = sent2vec([list(jieba.cut(text))])\n pre_s = np.expand_dims(pre_s, 0)\n char_index = np.array([char_index])\n s_star, s_end = subject_model.predict([char_index, word, pre_s])\n\n s_star, s_end = s_star[0, :, 0], s_end[0, :, 0]\n # index\n s_star_out, s_end_out = np.where(s_star > 0.5)[0], np.where(s_end > 0.5)[0]\n # one-hot\n s_star_in, s_end_in = np.where(s_star > 0.5, 1, 0), np.where(s_end > 0.5, 1, 0)\n s_star, s_end = s_star_out, s_end_out\n subjects = []\n for i in s_star:\n j = s_end[s_end >= i]\n if len(j) > 0:\n j = j[0]\n subject = text[i: j + 1]\n subjects.append((subject, i, j))\n\n # subjects.append(('阿斯达', 1, 4))\n # subjects.append(('得到的', 2, 5))\n if subjects:\n\n s_index = []\n for subject in subjects:\n s_index.append([char2id.get(c, 1) for c in subject[0]])\n # s_index = [char2id.get(c, 1) for c in subjects[0][0]]\n # s_index = np.array([s_index])\n s_index = seq_padding(s_index)\n\n s_star_in = np.array([s_star_in])\n s_end_in = np.array([s_end_in])\n pre_po = pre_po.reshape(pre_po.shape[0], -1)\n pre_po = np.expand_dims(pre_po, 0)\n\n char_index = np.repeat(char_index, len(subjects), 0)\n word = np.repeat(word, len(subjects), 0)\n s_star_in = np.repeat(s_star_in, len(subjects), 0)\n s_end_in = np.repeat(s_end_in, len(subjects), 0)\n pre_s = np.repeat(pre_s, len(subjects), 0)\n pre_po = np.repeat(pre_po, len(subjects), 0)\n\n o1, o2 = object_model.predict([char_index, word, s_index, s_star_in, s_end_in, pre_s, pre_po])\n\n for i, subject in enumerate(subjects):\n _oo1, _oo2 = np.where(o1[i] > 0.5), np.where(o2[i] > 0.5)\n for _ooo1, _c1 in zip(*_oo1):\n for _ooo2, _c2 in zip(*_oo2):\n if _ooo1 <= _ooo2 and _c1 == _c2:\n _object = text[_ooo1: _ooo2 + 1]\n _predicate = id2predicate[_c1]\n R.append((subject[0], _predicate, _object))\n 
break\n zhuanji, gequ = [], []\n for s, p, o in R[:]:\n if p == u'妻子':\n R.append((o, u'丈夫', s))\n elif p == u'丈夫':\n R.append((o, u'妻子', s))\n if p == u'所属专辑':\n zhuanji.append(o)\n gequ.append(s)\n spo_list = set()\n for s, p, o in R:\n if p in [u'歌手', u'作词', u'作曲']:\n if s in zhuanji and s not in gequ:\n continue\n spo_list.add((s, p, o))\n return list(spo_list)\n else:\n return []\n\n\nclass Evaluate(Callback):\n def __init__(self):\n super().__init__()\n self.F1 = []\n self.best = 0\n self.passed = 0\n self.stage = 0\n\n def on_batch_begin(self, batch, logs=None):\n if self.passed < self.params['steps']:\n lr = (self.passed + 1) / self.params['steps'] * 1e-3\n K.set_value(self.model.optimizer.lr, lr)\n self.passed += 1\n\n def on_epoch_end(self, epoch, logs=None):\n f1, precision, recall = self.evaluate()\n self.F1.append(f1)\n if f1 >= self.best:\n self.best = f1\n train_model.save_weights(train_model_path)\n subject_model.save_weights(subject_model_path)\n object_model.save_weights(object_model_path)\n print('f1: %.4f, precision: %.4f, recall: %.4f, best f1: %.4f\\n' % (f1, precision, recall, self.best))\n if epoch + 1 == 50 or (\n self.stage == 0 and epoch > 10 and\n (f1 < 0.5 or np.argmax(self.F1) < len(self.F1) - 8)\n ):\n self.stage = 1\n train_model.load_weights(train_model_path)\n K.set_value(self.model.optimizer.lr, 1e-4)\n K.set_value(self.model.optimizer.iterations, 0)\n opt_weights = K.batch_get_value(self.model.optimizer.weights)\n opt_weights = [w * 0. for w in opt_weights]\n K.batch_set_value(zip(self.model.optimizer.weights, opt_weights))\n\n def evaluate(self):\n A, B, C = 1e-10, 1e-10, 1e-10\n\n for d in tqdm(dev_data):\n text = extract_items(d['text'])\n R = set(text)\n spo = []\n for z in d['spo_list']:\n spo.append(tuple(z))\n T = set(spo)\n A += len(R & T)\n B += len(R)\n C += len(T)\n return 2 * A / (B + C), A / B, A / C\n\n\ndef evaluate():\n A, B, C = 1e-10, 1e-10, 1e-10\n\n for d in dev_data:\n text = extract_items(d['text'])\n R = set(text)\n spo = []\n for z in d['spo_list']:\n spo.append(tuple(z))\n print('pred:', text, ' ori:', str(spo))\n T = set(spo)\n A += len(R & T)\n B += len(R)\n C += len(T)\n return 2 * A / (B + C), A / B, A / C\n\n\nif __name__ == '__main__':\n is_test = 1\n batch_size = 512\n # train_model.load_weights(train_model_path)\n if is_test:\n subject_model.load_weights(subject_model_path)\n object_model.load_weights(object_model_path)\n evaluate()\n else:\n train_ge = DataGenerator(train_data, batch_size=batch_size)\n callback = Evaluate()\n train_model.fit_generator(train_ge.__iter__(),\n steps_per_epoch=len(train_ge),\n epochs=200,\n callbacks=[callback])\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.where", "numpy.argmax", "numpy.expand_dims" ] ]
cnheider/imgaug
[ "f0a7432205975e4435c81894a63bb147af67c476" ]
[ "checks/check_elastic_transformation.py" ]
[ "from __future__ import print_function, division\nimport imgaug as ia\nfrom imgaug import augmenters as iaa\nimport imageio\nimport numpy as np\nfrom skimage import data\n\ndef main():\n image = data.astronaut()\n image = ia.imresize_single_image(image, (128, 128))\n\n #image = ia.imresize_single_image(np.tile(data.checkerboard()[:,:,np.newaxis], (1, 1, 3)), (124, 124))\n #image = ia.pad(image, top=2, right=2, bottom=2, left=2)\n\n #image = np.tile(np.linspace(0, 255, 128).astype(np.float32).reshape(1, 128), (128, 1))\n #image = np.tile(image[:, :, np.newaxis], (1, 1, 3))\n #image = ia.pad(image, top=2, right=2, bottom=2, left=2)\n #image = np.rot90(image)\n\n #image = np.zeros((126, 126+20, 3), dtype=np.uint8)\n #image[5:10, :, :] = 255\n #image = ia.pad(image, top=2, right=2, bottom=2, left=2, cval=128)\n\n #image = np.tile(np.arange(0, 100, 1).reshape(10, 10)[:,:,np.newaxis], (1, 1, 3))\n\n print(\"alpha=vary, sigma=0.25\")\n augs = [iaa.ElasticTransformation(alpha=alpha, sigma=0.25) for alpha in np.arange(0.0, 50.0, 0.1)]\n images_aug = [aug.augment_image(image) for aug in augs]\n ia.imshow(ia.draw_grid(images_aug, cols=10))\n\n print(\"alpha=vary, sigma=1.0\")\n augs = [iaa.ElasticTransformation(alpha=alpha, sigma=1.0) for alpha in np.arange(0.0, 50.0, 0.1)]\n images_aug = [aug.augment_image(image) for aug in augs]\n ia.imshow(ia.draw_grid(images_aug, cols=10))\n\n print(\"alpha=vary, sigma=3.0\")\n augs = [iaa.ElasticTransformation(alpha=alpha, sigma=3.0) for alpha in np.arange(0.0, 50.0, 0.1)]\n images_aug = [aug.augment_image(image) for aug in augs]\n ia.imshow(ia.draw_grid(images_aug, cols=10))\n\n print(\"alpha=vary, sigma=5.0\")\n augs = [iaa.ElasticTransformation(alpha=alpha, sigma=5.0) for alpha in np.arange(0.0, 50.0, 0.1)]\n images_aug = [aug.augment_image(image) for aug in augs]\n ia.imshow(ia.draw_grid(images_aug, cols=10))\n\n print(\"alpha=1.0, sigma=vary\")\n augs = [iaa.ElasticTransformation(alpha=1.0, sigma=sigma) for sigma in np.arange(0.0, 50.0, 0.1)]\n images_aug = [aug.augment_image(image) for aug in augs]\n ia.imshow(ia.draw_grid(images_aug, cols=10))\n\n print(\"alpha=10.0, sigma=vary\")\n augs = [iaa.ElasticTransformation(alpha=10.0, sigma=sigma) for sigma in np.arange(0.0, 50.0, 0.1)]\n images_aug = [aug.augment_image(image) for aug in augs]\n ia.imshow(ia.draw_grid(images_aug, cols=10))\n\n kps = ia.KeypointsOnImage(\n [ia.Keypoint(x=1, y=1),\n ia.Keypoint(x=50, y=24), ia.Keypoint(x=42, y=96), ia.Keypoint(x=88, y=106), ia.Keypoint(x=88, y=53),\n ia.Keypoint(x=0, y=0), ia.Keypoint(x=128, y=128), ia.Keypoint(x=-20, y=30), ia.Keypoint(x=20, y=-30),\n ia.Keypoint(x=-20, y=-30)],\n shape=image.shape\n )\n\n images = []\n params = [\n (0.0, 0.0),\n (0.2, 0.2),\n #(0.25, 0.25),\n (2.0, 0.25),\n #(6.0, 0.25),\n #(12.0, 0.25),\n #(0.25, 1.5),\n #(2.0, 1.50),\n #(6.0, 1.50),\n #(12.0, 1.50),\n (0.25, 3.0),\n (2.0, 3.0),\n (6.0, 3.0),\n (12.0, 3.0),\n (50.0, 5.0),\n (100.0, 5.0),\n (100.0, 10.0)\n ]\n\n for (alpha, sigma) in params:\n images_row = []\n kps_row = []\n seqs_row = [\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"constant\", cval=0, order=0),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"constant\", cval=128, order=0),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"constant\", cval=255, order=0),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"constant\", cval=0, order=1),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"constant\", cval=128, order=1),\n 
iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"constant\", cval=255, order=1),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"constant\", cval=0, order=3),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"constant\", cval=128, order=3),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"constant\", cval=255, order=3),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"nearest\", order=0),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"nearest\", order=1),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"nearest\", order=2),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"nearest\", order=3),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"reflect\", order=0),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"reflect\", order=1),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"reflect\", order=2),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"reflect\", order=3),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"wrap\", order=0),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"wrap\", order=1),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"wrap\", order=2),\n iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode=\"wrap\", order=3)\n ]\n\n for seq in seqs_row:\n seq_det = seq.to_deterministic()\n image_aug = seq_det.augment_image(image)\n kps_aug = seq_det.augment_keypoints([kps])[0]\n image_aug_kp = np.copy(image_aug)\n #image_aug_kp = kps.draw_on_image(image_aug_kp, color=[0, 0, 255])\n image_aug_kp = kps_aug.draw_on_image(image_aug_kp, size=3)\n images_row.append(image_aug_kp)\n #exit()\n\n images.append(np.hstack(images_row))\n\n ia.imshow(np.vstack(images))\n imageio.imwrite(\"elastic_transformations.jpg\", np.vstack(images))\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.hstack", "numpy.copy", "numpy.arange", "numpy.vstack" ] ]
danilobellini/Axelrod
[ "2c9212553e06095c24adcb82a5979279cbdf45fb" ]
[ "axelrod/tests/unit/test_eigen.py" ]
[ "\"\"\"Test for eigen.py.\"\"\"\n\nimport unittest\n\nimport numpy\nfrom axelrod.eigen import _normalise, principal_eigenvector\nfrom numpy.testing import assert_array_almost_equal\n\n\nclass FunctionCases(unittest.TestCase):\n def test_identity_matrices(self):\n for size in range(2, 6):\n mat = numpy.identity(size)\n evector, evalue = principal_eigenvector(mat)\n self.assertAlmostEqual(evalue, 1)\n assert_array_almost_equal(evector, _normalise(numpy.ones(size)))\n\n def test_2x2_matrix(self):\n mat = numpy.array([[2, 1], [1, 2]])\n evector, evalue = principal_eigenvector(mat)\n self.assertAlmostEqual(evalue, 3)\n assert_array_almost_equal(evector, numpy.dot(mat, evector) / evalue)\n assert_array_almost_equal(evector, _normalise(numpy.array([1, 1])))\n\n def test_3x3_matrix(self):\n mat = numpy.array([[1, 2, 0], [-2, 1, 2], [1, 3, 1]])\n evector, evalue = principal_eigenvector(\n mat, maximum_iterations=None, max_error=1e-10\n )\n self.assertAlmostEqual(evalue, 3)\n assert_array_almost_equal(evector, numpy.dot(mat, evector) / evalue)\n assert_array_almost_equal(evector, _normalise(numpy.array([0.5, 0.5, 1])))\n\n def test_4x4_matrix(self):\n mat = numpy.array([[2, 0, 0, 0], [1, 2, 0, 0], [0, 1, 3, 0], [0, 0, 1, 3]])\n evector, evalue = principal_eigenvector(\n mat, maximum_iterations=None, max_error=1e-10\n )\n self.assertAlmostEqual(evalue, 3, places=3)\n assert_array_almost_equal(evector, numpy.dot(mat, evector) / evalue)\n assert_array_almost_equal(\n evector, _normalise(numpy.array([0, 0, 0, 1])), decimal=4\n )\n" ]
[ [ "numpy.identity", "numpy.array", "numpy.dot", "numpy.ones" ] ]
davidsvaughn/har-pytorch
[ "334733a1e870637c9077d16fc15e0b1954a6dfc5" ]
[ "preproc.py" ]
[ "import torch\nfrom torch import nn\n\ndef add_magnitudes(x):\n G,A = x[:,:3,:], x[:,3:,:]\n \n ## add magnitude of accel and gyro...\n a = torch.norm(A, dim=1, keepdim=True)\n g = torch.norm(G, dim=1, keepdim=True)\n# x = torch.cat((G, g, A, a), 1)\n \n return torch.cat((G, g, A, a), 1)" ]
[ [ "torch.norm", "torch.cat" ] ]
TAREQMAHIN/NLP-Task-Assignment-TAREQ-
[ "4edd8ace9063a09ee38e643731badd490517187b" ]
[ "train.py" ]
[ "from keras.models import Sequential\r\nfrom keras.layers import Dense, Embedding, LSTM\r\nfrom keras.utils.np_utils import to_categorical\r\nfrom keras.preprocessing.text import Tokenizer\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport pickle as pkl\r\nfrom sklearn import preprocessing\r\nimport json\r\nimport random\r\n\r\n# load the user configs\r\nwith open('config.json') as f:\r\n\tconfig = json.load(f)\r\n\r\nrandom.seed(config[\"seed\"])\r\n\r\nMAX_NB_WORDS = config[\"MAX_NB_WORDS\"]\r\nMAX_SEQUENCE_LENGTH=config[\"MAX_SEQUENCE_LENGTH\"]\r\nVALIDATION_SPLIT=config[\"VALIDATION_SPLIT\"]\r\nEMBEDDING_DIM=300\r\nLSTM_OUT=config[\"LSTM_OUT\"]\r\nBATCH_SIZE=config[\"BATCH_SIZE\"]\r\nEPOCHS=config[\"EPOCHS\"]\r\nGLOVE_EMBEDDING_PATH=config[\"GLOVE_EMBEDDING_PATH\"]\r\nVECTORIZER_PATH=config[\"VECTORIZER_PATH\"]\r\nLABEL_ENCODER_PATH=config[\"LABEL_ENCODER_PATH\"]\r\nmodel_json_file=config[\"model_json_file\"]\r\nweights=config[\"weights\"]\r\ninput_file=config[\"input_file\"]\r\n\r\ndf = pd.read_csv(input_file,sep=\",,,\",header=None ,names=['question','type'])\r\ndf['type']=df['type'].str.strip()\r\ndf['question'] = df['question'].apply(lambda x: x.lower())\r\ndf['question'] = df['question'].apply((lambda x: re.sub('[^a-zA-z0-9\\s]','',x)))\r\n\r\nNUM_CLASSES=len(df['type'].unique())\r\nprint(df['type'].value_counts())\r\n\r\ntokenizer = Tokenizer(num_words=MAX_NB_WORDS, split=' ')\r\ntokenizer.fit_on_texts(df['question'].values)\r\nX = tokenizer.texts_to_sequences(df['question'].values)\r\nX = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)\r\nY = df['type']\r\nwith open(VECTORIZER_PATH, 'wb') as fil:\r\n pkl.dump(tokenizer, fil)\r\n\r\nword_index = tokenizer.word_index\r\nprint('Found %s unique tokens.' % len(word_index))\r\n\r\n\r\nle = preprocessing.LabelEncoder()\r\nle.fit(Y)\r\nY=le.transform(Y)\r\nlabels = to_categorical(np.asarray(Y))\r\nwith open(LABEL_ENCODER_PATH, 'wb') as fil:\r\n pkl.dump(le, fil)\r\n\r\n# split the data into a training set and a validation set\r\nindices = np.arange(X.shape[0])\r\nnp.random.shuffle(indices)\r\nX = X[indices]\r\nlabels = labels[indices]\r\nnb_validation_samples = int(VALIDATION_SPLIT * X.shape[0])\r\n\r\nx_train = X[:-nb_validation_samples]\r\ny_train = labels[:-nb_validation_samples]\r\nx_val = X[-nb_validation_samples:]\r\ny_val = labels[-nb_validation_samples:]\r\n\r\nembeddings_index = {}\r\nf = open(GLOVE_EMBEDDING_PATH, encoding=\"utf8\")\r\nfor line in f:\r\n values = line.split()\r\n word = values[0]\r\n coefs = np.asarray(values[1:], dtype='float32')\r\n embeddings_index[word] = coefs\r\nf.close()\r\n\r\nprint('Found %s word vectors.' 
% len(embeddings_index))\r\n\r\nembedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))\r\nfor word, i in word_index.items():\r\n embedding_vector = embeddings_index.get(word)\r\n if embedding_vector is not None:\r\n # words not found in embedding index will be all-zeros.\r\n embedding_matrix[i] = embedding_vector\r\n\r\nembedding_layer = Embedding(len(word_index) + 1,\r\n EMBEDDING_DIM,\r\n weights=[embedding_matrix],\r\n input_length=MAX_SEQUENCE_LENGTH,\r\n trainable=False)\r\n\r\nmodel = Sequential()\r\nmodel.add(embedding_layer)\r\nmodel.add(LSTM(LSTM_OUT, dropout_U=0.25, dropout_W=0.25))\r\nmodel.add(Dense(NUM_CLASSES,activation='softmax'))\r\nmodel.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])\r\nprint(model.summary())\r\n\r\ncheckpoint = ModelCheckpoint(weights, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)\r\nearly = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')\r\n\r\nmodel.fit(x_train, y_train,\r\n batch_size=BATCH_SIZE,\r\n epochs=EPOCHS,\r\n validation_data=(x_val, y_val),\r\n callbacks = [checkpoint,early])\r\n\r\n# serialize model to JSON\r\nmodel_json = model.to_json()\r\nwith open(model_json_file, \"w\") as json_file:\r\n json_file.write(model_json)\r\n# serialize weights to HDF5\r\n# model.save_weights(\"model.h5\")\r\nprint(\"Saved model to disk\")\r\n" ]
[ [ "sklearn.preprocessing.LabelEncoder", "numpy.asarray", "numpy.random.shuffle", "numpy.arange", "pandas.read_csv" ] ]
Davidbeltran99/maffin
[ "d4a80ff5e2a3623f04e871a8d08a0b12e764cf9d" ]
[ "cogs/feature/analytics.py" ]
[ "from discord.ext import commands\n\nimport discord\n# import asyncio\n# import timeago\nimport datetime\nimport timedelta\nimport utils.db\nfrom utils.checks import dev\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport timeago\n\nclass analytics(commands.Cog):\n\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\t\tself.converter = commands.MemberConverter()\n\t\tself.dynamic_connection_data = {}\n\n\n\tdef validate_settings(self, settings, guild):\n\t\ttry:\n\t\t\tif settings[\"enabled\"]:\n\t\t\t\treturn True\n\t\texcept KeyError as e:\n\t\t\treturn f\"analytics, Missing field {e}\"\n\n\t\treturn True\n\n\t# moved, disconnected, connected\n\t# @commands.Cog.listener()\n\t# async def on_ready(self):\n\t# \tawait self.crawl()\n\n\n\t# async def on_reload(self):\n\t# \tawait self.crawl()\n\n\tasync def crawl(self):\n\t\tprint(\"crawlin\")\n\t\tnow = datetime.datetime.now()\n\t\tfor g in self.bot.guilds:\n\t\t\tfor c in g.voice_channels:\n\t\t\t\tfor m in c.members:\n\t\t\t\t\tawait self.log_connection(m, now)\n\n\[email protected]()\n\tasync def on_voice_state_update(self, member, before, after):\n\t\tsettings = await self.bot.cogs[\"Settings\"].get(member.guild.id, \"analytics\")\n\t\tif not settings['enabled']:\n\t\t\treturn\n\n\t\taction = voice_state_diff(before, after)\n\n\t\tnow = datetime.datetime.now()\n\t\tif action == \"connected\":\n\t\t\tawait self.log_connection(member, now)\n\t\telif action == \"disconnected\":\n\t\t\tawait self.log_disconnection(member, before.channel, now)\n\t\telif action == \"move\":\n\t\t\tawait self.log_disconnection(member, before.channel, now)\n\t\t\tawait self.log_connection(member, now)\n\n\tasync def log_connection(self, member, now):\n\t\ttry:\n\t\t\tself.dynamic_connection_data[member.guild.id][member.id] = now\n\t\texcept KeyError:\n\t\t\tself.dynamic_connection_data[member.guild.id] = {}\n\t\t\tself.dynamic_connection_data[member.guild.id][member.id] = now\n\n\tasync def log_disconnection(self, member, channel, now):\n\t\ttry:\n\t\t\tstart = self.dynamic_connection_data[member.guild.id][member.id]\n\t\t\tdel self.dynamic_connection_data[member.guild.id][member.id]\n\t\texcept KeyError:\n\t\t\treturn\n\n\t\tconnection_time = now - start\n\n\t\td = {\n\t\t\t\"member_id\": member.id,\n\t\t\t\"voice_channel id\": channel.id,\n\t\t\t\"guild_id\": member.guild.id,\n\t\t\t\"length_mins\": connection_time.total_seconds() / 60,\n\t\t\t\"datetime\": now\n\t\t}\n\n\t\tawait utils.db.insertOne(f\"analytics.voice.a{member.guild.id}\", d)\n\n\n\n\n\n\t@dev()\n\[email protected]()\n\tasync def pull_text_data_from_api(self, ctx, n = 5):\n\t\ttoday = datetime.datetime.now()\n\t\tlast_day = today - timedelta.Timedelta(days=n)\n\n\t\tthis_marker = {\n\t\t\t\"from\":today,\n\t\t\t\"to\":last_day,\n\t\t\t\"id\":ctx.message.id,\n\t\t}\n\n\t\tquery_periods = [this_marker]\n\n\n\t\tquery = {\n\t\t\t\"from\": {\n\t\t\t\t\"$gte\": last_day\n\t\t\t\t}\n\t\t}\t\t\n\n\t\tcache_markers = await utils.db.find(f\"analytics.text.a{ctx.guild.id}.cache_markers\", query)\n\n\n\t\tfor marker in cache_markers:\n\t\t\tquery_periods = insert_marker(marker,query_periods)\n\t\t\n\t\tdef to_string(m):\n\t\t\tnow = datetime.datetime.now()\n\t\t\treturn f\"From {(now - m['from']).days} days go, To : {(now - m['to']).days} days go\"\n\n\n\t\ttotal_query_days = 0\n\t\tfor period in query_periods:\n\t\t\ttotal_query_days += (period[\"from\"] - period[\"to\"]).days\n\n\t\tnewline = '\\n'\n\n\t\tawait ctx.send( f\"Periods to query: 
{newline}{newline.join([to_string(p) for p in query_periods ])}\" + '\\n' + f\"total: {total_query_days} days\")\n\n\n\t\tcount = 0\n\t\tfor period in query_periods:\n\t\t\tfor channel in ctx.guild.text_channels:\n\t\t\t\ttry:\n\t\t\t\t\tasync for message in channel.history(limit=None,after=period[\"to\"],before=period[\"from\"]):\n\t\t\t\t\t\tawait self._log_message(message)\n\t\t\t\t\t\tcount += 1\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(e)\n\n\t\tfor marker in cache_markers:\n\t\t\tawait utils.db.deleteOne(f\"analytics.text.a{ctx.guild.id}.cache_markers\",marker)\n\t\t\n\t\tawait utils.db.insertOne(f\"analytics.text.a{ctx.guild.id}.cache_markers\",this_marker)\n\n\t\tawait ctx.send(f\"Cached {count} messages :thumbsup:\")\n\n\n\t@dev() # temp!\n\[email protected]()\n\tasync def insert_test_marker(self, ctx, n_from:int, n_to:int):\n\t\tfom = datetime.datetime.now() - timedelta.Timedelta(days= n_from)\n\t\tto = datetime.datetime.now() - timedelta.Timedelta(days= n_to)\n\t\t\n\n\t\tawait utils.db.insertOne(f\"analytics.text.a{ctx.guild.id}.cache_markers\", {\"to\":to,\"from\":fom})\n\n\n\n\tasync def _log_message(self,message):\n\n\t\tsettings = await self.bot.cogs[\"Settings\"].get(message.guild.id, \"analytics\")\n\t\tif not settings['enabled']:\n\t\t\treturn\n\n\n\t\tdata = {\n\t\t\t\"member_id\": message.author.id,\n\t\t\t\"text_channel_id\": message.channel.id,\n\t\t\t\"guild_id\": message.guild.id,\n\t\t\t\"length\": len(message.content),\n\t\t\t\"datetime\": message.created_at,\n\t\t\t\"qj\": False,\n\t\t\t\"qn\": False,\n\t\t\t\"message_id\": message.id\n\t\t}\n\n\t\tif message.content == \"!qj\":\n\t\t\tdata[\"qj\"] = True\n\t\telif message.content == \"!qn\":\n\t\t\tdata[\"qn\"] = True\n\n\t\tawait utils.db.updateOne(f\"analytics.text.a{message.guild.id}\",{\"message_id\":message.id}, {\"$set\":data})\n\n\[email protected]()\n\tasync def on_message(self, message):\n\t\tawait self._log_message(message)\n\n\n\n\t\n\[email protected](aliases=['g'])\n\tasync def info(self, ctx, *args):\n\n\t\tn, member = await self.parse_params(ctx, args)\n\n\t\tnow = datetime.datetime.now()\n\n\t\tquery = {\n\t\t\t\"datetime\": {\n\t\t\t\t\"$lt\": now,\n\t\t\t\t\"$gte\": now - timedelta.Timedelta(days=n)\n\t\t\t},\n\t\t\t\"member_id\": member.id,\n\t\t\t\"guild_id\": member.guild.id\n\t\t}\n\n\t\ttext_data = await utils.db.find(f\"analytics.text.a{member.guild.id}\", query)\n\t\tvoice_data = await utils.db.find(f\"analytics.voice.a{member.guild.id}\", query)\n\n\t\tdays = [now.date()]\n\n\t\tfor i in range(1, n + 1):\n\t\t\tdays.append((now - timedelta.Timedelta(days=i)).date())\n\n\t\tdays = list(reversed(days))\n\n\t\tcolumns = ['#Messages', '#Characters', \"#qj\", \"#qn\", \"#VcMins\"]\n\t\tdf = pd.DataFrame(index=days)\n\t\tfor c in columns:\n\t\t\tdf[c] = 0\n\n\t\tfor datapoint in voice_data:\n\t\t\tday = datapoint['datetime'].date()\n\t\t\tdf[\"#VcMins\"][day] += datapoint[\"length_mins\"]\n\n\t\tfor datapoint in text_data:\n\t\t\tday = datapoint['datetime'].date()\n\n\t\t\tdf['#Messages'][day] += 1\n\n\t\t\tdf['#Characters'][day] += datapoint['length']\n\n\t\t\tif datapoint['qn']:\n\t\t\t\tdf['#qn'][day] += 1\n\n\t\t\telif datapoint['qj']:\n\t\t\t\tdf['#qj'][day] += 1\n\n\t\tbar_plot(df[[\"#Characters\", \"#VcMins\"]], \"g1.png\", palette=\"hls\")\n\t\tbar_plot(df[[\"#qn\", \"#qj\"]], \"g2.png\", palette=\"Set2\")\n\n\t\twith open('g1.png', 'rb') as f:\n\t\t\twith open('g2.png', 'rb') as f2:\n\t\t\t\tfs = [discord.File(f, filename='Activity Graph.png'),\n\t\t\t\t\t discord.File(f2, 
filename='QueueBot Usage Graph.png')]\n\n\t\t\t\tawait ctx.send(\"User data for \" + member.display_name + \":\", files=fs)\n\n\tasync def parse_params(self, ctx, args):\n\t\tn = None\n\t\tmember = None\n\t\tfor a in args:\n\t\t\tif member == None:\n\t\t\t\ttry:\n\t\t\t\t\tmember = await self.converter.convert(ctx, a)\n\t\t\t\t\tcontinue\n\t\t\t\texcept discord.ext.commands.errors.BadArgument as e:\n\t\t\t\t\tpass\n\n\t\t\tif n == None:\n\t\t\t\ttry:\n\t\t\t\t\tn = int(a)\n\t\t\t\t\tif len(str(n)) > 13:\n\t\t\t\t\t\tn = None\n\t\t\t\texcept (ValueError, OverflowError):\n\t\t\t\t\tpass\n\n\t\tif n == None:\n\t\t\tn = 30\n\t\tif member == None:\n\t\t\tmember = ctx.author\n\n\t\treturn n, member\n\n\n# def insert_marker(marker,periods):\n#\n# \tdef es_antes(d1,d2):\n# \t\treturn d1 > d2\n#\n#\n# \tfor i in range(len(periods)):\n# \t\tperiod = periods[i]\n#\n# \t\tif es_antes(period[\"from\"],marker[\"from\"]) and es_antes(marker[\"to\"],period[\"to\"]):\n# \t\t\t\tperiods.insert(i+1,{\"from\":marker[\"to\"], \"to\": period[\"to\"] })\n# \t\t\t\tperiods[i][\"to\"] = marker[\"from\"]\n# \t\t\t\treturn periods\n#\n# \t\telif es_antes(period[\"from\"],marker[\"from\"]) and es_antes(period[\"to\"],marker[\"to\"]):\n# \t\t\tcontinue\n#\n#\n# \t\telse:\n# \t\t\tprint(\"DAFUQ?! analytics.py:insert_marker\")\n#\n\t\t\t\t\t\n\ndef bar_plot(df, name, palette=None):\n\tplt.close(\"all\")\n\tplt.clf()\n\tif palette != None:\n\t\tsns.set_palette(palette)\n\tplot = df.plot.bar(subplots=True)\n\n\t# plot.set(xticks=days)\n\tplt.xticks(rotation=90)\n\tfor ax in plot:\n\t\tax.set_title(\"\")\n\tfig = plot[0].get_figure()\n\n\tfig.savefig(name)\n\n\ndef voice_state_diff(before, after):\n\tif before.channel != after.channel:\n\t\tif before.self_mute and not after.self_mute:\n\t\t\treturn \"unmute\"\n\t\telif not before.self_mute and after.self_mute:\n\t\t\treturn \"mute\"\n\t\telif before.channel == None or before.afk:\n\t\t\treturn f\"connected\"\n\t\telif after.channel == None:\n\t\t\treturn f\"disconnected\"\n\t\telse:\n\t\t\treturn f\"moved\"\n\n\n\ndef make_naive(dt):\n\tc = dt\n\tres = c.astimezone()\n\tres.replace(tzinfo=None)\n\treturn res\n\n\ndef setup(bot):\n\tbot.add_cog(analytics(bot))\n" ]
[ [ "matplotlib.pyplot.clf", "matplotlib.pyplot.close", "matplotlib.pyplot.xticks", "pandas.DataFrame" ] ]
Varun0801/ga-learner-dsb-repo
[ "45ce5ae273ca66a2443d4b9417e74ef0f872d8ec" ]
[ "Probability-Basics-/code.py" ]
[ "# --------------\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# code starts here\ndf = pd.read_csv(path)\n#print(df.info())\np_a = ((df['fico'] > 700).sum())/len(df)\nprint(p_a)\np_b = ((df['purpose'] == 'debt_consolidation').sum())/len(df)\nprint(p_b)\ndf1 = df[df['purpose']== 'debt_consolidation']\np_a_b = df1[df1['fico'].astype(float) >700].shape[0]/df1.shape[0]\nprint(p_a_b)\nresult = p_a_b == p_a\nprint(result)\n# code ends here\n\n\n# --------------\n# code starts here\nprob_lp = (df['paid.back.loan'] == 'Yes').sum()/len(df)\nprint(prob_lp)\nprob_cs = (df['credit.policy'] == 'Yes').sum()/len(df)\nprint(prob_cs)\nnew_df = df[df['paid.back.loan'] == 'Yes']\nprob_pd_cs = (new_df['credit.policy'] == 'Yes').sum()/len(new_df)\nprint(prob_pd_cs)\nbayes = (prob_pd_cs*prob_lp)/prob_cs\nprint(bayes)\n# code ends here\n\n\n# --------------\n# code starts here\nplt.bar(df['purpose'],df['purpose'].index)\ndf1 = df[df['paid.back.loan'] == 'No']\ndf1\nplt.bar(df1['purpose'],df1['purpose'].index)\n# code ends here\n\n\n# --------------\n# code starts here\ninst_median = df['installment'].median()\nprint(inst_median)\ninst_mean = df['installment'].mean()\nprint(inst_mean)\nplt.hist(df['installment'])\nplt.hist(df['log.annual.inc'])\n# code ends here\n\n\n" ]
[ [ "matplotlib.pyplot.hist", "pandas.read_csv", "matplotlib.pyplot.bar" ] ]
timothyleslie/CodeBERT
[ "6767a3b7076f5b481cc228d2032c21327269c5b8" ]
[ "CodeBERT/code2nl/run.py" ]
[ "# coding=utf-8\r\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\r\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"\r\nFine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).\r\nGPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned\r\nusing a masked language modeling (MLM) loss.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nimport os\r\nimport sys\r\nimport bleu\r\nimport pickle\r\nimport torch\r\nimport json\r\nimport random\r\nimport logging\r\nimport argparse\r\nimport numpy as np\r\nfrom io import open\r\nfrom itertools import cycle\r\nimport torch.nn as nn\r\nfrom model import Seq2Seq\r\nfrom tqdm import tqdm, trange\r\nfrom torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset\r\nfrom torch.utils.data.distributed import DistributedSampler\r\nfrom transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,\r\n RobertaConfig, RobertaModel, RobertaTokenizer)\r\nMODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)}\r\n\r\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\r\n datefmt = '%m/%d/%Y %H:%M:%S',\r\n level = logging.INFO)\r\nlogger = logging.getLogger(__name__)\r\n\r\nclass Example(object):\r\n \"\"\"A single training/test example.\"\"\"\r\n def __init__(self,\r\n idx,\r\n source,\r\n target,\r\n ):\r\n self.idx = idx\r\n self.source = source\r\n self.target = target\r\n\r\ndef read_examples(filename):\r\n \"\"\"Read examples from filename.\"\"\"\r\n examples=[]\r\n with open(filename,encoding=\"utf-8\") as f:\r\n for idx, line in enumerate(f):\r\n line=line.strip()\r\n js=json.loads(line)\r\n if 'idx' not in js:\r\n js['idx']=idx\r\n code=' '.join(js['code_tokens']).replace('\\n',' ')\r\n code=' '.join(code.strip().split())\r\n nl=' '.join(js['docstring_tokens']).replace('\\n','')\r\n nl=' '.join(nl.strip().split()) \r\n examples.append(\r\n Example(\r\n idx = idx,\r\n source=code,\r\n target = nl,\r\n ) \r\n )\r\n return examples\r\n\r\n\r\nclass InputFeatures(object):\r\n \"\"\"A single training/test features for a example.\"\"\"\r\n def __init__(self,\r\n example_id,\r\n source_ids,\r\n target_ids,\r\n source_mask,\r\n target_mask,\r\n\r\n ):\r\n self.example_id = example_id\r\n self.source_ids = source_ids\r\n self.target_ids = target_ids\r\n self.source_mask = source_mask\r\n self.target_mask = target_mask \r\n \r\n\r\n\r\ndef convert_examples_to_features(examples, tokenizer, args,stage=None):\r\n features = []\r\n for example_index, example in enumerate(examples):\r\n #source\r\n source_tokens = tokenizer.tokenize(example.source)[:args.max_source_length-2]\r\n source_tokens =[tokenizer.cls_token]+source_tokens+[tokenizer.sep_token]\r\n source_ids = tokenizer.convert_tokens_to_ids(source_tokens) \r\n source_mask = [1] * 
(len(source_tokens))\r\n padding_length = args.max_source_length - len(source_ids)\r\n source_ids+=[tokenizer.pad_token_id]*padding_length\r\n source_mask+=[0]*padding_length\r\n \r\n #target\r\n if stage==\"test\":\r\n target_tokens = tokenizer.tokenize(\"None\")\r\n else:\r\n target_tokens = tokenizer.tokenize(example.target)[:args.max_target_length-2]\r\n target_tokens = [tokenizer.cls_token]+target_tokens+[tokenizer.sep_token] \r\n target_ids = tokenizer.convert_tokens_to_ids(target_tokens)\r\n target_mask = [1] *len(target_ids)\r\n padding_length = args.max_target_length - len(target_ids)\r\n target_ids+=[tokenizer.pad_token_id]*padding_length\r\n target_mask+=[0]*padding_length \r\n \r\n if example_index < 5:\r\n if stage=='train':\r\n logger.info(\"*** Example ***\")\r\n logger.info(\"idx: {}\".format(example.idx))\r\n\r\n logger.info(\"source_tokens: {}\".format([x.replace('\\u0120','_') for x in source_tokens]))\r\n logger.info(\"source_ids: {}\".format(' '.join(map(str, source_ids))))\r\n logger.info(\"source_mask: {}\".format(' '.join(map(str, source_mask))))\r\n \r\n logger.info(\"target_tokens: {}\".format([x.replace('\\u0120','_') for x in target_tokens]))\r\n logger.info(\"target_ids: {}\".format(' '.join(map(str, target_ids))))\r\n logger.info(\"target_mask: {}\".format(' '.join(map(str, target_mask))))\r\n \r\n features.append(\r\n InputFeatures(\r\n example_index,\r\n source_ids,\r\n target_ids,\r\n source_mask,\r\n target_mask,\r\n )\r\n )\r\n return features\r\n\r\n\r\n\r\ndef set_seed(args):\r\n \"\"\"set random seed.\"\"\"\r\n random.seed(args.seed)\r\n np.random.seed(args.seed)\r\n torch.manual_seed(args.seed)\r\n if args.n_gpu > 0:\r\n torch.cuda.manual_seed_all(args.seed)\r\n \r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n\r\n ## Required parameters \r\n parser.add_argument(\"--model_type\", default=None, type=str, required=True,\r\n help=\"Model type: e.g. roberta\")\r\n parser.add_argument(\"--model_name_or_path\", default=None, type=str, required=True,\r\n help=\"Path to pre-trained model: e.g. roberta-base\" ) \r\n parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\r\n help=\"The output directory where the model predictions and checkpoints will be written.\")\r\n parser.add_argument(\"--load_model_path\", default=None, type=str, \r\n help=\"Path to trained model: Should contain the .bin files\" ) \r\n ## Other parameters\r\n parser.add_argument(\"--train_filename\", default=None, type=str, \r\n help=\"The train filename. Should contain the .jsonl files for this task.\")\r\n parser.add_argument(\"--dev_filename\", default=None, type=str, \r\n help=\"The dev filename. Should contain the .jsonl files for this task.\")\r\n parser.add_argument(\"--test_filename\", default=None, type=str, \r\n help=\"The test filename. Should contain the .jsonl files for this task.\") \r\n \r\n parser.add_argument(\"--config_name\", default=\"\", type=str,\r\n help=\"Pretrained config name or path if not the same as model_name\")\r\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\r\n help=\"Pretrained tokenizer name or path if not the same as model_name\") \r\n parser.add_argument(\"--max_source_length\", default=64, type=int,\r\n help=\"The maximum total source sequence length after tokenization. Sequences longer \"\r\n \"than this will be truncated, sequences shorter will be padded.\")\r\n parser.add_argument(\"--max_target_length\", default=32, type=int,\r\n help=\"The maximum total target sequence length after tokenization. 
Sequences longer \"\r\n \"than this will be truncated, sequences shorter will be padded.\")\r\n \r\n parser.add_argument(\"--do_train\", action='store_true',\r\n help=\"Whether to run training.\")\r\n parser.add_argument(\"--do_eval\", action='store_true',\r\n help=\"Whether to run eval on the dev set.\")\r\n parser.add_argument(\"--do_test\", action='store_true',\r\n help=\"Whether to run eval on the dev set.\")\r\n parser.add_argument(\"--do_lower_case\", action='store_true',\r\n help=\"Set this flag if you are using an uncased model.\")\r\n parser.add_argument(\"--no_cuda\", action='store_true',\r\n help=\"Avoid using CUDA when available\") \r\n \r\n parser.add_argument(\"--train_batch_size\", default=8, type=int,\r\n help=\"Batch size per GPU/CPU for training.\")\r\n parser.add_argument(\"--eval_batch_size\", default=8, type=int,\r\n help=\"Batch size per GPU/CPU for evaluation.\")\r\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\r\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\r\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float,\r\n help=\"The initial learning rate for Adam.\")\r\n parser.add_argument(\"--beam_size\", default=10, type=int,\r\n help=\"beam size for beam search\") \r\n parser.add_argument(\"--weight_decay\", default=0.0, type=float,\r\n help=\"Weight deay if we apply some.\")\r\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\r\n help=\"Epsilon for Adam optimizer.\")\r\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\r\n help=\"Max gradient norm.\")\r\n parser.add_argument(\"--num_train_epochs\", default=3.0, type=float,\r\n help=\"Total number of training epochs to perform.\")\r\n parser.add_argument(\"--max_steps\", default=-1, type=int,\r\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\")\r\n parser.add_argument(\"--eval_steps\", default=-1, type=int,\r\n help=\"\")\r\n parser.add_argument(\"--train_steps\", default=-1, type=int,\r\n help=\"\")\r\n parser.add_argument(\"--warmup_steps\", default=0, type=int,\r\n help=\"Linear warmup over warmup_steps.\")\r\n parser.add_argument(\"--local_rank\", type=int, default=-1,\r\n help=\"For distributed training: local_rank\") \r\n parser.add_argument('--seed', type=int, default=42,\r\n help=\"random seed for initialization\")\r\n # print arguments\r\n args = parser.parse_args()\r\n logger.info(args)\r\n\r\n # Setup CUDA, GPU & distributed training\r\n if args.local_rank == -1 or args.no_cuda:\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\r\n args.n_gpu = torch.cuda.device_count()\r\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\r\n torch.cuda.set_device(args.local_rank)\r\n device = torch.device(\"cuda\", args.local_rank)\r\n torch.distributed.init_process_group(backend='nccl')\r\n args.n_gpu = 1\r\n logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s\",\r\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1))\r\n args.device = device\r\n # Set seed\r\n set_seed(args)\r\n # make dir if output_dir not exist\r\n if os.path.exists(args.output_dir) is False:\r\n os.makedirs(args.output_dir)\r\n \r\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\r\n config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)\r\n tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,do_lower_case=args.do_lower_case)\r\n \r\n #budild model\r\n encoder = model_class.from_pretrained(args.model_name_or_path,config=config) \r\n decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=config.num_attention_heads)\r\n decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)\r\n model=Seq2Seq(encoder=encoder,decoder=decoder,config=config,\r\n beam_size=args.beam_size,max_length=args.max_target_length,\r\n sos_id=tokenizer.cls_token_id,eos_id=tokenizer.sep_token_id)\r\n if args.load_model_path is not None:\r\n logger.info(\"reload model from {}\".format(args.load_model_path))\r\n model.load_state_dict(torch.load(args.load_model_path))\r\n \r\n model.to(device)\r\n if args.local_rank != -1:\r\n # Distributed training\r\n try:\r\n from apex.parallel import DistributedDataParallel as DDP\r\n except ImportError:\r\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\r\n\r\n model = DDP(model)\r\n elif args.n_gpu > 1:\r\n # multi-gpu training\r\n model = torch.nn.DataParallel(model)\r\n\r\n\r\n\r\n\r\n if args.do_train:\r\n # Prepare training data loader\r\n train_examples = read_examples(args.train_filename)\r\n train_features = convert_examples_to_features(train_examples, tokenizer,args,stage='train')\r\n all_source_ids = torch.tensor([f.source_ids for f in train_features], dtype=torch.long)\r\n all_source_mask = torch.tensor([f.source_mask for f in train_features], dtype=torch.long)\r\n all_target_ids = torch.tensor([f.target_ids for f in train_features], dtype=torch.long)\r\n all_target_mask = torch.tensor([f.target_mask for f in train_features], dtype=torch.long) \r\n train_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask)\r\n \r\n if 
args.local_rank == -1:\r\n train_sampler = RandomSampler(train_data)\r\n else:\r\n train_sampler = DistributedSampler(train_data)\r\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size//args.gradient_accumulation_steps)\r\n\r\n num_train_optimization_steps = args.train_steps\r\n\r\n # Prepare optimizer and schedule (linear warmup and decay)\r\n no_decay = ['bias', 'LayerNorm.weight']\r\n optimizer_grouped_parameters = [\r\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\r\n 'weight_decay': args.weight_decay},\r\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\r\n ]\r\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\r\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,\r\n num_training_steps=num_train_optimization_steps)\r\n \r\n \r\n #Start training\r\n logger.info(\"***** Running training *****\")\r\n logger.info(\" Num examples = %d\", len(train_examples))\r\n logger.info(\" Batch size = %d\", args.train_batch_size)\r\n logger.info(\" Num epoch = %d\", num_train_optimization_steps*args.train_batch_size//len(train_examples))\r\n \r\n\r\n model.train()\r\n dev_dataset={}\r\n nb_tr_examples, nb_tr_steps,tr_loss,global_step,best_bleu,best_loss = 0, 0,0,0,0,1e6 \r\n bar = tqdm(range(num_train_optimization_steps),total=num_train_optimization_steps)\r\n train_dataloader=cycle(train_dataloader)\r\n eval_flag = True\r\n for step in bar:\r\n batch = next(train_dataloader)\r\n batch = tuple(t.to(device) for t in batch)\r\n source_ids,source_mask,target_ids,target_mask = batch\r\n loss,_,_ = model(source_ids=source_ids,source_mask=source_mask,target_ids=target_ids,target_mask=target_mask)\r\n \r\n if args.n_gpu > 1:\r\n loss = loss.mean() # mean() to average on multi-gpu.\r\n if args.gradient_accumulation_steps > 1:\r\n loss = loss / args.gradient_accumulation_steps\r\n tr_loss += loss.item()\r\n train_loss=round(tr_loss*args.gradient_accumulation_steps/(nb_tr_steps+1),4)\r\n bar.set_description(\"loss {}\".format(train_loss))\r\n nb_tr_examples += source_ids.size(0)\r\n nb_tr_steps += 1\r\n loss.backward()\r\n\r\n if (nb_tr_steps + 1) % args.gradient_accumulation_steps == 0:\r\n #Update parameters\r\n optimizer.step()\r\n optimizer.zero_grad()\r\n scheduler.step()\r\n global_step += 1\r\n eval_flag = True\r\n \r\n if args.do_eval and ((global_step + 1) %args.eval_steps == 0) and eval_flag:\r\n #Eval model with dev dataset\r\n tr_loss = 0\r\n nb_tr_examples, nb_tr_steps = 0, 0 \r\n eval_flag=False \r\n if 'dev_loss' in dev_dataset:\r\n eval_examples,eval_data=dev_dataset['dev_loss']\r\n else:\r\n eval_examples = read_examples(args.dev_filename)\r\n eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='dev')\r\n all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)\r\n all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)\r\n all_target_ids = torch.tensor([f.target_ids for f in eval_features], dtype=torch.long)\r\n all_target_mask = torch.tensor([f.target_mask for f in eval_features], dtype=torch.long) \r\n eval_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask) \r\n dev_dataset['dev_loss']=eval_examples,eval_data\r\n eval_sampler = SequentialSampler(eval_data)\r\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, 
batch_size=args.eval_batch_size)\r\n \r\n logger.info(\"\\n***** Running evaluation *****\")\r\n logger.info(\" Num examples = %d\", len(eval_examples))\r\n logger.info(\" Batch size = %d\", args.eval_batch_size)\r\n\r\n #Start Evaling model\r\n model.eval()\r\n eval_loss,tokens_num = 0,0\r\n for batch in eval_dataloader:\r\n batch = tuple(t.to(device) for t in batch)\r\n source_ids,source_mask,target_ids,target_mask = batch \r\n\r\n with torch.no_grad():\r\n _,loss,num = model(source_ids=source_ids,source_mask=source_mask,\r\n target_ids=target_ids,target_mask=target_mask) \r\n eval_loss += loss.sum().item()\r\n tokens_num += num.sum().item()\r\n #Pring loss of dev dataset \r\n model.train()\r\n eval_loss = eval_loss / tokens_num\r\n result = {'eval_ppl': round(np.exp(eval_loss),5),\r\n 'global_step': global_step+1,\r\n 'train_loss': round(train_loss,5)}\r\n for key in sorted(result.keys()):\r\n logger.info(\" %s = %s\", key, str(result[key]))\r\n logger.info(\" \"+\"*\"*20) \r\n \r\n #save last checkpoint\r\n last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')\r\n if not os.path.exists(last_output_dir):\r\n os.makedirs(last_output_dir)\r\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\r\n output_model_file = os.path.join(last_output_dir, \"pytorch_model.bin\")\r\n torch.save(model_to_save.state_dict(), output_model_file) \r\n if eval_loss<best_loss:\r\n logger.info(\" Best ppl:%s\",round(np.exp(eval_loss),5))\r\n logger.info(\" \"+\"*\"*20)\r\n best_loss=eval_loss\r\n # Save best checkpoint for best ppl\r\n output_dir = os.path.join(args.output_dir, 'checkpoint-best-ppl')\r\n if not os.path.exists(output_dir):\r\n os.makedirs(output_dir)\r\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\r\n output_model_file = os.path.join(output_dir, \"pytorch_model.bin\")\r\n torch.save(model_to_save.state_dict(), output_model_file) \r\n \r\n \r\n #Calculate bleu \r\n if 'dev_bleu' in dev_dataset:\r\n eval_examples,eval_data=dev_dataset['dev_bleu']\r\n else:\r\n eval_examples = read_examples(args.dev_filename)\r\n eval_examples = random.sample(eval_examples,min(1000,len(eval_examples)))\r\n eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')\r\n all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)\r\n all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long) \r\n eval_data = TensorDataset(all_source_ids,all_source_mask) \r\n dev_dataset['dev_bleu']=eval_examples,eval_data\r\n\r\n\r\n \r\n eval_sampler = SequentialSampler(eval_data)\r\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\r\n\r\n model.eval() \r\n p=[]\r\n for batch in eval_dataloader:\r\n batch = tuple(t.to(device) for t in batch)\r\n source_ids,source_mask= batch \r\n with torch.no_grad():\r\n preds = model(source_ids=source_ids,source_mask=source_mask) \r\n for pred in preds:\r\n t=pred[0].cpu().numpy()\r\n t=list(t)\r\n if 0 in t:\r\n t=t[:t.index(0)]\r\n text = tokenizer.decode(t,clean_up_tokenization_spaces=False)\r\n p.append(text)\r\n model.train()\r\n predictions=[]\r\n with open(os.path.join(args.output_dir,\"dev.output\"),'w') as f, open(os.path.join(args.output_dir,\"dev.gold\"),'w') as f1:\r\n for ref,gold in zip(p,eval_examples):\r\n predictions.append(str(gold.idx)+'\\t'+ref)\r\n f.write(str(gold.idx)+'\\t'+ref+'\\n')\r\n 
f1.write(str(gold.idx)+'\\t'+gold.target+'\\n') \r\n\r\n (goldMap, predictionMap) = bleu.computeMaps(predictions, os.path.join(args.output_dir, \"dev.gold\")) \r\n dev_bleu=round(bleu.bleuFromMaps(goldMap, predictionMap)[0],2)\r\n logger.info(\" %s = %s \"%(\"bleu-4\",str(dev_bleu)))\r\n logger.info(\" \"+\"*\"*20) \r\n if dev_bleu>best_bleu:\r\n logger.info(\" Best bleu:%s\",dev_bleu)\r\n logger.info(\" \"+\"*\"*20)\r\n best_bleu=dev_bleu\r\n # Save best checkpoint for best bleu\r\n output_dir = os.path.join(args.output_dir, 'checkpoint-best-bleu')\r\n if not os.path.exists(output_dir):\r\n os.makedirs(output_dir)\r\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\r\n output_model_file = os.path.join(output_dir, \"pytorch_model.bin\")\r\n torch.save(model_to_save.state_dict(), output_model_file)\r\n \r\n if args.do_test:\r\n files=[]\r\n if args.dev_filename is not None:\r\n files.append(args.dev_filename)\r\n if args.test_filename is not None:\r\n files.append(args.test_filename)\r\n for idx,file in enumerate(files): \r\n logger.info(\"Test file: {}\".format(file))\r\n eval_examples = read_examples(file)\r\n eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')\r\n all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)\r\n all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long) \r\n eval_data = TensorDataset(all_source_ids,all_source_mask) \r\n\r\n # Calculate bleu\r\n eval_sampler = SequentialSampler(eval_data)\r\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\r\n\r\n model.eval() \r\n p=[]\r\n for batch in tqdm(eval_dataloader,total=len(eval_dataloader)):\r\n batch = tuple(t.to(device) for t in batch)\r\n source_ids,source_mask= batch \r\n with torch.no_grad():\r\n preds = model(source_ids=source_ids,source_mask=source_mask) \r\n for pred in preds:\r\n t=pred[0].cpu().numpy()\r\n t=list(t)\r\n if 0 in t:\r\n t=t[:t.index(0)]\r\n text = tokenizer.decode(t,clean_up_tokenization_spaces=False)\r\n p.append(text)\r\n model.train()\r\n predictions=[]\r\n with open(os.path.join(args.output_dir,\"test_{}.output\".format(str(idx))),'w') as f, open(os.path.join(args.output_dir,\"test_{}.gold\".format(str(idx))),'w') as f1:\r\n for ref,gold in zip(p,eval_examples):\r\n predictions.append(str(gold.idx)+'\\t'+ref)\r\n f.write(str(gold.idx)+'\\t'+ref+'\\n')\r\n f1.write(str(gold.idx)+'\\t'+gold.target+'\\n') \r\n\r\n (goldMap, predictionMap) = bleu.computeMaps(predictions, os.path.join(args.output_dir, \"test_{}.gold\".format(idx))) \r\n dev_bleu=round(bleu.bleuFromMaps(goldMap, predictionMap)[0],2)\r\n logger.info(\" %s = %s \"%(\"bleu-4\",str(dev_bleu)))\r\n logger.info(\" \"+\"*\"*20) \r\n\r\n\r\n\r\n \r\n\r\n \r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n" ]
[ [ "torch.utils.data.RandomSampler", "numpy.exp", "torch.cuda.is_available", "torch.load", "torch.nn.DataParallel", "torch.distributed.init_process_group", "torch.manual_seed", "torch.tensor", "torch.utils.data.DataLoader", "torch.device", "torch.cuda.manual_seed_all", "torch.utils.data.SequentialSampler", "torch.cuda.device_count", "torch.cuda.set_device", "torch.utils.data.TensorDataset", "torch.nn.TransformerDecoder", "torch.nn.TransformerDecoderLayer", "numpy.random.seed", "torch.no_grad", "torch.utils.data.distributed.DistributedSampler" ] ]
juluobruce/Biodiversity-dashboard
[ "158fb826b15e80da401775e7ca97d88b6e893cbb" ]
[ "Belly_Button_Biodiversity/app.py" ]
[ "import os\n\nimport pandas as pd\nimport numpy as np\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine\n\nfrom flask import Flask, jsonify, render_template\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\n\n#################################################\n# Database Setup\n#################################################\n\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///db/bellybutton.sqlite\"\ndb = SQLAlchemy(app)\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(db.engine, reflect=True)\n\n# Save references to each table\nSamples_Metadata = Base.classes.sample_metadata\nSamples = Base.classes.samples\n\n\[email protected](\"/\")\ndef index():\n \"\"\"Return the homepage.\"\"\"\n return render_template(\"index.html\")\n\n\[email protected](\"/names\")\ndef names():\n \"\"\"Return a list of sample names.\"\"\"\n\n # Use Pandas to perform the sql query\n stmt = db.session.query(Samples).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n\n # Return a list of the column names (sample names)\n return jsonify(list(df.columns)[2:])\n\n\[email protected](\"/metadata/<sample>\")\ndef sample_metadata(sample):\n \"\"\"Return the MetaData for a given sample.\"\"\"\n sel = [\n Samples_Metadata.sample,\n Samples_Metadata.ETHNICITY,\n Samples_Metadata.GENDER,\n Samples_Metadata.AGE,\n Samples_Metadata.LOCATION,\n Samples_Metadata.BBTYPE,\n Samples_Metadata.WFREQ,\n ]\n\n results = db.session.query(*sel).filter(Samples_Metadata.sample == sample).all()\n\n # Create a dictionary entry for each row of metadata information\n sample_metadata = {}\n for result in results:\n sample_metadata[\"sample\"] = result[0]\n sample_metadata[\"ETHNICITY\"] = result[1]\n sample_metadata[\"GENDER\"] = result[2]\n sample_metadata[\"AGE\"] = result[3]\n sample_metadata[\"LOCATION\"] = result[4]\n sample_metadata[\"BBTYPE\"] = result[5]\n sample_metadata[\"WFREQ\"] = result[6]\n\n print(sample_metadata)\n return jsonify(sample_metadata)\n\n\[email protected](\"/samples/<sample>\")\ndef samples(sample):\n \"\"\"Return `otu_ids`, `otu_labels`, and `sample_values`.\"\"\"\n stmt = db.session.query(Samples).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n\n # Filter the data based on the sample number and\n # only keep rows with values above 1\n sample_data = df.loc[df[sample] > 1, [\"otu_id\", \"otu_label\", sample]]\n # Format the data to send as json\n data = {\n \"otu_ids\": sample_data.otu_id.values.tolist(),\n \"sample_values\": sample_data[sample].values.tolist(),\n \"otu_labels\": sample_data.otu_label.tolist(),\n }\n return jsonify(data)\n\n\nif __name__ == \"__main__\":\n app.run()\n" ]
[ [ "pandas.read_sql_query" ] ]
TeamEpochGithub/Jigsaw
[ "0b2f65681e585de4bcb0913f01f10c92358ca376" ]
[ "src/training/train.py" ]
[ "import gc\nimport time\nimport copy\nimport os\n\nimport numpy as np\n\nfrom collections import defaultdict\n\nimport torch\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\n\nfrom tqdm import tqdm\n\nfrom data_loader import JigsawDataset\n\n# For colored terminal text\nfrom colorama import Fore, Back, Style\n\nb_ = Fore.BLUE\ny_ = Fore.YELLOW\nsr_ = Style.RESET_ALL\n\n# Suppress warnings\nimport warnings\n\n\nclass JigsawTrainer:\n \"\"\"\n A class that holds training-specific data (model, config, wandb, dataset)\n to streamline trainig.\n \"\"\"\n\n def __init__(self, model, config, wandb, run, df):\n self.model = model\n self.config = config\n self.wandb = wandb\n self.device = config[\"device\"]\n self.run = run\n self.df = df\n\n # For descriptive error messages\n warnings.filterwarnings(\"ignore\")\n\n os.environ[\"CUDA_LAUNCH_BLOCKING\"] = \"1\"\n\n @torch.no_grad()\n def valid_one_epoch(self, dataloader, epoch, optimizer):\n \"\"\"\n Evaluate the current model + epoch\n :param dataloader:\n :param epoch: The current epoch\n :param optimizer:\n \"\"\"\n # Set model to eval mode\n self.model.eval()\n\n dataset_size = 0\n running_loss = 0.0\n epoch_loss = 0\n\n # Main eval loop\n bar = tqdm(enumerate(dataloader), total=len(dataloader))\n for _, data in bar:\n # Send relevant data to processing device\n more_toxic_ids = data[\"more_toxic_ids\"].to(self.device, dtype=torch.long)\n more_toxic_mask = data[\"more_toxic_mask\"].to(self.device, dtype=torch.long)\n less_toxic_ids = data[\"less_toxic_ids\"].to(self.device, dtype=torch.long)\n less_toxic_mask = data[\"less_toxic_mask\"].to(self.device, dtype=torch.long)\n targets = data[\"target\"].to(self.device, dtype=torch.long)\n\n batch_size = more_toxic_ids.size(0)\n\n # Get result of more/less toxic input\n more_toxic_outputs = self.model(more_toxic_ids, more_toxic_mask)\n less_toxic_outputs = self.model(less_toxic_ids, less_toxic_mask)\n\n # Get loss from output + targets\n loss = self.criterion(more_toxic_outputs, less_toxic_outputs, targets)\n\n # Add loss to total loss, weighted by batch size\n running_loss += loss.item() * batch_size\n dataset_size += batch_size\n\n epoch_loss = running_loss / dataset_size\n\n bar.set_postfix(\n Epoch=epoch, Valid_Loss=epoch_loss, LR=optimizer.param_groups[0][\"lr\"]\n )\n\n # Force garbage collection\n gc.collect()\n\n return epoch_loss\n\n def train_one_epoch(self, optimizer, scheduler, dataloader, epoch):\n\n # Set model to training mode\n self.model.train()\n\n dataset_size = 0\n running_loss = 0.0\n epoch_loss = 0\n\n # Main training loop\n bar = tqdm(enumerate(dataloader), total=len(dataloader))\n for step, data in bar:\n # Send relevant data to processing device\n more_toxic_ids = data[\"more_toxic_ids\"].to(self.device, dtype=torch.long)\n more_toxic_mask = data[\"more_toxic_mask\"].to(self.device, dtype=torch.long)\n less_toxic_ids = data[\"less_toxic_ids\"].to(self.device, dtype=torch.long)\n less_toxic_mask = data[\"less_toxic_mask\"].to(self.device, dtype=torch.long)\n targets = data[\"target\"].to(self.device, dtype=torch.long)\n\n batch_size = more_toxic_ids.size(0)\n\n # Get result of more/less toxic input\n more_toxic_outputs = self.model(more_toxic_ids, more_toxic_mask)\n less_toxic_outputs = self.model(less_toxic_ids, less_toxic_mask)\n\n # Get loss from output + targets\n loss = self.criterion(more_toxic_outputs, less_toxic_outputs, targets)\n\n # Use loss for backpropagation\n loss = loss / self.config[\"n_accumulate\"]\n 
loss.backward()\n\n # Only update the gradients every *n_accumulate* training steps\n # Otherwise, just store the gradients without updating the weights\n if (step + 1) % self.config[\"n_accumulate\"] == 0:\n optimizer.step()\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n if scheduler is not None:\n scheduler.step()\n\n # Add loss to total loss, weighted by batch size\n running_loss += loss.item() * batch_size\n dataset_size += batch_size\n\n epoch_loss = running_loss / dataset_size\n\n bar.set_postfix(\n Epoch=epoch, Train_Loss=epoch_loss, LR=optimizer.param_groups[0][\"lr\"]\n )\n\n # Force garbage collection\n gc.collect()\n\n return epoch_loss\n\n def run_training(\n self, optimizer, scheduler, num_epochs, fold, train_loader, valid_loader\n ):\n \"\"\"\n This function is responsible for the entire training process.\n After setup, it repeatedly trains and evaluates the model for each epoch.\n :param optimizer:\n :param scheduler:\n :param num_epochs: The number of training + validation epochs\n :param fold: The data fold to use\n :param train_loader: The class that loads training data\n :param valid_loader: The class that loads validation data\n \"\"\"\n # To automatically log gradients\n self.wandb.watch(self.model, log_freq=100)\n\n if torch.cuda.is_available():\n print(\"[INFO] Using GPU: {}\\n\".format(torch.cuda.get_device_name()))\n\n # Setup\n start = time.time()\n\n # Save the current state of the model, that can be reused\n # for future training loops after the result of this loop\n # is saved\n best_model_wts = copy.deepcopy(self.model.state_dict())\n best_epoch_loss = np.inf\n history = defaultdict(list)\n\n # Main train + eval loop\n for epoch in range(1, num_epochs + 1):\n # Force garbage collection\n gc.collect()\n\n # Run a training round\n train_epoch_loss = self.train_one_epoch(\n optimizer, scheduler, dataloader=train_loader, epoch=epoch\n )\n\n # Run a validation round\n val_epoch_loss = self.valid_one_epoch(\n valid_loader, epoch=epoch, optimizer=optimizer\n )\n\n history[\"Train Loss\"].append(train_epoch_loss)\n history[\"Valid Loss\"].append(val_epoch_loss)\n\n # Log the metrics\n self.wandb.log({\"Train Loss\": train_epoch_loss})\n self.wandb.log({\"Valid Loss\": val_epoch_loss})\n\n # deep copy the model\n if val_epoch_loss <= best_epoch_loss:\n print(\n f\"{b_}Validation Loss Improved ({best_epoch_loss} ---> {val_epoch_loss})\"\n )\n best_epoch_loss = val_epoch_loss\n self.run.summary[\"Best Loss\"] = best_epoch_loss\n best_model_wts = copy.deepcopy(self.model.state_dict())\n\n if self.config[\"dataset_name\"]:\n dataset_nm = self.config[\"dataset_name\"]\n PATH = f\"Loss-Fold-{fold}-{dataset_nm}.bin\"\n else:\n PATH = f\"Loss-Fold-{fold}.bin\"\n torch.save(self.model.state_dict(), PATH)\n # Save a model file from the current directory\n print(f\"Model Saved{sr_}\")\n\n print()\n\n end = time.time()\n time_elapsed = end - start\n print(\n \"Training complete in {:.0f}h {:.0f}m {:.0f}s\".format(\n time_elapsed // 3600,\n (time_elapsed % 3600) // 60,\n (time_elapsed % 3600) % 60,\n )\n )\n print(\"Best Loss: {:.4f}\".format(best_epoch_loss))\n\n # Load best model weights so that future calls of\n # this function don't accidentally reuse the\n # result of this invocation\n self.model.load_state_dict(best_model_wts)\n\n return self.model, history\n\n def prepare_loaders(self, fold):\n \"\"\"\n Creates data loaders for training and validation, based upon\n the current fold. This ensures that only data corresponding\n to the fold is provided.\n :param fold: The fold index\n \"\"\"\n\n # Train on rows outside the fold; validate on rows inside it\n df_train = self.df[self.df.kfold != fold].reset_index(drop=True)\n df_valid = self.df[self.df.kfold == fold].reset_index(drop=True)\n\n train_dataset = JigsawDataset(\n df_train,\n tokenizer=self.config[\"tokenizer\"],\n max_length=self.config[\"max_length\"],\n )\n valid_dataset = JigsawDataset(\n df_valid,\n tokenizer=self.config[\"tokenizer\"],\n max_length=self.config[\"max_length\"],\n )\n\n train_loader = DataLoader(\n train_dataset,\n batch_size=self.config[\"train_batch_size\"],\n num_workers=2,\n shuffle=True,\n pin_memory=True,\n drop_last=True,\n )\n valid_loader = DataLoader(\n valid_dataset,\n batch_size=self.config[\"valid_batch_size\"],\n num_workers=2,\n shuffle=False,\n pin_memory=True,\n )\n\n return train_loader, valid_loader\n\n def fetch_scheduler(self, optimizer):\n \"\"\"\n Create the correct scheduler for the given optimizer\n Returns None if the config does not specify the scheduler to be used\n :param optimizer:\n \"\"\"\n if self.config[\"scheduler\"] == \"CosineAnnealingLR\":\n scheduler = lr_scheduler.CosineAnnealingLR(\n optimizer, T_max=self.config[\"T_max\"], eta_min=self.config[\"min_lr\"]\n )\n elif self.config[\"scheduler\"] == \"CosineAnnealingWarmRestarts\":\n scheduler = lr_scheduler.CosineAnnealingWarmRestarts(\n optimizer, T_0=self.config[\"T_0\"], eta_min=self.config[\"min_lr\"]\n )\n else:\n return None\n\n return scheduler\n\n def criterion(self, out1, out2, targets):\n \"\"\"\n Evaluate the loss of the given outputs + targets\n :param out1: The first set of outputs\n :param out2: The second set of outputs\n :param targets: The ground truth\n \"\"\"\n return nn.MarginRankingLoss(margin=self.config[\"margin\"])(out1, out2, targets)\n" ]
[ [ "torch.nn.MarginRankingLoss", "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.no_grad", "torch.cuda.get_device_name", "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts", "torch.cuda.is_available", "torch.utils.data.DataLoader" ] ]
matthiasdiener/pyopencl
[ "b072dd5298d4ecd340afe61089529fb3148872af" ]
[ "test/test_algorithm.py" ]
[ "#! /usr/bin/env python\n\nfrom __future__ import division, with_statement, absolute_import, print_function\n\n__copyright__ = \"Copyright (C) 2013 Andreas Kloeckner\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nfrom six.moves import range, zip\nimport numpy as np\nimport numpy.linalg as la\nimport sys\nfrom pytools import memoize\nfrom test_array import general_clrand\n\nimport pytest\n\nimport pyopencl as cl\nimport pyopencl.array as cl_array # noqa\nfrom pyopencl.tools import ( # noqa\n pytest_generate_tests_for_pyopencl as pytest_generate_tests)\nfrom pyopencl.characterize import has_double_support, has_struct_arg_count_bug\nfrom pyopencl.scan import (InclusiveScanKernel, ExclusiveScanKernel,\n GenericScanKernel, GenericDebugScanKernel)\nfrom pyopencl.characterize import get_pocl_version\n\n\n# {{{ elementwise\n\ndef test_elwise_kernel(ctx_factory):\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.clrandom import rand as clrand\n\n a_gpu = clrand(queue, (50,), np.float32)\n b_gpu = clrand(queue, (50,), np.float32)\n\n from pyopencl.elementwise import ElementwiseKernel\n lin_comb = ElementwiseKernel(context,\n \"float a, float *x, float b, float *y, float *z\",\n \"z[i] = a*x[i] + b*y[i]\",\n \"linear_combination\")\n\n c_gpu = cl_array.empty_like(a_gpu)\n lin_comb(5, a_gpu, 6, b_gpu, c_gpu)\n\n assert la.norm((c_gpu - (5 * a_gpu + 6 * b_gpu)).get()) < 1e-5\n\n\ndef test_elwise_kernel_with_options(ctx_factory):\n from pyopencl.clrandom import rand as clrand\n from pyopencl.elementwise import ElementwiseKernel\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n in_gpu = clrand(queue, (50,), np.float32)\n\n options = ['-D', 'ADD_ONE']\n add_one = ElementwiseKernel(\n context,\n \"float* out, const float *in\",\n \"\"\"\n out[i] = in[i]\n #ifdef ADD_ONE\n +1\n #endif\n ;\n \"\"\",\n options=options,\n )\n\n out_gpu = cl_array.empty_like(in_gpu)\n add_one(out_gpu, in_gpu)\n\n gt = in_gpu.get() + 1\n gv = out_gpu.get()\n assert la.norm(gv - gt) < 1e-5\n\n\ndef test_ranged_elwise_kernel(ctx_factory):\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.elementwise import ElementwiseKernel\n set_to_seven = ElementwiseKernel(context,\n \"float *z\", \"z[i] = 7\", \"set_to_seven\")\n\n for i, slc in enumerate([\n slice(5, 20000),\n slice(5, 20000, 17),\n slice(3000, 5, -1),\n slice(1000, -1),\n ]):\n\n a_gpu = cl_array.zeros(queue, (50000,), dtype=np.float32)\n a_cpu = np.zeros(a_gpu.shape, a_gpu.dtype)\n\n 
a_cpu[slc] = 7\n set_to_seven(a_gpu, slice=slc)\n\n assert (a_cpu == a_gpu.get()).all()\n\n\ndef test_take(ctx_factory):\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n idx = cl_array.arange(queue, 0, 200000, 2, dtype=np.uint32)\n a = cl_array.arange(queue, 0, 600000, 3, dtype=np.float32)\n result = cl_array.take(a, idx)\n assert ((3 * idx).get() == result.get()).all()\n\n\ndef test_arange(ctx_factory):\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n n = 5000\n a = cl_array.arange(queue, n, dtype=np.float32)\n assert (np.arange(n, dtype=np.float32) == a.get()).all()\n\n\ndef test_reverse(ctx_factory):\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n n = 5000\n a = np.arange(n).astype(np.float32)\n a_gpu = cl_array.to_device(queue, a)\n\n a_gpu = a_gpu.reverse()\n\n assert (a[::-1] == a_gpu.get()).all()\n\n\ndef test_if_positive(ctx_factory):\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.clrandom import rand as clrand\n\n ary_len = 20000\n a_gpu = clrand(queue, (ary_len,), np.float32)\n b_gpu = clrand(queue, (ary_len,), np.float32)\n a = a_gpu.get()\n b = b_gpu.get()\n\n max_a_b_gpu = cl_array.maximum(a_gpu, b_gpu)\n min_a_b_gpu = cl_array.minimum(a_gpu, b_gpu)\n\n print(max_a_b_gpu)\n print(np.maximum(a, b))\n\n assert la.norm(max_a_b_gpu.get() - np.maximum(a, b)) == 0\n assert la.norm(min_a_b_gpu.get() - np.minimum(a, b)) == 0\n\n\ndef test_take_put(ctx_factory):\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n for n in [5, 17, 333]:\n one_field_size = 8\n buf_gpu = cl_array.zeros(queue,\n n * one_field_size, dtype=np.float32)\n dest_indices = cl_array.to_device(queue,\n np.array([0, 1, 2, 3, 32, 33, 34, 35], dtype=np.uint32))\n read_map = cl_array.to_device(queue,\n np.array([7, 6, 5, 4, 3, 2, 1, 0], dtype=np.uint32))\n\n cl_array.multi_take_put(\n arrays=[buf_gpu for i in range(n)],\n dest_indices=dest_indices,\n src_indices=read_map,\n src_offsets=[i * one_field_size for i in range(n)],\n dest_shape=(96,))\n\n\ndef test_astype(ctx_factory):\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.clrandom import rand as clrand\n\n if not has_double_support(context.devices[0]):\n from pytest import skip\n skip(\"double precision not supported on %s\" % context.devices[0])\n\n a_gpu = clrand(queue, (2000,), dtype=np.float32)\n\n a = a_gpu.get().astype(np.float64)\n a2 = a_gpu.astype(np.float64).get()\n\n assert a2.dtype == np.float64\n assert la.norm(a - a2) == 0, (a, a2)\n\n a_gpu = clrand(queue, (2000,), dtype=np.float64)\n\n a = a_gpu.get().astype(np.float32)\n a2 = a_gpu.astype(np.float32).get()\n\n assert a2.dtype == np.float32\n assert la.norm(a - a2) / la.norm(a) < 1e-7\n\n# }}}\n\n\n# {{{ reduction\n\ndef test_sum(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n n = 200000\n for dtype in [np.float32, np.complex64]:\n a_gpu = general_clrand(queue, (n,), dtype)\n\n a = a_gpu.get()\n\n for slc in [\n slice(None),\n slice(1000, 3000),\n slice(1000, -3000),\n slice(1000, None),\n slice(1000, None, 3),\n ]:\n sum_a = np.sum(a[slc])\n\n if slc.step is None:\n sum_a_gpu = cl_array.sum(a_gpu[slc]).get()\n assert abs(sum_a_gpu - sum_a) / abs(sum_a) < 1e-4\n\n sum_a_gpu_2 = cl_array.sum(a_gpu, slice=slc).get()\n assert abs(sum_a_gpu_2 - sum_a) / abs(sum_a) < 1e-4\n\n\ndef test_sum_without_data(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = 
ctx_factory()\n queue = cl.CommandQueue(context)\n\n n = 2000\n\n from pyopencl.reduction import ReductionKernel\n red = ReductionKernel(context, np.int32,\n neutral=\"0\",\n reduce_expr=\"a+b\", map_expr=\"i\",\n arguments=[])\n\n result_dev = red(range=slice(n), queue=queue).get()\n result_ref = n*(n-1)//2\n\n assert result_dev == result_ref\n\n\ndef test_minmax(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.clrandom import rand as clrand\n\n if has_double_support(context.devices[0]):\n dtypes = [np.float64, np.float32, np.int32]\n else:\n dtypes = [np.float32, np.int32]\n\n for what in [\"min\", \"max\"]:\n for dtype in dtypes:\n a_gpu = clrand(queue, (200000,), dtype)\n a = a_gpu.get()\n\n op_a = getattr(np, what)(a)\n op_a_gpu = getattr(cl_array, what)(a_gpu).get()\n\n assert op_a_gpu == op_a, (op_a_gpu, op_a, dtype, what)\n\n\ndef test_subset_minmax(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.clrandom import rand as clrand\n\n l_a = 200000\n gran = 5\n l_m = l_a - l_a // gran + 1\n\n if has_double_support(context.devices[0]):\n dtypes = [np.float64, np.float32, np.int32]\n else:\n dtypes = [np.float32, np.int32]\n\n for dtype in dtypes:\n a_gpu = clrand(queue, (l_a,), dtype)\n a = a_gpu.get()\n\n meaningful_indices_gpu = cl_array.zeros(\n queue, l_m, dtype=np.int32)\n meaningful_indices = meaningful_indices_gpu.get()\n j = 0\n for i in range(len(meaningful_indices)):\n meaningful_indices[i] = j\n j = j + 1\n if j % gran == 0:\n j = j + 1\n\n meaningful_indices_gpu = cl_array.to_device(\n queue, meaningful_indices)\n b = a[meaningful_indices]\n\n min_a = np.min(b)\n min_a_gpu = cl_array.subset_min(meaningful_indices_gpu, a_gpu).get()\n\n assert min_a_gpu == min_a\n\n\ndef test_dot(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n dev = context.devices[0]\n\n dtypes = [np.float32, np.complex64]\n if has_double_support(dev):\n if has_struct_arg_count_bug(dev) == \"apple\":\n dtypes.extend([np.float64])\n else:\n dtypes.extend([np.float64, np.complex128])\n\n for a_dtype in dtypes:\n for b_dtype in dtypes:\n print(a_dtype, b_dtype)\n a_gpu = general_clrand(queue, (200000,), a_dtype)\n a = a_gpu.get()\n b_gpu = general_clrand(queue, (200000,), b_dtype)\n b = b_gpu.get()\n\n dot_ab = np.dot(a, b)\n dot_ab_gpu = cl_array.dot(a_gpu, b_gpu).get()\n\n assert abs(dot_ab_gpu - dot_ab) / abs(dot_ab) < 1e-4\n\n try:\n vdot_ab = np.vdot(a, b)\n except NotImplementedError:\n import sys\n is_pypy = '__pypy__' in sys.builtin_module_names\n if is_pypy:\n print(\"PYPY: VDOT UNIMPLEMENTED\")\n continue\n else:\n raise\n\n vdot_ab_gpu = cl_array.vdot(a_gpu, b_gpu).get()\n\n rel_err = abs(vdot_ab_gpu - vdot_ab) / abs(vdot_ab)\n assert rel_err < 1e-4, rel_err\n\n\n@memoize\ndef make_mmc_dtype(device):\n dtype = np.dtype([\n (\"cur_min\", np.int32),\n (\"cur_max\", np.int32),\n (\"pad\", np.int32),\n ])\n\n name = \"minmax_collector\"\n from pyopencl.tools import get_or_register_dtype, match_dtype_to_c_struct\n\n dtype, c_decl = match_dtype_to_c_struct(device, name, dtype)\n dtype = get_or_register_dtype(name, dtype)\n\n return dtype, c_decl\n\n\ndef test_struct_reduce(ctx_factory):\n pytest.importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n dev, = context.devices\n if 
(dev.vendor == \"NVIDIA\" and dev.platform.vendor == \"Apple\"\n and dev.driver_version == \"8.12.47 310.40.00.05f01\"):\n pytest.skip(\"causes a compiler hang on Apple/Nv GPU\")\n\n mmc_dtype, mmc_c_decl = make_mmc_dtype(context.devices[0])\n\n preamble = mmc_c_decl + r\"\"\"//CL//\n\n minmax_collector mmc_neutral()\n {\n // FIXME: needs infinity literal in real use, ok here\n minmax_collector result;\n result.cur_min = 1<<30;\n result.cur_max = -(1<<30);\n return result;\n }\n\n minmax_collector mmc_from_scalar(float x)\n {\n minmax_collector result;\n result.cur_min = x;\n result.cur_max = x;\n return result;\n }\n\n minmax_collector agg_mmc(minmax_collector a, minmax_collector b)\n {\n minmax_collector result = a;\n if (b.cur_min < result.cur_min)\n result.cur_min = b.cur_min;\n if (b.cur_max > result.cur_max)\n result.cur_max = b.cur_max;\n return result;\n }\n\n \"\"\"\n\n from pyopencl.clrandom import rand as clrand\n a_gpu = clrand(queue, (20000,), dtype=np.int32, a=0, b=10**6)\n a = a_gpu.get()\n\n from pyopencl.reduction import ReductionKernel\n red = ReductionKernel(context, mmc_dtype,\n neutral=\"mmc_neutral()\",\n reduce_expr=\"agg_mmc(a, b)\", map_expr=\"mmc_from_scalar(x[i])\",\n arguments=\"__global int *x\", preamble=preamble)\n\n minmax = red(a_gpu).get()\n #print minmax[\"cur_min\"], minmax[\"cur_max\"]\n #print np.min(a), np.max(a)\n\n assert abs(minmax[\"cur_min\"] - np.min(a)) < 1e-5\n assert abs(minmax[\"cur_max\"] - np.max(a)) < 1e-5\n\n# }}}\n\n\n# {{{ scan-related\n\ndef summarize_error(obtained, desired, orig, thresh=1e-5):\n from pytest import importorskip\n importorskip(\"mako\")\n\n err = obtained - desired\n ok_count = 0\n bad_count = 0\n\n bad_limit = 200\n\n def summarize_counts():\n if ok_count:\n entries.append(\"<%d ok>\" % ok_count)\n if bad_count >= bad_limit:\n entries.append(\"<%d more bad>\" % (bad_count-bad_limit))\n\n entries = []\n for i, val in enumerate(err):\n if abs(val) > thresh:\n if ok_count:\n summarize_counts()\n ok_count = 0\n\n bad_count += 1\n\n if bad_count < bad_limit:\n entries.append(\"%r (want: %r, got: %r, orig: %r)\" % (\n obtained[i], desired[i], obtained[i], orig[i]))\n else:\n if bad_count:\n summarize_counts()\n bad_count = 0\n\n ok_count += 1\n\n summarize_counts()\n\n return \" \".join(entries)\n\n\nscan_test_counts = [\n 10,\n 2 ** 8 - 1,\n 2 ** 8,\n 2 ** 8 + 1,\n 2 ** 10 - 5,\n 2 ** 10,\n 2 ** 10 + 5,\n 2 ** 12 - 5,\n 2 ** 12,\n 2 ** 12 + 5,\n 2 ** 20 - 2 ** 18,\n 2 ** 20 - 2 ** 18 + 5,\n 2 ** 20 + 1,\n 2 ** 20,\n 2 ** 23 + 3,\n # larger sizes cause out of memory on low-end AMD APUs\n ]\n\n\[email protected](\"dtype\", [np.int32, np.int64])\[email protected](\"scan_cls\", [InclusiveScanKernel, ExclusiveScanKernel])\ndef test_scan(ctx_factory, dtype, scan_cls):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n knl = scan_cls(context, dtype, \"a+b\", \"0\")\n\n for n in scan_test_counts:\n host_data = np.random.randint(0, 10, n).astype(dtype)\n dev_data = cl_array.to_device(queue, host_data)\n\n # /!\\ fails on Nv GT2?? 
for some drivers\n assert (host_data == dev_data.get()).all()\n\n knl(dev_data)\n\n desired_result = np.cumsum(host_data, axis=0)\n if scan_cls is ExclusiveScanKernel:\n desired_result -= host_data\n\n is_ok = (dev_data.get() == desired_result).all()\n if 1 and not is_ok:\n print(\"something went wrong, summarizing error...\")\n print(summarize_error(dev_data.get(), desired_result, host_data))\n\n print(\"dtype:%s n:%d %s worked:%s\" % (dtype, n, scan_cls, is_ok))\n assert is_ok\n from gc import collect\n collect()\n\n\[email protected](\"scan_cls\", (GenericScanKernel, GenericDebugScanKernel))\ndef test_scan_with_vectorargs_with_offsets(ctx_factory, scan_cls):\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.tools import VectorArg\n\n knl = scan_cls(\n context, float,\n arguments=[\n VectorArg(float, \"input\", with_offset=True),\n VectorArg(int, \"segment\", with_offset=True),\n ],\n input_expr=\"input[i]\",\n is_segment_start_expr=\"segment[i]\",\n scan_expr=\"a+b\", neutral=\"0\",\n output_statement=\"\"\"\n input[i] = item;\n \"\"\")\n\n n = 20\n\n host_data = np.random.randint(0, 10, n).astype(float)\n dev_data = cl.array.to_device(queue, host_data)\n segment_data = np.zeros(n, dtype=int)\n dev_segment_data = cl.array.to_device(queue, segment_data)\n\n knl(dev_data, dev_segment_data)\n\n assert (dev_data.get() == np.cumsum(host_data)).all()\n\n\ndef test_copy_if(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.clrandom import rand as clrand\n for n in scan_test_counts:\n a_dev = clrand(queue, (n,), dtype=np.int32, a=0, b=1000)\n a = a_dev.get()\n\n from pyopencl.algorithm import copy_if\n\n crit = a_dev.dtype.type(300)\n selected = a[a > crit]\n selected_dev, count_dev, evt = copy_if(\n a_dev, \"ary[i] > myval\", [(\"myval\", crit)])\n\n assert (selected_dev.get()[:count_dev.get()] == selected).all()\n from gc import collect\n collect()\n\n\ndef test_partition(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.clrandom import rand as clrand\n for n in scan_test_counts:\n print(\"part\", n)\n\n a_dev = clrand(queue, (n,), dtype=np.int32, a=0, b=1000)\n a = a_dev.get()\n\n crit = a_dev.dtype.type(300)\n true_host = a[a > crit]\n false_host = a[a <= crit]\n\n from pyopencl.algorithm import partition\n true_dev, false_dev, count_true_dev, evt = partition(\n a_dev, \"ary[i] > myval\", [(\"myval\", crit)])\n\n count_true_dev = count_true_dev.get()\n\n assert (true_dev.get()[:count_true_dev] == true_host).all()\n assert (false_dev.get()[:n-count_true_dev] == false_host).all()\n\n\ndef test_unique(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.clrandom import rand as clrand\n for n in scan_test_counts:\n a_dev = clrand(queue, (n,), dtype=np.int32, a=0, b=1000)\n a = a_dev.get()\n a = np.sort(a)\n a_dev = cl_array.to_device(queue, a)\n\n a_unique_host = np.unique(a)\n\n from pyopencl.algorithm import unique\n a_unique_dev, count_unique_dev, evt = unique(a_dev)\n\n count_unique_dev = count_unique_dev.get()\n\n assert (a_unique_dev.get()[:count_unique_dev] == a_unique_host).all()\n from gc import collect\n collect()\n\n\ndef test_index_preservation(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue 
= cl.CommandQueue(context)\n\n classes = [GenericScanKernel]\n\n dev = context.devices[0]\n if dev.type & cl.device_type.CPU:\n classes.append(GenericDebugScanKernel)\n\n for cls in classes:\n for n in scan_test_counts:\n knl = cls(\n context, np.int32,\n arguments=\"__global int *out\",\n input_expr=\"i\",\n scan_expr=\"b\", neutral=\"0\",\n output_statement=\"\"\"\n out[i] = item;\n \"\"\")\n\n out = cl_array.empty(queue, n, dtype=np.int32)\n knl(out)\n\n assert (out.get() == np.arange(n)).all()\n from gc import collect\n collect()\n\n\ndef test_segmented_scan(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.tools import dtype_to_ctype\n dtype = np.int32\n ctype = dtype_to_ctype(dtype)\n\n #for is_exclusive in [False, True]:\n for is_exclusive in [True, False]:\n if is_exclusive:\n output_statement = \"out[i] = prev_item\"\n else:\n output_statement = \"out[i] = item\"\n\n knl = GenericScanKernel(context, dtype,\n arguments=\"__global %s *ary, __global char *segflags, \"\n \"__global %s *out\" % (ctype, ctype),\n input_expr=\"ary[i]\",\n scan_expr=\"across_seg_boundary ? b : (a+b)\", neutral=\"0\",\n is_segment_start_expr=\"segflags[i]\",\n output_statement=output_statement,\n options=[])\n\n np.set_printoptions(threshold=2000)\n from random import randrange\n from pyopencl.clrandom import rand as clrand\n for n in scan_test_counts:\n a_dev = clrand(queue, (n,), dtype=dtype, a=0, b=10)\n a = a_dev.get()\n\n if 10 <= n < 20:\n seg_boundaries_values = [\n [0, 9],\n [0, 3],\n [4, 6],\n ]\n else:\n seg_boundaries_values = []\n for i in range(10):\n seg_boundary_count = max(2, min(100, randrange(0, int(0.4*n))))\n seg_boundaries = [\n randrange(n) for i in range(seg_boundary_count)]\n if n >= 1029:\n seg_boundaries.insert(0, 1028)\n seg_boundaries.sort()\n seg_boundaries_values.append(seg_boundaries)\n\n for seg_boundaries in seg_boundaries_values:\n #print \"BOUNDARIES\", seg_boundaries\n #print a\n\n seg_boundary_flags = np.zeros(n, dtype=np.uint8)\n seg_boundary_flags[seg_boundaries] = 1\n seg_boundary_flags_dev = cl_array.to_device(\n queue, seg_boundary_flags)\n\n seg_boundaries.insert(0, 0)\n\n result_host = a.copy()\n for i, seg_start in enumerate(seg_boundaries):\n if i+1 < len(seg_boundaries):\n seg_end = seg_boundaries[i+1]\n else:\n seg_end = None\n\n if is_exclusive:\n result_host[seg_start+1:seg_end] = np.cumsum(\n a[seg_start:seg_end][:-1])\n result_host[seg_start] = 0\n else:\n result_host[seg_start:seg_end] = np.cumsum(\n a[seg_start:seg_end])\n\n #print \"REF\", result_host\n\n result_dev = cl_array.empty_like(a_dev)\n knl(a_dev, seg_boundary_flags_dev, result_dev)\n\n #print \"RES\", result_dev\n is_correct = (result_dev.get() == result_host).all()\n if not is_correct:\n diff = result_dev.get() - result_host\n print(\"RES-REF\", diff)\n print(\"ERRWHERE\", np.where(diff))\n print(n, list(seg_boundaries))\n\n assert is_correct\n from gc import collect\n collect()\n\n print(\"%d excl:%s done\" % (n, is_exclusive))\n\n\[email protected](\"scan_kernel\", [GenericScanKernel, GenericDebugScanKernel])\ndef test_sort(ctx_factory, scan_kernel):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n dtype = np.int32\n\n from pyopencl.algorithm import RadixSort\n sort = RadixSort(context, \"int *ary\", key_expr=\"ary[i]\",\n sort_arg_names=[\"ary\"], scan_kernel=scan_kernel)\n\n from pyopencl.clrandom import 
RanluxGenerator\n rng = RanluxGenerator(queue, seed=15)\n\n from time import time\n\n # intermediate arrays for largest size cause out-of-memory on low-end GPUs\n for n in scan_test_counts[:-1]:\n if n >= 2000 and isinstance(scan_kernel, GenericDebugScanKernel):\n continue\n\n print(n)\n\n print(\" rng\")\n a_dev = rng.uniform(queue, (n,), dtype=dtype, a=0, b=2**16)\n a = a_dev.get()\n\n dev_start = time()\n print(\" device\")\n (a_dev_sorted,), evt = sort(a_dev, key_bits=16)\n queue.finish()\n dev_end = time()\n print(\" numpy\")\n a_sorted = np.sort(a)\n numpy_end = time()\n\n numpy_elapsed = numpy_end-dev_end\n dev_elapsed = dev_end-dev_start\n print(\" dev: %.2f MKeys/s numpy: %.2f MKeys/s ratio: %.2fx\" % (\n 1e-6*n/dev_elapsed, 1e-6*n/numpy_elapsed, numpy_elapsed/dev_elapsed))\n assert (a_dev_sorted.get() == a_sorted).all()\n\n\ndef test_list_builder(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.algorithm import ListOfListsBuilder\n builder = ListOfListsBuilder(context, [(\"mylist\", np.int32)], \"\"\"//CL//\n void generate(LIST_ARG_DECL USER_ARG_DECL index_type i)\n {\n int count = i % 4;\n for (int j = 0; j < count; ++j)\n {\n APPEND_mylist(count);\n }\n }\n \"\"\", arg_decls=[])\n\n result, evt = builder(queue, 2000)\n\n inf = result[\"mylist\"]\n assert inf.count == 3000\n assert (inf.lists.get()[-6:] == [1, 2, 2, 3, 3, 3]).all()\n\n\ndef test_list_builder_with_memoryobject(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.algorithm import ListOfListsBuilder\n from pyopencl.tools import VectorArg\n builder = ListOfListsBuilder(context, [(\"mylist\", np.int32)], \"\"\"//CL//\n void generate(LIST_ARG_DECL USER_ARG_DECL index_type i)\n {\n APPEND_mylist(input_list[i]);\n }\n \"\"\", arg_decls=[VectorArg(float, \"input_list\")])\n\n n = 10000\n input_list = cl.array.zeros(queue, (n,), float)\n result, evt = builder(queue, n, input_list.data)\n\n inf = result[\"mylist\"]\n assert inf.count == n\n assert (inf.lists.get() == 0).all()\n\n\ndef test_list_builder_with_offset(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.algorithm import ListOfListsBuilder\n from pyopencl.tools import VectorArg\n builder = ListOfListsBuilder(context, [(\"mylist\", np.int32)], \"\"\"//CL//\n void generate(LIST_ARG_DECL USER_ARG_DECL index_type i)\n {\n APPEND_mylist(input_list[i]);\n }\n \"\"\", arg_decls=[\n VectorArg(float, \"input_list\", with_offset=True)])\n\n n = 10000\n input_list = cl.array.zeros(queue, (n + 10,), float)\n input_list[10:] = 1\n\n result, evt = builder(queue, n, input_list[10:])\n\n inf = result[\"mylist\"]\n assert inf.count == n\n assert (inf.lists.get() == 1).all()\n\n\ndef test_list_builder_with_empty_elim(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n from pyopencl.algorithm import ListOfListsBuilder\n\n builder = ListOfListsBuilder(\n context,\n [(\"mylist1\", np.int32), (\"mylist2\", np.int32), (\"mylist3\", np.int32)],\n \"\"\"//CL//\n void generate(LIST_ARG_DECL USER_ARG_DECL index_type i)\n {\n if (i % 5 == 0)\n {\n for (int j = 0; j < i / 5; ++j)\n {\n APPEND_mylist1(j);\n APPEND_mylist2(j + 1);\n APPEND_mylist3(j);\n }\n }\n }\n \"\"\",\n arg_decls=[],\n 
eliminate_empty_output_lists=[\"mylist1\", \"mylist2\"])\n\n result, evt = builder(queue, 1000)\n\n mylist1 = result[\"mylist1\"]\n assert mylist1.count == 19900\n assert (mylist1.starts.get()[:5] == [0, 1, 3, 6, 10]).all()\n assert (mylist1.nonempty_indices.get()[:5] == [5, 10, 15, 20, 25]).all()\n assert (mylist1.lists.get()[:6] == [0, 0, 1, 0, 1, 2]).all()\n mylist2 = result[\"mylist2\"]\n assert mylist2.count == 19900\n assert (mylist2.lists.get()[:6] == [1, 1, 2, 1, 2, 3]).all()\n mylist3 = result[\"mylist3\"]\n assert mylist3.count == 19900\n assert (mylist3.starts.get()[:10] == [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]).all()\n assert (mylist3.lists.get()[:6] == [0, 0, 1, 0, 1, 2]).all()\n\n\ndef test_key_value_sorter(ctx_factory):\n from pytest import importorskip\n importorskip(\"mako\")\n\n context = ctx_factory()\n queue = cl.CommandQueue(context)\n\n n = 10**5\n nkeys = 2000\n from pyopencl.clrandom import rand as clrand\n keys = clrand(queue, n, np.int32, b=nkeys)\n values = clrand(queue, n, np.int32, b=n).astype(np.int64)\n\n assert np.max(keys.get()) < nkeys\n\n from pyopencl.algorithm import KeyValueSorter\n kvs = KeyValueSorter(context)\n starts, lists, evt = kvs(queue, keys, values, nkeys, starts_dtype=np.int32)\n\n starts = starts.get()\n lists = lists.get()\n\n mydict = dict()\n for k, v in zip(keys.get(), values.get()):\n mydict.setdefault(k, []).append(v)\n\n for i in range(nkeys):\n start, end = starts[i:i+2]\n assert sorted(mydict[i]) == sorted(lists[start:end])\n\n# }}}\n\n\n# {{{ bitonic sort\n\[email protected](\"size\", [\n 512,\n 4,\n 16\n ])\[email protected](\"dtype\", [\n np.int32,\n np.float32,\n np.float64\n ])\[email protected]\ndef test_bitonic_sort(ctx_factory, size, dtype):\n ctx = cl.create_some_context()\n queue = cl.CommandQueue(ctx)\n\n dev = ctx.devices[0]\n if (dev.platform.name == \"Apple\" and dev.type & cl.device_type.CPU):\n pytest.xfail(\"Bitonic sort won't work on Apple CPU: no workgroup \"\n \"parallelism\")\n if (dev.platform.name == \"Portable Computing Language\"\n and dtype == np.float64\n and get_pocl_version(dev.platform) < (1, 0)):\n pytest.xfail(\"Double precision bitonic sort doesn't work on POCL < 1.0\")\n\n if dtype == np.float64 and not has_double_support(dev):\n from pytest import skip\n skip(\"double precision not supported on %s\" % dev)\n\n import pyopencl.clrandom as clrandom\n from pyopencl.bitonic_sort import BitonicSort\n\n s = clrandom.rand(queue, (2, size, 3,), dtype, luxury=None, a=0, b=239482333)\n sgs = s.copy()\n # enqueue_marker crashes under CL 1.1 pocl if there is anything to wait for\n # (no clEnqueueWaitForEvents) https://github.com/inducer/pyopencl/pull/237\n if (dev.platform.name == \"Portable Computing Language\"\n and cl.get_cl_header_version() < (1, 2)):\n sgs.finish()\n sorter = BitonicSort(ctx)\n sgs, evt = sorter(sgs, axis=1)\n assert np.array_equal(np.sort(s.get(), axis=1), sgs.get())\n\n\[email protected](\"size\", [\n 0,\n 4,\n 2**14,\n 2**18,\n ])\[email protected](\"dtype\", [\n np.int32,\n np.float32,\n np.float64\n ])\[email protected]\ndef test_bitonic_argsort(ctx_factory, size, dtype):\n import sys\n is_pypy = '__pypy__' in sys.builtin_module_names\n\n if not size and is_pypy:\n # https://bitbucket.org/pypy/numpy/issues/53/specifying-strides-on-zero-sized-array\n pytest.xfail(\"pypy doesn't seem to handle as_strided \"\n \"on zero-sized arrays very well\")\n\n ctx = cl.create_some_context()\n queue = cl.CommandQueue(ctx)\n\n dev = ctx.devices[0]\n if (dev.platform.name == \"Portable Computing 
Language\"\n and sys.platform == \"darwin\"):\n pytest.xfail(\"Bitonic sort crashes on Apple POCL\")\n if (dev.platform.name == \"Apple\" and dev.type & cl.device_type.CPU):\n pytest.xfail(\"Bitonic sort won't work on Apple CPU: no workgroup \"\n \"parallelism\")\n if (dev.platform.name == \"Portable Computing Language\"\n and dtype == np.float64\n and get_pocl_version(dev.platform) < (1, 0)):\n pytest.xfail(\"Double precision bitonic sort doesn't work on POCL < 1.0\")\n\n if dtype == np.float64 and not has_double_support(dev):\n from pytest import skip\n skip(\"double precision not supported on %s\" % dev)\n\n import pyopencl.clrandom as clrandom\n from pyopencl.bitonic_sort import BitonicSort\n\n index = cl_array.arange(queue, 0, size, 1, dtype=np.int32)\n m = clrandom.rand(queue, (size,), dtype, luxury=None, a=0, b=239432234)\n\n sorterm = BitonicSort(ctx)\n\n ms = m.copy()\n # enqueue_marker crashes under CL 1.1 pocl if there is anything to wait for\n # (no clEnqueueWaitForEvents) https://github.com/inducer/pyopencl/pull/237\n if (dev.platform.name == \"Portable Computing Language\"\n and cl.get_cl_header_version() < (1, 2)):\n ms.finish()\n index.finish()\n ms, evt = sorterm(ms, idx=index, axis=0)\n\n assert np.array_equal(np.sort(m.get()), ms.get())\n\n # may be False because of identical values in array\n # assert np.array_equal(np.argsort(m.get()), index.get())\n\n # Check values by indices\n assert np.array_equal(m.get()[np.argsort(m.get())], m.get()[index.get()])\n\n# }}}\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n exec(sys.argv[1])\n else:\n from pytest import main\n main([__file__])\n\n# vim: filetype=pyopencl:fdm=marker\n" ]
[ [ "numpy.max", "numpy.array", "numpy.linalg.norm", "numpy.dot", "numpy.zeros", "numpy.minimum", "numpy.set_printoptions", "numpy.sum", "numpy.min", "numpy.where", "numpy.arange", "numpy.sort", "numpy.random.randint", "numpy.cumsum", "numpy.vdot", "numpy.dtype", "numpy.unique", "numpy.maximum" ] ]
animolopez/arctic_sound
[ "33a63d10b7734e2539efa5f318d2cc98c6d491e9" ]
[ "sinewave.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport IPython.display\n\nimport sys\nsys.path.append(r\"C:\\Users\\作業用\\Documents\\module\")\nfrom iowave import *\nfrom spectrogram import *\n\nA = 0.8 # amplitude\nf = 440 # frequency [Hz]\nlength = 1 # duration [s]\nfrs = 44100 # sampling rate [Hz]\n\n# Generate the sine wave y(t) = A*sin(2*pi*f*t)\nt = np.arange(0, length, 1 / frs)\ny = A * np.sin(2*np.pi*f*t)\n\n# Plot the waveform\nplt.plot(t, y)\nplt.xlabel(\"time [s]\")\nplt.ylabel(\"amplitude\")\nplt.show()\n\n# Spectral views, using the helpers from the local spectrogram module\nshowSpectrogram(y,length,N=1024,Window='hamming')\nshowFFT(y,frs=frs,frange=4000)\nshowSTFT(y,frs=frs,frange=4000)\n\n# Audio playback widget (only rendered inside a notebook)\nIPython.display.Audio(y, rate = frs)\n\n# Save as a mono, 16-bit WAV file: params = (channels, sample width in bytes, rate)\nwriteWave(r\"C:\\Users\\作業用\\Documents\\python\\sinewave.wav\", y, params=(1, 2, 44100))\n" ]
[ [ "numpy.sin", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "numpy.arange", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
ameli/detkit
[ "54a1d5116e668f18be57954ba012f6061044668e" ]
[ "detkit/_utilities/plot_utilities.py" ]
[ "# SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <[email protected]>\n# SPDX-License-Identifier: BSD-3-Clause\n# SPDX-FileType: SOURCE\n#\n# This program is free software: you can redistribute it and/or modify it\n# under the terms of the license found in the LICENSE.txt file in the root\n# directory of this source tree.\n\n\n# =======\n# Imports\n# =======\n\nimport os\nimport platform\nimport matplotlib\nimport matplotlib.ticker\nfrom matplotlib.ticker import PercentFormatter # noqa: F401\nfrom mpl_toolkits.mplot3d import Axes3D # noqa: F401\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes # noqa: F401\nfrom mpl_toolkits.axes_grid1.inset_locator import InsetPosition # noqa: F401\nfrom mpl_toolkits.axes_grid1.inset_locator import mark_inset # noqa: F401\nfrom matplotlib.ticker import ScalarFormatter, NullFormatter # noqa: F401\nfrom matplotlib.ticker import FormatStrFormatter # noqa: F401\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable # noqa: F401\n\nfrom distutils.spawn import find_executable\nfrom .display_utilities import is_notebook\nimport logging\nimport warnings\n\n# Check DISPLAY\nif ((not bool(os.environ.get('DISPLAY', None))) or\n (bool(os.environ.get('DETKIT_NO_DISPLAY', None)))) and \\\n (not is_notebook()):\n\n # No display found (used on servers). Using non-interactive backend\n if platform.system() == 'Darwin':\n # For MacOS, first, use macos backend, \"then\" import pyplot\n matplotlib.use('agg')\n import matplotlib.pyplot as plt\n else:\n # For Linux and Windows, \"first\" import pyplot, then use Agg backend.\n import matplotlib.pyplot as plt\n plt.switch_backend('agg')\nelse:\n # Display exists. Import pyplot without changing any backend.\n import matplotlib.pyplot as plt\n\n# Remove plt.tight_layout() warning\nlogging.captureWarnings(True)\nwarnings.filterwarnings(\n action='ignore',\n module='matplotlib',\n category=UserWarning,\n message=('This figure includes Axes that are not compatible with ' +\n 'tight_layout, so results might be incorrect.'))\n\n\n# ==================\n# load plot settings\n# ==================\n\ndef load_plot_settings():\n \"\"\"\n Specifies general settings for the plots in the example scripts,\n namely, it sets plot themes by ``seaborn``, fonts by LaTeX if available.\n \"\"\"\n\n # Color palette\n import seaborn as sns\n # sns.set()\n\n # LaTeX\n if find_executable('latex'):\n try:\n # plt.rc('text',usetex=True)\n matplotlib.rcParams['text.usetex'] = True\n matplotlib.rcParams['text.latex.preamble'] = \\\n r'\\usepackage{amsmath}'\n\n # LaTeX font is a bit small. 
Increase axes font size\n sns.set(font_scale=1.2)\n\n except Exception:\n pass\n\n # Style sheet\n sns.set_style(\"white\")\n sns.set_style(\"ticks\")\n\n # Font (Note: this should be AFTER the plt.style.use)\n plt.rc('font', family='serif')\n plt.rcParams['svg.fonttype'] = 'none' # text in svg will be text not path\n\n # from cycler import cycler\n # matplotlib.rcParams['axes.prop_cycle'] = cycler(color='bgrcmyk')\n\n\n# =========\n# save plot\n# =========\n\ndef save_plot(\n plt,\n filename,\n transparent_background=True,\n pdf=True,\n bbox_extra_artists=None,\n verbose=False):\n \"\"\"\n Saves plot as svg format in the current working directory.\n\n :param plt: matplotlib.pyplot object for the plots.\n :type plt: matplotlib.pyplot\n\n :param filename: Name of the file without extension or directory name.\n :type filename: string\n\n :param transparent_background: Sets the background of the svg file to be\n transparent.\n :type transparent_background: bool\n \"\"\"\n\n # Write in the current working directory\n save_dir = os.getcwd()\n\n # Save plot in svg format\n filename_svg = filename + '.svg'\n filename_pdf = filename + '.pdf'\n if os.access(save_dir, os.W_OK):\n save_fullname_svg = os.path.join(save_dir, filename_svg)\n save_fullname_pdf = os.path.join(save_dir, filename_pdf)\n\n plt.savefig(\n save_fullname_svg,\n transparent=transparent_background,\n bbox_inches='tight')\n if verbose:\n print('Plot saved to \"%s\".' % (save_fullname_svg))\n\n if pdf:\n plt.savefig(\n save_fullname_pdf,\n transparent=transparent_background,\n bbox_extra_artists=bbox_extra_artists, bbox_inches='tight')\n if verbose:\n print('Plot saved to \"%s\".' % (save_fullname_pdf))\n else:\n print('Cannot save plot to %s. Directory is not writable.' % save_dir)\n\n\n# =================\n# show or save plot\n# =================\n\ndef show_or_save_plot(\n plt,\n filename,\n transparent_background=True,\n pdf=True,\n bbox_extra_artists=None,\n verbose=False):\n \"\"\"\n Shows the plot. If no graphical backend exists, saves the plot.\n \"\"\"\n\n # Check if the graphical back-end exists\n if matplotlib.get_backend() != 'agg' or is_notebook():\n plt.show()\n else:\n # write the plot as SVG file in the current working directory\n save_plot(plt, filename, transparent_background=transparent_background,\n pdf=pdf, bbox_extra_artists=bbox_extra_artists,\n verbose=verbose)\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.switch_backend", "matplotlib.pyplot.savefig", "matplotlib.pyplot.rc", "matplotlib.pyplot.show", "matplotlib.get_backend" ] ]
Cognive-in/coursera_ML_Python
[ "f174d350321aab0ef9cd14a41e425f3e753fdaa5" ]
[ "Ex_1/data/gradientDescent.py" ]
[ "import numpy as np\nimport data.computeCost as cc\n\ndef gradientDescent(X, y, theta, alpha, num_iters):\n # Batch gradient descent: take num_iters steps of size alpha,\n # recording the cost at every iteration\n m = len(y) # number of training examples\n J_hist = np.zeros((num_iters, 1))\n for i in range(num_iters):\n # Vectorized update: theta := theta - (alpha/m) * X'(X*theta - y)\n theta = theta - alpha*(1.0/m)*np.transpose(X).dot(X.dot(theta)-np.transpose([y]))\n J_hist[i] = cc.computeCost(X, y, theta)\n # Return the cost history along with theta so convergence can be inspected\n return theta, J_hist" ]
[ [ "numpy.transpose", "numpy.zeros" ] ]
vatsan/shap
[ "6a23d95321501ac3c1c203cdc9eedf211ee34706" ]
[ "shap/plots/summary.py" ]
[ "\"\"\" Summary plots of SHAP values across a whole dataset.\n\"\"\"\n\nfrom __future__ import division\n\nimport warnings\nimport numpy as np\nfrom scipy.stats import gaussian_kde\ntry:\n import matplotlib.pyplot as pl\nexcept ImportError as e:\n warnings.warn(\"matplotlib could not be loaded!\", e)\n pass\nfrom . import labels\nfrom . import colors\n\n# TODO: remove unused title argument / use title argument\ndef summary_plot(shap_values, features=None, feature_names=None, max_display=None, plot_type=\"dot\",\n color=None, axis_color=\"#333333\", title=None, alpha=1, show=True, sort=True,\n color_bar=True, auto_size_plot=True, layered_violin_max_num_bins=20, class_names=None):\n \"\"\"Create a SHAP summary plot, colored by feature values when they are provided.\n\n Parameters\n ----------\n shap_values : numpy.array\n Matrix of SHAP values (# samples x # features)\n\n features : numpy.array or pandas.DataFrame or list\n Matrix of feature values (# samples x # features) or a feature_names list as shorthand\n\n feature_names : list\n Names of the features (length # features)\n\n max_display : int\n How many top features to include in the plot (default is 20, or 7 for interaction plots)\n\n plot_type : \"dot\" (default) or \"violin\"\n What type of summary plot to produce\n \"\"\"\n\n multi_class = False\n if isinstance(shap_values, list):\n multi_class = True\n plot_type = \"bar\" # only type supported for now\n else:\n assert len(shap_values.shape) != 1, \"Summary plots need a matrix of shap_values, not a vector.\"\n\n # default color:\n if color is None:\n if plot_type == 'layered_violin':\n color = \"coolwarm\"\n elif multi_class:\n color = lambda i: colors.red_blue_circle(i/len(shap_values))\n else:\n color = colors.blue_rgb\n\n # convert from a DataFrame or other types\n if str(type(features)) == \"<class 'pandas.core.frame.DataFrame'>\":\n if feature_names is None:\n feature_names = features.columns\n features = features.values\n elif isinstance(features, list):\n if feature_names is None:\n feature_names = features\n features = None\n elif (features is not None) and len(features.shape) == 1 and feature_names is None:\n feature_names = features\n features = None\n\n num_features = (shap_values[0].shape[1] if multi_class else shap_values.shape[1])\n\n if feature_names is None:\n feature_names = np.array([labels['FEATURE'] % str(i) for i in range(num_features)])\n\n # plotting SHAP interaction values\n if not multi_class and len(shap_values.shape) == 3:\n if max_display is None:\n max_display = 7\n else:\n max_display = min(len(feature_names), max_display)\n\n sort_inds = np.argsort(-np.abs(shap_values.sum(1)).sum(0))\n\n # get plotting limits\n delta = 1.0 / (shap_values.shape[1] ** 2)\n slow = np.nanpercentile(shap_values, delta)\n shigh = np.nanpercentile(shap_values, 100 - delta)\n v = max(abs(slow), abs(shigh))\n slow = -v\n shigh = v\n\n pl.figure(figsize=(1.5 * max_display + 1, 0.8 * max_display + 1))\n pl.subplot(1, max_display, 1)\n proj_shap_values = shap_values[:, sort_inds[0], sort_inds]\n proj_shap_values[:, 1:] *= 2 # because off diag effects are split in half\n summary_plot(\n proj_shap_values, features[:, sort_inds] if features is not None else None,\n feature_names=feature_names[sort_inds],\n sort=False, show=False, color_bar=False,\n auto_size_plot=False,\n max_display=max_display\n )\n pl.xlim((slow, shigh))\n pl.xlabel(\"\")\n title_length_limit = 11\n pl.title(shorten_text(feature_names[sort_inds[0]], title_length_limit))\n for i in range(1, min(len(sort_inds), 
max_display)):\n ind = sort_inds[i]\n pl.subplot(1, max_display, i + 1)\n proj_shap_values = shap_values[:, ind, sort_inds]\n proj_shap_values *= 2\n proj_shap_values[:, i] /= 2 # because only off diag effects are split in half\n summary_plot(\n proj_shap_values, features[:, sort_inds] if features is not None else None,\n sort=False,\n feature_names=[\"\" for i in range(len(feature_names))],\n show=False,\n color_bar=False,\n auto_size_plot=False,\n max_display=max_display\n )\n pl.xlim((slow, shigh))\n pl.xlabel(\"\")\n if i == min(len(sort_inds), max_display) // 2:\n pl.xlabel(labels['INTERACTION_VALUE'])\n pl.title(shorten_text(feature_names[ind], title_length_limit))\n pl.tight_layout(pad=0, w_pad=0, h_pad=0.0)\n pl.subplots_adjust(hspace=0, wspace=0.1)\n if show:\n pl.show()\n return\n\n if max_display is None:\n max_display = 20\n\n if sort:\n # order features by the sum of their effect magnitudes\n if multi_class:\n feature_order = np.argsort(np.sum(np.mean(np.abs(shap_values), axis=0), axis=0))\n else:\n feature_order = np.argsort(np.sum(np.abs(shap_values), axis=0))\n feature_order = feature_order[-min(max_display, len(feature_order)):]\n else:\n feature_order = np.flip(np.arange(min(max_display, num_features)), 0)\n\n row_height = 0.4\n if auto_size_plot:\n pl.gcf().set_size_inches(8, len(feature_order) * row_height + 1.5)\n pl.axvline(x=0, color=\"#999999\", zorder=-1)\n\n if plot_type == \"dot\":\n for pos, i in enumerate(feature_order):\n pl.axhline(y=pos, color=\"#cccccc\", lw=0.5, dashes=(1, 5), zorder=-1)\n shaps = shap_values[:, i]\n values = None if features is None else features[:, i]\n inds = np.arange(len(shaps))\n np.random.shuffle(inds)\n if values is not None:\n values = values[inds]\n shaps = shaps[inds]\n colored_feature = True\n try:\n values = np.array(values, dtype=np.float64) # make sure this can be numeric\n except:\n colored_feature = False\n N = len(shaps)\n # hspacing = (np.max(shaps) - np.min(shaps)) / 200\n # curr_bin = []\n nbins = 100\n quant = np.round(nbins * (shaps - np.min(shaps)) / (np.max(shaps) - np.min(shaps) + 1e-8))\n inds = np.argsort(quant + np.random.randn(N) * 1e-6)\n layer = 0\n last_bin = -1\n ys = np.zeros(N)\n for ind in inds:\n if quant[ind] != last_bin:\n layer = 0\n ys[ind] = np.ceil(layer / 2) * ((layer % 2) * 2 - 1)\n layer += 1\n last_bin = quant[ind]\n ys *= 0.9 * (row_height / np.max(ys + 1))\n\n if features is not None and colored_feature:\n # trim the color range, but prevent the color range from collapsing\n vmin = np.nanpercentile(values, 5)\n vmax = np.nanpercentile(values, 95)\n if vmin == vmax:\n vmin = np.nanpercentile(values, 1)\n vmax = np.nanpercentile(values, 99)\n if vmin == vmax:\n vmin = np.min(values)\n vmax = np.max(values)\n\n assert features.shape[0] == len(shaps), \"Feature and SHAP matrices must have the same number of rows!\"\n\n # plot the nan values in the interaction feature as grey\n nan_mask = np.isnan(values)\n pl.scatter(shaps[nan_mask], pos + ys[nan_mask], color=\"#777777\", vmin=vmin,\n vmax=vmax, s=16, alpha=alpha, linewidth=0,\n zorder=3, rasterized=len(shaps) > 500)\n\n # plot the non-nan values colored by the trimmed feature value\n cvals = values[np.invert(nan_mask)].astype(np.float64)\n cvals_imp = cvals.copy()\n cvals_imp[np.isnan(cvals)] = (vmin + vmax) / 2.0\n cvals[cvals_imp > vmax] = vmax\n cvals[cvals_imp < vmin] = vmin\n pl.scatter(shaps[np.invert(nan_mask)], pos + ys[np.invert(nan_mask)],\n cmap=colors.red_blue, vmin=vmin, vmax=vmax, s=16,\n c=cvals, alpha=alpha, linewidth=0,\n 
zorder=3, rasterized=len(shaps) > 500)\n else:\n\n pl.scatter(shaps, pos + ys, s=16, alpha=alpha, linewidth=0, zorder=3,\n color=color if colored_feature else \"#777777\", rasterized=len(shaps) > 500)\n\n elif plot_type == \"violin\":\n for pos, i in enumerate(feature_order):\n pl.axhline(y=pos, color=\"#cccccc\", lw=0.5, dashes=(1, 5), zorder=-1)\n\n if features is not None:\n global_low = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 1)\n global_high = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 99)\n for pos, i in enumerate(feature_order):\n shaps = shap_values[:, i]\n shap_min, shap_max = np.min(shaps), np.max(shaps)\n rng = shap_max - shap_min\n xs = np.linspace(np.min(shaps) - rng * 0.2, np.max(shaps) + rng * 0.2, 100)\n if np.std(shaps) < (global_high - global_low) / 100:\n ds = gaussian_kde(shaps + np.random.randn(len(shaps)) * (global_high - global_low) / 100)(xs)\n else:\n ds = gaussian_kde(shaps)(xs)\n ds /= np.max(ds) * 3\n\n values = features[:, i]\n window_size = max(10, len(values) // 20)\n smooth_values = np.zeros(len(xs) - 1)\n sort_inds = np.argsort(shaps)\n trailing_pos = 0\n leading_pos = 0\n running_sum = 0\n back_fill = 0\n for j in range(len(xs) - 1):\n\n while leading_pos < len(shaps) and xs[j] >= shaps[sort_inds[leading_pos]]:\n running_sum += values[sort_inds[leading_pos]]\n leading_pos += 1\n if leading_pos - trailing_pos > 20:\n running_sum -= values[sort_inds[trailing_pos]]\n trailing_pos += 1\n if leading_pos - trailing_pos > 0:\n smooth_values[j] = running_sum / (leading_pos - trailing_pos)\n for k in range(back_fill):\n smooth_values[j - k - 1] = smooth_values[j]\n else:\n back_fill += 1\n\n vmin = np.nanpercentile(values, 5)\n vmax = np.nanpercentile(values, 95)\n if vmin == vmax:\n vmin = np.nanpercentile(values, 1)\n vmax = np.nanpercentile(values, 99)\n if vmin == vmax:\n vmin = np.min(values)\n vmax = np.max(values)\n pl.scatter(shaps, np.ones(shap_values.shape[0]) * pos, s=9, cmap=colors.red_blue, vmin=vmin, vmax=vmax,\n c=values, alpha=alpha, linewidth=0, zorder=1)\n # smooth_values -= nxp.nanpercentile(smooth_values, 5)\n # smooth_values /= np.nanpercentile(smooth_values, 95)\n smooth_values -= vmin\n if vmax - vmin > 0:\n smooth_values /= vmax - vmin\n for i in range(len(xs) - 1):\n if ds[i] > 0.05 or ds[i + 1] > 0.05:\n pl.fill_between([xs[i], xs[i + 1]], [pos + ds[i], pos + ds[i + 1]],\n [pos - ds[i], pos - ds[i + 1]], color=colors.red_blue(smooth_values[i]),\n zorder=2)\n\n else:\n parts = pl.violinplot(shap_values[:, feature_order], range(len(feature_order)), points=200, vert=False,\n widths=0.7,\n showmeans=False, showextrema=False, showmedians=False)\n\n for pc in parts['bodies']:\n pc.set_facecolor(color)\n pc.set_edgecolor('none')\n pc.set_alpha(alpha)\n\n elif plot_type == \"layered_violin\": # courtesy of @kodonnell\n num_x_points = 200\n bins = np.linspace(0, features.shape[0], layered_violin_max_num_bins + 1).round(0).astype(\n 'int') # the indices of the feature data corresponding to each bin\n shap_min, shap_max = np.min(shap_values), np.max(shap_values)\n x_points = np.linspace(shap_min, shap_max, num_x_points)\n\n # loop through each feature and plot:\n for pos, ind in enumerate(feature_order):\n # decide how to handle: if #unique < layered_violin_max_num_bins then split by unique value, otherwise use bins/percentiles.\n # to keep simpler code, in the case of uniques, we just adjust the bins to align with the unique counts.\n feature = features[:, ind]\n unique, counts = np.unique(feature, 
return_counts=True)\n if unique.shape[0] <= layered_violin_max_num_bins:\n order = np.argsort(unique)\n thesebins = np.cumsum(counts[order])\n thesebins = np.insert(thesebins, 0, 0)\n else:\n thesebins = bins\n nbins = thesebins.shape[0] - 1\n # order the feature data so we can apply percentiling\n order = np.argsort(feature)\n # x axis is located at y0 = pos, with pos being there for offset\n y0 = np.ones(num_x_points) * pos\n # calculate kdes:\n ys = np.zeros((nbins, num_x_points))\n for i in range(nbins):\n # get shap values in this bin:\n shaps = shap_values[order[thesebins[i]:thesebins[i + 1]], ind]\n # if there's only one element, then we can't\n if shaps.shape[0] == 1:\n warnings.warn(\n \"not enough data in bin #%d for feature %s, so it'll be ignored. Try increasing the number of records to plot.\"\n % (i, feature_names[ind]))\n # to ignore it, just set it to the previous y-values (so the area between them will be zero). Not ys is already 0, so there's\n # nothing to do if i == 0\n if i > 0:\n ys[i, :] = ys[i - 1, :]\n continue\n # save kde of them: note that we add a tiny bit of gaussian noise to avoid singular matrix errors\n ys[i, :] = gaussian_kde(shaps + np.random.normal(loc=0, scale=0.001, size=shaps.shape[0]))(x_points)\n # scale it up so that the 'size' of each y represents the size of the bin. For continuous data this will\n # do nothing, but when we've gone with the unqique option, this will matter - e.g. if 99% are male and 1%\n # female, we want the 1% to appear a lot smaller.\n size = thesebins[i + 1] - thesebins[i]\n bin_size_if_even = features.shape[0] / nbins\n relative_bin_size = size / bin_size_if_even\n ys[i, :] *= relative_bin_size\n # now plot 'em. We don't plot the individual strips, as this can leave whitespace between them.\n # instead, we plot the full kde, then remove outer strip and plot over it, etc., to ensure no\n # whitespace\n ys = np.cumsum(ys, axis=0)\n width = 0.8\n scale = ys.max() * 2 / width # 2 is here as we plot both sides of x axis\n for i in range(nbins - 1, -1, -1):\n y = ys[i, :] / scale\n c = pl.get_cmap(color)(i / (\n nbins - 1)) if color in pl.cm.datad else color # if color is a cmap, use it, otherwise use a color\n pl.fill_between(x_points, pos - y, pos + y, facecolor=c)\n pl.xlim(shap_min, shap_max)\n\n elif not multi_class and plot_type == \"bar\":\n feature_inds = feature_order[:max_display]\n y_pos = np.arange(len(feature_inds))\n global_shap_values = np.abs(shap_values).mean(0)\n pl.barh(y_pos, global_shap_values[feature_inds], 0.7, align='center', color=color)\n pl.yticks(y_pos, fontsize=13)\n pl.gca().set_yticklabels([feature_names[i] for i in feature_inds])\n\n elif multi_class and plot_type == \"bar\":\n if class_names is None:\n class_names = [\"Class \"+str(i) for i in range(len(shap_values))]\n feature_inds = feature_order[:max_display]\n y_pos = np.arange(len(feature_inds))\n left_pos = np.zeros(len(feature_inds))\n\n class_inds = np.argsort([-np.abs(shap_values[i]).mean() for i in range(len(shap_values))])\n for i,ind in enumerate(class_inds):\n global_shap_values = np.abs(shap_values[ind]).mean(0)\n pl.barh(\n y_pos, global_shap_values[feature_inds], 0.7, left=left_pos, align='center',\n color=color(i), label=class_names[ind]\n )\n left_pos += global_shap_values[feature_inds]\n pl.yticks(y_pos, fontsize=13)\n pl.gca().set_yticklabels([feature_names[i] for i in feature_inds])\n pl.legend(frameon=False, fontsize=12)\n\n # draw the color bar\n if color_bar and features is not None and plot_type != \"bar\" and \\\n 
(plot_type != \"layered_violin\" or color in pl.cm.datad):\n import matplotlib.cm as cm\n m = cm.ScalarMappable(cmap=colors.red_blue if plot_type != \"layered_violin\" else pl.get_cmap(color))\n m.set_array([0, 1])\n cb = pl.colorbar(m, ticks=[0, 1], aspect=1000)\n cb.set_ticklabels([labels['FEATURE_VALUE_LOW'], labels['FEATURE_VALUE_HIGH']])\n cb.set_label(labels['FEATURE_VALUE'], size=12, labelpad=0)\n cb.ax.tick_params(labelsize=11, length=0)\n cb.set_alpha(1)\n cb.outline.set_visible(False)\n bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())\n cb.ax.set_aspect((bbox.height - 0.9) * 20)\n # cb.draw_all()\n\n pl.gca().xaxis.set_ticks_position('bottom')\n pl.gca().yaxis.set_ticks_position('none')\n pl.gca().spines['right'].set_visible(False)\n pl.gca().spines['top'].set_visible(False)\n pl.gca().spines['left'].set_visible(False)\n pl.gca().tick_params(color=axis_color, labelcolor=axis_color)\n pl.yticks(range(len(feature_order)), [feature_names[i] for i in feature_order], fontsize=13)\n if plot_type != \"bar\":\n pl.gca().tick_params('y', length=20, width=0.5, which='major')\n pl.gca().tick_params('x', labelsize=11)\n pl.ylim(-1, len(feature_order))\n if plot_type == \"bar\":\n pl.xlabel(labels['GLOBAL_VALUE'], fontsize=13)\n else:\n pl.xlabel(labels['VALUE'], fontsize=13)\n if show:\n pl.show()\n\ndef shorten_text(text, length_limit):\n if len(text) > length_limit:\n return text[:length_limit - 3] + \"...\"\n else:\n return text\n" ]
[ [ "matplotlib.pyplot.xlim", "numpy.min", "numpy.invert", "matplotlib.pyplot.gcf", "numpy.cumsum", "numpy.max", "numpy.random.normal", "matplotlib.pyplot.colorbar", "numpy.nanpercentile", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.gca", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.subplot", "numpy.array", "numpy.zeros", "matplotlib.pyplot.axhline", "numpy.random.randn", "numpy.random.shuffle", "matplotlib.pyplot.figure", "matplotlib.pyplot.yticks", "numpy.std", "numpy.argsort", "matplotlib.pyplot.show", "matplotlib.pyplot.axvline", "numpy.insert", "numpy.ceil", "numpy.isnan", "matplotlib.pyplot.xlabel", "numpy.ones", "matplotlib.pyplot.legend", "scipy.stats.gaussian_kde", "matplotlib.pyplot.barh", "numpy.abs", "numpy.linspace", "numpy.unique" ] ]
james94/P2-Advanced-Lane-Lines-CarND
[ "7661f14086c3e960826f3116d9d89048151670f8" ]
[ "lib/cv/LaneVehiclePosition.py" ]
[ "import matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport os\n\n# LaneVehiclePosition calculates vehicle's position with respect to the center\n# of the lane\n\nclass LaneVehiclePosition:\n def __init__(self):\n \"\"\"\n Initializes conversion from pixels to meters\n \"\"\"\n # Camera image has 720 relevant pixels or 30 meters long in the y-dimension\n self.ym_per_pix_m = 30/720 # Meters per Pixel in y dimension\n \n # Camera image has 700 relevant pixels or 3.7 meters wide in the x-dimension\n # 200 pixels were used on the left and 900 on the right\n self.xm_per_pix_m = 3.7/700 # Meters per Pixel in x dimension\n \n def measure_vehicle_position(self, binary_warped, left_fit, right_fit, unit_type):\n \"\"\"\n Determines vehicle's distance from center of the lane\n \"\"\"\n imshape = binary_warped.shape\n img_h = imshape[0]\n img_w = imshape[1]\n \n # Vehicle position with respect to camera mounted at the center of the car\n vehicle_position = img_w/2\n \n # Calculate x-intercept for the left and right polynomial\n left_fit_x_int = left_fit[0]*img_h**2 + left_fit[1]*img_h + left_fit[2]\n right_fit_x_int = right_fit[0]*img_h**2 + right_fit[1]*img_h + right_fit[2]\n \n # Calculate lane center position from x-intercepts\n lane_center_position = (left_fit_x_int + right_fit_x_int)/2\n \n # Calculate vehicle's distance from center of lane in pixels or meters\n if(unit_type == \"pixels\"):\n self.dist_center_m = np.abs(vehicle_position - lane_center_position)\n self.units_m = \"(p)\"\n elif(unit_type == \"meters\"):\n self.dist_center_m = np.abs(vehicle_position - lane_center_position)*self.xm_per_pix_m\n self.units_m = \"(m)\"\n else:\n self.dist_center_m = \"undefined\"\n \n # Check if vehicle's position is left to center or right to center\n if(lane_center_position > vehicle_position):\n # Side of center that the vehicle is on\n self.side_center_m = \"left of center\"\n else:\n self.side_center_m = \"right of center\"\n \n return self.dist_center_m, self.units_m, self.side_center_m\n \n def display_vehicle_position(self, frame_title):\n \"\"\"\n Displays to screen vehicle's position with respect to center\n \"\"\"\n print(\"Frame: %s\" %(frame_title))\n print(\"Vehicle is %.2f %s %s\" %(self.dist_center_m, self.units_m, self.side_center_m))\n print(\"\\n\")\n \n def save_img(self, dst_path, filename, dst_img):\n \"\"\"\n Save image using OpenCV during bird's eye view transformation process,\n such as warped image\n \"\"\"\n # If filepath doesn't exist, create it\n if not os.path.exists(dst_path):\n os.makedirs(dst_path)\n \n # Save binary image resulting from gradient thresholding\n plt.imsave(dst_path + filename, dst_img, cmap = \"gray\")\n \n def save_fig(self, dst_path, filename):\n \"\"\"\n Save figure using OpenCV during bird's eye view transformation process,\n such as source_points, destination_points, etc\n \"\"\"\n # If filepath doesn't exist, create it\n if not os.path.exists(dst_path):\n os.makedirs(dst_path)\n \n # Save current figure\n plt.savefig(dst_path + filename) " ]
[ [ "matplotlib.pyplot.imsave", "matplotlib.pyplot.savefig", "numpy.abs" ] ]
essy00/mw_classification
[ "7a8a963a9c34f51e7c4a3a64d71849ee632fc950" ]
[ "mw_classification/gui.py" ]
[ "from PIL import ImageTk, Image\r\nimport cv2\r\nimport numpy as np\r\nimport tensorflow\r\n\r\nimport tkinter.filedialog\r\nimport tkinter as tk\r\nimport shutil\r\nimport os\r\n\r\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\nmodel = tensorflow.keras.models.load_model('model.h5')\r\n\r\n\r\ndef ai(image_path: str):\r\n \"\"\"\r\n Predicts if the person in the image is man or woman.\r\n Then with a face detection prints the prediction.\r\n\r\n Args:\r\n image_path (str): The image path.\r\n\r\n Returns:\r\n str: The result (\"Man\" or \"Woman\").\r\n int: The prediction of the machine (0 <= pred <= 1)\r\n np.array: The image to show\r\n \"\"\"\r\n img_size = 64\r\n\r\n ai_image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\r\n ai_image = cv2.resize(ai_image, (img_size, img_size))\r\n ai_image = np.array(ai_image).reshape(-1, img_size, img_size, 1) / 255.0\r\n\r\n pred = model.predict(ai_image)\r\n result = \"Man\" if pred < 0.5 else \"Woman\"\r\n\r\n real_image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)\r\n real_image = cv2.resize(real_image, (640, 640))\r\n gray = cv2.cvtColor(real_image, cv2.COLOR_BGR2GRAY)\r\n faces = face_cascade.detectMultiScale(gray, 1.1, 4)\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(real_image, (x, y), (x+w, y+h), (255, 0, 0), 2)\r\n cv2.putText(\r\n real_image,\r\n result,\r\n (x, y-10),\r\n cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.9,\r\n (0, 255, 69),\r\n 2\r\n )\r\n\r\n return result, pred, real_image\r\n\r\n\r\nclass Root(tk.Tk):\r\n def __init__(self):\r\n super(Root, self).__init__()\r\n self.title(\"AI\")\r\n self.minsize(640, 400)\r\n\r\n self.labelFrame = tk.LabelFrame(self, text=\"Open A File\")\r\n self.labelFrame.grid(column=0, row=1, padx=20, pady=20)\r\n self.upload_btn()\r\n\r\n self.image_path_label = tk.Label(text=\"\")\r\n self.image_path_label.grid(column=0, row=2)\r\n\r\n self.image = None\r\n\r\n self.image_label = tk.Label()\r\n self.image_label.grid()\r\n\r\n def upload_btn(self):\r\n \"\"\"\r\n When it's clicked, calls the self.file_dialog function.\r\n \"\"\"\r\n button = tk.Button(\r\n self.labelFrame,\r\n text=\"Browse A File\",\r\n command=self.file_dialog\r\n )\r\n button.grid(column=1, row=1)\r\n\r\n def file_dialog(self):\r\n \"\"\"\r\n Takes the path, predicts and shows the returned image.\r\n \"\"\"\r\n real_path = tk.filedialog.askopenfilename(\r\n initialdir=\"/\",\r\n title=\"Select A File\",\r\n filetypes=(\r\n (\"jpeg\", \"*.jpg\"),\r\n (\"png\", \"*.png\")\r\n )\r\n )\r\n\r\n tmp_path = f\"./{real_path.split('/')[-1]}\"\r\n\r\n try:\r\n shutil.copyfile(\r\n real_path,\r\n tmp_path\r\n )\r\n except Exception:\r\n pass\r\n\r\n result, pred, real_image = ai(tmp_path)\r\n\r\n os.remove(tmp_path)\r\n\r\n self.image_path_label.configure(\r\n text=f'Uploaded: {real_path.split(\"/\")[-1]}'\r\n )\r\n self.image = ImageTk.PhotoImage(Image.fromarray(real_image))\r\n self.image_label.configure(image=self.image)\r\n self.image_label.image = self.image\r\n\r\n\r\ndef main():\r\n root = Root()\r\n root.mainloop()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.array" ] ]
raphael-roemer/ML-inference-effective-parameters-prediction
[ "e5b34e7222579720e9e3a453f41af4762557fc41" ]
[ "ML/machine_learning/model_fit.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import GridSearchCV, learning_curve,ShuffleSplit, train_test_split\nimport os\nimport time\nimport shap\nimport xgboost as xgb\n\nareas = ['CE']\ndata_version = '2021-07-14_3'\n#targets = ['g1','g2','q','r','D','mu_w_0','mu_a_0','RoCof','nadir','MeanDevInFirstHalf','Loglike']\ntargets = ['MeanDevInFirstHalf']#['vari','nadir','nad','g1','g2','D','q','r']#,'RoCoFLong','mu_w_0','mu_a_0','RoCof','MeanDevInFirstHalf','Loglike']\n\n\nstart_time = time.time()\nfor area in areas:\n\n print('---------------------------- ', area, ' ------------------------------------')\n\n #data_folder = './prepared_data/{}/version-{}/'.format(area,data_version)\n data_folder = './prepared_data/{}/{}/'.format(area,data_version)\n\n for target in targets:\n\n print('-------- ', target, ' --------')\n\n res_folder = './Results/model_fit/{}/version-{}/target_{}/'.format(area,data_version, target)\n\n if not os.path.exists(res_folder):\n os.makedirs(res_folder)\n\n y_train = pd.read_hdf(data_folder+'y_train.h5').loc[:, target]\n y_test = pd.read_hdf(data_folder+'y_test.h5').loc[:, target]\n\n if os.path.exists(res_folder+'y_pred.h5'):\n y_pred = pd.read_hdf(res_folder+'y_pred.h5')\n y_pred_cont = pd.read_hdf(res_folder+'y_pred_cont.h5')\n else:\n y_pred = pd.read_hdf(data_folder+'y_pred.h5') #contains only time index\n y_pred_cont = pd.read_hdf(data_folder+'y_pred_cont.h5') #contains only time index\n\n for actual in ['_act']: #_act: full model, '': just day ahead\n\n # Load data\n X_train = pd.read_hdf(data_folder+'X_train{}.h5'.format(actual))\n X_test = pd.read_hdf(data_folder+'X_test{}.h5'.format(actual))\n X_test_cont = pd.read_hdf(data_folder+'X_test{}_cont.h5'.format(actual))\n\n\n # Daily profile prediction\n\n daily_profile = y_train.groupby(X_train.index.time).mean()\n y_pred['daily_profile'] = [daily_profile[time] for time in y_test.index.time]\n y_pred_cont['daily_profile'] = [daily_profile[time] for time in y_pred_cont.index.time]\n\n\n # Gradient boosting Regressor CV hyperparameter optimization\n\n X_train_train, X_train_val, y_train_train, y_train_val = train_test_split(X_train, y_train,\n test_size=0.2)\n\n #params_grid = {\n # 'max_depth':[5],\n # 'learning_rate':[0.1],#[0.01,0.05,0.1, 0.2],\n # 'subsample':[1,0.7],#[1,0.7,0.4,0.1],\n # #'reg_lambda':[ 0.1, 1, 10, 50]\n #}\n params_grid = {\n 'max_depth':[2,3,5,7,9,11],\n 'learning_rate':[0.01,0.05,0.1, 0.2],\n 'subsample': [1,0.7,0.4,0.1] ,\n #'reg_lambda':[ 0.1, 1, 10],\n 'min_child_weight':[1,5,10]\n }\n\n fit_params = {\n 'eval_set':[(X_train_train, y_train_train),(X_train_val, y_train_val)],\n 'early_stopping_rounds':20,\n 'verbose':0\n }\n\n grid_search = GridSearchCV(xgb.XGBRegressor(objective='reg:squarederror', n_estimators=1000,\n verbosity=0),\n params_grid, verbose=1, n_jobs=-1, refit=False, cv=5)\n\n grid_search.fit(X_train_train, y_train_train, **fit_params)\n\n pd.DataFrame(grid_search.cv_results_).to_csv(res_folder+'cv_results_gtb{}.csv'.format(actual))\n pd.DataFrame(grid_search.best_params_,\n index=['optimal']).to_csv(res_folder+'cv_best_params_gtb{}.csv'.format(actual))\n\n\n # Gradient boosting regression best model evaluation\n\n params=pd.read_csv(res_folder+'cv_best_params_gtb{}.csv'.format(actual),\n index_col=[0]).to_dict('records')[0]\n\n\n model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=1000, **params)\n model.fit(X_train_train, y_train_train, **fit_params)\n\n\n shap_vals = shap.TreeExplainer(model).shap_values(X_test)\n 
np.save(res_folder + 'shap_values_gtb{}.npy'.format(actual), shap_vals)\n\n # shap_interact_vals = shap.TreeExplainer(model).shap_interaction_values(X_test)\n # np.save(res_folder + 'shap_interaction_values_gtb{}.npy'.format(actual), shap_interact_vals)\n\n y_pred['gtb{}'.format(actual)] = model.predict(X_test)\n y_pred_cont['gtb{}'.format(actual)] = model.predict(X_test_cont)\n\n\n\n # GTB learning curve for evaluating the fit\n\n # train_sizes, train_scores, valid_scores = learning_curve(model,\n # X_train, y_train,\n # cv=5, verbose=1, n_jobs=-1)\n # res = pd.DataFrame(index=np.arange(5), data={'train_sizes':train_sizes,\n # 'mean_train_scores':train_scores.mean(axis=-1),\n # 'mean_valid_scores': valid_scores.mean(axis=-1),\n # 'std_valid_scores': valid_scores.std(axis=-1),\n # 'std_train_scores': train_scores.std(axis=-1) })\n # res.to_csv(res_folder+'learning_curve_gtb{}.csv'.format(actual))\n\n # GTB prediction stages for evaluating the fit\n\n # res = pd.DataFrame(columns=['train_rmse','test_rmse'])\n # res.loc[:,'train_rmse'] = model.evals_result()['validation_0']['rmse']\n # res.loc[:,'test_rmse'] = model.evals_result()['validation_1']['rmse']\n # res.to_csv(res_folder+'staged_predict_gtb{}.csv'.format(actual))\n\n\n # Save prediction\n\n y_pred.to_hdf(res_folder+'y_pred.h5',key='df')\n y_pred_cont.to_hdf(res_folder+'y_pred_cont.h5',key='df')\n\n\nprint(\"Execution time: {}\".format(time.time() - start_time))\n\n# %%\n" ]
[ [ "sklearn.model_selection.train_test_split", "pandas.DataFrame", "pandas.read_hdf" ] ]
Andy-yeongjin/crawling_project_1_hotel_reviews
[ "db680e02e2349d0bdfe157d7628e2a39d94a990f" ]
[ "pros/2-1-pros.py" ]
[ "# -*- coding: utf8 -*- \nimport requests\nimport pandas as pd\nimport selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By \nfrom selenium.webdriver.support.ui import WebDriverWait \nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\nimport time \nimport re \nimport json\nimport csv\nimport random\nimport nltk\nfrom PIL import Image\nfrom konlpy.corpus import kobill\nfrom konlpy.tag import Okt\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\nfrom matplotlib import font_manager, rc\nfrom wordcloud import ImageColorGenerator\nimport platform\nfrom matplotlib import font_manager, rc\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nwith open('naver_hotels_links.csv', 'r', encoding='utf-8') as f: \n rdr = csv.reader(f) \n for line in rdr:\n links = line\n\nnumber = 800\n\nplt.rcParams['axes.unicode_minus'] = False\npath = '/usr/share/fonts/truetype/nanum/NanumMyeongjo.ttf'\nfont_name = font_manager.FontProperties(fname=path).get_name()\nrc('font', family='NanumBarunGothic')\n\nfor link in links[801:1600]:\n name = link[50:][:-8]\n\n print(number)\n number += 1\n\n doc = open(f'./txtfiles/{name}.txt').read()\n mask = np.array(Image.open('./like.png'))\n image_color = ImageColorGenerator(mask)\n name = name.replace(\"_\", '').lower()\n\n print('brought_t')\n\n try:\n t = Okt()\n tokens_ko = t.nouns(doc)\n\n ko = nltk.Text(tokens_ko)\n\n stop = ['호텔', '좀', '조금', '머리', '선택', '잠', '짐', '옆', '이용', '것', '안', '사용', '층', '방', '룸', '더', '정말', '점', '객실', '때', '수', '도', '신경', '부분', '생각', '곳', '하나', '물이', '아이', '내', '위', '듯', '다시', '줄', '느낌', '부분', '방이', '설치', '서울', '경우', '디', '시', '전혀', '때문', '등', '정도', '다른', '쪽', '알', '제공', '바로', '문의', '크게', '주변', '제', '그냥', '도로', '위', '막', '해', '아주', '이해', '분', '약간', '다음', '다른', '전', '함', '느낌', '처음', '매우', '번', '그', '꽤', '계속', '말씀', '크게', '진짜', '하나', '편이', '대한', '문제', '분', '또', '움', '확인', '자가', '관련', '두', '이', '그', '꼽자', '굳이', '거의', '모두', '구', '살짝', '굿', '날', '말', '객', '밤']\n\n ko = [each_word for each_word in ko if each_word not in stop]\n ko = nltk.Text(ko)\n\n data = ko.vocab().most_common(100)\n\n wordcloud = WordCloud(color_func=image_color, font_path=path, mask=mask, relative_scaling=0.2, background_color='black').generate_from_frequencies(dict(data))\n\n name = name.strip()\n name = name.replace('_','')\n name = name.lower()\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis('off'), plt.xticks([]), plt.yticks([])\n plt.tight_layout()\n plt.subplots_adjust(left = 0, bottom = 0, right = 1, top = 1, hspace = 0, wspace = 0)\n\n plt.savefig(f'./wc4/{name}.jpg', \n bbox_inces='tight', \n pad_inches=0, \n dpi=100\n )\n print(f'{name}done_worldcloud')\n\n except:\n print(f'{name}no_worldcloud')\n\n " ]
[ [ "matplotlib.font_manager.FontProperties", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xticks", "matplotlib.pyplot.yticks", "matplotlib.rc", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.axis", "matplotlib.pyplot.imshow" ] ]
federico2001/OpenBlender
[ "613b4c60726551f845782c961851c07d53f86aad" ]
[ "OpenBlender/OpenBlender.py" ]
[ "# Copyright (c) 2021 OpenBlender.io\n# Simplicity is key.\n\n\nfrom urllib.request import Request, urlopen\nfrom base64 import b64encode, b64decode\nfrom urllib.request import urlopen\nfrom urllib.parse import urlencode\nfrom contextlib import closing\nfrom datetime import datetime\nfrom io import StringIO\nimport datetime as dt\nimport pandas as pd\nimport numpy as np\nimport traceback\nimport requests\nimport pytz\nimport time\nimport math\nimport json\nimport zlib\nimport os\n\nVERSION = 2.11\n\ndef dameRespuestaLlamado(url, data):\n\trespuesta = ''\n\twith closing(urlopen(url, data)) as response:\n\t\trespuesta = json.loads(response.read().decode())\n\t\tif 'base64_zip' in respuesta:\n\t\t\ttry:\n\t\t\t\trespuesta = json.loads(zlib.decompress(b64decode(respuesta['base64_zip'])).decode('utf-8'))\n\t\t\texcept:\n\t\t\t\trespuesta = json.loads(zlib.decompress(b64decode(respuesta['base64_zip'])))\n\ttry:\n\t\tif 'error' in respuesta['status']:\n\t\t\tprint(\"------------------------------------------------\")\n\t\t\tprint(\"API call error: \" + str(respuesta['response']))\n\t\t\tprint(\"------------------------------------------------\")\n\t\t\tprint(\"\")\n\t\t\treturn False\n\texcept:\n\t\tprint(\"--Internal error. Please upgrade OpenBlender verison via Pip.--\")\n\t\tprint(\"-----\")\n\t\t#print(traceback.format_exc())\n\treturn respuesta\n\n\ndef call(action, json_parametros):\n\trespuesta = ''\n\ttry:\n\t\trespuesta = ''\n\t\turl = ''\n\t\tif 'oblender' in json_parametros and json_parametros['oblender'] == 1:\n\t\t\turl = 'http://3.16.237.62:8080/bronce'\n\t\telse:\n\t\t\turl = 'http://52.8.156.139/oro/'\n\t\t#print(url)\n\t\tif action == 'API_createDataset':\n\t\t\trespuesta = API_createDataset(json_parametros, url)\n\t\telif action == 'API_insertObservations':\n\t\t\trespuesta = API_insertObservationsFromDataFrame(json_parametros, url)\n\t\telif action == 'API_getObservationsFromDataset':\n\t\t\trespuesta = API_getSampleObservationsFromDataset(json_parametros, url)\n\t\telif action == 'API_powerModel':\n\t\t\trespuesta = API_powerModel(json_parametros, url)\n\t\telif action == 'API_getDataWithVectorizer':\n\t\t\trespuesta = API_getSampleObservationsWithVectorizer(json_parametros, url)\n\t\telif action == 'API_getSampleObservationsWithVectorizer':\n\t\t\trespuesta = API_getSampleObservationsWithVectorizer(json_parametros, url)\n\t\telif action == 'API_getOpenTextData':\n\t\t\trespuesta = API_getOpenTextData(json_parametros, url)\n\t\telse:\n\t\t\tdata = urlencode({'action' : action, 'json' : json.dumps(json_parametros), 'compress' : 1}).encode()\n\t\t\trespuesta = dameRespuestaLlamado(url, data)\n\t\treturn respuesta\n\texcept Exception as e:\n\t\tif 'oblender' in json_parametros and json_parametros['oblender'] == 1:\n\t\t\tprint(json.dumps({\"status\": \"internal error\", \"msg\": (e)}))\n\t\telse:\n\t\t\tprint(json.dumps({\"status\": \"internal error\", \"msg\": str(e)}))\n\t\treturn json.dumps({\"status\": \"internal error\", \"msg\": str(e)})\n\n\ndef dateToUnix(date_time_str, date_format, timezone = 'GMT'):\n timezone = pytz.timezone(timezone)\n \n if isinstance(date_time_str, str):\n date_time_obj = dt.datetime.strptime(date_time_str, date_format)\n timezone_date_time_obj = timezone.localize(date_time_obj)\n timestamp = (timezone_date_time_obj - dt.datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds()\n return timestamp\n else:\n try:\n return [((timezone.localize(dt.datetime.strptime(val, date_format)) - dt.datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds()) for val in 
date_time_str]\n except:\n print(traceback.format_exc())\n\ndef unixToDate(unix_ts, date_format = '%d-%m-%Y %H:%M:%S', timezone = 'GMT'):\n return dt.datetime.fromtimestamp(unix_ts, tz= pytz.timezone(timezone)).strftime(date_format)\n \ndef searchTimeBlends(token, anchor_ts, search_text, oblender = None):\n\tglobal VERSION\n\ttry:\n\t\tif oblender != None:\n\t\t\turl = 'http://3.16.237.62:8080/bronce'\n\t\telse:\n\t\t\turl = 'http://52.8.156.139/oro/'\n\t\ttry:\n\t\t\tanchor_ts = anchor_ts.tolist()\n\t\texcept:\n\t\t\t1 * 1\n\t\t\t#print('excepcion')\n \n\t\tjson_parametros = {\n 'token' : token,\n 'anchor_max' : max(anchor_ts),\n 'anchor_min' : min(anchor_ts),\n 'search_text' : search_text\n }\n\n\t\tjson_parametros['python_version'] = VERSION\n\t\tdata = urlencode({'action' : 'API2_searchTimeBlends', \n 'json' : json.dumps(json_parametros), 'compress' : 1}).encode()\n\t\trespuesta = dameRespuestaLlamado(url, data)\n\t\tif respuesta['status'] == 'success':\n\t\t\treturn respuesta['blends']\n\t\telse:\n\t\t\tprint(respuesta)\n\texcept Exception as e:\n\t\tif oblender != None:\n\t\t\tprint(json.dumps({\"status\": \"internal error\", \"msg\": (e)}))\n\t\telse:\n\t\t\tprint(json.dumps({\"status\": \"internal error\", \"msg\": str(e)}))\n\t\treturn json.dumps({\"status\": \"internal error\", \"msg\": str(e)})\n\ndef searchLocationBlends(token, anchor_lat, anchor_lon, search_text, oblender = None):\n\tglobal VERSION\n\ttry:\n\t\tif oblender != None:\n\t\t\turl = 'http://3.16.237.62:8080/bronce'\n\t\telse:\n\t\t\turl = 'http://52.8.156.139/oro/'\n\t\ttry:\n\t\t\tanchor_lat = anchor_lat.tolist()\n\t\t\tanchor_lon = anchor_lon.tolist()\n\t\texcept:\n\t\t\t1 * 1\n\t\t\t#print('excepcion')\n\t\tif len(anchor_lat) != len(anchor_lon):\n\t\t\tprint(' ERROR: Size of \"anchor_lat\" (' + str(len(anchor_lat)) + ') and \"anchor_lon\" (' + str(len(anchor_lon)) + ') must be the same.')\n\t\t\treturn False\n\n\t\ttry:\n\t\t\tjson_parametros = {\n\t\t\t'token' : token,\n\t\t\t'anchor_rectangle' : {'top' : max(anchor_lat),\n 'bottom' : min(anchor_lat),\n 'right' : max(anchor_lon),\n 'left' : min(anchor_lon)},\n\t\t\t'search_text' : search_text\n\t\t\t}\n\t\texcept:\n\t\t\tprint(' ERROR: All values of \"anchor_lat\" and \"anchor_lon\" must be numerical.')\n\t\t\treturn False\n \n\t\tjson_parametros['python_version'] = VERSION\n\t\tdata = urlencode({'action' : 'API2_searchLocationBlends', \n 'json' : json.dumps(json_parametros), 'compress' : 1}).encode()\n\t\trespuesta = dameRespuestaLlamado(url, data)\n\t\tif respuesta['status'] == 'success':\n\t\t\treturn respuesta['blends']\n\t\telse:\n\t\t\tprint(respuesta)\n\texcept Exception as e:\n\t\tif oblender != None:\n\t\t\tprint(json.dumps({\"status\": \"internal error\", \"msg\": (e)}))\n\t\telse:\n\t\t\tprint(json.dumps({\"status\": \"internal error\", \"msg\": str(e)}))\n\t\treturn json.dumps({\"status\": \"internal error\", \"msg\": str(e)})\n \ndef timeBlend(token, anchor_ts, blend_source,\n blend_type = 'closest_observation',\n direction = 'time_prior',\n interval_output = 'count',\n oblender = None,\n interval_size = 3600,\n consumption_confirmation = 'off',\n missing_values = 'raw',\n data_format = 'dataframe',\n print_progress = 1):\n\tglobal VERSION\n\ttry:\n\t\tif oblender != None:\n\t\t\turl = 'http://3.16.237.62:8080/bronce'\n\t\telse:\n\t\t\turl = 'http://52.8.156.139/oro/'\n\t\ttry:\n\t\t\tanchor_ts = anchor_ts.tolist()\n\t\texcept:\n\t\t\t1 * 1\n\t\t\t#print('excepcion')\n\t\t#anchor_ts.sort()\n \n\t\tjson_parametros = {\n 'token' : token,\n 
'anchor_ts' : anchor_ts,\n 'blend_source' : blend_source,\n 'blend_type' : blend_type,\n 'direction' : direction,\n 'agg_output' : interval_output,\n 'agg_interval_size' : interval_size,\n 'missing_values' : missing_values,\n 'print_progress' : print_progress\n }\n \n\t\tjson_parameters_task = {'token' : token, \n 'number_of_rows' : len(anchor_ts),\n 'blend_source' : blend_source, \n 'consumption_confirmation' : consumption_confirmation}\n\t\tprint_progress = False if 'print_progress' in json_parametros and (json_parametros['print_progress'] == 0 or json_parametros['print_progress'] == 'off') else True\n\t\tconfirm, consumption_id = initializeTask(json_parameters_task, url) #'y', 1 #\n\t\ttam_ini = 1000 if 'id_dataset' in blend_source else 350\n\t\tif confirm == 'y':\n\t\t\tif print_progress:\n\t\t\t\tprint(\"Task confirmed. Starting download..\")\n\t\t\tdf_resp = None\n\t\t\tresp_vacio = True\n\t\t\tuniverse_size = len(anchor_ts)\n\t\t\tpiece_size = len(anchor_ts) if len(anchor_ts) <= tam_ini else tam_ini\n\t\t\tfor i_act in range(0, universe_size, piece_size):\n\t\t\t\t#print(str(i_act) + ':' + str(i_act + piece_size))\n\t\t\t\tprogress = round((i_act + piece_size) / universe_size if (i_act + piece_size) < universe_size else 1, 2)\n\t\t\t\tjson_parametros['consumption_id'] = consumption_id\n\t\t\t\tjson_parametros['python_version'] = VERSION\n\t\t\t\tjson_parametros['progreso'] = progress\n\t\t\t\tjson_parametros['anchor_ts'] = anchor_ts[i_act : i_act + piece_size]\n\t\t\t\tdata = urlencode({'action' : 'API2_getTimeBlend', \n 'json' : json.dumps(json_parametros), \n 'compress' : 1}).encode()\n\t\t\t\trespuesta = dameRespuestaLlamado(url, data)\n\t\t\t\tif respuesta['status'] == 'success':\n\t\t\t\t\tif print_progress:\n\t\t\t\t\t\tprint(str(progress * 100) + '%')\n\t\t\t\t\ttime.sleep(2)\n\t\t\t\t\tif resp_vacio:\n\t\t\t\t\t\tdf_resp = pd.read_json(StringIO(json.dumps(respuesta['df_resp'])), convert_dates=False,convert_axes=False)\n\t\t\t\t\t\tresp_vacio = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tdf_resp = pd.concat([df_resp, pd.read_json(StringIO(json.dumps(respuesta['df_resp'])), convert_dates=False,convert_axes=False)], ignore_index=True)\n\t\t\t\telse:\n\t\t\t\t\tprint(respuesta)\n\t\t\tif data_format == 'dataframe':\n\t\t\t\treturn df_resp\n\t\t\telse:\n\t\t\t\treturn df_resp.to_json()\n\t\telse:\n\t\t\tprint(\"\")\n\t\t\tprint(\"Task cancelled. 
To execute tasks without prompt set 'consumption_confirmation' to 'off'.\")\n\t\t\treturn {'status' : 'cancelled'}\n\n\n\texcept Exception as e:\n\t\tif oblender != None:\n\t\t\tprint(json.dumps({\"status\": \"internal error\", \"msg\": (e)}))\n\t\telse:\n\t\t\tprint(json.dumps({\"status\": \"internal error\", \"msg\": str(e)}))\n\t\treturn json.dumps({\"status\": \"internal error\", \"msg\": str(e)})\n \n \ndef locationBlend(token, anchor_lat, anchor_lon, blend_source,\n blend_type = 'closest_observation',\n agg_output = 'count',\n oblender = None,\n n = 3,\n r = 1000,\n consumption_confirmation = 'off',\n data_format = 'dataframe',\n print_progress = 1):\n \n\tglobal VERSION\n\ttry:\n\t\tif oblender != None:\n\t\t\turl = 'http://3.16.237.62:8080/bronce'\n\t\telse:\n\t\t\turl = 'http://52.8.156.139/oro/'\n\t\ttry:\n\t\t\tanchor_lat = anchor_lat.tolist()\n\t\t\tanchor_lon = anchor_lon.tolist()\n\t\texcept:\n\t\t\t1 * 1\n\t\t\t#print('excepcion')\n\t\tif len(anchor_lat) != len(anchor_lon):\n\t\t\tprint(' ERROR: Size of \"anchor_lat\" (' + str(len(anchor_lat)) + ') and \"anchor_lon\" (' + str(len(anchor_lon)) + ') must be the same.')\n\t\t\treturn False\n\n\t\ttry:\n\t\t\tjson_parametros = {\n\t\t\t'token' : token,\n\t\t\t'anchor_rectangle' : {'top' : max(anchor_lat),\n 'bottom' : min(anchor_lat),\n 'right' : max(anchor_lon),\n 'left' : min(anchor_lon)}\n\t\t\t}\n\t\texcept:\n\t\t\tprint(traceback.format_exc())\n\t\t\tprint(' ERROR: All values of \"anchor_lat\" and \"anchor_lon\" must be numerical.')\n\t\t\treturn False\n\t\t\t#print('excepcion')\n \n\t\tjson_parametros = {\n 'token' : token,\n 'anchor_lat' : anchor_lat,\n 'anchor_lon' : anchor_lon,\n 'blend_source' : blend_source,\n 'blend_type' : blend_type,\n 'agg_output' : agg_output,\n 'n' : n,\n 'r' : r,\n 'print_progress' : print_progress\n }\n \n\t\tjson_parameters_task = {'token' : token, \n 'number_of_rows' : len(anchor_lat),\n 'blend_source' : blend_source, \n 'consumption_confirmation' : consumption_confirmation}\n\t\tprint_progress = False if 'print_progress' in json_parametros and (json_parametros['print_progress'] == 0 or json_parametros['print_progress'] == 'off') else True\n\t\tconfirm, consumption_id = initializeTask(json_parameters_task, url) #'y', 1 #\n\t\ttam_ini = 500 \n\t\tif confirm == 'y':\n\t\t\tif print_progress:\n\t\t\t\tprint(\"Task confirmed. 
Starting download..\")\n\t\t\tdf_resp = None\n\t\t\tresp_vacio = True\n\t\t\tuniverse_size = len(anchor_lat)\n\t\t\tpiece_size = len(anchor_lat) if len(anchor_lat) <= tam_ini else tam_ini\n\t\t\tfor i_act in range(0, universe_size, piece_size):\n\t\t\t\t#print(str(i_act) + ':' + str(i_act + piece_size))\n\t\t\t\tprogress = round((i_act + piece_size) / universe_size if (i_act + piece_size) < universe_size else 1, 2)\n\t\t\t\tjson_parametros['consumption_id'] = consumption_id\n\t\t\t\tjson_parametros['python_version'] = VERSION\n\t\t\t\tjson_parametros['progreso'] = progress\n\t\t\t\tjson_parametros['anchor_lat'] = anchor_lat[i_act : i_act + piece_size]\n\t\t\t\tjson_parametros['anchor_lon'] = anchor_lon[i_act : i_act + piece_size]\n\t\t\t\tdata = urlencode({'action' : 'API2_getLocationBlend', \n 'json' : json.dumps(json_parametros), \n 'compress' : 1}).encode()\n\t\t\t\trespuesta = dameRespuestaLlamado(url, data)\n\t\t\t\tif respuesta['status'] == 'success':\n\t\t\t\t\tif print_progress:\n\t\t\t\t\t\tprint(str(progress * 100) + '%')\n\t\t\t\t\ttime.sleep(2)\n\t\t\t\t\tif resp_vacio:\n\t\t\t\t\t\tdf_resp = pd.read_json(StringIO(json.dumps(respuesta['df_resp'])), convert_dates=False,convert_axes=False)\n\t\t\t\t\t\tresp_vacio = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tdf_resp = pd.concat([df_resp, pd.read_json(StringIO(json.dumps(respuesta['df_resp'])), convert_dates=False,convert_axes=False)], ignore_index=True)\n\t\t\t\telse:\n\t\t\t\t\tprint(respuesta)\n\t\t\tif data_format == 'dataframe':\n\t\t\t\treturn df_resp\n\t\t\telse:\n\t\t\t\treturn df_resp.to_json()\n\t\telse:\n\t\t\tprint(\"\")\n\t\t\tprint(\"Task cancelled. To execute tasks without prompt set 'consumption_confirmation' to 'off'.\")\n\t\t\treturn {'status' : 'cancelled'}\n\n\n\texcept Exception as e:\n\t\tif oblender != None:\n\t\t\tprint(json.dumps({\"status\": \"internal error\", \"msg\": (e)}))\n\t\telse:\n\t\t\tprint(json.dumps({\"status\": \"internal error\", \"msg\": str(e)}))\n\t\treturn json.dumps({\"status\": \"internal error\", \"msg\": str(e)})\n \ndef API_createDataset(json_parametros, url):\n\trespuesta = ''\n\taction = 'API_createDataset'\n\tnom_obs = 'dataframe' if 'dataframe' in json_parametros else 'observations'\n\tdf, valido_df, msj = comprobarJSONaDF(json_parametros[nom_obs])\n\tif not valido_df:\n\t\treturn msj\n\tn_filas = df.shape[0]\n\ttam_pedazo_ini = 1000\n\tinsert_observations = True\n\tjson_particion = json_parametros.copy()\n\tif 'insert_observations' in json_parametros:\n\t\tinsert_observations = json_parametros['insert_observations'] == 1 or json_parametros['insert_observations'] == 'on'\n\t\t\n\ttest_call = 1 if 'test_call' in json_parametros and (json_parametros['test_call'] == 1 or json_parametros['test_call'] == 'on') else False\n\t\n\tif test_call == 1:\n\t\tprint(\"\")\n\t\tprint('This is a TEST CALL, set \"test_call\" : \"off\" or remove to execute service.')\n\t\tprint(\"\")\n\trespuesta0 = None\n \n\t# Primer pedazo para crear el dataset\n\tif not test_call and n_filas > tam_pedazo_ini:\n\t\tif insert_observations:\n\t\t\tstart = time.time()\n\t\t\ttam_pedazo_ini = tam_pedazo_ini if n_filas > tam_pedazo_ini else n_filas\n\t\t\t\n\t\t\tjson_particion[nom_obs] = df.sample(n=tam_pedazo_ini).to_json()\n\t\t\tjson_particion_molde = json_particion.copy()\n\t\t\tjson_particion_molde['insert_observations'] = 0\n\t\t\tdata = urlencode({'action' : action, 'json' : json.dumps(json_particion_molde), 'compress' : 1}).encode()\n\t\t\trespuesta = dameRespuestaLlamado(url, 
data)\n\t\t\t#print(respuesta)\n\t\t\trespuesta0 = respuesta\n\t\t\tjson_particion['id_dataset'] = respuesta['id_dataset']\n\t\t\tprint(\"Dataset created successfully, id: \" + str(json_particion['id_dataset']))\n\t\t\tprint(\"Starting upload..\")\n\t\t\tstop = time.time()\n\t\t\tsegundos = math.ceil(stop - start)\n\t\t\ttam_pedazo = int(round(600 / segundos))\n\t\t\taction = 'API_insertObservationsFromDataFrame'\n\t\t\tfor i in range(0, n_filas, tam_pedazo):\n\t\t\t\tjson_particion[nom_obs] = df[i:i+tam_pedazo].to_json()\n\t\t\t\tdata = urlencode({'action' : action, 'json' : json.dumps(json_particion), 'compress' : 1}).encode()\n\t\t\t\trespuesta = dameRespuestaLlamado(url, data)\n\t\t\t\t# Print progress\n\t\t\t\tavance = round((i + tam_pedazo) / n_filas * 100, 2)\n\t\t\t\tif avance > 100:\n\t\t\t\t\tprint('100%')\n\t\t\t\t\tprint(\"Wrapping Up..\")\n\t\t\t\telse:\n\t\t\t\t\tprint(str(avance) + \"%\")\n\t\t\t\t\ttime.sleep(2)\n\t\t\t\t\t#print(\"Uploading...\")\n\t\telse:\n\t\t\tjson_particion[nom_obs] = df[0:tam_pedazo_ini].to_json()\n\t\t\tdata = urlencode({'action' : action, 'json' : json.dumps(json_particion), 'compress' : 1}).encode()\n\t\t\trespuesta = dameRespuestaLlamado(url, data)\n\t\t\treturn respuesta\n\telse:\n\t\ttam_pedazo_ini = tam_pedazo_ini if n_filas > tam_pedazo_ini else n_filas\n\t\tjson_particion[nom_obs] = df.sample(n=tam_pedazo_ini).to_json()\n\t\tdata = urlencode({'action' : action, 'json' : json.dumps(json_particion), 'compress' : 1}).encode()\n\t\trespuesta = dameRespuestaLlamado(url, data)\n\t\treturn respuesta\n\treturn respuesta0\n\ndef API_insertObservationsFromDataFrame(json_parametros, url):\n\taction = 'API_insertObservationsFromDataFrame'\n\trespuesta = ''\n\ttest_call = 1 if 'test_call' in json_parametros and (json_parametros['test_call'] == 1 or json_parametros['test_call'] == 'on') else False\n\tif test_call == 1:\n\t\tprint(\"\")\n\t\tprint('This is a TEST CALL, set \"test_call\" : \"off\" or remove to execute service.')\n\t\tprint(\"\")\n\t\t\n\tnom_obs = 'dataframe' if 'dataframe' in json_parametros else 'observations'\n\tdf, valido_df, msj = comprobarJSONaDF(json_parametros[nom_obs])\n\tif not valido_df:\n\t\treturn msj\n\tn_filas = df.shape[0]\n\tn_columnas = df.shape[1]\n\ttam_pedazo_ini = 1000\n\tjson_particion = json_parametros.copy()\n\n\tif n_filas > tam_pedazo_ini:\n\t\t# Insert a first chunk to measure timing.\n\t\tstart = time.time()\n\t\tjson_particion[nom_obs] = df[0:tam_pedazo_ini].to_json()\n\t\tdata = urlencode({'action' : action, 'json' : json.dumps(json_particion), 'compress' : 1}).encode()\n\t\trespuesta = dameRespuestaLlamado(url, data)\n\t\tstop = time.time()\n\t\tsegundos = math.ceil(stop - start)\n\t\ttam_pedazo = int(round(600 / segundos))\n\t\tprint(\"Uploading..\")\n\t\tjson_particion = json_parametros.copy()\n\t\tfor i in range(tam_pedazo_ini, n_filas, tam_pedazo):\n\t\t\ttry:\n\t\t\t\tjson_particion[nom_obs] = df[i:i+tam_pedazo].to_json()\n\t\t\t\tdata = urlencode({'action' : action, 'json' : json.dumps(json_particion), 'compress' : 1}).encode()\n\t\t\t\trespuesta = dameRespuestaLlamado(url, data)\n\t\t\t\t#print(respuesta)\n\t\t\t\t# Print progress\n\t\t\t\tavance = round((i + tam_pedazo)/n_filas * 100, 2)\n\t\t\t\tif avance > 100:\n\t\t\t\t\tprint('100%')\n\t\t\t\t\tprint(\"Wrapping Up..\")\n\t\t\t\telse:\n\t\t\t\t\tprint(str(avance) + \"%\")\n\t\t\t\t\t#print(\"Uploading...\")\n\t\t\t\t\ttime.sleep(2)\n\t\t\texcept:\n\t\t\t\tprint(\"Warning: Some observations might not have been uploaded.\")\n\telse:\n\t\tdata = 
urlencode({'action' : action, 'json' : json.dumps(json_parametros), 'compress' : 1}).encode()\n\t\trespuesta = dameRespuestaLlamado(url, data)\n\treturn respuesta\n\n\ndef API_getSampleObservationsWithVectorizer(json_parametros, url):\n\tglobal VERSION\n\tconfirm, consumption_id = initializeTask(json_parametros, url)\n\tif confirm == 'y':\n\t\t#print(\"Task confirmed. Starting download..\")\n\t\tjson_parametros['consumption_id'] = consumption_id\n\t\tjson_parametros['python_version'] = VERSION\n\t\treturn API_genericDownloadCall(json_parametros, url, 'API_getSampleObservationsWithVectorizerPlus', 5, 300)\n\telse:\n\t\tprint(\"\")\n\t\tprint(\"Task cancelled. To execute tasks without prompt set 'consumption_confirmation' to 0.\")\n\t\treturn {'status' : 'cancelled'}\n \n \ndef API_getSampleObservationsFromDataset(json_parametros, url):\n\tglobal VERSION\n\tconfirm, consumption_id = initializeTask(json_parametros, url)\n\tif confirm == 'y':\n\t\t#print(\"Task confirmed. Starting download..\")\n\t\tjson_parametros['consumption_id'] = consumption_id\n\t\tjson_parametros['python_version'] = VERSION\n\t\treturn API_genericDownloadCall(json_parametros, url, 'API_getSampleObservationsFromDataset', 25, 600)\n\telse:\n\t\tprint(\"\")\n\t\tprint(\"Task cancelled. To execute tasks without prompt set 'consumption_confirmation' to 'off'.\")\n\t\treturn {'status' : 'cancelled'}\n \n\ndef API_getOpenTextData(json_parametros, url):\n\tglobal VERSION\n\tconfirm, consumption_id = initializeTask(json_parametros, url)\n\tif confirm == 'y':\n\t\t#print(\"Task confirmed. Starting download..\")\n\t\tjson_parametros['consumption_id'] = consumption_id\n\t\tjson_parametros['python_version'] = VERSION\n\t\treturn API_genericDownloadCall(json_parametros, url, 'API_getOpenTextData', 25, 500)\n\telse:\n\t\tprint(\"\")\n\t\tprint(\"Task cancelled. To execute tasks without prompt set 'consumption_confirmation' to 0.\")\n\t\treturn {'status' : 'cancelled'}\n \n \ndef initializeTask(json_parametros, url):\n\tjson_parametros['python_version'] = VERSION\n\tprint_progress = False if 'print_progress' in json_parametros and (json_parametros['print_progress'] == 0 or json_parametros['print_progress'] == 'off') else True\n\tdata = urlencode({'action' : 'API_initializeTask', 'json' : json.dumps(json_parametros), 'compress' : 1}).encode()\n\tdetails_task = dameRespuestaLlamado(url, data)\n\t#print(details_task)\n\tconsumption_id = details_task['consumption_id']\n\tif print_progress:\n\t\tprint(\"Task ID: '\" + str(consumption_id) + \"'.\")\n\t\tprint(\"Total estimated consumption: \" + str(round(details_task['details']['total_consumption'],2)) + \" processing units.\")\n\tconsumption_confirmation = json_parametros['consumption_confirmation'] if 'consumption_confirmation' in json_parametros else 0\n\ttime.sleep(0.5)\n\tconfirm = input(\"Continue? 
[y] yes \\t [n] no\") if consumption_confirmation == 'on' else 'y'\n\treturn confirm, consumption_id\n\ndef API_genericDownloadCall(json_parametros, url, action, n_test_observations, slice_mult):\n\trespuesta = ''\n\tnom_archivo = str(time.time()) + '.csv'\n\ttry:\n\t\tstart = time.time()\n\t\ttest_call = 1 if 'test_call' in json_parametros and (json_parametros['test_call'] == 1 or json_parametros['test_call'] == 'on') else False\n\t\tprint_progress = False if 'print_progress' in json_parametros and (json_parametros['print_progress'] == 0 or json_parametros['print_progress'] == 'off') else True\n\t\tif test_call == 1:\n\t\t\tprint(\"\")\n\t\t\tprint('This is a TEST CALL, set \"test_call\" : \"off\" or remove to execute service.')\n\t\t\tprint(\"\")\n\t\t\tdata = urlencode({'action' : action, 'json' : json.dumps(json_parametros), 'compress' : 1}).encode()\n\t\t\trespuesta = dameRespuestaLlamado(url, data)\n\t\t\tdf_resp = pd.DataFrame.from_dict(respuesta['sample'])\n\t\t\tdf_resp = df_resp.reset_index(drop=True)\n\t\t\tt_universo = 0\n\t\telse:\n\t\t\tjson_parametros['tamano_bin'] = n_test_observations\n\t\t\tjson_parametros['skip'] = 0\n\t\t\tdata = urlencode({'action' : action, 'json' : json.dumps(json_parametros), 'compress' : 1}).encode()\n\t\t\trespuesta = dameRespuestaLlamado(url, data)\n\t\t\tt_universo = respuesta['universe_size']\n\t\t\tstop = time.time()\n\t\t\tsegundos = math.ceil(stop - start)\n\t\t\ttam_pedazo = int(round(slice_mult / segundos))\n\t\t\tnum_pedazos = math.ceil(t_universo/tam_pedazo)\n\t\t\tnum_pedazos = num_pedazos if num_pedazos > 0 else 1\n\t\t\tdf_resp = None\n\t\t\tfor i in range(0, num_pedazos):\n\t\t\t\ttry:\n\t\t\t\t\tjson_parametros['tamano_bin'] = tam_pedazo\n\t\t\t\t\tjson_parametros['skip'] = tam_pedazo * i\n\t\t\t\t\tdata = urlencode({'action' : action, 'json' : json.dumps(json_parametros), 'compress' : 1}).encode()\n\t\t\t\t\trespuesta = dameRespuestaLlamado(url, data)\n\t\t\t\t\tdf = pd.DataFrame.from_dict(respuesta['sample'])\n\t\t\t\t\tif df_resp is None:\n\t\t\t\t\t\tdf_resp = df\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif 'save_while_downloading' in json_parametros and json_parametros['save_while_downloading'] == 'on':\n\t\t\t\t\t\t\t\tif not os.path.isfile(nom_archivo):\n\t\t\t\t\t\t\t\t\tdf_resp.to_csv(nom_archivo)\n\t\t\t\t\t\t\t\telse: # else it exists so append without writing the header\n\t\t\t\t\t\t\t\t\tdf_resp.to_csv(nom_archivo, mode='a', header=False)\n\t\t\t\t\t\t\t\tprint('CSV will be stored in: ' + nom_archivo)\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tprint('Unable to save CSV locally, please save dataframe when download completes.')\n\t\t\t\t\telse:\n\t\t\t\t\t\tdf_resp = df_resp.append(df).reset_index(drop=True)\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif 'save_while_downloading' in json_params and json_parametros['save_while_downloading'] == 'on':\n\t\t\t\t\t\t\t\tif not os.path.isfile(nom_archivo):\n\t\t\t\t\t\t\t\t\tdf.to_csv(nom_archivo)\n\t\t\t\t\t\t\t\telse: # else it exists so append without writing the header\n\t\t\t\t\t\t\t\t\tdf.to_csv(nom_archivo, mode='a', header=False)\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t1 + 1\n\t\t\t\t\tavance = round(((i + 1)/num_pedazos) * 100, 2)\n\t\t\t\t\tif avance >= 100:\n\t\t\t\t\t\tif print_progress:\n\t\t\t\t\t\t\tprint(str(avance) + \" % completed.\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tif print_progress:\n\t\t\t\t\t\t\tprint(str(avance) + \" %\")\n\t\t\t\texcept Exception as e:\n\t\t\t\t\t#print(str(e))\n\t\t\t\t\tif print_progress:\n\t\t\t\t\t\tprint(\"Warning: Some observations could not be 
processed.\")\n\t\t\tif 'sample_size' in json_parametros:\n\t\t\t\tif int(json_parametros['sample_size']) < df_resp.shape[0]:\n\t\t\t\t drop_indices = np.random.choice(df_resp.index, df_resp.shape[0] - int(json_parametros['sample_size']), replace=False)\n\t\t\t\t df_resp = df_resp.drop(drop_indices)\n\t\tif 'lag_feature' in json_parametros : \n\t\t\tdf_resp = agregarLagsFeatures(df_resp, json_parametros['lag_feature'])\n\t\trespuesta = json.loads(json.dumps({'universe_size' : t_universo, 'sample' : json.loads(df_resp.to_json()), 'csv_stored' : nom_archivo}))\n\texcept:\n\t\tprint(\"\")\n\t\tprint(\"\")\n\t\tprint(\"Generic error.\")\n\treturn respuesta\n\n\ndef API_powerModel(json_parametros, url):\n\taction = 'API_powerModel'\n\tdata = urlencode({'action' : action, 'json' : json.dumps(json_parametros), 'compress' : 1}).encode()\n\trespuesta = dameRespuestaLlamado(url, data)\n\treturn respuesta\n\n\ndef comprobarJSONaDF(df_json):\n\tvalido = True\n\tmsj = \"Sucess\"\n\ttry:\n\t\tdf_nuevo = pd.read_json(StringIO(df_json), convert_dates=False, convert_axes=False)\n\texcept Exception as e:\n\t\tdf_nuevo = None\n\t\tvalido = False\n\t\tmsj = \"Error transforming json: \" + str(e)\n\treturn df_nuevo, valido, msj\n\ndef agregarLagsFeatures(df, lag_feature):\n\ttry:\n\t\tdf = df.sort_values('timestamp', ascending=False)\n\t\tdf.reset_index(drop=True, inplace=True)\n\t\tfeatures = [lag_feature['feature']]\n\t\tlag_type = lag_feature['add_poc'] if 'add_poc' in lag_feature else 0\n\t\tarr_periods = lag_feature['periods'] if 'periods' in lag_feature else [1]\n\t\tnumerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64', np.number]\n\t\tarr_periods.sort()\n\t\tif 'timestamp' in df:\n\t\t\tdf_res = df.copy()\n\t\t\tfor periods in arr_periods:\n\t\t\t\tdf_lag = df[features].copy()\n\t\t\t\tdf_lag = df_lag.shift(periods = -periods)\n\t\t\t\tdf_lag.columns = [\"lag\" + str(periods) + \"_\" + column for column in df_lag.columns]\n\t\t\t\tif lag_type == 1:\n\t\t\t\t\tif df[features].select_dtypes(include = numerics).shape[1] > 0:\n\t\t\t\t\t\tfor column in df[features].select_dtypes(include = numerics):\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tnom_col = \"lagPoc\" + str(periods) + \"_\" + column\n\t\t\t\t\t\t\t\tcol_lag = \"lag\" + str(periods) + \"_\" + column\n\t\t\t\t\t\t\t\tdf_lag[nom_col] = (df[column] - df_lag[col_lag]) / [0.1 if row == 0 else row for row in df_lag[col_lag]]\n\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\tprint(\"Warning : Poc was not performed for lag period: \" + str(periods))\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"Warning : Poc was not performed for lag period: \" + str(periods))\n\t\t\t\tdf_res = pd.concat([df_res.reset_index(drop=True), df_lag.reset_index(drop=True)], axis=1)\n\t\t\tdf = df_res\n\texcept:\n\t\tprint(\"Warning : Lags were not performed.\")\n\treturn df\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
alex-muci/finance-musings
[ "e6baf3bdca67451b98b939fedff40d93ec2d75d3" ]
[ "sabr/sabr/samples/complete_run.py" ]
[ "\"\"\"\nINCOMPLETE ** INCOMPLETE ** INCOMPLETE\nSample vol surface fitting using SABR:\n 1. read raw prices for [SPX, ...] options (using .csv file in analytics.data from Raj) &\n get raw implied vols\n 2. TODO: get implied vol spline for each liquid maturity (e.g. LIQUID: 4 or more prices) &\n calibrate SABR params from implied vol spline (using constrained version of the Nelder-Mead optimization) &\n interpolate SABR params for non-LIQUID maturities\n 3. check initial raw prices vs. calibrated model prices\n\"\"\"\n\nimport os\nimport numpy as np\nimport analytics.utils as utils\nimport pandas as pd\nimport analytics.calibration.sabrfit as sabrfit\nimport analytics.model.bs as bs\n\n\ndef complete_sample():\n\n # insert tickers (.csv files in /.data)\n underlyings = ['spx', ]\n\n data_folder = utils.DEFAULT_data_dir\n sabr_params = {} # {tckr: sabr_param}\n\n for tckr in underlyings:\n\n tckr_fullpath = os.path.join(data_folder, \"%s.csv\" % tckr)\n\n # 1. get raw data and calculate implieds\n # df.columns = 'Expires', 'Strikes', 'Prices', 'CP', 'implied_vols'\n mat, f, df_tckr = utils.get_data(tckr_fullpath)\n\n # 2. for each maturity, SABR calibration\n #\n mats = set(df_tckr['Expires'])\n mats = np.sort(list(mats))\n sabr = {}\n\n for T in mats:\n # TODO: create multi-index pd rather than this fudge ?\n df_mat = df_tckr[df_tckr['Expires'] == T]\n\n strikes = df_mat['Strikes'].values\n vols = df_mat['implied_vols'].values\n\n # TODO: spline before calibration\n if len(vols) > 4:\n # CONSTRAINED: vol surface parametrization: returns tuple (p[], gof)\n sabr[T], _ = sabrfit.Sabrfit(f, mat[T], strikes, vols).sabrp()\n else: # do not even try to fit\n _arr = np.zeros(3, dtype=float); _arr.fill(np.nan)\n sabr[T], _ = _arr, 0.\n\n sabr_params[tckr] = pd.DataFrame.from_dict(sabr, orient='index')\n sabr_params[tckr].columns = ['rho', 'alpha', 'ini_vol']\n\n # interpolate missing maturities\n '''\n ...\n sabr_params[tckr]['rho'].interpolate(method='spline', order=3).plot()\n sabr_params[tckr]['rho'].interpolate(method='time').plot()\n ...\n '''\n\n # 3. check initial raw prices vs. calibrated model prices\n #\n\n\nif __name__ == '__main__':\n complete_sample()\n" ]
[ [ "pandas.DataFrame.from_dict", "numpy.zeros" ] ]
bunderhi/donkeycar
[ "c8cc941af681820dfbf3d212dd155552cbdc1e6b" ]
[ "donkeycar/parts/realsenseT265.py" ]
[ "'''\nAuthor: Tawn Kramer\nFile: realsense2.py\nDate: April 14 2019\nNotes: Parts to input data from Intel Realsense 2 cameras\n'''\nimport time\nimport logging\n\nimport numpy as np\nimport cv2\nimport os\nfrom math import tan, pi, asin, atan2\nimport pyrealsense2 as rs\nimport donkeycar as dk\n\nclass RPY:\n def __init__(self, rotation):\n w = rotation.w\n x = -rotation.z\n y = rotation.x\n z = -rotation.y\n\n self.pitch = -asin(2.0 * (x*z - w*y)) \n self.roll = atan2(2.0 * (w*x + y*z), w*w - x*x - y*y + z*z) \n self.yaw = atan2(2.0 * (w*z + x*y), w*w + x*x - y*y - z*z) # * 180.0 / pi (rad to degree)\n\n\nclass RS_T265(object):\n '''\n The Intel Realsense T265 camera is a device which uses an imu, twin fisheye cameras,\n and an Movidius chip to do sensor fusion and emit a world space coordinate frame that \n is remarkably consistent.\n '''\n\n \"\"\"\n Returns R, T transform from src to dst\n \"\"\"\n def get_extrinsics(self,src,dst):\n extrinsics = src.get_extrinsics_to(dst)\n R = np.reshape(extrinsics.rotation, [3,3]).T\n T = np.array(extrinsics.translation)\n return (R, T)\n\n \"\"\"\n Returns a camera matrix K from librealsense intrinsics\n \"\"\"\n def camera_matrix(self,intrinsics):\n return np.array([[intrinsics.fx, 0, intrinsics.ppx],\n [ 0, intrinsics.fy, intrinsics.ppy],\n [ 0, 0, 1]])\n\n \"\"\"\n Returns the fisheye distortion from librealsense intrinsics\n \"\"\"\n def fisheye_distortion(self,intrinsics):\n return np.array(intrinsics.coeffs[:4])\n\n\n def __init__(self, image_output=False, calib_filename=None):\n # Using the image_output will grab two image streams from the fisheye cameras but return only one.\n # This can be a bit much for USB2, but you can try it. Docs recommend USB3 connection for this.\n self.image_output = image_output\n\n # When we have and encoder, this will be the last vel measured. \n self.enc_vel_ms = 0.0\n self.wheel_odometer = None\n\n # Declare RealSense pipeline, encapsulating the actual device and sensors\n print(\"starting T265\")\n self.pipe = rs.pipeline()\n cfg = rs.config()\n cfg.enable_stream(rs.stream.pose)\n # bug workaround\n #profile = cfg.resolve(self.pipe)\n #dev = profile.get_device()\n #tm2 = dev.as_tm2()\n \n\n if self.image_output:\n cfg.enable_stream(rs.stream.fisheye, 1) # Left camera\n cfg.enable_stream(rs.stream.fisheye, 2) # Right camera\n #disable wheel odometery for now due to bug\n #if calib_filename is not None:\n # pose_sensor = tm2.first_pose_sensor()\n # self.wheel_odometer = pose_sensor.as_wheel_odometer() \n\n # calibration to list of uint8\n # f = open(calib_filename)\n # chars = []\n # for line in f:\n # for c in line:\n # chars.append(ord(c)) # char to uint8\n\n # load/configure wheel odometer\n print(\"loading wheel config\", calib_filename)\n # self.wheel_odometer.load_wheel_odometery_config(chars) \n\n\n # Start streaming with requested config\n self.pipe.start(cfg)\n self.running = True\n print(\"Warning: T265 needs a warmup period of a few seconds before it will emit tracking data.\")\n if self.image_output:\n # Configure the OpenCV stereo algorithm. 
See\n # https://docs.opencv.org/3.4/d2/d85/classcv_1_1StereoSGBM.html for a\n # description of the parameters\n #window_size = 5\n min_disp = 0\n # must be divisible by 16\n num_disp = 112 - min_disp\n self.max_disp = min_disp + num_disp\n # Retreive the stream and intrinsic properties for both cameras\n profiles = self.pipe.get_active_profile()\n streams = {\"left\" : profiles.get_stream(rs.stream.fisheye, 1).as_video_stream_profile(),\n \"right\" : profiles.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()}\n intrinsics = {\"left\" : streams[\"left\"].get_intrinsics(),\n \"right\" : streams[\"right\"].get_intrinsics()}\n\n # Print information about both cameras\n print(\"Left camera:\", intrinsics[\"left\"])\n print(\"Right camera:\", intrinsics[\"right\"])\n\n # Translate the intrinsics from librealsense into OpenCV\n K_left = self.camera_matrix(intrinsics[\"left\"])\n D_left = self.fisheye_distortion(intrinsics[\"left\"])\n K_right = self.camera_matrix(intrinsics[\"right\"])\n D_right = self.fisheye_distortion(intrinsics[\"right\"])\n #(width, height) = (intrinsics[\"left\"].width, intrinsics[\"left\"].height)\n\n # Get the relative extrinsics between the left and right camera\n (R, T) = self.get_extrinsics(streams[\"left\"], streams[\"right\"])\n # We need to determine what focal length our undistorted images should have\n # in order to set up the camera matrices for initUndistortRectifyMap. We\n # could use stereoRectify, but here we show how to derive these projection\n # matrices from the calibration and a desired height and field of view\n\n # We calculate the undistorted focal length:\n #\n # h\n # -----------------\n # \\ | /\n # \\ | f /\n # \\ | /\n # \\ fov /\n # \\|/\n stereo_fov_rad = 90 * (pi/180) # 90 degree desired fov\n stereo_height_px = 300 # 300x300 pixel stereo output\n stereo_focal_px = stereo_height_px/2 / tan(stereo_fov_rad/2)\n\n # We set the left rotation to identity and the right rotation\n # the rotation between the cameras\n R_left = np.eye(3)\n R_right = R\n\n # The stereo algorithm needs max_disp extra pixels in order to produce valid\n # disparity on the desired output region. This changes the width, but the\n # center of projection should be on the center of the cropped image\n stereo_width_px = stereo_height_px + self.max_disp\n stereo_size = (stereo_width_px, stereo_height_px)\n stereo_cx = (stereo_height_px - 1)/2 + self.max_disp\n stereo_cy = (stereo_height_px - 1)/2\n\n # Construct the left and right projection matrices, the only difference is\n # that the right projection matrix should have a shift along the x axis of\n # baseline*focal_length\n P_left = np.array([[stereo_focal_px, 0, stereo_cx, 0],\n [0, stereo_focal_px, stereo_cy, 0],\n [0, 0, 1, 0]])\n P_right = P_left.copy()\n P_right[0][3] = T[0]*stereo_focal_px\n\n # Construct Q for use with cv2.reprojectImageTo3D. Subtract max_disp from x\n # since we will crop the disparity later\n Q = np.array([[1, 0, 0, -(stereo_cx - self.max_disp)],\n [0, 1, 0, -stereo_cy],\n [0, 0, 0, stereo_focal_px],\n [0, 0, -1/T[0], 0]])\n\n # Create an undistortion map for the left and right camera which applies the\n # rectification and undoes the camera distortion. 
This only has to be done\n # once\n m1type = cv2.CV_32FC1\n (lm1, lm2) = cv2.fisheye.initUndistortRectifyMap(K_left, D_left, R_left, P_left, stereo_size, m1type)\n (rm1, rm2) = cv2.fisheye.initUndistortRectifyMap(K_right, D_right, R_right, P_right, stereo_size, m1type)\n self.undistort_rectify = {\"left\" : (lm1, lm2),\n \"right\" : (rm1, rm2)}\n zero_vec = (0.0, 0.0, 0.0)\n self.pos = zero_vec\n self.vel = zero_vec\n self.acc = zero_vec\n self.rpy = zero_vec\n self.img = None\n\n def poll(self):\n\n if self.wheel_odometer:\n wo_sensor_id = 0 # indexed from 0, match to order in calibration file\n frame_num = 0 # not used\n v = rs.vector()\n v.x = -1.0 * self.enc_vel_ms # m/s\n #v.z = -1.0 * self.enc_vel_ms # m/s\n self.wheel_odometer.send_wheel_odometry(wo_sensor_id, frame_num, v)\n\n try:\n frames = self.pipe.wait_for_frames()\n logging.info(\"Wait for frames complete\")\n except Exception as e:\n logging.error(e)\n return\n\n # Fetch pose frame\n pose = frames.get_pose_frame()\n logging.info(\"Fetch pose\")\n if pose:\n data = pose.get_pose_data()\n self.pos = data.translation\n self.vel = data.velocity\n self.acc = data.acceleration\n self.rotation = data.rotation\n logging.info('realsense pos(%f, %f, %f)' % (self.pos.x, self.pos.y, self.pos.z))\n\n # Compute roll, pitch, and yaw\n self.rpy = RPY(self.rotation)\n \n logging.info('realsense RPandY(%f, %f, %f)' % (self.rpy.roll,self.rpy.pitch,self.rpy.yaw))\n \n if self.image_output:\n #We will just get one image for now.\n # Left fisheye camera frame\n left = frames.get_fisheye_frame(1)\n left_data = np.asanyarray(left.get_data())\n left_undistorted = cv2.remap(src = left_data,\n map1 = self.undistort_rectify[\"left\"][0],\n map2 = self.undistort_rectify[\"left\"][1],\n interpolation = cv2.INTER_LINEAR)\n self.img = cv2.cvtColor(left_undistorted[:,self.max_disp:], cv2.COLOR_GRAY2RGB)\n logging.info(\"Get image\")\n\n def update(self):\n while self.running:\n self.poll()\n\n def run_threaded(self, enc_vel_ms):\n self.enc_vel_ms = enc_vel_ms\n return self.pos, self.vel, self.acc, self.rpy, self.img\n\n def run(self, enc_vel_ms):\n self.enc_vel_ms = enc_vel_ms\n self.poll()\n return self.run_threaded()\n\n def shutdown(self):\n self.running = False\n time.sleep(0.1)\n self.pipe.stop()\n\n\nclass RS_T265RAW(object):\n '''\n The Intel Realsense T265 camera is a device which uses an imu, twin fisheye cameras,\n and an Movidius chip to do sensor fusion and emit a world space coordinate frame.\n '''\n\n def __init__(self, image_output=False, calib_filename=None):\n # Using the image_output will grab two image streams from the fisheye cameras but return only one.\n # This can be a bit much for USB2, but you can try it. Docs recommend USB3 connection for this.\n self.image_output = image_output\n\n # When we have and encoder, this will be the last vel measured. 
\n self.enc_vel_ms = 0.0\n self.wheel_odometer = None\n\n # Declare RealSense pipeline, encapsulating the actual device and sensors\n print(\"starting T265\")\n self.pipe = rs.pipeline()\n cfg = rs.config()\n cfg.enable_stream(rs.stream.pose)\n # bug workaround\n #profile = cfg.resolve(self.pipe)\n #dev = profile.get_device()\n #tm2 = dev.as_tm2()\n \n\n if self.image_output:\n cfg.enable_stream(rs.stream.fisheye, 1) # Left camera\n cfg.enable_stream(rs.stream.fisheye, 2) # Right camera\n #disable wheel odometery for now due to bug\n #if calib_filename is not None:\n # pose_sensor = tm2.first_pose_sensor()\n # self.wheel_odometer = pose_sensor.as_wheel_odometer() \n\n # calibration to list of uint8\n # f = open(calib_filename)\n # chars = []\n # for line in f:\n # for c in line:\n # chars.append(ord(c)) # char to uint8\n\n # load/configure wheel odometer\n print(\"loading wheel config\", calib_filename)\n # self.wheel_odometer.load_wheel_odometery_config(chars) \n\n\n # Start streaming with requested config\n self.pipe.start(cfg)\n self.running = True\n print(\"Warning: T265 needs a warmup period of a few seconds before it will emit tracking data.\")\n if self.image_output:\n # Retreive the stream and intrinsic properties for both cameras\n profiles = self.pipe.get_active_profile()\n streams = {\"left\" : profiles.get_stream(rs.stream.fisheye, 1).as_video_stream_profile(),\n \"right\" : profiles.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()}\n\n zero_vec = (0.0, 0.0, 0.0)\n self.pos = zero_vec\n self.vel = zero_vec\n self.acc = zero_vec\n self.rpy = zero_vec\n self.img = None\n\n def poll(self):\n\n if self.wheel_odometer:\n wo_sensor_id = 0 # indexed from 0, match to order in calibration file\n frame_num = 0 # not used\n v = rs.vector()\n v.x = -1.0 * self.enc_vel_ms # m/s\n #v.z = -1.0 * self.enc_vel_ms # m/s\n self.wheel_odometer.send_wheel_odometry(wo_sensor_id, frame_num, v)\n\n try:\n frames = self.pipe.wait_for_frames()\n logging.info(\"Wait for frames complete\")\n except Exception as e:\n logging.error(e)\n return\n\n # Fetch pose frame\n pose = frames.get_pose_frame()\n logging.info(\"Fetch pose\")\n if pose:\n data = pose.get_pose_data()\n self.pos = data.translation\n self.vel = data.velocity\n self.acc = data.acceleration\n self.rotation = data.rotation\n self.mapper_confidence = data.mapper_confidence\n self.timestamp = pose.get_timestamp()\n logging.info('realsense pos(%f, %f, %f)' % (self.pos.x, self.pos.y, self.pos.z))\n\n # Compute roll, pitch, and yaw\n self.rpy = RPY(self.rotation)\n logging.info('realsense RPandY(%f, %f, %f)' % (self.rpy.roll,self.rpy.pitch,self.rpy.yaw))\n \n if self.image_output:\n #We will just get one image for now.\n # Left fisheye camera frame\n left = frames.get_fisheye_frame(1)\n left_data = np.asanyarray(left.get_data())\n self.img = cv2.cvtColor(left_data, cv2.COLOR_GRAY2RGB)\n logging.info(\"Get image\")\n\n def update(self):\n while self.running:\n self.poll() \n\n def run_threaded(self, enc_vel_ms):\n self.enc_vel_ms = enc_vel_ms\n return self.pos, self.vel, self.acc, self.rpy, self.img\n\n def run(self, enc_vel_ms):\n self.enc_vel_ms = enc_vel_ms\n self.poll()\n return self.run_threaded(enc_vel_ms)\n\n def shutdown(self):\n self.running = False\n time.sleep(0.1)\n self.pipe.stop()\n\nclass ImgPreProcess(object):\n '''\n preprocess camera image for inference.\n '''\n def __init__(self, cfg):\n self.cfg = cfg\n self.gray = None\n self.crop_img = None\n self.im2 = None\n self.image = None\n self.inf_inputs = None\n self.clahe = 
cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n self.framecount = 0\n\n def run(self, img_arr):\n if img_arr is not None:\n t0 = time.time()\n self.gray = cv2.cvtColor(img_arr,cv2.COLOR_RGB2GRAY)\n self.crop_img = self.gray[230:550, 130:770]\n t1 = time.time()\n self.crop_img = self.clahe.apply(self.crop_img)\n t2 = time.time()\n self.im2 = cv2.resize(self.crop_img,None,fx=0.5,fy=0.5,interpolation=cv2.INTER_AREA)\n t3 = time.time()\n self.image = cv2.cvtColor(self.im2,cv2.COLOR_GRAY2RGB)\n self.inf_inputs = self.image.transpose(2,0,1).reshape(1,3,160,320)\n t4 = time.time()\n self.framecount += 1\n print(f'framecount {self.framecount} crop {t1 - t0} clahe {t2- t1} resize {t3 - t2} reshape {t4 - t3} ')\n return self.image,np.array(self.inf_inputs, dtype=np.float32, order='C')/255,self.framecount\n else:\n return None,None,self.framecount\n\nclass ImgAlphaBlend(object):\n '''\n Combine camera image and inference mask for fpv viewer.\n '''\n def __init__(self, cfg):\n self.cfg = cfg \n if cfg.ALPHA:\n self.alpha = cfg.ALPHA\n else:\n self.alpha = 0.5\n self.beta = (1.0 - self.alpha)\n self.fill = np.zeros((2,160,320),dtype=np.uint8)\n\n def run(self, mask, img, camcount, infcount):\n print(f'alpha framecount {camcount} infcount {infcount}')\n fctext = f'frame {camcount} \\n inf {infcount}'\n red = (mask*255).reshape(1,160,320)\n redmask = np.vstack((self.fill,red)).transpose(1,2,0)\n dst = cv2.addWeighted(redmask, self.alpha, img, self.beta, 0.0)\n dk.utils.draw_text(dst,text=fctext,uv_top_left=(10,10))\n return dst\n\n" ]
[ [ "numpy.array", "numpy.reshape", "numpy.zeros", "numpy.eye", "numpy.vstack" ] ]
weiyunfei/taichi
[ "52a7cd8325672bc160e5540e54064c960c78256d" ]
[ "tests/python/test_bit_struct.py" ]
[ "import numpy as np\nfrom pytest import approx\n\nimport taichi as ti\nfrom tests import test_utils\n\n\n@test_utils.test(require=ti.extension.quant_basic, debug=True)\ndef test_simple_array():\n ci13 = ti.types.quant.int(13, True)\n cu19 = ti.types.quant.int(19, False)\n\n x = ti.field(dtype=ci13)\n y = ti.field(dtype=cu19)\n\n N = 12\n\n ti.root.dense(ti.i, N).bit_struct(num_bits=32).place(x, y)\n\n @ti.kernel\n def set_val():\n for i in range(N):\n x[i] = -2**i\n y[i] = 2**i - 1\n\n @ti.kernel\n def verify_val():\n for i in range(N):\n assert x[i] == -2**i\n assert y[i] == 2**i - 1\n\n set_val()\n verify_val()\n\n # Test bit_struct SNode read and write in Python-scope by calling the wrapped, untranslated function body\n set_val.__wrapped__()\n verify_val.__wrapped__()\n\n\n# TODO: remove excluding of ti.metal\n@test_utils.test(require=ti.extension.quant_basic,\n exclude=[ti.metal],\n debug=True)\ndef test_custom_int_load_and_store():\n ci13 = ti.types.quant.int(13, True)\n cu14 = ti.types.quant.int(14, False)\n ci5 = ti.types.quant.int(5, True)\n\n x = ti.field(dtype=ci13)\n y = ti.field(dtype=cu14)\n z = ti.field(dtype=ci5)\n\n test_case_np = np.array(\n [[2**12 - 1, 2**14 - 1, -(2**3)], [2**11 - 1, 2**13 - 1, -(2**2)],\n [0, 0, 0], [123, 4567, 8], [10, 31, 11]],\n dtype=np.int32)\n\n ti.root.bit_struct(num_bits=32).place(x, y, z)\n test_case = ti.Vector.field(3, dtype=ti.i32, shape=len(test_case_np))\n test_case.from_numpy(test_case_np)\n\n @ti.kernel\n def set_val(idx: ti.i32):\n x[None] = test_case[idx][0]\n y[None] = test_case[idx][1]\n z[None] = test_case[idx][2]\n\n @ti.kernel\n def verify_val(idx: ti.i32):\n assert x[None] == test_case[idx][0]\n assert y[None] == test_case[idx][1]\n assert z[None] == test_case[idx][2]\n\n for idx in range(len(test_case_np)):\n set_val(idx)\n verify_val(idx)\n\n # Test bit_struct SNode read and write in Python-scope by calling the wrapped, untranslated function body\n for idx in range(len(test_case_np)):\n set_val.__wrapped__(idx)\n verify_val.__wrapped__(idx)\n\n\n@test_utils.test(require=ti.extension.quant_basic)\ndef test_custom_int_full_struct():\n cit = ti.types.quant.int(32, True)\n x = ti.field(dtype=cit)\n ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(x)\n\n x[0] = 15\n assert x[0] == 15\n\n x[0] = 12\n assert x[0] == 12\n\n\ndef test_bit_struct():\n def test_single_bit_struct(physical_type, compute_type, custom_bits,\n test_case):\n ti.init(arch=ti.cpu, debug=True)\n\n cit1 = ti.types.quant.int(custom_bits[0], True, compute_type)\n cit2 = ti.types.quant.int(custom_bits[1], False, compute_type)\n cit3 = ti.types.quant.int(custom_bits[2], True, compute_type)\n\n a = ti.field(dtype=cit1)\n b = ti.field(dtype=cit2)\n c = ti.field(dtype=cit3)\n ti.root.bit_struct(num_bits=physical_type).place(a, b, c)\n\n @ti.kernel\n def set_val(test_val: ti.types.ndarray()):\n a[None] = test_val[0]\n b[None] = test_val[1]\n c[None] = test_val[2]\n\n @ti.kernel\n def verify_val(test_val: ti.types.ndarray()):\n assert a[None] == test_val[0]\n assert b[None] == test_val[1]\n assert c[None] == test_val[2]\n\n set_val(test_case)\n verify_val(test_case)\n\n ti.reset()\n\n test_single_bit_struct(8, ti.i8, [3, 3, 2],\n np.array([2**2 - 1, 2**3 - 1, -2**1]))\n test_single_bit_struct(16, ti.i16, [4, 7, 5],\n np.array([2**3 - 1, 2**7 - 1, -2**4]))\n test_single_bit_struct(32, ti.i32, [17, 11, 4],\n np.array([2**16 - 1, 2**10 - 1, -2**3]))\n test_single_bit_struct(64, ti.i64, [32, 23, 9],\n np.array([2**31 - 1, 2**23 - 1, -2**8]))\n test_single_bit_struct(32, 
ti.i16, [7, 12, 13],\n np.array([2**6 - 1, 2**12 - 1, -2**12]))\n test_single_bit_struct(64, ti.i32, [18, 22, 24],\n np.array([2**17 - 1, 2**22 - 1, -2**23]))\n\n test_single_bit_struct(16, ti.i16, [5, 5, 6], np.array([15, 5, 20]))\n test_single_bit_struct(32, ti.i32, [10, 10, 12], np.array([11, 19, 2020]))\n\n\n@test_utils.test(require=[ti.extension.quant_basic, ti.extension.sparse],\n debug=True)\ndef test_bit_struct_struct_for():\n block_size = 16\n N = 64\n cell = ti.root.pointer(ti.i, N // block_size)\n fixed32 = ti.types.quant.fixed(frac=32, range=1024)\n\n x = ti.field(dtype=fixed32)\n cell.dense(ti.i, block_size).bit_struct(32).place(x)\n\n for i in range(N):\n if i // block_size % 2 == 0:\n x[i] = 0\n\n @ti.kernel\n def assign():\n for i in x:\n x[i] = ti.cast(i, float)\n\n assign()\n\n for i in range(N):\n if i // block_size % 2 == 0:\n assert x[i] == approx(i, abs=1e-3)\n else:\n assert x[i] == 0\n" ]
[ [ "numpy.array" ] ]
bear9608/NIU
[ "07168e981cf261e2e20c577c7d75df0cc2fb0c19" ]
[ "utils/few_shot.py" ]
[ "import torch\n\n\ndef split_shot_query(data, way, shot, query, ep_per_batch=1):\n img_shape = data.shape[1:]\n data = data.view(ep_per_batch, way, shot + query, *img_shape)\n x_shot, x_query = data.split([shot, query], dim=2)\n x_shot = x_shot.contiguous()\n x_query = x_query.contiguous().view(ep_per_batch, way * query, *img_shape)\n return x_shot, x_query\n\ndef base_shot_query(data, way, shot, query, ep_per_batch=1):\n img_shape = data.shape[1:]\n data = data.view(ep_per_batch, way, shot + query, *img_shape)\n x_shot, x_query = data.split([shot, query], dim=2)\n x_shot = x_shot.contiguous()\n x_query = x_query.contiguous().view(ep_per_batch, way * query, *img_shape)\n return x_shot, x_query\n\ndef make_nk_label(n, k, ep_per_batch=1):\n label = torch.arange(n).unsqueeze(1).expand(n, k).reshape(-1)\n label = label.repeat(ep_per_batch)\n return label\n\n" ]
[ [ "torch.arange" ] ]
klee229/CS179J-Smart-Water-Station-Project
[ "7568c23dc7cd4108ca42c27bf77e18a2a74aa36f" ]
[ "tests/test_csv.py" ]
[ "import unittest\nimport csv\nimport pandas as pd\n\n\nclass TestCSVFile(unittest.TestCase):\n\n # NOTE: all items in user_data converted to str from int or float, this way we compare string to string for testing\n\n def test_open_write_read_close(self):\n # example users for testing\n columns = ['card_uid', 'registration_state', 'name', 'age', 'sex', 'activity_level',\n 'daily_hydration_lower', 'daily_hydration_upper', 'water_dispensed', 'total_dispensed',\n 'percent_dispensed_of_daily', 'num_days', 'num_days_goal', 'avg_intake', 'last_login'\n ]\n\n user_data = [\n ['734a266f', 'True', 'name one', '5', 'Male', 'Sedentary', '1400', '1600', '0', '100000', '0.0', '1', '0',\n '1517.0', '20/08/2021 05:42:21'],\n ['5d81e96d', 'True', 'name two', '12', 'Female', 'Sedentary', '1600', '2000', '200', '200000', '20.0', '14',\n '12', '1984.0', '11/07/2021 07:15:09'],\n ['4d71f56d', 'True', 'name three', '17', 'Male', 'Moderate', '2400', '2800', '500', '300000', '50.0', '28',\n '19', '1000.0', '20/08/2021 16:58:59'],\n ['fdd1a46b', 'True', 'name four', '29', 'Female', 'Moderate', '2000', '2200', '1300', '400000', '130.0',\n '33', '3', '0.0', '20/08/2021 24:24:24'],\n ['1d4ba46b', 'True', 'name five', '48', 'Male', 'Active', '2400', '2600', '1800', '500000', '180.0', '99',\n '99', '2256.0', '20/08/2021 17:36:10'],\n ['dd8b9f6b', 'True', 'name six', '76', 'Female', 'Active', '1800', '1800', '2400', '600000', '240.0', '257',\n '202', '1234.0', '01/01/1970 00:00:00']\n ]\n\n # NOTE: enter the exact path for your machine to run locally\n path = ''\n\n # open file, write data to file\n with open(path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(columns)\n writer.writerows(user_data)\n\n # open file, read data from file\n with open(path, 'r', newline='') as csv_file:\n reader = csv.reader(csv_file)\n row_num = 0\n for row in reader:\n if row_num == 0:\n self.assertEqual(columns, row)\n else:\n self.assertEqual(user_data[row_num - 1], row)\n row_num += 1\n\n csv_file.close()\n\n def test_reopen_read_close(self):\n # example users for testing\n columns = ['card_uid', 'registration_state', 'name', 'age', 'sex', 'activity_level',\n 'daily_hydration_lower', 'daily_hydration_upper', 'water_dispensed', 'total_dispensed',\n 'percent_dispensed_of_daily', 'num_days', 'num_days_goal', 'avg_intake', 'last_login'\n ]\n\n user_data = [\n ['734a266f', 'True', 'name one', '5', 'Male', 'Sedentary', '1400', '1600', '0', '100000', '0.0', '1', '0',\n '1517.0', '20/08/2021 05:42:21'],\n ['5d81e96d', 'True', 'name two', '12', 'Female', 'Sedentary', '1600', '2000', '200', '200000', '20.0', '14',\n '12', '1984.0', '11/07/2021 07:15:09'],\n ['4d71f56d', 'True', 'name three', '17', 'Male', 'Moderate', '2400', '2800', '500', '300000', '50.0', '28',\n '19', '1000.0', '20/08/2021 16:58:59'],\n ['fdd1a46b', 'True', 'name four', '29', 'Female', 'Moderate', '2000', '2200', '1300', '400000', '130.0',\n '33', '3', '0.0', '20/08/2021 24:24:24'],\n ['1d4ba46b', 'True', 'name five', '48', 'Male', 'Active', '2400', '2600', '1800', '500000', '180.0', '99',\n '99', '2256.0', '20/08/2021 17:36:10'],\n ['dd8b9f6b', 'True', 'name six', '76', 'Female', 'Active', '1800', '1800', '2400', '600000', '240.0', '257',\n '202', '1234.0', '01/01/1970 00:00:00']\n ]\n\n # NOTE: enter the exact path for your machine to run locally\n path = ''\n\n # open file, read data from file\n with open(path, 'r', newline='') as csv_file:\n reader = csv.reader(csv_file)\n row_num = 0\n for row in reader:\n if row_num == 0:\n 
self.assertEqual(columns, row)\n else:\n self.assertEqual(user_data[row_num - 1], row)\n row_num += 1\n\n csv_file.close()\n\n def test_add_row(self):\n # example user for testing\n example_user = ['1a2b3c4d', 'True', 'Test User', '76', 'Female', 'Active', '2000', '2000', '4000', '900000',\n '4457.0', '400', '235', '1578.0', '02/12/1970 01:02:03']\n\n # NOTE: enter the exact path for your machine to run locally\n path = ''\n\n # open file in append mode, write data to end of file\n with open(path, 'a', newline='') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(example_user)\n\n # open file, read data from file\n with open(path, 'r', newline='') as csv_file:\n reader = csv.reader(csv_file)\n new_row = []\n for row in reader:\n if row[0] == example_user[0]:\n new_row = row\n\n self.assertEqual(example_user, new_row)\n\n csv_file.close()\n\n def test_edit_user_data(self):\n # example users for testing\n columns = ['card_uid', 'registration_state', 'name', 'age', 'sex', 'activity_level',\n 'daily_hydration_lower', 'daily_hydration_upper', 'water_dispensed', 'total_dispensed',\n 'percent_dispensed_of_daily', 'num_days', 'num_days_goal', 'avg_intake', 'last_login'\n ]\n\n user_data = [\n ['734a266f', 'True', 'name one', '5', 'Male', 'Sedentary', '1400', '1600', '0', '100000', '0.0', '1', '0',\n '1517.0', '20/08/2021 05:42:21'],\n ['5d81e96d', 'True', 'name two', '12', 'Female', 'Sedentary', '1600', '2000', '200', '200000', '20.0', '14',\n '12', '1984.0', '11/07/2021 07:15:09'],\n ['4d71f56d', 'True', 'name three', '17', 'Male', 'Moderate', '2400', '2800', '500', '300000', '50.0', '28',\n '19', '1000.0', '20/08/2021 16:58:59'],\n ['fdd1a46b', 'True', 'name four', '29', 'Female', 'Moderate', '2000', '2200', '1300', '400000', '130.0',\n '33', '3', '0.0', '20/08/2021 24:24:24'],\n ['1d4ba46b', 'True', 'name five', '48', 'Male', 'Active', '2400', '2600', '1800', '500000', '180.0', '99',\n '99', '2256.0', '20/08/2021 17:36:10'],\n ['dd8b9f6b', 'True', 'name six', '76', 'Female', 'Active', '1800', '1800', '2400', '600000', '240.0', '257',\n '202', '1234.0', '01/01/1970 00:00:00']\n ]\n\n # NOTE: enter the exact path for your machine to run locally\n path = ''\n\n # open file, write data to file\n with open(path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(columns)\n writer.writerows(user_data)\n\n row_to_change = 0\n\n # open file, read data from file\n with open(path, 'r', newline='') as csv_file:\n reader = csv.reader(csv_file)\n row_num = 0\n for row in reader:\n if row[0] == '734a266f':\n row_to_change = row_num\n row_num += 1\n\n # create pandas dataframe of the csv file, make a few changes\n df = pd.read_csv(path)\n\n df.at[row_to_change - 1, 'num_days'] += 1\n df.at[row_to_change, 'water_dispensed'] += 500\n df.at[row_to_change + 1, 'activity_level'] = 'Moderate'\n\n temp_water_dispensed = df.at[row_to_change, 'water_dispensed']\n temp_water_dispensed += 1000\n\n df.at[row_to_change, 'water_dispensed'] = temp_water_dispensed\n\n df.to_csv(path, index=False)\n\n csv_file.close()\n\n edited_user_data = [\n ['734a266f', 'True', 'name one', '5', 'Male', 'Sedentary', '1400', '1600', '0', '100000', '0.0', '2', '0',\n '1517.0', '20/08/2021 05:42:21'],\n ['5d81e96d', 'True', 'name two', '12', 'Female', 'Sedentary', '1600', '2000', '1700', '200000', '20.0',\n '14',\n '12', '1984.0', '11/07/2021 07:15:09'],\n ['4d71f56d', 'True', 'name three', '17', 'Male', 'Moderate', '2400', '2800', '500', '300000', '50.0', '28',\n '19', '1000.0', '20/08/2021 
16:58:59'],\n ['fdd1a46b', 'True', 'name four', '29', 'Female', 'Moderate', '2000', '2200', '1300', '400000', '130.0',\n '33', '3', '0.0', '20/08/2021 24:24:24'],\n ['1d4ba46b', 'True', 'name five', '48', 'Male', 'Active', '2400', '2600', '1800', '500000', '180.0', '99',\n '99', '2256.0', '20/08/2021 17:36:10'],\n ['dd8b9f6b', 'True', 'name six', '76', 'Female', 'Active', '1800', '1800', '2400', '600000', '240.0', '257',\n '202', '1234.0', '01/01/1970 00:00:00']\n ]\n\n # open file, read data from file\n with open(path, 'r', newline='') as csv_file:\n reader = csv.reader(csv_file)\n row_num = 0\n for row in reader:\n if row_num == 0:\n self.assertEqual(columns, row)\n else:\n self.assertEqual(edited_user_data[row_num - 1], row)\n row_num += 1\n\n csv_file.close()\n\n def test_open_write_empty_read_close(self):\n # example users for testing\n columns = ['card_uid', 'registration_state', 'name', 'age', 'sex', 'activity_level',\n 'daily_hydration_lower', 'daily_hydration_upper', 'water_dispensed', 'total_dispensed',\n 'percent_dispensed_of_daily', 'num_days', 'num_days_goal', 'avg_intake', 'last_login'\n ]\n\n user_data = [\n ['', '', '', '', '', '', '', '', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', '', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', '', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', '', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', '', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '']\n ]\n\n # NOTE: enter the exact path for your machine to run locally\n path = ''\n\n # open file, write data to file\n with open(path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(columns)\n writer.writerows(user_data)\n\n # open file, read data from file\n with open(path, 'r', newline='') as csv_file:\n reader = csv.reader(csv_file)\n row_num = 0\n for row in reader:\n if row_num == 0:\n self.assertEqual(columns, row)\n else:\n self.assertEqual(user_data[row_num - 1], row)\n row_num += 1\n\n csv_file.close()\n\n def test_file_initialization_for_boot_up(self):\n # example users for testing\n columns = ['card_uid', 'registration_state', 'name', 'age', 'sex', 'activity_level',\n 'daily_hydration_lower', 'daily_hydration_upper', 'water_dispensed', 'total_dispensed',\n 'percent_dispensed_of_daily', 'num_days', 'num_days_goal', 'avg_intake', 'last_login'\n ]\n\n user_data = [\n ['734a266f', 'False', ' ', '0', ' ', ' ', '0', '0', '0', '0', '0.0', '0', '0', '0.0', ' '],\n ['5d81e96d', 'False', ' ', '0', ' ', ' ', '0', '0', '0', '0', '0.0', '0', '0', '0.0', ' '],\n ['4d71f56d', 'False', ' ', '0', ' ', ' ', '0', '0', '0', '0', '0.0', '0', '0', '0.0', ' '],\n ['fdd1a46b', 'False', ' ', '0', ' ', ' ', '0', '0', '0', '0', '0.0', '0', '0', '0.0', ' '],\n ['1d4ba46b', 'False', ' ', '0', ' ', ' ', '0', '0', '0', '0', '0.0', '0', '0', '0.0', ' '],\n ['dd8b9f6b', 'False', ' ', '0', ' ', ' ', '0', '0', '0', '0', '0.0', '0', '0', '0.0', ' ']\n ]\n\n # NOTE: enter the exact path for your machine to run locally\n path = ''\n\n # open file, write data to file\n with open(path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(columns)\n writer.writerows(user_data)\n\n # open file, read data from file\n with open(path, 'r', newline='') as csv_file:\n reader = csv.reader(csv_file)\n row_num = 0\n for row in reader:\n if row_num == 0:\n self.assertEqual(columns, row)\n else:\n self.assertEqual(user_data[row_num - 1], row)\n row_num += 1\n\n 
csv_file.close()\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "pandas.read_csv" ] ]
cuigucohen/Code-width-and-height-of-vehicle
[ "0c4c0876089459da87a0bfb67f02e34bd764dda1" ]
[ "lib/datasets/imdb.py" ]
[ "# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nimport os\nimport os.path as osp\nimport PIL\nfrom utils.cython_bbox import bbox_overlaps\nimport numpy as np\nimport scipy.sparse\nfrom fast_rcnn.config import cfg\n\nclass imdb(object):\n \"\"\"Image database.\"\"\"\n\n def __init__(self, name):\n self._name = name\n self._num_classes = 0\n self._classes = []\n self._image_index = []\n self._obj_proposer = 'selective_search'\n self._roidb = None\n self._roidb_handler = self.default_roidb\n # Use this dict for storing dataset specific config options\n self.config = {}\n\n @property\n def name(self):\n return self._name\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def classes(self):\n return self._classes\n\n @property\n def image_index(self):\n return self._image_index\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n\n @roidb_handler.setter\n def roidb_handler(self, val):\n self._roidb_handler = val\n\n def set_proposal_method(self, method):\n method = eval('self.' + method + '_roidb')\n self.roidb_handler = method\n\n @property\n def roidb(self):\n # A roidb is a list of dictionaries, each with the following keys:\n # boxes\n # gt_overlaps\n # gt_classes\n # flipped\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))\n if not os.path.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n\n @property\n def num_images(self):\n return len(self.image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def default_roidb(self):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0]\n for i in xrange(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = [PIL.Image.open(self.image_path_at(i)).size[0] \n for i in xrange(num_images)] \n for i in xrange(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes' : boxes,\n 'gt_overlaps' : self.roidb[i]['gt_overlaps'],\n 'gt_classes' : self.roidb[i]['gt_classes'],\n 'flipped' : True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n def evaluate_recall(self, candidate_boxes=None, thresholds=None,\n area='all', limit=None):\n \"\"\"Evaluate detection proposal recall metrics.\n\n Returns:\n results: dictionary of results with keys\n 'ar': average recall\n 'recalls': vector recalls at each IoU overlap threshold\n 'thresholds': vector of IoU overlap thresholds\n 'gt_overlaps': vector of all ground-truth overlaps\n \"\"\"\n # Record max overlap value for each gt box\n # Return vector of overlap values\n areas = { 'all': 0, 'small': 1, 
'medium': 2, 'large': 3,\n '96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}\n area_ranges = [ [0**2, 1e5**2], # all\n [0**2, 32**2], # small\n [32**2, 96**2], # medium\n [96**2, 1e5**2], # large\n [96**2, 128**2], # 96-128\n [128**2, 256**2], # 128-256\n [256**2, 512**2], # 256-512\n [512**2, 1e5**2], # 512-inf\n ]\n assert areas.has_key(area), 'unknown area range: {}'.format(area)\n area_range = area_ranges[areas[area]]\n gt_overlaps = np.zeros(0)\n num_pos = 0\n for i in xrange(self.num_images):\n # Checking for max_overlaps == 1 avoids including crowd annotations\n # (...pretty hacking :/)\n max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)\n gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &\n (max_gt_overlaps == 1))[0]\n gt_boxes = self.roidb[i]['boxes'][gt_inds, :]\n gt_areas = self.roidb[i]['seg_areas'][gt_inds]\n valid_gt_inds = np.where((gt_areas >= area_range[0]) &\n (gt_areas <= area_range[1]))[0]\n gt_boxes = gt_boxes[valid_gt_inds, :]\n num_pos += len(valid_gt_inds)\n\n if candidate_boxes is None:\n # If candidate_boxes is not supplied, the default is to use the\n # non-ground-truth boxes from this roidb\n non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]\n boxes = self.roidb[i]['boxes'][non_gt_inds, :]\n else:\n boxes = candidate_boxes[i]\n if boxes.shape[0] == 0:\n continue\n if limit is not None and boxes.shape[0] > limit:\n boxes = boxes[:limit, :]\n\n overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n\n _gt_overlaps = np.zeros((gt_boxes.shape[0]))\n for j in xrange(gt_boxes.shape[0]):\n # find which proposal box maximally covers each gt box\n argmax_overlaps = overlaps.argmax(axis=0)\n # and get the iou amount of coverage for each gt box\n max_overlaps = overlaps.max(axis=0)\n # find which gt box is 'best' covered (i.e. 
'best' = most iou)\n gt_ind = max_overlaps.argmax()\n gt_ovr = max_overlaps.max()\n assert(gt_ovr >= 0)\n # find the proposal box that covers the best covered gt box\n box_ind = argmax_overlaps[gt_ind]\n # record the iou coverage of this gt box\n _gt_overlaps[j] = overlaps[box_ind, gt_ind]\n assert(_gt_overlaps[j] == gt_ovr)\n # mark the proposal box and the gt box as used\n overlaps[box_ind, :] = -1\n overlaps[:, gt_ind] = -1\n # append recorded iou coverage level\n gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))\n\n gt_overlaps = np.sort(gt_overlaps)\n if thresholds is None:\n step = 0.05\n thresholds = np.arange(0.5, 0.95 + 1e-5, step)\n recalls = np.zeros_like(thresholds)\n # compute recall for each iou threshold\n for i, t in enumerate(thresholds):\n recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)\n # ar = 2 * np.trapz(recalls, thresholds)\n ar = recalls.mean()\n return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,\n 'gt_overlaps': gt_overlaps}\n\n def create_roidb_from_box_list(self, box_list, gt_roidb):\n assert len(box_list) == self.num_images, \\\n 'Number of boxes must match number of ground-truth images'\n roidb = []\n for i in xrange(self.num_images):\n boxes = box_list[i]\n num_boxes = boxes.shape[0]\n overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)\n\n if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:\n gt_boxes = gt_roidb[i]['boxes']\n gt_classes = gt_roidb[i]['gt_classes']\n gt_overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n argmaxes = gt_overlaps.argmax(axis=1)\n maxes = gt_overlaps.max(axis=1)\n I = np.where(maxes > 0)[0]\n overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n roidb.append({\n 'boxes' : boxes,\n 'gt_classes' : np.zeros((num_boxes,), dtype=np.int32),\n 'gt_overlaps' : overlaps,\n 'flipped' : False,\n 'seg_areas' : np.zeros((num_boxes,), dtype=np.float32),\n })\n return roidb\n\n @staticmethod\n def merge_roidbs(a, b):\n assert len(a) == len(b)\n for i in xrange(len(a)):\n a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))\n a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],\n b[i]['gt_classes']))\n a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],\n b[i]['gt_overlaps']])\n a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],\n b[i]['seg_areas']))\n return a\n\n def competition_mode(self, on):\n \"\"\"Turn competition mode on or off.\"\"\"\n pass\n" ]
[ [ "numpy.zeros_like", "numpy.zeros", "numpy.where", "numpy.arange", "numpy.sort", "numpy.hstack", "numpy.vstack" ] ]
stjordanis/mljar-supervised
[ "8c3f9d1ed527dfcfdaef91cf82e2779c5832e294" ]
[ "supervised/utils/importance.py" ]
[ "import os\nimport json\nimport logging\nimport warnings\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.inspection import permutation_importance\nfrom supervised.algorithms.registry import (\n BINARY_CLASSIFICATION,\n MULTICLASS_CLASSIFICATION,\n REGRESSION,\n)\nfrom supervised.utils.subsample import subsample\n\nlogger = logging.getLogger(__name__)\nfrom supervised.utils.config import LOG_LEVEL\n\nlogger.setLevel(LOG_LEVEL)\n\nfrom sklearn.metrics import make_scorer, log_loss\nimport sys\n\n\ndef log_loss_eps(y_true, y_pred):\n ll = log_loss(y_true, y_pred, eps=1e-7)\n return ll\n\n\nlog_loss_scorer = make_scorer(log_loss_eps, greater_is_better=False, needs_proba=True)\n\n\nclass PermutationImportance:\n @staticmethod\n def compute_and_plot(\n model,\n X_validation,\n y_validation,\n model_file_path,\n learner_name,\n metric_name=None,\n ml_task=None,\n n_jobs=-1\n ):\n # for scoring check https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter\n if ml_task == BINARY_CLASSIFICATION:\n scoring = log_loss_scorer\n model.classes_ = np.unique(y_validation)\n elif ml_task == MULTICLASS_CLASSIFICATION:\n scoring = log_loss_scorer\n model.classes_ = np.unique(y_validation)\n else:\n scoring = \"neg_mean_squared_error\"\n\n try:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n # subsample validation data to speed-up importance computation\n # in the case of large number of columns, it can take a lot of time\n rows, cols = X_validation.shape\n if cols > 5000 and rows > 100:\n X_vald, _, y_vald, _ = subsample(\n X_validation, y_validation, train_size=100, ml_task=ml_task\n )\n elif cols > 50 and rows * cols > 200000 and rows > 1000:\n X_vald, _, y_vald, _ = subsample(\n X_validation, y_validation, train_size=1000, ml_task=ml_task\n )\n else:\n X_vald = X_validation\n y_vald = y_validation\n\n importance = permutation_importance(\n model,\n X_vald,\n y_vald,\n scoring=scoring,\n n_jobs=n_jobs, \n random_state=12,\n n_repeats=5, # default\n )\n\n sorted_idx = importance[\"importances_mean\"].argsort()\n\n # save detailed importance\n df_imp = pd.DataFrame(\n {\n \"feature\": X_vald.columns[sorted_idx],\n \"mean_importance\": importance[\"importances_mean\"][sorted_idx],\n }\n )\n df_imp.to_csv(\n os.path.join(model_file_path, f\"{learner_name}_importance.csv\"),\n index=False,\n )\n except Exception as e:\n print(str(e))\n print(\"Problem during computing permutation importance. Skipping ...\")\n" ]
[ [ "pandas.DataFrame", "sklearn.metrics.log_loss", "sklearn.metrics.make_scorer", "sklearn.inspection.permutation_importance", "numpy.unique" ] ]
janapavlasek/faster-rcnn.pytorch
[ "8080ee08165931e3480558a6b10db63d52eeb5f8" ]
[ "lib/faster_rcnn/model/faster_rcnn/faster_rcnn.py" ]
[ "import random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torchvision.models as models\nfrom torch.autograd import Variable\nimport numpy as np\nfrom ..utils.config import cfg\nfrom ..rpn.rpn import _RPN\n\nfrom ..roi_layers import ROIAlign, ROIPool\n\n# from ..roi_pooling.modules.roi_pool import _RoIPooling\n# from ..roi_align.modules.roi_align import RoIAlignAvg\n\nfrom ..rpn.proposal_target_layer_cascade import _ProposalTargetLayer\nimport time\nimport pdb\nfrom ..utils.net_utils import _smooth_l1_loss, _crop_pool_layer, _affine_grid_gen, _affine_theta\n\nclass _fasterRCNN(nn.Module):\n \"\"\" faster RCNN \"\"\"\n def __init__(self, classes, class_agnostic):\n super(_fasterRCNN, self).__init__()\n self.classes = classes\n self.n_classes = len(classes)\n self.class_agnostic = class_agnostic\n # loss\n self.RCNN_loss_cls = 0\n self.RCNN_loss_bbox = 0\n\n # define rpn\n self.RCNN_rpn = _RPN(self.dout_base_model)\n self.RCNN_proposal_target = _ProposalTargetLayer(self.n_classes)\n\n # self.RCNN_roi_pool = _RoIPooling(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0/16.0)\n # self.RCNN_roi_align = RoIAlignAvg(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0/16.0)\n\n self.RCNN_roi_pool = ROIPool((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1.0/16.0)\n self.RCNN_roi_align = ROIAlign((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1.0/16.0, 0)\n\n def forward(self, im_data, im_info, gt_boxes, num_boxes):\n batch_size = im_data.size(0)\n\n im_info = im_info.data\n gt_boxes = gt_boxes.data\n num_boxes = num_boxes.data\n\n # feed image data to base model to obtain base feature map\n base_feat = self.RCNN_base(im_data)\n\n # feed base feature map tp RPN to obtain rois\n rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)\n\n # if it is training phrase, then use ground trubut bboxes for refining\n if self.training:\n roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes)\n rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws = roi_data\n\n rois_label = Variable(rois_label.view(-1).long())\n rois_target = Variable(rois_target.view(-1, rois_target.size(2)))\n rois_inside_ws = Variable(rois_inside_ws.view(-1, rois_inside_ws.size(2)))\n rois_outside_ws = Variable(rois_outside_ws.view(-1, rois_outside_ws.size(2)))\n else:\n rois_label = None\n rois_target = None\n rois_inside_ws = None\n rois_outside_ws = None\n rpn_loss_cls = 0\n rpn_loss_bbox = 0\n\n rois = Variable(rois)\n # do roi pooling based on predicted rois\n\n if cfg.POOLING_MODE == 'align':\n pooled_feat = self.RCNN_roi_align(base_feat, rois.view(-1, 5))\n elif cfg.POOLING_MODE == 'pool':\n pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1,5))\n\n # feed pooled features to top model\n pooled_feat = self._head_to_tail(pooled_feat)\n\n # compute bbox offset\n bbox_pred = self.RCNN_bbox_pred(pooled_feat)\n if self.training and not self.class_agnostic:\n # select the corresponding columns according to roi labels\n bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4)\n bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4))\n bbox_pred = bbox_pred_select.squeeze(1)\n\n # compute object classification probability\n cls_score = self.RCNN_cls_score(pooled_feat)\n cls_prob = F.softmax(cls_score, 1)\n\n RCNN_loss_cls = 0\n RCNN_loss_bbox = 0\n\n if self.training:\n # classification loss\n RCNN_loss_cls = F.cross_entropy(cls_score, rois_label)\n\n # 
bounding box regression L1 loss\n RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws)\n\n\n cls_prob = cls_prob.view(batch_size, rois.size(1), -1)\n bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1)\n\n return rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label\n\n def _init_weights(self):\n def normal_init(m, mean, stddev, truncated=False):\n \"\"\"\n weight initalizer: truncated normal and random normal.\n \"\"\"\n # x is a parameter\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n m.bias.data.zero_()\n\n normal_init(self.RCNN_rpn.RPN_Conv, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_rpn.RPN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_rpn.RPN_bbox_pred, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.RCNN_bbox_pred, 0, 0.001, cfg.TRAIN.TRUNCATED)\n\n def create_architecture(self):\n self._init_modules()\n self._init_weights()\n" ]
[ [ "torch.autograd.Variable", "torch.nn.functional.cross_entropy", "torch.nn.functional.softmax" ] ]
aditya-taparia/ImageEditor-Tkinter-Application
[ "f8444c24bc9b1da9702c54ff2659abc419c39998" ]
[ "Image_editor.py" ]
[ "import cv2\r\nimport os\r\nimport tkinter as tk\r\nimport numpy as np\r\nimport random\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog\r\nfrom PIL import ImageTk, Image \r\nglobal count,emig\r\nglobal bright,con\r\nglobal frp,tname#list of paths\r\nfrp=[]\r\ntname=[]\r\ncon=1\r\nbright=0\r\n\r\ndef getpath(path):\r\n a=path.split(r'/')\r\n print(a)\r\n fname=a[-1]\r\n l=len(fname)\r\n location=path[:-l]\r\n return location\r\n\r\ndef getfoldername(path):\r\n a=path.split(r'/')\r\n print(a)\r\n name=a[-1]\r\n return name\r\n\r\ndef getfilename(path):\r\n a=path.split(r'/')\r\n fname=a[-1]\r\n a=fname.split('.')\r\n a=a[0]\r\n return a\r\n\r\ndef openfilename():\r\n filename = filedialog.askopenfilename(title ='\"pen') \r\n return filename \r\n\r\n\r\ndef open_img():\r\n global x, panelA, panelB\r\n global count,eimg,location,filename\r\n count=0\r\n x = openfilename() \r\n img = Image.open(x) \r\n eimg=img\r\n img = ImageTk.PhotoImage(img)\r\n temp=x\r\n location=getpath(temp)\r\n filename=getfilename(temp)\r\n if panelA is None or panelB is None:\r\n panelA = Label(image=img)\r\n panelA.image = img\r\n panelA.pack(side=\"left\", padx=10, pady=10)\r\n panelB = Label(image=img)\r\n panelB.image = img\r\n panelB.pack(side=\"right\", padx=10, pady=10)\r\n else:\r\n panelA.configure(image=img)\r\n panelB.configure(image=img)\r\n panelA.image = img\r\n panelB.image = img\r\n\r\ndef sketch():\r\n image = cv2.imread(x)\r\n global count,eimg\r\n count=1\r\n if image is None:\r\n print(\"can not find image\")\r\n sys.exit()\r\n # gray scale\r\n grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n #invert gray image\r\n grayImageInv = 255 - grayImage\r\n # gaussian blur\r\n grayImageInv = cv2.GaussianBlur(grayImageInv, (21, 21), 0)\r\n #blend using color dodge\r\n output = cv2.divide(grayImage, 255-grayImageInv, scale=256.0)\r\n #edge \r\n gray = cv2.medianBlur(grayImage, 1)\r\n edges = cv2.adaptiveThreshold(gray, 10, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)\r\n color = cv2.bilateralFilter(output, 6, 60, 200)\r\n global o1\r\n o1 = cv2.bitwise_and(color, color, mask=edges)\r\n image = Image.fromarray(o1)\r\n eimg=image\r\n image = ImageTk.PhotoImage(image)\r\n panelB.configure(image=image)\r\n panelB.image = image\r\n \r\n \r\ndef sharp():\r\n image = cv2.imread(x)[:, :, ::-1]\r\n global count,eimg\r\n count=2\r\n if image is None:\r\n print(\"can not find image\")\r\n sys.exit()\r\n k2 = np.array([[0, -1, 0],[-1, 5, -1],[0, -1, 0]])\r\n global o2\r\n o2 = cv2.filter2D(image, -1, k2)\r\n image = Image.fromarray(o2)\r\n eimg=image\r\n image = ImageTk.PhotoImage(image)\r\n panelB.configure(image=image)\r\n panelB.image = image\r\n\r\ndef cartoon():\r\n image = cv2.imread(x)[:, :, ::-1]\r\n global count,eimg\r\n count=4\r\n if image is None:\r\n print(\"can not find image\")\r\n exit()\r\n image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n image_gray = cv2.GaussianBlur(image_gray, (3, 3), 0)\r\n image_edge = cv2.Laplacian(image_gray, -1, ksize=5)\r\n image_edge = 255 - image_edge\r\n ret, image_edge = cv2.threshold(image_edge, 150, 255, cv2.THRESH_BINARY)\r\n edgePreservingImage = cv2.edgePreservingFilter(image, flags=2, sigma_s=50, sigma_r=0.4)\r\n global o4\r\n output =np.zeros(image_gray.shape)\r\n output = cv2.bitwise_and(edgePreservingImage, edgePreservingImage, mask=image_edge)\r\n o4 = cv2.convertScaleAbs(output,alpha=1, beta=60)\r\n image = Image.fromarray(o4)\r\n eimg=image\r\n image = ImageTk.PhotoImage(image)\r\n 
panelB.configure(image=image)\r\n panelB.image = image\r\n\r\ndef conto():\r\n img = cv2.imread(x)[:, :, ::-1]\r\n global count,eimg\r\n count=3\r\n image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n #check if image exists\r\n if image is None:\r\n print(\"can not find image\")\r\n sys.exit() \r\n #apply canny to the input image\r\n canny = cv2.Canny(image, 50, 150, apertureSize=3)\r\n #find contours\r\n contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) \r\n #output image to draw contours on\r\n output = np.zeros((image.shape[0], image.shape[1], 3), dtype=np.uint8)\r\n #draw contours\r\n for i in range(0, len(contours)):\r\n cv2.drawContours(output, contours, i, (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)), 2)\r\n global o3\r\n o3=output\r\n image = Image.fromarray(o3)\r\n eimg=image\r\n image = ImageTk.PhotoImage(image)\r\n panelB.configure(image=image)\r\n panelB.image = image\r\n\r\ndef jeetesh():\r\n image = cv2.imread(x)[:, :, ::-1] \r\n global count,eimg\r\n count=5\r\n result = np.copy(image)\r\n\r\n original_Val = np.array([0, 50, 100, 150, 200, 255])\r\n red_Val = np.array([0, 20, 40, 75, 150, 255])\r\n blue_Val = np.array([0, 80, 150, 190, 220, 255])\r\n all_Val = np.arange(0, 256)\r\n redLookupTable = np.interp(all_Val, original_Val, red_Val)\r\n\r\n blueLookupTable = np.interp(all_Val, original_Val, blue_Val)\r\n\r\n B, G, R = cv2.split(result)\r\n R = cv2.LUT(R, redLookupTable)\r\n R = np.uint8(R)\r\n B = cv2.LUT(B, blueLookupTable)\r\n B = np.uint8(B)\r\n\r\n result = cv2.merge([B, G, R])\r\n global o5\r\n o5=result\r\n image = Image.fromarray(o5)\r\n eimg=image\r\n image = ImageTk.PhotoImage(image)\r\n panelB.configure(image=image)\r\n panelB.image = image\r\n\r\ndef reset():\r\n image = cv2.imread(x)[:, :, ::-1] \r\n global count,eimg\r\n count=6\r\n global o6\r\n o6=image\r\n image = Image.fromarray(o6)\r\n eimg=image\r\n image = ImageTk.PhotoImage(image)\r\n panelB.configure(image=image)\r\n panelB.image = image\r\n \r\ndef adjust_brightness(value): \r\n \r\n value=int(float(value))\r\n txtbrit_per= Label(sliderframe,text=str(value)+\"%\").grid(row=0,column=5)\r\n global bright,eimg\r\n bright=value\r\n if count == 0:\r\n img = cv2.imread(x)[:, :, ::-1]\r\n elif count == 1:\r\n img = o1\r\n elif count == 2:\r\n img = o2\r\n elif count == 3:\r\n img = o3\r\n elif count == 4:\r\n img = o4\r\n elif count == 5:\r\n img = o5\r\n elif count == 6:\r\n img = o6\r\n if img is None:\r\n print(\"can not find image\")\r\n sys.exit()\r\n final_output = cv2.convertScaleAbs(img,alpha=con, beta=value)\r\n image = Image.fromarray(final_output)\r\n eimg=image\r\n image = ImageTk.PhotoImage(image)\r\n panelB.configure(image=image)\r\n panelB.image = image\r\n \r\ndef adjust_contrast(value): \r\n value=int(float(value))\r\n global con,eimg\r\n con=value\r\n txtcon_per= Label(sliderframe,text=str(int((float((value-1)/4)*100)))+\"%\").grid(row=1,column=5)\r\n if count == 0:\r\n img = cv2.imread(x)[:, :, ::-1]\r\n elif count ==1:\r\n img = o1\r\n elif count == 2:\r\n img = o2\r\n elif count == 3:\r\n img = o3\r\n elif count == 4:\r\n img = o4\r\n elif count == 5:\r\n img = o5\r\n elif count == 6:\r\n img = o6\r\n if img is None:\r\n print(\"can not find image\")\r\n sys.exit()\r\n final_output = cv2.convertScaleAbs(img,alpha=value, beta=bright)\r\n image = Image.fromarray(final_output)\r\n eimg=image\r\n image = ImageTk.PhotoImage(image)\r\n panelB.configure(image=image)\r\n panelB.image = image \r\n\r\ndef save():\r\n global 
location,filename,eimg\r\n eimg.save(location+filename+r\"_edit.png\")\r\n\r\n###########################\r\n\r\n \r\nwin = tk.Tk()\r\nwin.title(\"Aviato\")\r\nwin.resizable(width = True, height = True) \r\nmenubar = Menu(win)\r\n\r\n\r\n \r\n# Adding File Menu and commands\r\nfile = Menu(menubar, tearoff=0)\r\nmenubar.add_cascade(label='File', menu=file)\r\nfile.add_command(label='New File', command=open_img)\r\nfile.add_command(label='Save', command=save)\r\nfile.add_command(label='Exit', command=win.destroy)\r\n\r\n#frame for buttons and slider\r\nbframe=Frame(win)\r\nbframe.pack(side=BOTTOM,fill=Y,pady=10)\r\n\r\n#frame for the image\r\nimgframe=Frame(win)\r\npanelB=None\r\npanelA=None\r\nimgframe.pack(side=TOP,fill=Y)\r\n\r\n#frame for slider\r\nsliderframe=Frame(bframe)\r\nsliderframe.pack(side=TOP)\r\n\r\n#frame for Buttons\r\nbutframe=Frame(bframe)\r\nbutframe.pack(side=BOTTOM)\r\n\r\n \r\n#buttons in the button frame\r\nttk.Button(butframe, text=\"sketch\", command=sketch).grid(column=0, row=0)\r\nttk.Button(butframe, text=\"sharpen\", command=sharp).grid(column=1, row=0)\r\nttk.Button(butframe, text=\"contour\",command=conto).grid(column=2, row=0)\r\nttk.Button(butframe, text=\"cartoon\", command=cartoon).grid(column=3, row=0)\r\nttk.Button(butframe, text=\"kelvin\", command=jeetesh).grid(column=4, row=0)\r\nttk.Button(butframe, text=\"reset\", command=reset).grid(column=5, row=0)\r\n\r\n#slider\r\ntxtbrit= Label(sliderframe,text=\"Brightness:\").grid(row=0,column=3)\r\ntxtcon=Label(sliderframe,text=\"Contrast:\").grid(row=1,column=3)\r\nslider=ttk.Scale(sliderframe,from_=0,to=100,command=adjust_brightness).grid(row=0,column=4)\r\nslider=ttk.Scale(sliderframe,from_=1,to=5,command=adjust_contrast).grid(row=1,column=4)\r\ntxtbrit_per= Label(sliderframe,text=\"0%\").grid(row=0,column=5)\r\ntxtcon_per= Label(sliderframe,text=\"0%\").grid(row=1,column=5)\r\n#======================\r\n# Start GUI\r\n#======================\r\n\r\nwin.config(menu = menubar)\r\nwin.mainloop()" ]
[ [ "numpy.array", "numpy.uint8", "numpy.zeros", "numpy.copy", "numpy.interp", "numpy.arange" ] ]
xiongzihua/ncnn-yolo
[ "8d0d05af4263c127d22642c9fe17957b5c068a83" ]
[ "pytorch-yolo-v1/yoloLoss.py" ]
[ "#encoding:utf-8\r\n#\r\n#created by xiongzihua 2017.12.26\r\n#\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\n\r\nclass yoloLoss(nn.Module):\r\n def __init__(self,S,B,l_coord,l_noobj):\r\n super(yoloLoss,self).__init__()\r\n self.S = S\r\n self.B = B\r\n self.l_coord = l_coord\r\n self.l_noobj = l_noobj\r\n\r\n def compute_iou(self, box1, box2):\r\n '''Compute the intersection over union of two set of boxes, each box is [x1,y1,x2,y2].\r\n Args:\r\n box1: (tensor) bounding boxes, sized [N,4].\r\n box2: (tensor) bounding boxes, sized [M,4].\r\n Return:\r\n (tensor) iou, sized [N,M].\r\n '''\r\n N = box1.size(0)\r\n M = box2.size(0)\r\n\r\n lt = torch.max(\r\n box1[:,:2].unsqueeze(1).expand(N,M,2), # [N,2] -> [N,1,2] -> [N,M,2]\r\n box2[:,:2].unsqueeze(0).expand(N,M,2), # [M,2] -> [1,M,2] -> [N,M,2]\r\n )\r\n\r\n rb = torch.min(\r\n box1[:,2:].unsqueeze(1).expand(N,M,2), # [N,2] -> [N,1,2] -> [N,M,2]\r\n box2[:,2:].unsqueeze(0).expand(N,M,2), # [M,2] -> [1,M,2] -> [N,M,2]\r\n )\r\n\r\n wh = rb - lt # [N,M,2]\r\n wh[wh<0] = 0 # clip at 0\r\n inter = wh[:,:,0] * wh[:,:,1] # [N,M]\r\n\r\n area1 = (box1[:,2]-box1[:,0]) * (box1[:,3]-box1[:,1]) # [N,]\r\n area2 = (box2[:,2]-box2[:,0]) * (box2[:,3]-box2[:,1]) # [M,]\r\n area1 = area1.unsqueeze(1).expand_as(inter) # [N,] -> [N,1] -> [N,M]\r\n area2 = area2.unsqueeze(0).expand_as(inter) # [M,] -> [1,M] -> [N,M]\r\n\r\n iou = inter / (area1 + area2 - inter)\r\n return iou\r\n def forward(self,pred_tensor,target_tensor):\r\n '''\r\n pred_tensor: (tensor) size(batchsize,S,S,Bx5+20=30) [x,y,w,h,c]\r\n target_tensor: (tensor) size(batchsize,S,S,30)\r\n '''\r\n N = pred_tensor.size()[0]\r\n coo_mask = target_tensor[:,:,:,4] > 0\r\n noo_mask = target_tensor[:,:,:,4] == 0\r\n coo_mask = coo_mask.unsqueeze(-1).expand_as(target_tensor)\r\n noo_mask = noo_mask.unsqueeze(-1).expand_as(target_tensor)\r\n\r\n coo_pred = pred_tensor[coo_mask].view(-1,30)\r\n box_pred = coo_pred[:,:10].contiguous().view(-1,5) #box[x1,y1,w1,h1,c1]\r\n class_pred = coo_pred[:,10:] #[x2,y2,w2,h2,c2]\r\n \r\n coo_target = target_tensor[coo_mask].view(-1,30)\r\n box_target = coo_target[:,:10].contiguous().view(-1,5)\r\n class_target = coo_target[:,10:]\r\n\r\n # compute not contain obj loss\r\n noo_pred = pred_tensor[noo_mask].view(-1,30)\r\n noo_target = target_tensor[noo_mask].view(-1,30)\r\n noo_pred_mask = torch.cuda.ByteTensor(noo_pred.size())\r\n noo_pred_mask.zero_()\r\n noo_pred_mask[:,4]=1;noo_pred_mask[:,9]=1\r\n noo_pred_c = noo_pred[noo_pred_mask] #noo pred只需要计算 c 的损失 size[-1,2]\r\n noo_target_c = noo_target[noo_pred_mask]\r\n nooobj_loss = F.mse_loss(noo_pred_c,noo_target_c,size_average=False)\r\n\r\n #compute contain obj loss\r\n coo_response_mask = torch.cuda.ByteTensor(box_target.size())\r\n coo_response_mask.zero_()\r\n coo_not_response_mask = torch.cuda.ByteTensor(box_target.size())\r\n coo_not_response_mask.zero_()\r\n box_target_iou = torch.zeros(box_target.size()).cuda()\r\n for i in range(0,box_target.size()[0],2): #choose the best iou box\r\n box1 = box_pred[i:i+2]\r\n box1_xyxy = Variable(torch.FloatTensor(box1.size()))\r\n box1_xyxy[:,:2] = 1./15.*box1[:,:2] -0.5*box1[:,2:4]\r\n box1_xyxy[:,2:4] = 1./15.*box1[:,:2] +0.5*box1[:,2:4]\r\n box2 = box_target[i].view(-1,5)\r\n box2_xyxy = Variable(torch.FloatTensor(box2.size()))\r\n box2_xyxy[:,:2] = 1./15.*box2[:,:2] -0.5*box2[:,2:4]\r\n box2_xyxy[:,2:4] = 1./15.*box2[:,:2] +0.5*box2[:,2:4]\r\n iou = 
self.compute_iou(box1_xyxy[:,:4],box2_xyxy[:,:4]) #[2,1]\r\n max_iou,max_index = iou.max(0)\r\n max_index = max_index.data.cuda()\r\n \r\n coo_response_mask[i+max_index]=1\r\n coo_not_response_mask[i+1-max_index]=1\r\n\r\n #####\r\n # we want the confidence score to equal the\r\n # intersection over union (IOU) between the predicted box\r\n # and the ground truth\r\n #####\r\n box_target_iou[i+max_index,torch.LongTensor([4]).cuda()] = (max_iou).data.cuda()\r\n box_target_iou = Variable(box_target_iou).cuda()\r\n #1.response loss\r\n box_pred_response = box_pred[coo_response_mask].view(-1,5)\r\n box_target_response_iou = box_target_iou[coo_response_mask].view(-1,5)\r\n box_target_response = box_target[coo_response_mask].view(-1,5)\r\n contain_loss = F.mse_loss(box_pred_response[:,4],box_target_response_iou[:,4],size_average=False)\r\n loc_loss = F.mse_loss(box_pred_response[:,:2],box_target_response[:,:2],size_average=False) + F.mse_loss(torch.sqrt(box_pred_response[:,2:4]),torch.sqrt(box_target_response[:,2:4]),size_average=False)\r\n #2.not response loss\r\n box_pred_not_response = box_pred[coo_not_response_mask].view(-1,5)\r\n box_target_not_response = box_target[coo_not_response_mask].view(-1,5)\r\n box_target_not_response[:,4]= 0\r\n not_contain_loss = F.mse_loss(box_pred_not_response[:,4],box_target_not_response[:,4],size_average=False)\r\n #3.class loss\r\n class_loss = F.mse_loss(class_pred,class_target,size_average=False)\r\n\r\n return (self.l_coord*loc_loss + 2*contain_loss + not_contain_loss + self.l_noobj*nooobj_loss + class_loss)/N\r\n\r\n\r\n\r\n\r\n" ]
[ [ "torch.nn.functional.mse_loss", "torch.sqrt", "torch.LongTensor", "torch.autograd.Variable" ] ]
holwech/pysa
[ "d002e3b635b88037024555e1f9aab701b31044f3" ]
[ "pysa/nhht.py" ]
[ "import numpy as np\nimport scipy.signal as signal\nfrom . import utils\n\n# Calculates the normalized HHt.\n# Takes in all the IMFs, but not the residue. That is; the last row of the the return value\n# of the EMD function should not be included in the input variable \"imfs\"\ndef nhht(imfs, sample_frequency):\n # Non-optimal fix to some array overwrite issue\n imfs = np.copy(imfs)\n\n n_imfs = len(imfs)\n max_freq = sample_frequency / 2.0\n amplitudes = np.zeros(imfs.shape, np.float32)\n scaled_imfs = np.zeros(imfs.shape, np.float32)\n frequencies = np.zeros(imfs.shape, np.float32)\n\n for i in range(n_imfs):\n scaled_imf, am = utils.scale_amplitudes(imfs[i])\n scaled_imfs[i] = scaled_imf\n h = signal.hilbert(scaled_imf)\n amplitudes[i] = am\n frequencies[i] = np.r_[\n 0.0,\n 0.5*(np.angle(-h[2:]*np.conj(h[0:-2]))+np.pi)/(2.0*np.pi) * np.float32(sample_frequency),\n 0.0\n ]\n\n frequencies[i, 0] = frequencies[i, 1]\n frequencies[i, -1] = frequencies[i, -2]\n\n frequencies[i] = utils.check_rapid_changes_in_frequency(frequencies[i], max_freq)\n\n return frequencies, amplitudes\n\n\ndef get_instantaneous_frequency(imfs, sample_frequency=500.0):\n sample_frequency = float(sample_frequency)\n max_freq = sample_frequency / 2.0\n freq = np.zeros(imfs.shape, np.float)\n\n for i in range(len(imfs)):\n # Do Hilbert Transform - NB! Must be normalized (scaled amplitudes)\n hi = signal.hilbert(imfs[i])\n freq[i, :] = np.r_[\n 0.0,\n 0.5*(np.angle(-hi[2:]*np.conj(hi[0:-2]))+np.pi)/(2.0*np.pi) * sample_frequency,\n 0.0\n ]\n\n freq[i, 0] = freq[i, 1]\n freq[i, -1] = freq[i, -2]\n\n for k in range(len(freq[i])):\n\n if freq[i, k] > max_freq:\n if k > 0:\n freq[i, k] = freq[i, k-1]\n else:\n freq[i, k] = max_freq\n\n # Check if change in frequency is unrealistic (too rapid change):\n if k > 0:\n if np.fabs(freq[i, k] - freq[i, k-1]) > 50.0:\n if freq[i, k] > freq[i, k-1]:\n freq[i, k] = freq[i, k-1]\n else:\n freq[i, k-1] = freq[i, k]\n return freq\n\n\n\n\n" ]
[ [ "numpy.zeros", "numpy.copy", "numpy.fabs", "numpy.float32", "numpy.conj", "scipy.signal.hilbert" ] ]
rajkshah3/val
[ "1436335ae5d810ddccd30ea5251624ca797bcaaf" ]
[ "nets/rnn.py" ]
[ "from tensorflow.python.util import nest\nfrom tensorflow.python.framework import tensor_shape\n\nimport tensorflow as tf\nimport numpy as np\n\nslim = tf.contrib.slim\n\n\ndef _state_size_with_prefix(state_size, prefix=None):\n\n result_state_size = tensor_shape.as_shape(state_size).as_list()\n if prefix is not None:\n if not isinstance(prefix, list):\n raise TypeError(\"prefix of _state_size_with_prefix should be a list.\")\n result_state_size = prefix + result_state_size\n return result_state_size\n\n\ndef make_variable_state_initializer(**kwargs):\n\n def variable_state_initializer(shape, batch_size, dtype, index):\n args = kwargs.copy()\n if args.get('name'):\n args['name'] = args['name'] + '_' + str(index)\n else:\n args['name'] = 'init_state_' + str(index)\n args['shape'] = shape\n args['dtype'] = dtype\n var = tf.get_variable(**args)\n var = tf.expand_dims(var, 0)\n var = tf.tile(var, tf.stack([batch_size] + [1] * len(shape)))\n var.set_shape(_state_size_with_prefix(shape, prefix=[None]))\n return var\n return variable_state_initializer\n\n\ndef get_initial_cell_state(cell, initializer, batch_size, dtype):\n\n state_size = cell.state_size\n if nest.is_sequence(state_size):\n state_size_flat = nest.flatten(state_size)\n init_state_flat = [\n initializer(_state_size_with_prefix(s), batch_size, dtype, i)\n for i, s in enumerate(state_size_flat)]\n init_state = nest.pack_sequence_as(structure=state_size, flat_sequence=init_state_flat)\n else:\n init_state_size = _state_size_with_prefix(state_size)\n init_state = initializer(init_state_size, batch_size, dtype, None)\n return init_state\n\n\ndef rnn_layers(model_name, inputs, sequence_length, batch_size, hidden_state_dimension, dropout_keep_prob=0.999, is_training=True, reuse=False):\n\n state_initializer = make_variable_state_initializer()\n with tf.variable_scope(\"bidirectional_lstm\", reuse=reuse):\n cell = {}\n initial_state = {}\n if 'bi' in model_name:\n directions = [\"forward\", \"backward\"]\n hidden_state_dimension = int(hidden_state_dimension/2)\n else:\n directions = [\"forward\"]\n print(directions)\n for direction in directions:\n with tf.variable_scope(direction):\n # LSTM or GRU cell\n if 'lstm' in model_name:\n print('lstm') \n cell[direction] = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_state_dimension, state_is_tuple=True)\n elif 'gru' in model_name:\n print('gru') \n cell[direction] = tf.contrib.rnn.GRUCell(num_units=hidden_state_dimension)\n else:\n raise ValueError(\"cell must be either 'lstm' or 'gru'.\")\n initial_state[direction] = get_initial_cell_state(cell[direction], state_initializer, batch_size, tf.float32)\n\n if 'bi' in model_name:\n # bidirection LSTM\n # sequence_length must be provided for tf.nn.bidirectional_dynamic_rnn due to internal bug\n (outputs_forward, outputs_backward), (final_states_forward, final_states_backward) = \\\n tf.nn.bidirectional_dynamic_rnn(cell[\"forward\"],\n cell[\"backward\"],\n inputs=inputs,\n dtype=tf.float32,\n sequence_length=sequence_length,\n initial_state_fw=initial_state[\"forward\"],\n initial_state_bw=initial_state[\"backward\"])\n # batch_size * T * 1024\n output = tf.concat([outputs_forward, outputs_backward], axis=2, name='output_sequence')\n else:\n outputs_forward, final_states_forward = \\\n tf.nn.dynamic_rnn(cell[\"forward\"],\n inputs=inputs,\n dtype=tf.float32,\n sequence_length=sequence_length,\n initial_state=initial_state[\"forward\"]) \n output = outputs_forward\n states = tf.reduce_max(output, axis=1, name='mean_states')\n return states\n\n\ndef 
extract_text_features(model_name, inputs, sequence_length, vocab_size, word_embedding_size, text_embedding_size, batch_size, is_training, word_embedding_dir=None, scope=\"RNN\"):\n\n initializer = tf.contrib.layers.xavier_initializer()\n with tf.variable_scope(scope):\n if word_embedding_dir is not None:\n print('load pre-trained embeddings from ' + word_embedding_dir)\n pretrain_words = np.load(word_embedding_dir)\n print(pretrain_words.shape)\n pretrain_words = pretrain_words.astype(np.float32)\n token_embedding_weights = tf.get_variable(name=\"token_embedding_weights\", \n dtype=tf.float32,\n initializer=pretrain_words)\n print(token_embedding_weights)\n else:\n token_embedding_weights = tf.get_variable(name=\"token_embedding_weights\",\n shape=[vocab_size, word_embedding_size],\n initializer=initializer) \n token_lstm_input = tf.nn.embedding_lookup(token_embedding_weights, inputs)\n\n batch_size = inputs.get_shape().as_list()[0]\n states = rnn_layers(\n model_name=model_name,\n inputs=token_lstm_input,\n sequence_length=sequence_length, \n batch_size=batch_size,\n hidden_state_dimension=text_embedding_size,\n is_training=is_training)\n features = tf.expand_dims(tf.expand_dims(states, 1), 1) # batch_size * 1 x 1 x 1024\n return features\n" ]
[ [ "tensorflow.concat", "tensorflow.expand_dims", "tensorflow.python.util.nest.is_sequence", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.nn.bidirectional_dynamic_rnn", "tensorflow.contrib.rnn.BasicLSTMCell", "numpy.load", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.util.nest.pack_sequence_as", "tensorflow.variable_scope", "tensorflow.reduce_max", "tensorflow.python.util.nest.flatten", "tensorflow.get_variable", "tensorflow.nn.embedding_lookup", "tensorflow.contrib.rnn.GRUCell", "tensorflow.nn.dynamic_rnn" ] ]
razvanc92/ST-WA
[ "90259082f5b872aa584dd72c0857499f96d2cd76" ]
[ "componenets/metrics.py" ]
[ "import numpy as np\nimport torch\n\ndef MAE_torch(pred, true, mask_value=None):\n if mask_value != None:\n mask = torch.gt(true, mask_value)\n pred = torch.masked_select(pred, mask)\n true = torch.masked_select(true, mask)\n return torch.mean(torch.abs(true-pred))\n\ndef MSE_torch(pred, true, mask_value=None):\n if mask_value != None:\n mask = torch.gt(true, mask_value)\n pred = torch.masked_select(pred, mask)\n true = torch.masked_select(true, mask)\n return torch.mean((pred - true) ** 2)\n\ndef RMSE_torch(pred, true, mask_value=None):\n if mask_value != None:\n mask = torch.gt(true, mask_value)\n pred = torch.masked_select(pred, mask)\n true = torch.masked_select(true, mask)\n return torch.sqrt(torch.mean((pred - true) ** 2))\n\ndef RRSE_torch(pred, true, mask_value=None):\n if mask_value != None:\n mask = torch.gt(true, mask_value)\n pred = torch.masked_select(pred, mask)\n true = torch.masked_select(true, mask)\n return torch.sqrt(torch.sum((pred - true) ** 2)) / torch.sqrt(torch.sum((pred - true.mean()) ** 2))\n\ndef CORR_torch(pred, true, mask_value=None):\n #input B, T, N, D or B, N, D or B, N\n if len(pred.shape) == 2:\n pred = pred.unsqueeze(dim=1).unsqueeze(dim=1)\n true = true.unsqueeze(dim=1).unsqueeze(dim=1)\n elif len(pred.shape) == 3:\n pred = pred.transpose(1, 2).unsqueeze(dim=1)\n true = true.transpose(1, 2).unsqueeze(dim=1)\n elif len(pred.shape) == 4:\n #B, T, N, D -> B, T, D, N\n pred = pred.transpose(2, 3)\n true = true.transpose(2, 3)\n else:\n raise ValueError\n dims = (0, 1, 2)\n pred_mean = pred.mean(dim=dims)\n true_mean = true.mean(dim=dims)\n pred_std = pred.std(dim=dims)\n true_std = true.std(dim=dims)\n correlation = ((pred - pred_mean)*(true - true_mean)).mean(dim=dims) / (pred_std*true_std)\n index = (true_std != 0)\n correlation = (correlation[index]).mean()\n return correlation\n\n\ndef MAPE_torch(pred, true, mask_value=None):\n if mask_value != None:\n mask = torch.gt(true, mask_value)\n pred = torch.masked_select(pred, mask)\n true = torch.masked_select(true, mask)\n return torch.mean(torch.abs(torch.div((true - pred), true)))\n\ndef PNBI_torch(pred, true, mask_value=None):\n if mask_value != None:\n mask = torch.gt(true, mask_value)\n pred = torch.masked_select(pred, mask)\n true = torch.masked_select(true, mask)\n indicator = torch.gt(pred - true, 0).float()\n return indicator.mean()\n\ndef oPNBI_torch(pred, true, mask_value=None):\n if mask_value != None:\n mask = torch.gt(true, mask_value)\n pred = torch.masked_select(pred, mask)\n true = torch.masked_select(true, mask)\n bias = (true+pred) / (2*true)\n return bias.mean()\n\ndef MARE_torch(pred, true, mask_value=None):\n if mask_value != None:\n mask = torch.gt(true, mask_value)\n pred = torch.masked_select(pred, mask)\n true = torch.masked_select(true, mask)\n return torch.div(torch.sum(torch.abs((true - pred))), torch.sum(true))\n\ndef SMAPE_torch(pred, true, mask_value=None):\n if mask_value != None:\n mask = torch.gt(true, mask_value)\n pred = torch.masked_select(pred, mask)\n true = torch.masked_select(true, mask)\n return torch.mean(torch.abs(true-pred)/(torch.abs(true)+torch.abs(pred)))\n\n\ndef MAE_np(pred, true, mask_value=None):\n if mask_value != None:\n mask = np.where(true > (mask_value), True, False)\n true = true[mask]\n pred = pred[mask]\n MAE = np.mean(np.absolute(pred-true))\n return MAE\n\ndef RMSE_np(pred, true, mask_value=None):\n if mask_value != None:\n mask = np.where(true > (mask_value), True, False)\n true = true[mask]\n pred = pred[mask]\n RMSE = 
np.sqrt(np.mean(np.square(pred-true)))\n return RMSE\n\n#Root Relative Squared Error\ndef RRSE_np(pred, true, mask_value=None):\n if mask_value != None:\n mask = np.where(true > (mask_value), True, False)\n true = true[mask]\n pred = pred[mask]\n mean = true.mean()\n return np.divide(np.sqrt(np.sum((pred-true) ** 2)), np.sqrt(np.sum((true-mean) ** 2)))\n\ndef MAPE_np(pred, true, mask_value=None):\n if mask_value != None:\n mask = np.where(true > (mask_value), True, False)\n true = true[mask]\n pred = pred[mask]\n return np.mean(np.absolute(np.divide((true - pred), true)))\n\ndef PNBI_np(pred, true, mask_value=None):\n #if PNBI=0, all pred are smaller than true\n #if PNBI=1, all pred are bigger than true\n if mask_value != None:\n mask = np.where(true > (mask_value), True, False)\n true = true[mask]\n pred = pred[mask]\n bias = pred-true\n indicator = np.where(bias>0, True, False)\n return indicator.mean()\n\ndef oPNBI_np(pred, true, mask_value=None):\n #if oPNBI>1, pred are bigger than true\n #if oPNBI<1, pred are smaller than true\n #however, this metric is too sensitive to small values. Not good!\n if mask_value != None:\n mask = np.where(true > (mask_value), True, False)\n true = true[mask]\n pred = pred[mask]\n bias = (true + pred) / (2 * true)\n return bias.mean()\n\ndef MARE_np(pred, true, mask_value=None):\n if mask_value != None:\n mask = np.where(true> (mask_value), True, False)\n true = true[mask]\n pred = pred[mask]\n return np.divide(np.sum(np.absolute((true - pred))), np.sum(true))\n\ndef CORR_np(pred, true, mask_value=None):\n #input B, T, N, D or B, N, D or B, N\n if len(pred.shape) == 2:\n #B, N -> B, 1, 1, N\n pred = np.expand_dims(np.expand_dims(pred, axis=1), axis=1)\n true = np.expand_dims(np.expand_dims(true, axis=1), axis=1)\n elif len(pred.shape) == 3:\n #np.transpose include permute, B, T, N\n pred = np.expand_dims(pred.transpose(0, 2, 1), axis=1)\n true = np.expand_dims(true.transpose(0, 2, 1), axis=1)\n elif len(pred.shape) == 4:\n #B, T, N, D -> B, T, D, N\n pred = pred.transpose(0, 1, 3, 2)\n true = true.transpose(0, 1, 3, 2)\n else:\n raise ValueError\n dims = (0, 1, 2)\n pred_mean = pred.mean(axis=dims)\n true_mean = true.mean(axis=dims)\n pred_std = pred.std(axis=dims)\n true_std = true.std(axis=dims)\n correlation = ((pred - pred_mean)*(true - true_mean)).mean(axis=dims) / (pred_std*true_std)\n index = (true_std != 0)\n correlation = (correlation[index]).mean()\n return correlation\n\ndef metrics(pred, true, mask1, mask2):\n #mask1 filters out very small values, mask2 filters out values below a defined threshold\n assert type(pred) == type(true)\n if type(pred) == np.ndarray:\n mae = MAE_np(pred, true, mask1)\n rmse = RMSE_np(pred, true, mask1)\n mape = MAPE_np(pred, true, mask2)\n rrse = RRSE_np(pred, true, mask1)\n corr = 0\n elif type(pred) == torch.Tensor:\n mae = MAE_torch(pred, true, mask1)\n rmse = RMSE_torch(pred, true, mask1)\n mape = MAPE_torch(pred, true, mask2)\n rrse = RRSE_torch(pred, true, mask1)\n corr = CORR_torch(pred, true, mask1)\n else:\n raise TypeError\n return mae, rmse, mape, rrse, corr\n\ndef SIGIR_Metrics(pred, true, mask1, mask2):\n rrse = RRSE_torch(pred, true, mask1)\n corr = CORR_torch(pred, true, 0)\n return rrse, corr\n\n\n" ]
[ [ "numpy.square", "numpy.divide", "torch.gt", "numpy.sum", "torch.abs", "numpy.where", "torch.sum", "numpy.absolute", "torch.div", "torch.mean", "torch.masked_select" ] ]
Crazy-Jack/Cl-InfoNCE
[ "b1ce87bc016a4f7ca95839b31f5bb66ebb04748e" ]
[ "clinfonce/util.py" ]
[ "from __future__ import print_function\nimport os, sys\nimport logging\nimport argparse\nimport shutil\nimport getpass\nimport subprocess\n\nimport math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nclass TwoCropTransform:\n \"\"\"Create two crops of the same image\"\"\"\n def __init__(self, transform):\n self.transform = transform\n\n def __call__(self, x):\n return [self.transform(x), self.transform(x)]\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\nclass txt_logger:\n def __init__(self, save_folder, opt, argv):\n self.save_folder = save_folder\n self.logger = logging.getLogger()\n self.logger.setLevel(logging.INFO)\n\n if os.path.isfile(os.path.join(save_folder, 'logfile.log')):\n os.remove(os.path.join(save_folder, 'logfile.log'))\n\n file_log_handler = logging.FileHandler(os.path.join(save_folder, 'logfile.log'))\n self.logger.addHandler(file_log_handler)\n\n stdout_log_handler = logging.StreamHandler(sys.stdout)\n self.logger.addHandler(stdout_log_handler)\n # commend line\n self.logger.info(\"# COMMEND LINE ===========\")\n self.logger.info(argv)\n self.logger.info(\"# =====================\")\n # meta info\n self.logger.info(\"# META INFO ===========\")\n attrs = vars(opt)\n for item in attrs.items():\n self.logger.info(\"%s: %s\"%item)\n # self.logger.info(\"Saved in: {}\".format(save_folder))\n self.logger.info(\"# =====================\")\n\n def log_value(self, epoch, *info_pairs):\n log_str = \"Epoch: {}; \".format(epoch)\n for name, value in info_pairs:\n log_str += (str(name) + \": {}; \").format(value)\n self.logger.info(log_str)\n\n def save_value(self, name, list_of_values):\n np.save(os.path.join(self.save_folder, name), list_of_values)\n\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size).item())\n return res\n\n\ndef trim_accuracy(output, target, num_attr_vals):\n '''\n compute the accurcay for every attribute\n\n return:\n - res: [accuracy for each attribute]\n '''\n with torch.no_grad():\n batch_size = target.size(0)\n\n pred = (output > 0.5).float()\n correct = 
pred.eq(target).sum(axis=0).float()/batch_size * 100.0\n\n res = []\n for attr_val in range(num_attr_vals):\n res.append(correct[attr_val].item())\n\n return res\n\n\nclass MyReduceLROnPlateau:\n def __init__(self, opt, factor=0.1, patience=10,\n verbose=False, threshold=1e-4, change_init_lr=True,\n optimizer=None, stage_info=[(2e+5, 8), (5e+4, -1)], min_lr=1e-5):\n\n if factor >= 1.0:\n raise ValueError('Factor should be < 1.0.')\n self.factor = factor\n\n # Attach optimizer\n self.patience = patience\n self.verbose = verbose\n self.lowest_loss = 1e+32\n self.num_bad_epochs = 0\n self.threshold = threshold\n self.opt = opt\n self.change_init_lr = change_init_lr\n self.optimizer = optimizer\n self.num_data_encounter = 0\n self.stage = 0\n self.in_stage_tracker = 0\n self.stage_info = stage_info\n self.call_num_left = stage_info[self.stage][1]\n self.dynamic_call_interval = stage_info[self.stage][0]\n self.last_stage = False\n self.min_lr = min_lr\n\n\n def step(self, loss):\n # print(\"lowest loss: {}; loss now: {}; bad epoch so far: {}\".format(self.lowest_loss, loss, self.bad_epoch))\n if loss < self.lowest_loss - self.threshold:\n self.lowest_loss = loss\n self.num_bad_epochs = 0\n else:\n self.num_bad_epochs += 1\n\n if self.num_bad_epochs > self.patience:\n if self.change_init_lr:\n self.opt.learning_rate = max(self.min_lr, self.opt.learning_rate * self.factor)\n self.num_bad_epochs = 0\n if self.verbose:\n print(\"Reduce on Pleateu : init learning rate -> : {}\".format(self.opt.learning_rate))\n\n else:\n self.num_bad_epochs = 0\n for param in self.optimizer.param_groups:\n lr_ = param['lr']\n lr = max(self.min_lr, lr_ * self.factor)\n param['lr'] = lr\n if self.verbose:\n print(\"Reduce on Pleateu : learning rate {} -> : {}\".format(lr_, lr))\n\n def batch_step(self, loss, num_data):\n self.num_data_encounter += num_data\n self.change_stage()\n\n if self.num_data_encounter >= self.dynamic_call_interval:\n if self.opt.lr_scheduling == 'exp_decay':\n lr = self.optimizer.param_groups[0]['lr'] * self.opt.exp_decay_rate\n self.optimizer.param_groups[0]['lr'] = lr\n\n self.step(loss)\n self.num_data_encounter = 0\n if not self.last_stage:\n self.call_num_left -= 1\n if self.verbose:\n print(\"Calling scheduler: interval: {}; self.call_num_left: {}\".format(self.dynamic_call_interval, self.call_num_left))\n\n def change_stage(self):\n # decide where to\n if not self.last_stage:\n if self.call_num_left <= 0:\n self.stage += 1\n if self.stage >= len(self.stage_info):\n print(\"Enter the last phase\")\n self.stage = -1\n self.last_stage = True\n self.call_num_left = self.stage_info[self.stage][1]\n self.dynamic_call_interval = self.stage_info[self.stage][0]\n print(\"Change scheduler stage to ({},{})\".format(self.dynamic_call_interval, self.call_num_left))\n\n\n\n\ndef adjust_learning_rate(args, optimizer, epoch):\n lr = args.learning_rate\n if args.lr_scheduling == 'adam':\n return None\n elif args.lr_scheduling == 'cosine':\n eta_min = lr * (args.lr_decay_rate ** 3)\n\n lr = eta_min + (lr - eta_min) * (\n 1 + math.cos(math.pi * epoch / args.epochs)) / 2\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n elif args.lr_scheduling == 'exp_decay':\n if epoch == 1:\n for param_group in optimizer.param_groups:\n param_group['lr'] = max(lr, args.min_lr)\n else:\n for param_group in optimizer.param_groups:\n param_group['lr'] = max(param_group['lr'] * args.exp_decay_rate, args.min_lr)\n\n elif args.lr_scheduling == 'warmup':\n assert args.learning_rate >= args.min_lr, 
\"learning rate should >= min lr\"\n warmup_epochs = int(args.epochs * args.warmup_percent)\n up_slope = (args.learning_rate - args.min_lr) / warmup_epochs\n down_slope = (args.learning_rate - args.min_lr) / (args.epochs - warmup_epochs)\n if epoch <= warmup_epochs:\n lr = args.min_lr + up_slope * epoch\n else:\n # lr = args.learning_rate - slope * (epoch - warmup_epochs)\n eta_min = args.learning_rate * (args.lr_decay_rate ** 3)\n\n lr = eta_min + (args.learning_rate - eta_min) * (\n 1 + math.cos(math.pi * (epoch - warmup_epochs) / (args.epochs - warmup_epochs))) / 2\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = max(lr, args.min_lr)\n\n\n else:\n steps = np.sum(epoch > np.asarray(args.lr_decay_epochs))\n if steps > 0:\n lr = lr * (args.lr_decay_rate ** steps)\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef exclude_bias_and_norm(p):\n return p.ndim == 1\n\n\n\ndef set_default_path(opt):\n # set the path according to the environment\n\n if opt.dataset == 'ut-zap50k-sub':\n if not opt.data_folder:\n opt.data_folder = '../data_processing/ut-zap50k-data-subcategory'\n if not opt.data_root_name:\n opt.data_root_name = 'ut-zap50k-images-square'\n elif opt.dataset == 'CUB':\n if not opt.data_folder:\n opt.data_folder = '../data_processing/CUB_200_2011'\n if not opt.data_root_name:\n opt.data_root_name = 'images'\n elif opt.dataset == 'Wider':\n if not opt.data_folder:\n opt.data_folder = '../data_processing/Wider'\n if not opt.data_root_name:\n opt.data_root_name = ''\n elif opt.dataset == 'imagenet100':\n if not opt.data_folder:\n opt.data_folder = '../data_processing/imagenet100'\n if not opt.data_root_name:\n opt.data_root_name = 'imagenet_unzip'\n else:\n raise ValueError(opt.dataset)\n\n\n\ndef set_optimizer(opt, model, load_opt=True):\n\n \n optimizer = optim.SGD(model.parameters(),\n lr=opt.learning_rate,\n momentum=opt.momentum,\n weight_decay=opt.weight_decay,\n nesterov=True)\n\n \n # load optimizer\n if opt.resume_model_path and load_opt:\n ckpt = torch.load(opt.resume_model_path, map_location='cpu')\n opt_state_dict = ckpt['optimizer']\n opt_new_state_dict = {}\n for k, v in opt_state_dict.items():\n k = k.replace(\"module.\", \"\")\n opt_new_state_dict[k] = v\n opt_state_dict = opt_new_state_dict\n optimizer.load_state_dict(opt_state_dict)\n\n elif hasattr(opt, 'resume_linear'):\n # load optimizer for linear\n print(\"Load optimizer for linear classifier...\")\n ckpt = torch.load(os.path.join(\n opt.save_folder, 'classifier_ckpt.pth'), map_location='cpu')\n opt_state_dict = ckpt['optimizer']\n opt_new_state_dict = {}\n for k, v in opt_state_dict.items():\n k = k.replace(\"module.\", \"\")\n opt_new_state_dict[k] = v\n opt_state_dict = opt_new_state_dict\n optimizer.load_state_dict(opt_state_dict)\n\n\n return optimizer\n\n\ndef save_model(model, optimizer, opt, epoch, save_file):\n print('==> Saving...')\n state = {\n 'opt': opt,\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch': epoch,\n }\n torch.save(state, save_file)\n del state\n\n\n\ndef set_parser(parser_str, additional_argu_func=None, if_linear=False):\n '''\n construct parser and add required argument\n\n arguments:\n - parser_str: parser name\n - additional_argu_func: function to add more argument\n - if_linear: if linear evaluation\n return:\n a parser object\n '''\n\n parser = argparse.ArgumentParser(parser_str)\n\n parser.add_argument('--pipeline', action='store_true', help='decide whether performing pipeline or not, if true, suppressing 
all std in main')\n parser.add_argument('--save_freq', type=int, default=20, help='save frequency')\n parser.add_argument('--batch_size', type=int, default=256, help='batch_size')\n parser.add_argument('--save_path', type=str, default='/projects/rsalakhugroup/tianqinl/train_related', help='where to save file')\n parser.add_argument('--num_workers', type=int, default=8, help='num of workers to use')\n parser.add_argument('--epochs', type=int, default=500, help='number of training epochs')\n parser.add_argument('--method', type=str, default=\"SupCon\")\n parser.add_argument('--instruction', type=str, required=True)\n parser.add_argument('--model', type=str, default='resnet50')\n parser.add_argument('--dataset', type=str, required=True, help='what dataset to use')\n parser.add_argument('--data_folder', type=str, default='', help='dataset')\n parser.add_argument('--data_root_name', type=str, default='', help=\"dataset img folder name, only needed when dataset is organized by folders of img\")\n parser.add_argument('--meta_file_train', type=str, default='meta_data_train.csv',\n help='meta data for ssl training')\n parser.add_argument('--gran_lvl', type=str, required=True, help='what granularity it is using')\n parser.add_argument('--linear_gran_lvl', type=str, default='', help=\"what granularity the linear will be using\")\n parser.add_argument('--img_size', type=int, default=32, required=True, choices=[32, 64, 112, 224], help=\"image size to train/val\")\n parser.add_argument('--customized_name', type=str, default='', help='adding customized name for saving folder')\n\n parser.add_argument('--overwrite', action='store_true',\n help='if true, it will allow the program to overwrite any results')\n # seed\n parser.add_argument('--seed', type=int, default=0, help=\"seed for selecting part of data per class as training\")\n\n\n # optimization\n optimization_argument(parser)\n\n # resume training\n parser.add_argument('--resume_model_path', type=str, default='',\n help='model_path to resume')\n \n if additional_argu_func:\n additional_argu_func(parser)\n\n opt = parser.parse_args()\n \n set_default_path(opt)\n\n \n if opt.method == 'SimCLR':\n opt.gran_lvl = '-1'\n\n \n\n # get user name\n opt.user_name = getpass.getuser()\n\n opt.data_root_folder = os.path.join(opt.data_folder, opt.data_root_name)\n\n return opt\n\ndef parser_processing(opt, default_save_folder=True):\n '''\n process parser and create datafolder.\n\n opt must have model_path, model_name attributes.\n\n input:\n - opt: argparser object\n '''\n\n\n\n if opt.lr_scheduling == 'cosine':\n opt.change_init_lr = True\n else:\n opt.change_init_lr = False\n\n if default_save_folder:\n set_save_folder(opt)\n if not os.path.isdir(opt.save_folder):\n os.makedirs(opt.save_folder, exist_ok=True)\n\n opt.tb_folder = os.path.join(opt.save_folder, 'tensorboard')\n\n if os.path.isdir(opt.tb_folder):\n if not getattr(opt, 'overwrite', False): #ask for confirmation unless --overwrite was passed\n delete = input(\"Are you sure to delete folder {}? 
(Y/n)\".format(opt.tb_folder))\n else:\n delete = 'y'\n if delete.lower() == 'y':\n rm_command = \"rm -rf \" + str(opt.tb_folder)\n os.system(rm_command)\n # shutil.rmtree(opt.tb_folder)\n else:\n sys.exit(\"{} FOLDER is untouched.\".format(opt.tb_folder))\n\n os.makedirs(opt.tb_folder, exist_ok=True)\n\n\n\n return\n\ndef optimization_argument(parser):\n # optimization\n parser.add_argument('--learning_rate', type=float, default=0.2,\n help='learning rate')\n parser.add_argument('--lr_decay_epochs', type=str, default='350,400,450',\n help='where to decay lr, can be a list')\n parser.add_argument('--lr_decay_rate', type=float, default=0.1,\n help='decay rate for learning rate')\n parser.add_argument('--exp_decay_rate', type=float, default=0.95,\n help='decay rate for learning rate')\n parser.add_argument('--weight_decay', type=float, default=1e-4,\n help='weight decay')\n parser.add_argument('--momentum', type=float, default=0.95,\n help='momentum')\n parser.add_argument('--min_lr', type=float, default=1e-5,\n help='SGD min learning rate')\n parser.add_argument('--lr_scheduling', type=str, required=True, choices=['cosine', 'exp_decay', 'adam', 'warmup'], help='what learning rate scheduling to use')\n parser.add_argument('--syncBN', action='store_true',\n help='using synchronized batch normalization')\n parser.add_argument('--trial', type=str, default='0',\n help='id for recording multiple runs')\n parser.add_argument('--warmup_percent', type=float, default=0.33,\n help='percent of epochs used for warmup')\n\n\n\ndef set_save_folder(opt):\n opt.model_path = os.path.join(opt.save_path, 'Cl-InfoNCE/{}/{}/train_file_{}/{}_models_granlvl_{}_img_size_{}_{}'.format(opt.dataset, opt.instruction,\n opt.meta_file_train.replace(\"meta_data_train_\", \"\").replace(\".csv\", \"\"), opt.method,\\\n opt.gran_lvl, opt.img_size, opt.customized_name))\n if not hasattr(opt, 'temp'):\n opt.temp = 'NA'\n opt.model_name = '{}_lr_{}_decay_{}_bsz_{}_temp_{}_scheduling_{}_epochs_{}_trial_{}'.\\\n format(opt.model, opt.learning_rate,\n opt.weight_decay, opt.batch_size, opt.temp, opt.lr_scheduling, opt.epochs, opt.trial)\n\n if opt.resume_model_path:\n opt.pre_ssl_epoch = int(opt.resume_model_path.split('/')[-1].split('.')[0].split('_')[-1])\n opt.model_name += '_resume_from_epoch_{}'.format(opt.pre_ssl_epoch)\n\n opt.save_folder = os.path.join(opt.model_path, opt.model_name)\n\n\n\n\ndef update_and_save_acc(train_accs_multiple, train_acc, epoch, tf_logger, scalar_logger, key='top1', mode='train'):\n \"\"\"Update acc result and save it to file\"\"\"\n train_accs_multiple[key].append(train_acc[key])\n tf_logger.log_value('Train_Accuracy ({})'.format(key), train_acc[key], epoch)\n scalar_logger.save_value('{}_acc_{}.npy'.format(mode, key), train_accs_multiple[key])\n\n\ndef suppress_std(func):\n \"\"\"a decorator used to suppress stdout when using pipeline\"\"\"\n def wrapper(*args, **kwargs):\n stderr_tmp = sys.stderr\n stdout_tmp = sys.stdout\n null = open(os.devnull, 'w')\n sys.stdout = null\n sys.stderr = null\n try:\n result = func(*args, **kwargs)\n sys.stderr = stderr_tmp\n sys.stdout = stdout_tmp\n return result\n except:\n sys.stderr = stderr_tmp\n sys.stdout = stdout_tmp\n raise\n return wrapper\n\n\ndef init_weights(m):\n if type(m) == nn.Linear:\n torch.nn.init.xavier_uniform_(m.weight)\n m.bias.data.fill_(0.00)\n\n\n" ]
[ [ "numpy.asarray", "torch.save", "torch.no_grad", "torch.nn.init.xavier_uniform_", "torch.load" ] ]
alexholcombe/twoWords
[ "9e86b2dc13cb6dd9a79820957525a3e9e1d253b3", "9e86b2dc13cb6dd9a79820957525a3e9e1d253b3" ]
[ "specialFieldsStudentCode/Charlie/noiseStaircaseHelpers.py", "twoWordsCheryl.py" ]
[ "import numpy as np\nfrom psychopy import visual, data, logging\nimport itertools\nfrom math import log\nfrom copy import deepcopy\nfrom pandas import DataFrame\nimport pylab, os\nfrom matplotlib.ticker import ScalarFormatter\n\ndef toStaircase(x,descendingPsycho):\n #Don't need to take log, staircase internals will do that\n if descendingPsycho:\n y = 100 - np.array(x) #100 because assuming maximum value is 100. E.g. percentNoise is 0 to 100\n else:\n y = np.array(x)\n return y\n\ndef outOfStaircase(y,staircase,descendingPsycho):\n #To get inside staircase, it was (100-x)\n #and inside log was taken. So y = log(100-x)\n #So to get x out, it's\n #10**y = 100 - x\n #-x = 10**y - 100\n # x = 100 - 10**y\n if staircase.stepType == 'log': #HOW DO I KNOW IT IS BASE 10? and why doesnt psychopy protect me from log values. I guess actual intensities not meant for user\n x = 10**np.array(y)\n else:\n x = y\n if descendingPsycho:\n x = 100-x\n\n return x\n \ndef printStaircase(s, descendingPsycho=False, briefTrialUpdate=False, printInternalVal = False, alsoLog=False):\n #if briefTrialUpdate, don't print everything, just the kind of stuff you like to know after each trial\n #needs logging as a global variable, otherwise will fail when alsoLog=True\n #add is what to add to intensities,\n #mult is what to multiply intensities by, e.g .if descending psychometric function had to fool QUEST by -1*i + 2\n msg = 'staircase.data (incorrect/correct)=' + str(s.data)\n print(msg)\n if alsoLog: logging.info(msg)\n\n if printInternalVal:\n msg = '\\tstaircase.intensities, *internal* values [' #(these are log intensities)=['\n for i in range( len(s.intensities) ):\n msg += '{:.2f}, '.format( s.intensities[i] ) #I cant figure out a simpler way to prevent scientific notation\n msg+= ']'\n print(msg)\n if alsoLog: logging.info(msg)\n msg = '\\tstaircase.intensities, values [' \n for j in range( len(s.intensities) ):\n msg += '{:.2f}, '.format( outOfStaircase(s.intensities[j], s, descendingPsycho) )\n msg+= ']'\n print(msg)\n if alsoLog: logging.info(msg)\n\n if type(staircase) is data.StairHandler:\n numReversals = len(s.reversalIntensities)\n msg= 'staircase number of reversals=' + str(numReversals) + '] '\n msg+= 'reversal noiseProportions=' + str( 1- np.array( outofStaircase(s.reversalIntensities,s,descendingPsycho)) )\n print(msg)\n if alsoLog: logging.info(msg)\n if numReversals>0:\n numReversalsToAvg = numReversals-1\n msg= ('mean of final' + str(numReversalsToAvg) + \n ' reversals =' + str( 1-np.average( outofStaircase(s.reversalIntensities[-numReversalsToAvg:],s,descendingPsycho), ) ) )\n print(msg)\n if alsoLog: logging.info(msg)\n elif type(s) is data.QuestHandler:\n #some of below are private initialization variables I'm not really supposed to access\n if not briefTrialUpdate:\n msg= ('\\tpThreshold (proportion correct for which trying to zero in on the corresponding parameter value) =' +\n str(s._quest.pThreshold) + '\\n')\n msg+= ('\\tstopInterval (min 5-95% confidence interval required for thresh before stopping. 
If both this and nTrials is specified, whichever happens first)='+\n str(s.stopInterval) + '\\n')\n msg+= '\\tstepType=' + str(s.stepType) + '\\n'\n msg+= '\\tminVal=' + str(s.minVal) + ' maxVal=' + str(s.maxVal) + '\\n'\n msg+= '\\tnTrials=' + str(s.nTrials)\n print(msg)\n if alsoLog: logging.info(msg)\n\n #below applies to both types of staircase\n if s.thisTrialN == -1:\n msg= 'thisTrialN = -1, suggesting you have not started it yet; need to call staircase.next()'\n print(msg)\n if alsoLog: logging.info(msg)\n else:\n msg= 'staircase thisTrialN =' + str(s.thisTrialN)\n print(msg)\n if alsoLog: logging.info(msg)\n # staircase.calculateNextIntensity() sounds like something useful to get a preview of the next trial. Instead, seems to be \n #the internal function used to advance to the next trial.\n \ndef createNoise(proportnNoise,win,fieldWidthPix,noiseColor): \n #noiseColor, assumes that colorSpace='rgb', triple between -1 and 1\n numDots = int(proportnNoise*fieldWidthPix*fieldWidthPix)\n if numDots ==0:\n return None\n #create a matrix of all possible pixel locations, shuffle it, pick off the first numDots ones\n #0,0 is center of field\n possibleXcoords = -fieldWidthPix/2 + np.arange(fieldWidthPix) \n possibleXcoords += fieldWidthPix/30 #adding one-tenth because for some mysterious reason not centered, I guess letters aren't drawn centered\n possibleYcoords = deepcopy(possibleXcoords)\n def expandgrid(*itrs):\n product = list(itertools.product(*itrs))\n return product\n allFieldCoords = expandgrid(possibleXcoords,possibleYcoords)\n #shuffle it\n np.random.shuffle(allFieldCoords)\n dotCoords = allFieldCoords[0:numDots]\n\n #create opacity for each dot\n opacs = np.ones(numDots)#all opaque\n verticalAdjust = 3 #number of pixels to raise rectangle by. Using only uppercase letters and seem to be drawn above the line\n noise = visual.ElementArrayStim(win,units='pix', elementTex=None, elementMask=None,\n nElements=numDots, fieldSize=[fieldWidthPix,fieldWidthPix],\n fieldPos=(0.0, verticalAdjust),\n colorSpace='rgb',\n colors=noiseColor, #set to black\n xys= dotCoords, \n opacities=opacs,\n sizes=1)\n return (noise,allFieldCoords,numDots) #Can just use noise, but if want to generate new noise of same coherence level quickly, can just shuffle coords\n\ndef plotDataAndPsychometricCurve(staircase,fit,descendingPsycho,threshVal):\n #Expects staircase, which has intensities and responses in it\n #May or may not be log steps staircase internals\n #Plotting with linear axes\n #Fit is a psychopy data fit object. 
Assuming that it couldn't handle descendingPsycho so have to invert the values from it\n intensLinear= outOfStaircase(staircase.intensities, staircase, descendingPsycho)\n if fit is not None:\n #generate psychometric curve\n intensitiesForCurve = pylab.arange(min(intensLinear), max(intensLinear), 0.01)\n thresh = fit.inverse(threshVal)\n if descendingPsycho:\n intensitiesForFit = 100-intensitiesForCurve\n thresh = 100 - thresh\n else:\n intensitiesForFit = intensitiesForCurve\n ysForCurve = fit.eval(intensitiesForFit)\n #print('intensitiesForCurve=',intensitiesForCurve)\n #print('ysForCurve=',ysForCurve) #debug\n else: #post-staircase function fitting failed, but can fall back on what staircase returned\n thresh = staircase.quantile()\n if descendingPsycho:\n thresh = 100-thresh\n #plot staircase in left hand panel\n pylab.subplot(121)\n pylab.plot(intensLinear)\n pylab.xlabel(\"staircase trial\")\n pylab.ylabel(\"% noise\")\n #plot psychometric function on the right.\n ax1 = pylab.subplot(122)\n if fit is not None:\n pylab.plot(intensitiesForCurve, ysForCurve, 'k-') #fitted curve\n pylab.plot([thresh, thresh],[0,threshVal],'k--') #vertical dashed line\n pylab.plot([0, thresh],[threshVal,threshVal],'k--') #horizontal dashed line\n figure_title = 'threshold (%.2f) = %0.2f' %(threshVal, thresh) + '%'\n #print thresh proportion top of plot\n pylab.text(0, 1.11, figure_title, horizontalalignment='center', fontsize=12)\n if fit is None:\n pylab.title('Fit failed')\n \n #Use pandas to calculate proportion correct at each level\n df= DataFrame({'intensity': intensLinear, 'response': staircase.data})\n #print('df='); print(df) #debug\n grouped = df.groupby('intensity')\n groupMeans= grouped.mean() #a groupBy object, kind of like a DataFrame but without column names, only an index?\n intensitiesTested = list(groupMeans.index)\n pCorrect = list(groupMeans['response']) #x.iloc[:]\n ns = grouped.sum() #want n per trial to scale data point size\n ns = list(ns['response'])\n print('df mean at each intensity\\n'); print( DataFrame({'intensity': intensitiesTested, 'pCorr': pCorrect, 'n': ns }) )\n #data point sizes. One entry in array for each datapoint\n\n pointSizes = 5+ 40 * np.array(ns) / max(ns) #the more trials, the bigger the datapoint size for maximum of 6\n #print('pointSizes = ',pointSizes)\n points = pylab.scatter(intensitiesTested, pCorrect, s=pointSizes, \n edgecolors=(0,0,0), facecolors= 'none', linewidths=1,\n zorder=10, #make sure the points plot on top of the line\n )\n pylab.ylim([-0.01,1.01])\n pylab.xlim([-2,102])\n pylab.xlabel(\"%noise\")\n pylab.ylabel(\"proportion correct\")\n #save a vector-graphics format for future\n #outputFile = os.path.join(dataFolder, 'last.pdf')\n #pylab.savefig(outputFile)\n createSecondAxis = False\n if createSecondAxis: #presently not used, if fit to log would need this to also show linear scale\n #create second x-axis to show linear percentNoise instead of log\n ax2 = ax1.twiny()\n ax2.set(xlabel='%noise', xlim=[2, 102]) #not quite right but if go to 0, end up with -infinity? and have error\n #ax2.axis.set_major_formatter(ScalarFormatter()) #Show linear labels, not scientific notation\n #ax2 seems to be the wrong object. Why am I using pylab anyway? 
Matplotlib documentation seems more clear\n #for programming it is recommended that the namespaces be kept separate, http://matplotlib.org/api/pyplot_api.html\n #http://stackoverflow.com/questions/21920233/matplotlib-log-scale-tick-label-number-formatting\n ax2.set_xscale('log')\n ax2.tick_params(axis='x',which='minor',bottom='off')\n \n# #save figure to file\n# outputFile = os.path.join(dataDir, 'test.pdf')\n# pylab.savefig(outputFile)\n\n\nif __name__ == \"__main__\":\n #Test staircase functions\n threshCriterion = 0.25\n staircaseTrials = 5\n staircase = data.QuestHandler(startVal = 95, \n startValSd = 80,\n stopInterval= 1, #sd of posterior has to be this small or smaller for staircase to stop, unless nTrials reached\n nTrials = staircaseTrials,\n #extraInfo = thisInfo,\n pThreshold = threshCriterion, #0.25, \n gamma = 1./26,\n delta=0.02, #lapse rate, I suppose for Weibull function fit\n method = 'quantile', #uses the median of the posterior as the final answer\n stepType = 'log', #will home in on the 80% threshold. But stepType = 'log' doesn't usually work\n minVal=1, maxVal = 100\n )\n print('created QUEST staircase')\n \n descendingPsycho = True\n prefaceStaircaseNoise = np.array([5,95]) #will be recycled / not all used, as needed\n corrEachTrial = list([1,0])\n print('Importing responses ',np.array(corrEachTrial),' and intensities ',prefaceStaircaseNoise)\n #Act of importing will cause staircase to log transform\n #staircase internal will be i = log(100-x)\n #so x = 100 - 10**i\n staircase.importData( toStaircase(prefaceStaircaseNoise,descendingPsycho), np.array(corrEachTrial) )\n printStaircase(staircase, briefTrialUpdate=False, printInternalVal=True, alsoLog=False)
scrn=1 means display stimulus on second screen.\r\n#widthPix, heightPix\r\n\r\nquitFinder = False #if checkRefreshEtc, quitFinder becomes True.\r\nautopilot=False\r\ndemo=False #False\r\nexportImages= False #quits after one trial\r\nsubject='Hubert' #user is prompted to enter true subject name\r\nif autopilot: subject='auto'\r\nif os.path.isdir('.'+os.sep+'data'):\r\n dataDir='data'\r\nelse:\r\n print('\"data\" directory does not exist, so saving data in present working directory')\r\n dataDir='.'\r\ntimeAndDateStr = time.strftime(\"%d%b%Y_%H-%M\", time.localtime())\r\n\r\nshowRefreshMisses=True #flicker fixation at refresh rate, to visualize if frames missed\r\nfeedback=False\r\nautoLogging=False\r\nrefreshRate = 60; #100\r\nif demo:\r\n refreshRate = 60; #100\r\n\r\nstaircaseTrials = 25\r\nprefaceStaircaseTrialsN = 20 #22\r\nprefaceStaircaseNoise = np.array([5,20,20,20, 50,50,50,5,80,80,80,5,95,95,95]) #will be recycled / not all used, as needed\r\ndescendingPsycho = True #psychometric function- more noise means worse performance\r\nthreshCriterion = 0.58\r\n\r\nnumWordsInStream = 26 #Experiment will only work if all 26 letters are presented, otherwise error when you pick a letter that was not presented\r\nwordsUnparsed=\"a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z\" \r\nwordList = wordsUnparsed.split(\",\") #split into list\r\nfor i in range(len(wordList)):\r\n wordList[i] = wordList[i].replace(\" \", \"\") #delete spaces\r\nif len(wordList) > numWordsInStream:\r\n print(\"WARNING: you have asked for streams that have more stimuli than are in the wordList, so some will be duplicated\")\r\n#Later on, a list of indices into this list will be randomly permuted for each trial\r\nprint(wordList)\r\nprint(len(wordList))\r\n\r\nbgColor = [-.7,-.7,-.7] # [-1,-1,-1]\r\ncueColor = [1.,1.,1.]\r\nletterColor = [1.,1.,1.]\r\ncueRadius = 3 #6 deg in Goodbourn & Holcombe\r\nwidthPix= 1024 #1280 #monitor width in pixels of Agosta\r\nheightPix= 768 #800 #monitor height in pixels\r\nmonitorwidth = 30.4 #52.2 #38.7 #monitor width in cm\r\nscrn=1 #0 to use main screen, 1 to use external screen connected to computer\r\nfullscr=True #True to use fullscreen, False to not. 
Timing probably won't be quite right if fullscreen = False\r\nallowGUI = False\r\nif demo: monitorwidth = 23#18.0\r\nif exportImages:\r\n widthPix = 600; heightPix = 600\r\n monitorwidth = 13.0\r\n fullscr=False; scrn=0\r\n framesSaved=0\r\nif demo: \r\n scrn=0; fullscr=False\r\n widthPix = 800; heightPix = 600\r\n monitorname='testMonitor'\r\n allowGUI = True\r\nviewdist = 57 #cm\r\npixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)\r\nprint('pixelperdegree=',pixelperdegree)\r\n \r\n \r\n \r\ntry:\r\n click=sound.Sound('406__tictacshutup__click-1-d.wav')\r\nexcept: #in case file missing, create inferiro click manually\r\n logging.warn('Could not load the desired click sound file, instead using manually created inferior click')\r\n click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)\r\n\r\n\r\nclickSound, badKeySound = stringResponse.setupSoundsForResponse()\r\n\r\n# create a dialog from dictionary \r\ninfoFirst = { 'Do staircase (only)': False, 'Check refresh etc':True, 'Fullscreen (timing errors if not)': False, 'Screen refresh rate':refreshRate }\r\nOK = gui.DlgFromDict(dictionary=infoFirst, \r\n title='Dual-RSVP experiment OR staircase to find thresh noise level for performance criterion', \r\n order=['Do staircase (only)', 'Check refresh etc', 'Fullscreen (timing errors if not)'], \r\n tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},\r\n #fixed=['Check refresh etc'])#this attribute can't be changed by the user\r\n )\r\nif not OK.OK:\r\n print('User cancelled from dialog box'); core.quit()\r\ndoStaircase = infoFirst['Do staircase (only)']\r\ncheckRefreshEtc = infoFirst['Check refresh etc']\r\nfullscr = infoFirst['Fullscreen (timing errors if not)']\r\nrefreshRate = infoFirst['Screen refresh rate']\r\nif checkRefreshEtc:\r\n quitFinder = True \r\nif quitFinder:\r\n import os\r\n applescript=\"\\'tell application \\\"Finder\\\" to quit\\'\"\r\n shellCmd = 'osascript -e '+applescript\r\n os.system(shellCmd)\r\n\r\n#letter size 2.5 deg\r\nSOAms = 100 # 133 #Battelli, Agosta, Goodbourn, Holcombe mostly using 133\r\n#Minimum SOAms should be 84 because any shorter, I can't always notice the second ring when lag1. 
71 in Martini E2 and E1b (actually he used 66.6 but that's because he had a crazy refresh rate of 90 Hz)\r\nletterDurMs = 80 #23.6 in Martini E2 and E1b (actually he used 22.2 but that's because he had a crazy refresh rate of 90 Hz)\r\n\r\nISIms = SOAms - letterDurMs\r\nletterDurFrames = int( np.floor(letterDurMs / (1000./refreshRate)) )\r\ncueDurFrames = letterDurFrames\r\nISIframes = int( np.floor(ISIms / (1000./refreshRate)) )\r\n#have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms\r\nrateInfo = 'total SOA=' + str(round( (ISIframes + letterDurFrames)*1000./refreshRate, 2)) + ' or ' + str(ISIframes + letterDurFrames) + ' frames, comprising\\n'\r\nrateInfo+= 'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and letterDurFrames ='+str(letterDurFrames)+' or '+str(round( letterDurFrames*(1000./refreshRate), 2))+'ms'\r\nlogging.info(rateInfo); print(rateInfo)\r\n\r\ntrialDurFrames = int( numWordsInStream*(ISIframes+letterDurFrames) ) #trial duration in frames\r\n\r\nmonitorname = 'testmonitor'\r\nwaitBlank = False\r\nmon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibratn\r\nmon.setSizePix( (widthPix,heightPix) )\r\nunits='deg' #'cm'\r\ndef openMyStimWindow(): #make it a function because have to do it several times, want to be sure is identical each time\r\n myWin = visual.Window(monitor=mon,size=(widthPix,heightPix),allowGUI=allowGUI,units=units,color=bgColor,colorSpace='rgb',fullscr=fullscr,screen=scrn,waitBlanking=waitBlank) #Holcombe lab monitor\r\n return myWin\r\nmyWin = openMyStimWindow()\r\nrefreshMsg2 = ''\r\nif not checkRefreshEtc:\r\n refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'\r\n refreshRateWrong = False\r\nelse: #checkRefreshEtc\r\n runInfo = psychopy.info.RunTimeInfo(\r\n # if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script\r\n #author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',\r\n #version=\"<your experiment version info>\",\r\n win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()\r\n refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)\r\n verbose=True, ## True means report on everything \r\n userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes\r\n )\r\n #print(runInfo)\r\n logging.info(runInfo)\r\n print('Finished runInfo- which assesses the refresh and processes of this computer') \r\n #check screen refresh is what assuming it is ##############################################\r\n Hzs=list()\r\n myWin.flip(); myWin.flip();myWin.flip();myWin.flip();\r\n myWin.setRecordFrameIntervals(True) #otherwise myWin.fps won't work\r\n print('About to measure frame flips') \r\n for i in range(50):\r\n myWin.flip()\r\n Hzs.append( myWin.fps() ) #varies wildly on successive runs!\r\n myWin.setRecordFrameIntervals(False)\r\n # end testing of screen refresh########################################################\r\n Hzs = np.array( Hzs ); Hz= np.median(Hzs)\r\n msPerFrame= 1000./Hz\r\n refreshMsg1= 'Frames per second ~='+ str( np.round(Hz,1) )\r\n refreshRateTolerancePct = 3\r\n pctOff = abs( (np.median(Hzs)-refreshRate) / refreshRate)\r\n refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)\r\n if refreshRateWrong:\r\n 
refreshMsg1 += ' BUT'\r\n refreshMsg1 += ' program assumes ' + str(refreshRate)\r\n refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'\r\n else:\r\n refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )\r\n myWinRes = myWin.size\r\n myWin.allowGUI =True\r\nmyWin.close() #have to close window to show dialog box\r\n\r\ndefaultNoiseLevel = 0.0 #to use if no staircase, can be set by user\r\ntrialsPerCondition = 10 #default value\r\ndlgLabelsOrdered = list()\r\nif doStaircase:\r\n myDlg = gui.Dlg(title=\"Staircase to find appropriate noisePercent\", pos=(200,400))\r\nelse: \r\n myDlg = gui.Dlg(title=\"RSVP experiment\", pos=(200,400))\r\nif not autopilot:\r\n myDlg.addField('Subject name (default=\"Hubert\"):', 'Hubert', tip='or subject code')\r\n dlgLabelsOrdered.append('subject')\r\nif doStaircase:\r\n easyTrialsCondText = 'Num preassigned noise trials to preface staircase with (default=' + str(prefaceStaircaseTrialsN) + '):'\r\n myDlg.addField(easyTrialsCondText, tip=str(prefaceStaircaseTrialsN))\r\n dlgLabelsOrdered.append('easyTrials')\r\n myDlg.addField('Staircase trials (default=' + str(staircaseTrials) + '):', tip=\"Staircase will run until this number is reached or it thinks it has precise estimate of threshold\")\r\n dlgLabelsOrdered.append('staircaseTrials')\r\n pctCompletedBreak = 101\r\nelse:\r\n myDlg.addField('\\tPercent noise dots=', defaultNoiseLevel, tip=str(defaultNoiseLevel))\r\n dlgLabelsOrdered.append('defaultNoiseLevel')\r\n myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))\r\n dlgLabelsOrdered.append('trialsPerCondition')\r\n pctCompletedBreak = 20\r\n \r\nmyDlg.addText(refreshMsg1, color='Black')\r\nif refreshRateWrong:\r\n myDlg.addText(refreshMsg2, color='Red')\r\nif refreshRateWrong:\r\n logging.error(refreshMsg1+refreshMsg2)\r\nelse: logging.info(refreshMsg1+refreshMsg2)\r\n\r\nif checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():\r\n msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'\r\n myDlg.addText(msgWrongResolution, color='Red')\r\n logging.error(msgWrongResolution)\r\n print(msgWrongResolution)\r\n\r\ndimGreyForDlgBox = 'DimGrey'\r\nfrom distutils.version import LooseVersion\r\nif LooseVersion(psychopy.__version__) < LooseVersion(\"1.84.2\"):\r\n dimGreyForDlgBox = [-1.,1.,-1.] 
#color names stopped working along the way, for unknown reason\r\nmyDlg.addText('Note: to abort press ESC at a trials response screen', color=dimGreyForDlgBox) # color='DimGrey') color names stopped working along the way, for unknown reason\r\nmyDlg.show()\r\n\r\nif myDlg.OK: #unpack information entered in dialogue box\r\n thisInfo = myDlg.data #this will be a list of data returned from each field added in order\r\n if not autopilot:\r\n name=thisInfo[dlgLabelsOrdered.index('subject')]\r\n if len(name) > 0: #if entered something\r\n subject = name #change subject default name to what user entered\r\n if doStaircase:\r\n if len(thisInfo[dlgLabelsOrdered.index('staircaseTrials')]) >0:\r\n staircaseTrials = int( thisInfo[ dlgLabelsOrdered.index('staircaseTrials') ] ) #convert string to integer\r\n print('staircaseTrials entered by user=',staircaseTrials)\r\n logging.info('staircaseTrials entered by user=',staircaseTrials)\r\n if len(thisInfo[dlgLabelsOrdered.index('easyTrials')]) >0:\r\n prefaceStaircaseTrialsN = int( thisInfo[ dlgLabelsOrdered.index('easyTrials') ] ) #convert string to integer\r\n print('prefaceStaircaseTrialsN entered by user=',thisInfo[dlgLabelsOrdered.index('easyTrials')])\r\n logging.info('prefaceStaircaseTrialsN entered by user=',prefaceStaircaseTrialsN)\r\n else: #not doing staircase\r\n trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer\r\n print('trialsPerCondition=',trialsPerCondition)\r\n logging.info('trialsPerCondition =',trialsPerCondition)\r\n defaultNoiseLevel = int (thisInfo[ dlgLabelsOrdered.index('defaultNoiseLevel') ])\r\nelse: \r\n print('User cancelled from dialog box.')\r\n logging.flush()\r\n core.quit()\r\nif not demo: \r\n allowGUI = False\r\n\r\nmyWin = openMyStimWindow() #reopen stim window. 
Had to close test window to allow for dialogue boxes\r\n#set up output data file, log file, copy of program code, and logging\r\ninfix = '' #part of the filenames\r\nif doStaircase:\r\n infix = 'staircase_'\r\nfileName = os.path.join(dataDir, subject + '_' + infix+ timeAndDateStr)\r\nif not demo and not exportImages:\r\n dataFile = open(fileName+'.txt', 'w')\r\n saveCodeCmd = 'cp \\'' + sys.argv[0] + '\\' '+ fileName + '.py'\r\n os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run\r\n logFname = fileName+'.log'\r\n ppLogF = logging.LogFile(logFname, \r\n filemode='w',#if you set this to 'a' it will append instead of overwriting\r\n level=logging.INFO)#errors, data and warnings will be sent to this logfile\r\nif demo or exportImages: \r\n dataFile = sys.stdout; logF = sys.stdout\r\n logging.console.setLevel(logging.ERROR) #only show this level messages and higher\r\nlogging.console.setLevel(logging.ERROR) #DEBUG means set console to receive nearly all messges, INFO next level, EXP, DATA, WARNING and ERROR \r\n\r\nif fullscr and not demo and not exportImages:\r\n runInfo = psychopy.info.RunTimeInfo(\r\n # if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script\r\n #author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',\r\n #version=\"<your experiment version info>\",\r\n win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()\r\n refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)\r\n verbose=False, ## True means report on everything \r\n userProcsDetailed=True, ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes\r\n #randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences\r\n ## None -> default \r\n ## 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run\r\n ##'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])\r\n ##'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script\r\n )\r\n logging.info(runInfo)\r\nlogging.flush()\r\n\r\ndef detectDuplicates(myList):\r\n uniqueVals = set(myList)\r\n if len( list(uniqueVals) ) < len(myList):\r\n return True\r\n else: return False\r\n \r\ndef readFileAndScramble(numWordsInStream):\r\n #Abandoning use of this for Cheryl's experiment because too hard to find enough non-word bigrams for which letters not repeated in either stream\r\n stimFile = 'wordStimuliGeneration/twoLetters-Cheryl.txt'\r\n stimListFile= open(stimFile)\r\n bigramList = [x.rstrip() for x in stimListFile.readlines()]\r\n print('Read in', len(bigramList), 'strings')\r\n #print('bigramList = ',bigramList)\r\n stimListFile.close()\r\n #Scramble \r\n shuffled = deepcopy(bigramList)\r\n \r\n shuffleUntilNoDuplicatesOfFirstOrSecondLetter = True\r\n duplicates = True #intiialise this as true so the loop will run at least once\r\n while shuffleUntilNoDuplicatesOfFirstOrSecondLetter and duplicates:\r\n random.shuffle(shuffled)\r\n #print('first 10 unshuffled=',bigramList[:10])\r\n #print('first 10 shuffled=',shuffled[:10])\r\n #Break into two\r\n firstLetters = list()\r\n secondLetters = list()\r\n for bigram in shuffled[:numWordsInStream]:\r\n firstLetter = bigram[0]\r\n secondLetter = 
bigram[1]\r\n firstLetters.append( firstLetter )\r\n secondLetters.append ( secondLetter ) \r\n print(\"shuffled firstLetters=\",firstLetters,\" secondLetters=\",secondLetters)\r\n duplicates = detectDuplicates(firstLetters)\r\n if not duplicates:\r\n duplicates = detectDuplicates(secondLetters)\r\n \r\n print('first 20 shuffled firstLetters=',firstLetters[:20])\r\n print('first 20 shuffled secondLetters=',secondLetters[:20])\r\n return firstLetters, secondLetters\r\n\r\ndef findLtrInList(letter,wordList):\r\n try:\r\n idx = wordList.index(letter)\r\n except ValueError:\r\n print(\"Error! \", letter,\" not found in wordList\")\r\n except Exception as e:\r\n print('Unexpected error',e)\r\n #print(\"Searched for \",letter,\" in the wordList and index returned was \",idx)\r\n return idx\r\n \r\ndef calcSequenceForThisTrial():\r\n print(\"lenWordlist\",len(wordList))\r\n idxsIntoWordList = range(len(wordList)) #create a list of indexes of the entire word list: 0,1,2,3,4,5,...23\r\n print(\"idxsInto\",idxsIntoWordList)\r\n readFromFile = False\r\n if readFromFile:\r\n #read in the file of list of bigrams. Doesn't work because too hard to find enough non-word bigrams for which letters not repeated in either stream\r\n firstLetters, secondLetters = readFileAndScramble(numWordsInStream)\r\n #Now must determine what indexes into the wordList (list of letters pre-drawn) correspond to these\r\n idxsStream1 = list()\r\n print(\"idxsStream1FirstTime\",idxsStream1)\r\n idxsStream2 = list()\r\n print(\"idxsStream2FirstTime\",idxsStream2)\r\n for ltri in range(numWordsInStream): #Find where in the \"wordList\" each letter is, add it to idxsStream1\r\n letter = firstLetters[ltri]\r\n idx = findLtrInList(letter, wordList)\r\n idxsStream1.append(idx)\r\n print(\"idxsStream1SecondTime\",idxsStream1)\r\n #print(\"final idxsStream1=\",idxsStream1)\r\n for ltri in range(numWordsInStream): #Find where in the \"wordList\" each letter is, add it to idxsStream2\r\n letter = secondLetters[ltri]\r\n idx = findLtrInList(letter, wordList)\r\n idxsStream2.append(idx)\r\n print(\"idxsStream2SecondTime\",idxsStream2)\r\n else: #if not readFromFile: #just create a shuffled index of all the possibilities\r\n np.random.shuffle(idxsIntoWordList) #0,1,2,3,4,5,... -> randomly permuted 3,2,5,...\r\n print(\"idxsintoWordList\",idxsIntoWordList)\r\n idxsStream1 = copy.deepcopy(idxsIntoWordList) #first RSVP stream\r\n idxsStream1= idxsStream1[:numWordsInStream] #take the first numWordsInStream of the shuffled list\r\n idxsStream2 = copy.deepcopy(idxsIntoWordList) #make a copy for the right stream, and permute them on the next list\r\n np.random.shuffle(idxsStream2)\r\n idxsStream2= idxsStream2[:numWordsInStream] #take the first numWordsInStream of the shuffled list\r\n print(\"idxsStream1\",idxsStream1)\r\n print(\"idxsStream2\",idxsStream2)\r\n return idxsStream1, idxsStream2\r\n \r\ntextStimuliStream1 = list()\r\ntextStimuliStream2 = list() #used for second, simultaneous RSVP stream\r\ndef calcAndPredrawStimuli(wordList,cues, preCues,thisTrial): #Called before each trial \r\n #textStimuliStream1 and 2 assumed to be global variables\r\n if len(wordList) < numWordsInStream:\r\n print('Error! 
Your word list must have at least ',numWordsInStream,'strings')\r\n #print('wordList=',wordList)\r\n textStimuliStream1[:] = [] #Delete all items in the list\r\n textStimuliStream2[:] = [] #Delete all items in the list\r\n for i in xrange( len(cues) ):\r\n eccentricity = thisTrial['wordEccentricity']\r\n if eccentricity < 2: #kludge to deal with very low separation case where want just one cue - draw them both in the same place\r\n eccentricity = 0\r\n if i==0: \r\n cues[i].setPos( [-eccentricity, 0] )\r\n preCues[i].setPos( [-eccentricity, 0] )\r\n else: \r\n cues[i].setPos( [eccentricity, 0] )\r\n preCues[i].setPos( [eccentricity, 0] )\r\n for i in range(0,len(wordList)): #draw all the words. Later, the seq will indicate which one to present on each frame. The seq might be shorter than the wordList\r\n word = wordList[ i ]\r\n #flipHoriz, flipVert textStim http://www.psychopy.org/api/visual/textstim.html\r\n #Create one bucket of words for the left stream\r\n textStimulusStream1 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging) \r\n #Create a bucket of words for the right stream\r\n textStimulusStream2 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)\r\n textStimulusStream1.setPos([-thisTrial['wordEccentricity'],0]) #left\r\n textStimuliStream1.append(textStimulusStream1) #add to list of text stimuli that comprise stream 1\r\n textStimulusStream2.setPos([thisTrial['wordEccentricity'],0]) #right\r\n textStimuliStream2.append(textStimulusStream2) #add to list of text stimuli that comprise stream 2\r\n \r\n #Use these buckets by pulling out the drawn words in the order you want them. 
For now, just create the order you want.\r\n idxsStream1, idxsStream2 = calcSequenceForThisTrial()\r\n\r\n return idxsStream1, idxsStream2, cues, preCues\r\n \r\n#create click sound for keyboard\r\ntry:\r\n click=sound.Sound('406__tictacshutup__click-1-d.wav')\r\nexcept: #in case file is missing, create an inferior click manually\r\n logging.warn('Could not load the desired click sound file, instead using manually created inferior click')\r\n click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)\r\n\r\nif showRefreshMisses:\r\n fixSizePix = 32 #2.6 #make fixation bigger so flicker more conspicuous\r\nelse: fixSizePix = 32\r\nfixColor = [1,1,1]\r\nif exportImages: fixColor= [0,0,0]\r\nfixatnNoiseTexture = np.round( np.random.rand(fixSizePix/4,fixSizePix/4) ,0 ) *2.0-1 #Can counterphase flicker noise texture to create salient flicker if you break fixation\r\n\r\n#Construct the fixation point.\r\nfixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)\r\nfixationBlank= visual.PatchStim(myWin, tex= -1*fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False) #reverse contrast\r\nfixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(1,-1,-1),size=4,units='pix',autoLog=autoLogging)\r\n#Construct the holders for the experiment text that will appear on screen\r\nrespPromptStim = visual.TextStim(myWin,pos=(0, -.9),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)\r\nacceptTextStim = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)\r\nacceptTextStim.setText('Hit ENTER to accept. Backspace to edit')\r\nrespStim = visual.TextStim(myWin,pos=(0,0),colorSpace='rgb',color=(1,1,0),alignHoriz='center', alignVert='center',height=3,units='deg',autoLog=autoLogging)\r\nrequireAcceptance = True\r\nnextText = visual.TextStim(myWin,pos=(0, .1),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)\r\nNextRemindCountText = visual.TextStim(myWin,pos=(0,.2),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)\r\n\r\n#clickSound, badKeySound = stringResponse.setupSoundsForResponse()\r\n\r\nscreenshot= False; screenshotDone = False\r\nstimList = []\r\n#SETTING THE CONDITIONS. This implements the full factorial design!\r\ncueSerialPositions = np.array([7,9,11,13,15])\r\nfor cueSerialPos in cueSerialPositions:\r\n for rightResponseFirst in [False,True]:\r\n for wordEcc in [1,6]:\r\n stimList.append( {'cueSerialPos':cueSerialPos, 'rightResponseFirst':rightResponseFirst,\r\n 'leftStreamFlip':False, 'rightStreamFlip':False,\r\n 'wordEccentricity':wordEcc } )\r\n\r\ntrials = data.TrialHandler(stimList,trialsPerCondition) #constant stimuli method. 
Duplicate the list of conditions trialsPerCondition times to create the full experiment\r\ntrialsForPossibleStaircase = data.TrialHandler(stimList,trialsPerCondition) #independent randomization, just to create random trials for staircase phase\r\nnumRightWrongEachCuepos = np.zeros([ len(cueSerialPositions), 1 ]); #summary results to print out at end\r\n\r\nlogging.info( 'numtrials=' + str(trials.nTotal) + ' and each trialDurFrames='+str(trialDurFrames)+' or '+str(trialDurFrames*(1000./refreshRate))+ \\\r\n ' ms' + ' task=' + task)\r\n\r\ndef numberToLetter(number): #0 = A, 25 = Z\r\n #if it's not really a letter, return @\r\n if number < 0 or number > 25:\r\n return ('@')\r\n else: #it's probably a letter\r\n try:\r\n return chr( ord('A')+number )\r\n except:\r\n return('@')\r\n\r\ndef letterToNumber(letter): #A = 0, Z = 25\r\n #if it's not really a letter, return -999\r\n #HOW CAN I GENERICALLY TEST FOR LENGTH. EVEN IN CASE OF A NUMBER THAT'S NOT PART OF AN ARRAY?\r\n try:\r\n #if len(letter) > 1:\r\n # return (-999)\r\n if letter < 'A' or letter > 'Z':\r\n return (-999)\r\n else: #it's a letter\r\n return ord(letter)-ord('A')\r\n except:\r\n return (-999)\r\n\r\ndef wordToIdx(word,wordList, responseMustBeInWordList):\r\n #if it's not in the list of stimuli, return None\r\n try:\r\n #http://stackoverflow.com/questions/7102050/how-can-i-get-a-python-generator-to-return-none-rather-than-stopiteration\r\n firstMatchIdx = next((i for i, val in enumerate(wordList) if val.upper()==word), None) #return i (index) unless no matches, in which case return None\r\n #print('Looked for ',word,' in ',wordList,'\\nfirstMatchIdx =',firstMatchIdx)\r\n return firstMatchIdx\r\n except:\r\n if responseMustBeInWordList:\r\n print('Unexpected error in wordToIdx with word=',word)\r\n return (None)\r\n \r\n#print header for data file\r\nprint('experimentPhase\\ttrialnum\\tsubject\\ttask\\twordEcc\\t',file=dataFile,end='')\r\nprint('noisePercent\\tleftStreamFlip\\trightStreamFlip\\t',end='',file=dataFile)\r\nif task=='T1':\r\n numRespsWanted = 2\r\ndataFile.write('rightResponseFirst\\t')\r\nfor i in range(numRespsWanted):\r\n dataFile.write('cueSerialPos'+str(i)+'\\t') #have to use write to avoid ' ' between successive text, at least until Python 3\r\n dataFile.write('answer'+str(i)+'\\t')\r\n dataFile.write('response'+str(i)+'\\t')\r\n dataFile.write('correct'+str(i)+'\\t')\r\n dataFile.write('responsePosRelative'+str(i)+'\\t')\r\nprint('seq1\\tseq2\\t',end='', file=dataFile) #assuming 2 streams\r\nprint('timingBlips',file=dataFile)\r\n#end of header\r\n\r\ndef oneFrameOfStim( n,cues,cuesSerialPos,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,\r\n noise,proportnNoise,allFieldCoords,numNoiseDots ): \r\n#defining a function to draw each frame of stim.\r\n#seq1 is an array of indices corresponding to the appropriate pre-drawn stimulus, contained in textStimuli\r\n SOAframes = letterDurFrames+ISIframes\r\n cueFrames = cuesSerialPos*SOAframes\r\n stimN = int( np.floor(n/SOAframes) )\r\n frameOfThisLetter = n % SOAframes #every SOAframes, new letter\r\n showLetter = frameOfThisLetter < letterDurFrames #if true, it's not time for the blank ISI. 
It's still time to draw the letter\r\n thisStimIdx = seq1[stimN] #which letter, from A to Z (0 to 25), should be shown?\r\n #print ('stimN=',stimN, 'thisStimIdx=', thisStimIdx, ' SOAframes=',SOAframes, ' letterDurFrames=', letterDurFrames, ' (n % SOAframes) =', (n % SOAframes) ) #DEBUGOFF\r\n if seq2 is not None:\r\n thisStim2Idx = seq2[stimN]\r\n #so that any timing problems occur just as often for every frame, always draw the letter and the cue, but simply draw it in the bgColor when it's not meant to be on\r\n for cue in cues:\r\n cue.setLineColor( bgColor )\r\n if type(cueFrames) not in [tuple,list,np.ndarray]: #scalar. But need collection to do loop based on it\r\n cueFrames = list([cueFrames])\r\n for i in xrange( len(cueFrames) ): #check whether it's time for any cue. Assume first cueFrame is for first cue, etc.\r\n thisCueFrame = cueFrames[i]\r\n if n>=thisCueFrame and n<thisCueFrame+cueDurFrames:\r\n cues[i].setLineColor( cueColor )\r\n\r\n if showLetter:\r\n textStimuliStream1[thisStimIdx].setColor( letterColor )\r\n textStimuliStream2[thisStim2Idx].setColor( letterColor )\r\n else: \r\n textStimuliStream1[thisStimIdx].setColor( bgColor )\r\n textStimuliStream2[thisStim2Idx].setColor( bgColor )\r\n textStimuliStream1[thisStimIdx].flipHoriz = thisTrial['leftStreamFlip']\r\n textStimuliStream2[thisStim2Idx].flipHoriz = thisTrial['rightStreamFlip']\r\n textStimuliStream1[thisStimIdx].draw()\r\n textStimuliStream2[thisStim2Idx].draw()\r\n for cue in cues:\r\n cue.draw() #will be drawn in background color if it's not time for that\r\n refreshNoise = False #Not recommended because takes longer than a frame, even to shuffle apparently. Or maybe the setXYs step\r\n if proportnNoise>0 and refreshNoise: \r\n if frameOfThisLetter ==0: \r\n np.random.shuffle(allFieldCoords) \r\n dotCoords = allFieldCoords[0:numNoiseDots]\r\n noise.setXYs(dotCoords)\r\n if proportnNoise>0:\r\n noise.draw()\r\n return True \r\n# #######End of function definition that displays the stimuli!!!! #####################################\r\n#############################################################################################################################\r\ncues = list()\r\npreCues = list()\r\nfor i in xrange(2):\r\n cue = visual.Circle(myWin, \r\n radius=cueRadius,#Martini used circles with diameter of 12 deg\r\n lineColorSpace = 'rgb',\r\n lineColor=bgColor,\r\n lineWidth=6.0, #in pixels. Was thinner (2 pixels) in letter AB experiments\r\n units = 'deg',\r\n fillColorSpace = 'rgb',\r\n fillColor=None, #beware, with convex shapes fill colors don't work\r\n pos= [0,0], #the anchor (rotation and vertices are position with respect to this)\r\n interpolate=True,\r\n autoLog=False)#this stim changes too much for autologging to be useful\r\n cues.append(cue)\r\n \r\n #Precue to potentially inform the participant where the letter streams will appear\r\n preCue = visual.Circle(myWin, \r\n radius=2,#Martini used circles with diameter of 12 deg\r\n lineColorSpace = 'rgb',\r\n lineColor=bgColor,\r\n lineWidth=4.0, #in pixels. 
Was thinner (2 pixels) in letter AB experiments\r\n units = 'deg',\r\n fillColorSpace = 'rgb',\r\n fillColor='white', #beware, with convex shapes fill colors don't work\r\n pos= [0,0], #the anchor (rotation and vertices are position with respect to this)\r\n interpolate=True,\r\n autoLog=False)#this stim changes too much for autologging to be useful\r\n preCues.append(preCue)\r\n\r\nltrHeight = 2.5 #Martini letters were 2.5deg high\r\n#All noise dot coordinates ultimately in pixels, so can specify each dot is one pixel \r\nnoiseFieldWidthDeg=ltrHeight *1.0\r\nnoiseFieldWidthPix = int( round( noiseFieldWidthDeg*pixelperdegree ) )\r\n\r\ndef timingCheckAndLog(ts,trialN):\r\n #check for timing problems and log them\r\n #ts is a list of the times of the clock after each frame\r\n interframeIntervs = np.diff(ts)*1000\r\n #print ' interframe intervs were ',around(interframeIntervs,1) #DEBUGOFF\r\n frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss\r\n longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)\r\n idxsInterframeLong = np.where( interframeIntervs > longFrameLimit ) [0] #frames that exceeded 130% of expected duration\r\n numCasesInterframeLong = len( idxsInterframeLong )\r\n if numCasesInterframeLong >0 and (not demo):\r\n longFramesStr = 'ERROR,'+str(numCasesInterframeLong)+' frames were longer than '+str(longFrameLimit)+' ms'\r\n if demo: \r\n longFramesStr += 'not printing them all because in demo mode'\r\n else:\r\n longFramesStr += ' apparently screen refreshes skipped, interframe durs were:'+\\\r\n str( np.around( interframeIntervs[idxsInterframeLong] ,1 ) )+ ' and was these frames: '+ str(idxsInterframeLong)\r\n if longFramesStr != None:\r\n logging.error( 'trialnum='+str(trialN)+' '+longFramesStr )\r\n if not demo:\r\n flankingAlso=list()\r\n for idx in idxsInterframeLong: #also print timing of one before and one after long frame\r\n if idx-1>=0:\r\n flankingAlso.append(idx-1)\r\n else: flankingAlso.append(np.NaN)\r\n flankingAlso.append(idx)\r\n if idx+1<len(interframeIntervs): flankingAlso.append(idx+1)\r\n else: flankingAlso.append(np.NaN)\r\n flankingAlso = np.array(flankingAlso)\r\n flankingAlso = flankingAlso[np.logical_not(np.isnan(flankingAlso))] #remove nan values\r\n flankingAlso = flankingAlso.astype(np.integer) #cast as integers, so can use as subscripts\r\n logging.info( 'flankers also='+str( np.around( interframeIntervs[flankingAlso], 1) ) ) #because this is not an essential error message, as previous one already indicates error\r\n #As INFO, at least it won't fill up the console when console set to WARNING or higher\r\n return numCasesInterframeLong\r\n #end timing check\r\n \r\ntrialClock = core.Clock()\r\nnumTrialsCorrect = 0; \r\nnumTrialsApproxCorrect = 0;\r\nnumTrialsEachCorrect= np.zeros( numRespsWanted )\r\nnumTrialsEachApproxCorrect= np.zeros( numRespsWanted )\r\n\r\ndef do_RSVP_stim(thisTrial, cues, preCues, seq1, seq2, proportnNoise,trialN):\r\n #relies on global variables:\r\n # textStimuli, logging, bgColor\r\n # thisTrial should have 'cueSerialPos'\r\n global framesSaved #because change this variable. 
Can only change a global variable if you declare it\r\n cuesSerialPos = [] #will contain the serial positions in the stream of all the cues (corresponding to the targets)\r\n cuesSerialPos.append(thisTrial['cueSerialPos']) #stream1\r\n cuesSerialPos.append(thisTrial['cueSerialPos']) #stream2\r\n cuesSerialPos = np.array(cuesSerialPos)\r\n noise = None; allFieldCoords=None; numNoiseDots=0\r\n if proportnNoise > 0: #generating noise is time-consuming, so only do it once per trial. Then shuffle noise coordinates for each letter\r\n (noise,allFieldCoords,numNoiseDots) = createNoise(proportnNoise,myWin,noiseFieldWidthPix, bgColor)\r\n\r\n preDrawStimToGreasePipeline = list() #I don't know why this works, but without drawing it I have consistent timing blip first time that draw ringInnerR for phantom contours\r\n for cue in cues:\r\n cue.setLineColor(bgColor)\r\n preDrawStimToGreasePipeline.extend([cue])\r\n for stim in preDrawStimToGreasePipeline:\r\n stim.draw()\r\n myWin.flip(); myWin.flip()\r\n #end preparation of stimuli\r\n \r\n core.wait(.1);\r\n trialClock.reset()\r\n fixatnPeriodMin = 0.3\r\n fixatnPeriodFrames = int( (np.random.rand(1)/2.+fixatnPeriodMin) *refreshRate) #random interval between 300ms and 800ms\r\n ts = list(); #to store time of each drawing, to check whether skipped frames\r\n for i in range(fixatnPeriodFrames+20): #prestim fixation interval\r\n #if i%4>=2 or demo or exportImages: #flicker fixation on and off at framerate to see when skip frame\r\n # fixation.draw()\r\n #else: fixationBlank.draw()\r\n for preCue in preCues:\r\n preCue.draw()\r\n fixationPoint.draw()\r\n myWin.flip() #end fixation interval\r\n #myWin.setRecordFrameIntervals(True); #can't get it to stop detecting superlong frames\r\n t0 = trialClock.getTime()\r\n\r\n for n in range(trialDurFrames): #this is the loop for this trial's stimulus!\r\n worked = oneFrameOfStim( n,cues,cuesSerialPos,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,\r\n noise,proportnNoise,allFieldCoords,numNoiseDots ) #draw letter and possibly cue and noise on top\r\n if thisTrial['wordEccentricity'] > 2: #kludge to avoid drawing fixation in super-near condition for Cheryl\r\n fixationPoint.draw()\r\n if exportImages:\r\n myWin.getMovieFrame(buffer='back') #for later saving\r\n framesSaved +=1\r\n myWin.flip()\r\n t=trialClock.getTime()-t0; ts.append(t);\r\n #end of big stimulus loop\r\n myWin.setRecordFrameIntervals(False);\r\n\r\n if task=='T1':\r\n respPromptStim.setText('What was circled?',log=False) \r\n else: respPromptStim.setText('Error: unexpected task',log=False)\r\n postCueNumBlobsAway=-999 #doesn't apply to non-tracking and click tracking task\r\n #print('cuesSerialPos=',cuesSerialPos, 'cuesSerialPos.dtype =',cuesSerialPos.dtype, 'type(seq1)=',type(seq1))\r\n seq1 = np.array(seq1) #convert seq1 list to array so that can index it with multiple indices (cuesSerialPos)\r\n #print('seq1[cuesSerialPos]=', seq1[cuesSerialPos])\r\n seq2= np.array(seq2) #convert seq2 list to array so that can index it with multiple indices (cuesSerialPos)\r\n correctAnswerIdxsStream1 = np.array( seq1[cuesSerialPos] )\r\n correctAnswerIdxsStream2 = np.array( seq2[cuesSerialPos] )\r\n #print('correctAnswerIdxsStream1=',correctAnswerIdxsStream1)#, 'wordList[correctAnswerIdxsStream1[0]]=',wordList[correctAnswerIdxsStream1[0]])\r\n return cuesSerialPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2,ts\r\n \r\ndef 
handleAndScoreResponse(passThisTrial,response,responseAutopilot,task,stimSequence,cueSerialPos,correctAnswerIdx):\r\n #Handle response, calculate whether correct, ########################################\r\n #responses are actual characters\r\n #correctAnswer is index into stimSequence\r\n #autopilot is global variable\r\n if autopilot or passThisTrial:\r\n response = responseAutopilot\r\n #print('handleAndScoreResponse correctAnswerIdxs=',correctAnswerIdxs,'\\nstimSequence=',stimSequence, '\\nwords=',wordList)\r\n correct = 0\r\n approxCorrect = 0\r\n posOfResponse = -999\r\n responsePosRelative = -999\r\n idx = correctAnswerIdx\r\n print('correctAnswerIdx = ',correctAnswerIdx) \r\n correctAnswer = wordList[idx].upper()\r\n responseString=response\r\n responseString= responseString.upper()\r\n #print('correctAnswer=',correctAnswer ,' responseString=',responseString)\r\n if correctAnswer == responseString:\r\n correct = 1\r\n #print('correct=',correct)\r\n responseMustBeInWordList = True\r\n if len(stimSequence) != len(wordList):\r\n responseMustBeInWordList = False\r\n #stimSeqAsLetters = list()\r\n #for letter in stimSequence:\r\n # stimSeqAsLetters.append( chr( ord('A') + letter ) )\r\n #letterIdxOfAlphabet = ord( responseString.upper() ) - ord( 'A') \r\n #print(\"Sending to responseWordIdx stimSequence=\",stimSequence,\" responseString=\",responseString, \"stimSeqAsLetters=\",stimSeqAsLetters, \"responseMustBeInWordList=\",responseMustBeInWordList)\r\n responseWordIdx = wordToIdx(responseString.upper(),wordList, responseMustBeInWordList)\r\n print('responseWordIdx = ', responseWordIdx, ' stimSequence=', stimSequence)\r\n if responseWordIdx is None: #response is not in the wordList\r\n posOfResponse = -999\r\n logging.warn('Response was not present in the stimulus stream')\r\n else:\r\n posOfResponse= np.where( np.array(stimSequence)==responseWordIdx ) #Assumes that the response was in the stimulus sequence\r\n print(\"posOfResponse=\",posOfResponse, \"responseWordIdx=\",responseWordIdx,\"stimSequence=\",stimSequence, \"type(stimSequence)=\",type(stimSequence))\r\n posOfResponse= posOfResponse[0] #list with two entries, want first which will be array of places where the response was found in the sequence\r\n if len(posOfResponse) > 1:\r\n logging.error('Expected response to have occurred in only one position in stream')\r\n elif len(posOfResponse) == 0:\r\n logging.error('Expected response to have occurred somewhere in the stream')\r\n raise ValueError('Expected response to have occurred somewhere in the stream')\r\n else:\r\n posOfResponse = posOfResponse[0] #first element of list (should be only one element long)\r\n responsePosRelative = posOfResponse - cueSerialPos\r\n approxCorrect = abs(responsePosRelative)<= 3 #Vul efficacy measure of getting it right to within plus/minus 3\r\n #print('wordToIdx(',responseString,',',wordList,')=',responseWordIdx,' stimSequence=',stimSequence,'\\nposOfResponse = ',posOfResponse) #debugON\r\n #print response stuff to dataFile\r\n print('correctAnswer=',correctAnswer,' correct=',correct, 'responsePosRelative=',responsePosRelative)\r\n #header was answerPos0, answer0, response0, correct0, responsePosRelative0\r\n print(cueSerialPos,'\\t', end='', file=dataFile)\r\n print(correctAnswer, '\\t', end='', file=dataFile) #answer0\r\n print(responseString, '\\t', end='', file=dataFile) #response0\r\n print(correct, '\\t', end='',file=dataFile) #correct0\r\n print(responsePosRelative, '\\t', end='',file=dataFile) #responsePosRelative0\r\n\r\n return 
correct,approxCorrect,responsePosRelative\r\n #end handleAndScoreResponses\r\n\r\ndef play_high_tone_correct_low_incorrect(correct, passThisTrial=False):\r\n highA = sound.Sound('G',octave=5, sampleRate=6000, secs=.3, bits=8)\r\n low = sound.Sound('F',octave=3, sampleRate=6000, secs=.3, bits=8)\r\n highA.setVolume(0.9)\r\n low.setVolume(1.0)\r\n if correct:\r\n highA.play()\r\n elif passThisTrial:\r\n high= sound.Sound('G',octave=4, sampleRate=2000, secs=.08, bits=8)\r\n for i in range(2): \r\n high.play(); low.play(); \r\n else: #incorrect\r\n low.play()\r\n\r\ndef instructions():\r\n instrcolor = 'white'\r\n preInstructions = visual.TextStim(myWin, text = \"Press a key to see the instructions\",pos=(0, 0),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )\r\n Instructions1 = visual.TextStim(myWin, text = \"Instructions\",pos=(0, .8),colorSpace='rgb',color=(0,0,0),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )\r\n Instructions2 = visual.TextStim(myWin, text = \"Please rest your eyes on the red dot at all times\",pos=(0, -.2),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )\r\n Instructions3 = visual.TextStim(myWin, text = \"Press Space to Continue\",pos=(0, -.9), colorSpace='rgb',color=(0,0,0),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )\r\n Instructions4b = visual.TextStim(myWin, text = \"On each trial, two letter streams will be presented with each letter flashing for a fraction of a second.\",pos=(0, 0),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )\r\n Instructions5b = visual.TextStim(myWin, text = \"Two letters will be outlined with white circles on each trial. Try to remember these letters.\",pos=(0, 0),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging ) \r\n Instructions6 = visual.TextStim(myWin, text = \"After the letter streams, you will need to select the letters you just saw by clicking the letters on the screen. \\nSome of the trials will require you to choose the left letter first. 
\\nOthers will require you to choose the right one first.\", pos=(0,0), colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )\r\n Instructions7 = visual.TextStim(myWin, text = \"Press a key to begin the experiment\",pos=(0, 0), colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )\r\n Instructions9 = visual.TextStim(myWin, text = \"If you have any questions, ask the experimenter now.\",pos=(0, 0),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )\r\n Instructions10 = visual.TextStim(myWin, text = \"If you don't know the letter, you can guess.\",pos=(0, 0),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging )\r\n\r\n preInstructions.draw()\r\n myWin.flip()\r\n event.waitKeys()\r\n Instructions1.draw()\r\n Instructions2.draw()\r\n Instructions3.draw()\r\n fixationPoint.draw()\r\n myWin.flip()\r\n event.waitKeys()\r\n Instructions1.draw()\r\n Instructions4b.draw()\r\n Instructions3.draw()\r\n myWin.flip()\r\n event.waitKeys()\r\n Instructions1.draw()\r\n Instructions5b.draw()\r\n Instructions3.draw()\r\n myWin.flip()\r\n event.waitKeys()\r\n Instructions1.draw()\r\n Instructions6.draw()\r\n Instructions3.draw()\r\n myWin.flip()\r\n event.waitKeys()\r\n Instructions1.draw()\r\n Instructions3.draw()\r\n Instructions10.draw()\r\n myWin.flip()\r\n event.waitKeys()\r\n Instructions1.draw()\r\n Instructions9.draw()\r\n Instructions3.draw()\r\n myWin.flip()\r\n event.waitKeys()\r\n Instructions7.draw()\r\n myWin.flip()\r\n event.waitKeys()\r\n\r\nchangeToUpper = False #Cheryl's experiment\r\nexpStop=False\r\nnDoneMain = -1 #change to zero once start main part of experiment\r\nif doStaircase:\r\n #create the staircase handler\r\n useQuest = True\r\n if useQuest:\r\n staircase = data.QuestHandler(startVal = 95, \r\n startValSd = 80,\r\n stopInterval= 1, #sd of posterior has to be this small or smaller for staircase to stop, unless nTrials reached\r\n nTrials = staircaseTrials,\r\n #extraInfo = thisInfo,\r\n pThreshold = threshCriterion, #0.25, \r\n gamma = 1./26,\r\n delta=0.02, #lapse rate, I suppose for Weibull function fit\r\n method = 'quantile', #uses the median of the posterior as the final answer\r\n stepType = 'log', #will home in on the 80% threshold. 
But stepType = 'log' doesn't usually work\r\n minVal=1, maxVal = 100\r\n )\r\n print('created QUEST staircase')\r\n else:\r\n stepSizesLinear = [.2,.2,.1,.1,.05,.05]\r\n stepSizesLog = [log(1.4,10),log(1.4,10),log(1.3,10),log(1.3,10),log(1.2,10)]\r\n staircase = data.StairHandler(startVal = 0.1,\r\n stepType = 'log', #if log, what do I want to multiply it by\r\n stepSizes = stepSizesLog, #step size to use after each reversal\r\n minVal=0, maxVal=1,\r\n nUp=1, nDown=3, #will home in on the 80% threshold\r\n nReversals = 2, #The staircase terminates when nTrials have been exceeded, or when both nReversals and nTrials have been exceeded\r\n nTrials=1)\r\n print('created conventional staircase')\r\n \r\n if prefaceStaircaseTrialsN > len(prefaceStaircaseNoise): #repeat array to accommodate desired number of easyStarterTrials\r\n prefaceStaircaseNoise = np.tile( prefaceStaircaseNoise, ceil( prefaceStaircaseTrialsN/len(prefaceStaircaseNoise) ) )\r\n prefaceStaircaseNoise = prefaceStaircaseNoise[0:prefaceStaircaseTrialsN]\r\n \r\n phasesMsg = ('Doing '+str(prefaceStaircaseTrialsN)+'trials with noisePercent= '+str(prefaceStaircaseNoise)+' then doing a max '+str(staircaseTrials)+'-trial staircase')\r\n print(phasesMsg); logging.info(phasesMsg)\r\n\r\n #staircaseStarterNoise PHASE OF EXPERIMENT\r\n corrEachTrial = list() #only needed for easyStaircaseStarterNoise\r\n staircaseTrialN = -1; mainStaircaseGoing = False\r\n while (not staircase.finished) and expStop==False: #staircase.thisTrialN < staircase.nTrials\r\n if staircaseTrialN+1 < len(prefaceStaircaseNoise): #still doing easyStaircaseStarterNoise\r\n staircaseTrialN += 1\r\n noisePercent = prefaceStaircaseNoise[staircaseTrialN]\r\n else:\r\n if staircaseTrialN+1 == len(prefaceStaircaseNoise): #add these non-staircase trials so QUEST knows about them\r\n mainStaircaseGoing = True\r\n print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise)\r\n staircase.importData(100-prefaceStaircaseNoise, np.array(corrEachTrial))\r\n printStaircase(staircase, descendingPsycho, briefTrialUpdate=False, printInternalVal=True, alsoLog=False)\r\n try: #advance the staircase\r\n printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)\r\n noisePercent = 100. - staircase.next() #will step through the staircase, based on whether told it (addResponse) got it right or wrong\r\n staircaseTrialN += 1\r\n except StopIteration: #Need this here, even though test for finished above. 
I can't understand why finished test doesn't accomplish this.\r\n print('stopping because staircase.next() returned a StopIteration, which it does when it is finished')\r\n break #break out of the trials loop\r\n #print('staircaseTrialN=',staircaseTrialN)\r\n idxsStream1, idxsStream2, cues, preCues = calcAndPredrawStimuli(wordList,cues,preCues, staircaseTrials)\r\n cuesSerialPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts = \\\r\n do_RSVP_stim(thisTrial, cues, preCues, idxsStream1, idxsStream2, noisePercent/100.,staircaseTrialN)\r\n numCasesInterframeLong = timingCheckAndLog(ts,staircaseTrialN)\r\n expStop,passThisTrial,responses,buttons,responsesAutopilot = \\\r\n letterLineupResponse.doLineup(myWin,bgColor,myMouse,clickSound,badKeySound,possibleResps,showBothSides,sideFirstLeftRightCentral,autopilot) #CAN'T YET HANDLE MORE THAN 2 LINEUPS\r\n\r\n if not expStop:\r\n if mainStaircaseGoing:\r\n print('staircase\\t', end='', file=dataFile)\r\n else: \r\n print('staircase_preface\\t', end='', file=dataFile)\r\n #header start 'trialnum\\tsubject\\ttask\\t'\r\n print(staircaseTrialN,'\\t', end='', file=dataFile) #first thing printed on each line of dataFile\r\n print(subject,'\\t',task,'\\t', round(noisePercent,2),'\\t', end='', file=dataFile)\r\n correct,approxCorrect,responsePosRelative= handleAndScoreResponse(\r\n passThisTrial,responses,responseAutopilot,task,sequenceLeft,cuesSerialPos[0],correctAnswerIdx )\r\n #header then had seq1, seq2\r\n print(idxsStream1,'\\t',idxsStream2,'\\t', end='', file=dataFile) #print the indexes into the wordList\r\n print(numCasesInterframeLong, file=dataFile) \r\n print('timingBlips=', numCasesInterframeLong)#timingBlips, last thing recorded on each line of dataFile\r\n core.wait(.06)\r\n if feedback: \r\n play_high_tone_correct_low_incorrect(correct, passThisTrial=False)\r\n print('staircaseTrialN=', staircaseTrialN,' noisePercent=',round(noisePercent,3),' T1approxCorrect=',T1approxCorrect) #debugON\r\n corrEachTrial.append(T1approxCorrect)\r\n if mainStaircaseGoing: \r\n staircase.addResponse(T1approxCorrect, intensity = 100-noisePercent) #Add a 1 or 0 to signify a correct/detected or incorrect/missed trial\r\n #print('Have added an intensity of','{:.3f}'.format(100-noisePercent), 'T1approxCorrect =', T1approxCorrect, ' to staircase') #debugON\r\n #ENDING STAIRCASE PHASE\r\n\r\n if staircaseTrialN+1 < len(prefaceStaircaseNoise) and (staircaseTrialN>=0): #exp stopped before got through staircase preface trials, so haven't imported yet\r\n print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise[0:staircaseTrialN+1])\r\n staircase.importData(100-prefaceStaircaseNoise[0:staircaseTrialN], np.array(corrEachTrial)) \r\n print('framesSaved after staircase=',framesSaved) #debugON\r\n\r\n timeAndDateStr = time.strftime(\"%H:%M on %d %b %Y\", time.localtime())\r\n msg = ('prefaceStaircase phase' if expStop else '')\r\n msg += ('ABORTED' if expStop else 'Finished') + ' staircase part of experiment at ' + timeAndDateStr\r\n logging.info(msg); print(msg)\r\n printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)\r\n #print('staircase.quantile=',round(staircase.quantile(),2),' sd=',round(staircase.sd(),2))\r\n threshNoise = round(staircase.quantile(),3)\r\n if descendingPsycho:\r\n threshNoise = 100- threshNoise\r\n threshNoise = max( 0, threshNoise ) #e.g. 
if you get all trials wrong, the posterior peaks at a very negative number\r\n msg= 'Staircase estimate of threshold = ' + str(threshNoise) + ' with sd=' + str(round(staircase.sd(),2))\r\n logging.info(msg); print(msg)\r\n myWin.close()\r\n #Fit and plot data\r\n fit = None\r\n try:\r\n intensityForCurveFitting = staircase.intensities\r\n if descendingPsycho: \r\n intensityForCurveFitting = 100-staircase.intensities #because fitWeibull assumes curve is ascending\r\n fit = data.FitWeibull(intensityForCurveFitting, staircase.data, expectedMin=1/26., sems = 1.0/len(staircase.intensities))\r\n except:\r\n print(\"Fit failed.\")\r\n plotDataAndPsychometricCurve(staircase,fit,descendingPsycho,threshCriterion)\r\n #save figure to file\r\n pylab.savefig(fileName+'.pdf')\r\n print('The plot has been saved, as '+fileName+'.pdf')\r\n pylab.show() #must call this to actually show plot\r\nelse: #not staircase\r\n noisePercent = defaultNoiseLevel\r\n phasesMsg = 'Experiment will have '+str(trials.nTotal)+' trials. Letters will be drawn with superposed noise of ' + \"{:.2%}\".format(defaultNoiseLevel)\r\n print(phasesMsg); logging.info(phasesMsg)\r\n nDoneMain =0\r\n while nDoneMain < trials.nTotal and expStop==False: #MAIN EXPERIMENT LOOP\r\n if nDoneMain==0:\r\n msg='Starting main (non-staircase) part of experiment'\r\n logging.info(msg); print(msg)\r\n instructions()\r\n thisTrial = trials.next() #get a proper (non-staircase) trial\r\n sequenceStream1, sequenceStream2, cues, preCues = calcAndPredrawStimuli(wordList,cues,preCues, thisTrial)\r\n print('sequenceStream1=',sequenceStream1)\r\n print('sequenceStream2=',sequenceStream2)\r\n myWin.setMouseVisible(False)\r\n cuesSerialPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts = \\\r\n do_RSVP_stim(thisTrial, cues, preCues, sequenceStream1, sequenceStream2, noisePercent/100.,nDoneMain)\r\n print('correctAnswerIdxsStream1=',correctAnswerIdxsStream1,'correctAnswerIdxsStream2=',correctAnswerIdxsStream2)\r\n numCasesInterframeLong = timingCheckAndLog(ts,nDoneMain)\r\n #call for each response\r\n myMouse = event.Mouse()\r\n alphabet = list(string.ascii_lowercase)\r\n possibleResps = alphabet #possibleResps.remove('C'); possibleResps.remove('V')\r\n\r\n expStop = list(); passThisTrial = list(); responses=list(); responsesAutopilot=list()\r\n dL = [None]*numRespsWanted #dummy list for null values\r\n expStop = copy.deepcopy(dL); responses = copy.deepcopy(dL); responsesAutopilot = copy.deepcopy(dL); passThisTrial=copy.deepcopy(dL)\r\n responseOrder = range(numRespsWanted)\r\n showBothSides=True\r\n sideFirstLeftRightCentral = thisTrial['rightResponseFirst']\r\n #if thisTrial['rightResponseFirst']: #change order of indices depending on rightResponseFirst. 
response0, answer0 etc refer to which one had to be reported first\r\n #responseOrder.reverse() #this is necessary if using text input rather than lineup response\r\n \r\n expStop,passThisTrial,responses,buttons,responsesAutopilot = \\\r\n letterLineupResponse.doLineup(myWin,bgColor,myMouse,clickSound,badKeySound,possibleResps,showBothSides,sideFirstLeftRightCentral,autopilot) #CAN'T YET HANDLE MORE THAN 2 LINEUPS\r\n expStop = np.array(expStop).any(); passThisTrial = np.array(passThisTrial).any()\r\n if not expStop:\r\n #data file output start\r\n print('main\\t', end='', file=dataFile) #first thing printed on each line of dataFile to indicate main part of experiment, not staircase\r\n print(nDoneMain,'\\t', end='', file=dataFile)\r\n print(subject,'\\t',task,'\\t', thisTrial['wordEccentricity'], '\\t', round(noisePercent,3),'\\t', end='', file=dataFile)\r\n print(thisTrial['leftStreamFlip'],'\\t', end='', file=dataFile)\r\n print(thisTrial['rightStreamFlip'],'\\t', end='', file=dataFile)\r\n print(thisTrial['rightResponseFirst'],'\\t', end='', file=dataFile)\r\n i = 0\r\n eachCorrect = np.ones(numRespsWanted)*-999; eachApproxCorrect = np.ones(numRespsWanted)*-999\r\n for i in range(numRespsWanted): #scored and printed to dataFile in left first, right second order even if collected in different order\r\n if thisTrial['rightResponseFirst']:\r\n if i==0:\r\n sequenceStream = sequenceStream2; correctAnswerIdxs = correctAnswerIdxsStream2; \r\n else: sequenceStream = sequenceStream1; correctAnswerIdxs = correctAnswerIdxsStream1; \r\n else: \r\n if i==0:\r\n sequenceStream = sequenceStream1; correctAnswerIdxs = correctAnswerIdxsStream1; \r\n else: sequenceStream = sequenceStream2; correctAnswerIdxs = correctAnswerIdxsStream2; \r\n correct,approxCorrect,responsePosRelative = (\r\n handleAndScoreResponse(passThisTrial,responses[i],responsesAutopilot,task,sequenceStream,thisTrial['cueSerialPos'],correctAnswerIdxs[i] ) )\r\n eachCorrect[i] = correct\r\n eachApproxCorrect[i] = approxCorrect\r\n #header then had seq1, seq2. Save them\r\n print(sequenceStream1,'\\t',sequenceStream2,'\\t', end='', file=dataFile) #print the indexes into the wordList\r\n print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile\r\n print('correct=',correct,' approxCorrect=',approxCorrect,' eachCorrect=',eachCorrect, ' responsePosRelative=', responsePosRelative)\r\n numTrialsCorrect += eachCorrect.all() #so count -1 as 0\r\n numTrialsApproxCorrect += eachApproxCorrect.all()\r\n numTrialsEachCorrect += eachCorrect #list numRespsWanted long\r\n numTrialsEachApproxCorrect += eachApproxCorrect #list numRespsWanted long\r\n \r\n if(sum(eachCorrect)==2):\r\n allCorrect=True\r\n else:\r\n allCorrect=False\r\n \r\n if exportImages: #catches one frame of response\r\n myWin.getMovieFrame() #I can't explain why another getMovieFrame, and core.wait is needed\r\n framesSaved +=1; core.wait(.1)\r\n myWin.saveMovieFrames('images_sounds_movies/frames.png') #mov not currently supported \r\n expStop=True\r\n core.wait(.1)\r\n if feedback: play_high_tone_correct_low_incorrect(allCorrect, passThisTrial=False)\r\n nDoneMain+=1\r\n \r\n dataFile.flush(); logging.flush()\r\n print('nDoneMain=', nDoneMain,' trials.nTotal=',trials.nTotal) #' trials.thisN=',trials.thisN\r\n if (trials.nTotal > 6 and nDoneMain > 2 and nDoneMain %\r\n ( trials.nTotal*pctCompletedBreak/100. 
) ==1): #don't modulus 0 because then will do it for last trial\r\n nextText.setText('Press \"SPACE\" to continue!')\r\n nextText.draw()\r\n progressMsg = 'Completed ' + str(nDoneMain) + ' of ' + str(trials.nTotal) + ' trials'\r\n NextRemindCountText.setText(progressMsg)\r\n NextRemindCountText.draw()\r\n myWin.flip() # myWin.flip(clearBuffer=True) \r\n waiting=True\r\n while waiting:\r\n if autopilot: break\r\n elif expStop == True:break\r\n for key in event.getKeys(): #check if pressed abort-type key\r\n if key in ['space','escape']: \r\n waiting=False\r\n if key in ['escape']:\r\n expStop = True\r\n myWin.clearBuffer()\r\n core.wait(.2); time.sleep(.2)\r\n #end main trials loop\r\ntimeAndDateStr = time.strftime(\"%H:%M on %d %b %Y\", time.localtime())\r\nmsg = 'Finishing at '+timeAndDateStr\r\nprint(msg); logging.info(msg)\r\nif expStop:\r\n msg = 'user aborted experiment on keypress with trials done=' + str(nDoneMain) + ' of ' + str(trials.nTotal+1)\r\n print(msg); logging.error(msg)\r\n\r\nif not doStaircase and (nDoneMain >0):\r\n msg = 'Of ' + str(nDoneMain)+' trials, on '+str(numTrialsCorrect*1.0/nDoneMain*100.)+'% of all trials all targets reported exactly correct'\r\n print(msg); logging.info(msg)\r\n msg= 'All targets approximately correct in '+ str( round(numTrialsApproxCorrect*1.0/nDoneMain*100,1)) + '% of trials'\r\n print(msg); logging.info(msg)\r\n for i in range(numRespsWanted):\r\n msg = 'stream'+str(i)+': '+str( round(numTrialsEachCorrect[i]*1.0/nDoneMain*100.,2) ) + '% correct'\r\n print(msg); logging.info(msg)\r\n msg = 'stream' + str(i) + ': '+ str( round(numTrialsEachApproxCorrect[i]*1.0/nDoneMain*100,2) ) +'% approximately correct'\r\n print(msg); logging.info(msg)\r\n\r\nlogging.flush(); dataFile.close()\r\nmyWin.close() #have to close window if want to show a plot\r\nif quitFinder:\r\n applescript=\"\\'tell application \\\"Finder\\\" to launch\\'\" #turn Finder back on\r\n shellCmd = 'osascript -e '+applescript\r\n os.system(shellCmd)" ]
[ [ "numpy.array", "pandas.DataFrame", "numpy.ones", "numpy.random.shuffle", "numpy.arange" ], [ "numpy.array", "numpy.isnan", "numpy.random.rand", "numpy.zeros", "numpy.median", "numpy.round", "numpy.ones", "numpy.random.shuffle", "numpy.diff", "numpy.where", "numpy.around", "numpy.floor" ] ]
xw-hu/DGNL-Net
[ "44e8516429fe8a4e7db615573454b24f51f595cc" ]
[ "modules.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass DGNL(nn.Module):\n def __init__(self, in_channels):\n super(DGNL, self).__init__()\n\n self.eps = 1e-6\n self.sigma_pow2 = 100\n\n self.theta = nn.Conv2d(in_channels, int(in_channels / 2), kernel_size=1)\n self.phi = nn.Conv2d(in_channels, int(in_channels / 2), kernel_size=1)\n self.g = nn.Conv2d(in_channels, int(in_channels / 2), kernel_size=1)\n\n self.down = nn.Conv2d(in_channels, in_channels, kernel_size=4, stride=4, groups=in_channels, bias=False)\n self.down.weight.data.fill_(1. / 16)\n\n self.z = nn.Conv2d(int(in_channels / 2), in_channels, kernel_size=1)\n\n\n\n def forward(self, x, depth_map):\n n, c, h, w = x.size()\n x_down = self.down(x)\n\n\t\t# [n, (h / 8) * (w / 8), c / 2]\n g = F.max_pool2d(self.g(x_down), kernel_size=2, stride=2).view(n, int(c / 2), -1).transpose(1, 2)\n\n ### appearance relation map\n # [n, (h / 4) * (w / 4), c / 2]\n theta = self.theta(x_down).view(n, int(c / 2), -1).transpose(1, 2)\n # [n, c / 2, (h / 8) * (w / 8)]\n phi = F.max_pool2d(self.phi(x_down), kernel_size=2, stride=2).view(n, int(c / 2), -1)\n\n\t\t# [n, (h / 4) * (w / 4), (h / 8) * (w / 8)]\n Ra = F.softmax(torch.bmm(theta, phi), 2)\n\n\n ### depth relation map\n depth1 = F.interpolate(depth_map, size=[int(h / 4), int(w / 4)], mode='bilinear', align_corners = True).view(n, 1, int(h / 4)*int(w / 4)).transpose(1,2)\n depth2 = F.interpolate(depth_map, size=[int(h / 8), int(w / 8)], mode='bilinear', align_corners = True).view(n, 1, int(h / 8)*int(w / 8))\n\n # n, (h / 4) * (w / 4), (h / 8) * (w / 8)\n depth1_expand = depth1.expand(n, int(h / 4) * int(w / 4), int(h / 8) * int(w / 8))\n depth2_expand = depth2.expand(n, int(h / 4) * int(w / 4), int(h / 8) * int(w / 8))\n\n Rd = torch.min(depth1_expand / (depth2_expand + self.eps), depth2_expand / (depth1_expand + self.eps))\n\n # normalization: depth relation map [n, (h / 4) * (w / 4), (h / 8) * (w / 8)]\n # Rd = Rd / (torch.sum(Rd, 2).view(n, int(h / 4) * int(w / 4), 1) + self.eps)\n\n Rd = F.softmax(Rd, 2)\n\n\n # ### position relation map\n # position_h = torch.Tensor(range(h)).cuda().view(h, 1).expand(h, w)\n # position_w = torch.Tensor(range(w)).cuda().view(1, w).expand(h, w)\n\t\t#\n # position_h1 = F.interpolate(position_h.unsqueeze(0).unsqueeze(0), size=[int(h / 4), int(w / 4)], mode='bilinear', align_corners=True).view(1, 1, int(h / 4) * int(w / 4)).transpose(1,2)\n # position_h2 = F.interpolate(position_h.unsqueeze(0).unsqueeze(0), size=[int(h / 8), int(w / 8)], mode='bilinear', align_corners=True).view(1, 1, int(h / 8) * int(w / 8))\n # position_h1_expand = position_h1.expand(n, int(h / 4) * int(w / 4), int(h / 8) * int(w / 8))\n # position_h2_expand = position_h2.expand(n, int(h / 4) * int(w / 4), int(h / 8) * int(w / 8))\n # h_distance = (position_h1_expand - position_h2_expand).pow(2)\n\t\t#\n # position_w1 = F.interpolate(position_w.unsqueeze(0).unsqueeze(0), size=[int(h / 4), int(w / 4)], mode='bilinear', align_corners=True).view(1, 1, int(h / 4) * int(w / 4)).transpose(1, 2)\n # position_w2 = F.interpolate(position_w.unsqueeze(0).unsqueeze(0), size=[int(h / 8), int(w / 8)], mode='bilinear', align_corners=True).view(1, 1, int(h / 8) * int(w / 8))\n # position_w1_expand = position_w1.expand(n, int(h / 4) * int(w / 4), int(h / 8) * int(w / 8))\n # position_w2_expand = position_w2.expand(n, int(h / 4) * int(w / 4), int(h / 8) * int(w / 8))\n # w_distance = (position_w1_expand - position_w2_expand).pow(2)\n\t\t#\n # Rp = 1 / (2 * 3.14159265 * 
self.sigma_pow2) * torch.exp(-0.5 * (h_distance / self.sigma_pow2 + w_distance / self.sigma_pow2))\n\t\t#\n # Rp = Rp / (torch.sum(Rp, 2).view(n, int(h / 4) * int(w / 4), 1) + self.eps)\n\n\n ### overall relation map\n #S = F.softmax(Ra * Rd * Rp, 2)\n\n S = F.softmax(Ra * Rd, 2)\n\n\n # [n, c / 2, h / 4, w / 4]\n y = torch.bmm(S, g).transpose(1, 2).contiguous().view(n, int(c / 2), int(h / 4), int(w / 4))\n\n return x + F.upsample(self.z(y), size=x.size()[2:], mode='bilinear', align_corners = True)\n\n\n\nclass NLB(nn.Module):\n def __init__(self, in_channels):\n super(NLB, self).__init__()\n self.theta = nn.Conv2d(in_channels, int(in_channels / 2), kernel_size=1)\n self.phi = nn.Conv2d(in_channels, int(in_channels / 2), kernel_size=1)\n self.g = nn.Conv2d(in_channels, int(in_channels / 2), kernel_size=1)\n\n self.down = nn.Conv2d(in_channels, in_channels, kernel_size=4, stride=4, groups=in_channels, bias=False)\n self.down.weight.data.fill_(1. / 16)\n\n self.z = nn.Conv2d(int(in_channels / 2), in_channels, kernel_size=1)\n\n def forward(self, x):\n n, c, h, w = x.size()\n x_down = self.down(x)\n\n # [n, (h / 4) * (w / 4), c / 2]\n theta = self.theta(x_down).view(n, int(c / 2), -1).transpose(1, 2)\n # [n, c / 2, (h / 8) * (w / 8)]\n phi = F.max_pool2d(self.phi(x_down), kernel_size=2, stride=2).view(n, int(c / 2), -1)\n # [n, (h / 8) * (w / 8), c / 2]\n g = F.max_pool2d(self.g(x_down), kernel_size=2, stride=2).view(n, int(c / 2), -1).transpose(1, 2)\n # [n, (h / 4) * (w / 4), (h / 8) * (w / 8)]\n f = F.softmax(torch.bmm(theta, phi), 2)\n # [n, c / 2, h / 4, w / 4]\n y = torch.bmm(f, g).transpose(1, 2).contiguous().view(n, int(c / 2), int(h / 4), int(w / 4))\n\n return x + F.upsample(self.z(y), size=x.size()[2:], mode='bilinear', align_corners=True)\n\n\nclass DepthWiseDilatedResidualBlock(nn.Module):\n def __init__(self, reduced_channels, channels, dilation):\n super(DepthWiseDilatedResidualBlock, self).__init__()\n self.conv0 = nn.Sequential(\n\n\t\t # pw\n\t\t nn.Conv2d(channels, channels * 2, 1, 1, 0, 1, bias=False),\n\t\t\tnn.ReLU6(inplace=True),\n\t\t # dw\n\t\t nn.Conv2d(channels*2, channels*2, kernel_size=3, padding=dilation, dilation=dilation, groups=channels, bias=False),\n\t\t nn.ReLU6(inplace=True),\n\t\t # pw-linear\n\t\t nn.Conv2d(channels*2, channels, 1, 1, 0, 1, 1, bias=False)\n )\n\n self.conv1 = nn.Sequential(\n\t\t\t# pw\n\t\t\t# nn.Conv2d(channels, channels * 2, 1, 1, 0, 1, bias=False),\n\t\t\t# nn.ReLU6(inplace=True),\n\t\t\t# dw\n\t\t\tnn.Conv2d(channels, channels, kernel_size=3, padding=dilation, dilation=dilation, groups=channels,\n\t\t\t\t\t bias=False),\n\t\t\tnn.ReLU6(inplace=True),\n\t\t\t# pw-linear\n\t\t\tnn.Conv2d(channels, channels, 1, 1, 0, 1, 1, bias=False)\n\t\t)\n\n\n def forward(self, x):\n res = self.conv1(self.conv0(x))\n return res + x\n\n\nclass DilatedResidualBlock(nn.Module):\n def __init__(self, channels, dilation):\n super(DilatedResidualBlock, self).__init__()\n self.conv0 = nn.Sequential(\n nn.Conv2d(channels, channels, kernel_size=3, padding=dilation, dilation=dilation), nn.ReLU()\n )\n self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=dilation, dilation=dilation)\n\n def forward(self, x):\n conv0 = self.conv0(x)\n conv1 = self.conv1(conv0)\n return x + conv1\n\n\nclass SpatialRNN(nn.Module):\n\t\"\"\"\n\tSpatialRNN model for one direction only\n\t\"\"\"\n\tdef __init__(self, alpha = 1.0, channel_num = 1, direction = \"right\"):\n\t\tsuper(SpatialRNN, self).__init__()\n\t\tself.alpha = nn.Parameter(torch.Tensor([alpha] * 
channel_num))\n\t\tself.direction = direction\n\n\tdef __getitem__(self, item):\n\t\treturn self.alpha[item]\n\n\tdef __len__(self):\n\t\treturn len(self.alpha)\n\n\n\tdef forward(self, x):\n\t\t\"\"\"\n\t\t:param x: (N,C,H,W)\n\t\t:return:\n\t\t\"\"\"\n\t\theight = x.size(2)\n\t\twidth = x.size(3)\n\t\tx_out = []\n\n\t\t# from left to right\n\t\tif self.direction == \"right\":\n\t\t\tx_out = [x[:, :, :, 0].clamp(min=0)]\n\n\t\t\tfor i in range(1, width):\n\t\t\t\ttemp = (self.alpha.unsqueeze(1) * x_out[i - 1] + x[:, :, :, i]).clamp(min=0)\n\t\t\t\tx_out.append(temp) # a list of tensor\n\n\t\t\treturn torch.stack(x_out, 3) # merge into one tensor\n\n\t\t# from right to left\n\t\telif self.direction == \"left\":\n\t\t\tx_out = [x[:, :, :, -1].clamp(min=0)]\n\n\t\t\tfor i in range(1, width):\n\t\t\t\ttemp = (self.alpha.unsqueeze(1) * x_out[i - 1] + x[:, :, :, -i - 1]).clamp(min=0)\n\t\t\t\tx_out.append(temp)\n\n\t\t\tx_out.reverse()\n\t\t\treturn torch.stack(x_out, 3)\n\n\t\t# from up to down\n\t\telif self.direction == \"down\":\n\t\t\tx_out = [x[:, :, 0, :].clamp(min=0)]\n\n\t\t\tfor i in range(1, height):\n\t\t\t\ttemp = (self.alpha.unsqueeze(1) * x_out[i - 1] + x[:, :, i, :]).clamp(min=0)\n\t\t\t\tx_out.append(temp)\n\n\t\t\treturn torch.stack(x_out, 2)\n\n\t\t# from down to up\n\t\telif self.direction == \"up\":\n\t\t\tx_out = [x[:, :, -1, :].clamp(min=0)]\n\n\t\t\tfor i in range(1, height):\n\t\t\t\ttemp = (self.alpha.unsqueeze(1) * x_out[i - 1] + x[:, :, -i - 1, :]).clamp(min=0)\n\t\t\t\tx_out.append(temp)\n\n\t\t\tx_out.reverse()\n\t\t\treturn torch.stack(x_out, 2)\n\n\t\telse:\n\t\t\traise KeyError(\"Invalid direction in SpatialRNN!\")\n\n\n\nclass TVLoss(nn.Module):\n def __init__(self, tv_loss_weight=1):\n super(TVLoss, self).__init__()\n self.tv_loss_weight = tv_loss_weight\n\n def forward(self, x):\n batch_size = x.size()[0]\n h_x = x.size()[2]\n w_x = x.size()[3]\n count_h = self.tensor_size(x[:, :, 1:, :])\n count_w = self.tensor_size(x[:, :, :, 1:])\n h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()\n w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()\n return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size\n\n @staticmethod\n def tensor_size(t):\n return t.size()[1] * t.size()[2] * t.size()[3]\n\n\nclass NLB(nn.Module):\n def __init__(self, in_channels):\n super(NLB, self).__init__()\n self.theta = nn.Conv2d(in_channels, int(in_channels / 2), kernel_size=1)\n self.phi = nn.Conv2d(in_channels, int(in_channels / 2), kernel_size=1)\n self.g = nn.Conv2d(in_channels, int(in_channels / 2), kernel_size=1)\n\n self.down = nn.Conv2d(in_channels, in_channels, kernel_size=4, stride=4, groups=in_channels, bias=False)\n self.down.weight.data.fill_(1. 
/ 16)\n\n self.z = nn.Conv2d(int(in_channels / 2), in_channels, kernel_size=1)\n\n def forward(self, x):\n n, c, h, w = x.size()\n x_down = self.down(x)\n\n # [n, (h / 4) * (w / 4), c / 2]\n theta = self.theta(x_down).view(n, int(c / 2), -1).transpose(1, 2)\n # [n, c / 2, (h / 8) * (w / 8)]\n phi = F.max_pool2d(self.phi(x_down), kernel_size=2, stride=2).view(n, int(c / 2), -1)\n # [n, (h / 8) * (w / 8), c / 2]\n g = F.max_pool2d(self.g(x_down), kernel_size=2, stride=2).view(n, int(c / 2), -1).transpose(1, 2)\n # [n, (h / 4) * (w / 4), (h / 8) * (w / 8)]\n f = F.softmax(torch.bmm(theta, phi), 2)\n # [n, c / 2, h / 4, w / 4]\n y = torch.bmm(f, g).transpose(1, 2).contiguous().view(n, int(c / 2), int(h / 4), int(w / 4))\n\n return x + F.upsample(self.z(y), size=x.size()[2:], mode='bilinear', align_corners=True)\n\n\n\n\n\n\n\n\n\n# class DGNLB(nn.Module):\n# def __init__(self, in_channels):\n# super(DGNLB, self).__init__()\n#\n# self.roll = nn.Conv2d(1, int(in_channels / 2), kernel_size=1)\n# self.ita = nn.Conv2d(1, int(in_channels / 2), kernel_size=1)\n#\n# self.theta = nn.Conv2d(in_channels, int(in_channels / 2), kernel_size=1)\n# self.phi = nn.Conv2d(in_channels, int(in_channels / 2), kernel_size=1)\n# self.g = nn.Conv2d(in_channels, int(in_channels / 2), kernel_size=1)\n#\n# self.down = nn.Conv2d(in_channels, in_channels, kernel_size=4, stride=4, groups=in_channels, bias=False)\n# self.down.weight.data.fill_(1. / 16)\n#\n# #self.down_depth = nn.Conv2d(1, 1, kernel_size=4, stride=4, groups=in_channels, bias=False)\n# #self.down_depth.weight.data.fill_(1. / 16)\n#\n# self.z = nn.Conv2d(int(in_channels / 2), in_channels, kernel_size=1)\n#\n# def forward(self, x, depth):\n# n, c, h, w = x.size()\n# x_down = self.down(x)\n#\n# depth_down = F.avg_pool2d(depth, kernel_size=(4,4))\n#\n# # [n, (h / 4) * (w / 4), c / 2]\n# #roll = self.roll(depth_down).view(n, int(c / 2), -1).transpose(1, 2)\n# # [n, c / 2, (h / 4) * (w / 4)]\n# #ita = self.ita(depth_down).view(n, int(c / 2), -1)\n# # [n, (h / 4) * (w / 4), (h / 4) * (w / 4)]\n#\n# depth_correlation = F.softmax(torch.bmm(depth_down.view(n, 1, -1).transpose(1, 2), depth_down.view(n, 1, -1)), 2)\n#\n#\n# # [n, (h / 4) * (w / 4), c / 2]\n# theta = self.theta(x_down).view(n, int(c / 2), -1).transpose(1, 2)\n# # [n, c / 2, (h / 8) * (w / 8)]\n# phi = F.max_pool2d(self.phi(x_down), kernel_size=2, stride=2).view(n, int(c / 2), -1)\n# # [n, (h / 8) * (w / 8), c / 2]\n# g = F.max_pool2d(self.g(x_down), kernel_size=2, stride=2).view(n, int(c / 2), -1).transpose(1, 2)\n# # [n, (h / 4) * (w / 4), (h / 8) * (w / 8)]\n# f_correlation = F.softmax(torch.bmm(theta, phi), 2)\n# # [n, (h / 4) * (w / 4), (h / 8) * (w / 8)]\n# final_correlation = F.softmax(torch.bmm(depth_correlation, f_correlation), 2)\n#\n# # [n, c / 2, h / 4, w / 4]\n# y = torch.bmm(final_correlation, g).transpose(1, 2).contiguous().view(n, int(c / 2), int(h / 4), int(w / 4))\n#\n# return x + F.upsample(self.z(y), size=x.size()[2:], mode='bilinear', align_corners=True)\n" ]
[ [ "torch.stack", "torch.min", "torch.bmm", "torch.nn.ReLU6", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.functional.softmax", "torch.Tensor", "torch.pow" ] ]
sidgurun/zELDA
[ "60f5fa3936c643acdb4ea7ee502c9fc4b31abf92" ]
[ "zELDA/__init__.py" ]
[ "import os\nimport os.path\n\nimport time\n\n#from pylab import *\n\nimport sys\nimport shutil\n\nimport urllib\n\nimport numpy as np\n\nfrom sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor\nfrom sklearn.neighbors import KNeighborsRegressor\n\nimport pickle\n\nfrom scipy.stats import norm\n\nfrom scipy.optimize import curve_fit\n\nfrom scipy.ndimage import gaussian_filter1d\n\nimport emcee\n\nfrom sklearn.neural_network import MLPRegressor\n\nfrom pyswarms.single.global_best import GlobalBestPSO\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Check_if_DATA_files_are_found():\n\n this_dir, this_filename = os.path.split(__file__)\n\n Bool_1 = True\n \n arxiv_with_file_names = this_dir + '/DATA/List_of_DATA_files'\n\n with open( arxiv_with_file_names ) as fd:\n\n for line in fd.readlines():\n\n arxiv_name = line.strip('\\n')\n\n Bool_1 = Bool_1 * os.path.isfile( this_dir + '/DATA/' + arxiv_name )\n\n return Bool_1\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Download_data():\n\n this_dir, this_filename = os.path.split(__file__)\n\n arxiv_with_file_names = this_dir + '/DATA/List_of_DATA_files'\n\n file_where_to_store_data = this_dir + '/DATA/'\n\n print( 'This package is stored in ', this_dir , '(Please, note that we are not spying you.)' )\n\n print( 'Saving data in...' , file_where_to_store_data )\n\n http_url = 'http://www.cefca.es/people/~sidgurung/ShouT/ShouT/DATA/'\n\n\n testfile = urllib.request.URLopener()\n\n with open( arxiv_with_file_names ) as fd:\n\n for line in fd.readlines():\n\n arxiv_name = line.strip('\\n')\n\n print( 'Downloaing...' , http_url + arxiv_name )\n\n testfile.retrieve( http_url + arxiv_name , arxiv_name )\n\n print( '--> Done!' )\n\n print( 'Moving Downloaded file to' , file_where_to_store_data )\n\n shutil.move( arxiv_name , file_where_to_store_data + arxiv_name )\n\n print( '--> Done' )\n\n if Check_if_DATA_files_are_found():\n print( '\\nHey man, looks like everything is done! That is brilliant!' )\n\n else:\n print( 'This is weird... We just downloaded everthing but the files are not found...Exiting...')\n print( 'Error. Human is dead. Mismatch.')\n sys.exit()\n\n return\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef load_machine_fesc( Machine , property_name , Geometry ):#, INSIDE_BICONE=True ):\n\n '''\n This functions gives you the trained model that you want to use.\n '''\n\n Machine_Set = [ 'KN' , 'Grad' , 'Tree' , 'Forest' ]\n\n Geometry_Set = [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ]\n\n geo_code = [ 'thin' , 'wind' , 'Bicone_X_Slab' , 'Bicone_X_Slab' ]\n\n Property_Set = [ 'KKK' , 'CCC' , 'LLL' , 'f_esc' ]\n\n assert property_name in Property_Set , \"Houston we've got a problem, Error Code = 23452345.7523\"\n\n index = np.where( Geometry == np.array(Geometry_Set) )[0][0]\n\n this_dir, this_filename = os.path.split(__file__)\n\n #/global/users/sidgurung/PROMARE/Grids\n\n print( 'HARDCORING PATH TO GRIDS!!!!' 
\n #this_dir = '/global/users/sidgurung/PROMARE/Grids/'\n\n this_dir = '/global/users/sidgurung/PROMARE/Grids/'\n\n #filename_root = 'DATA/finalized_model_'+ geo_code[index] +'_f_esc_' + Machine + '_' + property_name\n\n filename_root = 'finalized_model_'+ geo_code[index] +'_f_esc_' + Machine + '_' + property_name \n\n if Geometry == 'Bicone_X_Slab_In':\n filename_root += '_Inside_Bicone_' + str(True)\n\n if Geometry == 'Bicone_X_Slab_Out':\n filename_root += '_Inside_Bicone_' + str(False)\n\n filename = filename_root + '.sav'\n\n filename = os.path.join(this_dir, filename)\n\n with open(filename, 'rb') as f:\n loaded_model = pickle.load(f)\n\n return loaded_model\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Analytic_f_esc_Thin_Shell( V_Arr , logNH_Arr , ta_Arr ):\n\n NH18 = 10 ** ( logNH_Arr - 18 ) \n\n #New MCMC\n c11 = 10**(1.90526)\n c12 = -10**(2.0399)\n c13 = 10**(2.34829)\n c21 = 10**(-3.138837)\n c22 = -10**(-1.92151)\n c23 = 10**(-1.1860205000000001)\n c24 = -10**(-0.1480042)\n c3 = 10**(0.0530715)\n c4 = 10**(-2.743455)\n\n C1 = ( ( np.log10(NH18) ) ** 2 ) * c11 + np.log10(NH18) * c12 + c13\n y = np.log10(NH18)\n C2 = c21*y*y*y + c22*y*y + c23*y + c24\n C3 = c3\n C4 = c4\n\n K1 = C1 * ( V_Arr ** C2 )\n K2 = C3 * ( V_Arr ** C4 )\n\n fesc = 1. / np.cosh( np.sqrt( K1 * ( ta_Arr ** K2 ) ) )\n\n return fesc\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Analytic_f_esc_Wind( V_Arr , logNH_Arr , ta_Arr ):\n\n NH18 = 10 ** ( logNH_Arr - 18 )\n\n # New MCMC\n c11 = 10**(0.4852541)\n c12 = 10**(-0.2006394)\n c21 = 10**(-1.912059)\n c22 = -10**(-0.6380347)\n c3 = 10**(0.046314074999999996)\n c4 = 10**(-1.782037)\n\n C1 = c11 * ( NH18 ** c12 )\n C2 = c21 * np.log10( NH18 )**2 + c22 * np.log10(NH18) #+ c23\n C3 = c3\n C4 = c4\n\n K1 = C1 * V_Arr ** C2\n K2 = C3 * V_Arr ** C4\n\n fesc = 1./ np.cosh( np.sqrt( K1 * ta_Arr ** K2 ) )\n\n return fesc\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef RT_f_esc_Analytic( Geometry , V_Arr , logNH_Arr , ta_Arr , Machine_Learning_Algorithm=None ):\n\n # Machine_Learning_Algorithm is accepted (and ignored) so that RT_f_esc\n # can call every backend with the same signature.\n\n Geometry_Set = [ 'Thin_Shell' , 'Galactic_Wind' ]\n\n assert Geometry in Geometry_Set , 'The geometry ' + Geometry + ' is not supported in MODE=Analytic , only Thin_Shell and Galactic_Wind'\n\n logNH_Arr = np.atleast_1d( logNH_Arr )\n ta_Arr = np.atleast_1d( ta_Arr )\n V_Arr = np.atleast_1d( V_Arr )\n\n if Geometry == 'Thin_Shell' :\n f_esc_Arr = Analytic_f_esc_Thin_Shell( V_Arr , logNH_Arr , ta_Arr )\n if Geometry == 'Galactic_Wind' :\n f_esc_Arr = Analytic_f_esc_Wind( V_Arr , logNH_Arr , ta_Arr )\n\n return f_esc_Arr\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef fesc_of_ta_Thin_and_Wind( ta , CCC , KKK ):\n\n f_esc = 1./np.cosh( np.sqrt( CCC * (ta**KKK) ) )\n\n return f_esc
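\n#====================================================================#\n#====================================================================#\n#====================================================================#\n# Minimal usage sketch for the analytic escape-fraction fits above.\n# The numbers are illustrative assumptions, not reference values:\n#\n# >>> RT_f_esc_Analytic( 'Thin_Shell' , [ 200.0 ] , [ 20.0 ] , [ 0.1 ] )\n#\n# This returns a one-element array with f_esc for V_exp = 200 km/s ,\n# logNH = 20 and tau_a = 0.1 .\n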
#====================================================================#\n#====================================================================#\n#====================================================================#\ndef fesc_of_ta_Bicone( ta , CCC , KKK , LLL ):\n \n f_esc = LLL * 1./np.cosh( np.sqrt( CCC * (ta**KKK) ) )\n \n return f_esc\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef RT_f_esc_Machine_Parameter( Geometry , V_Arr , logNH_Arr , ta_Arr , Machine_Learning_Algorithm='Tree' ):\n\n logNH_Arr = np.atleast_1d( logNH_Arr )\n ta_Arr = np.atleast_1d( ta_Arr )\n V_Arr = np.atleast_1d( V_Arr )\n\n Coor_matrix = np.zeros( len(V_Arr) * 2 ).reshape( len(V_Arr) , 2 )\n\n Coor_matrix[ : , 0 ] = V_Arr\n Coor_matrix[ : , 1 ] = logNH_Arr\n\n if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :\n\n CCC_machine = load_machine_fesc( Machine_Learning_Algorithm , 'CCC' , Geometry )\n KKK_machine = load_machine_fesc( Machine_Learning_Algorithm , 'KKK' , Geometry )\n\n CCC_model_Arr = CCC_machine.predict( Coor_matrix )\n KKK_model_Arr = KKK_machine.predict( Coor_matrix )\n\n f_esc_Arr = fesc_of_ta_Thin_and_Wind( ta_Arr , CCC_model_Arr , KKK_model_Arr )\n\n if Geometry in [ 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ] :\n\n CCC_machine_in = load_machine_fesc( Machine_Learning_Algorithm , 'CCC' , Geometry )\n KKK_machine_in = load_machine_fesc( Machine_Learning_Algorithm , 'KKK' , Geometry )\n LLL_machine_in = load_machine_fesc( Machine_Learning_Algorithm , 'LLL' , Geometry )\n\n CCC_model_in_Arr = CCC_machine_in.predict( Coor_matrix )\n KKK_model_in_Arr = KKK_machine_in.predict( Coor_matrix )\n LLL_model_in_Arr = LLL_machine_in.predict( Coor_matrix )\n\n f_esc_Arr = fesc_of_ta_Bicone( ta_Arr , CCC_model_in_Arr , KKK_model_in_Arr , LLL_model_in_Arr )\n\n return f_esc_Arr\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef RT_f_esc_Machine_Values( Geometry , V_Arr , logNH_Arr , ta_Arr , Machine_Learning_Algorithm='Tree' ):\n\n logNH_Arr = np.atleast_1d( logNH_Arr )\n ta_Arr = np.atleast_1d( ta_Arr )\n V_Arr = np.atleast_1d( V_Arr )\n\n Coor_matrix = np.zeros( len(V_Arr) * 3 ).reshape( len(V_Arr) , 3 )\n\n Coor_matrix[ : , 0 ] = V_Arr\n Coor_matrix[ : , 1 ] = logNH_Arr\n Coor_matrix[ : , 2 ] = np.log10(ta_Arr)\n\n loaded_model = load_machine_fesc( Machine_Learning_Algorithm , 'f_esc' , Geometry )\n \n f_esc_Arr = loaded_model.predict( Coor_matrix )\n \n return f_esc_Arr\n#====================================================================#\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Linear_ND_interpolator( N_dim , Coor_props_Matrix , Coor_grid_list , Field_in_grid_Matrix ):\n\n '''\n Interpolates in an arbitrary dimension space \n\n Parameters\n ----------\n N_dim : int\n Number of dimensions.\n\n Coor_props_Matrix : List of N_dim float values\n Coordinates in the N_dim space to evaluate.\n For example [ X , Y , Z ]\n \n\n Coor_grid_list : List of N_dim 1-D sequence of floats\n For example, if there is a field evaluated in X_Arr, Y_Arr, Z_Arr\n [ X_Arr , Y_Arr , Z_Arr ]\n\n 
Field_in_grid_Matrix : numpy array with the field to interpolate\n\n Returns\n -------\n Field_at_the_prob_point : numpy array\n The field interpolated at Coor_props_Matrix.\n '''\n\n dic_index = {}\n\n for i in range( 0 , N_dim ):\n\n dic_index['index_' + str(i) ] = np.where( ( Coor_grid_list[i] < Coor_props_Matrix[i] ) )[0][-1]\n\n dic_prob = {}\n\n for i in range( 0 , N_dim ):\n\n INDEX = dic_index['index_' + str(i) ]\n\n #print( 'INDEX' , INDEX)\n\n diff_i = Coor_grid_list[i][ INDEX+1 ] - Coor_grid_list[i][ INDEX ]\n\n min_i = Coor_grid_list[i][ INDEX ]\n\n dic_prob['prob_' + str(i) ] = ( Coor_props_Matrix[i] - min_i ) * 1. / diff_i\n\n #print( 'prob' , dic_prob['prob_' + str(i) ] )\n\n N_points = 2 ** N_dim\n\n VOLs = {}\n FIELDs = {}\n\n for i in range( 0 , N_points ):\n\n binary_str = '{0:b}'.format( i ).zfill( N_dim )\n\n VOLs[ 'vol_' + str( i ) ] = 1.\n\n eval_INDEX = []\n\n for j in range( 0 , N_dim ):\n\n CTE = int( binary_str[j] )\n\n eval_INDEX.append( dic_index['index_' + str(j) ] + CTE )\n\n if CTE == 0 : size_j = ( 1. - dic_prob[ 'prob_' + str(j) ] )\n if CTE == 1 : size_j = ( dic_prob[ 'prob_' + str(j) ] )\n\n pre_vol = VOLs[ 'vol_' + str( i ) ]\n\n VOLs[ 'vol_' + str( i ) ] = pre_vol * size_j\n\n eval_INDEX = tuple( eval_INDEX )\n\n FIELDs['field_' + str(i) ] = Field_in_grid_Matrix[ eval_INDEX ]\n\n\n Zero_Zero = np.array( eval_INDEX ) * 0\n\n Zero_Zero = tuple( Zero_Zero.tolist() )\n\n Field_at_the_prob_point = np.zeros_like( Field_in_grid_Matrix[Zero_Zero] )\n\n for i in range( 0 , N_points ) :\n\n #print( VOLs[ 'vol_' + str( i ) ] , FIELDs['field_' + str(i) ] )\n\n Field_at_the_prob_point += VOLs[ 'vol_' + str( i ) ] * FIELDs['field_' + str(i) ]\n\n return Field_at_the_prob_point\n#====================================================================#\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef load_Grid_fesc( Geometry , MODE ):\n\n Geometry_Set = [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out']\n\n geo_code = [ 'Thin_Shell' , 'Wind' , 'Bicone_X_Slab' , 'Bicone_X_Slab' ]\n\n MODE_Set = [ 'Parameters' , 'values' ]\n\n index = np.where( Geometry == np.array(Geometry_Set) )[0][0]\n\n #filename_root = 'DATA/Dictonary_'+ geo_code[index] +'_Grid_f_esc_' + MODE \n filename_root = 'Dictonary_'+ geo_code[index] +'_Grid_f_esc_' + MODE \n\n if Geometry == 'Bicone_X_Slab_In':\n filename_root += '_Inside_Bicone_' + str(True)\n\n if Geometry == 'Bicone_X_Slab_Out':\n filename_root += '_Inside_Bicone_' + str(False)\n\n filename = filename_root + '.npy'\n\n this_dir, this_filename = os.path.split(__file__)\n\n print( 'HARDCODING PATH TO GRIDS!!!!' )
\n #this_dir = '/global/users/sidgurung/PROMARE/Grids/'\n this_dir = '/global/users/sidgurung/PROMARE/Grids/'\n\n filename = os.path.join(this_dir, filename)\n\n loaded_model = np.load( filename , allow_pickle=True , encoding='latin1' ).item()\n\n return loaded_model\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Interpolate_f_esc_Arrays_2D_grid( V_Arr , logNH_Arr , ta_Arr , Grid_Dictionary , Geometry ):\n\n V_Arr_Grid = Grid_Dictionary[ 'V_Arr' ]\n\n logNH_Arr_Grid = Grid_Dictionary[ 'logNH_Arr' ]\n\n logta_Arr_Grid = Grid_Dictionary[ 'logta_Arr' ]\n\n Grid = Grid_Dictionary[ 'Grid' ]\n\n N_objects = len( V_Arr )\n\n CCC_Arr_evaluated = np.zeros( N_objects )\n KKK_Arr_evaluated = np.zeros( N_objects )\n\n ###################\n\n Coor_grid_list = [ V_Arr_Grid , logNH_Arr_Grid ]\n\n if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :\n\n for INDEX in range( 0 , N_objects ):\n\n Coor_props_Matrix = [ V_Arr[INDEX] , logNH_Arr[INDEX] ]\n\n CCC_Arr_evaluated[ INDEX ] , KKK_Arr_evaluated[ INDEX ] = Linear_ND_interpolator( 2 , Coor_props_Matrix , Coor_grid_list , Grid )\n\n f_esc_Arr = fesc_of_ta_Thin_and_Wind( ta_Arr , CCC_Arr_evaluated , KKK_Arr_evaluated )\n\n ###################\n\n if Geometry in [ 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ] :\n \n LLL_Arr_evaluated = np.zeros( N_objects )\n\n for INDEX in range( 0 , N_objects ):\n\n Coor_props_Matrix = [ V_Arr[INDEX] , logNH_Arr[INDEX] ]\n\n CCC_Arr_evaluated[ INDEX ] , KKK_Arr_evaluated[ INDEX ] , LLL_Arr_evaluated[ INDEX ] = Linear_ND_interpolator( 2 , Coor_props_Matrix , Coor_grid_list , Grid )\n\n f_esc_Arr = fesc_of_ta_Bicone( ta_Arr , CCC_Arr_evaluated , KKK_Arr_evaluated , LLL_Arr_evaluated )\n\n\n return f_esc_Arr\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Interpolate_fesc_Arrays_3D_grid( V_Arr , logNH_Arr , ta_Arr , Grid_Dictionary ):\n\n V_Arr_Grid = Grid_Dictionary[ 'V_Arr' ]\n\n logNH_Arr_Grid = Grid_Dictionary[ 'logNH_Arr' ]\n\n logta_Arr_Grid = Grid_Dictionary[ 'logta_Arr' ]\n\n Grid = Grid_Dictionary[ 'Grid' ]\n\n logta_Arr = np.log10( ta_Arr )\n\n N_objects = len( V_Arr )\n\n f_esc_Arr_evaluated = np.zeros( N_objects )\n\n Coor_Arr_list = [ V_Arr_Grid , logNH_Arr_Grid , logta_Arr_Grid ]\n\n for INDEX in range( 0 , N_objects ):\n\n Coor_list = [ V_Arr[INDEX] , logNH_Arr[INDEX] , logta_Arr[INDEX] ]\n\n f_esc_Arr_evaluated[ INDEX ] = Linear_ND_interpolator( 3 , Coor_list , Coor_Arr_list , Grid )\n\n return f_esc_Arr_evaluated
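\n#====================================================================#\n#====================================================================#\n#====================================================================#\n# Minimal sketch of the grid-interpolation path. It assumes that the\n# grid files are present in the hardcoded grid directory above; the\n# numeric values are illustrative:\n#\n# >>> grid_dic = load_Grid_fesc( 'Thin_Shell' , 'values' )\n# >>> f_esc = Interpolate_fesc_Arrays_3D_grid( np.array([ 200.0 ]) , np.array([ 20.0 ]) , np.array([ 0.1 ]) , grid_dic )\n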
#====================================================================#\n#====================================================================#\n#====================================================================#\ndef RT_f_esc_Interpolation_Values( Geometry , V_Arr , logNH_Arr , ta_Arr , Machine_Learning_Algorithm=None ):\n\n logNH_Arr = np.atleast_1d( logNH_Arr )\n ta_Arr = np.atleast_1d( ta_Arr )\n V_Arr = np.atleast_1d( V_Arr )\n\n DATA_DICTIONARY = load_Grid_fesc( Geometry , 'values' )\n\n f_esc_Arr = Interpolate_fesc_Arrays_3D_grid( V_Arr , logNH_Arr , ta_Arr , DATA_DICTIONARY )\n\n return f_esc_Arr\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef RT_f_esc_Interpolation_Parameters( Geometry , V_Arr , logNH_Arr , ta_Arr , Machine_Learning_Algorithm=None ):\n\n logNH_Arr = np.atleast_1d( logNH_Arr )\n ta_Arr = np.atleast_1d( ta_Arr )\n V_Arr = np.atleast_1d( V_Arr )\n\n DATA_DICTIONARY = load_Grid_fesc( Geometry , 'Parameters' )\n\n f_esc_Arr = Interpolate_f_esc_Arrays_2D_grid( V_Arr , logNH_Arr , ta_Arr , DATA_DICTIONARY , Geometry )\n\n return f_esc_Arr\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef pre_treatment_f_esc( Geometry , V_Arr , logNH_Arr , ta_Arr , MODE ):\n\n V_Arr = np.atleast_1d( V_Arr )\n logNH_Arr = np.atleast_1d( logNH_Arr )\n ta_Arr = np.atleast_1d( ta_Arr )\n\n V_Arr = V_Arr.astype(float)\n logNH_Arr = logNH_Arr.astype(float)\n ta_Arr = ta_Arr.astype(float)\n\n bool1 = np.isfinite( V_Arr )\n bool2 = np.isfinite( logNH_Arr )\n bool3 = np.isfinite( ta_Arr )\n\n mask_good = bool1 * bool2 * bool3\n \n assert sum( mask_good ) != 0 , 'All the V-logNH-ta combinations are np.nan, -np.inf or np.inf'\n\n #============================================#\n if Geometry in [ 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ] :\n tmp_bool1 = V_Arr < 100.0\n tmp_bool2 = logNH_Arr >= 20.5\n\n tmp_mask = tmp_bool1 * tmp_bool2 \n\n mask_good = mask_good * ~tmp_mask\n \n #============================================#\n\n bool5 = V_Arr >= 10.00 \n\n bool6 = V_Arr <= 1000\n \n bool7 = logNH_Arr >= 17.0\n\n bool8 = logNH_Arr <= 22.0\n\n mask_good = mask_good * ( bool5 * bool6 ) * ( bool7 * bool8 )\n\n if MODE=='Raw':\n bool9 = ta_Arr >= 10**(-2.5)\n\n bool10 = ta_Arr <= 10**(-0.25)\n\n mask_good = mask_good * ( bool9 * bool10 )\n\n return mask_good\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef RT_f_esc( Geometry , V_Arr , logNH_Arr , ta_Arr , MODE='Parametrization' , Algorithm='Intrepolation' , Machine_Learning_Algorithm='Tree' ):\n\n\n '''\n Return the Lyman alpha escape fraction for given outflow properties.\n\n Parameters\n ----------\n Geometry : string\n The outflow geometry to use. Options: 'Thin_Shell',\n 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out'.\n\n V_Arr : 1-D sequence of float\n Array with the expansion velocity of the outflow. The units\n are km/s. \n\n logNH_Arr : 1-D sequence of float\n Array with the logarithm of the outflow neutral hydrogen\n column density. 
The units of the column density are in c.g.s,\n i.e., cm**-2.\n\n ta_Arr : 1-D sequence of float\n Array with the dust optical depth of the outflow. \n\n MODE : optional string\n Set the mode in which the escape fraction is computed. It can be:\n Analytic : it uses an analytic equation fitted to the output of the RT MC code.\n Parametrization : it computes the escape fraction using a function that depends on the \n dust optical depth as in Neufeld et al. 1990.\n Raw : it uses directly the output of the RT MC code.\n\n Default = 'Parametrization'\n\n\n Algorithm : optional string\n Set how the escape fraction is computed. If MODE='Analytic' then this variable is ignored.\n Intrepolation : Direct linear interpolation.\n Machine_Learning : uses machine learning algorithms\n \n Default = 'Intrepolation'\n\n\n Machine_Learning_Algorithm : optional string\n Set the machine learning algorithm used. Available:\n Tree : decision tree\n Forest : random forest\n KN : K-nearest neighbours\n\n Default = 'Tree'\n\n\n .. versionadded:: 0.0.3\n\n Returns\n -------\n lines_Arr : 1-D sequence of float\n The Lyman alpha escape fraction for V_Arr[i] ,\n logNH_Arr[i] , ta_Arr[i].\n '''\n\n assert MODE in [ 'Parametrization' , 'Raw' , 'Analytic'] , 'The requested mode ' + MODE + ' is not available. The modes supported are : Parametrization , Raw , Analytic' \n\n assert Algorithm in [ 'Intrepolation' , 'Machine_Learning' ] , 'The requested algorithm ' + Algorithm + ' is not available. The algorithms supported are : Intrepolation , Machine_Learning' \n\n assert Geometry in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ] , 'The requested geometry ' + Geometry + ' is not available. The geometries supported are : Thin_Shell , Galactic_Wind , Bicone_X_Slab_In , Bicone_X_Slab_Out'\n \n # Cast the inputs so that boolean masking below also works for plain lists.\n V_Arr = np.atleast_1d( V_Arr ).astype(float)\n logNH_Arr = np.atleast_1d( logNH_Arr ).astype(float)\n ta_Arr = np.atleast_1d( ta_Arr ).astype(float)\n\n mask_good = pre_treatment_f_esc( Geometry , V_Arr , logNH_Arr , ta_Arr , MODE ) \n\n f_esc_Arr = np.zeros( len( mask_good ) ) * np.nan\n\n if MODE == 'Parametrization' :\n\n if Algorithm == 'Intrepolation' :\n function_to_use = RT_f_esc_Interpolation_Parameters \n\n if Algorithm == 'Machine_Learning':\n function_to_use = RT_f_esc_Machine_Parameter\n\n if MODE == 'Raw' :\n\n if Algorithm == 'Intrepolation' :\n function_to_use = RT_f_esc_Interpolation_Values\n\n if Algorithm == 'Machine_Learning':\n function_to_use = RT_f_esc_Machine_Values\n\n if MODE == 'Analytic' :\n\n function_to_use = RT_f_esc_Analytic \n\n f_esc_Arr[ mask_good ] = function_to_use( Geometry , V_Arr[ mask_good ] , logNH_Arr[ mask_good ] , ta_Arr[ mask_good ] , Machine_Learning_Algorithm=Machine_Learning_Algorithm )\n\n return f_esc_Arr
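\n#====================================================================#\n#====================================================================#\n#====================================================================#\n# Minimal usage sketch for RT_f_esc. MODE='Analytic' avoids reading the\n# grids from disk; all numeric values are illustrative assumptions:\n#\n# >>> f_esc_Arr = RT_f_esc( 'Thin_Shell' , [ 200.0 ] , [ 20.0 ] , [ 0.1 ] , MODE='Analytic' )\n#\n# Entries falling outside the V-logNH-ta ranges listed by\n# Print_the_grid_edges() are returned as np.nan .\n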
#====================================================================#\n#====================================================================#\n#====================================================================#\ndef define_RT_parameters( T4=None ):\n\n if T4 is None : \n T4 = 1. # = 10000. / 1e4\n\n nu0 = 2.46777 * 1.e15 #3. * 10.**8 / (1215.67 * (10**(-10)))\n Vth = 12.85 * np.sqrt(T4) # sqrt(1) = 1 , so the default T4=1 leaves Vth = 12.85 km/s\n Dv = Vth * nu0 *1. / ( 3 * (10**5))\n return nu0 , Dv\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef convert_x_into_lamda( x , T4=None ):\n nu0 , Dv = define_RT_parameters( T4 ) \n return( 3. * 1.e8 / ( x * Dv + nu0) )\n\ndef convert_lamda_into_x( lamda , T4=None ):\n nu0 , Dv = define_RT_parameters( T4 ) \n return( (( 3. * 1.e8 / lamda) -nu0 ) / Dv )\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef load_Grid_Line( Geometry ):\n\n '''\n Return the dictionary with all the properties of the grid where the lines were run.\n\n Parameters\n ----------\n Geometry : string\n The outflow geometry to use. Options: 'Thin_Shell',\n 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ,\n 'Thin_Shell_Cont'.\n\n Returns\n -------\n loaded_model : Dictionary\n This dictionary has all the information of the grid.\n Entries:\n 'V_Arr' : Array of velocity expansions used.[km/s]\n 'logNH_Arr' : Array of logarithm of the column density. [c.g.s.]\n 'logta_Arr' : Array of logarithm of the dust optical depth.\n 'x_Arr' : Array of frequency in Doppler units.\n 'Grid' : Array with the output of the RT MC code LyaRT:\n \n loaded_model['Grid'][i,j,k,:] has the line profile evaluated in loaded_model['x_Arr']\n with outflow velocity loaded_model['V_Arr'][i] , logarithm of the neutral hydrogen \n column density loaded_model['logNH_Arr'][j] and logarithm of dust optical depth \n loaded_model['logta_Arr'][k] \n\n For 'Thin_Shell_Cont' the dictionary also contains 'logEW_Arr' ,\n 'Wi_Arr' and 'w_Arr' , and 'Grid' gains the two extra dimensions.\n '''\n\n assert Geometry in [ 'Thin_Shell_Cont' , 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ] , 'The requested geometry ' + Geometry + ' is not available. The geometries supported are : Thin_Shell , Galactic_Wind , Bicone_X_Slab_In , Bicone_X_Slab_Out , Thin_Shell_Cont'\n\n Geometry_Set = [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ]\n\n geo_code = [ 'Thin_Shell' , 'Wind' , 'Bicone_X_Slab' , 'Bicone_X_Slab' ]\n\n this_dir, this_filename = os.path.split(__file__)\n\n print( 'HARDCODING PATH TO GRIDS!!!!' )
\n #this_dir = '/global/users/sidgurung/PROMARE/Grids/'\n this_dir = '/global/users/sidgurung/PROMARE/Grids/'\n\n if Geometry != 'Thin_Shell_Cont' :\n\n index = np.where( Geometry == np.array(Geometry_Set) )[0][0]\n\n filename_root = 'Dictonary_'+ geo_code[index] +'_Grid_Lines'\n\n if Geometry == 'Bicone_X_Slab_In':\n filename_root += '_In_Bicone_' + str(True)\n if Geometry == 'Bicone_X_Slab_Out':\n filename_root += '_In_Bicone_' + str(False)\n\n filename = filename_root + '.npy'\n\n filename = os.path.join(this_dir, filename)\n\n loaded_model = np.load( filename , allow_pickle=True , encoding='latin1' ).item()\n\n if Geometry == 'Thin_Shell_Cont' :\n\n NV = 29\n NNH = 19\n Nta = 9\n NEW = 20\n NWi = 31\n \n t_name = '_V_'+str(NV)+'_logNH_'+str(NNH)+'_logta_'+str(Nta)+'_EW_'+str(NEW)+'_Wi_'+str(NWi)+'.npy'\n\n loaded_model = {}\n \n tmp_1 = np.load( this_dir + 'GRID_info_' + t_name , allow_pickle=True ).item()\n GRID = np.load( this_dir + 'GRID_data_' + t_name )\n\n loaded_model['Grid'] = GRID\n loaded_model['V_Arr'] = tmp_1['V']\n loaded_model['logNH_Arr'] = tmp_1['logNH']\n loaded_model['logta_Arr'] = tmp_1['logta']\n loaded_model['logEW_Arr'] = tmp_1['logEW']\n loaded_model['Wi_Arr'] = tmp_1['Wi']\n\n loaded_model['x_Arr'] = convert_lamda_into_x( tmp_1['wavelength']*1e-10 )\n loaded_model['w_Arr'] = tmp_1['wavelength']\n\n return loaded_model\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Interpolate_Lines_Arrays_3D_grid( V_Arr , logNH_Arr , logta_Arr , x_Arr , Grid_Dictionary ):\n\n lines_Arr = np.zeros( len(V_Arr) * len( x_Arr ) ).reshape( len(V_Arr) , len( x_Arr ) )\n\n for i in range( 0 , len( V_Arr ) ):\n\n lines_Arr[i] = Interpolate_Lines_Arrays_3D_grid_MCMC( V_Arr[i] , logNH_Arr[i] , logta_Arr[i] , x_Arr , Grid_Dictionary )\n\n return lines_Arr\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Interpolate_Lines_Arrays_3D_grid_MCMC( V_Value , logNH_Value , logta_Value , x_Arr , Grid_Dictionary ):\n\n Grid_Line = Grid_Dictionary['Grid']\n\n V_Arr_Grid = Grid_Dictionary['V_Arr']\n x_Arr_Grid = Grid_Dictionary['x_Arr']\n\n logNH_Arr_Grid = Grid_Dictionary['logNH_Arr']\n logta_Arr_Grid = Grid_Dictionary['logta_Arr']\n\n Coor_Arr_list = [ V_Arr_Grid , logNH_Arr_Grid , logta_Arr_Grid ]\n \n Coor_list = [ V_Value , logNH_Value , logta_Value ]\n\n aux_line = Linear_ND_interpolator( 3 , Coor_list , Coor_Arr_list , Grid_Line )\n\n aux_line_1 = np.interp( x_Arr , x_Arr_Grid , aux_line , left=aux_line[0] , right=aux_line[-1] )\n\n Integral = np.trapz( aux_line_1 , x_Arr )\n\n aux_line_1 = np.absolute( aux_line_1 * 1. / Integral )\n\n return aux_line_1
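\n#====================================================================#\n#====================================================================#\n#====================================================================#\n# Minimal sketch of interpolating a single line profile from a grid.\n# It assumes the grid files are available in the grid directory above;\n# the values are illustrative:\n#\n# >>> DATA_LyaRT = load_Grid_Line( 'Thin_Shell' )\n# >>> x_Arr = convert_lamda_into_x( np.linspace( 1213.0 , 1221.0 , 1000 ) * 1e-10 )\n# >>> line = Interpolate_Lines_Arrays_3D_grid_MCMC( 200.0 , 20.0 , -1.0 , x_Arr , DATA_LyaRT )\n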
#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Interpolate_Lines_Arrays_5D_grid( V_Arr , logNH_Arr , logta_Arr , logEW_Arr , Wi_Arr , x_Arr , Grid_Dictionary ):\n\n lines_Arr = np.zeros( len(V_Arr) * len( x_Arr ) ).reshape( len(V_Arr) , len( x_Arr ) )\n\n for i in range( 0 , len( V_Arr ) ):\n\n lines_Arr[i] = Interpolate_Lines_Arrays_5D_grid_MCMC( V_Arr[i] , logNH_Arr[i] , logta_Arr[i] , logEW_Arr[i] , Wi_Arr[i] , x_Arr , Grid_Dictionary )\n\n return lines_Arr\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Interpolate_Lines_Arrays_5D_grid_MCMC( V_Value , logNH_Value , logta_Value , logEW_Value , Wi_Value , x_Arr , Grid_Dictionary ):\n\n Grid_Line = Grid_Dictionary['Grid']\n\n V_Arr_Grid = Grid_Dictionary['V_Arr']\n x_Arr_Grid = Grid_Dictionary['x_Arr']\n\n Wi_Arr_Grid = Grid_Dictionary['Wi_Arr']\n\n logNH_Arr_Grid = Grid_Dictionary['logNH_Arr']\n logta_Arr_Grid = Grid_Dictionary['logta_Arr']\n logEW_Arr_Grid = Grid_Dictionary['logEW_Arr']\n\n Coor_Arr_list = [ V_Arr_Grid , logNH_Arr_Grid , logta_Arr_Grid , logEW_Arr_Grid , Wi_Arr_Grid ]\n\n Coor_list = [ V_Value , logNH_Value , logta_Value , logEW_Value , Wi_Value ]\n\n aux_line = Linear_ND_interpolator( 5 , Coor_list , Coor_Arr_list , Grid_Line )\n\n aux_line = aux_line[::-1]\n\n x_Arr_Grid = x_Arr_Grid[::-1]\n\n aux_line_1 = np.interp( x_Arr , x_Arr_Grid , aux_line , left=aux_line[0] , right=aux_line[-1] )\n\n Integral = np.trapz( aux_line_1 , x_Arr )\n\n aux_line_1 = np.absolute( aux_line_1 * 1. / Integral )\n\n return aux_line_1\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef pre_treatment_Line_profile_MCMC( Geometry , V_Value , logNH_Value , ta_Value , logEW_Value=None , Wi_Value=None ):\n\n bool1 = np.isfinite( V_Value )\n bool2 = np.isfinite( logNH_Value )\n bool3 = np.isfinite( ta_Value )\n\n Bool_good = bool1 * bool2 * bool3\n\n if Geometry == 'Thin_Shell_Cont':\n\n bool4 = np.isfinite( logEW_Value )\n bool5 = np.isfinite( Wi_Value )\n\n Bool_good = Bool_good * bool4 * bool5\n\n if not Bool_good : return Bool_good\n\n if Geometry in [ 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ]:\n\n if V_Value <= 100.0 and logNH_Value >= 20.5 : Bool_good = False\n\n if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ]:\n\n if V_Value <= 10.0 : Bool_good = False \n if V_Value >= 1000.0 : Bool_good = False \n\n if logNH_Value <= 17.0 : Bool_good = False \n if logNH_Value >= 22.0 : Bool_good = False \n\n if ta_Value <= 10**(-3.75 ) : Bool_good = False \n if ta_Value >= 10**(-0.125) : Bool_good = False \n\n if Geometry in [ 'Thin_Shell_Cont' ]:\n\n if V_Value <= 0.0 : Bool_good = False \n if V_Value >= 1000.0 : Bool_good = False \n\n if logNH_Value <= 17.0 : Bool_good = False \n if logNH_Value >= 21.5 : Bool_good = False \n\n if ta_Value <= 10**(-4.00 ) : Bool_good = False \n if ta_Value >= 10**( 0.0 ) : Bool_good = False \n\n if logEW_Value <= -1. : Bool_good = False\n if logEW_Value >= 3. : Bool_good = False\n
\n if Wi_Value <= 0.01 : Bool_good = False \n if Wi_Value >= 6.0 : Bool_good = False\n \n return Bool_good\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Compute_Inflow_From_Outflow( w_Arr , f_out_Arr ):\n\n w_Lya = 1215.67 * 1e-10\n\n Delta_Arr = w_Arr - w_Lya\n\n f_in_Arr = np.interp( Delta_Arr , -1. * Delta_Arr[::-1] , f_out_Arr[::-1] )\n\n return f_in_Arr\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef RT_Line_Profile_MCMC( Geometry , wavelength_Arr , V_Value , logNH_Value , ta_Value , DATA_LyaRT , logEW_Value=None , Wi_Value=None ):\n\n '''\n Return one and only one Lyman alpha line profile for given outflow properties.\n This function is tailored for running MCMC or PSO analyses.\n\n Parameters\n ----------\n Geometry : string\n The outflow geometry to use. Options: 'Thin_Shell',\n 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ,\n 'Thin_Shell_Cont'.\n\n wavelength_Arr : 1-D sequence of floats\n Array with the wavelength values where the line\n profile is computed. The units are meters, i.e.,\n angstroms * 1.e-10.\n\n V_Value : float\n Value of the expansion velocity of the outflow. The units\n are km/s. \n\n logNH_Value : float\n Value of the logarithm of the outflow neutral hydrogen\n column density. The units of the column density are in c.g.s,\n i.e., cm**-2. \n\n ta_Value : float\n Value of the dust optical depth of the outflow. \n\n DATA_LyaRT : Dictionary\n This dictionary has all the information of the grid.\n This dictionary can be loaded with the function : \n load_Grid_Line, for example:\n\n DATA_LyaRT = load_Grid_Line( 'Thin_Shell' ) \n\n Returns\n -------\n lines_Arr : 1-D sequence of float\n The Lyman alpha line profile.
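\n\n Examples\n --------\n A minimal, illustrative call (the numeric values are arbitrary\n assumptions, not reference results):\n\n >>> w_Arr = np.linspace( 1215.68 - 20 , 1215.68 + 20 , 1000 ) * 1e-10\n >>> DATA_LyaRT = load_Grid_Line( 'Thin_Shell' )\n >>> line_Arr = RT_Line_Profile_MCMC( 'Thin_Shell' , w_Arr , 200.0 , 19.5 , 0.1 , DATA_LyaRT )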
\n '''\n\n Bool_good = pre_treatment_Line_profile_MCMC( Geometry , np.absolute( V_Value ) , logNH_Value , ta_Value , logEW_Value=logEW_Value , Wi_Value=Wi_Value )\n\n x_Arr = convert_lamda_into_x( wavelength_Arr )\n\n if Bool_good :\n logta_Value = np.log10( ta_Value )\n\n if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out']:\n\n line_Arr = Interpolate_Lines_Arrays_3D_grid_MCMC( np.absolute( V_Value ) , logNH_Value , logta_Value , x_Arr , DATA_LyaRT )\n\n if Geometry in [ 'Thin_Shell_Cont' ]:\n\n line_Arr = Interpolate_Lines_Arrays_5D_grid_MCMC( np.absolute( V_Value ) , logNH_Value , logta_Value , logEW_Value , Wi_Value , x_Arr , DATA_LyaRT )\n\n if not Bool_good :\n\n line_Arr = np.ones( len(x_Arr) ) * np.nan\n\n ###########################################\n CORRECT_FLAT_X = True\n if CORRECT_FLAT_X:\n\n tmp_line_Arr = line_Arr * wavelength_Arr **2 \n\n line_Arr = tmp_line_Arr * np.amax( line_Arr ) / np.amax( tmp_line_Arr )\n ###########################################\n\n if V_Value < 0 :\n\n line_Arr = Compute_Inflow_From_Outflow( wavelength_Arr , line_Arr )\n\n return line_Arr\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef pre_treatment_Line_profile( Geometry , V_Arr , logNH_Arr , ta_Arr , logEW_Arr=None , Wi_Arr=None ):\n\n V_Arr = np.atleast_1d( V_Arr )\n logNH_Arr = np.atleast_1d( logNH_Arr )\n ta_Arr = np.atleast_1d( ta_Arr )\n\n V_Arr = V_Arr.astype(float)\n logNH_Arr = logNH_Arr.astype(float)\n ta_Arr = ta_Arr.astype(float)\n\n bool1 = np.isfinite( V_Arr )\n bool2 = np.isfinite( logNH_Arr )\n bool3 = np.isfinite( ta_Arr )\n\n mask_good = bool1 * bool2 * bool3\n\n assert sum( mask_good ) != 0 , 'All the V-logNH-ta combinations are np.nan, -np.inf or np.inf'\n\n for i in range( 0 , len(V_Arr) ):\n\n # logEW_Arr and Wi_Arr are only given for the Thin_Shell_Cont geometry,\n # so pass None entry-wise when they are not provided.\n logEW_i = logEW_Arr[i] if logEW_Arr is not None else None\n Wi_i = Wi_Arr[i] if Wi_Arr is not None else None\n\n tmp_bool = pre_treatment_Line_profile_MCMC( Geometry , V_Arr[i] , logNH_Arr[i] , ta_Arr[i] , logEW_Value=logEW_i , Wi_Value=Wi_i )\n\n mask_good[i] = tmp_bool\n\n return mask_good\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef RT_Line_Profile( Geometry , wavelength_Arr , V_Arr , logNH_Arr , ta_Arr , logEW_Arr=None , Wi_Arr=None ):\n\n '''\n Return the Lyman alpha line profile for given outflow properties.\n \n Parameters\n ----------\n Geometry : string\n The outflow geometry to use. Options: 'Thin_Shell',\n 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ,\n 'Thin_Shell_Cont'.\n \n wavelength_Arr : 1-D sequence of floats\n Array with the wavelength values where the line \n profile is computed. The units are meters, i.e.,\n angstroms * 1.e-10.\n \n V_Arr : 1-D sequence of float \n Array with the expansion velocity of the outflow. The units\n are km/s. \n \n logNH_Arr : 1-D sequence of float\n Array with the logarithm of the outflow neutral hydrogen \n column density. The units of the column density are in c.g.s,\n i.e., cm**-2. \n \n ta_Arr : 1-D sequence of float\n Array with the dust optical depth of the outflow.
\n\n logEW_Arr : optional 1-D sequence of float\n Array with the logarithm of the intrinsic equivalent width.\n Only used (and required) if Geometry == 'Thin_Shell_Cont'.\n\n Wi_Arr : optional 1-D sequence of float\n Array with the intrinsic line width.\n Only used (and required) if Geometry == 'Thin_Shell_Cont'.\n \n .. versionadded:: 0.0.3\n \n Returns\n -------\n lines_Arr : 2-D sequence of float\n The Lyman alpha line profiles. lines_Arr[i] is the line profile \n computed at the wavelengths wavelength_Arr for which V_Arr[i] , \n logNH_Arr[i] , ta_Arr[i].\n '''\n\n assert Geometry in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' , 'Thin_Shell_Cont'] , 'The requested geometry ' + Geometry + ' is not available. The geometries supported are : Thin_Shell , Galactic_Wind , Bicone_X_Slab_In , Bicone_X_Slab_Out , Thin_Shell_Cont'\n\n V_Arr = np.atleast_1d( V_Arr )\n logNH_Arr = np.atleast_1d( logNH_Arr )\n ta_Arr = np.atleast_1d( ta_Arr )\n\n if Geometry == 'Thin_Shell_Cont':\n\n assert not logEW_Arr is None , 'logEW_Arr cannot be None if Geometry == Thin_Shell_Cont'\n assert not Wi_Arr is None , 'Wi_Arr cannot be None if Geometry == Thin_Shell_Cont'\n\n logEW_Arr = np.atleast_1d( logEW_Arr )\n Wi_Arr = np.atleast_1d( Wi_Arr )\n\n x_Arr = convert_lamda_into_x( wavelength_Arr )\n\n lines_Arr = np.zeros( len(V_Arr) * len( x_Arr ) ).reshape( len(V_Arr) , len( x_Arr ) ) * np.nan\n \n mask_good = pre_treatment_Line_profile( Geometry , V_Arr , logNH_Arr , ta_Arr , logEW_Arr=logEW_Arr , Wi_Arr=Wi_Arr )\n\n logta_Arr = np.log10( ta_Arr )\n\n ##############################\n\n DATA_LyaRT = load_Grid_Line( Geometry )\n\n if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ] :\n\n tmp_lines_Arr = Interpolate_Lines_Arrays_3D_grid( V_Arr[ mask_good ] , logNH_Arr[ mask_good ] , logta_Arr[ mask_good ] , x_Arr , DATA_LyaRT )\n\n if Geometry in [ 'Thin_Shell_Cont' ] :\n\n tmp_lines_Arr = Interpolate_Lines_Arrays_5D_grid( V_Arr[ mask_good ] , logNH_Arr[ mask_good ] , logta_Arr[ mask_good ] , logEW_Arr[ mask_good ] , Wi_Arr[ mask_good ] , x_Arr , DATA_LyaRT )\n\n ##############################\n\n lines_Arr[ mask_good ] = tmp_lines_Arr\n\n return lines_Arr\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Print_the_grid_edges():\n\n print( '')\n print( ' Hi,')\n print( '')\n print( ' The expansion velocity V_exp and neutral hydrogen column density logNH are the same in the escape fraction and line profile grids. However, the optical depth of dust tau_a is different.')\n print( '' )\n print( ' V_exp [ km/s ] = [ 0 , 10 , ... , 90 , 100 , 150 , 200 , ... , 950 , 1000 ]')\n print( '')\n print( ' Bicone_X_Slab :')\n print( '')\n print( ' For V_exp < 100 km/s the logNH [ cm**-2 ] = [ 17.0 , 17.25 , ... , 20.25 , 20.5 ]')\n print( ' ')\n print( ' For V_exp >= 100 km/s the logNH [ cm**-2 ] = [ 17.0 , 17.25 , ... , 21.75 , 22.0 ]')\n print( '')\n print( ' Thin_Shell and Galactic_Wind :')\n print( '')\n print( ' logNH [ cm**-2 ] = [ 17.0 , 17.25 , ... , 21.75 , 22.0 ]')\n print( '')\n print( ' ')\n print( ' For the escape fraction : tau_a = [ -3. , -2. 
, -1.5 , -1.0 , -0.75 , -0.5 , -0.25 , -0.0 ]')\n print( ' ')\n print( ' For the line profile : tau_a = [ -0.125 , -0.25 , -0.375 , -0.5 , -0.625 , -0.75 , -0.875 , -1.0 , -1.125 , -1.25 , -1.375 , -1.5 , -1.75 , -2.0 , -2.25 , -2.5 , -2.75 , -3.0 , -3.25 , -3.5 , -3.75 ]')\n print( '')\n print( ' Have a nice day!')\n print( ' El. PSY. CONGROO.')\n print( '')\n\n return\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Test_1( ):\n print( '\\nChecking if all the files are found...',)\n\n print( '\\nSKIPPING',)\n \n #bool_files = Check_if_DATA_files_are_found()\n #\n #print ('Done!')\n #\n #if bool_files :\n # print( ' Every file was found. that is great!')\n #if not bool_files :\n # print( ' Missing files.... Let us download them... ;)')\n #\n # Download_data()\n \n print( '\\n Now that we are sure that the data is downloaded in your machine...')\n\n print( '\\n Let us check every different configuration for computing the escape fraction and the line profiles.')\n \n Geometry_set = [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ]\n \n ML_codes_set = [ 'Tree' , 'Forest' , 'KN' ]\n \n MODE_set = [ 'Parametrization' , 'Raw' , 'Analytic' ]\n \n Algorithm_set = [ 'Intrepolation' , 'Machine_Learning' ]\n \n # First, check that the escape fractions work\n \n N_points = int( 1e4 )\n \n # stay within the grid edges (see Print_the_grid_edges)\n V_Arr = np.random.rand( N_points ) * 900 + 100.0\n logNH_Arr = np.random.rand( N_points ) * 5 + 17.0\n logta_Arr = np.random.rand( N_points ) * 2.25 - 2.5\n \n print( '\\nComputing', N_points , 'random configurations of escape fraction with each algorithms...\\n')\n \n for Geo in Geometry_set:\n \n for Mod in MODE_set :\n \n if not Mod in [ 'Analytic' ]:\n \n for Algo in Algorithm_set:\n \n if Algo in [ 'Intrepolation' , 'Machine_Learning' ]:\n \n if Algo == 'Machine_Learning' :\n \n for machine in ML_codes_set :\n \n print( ' Running : ' , Geo , Mod , Algo , machine , end = '' )\n fff = RT_f_esc( Geo , V_Arr , logNH_Arr , 10**logta_Arr , MODE=Mod , Algorithm=Algo , Machine_Learning_Algorithm=machine)\n assert np.sum( np.isnan( fff ) ) == 0\n print( '--> Success!!')\n \n if Algo != 'Machine_Learning' :\n \n try:\n print( ' Running : ' , Geo , Mod , Algo , end = '' )\n fff = RT_f_esc( Geo , V_Arr , logNH_Arr , 10**logta_Arr , MODE=Mod , Algorithm=Algo )\n assert np.sum( np.isnan( fff ) ) == 0\n print( '--> Success!!')\n \n except:\n print( '--> ERROR. MISMATCH!!')\n \n if Mod in [ 'Analytic' ]:\n \n if Geo not in [ 'Thin_Shell' , 'Galactic_Wind' ] : continue # Analytic mode only supports these geometries\n \n try:\n print( ' Running : ' , Geo , Mod , end = '' )\n fff = RT_f_esc( Geo , V_Arr , logNH_Arr , 10**logta_Arr , MODE=Mod )\n assert np.sum( np.isnan( fff ) ) == 0\n print( '--> Success!!')\n \n except:\n print( '--> ERROR. HUMAN IS DEAD. 
MISMATCH!!')\n \n \n \n N_points = int( 1e3 )\n \n print( '\\nComputing', N_points , 'random configurations of line profile with each algorithms...\\n')\n \n # stay within the grid edges (see Print_the_grid_edges)\n V_Arr = np.random.rand( N_points ) * 850 + 100\n logNH_Arr = np.random.rand( N_points ) * 5 + 17.0\n logta_Arr = np.random.rand( N_points ) * 3.5 - 3.7\n \n wavelength_Arr = np.linspace( 1215.68 - 20 , 1215.68 + 20 , 1000 ) * 1e-10\n \n RUN_TEST_Lines = True\n if RUN_TEST_Lines :\n for Geo in Geometry_set:\n \n print( ' Running : ' , Geo , end = '' )\n\n qq = RT_Line_Profile( Geo , wavelength_Arr , V_Arr , logNH_Arr , 10**logta_Arr )\n assert np.sum( np.isnan( qq ) ) == 0\n print( '--> Success!!')\n\n return\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Test_2( ):\n\n import matplotlib.pyplot as plt\n\n print( '\\n Let us make some plots. This will show you just a glimpse of what LyaRT;Grid can do. Just wait for it...')\n\n # Plot some nice line profiles\n\n print( '\\n Plotting some line profiles...')\n\n wavelength_Arr = np.linspace( 1215.68 - 20 , 1215.68 + 20 , 1000 ) * 1e-10\n\n V_Arr = np.array( [ 10 , 50 , 100 , 200 , 300 ] )\n\n logNH_Arr = np.array( [ 20.0 ] * len( V_Arr ) )\n\n logta_Arr = np.array( [ -1. ] * len( V_Arr ) )\n\n cm = plt.get_cmap( 'rainbow' )\n \n for geo in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ]:\n\n qq = RT_Line_Profile( geo , wavelength_Arr , V_Arr , logNH_Arr , 10.**logta_Arr ) \n\n plt.figure()\n\n ax_ax = plt.subplot(111)\n\n for i in range( 0 ,len( V_Arr ) ):\n\n ax_ax.plot( wavelength_Arr*1e10 , qq[i] , color=cm( i*1./( len(V_Arr) -1 ) ) , label=r'V_exp = '+ str(V_Arr[i]) +' km/s' , lw=2 )\n\n ax_ax.set_title( r'Geometry = ' + geo , size=20 )\n\n ax_ax.set_ylabel( r'Flux [a.u.]' , size=20 )\n ax_ax.set_xlabel( r'Wavelength [AA]' , size=20 )\n\n ax_ax.set_xlim( 1212.5 , 1222.5 )\n\n ax_ax.legend(loc=0)\n\n print( '\\n Plotting some escape fractions...')\n\n logta_Arr = np.linspace( -2 , 0.5 , 20 )\n\n logNH_Arr = [20.0] * len( logta_Arr )\n\n for geo in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab_In' , 'Bicone_X_Slab_Out' ] :\n\n plt.figure()\n\n ax_ax = plt.subplot(111)\n\n for i in range( 0 , len(V_Arr) ):\n\n V_Arr_tmp = [ V_Arr[i] ] * len( logta_Arr )\n\n f_esc = RT_f_esc( geo , V_Arr_tmp , logNH_Arr , 10**logta_Arr )\n \n ax_ax.semilogy( logta_Arr , f_esc , color=cm( i*1./( len(V_Arr) -1 ) ) , label=r'V_exp = '+ str(V_Arr[i]) +' km/s' , lw=2 )
\n\n ax_ax.set_xlabel( r'log tau a' , size=20 )\n ax_ax.set_ylabel( r'f esc Ly alpha ' , size=20 )\n\n ax_ax.set_title( r'Geometry = ' + geo , size=20 )\n\n plt.legend( loc=0 )\n\n plt.show()\n\n return\n#====================================================================#\n#====================================================================#\n#====================================================================#\ndef Test_Installation( Make_Plots=False ):\n\n import warnings\n warnings.filterwarnings(\"ignore\")\n\n Test_1()\n\n if Make_Plots : Test_2()\n\n return\n#====================================================================#\n#====================================================================#\n#====================================================================#\n#\n# CODE FOR THE NEW VERSION !!! YEEEY\n#\n#====================================================================================#\n#====================================================================================#\n#====================================================================================#\ndef convert_gaussian_FWHM_to_sigma( FWHM_Arr ):\n\n '''\n This function computes the sigma of a gaussian from its FWHM.\n\n Parameters\n ----------\n FWHM_Arr : 1-D sequence of float\n Array with the Full Width Half Maximum that you \n want to convert\n\n .. versionadded:: 1.0.1\n\n Returns\n -------\n sigma_Arr : 1-D sequence of float\n The sigma of the gaussians with Full Width Half Maximum FWHM_Arr\n '''\n\n sigma_Arr = FWHM_Arr * 1. / 2.3548\n\n return sigma_Arr\n#====================================================================================#\n#====================================================================================#\n#====================================================================================#\ndef dilute_line_changing_FWHM( wave_Arr , Spec_Arr , FWHM_Arr , same_norm=False ):\n\n '''\n This function dilutes a given spectrum by convolving with a gaussian \n filter.\n\n Parameters\n ----------\n wave_Arr : 1-D sequence of float\n Array with the Wavelength where the spectrum is evaluated.\n Same units as FWHM_Arr. This has to be sorted.\n \n Spec_Arr : 1-D sequence of float\n Arrays with the flux of the spectrum.\n\n FWHM_Arr : 1-D sequence of float\n Array with the Full Width Half Maximum of the gaussian\n to convolve. \n If FWHM_Arr is a single value, it uses the same value across\n the x_Arr range.\n If FWHM is a 1-D sequence, a different value of width of\n the gaussian is used. In this case, the length of this array\n has to be the same as wave_Arr and Spec_Arr.\n\n same_norm : optional bool.\n If true return a line with the same normalization as the input\n\n .. 
versionadded:: 1.0.1\n\n Returns\n -------\n new_Line : 1-D sequence of float\n Spectrum after the convolution\n '''\n\n my_sigma_Arr = convert_gaussian_FWHM_to_sigma( FWHM_Arr )\n\n # The original call here used an undefined gaussian_filter helper.\n # Minimal replacement: convolve each output pixel with its own gaussian\n # kernel, so that a per-pixel (possibly varying) FWHM is supported.\n my_sigma_Arr = np.atleast_1d( my_sigma_Arr ) * np.ones( len( wave_Arr ) )\n\n new_Line = np.zeros( len( wave_Arr ) )\n\n for i in range( 0 , len( wave_Arr ) ):\n\n kernel_Arr = np.exp( -0.5 * ( ( wave_Arr - wave_Arr[i] ) / my_sigma_Arr[i] )**2 )\n\n kernel_Arr = kernel_Arr * 1. / np.trapz( kernel_Arr , wave_Arr )\n\n new_Line[i] = np.trapz( kernel_Arr * Spec_Arr , wave_Arr )\n\n if same_norm:\n\n new_Line = new_Line * np.trapz( Spec_Arr , wave_Arr ) * 1. / np.trapz( new_Line , wave_Arr )\n\n return new_Line\n#====================================================================================#\n#====================================================================================#\n#====================================================================================#\ndef dilute_line( wave_Arr , Spec_Arr , FWHM ):\n\n '''\n This function dilutes a given spectrum by convolving with a gaussian\n filter.\n\n Parameters\n ----------\n wave_Arr : 1-D sequence of float\n Array with the Wavelength where the spectrum is evaluated.\n Same units as FWHM. This has to be sorted and evenly spaced.\n\n Spec_Arr : 1-D sequence of float\n Arrays with the flux of the spectrum.\n\n FWHM : float\n Full Width Half Maximum of the gaussian to convolve,\n in the same units as wave_Arr.\n\n .. versionadded:: 1.0.1\n\n Returns\n -------\n new_Line : 1-D sequence of float\n Spectrum after the convolution\n '''\n\n sigma = convert_gaussian_FWHM_to_sigma( FWHM )\n\n bin_size = wave_Arr[1] - wave_Arr[0] \n\n new_Line = gaussian_filter1d( Spec_Arr , sigma * 1. / bin_size )\n\n return new_Line\n#====================================================================================#\n#====================================================================================#\n#====================================================================================#\ndef bin_one_line( wave_Arr_line , Line_Prob_Arr , new_wave_Arr , Bin , same_norm=False ):\n\n RES = 100\n\n binned_line = np.zeros( len( new_wave_Arr) )\n\n for i in range( 0 , len( new_wave_Arr) ) :\n\n wave_low = new_wave_Arr[i] - Bin*0.5\n wave_top = new_wave_Arr[i] + Bin*0.5\n\n High_res_wave_Arr = np.linspace( wave_low , wave_top , RES )\n\n High_res_line_in_bin = np.interp( High_res_wave_Arr , wave_Arr_line , Line_Prob_Arr , left=Line_Prob_Arr[0] , right=Line_Prob_Arr[-1] )\n\n binned_line[i] = np.mean( High_res_line_in_bin )\n\n if same_norm:\n\n I_init = np.trapz( Line_Prob_Arr , wave_Arr_line )\n\n I_pix = np.trapz( binned_line , new_wave_Arr )\n\n binned_line = binned_line * I_init * 1. / I_pix\n\n return binned_line
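\n#====================================================================================#\n#====================================================================================#\n#====================================================================================#\n# Minimal sketch of the dilute + rebin pipeline (all numeric values are\n# illustrative assumptions):\n#\n# >>> w_Arr = np.arange( 4000.0 , 4100.0 , 0.1 )\n# >>> spec_Arr = np.exp( -0.5 * ( ( w_Arr - 4050.0 ) / 0.5 )**2 )\n# >>> diluted_Arr = dilute_line( w_Arr , spec_Arr , 1.0 )\n# >>> new_w_Arr = np.arange( 4000.0 , 4100.0 , 1.0 )\n# >>> binned_Arr = bin_one_line( w_Arr , diluted_Arr , new_w_Arr , 1.0 , same_norm=True )\n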
#====================================================================================#\n#====================================================================================#\n#====================================================================================#\ndef plot_a_rebinned_line( new_wave_Arr , binned_line , Bin ):\n\n DD = Bin * 1e-10\n\n XX_Arr = np.zeros( len( new_wave_Arr ) * 2 )\n YY_Arr = np.zeros( len( new_wave_Arr ) * 2 )\n\n for i in range( 0 , len( new_wave_Arr ) ):\n\n i_0 = 2 * i\n i_1 = 2 * i + 1\n\n XX_Arr[ i_0 ] = new_wave_Arr[i] - 0.5 * Bin + DD\n XX_Arr[ i_1 ] = new_wave_Arr[i] + 0.5 * Bin\n\n YY_Arr[ i_0 ] = binned_line[i]\n YY_Arr[ i_1 ] = binned_line[i]\n\n return XX_Arr , YY_Arr\n#====================================================================================#\n#====================================================================================#\n#====================================================================================#\ndef Add_noise_to_line( wave_Arr_line , Line_Prob_Arr , SN , same_norm=False ):\n\n mask = Line_Prob_Arr > 0.05 * np.amax( Line_Prob_Arr )\n\n SS = np.mean( Line_Prob_Arr[ mask ] )\n\n Noise_level = SS * 1. / SN \n\n Noise_Arr = np.random.randn( len(Line_Prob_Arr) ) * Noise_level \n\n Noisy_Line_Arr = Line_Prob_Arr + Noise_Arr\n\n if same_norm :\n\n I_init = np.trapz( Line_Prob_Arr , wave_Arr_line )\n\n I_noise = np.trapz( Noisy_Line_Arr , wave_Arr_line )\n\n Noisy_Line_Arr = Noisy_Line_Arr * I_init * 1. / I_noise \n\n return Noisy_Line_Arr , Noise_Arr\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef gaus( x_Arr , A , mu , sigma ):\n\n return A * np.exp( -1*( x_Arr - mu )**2 * 1. / ( 2 * sigma**2 ) )\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef Signal_to_noise_estimator( w_Arr , Line_Arr , w_line ):\n\n popt , pcov = curve_fit( gaus , w_Arr , Line_Arr , p0=[ 1e-17 , w_line , 5.0 ] )\n\n sigma_line = popt[2]\n\n mask_line = ( w_Arr > w_line - 2.30*sigma_line ) * ( w_Arr < w_line + 2.30*sigma_line )\n\n RMS = np.sqrt( np.mean( Line_Arr[ ~mask_line ]**2 ) )\n\n SS = np.mean( Line_Arr[ mask_line ] )\n\n SNR = SS * 1. / RMS\n\n return SNR
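\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\n# Minimal sketch: add noise to a line and estimate its S/N back. It\n# continues the illustrative arrays from the sketch above; note that the\n# p0 guess inside Signal_to_noise_estimator assumes a flux scale, so the\n# fitted result is indicative only:\n#\n# >>> noisy_Arr , noise_Arr = Add_noise_to_line( new_w_Arr , binned_Arr , 10.0 )\n# >>> SNR = Signal_to_noise_estimator( new_w_Arr , noisy_Arr , 4050.0 )\n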
##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef generate_a_obs_line( z_f , V_f , logNH_f , ta_f , DATA_LyaRT , Geometry , logEW_f=None , Wi_f=None ):\n\n w_Lya = 1215.67\n\n w_rest_Arr = np.linspace( 1215.68 - 30 , 1215.68 + 30 , 2000 )\n\n line_Arr = RT_Line_Profile_MCMC( Geometry , w_rest_Arr*1e-10, V_f , logNH_f , ta_f , DATA_LyaRT , logEW_Value=logEW_f , Wi_Value=Wi_f )\n\n wavelength_Arr = ( 1 + z_f ) * w_rest_Arr\n\n return w_rest_Arr , wavelength_Arr , line_Arr\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef generate_a_REAL_line_Noise_w( z_f , V_f , logNH_f , ta_f , F_line_f , logEW_f , Wi_f , Noise_w_Arr , Noise_Arr , FWHM_f , PIX_f , w_min , w_max , DATA_LyaRT , Geometry ):\n\n w_rest_Arr , wavelength_Arr , line_Arr = generate_a_obs_line( z_f , V_f , logNH_f , ta_f , DATA_LyaRT , Geometry , logEW_f=logEW_f , Wi_f=Wi_f )\n\n diluted_Arr = dilute_line( wavelength_Arr , line_Arr , FWHM_f )\n\n wave_pix_Arr = np.arange( w_min , w_max , PIX_f )\n\n pixled_Arr = bin_one_line( wavelength_Arr , diluted_Arr , wave_pix_Arr , PIX_f )\n\n F_lambda_Arr = pixled_Arr * F_line_f * 1. / np.trapz( pixled_Arr , wave_pix_Arr )\n\n # evaluate the noise curve at the pixel wavelengths (the original code\n # interpolated at the flux values, which mixed up the axes)\n noise_in_my_w_Arr = np.interp( wave_pix_Arr , Noise_w_Arr , Noise_Arr )\n\n noise_Arr = np.random.randn( len( pixled_Arr ) ) * noise_in_my_w_Arr\n\n noisy_Line_Arr = F_lambda_Arr + noise_Arr\n\n # Other useful things :\n\n dic = {}\n\n dic[ 'w_rest' ] = w_rest_Arr\n dic[ 'w_obs' ] = wavelength_Arr\n dic[ 'Intrinsic' ] = line_Arr\n dic[ 'Diluted' ] = diluted_Arr\n dic[ 'Pixelated' ] = F_lambda_Arr\n dic[ 'Noise' ] = noise_Arr\n\n return wave_pix_Arr , noisy_Line_Arr , dic\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef Generate_a_real_line( z_t , V_t, log_N_t, t_t, F_t, log_EW_t, W_t , PNR_t, FWHM_t, PIX_t, DATA_LyaRT, Geometry ):\n\n w_min = ( 1215.67 - 25.5 ) * ( 1 + z_t )\n w_max = ( 1215.67 + 25.5 ) * ( 1 + z_t )\n\n w_Noise_Arr = np.linspace( w_min , w_max , 10 )\n\n tmp_Noise_Arr = w_Noise_Arr * 0.0\n\n w_Arr , f_noiseless_Arr , _ = generate_a_REAL_line_Noise_w( z_t, V_t, log_N_t, t_t , F_t , log_EW_t , W_t, w_Noise_Arr, tmp_Noise_Arr , FWHM_t , PIX_t , w_min, w_max, DATA_LyaRT, Geometry )\n\n noise_Amplitude = np.amax( f_noiseless_Arr ) * 1. / PNR_t\n\n noise_Arr = np.random.randn( len(w_Arr) ) * noise_Amplitude\n\n noise_Amplitude_Arr = np.ones( len( w_Arr ) ) * noise_Amplitude\n\n f_Arr = f_noiseless_Arr + noise_Arr\n\n return w_Arr , f_Arr , noise_Amplitude_Arr
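\n#######################################################################\n#######################################################################\n#######################################################################\n# Minimal sketch: generate a mock observed line with Generate_a_real_line.\n# Every numeric value below is an illustrative assumption ( z = 0.5 ,\n# V = 200 km/s , logNH = 20 , tau_a = 0.1 , line flux 1e-17 , logEW = 1.5 ,\n# intrinsic width 0.5 , peak-to-noise ratio 15 , FWHM = 0.5 and pixel\n# size 0.25 in the wavelength units of the grids):\n#\n# >>> DATA_LyaRT = load_Grid_Line( 'Thin_Shell_Cont' )\n# >>> w_Arr , f_Arr , s_Arr = Generate_a_real_line( 0.5 , 200.0 , 20.0 , 0.1 , 1e-17 , 1.5 , 0.5 , 15.0 , 0.5 , 0.25 , DATA_LyaRT , 'Thin_Shell_Cont' )\n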
#######################################################################\n#######################################################################\n#######################################################################\ndef Define_wavelength_for_NN( Delta_min=-18.5 , Delta_max=18.5 , Nbins_tot=1000 , Denser_Center=True ):\n\n if not Denser_Center:\n \n rest_w_Arr = np.linspace( 1215.67+Delta_min , 1215.67+Delta_max , Nbins_tot )\n \n else :\n \n Delta_core = 4.0\n \n ratio_bin_size_core = 5\n \n L_core = 2*Delta_core\n L_wing = Delta_max - Delta_min - L_core\n \n ratio_Ls = L_core * 1. / L_wing\n \n N_c = ratio_bin_size_core * ratio_Ls * Nbins_tot / ( 1 + ratio_bin_size_core * ratio_Ls )\n \n N_w = Nbins_tot - N_c\n \n N_c = int( N_c )\n N_w = int( N_w )\n \n N_w = int( N_w * 0.5 ) * 2\n \n while N_c+N_w<Nbins_tot : N_c += 1\n \n #print( N_c , N_w )\n \n Delta_c = L_core * 1. / N_c\n \n low_wing = np.linspace( Delta_min , -Delta_core-Delta_c , int( N_w / 2 ) )\n top_wing = np.linspace( Delta_core+Delta_c , Delta_max , int( N_w / 2 ) )\n \n core = np.linspace( -Delta_core , Delta_core , N_c )\n \n rest_w_Arr = np.hstack([ low_wing , core , top_wing ]) + 1215.67\n\n return rest_w_Arr\n#######################################################################\n#######################################################################\n#######################################################################\ndef Treat_A_Line_To_NN_Input( w_Arr , f_Arr , PIX , FWHM , Delta_min=-18.5 , Delta_max=18.5 , Nbins_tot=1000 , Denser_Center=True , normed=False, scaled=False ):\n\n rest_w_Arr = Define_wavelength_for_NN( Delta_min=Delta_min , Delta_max=Delta_max , Nbins_tot=Nbins_tot , Denser_Center=Denser_Center ) \n\n w_r_i_Arr , Delta_r_i_Arr , f_r_i_Arr , z_max_i = NN_convert_Obs_Line_to_proxy_rest_line( w_Arr, f_Arr, normed=normed , scaled=scaled)\n\n NN_line = np.interp( rest_w_Arr , w_r_i_Arr , f_r_i_Arr , left=f_r_i_Arr[0] , right=f_r_i_Arr[-1] )\n\n NN_line = NN_line * 1. 
/ np.amax( NN_line )\n\n INPUT = np.array( [ np.hstack( ( NN_line , z_max_i , np.log10( FWHM ) , np.log10( PIX ) ) ) ] )\n\n return rest_w_Arr , NN_line , z_max_i , INPUT \n#######################################################################\n#######################################################################\n#######################################################################\ndef Generate_a_line_for_training( z_t , V_t, log_N_t, t_t, F_t, log_EW_t, W_t, PNR_t, FWHM_t, PIX_t, DATA_LyaRT, Geometry, normed=False, scaled=False , Delta_min = -18.5 , Delta_max=18.5 , Denser_Center=True , Nbins_tot=1000 ):\n\n w_t_Arr , f_t_Arr , Noise_t_Arr = Generate_a_real_line( z_t , V_t, log_N_t, t_t, F_t, log_EW_t, W_t , PNR_t, FWHM_t, PIX_t, DATA_LyaRT, Geometry )\n\n rest_w_Arr , train_line , z_max_i , INPUT = Treat_A_Line_To_NN_Input( w_t_Arr , f_t_Arr , PIX_t , FWHM_t , Delta_min=Delta_min , Delta_max=Delta_max , Nbins_tot=Nbins_tot , Denser_Center=Denser_Center , normed=normed , scaled=scaled )\n\n return rest_w_Arr , train_line , z_max_i , INPUT\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef generate_a_REAL_line_SN( z_f , V_f , logNH_f , ta_f , F_line_f , SN_f , FWHM_f , PIX_f , w_min , w_max , DATA_LyaRT , Geometry ):\n\n w_rest_Arr , wavelength_Arr , line_Arr = generate_a_obs_line( z_f , V_f , logNH_f , ta_f , DATA_LyaRT , Geometry )\n\n diluted_Arr = dilute_line( wavelength_Arr , line_Arr , FWHM_f )\n\n wave_pix_Arr = np.arange( w_min , w_max , PIX_f )\n\n pixled_Arr = bin_one_line( wavelength_Arr , diluted_Arr , wave_pix_Arr , PIX_f )\n\n pixled_Arr = pixled_Arr * F_line_f * 1. 
/ np.trapz( pixled_Arr , wave_pix_Arr )\n\n noisy_Line_Arr , noise_Arr = Add_noise_to_line( wave_pix_Arr , pixled_Arr , SN_f , same_norm=False )\n\n # Other usful things :\n\n dic = {}\n\n dic[ 'w_rest' ] = w_rest_Arr\n dic[ 'w_obs' ] = wavelength_Arr\n dic[ 'Intrinsic' ] = line_Arr\n dic[ 'Diluted' ] = diluted_Arr\n dic[ 'Pixelated' ] = pixled_Arr\n\n return wave_pix_Arr , noisy_Line_Arr , dic\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\n\n## MCMC section\n\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef log_likelihood( w_obs_Arr , f_obs_Arr , s_obs_Arr , w_model_Arr , f_model_Arr ):\n\n f_my_Arr = np.interp( w_obs_Arr , w_model_Arr , f_model_Arr )\n\n mask = np.isfinite( f_my_Arr )\n\n #print( f_my_Arr )\n\n sigma2 = s_obs_Arr ** 2\n\n cc = 1.0\n\n f_my_Arr = f_my_Arr[ mask ]\n sigma2 = sigma2[ mask ]\n my_f_obs_Arr = f_obs_Arr[ mask ]\n\n log_like = -0.5 * np.sum( cc *( my_f_obs_Arr - f_my_Arr ) ** 2 / sigma2 + np.log(sigma2))\n\n return log_like\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef Prior_f( theta ):\n\n log_V , log_N , log_t , redshift = theta\n\n log_V_min = -1. \n log_V_max = 3. \n\n log_N_min = 17.0 \n log_N_max = 21.5\n\n log_t_min = -3.75\n log_t_max = -0.125\n\n z_min = 0.0\n z_max = 100.0\n\n #if log_V < np.log10( 40 ) : log_N_max = 20.5\n \n if log_V < log_V_min or log_V > log_V_max : return False\n if log_N < log_N_min or log_N > log_N_max : return False\n if log_t < log_t_min or log_t > log_t_max : return False\n\n if redshift < z_min or redshift > z_max : return False\n\n return True\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef Prior_f_5( theta ):\n\n #log_V , log_N , log_t , redshift , log_F , log_EW , Wi = theta\n log_V , log_N , log_t , redshift , log_EW , Wi = theta\n\n log_V_min = -1. \n log_V_max = 3. 
\n\n log_N_min = 17.0 \n log_N_max = 21.5\n\n log_t_min = -4.00\n log_t_max = 0.000\n\n #log_F_min = -22.00\n #log_F_max = -3.000\n\n log_EW_min = -1.\n log_EW_max = 3.\n\n Wi_min = 0.01\n Wi_max = 6.\n\n z_min = 0.0\n z_max = 7.0\n\n #if log_V < np.log10( 40 ) : log_N_max = 20.5\n \n if log_V < log_V_min or log_V > log_V_max : return False\n if log_N < log_N_min or log_N > log_N_max : return False\n if log_t < log_t_min or log_t > log_t_max : return False\n #if log_F < log_F_min or log_F > log_F_max : return False\n if log_EW < log_EW_min or log_EW > log_EW_max : return False\n if Wi < Wi_min or Wi > Wi_max : return False\n\n if redshift < z_min or redshift > z_max : return False\n\n return True\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef log_likeliehood_of_model_5( theta , w_obs_Arr , f_obs_Arr , s_obs_Arr , FWHM, PIX, w_min, w_max, DATA_LyaRT, Geometry , z_in , FORCE_z=False ):\n\n if not Prior_f_5( theta ):\n return -np.inf\n\n log_V , log_N , log_t , redshift , log_EW , Wi = theta\n\n if not z_in is None and FORCE_z :\n if redshift < z_in[0] : return -np.inf \n if redshift > z_in[1] : return -np.inf \n\n V_f = 10 ** log_V\n t_f = 10 ** log_t\n\n FF = 1.\n\n w_model_Arr , f_model_Arr , _ = generate_a_REAL_line_Noise_w( redshift, V_f, log_N, t_f, FF , log_EW , Wi , w_obs_Arr , s_obs_Arr*0.0, FWHM, PIX, w_min, w_max, DATA_LyaRT, Geometry )\n\n f_model_Arr = f_model_Arr * 1. / np.amax( f_model_Arr ) \n\n f_obs_Arr = f_obs_Arr * 1. / np.amax( f_obs_Arr )\n s_obs_Arr = s_obs_Arr * 1. / np.amax( f_obs_Arr )\n\n log_like = log_likelihood( w_obs_Arr , f_obs_Arr , s_obs_Arr , w_model_Arr , f_model_Arr )\n\n return log_like\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef init_walkers_5( N_walkers , N_dim , log_V_in , log_N_in , log_t_in , z_in , log_E_in , W_in ):\n\n init_V_Arr = np.random.rand( N_walkers ) * ( log_V_in[1] - log_V_in[0] ) + log_V_in[0]\n init_N_Arr = np.random.rand( N_walkers ) * ( log_N_in[1] - log_N_in[0] ) + log_N_in[0]\n init_t_Arr = np.random.rand( N_walkers ) * ( log_t_in[1] - log_t_in[0] ) + log_t_in[0]\n init_E_Arr = np.random.rand( N_walkers ) * ( log_E_in[1] - log_E_in[0] ) + log_E_in[0]\n init_z_Arr = np.random.rand( N_walkers ) * ( z_in[1] - z_in[0] ) + z_in[0]\n init_W_Arr = np.random.rand( N_walkers ) * ( W_in[1] - W_in[0] ) + W_in[0]\n\n Theta_0 = np.zeros( N_walkers * N_dim ).reshape( N_walkers , N_dim )\n\n Theta_0[ : , 0 ] = init_V_Arr\n Theta_0[ : , 1 ] = init_N_Arr\n Theta_0[ : , 2 ] = init_t_Arr\n Theta_0[ : , 3 ] = init_z_Arr\n Theta_0[ : , 4 ] = init_E_Arr\n Theta_0[ : , 5 ] = init_W_Arr\n\n for i in range( 0 , N_walkers ) :\n\n theta = Theta_0[i]\n\n while not Prior_f_5( theta ) :\n\n Theta_0[ i , 0 ] = np.random.rand( ) * ( log_V_in[1] - log_V_in[0] ) + log_V_in[0]\n Theta_0[ i , 1 ] = np.random.rand( ) * ( log_N_in[1] - log_N_in[0] ) + log_N_in[0] \n Theta_0[ i , 2 ] = np.random.rand( ) * ( log_t_in[1] - log_t_in[0] ) + log_t_in[0] \n Theta_0[ i , 3 ] = np.random.rand( ) * ( z_in[1] - z_in[0] ) + z_in[0]\n Theta_0[ i , 4 ] = np.random.rand( ) * ( log_E_in[1] - log_E_in[0] ) + log_E_in[0] \n Theta_0[ i , 5 ] = 
np.random.rand( ) * ( W_in[1] - W_in[0] ) + W_in[0] \n\n theta = Theta_0[i]\n\n return Theta_0\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef MCMC_get_region_6D( MODE , w_tar_Arr , f_tar_Arr , s_tar_Arr , FWHM , PIX , DATA_LyaRT , Geometry , Geometry_Mode='Outflow'):\n\n if MODE == 'PSO' :\n\n n_particles = 400\n n_iters = 100\n\n cost , pos = PSO_Analysis( w_tar_Arr , f_tar_Arr , FWHM , PIX , DATA_LyaRT , Geometry , n_particles , n_iters )\n\n log_V_in = [ 0.999 * pos[1] , 1.001 * pos[1] ]\n log_N_in = [ 0.999 * pos[2] , 1.001 * pos[2] ]\n log_t_in = [ 0.999 * pos[3] , 1.001 * pos[3] ]\n log_E_in = [ 0.999 * pos[4] , 1.001 * pos[4] ]\n\n z_in = [ 0.999 * pos[0] , 1.001 * pos[0] ]\n W_in = [ 0.999 * 10 ** pos[5] , 1.001 * 10 ** pos[5] ]\n\n Best = [ pos[0] , pos[1] , pos[2] , pos[3] , pos[4] , np.log10( pos[5] ) ]\n\n if MODE == 'DNN':\n\n machine_data = Load_NN_model( Geometry_Mode )\n\n loaded_model = machine_data['Machine']\n\n w_rest_Machine_Arr = machine_data['w_rest']\n\n _ , _ , log_V_sol_Arr , log_N_sol_Arr , log_t_sol_Arr , z_sol_Arr , log_E_sol_Arr , log_W_sol_Arr = NN_measure( w_tar_Arr , f_tar_Arr , s_tar_Arr , FWHM , PIX , loaded_model , w_rest_Machine_Arr , N_iter=1000 )\n\n log_V_in = [ np.percentile( log_V_sol_Arr , 5 ) , np.percentile( log_V_sol_Arr , 95 ) ]\n log_N_in = [ np.percentile( log_N_sol_Arr , 5 ) , np.percentile( log_N_sol_Arr , 95 ) ]\n log_t_in = [ np.percentile( log_t_sol_Arr , 5 ) , np.percentile( log_t_sol_Arr , 95 ) ]\n log_E_in = [ np.percentile( log_E_sol_Arr , 5 ) , np.percentile( log_E_sol_Arr , 95 ) ]\n log_W_in = [ np.percentile( log_W_sol_Arr , 5 ) , np.percentile( log_W_sol_Arr , 95 ) ]\n\n z_in = [ np.percentile( z_sol_Arr , 5 ) , np.percentile( z_sol_Arr , 95 ) ]\n\n W_in = 10**np.array( log_W_in )\n\n Best = [ np.percentile( z_sol_Arr , 50 ) , np.percentile( log_V_sol_Arr , 50 ) ,\n np.percentile( log_N_sol_Arr , 50 ) ,\n np.percentile( log_t_sol_Arr , 50 ) ,\n np.percentile( log_E_sol_Arr , 50 ) ,\n np.percentile( log_W_sol_Arr , 50 ) ]\n\n if not MODE in [ 'PSO' , 'DNN' ] :\n\n log_V_in = None\n log_N_in = None\n log_t_in = None\n log_E_in = None\n W_in = None\n\n w_f_max = w_tar_Arr[ f_tar_Arr==np.amax(f_tar_Arr) ]\n\n z_f_max = w_f_max / 1215.67 - 1.\n\n z_in = [ z_f_max*0.99 , z_f_max*1.01 ]\n\n Best = [ z_f_max , None , None , None , None , None ]\n\n return log_V_in , log_N_in , log_t_in , log_E_in , W_in , z_in , Best\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef MCMC_Analysis_sampler_5( w_target_Arr , f_target_Arr , s_target_Arr , FWHM , N_walkers , N_burn , N_steps , Geometry , DATA_LyaRT , log_V_in=None , log_N_in=None , log_t_in=None , z_in=None , log_E_in=None , W_in=None , progress=True , FORCE_z=False ):\n\n N_dim = 6\n\n if log_V_in is None : log_V_in = [ 1. , 3. ]\n if log_N_in is None : log_N_in = [ 17. , 22. ]\n if log_t_in is None : log_t_in = [ -0.2 , -3. ]\n if log_E_in is None : log_E_in = [ -1. , 3. ]\n if z_in is None : z_in = [ 0.0 , 10. ]\n if W_in is None : W_in = [ 0.01 , 6. 
]\n\n Theta_0 = init_walkers_5( N_walkers , N_dim , log_V_in , log_N_in , log_t_in , z_in , log_E_in , W_in )\n\n PIX = w_target_Arr[1] - w_target_Arr[0]\n\n w_min = np.amin( w_target_Arr )\n w_max = np.amax( w_target_Arr )\n\n my_args = ( w_target_Arr , f_target_Arr , s_target_Arr , FWHM, PIX, w_min, w_max, DATA_LyaRT, Geometry , z_in , FORCE_z )\n\n if progress : print( 'defining samples' )\n\n sampler = emcee.EnsembleSampler( N_walkers , N_dim, log_likeliehood_of_model_5 , args=my_args )\n\n if progress : print( 'burning in' )\n\n state = sampler.run_mcmc( Theta_0 , N_burn , progress=progress )\n sampler.reset()\n\n if progress : print( 'Running main MCMC' )\n\n sampler.run_mcmc(state, N_steps , progress=progress )\n\n if progress : print( 'Done' )\n\n return sampler\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef get_solutions_from_sampler( sampler , N_walkers , N_burn , N_steps , Q_Arr ):\n\n chains = sampler.get_chain()\n\n print( 'chains_shape = ' , chains.shape )\n\n log_probs = sampler.get_log_prob()\n\n print( 'log_probs_shape = ' , log_probs.shape )\n\n N_dim = len( chains[0,0] )\n\n flat_samples = np.zeros( N_walkers * N_steps * N_dim ).reshape( N_walkers * N_steps , N_dim )\n\n flat_log_prob = log_probs.ravel()\n\n for i in range( 0 , N_dim ):\n \n flat_samples[ : , i ] = chains[ : , : , i ].ravel()\n\n print( 'flat_samples shape = ' , flat_samples.shape )\n\n print( 'flat_log_prob shape = ' , flat_log_prob.shape )\n\n N_Q = len( Q_Arr )\n \n matrix_sol = np.zeros( N_dim * N_Q ).reshape( N_dim , N_Q )\n \n #mask_log = flat_log_prob > np.percentile( flat_log_prob , 50 )\n\n #flat_samples = flat_samples[ mask_log ]\n\n for i in range( 0 , N_dim ):\n for j in range( 0 , N_Q ):\n\n #bool_1 = flat_samples[ : , i ] > np.percentile( flat_samples[ : , i ] , 5 ) \n #bool_2 = flat_samples[ : , i ] < np.percentile( flat_samples[ : , i ] , 95 ) \n\n #mask_per = bool_1 * bool_2\n\n #matrix_sol[i,j] = np.percentile( flat_samples[ : , i ][ mask_per ] , Q_Arr[j] )\n matrix_sol[i,j] = np.percentile( flat_samples[ : , i ] , Q_Arr[j] )\n\n return matrix_sol , flat_samples\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef get_solutions_from_sampler_mean( sampler , N_walkers , N_burn , N_steps ):\n\n chains = sampler.get_chain()\n\n N_dim = len( chains[0,0] )\n\n flat_samples = np.zeros( N_walkers * N_steps * N_dim ).reshape( N_walkers * N_steps , N_dim )\n\n for i in range( 0 , N_dim ):\n \n flat_samples[ : , i ] = chains[ : , : , i ].ravel()\n\n #N_Q = len( Q_Arr )\n \n matrix_sol = np.zeros( N_dim )\n \n for i in range( 0 , N_dim ):\n \n matrix_sol[i] = np.mean( flat_samples[ : , i ] )\n\n return matrix_sol , flat_samples\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef get_solutions_from_sampler_peak( sampler , N_walkers , N_burn , N_steps , N_hist_steps ):\n\n chains = sampler.get_chain()\n\n N_dim = len( chains[0,0] )\n\n flat_samples 
= np.zeros( N_walkers * N_steps * N_dim ).reshape( N_walkers * N_steps , N_dim )\n\n    for i in range( 0 , N_dim ):\n\n        flat_samples[ : , i ] = chains[ : , : , i ].ravel()\n\n    #N_Q = len( Q_Arr )\n\n    matrix_sol = np.zeros( N_dim )\n\n    for i in range( 0 , N_dim ):\n\n        #matrix_sol[i] = np.mean( flat_samples[ : , i ] )\n\n        H_1 , edges_1 = np.histogram( flat_samples[ : , i ] , N_hist_steps )\n\n        H_1 = H_1 * 1. / np.sum( H_1 )\n\n        centers_1 = 0.5 * ( edges_1[:-1] + edges_1[1:] )\n\n        tmp_edges = centers_1[ H_1 > 0.01 ]\n\n        v_min = np.amin( tmp_edges )\n        v_max = np.amax( tmp_edges )\n\n        H_2 , edges_2 = np.histogram( flat_samples[ : , i ] , N_hist_steps , range=[ v_min , v_max ] )\n\n        centers = 0.5 * ( edges_2[:-1] + edges_2[1:] )\n\n        matrix_sol[i] = centers[ H_2==np.amax(H_2) ][0]\n\n    return matrix_sol , flat_samples\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef get_solutions_from_flat_chain( flat_chains , Q_Arr ):\n\n    N_dim = flat_chains.shape[1]\n\n    N_Q = len( Q_Arr )\n\n    matrix_sol = np.zeros( N_dim * N_Q ).reshape( N_dim , N_Q )\n\n    for i in range( 0 , N_dim ):\n        for j in range( 0 , N_Q ):\n\n            matrix_sol[i,j] = np.percentile( flat_chains[ : , i ] , Q_Arr[j] )\n\n    return matrix_sol\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\n\n# NN\n\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef Load_NN_model( Mode , iteration=1 ):\n\n    this_dir, this_filename = os.path.split(__file__)\n\n    if Mode == 'Inflow' : my_str = 'INFLOWS'\n    if Mode == 'Outflow': my_str = 'OUTFLOW'\n\n    machine_name = 'nn_'+my_str+'_N_2500000_Npix_1000_Dl_-18.5_18.5_Dc_True_nor_False_sca_True_256_256_256_256_256_it_'+str(iteration)+'.sav'\n\n    extra_dir = '/DATA/'\n\n    machine_data = pickle.load(open( this_dir + extra_dir + machine_name , 'rb'))\n\n    return machine_data\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef NN_convert_Obs_Line_to_proxy_rest_line( w_obs_Arr , f_obs_Arr , s_obs_Arr=None , normed=False , scaled=False ):\n\n    w_Lya = 1215.67\n\n    w_obs_max = w_obs_Arr[ f_obs_Arr == np.amax( f_obs_Arr ) ]\n\n    z_max = w_obs_max * 1. / w_Lya - 1.\n\n    ########\n    z_max = np.atleast_1d( z_max )\n\n    z_max = z_max[0]\n    #######\n    #print( 'z_max = ' , z_max )\n\n    w_rest_Arr = w_obs_Arr * 1. / ( 1 + z_max )\n\n    Delta_rest_Arr = w_rest_Arr - w_Lya\n\n    if normed :\n\n        II = np.trapz( f_obs_Arr , w_rest_Arr )\n\n        f_rest_Arr = f_obs_Arr / II\n\n        if not s_obs_Arr is None :\n            s_rest_Arr = s_obs_Arr / II\n\n    if scaled :\n\n        f_max = np.amax( f_obs_Arr )\n\n        f_rest_Arr = f_obs_Arr * 1. / f_max\n\n        if not s_obs_Arr is None :\n            s_rest_Arr = s_obs_Arr * 1. / f_max\n\n    if not scaled and not normed:\n\n        f_rest_Arr = np.copy( f_obs_Arr )\n\n        if not s_obs_Arr is None :\n            s_rest_Arr = np.copy( s_obs_Arr )\n\n    if s_obs_Arr is None :\n        return w_rest_Arr , Delta_rest_Arr , f_rest_Arr , z_max\n\n    if not s_obs_Arr is None :\n        return w_rest_Arr , Delta_rest_Arr , f_rest_Arr , z_max , s_rest_Arr\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef NN_generate_random_outflow_props( N_walkers , log_V_in , log_N_in , log_t_in , Allow_Inflows=True ):\n\n    init_log_V_Arr = np.random.rand( N_walkers ) * ( log_V_in[1] - log_V_in[0] ) + log_V_in[0]\n    init_log_N_Arr = np.random.rand( N_walkers ) * ( log_N_in[1] - log_N_in[0] ) + log_N_in[0]\n    init_log_t_Arr = np.random.rand( N_walkers ) * ( log_t_in[1] - log_t_in[0] ) + log_t_in[0]\n\n    for i in range( 0 , N_walkers ) :\n\n        theta = [ init_log_V_Arr[i] , init_log_N_Arr[i] , init_log_t_Arr[i] , 1.0 ]\n\n        while not Prior_f( theta ) :\n\n            init_log_V_Arr[ i ] = np.random.rand( ) * ( log_V_in[1] - log_V_in[0] ) + log_V_in[0]\n            init_log_N_Arr[ i ] = np.random.rand( ) * ( log_N_in[1] - log_N_in[0] ) + log_N_in[0]\n            init_log_t_Arr[ i ] = np.random.rand( ) * ( log_t_in[1] - log_t_in[0] ) + log_t_in[0]\n\n            theta = [ init_log_V_Arr[i] , init_log_N_Arr[i] , init_log_t_Arr[i] , 1.0 ]\n\n    if Allow_Inflows:\n\n        V_sign = np.sign( np.random.rand( N_walkers ) - 0.5 )\n\n    else:\n        V_sign = np.ones( N_walkers )\n\n    init_V_Arr = 10**init_log_V_Arr * V_sign\n\n    return init_V_Arr , init_log_N_Arr , init_log_t_Arr\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef NN_generate_random_outflow_props_5D( N_walkers , log_V_in , log_N_in , log_t_in , log_E_in , log_W_in , MODE='Outflow' ):\n\n    # MODE = Outflow , Inflow , Mixture\n\n    if MODE not in [ 'Outflow' , 'Inflow' , 'Mixture' ]:\n\n        print( ' -> Wrong MODE when generating gas properties, using only outflows' )\n\n    init_log_V_Arr = np.random.rand( N_walkers ) * ( log_V_in[1] - log_V_in[0] ) + log_V_in[0]\n    init_log_N_Arr = np.random.rand( N_walkers ) * ( log_N_in[1] - log_N_in[0] ) + log_N_in[0]\n    init_log_t_Arr = np.random.rand( N_walkers ) * ( log_t_in[1] - log_t_in[0] ) + log_t_in[0]\n    init_log_E_Arr = np.random.rand( N_walkers ) * ( log_E_in[1] - log_E_in[0] ) + log_E_in[0]\n    init_log_W_Arr = np.random.rand( N_walkers ) * ( log_W_in[1] - log_W_in[0] ) + log_W_in[0]\n\n    for i in range( 0 , N_walkers ) :\n\n        theta = [ init_log_V_Arr[i] , init_log_N_Arr[i] , init_log_t_Arr[i] , init_log_E_Arr[i] , init_log_W_Arr[i] , 1.0 ]\n\n        while not Prior_f_5( theta ) :\n\n            init_log_V_Arr[ i ] = np.random.rand( ) * ( log_V_in[1] - log_V_in[0] ) + log_V_in[0]\n            init_log_N_Arr[ i ] = np.random.rand( ) * ( log_N_in[1] - log_N_in[0] ) + log_N_in[0]\n            init_log_t_Arr[ i ] = np.random.rand( ) * ( log_t_in[1] - log_t_in[0] ) + log_t_in[0]\n            init_log_E_Arr[ i ] = np.random.rand( ) * ( log_E_in[1] - log_E_in[0] ) + log_E_in[0]\n            init_log_W_Arr[ i ] = np.random.rand( ) * ( log_W_in[1] - log_W_in[0] ) + log_W_in[0]\n\n            theta = [ init_log_V_Arr[i] , init_log_N_Arr[i] , init_log_t_Arr[i] , init_log_E_Arr[i] , init_log_W_Arr[i] , 1.0 ]\n\n    if MODE == 'Mixture' :\n\n        V_sign = np.sign( 
np.random.rand( N_walkers ) - 0.5 )\n\n if MODE == 'Inflow':\n\n V_sign = -1. * np.ones( N_walkers ) \n\n if MODE == 'Outflow':\n\n V_sign = np.ones( N_walkers ) \n\n\n init_V_Arr = 10**init_log_V_Arr * V_sign \n\n return init_V_Arr , init_log_N_Arr , init_log_t_Arr , init_log_E_Arr , init_log_W_Arr \n#####====================================================================================#\n#####====================================================================================#\n#####====================================================================================#\ndef NN_measure( w_tar_Arr , f_tar_Arr , s_tar_Arr , FWHM_tar , PIX_tar , loaded_model , w_rest_Machine_Arr , N_iter=None , normed=False , scaled=False , Delta_min=-18.5 , Delta_max=18.5 , Nbins_tot=1000 , Denser_Center=True ):\n\n w_rest_tar_Arr , f_rest_tar_Arr , z_max_tar , INPUT = Treat_A_Line_To_NN_Input( w_tar_Arr , f_tar_Arr , PIX_tar , FWHM_tar , Delta_min=Delta_min , Delta_max=Delta_max , Nbins_tot=Nbins_tot , Denser_Center=Denser_Center , normed=normed, scaled=scaled )\n\n assert np.sum( w_rest_tar_Arr - w_rest_Machine_Arr) == 0 , 'wavelength array of machine and measure dont match. Check that Delta_min, Delta_max, Nbins_tot and Denser_Center are the same in the model and the imput here.'\n\n Sol = loaded_model.predict( INPUT )\n\n w_Lya = 1215.673123 #A\n\n z_sol = ( w_Lya + Sol[0,0] ) * ( 1 + z_max_tar ) * 1. / ( w_Lya ) - 1.\n\n if N_iter is None :\n return Sol , z_sol\n\n if N_iter > 0 :\n log_V_sol_2_Arr = np.zeros( N_iter )\n log_N_sol_2_Arr = np.zeros( N_iter )\n log_t_sol_2_Arr = np.zeros( N_iter )\n log_E_sol_2_Arr = np.zeros( N_iter )\n log_W_sol_2_Arr = np.zeros( N_iter )\n\n z_sol_2_Arr = np.zeros( N_iter )\n\n for i in range( 0 , N_iter ) :\n\n f_obs_i_Arr = f_tar_Arr + np.random.randn( len( f_tar_Arr ) ) * s_tar_Arr\n\n w_rest_i_Arr , f_rest_i_Arr , z_max_i , INPUT_i = Treat_A_Line_To_NN_Input( w_tar_Arr , f_obs_i_Arr , PIX_tar , FWHM_tar , Delta_min=Delta_min , Delta_max=Delta_max , Nbins_tot=Nbins_tot , Denser_Center=Denser_Center , normed=normed, scaled=scaled )\n\n Sol_i = loaded_model.predict( INPUT_i )\n\n z_sol_i = ( w_Lya + Sol_i[0,0] ) * ( 1 + z_max_i ) * 1. / ( w_Lya ) - 1.\n\n log_V_sol_2_Arr[i] = Sol_i[0,1]\n log_N_sol_2_Arr[i] = Sol_i[0,2]\n log_t_sol_2_Arr[i] = Sol_i[0,3]\n log_E_sol_2_Arr[i] = Sol_i[0,4]\n log_W_sol_2_Arr[i] = Sol_i[0,5]\n\n z_sol_2_Arr[i] = z_sol_i\n\n return Sol , z_sol , log_V_sol_2_Arr , log_N_sol_2_Arr , log_t_sol_2_Arr , z_sol_2_Arr , log_E_sol_2_Arr , log_W_sol_2_Arr\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\n####def NN_measure_3_no_tau( w_tar_Arr , f_tar_Arr , s_tar_Arr , FWHM_tar , PIX_tar , loaded_model , w_rest_Machine_Arr , N_iter=0 , normed=False , scaled=False ):\n####\n#### w_rest_tar_Arr , Delta_rest_tar_Arr , f_rest_tar_Arr , z_max_tar , s_rest_tar_Arr = NN_convert_Obs_Line_to_proxy_rest_line( w_tar_Arr , f_tar_Arr , s_obs_Arr=s_tar_Arr , normed=normed , scaled=scaled)\n####\n#### f_rest_tar_Arr = np.interp( w_rest_Machine_Arr , w_rest_tar_Arr , f_rest_tar_Arr )\n####\n#### INPUT = [ np.hstack( ( f_rest_tar_Arr , z_max_tar , np.log10( FWHM_tar ) , np.log10( PIX_tar ) ) ) ]\n####\n#### Sol = loaded_model.predict( INPUT )\n####\n#### w_Lya = 1215.673123 #A\n####\n#### z_sol = ( w_Lya + Sol[0,3] ) * ( 1 + z_max_tar ) * 1. 
/ ( w_Lya ) - 1.\n####\n#### if not N_iter > 0 :\n#### return Sol , z_sol\n####\n#### if N_iter > 0 :\n#### log_V_sol_2_Arr = np.zeros( N_iter )\n#### log_N_sol_2_Arr = np.zeros( N_iter )\n#### log_E_sol_2_Arr = np.zeros( N_iter )\n#### log_W_sol_2_Arr = np.zeros( N_iter )\n####\n#### z_sol_2_Arr = np.zeros( N_iter )\n####\n#### for i in range( 0 , N_iter ) :\n####\n#### f_obs_i_Arr = f_tar_Arr + np.random.randn( len( f_tar_Arr ) ) * s_tar_Arr\n####\n#### w_rest_i_Arr , Delta_rest_i_Arr , f_rest_i_Arr , z_max_i = NN_convert_Obs_Line_to_proxy_rest_line( w_tar_Arr , f_obs_i_Arr , normed=normed , scaled=scaled)\n####\n#### f_rest_i_Arr = np.interp( w_rest_Machine_Arr , w_rest_i_Arr , f_rest_i_Arr )\n####\n#### INPUT_i = [ np.hstack( ( f_rest_i_Arr , z_max_i , np.log10( FWHM_tar ) , np.log10( PIX_tar ) ) ) ]\n####\n#### Sol_i = loaded_model.predict( INPUT_i )\n####\n#### #print( Sol_i )\n####\n#### z_sol_i = ( w_Lya + Sol_i[0,0] ) * ( 1 + z_max_i ) * 1. / ( w_Lya ) - 1.\n####\n#### log_V_sol_2_Arr[i] = Sol_i[0,1]\n#### log_N_sol_2_Arr[i] = Sol_i[0,2]\n#### log_E_sol_2_Arr[i] = Sol_i[0,3]\n#### log_W_sol_2_Arr[i] = Sol_i[0,4]\n####\n#### z_sol_2_Arr[i] = z_sol_i\n####\n#### return Sol , z_sol , log_V_sol_2_Arr , log_N_sol_2_Arr , z_sol_2_Arr , log_E_sol_2_Arr , log_W_sol_2_Arr\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\n# PSO\n\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef PSO_compute_xi_2_ONE_6D( x , w_tar_Arr , f_tar_Arr , FWHM , PIX , DATA_LyaRT, Geometry ):\n\n my_f_tar_Arr = ( f_tar_Arr - np.amin( f_tar_Arr ) ) * 1. / np.amax( f_tar_Arr )\n\n w_min = np.amin( w_tar_Arr )\n w_max = np.amax( w_tar_Arr ) + 0.000001\n\n w_s_tar_Arr = np.linspace( w_min , w_max , 100 )\n s_tar_Arr = np.zeros( len( w_s_tar_Arr ))\n\n redshift = x[0]\n log_V_pso = x[1]\n log_N_pso = x[2]\n log_t_pso = x[3]\n log_E_pso = x[4]\n log_W_pso = x[5] \n\n F_pso = 1.0\n\n # z_f , V_f , logNH_f , ta_f , F_line_f , logEW_f , Wi_f , Noise_w_Arr , Noise_Arr , FWHM_f , PIX_f , w_min , w_max , DATA_LyaRT , Geometry\n\n w_pso_Arr , f_pso_Arr , dic = generate_a_REAL_line_Noise_w( redshift , 10**log_V_pso, log_N_pso , \n 10**log_t_pso , F_pso , log_E_pso , \n 10**log_W_pso , w_s_tar_Arr ,\n s_tar_Arr , FWHM , PIX , \n w_min , w_max , DATA_LyaRT , \n Geometry )\n\n #my_f_pso_Arr = ( f_pso_Arr - np.amin( f_pso_Arr ) ) *1. / np.amax( f_pso_Arr) \n\n my_f_pso_Arr = f_pso_Arr * 1. / np.amax( f_pso_Arr)\n\n my_f_pso_Arr = np.interp( w_tar_Arr , w_pso_Arr , my_f_pso_Arr )\n\n my_f_tar_Arr = my_f_tar_Arr * 1. / np.amax( my_f_tar_Arr )\n\n #f_pso_to_use_Arr = np.interp( w_tar_Arr , w_pso_Arr , my_f_pso_Arr )\n\n xi_2 = np.sum( ( my_f_pso_Arr - my_f_tar_Arr ) ** 2 )\n\n if xi_2 == np.nan :\n print( 'found np.nan in xi_2!!!' 
)\n xi_2 = np.inf\n\n return xi_2 , w_pso_Arr , my_f_pso_Arr\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef PSO_compute_xi_2_MANY( X , w_tar_Arr , f_tar_Arr , FWHM , PIX , DATA_LyaRT, Geometry ):\n\n xi_2_Arr = np.zeros( len(X) )\n\n for i in range( 0 , len(X) ):\n\n xi_2 , w_pso_Arr , f_pso_Arr = PSO_compute_xi_2_ONE_6D( X[i] , w_tar_Arr , f_tar_Arr , FWHM , PIX , DATA_LyaRT, Geometry )\n\n xi_2_Arr[i] = xi_2\n\n return xi_2_Arr\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\ndef PSO_Analysis( w_tar_Arr , f_tar_Arr , FWHM , PIX , DATA_LyaRT , Geometry , n_particles , n_iters ):\n\n w_lya = 1215.67\n\n #w_max = np.atleast_1d( w_tar_Arr[ f_tar_Arr == np.amax( f_tar_Arr ) ])[0]\n\n print( 'max = ' , np.amax( f_tar_Arr ) )\n\n print( f_tar_Arr == np.amax( f_tar_Arr ) )\n\n print( sum( f_tar_Arr == np.amax( f_tar_Arr ) ) )\n\n w_max = w_tar_Arr[ np.where( f_tar_Arr == np.amax( f_tar_Arr ) ) ][0]\n\n print( w_max )\n\n z_of_the_max = w_max / w_lya - 1.\n\n print( z_of_the_max )\n\n Dz = ( 1 + z_of_the_max ) * 2e-3\n\n pso_z_min = z_of_the_max - Dz\n pso_z_max = z_of_the_max + Dz\n\n if pso_z_min < 0 : pso_z_min = 1e-10\n\n X_min = [ pso_z_min , 1.0 , 17. , -4.00 , 0.1 , 0.01 ]\n X_max = [ pso_z_max , 3.0 , 22. , -0.25 , 3. , 6.0 ]\n\n bounds = (X_min, X_max)\n\n options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9}\n\n n_particles = n_particles\n dimensions = 6\n\n optimizer = GlobalBestPSO( n_particles=n_particles , dimensions=dimensions , options=options, bounds=bounds)\n\n cost, pos = optimizer.optimize( PSO_compute_xi_2_MANY , iters=n_iters , w_tar_Arr=w_tar_Arr , f_tar_Arr=f_tar_Arr , FWHM=FWHM , PIX=PIX , DATA_LyaRT=DATA_LyaRT , Geometry=Geometry )\n\n return cost , pos\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\nif __name__ == '__main__':\n pass\n##====================================================================================#\n##====================================================================================#\n##====================================================================================#\n\n# Enjoy\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.random.rand", "numpy.copy", "numpy.load", "numpy.exp", "numpy.mean", "numpy.where", "numpy.histogram", "numpy.zeros_like", "numpy.log", "matplotlib.pyplot.get_cmap", "numpy.interp", "numpy.arange", "numpy.trapz", "numpy.isfinite", "numpy.sqrt", "numpy.log10", "matplotlib.pyplot.subplot", "numpy.array", "numpy.zeros", "numpy.percentile", "matplotlib.pyplot.figure", "numpy.amax", "numpy.amin", "numpy.absolute", "matplotlib.pyplot.show", "numpy.hstack", "numpy.isnan", "scipy.optimize.curve_fit", "numpy.sum", "numpy.ones", "matplotlib.pyplot.legend", "numpy.atleast_1d", "scipy.ndimage.gaussian_filter1d", "numpy.linspace" ] ]
paulwababu/kuccps-placement-system
[ "8e3a96267fef92c1cc0755ce9637b8173d7d3ef8" ]
[ "cluster.py" ]
[ "from math import sqrt\nfrom xxlimited import Str\n\n\"\"\"\nTo calculate kcse cluster points, we use the formular below\n\nC = √(X/48 * Y/84) * 48\n\nwhere, C = Cluster Points\n X = points obtained in the four main subjects required in the course you want to do,\n eg computer science\n Y = total points of the seven subjects\n \n ***EXAMPLE**\n 1. Nursing Course Cluster Calculation\n - Nursing course needs Biology, Chemistry, Maths and English as Compulsory subjects\n The subjects above when converted to points , eg A for 12, adds up to X in the equation above\n Lets say you got: Biology C+(7) History C+(7) Maths B-(8) English B+(10) \n Your total X will be 32\n - For the other subjects, lets say you score:\n Kiswahili A(12) Business C(5) History D(2)\n Your total for Y will be 19\n\n * Keep in mind KNEX dictates that a candiate can sit for a minimum of 7 subjects to\n a maximum of 9 subjects\n \n * We now have everything we need to calculate the cluster point for the nursing student\n\n * The code below is a function that will implement the cluster calculation based on the number of subjects done\n (in the case above it should bring 30.5380)\n\n * Before getting into how the function works, lets start by understaing how our whole app will work\n The web application is a mordern kuccps placement portal which addresses the issue of calculating \n cluster points to determine what courses you can do and also the issue of people placing to courses\n they cannot do. \n It starts by asking the user to sign up and put thier results and name\n Once logged in, they can apply for a course and view all avaliable courses and universities\n and there is a button showing them whether or not they can apply for the courses shown\n Once they click a course, they can see their results and cluster calculate for that unit\n Once they apply for that course, the system blocks them from appyling again\n If the course has been fully applied, they do not take any more people!!\n \n * The function will have arguements for a total of 9 subjects, however, when it comes to cluster,\n some of the subjects wont be taken into account\n This means that our funtion will take both optional arguements as subjects, as only 7 are required\n and for the 7 will be mandatory arguements. \n Having the above,the function applies the formula above and returns the cluster points\n Having the cluster points, we can now make a new function that shows you whether you qualify\n for a course or not by comparing the cutoff points for the courses with all universities\n\"\"\"\n\n##implement the formular for cluster point above, c\n\n#map grade, g, to points. 
eg A for 12 points, this is the raw cluster\n#g = {'A':12, 'A-':11, 'B+':10, 'B':9, 'B-':8, 'C+':7, 'C':6, 'C-':5, 'D+':4, 'D':3, 'D-':2, 'E':1}\n#X for four main subjects in our case and Y\n#X = {'biology', 'chemistry', 'mathematics', 'english'}\n#Y = {'kiswahili', 'business', 'history'}\n\n#list all subjects by groups\n\"\"\"\n#group1\nenglish = {}\nkiswahili = {}\nmaths = {}\n\n#group2\nbiology = {}\nchemistry = {}\nphysics = {}\ngeneralscience = {}\n\n#group 3\ncre = {}\ngeography = {}\nhre = {}\nhistory = {}\nire = {}\n\n#group4\nagriculture = {}\nart = {}\naviation = {}\nbuilding = {}\ncomputer = {}\ndrawing = {}\nelectricity = {}\nhomescience = {}\nmetalwork = {}\npower = {}\nwoodwork = {}\n\n#group5\narabic = {}\nbusiness = {}\nfrench = {}\ngerman = {}\nsign = {}\nmusic = {}\n\n#ask user for subjects taken\n#for this example,we assume the cluster we are searching for is 4 a nursing course \nbiology = input('Enter Biology grade: ')\nchemistry = input('Enter Chemistry grade: ')\nmaths = input('Enter Mathematics grade: ')\nenglish = input('Enter English grade: ')\nkiswahili = input('Enter kiswahili grade: ')\nbusiness = input('Enter business grade: ')\nhistory = input('Enter history grade: ')\n\n#map function of grade to points instead of suing if else statements\ng = {'A':12, 'A-':11, 'B+':10, 'B':9, 'B-':8, 'C+':7, 'C':6, 'C-':5, 'D+':4, 'D':3, 'D-':2, 'E':1}\nbiology = g[biology]\nchemistry = g[chemistry]\nmaths = g[maths]\nenglish = g[english]\nkiswahili = g[kiswahili]\nbusiness = g[business]\nhistory = g[history]\n\n#this is the total grades of the four main subjects, X\nraw_cluster = (biology+chemistry+maths+english)\naggregate_cluster = (biology+chemistry+maths+english+kiswahili+business+history)\nrcp = raw_cluster / 48 #raw_cluster is X\nacp = aggregate_cluster / 84 #aggregate cluster is Y\nwcp = sqrt(rcp * acp) * 48 #wcp is the cluster points\nprint(wcp)\"\"\"\n\n\"\"\"\n- Now that the system is working for people who do nursing only,\nwe head over to the next part, asking for all units done\nThen the code goes through all courses and calculates the cluster\naccording to the major four units, X, to show you all courses you can do \nin their consecutive universities\nWe need a csv for subjects required for all courses across all Universities\nWe need cluster calculating function according to units done\n\"\"\"\n\nimport pandas as pd\n\ncourse_guide = pd.read_csv(\"course_guide.csv\")\n#print(course_guide.head())\n\ncut_off_15 = course_guide['2015_cut_off'].dropna()\n" ]
[ [ "pandas.read_csv" ] ]
BruninLima/PydaptiveFiltering
[ "14f4758a25b7cb3f1fd643caa5caffd5e7f06c6a" ]
[ "pydaptivefiltering/Meta_LMS.py" ]
[ "import numpy as np\nfrom time import time\n\n# Done:\n# LMS\n# NLMS\n# Newton_LMS\n# SignData\n# SignError\n\n# TODO:\n# DualSign\n# Power2Error\n# AffineProjection\n# T domain\n# T domain DCT\n# T domain DFT\n\n\nclass LMS:\n\n def LMS(Filter, desired_signal: np.ndarray, input_signal: np.ndarray, step: float = 1e-2, max_runs: int = 25, tolerance=0, verbose=(True, 5)) -> dict:\n \"\"\"\n Fit filter parameters to considering desired vector and inp\n ut x. desired and x must have length K,\n where K is the number of iterations\n\n Inputs\n -------\n\n desired : numpy array (row vector)\n desired signal\n x : numpy array (row vector)\n input signal to feed filter\n step : Convergence (relaxation) factor.\n\n max_runs\n max_iter\n\n verbose (boolean, int): (True/False, Print_every)\n\n\n Outputs\n -------\n\n python dict :\n outputs : numpy array (collumn vector)\n Store the estimated output of each iteration. outpu\n ts_vector[k] represents the output erros at iteration k\n errors : numpy array (collumn vector)\n FIR error vectors. error_vector[k] represents the o\n utput erros at iteration k.\n coefficients : numpy array\n Store the estimated coefficients for each iteration\n \"\"\"\n verbose, print_every = verbose\n # assert type(verbose) == bool\n # assert type(print_every) == int && print_every > 0\n\n total_tic = time()\n tic = time()\n for run in range(max_runs):\n\n if verbose == True:\n if run == max_runs - 1 or run % print_every == 0:\n print(\"Run {}\\t \".format(run), end='')\n\n max_iter = desired_signal.size\n\n x_k = np.zeros(Filter.filter_order+1, dtype=input_signal.dtype)\n\n errors_vector = np.array([])\n outputs_vector = np.array([])\n\n for k in range(max_iter):\n\n x_k = np.concatenate(([input_signal[k]], x_k))[\n :Filter.filter_order+1]\n\n w_k = Filter.coefficients\n y_k = np.dot(w_k.conj(), x_k)\n\n error_k = desired_signal[k] - y_k\n\n next_w_k = w_k + step * error_k.conj() * x_k\n\n errors_vector = np.append(errors_vector, error_k)\n outputs_vector = np.append(outputs_vector, y_k)\n\n Filter.coefficients = next_w_k\n Filter.coefficients_history.append(next_w_k)\n\n if verbose == True:\n\n if run == max_runs - 1 or print_every != -1 and run % print_every == 0:\n tac = time() - tic\n print('|error| = {:.02}\\t Time: {:.03} ms'.format(\n np.abs(error_k), (tac)*1000))\n tic = time()\n # tolerance break point\n\n if np.abs(error_k) < tolerance:\n if verbose == True:\n print(\" \")\n print(\" -- Ended at Run {} -- \\n\".format(run))\n print(\"Final |error| = {:.02}\".format(np.abs(error_k)))\n break\n\n if verbose == True:\n print(\" \")\n print('Total runtime {:.03} ms'.format((time() - total_tic)*1000))\n return {'outputs': outputs_vector,\n 'errors': errors_vector, 'coefficients': Filter.coefficients_history}\n\n def NLMS(Filter, desired_signal: np.ndarray, input_signal: np.ndarray, gamma: float, step: float = 1e-2, max_runs: int = 25, tolerance=0, verbose=(True, 5)) -> dict:\n \"\"\"\n Fit filter parameters to considering desired vector and inp\n ut x. desired and x must have length K,\n where K is the number of iterations\n\n Inputs\n -------\n\n desired : numpy array (row vector)\n desired signal\n x : numpy array (row vector)\n input signal to feed filter\n step : Convergence (relaxation) factor.\n\n max_runs\n max_iter\n\n verbose (boolean, int): (True/False, Print_every)\n\n\n Outputs\n -------\n\n python dict :\n outputs : numpy array (collumn vector)\n Store the estimated output of each iteration. 
outpu\n ts_vector[k] represents the output erros at iteration k\n errors : numpy array (collumn vector)\n FIR error vectors. error_vector[k] represents the o\n utput erros at iteration k.\n coefficients : numpy array\n Store the estimated coefficients for each iteration\n \"\"\"\n verbose, print_every = verbose\n # assert type(verbose) == bool\n # assert type(print_every) == int && print_every > 0\n\n total_tic = time()\n tic = time()\n for run in range(max_runs):\n\n if verbose == True:\n if run == max_runs - 1 or run % print_every == 0:\n print(\"Run {}\\t \".format(run), end='')\n\n max_iter = desired_signal.size\n\n x_k = np.zeros(Filter.filter_order+1, dtype=input_signal.dtype)\n\n errors_vector = np.array([])\n outputs_vector = np.array([])\n\n for k in range(max_iter):\n\n x_k = np.concatenate(([input_signal[k]], x_k))[\n :Filter.filter_order+1]\n\n w_k = Filter.coefficients\n y_k = np.dot(w_k.conj(), x_k)\n\n error_k = desired_signal[k] - y_k\n\n error_k = desired_signal[k] - y_k\n gamma_f = step/(x_k.conj()*x_k + gamma)\n next_w_k = w_k + x_k * error_k.conj() * gamma_f\n\n errors_vector = np.append(errors_vector, error_k)\n outputs_vector = np.append(outputs_vector, y_k)\n\n Filter.coefficients = next_w_k\n Filter.coefficients_history.append([next_w_k])\n\n if verbose == True:\n\n if run == max_runs - 1 or print_every != -1 and run % print_every == 0:\n tac = time() - tic\n print('|error| = {:.02} Time: {:.03} ms'.format(\n np.abs(error_k), (tac)*1000))\n tic = time()\n # tolerance break point\n\n if np.abs(error_k) < tolerance:\n if verbose == True:\n print(\" \")\n print(\" -- Ended at Run {} -- \\n\".format(run))\n print(\"Final |error| = {:.02}\".format(np.abs(error_k)))\n break\n\n if verbose == True:\n print(\" \")\n print('Total runtime {:.03} ms'.format((time() - total_tic)*1000))\n return {'outputs': outputs_vector,\n 'errors': errors_vector, 'coefficients': Filter.coefficients_history}\n\n def Newton_LMS(Filter, desired_signal: np.ndarray, input_signal: np.ndarray, alpha: float, initialInvRxHat: np.ndarray, step: float = 1e-2, max_runs: int = 25, tolerance=0, verbose=(True, 5)) -> dict:\n \"\"\"\n Fit filter parameters to considering desired vector and inp\n ut x. desired and x must have length K,\n where K is the number of iterations\n\n Inputs\n -------\n\n desired : numpy array (row vector)\n desired signal\n x : numpy array (row vector)\n input signal to feed filter\n step : Convergence (relaxation) factor.\n\n max_runs\n max_iter\n\n verbose (boolean, int): (True/False, Print_every)\n\n\n Outputs\n -------\n\n python dict :\n outputs : numpy array (collumn vector)\n Store the estimated output of each iteration. outpu\n ts_vector[k] represents the output erros at iteration k\n errors : numpy array (collumn vector)\n FIR error vectors. 
error_vector[k] represents the o\n utput erros at iteration k.\n coefficients : numpy array\n Store the estimated coefficients for each iteration\n \"\"\"\n verbose, print_every = verbose\n # assert type(verbose) == bool\n # assert type(print_every) == int && print_every > 0\n total_tic = time()\n tic = time()\n for run in range(max_runs):\n\n if verbose == True:\n if run == max_runs - 1 or run % print_every == 0:\n print(\"Run {}\\t \".format(run), end='')\n\n max_iter = desired_signal.size\n\n x_k = np.zeros(Filter.filter_order+1, dtype=input_signal.dtype)\n\n errors_vector = np.array([])\n outputs_vector = np.array([])\n invRxHat = initialInvRxHat\n\n for k in range(max_iter):\n\n x_k = np.concatenate(([input_signal[k]], x_k))[\n :Filter.filter_order+1]\n\n w_k = Filter.coefficients\n y_k = np.dot(w_k.conj(), x_k)\n\n error_k = desired_signal[k] - y_k\n\n auxDen = (1-alpha)/alpha + x_k.conj()*invRxHat*x_k\n invRxHat = (invRxHat-(invRxHat*x_k*x_k.conj()\n * invRxHat)/auxDen)/(1 - alpha)\n\n next_w_k = w_k + x_k * error_k.conj() * step * invRxHat\n\n errors_vector = np.append(errors_vector, error_k)\n outputs_vector = np.append(outputs_vector, y_k)\n\n Filter.coefficients = next_w_k\n Filter.coefficients_history.append([next_w_k])\n\n if verbose == True:\n\n if run == max_runs - 1 or print_every != -1 and run % print_every == 0:\n tac = time() - tic\n print('|error| = {:.02}\\t Time: {:.03} ms'.format(\n np.abs(error_k), (tac)*1000))\n tic = time()\n # tolerance break point\n\n if np.abs(error_k) < tolerance:\n if verbose == True:\n print(\" \")\n print(\" -- Ended at Run {} -- \\n\".format(run))\n print(\"Final |error| = {:.02}\".format(np.abs(error_k)))\n break\n\n if verbose == True:\n print(\" \")\n print('Total runtime {:.03} ms'.format((time() - total_tic)*1000))\n return {'outputs': outputs_vector,\n 'errors': errors_vector, 'coefficients': Filter.coefficients_history}\n\n def SignData(Filter, desired_signal: np.ndarray, input_signal: np.ndarray, step: float = 1e-2, max_runs: int = 25, tolerance=0, verbose=(True, 5)) -> dict:\n \"\"\"\n Fit filter parameters to considering desired vector and inp\n ut x. desired and x must have length K,\n where K is the number of iterations\n\n Inputs\n -------\n\n desired : numpy array (row vector)\n desired signal\n x : numpy array (row vector)\n input signal to feed filter\n step : Convergence (relaxation) factor.\n\n max_runs\n max_iter\n\n verbose (boolean, int): (True/False, Print_every)\n\n\n Outputs\n -------\n\n python dict :\n outputs : numpy array (collumn vector)\n Store the estimated output of each iteration. outpu\n ts_vector[k] represents the output erros at iteration k\n errors : numpy array (collumn vector)\n FIR error vectors. 
error_vector[k] represents the o\n utput erros at iteration k.\n coefficients : numpy array\n Store the estimated coefficients for each iteration\n \"\"\"\n verbose, print_every = verbose\n\n total_tic = time()\n tic = time()\n for run in range(max_runs):\n\n if verbose == True:\n if run == max_runs - 1 or run % print_every == 0:\n print(\"Run {}\\t \".format(run), end='')\n\n max_iter = desired_signal.size\n\n x_k = np.zeros(Filter.filter_order+1, dtype=input_signal.dtype)\n\n errors_vector = np.array([])\n outputs_vector = np.array([])\n\n for k in range(max_iter):\n\n x_k = np.concatenate(([input_signal[k]], x_k))[\n :Filter.filter_order+1]\n\n w_k = Filter.coefficients\n\n y_k = np.dot(x_k.conj(), w_k)\n\n error_k = desired_signal[k] - y_k\n\n next_w_k = w_k + 2 * step * error_k * np.sign(x_k)\n\n errors_vector = np.append(errors_vector, error_k)\n outputs_vector = np.append(outputs_vector, y_k)\n\n Filter.coefficients = next_w_k\n Filter.coefficients_history.append([next_w_k])\n\n if verbose == True:\n\n if run == max_runs - 1 or print_every != -1 and run % print_every == 0:\n tac = time() - tic\n print('|error| = {:.02} Time: {:.03} ms'.format(\n np.abs(error_k), (tac)*1000))\n tic = time()\n # tolerance break point\n\n if np.abs(error_k) < tolerance:\n if verbose == True:\n print(\" \")\n print(\" -- Ended at Run {} -- \\n\".format(run))\n print(\"Final |error| = {:.02}\".format(np.abs(error_k)))\n break\n\n if verbose == True:\n print(\" \")\n print('Total runtime {:.03} ms'.format((time() - total_tic)*1000))\n return {'outputs': outputs_vector,\n 'errors': errors_vector, 'coefficients': Filter.coefficients_history}\n\n def SignError(Filter, desired_signal: np.ndarray, input_signal: np.ndarray, step: float = 1e-2, max_runs: int = 25, tolerance=0, verbose=(True, 5)) -> dict:\n \"\"\"\n Fit filter parameters to considering desired vector and inp\n ut x. desired and x must have length K,\n where K is the number of iterations\n\n Inputs\n -------\n\n desired : numpy array (row vector)\n desired signal\n x : numpy array (row vector)\n input signal to feed filter\n step : Convergence (relaxation) factor.\n\n max_runs\n max_iter\n\n verbose (boolean, int): (True/False, Print_every)\n\n\n Outputs\n -------\n\n python dict :\n outputs : numpy array (collumn vector)\n Store the estimated output of each iteration. outpu\n ts_vector[k] represents the output erros at iteration k\n errors : numpy array (collumn vector)\n FIR error vectors. 
error_vector[k] represents the o\n utput erros at iteration k.\n coefficients : numpy array\n Store the estimated coefficients for each iteration\n \"\"\"\n verbose, print_every = verbose\n\n total_tic = time()\n tic = time()\n for run in range(max_runs):\n\n if verbose == True:\n if run == max_runs - 1 or run % print_every == 0:\n print(\"Run {}\\t \".format(run), end='')\n\n max_iter = desired_signal.size\n\n x_k = np.zeros(Filter.filter_order+1, dtype=input_signal.dtype)\n\n errors_vector = np.array([])\n outputs_vector = np.array([])\n\n for k in range(max_iter):\n\n x_k = np.concatenate(([input_signal[k]], x_k))[\n :Filter.filter_order+1]\n\n w_k = Filter.coefficients\n y_k = np.dot(x_k.conj(), w_k)\n\n error_k = desired_signal[k] - y_k\n\n next_w_k = w_k + 2 * step * np.sign(error_k) * x_k\n\n errors_vector = np.append(errors_vector, error_k)\n outputs_vector = np.append(outputs_vector, y_k)\n\n Filter.coefficients = next_w_k\n Filter.coefficients_history.append([next_w_k])\n\n if verbose == True:\n\n if run == max_runs - 1 or print_every != -1 and run % print_every == 0:\n tac = time() - tic\n print('|error| = {:.02} Time: {:.03} ms'.format(\n np.abs(error_k), (tac)*1000))\n tic = time()\n # tolerance break point\n\n if np.abs(error_k) < tolerance:\n if verbose == True:\n print(\" \")\n print(\" -- Ended at Run {} -- \\n\".format(run))\n print(\"Final |error| = {:.02}\".format(np.abs(error_k)))\n break\n\n if verbose == True:\n print(\" \")\n print('Total runtime {:.03} ms'.format((time() - total_tic)*1000))\n return {'outputs': outputs_vector,\n 'errors': errors_vector, 'coefficients': Filter.coefficients_history}\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.sign", "numpy.abs", "numpy.append" ] ]
johntiger1/vaal_querying
[ "c20da3b0b5ca9f25334523f831d0ba8a11f710ca" ]
[ "vgg.py" ]
[ "import torch\nimport torch.nn as nn\n\n\n__all__ = [\n 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',\n 'vgg19_bn', 'vgg19',\n]\n\n\nmodel_urls = {\n 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',\n 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',\n 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',\n 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',\n 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',\n 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',\n 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',\n 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',\n}\n\n\nclass VGG(nn.Module):\n\n def __init__(self, features, num_classes=1000, init_weights=True):\n super(VGG, self).__init__()\n self.features = features\n self.avgpool = nn.AdaptiveAvgPool2d((7, 7))\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, num_classes),\n )\n if init_weights:\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n\ndef make_layers(cfg, batch_norm=False):\n layers = []\n in_channels = 1\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\n\ncfgs = {\n 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\ndef _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls[arch],\n progress=progress)\n model.load_state_dict(state_dict)\n return model\n\n\ndef vgg11(pretrained=False, progress=True, **kwargs):\n r\"\"\"VGG 11-layer model (configuration \"A\") from\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>'_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)\n\n\ndef vgg11_bn(pretrained=False, progress=True, **kwargs):\n r\"\"\"VGG 11-layer model (configuration \"A\") with batch normalization\n `\"Very Deep Convolutional Networks For Large-Scale Image 
Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>'_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)\n\n\ndef vgg13(pretrained=False, progress=True, **kwargs):\n r\"\"\"VGG 13-layer model (configuration \"B\")\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>'_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)\n\n\ndef vgg13_bn(pretrained=False, progress=True, **kwargs):\n r\"\"\"VGG 13-layer model (configuration \"B\") with batch normalization\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>'_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)\n\n\ndef vgg16(pretrained=False, progress=True, **kwargs):\n r\"\"\"VGG 16-layer model (configuration \"D\")\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>'_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)\n\n\ndef vgg16_bn(pretrained=False, progress=True, **kwargs):\n r\"\"\"VGG 16-layer model (configuration \"D\") with batch normalization\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>'_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)\n\n\ndef vgg19(pretrained=False, progress=True, **kwargs):\n r\"\"\"VGG 19-layer model (configuration \"E\")\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>'_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)\n\n\ndef vgg19_bn(pretrained=False, progress=True, **kwargs):\n r\"\"\"VGG 19-layer model (configuration 'E') with batch normalization\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>'_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.flatten", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.init.constant_", "torch.nn.BatchNorm2d", "torch.nn.init.kaiming_normal_", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.init.normal_", "torch.nn.AdaptiveAvgPool2d" ] ]
rrkrp100/Face_detect
[ "57b069f6c03c9e07a3f45a485624e8e4ad40ce78" ]
[ "retrain.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Simple transfer learning with an Inception v3 architecture model.\n\nWith support for TensorBoard.\n\nThis example shows how to take a Inception v3 architecture model trained on\nImageNet images, and train a new top layer that can recognize other classes of\nimages.\n\nThe top layer receives as input a 2048-dimensional vector for each image. We\ntrain a softmax layer on top of this representation. Assuming the softmax layer\ncontains N labels, this corresponds to learning N + 2048*N model parameters\ncorresponding to the learned biases and weights.\n\nHere's an example, which assumes you have a folder containing class-named\nsubfolders, each full of images for each label. The example folder flower_photos\nshould have a structure like this:\n\n~/flower_photos/daisy/photo1.jpg\n~/flower_photos/daisy/photo2.jpg\n...\n~/flower_photos/rose/anotherphoto77.jpg\n...\n~/flower_photos/sunflower/somepicture.jpg\n\nThe subfolder names are important, since they define what label is applied to\neach image, but the filenames themselves don't matter. Once your images are\nprepared, you can run the training with a command like this:\n\n\n```bash\nbazel build tensorflow/examples/image_retraining:retrain && \\\nbazel-bin/tensorflow/examples/image_retraining/retrain \\\n --image_dir ~/flower_photos\n```\n\nOr, if you have a pip installation of tensorflow, `retrain.py` can be run\nwithout bazel:\n\n```bash\npython tensorflow/examples/image_retraining/retrain.py \\\n --image_dir ~/flower_photos\n```\n\nYou can replace the image_dir argument with any folder containing subfolders of\nimages. The label for each image is taken from the name of the subfolder it's\nin.\n\nThis produces a new model file that can be loaded and run by any TensorFlow\nprogram, for example the label_image sample code.\n\n\nTo use with TensorBoard:\n\nBy default, this script will log summaries to /tmp/retrain_logs directory\n\nVisualize the summaries with this command:\n\ntensorboard --logdir /tmp/retrain_logs\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nfrom datetime import datetime\nimport hashlib\nimport os.path\nimport random\nimport re\nimport struct\nimport sys\nimport tarfile\n\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util import compat\n\nFLAGS = None\n\n# These are all parameters that are tied to the particular model architecture\n# we're using for Inception v3. These include things like tensor names and their\n# sizes. 
If you want to adapt this script to work with another model, you will\n# need to update these to reflect the values in the network you're using.\n# pylint: disable=line-too-long\nDATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'\n# pylint: enable=line-too-long\nBOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'\nBOTTLENECK_TENSOR_SIZE = 2048\nMODEL_INPUT_WIDTH = 299\nMODEL_INPUT_HEIGHT = 299\nMODEL_INPUT_DEPTH = 3\nJPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'\nRESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'\nMAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M\n\n\ndef create_image_lists(image_dir, testing_percentage, validation_percentage):\n \"\"\"Builds a list of training images from the file system.\n\n Analyzes the sub folders in the image directory, splits them into stable\n training, testing, and validation sets, and returns a data structure\n describing the lists of images for each label and their paths.\n\n Args:\n image_dir: String path to a folder containing subfolders of images.\n testing_percentage: Integer percentage of the images to reserve for tests.\n validation_percentage: Integer percentage of images reserved for validation.\n\n Returns:\n A dictionary containing an entry for each label subfolder, with images split\n into training, testing, and validation sets within each label.\n \"\"\"\n if not gfile.Exists(image_dir):\n print(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = {}\n sub_dirs = [x[0] for x in gfile.Walk(image_dir)]\n # The root directory comes first, so skip it.\n is_root_dir = True\n for sub_dir in sub_dirs:\n if is_root_dir:\n is_root_dir = False\n continue\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']\n file_list = []\n dir_name = os.path.basename(sub_dir)\n if dir_name == image_dir:\n continue\n print(\"Looking for images in '\" + dir_name + \"'\")\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' + extension)\n file_list.extend(gfile.Glob(file_glob))\n if not file_list:\n print('No files found')\n continue\n if len(file_list) < 20:\n print('WARNING: Folder has less than 20 images, which may cause issues.')\n elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:\n print('WARNING: Folder {} has more than {} images. Some images will '\n 'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n training_images = []\n testing_images = []\n validation_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, the data set creator has a way of\n # grouping photos that are close variations of each other. 
For example\n # this is used in the plant disease data set to group multiple pictures of\n # the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()\n percentage_hash = ((int(hash_name_hashed, 16) %\n (MAX_NUM_IMAGES_PER_CLASS + 1)) *\n (100.0 / MAX_NUM_IMAGES_PER_CLASS))\n if percentage_hash < validation_percentage:\n validation_images.append(base_name)\n elif percentage_hash < (testing_percentage + validation_percentage):\n testing_images.append(base_name)\n else:\n training_images.append(base_name)\n result[label_name] = {\n 'dir': dir_name,\n 'training': training_images,\n 'testing': testing_images,\n 'validation': validation_images,\n }\n return result\n\n\ndef get_image_path(image_lists, label_name, index, image_dir, category):\n \"\"\"\"Returns a path to an image for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Int offset of the image we want. This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of set to pull images from - training, testing, or\n validation.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n\n \"\"\"\n if label_name not in image_lists:\n tf.logging.fatal('Label does not exist %s.', label_name)\n label_lists = image_lists[label_name]\n if category not in label_lists:\n tf.logging.fatal('Category does not exist %s.', category)\n category_list = label_lists[category]\n if not category_list:\n tf.logging.fatal('Label %s has no images in the category %s.',\n label_name, category)\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path\n\n\ndef get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,\n category):\n \"\"\"\"Returns a path to a bottleneck file for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. 
This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n category: Name string of set to pull images from - training, testing, or\n validation.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n \"\"\"\n return get_image_path(image_lists, label_name, index, bottleneck_dir,\n category) + '.txt'\n\n\ndef create_inception_graph():\n \"\"\"\"Creates a graph from saved GraphDef file and returns a Graph object.\n\n Returns:\n Graph holding the trained Inception network, and various tensors we'll be\n manipulating.\n \"\"\"\n with tf.Graph().as_default() as graph:\n model_filename = os.path.join(\n FLAGS.model_dir, 'classify_image_graph_def.pb')\n with gfile.FastGFile(model_filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (\n tf.import_graph_def(graph_def, name='', return_elements=[\n BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,\n RESIZED_INPUT_TENSOR_NAME]))\n return graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor\n\n\ndef run_bottleneck_on_image(sess, image_data, image_data_tensor,\n bottleneck_tensor):\n \"\"\"Runs inference on an image to extract the 'bottleneck' summary layer.\n\n Args:\n sess: Current active TensorFlow Session.\n image_data: String of raw JPEG data.\n image_data_tensor: Input data layer in the graph.\n bottleneck_tensor: Layer before the final softmax.\n\n Returns:\n Numpy array of bottleneck values.\n \"\"\"\n bottleneck_values = sess.run(\n bottleneck_tensor,\n {image_data_tensor: image_data})\n bottleneck_values = np.squeeze(bottleneck_values)\n return bottleneck_values\n\n\ndef maybe_download_and_extract():\n \"\"\"Download and extract model tar file.\n\n If the pretrained model we're using doesn't already exist, this function\n downloads it from the TensorFlow.org website and unpacks it into a directory.\n \"\"\"\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\n (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL,\n filepath,\n _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n\n\ndef ensure_dir_exists(dir_name):\n \"\"\"Makes sure the folder exists on disk.\n\n Args:\n dir_name: Path string to the folder we want to create.\n \"\"\"\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\ndef write_list_of_floats_to_file(list_of_floats, file_path):\n \"\"\"Writes a given list of floats to a binary file.\n\n Args:\n list_of_floats: List of floats we want to write to a file.\n file_path: Path to a file where list of floats will be stored.\n\n \"\"\"\n\n s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats)\n with open(file_path, 'wb') as f:\n f.write(s)\n\n\ndef read_list_of_floats_from_file(file_path):\n \"\"\"Reads list of floats from a given file.\n\n Args:\n file_path: Path to a file where list of floats was stored.\n Returns:\n Array of bottleneck values (list of 
floats).\n\n \"\"\"\n\n with open(file_path, 'rb') as f:\n s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read())\n return list(s)\n\n\nbottleneck_path_2_bottleneck_values = {}\n\n\ndef create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n bottleneck_tensor):\n \"\"\"Create a single bottleneck file.\"\"\"\n print('Creating bottleneck at ' + bottleneck_path)\n image_path = get_image_path(image_lists, label_name, index,\n image_dir, category)\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n image_data = gfile.FastGFile(image_path, 'rb').read()\n try:\n bottleneck_values = run_bottleneck_on_image(\n sess, image_data, jpeg_data_tensor, bottleneck_tensor)\n except:\n raise RuntimeError('Error during processing file %s' % image_path)\n\n bottleneck_string = ','.join(str(x) for x in bottleneck_values)\n with open(bottleneck_path, 'w') as bottleneck_file:\n bottleneck_file.write(bottleneck_string)\n\n\ndef get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,\n category, bottleneck_dir, jpeg_data_tensor,\n bottleneck_tensor):\n \"\"\"Retrieves or calculates bottleneck values for an image.\n\n If a cached version of the bottleneck data exists on-disk, return that,\n otherwise calculate the data and save it to disk for future use.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. This will be modulo-ed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of which set to pull images from - training, testing,\n or validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: The tensor to feed loaded jpeg data into.\n bottleneck_tensor: The output tensor for the bottleneck values.\n\n Returns:\n Numpy array of values produced by the bottleneck layer for the image.\n \"\"\"\n label_lists = image_lists[label_name]\n sub_dir = label_lists['dir']\n sub_dir_path = os.path.join(bottleneck_dir, sub_dir)\n ensure_dir_exists(sub_dir_path)\n bottleneck_path = get_bottleneck_path(image_lists, label_name, index,\n bottleneck_dir, category)\n if not os.path.exists(bottleneck_path):\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n bottleneck_tensor)\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n did_hit_error = False\n try:\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n except ValueError:\n print('Invalid float found, recreating bottleneck')\n did_hit_error = True\n if did_hit_error:\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n bottleneck_tensor)\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n # Allow exceptions to propagate here, since they shouldn't happen after a\n # fresh creation\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n return bottleneck_values\n\n\ndef cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,\n jpeg_data_tensor, bottleneck_tensor):\n \"\"\"Ensures all the training, testing, and validation 
bottlenecks are cached.\n\n Because we're likely to read the same image multiple times (if there are no\n distortions applied during training) it can speed things up a lot if we\n calculate the bottleneck layer values once for each image during\n preprocessing, and then just read those cached values repeatedly during\n training. Here we go through all the images we've found, calculate those\n values, and save them off.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n image_dir: Root folder string of the subfolders containing the training\n images.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: Input tensor for jpeg data from file.\n bottleneck_tensor: The penultimate output layer of the graph.\n\n Returns:\n Nothing.\n \"\"\"\n how_many_bottlenecks = 0\n ensure_dir_exists(bottleneck_dir)\n for label_name, label_lists in image_lists.items():\n for category in ['training', 'testing', 'validation']:\n category_list = label_lists[category]\n for index, unused_base_name in enumerate(category_list):\n get_or_create_bottleneck(sess, image_lists, label_name, index,\n image_dir, category, bottleneck_dir,\n jpeg_data_tensor, bottleneck_tensor)\n\n how_many_bottlenecks += 1\n if how_many_bottlenecks % 100 == 0:\n print(str(how_many_bottlenecks) + ' bottleneck files created.')\n\n\ndef get_random_cached_bottlenecks(sess, image_lists, how_many, category,\n bottleneck_dir, image_dir, jpeg_data_tensor,\n bottleneck_tensor):\n \"\"\"Retrieves bottleneck values for cached images.\n\n If no distortions are being applied, this function can retrieve the cached\n bottleneck values directly from disk for images. It picks a random set of\n images from the specified category.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n how_many: If positive, a random sample of this size will be chosen.\n If negative, all bottlenecks will be retrieved.\n category: Name string of which set to pull from - training, testing, or\n validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n image_dir: Root folder string of the subfolders containing the training\n images.\n jpeg_data_tensor: The layer to feed jpeg image data into.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n\n Returns:\n List of bottleneck arrays, their corresponding ground truths, and the\n relevant filenames.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n filenames = []\n if how_many >= 0:\n # Retrieve a random sample of bottlenecks.\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_name = get_image_path(image_lists, label_name, image_index,\n image_dir, category)\n bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,\n image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor,\n bottleneck_tensor)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n filenames.append(image_name)\n else:\n # Retrieve all bottlenecks.\n for label_index, label_name in enumerate(image_lists.keys()):\n for image_index, image_name in enumerate(\n image_lists[label_name][category]):\n image_name = get_image_path(image_lists, 
label_name, image_index,\n image_dir, category)\n bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,\n image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor,\n bottleneck_tensor)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n filenames.append(image_name)\n return bottlenecks, ground_truths, filenames\n\n\ndef get_random_distorted_bottlenecks(\n sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,\n distorted_image, resized_input_tensor, bottleneck_tensor):\n \"\"\"Retrieves bottleneck values for training images, after distortions.\n\n If we're training with distortions like crops, scales, or flips, we have to\n recalculate the full model for every image, and so we can't use cached\n bottleneck values. Instead we find random images for the requested category,\n run them through the distortion graph, and then the full graph to get the\n bottleneck results for each.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n how_many: The integer number of bottleneck values to return.\n category: Name string of which set of images to fetch - training, testing,\n or validation.\n image_dir: Root folder string of the subfolders containing the training\n images.\n input_jpeg_tensor: The input layer we feed the image data to.\n distorted_image: The output node of the distortion graph.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n\n Returns:\n List of bottleneck arrays and their corresponding ground truths.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_path = get_image_path(image_lists, label_name, image_index, image_dir,\n category)\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n jpeg_data = gfile.FastGFile(image_path, 'rb').read()\n # Note that we materialize the distorted_image_data as a numpy array before\n # sending running inference on the image. 
This involves 2 memory copies and\n # might be optimized in other implementations.\n distorted_image_data = sess.run(distorted_image,\n {input_jpeg_tensor: jpeg_data})\n bottleneck = run_bottleneck_on_image(sess, distorted_image_data,\n resized_input_tensor,\n bottleneck_tensor)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n return bottlenecks, ground_truths\n\n\ndef should_distort_images(flip_left_right, random_crop, random_scale,\n random_brightness):\n \"\"\"Whether any distortions are enabled, from the input flags.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n\n Returns:\n Boolean value indicating whether any distortions should be applied.\n \"\"\"\n return (flip_left_right or (random_crop != 0) or (random_scale != 0) or\n (random_brightness != 0))\n\n\ndef add_input_distortions(flip_left_right, random_crop, random_scale,\n random_brightness):\n \"\"\"Creates the operations to apply the specified distortions.\n\n During training it can help to improve the results if we run the images\n through simple distortions like crops, scales, and flips. These reflect the\n kind of variations we expect in the real world, and so can help train the\n model to cope with natural data more effectively. Here we take the supplied\n parameters and construct a network of operations to apply them to an image.\n\n Cropping\n ~~~~~~~~\n\n Cropping is done by placing a bounding box at a random position in the full\n image. The cropping parameter controls the size of that box relative to the\n input image. If it's zero, then the box is the same size as the input and no\n cropping is performed. If the value is 50%, then the crop box will be half the\n width and height of the input. In a diagram it looks like this:\n\n < width >\n +---------------------+\n | |\n | width - crop% |\n | < > |\n | +------+ |\n | | | |\n | | | |\n | | | |\n | +------+ |\n | |\n | |\n +---------------------+\n\n Scaling\n ~~~~~~~\n\n Scaling is a lot like cropping, except that the bounding box is always\n centered and its size varies randomly within the given range. For example if\n the scale percentage is zero, then the bounding box is the same size as the\n input and no scaling is applied. 
If it's 50%, then the bounding box will be in\n a random range between half the width and height and full size.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n graph.\n\n Returns:\n The jpeg input layer and the distorted result tensor.\n \"\"\"\n\n jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n margin_scale = 1.0 + (random_crop / 100.0)\n resize_scale = 1.0 + (random_scale / 100.0)\n margin_scale_value = tf.constant(margin_scale)\n resize_scale_value = tf.random_uniform(tensor_shape.scalar(),\n minval=1.0,\n maxval=resize_scale)\n scale_value = tf.multiply(margin_scale_value, resize_scale_value)\n precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH)\n precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT)\n precrop_shape = tf.stack([precrop_height, precrop_width])\n precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)\n precropped_image = tf.image.resize_bilinear(decoded_image_4d,\n precrop_shape_as_int)\n precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])\n cropped_image = tf.random_crop(precropped_image_3d,\n [MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,\n MODEL_INPUT_DEPTH])\n if flip_left_right:\n flipped_image = tf.image.random_flip_left_right(cropped_image)\n else:\n flipped_image = cropped_image\n brightness_min = 1.0 - (random_brightness / 100.0)\n brightness_max = 1.0 + (random_brightness / 100.0)\n brightness_value = tf.random_uniform(tensor_shape.scalar(),\n minval=brightness_min,\n maxval=brightness_max)\n brightened_image = tf.multiply(flipped_image, brightness_value)\n distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')\n return jpeg_data, distort_result\n\n\ndef variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n\ndef add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):\n \"\"\"Adds a new softmax and fully-connected layer for training.\n\n We need to retrain the top layer to identify our new classes, so this function\n adds the right operations to the graph, along with some variables to hold the\n weights, and then sets up all the gradients for the backward pass.\n\n The set up for the softmax and fully-connected layers is based on:\n https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html\n\n Args:\n class_count: Integer of how many categories of things we're trying to\n recognize.\n final_tensor_name: Name string for the new final node that produces results.\n bottleneck_tensor: The output of the main CNN graph.\n\n Returns:\n The tensors for the training and cross entropy results, and tensors for the\n bottleneck input and ground truth input.\n \"\"\"\n with 
tf.name_scope('input'):\n bottleneck_input = tf.placeholder_with_default(\n bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],\n name='BottleneckInputPlaceholder')\n\n ground_truth_input = tf.placeholder(tf.float32,\n [None, class_count],\n name='GroundTruthInput')\n\n # Organizing the following ops as `final_training_ops` so they're easier\n # to see in TensorBoard\n layer_name = 'final_training_ops'\n with tf.name_scope(layer_name):\n with tf.name_scope('weights'):\n initial_value = tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count],\n stddev=0.001)\n\n layer_weights = tf.Variable(initial_value, name='final_weights')\n\n variable_summaries(layer_weights)\n with tf.name_scope('biases'):\n layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')\n variable_summaries(layer_biases)\n with tf.name_scope('Wx_plus_b'):\n logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases\n tf.summary.histogram('pre_activations', logits)\n\n final_tensor = tf.nn.softmax(logits, name=final_tensor_name)\n tf.summary.histogram('activations', final_tensor)\n\n with tf.name_scope('cross_entropy'):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n labels=ground_truth_input, logits=logits)\n with tf.name_scope('total'):\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n tf.summary.scalar('cross_entropy', cross_entropy_mean)\n\n with tf.name_scope('train'):\n optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)\n train_step = optimizer.minimize(cross_entropy_mean)\n\n return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,\n final_tensor)\n\n\ndef add_evaluation_step(result_tensor, ground_truth_tensor):\n \"\"\"Inserts the operations we need to evaluate the accuracy of our results.\n\n Args:\n result_tensor: The new final node that produces results.\n ground_truth_tensor: The node we feed ground truth data\n into.\n\n Returns:\n Tuple of (evaluation step, prediction).\n \"\"\"\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n prediction = tf.argmax(result_tensor, 1)\n correct_prediction = tf.equal(\n prediction, tf.argmax(ground_truth_tensor, 1))\n with tf.name_scope('accuracy'):\n evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', evaluation_step)\n return evaluation_step, prediction\n\n\ndef main(_):\n # Setup the directory we'll write summaries to for TensorBoard\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n\n # Set up the pre-trained graph.\n maybe_download_and_extract()\n graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (\n create_inception_graph())\n\n # Look at the folder structure, and create lists of all the images.\n image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,\n FLAGS.validation_percentage)\n class_count = len(image_lists.keys())\n if class_count == 0:\n print('No valid folders of images found at ' + FLAGS.image_dir)\n return -1\n if class_count == 1:\n print('Only one valid folder of images found at ' + FLAGS.image_dir +\n ' - multiple classes are needed for classification.')\n return -1\n\n # See if the command-line flags mean we're applying any distortions.\n do_distort_images = should_distort_images(\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\n FLAGS.random_brightness)\n\n with tf.Session(graph=graph) as sess:\n\n if do_distort_images:\n # We will be applying 
distortions, so setup the operations we'll need.\n (distorted_jpeg_data_tensor,\n distorted_image_tensor) = add_input_distortions(\n FLAGS.flip_left_right, FLAGS.random_crop,\n FLAGS.random_scale, FLAGS.random_brightness)\n else:\n # We'll make sure we've calculated the 'bottleneck' image summaries and\n # cached them on disk.\n cache_bottlenecks(sess, image_lists, FLAGS.image_dir,\n FLAGS.bottleneck_dir, jpeg_data_tensor,\n bottleneck_tensor)\n\n # Add the new layer that we'll be training.\n (train_step, cross_entropy, bottleneck_input, ground_truth_input,\n final_tensor) = add_final_training_ops(len(image_lists.keys()),\n FLAGS.final_tensor_name,\n bottleneck_tensor)\n\n # Create the operations we need to evaluate the accuracy of our new layer.\n evaluation_step, prediction = add_evaluation_step(\n final_tensor, ground_truth_input)\n\n # Merge all the summaries and write them out to the summaries_dir\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n\n validation_writer = tf.summary.FileWriter(\n FLAGS.summaries_dir + '/validation')\n\n # Set up all our weights to their initial default values.\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Run the training for as many cycles as requested on the command line.\n for i in range(FLAGS.how_many_training_steps):\n # Get a batch of input bottleneck values, either calculated fresh every\n # time with distortions applied, or from the cache stored on disk.\n if do_distort_images:\n (train_bottlenecks,\n train_ground_truth) = get_random_distorted_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.image_dir, distorted_jpeg_data_tensor,\n distorted_image_tensor, resized_image_tensor, bottleneck_tensor)\n else:\n (train_bottlenecks,\n train_ground_truth, _) = get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n bottleneck_tensor)\n # Feed the bottlenecks and ground truth into the graph, and run a training\n # step. Capture training summaries for TensorBoard with the `merged` op.\n\n train_summary, _ = sess.run(\n [merged, train_step],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n train_writer.add_summary(train_summary, i)\n\n # !! 
output of the training accuracy *changed\n is_last_step = (i + 1 == FLAGS.how_many_training_steps)\n if (i % FLAGS.eval_step_interval) == 0 or is_last_step:\n train_accuracy, cross_entropy_value = sess.run(\n [evaluation_step, cross_entropy],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,\n train_accuracy * 100))\n print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,\n cross_entropy_value))\n validation_bottlenecks, validation_ground_truth, _ = (\n get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.validation_batch_size, 'validation',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n bottleneck_tensor))\n # Run a validation step and capture training summaries for TensorBoard\n # with the `merged` op.\n validation_summary, validation_accuracy = sess.run(\n [merged, evaluation_step],\n feed_dict={bottleneck_input: validation_bottlenecks,\n ground_truth_input: validation_ground_truth})\n validation_writer.add_summary(validation_summary, i)\n print('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %\n (datetime.now(), i, validation_accuracy * 100,\n len(validation_bottlenecks)))\n\n # We've completed all our training, so run a final test evaluation on\n # some new images we haven't used before.\n test_bottlenecks, test_ground_truth, test_filenames = (\n get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size,\n 'testing', FLAGS.bottleneck_dir,\n FLAGS.image_dir, jpeg_data_tensor,\n bottleneck_tensor))\n test_accuracy, predictions = sess.run(\n [evaluation_step, prediction],\n feed_dict={bottleneck_input: test_bottlenecks,\n ground_truth_input: test_ground_truth})\n print('Final test accuracy = %.1f%% (N=%d)' % (\n test_accuracy * 100, len(test_bottlenecks)))\n\n if FLAGS.print_misclassified_test_images:\n print('=== MISCLASSIFIED TEST IMAGES ===')\n for i, test_filename in enumerate(test_filenames):\n if predictions[i] != test_ground_truth[i].argmax():\n print('%70s %s' % (test_filename,\n list(image_lists.keys())[predictions[i]]))\n\n # Write out the trained graph and labels with the weights stored as\n # constants.\n output_graph_def = graph_util.convert_variables_to_constants(\n sess, graph.as_graph_def(), [FLAGS.final_tensor_name])\n with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:\n f.write(output_graph_def.SerializeToString())\n with gfile.FastGFile(FLAGS.output_labels, 'w') as f:\n f.write('\\n'.join(image_lists.keys()) + '\\n')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--image_dir',\t# defined in train.sh\n type=str,\n default='',\n help='Path to folders of labeled images.'\n )\n parser.add_argument(\n '--output_graph',\n type=str,\n default='/tmp/output_graph.pb',\n help='Where to save the trained graph.'\n )\n parser.add_argument(\n '--output_labels',\n type=str,\n default='/tmp/output_labels.txt',\n help='Where to save the trained graph\\'s labels.'\n )\n parser.add_argument(\n '--summaries_dir',\n type=str,\n default='/tmp/retrain_logs',\n help='Where to save summary logs for TensorBoard.'\n )\n parser.add_argument(\n '--how_many_training_steps',\n type=int,\n default=4000,\n help='How many training steps to run before ending.'\n )\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=0.01,\n help='How large a learning rate to use when training.'\n )\n parser.add_argument(\n '--testing_percentage',\n type=int,\n default=10,\n help='What percentage of 
images to use as a test set.'\n )\n parser.add_argument(\n '--validation_percentage',\n type=int,\n default=10,\n help='What percentage of images to use as a validation set.'\n )\n parser.add_argument(\n '--eval_step_interval',\n type=int,\n default=10,\n help='How often to evaluate the training results.'\n )\n # added via shell command *changed\n parser.add_argument(\n '--train_batch_size',\n type=int,\n default=100,\n help='How many images to train on at a time.'\n )\n parser.add_argument(\n '--test_batch_size',\n type=int,\n default=-1,\n help=\"\"\"\\\n How many images to test on. This test set is only used once, to evaluate\n the final accuracy of the model after training completes.\n A value of -1 causes the entire test set to be used, which leads to more\n stable results across runs.\\\n \"\"\"\n )\n parser.add_argument(\n '--validation_batch_size',\n type=int,\n default=100,\n help=\"\"\"\\\n How many images to use in an evaluation batch. This validation set is\n used much more often than the test set, and is an early indicator of how\n accurate the model is during training.\n A value of -1 causes the entire validation set to be used, which leads to\n more stable results across training iterations, but may be slower on large\n training sets.\\\n \"\"\"\n )\n parser.add_argument(\n '--print_misclassified_test_images',\n default=False,\n help=\"\"\"\\\n Whether to print out a list of all misclassified test images.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--model_dir',\n type=str,\n default='/tmp/imagenet',\n help=\"\"\"\\\n Path to classify_image_graph_def.pb,\n imagenet_synset_to_human_label_map.txt, and\n imagenet_2012_challenge_label_map_proto.pbtxt.\\\n \"\"\"\n )\n parser.add_argument(\n '--bottleneck_dir',\n type=str,\n default='/tmp/bottleneck',\n help='Path to cache bottleneck layer values as files.'\n )\n parser.add_argument(\n '--final_tensor_name',\n type=str,\n default='final_result',\n help=\"\"\"\\\n The name of the output classification layer in the retrained graph.\\\n \"\"\"\n )\n parser.add_argument(\n '--flip_left_right',\n default=False,\n help=\"\"\"\\\n Whether to randomly flip half of the training images horizontally.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--random_crop',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much of a margin to randomly crop off the\n training images.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_scale',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly scale up the size of the\n training images by.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_brightness',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly multiply the training image\n input pixels up or down by.\\\n \"\"\"\n )\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)" ]
[ [ "tensorflow.image.resize_bilinear", "tensorflow.reduce_min", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.image.random_flip_left_right", "tensorflow.matmul", "tensorflow.import_graph_def", "tensorflow.stack", "tensorflow.python.platform.gfile.Exists", "tensorflow.nn.softmax", "tensorflow.random_crop", "tensorflow.global_variables_initializer", "tensorflow.image.decode_jpeg", "tensorflow.cast", "tensorflow.train.GradientDescentOptimizer", "tensorflow.summary.histogram", "tensorflow.python.platform.gfile.FastGFile", "tensorflow.argmax", "tensorflow.Variable", "tensorflow.constant", "tensorflow.gfile.MakeDirs", "tensorflow.squeeze", "tensorflow.gfile.DeleteRecursively", "tensorflow.app.run", "tensorflow.python.platform.gfile.Walk", "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.zeros", "numpy.zeros", "tensorflow.expand_dims", "tensorflow.summary.scalar", "tensorflow.GraphDef", "tensorflow.gfile.Exists", "tensorflow.Session", "tensorflow.truncated_normal", "tensorflow.python.platform.gfile.Glob", "tensorflow.placeholder", "tensorflow.name_scope", "tensorflow.summary.merge_all", "numpy.squeeze", "tensorflow.placeholder_with_default", "tensorflow.multiply", "tensorflow.Graph", "tensorflow.reduce_max", "tensorflow.logging.fatal", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "tensorflow.square", "tensorflow.python.util.compat.as_bytes" ] ]
phillikus/doom-ai
[ "ee0d49ae46321960adacc054d041377622877b10" ]
[ "src/models/rnn/RNN.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass RNN(nn.Module):\n def __init__(self, num_inputs, num_actions):\n super(RNN, self).__init__()\n self.convolution1 = nn.Conv2d(in_channels=num_inputs, out_channels=32, kernel_size=5)\n self.convolution2 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3)\n self.convolution3 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3)\n self.convolution4 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=2)\n self.fc1 = nn.Linear(in_features=self.count_neurons((1, 80, 80)), out_features=40)\n self.fc2 = nn.Linear(in_features=40, out_features=40)\n self.fc3 = nn.Linear(in_features=40, out_features=num_actions)" ]
[ [ "torch.nn.Linear", "torch.nn.Conv2d" ] ]
sdrees/pandas
[ "bef454f0893efe2fa5e49317635f89c03467d16e" ]
[ "pandas/tests/extension/test_arrow.py" ]
[ "\"\"\"\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\"\"\"\n\nfrom datetime import (\n date,\n datetime,\n time,\n timedelta,\n)\n\nimport pytest\n\nfrom pandas.compat import (\n pa_version_under2p0,\n pa_version_under3p0,\n)\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.tests.extension import base\n\npa = pytest.importorskip(\"pyarrow\", minversion=\"1.0.1\")\n\nfrom pandas.core.arrays.arrow.dtype import ArrowDtype # isort:skip\n\n\[email protected](params=tm.ALL_PYARROW_DTYPES)\ndef dtype(request):\n return ArrowDtype(pyarrow_dtype=request.param)\n\n\[email protected]\ndef data(dtype):\n pa_dtype = dtype.pyarrow_dtype\n if pa.types.is_boolean(pa_dtype):\n data = [True, False] * 4 + [None] + [True, False] * 44 + [None] + [True, False]\n elif pa.types.is_floating(pa_dtype):\n data = [1.0, 0.0] * 4 + [None] + [-2.0, -1.0] * 44 + [None] + [0.5, 99.5]\n elif pa.types.is_signed_integer(pa_dtype):\n data = [1, 0] * 4 + [None] + [-2, -1] * 44 + [None] + [1, 99]\n elif pa.types.is_unsigned_integer(pa_dtype):\n data = [1, 0] * 4 + [None] + [2, 1] * 44 + [None] + [1, 99]\n elif pa.types.is_date(pa_dtype):\n data = (\n [date(2022, 1, 1), date(1999, 12, 31)] * 4\n + [None]\n + [date(2022, 1, 1), date(2022, 1, 1)] * 44\n + [None]\n + [date(1999, 12, 31), date(1999, 12, 31)]\n )\n elif pa.types.is_timestamp(pa_dtype):\n data = (\n [datetime(2020, 1, 1, 1, 1, 1, 1), datetime(1999, 1, 1, 1, 1, 1, 1)] * 4\n + [None]\n + [datetime(2020, 1, 1, 1), datetime(1999, 1, 1, 1)] * 44\n + [None]\n + [datetime(2020, 1, 1), datetime(1999, 1, 1)]\n )\n elif pa.types.is_duration(pa_dtype):\n data = (\n [timedelta(1), timedelta(1, 1)] * 4\n + [None]\n + [timedelta(-1), timedelta(0)] * 44\n + [None]\n + [timedelta(-10), timedelta(10)]\n )\n elif pa.types.is_time(pa_dtype):\n data = (\n [time(12, 0), time(0, 12)] * 4\n + [None]\n + [time(0, 0), time(1, 1)] * 44\n + [None]\n + [time(0, 5), time(5, 0)]\n )\n else:\n raise NotImplementedError\n return pd.array(data, dtype=dtype)\n\n\[email protected]\ndef data_missing(data):\n \"\"\"Length-2 array with [NA, Valid]\"\"\"\n return type(data)._from_sequence([None, data[0]])\n\n\[email protected]\ndef na_value():\n \"\"\"The scalar missing value for this type. Default 'None'\"\"\"\n return pd.NA\n\n\nclass TestBaseCasting(base.BaseCastingTests):\n pass\n\n\nclass TestConstructors(base.BaseConstructorsTests):\n @pytest.mark.xfail(\n reason=(\n \"str(dtype) constructs \"\n \"e.g. 
in64[pyarrow] like int64 (numpy) \"\n \"due to StorageExtensionDtype.__str__\"\n )\n )\n def test_from_dtype(self, data):\n super().test_from_dtype(data)\n\n\[email protected](\n raises=NotImplementedError, reason=\"pyarrow.ChunkedArray backing is 1D.\"\n)\nclass TestDim2Compat(base.Dim2CompatTests):\n pass\n\n\[email protected](\n raises=NotImplementedError, reason=\"pyarrow.ChunkedArray backing is 1D.\"\n)\nclass TestNDArrayBacked2D(base.NDArrayBacked2DTests):\n pass\n\n\nclass TestGetitemTests(base.BaseGetitemTests):\n @pytest.mark.xfail(\n reason=(\n \"data.dtype.type return pyarrow.DataType \"\n \"but this (intentionally) returns \"\n \"Python scalars or pd.Na\"\n )\n )\n def test_getitem_scalar(self, data):\n super().test_getitem_scalar(data)\n\n def test_take_series(self, request, data):\n tz = getattr(data.dtype.pyarrow_dtype, \"tz\", None)\n unit = getattr(data.dtype.pyarrow_dtype, \"unit\", None)\n bad_units = [\"ns\"]\n if pa_version_under2p0:\n bad_units.extend([\"s\", \"ms\", \"us\"])\n if pa_version_under3p0 and tz not in (None, \"UTC\") and unit in bad_units:\n request.node.add_marker(\n pytest.mark.xfail(\n reason=(\n f\"Not supported by pyarrow < 3.0 \"\n f\"with timestamp type {tz} and {unit}\"\n )\n )\n )\n super().test_take_series(data)\n\n def test_reindex(self, request, data, na_value):\n tz = getattr(data.dtype.pyarrow_dtype, \"tz\", None)\n unit = getattr(data.dtype.pyarrow_dtype, \"unit\", None)\n bad_units = [\"ns\"]\n if pa_version_under2p0:\n bad_units.extend([\"s\", \"ms\", \"us\"])\n if pa_version_under3p0 and tz not in (None, \"UTC\") and unit in bad_units:\n request.node.add_marker(\n pytest.mark.xfail(\n reason=(\n f\"Not supported by pyarrow < 3.0 \"\n f\"with timestamp type {tz} and {unit}\"\n )\n )\n )\n super().test_reindex(data, na_value)\n\n def test_loc_iloc_frame_single_dtype(self, request, using_array_manager, data):\n tz = getattr(data.dtype.pyarrow_dtype, \"tz\", None)\n unit = getattr(data.dtype.pyarrow_dtype, \"unit\", None)\n bad_units = [\"ns\"]\n if pa_version_under2p0:\n bad_units.extend([\"s\", \"ms\", \"us\"])\n if (\n pa_version_under3p0\n and not using_array_manager\n and tz not in (None, \"UTC\")\n and unit in bad_units\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=(\n f\"Not supported by pyarrow < 3.0 \"\n f\"with timestamp type {tz} and {unit}\"\n )\n )\n )\n super().test_loc_iloc_frame_single_dtype(data)\n\n\nclass TestBaseIndex(base.BaseIndexTests):\n pass\n\n\ndef test_arrowdtype_construct_from_string_type_with_parameters():\n with pytest.raises(NotImplementedError, match=\"Passing pyarrow type\"):\n ArrowDtype.construct_from_string(\"timestamp[s][pyarrow]\")\n" ]
[ [ "pandas.array", "pandas.core.arrays.arrow.dtype.ArrowDtype.construct_from_string", "pandas.core.arrays.arrow.dtype.ArrowDtype" ] ]
SahilC/reinforcement-learning
[ "ed4aa772f7a5c28188887242ba35cd5854a8c78f" ]
[ "assign2/bandit.py" ]
[ "import numpy as np\nclass Bandit:\n def __init__(self, k):\n self.k = k\n \n def action(self, a):\n return np.random.normal(5 + a, 1)\n\n def loss(self, delta, t, numsteps):\n \tloss = [np.random.binomial(1, 0.5, 1) for i in xrange(8)]\n \t\n \tloss.append(np.random.binomial(1, 0.5 - delta, 1))\n \tif t < (numsteps/2):\n \t\tloss.append(np.random.binomial(1, 0.5 + delta, 1))\n \telse:\n \t\tloss.append(np.random.binomial(1, 0.5 - 2*delta, 1))\n \tloss = np.array(loss)\n \treturn loss\n\n" ]
[ [ "numpy.random.normal", "numpy.array", "numpy.random.binomial" ] ]
csmasters/vedastr
[ "7513384ab503f15dc574c7d92b75ff2092354757" ]
[ "vedastr/models/bodies/rectificators/tps_stn.py" ]
[ "# modify from https://github.com/clovaai/deep-text-recognition-benchmark\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nfrom vedastr.models.bodies import build_feature_extractor\nfrom vedastr.models.utils import build_torch_nn, build_module\nfrom .registry import RECTIFICATORS\n\n\[email protected]_module\nclass TPS_STN(nn.Module):\n def __init__(self, F, input_size, output_size, stn):\n super(TPS_STN, self).__init__()\n\n self.F = F\n self.input_size = input_size\n self.output_size = output_size\n\n self.feature_extractor = build_feature_extractor(stn['feature_extractor'])\n self.pool = build_torch_nn(stn['pool'])\n heads = []\n for head in stn['head']:\n heads.append(build_module(head))\n self.heads = nn.Sequential(*heads)\n\n self.grid_generator = GridGenerator(F, output_size)\n\n # Init last fc in heads\n last_fc = heads[-1].fc\n last_fc.weight.data.fill_(0)\n \"\"\" see RARE paper Fig. 6 (a) \"\"\"\n ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))\n ctrl_pts_y_top = np.linspace(0.0, -1.0, num=int(F / 2))\n ctrl_pts_y_bottom = np.linspace(1.0, 0.0, num=int(F / 2))\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)\n initial_bias = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)\n last_fc.bias.data = torch.from_numpy(initial_bias).float().view(-1)\n\n def forward(self, x):\n batch_size = x.size(0)\n\n batch_C_prime = self.feature_extractor(x)\n batch_C_prime = self.pool(batch_C_prime).view(batch_size, -1)\n batch_C_prime = self.heads(batch_C_prime)\n\n build_P_prime_reshape = self.grid_generator(batch_C_prime)\n\n if torch.__version__ > \"1.2.0\":\n out = F.grid_sample(x, build_P_prime_reshape, padding_mode='border', align_corners=True)\n else:\n out = F.grid_sample(x, build_P_prime_reshape, padding_mode='border')\n\n return out\n\n\nclass GridGenerator(nn.Module):\n \"\"\" Grid Generator of RARE, which produces P_prime by multipling T with P \"\"\"\n\n def __init__(self, F, output_size, eps=1e-6):\n \"\"\" Generate P_hat and inv_delta_C for later \"\"\"\n super(GridGenerator, self).__init__()\n self.eps = eps\n self.output_height, self.output_width = output_size\n self.F = F\n self.C = self._build_C(self.F) # F x 2\n self.P = self._build_P(self.output_width, self.output_height)\n ## for multi-gpu, you need register buffer\n self.register_buffer(\"inv_delta_C\", torch.tensor(self._build_inv_delta_C(self.F, self.C)).float()) # F+3 x F+3\n self.register_buffer(\"P_hat\", torch.tensor(self._build_P_hat(self.F, self.C, self.P)).float()) # n x F+3\n ## for fine-tuning with different image width, you may use below instead of self.register_buffer\n #self.inv_delta_C = torch.tensor(self._build_inv_delta_C(self.F, self.C)).float().cuda() # F+3 x F+3\n #self.P_hat = torch.tensor(self._build_P_hat(self.F, self.C, self.P)).float().cuda() # n x F+3\n\n def _build_C(self, F):\n \"\"\" Return coordinates of fiducial points in I_r; C \"\"\"\n ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))\n ctrl_pts_y_top = -1 * np.ones(int(F / 2))\n ctrl_pts_y_bottom = np.ones(int(F / 2))\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)\n C = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)\n\n return C # F x 2\n\n def _build_inv_delta_C(self, F, C):\n \"\"\" Return inv_delta_C which is needed to calculate T \"\"\"\n hat_C = np.zeros((F, F), dtype=float) # F x F\n for i in range(0, 
F):\n for j in range(i, F):\n r = np.linalg.norm(C[i] - C[j])\n hat_C[i, j] = r\n hat_C[j, i] = r\n np.fill_diagonal(hat_C, 1)\n hat_C = (hat_C ** 2) * np.log(hat_C)\n # print(C.shape, hat_C.shape)\n delta_C = np.concatenate( # F+3 x F+3\n [\n np.concatenate([np.ones((F, 1)), C, hat_C], axis=1), # F x F+3\n np.concatenate([np.zeros((2, 3)), np.transpose(C)], axis=1), # 2 x F+3\n np.concatenate([np.zeros((1, 3)), np.ones((1, F))], axis=1) # 1 x F+3\n ],\n axis=0\n )\n inv_delta_C = np.linalg.inv(delta_C)\n\n return inv_delta_C # F+3 x F+3\n\n def _build_P(self, I_r_width, I_r_height):\n I_r_grid_x = (np.arange(-I_r_width, I_r_width, 2) + 1.0) / I_r_width # self.I_r_width\n I_r_grid_y = (np.arange(-I_r_height, I_r_height, 2) + 1.0) / I_r_height # self.I_r_height\n P = np.stack( # self.I_r_width x self.I_r_height x 2\n np.meshgrid(I_r_grid_x, I_r_grid_y),\n axis=2\n )\n\n return P.reshape([-1, 2]) # n (= self.I_r_width x self.I_r_height) x 2\n\n def _build_P_hat(self, F, C, P):\n n = P.shape[0] # n (= self.I_r_width x self.I_r_height)\n P_tile = np.tile(np.expand_dims(P, axis=1), (1, F, 1)) # n x 2 -> n x 1 x 2 -> n x F x 2\n C_tile = np.expand_dims(C, axis=0) # 1 x F x 2\n P_diff = P_tile - C_tile # n x F x 2\n rbf_norm = np.linalg.norm(P_diff, ord=2, axis=2, keepdims=False) # n x F\n rbf = np.multiply(np.square(rbf_norm), np.log(rbf_norm + self.eps)) # n x F\n P_hat = np.concatenate([np.ones((n, 1)), P, rbf], axis=1)\n\n return P_hat # n x F+3\n\n def build_P_prime(self, batch_C_prime, device=None):\n \"\"\" Generate Grid from batch_C_prime [batch_size x F x 2] \"\"\"\n batch_size = batch_C_prime.size(0)\n batch_inv_delta_C = self.inv_delta_C.repeat(batch_size, 1, 1)\n batch_P_hat = self.P_hat.repeat(batch_size, 1, 1)\n batch_C_prime_with_zeros = torch.cat((batch_C_prime, torch.zeros(\n batch_size, 3, 2).float().to(device)), dim=1) # batch_size x F+3 x 2\n batch_T = torch.bmm(batch_inv_delta_C, batch_C_prime_with_zeros) # batch_size x F+3 x 2\n batch_P_prime = torch.bmm(batch_P_hat, batch_T) # batch_size x n x 2\n\n return batch_P_prime # batch_size x n x 2\n\n def forward(self, x):\n batch_size = x.size(0)\n\n build_P_prime = self.build_P_prime(x.view(batch_size, self.F, 2), x.device) # batch_size x n (= output_width x output_height) x 2\n build_P_prime_reshape = build_P_prime.reshape([build_P_prime.size(0), self.output_height, self.output_width, 2])\n\n return build_P_prime_reshape\n" ]
[ [ "numpy.concatenate", "numpy.square", "torch.zeros", "numpy.linalg.norm", "numpy.meshgrid", "numpy.fill_diagonal", "numpy.zeros", "numpy.log", "torch.nn.Sequential", "numpy.ones", "torch.bmm", "torch.from_numpy", "numpy.stack", "torch.nn.functional.grid_sample", "numpy.arange", "numpy.transpose", "numpy.linalg.inv", "numpy.expand_dims" ] ]
usnistgov/sesame
[ "2cfc33b64f4834ca63a31de744d5879e6c5ad342", "2cfc33b64f4834ca63a31de744d5879e6c5ad342" ]
[ "sesame/observables.py", "examples/tutorial2/1d_heterojunction.py" ]
[ "# Copyright 2017 University of Maryland.\n#\n# This file is part of Sesame. It is subject to the license terms in the file\n# LICENSE.rst found in the top-level directory of this distribution.\n\nfrom numpy import exp\nimport numpy as np\n\n\ndef get_n(sys, efn, v, sites):\n \"\"\"\n Compute the electron density on the given sites.\n\n Parameters\n ----------\n sys: Builder\n The discretized system.\n efn: numpy array of floats\n Values of the electron quasi-Fermi level.\n v: numpy array of floats\n Values of the electrostatic potential.\n sites: list of integers\n The sites where the electron density should be computed.\n\n Returns\n -------\n n: numpy array\n \"\"\"\n\n n = sys.Nc[sites] * exp(+sys.bl[sites] + efn[sites] + v[sites])\n return n\n\n\ndef get_p(sys, efp, v, sites):\n \"\"\"\n Compute the hole density on the given sites.\n\n Parameters\n ----------\n sys: Builder\n The discretized system.\n efp: numpy array of floats\n Values of the hole quasi-Fermi level.\n v: numpy array of floats\n Values of the electrostatic potential.\n sites: list of integers\n The sites where the hole density should be computed.\n\n Returns\n -------\n p: numpy array\n \"\"\"\n bl = sys.bl[sites]\n Eg = sys.Eg[sites]\n Nv = sys.Nv[sites]\n p = Nv * exp(-Eg - bl - efp[sites] - v[sites])\n return p\n\n\ndef get_bulk_rr(sys, n, p):\n # Compute the bulk recombination of the entire system for SRH, radiative and\n # Auger mechanisms\n ni2 = sys.ni ** 2\n _np = n * p\n r = (_np - ni2) / (sys.tau_h * (n + sys.n1) + sys.tau_e * (p + sys.p1)) \\\n + (sys.Cn * n + sys.Cp * p) * (_np - ni2) \\\n + sys.B * (_np - ni2)\n return r\n\n\ndef get_bulk_rr_derivs(sys, n, p):\n ni2 = sys.ni ** 2\n _np = n * p\n\n defn = (_np * (sys.tau_h * (n + sys.n1) + sys.tau_e * (p + sys.p1)) - (_np - ni2) * n * sys.tau_h) \\\n / (sys.tau_h * (n + sys.n1) + sys.tau_e * (p + sys.p1)) ** 2 \\\n + sys.Cn * n * (2 * _np - ni2) + sys.Cp * _np * p \\\n + sys.B * _np\n\n defp = -(_np * (sys.tau_h * (n + sys.n1) + sys.tau_e * (p + sys.p1)) - (_np - ni2) * p * sys.tau_e) \\\n / (sys.tau_h * (n + sys.n1) + sys.tau_e * (p + sys.p1)) ** 2 \\\n + sys.Cn * n * _np + sys.Cp * p * (2 * _np - ni2) \\\n + sys.B * _np\n\n dv = (_np - ni2) * (sys.tau_e * p - sys.tau_h * n) \\\n / (sys.tau_h * (n + sys.n1) + sys.tau_e * (p + sys.p1)) ** 2 \\\n + sys.Cn * n * (_np - ni2) - sys.Cp * p * (_np - ni2)\n\n return defn, defp, dv\n\n\ndef get_jn(sys, efn, v, sites_i, sites_ip1, dl):\n \"\"\"\n Compute the electron current between sites ``site_i`` and ``sites_ip1``.\n\n Parameters\n ----------\n sys: Builder\n The discretized system.\n efn: numpy array of floats\n Values of the electron quasi-Fermi level for the entire system (as given\n by the drift diffusion Poisson solver).\n v: numpy array of floats\n Values of the electrostatic potential for the entire system (as given\n by the drift diffusion Poisson solver).\n sites_i: list of integers\n Indices of the sites the current is coming from.\n sites_ip1: list of integers\n Indices of the sites the current is going to.\n dl: numpy arrays of floats\n Lattice distances between sites ``sites_i`` and sites ``sites_ip1``.\n\n Returns\n -------\n jn: numpy array of floats\n \"\"\"\n\n # tol1 controls the minimum value of dv. 
all values less than tol1 are set equal to tol1\n tol1 = 1e-12\n # tol2 controls threshold for taylor series expansion of jp in terms of dv0: series expansion is used if dv0<tol2\n tol2 = 1e-5\n # tol3 controls threshold for taylor series expansion of jp in terms of defp: series expansion is used if defp<tol3\n tol3 = 1e-9\n # this description of tol variables applies for the jp function, and jn and jp derivative functions\n\n vp0 = v[sites_i] + sys.bl[sites_i] + np.log(sys.Nc[sites_i])\n dv = vp0 - (v[sites_ip1] + sys.bl[sites_ip1] + np.log(sys.Nc[sites_ip1]))\n dv0 = dv\n dv = dv + (np.abs(dv) < tol1) * tol1\n\n efnp0 = efn[sites_i]\n efnp1 = efn[sites_ip1]\n defn = efnp1 - efnp0\n mu = sys.mu_e[sites_i]\n\n\n jn = ( mu * exp(efnp1)*(1 - exp(efnp0-efnp1)) / dl * dv / (-exp(-vp0) * (1 - exp(dv))) * (np.abs(dv0) >= tol2) + \\\n -1 * mu * exp(efnp1)*(1 - exp(efnp0-efnp1)) / dl / (-exp(-vp0) * (1 + .5 * dv0 + 1/6.*(dv0)**2)) * (np.abs(dv0) < tol2)) * (np.abs(defn)>=tol3) + \\\n ( mu * exp(efnp1)*(-(efnp0 - efnp1)) / dl * dv / (-exp(-vp0) * (1 - exp(dv))) * (np.abs(dv0) >= tol2) + \\\n -1 * mu * exp(efnp1)*(-(efnp0 - efnp1)) / dl / (-exp(-vp0) * (1 + .5 * dv0 + 1 / 6. * (dv0) ** 2)) * (np.abs(dv0) < tol2)) * (np.abs(defn) < tol3)\n\n\n return jn\n\n\ndef get_jp(sys, efp, v, sites_i, sites_ip1, dl):\n \"\"\"\n Compute the hole current between sites ``site_i`` and ``sites_ip1``.\n\n Parameters\n ----------\n sys: Builder\n The discretized system.\n efp: numpy array of floats\n Values of the hole quasi-Fermi level for the entire system (as given\n by the drift diffusion Poisson solver).\n v: numpy array of floats\n Values of the electrostatic potential for the entire system (as given\n by the drift diffusion Poisson solver).\n sites_i: list of integers\n Indices of the sites the current is coming from.\n sites_ip1: list of integers\n Indices of the sites the current is going to.\n dl: numpy arrays of floats\n Lattice distances between sites ``sites_i`` and sites ``sites_ip1``.\n\n Returns\n -------\n jp: numpy array of floats\n \"\"\"\n tol1 = 1e-12\n tol2 = 1e-5\n tol3 = 1e-9\n\n vp0 = v[sites_i] + sys.bl[sites_i] + sys.Eg[sites_i] - np.log(sys.Nv[sites_i])\n dv = vp0 - (v[sites_ip1] + sys.bl[sites_ip1] + sys.Eg[sites_ip1] - np.log(sys.Nv[sites_ip1]))\n dv0 = dv\n dv = dv + (np.abs(dv) < tol1) * tol1\n\n efpp0 = -efp[sites_i]\n efpp1 = -efp[sites_ip1]\n defp = efpp1 - efpp0\n\n mu = sys.mu_h[sites_i]\n\n jp = (mu * exp(efpp1) * (1 - exp(efpp0-efpp1)) / dl * dv / (-exp(vp0) * (1 - exp(-dv))) * (np.abs(dv0) >= tol2) + \\\n mu * exp(efpp1) * (1 - exp(efpp0-efpp1)) / dl * 1 / (-exp(vp0) * (1 - .5*(dv0) + 1/6.*(dv0)**2.)) * (np.abs(dv0) < tol2)) * (np.abs(defp) >= tol3) + \\\n (mu * exp(efpp1) * ( -(efpp0 - efpp1)) / dl * dv / (-exp(vp0) * (1 - exp(-dv))) * (np.abs(dv0) >= tol2) + \\\n mu * exp(efpp1) * ( -(efpp0 - efpp1)) / dl * 1 / (-exp(vp0) * (1 - .5 * (dv0) + 1 / 6. * (dv0) ** 2.)) * (np.abs(dv0) < tol2)) * (np.abs(defp) < tol3)\n\n\n return jp\n\n\ndef get_jn_derivs(sys, efn, v, sites_i, sites_ip1, dl):\n tol1 = 1e-12\n tol2 = 1e-5\n tol3 = 1e-9\n\n\n vp0 = v[sites_i] + sys.bl[sites_i] + np.log(sys.Nc[sites_i])\n vp1 = v[sites_ip1] + sys.bl[sites_ip1] + np.log(sys.Nc[sites_ip1])\n dv = vp0 - vp1\n dv0 = dv\n dv = dv + (np.abs(dv) < tol1) * tol1\n\n efnp0 = efn[sites_i]\n efnp1 = efn[sites_ip1]\n defn = efnp1 - efnp0\n mu = sys.mu_e[sites_i]\n\n ev0 = exp(-vp0)\n ep1 = exp(efnp1)\n ep0 = exp(efnp0)\n\n\n defn_i = (1. 
/ dl * exp(efnp0 + vp0) * (dv) / (1 - exp(dv)) * (np.abs(dv0) >= tol2) + \\\n -1. / dl * exp(efnp0 + vp0) / (1 + .5*dv0 + 1/6.*dv0**2) * (np.abs(dv0) < tol2)) * (np.abs(defn) >= tol3) + \\\n (1. * exp(efnp1) / dl * exp(vp0) * (dv) / (1 - exp(dv)) * (np.abs(dv0) >= tol2) + \\\n -1. * exp(efnp1) / dl * exp(vp0) / (1 + .5 * dv0 + 1 / 6. * dv0 ** 2) * (np.abs(dv0) < tol2)) * (np.abs(defn) < tol3)\n\n defn_ip1 = (-1. / dl * exp(efnp1 + vp0) * (dv) / (1 - exp(dv)) * (np.abs(dv0) >= tol2) + \\\n 1. / dl * exp(efnp1 + vp0) / (1 + .5*dv0 + 1/6.*dv0**2) * (np.abs(dv0) < tol2)) * (np.abs(defn) >= tol3) + \\\n (-1. * exp(efnp1) *(1-(efnp0 - efnp1))/ dl * exp(vp0) * (dv) / (1 - exp(dv)) * (np.abs(dv0) >= tol2) + \\\n 1. * exp(efnp1) *(1-(efnp0 - efnp1))/ dl * exp(vp0) / (1 + .5 * dv0 + 1 / 6. * dv0 ** 2) * (np.abs(dv0) < tol2)) * (np.abs(defn) < tol3)\n\n dv_i = (-exp(efnp1)*(1 - exp(efnp0-efnp1)) / dl * ev0 * (1 + dv - exp(dv)) / (ev0 ** 2 * (exp(dv) - 1) ** 2) * (np.abs(dv0) >= tol2) + \\\n -6*exp(vp0) * exp(efnp1)*(1 - exp(efnp0-efnp1)) / dl * (3 + vp0 + vp0**2 - 2*vp0*vp1 + vp1*(-1 + vp1)) \\\n / (6 + vp0**2 + vp0*(3 - 2*vp1) + vp1*(-3 + vp1))**2 * (np.abs(dv0) < tol2)) * (np.abs(defn)>=tol3) + \\\n (-exp(efnp1) * ( -(efnp0 - efnp1)) / dl * ev0 * (1 + dv - exp(dv)) / (ev0 ** 2 * (exp(dv) - 1) ** 2) * (np.abs(dv0) >= tol2) + \\\n -6 * exp(vp0) * exp(efnp1) * (-(efnp0 - efnp1)) / dl * (3 + vp0 + vp0 ** 2 - 2 * vp0 * vp1 + vp1 * (-1 + vp1)) \\\n / (6 + vp0 ** 2 + vp0 * (3 - 2 * vp1) + vp1 * (-3 + vp1)) ** 2 * (np.abs(dv0) < tol2)) * (np.abs(defn) < tol3)\n\n dv_ip1 = (-1. / dl * exp(efnp1)*(1 - exp(efnp0-efnp1)) * exp(-vp1) * (1 - dv - exp(-dv)) / (exp(-2 * vp1) * (1 - exp(-dv)) ** 2) * (np.abs(dv0) >= tol2) + \\\n -6 * exp(vp0) * exp(efnp1)*(1 - exp(efnp0-efnp1)) / dl * (3 + 2*vp0 - 2*vp1)\\\n / (6 + vp0**2 + vp0*(3 - 2*vp1) + vp1*(-3 + vp1))**2 * (np.abs(dv0) < tol2)) * (np.abs(defn) >= tol3) + \\\n (-1. / dl * exp(efnp1) * (-(efnp0 - efnp1)) * exp(-vp1) * (1 - dv - exp(-dv)) / (exp(-2 * vp1) * (1 - exp(-dv)) ** 2) * (np.abs(dv0) >= tol2) + \\\n -6 * exp(vp0) * exp(efnp1) * (-(efnp0 - efnp1)) / dl * (3 + 2 * vp0 - 2 * vp1) \\\n / (6 + vp0 ** 2 + vp0 * (3 - 2 * vp1) + vp1 * (-3 + vp1)) ** 2 * (np.abs(dv0) < tol2)) * (np.abs(defn) < tol3)\n\n\n return mu * defn_i, mu * defn_ip1, mu * dv_i, mu * dv_ip1\n\n\ndef get_jp_derivs(sys, efp, v, sites_i, sites_ip1, dl):\n tol1 = 1e-12\n tol2 = 1e-5\n tol3 = 1e-9\n\n vp0 = v[sites_i] + sys.bl[sites_i] + sys.Eg[sites_i] - np.log(sys.Nv[sites_i])\n vp1 = v[sites_ip1] + sys.bl[sites_ip1] + sys.Eg[sites_ip1] - np.log(sys.Nv[sites_ip1])\n dv = vp0 - vp1\n dv0 = dv\n dv = dv + (np.abs(dv) < tol1) * tol1\n\n efpp0 = -efp[sites_i]\n efpp1 = -efp[sites_ip1]\n defp = efpp1 - efpp0\n mu = sys.mu_h[sites_i]\n\n ev0 = exp(vp0)\n ep1 = exp(efpp1)\n ep0 = exp(efpp0)\n\n defp_i = -(exp(efpp0 - vp0) * dv / (dl * (1 - exp(-dv))) * (np.abs(dv0) >= tol2) + \\\n exp(efpp0 - vp0) / (dl) / (1 - .5*(vp0-vp1) + 1/6.*(vp0-vp1)**2.) * (np.abs(dv0) < tol2)) * (np.abs(defp)>=tol3) + \\\n -(exp(efpp1) * exp(-vp0) * dv / (dl * (1 - exp(-dv))) * (np.abs(dv0) >= tol2) + \\\n exp(efpp1) * exp(-vp0) / (dl) / (1 - .5 * (vp0 - vp1) + 1 / 6. * (vp0 - vp1) ** 2.) * (np.abs(dv0) < tol2)) * (np.abs(defp) < tol3)\n\n defp_ip1 = -(-exp(efpp1 - vp0) * dv / (dl * (1 - exp(-dv))) * (np.abs(dv0) >= tol2) + \\\n -exp(efpp1 - vp0) / (dl) / (1 - .5*(vp0-vp1) + 1/6.*(vp0-vp1)**2.) 
* (np.abs(dv0) < tol2)) * (np.abs(defp)>=tol3) + \\\n -(-exp(efpp1) * exp(-vp0)*(1-(efpp0 - efpp1)) * dv / (dl * (1 - exp(-dv))) * (np.abs(dv0) >= tol2) + \\\n -exp(efpp1) * exp(-vp0)*(1-(efpp0 - efpp1)) / (dl) / (1 - .5*(vp0-vp1) + 1/6.*(vp0-vp1)**2.) * (np.abs(dv0) < tol2)) * (np.abs(defp) < tol3)\n\n dv_i = (-exp(efpp0)*(1 - exp(efpp1-efpp0)) * ev0 * (exp(-dv) + (-1 + dv)) / (dl * exp(2 * vp0) * (1 - exp(-dv)) ** 2) * (np.abs(dv0) >= tol2) + \\\n -6* exp(efpp0)*(1 - exp(efpp1-efpp0)) / dl * (-exp(-vp0)) * (3 + (-1 + vp0)*vp0 + vp1 - 2*vp0*vp1 + vp1**2) \\\n / (6 + vp0**2 + vp1*(3+vp1) - vp0*(3 + 2*vp1)) ** 2 * (np.abs(dv0) < tol2)) * (np.abs(defp) >= tol3) + \\\n (-exp(efpp0) * (-(efpp1 - efpp0)) * ev0 * (exp(-dv) + (-1 + dv)) / (dl * exp(2 * vp0) * (1 - exp(-dv)) ** 2) * (np.abs(dv0) >= tol2) + \\\n -6 * exp(efpp0) * (-(efpp1 - efpp0)) / dl * (-exp(-vp0)) * (3 + (-1 + vp0) * vp0 + vp1 - 2 * vp0 * vp1 + vp1 ** 2) \\\n / (6 + vp0 ** 2 + vp1 * (3 + vp1) - vp0 * (3 + 2 * vp1)) ** 2 * (np.abs(dv0) < tol2)) * (np.abs(defp) < tol3)\n\n dv_ip1 = (-exp(efpp0)*(1 - exp(efpp1-efpp0)) * ev0 * (1 + exp(-dv) * (-1 - dv)) / (dl * exp(2 * vp0) * (1 - exp(-dv)) ** 2) * (np.abs(dv0) >= tol2) + \\\n 6 * exp(efpp0)*(1 - exp(efpp1-efpp0)) / dl * (-exp(-vp0)) * (-3 + 2*vp0 - 2*vp1) \\\n / (6 + vp0 ** 2 + vp1 * (3 + vp1) - vp0 * (3 + 2 * vp1)) ** 2 * (np.abs(dv0) < tol2)) * (np.abs(defp) >= tol3) + \\\n (-exp(efpp0) * (-(efpp1 - efpp0)) * ev0 * (1 + exp(-dv) * (-1 - dv)) / (dl * exp(2 * vp0) * (1 - exp(-dv)) ** 2) * (np.abs(dv0) >= tol2) + \\\n 6 * exp(efpp0) * (-(efpp1 - efpp0)) / dl * (-exp(-vp0)) * (-3 + 2 * vp0 - 2 * vp1) \\\n / (6 + vp0 ** 2 + vp1 * (3 + vp1) - vp0 * (3 + 2 * vp1)) ** 2 * (np.abs(dv0) < tol2)) * (np.abs(defp) < tol3)\n\n\n return mu * defp_i, mu * defp_ip1, mu * dv_i, mu * dv_ip1\n\n\ndef get_srh_rr_derivs(sys, n, p, n1, p1, tau_e, tau_h):\n ni2 = n1 * p1\n _np = n * p\n\n defn = (_np * (tau_h * (n + n1) + tau_e * (p + p1)) - (_np - ni2) * n * tau_h) \\\n / (tau_h * (n + n1) + tau_e * (p + p1)) ** 2\n defp = -(_np * (tau_h * (n + n1) + tau_e * (p + p1)) - (_np - ni2) * p * tau_e) \\\n / (tau_h * (n + n1) + tau_e * (p + p1)) ** 2\n dv = (_np - ni2) * (tau_e * p - tau_h * n) / (tau_h * (n + n1) + tau_e * (p + p1)) ** 2\n\n return defn, defp, dv\n\n", "import sesame\nimport numpy as np\nimport scipy.io\nfrom scipy.io import savemat\n\n\nt1 = 25*1e-7 # thickness of CdS\nt2 = 4*1e-4 # thickness of CdTe\n\n# Heterojunctions require dense mesh near the interface\ndd = 1e-7 # 2*dd is the distance over which mesh is refined\n# Define the mesh\nx = np.concatenate((np.linspace(0, dd, 10, endpoint=False), # L contact interface\n np.linspace(dd, t1-dd, 50, endpoint=False), # material 1\n np.linspace(t1 - dd, t1 + dd, 10, endpoint=False), # interface 1\n np.linspace(t1 + dd, (t1+t2) - dd, 100, endpoint=False), # material 2\n np.linspace((t1+t2) - dd, (t1+t2), 10))) # R contact interface\n\n# Build system\nsys = sesame.Builder(x)\n\n# CdS material dictionary\nCdS = {'Nc': 2.2e18, 'Nv':1.8e19, 'Eg':2.4, 'epsilon':10, 'Et': 0,\n 'mu_e':100, 'mu_h':25, 'tau_e':1e-8, 'tau_h':1e-13,\n 'affinity': 4.}\n# CdTe material dictionary\nCdTe = {'Nc': 8e17, 'Nv': 1.8e19, 'Eg':1.5, 'epsilon':9.4, 'Et': 0,\n 'mu_e':320, 'mu_h':40, 'tau_e':5e-9, 'tau_h':5e-9,\n 'affinity': 3.9}\n\n# CdS region\nCdS_region = lambda x: x<=t1\n# CdTe region\nCdTe_region = lambda x: x>t1\n\n# Add the material to the system\nsys.add_material(CdS, CdS_region) # adding CdS\nsys.add_material(CdTe, CdTe_region) # adding CdTe\n\n# Add the 
donors\nnD = 1e17 # donor density [cm^-3]\nsys.add_donor(nD, CdS_region)\n# Add the acceptors\nnA = 1e15 # acceptor density [cm^-3]\nsys.add_acceptor(nA, CdTe_region)\n\n# Define contacts: CdS contact is Ohmic, CdTe contact is Schottky\nLcontact_type, Rcontact_type = 'Ohmic', 'Schottky'\nLcontact_workFcn, Rcontact_workFcn = 0, 5.0 # Lcontact work function irrelevant because L contact is Ohmic\n# Add the contacts\nsys.contact_type(Lcontact_type, Rcontact_type, Lcontact_workFcn, Rcontact_workFcn)\n\n# Define the surface recombination velocities for electrons and holes [cm/s]\nScontact = 1.16e7 # [cm/s]\n# non-selective contacts\nSn_left, Sp_left, Sn_right, Sp_right = Scontact, Scontact, Scontact, Scontact\n# This function specifies the simulation contact recombination velocity\nsys.contact_S(Sn_left, Sp_left, Sn_right, Sp_right)\n\n# Add illumination\nphi0 = 1e17 # incoming flux [1/(cm^2 sec)]\nalpha = 2.3e4 # absorption coefficient [1/cm]\n# Define a function for illumination profile\nf = lambda x: phi0*alpha*np.exp(-x*alpha) # f is an \"inline\" function\n# This function adds generation to the simulation\nsys.generation(f)\n\n# Specify the applied voltage values\nvoltages = np.linspace(0,1,21)\n# Perform I-V calculation\nj = sesame.IVcurve(sys, voltages, '1dhetero_V')\nj = j * sys.scaling.current\n\nresult = {'v':voltages, 'j':j}\nnp.save('IV_values', result)\n\n# plot I-V curve\ntry:\n import matplotlib.pyplot as plt\n plt.plot(voltages, j,'-o')\n plt.xlabel('Voltage [V]')\n plt.ylabel('Current [A/cm^2]')\n plt.grid() # add grid\n plt.show() # show the plot on the screen\n# no matplotlib installed\nexcept ImportError:\n print(\"Matplotlib not installed, can't make plot\")\n\n\n" ]
[ [ "numpy.abs", "numpy.exp", "numpy.log" ], [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.plot", "numpy.exp", "numpy.save", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "numpy.linspace" ] ]
dpfranke/qtt
[ "f60e812fe8b329e67f7b38d02eef552daf08d7c9", "f60e812fe8b329e67f7b38d02eef552daf08d7c9" ]
[ "docs/notebooks/qtt_example.py", "qtt/legacy.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\" Example script to show QTT capabilities\n\n@author: eendebakpt\n\"\"\"\n\n#%% Load packages\nfrom imp import reload\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tempfile\nfrom collections import OrderedDict\n\nimport qcodes\nfrom qcodes import MatPlot\n\nimport qtt\nfrom qtt.gui.parameterviewer import createParameterWidget\nfrom qtt.algorithms.gatesweep import analyseGateSweep\nfrom qtt.measurements.scans import scanjob_t\nfrom qtt.instrument_drivers.virtual_gates import virtual_gates\nfrom qtt import save_state, load_state\nimport qtt.measurements.videomode\n\nimport qtt.simulation.virtual_dot_array\n\ndatadir = tempfile.mkdtemp(prefix='qtt_example')\nqcodes.DataSet.default_io = qcodes.DiskIO(datadir)\n\n\n#%% Create a virtual model for testing\n#\n# The model resembles the spin-qubit dot setup. The hardware consists of a virtual\n# keithley, IVVI racks and a virtual gates object\n\nnr_dots = 3\nstation = qtt.simulation.virtual_dot_array.initialize(reinit=True, nr_dots=nr_dots, maxelectrons=2)\n\nkeithley1 = station.keithley1\nkeithley3 = station.keithley3\n\n# virtual gates for the model\ngates = station.gates\nmodel = station.model\n\n#%% Setup measurement windows\n\n\nmwindows = qtt.gui.live_plotting.setupMeasurementWindows(station, create_parameter_widget=False)\npv = createParameterWidget([gates, ])\n\nlogviewer = qtt.gui.dataviewer.DataViewer()\nlogviewer.show()\n\n#%% Read out instruments\nprint('value: %f' % keithley3.readnext())\nsnapshotdata = station.snapshot()\n\n\n#%% Simple 1D scan loop\n\nparam_left=station.model.bottomgates[0]\nparam_right=station.model.bottomgates[-1]\nscanjob = scanjob_t({'sweepdata': dict({'param': param_right, 'start': -500, 'end': 1, 'step': .8, 'wait_time': 3e-3}), 'minstrument': ['keithley3.amplitude']})\ndata1d = qtt.measurements.scans.scan1D(station, scanjob, location=None, verbose=1)\n\n\n#%% Save the current state of the system to disk\n\nsave_state(station)\n\n#%% Print the scanned data\n\nprint(data1d.default_parameter_name())\n\n#%% Make a 2D scan\nstart = -500\nscanjob = scanjob_t()\nscanjob.add_sweep(param_right, start=start, end=start+400, step=4., wait_time=0.)\nscanjob.add_sweep(param_left, start=start, end=start+400, step=5)\nscanjob.add_minstrument(['keithley1.amplitude'])\ndata = qtt.measurements.scans.scan2D(station, scanjob)\n\ngates.set(param_right, -300); gates.set(param_left, -300)\ngv=gates.allvalues()\n\n#%% Fit 1D pinch-off scan:\n\nadata = analyseGateSweep(data1d, fig=100)\n\n#%% Fit 2D cross\ntry:\n from projects.autotune4dot.autotuning import analyse2dot\n qtt.measurements.scans.plotData(data, fig=30)\n \n pt, resultsfine = analyse2dot(data, fig=300, efig=400, istep=1, verbose=2)\nexcept:\n pass\n \n#%% Make virtual gates\nnp.set_printoptions(precision=2, suppress=True)\n\ncrosscap_map = OrderedDict((\n('VP1', OrderedDict((('P1', 1), ('P2', 0.56), ('P3', 0.15)))),\n('VP2', OrderedDict((('P1', 0.62), ('P2', 1), ('P3', 0.593)))),\n('VP3', OrderedDict((('P1', 0.14), ('P2', 0.62), ('P3', 1))))\n))\nvirts = virtual_gates(qtt.measurements.scans.instrumentName('vgates'), gates, crosscap_map)\nvirts.print_matrix()\n\ngates.resetgates(gv, gv, verbose=0)\n\nvirts.VP2.set(-60)\n\ncc1= virts.VP1()\ncc2=virts.VP2()\nr=80\nscanjob = scanjob_t({'sweepdata': dict({'param': virts.VP1, 'start': cc1-100, 'end': cc1 + 100, 'step': 4.}), 'minstrument': ['keithley1.amplitude'], 'wait_time': 0.})\nscanjob['stepdata'] = dict({'param': virts.VP2, 'start': cc2 - r, 'end': cc2 +r, 'step': 2.})\ndata 
= qtt.measurements.scans.scan2D(station, scanjob)\ngates.resetgates(gv, gv, verbose=0)\n\n\nprint('virtual and physical gates: ' + ','.join( '%.2f' % x for x in [virts.VP1(),virts.VP2(),virts.VP3(), gates.P1(), gates.P2(), gates.P3() ]) )\n\nvgates=['vSD1b'] + virts.vgates() + ['vSD1a']\npgates=['SD1b'] + virts.pgates() + ['SD1a']\nvirts2= qtt.instrument_drivers.virtual_gates.extend_virtual_gates(vgates, pgates, virts, name='vgates')\n \n#%% Send data to powerpoint\nprint('add copy data to Powerpoint use the following:')\nprint(' qtt.utilities.tools.addPPT_dataset(data);')\nif 0:\n qtt.utilities.tools.addPPT_dataset(data)\n\n#%% Test objects\n\nqtt.instrument_drivers.virtual_gates.test_virtual_gates()\nqtt.measurements.scans.test_scan2D()\n\n#%% Start videomode\n\ndigitizer=station.sdigitizer\nstation.awg=station.vawg\n\n\nprint('starting videomode in background...')\ngates.P3.increment(40)\nvm = qtt.measurements.videomode.VideoMode(station, ['P1', 'P2'], [160]*2, \n minstrument=(digitizer.name,[1,1]), resolution = [96,96],\n diff_dir=[None, 'g'], name='physical gates' )\nvm.crosshair(True)\nvm.stopreadout()\nvm.updatebg()\n\n\n#%%\n#gates.P3.increment(-40)\n\ns1=qtt.measurements.scans.create_vectorscan(virts.VP1, 160)\ns2=qtt.measurements.scans.create_vectorscan(virts.VP2, 160)\nvm = qtt.measurements.videomode.VideoMode(station, {'gates_horz': s1['param'],'gates_vert': s2['param']}, [200,180], \n minstrument=(digitizer.name,[1,1]), resolution = [96,96],\n diff_dir=[None, 'g'], name='virtual gates' )\nvm.crosshair(True)\nvm.stopreadout()\nvm.updatebg() ", "\"\"\" Legacy functions (do not use) \"\"\"\nimport copy\n\nimport numpy as np\nimport scipy\nimport matplotlib\nimport sys\nimport os\nimport logging\nimport cv2\nimport time\nimport math\nimport pickle\nimport warnings\n\nimport qcodes\n# explicit import\nfrom qcodes.plots.pyqtgraph import QtPlot\nfrom qcodes.plots.qcmatplotlib import MatPlot\nfrom qtt.algorithms.images import straightenImage\n\nimport qtt.data\nfrom qtt.data import loadExperimentData\nimport qtt.algorithms.onedot \nfrom qtt.measurements.scans import scanjob_t\nimport matplotlib.pyplot as plt\nimport datetime\n\nfrom qtt.measurements.scans import sample_data_t, enforce_boundaries\n\n#%%\n\nfrom qtt.data import dataset2Dmetadata, dataset2image\n\nfrom qtt.algorithms.onedot import onedotGetBalanceFine\nfrom qtt.measurements.scans import fixReversal\nfrom qtt.data import load_data, show2D\nfrom qtt.utilities.tools import diffImage, diffImageSmooth, rdeprecated\nfrom qtt.algorithms.generic import smoothImage\n#from qtt.measurements.scans import scanPinchValue\n\n\nfrom qtt import pgeometry as pmatlab\nfrom qtt.pgeometry import plotPoints, tilefigs\n\nwarnings.warn('please do not this import this module')\n\n#%%\n\ntry:\n import graphviz\nexcept:\n pass\nimport matplotlib.pyplot as plt\n\n@rdeprecated(expire='1 Sep 2018')\ndef showDotGraph(dot, fig=10):\n dot.format = 'png'\n outfile = dot.render('dot-dummy', view=False)\n print(outfile)\n\n im = plt.imread(outfile)\n plt.figure(fig)\n plt.clf()\n plt.imshow(im)\n plt.tight_layout()\n plt.axis('off')\n\n\n\n#%%\n\n\n@rdeprecated(expire='7-1-2018')\ndef positionScanjob(scanjob, pt):\n \"\"\" Helper function\n\n Changes an existing scanjob to scan at the centre of the specified point\n\n \"\"\"\n scanjob = copy.deepcopy(scanjob)\n sh = float(pt[0] - (scanjob['sweepdata']['start'] + scanjob['sweepdata']['end']) / 2)\n scanjob['sweepdata']['start'] += sh\n scanjob['sweepdata']['end'] += sh\n\n sh = float(pt[1] - 
(scanjob['stepdata']['start'] + scanjob['stepdata']['end']) / 2)\n scanjob['stepdata']['start'] += sh\n scanjob['stepdata']['end'] += sh\n\n return scanjob\n\n\n\n\n#%%\n\n\n@rdeprecated(expire='1-7-2018')\ndef saveImage(resultsdir, name, fig=None, dpi=300, ext='png', tight=False):\n \"\"\" Save matplotlib figure to disk\n\n Arguments\n ---------\n name : str\n name of file to save\n Returns\n -------\n imfilerel, imfile : string\n filenames\n \"\"\"\n imfile0 = '%s.%s' % (name, ext)\n imfile = os.path.join(resultsdir, 'pictures', imfile0)\n qtt.utilities.tools.mkdirc(os.path.join(resultsdir, 'pictures'))\n imfilerel = os.path.join('pictures', imfile0)\n\n if fig is not None:\n plt.figure(fig)\n if tight:\n plt.savefig(imfile, dpi=dpi, bbox_inches='tight', pad_inches=tight)\n else:\n plt.savefig(imfile, dpi=dpi)\n return imfilerel, imfile\n\n\n@rdeprecated(expire='1-7-2019')\ndef plotCircle(pt, radius=11.5, color='r', alpha=.5, linewidth=3, **kwargs):\n \"\"\" Plot a circle in a matplotlib figure\n\n Args:\n pt (array): center of circle\n radius (float): radius of circle\n color (str or list)\n alpha (float): transparency \n \"\"\"\n c2 = plt.Circle(pt, radius, color=color, fill=False, linewidth=3, alpha=alpha, **kwargs)\n plt.gca().add_artist(c2)\n return c2\n\n@rdeprecated(expire='1 Sep 2018')\ndef scaleCmap(imx, setclim=True, verbose=0):\n \"\"\" Scale colormap of sensing dot image \"\"\"\n p99 = np.percentile(imx, 99.9)\n mval = p99\n\n # 0 <-> alpha\n # mval <->1\n\n w = np.array([0, 1])\n\n # cl=(1./mval)*(w)+.2)\n alpha = .23\n cl = (mval / (1 - alpha)) * (w - alpha)\n\n if verbose:\n print('scaleCmap to %.1f %.1f' % (cl[0], cl[1]))\n if setclim:\n plt.clim(cl)\n return cl\n\n\n@rdeprecated(expire='1-1-2019')\ndef writeBatchData(outputdir, tag, timestart, timecomplete):\n tt = datetime.datetime.now().strftime('%d%m%Y-%H%m%S')\n with open(os.path.join(outputdir, '%s-%s.txt' % (tag, tt)), 'wt') as fid:\n fid.write('Tag: %s\\n' % tag)\n fid.write('Time start: %s\\n' % timestart)\n fid.write('Time complete: %s\\n' % timecomplete)\n fid.close()\n print('writeBatchData: %s' % fid.name)\n\n#%%\n\n\n@rdeprecated(expire='1 Sep 2018')\ndef filterBG(imx, ksize, sigma=None):\n \"\"\" Filter away background using Gaussian filter \"\"\"\n # imq = cv2.bilateralFilter(imx.astype(np.float32),9,75,75)\n # imq=cv2.medianBlur(imx.astype(np.uint8), 33)\n\n if ksize % 2 == 0:\n ksize = ksize + 1\n if sigma is None:\n sigma = 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8\n # sigma=.8\n imq = imx.copy()\n imq = cv2.GaussianBlur(imq, (int(ksize), int(ksize)), sigma)\n imq = imx - imq\n return imq\n\n\n@rdeprecated(expire='1 Sep 2018')\ndef filterGabor(im, theta0=-np.pi / 8, istep=1, widthmv=2, lengthmv=10, gammax=1, cut=None, verbose=0, fig=None):\n \"\"\"\n Filter image with Gabor\n\n step is in pixel/mV\n\n Parameters\n ----------\n\n im : array\n input image\n theta0 : float\n angle of Gabor filter (in radians)\n\n \"\"\"\n cwidth = 2. 
* widthmv * np.abs(istep)\n clength = .5 * lengthmv * np.abs(istep)\n\n # odd number, at least twice the length\n ksize = 2 * int(np.ceil(clength)) + 1\n\n if verbose:\n print('filterGabor: kernel size %d %d' % (ksize, ksize))\n print('filterGabor: width %.1f pixel (%.1f mV)' % (cwidth, widthmv))\n print('filterGabor: length %.1f pixel (%.1f mV)' % (clength, lengthmv))\n sigmax = cwidth / 2 * gammax\n sigmay = clength / 2\n\n gfilter = pmatlab.gaborFilter(ksize, sigma=sigmax, theta=theta0, Lambda=cwidth, psi=0, gamma=sigmax / sigmay, cut=cut)\n # gfilter=cv2.getGaborKernel( (ksize,ksize), sigma=sigmax, theta=theta0, lambd=cwidth, gamma=sigmax/sigmay, psi=0*np.pi/2)\n gfilter -= gfilter.sum() / gfilter.size\n imf = cv2.filter2D(im, -1, gfilter)\n\n if fig is not None:\n plt.figure(fig + 1)\n plt.clf()\n plt.imshow(r[0], interpolation='nearest')\n plt.colorbar()\n plt.clim([-1, 1])\n return imf, (gfilter, )\n\n\n#%%\n\n\n\n@rdeprecated(expire='1 Sep 2018')\ndef cmap_map(function, cmap):\n \"\"\" Applies function (which should operate on vectors of shape 3:\n [r, g, b], on colormap cmap. This routine will break any discontinuous points in a colormap.\n \"\"\"\n cdict = cmap._segmentdata\n step_dict = {}\n # Firt get the list of points where the segments start or end\n for key in ('red', 'green', 'blue'):\n step_dict[key] = map(lambda x: x[0], cdict[key])\n step_list = sum(step_dict.values(), [])\n step_list = np.array(list(set(step_list)))\n # Then compute the LUT, and apply the function to the LUT\n reduced_cmap = lambda step: np.array(cmap(step)[0:3])\n old_LUT = np.array(map(reduced_cmap, step_list))\n new_LUT = np.array(map(function, old_LUT))\n # Now try to make a minimal segment definition of the new LUT\n cdict = {}\n for i, key in enumerate(('red', 'green', 'blue')):\n this_cdict = {}\n for j, step in enumerate(step_list):\n if step in step_dict[key]:\n this_cdict[step] = new_LUT[j, i]\n elif new_LUT[j, i] != old_LUT[j, i]:\n this_cdict[step] = new_LUT[j, i]\n colorvector = sorted(map(lambda x: x + (x[1], ), this_cdict.items()))\n cdict[key] = colorvector\n\n return matplotlib.colors.LinearSegmentedColormap('colormap', cdict, 1024)\n\n\n@rdeprecated(expire='1 Sep 2018')\ndef cmap_discretize(cmap, N, m=1024):\n \"\"\"Return a discrete colormap from the continuous colormap cmap.\n\n cmap: colormap instance, eg. 
cm.jet.\n N: number of colors.\n\n Example\n x = resize(arange(100), (5,100))\n djet = cmap_discretize(cm.jet, 5)\n imshow(x, cmap=djet)\n \"\"\"\n\n if isinstance(cmap, str):\n cmap = get_cmap(cmap)\n colors_i = np.concatenate((np.linspace(0, 1., N), (0., 0., 0., 0.)))\n colors_rgba = cmap(colors_i)\n indices = np.linspace(0, 1., N + 1)\n cdict = {}\n for ki, key in enumerate(('red', 'green', 'blue')):\n cdict[key] = [\n (indices[i], colors_rgba[i - 1, ki], colors_rgba[i, ki]) for i in range(N + 1)]\n # Return colormap object.\n return matplotlib.colors.LinearSegmentedColormap(cmap.name + \"_%d\" % m, cdict, m)\n\n#%%\n\nfrom qtt.algorithms.misc import polyval2d, polyfit2d\n\nfrom qtt.utilities.imagetools import fitBackground as fitBackgroundTmp\nfrom qtt.utilities.imagetools import cleanSensingImage\n\nfitBackground= qtt.utilities.tools.deprecated(fitBackgroundTmp)\n\[email protected]\ndef showIm(ims, fig=1, title='', showz=False):\n \"\"\" Show image with nearest neighbor interpolation and axis scaling \"\"\"\n plt.figure(fig)\n plt.clf()\n if showz:\n pmatlab.imshowz(ims, interpolation='nearest')\n else:\n plt.imshow(ims, interpolation='nearest')\n plt.axis('image')\n plt.title(title)\n\n\n\n\n#%%\n\n\nfrom qtt.algorithms.misc import point_in_poly, points_in_poly, fillPoly\n\n@rdeprecated(expire='1 Sep 2018')\ndef getPinchvalues(od, xdir, verbose=1):\n \"\"\" Get pinch values from recorded data \"\"\"\n gg = od['gates']\n od['pinchvalues'] = -800 * np.ones(3)\n for jj, g in enumerate(gg):\n # pp='%s-sweep-1d-%s.pickle' % (od['name'], g)\n pp = pinchoffFilename(g, od=None)\n pfile = os.path.join(xdir, pp)\n\n dd, metadata = qtt.data.loadDataset(pfile)\n\n adata = qtt.algorithms.gatesweep.analyseGateSweep(dd, fig=0, minthr=100, maxthr=800, verbose=0)\n if verbose:\n print('getPinchvalues: gate %s : %.2f' % (g, adata['pinchoff_point']))\n od['pinchvalues'][jj] = adata['pinchoff_point']\n return od\n\n\n@rdeprecated(expire='1 Sep 2018')\ndef createDoubleDotJobs(two_dots, one_dots, resultsdir, basevalues=dict(), sdinstruments=[], fig=None, verbose=1):\n \"\"\" Create settings for a double-dot from scans of the individual one-dots \"\"\"\n raise Exception('function was removed from qtt')\n\n\n#%%\n\n@rdeprecated(expire='1-1-2019')\ndef printGateValues(gv, verbose=1):\n s = ', '.join(['%s: %.1f' % (x, gv[x]) for x in sorted(gv.keys())])\n return s\n\n\n@rdeprecated(expire='1-1-2019')\ndef getODbalancepoint(od):\n bp = od['balancepoint']\n if 'balancepointfine' in od:\n bp = od['balancepointfine']\n return bp\n\n\n\n@rdeprecated(expire='1-6-2018')\ndef loadpickle(pkl_file):\n \"\"\" Load objects from file \"\"\"\n try:\n output = open(pkl_file, 'rb')\n data2 = pickle.load(output)\n output.close()\n except:\n if sys.version_info.major >= 3:\n # if pickle file was saved in python2 we might fix issues with a different encoding\n output = open(pkl_file, 'rb')\n data2 = pickle.load(output, encoding='latin')\n # pickle.load(pkl_file, fix_imports=True, encoding=\"ASCII\", errors=\"strict\")\n output.close()\n else:\n data2 = None\n return data2\n\n\n" ]
[ [ "numpy.set_printoptions" ], [ "matplotlib.pyplot.colorbar", "matplotlib.pyplot.clim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.tight_layout", "matplotlib.colors.LinearSegmentedColormap", "matplotlib.pyplot.gca", "matplotlib.pyplot.axis", "numpy.array", "numpy.percentile", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.Circle", "matplotlib.pyplot.clf", "numpy.ceil", "numpy.ones", "numpy.abs", "numpy.linspace", "matplotlib.pyplot.imread", "matplotlib.pyplot.imshow" ] ]
entn-at/filtering
[ "da8edda4c8fb651eb9dbd7b25f7fc8f18b2cc144" ]
[ "lilfilter/local_amplitude.py" ]
[ "# To be run with python3. Caution: this module requires torch!\n\n\"\"\"\nThis module defines an object called Normalizer that can be used to\nnormalize the output of class Multistreamer (from ./multistreamer.py),\nwith a view to making the data easier for neural nets to process.\n\nThe basic idea is that we compute a moving average of the amplitude of the\nsignal within each frequency band, and use that to normalize the signal. (The\nneural net will see both the normalized signal and the log of the normalization\nfactor). The idea is that after possibly being modified by the nnet\n(e.g. denoised), we then 'un-normalize' the signal with the same normalization\nfactor.\n\nWe also provide a factor that can be used as part of the objective function\nif it's desired to put a greater weight on the louder frequency bands for\ntraining purposes.\n\"\"\"\n\n\nimport numpy as np\nimport cmath\nimport math\nimport torch\nfrom . import filter_function\nfrom . import filters\nfrom . import torch_filter\nfrom . import resampler\n\nimport matplotlib.pyplot as plt # TEMP\n\nclass LocalAmplitudeComputer:\n \"\"\"\n This class is a utility for computing the smoothed-over-time local amplitude\n of a signal, to be used in class Normalizer to compute a normalized form of\n the signal.\n \"\"\"\n def __init__(self,\n gaussian_stddev = 100.0,\n epsilon = 1.0e-05,\n block_size = 8,\n double_precision = False):\n \"\"\"\n Constructor.\n Args:\n gaussian_stddev (float): This can be interpreted as a time constant measured\n in samples; for instance, if the sampling rate of the signal\n we are normalizing is 1kHz, gaussian_stddev = 1000 would mean\n we're smoothing with approximately 1 second of data on each\n side.\n epsilon (float): A constant that is used to smooth the instantaneous\n amplitude. Dimensionally this is an amplitude.\n block_size A number which should be substantially less than\n gaussian_stddev. We first sum the data over blocks and then\n do convolutions, efficiency. Any number >= 1 is OK but\n numbers approaching gaussian_stddev may start to affect\n the output\n double_precision If true, create these filters in double precision\n (float64), will require input to be double too.\n \"\"\"\n if block_size < 1 or block_size >= gaussian_stddev / 2:\n raise ValueError(\"Invalid values block-size={}, gaussian-stddev={}\".format(\n block_size, gaussian_stddev))\n\n # reduced_stddev is the stddev after summing over blocks of samples\n # (which reduces the sampling rate by that factor).\n reduced_stddev = gaussian_stddev / block_size\n (f, i) = filters.gaussian_filter(reduced_stddev)\n # We'll be summing, not averaging over blocks, so we need\n # to correct for that factor.\n f *= (1.0 / block_size)\n\n self.epsilon = epsilon\n\n self.dtype = torch.float64 if double_precision else torch.float32\n\n self.gaussian_filter = torch_filter.SymmetricFirFilter(\n (f,i), double_precision = double_precision)\n\n\n self.block_size = block_size\n assert block_size > 1\n\n def compute(self,\n input):\n \"\"\"\n Computes and returns the local energy which is a smoothed version of the\n instantaneous amplitude.\n\n Args:\n input: a torch.Tensor with dimension\n (minibatch_size, 2, num_channels, signal_length)\n representing the (real, imaginary) parts of `num_channels`\n parallel frequency channels. 
dtype should be\n torch.float32 if constructor had double_precision==False,\n else torch.float64.\n Returns:\n Returns a torch.Tensor with dimension (minibatch_size, num_channels,\n signal_length) containing the smoothed local amplitude.\n \"\"\"\n if not isinstance(input, torch.Tensor) or input.dtype != self.dtype:\n raise TypeError(\"Expected input to be of type torch.Tensor with dtype={}\".format(\n self.dtype))\n if len(input.shape) != 4 or input.shape[1] != 2:\n raise ValueError(\"Expected input to have 4 axes with the 2nd dim == 2, got {}\".format(\n input.shape))\n (minibatch_size, two, num_channels, signal_length) = input.shape\n\n\n # We really want shape (minibatch_size, num_channels, signal_length) for\n # instantaneous_amplitude, but we want another array of size (signal_length)\n # containing all ones, for purposes of normalization after applying the\n # Gaussian smoothing (to correct for end effects)..\n amplitudes = torch.empty(\n (minibatch_size * num_channels + 1), signal_length,\n dtype=self.dtype)\n\n # set the last row to all ones.\n amplitudes[minibatch_size*num_channels:,:] = 1\n\n instantaneous_amplitude = amplitudes[0:minibatch_size*num_channels,:].view(\n minibatch_size, num_channels, signal_length)\n instantaneous_amplitude.fill_(self.epsilon*self.epsilon) # set to epsilon...\n instantaneous_amplitude += input[:,0,:,:] ** 2\n instantaneous_amplitude += input[:,1,:,:] ** 2\n instantaneous_amplitude.sqrt_()\n\n\n # summed_amplitudes has num-cols reduced by about self.block_size,\n # which will make convolution with a Gaussian easier.\n summed_amplitudes = self._block_sum(amplitudes)\n\n\n smoothed_amplitudes = self.gaussian_filter.apply(summed_amplitudes)\n assert smoothed_amplitudes.shape == summed_amplitudes.shape\n\n # num_zeros = 4 is a lower-than-normal width for the FIR filter since there\n # won't be frequencies near the Nyquist and we don't need a sharp cutoff.\n # cutoff_ratio = 0.9 is to avoid aliasing effects with this less-precise\n # filter (default is 0.95).\n self.resampler = resampler.Resampler(1, self.block_size, dtype = self.dtype, num_zeros = 4,\n cutoff_ratio = 0.9)\n upsampled_amplitudes = self.resampler.resample(smoothed_amplitudes)\n assert upsampled_amplitudes.shape[1] >= signal_length\n\n\n\n # Truncate to actual signal length (we may have a few extra samples at\n # the end.) Remove the first self.block_size samples to avoid small\n # phase changes, not that it would really matter since the block\n # size will be << the gaussian stddev.\n upsampled_amplitudes = upsampled_amplitudes[:,:signal_length]\n\n n = minibatch_size*num_channels\n # The following corrects for constant factors, including a\n # 1/b factor that we missed when summing over blocks, and also for\n # edge effects so that we can interpret the Gaussian convolution as\n # an appropriately weighted average near the edges of the signal.\n # We took a signal of all-ones and put it through this process\n # as the last row of an n+1-row matrix, and we're using that\n # to normalize.\n # The shapes of the expressions below are, respectively:\n # (minibatch_size*num_channels, signal_length) and (1, signal_length)\n upsampled_amplitudes[0:n,:] /= upsampled_amplitudes[n:,:]\n\n\n # the `contiguous()` below would not be necessary if PyTorch had been\n # more carefully implemented, since the shapes here are quite compatible\n # with zero-copy. 
(Possibly it's not necessary even now, not 100%\n # sure.)\n return upsampled_amplitudes[0:n,:].contiguous().view(minibatch_size, num_channels,\n signal_length)\n\n def _block_sum(self, amplitudes):\n \"\"\"\n This internal function sums the input amplitudes over blocks\n (we do this before the Gaussian filtering to save compute).\n\n Args:\n amplitudes: a torch.Tensor with shape (n, s) with s being the\n signal length and n being some combination of minibatch\n and channel; dtype self.dtype\n Returns:\n returns a torch.Tensor with shape (n, t) where t = (s+2b-1)//b, where\n b is the block_size passed to the constructor. Note that this means\n we are padding with two extra outputs, one zero-valued block at the\n start and also a partial block sum at the end. This is necessary to\n ensure we have enough samples when we upsample the Gaussian-smoothed\n version of this. It also means we get the amplitude sum for time t\n from a Gaussian centered at about t - block_size/2; this is harmless.\n \"\"\"\n amplitudes = amplitudes.contiguous()\n b = self.block_size\n (n, s) = amplitudes.shape\n t = (s + 2 * b - 1) // b\n\n ans = torch.zeros((n, t), dtype=self.dtype)\n\n # make sure `amplitudes` is contiguous.\n\n # t_end will be t-1 if there is a partial block, otherwise t.\n t_whole = s // b # the number of whole sums\n t_end = t_whole + 1\n s_whole = (s // b) * b\n\n # Sum over the b elements of each block.\n ans[:,1:t_end] += amplitudes[:,:s_whole].view(n, t_whole, b).sum(dim=-1)\n if t_end != t:\n # sum over the left-over columns, i.e. sum over k things where k ==\n # s % b\n ans[:,t_end] += amplitudes[:,s_whole:].sum(dim=-1)\n return ans\n\n\n\n\n\n" ]
[ [ "torch.zeros", "torch.empty" ] ]
muzishen/Huawei_Digix_Retrieval_Top4
[ "39151e2f8493221138404e2942afbf03e3afbf08" ]
[ "code/model/backbones/se_resnet_ibn_a.py" ]
[ "from .se_module import SELayer\nimport torch.nn as nn\nimport torch\nimport math\n\n__all__ = ['se_resnet50_ibn_a', 'se_resnet101_ibn_a', 'se_resnet152_ibn_a']\n\ndef conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\n\nclass IBN(nn.Module):\n def __init__(self, planes):\n super(IBN, self).__init__()\n half1 = int(planes/2)\n self.half = half1\n half2 = planes - half1\n self.IN = nn.InstanceNorm2d(half1, affine=True)\n self.BN = nn.BatchNorm2d(half2)\n \n def forward(self, x):\n split = torch.split(x, self.half, 1)\n out1 = self.IN(split[0].contiguous())\n out2 = self.BN(split[1].contiguous())\n out = torch.cat((out1, out2), 1)\n return out\n\n\nclass SEBasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16):\n super(SEBasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes, 1)\n self.bn2 = nn.BatchNorm2d(planes)\n self.se = SELayer(planes, reduction)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.se(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass SEBottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, ibn=False, reduction=16):\n super(SEBottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n if ibn:\n self.bn1 = IBN(planes)\n else:\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.se = SELayer(planes * 4, reduction)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n out = self.se(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\nclass ResNet(nn.Module):\n\n def __init__(self, last_stride,block, layers, frozen_stages=-1, num_classes=1000):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.frozen_stages = frozen_stages\n self.layer1 = self._make_layer(block, 64, layers[0]) \n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride)\n #self.avgpool = nn.AvgPool2d(7)\n #self.fc = nn.Linear(512 * block.expansion, num_classes)\n \n self.conv1.weight.data.normal_(0, math.sqrt(2. 
/ (7 * 7 * 64)))\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.InstanceNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n ibn = True\n if planes == 512:\n ibn = False\n layers.append(block(self.inplanes, planes, stride, downsample, ibn=ibn))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, 1, None, ibn=ibn))\n\n return nn.Sequential(*layers)\n\n def _freeze_stages(self):\n if self.frozen_stages >= 0:\n self.bn1.eval()\n for m in [self.conv1, self.bn1]:\n for param in m.parameters():\n param.requires_grad = False\n\n for i in range(1, self.frozen_stages + 1):\n m = getattr(self, 'layer{}'.format(i))\n print('layer{}'.format(i))\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n #x = self.avgpool(x)\n #x = x.view(x.size(0), -1)\n #x = self.fc(x)\n\n return x\n\n\n def load_param(self, model_path):\n param_dict = torch.load(model_path)\n if 'state_dict' in param_dict:\n param_dict = param_dict['state_dict']\n for i in param_dict:\n if 'fc' in i:\n continue\n self.state_dict()[i.replace('module.','')].copy_(param_dict[i])\n\ndef se_resnet50_ibn_a(last_stride,num_classes=1000,**kwargs):\n \"\"\"Constructs a ResNet-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(last_stride,SEBottleneck, [3, 4, 6, 3],**kwargs)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model\n\n\ndef se_resnet101_ibn_a(last_stride, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(last_stride,SEBottleneck, [3, 4, 23, 3],**kwargs)\n #model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model\n\n\ndef se_resnet152_ibn_a(last_stride, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(last_stride,SEBottleneck, [3, 8, 36, 3],**kwargs)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model\n" ]
[ [ "torch.cat", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.BatchNorm2d", "torch.split", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.InstanceNorm2d", "torch.load", "torch.nn.AdaptiveAvgPool2d" ] ]
SalihTuncer/CoronaSimulation
[ "ee18d5735bc0f583459fc1ee15bbf912348abf18" ]
[ "Simulation.py" ]
[ "# package imports\n# external libraries which need to be installed separately\nimport numpy as np\n# internal python libraries\nfrom pprint import pformat\n\nfrom InfectionChain import InfectionChain\n\n\"\"\"\nThis is the heart of the whole application. This is where the simulation happens.\n\"\"\"\n\n\nclass Simulation:\n \"\"\"\n Contact per person will be calculated depending of the extended circle of friends.\n\n Args:\n _config: includes all the configurations of the simulation.\n \"\"\"\n\n def __init__(self, _config: {str: str}):\n\n self._config = _config\n\n # we document our virus chain\n self._infection_history = []\n # we document the infection counts\n self._viruses = []\n\n # we assume that a person meets every friend two times per year\n # a person meets another person after 'contact_per_person'-days\n self.contact_per_person = 1.75 * round(365 / (2 * self._config['extended_circle_of_friends']))\n # representation of simulation will be printed in the terminal so it looks pretty sick\n print(self)\n\n \"\"\"\n String representation of the object.\n \n If we override the __repr__-method, we can decide what will be printed.\n \n Returns:\n str. the configuration\n \"\"\"\n\n def __repr__(self) -> str:\n return '\\nConfiguration of the simulation:\\n' + pformat(vars(self), indent=2, width=1) + '\\n'\n\n \"\"\"\n Simulates every day until the time is over.\n \n Every day needs to be simulated seperately for every infection chain. So we iterate over every day and every\n chain and let a day pass with virus.day_ends(). When we create a new infection chain, we put that one into our \n numpy array which is used like a database. At the end we return the infection history and all viruses as numpy\n arrays which can then be further processed.\n \n Returns:\n np.ndarray: infection history.\n np.ndarray: all virus chains. \n \"\"\"\n\n def simulate(self) -> (np.ndarray, np.ndarray):\n\n # we add the time of the duration of the infection so we let the virus die at the rest of the time\n for day in range(0, int(self._config['simulation_duration'])):\n # initiate virus\n if day % self.contact_per_person == 0:\n\n if day == 0:\n self._viruses.append(\n InfectionChain(int(day // self.contact_per_person), self._config,\n int(self._config['infection_count_start'])))\n else:\n self._viruses.append(\n InfectionChain(int(day // self.contact_per_person), self._config,\n self._viruses[-1].infection_count))\n\n for idx, virus in enumerate(self._viruses):\n virus.day_ends()\n self._infection_history.append(self.get_total_active_infections_count())\n\n return self.spread_infection_history(), np.array(self._viruses)\n\n \"\"\"\n Indicates how many people are infected in total.\n \n Returns:\n int. amount infections in total.\n \"\"\"\n\n def get_total_active_infections_count(self) -> int:\n return sum([virus.infection_count for virus in self._viruses if virus.lifetime > 0])\n\n \"\"\"\n Indicates how many people are infected right now.\n \n Returns:\n int. amount active infections.\n \"\"\"\n\n def get_total_infections_count(self) -> int:\n return sum([virus.infection_count for virus in self._viruses])\n\n \"\"\"\n Indicates the the incidence-value depending on population and active infections.\n \n Returns:\n np.ndarray. incidence values\n \"\"\"\n\n def get_incidence_values(self) -> np.ndarray:\n return np.array([(virus.infection_count * 100000) / self._config['population'] for virus in self._viruses])\n\n \"\"\"\n Indicates how many people have passed away through the virus.\n \n Returns:\n int. 
mortality.\n \"\"\"\n\n def get_mortality(self) -> int:\n return int(sum([virus.infection_count for virus in self._viruses]) * self._config['death_rate'])\n\n \"\"\"\n Indicates the 7-day incidence-value. \n \n Returns:\n int. incidence value.\n \"\"\"\n\n def get_seven_day_incidence(self) -> int:\n return (self._viruses[-1].infection_count * 100000) / self._config['population']\n\n \"\"\"\n Indicates the amount of infections in the last seven days.\n \n Returns:\n int. amount of total infections.\n \"\"\"\n\n def get_seven_days_total_infections_count(self) -> int:\n return self._viruses[-1].infection_count\n\n \"\"\"\n Distributes the infection numbers realistically over the days so as not to show a linear development.\n \n Returns:\n np.ndarray. distribution. \n \"\"\"\n\n def spread_infection_history(self) -> np.ndarray:\n\n spread = np.zeros(len(self._infection_history) * 7)\n\n for i, infection_count in enumerate(self._infection_history):\n spread[i * 7:(i + 1) * 7] = infection_count / 7\n\n return spread\n" ]
[ [ "numpy.array" ] ]
THGLab/NewtonNet
[ "fcf2af848a1c998bd08096dcefb58a5610eda03c" ]
[ "newtonnet/data/parse_raw.py" ]
[ "import os\nimport numpy as np\nimport warnings\nfrom collections import defaultdict\nfrom numpy.lib.function_base import append\nfrom sklearn.utils import random\nfrom sklearn.utils.random import sample_without_replacement\nfrom sklearn.model_selection import train_test_split\n\nfrom combust.utils.utility import standardize_batch\nfrom combust.utils import DataManager, parse_irc_data\nfrom combust.data import ExtensiveEnvironment, PeriodicEnvironment\nfrom combust.data import extensive_train_loader, extensive_loader_rotwise\n\nfrom ase.io import iread\nimport math\nimport pickle\n\ndef concat_listofdicts(listofdicts, axis=0):\n \"\"\"\n\n Parameters\n ----------\n listofdicts: list\n values must be 2d arrays\n axis: int\n\n Returns\n -------\n dict\n\n \"\"\"\n data = dict()\n for k in listofdicts[0].keys():\n data[k] = np.concatenate([d[k] for d in listofdicts], axis=axis)\n\n return data\n\n\ndef split(data, train_size, test_size, val_size, random_states=90, stratify=None):\n \"\"\"\n\n Parameters\n ----------\n data: dict\n train_size: int\n test_size\n val_size\n random_states\n stratify: None or labels\n\n Returns\n -------\n dict: train data\n dict: val data\n dict: test data\n\n \"\"\"\n\n tr_ind, val_ind = train_test_split(list(range(data['R'].shape[0])),\n test_size=val_size,\n random_state=random_states,\n stratify=stratify)\n\n if stratify is not None:\n stratify_new = stratify[tr_ind]\n else:\n stratify_new = None\n\n tr_ind, te_ind = train_test_split(tr_ind,\n test_size=test_size,\n train_size=train_size,\n random_state=random_states,\n stratify=stratify_new)\n\n train = dict()\n val = dict()\n test = dict()\n for key in data:\n train[key] = data[key][tr_ind]\n val[key] = data[key][val_ind]\n test[key] = data[key][te_ind]\n\n if stratify is not None:\n train['L'] = stratify[tr_ind]\n val['L'] = stratify[val_ind]\n test['L'] = stratify[te_ind]\n\n return train, val, test\n\n\ndef h2_reaction(reaction_number, settings, all_sets):\n \"\"\"\n\n Parameters\n ----------\n reaction_number: int\n\n settings: dict\n dict of yaml file\n\n all_sets: dict\n\n Returns\n -------\n\n \"\"\"\n\n dir_path = settings['data']['root']\n\n # file name prefix\n if reaction_number < 10:\n pre = '0%i'%reaction_number\n elif reaction_number >= 10:\n pre = '%i'%reaction_number\n # elif reaction_number == 6:\n # pre = ['0%ia_irc.npz' % reaction_number, '0%ib_irc.npz' % reaction_number]\n # elif reaction_number == 12:\n # pre = ['%ia_irc.npz' % reaction_number, '%ib_irc.npz' % reaction_number]\n\n # read npz files\n aimd = nm = irc = None\n aimd_path = os.path.join(dir_path, '%s_aimd.npz'%pre)\n if os.path.exists(aimd_path):\n aimd = dict(np.load(aimd_path))\n nm_path = os.path.join(dir_path, '%s_nm.npz'%pre)\n if os.path.exists(nm_path):\n nm = dict(np.load(nm_path))\n irc_path = os.path.join(dir_path, '%s_irc.npz'%pre)\n if os.path.exists(irc_path):\n irc = dict(np.load(irc_path))\n\n # merge aimd and normal mode data\n if settings['data']['normal_mode'] and nm is not None:\n data = dict()\n n_nm = min(settings['data']['size_nmode_max'], nm['R'].shape[0])\n nm_select = sample_without_replacement(nm['R'].shape[0],\n n_nm,\n random_state=settings['data']['random_states'])\n if aimd is not None:\n for k in aimd.keys():\n data[k] = np.concatenate([aimd[k], nm[k][nm_select]], axis=0)\n\n assert data['R'].shape[0] == (aimd['R'].shape[0]+n_nm)\n else:\n data = None\n warnings.warn('both AIMD and normal mode data for reaction# %i are missing.'%reaction_number)\n\n elif aimd is not None:\n data = aimd\n\n 
else:\n data = None\n warnings.warn('both AIMD and normal mode data for reaction# %i are missing.'%reaction_number)\n\n if settings['data']['cgem']:\n assert data['E'].shape == data['CE'].shape\n assert data['F'].shape == data['CF'].shape\n data['E'] = data['E'] - data['CE']\n data['F'] = data['F'] - data['CF']\n irc['E'] = irc['E'] - irc['CE']\n irc['F'] = irc['F'] - irc['CF']\n\n train_size = settings['data']['trsize_perrxn_max']\n if train_size == -1:\n train_size = None # to select all remaining data in each reaction\n\n if data is not None:\n dtrain, dval, dtest = split(data,\n train_size=train_size,\n test_size=settings['data']['test_size'],\n val_size=settings['data']['val_size'],\n random_states=settings['data']['random_states'],\n stratify=None)\n else:\n dtrain, dval, dtest = None, None, None\n\n # compile data sets\n all_sets['train'].append(dtrain)\n all_sets['val'].append(dval)\n all_sets['test'].append(dtest)\n all_sets['irc'].append(irc)\n\n return all_sets\n\n\ndef parse_h2_reaction(settings, device):\n \"\"\"\n\n Parameters\n ----------\n settings: instance of yaml file\n device: torch devices\n\n Returns\n -------\n generator: train, val, irc, test generators, respectively\n int: n_steps for train, val, irc, test, respectively\n tuple: tuple of mean and standard deviation of energies in the training data\n\n \"\"\"\n\n # list of reaction number(s)\n reaction_number = settings['data']['reaction']\n\n if isinstance(reaction_number, int):\n reaction_number = [reaction_number]\n\n # compile dictionary of train, test, val, and irc data\n all_sets = defaultdict(list)\n for rxn_n in reaction_number:\n all_sets = h2_reaction(rxn_n, settings, all_sets)\n\n dtrain = concat_listofdicts(all_sets['train'], axis=0)\n dval = concat_listofdicts(all_sets['val'], axis=0)\n dtest = concat_listofdicts(all_sets['test'], axis=0)\n irc = concat_listofdicts(all_sets['irc'], axis=0)\n\n # final down-sampling of training data\n n_train = settings['data']['train_size']\n if n_train == -1:\n n_train = dtrain['R'].shape[0]\n\n n_train = min(n_train, dtrain['R'].shape[0])\n n_select = sample_without_replacement(dtrain['R'].shape[0],\n n_train,\n random_state=settings['data']['random_states'])\n for k in dtrain.keys():\n dtrain[k] = dtrain[k][n_select]\n\n normalizer = (dtrain['E'].mean(), dtrain['E'].std())\n\n n_tr_data = dtrain['R'].shape[0]\n n_val_data = dval['R'].shape[0]\n n_irc_data = irc['R'].shape[0]\n n_test_data = dtest['R'].shape[0]\n print(\"# data (train,val,test,irc): %i, %i, %i, %i\"%(n_tr_data,n_val_data,n_test_data,n_irc_data))\n\n tr_batch_size = settings['training']['tr_batch_size']\n val_batch_size = settings['training']['val_batch_size']\n tr_rotations = settings['training']['tr_rotations']\n val_rotations = settings['training']['val_rotations']\n\n # freeze rotatios\n # Todo: it seems that we don't need separated tr and val anymore\n # Todo: consider keep_original scenario in the code\n # if settings['training']['tr_frz_rot']:\n # if settings['training']['saved_angle_path']:\n # tr_fra_rot = list(np.load(settings['training']['saved_angle_path']))[:tr_rotations+1]\n # tr_frz_rot = (np.random.uniform(-np.pi, np.pi, size=3)\n # for _ in range(tr_rotations+1))\n # val_frz_rot = tr_frz_rot\n # else:\n # tr_frz_rot = settings['training']['tr_frz_rot']\n # val_frz_rot = settings['training']['val_frz_rot']\n\n # generators\n project = settings['general']['driver']\n if project not in ['voxel_cart_rotwise.py']:\n # steps\n tr_steps = int(np.ceil(n_tr_data / tr_batch_size)) * 
(tr_rotations + 1)\n val_steps = int(np.ceil(n_val_data / val_batch_size)) * (val_rotations + 1)\n irc_steps = int(np.ceil(n_irc_data / val_batch_size)) * (val_rotations + 1)\n test_steps= int(np.ceil(n_test_data / val_batch_size)) * (val_rotations + 1)\n\n env = ExtensiveEnvironment()\n\n train_gen = extensive_train_loader(data=dtrain,\n env_provider=env,\n batch_size=tr_batch_size,\n n_rotations=tr_rotations,\n freeze_rotations=settings['training']['tr_frz_rot'],\n keep_original=settings['training']['tr_keep_original'],\n device=device,\n shuffle=settings['training']['shuffle'],\n drop_last=settings['training']['drop_last'])\n\n val_gen = extensive_train_loader(data=dval,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=settings['training']['shuffle'],\n drop_last=settings['training']['drop_last'])\n\n irc_gen = extensive_train_loader(data=irc,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=False,\n drop_last=False)\n\n test_gen = extensive_train_loader(data=dtest,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=False,\n drop_last=False)\n\n return train_gen, val_gen, irc_gen, test_gen, tr_steps, val_steps, irc_steps, test_steps, normalizer\n\n else:\n\n tr_steps = int(np.ceil(n_tr_data / tr_batch_size))\n val_steps = int(np.ceil(n_val_data / val_batch_size))\n irc_steps = int(np.ceil(n_irc_data / val_batch_size))\n test_steps = int(np.ceil(n_test_data / val_batch_size))\n\n env = ExtensiveEnvironment()\n\n train_gen = extensive_loader_rotwise(data=dtrain,\n env_provider=env,\n batch_size=tr_batch_size,\n n_rotations=tr_rotations,\n freeze_rotations=settings['training']['tr_frz_rot'],\n keep_original=settings['training']['tr_keep_original'],\n device=device,\n shuffle=settings['training']['shuffle'],\n drop_last=settings['training']['drop_last'])\n\n val_gen = extensive_loader_rotwise(data=dval,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=settings['training']['shuffle'],\n drop_last=settings['training']['drop_last'])\n\n irc_gen = extensive_loader_rotwise(data=irc,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=False,\n drop_last=False)\n\n test_gen = extensive_loader_rotwise(data=dtest,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=False,\n drop_last=False)\n\n return train_gen, val_gen, irc_gen, test_gen, tr_steps, val_steps, irc_steps, test_steps, normalizer\n\n\ndef parse_nmr_data(settings, device, test_only=False):\n '''\n parse and load NMR data from tripeptides/3d fragmentations of SPARTA+/SHIFTX2 dataset with NMR chemical shifts calculated with HF/6-31g* or 
experimental chemical shifts\n\n    Parameters\n    ----------\n    settings: instance of yaml file\n    device: list\n        list of torch devices\n    test_only: boolean indicator for whether to only read test data\n\n    Returns\n    -------\n    generator: train, val, test generators, respectively\n    int: n_steps for train, val, test, respectively\n    tuple: tuple of mean and standard deviation of energies in the training data\n    '''\n    # meta data\n    root_folder = settings['data']['root']\n\n    test_proportion = settings['data']['test_proportion']\n    val_proportion = settings['data']['val_proportion']\n    shift_types = settings['data']['shift_types']\n\n    dtrain = {'R': [], 'Z': [], 'N': [], 'CS': [], \"M\": []}\n    dtest = {'R': [], 'Z': [], 'N': [], 'CS': [], \"M\": [], \"labels\": []}\n\n    pbc = settings[\"data\"].get(\"pbc\", False)\n    if pbc:\n        dtrain[\"lattice\"] = []\n        dtest[\"lattice\"] = []\n\n    if test_only:\n        test_path = settings['data']['test']\n        test_proteins = os.listdir(test_path)\n    else:\n        if settings['data']['test']:\n            test_path = settings['data']['test']\n            train_proteins = os.listdir(root_folder)\n            test_proteins = os.listdir(test_path)\n        else:\n            test_path = root_folder\n            train_proteins, test_proteins = train_test_split(os.listdir(root_folder),\n                                                             test_size=test_proportion,\n                                                             random_state=settings['data']['random_states'])\n        # Load train data\n        for protein in train_proteins:\n            with open(os.path.join(root_folder, protein), \"rb\") as f:\n                data = pickle.load(f)\n            for residue in data:\n                # use axis 0 (number of atoms) for the cutoff, consistent with the test loop below\n                if residue['R'].shape[0] > settings['data']['max_natom_cutoff']:\n                    continue\n                dtrain['R'].append(residue['R'])\n                dtrain['Z'].append(residue['Z'])\n                dtrain['N'].append([len(residue['R'])])\n                shift_type_filter = np.array([item in shift_types for item in residue['Z']])\n                dtrain['CS'].append(np.nan_to_num(residue['CS']))\n                dtrain['M'].append(residue['M'] & shift_type_filter)\n                if pbc:\n                    dtrain['lattice'].append(residue['lattice'])\n\n    # Load test data\n    for protein in test_proteins:\n        with open(os.path.join(test_path, protein), \"rb\") as f:\n            data = pickle.load(f)\n        for residue in data:\n            if residue['R'].shape[0] > settings['data']['max_natom_cutoff']:\n                continue\n            dtest['R'].append(residue['R'])\n            dtest['Z'].append(residue['Z'])\n            dtest['N'].append([len(residue['R'])])\n            shift_type_filter = np.array([item in shift_types for item in residue['Z']])\n            dtest['CS'].append(np.nan_to_num(residue['CS']))\n            dtest['M'].append(residue['M'] & shift_type_filter)\n            dtest[\"labels\"].append(residue[\"meta\"])\n            if pbc:\n                dtest['lattice'].append(residue['lattice'])\n\n    if test_only:\n        n_tr_data = n_val_data = 0\n        n_test_data = len(dtest['R'])\n        for k in dtest:\n            dtest[k] = np.array(dtest[k])\n        print(\"test data size: %d\"%n_test_data)\n    else:\n        # Further split train into train/validation\n        train_val_indices = list(range(len(dtrain['R'])))\n        train_idx, val_idx = train_test_split(train_val_indices, test_size=val_proportion, random_state=settings['data']['random_states'])\n        data = dtrain\n        dtrain = {}\n        dval = {}\n        for k in data:\n            dtrain[k] = np.array(data[k])[train_idx]\n            dval[k] = np.array(data[k])[val_idx]\n            dtest[k] = np.array(dtest[k])\n\n        n_tr_data = len(dtrain['R'])\n        n_val_data = len(dval['R'])\n        n_test_data = len(dtest['R'])\n        print(\"data size: (train,val,test): %i, %i, %i\"%(n_tr_data,n_val_data,n_test_data))\n\n    # extract mean and standard deviation of chemical shifts for different atom types\n    normalizers = {}\n    # normalized_CS = dtrain['CS'].copy()\n    for z in shift_types:\n        atom_cs = []\n        for i in range(n_tr_data):\n            atom_cs.extend(dtrain['CS'][i][dtrain['Z'][i] == 
z])\n normalizers[z] = (np.nanmean(atom_cs), np.nanstd(atom_cs))\n # for i in range(n_tr_data):\n # # normalize data for a balanced multi-target training\n # normalized_CS[i][dtrain['Z'][i] == z] -= normalizers[z][0]\n # normalized_CS[i][dtrain['Z'][i] == z] /= normalizers[z][1]\n # dtrain['CS_normalized'] = normalized_CS\n\n atomic_cs_scalers = []\n for i in range(n_tr_data):\n scaler = np.ones_like(dtrain['Z'][i], dtype=float)\n for z in shift_types:\n scaler[dtrain['Z'][i] == z] = normalizers[z][1]\n atomic_cs_scalers.append(scaler)\n dtrain['cs_scaler'] = np.array(atomic_cs_scalers)\n \n tr_batch_size = settings['training']['tr_batch_size']\n val_batch_size = settings['training']['val_batch_size']\n tr_rotations = settings['training']['tr_rotations']\n val_rotations = settings['training']['val_rotations']\n\n # generators\n me = settings['general']['driver']\n\n # steps\n tr_steps = int(np.ceil(n_tr_data / tr_batch_size)) * (tr_rotations + 1)\n val_steps = int(np.ceil(n_val_data / val_batch_size)) * (val_rotations + 1)\n test_steps= int(np.ceil(n_test_data / val_batch_size)) * (val_rotations + 1)\n\n if settings['data']['pbc']:\n env = PeriodicEnvironment(cutoff=settings['data']['cutoff'] * 1.1)\n else:\n env = ExtensiveEnvironment()\n\n test_gen = extensive_train_loader(data=dtest,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=False,\n drop_last=False)\n\n if test_only:\n return test_gen, test_steps\n else:\n train_gen = extensive_train_loader(data=dtrain,\n env_provider=env,\n batch_size=tr_batch_size,\n n_rotations=tr_rotations,\n freeze_rotations=settings['training']['tr_frz_rot'],\n keep_original=settings['training']['tr_keep_original'],\n device=device,\n shuffle=settings['training']['shuffle'],\n drop_last=settings['training']['drop_last'])\n\n val_gen = extensive_train_loader(data=dval,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=settings['training']['shuffle'],\n drop_last=settings['training']['drop_last'])\n\n\n\n\n return train_gen, val_gen, test_gen, tr_steps, val_steps, test_steps, normalizers\n\ndef parse_ani_data(settings, device):\n \"\"\"\n parse and load the ANI-1 dataset with splitting of train, validation and test\n dataset is expected to be in original .h5 format\n (https://figshare.com/collections/_/3846712)\n energy units: Hartree (convert_unit=False), or kcal/mol (convert_unit=True)\n\n Parameters\n ----------\n settings: instance of yaml file\n device: list\n list of torch devices\n\n Returns\n -------\n generator: train, val, test generators, respectively\n int: n_steps for train, val, test, respectively\n tuple: tuple of mean and standard deviation of energies in the training data\n\n \"\"\"\n from .pyanitools import anidataloader\n atomic_Z_map = {'C': 6, 'H': 1, 'O': 8, 'N': 7}\n # atomic_self_energy = {'H': -0.60467592, 'C': -38.06846167, 'N': -54.70613008, 'O': -75.1796043 } # calculated from dataset\n atomic_self_energy = {'H': -0.500607632585, 'C': -37.8302333826, 'N': -54.5680045287, 'O': -75.0362229210 } # provided by ANI authors\n # meta data\n root = settings['data']['root']\n train_data = settings['data']['train']\n test_data = settings['data']['test'] # can be False\n\n # Handle train data and test data 
to make them lists\n if isinstance(train_data, int):\n train_data = [train_data]\n\n if test_data and isinstance(test_data, int):\n test_data = [test_data]\n \n test_size_per_molecule = settings['data']['test_size_per_molecule']\n\n dtrain = {'R':[], 'Z':[], 'N':[], 'E':[]}\n dtest = {'R':[], 'Z':[], 'N':[], 'E':[]}\n\n for train_data_num in train_data:\n ani_data = anidataloader(root + \"/ani_gdb_s0%d.h5\" % train_data_num)\n for molecule in ani_data:\n # prepare self energy of the current molecule for subtraction from total energy\n if settings['data']['subtract_self_energy']:\n self_energy = np.sum([atomic_self_energy[a] for a in molecule['species']])\n molecule['energies'] -= self_energy\n if settings['data'].get('convert_unit', True):\n # convert Hartree units to kCal/mol\n molecule['energies'] *= 627.2396\n n_conf, n_atoms, _ = molecule['coordinates'].shape\n conf_indices = np.arange(n_conf)\n # If no test data specified, for each molecule split conformations into train and test\n if test_data is False:\n train_idx, test_idx = train_test_split(conf_indices, \n test_size=math.ceil(test_size_per_molecule * n_conf), \n random_state=settings['data']['train_test_split_random_state'])\n else:\n train_idx = conf_indices\n n_conf_train = len(train_idx)\n dtrain['R'].extend(molecule['coordinates'][train_idx])\n dtrain['Z'].extend(np.tile([atomic_Z_map[a] for a in molecule['species']], (n_conf_train, 1)))\n dtrain['N'].extend([n_atoms] * n_conf_train)\n dtrain['E'].extend(molecule['energies'][train_idx])\n if test_data is False:\n n_conf_test = len(test_idx)\n dtest['R'].extend(molecule['coordinates'][test_idx])\n dtest['Z'].extend(np.tile([atomic_Z_map[a] for a in molecule['species']], (n_conf_test, 1)))\n dtest['N'].extend([n_atoms] * n_conf_test)\n dtest['E'].extend(molecule['energies'][test_idx])\n \n if test_data:\n for test_data_num in test_data:\n ani_data = anidataloader(root + \"/ani_gdb_s0%d.h5\" % test_data_num)\n for molecule in ani_data:\n # prepare self energy of the current molecule for subtraction from total energy\n if settings['data']['subtract_self_energy']:\n self_energy = np.sum([atomic_self_energy[a] for a in molecule['species']])\n molecule['energies'] -= self_energy\n if settings['data'].get('convert_unit', True):\n # convert Hartree units to kCal/mol\n molecule['energies'] *= 627.2396\n n_conf, n_atoms, _ = molecule['coordinates'].shape\n dtest['R'].extend(molecule['coordinates'])\n dtest['Z'].extend(np.tile([atomic_Z_map[a] for a in molecule['species']], (n_conf, 1)))\n dtest['N'].extend([n_atoms] * n_conf)\n dtest['E'].extend(molecule['energies'])\n\n # Pad irregular-shaped arrays to make all arrays regular in size\n # for k in ['R', 'Z', 'N', 'E']:\n # dtrain[k] = standardize_batch(dtrain[k])\n # dtest[k] = standardize_batch(dtest[k])\n\n further_split_indices = list(range(len(dtrain['R'])))\n train_proportion = settings['data']['train_size_proportion']\n val_proportion = settings['data']['val_size_proportion']\n total_proportion = train_proportion + val_proportion\n if total_proportion < 1:\n train_val_indices, unused_indices = train_test_split(further_split_indices, train_size=total_proportion, random_state=settings['data']['train_val_split_random_state'])\n else:\n train_val_indices = further_split_indices\n train_idx, val_idx = train_test_split(train_val_indices, test_size=(val_proportion / total_proportion), random_state=settings['data']['train_val_split_random_state'])\n data = dtrain\n dtrain = {}\n dval = {}\n for k in data:\n dtrain[k] = 
np.array(data[k])[train_idx]\n dval[k] = np.array(data[k])[val_idx]\n dtest[k] = np.array(dtest[k])\n\n # extract data stats\n normalizer = (dtrain['E'].mean(), dtrain['E'].std())\n\n n_tr_data = len(dtrain['R'])\n n_val_data = len(dval['R'])\n n_test_data = len(dtest['R'])\n print(\"data size: (train,val,test): %i, %i, %i\"%(n_tr_data,n_val_data,n_test_data))\n\n # HASH check for test energies to make sure test data is fixed\n import hashlib\n test_energy_hash = hashlib.sha1(dtest['E']).hexdigest()\n print(\"Test set energy HASH:\", test_energy_hash)\n\n tr_batch_size = settings['training']['tr_batch_size']\n val_batch_size = settings['training']['val_batch_size']\n tr_rotations = settings['training']['tr_rotations']\n val_rotations = settings['training']['val_rotations']\n\n # generators\n me = settings['general']['driver']\n\n # steps\n tr_steps = int(np.ceil(n_tr_data / tr_batch_size)) * (tr_rotations + 1)\n val_steps = int(np.ceil(n_val_data / val_batch_size)) * (val_rotations + 1)\n test_steps= int(np.ceil(n_test_data / val_batch_size)) * (val_rotations + 1)\n\n env = ExtensiveEnvironment()\n\n train_gen = extensive_train_loader(data=dtrain,\n env_provider=env,\n batch_size=tr_batch_size,\n n_rotations=tr_rotations,\n freeze_rotations=settings['training']['tr_frz_rot'],\n keep_original=settings['training']['tr_keep_original'],\n device=device,\n shuffle=settings['training']['shuffle'],\n drop_last=settings['training']['drop_last'])\n\n val_gen = extensive_train_loader(data=dval,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=settings['training']['shuffle'],\n drop_last=settings['training']['drop_last'])\n\n\n test_gen = extensive_train_loader(data=dtest,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=False,\n drop_last=False)\n\n return train_gen, val_gen, test_gen, tr_steps, val_steps, test_steps, n_tr_data, n_val_data, n_test_data, normalizer, test_energy_hash\n\ndef parse_train_test(settings, device, unit='kcal'):\n \"\"\"\n implementation based on train and validation size.\n we don't need the test_size in this implementaion.\n\n Parameters\n ----------\n settings: instance of yaml file\n device: torch.device\n list of torch devices\n\n Returns\n -------\n generator: train, val, test generators, respectively\n int: n_steps for train, val, test, respectively\n tuple: tuple of mean and standard deviation of energies in the training data\n\n \"\"\"\n # meta data\n train_path = settings['data']['train_path']\n test_path = settings['data']['test_path'] # can be False\n\n train_size = settings['data']['train_size']\n val_size = settings['data']['val_size']\n\n\n # read data\n data = np.load(train_path)\n test = None\n if test_path:\n test = dict(np.load(test_path))\n\n # take care of inconsistencies\n dtrain = dict()\n dtest = dict()\n\n for key in list(data.keys()):\n # copy Z embarrassingly Todo: make it data efficient by flexible environment module\n if key == 'z':\n dtrain['Z'] = np.tile(data['z'], (data['E'].shape[0], 1))\n if test is not None:\n dtest['Z'] = np.tile(test['z'], (test['E'].shape[0], 1))\n\n elif key == 'E':\n if data['E'].ndim == 1:\n dtrain['E'] = data['E'].reshape(-1,1)\n else:\n dtrain[key] = data[key]\n\n if test is not 
None:\n if test['E'].ndim == 1:\n dtest['E'] = test['E'].reshape(-1, 1)\n else:\n dtest[key] = test[key]\n\n elif key in ['R','F','Z']:\n dtrain[key] = data[key]\n if test is not None:\n dtest[key] = test[key]\n\n # convert unit\n if unit == 'ev':\n dtrain['E'] = dtrain['E'] * 23.061\n dtrain['F'] = dtrain['F'] * 23.061\n\n # split the data\n dtrain, dval, dtest_leftover = split(dtrain,\n train_size=train_size,\n test_size=None,\n val_size=val_size,\n random_states=settings['data']['random_states'])\n if test is None:\n test_size = settings['data'].get('test_size', -1)\n if test_size == -1:\n dtest = dtest_leftover\n else:\n test_size = min(test_size, dtest_leftover['R'].shape[0])\n n_select = sample_without_replacement(dtest_leftover['R'].shape[0],\n test_size,\n random_state=settings['data']['random_states'])\n for k in dtest_leftover.keys():\n dtest[k] = dtest_leftover[k][n_select]\n\n\n # extract data stats\n normalizer = (dtrain['E'].mean(), dtrain['E'].std())\n\n n_tr_data = dtrain['R'].shape[0]\n n_val_data = dval['R'].shape[0]\n n_test_data = dtest['R'].shape[0]\n print(\"data size: (train,val,test): %i, %i, %i\"%(n_tr_data,n_val_data,n_test_data))\n\n tr_batch_size = settings['training']['tr_batch_size']\n val_batch_size = settings['training']['val_batch_size']\n tr_rotations = settings['training']['tr_rotations']\n val_rotations = settings['training']['val_rotations']\n\n # generators\n me = settings['general']['driver']\n\n # steps\n tr_steps = int(np.ceil(n_tr_data / tr_batch_size)) * (tr_rotations + 1)\n val_steps = int(np.ceil(n_val_data / val_batch_size)) * (val_rotations + 1)\n test_steps= int(np.ceil(n_test_data / val_batch_size)) * (val_rotations + 1)\n\n env = ExtensiveEnvironment()\n\n train_gen = extensive_train_loader(data=dtrain,\n env_provider=env,\n batch_size=tr_batch_size,\n n_rotations=tr_rotations,\n freeze_rotations=settings['training']['tr_frz_rot'],\n keep_original=settings['training']['tr_keep_original'],\n device=device,\n shuffle=settings['training']['shuffle'],\n drop_last=settings['training']['drop_last'])\n\n val_gen = extensive_train_loader(data=dval,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=settings['training']['shuffle'],\n drop_last=settings['training']['drop_last'])\n\n test_gen = extensive_train_loader(data=dtest,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=False,\n drop_last=False)\n\n return train_gen, val_gen, test_gen, tr_steps, val_steps, test_steps, normalizer\n\ndef parse_methane_data(settings, device):\n \"\"\"\n parse and load methane combustion reaction by splitting all available data into train, validation and test\n\n data comes from https://figshare.com/articles/dataset/Dataset_for_methane_combustion/12973055\n\n reference paper: Zeng, J., Cao, L., Xu, M. et al. Complex reaction processes in combustion unraveled by neural network-based molecular dynamics simulation. Nat Commun 11, 5713 (2020). 
\n https://doi.org/10.1038/s41467-020-19497-z\n\n energy units: eV ?\n force units: eV/A ?\n\n Parameters\n ----------\n settings: instance of yaml file\n device: torch.device\n list of torch devices\n\n Returns\n -------\n generator: train, val, test generators, respectively\n int: n_steps for train, val, test, respectively\n tuple: tuple of mean and standard deviation of energies in the training data\n\n \"\"\"\n # meta data\n train_path = settings['data']['train_path']\n test_path = settings['data']['test_path'] # can be False\n assert test_path is False\n\n\n all_data = os.listdir(train_path)\n\n data = {'R':[], 'Z':[], 'E':[], 'F':[], 'NA': []}\n\n type_maps = {0: 6, #C,\n 1: 1, #H,\n 2: 8} #O\n atomic_self_energy = [-1034.54661575, -15.63566308, -2043.59323628] # C, H, O\n\n for composition in all_data:\n types = np.loadtxt(os.path.join(train_path, composition, \"type.raw\"), dtype=int)\n Z = np.array([type_maps[i] for i in types])\n n_atoms = len(Z)\n coords = np.load(os.path.join(train_path, composition, \"set.000\", \"coord.npy\")).reshape((-1, n_atoms, 3))\n energy = np.load(os.path.join(train_path, composition, \"set.000\", \"energy.npy\"))\n force = np.load(os.path.join(train_path, composition, \"set.000\", \"force.npy\")).reshape((-1, n_atoms, 3))\n # subtract self energy when needed\n if settings['data']['subtract_self_energy']:\n self_energy = np.sum([atomic_self_energy[i] for i in types])\n energy -= self_energy\n n_confs = energy.shape[0]\n Z = np.tile(Z[None], (n_confs, 1))\n\n\n # add compositon data into dataset\n data['R'].extend(coords)\n data['Z'].extend(Z)\n data['E'].extend(energy)\n data['F'].extend(force)\n data['NA'].extend([n_atoms] * n_confs)\n\n # split data into train and test\n all_indices = list(range(len(data['R'])))\n further_split_indices, test_idx = train_test_split(all_indices, test_size=settings['data']['test_count'], random_state=settings['data']['train_test_split_random_state'])\n train_proportion = settings['data']['train_proportion']\n val_proportion = settings['data']['val_proportion']\n total_proportion = train_proportion + val_proportion\n if total_proportion < 1:\n train_val_indices, unused_indices = train_test_split(further_split_indices, train_size=total_proportion, random_state=settings['data']['train_val_split_random_state'])\n else:\n train_val_indices = further_split_indices\n train_idx, val_idx = train_test_split(train_val_indices, test_size=(val_proportion / total_proportion), random_state=settings['data']['train_val_split_random_state'])\n\n dtrain = {}\n dval = {}\n dtest = {}\n for k in data:\n dtrain[k] = np.array(data[k])[train_idx]\n dval[k] = np.array(data[k])[val_idx]\n dtest[k] = np.array(data[k])[test_idx]\n\n\n # extract data stats\n normalizer = (dtrain['E'].mean(), dtrain['E'].std())\n\n n_tr_data = dtrain['R'].shape[0]\n n_val_data = dval['R'].shape[0]\n n_test_data = dtest['R'].shape[0]\n print(\"data size: (train,val,test): %i, %i, %i\"%(n_tr_data,n_val_data,n_test_data))\n\n # HASH check for test energies to make sure test data is fixed\n import hashlib\n test_energy_hash = hashlib.sha1(dtest['E']).hexdigest()\n print(\"Test set energy HASH:\", test_energy_hash)\n\n tr_batch_size = settings['training']['tr_batch_size']\n val_batch_size = settings['training']['val_batch_size']\n tr_rotations = settings['training']['tr_rotations']\n val_rotations = settings['training']['val_rotations']\n\n # generators\n me = settings['general']['driver']\n\n # steps\n tr_steps = int(np.ceil(n_tr_data / tr_batch_size)) * (tr_rotations + 
1)\n val_steps = int(np.ceil(n_val_data / val_batch_size)) * (val_rotations + 1)\n test_steps= int(np.ceil(n_test_data / val_batch_size)) * (val_rotations + 1)\n\n env = ExtensiveEnvironment()\n\n train_gen = extensive_train_loader(data=dtrain,\n env_provider=env,\n batch_size=tr_batch_size,\n n_rotations=tr_rotations,\n freeze_rotations=settings['training']['tr_frz_rot'],\n keep_original=settings['training']['tr_keep_original'],\n device=device,\n shuffle=settings['training']['shuffle'],\n drop_last=settings['training']['drop_last'])\n\n val_gen = extensive_train_loader(data=dval,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=settings['training']['shuffle'],\n drop_last=settings['training']['drop_last'])\n\n test_gen = extensive_train_loader(data=dtest,\n env_provider=env,\n batch_size=val_batch_size,\n n_rotations=val_rotations,\n freeze_rotations=settings['training']['val_frz_rot'],\n keep_original=settings['training']['val_keep_original'],\n device=device,\n shuffle=False,\n drop_last=False)\n\n return train_gen, val_gen, test_gen, tr_steps, val_steps, test_steps, n_tr_data, n_val_data, n_test_data, normalizer, test_energy_hash\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.ceil", "numpy.ones_like", "numpy.nan_to_num", "numpy.sum", "sklearn.utils.random.sample_without_replacement", "numpy.load", "numpy.tile", "numpy.nanmean", "numpy.arange", "sklearn.model_selection.train_test_split", "numpy.nanstd" ] ]
WordBearerYI/HPNet
[ "41d37ae61adab813582091bf3fb0e744513d731d" ]
[ "model.py" ]
[ "from torchvision.models import resnext50_32x4d as resnext\r\nimport torch.nn as nn\r\nimport torch\r\nimport torch.nn.functional as F\r\n\r\n\r\nclass HPANet(nn.Module):\r\n    def __init__(self, hidden_size=1280, num_cls=18):\r\n        super(HPANet, self).__init__()\r\n        # NOTE: resnext expects (B, 3, H, W) inputs; single-channel slices must be expanded upstream\r\n        self.model_base = resnext().double()\r\n        # self.ln = nn.LayerNorm((4,))\r\n        self.linear1 = nn.Linear(1000*4, hidden_size)\r\n        self.linear2 = nn.Linear(hidden_size, hidden_size)\r\n        self.linear3 = nn.Linear(hidden_size, num_cls)\r\n\r\n        self.relu = nn.ReLU()\r\n        self.dropout = nn.Dropout(0.5)\r\n        # multi-label classification: one sigmoid/BCE target per class\r\n        self.loss = nn.BCEWithLogitsLoss()\r\n\r\n    def forward(self, inputs, labels):\r\n        '''\r\n        inputs: B x 4 x [img_size x img_size x num_channel]\r\n        labels: B x num_cls\r\n        '''\r\n        g = inputs[:, 0, :, :]\r\n        r = inputs[:, 1, :, :]\r\n        y = inputs[:, 2, :, :]\r\n        b = inputs[:, 3, :, :]\r\n        fg = self.model_base(g)\r\n        fr = self.model_base(r)\r\n        fy = self.model_base(y)\r\n        fb = self.model_base(b)\r\n\r\n        f = torch.cat((fg, fr, fy, fb), dim=1)\r\n        x = self.dropout(self.relu(self.linear1(f)))\r\n        x = self.dropout(self.relu(self.linear2(x)))\r\n        x = self.linear3(x)\r\n        # BCEWithLogitsLoss expects (logits, targets)\r\n        return self.loss(x, labels)\r\n\r\n    def inference(self, inputs):\r\n        '''\r\n        inputs: 4 x [img_size x img_size x num_channel]\r\n        returns: indices of the predicted classes\r\n        '''\r\n        g = inputs[:, 0, :, :]\r\n        r = inputs[:, 1, :, :]\r\n        y = inputs[:, 2, :, :]\r\n        b = inputs[:, 3, :, :]\r\n        fg = self.model_base(g)\r\n        fr = self.model_base(r)\r\n        fy = self.model_base(y)\r\n        fb = self.model_base(b)\r\n\r\n        f = torch.cat((fg, fr, fy, fb), dim=1)\r\n        x = self.dropout(self.relu(self.linear1(f)))\r\n        x = self.dropout(self.relu(self.linear2(x)))\r\n        x = self.linear3(x)\r\n        # threshold class probabilities (sigmoid of the logits) at 0.5\r\n        preds = [i for i, p in enumerate(torch.sigmoid(x).flatten()) if p >= 0.5]\r\n        return preds\r\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.cat", "torch.nn.ReLU", "torch.nn.BCEWithLogitsLoss", "torch.sigmoid" ] ]
alfredoav/trax
[ "e35776e7d0b5e58d2e69100fafb8080d4f2b19be" ]
[ "trax/supervised/decoding.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helper functions for decoding using Trax models, esp. autoregressive ones.\"\"\"\n\nimport numpy as np\nfrom trax import layers as tl\n\n\ndef autoregressive_sample(model, prefix=None, inputs=None,\n                          batch_size=1, temperature=1.0,\n                          start_id=0, eos_id=1, max_length=100,\n                          accelerate=True):\n  \"\"\"Perform autoregressive sampling from the provided model.\n\n  Note that the provided model should be an autoregressive model initialized\n  in 'predict' mode. In this mode, a model takes the outputs it is generating\n  one-by-one (instead of taking them all at once, as, e.g., during training).\n  Model state is used to store the intermediate information needed, and usually\n  the model performs inference in this mode faster than in 'eval' mode.\n\n  Args:\n    model: instance of trax.Layer, the model to sample from (at mode='predict')\n    prefix: optional tensor [batch_size, L]: prefix for decoding\n    inputs: optional tensor [batch_size, M]: inputs to provide to the model\n    batch_size: how many sequences to sample in parallel (default: 1)\n    temperature: sampling temperature (default: 1.0)\n    start_id: int, id for the start symbol fed at the beginning (default: 0)\n    eos_id: int, id of the end-of-sequence symbol used to stop (default: 1)\n    max_length: maximum length to sample (default: 100)\n    accelerate: whether to accelerate the model before decoding (default: True)\n\n  Returns:\n    a tensor of ints of shape [batch_size, N] with N <= max_length containing\n    the autoregressively sampled output from the model\n  \"\"\"\n  if prefix is not None and prefix.shape[0] != batch_size:\n    raise ValueError(f'Prefix batch size {prefix.shape[0]} != {batch_size}.')\n  if inputs is not None and inputs.shape[0] != batch_size:\n    raise ValueError(f'Inputs batch size {inputs.shape[0]} != {batch_size}.')\n  fast_model = tl.Accelerate(model) if accelerate else model\n  cur_symbol = np.full((batch_size, 1), start_id, dtype=np.int32)\n  result = []\n  eos_seen = []\n  for i in range(max_length):\n    model_input = cur_symbol if inputs is None else (inputs, cur_symbol)\n    logits = fast_model(model_input)\n    if inputs is not None:\n      logits = logits[0]  # Pick first element from model output (a pair here)\n    if prefix is not None and i < prefix.shape[1]:  # Read from prefix.\n      cur_prefix_symbol = prefix[:, i]\n      sample = cur_prefix_symbol[:, None]\n    else:\n      sample = tl.gumbel_sample(logits, temperature=temperature)\n    result.append(sample)\n    # Note: we're using 'predict' mode autoregressive models here, so history\n    # is cached in the model state and we are only feeding one symbol next.\n    cur_symbol = sample\n    # Check at which batch positions we have already encountered EOS.\n    for j in range(batch_size):\n      if int(sample[j, 0]) == eos_id:\n        eos_seen.append(j)\n    # If EOS has been seen on all positions, stop.\n    if all([j in eos_seen for j in range(batch_size)]):\n      break\n  return np.concatenate(result, axis=1)\n" ]
[ [ "numpy.concatenate", "numpy.full" ] ]
yuhonghong66/kepler-mapper
[ "b7ecd1c573b1da49006aa651313fc2c79293f671" ]
[ "kmapper/cover.py" ]
[ "from __future__ import division\n\nimport warnings\nfrom itertools import product\nimport numpy as np\n\n# TODO: Incorporate @pablodecm's cover API.\n\n\nclass Cover:\n    \"\"\"Helper class that defines the default covering scheme\n\n\n    Parameters\n    ------------\n\n    limits: Numpy Array (n_dim,2)\n        (lower bound, upper bound) for every dimension\n        If a value is set to `np.float('inf')`, the bound will be assumed to be the min/max value of the dimension\n        Also, if `limits == None`, the limits are defined by the maximum and minimum value of the lens for all dimensions.\n        i.e. `[[min_1, max_1], [min_2, max_2], [min_3, max_3]]`\n\n    \"\"\"\n\n    def __init__(\n        self,\n        n_cubes=10,\n        perc_overlap=0.2,\n        # Deprecated parameters:\n        nr_cubes=None,\n        overlap_perc=None,\n        limits=None,\n    ):\n\n        self.n_cubes = nr_cubes if nr_cubes else n_cubes\n        self.perc_overlap = overlap_perc if overlap_perc else perc_overlap\n\n        if overlap_perc is not None or nr_cubes is not None:\n            warnings.warn(\n                \"Arguments `overlap_perc` and `nr_cubes` have been replaced with `perc_overlap` and `n_cubes`. Use `perc_overlap` and `n_cubes` instead. They will be removed in future releases.\",\n                DeprecationWarning,\n            )\n\n        self.limits = limits\n\n        # Check limits can actually be handled and are set appropriately\n        assert isinstance(\n            self.limits, (list, np.ndarray, type(None))\n        ), \"limits should either be an array or None\"\n        if isinstance(self.limits, (list, np.ndarray)):\n            self.limits = np.array(self.limits)\n            assert self.limits.shape[1] == 2, \"limits should be (n_dim,2) in shape\"\n\n    def define_bins(self, data):\n        \"\"\"Returns an iterable of all bins in the cover.\n\n        Warning: This function must assume that the first column of data are indices.\n\n        Examples\n        ---------\n\n        If there are 4 cubes per dimension and 3 dimensions return the bottom left (origin) coordinates of 64 hypercubes, as a sorted list of Numpy arrays\n        \"\"\"\n\n        indexless_data = data[:, 1:]\n\n        # Find upper and lower bounds of bins using self.limits\n        # If array, use the values in the array\n        # If None, use the maximum and minimum values in the lens\n\n        # If self.limits is array-like\n        if isinstance(self.limits, np.ndarray):\n            # limits_array is used so we can change the values of self.limits from None to the min/max\n            limits_array = np.zeros(self.limits.shape)\n            limits_array[:, 0] = np.min(indexless_data, axis=0)\n            limits_array[:, 1] = np.max(indexless_data, axis=0)\n            limits_array[self.limits != np.float(\"inf\")] = 0\n            self.limits[self.limits == np.float(\"inf\")] = 0\n            bounds_arr = self.limits + limits_array\n            \"\"\" bounds_arr[i,j] = self.limits[i,j] if self.limits[i,j] != inf\n                bounds_arr[i,j] = max/min(indexless_data[i]) if self.limits[i,j] == inf \"\"\"\n            bounds = (bounds_arr[:, 0], bounds_arr[:, 1])\n\n            # Check new bounds are actually sensible - do they cover the range of values in the dataset?\n            # (both conditions must hold for the limits to cover the data, hence 'and')\n            if not (\n                (np.min(indexless_data, axis=0) >= bounds_arr[:, 0]).all()\n                and (np.max(indexless_data, axis=0) <= bounds_arr[:, 1]).all()\n            ):\n                warnings.warn(\n                    \"The limits given do not cover the entire range of the lens functions\\n\"\n                    + \"Actual Minima: %s\\tInput Minima: %s\\n\"\n                    % (np.min(indexless_data, axis=0), bounds_arr[:, 0])\n                    + \"Actual Maxima: %s\\tInput Maxima: %s\\n\"\n                    % (np.max(indexless_data, axis=0), bounds_arr[:, 1])\n                )\n\n        else:  # It must be None, as we checked to see if it is array-like or None in __init__\n            bounds = (np.min(indexless_data, axis=0), np.max(indexless_data, axis=0))\n\n        # We chop up the min-max column 
ranges into 'n_cubes' parts\n self.chunk_dist = (bounds[1] - bounds[0]) / self.n_cubes\n\n # We calculate the overlapping windows distance\n self.overlap_dist = self.perc_overlap * self.chunk_dist\n\n # We find our starting point\n self.d = bounds[0]\n # And our ending point (for testing)\n self.end = bounds[1]\n\n # Use a dimension index array on the projected X\n # (For now this uses the entire dimensionality, but we keep for experimentation)\n self.di = np.array(range(1, data.shape[1]))\n self.nr_dimensions = len(self.di)\n\n if type(self.n_cubes) is not list:\n cubes = [self.n_cubes] * self.nr_dimensions\n else:\n assert (\n len(self.n_cubes) == self.nr_dimensions\n ), \"There are {} ({}) dimensions specified but {} dimensions needing specification. If you supply specific number of cubes for each dimension, please supply the correct number.\".format(\n len(self.n_cubes), self.n_cubes, self.nr_dimensions\n )\n cubes = self.n_cubes\n\n coordinates = map(np.asarray, product(*(range(i) for i in cubes)))\n return coordinates\n\n def find_entries(self, data, cube, verbose=0):\n \"\"\"Find all entries in data that are in the given cube.\n\n Parameters\n ----------\n data: Numpy array\n Either projected data or original data.\n cube:\n an item from the list of cubes provided by `cover.define_bins` iterable.\n\n Returns\n -------\n hypercube: Numpy Array\n All entries in data that are in cube.\n\n \"\"\"\n\n chunk = self.chunk_dist\n overlap = self.overlap_dist\n lower_bound = self.d + (cube * chunk)\n upper_bound = lower_bound + chunk + overlap\n\n # Slice the hypercube\n entries = (data[:, self.di] >= lower_bound) & (data[:, self.di] < upper_bound)\n\n hypercube = data[np.invert(np.any(entries == False, axis=1))]\n\n return hypercube\n\n\nclass CubicalCover(Cover):\n \"\"\"\n Explicit definition of a cubical cover as the default behavior of the cover class. This is currently identical to the default cover class.\n \"\"\"\n\n pass\n" ]
[ [ "numpy.max", "numpy.array", "numpy.zeros", "numpy.float", "numpy.min", "numpy.any" ] ]
jessicaleete/numerical_computing
[ "cc71f51f35ca74d00e617af3d1a0223e19fb9a68" ]
[ "Labs/GMRES/plots.py" ]
[ "import numpy as np\nfrom scipy import linalg as la\nimport matplotlib\nmatplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')\nfrom matplotlib import pyplot as plt\n\ndef plot_gmres():\n    M = 2*np.eye(200) + np.random.normal(0, .5/np.sqrt(200), (200,200))\n    b = np.ones(200)\n    k = 200\n    tol = 1e-8\n    res = np.empty(k)\n\n    Q = np.empty((b.size, k+1))\n    H = np.zeros((k+1, k))\n    bnorm = la.norm(b)\n    Q[:,0] = b/bnorm\n    for j in range(k):\n        # Arnoldi algorithm, minus the last step\n        Q[:,j+1] = M.dot(Q[:,j])\n        for i in range(j+1):\n            H[i,j] = np.inner(Q[:,i].conjugate(), Q[:,j+1])\n            Q[:,j+1] -= H[i,j] * Q[:,i]\n        H[j+1,j] = la.norm(Q[:,j+1])\n\n        # Calculate least squares solution\n        y, res[j] = la.lstsq(H[:j+2, :j+1], bnorm*np.eye(j+2)[0])[:2]\n        # calculate residual\n        res[j] = np.sqrt(res[j])/bnorm\n\n        # Stop if the residual is small enough OR if Arnoldi has terminated.\n        # Though, I think the second condition implies the first.\n        if res[j] < tol or H[j+1, j] < tol:\n            break\n\n        # Last step of Arnoldi\n        Q[:,j+1] /= H[j+1, j]\n    plt.subplot(1,2,2)\n    plt.plot(range(j+1), res[:j+1])\n    plt.gca().set_yscale('log')\n\n    # plot eigenvalues\n    evals, evecs = la.eig(M)\n    plt.subplot(1,2,1)\n    plt.scatter(np.real(evals), np.imag(evals))\n    plt.savefig('./plot_gmres.pdf', bbox_inches='tight')\n    plt.close()\n\nif __name__=='__main__':\n    plot_gmres()\n" ]
[ [ "numpy.empty", "numpy.zeros", "matplotlib.pyplot.savefig", "numpy.ones", "matplotlib.rc_params_from_file", "matplotlib.pyplot.close", "numpy.real", "numpy.eye", "scipy.linalg.eig", "numpy.sqrt", "numpy.imag", "matplotlib.pyplot.gca", "scipy.linalg.norm", "matplotlib.pyplot.subplot" ] ]
Eunjnnn/ignite
[ "743089705b2b252aa5e2a0f310da3a8724d6711e" ]
[ "ignite/contrib/metrics/regression/wave_hedges_distance.py" ]
[ "from typing import Tuple\n\nimport torch\n\nfrom ignite.contrib.metrics.regression._base import _BaseRegression\nfrom ignite.metrics.metric import reinit__is_reduced, sync_all_reduce\n\n\nclass WaveHedgesDistance(_BaseRegression):\n r\"\"\"Calculates the Wave Hedges Distance.\n\n .. math::\n \\text{WHD} = \\sum_{j=1}^n\\frac{|A_j - P_j|}{max(A_j, P_j)}\n\n where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.\n\n More details can be found in `Botchkarev 2018`__.\n\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.\n\n __ https://arxiv.org/abs/1809.03006\n\n Parameters are inherited from ``Metric.__init__``.\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n device: specifies which device updates are accumulated on. Setting the\n metric's device to be the same as your ``update`` arguments ensures the ``update`` method is\n non-blocking. By default, CPU.\n\n Examples:\n To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.\n The output of the engine's ``process_function`` needs to be in format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n\n .. testcode::\n\n metric = WaveHedgesDistance()\n metric.attach(default_evaluator, 'whd')\n y_true = torch.Tensor([0, 1, 2, 3, 4, 5])\n y_pred = y_true * 0.75\n state = default_evaluator.run([[y_pred, y_true]])\n print(state.metrics['whd'])\n\n .. testoutput::\n\n 1.25...\n\n .. versionchanged:: 0.4.5\n - Works with DDP.\n \"\"\"\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum_of_errors = torch.tensor(0.0, device=self._device)\n\n def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:\n y_pred, y = output[0].detach(), output[1].detach()\n errors = torch.abs(y.view_as(y_pred) - y_pred) / (torch.max(y_pred, y.view_as(y_pred)) + 1e-30)\n self._sum_of_errors += torch.sum(errors).to(self._device)\n\n @sync_all_reduce(\"_sum_of_errors\")\n def compute(self) -> float:\n return self._sum_of_errors.item()\n" ]
[ [ "torch.tensor", "torch.sum" ] ]
feay1234/som-dst
[ "3e96fc8da8cae5a3b822718fa1e75edef1944d56" ]
[ "utils/data_utils.py" ]
[ "\"\"\"\nSOM-DST\nCopyright (c) 2020-present NAVER Corp.\nMIT license\n\"\"\"\n\nimport numpy as np\nimport json\nfrom torch.utils.data import Dataset\nimport torch\nimport random\nimport re\nfrom copy import deepcopy\nfrom .fix_label import fix_general_label_error\nimport collections\n\nflatten = lambda x: [i for s in x for i in s]\nEXPERIMENT_DOMAINS = [\"hotel\", \"train\", \"restaurant\", \"attraction\", \"taxi\"]\ndomain2id = {d: i for i, d in enumerate(EXPERIMENT_DOMAINS)}\n\nOP_SET = {\n '2': {'update': 0, 'carryover': 1},\n '3-1': {'update': 0, 'carryover': 1, 'dontcare': 2},\n '3-2': {'update': 0, 'carryover': 1, 'delete': 2},\n '4': {'delete': 0, 'update': 1, 'dontcare': 2, 'carryover': 3},\n '6': {'delete': 0, 'update': 1, 'dontcare': 2, 'carryover': 3, 'yes': 4, 'no': 5}\n}\n\n\ndef make_turn_label(slot_meta, last_dialog_state, turn_dialog_state,\n tokenizer, op_code='4', dynamic=False):\n if dynamic:\n gold_state = turn_dialog_state\n turn_dialog_state = {}\n for x in gold_state:\n s = x.split('-')\n k = '-'.join(s[:2])\n turn_dialog_state[k] = s[2]\n\n op_labels = ['carryover'] * len(slot_meta)\n generate_y = []\n keys = list(turn_dialog_state.keys())\n for k in keys:\n v = turn_dialog_state[k]\n if v == 'none':\n turn_dialog_state.pop(k)\n continue\n vv = last_dialog_state.get(k)\n try:\n idx = slot_meta.index(k)\n if vv != v:\n if v == 'dontcare' and OP_SET[op_code].get('dontcare') is not None:\n op_labels[idx] = 'dontcare'\n elif v == 'yes' and OP_SET[op_code].get('yes') is not None:\n op_labels[idx] = 'yes'\n elif v == 'no' and OP_SET[op_code].get('no') is not None:\n op_labels[idx] = 'no'\n else:\n op_labels[idx] = 'update'\n generate_y.append([tokenizer.tokenize(v) + ['[EOS]'], idx])\n elif vv == v:\n op_labels[idx] = 'carryover'\n except ValueError:\n continue\n\n for k, v in last_dialog_state.items():\n vv = turn_dialog_state.get(k)\n try:\n idx = slot_meta.index(k)\n if vv is None:\n if OP_SET[op_code].get('delete') is not None:\n op_labels[idx] = 'delete'\n else:\n op_labels[idx] = 'update'\n generate_y.append([['[NULL]', '[EOS]'], idx])\n except ValueError:\n continue\n gold_state = [str(k) + '-' + str(v) for k, v in turn_dialog_state.items()]\n if len(generate_y) > 0:\n generate_y = sorted(generate_y, key=lambda lst: lst[1])\n generate_y, _ = [list(e) for e in list(zip(*generate_y))]\n\n if dynamic:\n op2id = OP_SET[op_code]\n generate_y = [tokenizer.convert_tokens_to_ids(y) for y in generate_y]\n op_labels = [op2id[i] for i in op_labels]\n\n return op_labels, generate_y, gold_state\n\n\ndef postprocessing(slot_meta, ops, last_dialog_state,\n generated, tokenizer, op_code, gold_gen={}):\n gid = 0\n for st, op in zip(slot_meta, ops):\n if op == 'dontcare' and OP_SET[op_code].get('dontcare') is not None:\n last_dialog_state[st] = 'dontcare'\n elif op == 'yes' and OP_SET[op_code].get('yes') is not None:\n last_dialog_state[st] = 'yes'\n elif op == 'no' and OP_SET[op_code].get('no') is not None:\n last_dialog_state[st] = 'no'\n elif op == 'delete' and last_dialog_state.get(st) and OP_SET[op_code].get('delete') is not None:\n last_dialog_state.pop(st)\n elif op == 'update':\n g = tokenizer.convert_ids_to_tokens(generated[gid])\n gen = []\n for gg in g:\n if gg == '[EOS]':\n break\n gen.append(gg)\n gen = ' '.join(gen).replace(' ##', '')\n gid += 1\n gen = gen.replace(' : ', ':').replace('##', '')\n if gold_gen and gold_gen.get(st) and gold_gen[st] not in ['dontcare']:\n gen = gold_gen[st]\n\n if gen == '[NULL]' and last_dialog_state.get(st) and not 
OP_SET[op_code].get('delete') is not None:\n last_dialog_state.pop(st)\n else:\n last_dialog_state[st] = gen\n return generated, last_dialog_state\n\n\ndef make_slot_meta(ontology):\n meta = []\n change = {}\n idx = 0\n max_len = 0\n for i, k in enumerate(ontology.keys()):\n d, s = k.split('-')\n if d not in EXPERIMENT_DOMAINS:\n continue\n if 'price' in s or 'leave' in s or 'arrive' in s:\n s = s.replace(' ', '')\n ss = s.split()\n if len(ss) + 1 > max_len:\n max_len = len(ss) + 1\n meta.append('-'.join([d, s]))\n change[meta[-1]] = ontology[k]\n return sorted(meta), change\n\n\ndef prepare_dataset(data_path, tokenizer, slot_meta,\n n_history, max_seq_length, diag_level=False, op_code='4', turn_weight=0, seq_num=0):\n dials = json.load(open(data_path))\n data = []\n domain_counter = {}\n max_resp_len, max_value_len = 0, 0\n max_line = None\n dia_max_turn = collections.defaultdict(int)\n\n isTrain = \"train\" in data_path\n\n count = 0\n for dial_dict in dials:\n print(dial_dict)\n for domain in dial_dict[\"domains\"]:\n if domain not in EXPERIMENT_DOMAINS:\n continue\n if domain not in domain_counter.keys():\n domain_counter[domain] = 0\n domain_counter[domain] += 1\n\n dialog_history = []\n last_dialog_state = {}\n last_uttr = \"\"\n for ti, turn in enumerate(dial_dict[\"dialogue\"]):\n turn_domain = turn[\"domain\"]\n if turn_domain not in EXPERIMENT_DOMAINS:\n continue\n\n turn_id = turn[\"turn_idx\"]\n\n turn_loss_weight = 1 + ((len(dial_dict[\"dialogue\"]) - (turn_id + 1)) * turn_weight) if isTrain else 1\n # print(turn_loss_weight, turn_weight)\n turn_uttr = (turn[\"system_transcript\"] + ' ; ' + turn[\"transcript\"]).strip()\n dialog_history.append(last_uttr)\n turn_dialog_state = fix_general_label_error(turn[\"belief_state\"], False, slot_meta)\n last_uttr = turn_uttr\n\n op_labels, generate_y, gold_state = make_turn_label(slot_meta, last_dialog_state,\n turn_dialog_state,\n tokenizer, op_code)\n if (ti + 1) == len(dial_dict[\"dialogue\"]):\n is_last_turn = True\n else:\n is_last_turn = False\n\n instance = TrainingInstance(dial_dict[\"dialogue_idx\"], turn_domain,\n turn_id, turn_uttr, ' '.join(dialog_history[-n_history:]),\n last_dialog_state, op_labels,\n generate_y, gold_state, max_seq_length, slot_meta,\n is_last_turn, op_code=op_code, turn_weight=turn_loss_weight)\n\n # print(turn_id, len(last_dialog_state), len(gold_state))\n # print(turn_uttr)\n # print(last_dialog_state)\n # print(generate_y)\n # print(gold_state)\n # print(\"----------\")\n\n instance.make_instance(tokenizer)\n data.append(instance)\n if isTrain and seq_num > 0:\n augment_turn_uttr = turn_uttr[:]\n augment_op_labels = op_labels[:]\n augment_generate_y = generate_y[:]\n augment_gold_state = []\n for _seq_num in range(1, seq_num + 1):\n i = ti + _seq_num\n if i < len(dial_dict[\"dialogue\"]):\n _turn = dial_dict[\"dialogue\"][i]\n _turn_domain = _turn[\"domain\"]\n if _turn_domain not in EXPERIMENT_DOMAINS:\n continue\n\n _turn_uttr = (_turn[\"system_transcript\"] + ' ; ' + _turn[\"transcript\"]).strip()\n\n _turn_dialog_state = fix_general_label_error(_turn[\"belief_state\"], False, slot_meta)\n\n _op_labels, _generate_y, _gold_state = make_turn_label(slot_meta, last_dialog_state,\n _turn_dialog_state,\n tokenizer, op_code)\n\n augment_gold_state = _gold_state\n augment_generate_y.extend(_generate_y)\n\n for j in range(len(_op_labels)):\n\n if _op_labels[j] == \"carryover\":\n continue\n elif augment_op_labels[j] == \"carryover\":\n augment_op_labels[j] = _op_labels[j]\n\n\n if (i + 1) == 
len(dial_dict[\"dialogue\"]):\n is_last_turn = True\n else:\n is_last_turn = False\n\n augment_turn_uttr += \" ; \" + _turn_uttr\n\n new_did = \"%s_%d_a%d\" % (dial_dict[\"dialogue_idx\"], turn_id, _seq_num)\n # print(new_did)\n instance = TrainingInstance(new_did, turn_domain,\n turn_id, augment_turn_uttr, ' '.join(dialog_history[-n_history:]),\n last_dialog_state, augment_op_labels,\n augment_generate_y, augment_gold_state, max_seq_length, slot_meta,\n is_last_turn, op_code=op_code, turn_weight=turn_loss_weight)\n\n # print(augment_turn_uttr)\n # print(last_dialog_state)\n # print(augment_generate_y)\n # print(augment_gold_state)\n # print(\"----------\")\n\n instance.make_instance(tokenizer)\n data.append(instance)\n\n last_dialog_state = turn_dialog_state\n # break\n # if count > 10:\n # break\n # count += 1\n return data\n\n\nclass TrainingInstance:\n def __init__(self, ID,\n turn_domain,\n turn_id,\n turn_utter,\n dialog_history,\n last_dialog_state,\n op_labels,\n generate_y,\n gold_state,\n max_seq_length,\n slot_meta,\n is_last_turn,\n op_code='4',\n turn_weight=1):\n self.id = ID\n self.turn_domain = turn_domain\n self.turn_id = turn_id\n self.turn_utter = turn_utter\n self.dialog_history = dialog_history\n self.last_dialog_state = last_dialog_state\n self.gold_p_state = last_dialog_state\n self.generate_y = generate_y\n self.op_labels = op_labels\n self.gold_state = gold_state\n self.max_seq_length = max_seq_length\n self.slot_meta = slot_meta\n self.is_last_turn = is_last_turn\n self.op2id = OP_SET[op_code]\n self.turn_weight = turn_weight\n\n def shuffle_state(self, rng, slot_meta=None):\n new_y = []\n gid = 0\n for idx, aa in enumerate(self.op_labels):\n if aa == 'update':\n new_y.append(self.generate_y[gid])\n gid += 1\n else:\n new_y.append([\"dummy\"])\n if slot_meta is None:\n temp = list(zip(self.op_labels, self.slot_meta, new_y))\n rng.shuffle(temp)\n else:\n indices = list(range(len(slot_meta)))\n for idx, st in enumerate(slot_meta):\n indices[self.slot_meta.index(st)] = idx\n temp = list(zip(self.op_labels, self.slot_meta, new_y, indices))\n temp = sorted(temp, key=lambda x: x[-1])\n temp = list(zip(*temp))\n self.op_labels = list(temp[0])\n self.slot_meta = list(temp[1])\n self.generate_y = [yy for yy in temp[2] if yy != [\"dummy\"]]\n\n def make_instance(self, tokenizer, max_seq_length=None,\n word_dropout=0., slot_token='[SLOT]'):\n if max_seq_length is None:\n max_seq_length = self.max_seq_length\n state = []\n for s in self.slot_meta:\n state.append(slot_token)\n k = s.split('-')\n v = self.last_dialog_state.get(s)\n if v is not None:\n k.extend(['-', v])\n t = tokenizer.tokenize(' '.join(k))\n else:\n t = tokenizer.tokenize(' '.join(k))\n t.extend(['-', '[NULL]'])\n state.extend(t)\n avail_length_1 = max_seq_length - len(state) - 3\n diag_1 = tokenizer.tokenize(self.dialog_history)\n diag_2 = tokenizer.tokenize(self.turn_utter)\n avail_length = avail_length_1 - len(diag_2)\n\n if len(diag_1) > avail_length: # truncated\n avail_length = len(diag_1) - avail_length\n diag_1 = diag_1[avail_length:]\n\n if len(diag_1) == 0 and len(diag_2) > avail_length_1:\n avail_length = len(diag_2) - avail_length_1\n diag_2 = diag_2[avail_length:]\n\n drop_mask = [0] + [1] * len(diag_1) + [0] + [1] * len(diag_2) + [0]\n diag_1 = [\"[CLS]\"] + diag_1 + [\"[SEP]\"]\n diag_2 = diag_2 + [\"[SEP]\"]\n segment = [0] * len(diag_1) + [1] * len(diag_2)\n\n diag = diag_1 + diag_2\n # word dropout\n if word_dropout > 0.:\n drop_mask = np.array(drop_mask)\n word_drop = 
np.random.binomial(drop_mask.astype('int64'), word_dropout)\n diag = [w if word_drop[i] == 0 else '[UNK]' for i, w in enumerate(diag)]\n input_ = diag + state\n segment = segment + [1] * len(state)\n self.input_ = input_\n\n self.segment_id = segment\n slot_position = []\n for i, t in enumerate(self.input_):\n if t == slot_token:\n slot_position.append(i)\n self.slot_position = slot_position\n\n input_mask = [1] * len(self.input_)\n self.input_id = tokenizer.convert_tokens_to_ids(self.input_)\n if len(input_mask) < max_seq_length:\n self.input_id = self.input_id + [0] * (max_seq_length - len(input_mask))\n self.segment_id = self.segment_id + [0] * (max_seq_length - len(input_mask))\n input_mask = input_mask + [0] * (max_seq_length - len(input_mask))\n\n self.input_mask = input_mask\n self.domain_id = domain2id[self.turn_domain]\n self.op_ids = [self.op2id[a] for a in self.op_labels]\n self.generate_ids = [tokenizer.convert_tokens_to_ids(y) for y in self.generate_y]\n self.train_turn_weight = self.turn_weight\n\n\n\nclass MultiWozDataset(Dataset):\n def __init__(self, data, tokenizer, slot_meta, max_seq_length, rng,\n ontology, word_dropout=0.1, shuffle_state=False, shuffle_p=0.5):\n self.data = data\n self.len = len(data)\n self.tokenizer = tokenizer\n self.slot_meta = slot_meta\n self.max_seq_length = max_seq_length\n self.ontology = ontology\n self.word_dropout = word_dropout\n self.shuffle_state = shuffle_state\n self.shuffle_p = shuffle_p\n self.rng = rng\n\n def __len__(self):\n return self.len\n\n def __getitem__(self, idx):\n if self.shuffle_state and self.shuffle_p > 0.:\n if self.rng.random() < self.shuffle_p:\n self.data[idx].shuffle_state(self.rng, None)\n else:\n self.data[idx].shuffle_state(self.rng, self.slot_meta)\n if self.word_dropout > 0 or self.shuffle_state:\n self.data[idx].make_instance(self.tokenizer,\n word_dropout=self.word_dropout)\n return self.data[idx]\n\n def collate_fn(self, batch):\n input_ids = torch.tensor([f.input_id for f in batch], dtype=torch.long)\n input_mask = torch.tensor([f.input_mask for f in batch], dtype=torch.long)\n segment_ids = torch.tensor([f.segment_id for f in batch], dtype=torch.long)\n state_position_ids = torch.tensor([f.slot_position for f in batch], dtype=torch.long)\n op_ids = torch.tensor([f.op_ids for f in batch], dtype=torch.long)\n domain_ids = torch.tensor([f.domain_id for f in batch], dtype=torch.long)\n gen_ids = [b.generate_ids for b in batch]\n max_update = max([len(b) for b in gen_ids])\n max_value = max([len(b) for b in flatten(gen_ids)])\n turn_weights = torch.tensor([f.train_turn_weight for f in batch], dtype=torch.float)\n for bid, b in enumerate(gen_ids):\n n_update = len(b)\n for idx, v in enumerate(b):\n b[idx] = v + [0] * (max_value - len(v))\n gen_ids[bid] = b + [[0] * max_value] * (max_update - n_update)\n gen_ids = torch.tensor(gen_ids, dtype=torch.long)\n\n return input_ids, input_mask, segment_ids, state_position_ids, op_ids, domain_ids, gen_ids, max_value, max_update, turn_weights\n" ]
[ [ "numpy.array", "torch.tensor" ] ]
RoboJackets/robocup-software
[ "ae2920b8b98213e625d0565dd67005e7a8595fac" ]
[ "rj_gameplay/rj_gameplay/tactic/goalie_tactic.py" ]
[ "\"\"\"Tactic to produce goalie behavior, which tracks the ball, moves to block if a shot on goal is taken, and stays within the goalie box (generally).\"\"\"\n\nfrom dataclasses import dataclass\nfrom typing import List, Optional\nfrom typing import Dict, Generic, List, Optional, Tuple, Type, TypeVar\n\nimport stp.action as action\nimport stp.rc as rc\nimport stp.tactic as tactic\nimport stp.role as role\n\nimport rj_gameplay.eval\nimport rj_gameplay.skill as skills\nfrom rj_gameplay.skill import move, receive, line_kick # , intercept\nimport stp.skill as skill\nimport numpy as np\n# TODO: replace w/ global param server\nfrom stp.utils.constants import RobotConstants, BallConstants\nimport stp.global_parameters as global_parameters\nfrom stp.local_parameters import Param\n\n# TODO: param server this const\nMIN_WALL_RAD = 0\nGOALIE_PCT_TO_BALL = 0.15\nDIST_TO_FAST_KICK = 7\n\n\nclass GoalieCost(role.CostFn):\n def __call__(\n self,\n robot: rc.Robot,\n prev_result: Optional[\"RoleResult\"],\n world_state: rc.WorldState,\n ) -> float:\n if world_state.game_info is not None:\n if robot.id == world_state.goalie_id:\n return 0.0\n\n return 10000000\n\n def unassigned_cost_fn(\n self,\n prev_result: Optional[\"RoleResult\"],\n world_state: rc.WorldState,\n ) -> float:\n\n #TODO: Implement real unassigned cost function\n return role.BIG_STUPID_NUMBER_CONST_FOR_UNASSIGNED_COST_PLS_CHANGE\n\n\ndef get_goalie_pt(world_state: rc.WorldState) -> np.ndarray:\n \"\"\"Finds point for goalie to best be in to block a shot.\n :return numpy point\n \"\"\"\n # TODO: param server any constant from stp/utils/constants.py (this includes BallConstants)\n ball_pt = world_state.ball.pos\n goal_pt = world_state.field.our_goal_loc\n\n dir_vec = (ball_pt - goal_pt) / np.linalg.norm(ball_pt - goal_pt)\n # get in-between ball and goal, staying behind wall\n dist_from_goal = min(\n GOALIE_PCT_TO_BALL * np.linalg.norm(ball_pt - goal_pt), 1.0)\n mid_pt = goal_pt + (dir_vec * dist_from_goal)\n return mid_pt\n\n\ndef get_block_pt(world_state: rc.WorldState, my_pos: np.ndarray) -> np.ndarray:\n pos = world_state.ball.pos\n vel = world_state.ball.vel\n\n tangent = vel / (np.linalg.norm(vel) + 1e-6)\n\n # Find out where it would cross\n time_to_cross = np.abs(pos[1] / vel[1]) if np.abs(vel[1]) > 1e-6 else 0\n cross_x = pos[0] + vel[0] * time_to_cross\n cross_position = np.array([np.clip(cross_x, a_min=-0.6, a_max=0.6), 0.0])\n\n tangent = cross_position - pos\n tangent /= np.linalg.norm(tangent)\n block_pt = np.dot(tangent, my_pos - pos) * tangent + pos\n\n return block_pt\n\n\nclass GoalieTactic(tactic.ITactic):\n def __init__(self, brick=False):\n self.brick = brick\n\n # init skills\n self.move_se = tactic.SkillEntry(move.Move(ignore_ball=True))\n self.receive_se = tactic.SkillEntry(receive.Receive())\n self.pivot_kick_se = tactic.SkillEntry(\n line_kick.LineKickSkill(None,\n target_point=np.array([0.0, 6.0]),\n chip=True,\n kick_speed=5.5))\n\n # TODO: rename cost_list to role_cost in other gameplay files\n self.role_cost = GoalieCost()\n\n def compute_props(self):\n pass\n\n def create_request(self, **kwargs) -> role.RoleRequest:\n \"\"\"Creates a sane default RoleRequest.\n :return: A list of size 1 of a sane default RoleRequest.\n \"\"\"\n pass\n\n def get_requests(self, world_state: rc.WorldState,\n props) -> List[tactic.RoleRequests]:\n global MIN_WALL_RAD\n \"\"\"\n :return: A list of role requests for move skills needed\n \"\"\"\n\n # TODO: this calculation is copy-pasted from wall_tactic\n # put into common param 
file: https://www.geeksforgeeks.org/global-keyword-in-python/\n\n # dist is slightly greater than def_area box bounds\n box_w = world_state.field.def_area_long_dist_m\n box_h = world_state.field.def_area_short_dist_m\n line_w = world_state.field.line_width_m\n # max out of box to cap for goalie\n MAX_OOB = RobotConstants.RADIUS\n\n role_requests = {}\n if world_state and world_state.ball.visible:\n ball_speed = np.linalg.norm(world_state.ball.vel)\n ball_pos = world_state.ball.pos\n ball_dist = np.linalg.norm(world_state.field.our_goal_loc -\n ball_pos)\n goal_pos = world_state.field.our_goal_loc\n towards_goal = goal_pos - ball_pos\n\n if self.brick:\n self.move_se.skill.target_point = world_state.field.our_goal_loc\n self.move_se.skill.face_point = world_state.ball.pos\n role_requests[self.move_se] = [\n role.RoleRequest(role.Priority.HIGH, True, self.role_cost)\n ]\n return role_requests\n\n if ball_speed < 0.5 and (\n abs(ball_pos[0]) < box_w / 2 + line_w + MAX_OOB\n and ball_pos[1] < box_h + line_w + MAX_OOB\n ) and not world_state.game_info.is_stopped():\n self.move_se = tactic.SkillEntry(move.Move(ignore_ball=True))\n if ball_speed < 1e-6:\n # if ball is stopped and inside goalie box, collect it\n role_requests[self.receive_se] = [\n role.RoleRequest(role.Priority.HIGH, True,\n self.role_cost)\n ]\n else:\n # if ball has been stopped already, chip toward center field\n self.pivot_kick_se.skill.target_point = np.array(\n [0.0, 6.0])\n role_requests[self.pivot_kick_se] = [\n role.RoleRequest(role.Priority.HIGH, True,\n self.role_cost)\n ]\n else:\n if ball_speed > 0 and np.dot(towards_goal,\n world_state.ball.vel) > 0.3:\n # if ball is moving and coming at goal, move laterally to block ball\n # TODO (#1676): replace this logic with a real intercept planner\n goalie_pos = world_state.our_robots[\n world_state.\n goalie_id].pose[:\n 2] if world_state.goalie_id is not None else np.array(\n [0., 0.])\n self.move_se.skill.target_point = get_block_pt(\n world_state, goalie_pos)\n self.move_se.skill.face_point = world_state.ball.pos\n role_requests[self.move_se] = [\n role.RoleRequest(role.Priority.HIGH, True,\n self.role_cost)\n ]\n else:\n # else, track ball normally\n self.move_se.skill.target_point = get_goalie_pt(\n world_state)\n self.move_se.skill.face_point = world_state.ball.pos\n role_requests[self.move_se] = [\n role.RoleRequest(role.Priority.HIGH, True,\n self.role_cost)\n ]\n if self.pivot_kick_se.skill.is_done(world_state):\n self.pivot_kick_se = tactic.SkillEntry(\n line_kick.LineKickSkill(None,\n target_point=np.array([0.0, 6.0]),\n chip=True,\n kick_speed=5.5))\n\n return role_requests\n\n def tick(self, world_state: rc.WorldState,\n role_results: tactic.RoleResults) -> List[tactic.SkillEntry]:\n \"\"\"\n :return: A list of skills depending on which roles are filled\n \"\"\"\n\n # create list of skills based on if RoleResult exists for SkillEntry\n skills = []\n\n move_result = role_results[self.move_se]\n receive_result = role_results[self.receive_se]\n pivot_kick_result = role_results[self.pivot_kick_se]\n\n # move skill takes priority\n if move_result and move_result[0].is_filled():\n skills.append(self.move_se)\n elif receive_result and receive_result[0].is_filled():\n skills.append(self.receive_se)\n elif pivot_kick_result and pivot_kick_result[0].is_filled():\n skills.append(self.pivot_kick_se)\n\n return skills\n\n def is_done(self, world_state):\n \"\"\"\n :return boolean indicating if tactic is done\n \"\"\"\n # goalie tactic always active\n return False\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.dot", "numpy.abs", "numpy.clip" ] ]
timoklimmer/neanno
[ "0c76e21a1774880827f518dc9550b54cce38625c" ]
[ "neanno/utils/metrics.py" ]
[ "from io import BytesIO\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sn\nfrom sklearn.metrics import confusion_matrix\n\nfrom neanno.utils.dict import merge_dict_sum_child_dicts\nfrom neanno.utils.text import (\n extract_annotations_as_list,\n extract_categories_from_categories_column,\n extract_entity_codes_from_annotated_texts_column,\n)\nfrom neanno.utils.list import get_set_of_list_and_keep_sequence\n\n\ndef f1_score(precision, recall):\n if precision + recall == 0:\n return 0\n else:\n return 2 * (precision * recall) / (precision + recall)\n\n\ndef compute_ner_metrics_on_text_level(\n actual_annotations, predicted_annotations, considered_entity_codes\n):\n actual_annotations = [\n annotation\n for annotation in actual_annotations\n if annotation[\"type\"] in [\"standalone_named_entity\", \"parented_named_entity\"]\n and annotation[\"entity_code\"] in considered_entity_codes\n ]\n predicted_annotations = [\n annotation\n for annotation in predicted_annotations\n if annotation[\"type\"] in [\"standalone_named_entity\", \"parented_named_entity\"]\n and annotation[\"entity_code\"] in considered_entity_codes\n ]\n counters = {\n entity_code: {\n \"actual\": 0,\n \"predictions\": 0,\n \"correct\": 0,\n \"incorrect\": 0,\n \"precision\": 0,\n \"recall\": 0,\n }\n for entity_code in considered_entity_codes\n }\n for predicted_annotation in predicted_annotations:\n entity_code = predicted_annotation[\"entity_code\"]\n counters[entity_code][\"predictions\"] += 1\n if any(\n actual_annotation[\"entity_code\"] == predicted_annotation[\"entity_code\"]\n and actual_annotation[\"start_net\"] == predicted_annotation[\"start_net\"]\n and actual_annotation[\"end_net\"] == predicted_annotation[\"end_net\"]\n for actual_annotation in actual_annotations\n ):\n # if predicted_annotation in actual_annotations:\n counters[entity_code][\"correct\"] += 1\n else:\n counters[entity_code][\"incorrect\"] += 1\n for actual_annotation in actual_annotations:\n entity_code = actual_annotation[\"entity_code\"]\n counters[entity_code][\"actual\"] += 1\n for entity_code in considered_entity_codes:\n correct = counters[entity_code][\"correct\"]\n number_predictions = counters[entity_code][\"predictions\"]\n possible = counters[entity_code][\"actual\"]\n counters[entity_code][\"precision\"] = (\n correct / number_predictions if number_predictions > 0 else 0\n )\n counters[entity_code][\"recall\"] = correct / possible if possible > 0 else 0\n return counters\n\n\ndef compute_category_metrics_on_text_level(\n actual_categories, predicted_categories, considered_categories\n):\n actual_categories = [\n category for category in actual_categories if category in considered_categories\n ]\n predicted_categories = [\n category\n for category in predicted_categories\n if category in considered_categories\n ]\n counters = {\n category: {\n \"actual\": 0,\n \"predictions\": 0,\n \"correct\": 0,\n \"incorrect\": 0,\n \"precision\": 0,\n \"recall\": 0,\n }\n for category in considered_categories\n }\n for predicted_category in predicted_categories:\n category = predicted_category\n counters[category][\"predictions\"] += 1\n if any(\n actual_category == predicted_category\n for actual_category in actual_categories\n ):\n counters[category][\"correct\"] += 1\n else:\n counters[category][\"incorrect\"] += 1\n for actual_category in actual_categories:\n category = actual_category\n counters[category][\"actual\"] += 1\n for category in considered_categories:\n correct = counters[category][\"correct\"]\n 
number_predictions = counters[category][\"predictions\"]\n possible = counters[category][\"actual\"]\n counters[category][\"precision\"] = (\n correct / number_predictions if number_predictions > 0 else 0\n )\n counters[category][\"recall\"] = correct / possible if possible > 0 else 0\n return counters\n\n\ndef aggregate_ner_metrics(ner_metrics1, ner_metrics2):\n \"\"\"Merges two NER metrics dicts by summing their counters, then recomputes precision, recall and F1 score.\"\"\"\n result = merge_dict_sum_child_dicts(ner_metrics1, ner_metrics2)\n for entity_code in result:\n possible = result[entity_code][\"actual\"]\n number_predictions = result[entity_code][\"predictions\"]\n correct = result[entity_code][\"correct\"]\n result[entity_code][\"precision\"] = (\n correct / number_predictions if number_predictions > 0 else 0\n )\n result[entity_code][\"recall\"] = correct / possible if possible > 0 else 0\n result[entity_code][\"f1_score\"] = f1_score(\n result[entity_code][\"precision\"], result[entity_code][\"recall\"]\n )\n return result\n\n\ndef aggregate_category_metrics(category_metrics1, category_metrics2):\n \"\"\"Merges two category metrics dicts by summing their counters, then recomputes precision, recall and F1 score.\"\"\"\n result = merge_dict_sum_child_dicts(category_metrics1, category_metrics2)\n for category in result:\n possible = result[category][\"actual\"]\n number_predictions = result[category][\"predictions\"]\n correct = result[category][\"correct\"]\n result[category][\"precision\"] = (\n correct / number_predictions if number_predictions > 0 else 0\n )\n result[category][\"recall\"] = correct / possible if possible > 0 else 0\n result[category][\"f1_score\"] = f1_score(\n result[category][\"precision\"], result[category][\"recall\"]\n )\n return result\n\n\ndef compute_ner_metrics(\n actual_annotated_texts_pandas_series,\n predicted_annotated_texts_pandas_series,\n considered_entity_codes=None,\n):\n \"\"\"Computes metrics incl. precision and recall on entity code level, given a text column with the true annotations and a column with the predicted annotations.\"\"\"\n\n actual_annotations_series = actual_annotated_texts_pandas_series.map(\n lambda text: extract_annotations_as_list(\n text, types_to_extract=[\"standalone_named_entity\", \"parented_named_entity\"]\n )\n )\n predicted_annotations_series = predicted_annotated_texts_pandas_series.map(\n lambda text: extract_annotations_as_list(\n text, types_to_extract=[\"standalone_named_entity\", \"parented_named_entity\"]\n )\n )\n if considered_entity_codes is None:\n considered_entity_codes = extract_entity_codes_from_annotated_texts_column(\n actual_annotated_texts_pandas_series\n )\n\n result = {}\n # note: Series.iteritems was removed in pandas 2.0, hence items()\n for (index, actual_annotations) in actual_annotations_series.items():\n predicted_annotations = predicted_annotations_series[index]\n ner_metrics_on_text_level = compute_ner_metrics_on_text_level(\n actual_annotations, predicted_annotations, considered_entity_codes\n )\n result = aggregate_ner_metrics(result, ner_metrics_on_text_level)\n return result\n\n\ndef compute_category_metrics(\n actual_categories_pandas_series,\n predicted_categories_pandas_series,\n considered_categories=None,\n):\n \"\"\"Computes precision and recall for predicted text categories.\"\"\"\n actual_categories_series = actual_categories_pandas_series.map(\n lambda text: text.split(\"|\")\n )\n predicted_categories_series = predicted_categories_pandas_series.map(\n lambda text: text.split(\"|\")\n )\n if considered_categories is None:\n considered_categories = extract_categories_from_categories_column(\n actual_categories_pandas_series\n )\n\n result = {}\n for (index, actual_categories) in actual_categories_series.items():\n predicted_categories = 
predicted_categories_series[index]\n category_metrics_on_text_level = compute_category_metrics_on_text_level(\n actual_categories, predicted_categories, considered_categories\n )\n result = aggregate_category_metrics(result, category_metrics_on_text_level)\n return result\n\n\ndef get_confusion_matrix(actual_series, predicted_series, categories_to_train):\n \"\"\"Computes a confusion matrix from the given actual and predicted series.\"\"\"\n actual_series = actual_series.map(lambda value: value if value else \"(None)\")\n predicted_series = predicted_series.map(lambda value: value if value else \"(None)\")\n # note: Series.append was removed in pandas 2.0, hence plain list concatenation\n categories = get_set_of_list_and_keep_sequence(\n pd.Series(\n list(categories_to_train)\n + sorted(list(actual_series) + list(predicted_series))\n )\n )\n result = pd.crosstab(\n pd.Categorical(actual_series, categories=categories),\n pd.Categorical(predicted_series, categories=categories),\n rownames=[\"Actual\"],\n colnames=[\"Predicted\"],\n dropna=False,\n )\n return result\n\n\ndef get_confusion_matrix_png_bytes(\n actual_series, predicted_series, categories_to_train\n):\n \"\"\"Computes a confusion matrix from the given actual and predicted series and returns the bytes of a confusion matrix plot (png format).\"\"\"\n confusion_matrix = get_confusion_matrix(\n actual_series, predicted_series, categories_to_train\n )\n # draw on a fresh figure so repeated calls do not stack heatmaps\n plt.figure()\n figure = sn.heatmap(\n confusion_matrix, annot=True, fmt=\"d\", linewidths=0.5\n ).get_figure()\n memory_buffer = BytesIO()\n figure.savefig(memory_buffer, format=\"png\")\n plt.close(figure)\n return memory_buffer.getvalue()\n" ]
[ [ "pandas.Categorical", "pandas.Series" ] ]