repo_name    stringlengths 6-130
hexsha       sequence
file_path    sequence
code         sequence
apis         sequence
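Each record below pairs a repository name with parallel sequences of file SHAs, file paths, source code, and the fully qualified library calls detected in that code. Assuming the dump is serialized as JSON Lines with field names matching the column headers above (the file name used here is hypothetical), a minimal reader sketch could look like this:

import json

def iter_records(path="code_api_records.jsonl"):
    """Yield one (repo_name, file_path, code, apis) tuple per file in the dump."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            # hexsha, file_path, code and apis are parallel sequences with one
            # entry per file belonging to the repository.
            for file_path, code, apis in zip(row["file_path"], row["code"], row["apis"]):
                yield row["repo_name"], file_path, code, apis

if __name__ == "__main__":
    for repo, path, code, apis in iter_records():
        print(repo, path, len(code.splitlines()), len(apis))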
Guli-Y/wimlds_emissions
[ "51c610867666f91446fbe228660c865cf5869e99" ]
[ "emissions/utils.py" ]
[ "from sklearn.model_selection import learning_curve\nfrom sklearn.model_selection import ShuffleSplit\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score, recall_score, precision_score\nfrom emissions.data import load_data, clean_data\n\n\ndef scoring_table(search, \n X_test,\n y_test):\n \"\"\"\n takes grid search output and index of best params\n returns a scoring table\n \"\"\"\n result = search.cv_results_\n tmp = pd.DataFrame({'train':{'accuracy': result['mean_train_accuracy'][search.best_index_], \n 'recall': result['mean_train_recall'][search.best_index_],\n 'precision': result['mean_train_precision'][search.best_index_]}, \n 'val':{'accuracy': result['mean_test_accuracy'][search.best_index_], \n 'recall': result['mean_test_recall'][search.best_index_],\n 'precision': result['mean_test_precision'][search.best_index_]}\n })\n\n y_pred = search.best_estimator_.predict(X_test)\n y_true = y_test\n tmp.loc['accuracy', 'test'] = accuracy_score(y_true, y_pred)\n tmp.loc['recall', 'test'] = recall_score(y_true, y_pred)\n tmp.loc['precision', 'test'] = precision_score(y_true, y_pred)\n return tmp.round(3)\n\ndef plot_learning_curve(model, X_train, y_train, scoring='recall'):\n \"\"\"takes a model, X_train, y_train and plots learning curve\"\"\"\n\n cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)\n\n train_sizes, train_scores, test_scores = learning_curve(model, \n X_train, \n y_train, \n train_sizes=np.linspace(0.05, 1, 20),\n cv=cv,\n scoring=scoring,\n n_jobs=-1\n )\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n\n plt.plot(train_sizes, train_scores_mean, label = 'Train')\n plt.fill_between(train_sizes, \n train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, \n alpha=0.1)\n\n plt.plot(train_sizes, test_scores_mean, label = 'Val')\n plt.fill_between(train_sizes, \n test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, \n alpha=0.1)\n plt.legend()\n plt.ylabel('score')\n plt.xlabel('train sizes')\n if scoring=='recall':\n plt.ylim(0.6, 1)\n \ndef make_transform_get(df, make_threshhold=\"0.01\"):\n '''\n Take cleaned training data and return a list of makes to be converted to 'other'\n '''\n #create a make label 'other' for all makes that only account for less than 1% of cars each and together aprox <10% of cars\n value_counts_norm = df['MAKE'].value_counts(normalize = True)\n to_other = value_counts_norm[value_counts_norm < float(make_threshhold)]\n #print(f\"\\n{len(to_other)} make labels each account for less than {round((float(make_threshhold) *100), 2)}% of cars and together account for {(round(to_other.sum(), 4)) *100}% of cars\")\n to_keep = value_counts_norm[value_counts_norm >= float(make_threshhold)]\n makes_keep = list(to_keep.index)\n makes_keep.sort()\n return makes_keep\n\nif __name__==\"__main__\":\n df = load_data()\n df = clean_data(df)\n print('Makes to keep:', make_transform_get(df))" ]
[ [ "pandas.DataFrame", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylim", "numpy.mean", "sklearn.metrics.accuracy_score", "numpy.linspace", "numpy.std", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.ylabel", "sklearn.metrics.precision_score", "sklearn.model_selection.ShuffleSplit", "sklearn.metrics.recall_score" ] ]
taiducvu/VNG-NSFW-DETECTION
[ "f49b6ad21ed9c8646c402a37015a80258fb79a68" ]
[ "train_model.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nimport data as dt\nimport vng_model as md\nimport time\nimport csv\nimport math\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('train_dir', '/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset',\n \"\"\"Direction where the training set is\"\"\")\n\ntf.app.flags.DEFINE_string('val_dir', '/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset',\n \"\"\"Direction where the validation set is\"\"\")\n\ntf.app.flags.DEFINE_integer('num_steps', 500000,\n \"The number of steps in updating the weights of models\")\n\ntf.app.flags.DEFINE_string('checkpoint_dir_resnet', '/home/taivu/workspace/Pycharm_Nudity_Detection/pretrain_weight',\n \"\"\"Direction where the checkpoint is\"\"\")\n\ntf.app.flags.DEFINE_string('checkpoint_dir', '/home/taivu/workspace/Pycharm_Nudity_Detection/checkpoint_model',\n \"\"\"Direction where the checkpoint of model is saved\"\"\")\n\ntf.app.flags.DEFINE_float('learning_rate', 1e-3,\n \"\"\"Learning rate for optimization\"\"\")\n\ntf.app.flags.DEFINE_integer('num_train_sample', 8000,\n \"\"\"The number of training samples\"\"\")\n\ntf.app.flags.DEFINE_integer('num_val_sample', 1156,\n \"\"\"The number of validate samples\"\"\")\n\ntf.app.flags.DEFINE_integer('batch_size', 32,\n \"The size of a image batch\")\n\ntf.app.flags.DEFINE_float('weight_decay', 0.01,\n \"\"\"Weight decay\"\"\")\n\n# Flags for validation process\ntf.app.flags.DEFINE_boolean('use_val', True,\n \"\"\"Whether using the validation set in the training process\"\"\")\n\ntf.app.flags.DEFINE_integer('val_batch_size', 128,\n \"\"\"The size of a validate data batch\"\"\")\n\n# Logging the result\ntf.app.flags.DEFINE_boolean('is_logging', True,\n \"\"\"Whether logging the result of training model\"\"\")\n\ntf.app.flags.DEFINE_string('log_dir', '/home/taivu/workspace/Pycharm_Nudity_Detection/checkpoint_model',\n \"\"\"Direction where the log file is saved\"\"\")\n\ntf.app.flags.DEFINE_string('summaries_dir', '/home/taivu/Dropbox/Pycharm_Nudity_Detection/log',\n \"\"\"Direction where the log tensorboard is saved\"\"\")\n\n\ndef set_flag(flag, value):\n flag.assign(value)\n\n\ndef train():\n \"\"\"\n\n :return:\n \"\"\"\n # Read data\n with tf.Graph().as_default():\n global_step = tf.Variable(0, trainable=False)\n train_flag = tf.Variable(True, trainable=False)\n\n train_path = os.path.join(FLAGS.train_dir, 'transfer_learning_train.tfrecords')\n val_path = os.path.join(FLAGS.val_dir, 'transfer_learning_val.tfrecords')\n\n tr_samples, tr_lb = dt.input_data(train_path, FLAGS.batch_size)\n val_samples, val_lb = dt.input_data(val_path, 1156, False)\n\n samples, labels = tf.cond(train_flag,\n lambda: (tr_samples, tr_lb),\n lambda: (val_samples, val_lb))\n\n samples = tf.squeeze(samples, [1, 2])\n\n logits = md.inference(samples)\n\n loss = md.loss(logits, labels)\n\n correct_predict = tf.equal(tf.cast(tf.arg_max(logits, 1), tf.int32), labels)\n\n val_acc = tf.reduce_mean(tf.cast(correct_predict, tf.float32))\n\n # train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\n train_step = tf.train.RMSPropOptimizer(1e-5).minimize(loss)\n\n coord = tf.train.Coordinator()\n\n format_str = ('%d step: %.2f (%.1f examples/sec; %0.3f sec/batch)')\n\n with tf.Session() as sess:\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n sess.run(tf.global_variables_initializer())\n\n # samp_batch, lb_batch = sess.run([tr_samples, tr_lb])\n # 
size: samp_batch [#batch_size, 1, 1, 2048]\n # tr_lb [#batch_size,]\n\n for idx in range(FLAGS.num_epochs):\n start_time = time.time()\n _, loss_value = sess.run([train_step, loss])\n duration = time.time() - start_time\n examples_per_sec = FLAGS.batch_size / float(duration)\n sec_per_batch = float(duration)\n\n if idx % 10 == 0:\n set_flag(train_flag, False)\n acc = sess.run([val_acc])\n set_flag(train_flag, True)\n print('Validation accuracy: %.2f'%acc[0])\n\n print(format_str %(idx, loss_value, examples_per_sec, sec_per_batch))\n\n coord.request_stop()\n coord.join(threads, stop_grace_period_secs=120)\n sess.close()\n\n\ndef train_resnet():\n \"\"\"\n The function is used trainto the model. We use the 'Stochastic Gradient Descent' algorithm to\n optimize the weights of model. More detail, we initialize them by using pre-trained weights of Resnet model.\n To train model, we freeze the first block of model and train the other. The learning rate in layers that belong to\n Resnet model is set 5 times larger than in additional layers.\n\n :return:\n \"\"\"\n\n batch_ls = []\n for batch in range(2, 8):\n name_batch = '4000x224x224_batch_' + str(batch) + '.tfrecords'\n train_batch = os.path.join(FLAGS.train_dir, name_batch)\n batch_ls.append(train_batch)\n\n val_path = os.path.join(FLAGS.train_dir, '4000x224x224_batch_1.tfrecords')\n\n with tf.Graph().as_default() as g:\n # ------------------------- BUILD THE GRAPH OF MODEL ---------------------------- #\n x = tf.placeholder(tf.float32, (None, 224, 224, 3), name='input_features')\n y_ = tf.placeholder(tf.int32, (None,), name='labels')\n\n val_x = tf.placeholder(tf.float32, (None, 224, 224, 3), name='validate_features')\n val_y = tf.placeholder(tf.int32, (None,), name='validate_labels')\n\n tr_samples, tr_labels = dt.input_data(batch_ls, FLAGS.batch_size)\n\n val_samples, val_labels = dt.input_data([val_path], FLAGS.val_batch_size, False)\n\n logit = md.inference_resnet(x, is_training=False, is_log=FLAGS.is_logging)\n\n val_logit = md.inference_resnet(val_x, is_training=False, reuse=True, is_log=FLAGS.is_logging)\n\n # Define variables to output the predict of model and to evaluate one\n resnet_var_ls = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='resnet_v1_50')\n resnet_weight_ls = []\n\n for idx in range(0, 159, 3):\n resnet_weight_ls.append(resnet_var_ls[idx])\n\n loss = md.loss(logit, y_, resnet_weight_ls)\n\n val_loss = md.loss(val_logit, val_y, resnet_weight_ls)\n\n hat_y = tf.arg_max(logit, 1, name='predict_label')\n\n val_pre_y = tf.arg_max(val_logit, 1, name='val_predict_label')\n\n correct_pre = tf.equal(tf.cast(hat_y, tf.int32), y_)\n\n val_correct_predict = tf.equal(tf.cast(val_pre_y, tf.int32), val_y)\n\n accuracy = tf.reduce_mean(tf.cast(correct_pre, tf.float32))\n\n val_accuracy = tf.reduce_mean(tf.cast(val_correct_predict, tf.float32))\n\n tf.summary.scalar('train_loss', loss) # Log the value of the train loss\n tf.summary.scalar('accuracy', accuracy) # Log the accuracy\n # ------------------------------------- END -------------------------------------- #\n\n # -------------------------------Optimizing process ------------------------------ #\n resnet_var_ls = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='resnet_v1_50')\n\n add_var_ls = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='additional_layers')\n\n opt_1 = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)\n\n opt_2 = tf.train.GradientDescentOptimizer(5*FLAGS.learning_rate)\n\n # Freeze the weights of from first to third 
blocks\n grads = tf.gradients(loss, resnet_var_ls[153:] + add_var_ls)\n\n # Do gradient descent only on a particular weight set\n num_opt_resnet_layers = len(resnet_var_ls[153:])\n\n grads_1 = grads[:num_opt_resnet_layers] # Do gradient for Resnet's layers\n\n grads_2 = grads[num_opt_resnet_layers:] # Do gradient for Additional layers\n\n train_opt_1 = opt_1.apply_gradients(zip(grads_1, resnet_var_ls[153:]))\n\n train_opt_2 = opt_2.apply_gradients(zip(grads_2, add_var_ls))\n\n train_opt = tf.group(train_opt_1, train_opt_2)\n # ------------------------------------- END -------------------------------------- #\n\n saver_my_model = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=50)\n\n # ------------------ Support for loading the trained weights of Resnet ----------- #\n saver_resnet = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,\n scope='resnet_v1_50'))\n\n ckpt_resnet = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir_resnet)\n ###################################################################################\n\n # ----------------------------------- TENSORBOARD --------------------------------\n # merged = tf.summary.merge_all()\n # train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', graph=g)\n # test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test')\n\n coord = tf.train.Coordinator()\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n # ------ Load pre-trained weights of the Resnet model -------------------------- #\n if ckpt_resnet and ckpt_resnet.model_checkpoint_path:\n saver_resnet.restore(sess, ckpt_resnet.model_checkpoint_path)\n print('Load pre-trained weights of Resnet successfully!')\n\n else:\n print('Checkpoint of Resnet not found!')\n ####################################################################################\n\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n format_str=('Step %d: %0.2f (%0.1f samples/sec; %0.3f secs/batch)')\n\n steps_per_epoch = int(math.ceil(float(FLAGS.num_train_sample)/FLAGS.batch_size))\n\n for idx in range(FLAGS.num_steps):\n\n tr_x, tr_y = sess.run([tr_samples, tr_labels])\n\n start_time = time.time()\n\n _, loss_value = sess.run([train_opt, loss], feed_dict={x: tr_x, y_: tr_y})\n\n duration = time.time() - start_time\n\n examples_per_sec = FLAGS.batch_size / float(duration)\n\n sec_per_batch = float(duration)\n\n print(format_str % (idx, loss_value, examples_per_sec, sec_per_batch))\n\n mean_acc = 0\n mean_val_acc = 0\n mean_tr_loss = 0\n mean_val_loss = 0\n\n if (idx + 1) % steps_per_epoch == 0 or idx == 0:\n # Logging the performance of model in training process\n if FLAGS.use_val and FLAGS.is_logging:\n val_iter = int(math.ceil(FLAGS.num_val_sample)/FLAGS.val_batch_size)\n\n for i in range(val_iter):\n v_x, v_y = sess.run([val_samples, val_labels])\n\n val_acc, val_err = sess.run([val_accuracy, val_loss], feed_dict={x: tr_x,\n y_: tr_y,\n val_x: v_x,\n val_y: v_y})\n # train_writer.add_summary(summary, idx) # Log\n if i == 0:\n mean_val_acc = val_acc\n mean_val_loss = val_err\n\n else:\n mean_val_acc = 1.0/(i + 1)*(val_acc + i*mean_val_acc)\n mean_val_loss = 1.0/(i + 1)*(val_err + i*mean_val_loss)\n\n print('Validation accuracy: %0.2f'%mean_val_acc)\n\n for i in range(steps_per_epoch):\n eval_tr_x, eval_tr_y = sess.run([tr_samples, tr_labels])\n\n tr_acc, loss_value = sess.run([accuracy, loss], feed_dict={x:eval_tr_x, y_:eval_tr_y})\n\n if i == 0:\n mean_acc = tr_acc\n mean_tr_loss = loss_value\n 
else:\n mean_acc = 1.0/(i+1)*(tr_acc + i*mean_acc)\n mean_tr_loss = 1.0/(i+1)*(loss_value + i*mean_tr_loss)\n\n # -------------------- Writing log-file ------------------------------\n log_path = os.path.join(FLAGS.log_dir, 'result.csv')\n\n if os.path.isfile(log_path) and idx == 0:\n os.remove(log_path)\n\n with open(log_path, 'a') as csvfile:\n print('Writing data into csv file ...')\n\n csv_writer = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\n csv_writer.writerow([idx, mean_tr_loss, mean_val_loss, mean_acc, mean_val_acc])\n\n print('Finish writing!')\n # ---------------------------- END ------------------------------------\n\n elif FLAGS.use_val:\n print('Is training')\n\n else:\n print('Set True for use_val flag to log the performance of model in training process!')\n\n checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')\n\n saver_my_model.save(sess, checkpoint_path, global_step=idx)\n\n coord.request_stop()\n coord.join(threads, stop_grace_period_secs=120)\n sess.close()\n\n\ndef main(argv=None):\n train_resnet()\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.train.start_queue_runners", "tensorflow.group", "tensorflow.train.get_checkpoint_state", "tensorflow.gradients", "tensorflow.global_variables_initializer", "tensorflow.cast", "tensorflow.train.GradientDescentOptimizer", "tensorflow.Variable", "tensorflow.squeeze", "tensorflow.app.run", "tensorflow.get_collection", "tensorflow.train.Coordinator", "tensorflow.summary.scalar", "tensorflow.train.RMSPropOptimizer", "tensorflow.Session", "tensorflow.placeholder", "tensorflow.arg_max", "tensorflow.app.flags.DEFINE_integer", "tensorflow.app.flags.DEFINE_string", "tensorflow.cond", "tensorflow.Graph", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.app.flags.DEFINE_float" ] ]
mevangelista-alvarado/NumericalPoissonGrometry-
[ "76f41be4eb11248c3206b5e371c7aa9eb9d73b44" ]
[ "test/num_curl_operator.py" ]
[ "import datetime\nimport time\nimport numpy as np\nimport statistics as stat\nfrom numpoisson.numpoisson import NumPoissonGeometry\n\nprint('Start')\nnpg = NumPoissonGeometry(4, 'x')\nP ={(1, 3): '2*x4', (1, 4): '2*x3', (2, 3): '-2*x4', (2, 4): '2*x3', (3, 4): 'x1 - x2'}\n\nnum_curl_operator_res = dict()\nj = 2\nfor mesh_path in ['4Qmesh_10_2.npy', '4Qmesh_10_3.npy', '4Qmesh_10_4.npy', '4Qmesh_10_5.npy', '4Qmesh_10_6.npy', '4Qmesh_10_7.npy']:\n print(f'step {j}')\n tiempos = dict()\n with open(mesh_path, 'rb') as f:\n mesh = np.load(f)\n for k in range(25):\n A = datetime.datetime.now()\n npg.num_curl_operator(P, 1, mesh, pt_output=True)\n B = datetime.datetime.now()\n tiempos[k] = (B - A).total_seconds()\n promedio = stat.mean(tiempos.values())\n desviacion = stat.pstdev(tiempos.values())\n tiempos['promedios'] = promedio\n tiempos['desviacion'] = desviacion\n num_curl_operator_res[f'10**{j}'] = tiempos\n j = j + 1\n\nprint(num_curl_operator_res)\nprint('Finish')\n" ]
[ [ "numpy.load" ] ]
pavolmarak/level1_extractor
[ "55cb4b0d32a9740a963a069c732548363dc010fe" ]
[ "neuronova_siet/convolutionalNetwork_ResNet50_4class.py" ]
[ "import tensorflow as tf\r\nfrom tensorflow import keras\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nimport glob\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import plot_confusion_matrix\r\n\r\n\r\nclasses = {'Arch': 0, 'Left Loop': 1, 'Right Loop': 2, 'Whorl': 3}\r\nclass_num = len(classes.keys())\r\n\r\nfile_folder_arch = '/home/editav/Desktop/FVC2002_Db4_a_b/A/'\r\nfile_folder_left_loop = '/home/editav/Desktop/FVC2002_Db4_a_b/LeftLoop/'\r\nfile_folder_right_loop = '/home/editav/Desktop/FVC2002_Db4_a_b/RightLoop/'\r\nfile_folder_whorl = '/home/editav/Desktop/FVC2002_Db4_a_b/Whorl/'\r\n\r\n\r\ndata_files_arch = [f for f in glob.glob(file_folder_arch + \"*.tif\")]\r\ndata_files_left_loop = [f for f in glob.glob(file_folder_left_loop + \"*.tif\")]\r\ndata_files_right_loop = [f for f in glob.glob(file_folder_right_loop + \"*.tif\")]\r\ndata_files_whorl = [f for f in glob.glob(file_folder_whorl + \"*.tif\")]\r\n\r\ndata_arch = []\r\nfor i in range(len(data_files_arch)):\r\n data_arch.append(np.array(Image.open(data_files_arch[i])))\r\n\r\ndata_left_loop = []\r\nfor i in range(len(data_files_left_loop)):\r\n data_left_loop.append(np.array(Image.open(data_files_left_loop[i])))\r\n\r\ndata_right_loop = []\r\nfor i in range(len(data_files_right_loop)):\r\n data_right_loop.append(np.array(Image.open(data_files_right_loop[i])))\r\n\r\ndata_whorl = []\r\nfor i in range(len(data_files_whorl)):\r\n data_whorl.append(np.array(Image.open(data_files_whorl[i])))\r\n\r\ndata_arch_train = data_arch[:int(len(data_files_arch) * 0.7)]\r\ndata_arch_val = data_arch[int(len(data_files_arch) * 0.7): int(len(data_files_arch) * 0.8)]\r\ndata_arch_test = data_arch[int(len(data_files_arch) * -0.2):]\r\n\r\ndata_arch_train_labels = [classes['Arch']] * int(len(data_arch_train))\r\ndata_arch_val_labels = [classes['Arch']] * int(len(data_arch_val))\r\ndata_arch_test_labels = [classes['Arch']] * int(len(data_arch_test))\r\n\r\ndata_left_loop_train = data_left_loop[:int(len(data_files_left_loop) * 0.7)]\r\ndata_left_loop_val = data_left_loop[int(len(data_files_left_loop) * 0.7): int(len(data_files_left_loop) * 0.8)]\r\ndata_left_loop_test = data_left_loop[int(len(data_files_left_loop) * -0.2):]\r\n\r\ndata_left_loop_train_labels = [classes['Left Loop']] * int(len(data_left_loop_train))\r\ndata_left_loop_val_labels = [classes['Left Loop']] * int(len(data_left_loop_val))\r\ndata_left_loop_test_labels = [classes['Left Loop']] * int(len(data_left_loop_test))\r\n\r\ndata_right_loop_train = data_right_loop[:int(len(data_files_right_loop) * 0.7)]\r\ndata_right_loop_val = data_right_loop[int(len(data_files_right_loop) * 0.7): int(len(data_files_right_loop) * 0.8)]\r\ndata_right_loop_test = data_right_loop[int(len(data_files_right_loop) * -0.2):]\r\n\r\ndata_right_loop_train_labels = [classes['Right Loop']] * int(len(data_right_loop_train))\r\ndata_right_loop_val_labels = [classes['Right Loop']] * int(len(data_right_loop_val))\r\ndata_right_loop_test_labels = [classes['Right Loop']] * int(len(data_right_loop_test))\r\n\r\n\r\ndata_whorl_train = data_whorl[:int(len(data_files_whorl) * 0.7)]\r\ndata_whorl_val = data_whorl[int(len(data_files_whorl) * 0.7): int(len(data_files_whorl) * 0.8)]\r\ndata_whorl_test = data_whorl[int(len(data_files_whorl) * -0.2):]\r\n\r\ndata_whorl_train_labels = [classes['Whorl']] * int(len(data_whorl_train))\r\ndata_whorl_val_labels = [classes['Whorl']] * int(len(data_whorl_val))\r\ndata_whorl_test_labels = [classes['Whorl']] * 
int(len(data_whorl_test))\r\n\r\ntrain_images = np.concatenate(\r\n (data_arch_train, data_left_loop_train, data_right_loop_train, data_whorl_train), axis=0)\r\ntest_images = np.concatenate(\r\n (data_arch_test, data_left_loop_test, data_right_loop_test, data_whorl_test), axis=0)\r\nval_image = np.concatenate(\r\n (data_arch_val, data_left_loop_val, data_right_loop_val, data_whorl_val), axis=0)\r\n\r\ntrain_labels = np.concatenate((data_arch_train_labels, data_left_loop_train_labels, data_right_loop_train_labels, data_whorl_train_labels), axis=0)\r\ntest_labels = np.concatenate((data_arch_test_labels, data_left_loop_test_labels, data_right_loop_test_labels, data_whorl_test_labels), axis=0)\r\nval_labels = np.concatenate((data_arch_val_labels, data_left_loop_val_labels, data_right_loop_val_labels, data_whorl_val_labels), axis=0)\r\n\r\nclass_names = ['Arch', 'Left Loop', 'Right Loop', 'Whorl']\r\n\r\nprint(train_images.shape)\r\nprint(test_images.shape)\r\nprint(val_image.shape)\r\n\r\n\r\ntrain_mean = np.mean(train_images)\r\ntrain_std = np.std(train_images)\r\n\r\ntrain_images = (train_images - train_mean) / train_std\r\nval_image = (val_image - train_mean) / train_std\r\ntest_images = (test_images - train_mean) / train_std\r\n\r\n\r\nmodel_resnet = tf.keras.applications.ResNet50(False, None, None, (384, 288, 1), 'avg', None)\r\n\r\nmodel = keras.Sequential([model_resnet, keras.layers.Dense(4, 'softmax')])\r\n\r\n\r\nearly_stop = keras.callbacks.EarlyStopping(\r\n monitor=\"val_loss\",\r\n min_delta=0,\r\n patience=10,\r\n mode=\"min\",\r\n restore_best_weights=True)\r\n\r\nmodel.compile(optimizer='adam',\r\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\r\n metrics=['accuracy'])\r\n\r\nmodel.fit(train_images, train_labels,\r\n epochs=60,\r\n batch_size=10,\r\n validation_data=(val_image, val_labels),\r\n callbacks=[early_stop],\r\n class_weight={0: 9, \r\n 1: len(train_labels)/len(data_left_loop_train_labels),\r\n\t\t\t2: len(train_labels)/len(data_right_loop_train_labels),\r\n\t\t\t3: len(train_labels)/len(data_whorl_train_labels)})\r\n\r\nmodel.save('/home/editav/Desktop/model.h5')\r\n\r\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\r\n\r\nprint('\\nTest accuracy:', test_acc)\r\n\r\npredictions = model.predict(test_images)\r\n\r\nprint(\"Predictions shape: \")\r\nprint(predictions.shape)\r\n\r\nprint(predictions[0])\r\nprint(np.argmax(predictions[0]))\r\nprint(test_labels[0])\r\n\r\n\r\n\r\n\r\nimg = test_images[1]\r\n\r\n\r\nimg = (np.expand_dims(img, 0))\r\n\r\n\r\npredictions_single = model.predict(img)\r\n\r\nimage_predictions = []\r\nimage_predict = []\r\nfor i in range(len(test_images)):\r\n image_predict.append(model.predict(np.expand_dims(test_images[i], 0)))\r\n image_predictions.append(np.argmax(model.predict(np.expand_dims(test_images[i], 0))))\r\n\r\nconf = []\r\nconf = confusion_matrix(test_labels, image_predictions)\r\nprint(conf)\r\n\r\n#print(predictions_single)\r\n#print(np.argmax(predictions_single[0]))\r\n#print(image_predictions)\r\n" ]
[ [ "numpy.concatenate", "sklearn.metrics.confusion_matrix", "tensorflow.keras.applications.ResNet50", "numpy.mean", "tensorflow.keras.layers.Dense", "numpy.std", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "numpy.argmax", "numpy.expand_dims", "tensorflow.keras.callbacks.EarlyStopping" ] ]
thutran/bspline
[ "7e3cf5e6652fd51e5ca8ea13537e649c629fc567" ]
[ "bspline/bspline.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Python/Numpy implementation of Bspline basis functions via Cox - de Boor algorithm.\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom functools import partial\nimport numpy as np\n\nclass memoize(object):\n \"\"\"Cache the return value of a method.\n\n This class is meant to be used as a decorator of methods. The return value\n from a given method invocation will be cached on the instance whose method\n was invoked. All arguments passed to a method decorated with memoize must\n be hashable.\n\n If a memoized method is invoked directly on its class the result will not\n be cached. Instead the method will be invoked like a static method:\n class Obj(object):\n @memoize\n def add_to(self, arg):\n return self + arg\n Obj.add_to(1) # not enough arguments\n Obj.add_to(1, 2) # returns 3, result is not cached\n\n Script borrowed from here:\n MIT Licensed, attributed to Daniel Miller, Wed, 3 Nov 2010\n http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/\n \"\"\"\n def __init__(self, func):\n self.func = func\n def __get__(self, obj, objtype=None):\n if obj is None:\n return self.func\n return partial(self, obj)\n def __call__(self, *args, **kw):\n obj = args[0]\n try:\n cache = obj.__cache\n except AttributeError:\n cache = obj.__cache = {}\n key = (self.func, args[1:], frozenset(kw.items()))\n try:\n res = cache[key]\n except KeyError:\n res = cache[key] = self.func(*args, **kw)\n return res\n\n\n\nclass Bspline():\n \"\"\"Numpy implementation of Cox - de Boor algorithm in 1D.\"\"\"\n\n def __init__(self, knot_vector, degree):\n \"\"\"Create a Bspline object.\n\n Parameters:\n knot_vector: Python list or rank-1 Numpy array containing knot vector\n entries\n degree: Degree of interpolation (degree = spline order - 1), \n\t\t e.g. 
0 -> piecewise constant between knots, \n\t\t\t 1 -> piecewise linear between knots, etc.\n\n Returns:\n Bspline object, callable to evaluate basis functions at given\n values of `x` inside the knot span.\n \"\"\"\n kv = np.atleast_1d(knot_vector)\n if kv.ndim > 1:\n raise ValueError(\"knot_vector must be Python list or rank-1 array, but got rank = %d\" % (kv.ndim))\n self.knot_vector = kv\n\n degree = int(degree)\n if degree < 0:\n raise ValueError(\"degree must be integer >= 0, but got %d\" % (degree))\n\n self.p = degree\n\n #Dummy calls to the functions for memory storage\n self.__call__(0.0)\n self.d(0.0)\n\n\n def __basis0(self, xi):\n \"\"\"degree zero basis (for internal use).\"\"\"\n return np.where(np.all([self.knot_vector[:-1] <= xi,\n xi < self.knot_vector[1:]],axis=0), 1.0, 0.0)\n\n def __basis(self, xi, p, compute_derivatives=False):\n \"\"\"Recursive Cox - de Boor function (for internal use).\n\n Compute basis functions and optionally their first derivatives.\n \"\"\"\n\n if p == 0:\n return self.__basis0(xi)\n else:\n basis_p_minus_1 = self.__basis(xi, p - 1)\n\n first_term_numerator = xi - self.knot_vector[:-p]\n first_term_denominator = self.knot_vector[p:] - self.knot_vector[:-p]\n\n second_term_numerator = self.knot_vector[(p + 1):] - xi\n second_term_denominator = (self.knot_vector[(p + 1):] -\n self.knot_vector[1:-p])\n\n\n #Change numerator in last recursion if derivatives are desired\n if compute_derivatives and p == self.p:\n\n first_term_numerator = p\n second_term_numerator = -p\n\n #Disable divide by zero error because we check for it\n with np.errstate(divide='ignore', invalid='ignore'):\n first_term = np.where(first_term_denominator != 0.0,\n (first_term_numerator /\n first_term_denominator), 0.0)\n second_term = np.where(second_term_denominator != 0.0,\n (second_term_numerator /\n second_term_denominator), 0.0)\n\n return (first_term[:-1] * basis_p_minus_1[:-1] +\n second_term * basis_p_minus_1[1:])\n\n @memoize\n def __call__(self, xi):\n \"\"\"Convenience function to make the object callable. Also 'memoized' for speed.\"\"\"\n return self.__basis(xi, self.p, compute_derivatives=False)\n\n @memoize\n def d(self, xi):\n \"\"\"Convenience function to compute first derivative of basis functions. 'Memoized' for speed.\"\"\"\n return self.__basis(xi, self.p, compute_derivatives=True)\n\n def plot(self):\n \"\"\"Plot basis functions over full range of knots.\n\n Convenience function. Requires matplotlib.\n \"\"\"\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n from sys import stderr\n print(\"ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function\", file=stderr)\n raise\n\n x_min = np.min(self.knot_vector)\n x_max = np.max(self.knot_vector)\n\n x = np.linspace(x_min, x_max, num=1000)\n\n N = np.array([self(i) for i in x]).T\n\n for n in N:\n plt.plot(x,n)\n\n return plt.show()\n\n def dplot(self):\n \"\"\"Plot first derivatives of basis functions over full range of knots.\n\n Convenience function. 
Requires matplotlib.\n \"\"\"\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n from sys import stderr\n print(\"ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function\", file=stderr)\n raise\n\n x_min = np.min(self.knot_vector)\n x_max = np.max(self.knot_vector)\n\n x = np.linspace(x_min, x_max, num=1000)\n\n N = np.array([self.d(i) for i in x]).T\n\n for n in N:\n plt.plot(x,n)\n\n return plt.show()\n\n\n def __diff_internal(self):\n \"\"\"Differentiate a B-spline once, and return the resulting coefficients and Bspline objects.\n\nThis preserves the Bspline object nature of the data, enabling recursive implementation\nof higher-order differentiation (see `diff`).\n\nThe value of the first derivative of `B` at a point `x` can be obtained as::\n\n def diff1(B, x):\n terms = B.__diff_internal()\n return sum( ci*Bi(x) for ci,Bi in terms )\n\nReturns:\n tuple of tuples, where each item is (coefficient, Bspline object).\n\nSee:\n `diff`: differentiation of any order >= 0\n\"\"\"\n assert self.p > 0, \"degree of Bspline must be > 0\" # we already handle the other case in diff()\n\n # https://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-derv.html\n #\n t = self.knot_vector\n p = self.p\n Bi = Bspline( t[:-1], p-1 )\n Bip1 = Bspline( t[1:], p-1 )\n\n numer1 = +p\n numer2 = -p\n denom1 = t[p:-1] - t[:-(p+1)]\n denom2 = t[(p+1):] - t[1:-p]\n\n with np.errstate(divide='ignore', invalid='ignore'):\n ci = np.where(denom1 != 0., (numer1 / denom1), 0.)\n cip1 = np.where(denom2 != 0., (numer2 / denom2), 0.)\n\n return ( (ci,Bi), (cip1,Bip1) )\n\n\n def diff(self, order=1):\n \"\"\"Differentiate a B-spline `order` number of times.\n\nParameters:\n order:\n int, >= 0\n\nReturns:\n **lambda** `x`: ... that evaluates the `order`-th derivative of `B` at the point `x`.\n The returned function internally uses __call__, which is 'memoized' for speed.\n\"\"\"\n order = int(order)\n if order < 0:\n raise ValueError(\"order must be >= 0, got %d\" % (order))\n\n if order == 0:\n return self.__call__\n\n if order > self.p: # identically zero, but force the same output format as in the general case\n dummy = self.__call__(0.) 
# get number of basis functions and output dtype\n nbasis = dummy.shape[0]\n return lambda x: np.zeros( (nbasis,), dtype=dummy.dtype ) # accept but ignore input x\n\n # At each differentiation, each term maps into two new terms.\n # The number of terms in the result will be 2**order.\n #\n # This will cause an exponential explosion in the number of terms for high derivative orders,\n # but for the first few orders (practical usage; >3 is rarely needed) the approach works.\n #\n terms = [ (1.,self) ]\n for k in range(order):\n tmp = []\n for Ci,Bi in terms:\n tmp.extend( (Ci*cn, Bn) for cn,Bn in Bi.__diff_internal() ) # NOTE: also propagate Ci\n terms = tmp\n\n # perform final summation at call time\n return lambda x: sum( ci*Bi(x) for ci,Bi in terms )\n\n\n def collmat(self, tau, deriv_order=0):\n \"\"\"Compute collocation matrix.\n\nParameters:\n tau:\n Python list or rank-1 array, collocation sites\n deriv_order:\n int, >=0, order of derivative for which to compute the collocation matrix.\n The default is 0, which means the function value itself.\n\nReturns:\n A:\n if len(tau) > 1, rank-2 array such that\n A[i,j] = D**deriv_order B_j(tau[i])\n where\n D**k = kth derivative (0 for function value itself)\n\n if len(tau) == 1, rank-1 array such that\n A[j] = D**deriv_order B_j(tau)\n\nExample:\n If the coefficients of a spline function are given in the vector c, then::\n\n np.sum( A*c, axis=-1 )\n\n will give a rank-1 array of function values at the sites tau[i] that were supplied\n to `collmat`.\n\n Similarly for derivatives (if the supplied `deriv_order`> 0).\n\n\"\"\"\n # get number of basis functions and output dtype\n dummy = self.__call__(0.)\n nbasis = dummy.shape[0]\n\n tau = np.atleast_1d(tau)\n if tau.ndim > 1:\n raise ValueError(\"tau must be a list or a rank-1 array\")\n\n A = np.empty( (tau.shape[0], nbasis), dtype=dummy.dtype )\n f = self.diff(order=deriv_order)\n for i,taui in enumerate(tau):\n A[i,:] = f(taui)\n\n return np.squeeze(A)\n" ]
[ [ "numpy.max", "numpy.empty", "numpy.errstate", "numpy.zeros", "matplotlib.pyplot.plot", "numpy.min", "numpy.where", "numpy.atleast_1d", "numpy.all", "matplotlib.pyplot.show", "numpy.linspace", "numpy.squeeze" ] ]
maxxxzdn/en_flows
[ "04ed4dd45431cafcd23f8bf5199a47f917a72058" ]
[ "qm9/visualizer.py" ]
[ "import torch\nimport numpy as np\nimport os\nimport glob\nimport random\nimport matplotlib\nimport imageio\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom qm9 import analyze\n##############\n### Files ####\n###########-->\n\n\nbond1_radius = {'H': 31, 'C': 76, 'N': 71, 'O': 66, 'F': 57} # covalnt bond in pm for each type of atom https://en.wikipedia.org/wiki/Covalent_radius\nbond1_stdv = {'H': 5, 'C': 2, 'N': 2, 'O': 2, 'F': 3}\n\nbond2_radius = {'H': -1000, 'C': 67, 'N': 60, 'O': 57, 'F': 59}\nbond3_radius = {'H': -1000, 'C': 60, 'N': 54, 'O': 53, 'F': 53} # Not sure why oxygen has triple bond\n\n\n\n\n\ndef save_xyz_file(\n path, one_hot, charges, positions, id_from=0, name='molecule'):\n try:\n os.makedirs(path)\n except OSError:\n pass\n for batch_i in range(one_hot.size(0)):\n f = open(path + name + '_' + \"%03d.txt\" % (batch_i + id_from), \"w\")\n f.write(\"%d\\n\\n\" % one_hot.size(1))\n atoms = torch.argmax(one_hot[batch_i], dim=1)\n for atom_i in range(one_hot.size(1)):\n atom = atoms[atom_i]\n atom = analyze.atom_decoder[atom]\n f.write(\"%s %.9f %.9f %.9f\\n\" % (atom, positions[batch_i, atom_i, 0], positions[batch_i, atom_i, 1], positions[batch_i, atom_i, 2]))\n f.close()\n\n\ndef load_molecule_xyz(file):\n with open(file, encoding='utf8') as f:\n n_atoms = int(f.readline())\n one_hot = torch.zeros(n_atoms, 5)\n charges = torch.zeros(n_atoms, 1)\n positions = torch.zeros(n_atoms, 3)\n f.readline()\n atoms = f.readlines()\n for i in range(n_atoms):\n atom = atoms[i].split(' ')\n atom_type = atom[0]\n one_hot[i, analyze.atom_encoder[atom_type]] = 1\n position = torch.Tensor([float(e) for e in atom[1:]])\n positions[i, :] = position\n return positions, one_hot, charges\n\n\ndef load_xyz_files(path, shuffle=True):\n files = glob.glob(path + \"/*.txt\")\n if shuffle:\n random.shuffle(files)\n return files\n\n#<----########\n### Files ####\n##############\ndef draw_sphere(ax, x, y, z, size, color):\n u = np.linspace(0, 2 * np.pi, 100)\n v = np.linspace(0, np.pi, 100)\n\n xs = size * np.outer(np.cos(u), np.sin(v))\n ys = size * np.outer(np.sin(u), np.sin(v))\n zs = size * np.outer(np.ones(np.size(u)), np.cos(v))\n # for i in range(2):\n # ax.plot_surface(x+random.randint(-5,5), y+random.randint(-5,5), z+random.randint(-5,5), rstride=4, cstride=4, color='b', linewidth=0, alpha=0.5)\n\n ax.plot_surface(x + xs, y + ys, z + zs, rstride=2, cstride=2, color=color, linewidth=0,\n alpha=1.)\n # # calculate vectors for \"vertical\" circle\n # a = np.array([-np.sin(elev / 180 * np.pi), 0, np.cos(elev / 180 * np.pi)])\n # b = np.array([0, 1, 0])\n # b = b * np.cos(rot) + np.cross(a, b) * np.sin(rot) + a * np.dot(a, b) * (\n # 1 - np.cos(rot))\n # ax.plot(np.sin(u), np.cos(u), 0, color='k', linestyle='dashed')\n # horiz_front = np.linspace(0, np.pi, 100)\n # ax.plot(np.sin(horiz_front), np.cos(horiz_front), 0, color='k')\n # vert_front = np.linspace(np.pi / 2, 3 * np.pi / 2, 100)\n # ax.plot(a[0] * np.sin(u) + b[0] * np.cos(u), b[1] * np.cos(u),\n # a[2] * np.sin(u) + b[2] * np.cos(u), color='k', linestyle='dashed')\n # ax.plot(a[0] * np.sin(vert_front) + b[0] * np.cos(vert_front),\n # b[1] * np.cos(vert_front),\n # a[2] * np.sin(vert_front) + b[2] * np.cos(vert_front), color='k')\n #\n # ax.view_init(elev=elev, azim=0)\n\n\ndef plot_data3d(positions, atom_type, camera_elev=0, camera_azim=0, save_path=None, spheres_3d=False, bg='black'):\n\n black = (0, 0, 0)\n white = (1, 1, 1)\n\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n ax = 
fig.add_subplot(projection='3d')\n ax.set_aspect('auto')\n ax.view_init(elev=camera_elev, azim=camera_azim)\n if bg == 'black':\n ax.set_facecolor(black)\n else:\n ax.set_facecolor(white)\n #ax.xaxis.pane.set_edgecolor('#D0D0D0')\n ax.xaxis.pane.set_alpha(0)\n ax.yaxis.pane.set_alpha(0)\n ax.zaxis.pane.set_alpha(0)\n ax._axis3don = False\n\n # draw_sphere(ax, 0, 0, 0, 1)\n # draw_sphere(ax, 1, 1, 1, 1)\n\n x = positions[:, 0]\n y = positions[:, 1]\n z = positions[:, 2]\n # Hydrogen, Carbon, Nitrogen, Oxygen, Flourine\n if bg == 'black':\n ax.w_xaxis.line.set_color(\"black\")\n else:\n ax.w_xaxis.line.set_color(\"white\")\n #ax.set_facecolor((1.0, 0.47, 0.42))\n colors_dic = np.array(['#FFFFFF99', 'C7', 'C0', 'C3', 'C1'])\n radius_dic = np.array([0.46, 0.77, 0.77, 0.77, 0.77])\n area_dic = 1500 * radius_dic ** 2\n #areas_dic = sizes_dic * sizes_dic * 3.1416\n\n areas = area_dic[atom_type]\n radii = radius_dic[atom_type]\n colors = colors_dic[atom_type]\n\n if spheres_3d:\n for i, j, k, s, c in zip(x, y, z, radii, colors):\n draw_sphere(ax, i.item(), j.item(), k.item(), 0.7 * s, c)\n else:\n ax.scatter(x, y, z, s=areas, alpha=0.9, c=colors)#, linewidths=2, edgecolors='#FFFFFF')\n\n for i in range(len(x)):\n for j in range(i + 1, len(x)):\n p1 = np.array([x[i], y[i], z[i]])\n p2 = np.array([x[j], y[j], z[j]])\n dist = np.sqrt(np.sum((p1 - p2) ** 2))\n atom1, atom2 = analyze.atom_decoder[atom_type[i]], analyze.atom_decoder[atom_type[j]]\n if analyze.get_bond_order(atom1, atom2, dist):\n if bg == 'black':\n ax.plot([x[i], x[j]], [y[i], y[j]], [z[i], z[j]], linewidth=(3-2)*2 * 2, c='#FFFFFF')\n else:\n ax.plot([x[i], x[j]], [y[i], y[j]], [z[i], z[j]],\n linewidth=(3 - 2) * 2 * 2, c='#666666')\n #plt.show()\n\n # max_value = positions.abs().max().item()\n\n axis_lim = 3.2\n ax.set_xlim(-axis_lim, axis_lim)\n ax.set_ylim(-axis_lim, axis_lim)\n ax.set_zlim(-axis_lim, axis_lim)\n\n dpi = 100 if spheres_3d else 50\n\n if save_path is not None:\n plt.savefig(save_path, bbox_inches='tight', pad_inches=0.0, dpi=dpi)\n\n if spheres_3d:\n img = imageio.imread(save_path)\n img_brighter = np.clip(img * 1.4, 0, 255).astype('uint8')\n imageio.imsave(save_path, img_brighter)\n else:\n plt.show()\n plt.close()\n\n\ndef plot_grid():\n import matplotlib.pyplot as plt\n from mpl_toolkits.axes_grid1 import ImageGrid\n\n im1 = np.arange(100).reshape((10, 10))\n im2 = im1.T\n im3 = np.flipud(im1)\n im4 = np.fliplr(im2)\n\n fig = plt.figure(figsize=(10., 10.))\n grid = ImageGrid(fig, 111, # similar to subplot(111)\n nrows_ncols=(6, 6), # creates 2x2 grid of axes\n axes_pad=0.1, # pad between axes in inch.\n )\n\n for ax, im in zip(grid, [im1, im2, im3, im4]):\n # Iterating over the grid returns the Axes.\n\n ax.imshow(im)\n\n plt.show()\n\n\ndef visualize(path, max_num=25, wandb=None, spheres_3d=False):\n files = load_xyz_files(path)[0:max_num]\n for file in files:\n positions, one_hot, charges = load_molecule_xyz(file)\n atom_type = torch.argmax(one_hot, dim=1).numpy()\n plot_data3d(positions, atom_type, save_path=file[:-4] + '.png',\n spheres_3d=spheres_3d)\n\n if wandb is not None:\n path = file[:-4] + '.png'\n # Log image(s)\n im = plt.imread(path)\n wandb.log({path: [wandb.Image(im, caption=path)]})\n\n\ndef visualize_chain(path, wandb=None, spheres_3d=False):\n files = load_xyz_files(path)\n files = sorted(files)\n save_paths = []\n\n print(f'Visualizing chain using files: {files}')\n for file in files:\n positions, one_hot, charges = load_molecule_xyz(file)\n atom_type = torch.argmax(one_hot, dim=1).numpy()\n 
fn = file[:-4] + '.png'\n plot_data3d(positions, atom_type, save_path=fn, spheres_3d=spheres_3d)\n save_paths.append(fn)\n\n imgs = [imageio.imread(fn) for fn in save_paths]\n dirname = os.path.dirname(save_paths[0])\n gif_path = dirname + '/output.gif'\n print(f'Creating gif with {len(imgs)} images')\n # Add the last frame 10 times so that the final result remains temporally.\n # imgs.extend([imgs[-1]] * 10)\n imageio.mimsave(gif_path, imgs, subrectangles=True)\n\n if wandb is not None:\n wandb.log({gif_path: [wandb.Video(gif_path, caption=gif_path)]})\n\n\nif __name__ == '__main__':\n #plot_grid()\n import qm9.dataset as dataset\n matplotlib.use('macosx')\n\n task = \"plot_chain\"\n\n if task == \"visualize_molecules\":\n dataloaders, charge_scale = dataset.retrieve_dataloaders(batch_size=1)\n for i, data in enumerate(dataloaders['train']):\n positions = data['positions'].view(-1, 3)\n positions_centered = positions - positions.mean(dim=0, keepdim=True)\n one_hot = data['one_hot'].view(-1, 5).type(torch.float32)\n atom_type = torch.argmax(one_hot, dim=1).numpy()\n\n plot_data3d(positions_centered, atom_type, spheres_3d=True)\n elif task == \"plot_chain\":\n visualize_chain(path=\"../outputs/here_we_go2_resume_best_batch/eval/chain\", spheres_3d=False)\n else:\n raise Exception(\"Wrong task\")\n" ]
[ [ "matplotlib.use", "torch.zeros", "numpy.array", "numpy.sin", "numpy.clip", "matplotlib.pyplot.savefig", "numpy.sum", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "numpy.flipud", "numpy.arange", "numpy.cos", "numpy.size", "matplotlib.pyplot.show", "numpy.linspace", "torch.argmax", "matplotlib.pyplot.imread", "numpy.fliplr" ] ]
susloparovdenis/dlcourse_ai
[ "0be8278161f85822e71be370296586509428d9d2" ]
[ "assignments/assignment1/knn.py" ]
[ "import numpy as np\nfrom collections import Counter\n\nclass KNN:\n \"\"\"\n K-neariest-neighbor classifier using L1 loss\n \"\"\"\n def __init__(self, k=1):\n self.k = k\n\n def fit(self, X, y):\n self.train_X = X\n self.train_y = y\n\n\n def predict(self, X, num_loops=0):\n '''\n Uses the KNN model to predict clases for the data samples provided\n \n Arguments:\n X, np array (num_samples, num_features) - samples to run\n through the model\n num_loops, int - which implementation to use\n\n Returns:\n predictions, np array of ints (num_samples) - predicted class\n for each sample\n '''\n if num_loops == 0:\n dists = self.compute_distances_no_loops(X)\n elif num_loops == 1:\n dists = self.compute_distances_one_loop(X)\n else:\n dists = self.compute_distances_two_loops(X)\n\n if self.train_y.dtype == np.bool:\n return self.predict_labels_binary(dists)\n else:\n return self.predict_labels_multiclass(dists)\n\n def compute_distances_two_loops(self, X):\n '''\n Computes L1 distance from every sample of X to every training sample\n Uses simplest implementation with 2 Python loops\n\n Arguments:\n X, np array (num_test_samples, num_features) - samples to run\n \n Returns:\n dists, np array (num_test_samples, num_train_samples) - array\n with distances between each test and each train sample\n '''\n num_train = self.train_X.shape[0]\n num_test = X.shape[0]\n dists = np.zeros((num_test, num_train), np.float32)\n for i_test in range(num_test):\n for i_train in range(num_train):\n dists[i_test][i_train] = np.abs(X[i_test] - self.train_X[i_train]).sum()\n return dists\n\n def compute_distances_one_loop(self, X):\n '''\n Computes L1 distance from every sample of X to every training sample\n Vectorizes some of the calculations, so only 1 loop is used\n\n Arguments:\n X, np array (num_test_samples, num_features) - samples to run\n \n Returns:\n dists, np array (num_test_samples, num_train_samples) - array\n with distances between each test and each train sample\n '''\n num_train = self.train_X.shape[0]\n num_test = X.shape[0]\n dists = np.zeros((num_test, num_train), np.float32)\n for i_test in range(num_test):\n dists[i_test] = np.abs(X[i_test] - self.train_X).sum(1)\n return dists\n\n def compute_distances_no_loops(self, X):\n '''\n Computes L1 distance from every sample of X to every training sample\n Fully vectorizes the calculations using numpy\n\n Arguments:\n X, np array (num_test_samples, num_features) - samples to run\n \n Returns:\n dists, np array (num_test_samples, num_train_samples) - array\n with distances between each test and each train sample\n '''\n num_train = self.train_X.shape[0]\n num_test = X.shape[0]\n # Using float32 to to save memory - the default is float64\n return np.abs(X[:, None, :]-self.train_X[None, :, :]).sum(2)\n\n def predict_labels_binary(self, dists):\n '''\n Returns model predictions for binary classification case\n \n Arguments:\n dists, np array (num_test_samples, num_train_samples) - array\n with distances between each test and each train sample\n\n Returns:\n pred, np array of bool (num_test_samples) - binary predictions \n for every test sample\n '''\n num_test = dists.shape[0]\n pred = np.zeros(num_test, np.bool)\n for i in range(num_test):\n k_nearest_indexes = np.argsort(dists[i])[:self.k]\n values_of_nearest = self.train_y[k_nearest_indexes] \n pred[i] = Counter(values_of_nearest).most_common(1)[0][0]\n return pred\n\n def predict_labels_multiclass(self, dists):\n '''\n Returns model predictions for multi-class classification case\n\n Arguments:\n dists, 
np array (num_test_samples, num_train_samples) - array\n with distances between each test and each train sample\n\n Returns:\n pred, np array of int (num_test_samples) - predicted class index\n for every test sample\n '''\n num_test = dists.shape[0]\n pred = np.zeros(num_test, np.int)\n for i in range(num_test):\n k_nearest_indexes = np.argsort(dists[i])[:3]\n values_of_nearest = self.train_y[k_nearest_indexes] \n pred[i] = Counter(values_of_nearest).most_common(1)[0][0]\n return pred\n" ]
[ [ "numpy.abs", "numpy.argsort", "numpy.zeros" ] ]
matklad/xain
[ "20c6881cabdf1e3ed7a9ddb40bbdcc7a7fd22f78" ]
[ "xain/generator/transformer.py" ]
[ "from typing import Callable, Tuple\n\nimport numpy as np\nfrom numpy import ndarray\n\nfrom .class_per_partition_distribution import distribution as cpp_distribution\n\n# Passed to RandomState for predictable shuffling\nSEED = 851746\n\n\ndef transfomer_decorator(func: Callable):\n \"\"\"The decorator will validate the input and result of any\n transformer function it is applied to\"\"\"\n\n def wrapper(\n x: np.ndarray, y: np.ndarray, *args, **kwargs\n ) -> Tuple[np.ndarray, np.ndarray]:\n assert x.shape[0] == y.shape[0], \"x and y need to have them dimension on axis=0\"\n\n x_transformed, y_transformed = func(x, y, *args, **kwargs)\n\n assert (\n x.shape == x_transformed.shape\n ), \"x has to have the same shape after transformation as before\"\n assert (\n y.shape == y_transformed.shape\n ), \"y has to have the same shape after transformation as before\"\n\n return (x_transformed, y_transformed)\n\n return wrapper\n\n\n@transfomer_decorator\ndef random_shuffle(x: ndarray, y: ndarray) -> Tuple[ndarray, ndarray]:\n # pylint: disable=no-member\n permutation = np.random.RandomState(seed=SEED).permutation(x.shape[0])\n x_shuffled = x[permutation]\n y_shuffled = y[permutation]\n return x_shuffled, y_shuffled\n\n\n@transfomer_decorator\ndef classes_balanced_randomized_per_partition(\n x: ndarray, y: ndarray, num_partitions=10\n) -> Tuple[ndarray, ndarray]:\n \"\"\"Shuffles y so that only a each class is in each partition\"\"\"\n example_count = y.shape[0]\n section_size = int(example_count / num_partitions)\n\n assert (\n example_count % num_partitions == 0\n ), \"Number of examples needs to be evenly divisible by section_count\"\n\n x_shuffled, y_shuffled = random_shuffle(x, y)\n\n # Array of indices that sort a along the specified axis.\n sort_index = np.argsort(y_shuffled, axis=0)\n\n x_sorted = x_shuffled[sort_index]\n y_sorted = y_shuffled[sort_index]\n\n balance_index = (\n np.array(range(example_count), np.int64)\n .reshape((section_size, num_partitions))\n .transpose()\n .reshape(example_count)\n )\n\n x_balanced = x_sorted[balance_index]\n y_balanced = y_sorted[balance_index]\n\n return x_balanced, y_balanced\n\n\n@transfomer_decorator\ndef sort_by_class(x: ndarray, y: ndarray) -> Tuple[ndarray, ndarray]:\n \"\"\"\n Shuffles y so that only a single label is in each partition\n Number of partitions will depend on number of unique labels\n \"\"\"\n example_count = y.shape[0]\n partition_count = np.unique(y).shape[0]\n\n assert (\n example_count % partition_count == 0\n ), \"Number of examples needs to be evenly divisible by partition_count\"\n\n # Array of indices that sort a along the specified axis.\n sort_indexes = np.argsort(y, axis=0)\n\n x_sorted = x[sort_indexes]\n y_sorted = y[sort_indexes]\n\n return x_sorted, y_sorted\n\n\n@transfomer_decorator\ndef one_biased_class_per_partition( # pylint: disable=R0914\n x: ndarray, y: ndarray, bias=1000\n) -> Tuple[ndarray, ndarray]:\n \"\"\"\n Shuffle y so that the labels are uniformly distributed in each section\n except one label which will have a bias. 
Considering the bias the rest\n needs to be evenly divisible\n \"\"\"\n example_count = y.shape[0]\n # section_count is equal to number of unique labels\n unique_labels_set = set(y)\n section_count = len(unique_labels_set)\n section_size = int(example_count / section_count)\n\n assert (\n example_count % section_count == 0\n ), \"Number of examples needs to be evenly divisible by section_count\"\n\n # Array of indices that sort a along the specified axis.\n sort_indexes = np.argsort(y, axis=0)\n\n x_sorted = x[sort_indexes]\n y_sorted = y[sort_indexes]\n\n x_splits = np.split(x_sorted, indices_or_sections=section_count, axis=0)\n y_splits = np.split(y_sorted, indices_or_sections=section_count, axis=0)\n\n # Extract first \"bias\" from each split\n x_biased_splits = [x_split[:bias] for x_split in x_splits]\n y_biased_splits = [y_split[:bias] for y_split in y_splits]\n\n for y_biased_split in y_biased_splits:\n # Check that we got single label splits\n assert len(set(y_biased_split)) == 1\n\n # Merge rest\n x_unbiased = np.concatenate([x_split[bias:] for x_split in x_splits])\n y_unbiased = np.concatenate([y_split[bias:] for y_split in y_splits])\n\n assert x_unbiased.shape[0] == section_count * (\n section_size - bias\n ), \"Length of unbiased elements should be equal to original length minus extracted bias\"\n\n # Create balanced shuffle of rest\n x_balanced, y_balanced = classes_balanced_randomized_per_partition(\n x_unbiased, y_unbiased, num_partitions=section_count\n )\n\n for y_balanced_split in np.split(y_balanced, indices_or_sections=section_count):\n assert set(y_balanced_split) == unique_labels_set\n\n # split unbiased splits again to be merged with biased splits\n x_balanced_splits = np.split(x_balanced, indices_or_sections=section_count, axis=0)\n y_balanced_splits = np.split(y_balanced, indices_or_sections=section_count, axis=0)\n\n x_merged = np.concatenate(\n [\n np.concatenate([x1, x2], axis=0)\n for x1, x2 in zip(x_biased_splits, x_balanced_splits)\n ]\n )\n y_merged = np.concatenate(\n [\n np.concatenate([y1, y2], axis=0)\n for y1, y2 in zip(y_biased_splits, y_balanced_splits)\n ]\n )\n\n assert x.shape == x_merged.shape, \"Shape of x should not change\"\n\n return x_merged, y_merged\n\n\n@transfomer_decorator\ndef class_per_partition( # pylint: disable=R0914\n x: ndarray, y: ndarray, num_partitions: int, cpp: int\n) -> Tuple[ndarray, ndarray]:\n \"\"\"\n Does the following:\n 1. Sort by label\n 2. Shuffles sections randomley\n \"\"\"\n assert x.shape[0] % num_partitions == 0, (\n f\"Number of examples ({x.shape[0]}) needs to be divisible by \"\n + \"num_partitions ({num_partitions})\"\n )\n\n num_classes = len(np.unique(y))\n num_sections = cpp * num_partitions\n\n assert num_sections % num_classes == 0, (\n f\"number of sections ({num_sections}) needs to be divisible \"\n + f\"by number of classes ({num_classes})\"\n )\n\n assert x.shape[0] % num_sections == 0, (\n f\"number of examples ({x.shape[0]}) needs to be divisible \"\n + f\"by number of sections ({cpp * num_partitions})\"\n )\n\n section_size = x.shape[0] // num_sections # number of examples per section\n\n assert (x.shape[0] / num_classes) % section_size == 0, (\n f\"number of examples per class ({x.shape[0] / num_classes}) needs to be divisible \"\n + f\"by number of examples per section ({section_size})\"\n )\n\n # Array of indices that sort a along the specified axis.\n sort_indices = np.argsort(y, axis=0)\n\n # After sorting we will have num_labels sorted sections (e.g. 10 for MNIST)\n # e.g. 
with 4 labels and 8 examples (assuming each label occurs equal times)\n # => y = [0, 0, 1, 1, 2, 2, 3, 3]\n x_sorted = x[sort_indices]\n y_sorted = y[sort_indices]\n\n # We want to achive the following structure\n # global: [ class 1 , ..., class N ]\n # per class: [ section 1, ..., section N ]\n # per section: [ example 1, ..., example N ]\n new_x_shape = (num_classes, num_sections // num_classes, section_size, *x.shape[1:])\n new_y_shape = (num_classes, num_sections // num_classes, section_size, *y.shape[1:])\n\n x_sections = x_sorted.reshape(new_x_shape)\n y_sections = y_sorted.reshape(new_y_shape)\n\n # Type of dist is List[List[int]] with length num_partitions where each sublist\n # has length num_class and contains at each index a one if a class section should\n # occur in the final dataset partition\n cpp_dist = cpp_distribution(\n num_classes=num_classes, num_partitions=num_partitions, cpp=cpp\n )\n\n _, class_indices = np.nonzero(cpp_dist)\n section_indices = np.zeros((num_classes), dtype=np.int8)\n\n x_dist = []\n y_dist = []\n\n for c_idx in class_indices:\n s_idx = section_indices[c_idx]\n section_indices[c_idx] += 1\n\n x_sec = x_sections[c_idx][s_idx]\n y_sec = y_sections[c_idx][s_idx]\n\n x_dist.append(x_sec)\n y_dist.append(y_sec)\n\n x_dist = np.concatenate(x_dist)\n y_dist = np.concatenate(y_dist)\n\n return (x_dist, y_dist)\n" ]
[ [ "numpy.concatenate", "numpy.zeros", "numpy.random.RandomState", "numpy.split", "numpy.nonzero", "numpy.argsort", "numpy.unique" ] ]
gwillmer/openpilot
[ "1d45781c973a716c914f5d62a00fd9217e12d3b5" ]
[ "selfdrive/controls/lib/longitudinal_planner.py" ]
[ "#!/usr/bin/env python3\nimport math\nimport numpy as np\nfrom common.numpy_fast import interp\nfrom common.cached_params import CachedParams\n\nimport cereal.messaging as messaging\nfrom cereal import log\nfrom common.realtime import DT_MDL\nfrom common.realtime import sec_since_boot\nfrom selfdrive.modeld.constants import T_IDXS\nfrom selfdrive.config import Conversions as CV\nfrom selfdrive.controls.lib.fcw import FCWChecker\nfrom selfdrive.controls.lib.longcontrol import LongCtrlState\nfrom selfdrive.controls.lib.lead_mpc import LeadMpc\nfrom selfdrive.controls.lib.long_mpc import LongitudinalMpc\nfrom selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX, CONTROL_N\nfrom selfdrive.swaglog import cloudlog\n\nLON_MPC_STEP = 0.2 # first step is 0.2s\nAWARENESS_DECEL = -0.2 # car smoothly decel at .2m/s^2 when user is distracted\nA_CRUISE_MIN = -10.\nA_CRUISE_MAX = 10.\n\n# Lookup table for turns\n_A_TOTAL_MAX_V = [1.7, 3.2]\n_A_TOTAL_MAX_BP = [20., 40.]\n\n\ndef get_max_accel(v_ego):\n return A_CRUISE_MAX\n\n\ndef limit_accel_in_turns(v_ego, angle_steers, a_target, CP):\n \"\"\"\n This function returns a limited long acceleration allowed, depending on the existing lateral acceleration\n this should avoid accelerating when losing the target in turns\n \"\"\"\n\n a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)\n a_y = v_ego**2 * angle_steers * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)\n a_x_allowed = math.sqrt(max(a_total_max**2 - a_y**2, 0.))\n\n return [a_target[0], min(a_target[1], a_x_allowed)]\n\n\nclass Planner():\n def __init__(self, CP):\n self.CP = CP\n self.mpcs = {}\n self.mpcs['lead0'] = LeadMpc(0)\n self.mpcs['lead1'] = LeadMpc(1)\n self.mpcs['cruise'] = LongitudinalMpc()\n\n self.fcw = False\n self.fcw_checker = FCWChecker()\n\n self.cachedParams = CachedParams()\n\n self.v_desired = 0.0\n self.a_desired = 0.0\n self.longitudinalPlanSource = 'cruise'\n self.alpha = np.exp(-DT_MDL/2.0)\n self.lead_0 = log.ModelDataV2.LeadDataV3.new_message()\n self.lead_1 = log.ModelDataV2.LeadDataV3.new_message()\n\n self.v_desired_trajectory = np.zeros(CONTROL_N)\n self.a_desired_trajectory = np.zeros(CONTROL_N)\n\n\n def update(self, sm, CP, lateral_planner):\n cur_time = sec_since_boot()\n v_ego = sm['carState'].vEgo\n a_ego = sm['carState'].aEgo\n\n v_cruise_kph = sm['controlsState'].vCruise\n v_cruise_kph = min(v_cruise_kph, V_CRUISE_MAX)\n v_cruise = v_cruise_kph * CV.KPH_TO_MS\n\n long_control_state = sm['controlsState'].longControlState\n force_slow_decel = sm['controlsState'].forceDecel\n\n self.lead_0 = sm['radarState'].leadOne\n self.lead_1 = sm['radarState'].leadTwo\n\n enabled = (long_control_state == LongCtrlState.pid) or (long_control_state == LongCtrlState.stopping)\n if not enabled or sm['carState'].gasPressed:\n self.v_desired = v_ego\n self.a_desired = a_ego\n\n # Prevent divergence, smooth in current v_ego\n self.v_desired = self.alpha * self.v_desired + (1 - self.alpha) * v_ego\n self.v_desired = max(0.0, self.v_desired)\n\n accel_limits = [A_CRUISE_MIN, get_max_accel(v_ego)]\n accel_limits_turns = limit_accel_in_turns(v_ego, sm['carState'].steeringAngleDeg, accel_limits, self.CP)\n if force_slow_decel:\n # if required so, force a smooth deceleration\n accel_limits_turns[1] = min(accel_limits_turns[1], AWARENESS_DECEL)\n accel_limits_turns[0] = min(accel_limits_turns[0], accel_limits_turns[1])\n # clip limits, cannot init MPC outside of bounds\n accel_limits_turns[0] = min(accel_limits_turns[0], self.a_desired)\n accel_limits_turns[1] = 
max(accel_limits_turns[1], self.a_desired)\n self.mpcs['cruise'].set_accel_limits(accel_limits_turns[0], accel_limits_turns[1])\n\n next_a = np.inf\n for key in self.mpcs:\n self.mpcs[key].set_cur_state(self.v_desired, self.a_desired)\n self.mpcs[key].update(sm['carState'], sm['radarState'], v_cruise)\n if self.mpcs[key].status and self.mpcs[key].a_solution[5] < next_a: # picks slowest solution from accel in ~0.2 seconds\n self.longitudinalPlanSource = key\n self.v_desired_trajectory = self.mpcs[key].v_solution[:CONTROL_N]\n self.a_desired_trajectory = self.mpcs[key].a_solution[:CONTROL_N]\n self.j_desired_trajectory = self.mpcs[key].j_solution[:CONTROL_N]\n next_a = self.mpcs[key].a_solution[5]\n\n # determine fcw\n if self.mpcs['lead0'].new_lead:\n self.fcw_checker.reset_lead(cur_time)\n blinkers = sm['carState'].leftBlinker or sm['carState'].rightBlinker\n self.fcw = self.fcw_checker.update(self.mpcs['lead0'].mpc_solution, cur_time,\n sm['controlsState'].active,\n v_ego, sm['carState'].aEgo,\n self.lead_1.dRel, self.lead_1.vLead, self.lead_1.aLeadK,\n self.lead_1.yRel, self.lead_1.vLat,\n self.lead_1.fcw, blinkers) and not sm['carState'].brakePressed\n if self.fcw:\n cloudlog.info(\"FCW triggered %s\", self.fcw_checker.counters)\n\n # Interpolate 0.05 seconds and save as starting point for next iteration\n a_prev = self.a_desired\n self.a_desired = float(interp(DT_MDL, T_IDXS[:CONTROL_N], self.a_desired_trajectory))\n self.v_desired = self.v_desired + DT_MDL * self.a_desired\n\n if self.cachedParams.get('jvePilot.settings.slowInCurves', 5000) == \"1\":\n curvs = list(lateral_planner.mpc_solution.curvature)\n if len(curvs):\n # find the largest curvature in the solution and use that.\n curv = max(abs(min(curvs)), abs(max(curvs)))\n if curv != 0:\n self.v_desired = float(min(self.v_desired, self.limit_speed_in_curv(sm, curv)))\n\n def publish(self, sm, pm):\n plan_send = messaging.new_message('longitudinalPlan')\n\n plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState'])\n\n longitudinalPlan = plan_send.longitudinalPlan\n longitudinalPlan.modelMonoTime = sm.logMonoTime['modelV2']\n longitudinalPlan.processingDelay = (plan_send.logMonoTime / 1e9) - sm.logMonoTime['modelV2']\n\n longitudinalPlan.speeds = [float(x) for x in self.v_desired_trajectory]\n longitudinalPlan.accels = [float(x) for x in self.a_desired_trajectory]\n longitudinalPlan.jerks = [float(x) for x in self.j_desired_trajectory]\n\n longitudinalPlan.hasLead = self.mpcs['lead0'].status\n longitudinalPlan.longitudinalPlanSource = self.longitudinalPlanSource\n longitudinalPlan.fcw = self.fcw\n\n pm.send('longitudinalPlan', plan_send)\n\n def limit_speed_in_curv(self, sm, curv):\n v_ego = sm['carState'].vEgo\n a_y_max = 2.975 - v_ego * 0.0375 # ~1.85 @ 75mph, ~2.6 @ 25mph\n\n # drop off\n drop_off = self.cachedParams.get_float('jvePilot.settings.slowInCurves.speedDropOff', 5000)\n if drop_off != 2 and a_y_max > 0:\n a_y_max = np.sqrt(a_y_max) ** drop_off\n\n v_curvature = np.sqrt(a_y_max / np.clip(curv, 1e-4, None))\n model_speed = np.min(v_curvature)\n return model_speed * self.cachedParams.get_float('jvePilot.settings.slowInCurves.speedRatio', 5000)" ]
[ [ "numpy.zeros", "numpy.min", "numpy.exp", "numpy.sqrt", "numpy.clip" ] ]
scrambleegg7/ssd_prescription
[ "37932d16d5b7a7741fcdf6afff5be0804ef958a8" ]
[ "m1124test.py" ]
[ "import cv2\nfrom cv2 import imshow\n\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport pickle\n\nif __name__ == '__main__':\n print('Hello World')\n \n # Connect to video Source\n cam = cv2.VideoCapture()\n #\n #\n #\n # AXIS M1124 Video streaming\n cam.open(\"http://192.168.1.151/axis-cgi/mjpg/video.cgi?fps=1\")\n\n if cam.isOpened():\n print(\"Camera connection established.\")\n else:\n print(\"Failed to connect to the camera.\")\n exit(-1)\n \n BoardSize = (9,6)\n \n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n objp = np.zeros((BoardSize[0]*BoardSize[1],3), np.float32)\n objp[:,:2] = np.mgrid[0:BoardSize[0],0:BoardSize[1]].T.reshape(-1,2)\n \n # Arrays to store object points and image points from all the images.\n objpoints = [] # 3d point in real world space\n imgpoints = [] # 2d points in image plane.\n \n i = 0\n \n while(True):\n ret, frame = cam.read() \n \n imshow(\"Source video\", frame) \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break \n" ]
[ [ "numpy.zeros" ] ]
wh629/CNLI-generalization
[ "5c6d4df5f6a7e58d23c6db1a9dbc50eb9bcfda2b" ]
[ "jiant/jiant/tasks/lib/scitail.py" ]
[ "import numpy as np\nimport pandas as pd\nimport torch\nfrom dataclasses import dataclass\nfrom typing import List\n\nfrom jiant.tasks.core import (\n BaseExample,\n BaseTokenizedExample,\n BaseDataRow,\n BatchMixin,\n Task,\n TaskTypes,\n)\nfrom jiant.tasks.lib.templates.shared import labels_to_bimap, double_sentence_featurize\n\n\n@dataclass\nclass Example(BaseExample):\n guid: str\n input_premise: str\n input_hypothesis: str\n label: str\n\n def tokenize(self, tokenizer):\n return TokenizedExample(\n guid=self.guid,\n input_premise=tokenizer.tokenize(self.input_premise),\n input_hypothesis=tokenizer.tokenize(self.input_hypothesis),\n label_id=SciTailTask.LABEL_TO_ID[self.label],\n )\n\n\n@dataclass\nclass TokenizedExample(BaseTokenizedExample):\n guid: str\n input_premise: List\n input_hypothesis: List\n label_id: int\n\n def featurize(self, tokenizer, feat_spec):\n return double_sentence_featurize(\n guid=self.guid,\n input_tokens_a=self.input_premise,\n input_tokens_b=self.input_hypothesis,\n label_id=self.label_id,\n tokenizer=tokenizer,\n feat_spec=feat_spec,\n data_row_class=DataRow,\n )\n\n\n@dataclass\nclass DataRow(BaseDataRow):\n guid: str\n input_ids: np.ndarray\n input_mask: np.ndarray\n segment_ids: np.ndarray\n label_id: int\n tokens: list\n\n\n@dataclass\nclass Batch(BatchMixin):\n input_ids: torch.LongTensor\n input_mask: torch.LongTensor\n segment_ids: torch.LongTensor\n label_id: torch.LongTensor\n tokens: list\n\n\nclass SciTailTask(Task):\n Example = Example\n TokenizedExample = Example\n DataRow = DataRow\n Batch = Batch\n\n TASK_TYPE = TaskTypes.CLASSIFICATION\n LABELS = [\"entails\", \"neutral\"]\n\n LABEL_TO_ID, ID_TO_LABEL = labels_to_bimap(LABELS)\n\n def get_train_examples(self):\n return self._create_examples(self.train_path, set_type=\"train\")\n\n def get_val_examples(self):\n return self._create_examples(self.val_path, set_type=\"val\")\n\n def get_test_examples(self):\n return self._create_examples(self.test_path, set_type=\"test\")\n\n @classmethod\n def _create_examples(cls, path, set_type):\n df = pd.read_csv(path, sep=\"\\t\", header=None, names=[\"premise\", \"hypothesis\", \"label\"],)\n examples = []\n for i, row in enumerate(df.itertuples()):\n examples.append(\n Example(\n guid=\"%s-%s\" % (set_type, i),\n input_premise=row.premise,\n input_hypothesis=row.hypothesis,\n label=row.label if set_type != \"test\" else cls.LABELS[-1],\n )\n )\n return examples\n" ]
[ [ "pandas.read_csv" ] ]
glenn-jocher/torchflare
[ "3c55b5a0761f2e85dd6da95767c6ec03f0f5baad" ]
[ "torchflare/modules/arcface.py" ]
[ "\"\"\"Implements ArcFace.\"\"\"\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ArcFace(nn.Module):\n \"\"\"Implementation of ArcFace.\n\n ArcFace: : [Additive Angular Margin Loss for Deep Face Recognition](https://arxiv.org/abs/1801.07698)\n \"\"\"\n\n def __init__(self, in_features, out_features, s=30.0, m=0.35):\n \"\"\"Class Constructor.\n\n Args:\n in_features: Size of the input features\n out_features: The size of output features(usually number of num_classes)\n s: The norm for input features.\n m: margin\n \"\"\"\n super(ArcFace, self).__init__()\n\n self.in_features = in_features\n self.out_features = out_features\n self.s = s\n self.m = m\n self.Weight = nn.Parameter(torch.FloatTensor(self.out_features, self.in_features))\n nn.init.xavier_uniform_(self.Weight)\n self.threshold = math.pi - self.m\n self.eps = 1e-7\n\n def forward(self, features: torch.Tensor, targets: torch.Tensor = None) -> torch.Tensor:\n \"\"\"Forward Pass.\n\n Args:\n features: The input features of shape (BS x F) where BS is batch size and F is input feature dimension.\n targets: The targets with shape BS , where BS is batch size\n\n Returns:\n Logits with shape (BS x out_features)\n \"\"\"\n cos_theta = F.linear(F.normalize(features), F.normalize(self.Weight))\n if targets is None:\n return cos_theta\n\n theta = torch.acos(torch.clamp(cos_theta, -1 + self.eps, 1 - self.eps))\n one_hot = torch.zeros_like(cos_theta)\n one_hot.scatter_(1, targets.view(-1, 1).long(), 1)\n\n mask = torch.where(theta > self.threshold, torch.zeros_like(one_hot), one_hot)\n\n logits = torch.where(mask.bool(), theta + self.m, theta)\n logits = torch.cos(logits)\n logits *= self.s\n\n return logits\n" ]
[ [ "torch.cos", "torch.nn.functional.normalize", "torch.FloatTensor", "torch.nn.init.xavier_uniform_", "torch.clamp", "torch.zeros_like" ] ]
jacklee-scau/ssd.pytorch-master
[ "ff2953bc773fb917dd6ab70ffccc2aa0cbe60e0e" ]
[ "train.py" ]
[ "from data import *\nfrom utils.augmentations import SSDAugmentation\nfrom layers.modules import MultiBoxLoss\nfrom ssd import build_ssd\nimport os\nimport sys\nimport time\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport torch.utils.data as data\nimport numpy as np\nimport argparse\n\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nparser = argparse.ArgumentParser(\n description='Single Shot MultiBox Detector Training With Pytorch')\ntrain_set = parser.add_mutually_exclusive_group()\nparser.add_argument('--dataset', default='VOC', choices=['VOC', 'COCO'],\n type=str, help='VOC or COCO')\nparser.add_argument('--dataset_root', default=VOC_ROOT,\n help='Dataset root directory path')\nparser.add_argument('--basenet', default='vgg16_reducedfc.pth',#vgg16_reducedfc\n help='Pretrained base model')\nparser.add_argument('--batch_size', default=32, type=int,\n help='Batch size for training')\nparser.add_argument('--resume', default='weights/VOC.pth', type=str,\n help='Checkpoint state_dict file to resume training from')\nparser.add_argument('--start_iter', default=4000, type=int,\n help='Resume training at this iter')\nparser.add_argument('--num_workers', default=0, type=int,\n help='Number of workers used in dataloading')\nparser.add_argument('--cuda', default=True, type=str2bool,\n help='Use CUDA to train model')\nparser.add_argument('--lr', '--learning-rate', default=1e-4, type=float,\n help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float,\n help='Momentum value for optim')\nparser.add_argument('--weight_decay', default=5e-4, type=float,\n help='Weight decay for SGD')\nparser.add_argument('--gamma', default=0.1, type=float,\n help='Gamma update for SGD')\nparser.add_argument('--visdom', default=False, type=str2bool,\n help='Use visdom for loss visualization')\nparser.add_argument('--save_folder', default='weights/',\n help='Directory for saving checkpoint models')\nargs = parser.parse_args()\n\n\nif torch.cuda.is_available():\n if args.cuda:\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n if not args.cuda:\n print(\"WARNING: It looks like you have a CUDA device, but aren't \" +\n \"using CUDA.\\nRun with --cuda for optimal training speed.\")\n torch.set_default_tensor_type('torch.FloatTensor')\nelse:\n torch.set_default_tensor_type('torch.FloatTensor')\n\nif not os.path.exists(args.save_folder):\n os.mkdir(args.save_folder)\n\n\ndef train():\n if args.dataset == 'COCO':\n if args.dataset_root == VOC_ROOT:\n if not os.path.exists(COCO_ROOT):\n parser.error('Must specify dataset_root if specifying dataset')\n print(\"WARNING: Using default COCO dataset_root because \" +\n \"--dataset_root was not specified.\")\n args.dataset_root = COCO_ROOT\n cfg = coco\n dataset = COCODetection(root=args.dataset_root,\n transform=SSDAugmentation(cfg['min_dim'],\n MEANS))\n elif args.dataset == 'VOC':\n if args.dataset_root == COCO_ROOT:\n parser.error('Must specify dataset if specifying dataset_root')\n cfg = voc\n dataset = VOCDetection(root=args.dataset_root,\n transform=SSDAugmentation(cfg['min_dim'],\n MEANS))\n\n if args.visdom:\n import visdom\n viz = visdom.Visdom()\n\n ssd_net = build_ssd('train', cfg['min_dim'], cfg['num_classes'])\n net = ssd_net\n\n if args.cuda:\n net = torch.nn.DataParallel(ssd_net)\n cudnn.benchmark = True\n\n if args.resume:\n print('Resuming training, loading 
{}...'.format(args.resume))\n ssd_net.load_weights(args.resume)\n else:\n vgg_weights = torch.load(args.save_folder + args.basenet)\n print('Loading base network...')\n ssd_net.vgg.load_state_dict(vgg_weights)\n\n if args.cuda:\n net = net.cuda()\n\n if not args.resume:\n print('Initializing weights...')\n # initialize newly added layers' weights with xavier method\n ssd_net.extras.apply(weights_init)\n ssd_net.loc.apply(weights_init)\n ssd_net.conf.apply(weights_init)\n\n optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay)\n criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,\n False, args.cuda)\n\n net.train()\n # loss counters\n loc_loss = 0\n conf_loss = 0\n epoch = 0\n print('Loading the dataset...')\n\n epoch_size = len(dataset) // args.batch_size\n print('Training SSD on:', dataset.name)\n print('Using the specified args:')\n print(args)\n\n step_index = 0\n\n if args.visdom:\n vis_title = 'SSD.PyTorch on ' + dataset.name\n vis_legend = ['Loc Loss', 'Conf Loss', 'Total Loss']\n iter_plot = create_vis_plot('Iteration', 'Loss', vis_title, vis_legend)\n epoch_plot = create_vis_plot('Epoch', 'Loss', vis_title, vis_legend)\n\n data_loader = data.DataLoader(dataset, args.batch_size,\n num_workers=args.num_workers,\n shuffle=False, collate_fn=detection_collate,\n pin_memory=True)\n # create batch iterator\n batch_iterator = iter(data_loader)\n for iteration in range(args.start_iter, cfg['max_iter']):\n if args.visdom and iteration != 0 and (iteration % epoch_size == 0):\n update_vis_plot(epoch, loc_loss, conf_loss, epoch_plot, None,\n 'append', epoch_size)\n # reset epoch loss counters\n loc_loss = 0\n conf_loss = 0\n epoch += 1\n\n if iteration in cfg['lr_steps']:\n step_index += 1\n adjust_learning_rate(optimizer, args.gamma, step_index)\n\n # load train data\n try:\n images, targets = next(batch_iterator)\n except StopIteration:\n batch_iterator = iter(data_loader)\n images, targets = next(batch_iterator)\n\n if args.cuda:\n images = Variable(images.cuda())\n targets = [Variable(ann.cuda(), volatile=True) for ann in targets]\n else:\n images = Variable(images)\n targets = [Variable(ann, volatile=True) for ann in targets]\n # forward\n t0 = time.time()\n out = net(images)\n # backprop\n optimizer.zero_grad()\n loss_l, loss_c = criterion(out, targets)\n loss = loss_l + loss_c\n loss.backward()\n optimizer.step()\n t1 = time.time()\n loc_loss += loss_l.item()\n conf_loss += loss_c.item()\n\n if iteration % 10 == 0:\n print('timer: %.4f sec.' 
% (t1 - t0))\n print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss.item()), end=' ')\n\n if args.visdom:\n update_vis_plot(iteration, loss_l.item(), loss_c.item(),\n iter_plot, epoch_plot, 'append')\n\n if iteration != 0 and iteration % 5000 == 0:\n print('Saving state, iter:', iteration)\n torch.save(ssd_net.state_dict(), 'weights/ssd300_COCO_' +\n repr(iteration) + '.pth')\n torch.save(ssd_net.state_dict(),\n args.save_folder + '' + args.dataset + '.pth')\n\n\ndef adjust_learning_rate(optimizer, gamma, step):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 at every\n specified step\n # Adapted from PyTorch Imagenet example:\n # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n \"\"\"\n lr = args.lr * (gamma ** (step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef xavier(param):\n init.xavier_uniform(param)\n\n\ndef weights_init(m):\n if isinstance(m, nn.Conv2d):\n xavier(m.weight.data)\n m.bias.data.zero_()\n\n\ndef create_vis_plot(_xlabel, _ylabel, _title, _legend):\n return viz.line(\n X=torch.zeros((1,)).cpu(),\n Y=torch.zeros((1, 3)).cpu(),\n opts=dict(\n xlabel=_xlabel,\n ylabel=_ylabel,\n title=_title,\n legend=_legend\n )\n )\n\n\ndef update_vis_plot(iteration, loc, conf, window1, window2, update_type,\n epoch_size=1):\n viz.line(\n X=torch.ones((1, 3)).cpu() * iteration,\n Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu() / epoch_size,\n win=window1,\n update=update_type\n )\n # initialize epoch plot on first iteration\n if iteration == 0:\n viz.line(\n X=torch.zeros((1, 3)).cpu(),\n Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu(),\n win=window2,\n update=True\n )\n\n\nif __name__ == '__main__':\n train()\n" ]
[ [ "torch.zeros", "torch.nn.init.xavier_uniform", "torch.autograd.Variable", "torch.set_default_tensor_type", "torch.ones", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.load", "torch.Tensor", "torch.nn.DataParallel" ] ]
chr0n1x/pymldb
[ "c86e8191d4d36aad22500fa4298b23580aac6e6f" ]
[ "pymldb/data.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (c) 2015 Datacratic Inc. All rights reserved.\n# @Author: Alexis Tremblay\n# @Email: [email protected]\n# @Date: 2015-01-07 15:45:01\n# @Last Modified by: Alexis Tremblay\n# @Last Modified time: 2015-06-02 08:32:09\n# @File Name: data.py\n\n\nimport pandas as pd\nfrom pymldb.query import Query\nfrom pymldb.index import Time, Index\nimport requests\nimport logging\nimport numpy as np\nlogging.basicConfig(level=logging.DEBUG)\n\n\nclass BatFrame(object):\n def __init__(self, dataset_url):\n self.dataset_url = dataset_url\n self.query = Query(dataset_url)\n self._time = Time(dataset_url)\n self._index = Index(self)\n\n def __getitem__(self, val):\n\n if isinstance(val, str):\n col = Column(val, self.dataset_url)\n col.query.mergeQuery(self.query)\n return col\n elif isinstance(val, Query):\n bf = self.copy()\n # bf.query.addSELECT('*')\n bf.query.mergeQuery(val)\n return bf\n elif isinstance(val, slice):\n start = val.start\n stop = val.stop\n # step = val.step\n bf = self.copy()\n # bf.query.addSELECT('*')\n if start is not None:\n bf.query.setOFFSET(start)\n if stop is not None:\n bf.query.setLIMIT(stop)\n return bf\n elif isinstance(val, list):\n bf = self.copy()\n for value in val:\n bf.query.addSELECT(\"\\\"{}\\\"\".format(value))\n return bf\n elif isinstance(val, Column):\n bf = self.copy()\n bf.query.addWHERE(\"({})\".format(val.execution_name))\n return bf\n\n @property\n def columns(self):\n \"\"\"Returns a numpy array of the columns name\"\"\"\n return requests.get(self.dataset_url + '/columns').json()\n\n @property\n def rows(self):\n \"\"\"Returns a numpy array of the rows name\"\"\"\n bf = self.copy()\n result = bf.query.executeQuery(format=\"soa\")\n return result[\"_rowName\"]\n\n @property\n def time(self):\n copy_time = self._time.copy()\n return copy_time.query.mergeQuery(self.Query)\n\n @property\n def ix(self):\n copy_index = self._index.copy()\n return copy_index\n\n def copy(self):\n bf = BatFrame(self.dataset_url)\n bf.query = self.query.copy()\n return bf\n\n def toPandas(self):\n result = self.query.executeQuery(format=\"aos\")\n if len(result) == 0:\n return pd.DataFrame()\n return pd.DataFrame.from_records(result, index=\"_rowName\")\n\n def head(self, num_rows=5):\n bf = self.copy()\n bf.query.setLIMIT(num_rows)\n return bf\n\n def query(self, query):\n raise NotImplementedError()\n\n def sort(self, value, ascending=True):\n bf = self.copy()\n if not isinstance(value, list):\n value = [value]\n\n if not isinstance(ascending, list):\n ascending = [ascending]*len(value)\n\n if len(value) != len(ascending):\n raise RuntimeError(\"len(value) != len(ascending)\")\n\n for by, asc in zip(value, ascending):\n if asc:\n sort = \"ASC\"\n else:\n sort = \"DESC\"\n bf.query.addORDERBY(\"\\\"{}\\\" {}\".format(by, sort))\n return bf\n\n @property\n def shape(self):\n \"\"\"\n Returns (rowCount, valueCount)\n \"\"\"\n bf = self.copy()\n content = requests.get(bf.dataset_url).json()\n rowCount = content['status']['rowCount']\n valueCount = content['status']['valueCount']\n\n return (rowCount, valueCount)\n\n def __repr__(self):\n bf = self.copy()\n bf.query.setLIMIT(40)\n print(bf.toPandas())\n response = requests.get(bf.dataset_url).json()\n try:\n rowCount = response['status']['rowCount']\n except:\n rowCount = None\n\n if rowCount is not None and rowCount > 40:\n print(\"{} rows\".format(rowCount))\n return \"\"\n\n\nclass Column(object):\n\n def __init__(self, name, dataset_url):\n \"\"\"\n Parameters\n ----------\n name: string\n Name 
of the column. No check is actually done to see if the column\n exists.\n dataset_id:\n The base url where the dataset is located.\n e.g. localhost:8888/v1/datasets/<dataset_name>\n \"\"\"\n logging.debug(\"Instanciating Column with {}\".format(name))\n self.name = \"\\\"{}\\\"\".format(name)\n self.execution_name = \"\\\"{}\\\"\".format(name)\n self.dataset_url = dataset_url\n self.query = Query(dataset_url)\n self.query.addSELECT(self.name)\n\n @property\n def values(self):\n result = self.query.executeQuery(format=\"soa\")\n if len(result) > 2:\n raise RuntimeError(\"Only one column should be returned\")\n colName = [x for x in result.keys() if x != \"_rowName\"][0]\n return np.array(result[colName])\n\n def __getitem__(self, val):\n\n if isinstance(val, slice):\n start = val.start\n stop = val.stop\n # step = val.step\n col = self.copy()\n if start is not None:\n col.query.setOFFSET(start)\n if stop is not None:\n col.query.setLIMIT(stop)\n return col\n elif isinstance(val, Query):\n col = self.copy()\n col.query.mergeQuery(val)\n return col\n elif isinstance(val, str):\n col = self.copy()\n col.query.addWHERE(\"(rowName()='{}')\".format(val))\n return col\n\n\n ####################\n # Rich comparison #\n ####################\n def _comparison(self, value, operator):\n \"\"\"\n Parameters\n ----------\n value: Column object or base type\n The value against which to compare the column. It can either be\n another column or a base type value (e.g. int)\n\n Returns\n -------\n self.query\n\n Notes\n -----\n Returning self.query will allow the next object to use this column\n ops and concatenate something else\n \"\"\"\n if isinstance(value, Column):\n self.query.addWHERE(\"(({}){}({}))\".format(\n self.execution_name,\n operator,\n value.execution_name))\n elif isinstance(value, str):\n self.query.addWHERE(\"(({}){}\\'{}\\')\".format(\n self.execution_name,\n operator,\n value))\n else:\n self.query.addWHERE(\"(({}){}({}))\".format(\n self.execution_name,\n operator,\n value))\n\n copy = self.copy()\n copy.query.removeSELECT(\"{}\".format(copy.execution_name))\n return copy.query\n\n def __eq__(self, value):\n return self._comparison(value, '=')\n\n def __ne__(self, value):\n return self._comparison(value, '!=')\n\n def __gt__(self, value):\n return self._comparison(value, '>')\n\n def __ge__(self, value):\n return self._comparison(value, '>=')\n\n def __lt__(self, value):\n return self._comparison(value, '<')\n\n def __le__(self, value):\n return self._comparison(value, '<=')\n\n ##################################\n # Binary arithmetic operations #\n ##################################\n def _binary_arithemtic(self, left, binary, right):\n \"\"\"\n Parameters\n ----------\n operand: Column object, integer or float\n Value on which to apply operator to this column\n binary: char\n binary arithmetic operator (-, +, *, /, ^, %)\n\n Returns\n -------\n self\n\n Notes\n -----\n Returning self will allow the next object to use this column ops and\n concatenate something else\n \"\"\"\n if isinstance(right, (int, float)):\n right = right\n elif isinstance(right, Column):\n right = right.execution_name\n else:\n raise AttributeError(\n \"{} can only be used \".format(binary)\n + \"with integer, float or column\")\n\n if isinstance(left, (int, float)):\n left = left\n elif isinstance(left, Column):\n left = left.execution_name\n else:\n raise AttributeError(\n \"{} can only be used \".format(binary)\n + \"with integer, float or column\")\n\n copy = self.copy()\n 
copy.query.removeSELECT(\"{}\".format(copy.execution_name))\n if binary == '^': # POWER needs a different treatment\n copy.execution_name = \"pow({},{})\".format(left, right)\n else:\n copy.execution_name = \"{}{}{}\".format(left, binary, right)\n copy.query.addSELECT(copy.execution_name)\n\n return copy\n\n def __mul__(self, value):\n return self._binary_arithemtic(self, '*', value)\n\n def __rmul__(self, value):\n return self._binary_arithemtic(value, '*', self)\n\n def __div__(self, value):\n if isinstance(value, (int, float)) and value == 0:\n raise ValueError(\n \"Cannot divide by zero. \"\n \"Do you really want to explode the planet?\")\n return self._binary_arithemtic(self, '/', value)\n\n def __rdiv__(self, value):\n return self._binary_arithemtic(value, '/', self)\n\n def __truediv__(self, value):\n if isinstance(value, (int, float)) and value == 0:\n raise ValueError(\n \"Cannot divide by zero. \"\n \"Do you really want to explode the planet?\")\n return self._binary_arithemtic(self, '/', value)\n\n def __rtruediv__(self, value):\n return self._binary_arithemtic(value, '/', self)\n\n def __sub__(self, value):\n return self._binary_arithemtic(self, '-', value)\n\n def __rsub__(self, value):\n return self._binary_arithemtic(value, '-', self)\n\n def __add__(self, value):\n return self._binary_arithemtic(self, '+', value)\n\n def __radd__(self, value):\n return self._binary_arithemtic(value, '+', self)\n\n def __pow__(self, value):\n return self._binary_arithemtic(self, '^', value)\n\n def __rpow__(self, value):\n return self._binary_arithemtic(value, '^', self)\n\n def __mod__(self, value):\n return self._binary_arithemtic(self, '%', value)\n\n def __rmod__(self, value):\n return self._binary_arithemtic(value, '%', self)\n\n def __or__(self, value):\n col = self.copy()\n left = self.execution_name\n right = value\n\n col.query.removeSELECT(left)\n if isinstance(right, Column):\n right = value.execution_name\n col.query.removeSELECT(right)\n elif isinstance(right, Query):\n right = right.WHERE\n\n col.query.addWHERE('(({}) OR ({}))'.format(left, right))\n return col.query\n\n def __and__(self, value):\n col = self.copy()\n left = self.execution_name\n right = value\n\n col.query.removeSELECT(left)\n if isinstance(right, Column):\n right = value.execution_name\n col.query.removeSELECT(right)\n elif isinstance(right, Query):\n right = right.WHERE\n\n col.query.addWHERE('(({}) AND ({}))'.format(left, right))\n\n return col.query\n\n def __rand__(self, value):\n col = self.copy()\n left = self.execution_name\n right = value\n\n col.query.removeSELECT(left)\n if isinstance(right, Column):\n right = value.execution_name\n col.query.removeSELECT(right)\n elif isinstance(right, Query):\n right = right.WHERE\n\n col.query.addWHERE('(({}) AND ({}))'.format(right, left))\n\n def __ror__(self, value):\n col = self.copy()\n left = self.execution_name\n right = value\n\n col.query.removeSELECT(left)\n if isinstance(right, Column):\n right = value.execution_name\n col.query.removeSELECT(right)\n elif isinstance(right, Query):\n right = right.WHERE\n\n col.query.addWHERE('(({}) OR ({}))'.format(right, left))\n return col.query\n\n #################################\n # Unary arithmetic operations #\n #################################\n def _unary_arithmetic(self, unary):\n \"\"\"\n Parameters\n ----------\n unary: char\n Unary arithmetic operator (-, +) to be applied to this column\n\n Returns\n -------\n self\n\n Notes\n -----\n Returning self will allow the next object to use this column ops and\n 
concatenate something else\n \"\"\"\n copy = self.copy()\n copy.query.removeSELECT(\"{}\".format(copy.execution_name))\n copy.execution_name = \"{}({})\".format(unary, self.execution_name)\n copy.query.addSELECT(copy.execution_name)\n\n return copy\n\n def __neg__(self):\n return self._unary_arithmetic('-')\n\n def __pos__(self):\n raise NotImplementedError()\n\n def __invert__(self):\n copy = self.copy()\n copy.execution_name = \"NOT {}\".format(copy.execution_name)\n return copy\n\n def __abs__(self):\n raise NotImplementedError()\n\n #############\n # Casting #\n #############\n def __float__(self):\n raise NotImplementedError()\n\n def __int__(self):\n raise NotImplementedError()\n\n def __long__(self):\n raise NotImplementedError()\n\n ###########\n # Other #\n ###########\n\n def __iter__(self):\n result = self.query.executeQuery(format=\"soa\")\n if len(result) > 2:\n raise RuntimeError(\"Only one column should be returned\")\n colName = [x for x in result.keys() if x != \"_rowName\"][0]\n values = result[colName]\n\n i = 0\n while i < len(values):\n yield values[i]\n i += 1\n\n def max(self):\n copy = self.copy()\n copy.query.removeSELECT(\"{}\".format(copy.execution_name))\n copy.execution_name = \"max({})\".format(self.execution_name)\n copy.query.addSELECT(copy.execution_name)\n copy.query.addGROUPBY(1)\n\n result = copy.query.executeQuery(format=\"table\")\n return result[1][1]\n\n def min(self):\n copy = self.copy()\n copy.query.removeSELECT(\"{}\".format(copy.execution_name))\n copy.execution_name = \"min({})\".format(self.execution_name)\n copy.query.addSELECT(copy.execution_name)\n copy.query.addGROUPBY(1)\n\n result = copy.query.executeQuery(format=\"table\")\n return result[1][1]\n\n def copy(self):\n name = self.name[1:-1] # Removing the surrounding ''\n col = Column(name, self.dataset_url)\n col.execution_name = self.execution_name\n col.query = self.query.copy()\n return col\n\n def count(self):\n \"\"\"Return number of non-NA/null observations in the Series\"\"\"\n raise NotImplementedError()\n\n def head(self, n=5):\n \"\"\"Returns first n rows\"\"\"\n col = self.copy()\n col.query.setLIMIT(n)\n return col.toPandas()\n\n def isnull(self):\n raise NotImplementedError()\n\n def isin(self, values):\n raise NotImplementedError()\n\n def value_counts(self):\n raise NotImplementedError()\n\n def unique(self):\n if self.name == self.execution_name:\n url = self.dataset_url + '/columns/{}/values'.format(\n self.name[1:-1])\n logging.debug(\"Getting values at {}\".format(url))\n return requests.get(url).json()\n else:\n result = self.query.executeQuery(format=\"soa\")\n if len(result) > 2:\n raise RuntimeError(\"Only one column should be returned\")\n colName = [x for x in result.keys() if x != \"_rowName\"][0]\n return set(result[colName])\n\n def sort(self, ascending=True):\n col = self.copy()\n\n if ascending:\n sort = \"ASC\"\n else:\n sort = \"DESC\"\n col.query.addORDERBY(\"{} {}\".format(col.execution_name, sort))\n return col\n\n def toPandas(self):\n result = self.query.executeQuery(format=\"soa\")\n if len(result) > 2:\n raise RuntimeError(\"Only one column should be returned\")\n colName = [x for x in result.keys() if x != \"_rowName\"][0]\n values = result[colName]\n rowName = result[\"_rowName\"]\n if len(values) > 0:\n s = pd.Series(values, index=rowName)\n else:\n s = pd.Series()\n return s\n\n def __repr__(self):\n col = self.copy()\n col.query.setLIMIT(40)\n print(col.toPandas())\n response = requests.get(col.dataset_url).json()\n try:\n rowCount = 
response['status']['rowCount']\n except:\n rowCount = None\n\n if rowCount is not None and rowCount > 40:\n print(\"{} rows\".format(rowCount))\n return \"\"\n" ]
[ [ "numpy.array", "pandas.DataFrame.from_records", "pandas.Series", "pandas.DataFrame" ] ]
lisaplag/pints
[ "3de6617e57ba5b395edaca48961bfc5a4b7209b3" ]
[ "pints/toy/_constant_model.py" ]
[ "#\n# Constant model with multiple outputs.\n#\n# This file is part of PINTS (https://github.com/pints-team/pints/) which is\n# released under the BSD 3-clause license. See accompanying LICENSE.md for\n# copyright notice and full license details.\n#\nfrom __future__ import absolute_import, division\nfrom __future__ import print_function, unicode_literals\nimport numpy as np\nimport pints\n\n\nclass ConstantModel(pints.ForwardModelS1):\n r\"\"\"\n Toy model that's constant over time, linear over the parameters, mostly\n useful for unit testing.\n\n For an `n`-dimensional model, evaluated with parameters\n ``p = [p_1, p_2, ..., p_n]``, the simulated values are time-invariant, so\n that for any time ``t``\n\n .. math::\n f(t) = (p_1, 2 p_2, 3 p_3, ..., n p_n)\n\n The derivatives with respect to the derivatives are time-invariant, and\n simply equal\n\n .. math::\n\n \\frac{\\partial{f_i(t)}}{dp_j} =\n \\begin{cases} 1, i = j\\\\0, i \\neq j \\end{cases}\n\n Extends :class:`pints.ForwardModelS1`.\n\n Parameters\n ----------\n n : int\n The number of parameters (and outputs) the model should have.\n force_multi_output : boolean\n Set to ``True`` to always return output of the shape\n ``(n_times, n_outputs)``, even if ``n_outputs == 1``.\n\n Example\n -------\n ::\n\n times = np.linspace(0, 1, 100)\n m = pints.ConstantModel(2)\n m.simulate([1, 2], times)\n\n In this example, the returned output is ``[1, 4]`` at every point in time.\n \"\"\"\n\n def __init__(self, n, force_multi_output=False):\n super(ConstantModel, self).__init__()\n\n n = int(n)\n if n < 1:\n raise ValueError('Number of parameters must be 1 or greater.')\n self._r = np.arange(1, 1 + n)\n self._n = n\n\n # Reshape for single-output models?\n self._reshape = (n == 1 and not force_multi_output)\n\n def n_outputs(self):\n \"\"\" See :meth:`pints.ForwardModel.n_outputs()`. \"\"\"\n return self._n\n\n def n_parameters(self):\n \"\"\" See :meth:`pints.ForwardModel.n_parameters()`. \"\"\"\n return self._n\n\n def simulate(self, parameters, times):\n \"\"\" See :meth:`pints.ForwardModel.simulate()`. \"\"\"\n\n # Check input\n parameters = np.asarray(parameters)\n times = np.asarray(times)\n if np.any(times < 0):\n raise ValueError('Negative times are not allowed.')\n if len(parameters) != self._n:\n raise ValueError('Expected ' + str(self._n) + ' parameters.')\n if not np.all(np.isfinite(parameters)):\n raise ValueError('All parameters must be finite.')\n\n # Calculate\n out = parameters.reshape((1, self._n)) * self._r\n out = out.repeat(len(times), axis=0)\n if self._reshape:\n out = out.reshape((len(times), ))\n return out\n\n def simulateS1(self, parameters, times):\n \"\"\" See :meth:`pints.ForwardModel.simulateS1()`. \"\"\"\n y = self.simulate(parameters, times)\n if self._reshape:\n dy = np.ones(len(times))\n else:\n # Output has shape (times, outputs, parameters)\n # At every time point, there is a matrix:\n # [[df1/dp1, df1/dp2],\n # [df2/dp1, df2/dp2]] (for 2d...)\n # i.e.\n # [[df1/dp1, df1/dp2],\n # [df2/dp1, df2/dp2]]\n # i.e.\n # [[1, 0],\n # [0, 1]]\n dy = np.tile(np.diag(np.ones(self._n)), (len(times), 1, 1))\n return (y, dy)\n" ]
[ [ "numpy.asarray", "numpy.ones", "numpy.any", "numpy.arange", "numpy.isfinite" ] ]
dgarrett622/raven
[ "f36cc108f7500b0e2717df4832b69b801b43960d" ]
[ "ravenframework/SupervisedLearning/SupervisedLearning.py" ]
[ "# Copyright 2017 Battelle Energy Alliance, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n Created on May 8, 2018\n\n @author: alfoa, wangc\n\n Originally from ../SupervisedLearning.py, split in PR #650 in July 2018\n Base subclass definition for all supported type of ROM aka Surrogate Models etc\n Previous module notes:\n here we intend ROM as super-visioned learning,\n where we try to understand the underlying model by a set of labeled sample\n a sample is composed by (feature,label) that is easy translated in (input,output)\n\"\"\"\n\n#External Modules------------------------------------------------------------------------------------\nimport abc\nimport copy\nimport numpy as np\n#External Modules End--------------------------------------------------------------------------------\n\n#Internal Modules------------------------------------------------------------------------------------\nfrom ..utils import utils, mathUtils, xmlUtils\nfrom ..utils import InputTypes, InputData\nfrom ..BaseClasses import BaseInterface\n#Internal Modules End--------------------------------------------------------------------------------\n\nclass SupervisedLearning(BaseInterface):\n \"\"\"\n This is the general interface to any SupervisedLearning learning method.\n Essentially it contains a train method and an evaluate method\n \"\"\"\n returnType = '' # this describe the type of information generated the possibility are\n # 'boolean', 'integer', 'float'\n qualityEstType = [] # this describe the type of estimator returned known type are 'distance', 'probability'.\n # The values are returned by the self.__confidenceLocal__(Features)\n @classmethod\n def getInputSpecification(cls):\n \"\"\"\n Method to get a reference to a class that specifies the input data for\n class cls.\n @ In, cls, the class for which we are retrieving the specification\n @ Out, inputSpecification, InputData.ParameterInput, class to use for\n specifying input of cls.\n \"\"\"\n spec = super().getInputSpecification()\n spec.addParam(\"subType\", param_type=InputTypes.StringType, required=True,\n descr=r\"\"\"specify the type of ROM that will be used\"\"\")\n spec.addSub(InputData.parameterInputFactory('Features',contentType=InputTypes.StringListType,\n descr=r\"\"\"specifies the names of the features of this ROM.\n \\nb These parameters are going to be requested for the training of this object\n (see Section~\\ref{subsec:stepRomTrainer})\"\"\"))\n spec.addSub(InputData.parameterInputFactory('Target',contentType=InputTypes.StringListType,\n descr=r\"\"\"contains a comma separated list of the targets of this ROM. These parameters\n are the Figures of Merit (FOMs) this ROM is supposed to predict.\n \\nb These parameters are going to be requested for the training of this\n object (see Section \\ref{subsec:stepRomTrainer}).\"\"\"))\n spec.addSub(InputData.parameterInputFactory('pivotParameter',contentType=InputTypes.StringType,\n descr=r\"\"\"If a time-dependent ROM is requested, please specifies the pivot\n variable (e.g. 
time, etc) used in the input HistorySet.\"\"\", default='time'))\n cvInput = InputData.parameterInputFactory(\"CV\", contentType=InputTypes.StringType,\n descr=r\"\"\"The text portion of this node needs to contain the name of the \\xmlNode{PostProcessor} with \\xmlAttr{subType}\n ``CrossValidation``.\"\"\")\n cvInput.addParam(\"class\", InputTypes.StringType, descr=r\"\"\"should be set to \\xmlString{Model}\"\"\")\n cvInput.addParam(\"type\", InputTypes.StringType, descr=r\"\"\"should be set to \\xmlString{PostProcessor}\"\"\")\n spec.addSub(cvInput)\n AliasInput = InputData.parameterInputFactory(\"alias\", contentType=InputTypes.StringType,\n descr=r\"\"\"specifies alias for\n any variable of interest in the input or output space. These aliases can be used anywhere in the RAVEN input to\n refer to the variables. In the body of this node the user specifies the name of the variable that the model is going to use\n (during its execution).\"\"\")\n AliasInput.addParam(\"variable\", InputTypes.StringType, True, descr=r\"\"\"define the actual alias, usable throughout the RAVEN input\"\"\")\n AliasTypeInput = InputTypes.makeEnumType(\"aliasType\",\"aliasTypeType\",[\"input\",\"output\"])\n AliasInput.addParam(\"type\", AliasTypeInput, True, descr=r\"\"\"either ``input'' or ``output''.\"\"\")\n spec.addSub(AliasInput)\n return spec\n\n @staticmethod\n def checkArrayConsistency(arrayIn, isDynamic=False):\n \"\"\"\n This method checks the consistency of the in-array\n @ In, arrayIn, object, It should be an array\n @ In, isDynamic, bool, optional, is Dynamic?\n @ Out, (consistent, 'error msg'), tuple, tuple[0] is a bool (True -> everything is ok, False -> something wrong), tuple[1], string ,the error mesg\n \"\"\"\n #checking if None provides a more clear message about the problem\n if arrayIn is None:\n return (False,' The object is None, and contains no entries!')\n if type(arrayIn).__name__ == 'list':\n if isDynamic:\n for cnt, elementArray in enumerate(arrayIn):\n resp = SupervisedLearning.checkArrayConsistency(elementArray)\n if not resp[0]:\n return (False,' The element number '+str(cnt)+' is not a consistent array. Error: '+resp[1])\n else:\n return (False,' The list type is allowed for dynamic ROMs only')\n else:\n if type(arrayIn).__name__ not in ['ndarray','c1darray']:\n return (False,' The object is not a numpy array. Got type: '+type(arrayIn).__name__)\n if len(np.asarray(arrayIn).shape) > 1:\n return(False, ' The array must be 1-d. Got shape: '+str(np.asarray(arrayIn).shape))\n return (True,'')\n\n def __init__(self):\n \"\"\"\n A constructor that will appropriately initialize a supervised learning object\n @ In, None\n @ Out, None\n \"\"\"\n super().__init__()\n self.printTag = 'SupervisedLearning'\n self.features = None # \"inputs\" to this model\n self.target = None # \"outputs\" of this model\n self.amITrained = False # \"True\" if the ROM is alread trained\n self._dynamicHandling = False # time-like dependence in the model?\n self.dynamicFeatures = False # time-like dependence in the feature space? 
FIXME: this is not the right design\n self._assembledObjects = None # objects assembled by the ROM Model, passed through.\n #average value and sigma are used for normalization of the feature data\n #a dictionary where for each feature a tuple (average value, sigma)\n #these need to be declared in the child classes!!!!\n self.muAndSigmaFeatures = {} # normalization parameters\n self.metadataKeys = set() # keys that can be passed to DataObject as meta information\n self.metadataParams = {} # indexMap for metadataKeys to pass to a DataObject as meta dimensionality\n\n def _handleInput(self, paramInput):\n \"\"\"\n Function to handle the common parts of the model parameter input.\n @ In, paramInput, InputData.ParameterInput, the already parsed input.\n @ Out, None\n \"\"\"\n super()._handleInput(paramInput)\n nodes, notFound = paramInput.findNodesAndExtractValues(['Features', 'Target', 'pivotParameter'])\n assert(not notFound)\n self.features = nodes['Features']\n self.target = nodes['Target']\n self.pivotID = nodes['pivotParameter']\n dups = set(self.target).intersection(set(self.features))\n if len(dups) != 0:\n self.raiseAnError(IOError, 'The target(s) \"{}\" is/are also among the given features!'.format(', '.join(dups)))\n\n ## This method is used when the SupervisedLearning ROM is directly initiated within another module\n def initializeFromDict(self, inputDict):\n \"\"\"\n Function which initializes the ROM given a the information contained in inputDict\n @ In, inputDict, dict, dictionary containing the values required to initialize the ROM\n @ Out, None\n \"\"\"\n self.features = inputDict.get('Features', None)\n self.target = inputDict.get('Target', None)\n self.verbosity = inputDict.get('verbosity', None)\n\n def __getstate__(self):\n \"\"\"\n This function return the state of the ROM\n @ In, None\n @ Out, state, dict, it contains all the information needed by the ROM to be initialized\n \"\"\"\n state = copy.copy(self.__dict__)\n return state\n\n def __setstate__(self, d):\n \"\"\"\n Initialize the ROM with the data contained in newstate\n @ In, d, dict, it contains all the information needed by the ROM to be initialized\n @ Out, None\n \"\"\"\n self.__dict__.update(d)\n #FIXME: REMOVE THIS ONCE HERON GETS UPDATED WITH\n #FIXME: NEW PICKLED ROMS\n if 'dynamicFeatures' not in d:\n self.dynamicFeatures = False\n\n def setEstimator(self, estimatorList):\n \"\"\"\n Initialization method\n @ In, estimatorList, list of ROM instances/estimators used by ROM\n @ Out, None\n \"\"\"\n pass\n\n ## TODO: we may not need the set and read AssembleObjects\n ## currently only used by ROMCollection\n def setAssembledObjects(self, assembledObjects):\n \"\"\"\n Allows providing entities from the Assembler to be used in supervised learning algorithms.\n @ In, assembledObjects, dict, assembled objects that the ROM model requested as an Assembler.\n @ Out, None\n \"\"\"\n self._assembledObjects = assembledObjects\n\n def readAssembledObjects(self):\n \"\"\"\n Collects the entities from the Assembler as needed.\n In general, SVL don't need any assembled objects.\n @ In, None\n @ Out, None\n \"\"\"\n pass\n\n def train(self, tdict, indexMap=None):\n \"\"\"\n Method to perform the training of the SupervisedLearning algorithm\n NB.the SupervisedLearning object is committed to convert the dictionary that is passed (in), into the local format\n the interface with the kernels requires. 
So far the base class will do the translation into numpy\n @ In, tdict, dict, training dictionary\n @ In, indexMap, dict, mapping of variables to their dependent indices, if any\n @ Out, None\n \"\"\"\n if type(tdict) != dict:\n self.raiseAnError(TypeError,'In method \"train\", the training set needs to be provided through a dictionary. Type of the in-object is ' + str(type(tdict)))\n names, values = list(tdict.keys()), list(tdict.values())\n ## This is for handling the special case needed by skl *MultiTask* that\n ## requires multiple targets.\n targetValues = []\n for target in self.target:\n if target in names:\n targetValues.append(values[names.index(target)])\n else:\n self.raiseAnError(IOError,'The target '+target+' is not in the training set')\n\n # stack targets\n targetValues = np.stack(targetValues, axis=-1)\n # construct the evaluation matrixes\n ## add the indices if they're not present\n needFeatures = copy.deepcopy(self.features)\n needTargets = copy.deepcopy(self.target)\n if indexMap:\n for feat in self.features:\n for index in indexMap.get(feat, []):\n if index not in needFeatures and index not in needTargets:\n needFeatures.append(feat)\n if self.dynamicFeatures:\n featLen = 0\n for cnt, feat in enumerate(self.features):\n featLen = max(values[names.index(feat)][0].size, featLen)\n featureValues = np.zeros(shape=(len(targetValues), featLen,len(self.features)))\n else:\n featureValues = np.zeros(shape=(len(targetValues), len(self.features)))\n for cnt, feat in enumerate(self.features):\n if feat not in names:\n self.raiseAnError(IOError,'The feature sought '+feat+' is not in the training set')\n else:\n valueToUse = values[names.index(feat)]\n resp = self.checkArrayConsistency(valueToUse, self.isDynamic())\n if not resp[0]:\n self.raiseAnError(IOError,'In training set for feature '+feat+':'+resp[1])\n valueToUse = np.asarray(valueToUse)\n if len(valueToUse) != featureValues.shape[0]:\n self.raiseAWarning('feature values:',featureValues.shape[0],tag='ERROR')\n self.raiseAWarning('target values:',len(valueToUse),tag='ERROR')\n self.raiseAnError(IOError,'In training set, the number of values provided for feature '+feat+' are != number of target outcomes!')\n self._localNormalizeData(values,names,feat)\n # valueToUse can be either a matrix (for who can handle time-dep data) or a vector (for who can not)\n if self.dynamicFeatures:\n featureValues[:, :, cnt] = (valueToUse[:, :]- self.muAndSigmaFeatures[feat][0])/self.muAndSigmaFeatures[feat][1]\n else:\n featureValues[:,cnt] = ( (valueToUse[:,0] if len(valueToUse.shape) > 1 else valueToUse[:]) - self.muAndSigmaFeatures[feat][0])/self.muAndSigmaFeatures[feat][1]\n\n self.__trainLocal__(featureValues,targetValues)\n self.amITrained = True\n\n def _localNormalizeData(self,values,names,feat):\n \"\"\"\n Method to normalize data based on the mean and standard deviation. 
If undesired for a particular ROM,\n this method can be overloaded to simply pass (see, e.g., GaussPolynomialRom).\n @ In, values, list, list of feature values (from tdict)\n @ In, names, list, names of features (from tdict)\n @ In, feat, list, list of features (from ROM)\n @ Out, None\n \"\"\"\n self.muAndSigmaFeatures[feat] = mathUtils.normalizationFactors(values[names.index(feat)])\n\n def confidence(self, edict):\n \"\"\"\n This call is used to get an estimate of the confidence in the prediction.\n The base class self.confidence will translate a dictionary into numpy array, then call the local confidence\n @ In, edict, dict, evaluation dictionary\n @ Out, confidence, float, the confidence\n \"\"\"\n if type(edict) != dict:\n self.raiseAnError(IOError,'method \"confidence\". The inquiring set needs to be provided through a dictionary. Type of the in-object is ' + str(type(edict)))\n names, values = list(edict.keys()), list(edict.values())\n for index in range(len(values)):\n resp = self.checkArrayConsistency(values[index], self.isDynamic())\n if not resp[0]:\n self.raiseAnError(IOError,'In evaluate request for feature '+names[index]+':'+resp[1])\n\n if self.dynamicFeatures:\n featureValues = np.zeros(shape=(values[0].size, self.featureShape[1], len(self.features)))\n else:\n featureValues = np.zeros(shape=(values[0].size, len(self.features)))\n for cnt, feat in enumerate(self.features):\n if feat not in names:\n self.raiseAnError(IOError,'The feature sought '+feat+' is not in the evaluate set')\n else:\n resp = self.checkArrayConsistency(values[names.index(feat)], self.isDynamic())\n if not resp[0]:\n self.raiseAnError(IOError,'In training set for feature '+feat+':'+resp[1])\n featureValues[:,cnt] = values[names.index(feat)]\n return self.__confidenceLocal__(featureValues)\n\n # compatibility with BaseInterface requires having a \"run\" method\n # TODO during SVL rework, \"run\" should probably replace \"evaluate\", maybe?\n def run(self, edict):\n \"\"\"\n Method to perform the evaluation of a point or a set of points through the previous trained SupervisedLearning algorithm\n NB.the SupervisedLearning object is committed to convert the dictionary that is passed (in), into the local format\n the interface with the kernels requires.\n @ In, edict, dict, evaluation dictionary\n @ Out, evaluate, dict, {target: evaluated points}\n \"\"\"\n return self.evaluate(edict)\n\n def evaluate(self,edict):\n \"\"\"\n Method to perform the evaluation of a point or a set of points through the previous trained SupervisedLearning algorithm\n NB.the SupervisedLearning object is committed to convert the dictionary that is passed (in), into the local format\n the interface with the kernels requires.\n @ In, edict, dict, evaluation dictionary\n @ Out, evaluate, dict, {target: evaluated points}\n \"\"\"\n if type(edict) != dict:\n self.raiseAnError(IOError,'method \"evaluate\". The evaluate request/s need/s to be provided through a dictionary. 
Type of the in-object is ' + str(type(edict)))\n names, values = list(edict.keys()), list(edict.values())\n stepInFeatures = 0\n for index in range(len(values)):\n #If value is a float, convert to numpy array for evaluation\n if type(values[index]).__name__ == 'float':\n values[index] = np.array(values[index])\n resp = self.checkArrayConsistency(values[index], self.isDynamic())\n if not resp[0]:\n self.raiseAnError(IOError,'In evaluate request for feature '+names[index]+':'+resp[1])\n if self.dynamicFeatures:\n stepInFeatures = max(stepInFeatures,values[index].shape[-1])\n # construct the evaluation matrix\n if self.dynamicFeatures:\n featureValues = np.zeros(shape=(values[0].size, stepInFeatures, len(self.features)))\n else:\n featureValues = np.zeros(shape=(values[0].size, len(self.features)))\n for cnt, feat in enumerate(self.features):\n if feat not in names:\n self.raiseAnError(IOError,'The feature sought '+feat+' is not in the evaluate set')\n else:\n resp = self.checkArrayConsistency(values[names.index(feat)], self.isDynamic())\n if not resp[0]:\n self.raiseAnError(IOError,'In training set for feature '+feat+':'+resp[1])\n if self.dynamicFeatures:\n featureValues[:, :, cnt] = ((values[names.index(feat)] - self.muAndSigmaFeatures[feat][0]))/self.muAndSigmaFeatures[feat][1]\n else:\n featureValues[:,cnt] = ((values[names.index(feat)] - self.muAndSigmaFeatures[feat][0]))/self.muAndSigmaFeatures[feat][1]\n return self.__evaluateLocal__(featureValues)\n\n def reset(self):\n \"\"\"\n Reset ROM\n \"\"\"\n self.amITrained = False\n self.__resetLocal__()\n\n def returnInitialParameters(self):\n \"\"\"\n override this method to return the fix set of parameters of the ROM\n @ In, None\n @ Out, iniParDict, dict, initial parameter dictionary\n \"\"\"\n iniParDict = dict(list({'returnType':self.__class__.returnType,'qualityEstType':self.__class__.qualityEstType,'Features':self.features,\n 'Target':self.target,'returnType':self.__class__.returnType}.items()) + list(self.__returnInitialParametersLocal__().items()))\n return iniParDict\n\n def returnCurrentSetting(self):\n \"\"\"\n return the set of parameters of the ROM that can change during simulation\n @ In, None\n @ Out, currParDict, dict, current parameter dictionary\n \"\"\"\n currParDict = dict({'Trained':self.amITrained}.items() + self.__CurrentSettingDictLocal__().items())\n return currParDict\n\n def writeXMLPreamble(self, writeTo, targets=None):\n \"\"\"\n Allows the SVE to put whatever it wants into an XML file only once (right before calling pringXML)\n Extend in subclasses.\n @ In, writeTo, xmlUtils.StaticXmlElement instance, Element to write to\n @ In, targets, list, list of targets for whom information should be written\n @ Out, None\n \"\"\"\n # different calls depending on if it's static or dynamic\n if isinstance(writeTo, xmlUtils.DynamicXmlElement):\n writeTo.addScalar('ROM', \"type\", self.printTag, None, general = True)\n else:\n writeTo.addScalar('ROM', \"type\", self.printTag)\n\n def writePointwiseData(self, *args):\n \"\"\"\n Allows the SVE to add data to a DataObject\n Overload in subclasses.\n @ In, args, list, unused arguments\n @ Out, None\n \"\"\"\n # by default, nothing to write!\n self.raiseAMessage('Writing ROM \"{}\", but no pointwise data found. 
Moving on ...')\n\n def writeXML(self, writeTo, targets=None, skip=None):\n \"\"\"\n Allows the SVE to put whatever it wants into an XML to print to file.\n Overload in subclasses.\n @ In, writeTo, xmlUtils.StaticXmlElement, StaticXmlElement to write to\n @ In, targets, list, optional, list of targets for whom information should be written\n @ In, skip, list, optional, list of targets to skip\n @ Out, None\n \"\"\"\n writeTo.addScalar('ROM',\"noInfo\",'ROM has no special output options.')\n\n def isDynamic(self):\n \"\"\"\n This method is a utility function that tells if the relative ROM is able to\n treat dynamic data (e.g. time-series) on its own or not (Primarly called by LearningGate)\n @ In, None\n @ Out, isDynamic, bool, True if the ROM is able to treat dynamic data, False otherwise\n \"\"\"\n return self._dynamicHandling\n\n def reseed(self,seed):\n \"\"\"\n Used to reset the seed of the ROM. By default does nothing; overwrite in the inheriting classes as needed.\n @ In, seed, int, new seed to use\n @ Out, None\n \"\"\"\n return\n\n def setAdditionalParams(self, params):\n \"\"\"\n Sets parameters aside from initialization, such as during deserialization.\n @ In, params, dict, parameters to set (dependent on ROM)\n @ Out, None\n \"\"\"\n # reseeding is common to many\n seed = params.pop('seed', None)\n if seed:\n self.reseed(seed)\n # overload this method in subclasses to load other parameters\n\n ### ROM Clustering (see ROMCollection.py) ###\n def isClusterable(self):\n \"\"\"\n Allows ROM to declare whether it has methods for clustring. Default is no.\n @ In, None\n @ Out, isClusterable, bool, if True then has clustering mechanics.\n \"\"\"\n # only true if overridden.\n return False\n\n def checkRequestedClusterFeatures(self, request):\n \"\"\"\n Takes the user-requested features (sometimes \"all\") and interprets them for this ROM.\n @ In, request, dict(list), as from ROMColletion.Cluster._extrapolateRequestedClusterFeatures\n @ Out, interpreted, dict(list), interpreted features\n \"\"\"\n self.raiseAnError(NotImplementedError, 'This ROM is not prepared to handle feature cluster requests!')\n\n def getLocalRomClusterFeatures(self, *args, **kwargs):\n \"\"\"\n Provides metrics aka features on which clustering compatibility can be measured.\n This is called on LOCAL subsegment ROMs, not on the GLOBAL template ROM\n @ In, featureTemplate, str, format for feature inclusion\n @ In, settings, dict, as per getGlobalRomSegmentSettings\n @ In, picker, slice, indexer for segmenting data\n @ In, kwargs, dict, arbitrary keyword arguments\n @ Out, features, dict, {target_metric: np.array(floats)} features to cluster on\n \"\"\"\n # TODO can we do a generic basic statistics clustering on mean, std for all roms?\n self.raiseAnError(NotImplementedError, 'Clustering capabilities not yet implemented for \"{}\" ROM!'.format(self.__class__.__name__))\n\n def getGlobalRomSegmentSettings(self, trainingDict, divisions):\n \"\"\"\n Allows the ROM to perform some analysis before segmenting.\n Note this is called on the GLOBAL templateROM from the ROMcollection, NOT on the LOCAL subsegment ROMs!\n @ In, trainingDict, dict, data for training\n @ In, divisions, tuple, (division slice indices, unclustered spaces)\n @ Out, settings, object, arbitrary information about ROM clustering settings\n @ Out, trainingDict, dict, adjusted training data (possibly unchanged)\n \"\"\"\n # by default, do nothing\n return None, trainingDict\n\n def adjustLocalRomSegment(self, settings):\n \"\"\"\n Adjusts this ROM to 
account for it being a segment as a part of a larger ROM collection.\n Call this before training the subspace segment ROMs\n Note this is called on the LOCAL subsegment ROMs, NOT on the GLOBAL templateROM from the ROMcollection!\n @ In, settings, dict, as from getGlobalRomSegmentSettings\n @ Out, None\n \"\"\"\n # by default, do nothing\n pass\n\n def finalizeLocalRomSegmentEvaluation(self, settings, evaluation, picker):\n \"\"\"\n Allows global settings in \"settings\" to affect a LOCAL evaluation of a LOCAL ROM\n Note this is called on the LOCAL subsegment ROM and not the GLOBAL templateROM.\n @ In, settings, dict, as from getGlobalRomSegmentSettings\n @ In, evaluation, dict, preliminary evaluation from the local segment ROM as {target: [values]}\n @ In, picker, slice, indexer for data range of this segment\n @ Out, evaluation, dict, {target: np.ndarray} adjusted global evaluation\n \"\"\"\n return evaluation\n\n def finalizeGlobalRomSegmentEvaluation(self, settings, evaluation):\n \"\"\"\n Allows any global settings to be applied to the signal collected by the ROMCollection instance.\n Note this is called on the GLOBAL templateROM from the ROMcollection, NOT on the LOCAL supspace segment ROMs!\n @ In, evaluation, dict, {target: np.ndarray} evaluated full (global) signal from ROMCollection\n TODO finish docs\n @ Out, evaluation, dict, {target: np.ndarray} adjusted global evaluation\n \"\"\"\n return evaluation\n ### END ROM Clustering ###\n\n @abc.abstractmethod\n def __trainLocal__(self,featureVals,targetVals):\n \"\"\"\n Perform training on samples in featureVals with responses y.\n For an one-class model, +1 or -1 is returned.\n @ In, featureVals, {array-like, sparse matrix}, shape=[n_samples, n_features],\n an array of input feature values\n @ Out, targetVals, array, shape = [n_samples], an array of output target\n associated with the corresponding points in featureVals\n \"\"\"\n\n @abc.abstractmethod\n def __confidenceLocal__(self,featureVals):\n \"\"\"\n This should return an estimation of the quality of the prediction.\n This could be distance or probability or anything else, the type needs to be declared in the variable cls.qualityEstType\n @ In, featureVals, 2-D numpy array , [n_samples,n_features]\n @ Out, __confidenceLocal__, float, the confidence\n \"\"\"\n\n @abc.abstractmethod\n def __evaluateLocal__(self,featureVals):\n \"\"\"\n @ In, featureVals, np.array, 2-D numpy array [n_samples,n_features]\n @ Out, targetVals , np.array, 1-D numpy array [n_samples]\n \"\"\"\n\n @abc.abstractmethod\n def __resetLocal__(self):\n \"\"\"\n Reset ROM. After this method the ROM should be described only by the initial parameter settings\n @ In, None\n @ Out, None\n \"\"\"\n\n @abc.abstractmethod\n def __returnInitialParametersLocal__(self):\n \"\"\"\n Returns a dictionary with the parameters and their initial values\n @ In, None\n @ Out, params, dict, dictionary of parameter names and initial values\n \"\"\"\n\n @abc.abstractmethod\n def __returnCurrentSettingLocal__(self):\n \"\"\"\n Returns a dictionary with the parameters and their current values\n @ In, None\n @ Out, params, dict, dictionary of parameter names and current values\n \"\"\"\n" ]
[ [ "numpy.array", "numpy.stack", "numpy.asarray" ] ]
silverneko/scikit-image
[ "1f8c74f69e0f9e72c59f180b2fc96d311659c609" ]
[ "skimage/draw/draw.py" ]
[ "# coding: utf-8\nimport numpy as np\n\nfrom .._shared._geometry import polygon_clip\nfrom ._draw import (_coords_inside_image, _line, _line_aa,\n _polygon, _ellipse_perimeter,\n _circle_perimeter, _circle_perimeter_aa,\n _bezier_curve)\n\n\ndef _ellipse_in_shape(shape, center, radii, rotation=0.):\n \"\"\"Generate coordinates of points within ellipse bounded by shape.\n\n Parameters\n ----------\n shape : iterable of ints\n Shape of the input image. Must be length 2.\n center : iterable of floats\n (row, column) position of center inside the given shape.\n radii : iterable of floats\n Size of two half axes (for row and column)\n rotation : float, optional\n Rotation of the ellipse defined by the above, in radians\n in range (-PI, PI), in contra clockwise direction,\n with respect to the column-axis.\n\n Returns\n -------\n rows : iterable of ints\n Row coordinates representing values within the ellipse.\n cols : iterable of ints\n Corresponding column coordinates representing values within the ellipse.\n \"\"\"\n r_lim, c_lim = np.ogrid[0:float(shape[0]), 0:float(shape[1])]\n r_org, c_org = center\n r_rad, c_rad = radii\n rotation %= np.pi\n sin_alpha, cos_alpha = np.sin(rotation), np.cos(rotation)\n r, c = (r_lim - r_org), (c_lim - c_org)\n distances = ((r * cos_alpha + c * sin_alpha) / r_rad) ** 2 \\\n + ((r * sin_alpha - c * cos_alpha) / c_rad) ** 2\n return np.nonzero(distances < 1)\n\n\ndef ellipse(r, c, r_radius, c_radius, shape=None, rotation=0.):\n \"\"\"Generate coordinates of pixels within ellipse.\n\n Parameters\n ----------\n r, c : double\n Centre coordinate of ellipse.\n r_radius, c_radius : double\n Minor and major semi-axes. ``(r/r_radius)**2 + (c/c_radius)**2 = 1``.\n shape : tuple, optional\n Image shape which is used to determine the maximum extent of output pixel\n coordinates. This is useful for ellipses which exceed the image size.\n By default the full extent of the ellipse are used.\n rotation : float, optional (default 0.)\n Set the ellipse rotation (rotation) in range (-PI, PI)\n in contra clock wise direction, so PI/2 degree means swap ellipse axis\n\n Returns\n -------\n rr, cc : ndarray of int\n Pixel coordinates of ellipse.\n May be used to directly index into an array, e.g.\n ``img[rr, cc] = 1``.\n\n Examples\n --------\n >>> from skimage.draw import ellipse\n >>> img = np.zeros((10, 12), dtype=np.uint8)\n >>> rr, cc = ellipse(5, 6, 3, 5, rotation=np.deg2rad(30))\n >>> img[rr, cc] = 1\n >>> img\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n\n Notes\n -----\n The ellipse equation::\n\n ((x * cos(alpha) + y * sin(alpha)) / x_radius) ** 2 +\n ((x * sin(alpha) - y * cos(alpha)) / y_radius) ** 2 = 1\n\n\n Note that the positions of `ellipse` without specified `shape` can have\n also, negative values, as this is correct on the plane. 
On the other hand\n using these ellipse positions for an image afterwards may lead to appearing\n on the other side of image, because ``image[-1, -1] = image[end-1, end-1]``\n\n >>> rr, cc = ellipse(1, 2, 3, 6)\n >>> img = np.zeros((6, 12), dtype=np.uint8)\n >>> img[rr, cc] = 1\n >>> img\n array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1]], dtype=uint8)\n \"\"\"\n\n center = np.array([r, c])\n radii = np.array([r_radius, c_radius])\n # allow just rotation with in range +/- 180 degree\n rotation %= np.pi\n\n # compute rotated radii by given rotation\n r_radius_rot = abs(r_radius * np.cos(rotation)) \\\n + c_radius * np.sin(rotation)\n c_radius_rot = r_radius * np.sin(rotation) \\\n + abs(c_radius * np.cos(rotation))\n # The upper_left and lower_right corners of the smallest rectangle\n # containing the ellipse.\n radii_rot = np.array([r_radius_rot, c_radius_rot])\n upper_left = np.ceil(center - radii_rot).astype(int)\n lower_right = np.floor(center + radii_rot).astype(int)\n\n if shape is not None:\n # Constrain upper_left and lower_right by shape boundary.\n upper_left = np.maximum(upper_left, np.array([0, 0]))\n lower_right = np.minimum(lower_right, np.array(shape[:2]) - 1)\n\n shifted_center = center - upper_left\n bounding_shape = lower_right - upper_left + 1\n\n rr, cc = _ellipse_in_shape(bounding_shape, shifted_center, radii, rotation)\n rr.flags.writeable = True\n cc.flags.writeable = True\n rr += upper_left[0]\n cc += upper_left[1]\n return rr, cc\n\n\ndef circle(r, c, radius, shape=None):\n \"\"\"Generate coordinates of pixels within circle.\n\n Parameters\n ----------\n r, c : double\n Centre coordinate of circle.\n radius : double\n Radius of circle.\n shape : tuple, optional\n Image shape which is used to determine the maximum extent of output\n pixel coordinates. This is useful for circles that exceed the image\n size. If None, the full extent of the circle is used.\n\n Returns\n -------\n rr, cc : ndarray of int\n Pixel coordinates of circle.\n May be used to directly index into an array, e.g.\n ``img[rr, cc] = 1``.\n\n Examples\n --------\n >>> from skimage.draw import circle\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc = circle(4, 4, 5)\n >>> img[rr, cc] = 1\n >>> img\n array([[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n \"\"\"\n return ellipse(r, c, radius, radius, shape)\n\n\ndef polygon_perimeter(r, c, shape=None, clip=False):\n \"\"\"Generate polygon perimeter coordinates.\n\n Parameters\n ----------\n r : (N,) ndarray\n Row coordinates of vertices of polygon.\n c : (N,) ndarray\n Column coordinates of vertices of polygon.\n shape : tuple, optional\n Image shape which is used to determine maximum extents of output pixel\n coordinates. This is useful for polygons that exceed the image size.\n If None, the full extents of the polygon is used.\n clip : bool, optional\n Whether to clip the polygon to the provided shape. 
If this is set\n to True, the drawn figure will always be a closed polygon with all\n edges visible.\n\n Returns\n -------\n rr, cc : ndarray of int\n Pixel coordinates of polygon.\n May be used to directly index into an array, e.g.\n ``img[rr, cc] = 1``.\n\n Examples\n --------\n >>> from skimage.draw import polygon_perimeter\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc = polygon_perimeter([5, -1, 5, 10],\n ... [-1, 5, 11, 5],\n ... shape=img.shape, clip=True)\n >>> img[rr, cc] = 1\n >>> img\n array([[0, 0, 0, 0, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 1, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 1, 1, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]], dtype=uint8)\n\n \"\"\"\n if clip:\n if shape is None:\n raise ValueError(\"Must specify clipping shape\")\n clip_box = np.array([0, 0, shape[0] - 1, shape[1] - 1])\n else:\n clip_box = np.array([np.min(r), np.min(c),\n np.max(r), np.max(c)])\n\n # Do the clipping irrespective of whether clip is set. This\n # ensures that the returned polygon is closed and is an array.\n r, c = polygon_clip(r, c, *clip_box)\n\n r = np.round(r).astype(int)\n c = np.round(c).astype(int)\n\n # Construct line segments\n rr, cc = [], []\n for i in range(len(r) - 1):\n line_r, line_c = line(r[i], c[i], r[i + 1], c[i + 1])\n rr.extend(line_r)\n cc.extend(line_c)\n\n rr = np.asarray(rr)\n cc = np.asarray(cc)\n\n if shape is None:\n return rr, cc\n else:\n return _coords_inside_image(rr, cc, shape)\n\n\ndef set_color(image, coords, color, alpha=1):\n \"\"\"Set pixel color in the image at the given coordinates.\n\n Coordinates that exceed the shape of the image will be ignored.\n\n Parameters\n ----------\n image : (M, N, D) ndarray\n Image\n coords : tuple of ((P,) ndarray, (P,) ndarray)\n Row and column coordinates of pixels to be colored.\n color : (D,) ndarray\n Color to be assigned to coordinates in the image.\n alpha : scalar or (N,) ndarray\n Alpha values used to blend color with image. 
0 is transparent,\n 1 is opaque.\n\n Returns\n -------\n image : (M, N, D) ndarray\n The updated image.\n\n Examples\n --------\n >>> from skimage.draw import line, set_color\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc = line(1, 1, 20, 20)\n >>> set_color(img, (rr, cc), 1)\n >>> img\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=uint8)\n\n \"\"\"\n rr, cc = coords\n\n if image.ndim == 2:\n image = image[..., np.newaxis]\n\n color = np.array(color, ndmin=1, copy=False)\n\n if image.shape[-1] != color.shape[-1]:\n raise ValueError('Color shape ({}) must match last '\n 'image dimension ({}).'.format(color.shape[0],\n image.shape[-1]))\n\n if np.isscalar(alpha):\n # Can be replaced by ``full_like`` when numpy 1.8 becomes\n # minimum dependency\n alpha = np.ones_like(rr) * alpha\n\n rr, cc, alpha = _coords_inside_image(rr, cc, image.shape, val=alpha)\n\n alpha = alpha[..., np.newaxis]\n\n color = color * alpha\n vals = image[rr, cc] * (1 - alpha)\n\n image[rr, cc] = vals + color\n\n\ndef line(r0, c0, r1, c1):\n \"\"\"Generate line pixel coordinates.\n\n Parameters\n ----------\n r0, c0 : int\n Starting position (row, column).\n r1, c1 : int\n End position (row, column).\n\n Returns\n -------\n rr, cc : (N,) ndarray of int\n Indices of pixels that belong to the line.\n May be used to directly index into an array, e.g.\n ``img[rr, cc] = 1``.\n\n Notes\n -----\n Anti-aliased line generator is available with `line_aa`.\n\n Examples\n --------\n >>> from skimage.draw import line\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc = line(1, 1, 8, 8)\n >>> img[rr, cc] = 1\n >>> img\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n \"\"\"\n return _line(r0, c0, r1, c1)\n\n\ndef line_aa(r0, c0, r1, c1):\n \"\"\"Generate anti-aliased line pixel coordinates.\n\n Parameters\n ----------\n r0, c0 : int\n Starting position (row, column).\n r1, c1 : int\n End position (row, column).\n\n Returns\n -------\n rr, cc, val : (N,) ndarray (int, int, float)\n Indices of pixels (`rr`, `cc`) and intensity values (`val`).\n ``img[rr, cc] = val``.\n\n References\n ----------\n .. [1] A Rasterizing Algorithm for Drawing Curves, A. 
Zingl, 2012\n http://members.chello.at/easyfilter/Bresenham.pdf\n\n Examples\n --------\n >>> from skimage.draw import line_aa\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc, val = line_aa(1, 1, 8, 8)\n >>> img[rr, cc] = val * 255\n >>> img\n array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 255, 74, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 74, 255, 74, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 74, 255, 74, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 74, 255, 74, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 74, 255, 74, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 74, 255, 74, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 74, 255, 74, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 74, 255, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n \"\"\"\n return _line_aa(r0, c0, r1, c1)\n\n\ndef polygon(r, c, shape=None):\n \"\"\"Generate coordinates of pixels within polygon.\n\n Parameters\n ----------\n r : (N,) ndarray\n Row coordinates of vertices of polygon.\n c : (N,) ndarray\n Column coordinates of vertices of polygon.\n shape : tuple, optional\n Image shape which is used to determine the maximum extent of output\n pixel coordinates. This is useful for polygons that exceed the image\n size. If None, the full extent of the polygon is used.\n\n Returns\n -------\n rr, cc : ndarray of int\n Pixel coordinates of polygon.\n May be used to directly index into an array, e.g.\n ``img[rr, cc] = 1``.\n\n Examples\n --------\n >>> from skimage.draw import polygon\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> r = np.array([1, 2, 8, 1])\n >>> c = np.array([1, 7, 4, 1])\n >>> rr, cc = polygon(r, c)\n >>> img[rr, cc] = 1\n >>> img\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n\n \"\"\"\n return _polygon(r, c, shape)\n\n\ndef circle_perimeter(r, c, radius, method='bresenham', shape=None):\n \"\"\"Generate circle perimeter coordinates.\n\n Parameters\n ----------\n r, c : int\n Centre coordinate of circle.\n radius: int\n Radius of circle.\n method : {'bresenham', 'andres'}, optional\n bresenham : Bresenham method (default)\n andres : Andres method\n shape : tuple, optional\n Image shape which is used to determine the maximum extent of output\n pixel coordinates. This is useful for circles that exceed the image\n size. If None, the full extent of the circle is used.\n\n Returns\n -------\n rr, cc : (N,) ndarray of int\n Bresenham and Andres' method:\n Indices of pixels that belong to the circle perimeter.\n May be used to directly index into an array, e.g.\n ``img[rr, cc] = 1``.\n\n Notes\n -----\n Andres method presents the advantage that concentric\n circles create a disc whereas Bresenham can make holes. There\n is also less distortions when Andres circles are rotated.\n Bresenham method is also known as midpoint circle algorithm.\n Anti-aliased circle generator is available with `circle_perimeter_aa`.\n\n References\n ----------\n .. [1] J.E. Bresenham, \"Algorithm for computer control of a digital\n plotter\", IBM Systems journal, 4 (1965) 25-30.\n .. [2] E. 
Andres, \"Discrete circles, rings and spheres\", Computers &\n Graphics, 18 (1994) 695-706.\n\n Examples\n --------\n >>> from skimage.draw import circle_perimeter\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc = circle_perimeter(4, 4, 3)\n >>> img[rr, cc] = 1\n >>> img\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 1, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n \"\"\"\n return _circle_perimeter(r, c, radius, method, shape)\n\n\ndef circle_perimeter_aa(r, c, radius, shape=None):\n \"\"\"Generate anti-aliased circle perimeter coordinates.\n\n Parameters\n ----------\n r, c : int\n Centre coordinate of circle.\n radius: int\n Radius of circle.\n shape : tuple, optional\n Image shape which is used to determine the maximum extent of output\n pixel coordinates. This is useful for circles that exceed the image\n size. If None, the full extent of the circle is used.\n\n Returns\n -------\n rr, cc, val : (N,) ndarray (int, int, float)\n Indices of pixels (`rr`, `cc`) and intensity values (`val`).\n ``img[rr, cc] = val``.\n\n Notes\n -----\n Wu's method draws anti-aliased circle. This implementation doesn't use\n lookup table optimization.\n\n References\n ----------\n .. [1] X. Wu, \"An efficient antialiasing technique\", In ACM SIGGRAPH\n Computer Graphics, 25 (1991) 143-152.\n\n Examples\n --------\n >>> from skimage.draw import circle_perimeter_aa\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc, val = circle_perimeter_aa(4, 4, 3)\n >>> img[rr, cc] = val * 255\n >>> img\n array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 60, 211, 255, 211, 60, 0, 0, 0],\n [ 0, 60, 194, 43, 0, 43, 194, 60, 0, 0],\n [ 0, 211, 43, 0, 0, 0, 43, 211, 0, 0],\n [ 0, 255, 0, 0, 0, 0, 0, 255, 0, 0],\n [ 0, 211, 43, 0, 0, 0, 43, 211, 0, 0],\n [ 0, 60, 194, 43, 0, 43, 194, 60, 0, 0],\n [ 0, 0, 60, 211, 255, 211, 60, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n \"\"\"\n return _circle_perimeter_aa(r, c, radius, shape)\n\n\ndef ellipse_perimeter(r, c, r_radius, c_radius, orientation=0, shape=None):\n \"\"\"Generate ellipse perimeter coordinates.\n\n Parameters\n ----------\n r, c : int\n Centre coordinate of ellipse.\n r_radius, c_radius : int\n Minor and major semi-axes. ``(r/r_radius)**2 + (c/c_radius)**2 = 1``.\n orientation : double, optional\n Major axis orientation in clockwise direction as radians.\n shape : tuple, optional\n Image shape which is used to determine the maximum extent of output\n pixel coordinates. This is useful for ellipses that exceed the image\n size. If None, the full extent of the ellipse is used.\n\n Returns\n -------\n rr, cc : (N,) ndarray of int\n Indices of pixels that belong to the ellipse perimeter.\n May be used to directly index into an array, e.g.\n ``img[rr, cc] = 1``.\n\n References\n ----------\n .. [1] A Rasterizing Algorithm for Drawing Curves, A. 
Zingl, 2012\n http://members.chello.at/easyfilter/Bresenham.pdf\n\n Examples\n --------\n >>> from skimage.draw import ellipse_perimeter\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc = ellipse_perimeter(5, 5, 3, 4)\n >>> img[rr, cc] = 1\n >>> img\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 1, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 1, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n\n\n Note that the positions of `ellipse` without specified `shape` can have \n also, negative values, as this is correct on the plane. On the other hand\n using these ellipse positions for an image afterwards may lead to appearing\n on the other side of image, because ``image[-1, -1] = image[end-1, end-1]``\n\n >>> rr, cc = ellipse_perimeter(2, 3, 4, 5)\n >>> img = np.zeros((9, 12), dtype=np.uint8)\n >>> img[rr, cc] = 1\n >>> img\n array([[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]], dtype=uint8)\n \"\"\"\n return _ellipse_perimeter(r, c, r_radius, c_radius, orientation, shape)\n\n\ndef bezier_curve(r0, c0, r1, c1, r2, c2, weight, shape=None):\n \"\"\"Generate Bezier curve coordinates.\n\n Parameters\n ----------\n r0, c0 : int\n Coordinates of the first control point.\n r1, c1 : int\n Coordinates of the middle control point.\n r2, c2 : int\n Coordinates of the last control point.\n weight : double\n Middle control point weight, it describes the line tension.\n shape : tuple, optional\n Image shape which is used to determine the maximum extent of output\n pixel coordinates. This is useful for curves that exceed the image\n size. If None, the full extent of the curve is used.\n\n Returns\n -------\n rr, cc : (N,) ndarray of int\n Indices of pixels that belong to the Bezier curve.\n May be used to directly index into an array, e.g.\n ``img[rr, cc] = 1``.\n\n Notes\n -----\n The algorithm is the rational quadratic algorithm presented in\n reference [1]_.\n\n References\n ----------\n .. [1] A Rasterizing Algorithm for Drawing Curves, A. Zingl, 2012\n http://members.chello.at/easyfilter/Bresenham.pdf\n\n Examples\n --------\n >>> import numpy as np\n >>> from skimage.draw import bezier_curve\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc = bezier_curve(1, 5, 5, -2, 8, 8, 2)\n >>> img[rr, cc] = 1\n >>> img\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n \"\"\"\n return _bezier_curve(r0, c0, r1, c1, r2, c2, weight, shape)\n" ]
[ [ "numpy.max", "numpy.sin", "numpy.array", "numpy.ceil", "numpy.asarray", "numpy.ones_like", "numpy.round", "numpy.min", "numpy.nonzero", "numpy.isscalar", "numpy.cos", "numpy.floor" ] ]
changjianhui/SPM
[ "4547db9559feedb0d168442862616de427c82094" ]
[ "models/networks/generator.py" ]
[ "\"\"\"\nCopyright (C) 2019 NVIDIA Corporation. All rights reserved.\nLicensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).\n\"\"\"\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.nn.utils import spectral_norm\nfrom models.networks.architecture import STYLEResnetBlock as STYLEResnetBlock\nfrom models.networks.base_network import BaseNetwork\nfrom models.networks.normalization import get_nonspade_norm_layer, STYLE\nfrom util import util\n\n\n# spectralstylesyncbatch3x3\n\nclass STYLEGenerator(BaseNetwork):\n @staticmethod\n def modify_commandline_options(parser, is_train):\n parser.set_defaults(norm_G='spectralstyleinstance3x3')\n parser.add_argument('--num_upsampling_layers',\n choices=('less', 'normal', 'more', 'most'), default='normal',\n help=\"If 'more', adds upsampling layer between the two middle resnet blocks. If 'most', also add one more upsampling + resnet layer at the end of the generator\")\n\n return parser\n\n def __init__(self, opt):\n super().__init__()\n self.opt = opt\n nf = opt.ngf # 64\n\n self.sw, self.sh, self.num_upsampling_layers = self.compute_latent_vector_size(opt)\n\n self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)\n\n self.head_0 = STYLEResnetBlock(16 * nf, 16 * nf, opt)\n\n self.G_middle_0 = STYLEResnetBlock(16 * nf, 16 * nf, opt)\n self.G_middle_1 = STYLEResnetBlock(16 * nf, 16 * nf, opt)\n\n self.up_0 = STYLEResnetBlock(16 * nf, 8 * nf, opt)\n self.up_1 = STYLEResnetBlock(8 * nf, 4 * nf, opt)\n self.up_2 = STYLEResnetBlock(4 * nf, 2 * nf, opt)\n self.up_3 = STYLEResnetBlock(2 * nf, 1 * nf, opt)\n\n final_nc = nf\n\n if opt.num_upsampling_layers == 'most':\n self.up_4 = STYLEResnetBlock(1 * nf, nf // 2, opt)\n final_nc = nf // 2\n\n self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)\n\n self.up = nn.Upsample(scale_factor=2, mode='bicubic')\n\n def compute_latent_vector_size(self, opt):\n if opt.num_upsampling_layers == 'normal':\n num_up_layers = 5\n elif opt.num_upsampling_layers == 'more':\n num_up_layers = 6\n elif opt.num_upsampling_layers == 'most':\n num_up_layers = 7\n elif opt.num_upsampling_layers == 'less':\n num_up_layers = 3\n else:\n raise ValueError('opt.num_upsampling_layers [%s] not recognized' %\n opt.num_upsampling_layers)\n\n sw = opt.crop_size // (2 ** num_up_layers)\n sh = round(sw / opt.aspect_ratio)\n\n return sw, sh, num_up_layers\n\n def forward(self, input, st, img_dims=None):\n seg = input\n\n if img_dims is None:\n sh, sw = self.sh, self.sw\n else:\n factor = 2 ** self.num_upsampling_layers\n seg = util.pad_factor(seg, seg.size()[2:], factor)\n sh, sw = seg.size()[2] // factor, seg.size()[3] // factor\n\n if self.opt.label_type != 'edge':\n x = F.interpolate(seg, size=(sh, sw), mode='nearest')\n else:\n x = F.interpolate(seg, size=(sh, sw), mode='bilinear')\n\n x = self.fc(x)\n\n x = self.head_0(x, seg, st)\n x = self.up(x)\n\n if self.opt.num_upsampling_layers != 'less':\n x = self.G_middle_0(x, seg, st)\n\n if self.opt.num_upsampling_layers == 'more' or \\\n self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n\n if self.opt.num_upsampling_layers != 'less':\n x = self.G_middle_1(x, seg, st)\n x = self.up(x)\n\n x = self.up_0(x, seg, st)\n x = self.up(x)\n\n x = self.up_1(x, seg, st)\n x = self.up(x)\n\n x = self.up_2(x, seg, st)\n\n if self.opt.num_upsampling_layers != 'less':\n x = self.up(x)\n\n x = self.up_3(x, seg, st)\n\n if self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n x = 
self.up_4(x, seg, st)\n\n x = self.conv_img(F.leaky_relu(x, 2e-1, inplace=True))\n\n x = F.tanh(x)\n\n if img_dims is not None:\n x = x[:, :, :img_dims[1], :img_dims[2]]\n\n return x\n" ]
[ [ "torch.nn.functional.interpolate", "torch.nn.Upsample", "torch.nn.Conv2d", "torch.nn.functional.tanh", "torch.nn.functional.leaky_relu" ] ]
btaba/text-image-embedding
[ "a5f16e1f89649eb7bc380b60c599d56eb3f91ae1" ]
[ "src/tests/test_encoding.py" ]
[ "import sys\nimport os\nimport numpy as np\n\nsys.path.insert(0, os.getcwd())\n\n\nfrom utils.encoding import tokenize, TokenizeTransformer\nfrom utils.encoding import AverageWordTokenTransformer\n\n\ndef test_tokenize():\n assert tokenize('hey bro you', {'you'}) == ['hey', 'bro']\n\n\ndef test_tokenize_transform():\n assert list(TokenizeTransformer().transform(['hey bro'])) == [['hey', 'bro']]\n\n\ndef test_average_word_token():\n w = {'a': [1, 0], 'b': [0, 1]}\n r = AverageWordTokenTransformer(w).transform([['a', 'b']])\n assert np.all(r == np.array([[0.5, 0.5]]))\n" ]
[ [ "numpy.array" ] ]
MTDzi/real-estate-pipeline-and-model
[ "cd3754f1aa71a69bf6c8a7b599cc11e16a8bd0e2" ]
[ "plugins/quality_checks.py" ]
[ "from pathlib import Path\nimport pandas as pd\nimport logging\n\n\ndef check_if_file_exists(filepath: str, additional_message: str = '') -> None:\n if not Path(filepath).exists():\n raise ValueError(f'The file\\n\\t\"{filepath}\"\\ndoes not exist\\n{additional_message}')\n\n\ndef check_nullability(\n parquet_path: Path,\n city: str,\n critical_null_percentage: float,\n warning_null_percentage: float,\n ds: str,\n **kwargs,\n) -> None:\n \"\"\"\n This function:\n 1) reads in the parquet_filepath\n 2) calculates the percentages of null values for each column\n 3) checks which (if any) columns have more than critica_null_percentage null values\n 4) checks which (if any) columns have more than warning_null_percentage null values\n \"\"\"\n year_month_day = ds.replace('-', '')\n parquet_filepath = Path(parquet_path) / f'{city}_{year_month_day}.parquet'\n df = pd.read_parquet(parquet_filepath)\n null_percentages_per_column = 100 * df.isnull().mean().round(2)\n above_critical = (null_percentages_per_column > critical_null_percentage)\n if any(above_critical):\n error_msg = (\n f'The following columns had more than {critical_null_percentage}% values missing:\\n'\n f'{df.columns[above_critical == True].tolist()}'\n )\n raise ValueError(error_msg)\n\n above_warning = (null_percentages_per_column > warning_null_percentage)\n if any(above_warning):\n warning_msg = (\n f'The following columns had more than {warning_null_percentage}% values missing:\\n'\n f'{df.columns[above_warning == True].tolist()}'\n )\n logging.warning(warning_msg)\n" ]
[ [ "pandas.read_parquet" ] ]
Rushi21-kesh/Analyzing-real-time-data-with-spark-streaming-and-kafka
[ "ad730ba74a2972c1e713d78c79ebcfedea1e0391" ]
[ "code/output.py" ]
[ "import json\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nfrom kafka import KafkaConsumer\nimport time\nimport threading\nplt_val_x=[]\nplt_val_temp=[]\nplt_val_apptemp=[]\nplt_val_pres=[]\nplt_val_rh=[]\ncity_name='Chennai'\n\ndef animate(i):\n global plt_val_x, plt_val_temp, plt_val_apptemp, plt_val_pres, plt_val_rh\n\n axs[0, 0].cla()\n axs[0, 1].cla()\n axs[1, 0].cla()\n axs[1, 1].cla() \n axs[0, 0].plot(plt_val_x, plt_val_temp,color='blue')\n axs[0, 0].set_title('Temperature')\n axs[0, 1].plot(plt_val_x, plt_val_apptemp, color='green')\n axs[0, 1].set_title('Feels like')\n axs[1, 0].plot(plt_val_x, plt_val_pres, color = 'red')\n axs[1, 0].set_title('Pressure')\n axs[1, 1].plot(plt_val_x, plt_val_rh, color = 'purple')\n axs[1, 1].set_title('Relative Humidity')\n\n\nplt.style.use('fivethirtyeight')\n\ndef plot():\n global consumer\n count = 0\n for message in consumer:\n if(message.value['city'] == city_name):\n plt_val_x.append(int(count))\n count += 5\n plt_val_temp.append(float(message.value[\"temp\"]))\n plt_val_apptemp.append(float(message.value[\"app_temp\"]))\n plt_val_pres.append(float(message.value[\"pres\"]))\n plt_val_rh.append(float(message.value[\"rh\"]))\n\nconsumer = KafkaConsumer('output',bootstrap_servers=['localhost:9092'], value_deserializer=lambda x: json.loads(x.decode('utf-8')))\n\nplot_thread = threading.Thread(target=plot).start()\n\ntime.sleep(10)\nfig, axs = plt.subplots(2, 2, figsize=(12,6), num = city_name) \nani = FuncAnimation(plt.gcf(), animate, interval=1000) \nplt.tight_layout()\nplt.show()\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.pyplot.gcf", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.style.use", "matplotlib.pyplot.show" ] ]
cah-cesar-medrano/pyTorch-Example-Classifier
[ "008868cab339b89dd68bf469fdb12f846435749a" ]
[ "Classifier/image_show.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\n# Lets show some of the training images for fun\n\n# functions to show an image\n\n\ndef imshow(img):\n img = img / 2 + 0.5 # Unnormalize\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.show()\n\n\ndef peek(trainloader, torchvision, classes):\n # get some random training images\n dataiter = iter(trainloader)\n images, labels = dataiter.next()\n\n # show image\n imshow(torchvision.utils.make_grid(images))\n\n # print labels\n print(' '.join('%5s' % classes[labels[j]] for j in range(4)))\n" ]
[ [ "matplotlib.pyplot.show", "numpy.transpose" ] ]
ds-praveenkumar/ds-project-template
[ "2f55ca99d4c238ce947d594c2bce883676ff4e49" ]
[ "src/prepare/csv_loader.py" ]
[ "#\n# Created on Thu Nov 05 2020 1:29:10 AM\n#\n# author: Praveen Kumar\n#\n# github url: https://github.com/ds-praveenkumar/\n#\n# filename: csv_loader.py\n#\n\n\"\"\"\nLoads the csv data from data/raw dir\n\"\"\"\nimport pandas as pd\nimport sys\nsys.path.append(\".\")\nfrom config import config\n\n\nclass CSVLoader:\n \"\"\" loades all the csv files in a folder \"\"\"\n\n def __init__(self, ROOT):\n self.ROOT = ROOT\n self.data_dict = {}\n\n def read_csv(self):\n \"\"\" Reads all the csv files\"\"\"\n\n print(\"reading csv files...\")\n for file_path in self.ROOT.glob(\"*.csv\"):\n print(file_path.name)\n df_name = \"df_\"+str(file_path.name).split(\".\")[0]\n self.data_dict[df_name] = pd.read_csv(file_path)\n if self.data_dict:\n print(\"dataframes created:\\n\")\n print(self.data_dict.keys())\n return self.data_dict\n else:\n print(f\"No .csv files present in {self.ROOT.as_posix()}\")\n\n\nif __name__ == \"__main__\":\n main = CSVLoader(config.Config.RAW_DATA)\n main.read_csv()\n" ]
[ [ "pandas.read_csv" ] ]
ubolonton/tensorflow-serving
[ "e0610365dc59c548f9a9eedc59f89b41303bb193" ]
[ "tensorflow_serving/example/inception_export.py" ]
[ "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n#!/usr/bin/env python2.7\n\"\"\"Export inception model given existing training checkpoints.\n\nThe model is exported with proper signatures that can be loaded by standard\ntensorflow_model_server.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os.path\n\n# This is a placeholder for a Google-internal import.\n\nimport tensorflow as tf\n\nfrom tensorflow.contrib.session_bundle import exporter\nfrom inception import inception_model\n\n\ntf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/inception_train',\n \"\"\"Directory where to read training checkpoints.\"\"\")\ntf.app.flags.DEFINE_string('export_dir', '/tmp/inception_export',\n \"\"\"Directory where to export inference model.\"\"\")\ntf.app.flags.DEFINE_integer('image_size', 299,\n \"\"\"Needs to provide same value as in training.\"\"\")\nFLAGS = tf.app.flags.FLAGS\n\n\nNUM_CLASSES = 1000\nNUM_TOP_CLASSES = 5\n\nWORKING_DIR = os.path.dirname(os.path.realpath(__file__))\nSYNSET_FILE = os.path.join(WORKING_DIR, 'imagenet_lsvrc_2015_synsets.txt')\nMETADATA_FILE = os.path.join(WORKING_DIR, 'imagenet_metadata.txt')\n\n\ndef export():\n # Create index->synset mapping\n synsets = []\n with open(SYNSET_FILE) as f:\n synsets = f.read().splitlines()\n # Create synset->metadata mapping\n texts = {}\n with open(METADATA_FILE) as f:\n for line in f.read().splitlines():\n parts = line.split('\\t')\n assert len(parts) == 2\n texts[parts[0]] = parts[1]\n\n with tf.Graph().as_default():\n # Build inference model.\n # Please refer to Tensorflow inception model for details.\n\n # Input transformation.\n serialized_tf_example = tf.placeholder(tf.string, name='tf_example')\n feature_configs = {\n 'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),\n }\n tf_example = tf.parse_example(serialized_tf_example, feature_configs)\n jpegs = tf_example['image/encoded']\n images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)\n\n # Run inference.\n logits, _ = inception_model.inference(images, NUM_CLASSES + 1)\n\n # Transform output to topK result.\n values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)\n\n # Create a constant string Tensor where the i'th element is\n # the human readable class description for the i'th index.\n # Note that the 0th index is an unused background class\n # (see inception model definition code).\n class_descriptions = ['unused background']\n for s in synsets:\n class_descriptions.append(texts[s])\n class_tensor = tf.constant(class_descriptions)\n\n classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),\n mapping=class_tensor)\n\n # Restore variables from training checkpoint.\n variable_averages = tf.train.ExponentialMovingAverage(\n inception_model.MOVING_AVERAGE_DECAY)\n variables_to_restore = variable_averages.variables_to_restore()\n saver = tf.train.Saver(variables_to_restore)\n with tf.Session() as sess:\n # 
Restore variables from training checkpoints.\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n # Assuming model_checkpoint_path looks something like:\n # /my-favorite-path/imagenet_train/model.ckpt-0,\n # extract global_step from it.\n global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n print('Successfully loaded model from %s at step=%s.' %\n (ckpt.model_checkpoint_path, global_step))\n else:\n print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)\n return\n\n # Export inference model.\n init_op = tf.group(tf.initialize_all_tables(), name='init_op')\n classification_signature = exporter.classification_signature(\n input_tensor=serialized_tf_example,\n classes_tensor=classes,\n scores_tensor=values)\n named_graph_signature = {\n 'inputs': exporter.generic_signature({'images': jpegs}),\n 'outputs': exporter.generic_signature({\n 'classes': classes,\n 'scores': values\n })}\n model_exporter = exporter.Exporter(saver)\n model_exporter.init(\n init_op=init_op,\n default_graph_signature=classification_signature,\n named_graph_signatures=named_graph_signature)\n model_exporter.export(FLAGS.export_dir, tf.constant(global_step), sess)\n print('Successfully exported model to %s' % FLAGS.export_dir)\n\n\ndef preprocess_image(image_buffer):\n \"\"\"Preprocess JPEG encoded bytes to 3D float Tensor.\"\"\"\n\n # Decode the string as an RGB JPEG.\n # Note that the resulting image contains an unknown height and width\n # that is set dynamically by decode_jpeg. In other words, the height\n # and width of image is unknown at compile-time.\n image = tf.image.decode_jpeg(image_buffer, channels=3)\n # After this point, all image pixels reside in [0,1)\n # until the very end, when they're rescaled to (-1, 1). The various\n # adjust_* ops all require this range for dtype float.\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n # Crop the central region of the image with an area containing 87.5% of\n # the original image.\n image = tf.image.central_crop(image, central_fraction=0.875)\n # Resize the image to the original height and width.\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_bilinear(image,\n [FLAGS.image_size, FLAGS.image_size],\n align_corners=False)\n image = tf.squeeze(image, [0])\n # Finally, rescale to [-1,1] instead of [0, 1)\n image = tf.sub(image, 0.5)\n image = tf.mul(image, 2.0)\n return image\n\n\ndef main(unused_argv=None):\n export()\n\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.image.central_crop", "tensorflow.image.resize_bilinear", "tensorflow.train.get_checkpoint_state", "tensorflow.mul", "tensorflow.initialize_all_tables", "tensorflow.image.decode_jpeg", "tensorflow.FixedLenFeature", "tensorflow.train.Saver", "tensorflow.constant", "tensorflow.squeeze", "tensorflow.app.run", "tensorflow.contrib.session_bundle.exporter.generic_signature", "tensorflow.parse_example", "tensorflow.expand_dims", "tensorflow.to_int64", "tensorflow.image.convert_image_dtype", "tensorflow.Session", "tensorflow.map_fn", "tensorflow.placeholder", "tensorflow.nn.top_k", "tensorflow.app.flags.DEFINE_integer", "tensorflow.app.flags.DEFINE_string", "tensorflow.contrib.session_bundle.exporter.classification_signature", "tensorflow.Graph", "tensorflow.contrib.session_bundle.exporter.Exporter", "tensorflow.train.ExponentialMovingAverage", "tensorflow.sub" ] ]
dynamicwebpaige/estimator
[ "ef33bb79ea0831527bea80c572d2ee9cc1fe6065" ]
[ "tensorflow_estimator/python/estimator/canned/kmeans.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A canned Estimator for k-means clustering.\"\"\"\n\n# TODO(ccolby): Move clustering_ops.py into this file and streamline the code.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nimport numpy as np\n\nfrom tensorflow.python.feature_column import feature_column_lib as fc\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import clustering_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import metrics\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import session_run_hook\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.util.tf_export import estimator_export\nfrom tensorflow_estimator.python.estimator import estimator\nfrom tensorflow_estimator.python.estimator import model_fn as model_fn_lib\nfrom tensorflow_estimator.python.estimator.export import export_output\n\n\nclass _LossRelativeChangeHook(session_run_hook.SessionRunHook):\n \"\"\"Stops when the change in loss goes below a tolerance.\"\"\"\n\n def __init__(self, loss_tensor, tolerance):\n \"\"\"Creates a _LossRelativeChangeHook.\n\n Args:\n loss_tensor: A scalar tensor of the loss value.\n tolerance: A relative tolerance of loss change between iterations.\n \"\"\"\n self._loss_tensor = loss_tensor\n self._tolerance = tolerance\n self._prev_loss = None\n\n def before_run(self, run_context):\n del run_context # unused\n return session_run_hook.SessionRunArgs(self._loss_tensor)\n\n def after_run(self, run_context, run_values):\n loss = run_values.results\n assert loss is not None\n if self._prev_loss:\n relative_change = (\n abs(loss - self._prev_loss) / (1 + abs(self._prev_loss)))\n if relative_change < self._tolerance:\n run_context.request_stop()\n self._prev_loss = loss\n\n\nclass _InitializeClustersHook(session_run_hook.SessionRunHook):\n \"\"\"Initializes the cluster centers.\n\n The chief repeatedly invokes an initialization op until all cluster centers\n are initialized. 
The workers wait for the initialization phase to complete.\n \"\"\"\n\n def __init__(self, init_op, is_initialized_var, is_chief):\n \"\"\"Creates an _InitializeClustersHook.\n\n Args:\n init_op: An op that, when run, will choose some initial cluster centers.\n This op may need to be run multiple times to choose all the centers.\n is_initialized_var: A boolean variable reporting whether all initial\n centers have been chosen.\n is_chief: A boolean specifying whether this task is the chief.\n \"\"\"\n self._init_op = init_op\n self._is_initialized_var = is_initialized_var\n self._is_chief = is_chief\n\n def after_create_session(self, session, coord):\n del coord # unused\n assert self._init_op.graph is ops.get_default_graph()\n assert self._is_initialized_var.graph is self._init_op.graph\n while True:\n try:\n if session.run(self._is_initialized_var):\n break\n elif self._is_chief:\n session.run(self._init_op)\n else:\n time.sleep(1)\n except RuntimeError as e:\n logging.info(e)\n\n\ndef _parse_features_if_necessary(features, feature_columns):\n \"\"\"Helper function to convert the input points into a usable format.\n\n Args:\n features: The input features.\n feature_columns: An optionable iterable containing all the feature columns\n used by the model. All items in the set should be feature column instances\n that can be passed to `tf.feature_column.input_layer`. If this is None,\n all features will be used.\n\n Returns:\n If `features` is a dict of `k` features (optionally filtered by\n `feature_columns`), each of which is a vector of `n` scalars, the return\n value is a Tensor of shape `(n, k)` representing `n` input points, where the\n items in the `k` dimension are sorted lexicographically by `features` key.\n If `features` is not a dict, it is returned unmodified.\n \"\"\"\n if not isinstance(features, dict):\n return features\n\n if feature_columns:\n return fc.input_layer(features, feature_columns)\n\n keys = sorted(features.keys())\n with ops.colocate_with(features[keys[0]]):\n return array_ops.concat([features[k] for k in keys], axis=1)\n\n\nclass _ModelFn(object):\n \"\"\"Model function for the estimator.\"\"\"\n\n def __init__(self, num_clusters, initial_clusters, distance_metric, seed,\n use_mini_batch, mini_batch_steps_per_iteration,\n kmeans_plus_plus_num_retries, relative_tolerance,\n feature_columns):\n self._num_clusters = num_clusters\n self._initial_clusters = initial_clusters\n self._distance_metric = distance_metric\n self._seed = seed\n self._use_mini_batch = use_mini_batch\n self._mini_batch_steps_per_iteration = mini_batch_steps_per_iteration\n self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries\n self._relative_tolerance = relative_tolerance\n self._feature_columns = feature_columns\n\n def model_fn(self, features, mode, config):\n \"\"\"Model function for the estimator.\n\n Note that this does not take a `labels` arg. This works, but `input_fn` must\n return either `features` or, equivalently, `(features, None)`.\n\n Args:\n features: The input points. 
See `tf.estimator.Estimator`.\n mode: See `tf.estimator.Estimator`.\n config: See `tf.estimator.Estimator`.\n\n Returns:\n A `tf.estimator.EstimatorSpec` (see `tf.estimator.Estimator`) specifying\n this behavior:\n * `train_op`: Execute one mini-batch or full-batch run of Lloyd's\n algorithm.\n * `loss`: The sum of the squared distances from each input point to its\n closest center.\n * `eval_metric_ops`: Maps `SCORE` to `loss`.\n * `predictions`: Maps `ALL_DISTANCES` to the distance from each input\n point to each cluster center; maps `CLUSTER_INDEX` to the index of\n the closest cluster center for each input point.\n \"\"\"\n # input_points is a single Tensor. Therefore, the sharding functionality\n # in clustering_ops is unused, and some of the values below are lists of a\n # single item.\n input_points = _parse_features_if_necessary(features, self._feature_columns)\n\n # Let N = the number of input_points.\n # all_distances: A list of one matrix of shape (N, num_clusters). Each value\n # is the distance from an input point to a cluster center.\n # model_predictions: A list of one vector of shape (N). Each value is the\n # cluster id of an input point.\n # losses: Similar to cluster_idx but provides the distance to the cluster\n # center.\n # is_initialized: scalar indicating whether the initial cluster centers\n # have been chosen; see init_op.\n # init_op: an op to choose the initial cluster centers. A single worker\n # repeatedly executes init_op until is_initialized becomes True.\n # training_op: an op that runs an iteration of training, either an entire\n # Lloyd iteration or a mini-batch of a Lloyd iteration. Multiple workers\n # may execute this op, but only after is_initialized becomes True.\n (all_distances, model_predictions, losses, is_initialized, init_op,\n training_op) = clustering_ops.KMeans(\n inputs=input_points,\n num_clusters=self._num_clusters,\n initial_clusters=self._initial_clusters,\n distance_metric=self._distance_metric,\n use_mini_batch=self._use_mini_batch,\n mini_batch_steps_per_iteration=self._mini_batch_steps_per_iteration,\n random_seed=self._seed,\n kmeans_plus_plus_num_retries=self._kmeans_plus_plus_num_retries\n ).training_graph()\n\n loss = math_ops.reduce_sum(losses)\n summary.scalar('loss/raw', loss)\n\n incr_step = state_ops.assign_add(training_util.get_global_step(), 1)\n training_op = control_flow_ops.with_dependencies([training_op, incr_step],\n loss)\n\n training_hooks = [\n _InitializeClustersHook(init_op, is_initialized, config.is_chief)\n ]\n if self._relative_tolerance is not None:\n training_hooks.append(\n _LossRelativeChangeHook(loss, self._relative_tolerance))\n\n export_outputs = {\n KMeansClustering.ALL_DISTANCES:\n export_output.PredictOutput(all_distances[0]),\n KMeansClustering.CLUSTER_INDEX:\n export_output.PredictOutput(model_predictions[0]),\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n export_output.PredictOutput(model_predictions[0])\n }\n\n return model_fn_lib.EstimatorSpec(\n mode=mode,\n predictions={\n KMeansClustering.ALL_DISTANCES: all_distances[0],\n KMeansClustering.CLUSTER_INDEX: model_predictions[0],\n },\n loss=loss,\n train_op=training_op,\n eval_metric_ops={KMeansClustering.SCORE: metrics.mean(loss)},\n training_hooks=training_hooks,\n export_outputs=export_outputs)\n\n\n# TODO(agarwal,ands): support sharded input.\n@estimator_export(v1=['estimator.experimental.KMeans'])\nclass KMeansClustering(estimator.Estimator):\n \"\"\"An Estimator for K-Means clustering.\n\n Example:\n ```\n import numpy as np\n 
import tensorflow as tf\n\n num_points = 100\n dimensions = 2\n points = np.random.uniform(0, 1000, [num_points, dimensions])\n\n def input_fn():\n return tf.train.limit_epochs(\n tf.convert_to_tensor(points, dtype=tf.float32), num_epochs=1)\n\n num_clusters = 5\n kmeans = tf.estimator.experimental.KMeans(\n num_clusters=num_clusters, use_mini_batch=False)\n\n # train\n num_iterations = 10\n previous_centers = None\n for _ in xrange(num_iterations):\n kmeans.train(input_fn)\n cluster_centers = kmeans.cluster_centers()\n if previous_centers is not None:\n print 'delta:', cluster_centers - previous_centers\n previous_centers = cluster_centers\n print 'score:', kmeans.score(input_fn)\n print 'cluster centers:', cluster_centers\n\n # map the input points to their clusters\n cluster_indices = list(kmeans.predict_cluster_index(input_fn))\n for i, point in enumerate(points):\n cluster_index = cluster_indices[i]\n center = cluster_centers[cluster_index]\n print 'point:', point, 'is in cluster', cluster_index, 'centered at', center\n ```\n\n The `SavedModel` saved by the `export_savedmodel` method does not include the\n cluster centers. However, the cluster centers may be retrieved by the\n latest checkpoint saved during training. Specifically,\n ```\n kmeans.cluster_centers()\n ```\n is equivalent to\n ```\n tf.train.load_variable(\n kmeans.model_dir, KMeansClustering.CLUSTER_CENTERS_VAR_NAME)\n ```\n \"\"\"\n\n # Valid values for the distance_metric constructor argument.\n SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE\n COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE\n\n # Values for initial_clusters constructor argument.\n RANDOM_INIT = clustering_ops.RANDOM_INIT\n KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT\n\n # Metric returned by evaluate(): The sum of the squared distances from each\n # input point to its closest center.\n SCORE = 'score'\n\n # Keys returned by predict().\n # ALL_DISTANCES: The distance from each input point to each cluster center.\n # CLUSTER_INDEX: The index of the closest cluster center for each input point.\n CLUSTER_INDEX = 'cluster_index'\n ALL_DISTANCES = 'all_distances'\n\n # Variable name used by cluster_centers().\n CLUSTER_CENTERS_VAR_NAME = clustering_ops.CLUSTERS_VAR_NAME\n\n def __init__(self,\n num_clusters,\n model_dir=None,\n initial_clusters=RANDOM_INIT,\n distance_metric=SQUARED_EUCLIDEAN_DISTANCE,\n seed=None,\n use_mini_batch=True,\n mini_batch_steps_per_iteration=1,\n kmeans_plus_plus_num_retries=2,\n relative_tolerance=None,\n config=None,\n feature_columns=None):\n r\"\"\"Creates an Estimator for running KMeans training and inference.\n\n This Estimator implements the following variants of the K-means algorithm:\n\n If `use_mini_batch` is False, it runs standard full batch K-means. Each\n training step runs a single iteration of K-Means and must process the full\n input at once. To run in this mode, the `input_fn` passed to `train` must\n return the entire input dataset.\n\n If `use_mini_batch` is True, it runs a generalization of the mini-batch\n K-means algorithm. It runs multiple iterations, where each iteration is\n composed of `mini_batch_steps_per_iteration` steps. Each training step\n accumulates the contribution from one mini-batch into temporary storage.\n Every `mini_batch_steps_per_iteration` steps, the cluster centers are\n updated and the temporary storage cleared for the next iteration.\n For example: the entire dataset contains 64k examples, where the batch size\n is 64. 
User can choose mini_batch_steps_per_iteration = 100 to run 10% of\n the entire data every iteration in order to update the cluster centers.\n Note that:\n * If `mini_batch_steps_per_iteration=1`, the algorithm reduces to the\n standard K-means mini-batch algorithm.\n * If `mini_batch_steps_per_iteration = num_inputs / batch_size`, the\n algorithm becomes an asynchronous version of the full-batch algorithm.\n However, there is no guarantee by this implementation that each input\n is seen exactly once per iteration. Also, different updates are applied\n asynchronously without locking. So this asynchronous version may not\n behave exactly like a full-batch version.\n\n Args:\n num_clusters: An integer tensor specifying the number of clusters. This\n argument is ignored if `initial_clusters` is a tensor or numpy array.\n model_dir: The directory to save the model results and log files.\n initial_clusters: Specifies how the initial cluster centers are chosen.\n One of the following: * a tensor or numpy array with the initial cluster\n centers. * a callable `f(inputs, k)` that selects and returns up to\n `k` centers from an input batch. `f` is free to return any number of\n centers from `0` to `k`. It will be invoked on successive input\n batches as necessary until all `num_clusters` centers are chosen.\n * `KMeansClustering.RANDOM_INIT`: Choose centers randomly from an input\n batch. If the batch size is less than `num_clusters` then the entire\n batch is chosen to be initial cluster centers and the remaining\n centers are chosen from successive input batches.\n * `KMeansClustering.KMEANS_PLUS_PLUS_INIT`: Use kmeans++ to choose\n centers from the first input batch. If the batch size is less than\n `num_clusters`, a TensorFlow runtime error occurs.\n distance_metric: The distance metric used for clustering. One of:\n * `KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`: Euclidean distance\n between vectors `u` and `v` is defined as \\\\(||u - v||_2\\\\) which is\n the square root of the sum of the absolute squares of the elements'\n difference.\n * `KMeansClustering.COSINE_DISTANCE`: Cosine distance between vectors\n `u` and `v` is defined as \\\\(1 - (u . v) / (||u||_2 ||v||_2)\\\\).\n seed: Python integer. Seed for PRNG used to initialize centers.\n use_mini_batch: A boolean specifying whether to use the mini-batch k-means\n algorithm. See explanation above.\n mini_batch_steps_per_iteration: The number of steps after which the\n updated cluster centers are synced back to a master copy. Used only if\n `use_mini_batch=True`. See explanation above.\n kmeans_plus_plus_num_retries: For each point that is sampled during\n kmeans++ initialization, this parameter specifies the number of\n additional points to draw from the current distribution before selecting\n the best. If a negative value is specified, a heuristic is used to\n sample `O(log(num_to_sample))` additional points. Used only if\n `initial_clusters=KMeansClustering.KMEANS_PLUS_PLUS_INIT`.\n relative_tolerance: A relative tolerance of change in the loss between\n iterations. Stops learning if the loss changes less than this amount.\n This may not work correctly if `use_mini_batch=True`.\n config: See `tf.estimator.Estimator`.\n feature_columns: An optionable iterable containing all the feature columns\n used by the model. All items in the set should be feature column\n instances that can be passed to `tf.feature_column.input_layer`. 
If this\n is None, all features will be used.\n\n Raises:\n ValueError: An invalid argument was passed to `initial_clusters` or\n `distance_metric`.\n \"\"\"\n if isinstance(initial_clusters, str) and initial_clusters not in [\n KMeansClustering.RANDOM_INIT, KMeansClustering.KMEANS_PLUS_PLUS_INIT\n ]:\n raise ValueError(\n \"Unsupported initialization algorithm '%s'\" % initial_clusters)\n if distance_metric not in [\n KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,\n KMeansClustering.COSINE_DISTANCE\n ]:\n raise ValueError(\"Unsupported distance metric '%s'\" % distance_metric)\n self._distance_metric = distance_metric\n super(KMeansClustering, self).__init__(\n model_fn=_ModelFn(num_clusters, initial_clusters, distance_metric, seed,\n use_mini_batch, mini_batch_steps_per_iteration,\n kmeans_plus_plus_num_retries, relative_tolerance,\n feature_columns).model_fn,\n model_dir=model_dir,\n config=config)\n\n def _predict_one_key(self, input_fn, predict_key):\n for result in self.predict(input_fn=input_fn, predict_keys=[predict_key]):\n yield result[predict_key]\n\n def predict_cluster_index(self, input_fn):\n \"\"\"Finds the index of the closest cluster center to each input point.\n\n Args:\n input_fn: Input points. See `tf.estimator.Estimator.predict`.\n\n Yields:\n The index of the closest cluster center for each input point.\n \"\"\"\n for index in self._predict_one_key(input_fn,\n KMeansClustering.CLUSTER_INDEX):\n yield index\n\n def score(self, input_fn):\n \"\"\"Returns the sum of squared distances to nearest clusters.\n\n Note that this function is different from the corresponding one in sklearn\n which returns the negative sum.\n\n Args:\n input_fn: Input points. See `tf.estimator.Estimator.evaluate`. Only one\n batch is retrieved.\n\n Returns:\n The sum of the squared distance from each point in the first batch of\n inputs to its nearest cluster center.\n \"\"\"\n return self.evaluate(input_fn=input_fn, steps=1)[KMeansClustering.SCORE]\n\n def transform(self, input_fn):\n \"\"\"Transforms each input point to its distances to all cluster centers.\n\n Note that if `distance_metric=KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`,\n this\n function returns the squared Euclidean distance while the corresponding\n sklearn function returns the Euclidean distance.\n\n Args:\n input_fn: Input points. See `tf.estimator.Estimator.predict`.\n\n Yields:\n The distances from each input point to each cluster center.\n \"\"\"\n for distances in self._predict_one_key(input_fn,\n KMeansClustering.ALL_DISTANCES):\n if self._distance_metric == KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE:\n yield np.sqrt(distances)\n else:\n yield distances\n\n def cluster_centers(self):\n \"\"\"Returns the cluster centers.\"\"\"\n return self.get_variable_value(KMeansClustering.CLUSTER_CENTERS_VAR_NAME)\n" ]
[ [ "tensorflow.python.util.tf_export.estimator_export", "tensorflow.python.feature_column.feature_column_lib.input_layer", "tensorflow.python.summary.summary.scalar", "tensorflow.python.ops.metrics.mean", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.training.session_run_hook.SessionRunArgs", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.control_flow_ops.with_dependencies", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.ops.clustering_ops.KMeans", "tensorflow.python.training.training_util.get_global_step", "numpy.sqrt", "tensorflow.python.framework.ops.get_default_graph" ] ]
WorksWellWithOthers/dqn_zoo
[ "f011d683529d8d23b017a95194ebbb41a4962fe8" ]
[ "dqn_zoo/parts.py" ]
[ "# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Components for DQN.\"\"\"\n\n# pylint: disable=g-bad-import-order\n\nimport abc\nimport collections\nimport csv\nimport os\nimport timeit\nfrom typing import Any, Iterable, Mapping, Optional, Text, Tuple, Union\n\nimport dm_env\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport rlax\n\nfrom dqn_zoo import networks\nfrom dqn_zoo import processors\n\nAction = int\nNetwork = networks.Network\nNetworkParams = networks.Params\nPRNGKey = jnp.ndarray # A size 2 array.\n\n\nclass Agent(abc.ABC):\n \"\"\"Agent interface.\"\"\"\n\n @abc.abstractmethod\n def step(self, timestep: dm_env.TimeStep) -> Action:\n \"\"\"Selects action given timestep and potentially learns.\"\"\"\n\n @abc.abstractmethod\n def reset(self) -> None:\n \"\"\"Resets the agent's episodic state such as frame stack and action repeat.\n\n This method should be called at the beginning of every episode.\n \"\"\"\n\n @abc.abstractmethod\n def get_state(self) -> Mapping[Text, Any]:\n \"\"\"Retrieves agent state as a dictionary (e.g. for serialization).\"\"\"\n\n @abc.abstractmethod\n def set_state(self, state: Mapping[Text, Any]) -> None:\n \"\"\"Sets agent state from a (potentially de-serialized) dictionary.\"\"\"\n\n @property\n @abc.abstractmethod\n def statistics(self) -> Mapping[Text, float]:\n \"\"\"Returns current agent statistics as a dictionary.\"\"\"\n\n\ndef run_loop(\n agent: Agent,\n environment: dm_env.Environment,\n max_steps_per_episode: int = 0,\n yield_before_reset: bool = False,\n) -> Iterable[Tuple[dm_env.Environment, Optional[dm_env.TimeStep], Agent,\n Optional[Action]]]:\n \"\"\"Repeatedly alternates step calls on environment and agent.\n\n At time `t`, `t + 1` environment timesteps and `t + 1` agent steps have been\n seen in the current episode. 
`t` resets to `0` for the next episode.\n\n Args:\n agent: Agent to be run, has methods `step(timestep)` and `reset()`.\n environment: Environment to run, has methods `step(action)` and `reset()`.\n max_steps_per_episode: If positive, when time t reaches this value within an\n episode, the episode is truncated.\n yield_before_reset: Whether to additionally yield `(environment, None,\n agent, None)` before the agent and environment is reset at the start of\n each episode.\n\n Yields:\n Tuple `(environment, timestep_t, agent, a_t)` where\n `a_t = agent.step(timestep_t)`.\n \"\"\"\n while True: # For each episode.\n if yield_before_reset:\n yield environment, None, agent, None,\n\n t = 0\n agent.reset()\n timestep_t = environment.reset() # timestep_0.\n\n while True: # For each step in the current episode.\n a_t = agent.step(timestep_t)\n yield environment, timestep_t, agent, a_t\n\n # Update t after one environment step and agent step and relabel.\n t += 1\n a_tm1 = a_t\n timestep_t = environment.step(a_tm1)\n\n if max_steps_per_episode > 0 and t >= max_steps_per_episode:\n assert t == max_steps_per_episode\n timestep_t = timestep_t._replace(step_type=dm_env.StepType.LAST)\n\n if timestep_t.last():\n unused_a_t = agent.step(timestep_t) # Extra agent step, action ignored.\n yield environment, timestep_t, agent, None\n break\n\n\ndef generate_statistics(\n trackers: Iterable[Any],\n timestep_action_sequence: Iterable[Tuple[dm_env.Environment,\n Optional[dm_env.TimeStep], Agent,\n Optional[Action]]]\n) -> Mapping[Text, Any]:\n \"\"\"Generates statistics from a sequence of timestep and actions.\"\"\"\n # Only reset at the start, not between episodes.\n for tracker in trackers:\n tracker.reset()\n\n for environment, timestep_t, agent, a_t in timestep_action_sequence:\n for tracker in trackers:\n tracker.step(environment, timestep_t, agent, a_t)\n\n # Merge all statistics dictionaries into one.\n statistics_dicts = (tracker.get() for tracker in trackers)\n return dict(collections.ChainMap(*statistics_dicts))\n\n\nclass EpisodeTracker:\n \"\"\"Tracks episode return and other statistics.\"\"\"\n\n def __init__(self):\n self._num_steps_since_reset = None\n self._num_steps_over_episodes = None\n self._episode_returns = None\n self._current_episode_rewards = None\n self._current_episode_step = None\n\n def step(\n self,\n environment: Optional[dm_env.Environment],\n timestep_t: dm_env.TimeStep,\n agent: Optional[Agent],\n a_t: Optional[Action],\n ) -> None:\n \"\"\"Accumulates statistics from timestep.\"\"\"\n del (environment, agent, a_t)\n\n if timestep_t.first():\n if self._current_episode_rewards:\n raise ValueError('Current episode reward list should be empty.')\n if self._current_episode_step != 0:\n raise ValueError('Current episode step should be zero.')\n else:\n # First reward is invalid, all other rewards are appended.\n self._current_episode_rewards.append(timestep_t.reward)\n\n self._num_steps_since_reset += 1\n self._current_episode_step += 1\n\n if timestep_t.last():\n self._episode_returns.append(sum(self._current_episode_rewards))\n self._current_episode_rewards = []\n self._num_steps_over_episodes += self._current_episode_step\n self._current_episode_step = 0\n\n def reset(self) -> None:\n \"\"\"Resets all gathered statistics, not to be called between episodes.\"\"\"\n self._num_steps_since_reset = 0\n self._num_steps_over_episodes = 0\n self._episode_returns = []\n self._current_episode_step = 0\n self._current_episode_rewards = []\n\n def get(self) -> Mapping[Text, Union[int, float, 
None]]:\n \"\"\"Aggregates statistics and returns as a dictionary.\n\n Here the convention is `episode_return` is set to `current_episode_return`\n if a full episode has not been encountered. Otherwise it is set to\n `mean_episode_return` which is the mean return of complete episodes only. If\n no steps have been taken at all, `episode_return` is set to `NaN`.\n\n Returns:\n A dictionary of aggregated statistics.\n \"\"\"\n if self._episode_returns:\n mean_episode_return = np.array(self._episode_returns).mean()\n current_episode_return = sum(self._current_episode_rewards)\n episode_return = mean_episode_return\n else:\n mean_episode_return = np.nan\n if self._num_steps_since_reset > 0:\n current_episode_return = sum(self._current_episode_rewards)\n else:\n current_episode_return = np.nan\n episode_return = current_episode_return\n\n return {\n 'mean_episode_return': mean_episode_return,\n 'current_episode_return': current_episode_return,\n 'episode_return': episode_return,\n 'num_episodes': len(self._episode_returns),\n 'num_steps_over_episodes': self._num_steps_over_episodes,\n 'current_episode_step': self._current_episode_step,\n 'num_steps_since_reset': self._num_steps_since_reset,\n }\n\n\nclass StepRateTracker:\n \"\"\"Tracks step rate, number of steps taken and duration since last reset.\"\"\"\n\n def __init__(self):\n self._num_steps_since_reset = None\n self._start = None\n\n def step(\n self,\n environment: Optional[dm_env.Environment],\n timestep_t: Optional[dm_env.TimeStep],\n agent: Optional[Agent],\n a_t: Optional[Action],\n ) -> None:\n del (environment, timestep_t, agent, a_t)\n self._num_steps_since_reset += 1\n\n def reset(self) -> None:\n self._num_steps_since_reset = 0\n self._start = timeit.default_timer()\n\n def get(self) -> Mapping[Text, float]:\n duration = timeit.default_timer() - self._start\n if self._num_steps_since_reset > 0:\n step_rate = self._num_steps_since_reset / duration\n else:\n step_rate = np.nan\n return {\n 'step_rate': step_rate,\n 'num_steps': self._num_steps_since_reset,\n 'duration': duration,\n }\n\n\nclass UnbiasedExponentialWeightedAverageAgentTracker:\n \"\"\"'Unbiased Constant-Step-Size Trick' from the Sutton and Barto RL book.\"\"\"\n\n def __init__(self, step_size: float, initial_agent: Agent):\n self._initial_statistics = dict(initial_agent.statistics)\n self._step_size = step_size\n self.trace = 0.\n self._statistics = dict(self._initial_statistics)\n\n def step(\n self,\n environment: Optional[dm_env.Environment],\n timestep_t: Optional[dm_env.TimeStep],\n agent: Agent,\n a_t: Optional[Action],\n ) -> None:\n \"\"\"Accumulates agent statistics.\"\"\"\n del (environment, timestep_t, a_t)\n\n self.trace = (1 - self._step_size) * self.trace + self._step_size\n final_step_size = self._step_size / self.trace\n assert 0 <= final_step_size <= 1\n\n if final_step_size == 1:\n # Since the self._initial_statistics is likely to be NaN and\n # 0 * NaN == NaN just replace self._statistics on the first step.\n self._statistics = dict(agent.statistics)\n else:\n self._statistics = jax.tree_multimap(\n lambda s, x: (1 - final_step_size) * s + final_step_size * x,\n self._statistics, agent.statistics)\n\n def reset(self) -> None:\n \"\"\"Resets statistics and internal state.\"\"\"\n self.trace = 0.\n # get() may be called before step() so ensure statistics are initialized.\n self._statistics = dict(self._initial_statistics)\n\n def get(self) -> Mapping[Text, float]:\n \"\"\"Returns current accumulated statistics.\"\"\"\n return 
self._statistics\n\n\ndef make_default_trackers(initial_agent: Agent):\n return [\n EpisodeTracker(),\n StepRateTracker(),\n UnbiasedExponentialWeightedAverageAgentTracker(\n step_size=1e-3, initial_agent=initial_agent),\n ]\n\n\nclass EpsilonGreedyActor(Agent):\n \"\"\"Agent that acts with a given set of Q-network parameters and epsilon.\n\n Network parameters are set on the actor. The actor can be serialized,\n ensuring determinism of execution (e.g. when checkpointing).\n \"\"\"\n\n def __init__(\n self,\n preprocessor: processors.Processor,\n network: Network,\n exploration_epsilon: float,\n rng_key: PRNGKey,\n ):\n self._preprocessor = preprocessor\n self._rng_key = rng_key\n self._action = None\n self.network_params = None # Nest of arrays (haiku.Params), set externally.\n\n def select_action(rng_key, network_params, s_t):\n \"\"\"Samples action from eps-greedy policy wrt Q-values at given state.\"\"\"\n rng_key, apply_key, policy_key = jax.random.split(rng_key, 3)\n q_t = network.apply(network_params, apply_key, s_t[None, ...]).q_values[0]\n a_t = rlax.epsilon_greedy().sample(policy_key, q_t, exploration_epsilon)\n return rng_key, a_t\n\n self._select_action = jax.jit(select_action)\n\n def step(self, timestep: dm_env.TimeStep) -> Action:\n \"\"\"Selects action given a timestep.\"\"\"\n timestep = self._preprocessor(timestep)\n\n if timestep is None: # Repeat action.\n return self._action\n\n s_t = timestep.observation\n self._rng_key, a_t = self._select_action(self._rng_key, self.network_params,\n s_t)\n self._action = Action(jax.device_get(a_t))\n return self._action\n\n def reset(self) -> None:\n \"\"\"Resets the agent's episodic state such as frame stack and action repeat.\n\n This method should be called at the beginning of every episode.\n \"\"\"\n processors.reset(self._preprocessor)\n self._action = None\n\n def get_state(self) -> Mapping[Text, Any]:\n \"\"\"Retrieves agent state as a dictionary (e.g. for serialization).\"\"\"\n # State contains network params to make agent easy to run from a checkpoint.\n return {\n 'rng_key': self._rng_key,\n 'network_params': self.network_params,\n }\n\n def set_state(self, state: Mapping[Text, Any]) -> None:\n \"\"\"Sets agent state from a (potentially de-serialized) dictionary.\"\"\"\n self._rng_key = state['rng_key']\n self.network_params = state['network_params']\n\n @property\n def statistics(self) -> Mapping[Text, float]:\n return {}\n\n\nclass LinearSchedule:\n \"\"\"Linear schedule, used for exploration epsilon in DQN agents.\"\"\"\n\n def __init__(self,\n begin_value,\n end_value,\n begin_t,\n end_t=None,\n decay_steps=None):\n if (end_t is None) == (decay_steps is None):\n raise ValueError('Exactly one of end_t, decay_steps must be provided.')\n self._decay_steps = decay_steps if end_t is None else end_t - begin_t\n self._begin_t = begin_t\n self._begin_value = begin_value\n self._end_value = end_value\n\n def __call__(self, t):\n \"\"\"Implements a linear transition from a begin to an end value.\"\"\"\n frac = min(max(t - self._begin_t, 0), self._decay_steps) / self._decay_steps\n return (1 - frac) * self._begin_value + frac * self._end_value\n\n\nclass NullWriter:\n \"\"\"A placeholder logging object that does nothing.\"\"\"\n\n def write(self, *args, **kwargs) -> None:\n pass\n\n def close(self) -> None:\n pass\n\n\nclass CsvWriter:\n \"\"\"A logging object writing to a CSV file.\n\n Each `write()` takes a `OrderedDict`, creating one column in the CSV file for\n each dictionary key on the first call. 
Successive calls to `write()` must\n contain the same dictionary keys.\n \"\"\"\n\n def __init__(self, fname: Text):\n \"\"\"Initializes a `CsvWriter`.\n\n Args:\n fname: File name (path) for file to be written to.\n \"\"\"\n dirname = os.path.dirname(fname)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n self._fname = fname\n self._header_written = False\n self._fieldnames = None\n\n def write(self, values: collections.OrderedDict) -> None:\n \"\"\"Appends given values as new row to CSV file.\"\"\"\n if self._fieldnames is None:\n self._fieldnames = values.keys()\n # Open a file in 'append' mode, so we can continue logging safely to the\n # same file after e.g. restarting from a checkpoint.\n with open(self._fname, 'a') as file:\n # Always use same fieldnames to create writer, this way a consistency\n # check is performed automatically on each write.\n writer = csv.DictWriter(file, fieldnames=self._fieldnames)\n # Write a header if this is the very first write.\n if not self._header_written:\n writer.writeheader()\n self._header_written = True\n writer.writerow(values)\n\n def close(self) -> None:\n \"\"\"Closes the `CsvWriter`.\"\"\"\n pass\n\n def get_state(self) -> Mapping[Text, Any]:\n \"\"\"Retrieves `CsvWriter` state as a `dict` (e.g. for serialization).\"\"\"\n return {\n 'header_written': self._header_written,\n 'fieldnames': self._fieldnames\n }\n\n def set_state(self, state: Mapping[Text, Any]) -> None:\n \"\"\"Sets `CsvWriter` state from a (potentially de-serialized) dictionary.\"\"\"\n self._header_written = state['header_written']\n self._fieldnames = state['fieldnames']\n\n\nclass NullCheckpoint:\n \"\"\"A placeholder checkpointing object that does nothing.\n\n Can be used as a substitute for an actual checkpointing object when\n checkpointing is disabled.\n \"\"\"\n\n def __init__(self):\n self.state = AttributeDict()\n\n def save(self) -> None:\n pass\n\n def can_be_restored(self) -> bool:\n return False\n\n def restore(self) -> None:\n pass\n\n\nclass AttributeDict(dict):\n \"\"\"A `dict` that supports getting, setting, deleting keys via attributes.\"\"\"\n\n def __getattr__(self, key):\n return self[key]\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def __delattr__(self, key):\n del self[key]\n" ]
[ [ "numpy.array" ] ]
mm5110/sparse-structures-for-classification
[ "ac4d765754f92f22afeb1ed0473e6d8332aa8f73" ]
[ "joint/dev/Old/dev_old/Jere/MM_MNIST_ML_ISTA/Models_MNIST.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.utils.data as Data\nimport torch.nn.functional as F\nimport torchvision\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\n\n\n##################################################\n\n#### MultiLayer ISTA NET ####\n\n##################################################\n\nclass ML_ISTA_NET(nn.Module):\n def __init__(self,m1,m2,m3):\n super(ML_ISTA_NET, self).__init__()\n \n # Convolutional Filters\n self.W1 = nn.Parameter(torch.randn(m1,1,6,6), requires_grad=True)\n self.strd1 = 2;\n self.W2 = nn.Parameter(torch.randn(m2,m1,6,6), requires_grad=True)\n self.strd2 = 2;\n self.W3 = nn.Parameter(torch.randn(m3,m2,4,4), requires_grad=True)\n self.strd3 = 1;\n \n # Biases / Thresholds\n self.b1 = nn.Parameter(torch.zeros(1,m1,1,1), requires_grad=True)\n self.b2 = nn.Parameter(torch.zeros(1,m2,1,1), requires_grad=True)\n self.b3 = nn.Parameter(torch.zeros(1,m3,1,1), requires_grad=True)\n \n # Classifier\n self.Wclass = nn.Linear(m3, 10)\n \n # Initialization\n self.W1.data = 0.01 * self.W1.data\n self.W2.data = 0.01 * self.W2.data\n self.W3.data = 0.01 * self.W3.data\n \n def forward(self, x,T=0,RHO=1):\n \n # Encoding\n gamma1 = F.relu(F.conv2d(x,self.W1, stride = self.strd1) + self.b1) # first estimation\n gamma2 = F.relu(F.conv2d(gamma1,self.W2, stride = self.strd2) + self.b2) \n gamma3 = F.relu(F.conv2d(gamma2,self.W3, stride = self.strd3) + self.b3) \n \n for _ in range(T):\n \n # backward computatoin\n gamma2_ml = F.conv_transpose2d(gamma3,self.W3, stride=self.strd3)\n gamma1_ml = F.conv_transpose2d(gamma2_ml,self.W2, stride=self.strd2)\n \n gamma1 = (1-RHO) * gamma1 + RHO * gamma1_ml\n gamma2 = (1-RHO) * gamma2 + RHO * gamma2_ml\n \n # forward computation\n gamma1 = F.relu( (gamma1 - F.conv2d( F.conv_transpose2d(gamma1,self.W1, stride = self.strd1) - x ,self.W1, stride = self.strd1)) + self.b1)\n gamma2 = F.relu( (gamma2 - F.conv2d( F.conv_transpose2d(gamma2,self.W2, stride = self.strd2) - gamma1, self.W2, stride = self.strd2)) + self.b2) \n gamma3 = F.relu( (gamma3 - F.conv2d( F.conv_transpose2d(gamma3,self.W3, stride = self.strd3) - gamma2, self.W3, stride = self.strd3)) + self.b3) \n \n # classifier\n gamma = gamma3.view(gamma3.shape[0],gamma3.shape[1]*gamma3.shape[2]*gamma3.shape[3])\n out = self.Wclass(gamma)\n out = F.log_softmax(out,dim = 1)\n \n return gamma, out\n \n\n##################################################\n\n#### MultiLayer J-ISTA NET ####\n\n##################################################\n\nclass ML_JISTA_NET(nn.Module):\n def __init__(self,m1,m2,m3):\n super(ML_JISTA_NET, self).__init__()\n \n # Convolutional Filters\n self.W1 = nn.Parameter(torch.randn(m1,1,6,6), requires_grad=True)\n self.strd1 = 2;\n self.W2 = nn.Parameter(torch.randn(m2,m1,6,6), requires_grad=True)\n self.strd2 = 2;\n self.W3 = nn.Parameter(torch.randn(m3,m2,4,4), requires_grad=True)\n self.strd3 = 1;\n \n # Biases / Thresholds\n self.b1 = nn.Parameter(torch.zeros(1,m1,1,1), requires_grad=True)\n self.b2 = nn.Parameter(torch.zeros(1,m2,1,1), requires_grad=True)\n self.b3 = nn.Parameter(torch.zeros(1,m3,1,1), requires_grad=True)\n \n # Classifier\n self.Wclass = nn.Linear(m3, 10)\n \n # Initialization\n self.W1.data = .1 * self.W1.data\n self.W2.data = .1 * self.W2.data\n self.W3.data = .1 * self.W3.data\n \n def forward(self, x,T=0,RHO=1):\n \n # Encoding\n gamma1 = F.relu(F.conv2d(x,self.W1, stride = self.strd1) + self.b1) # first estimation\n gamma2 = 
F.relu(F.conv2d(gamma1,self.W2, stride = self.strd2) + self.b2) \n gamma3 = F.relu(F.conv2d(gamma2,self.W3, stride = self.strd3) + self.b3) \n \n for _ in range(T):\n \n # backward computatoin\n gamma2_ml = F.conv_transpose2d(gamma3,self.W3, stride=self.strd3)\n gamma1_ml = F.conv_transpose2d(gamma2_ml,self.W2, stride=self.strd2)\n \n gamma1 = (1-RHO) * gamma1 + RHO * gamma1_ml\n gamma2 = (1-RHO) * gamma2 + RHO * gamma2_ml\n \n # forward computation\n gamma1 = F.relu( (gamma1 - F.conv2d( F.conv_transpose2d(gamma1,self.W1, stride = self.strd1) - x ,self.W1, stride = self.strd1)) + self.b1)\n gamma2 = F.relu( (gamma2 - F.conv2d( F.conv_transpose2d(gamma2,self.W2, stride = self.strd2) - gamma1, self.W2, stride = self.strd2)) + self.b2) \n gamma3 = F.relu( (gamma3 - F.conv2d( F.conv_transpose2d(gamma3,self.W3, stride = self.strd3) - gamma2, self.W3, stride = self.strd3)) + self.b3) \n \n # classifier\n gamma = gamma3.view(gamma3.shape[0],gamma3.shape[1]*gamma3.shape[2]*gamma3.shape[3])\n out = self.Wclass(gamma)\n out = F.log_softmax(out,dim = 1)\n \n return gamma, out \n \n\n def joint_train(self, x, labels, T=0, RHO=1):\n # Initialise dics to contain sorted data\n label_bin_data = {\"0\":[], \"1\":[], \"2\":[], \"3\":[], \"4\":[], \"5\":[], \"6\":[], \"7\":[], \"8\":[], \"9\":[]} # Dictionary of lists of tensors\n data_by_class = {} # Dictionary of tensors\n encoded_by_class = {} # Dictionary of tensors\n scores_by_class = {} # Dictionary of lists\n sorted_labels = np.empty(labels.shape[0])\n index = 0\n # Sort data by its label class into a dictionary of lists which contain the data point tensors\n for i in range(labels.shape[0]):\n label_bin_data[str(int(labels[i].item()))].append(x[i,:,:,:])\n # Turn each list of tensors in the dictionary into a tensor\n for key, tensor_list in label_bin_data.items():\n if len(label_bin_data[key]) > 0:\n sorted_labels[index:index+len(label_bin_data[key])] = int(key)*np.ones(len(label_bin_data[key]))\n index = index+len(label_bin_data[key])\n data_by_class[key] = torch.stack(label_bin_data[key], dim=0)\n encoded_by_class[key], scores_by_class[key] = self.joint_forward(data_by_class[key],T,RHO)\n if key == \"0\":\n scores = scores_by_class[key]\n else:\n scores = torch.cat((scores, scores_by_class[key]), 0)\n return encoded_by_class, scores, torch.from_numpy(sorted_labels).type(torch.LongTensor)\n \n\n def joint_forward(self, x,T=0,RHO=1): \n # Encoding\n gamma1 = F.relu(F.conv2d(x,self.W1, stride = self.strd1) + self.b1) # first estimation\n gamma2 = F.relu(F.conv2d(gamma1,self.W2, stride = self.strd2) + self.b2)\n \n # Encourage joint sparisty in the final layer sparse layer encoding\n X1 = F.conv2d(gamma2,self.W3, stride = self.strd3)\n X1_dims = list(X1.shape)\n X1_mat = X1.view(-1, X1_dims[1])\n st_factors = 1-torch.squeeze(self.b3)*1/(torch.sum(X1_mat**2, dim=0))\n st_factors_mat = torch.diag(st_factors)\n X2_mat = torch.t(torch.mm(st_factors_mat, torch.t(X1_mat)))\n X2 = X2_mat.view(X1_dims[0], X1_dims[1], X1_dims[2], X1_dims[3]) \n gamma3 = F.relu(X2)\n\n for _ in range(T):\n \n # backward computation\n gamma2_ml = F.conv_transpose2d(gamma3,self.W3, stride=self.strd3)\n gamma1_ml = F.conv_transpose2d(gamma2_ml,self.W2, stride=self.strd2)\n \n gamma1 = (1-RHO) * gamma1 + RHO * gamma1_ml\n gamma2 = (1-RHO) * gamma2 + RHO * gamma2_ml\n \n # forward computation\n gamma1 = F.relu( (gamma1 - F.conv2d( F.conv_transpose2d(gamma1,self.W1, stride = self.strd1) - x ,self.W1, stride = self.strd1)) + self.b1)\n gamma2 = F.relu( (gamma2 - F.conv2d( 
F.conv_transpose2d(gamma2,self.W2, stride = self.strd2) - gamma1, self.W2, stride = self.strd2)) + self.b2)\n\n X1 = F.conv2d(gamma2,self.W3, stride = self.strd3)\n X1_dims = list(X1.shape)\n X1_mat = X1.view(-1, X1_dims[1])\n st_factors = 1-torch.squeeze(self.b3)*1/(torch.sum(X1_mat**2, dim=0))\n st_factors_mat = torch.diag(st_factors)\n X2_mat = torch.t(torch.mm(st_factors_mat, torch.t(X1_mat)))\n X2 = X2_mat.view(X1_dims[0], X1_dims[1], X1_dims[2], X1_dims[3])\n gamma3 = F.relu(X2)\n # gamma3 = F.relu( (gamma3 - F.conv2d( F.conv_transpose2d(gamma3,self.W3, stride = self.strd3) - gamma2, self.W3, stride = self.strd3)) + self.b3) \n \n # classifier\n gamma = gamma3.view(gamma3.shape[0],gamma3.shape[1]*gamma3.shape[2]*gamma3.shape[3])\n out = self.Wclass(gamma)\n out = F.log_softmax(out,dim = 1)\n \n return gamma, out \n\n \n \n##################################################\n\n#### MultiLayer FISTA NET ####\n\n##################################################\n\nclass ML_FISTA_NET(nn.Module):\n def __init__(self,m1,m2,m3):\n super(ML_FISTA_NET, self).__init__()\n \n # Convolutional Filters\n self.W1 = nn.Parameter(torch.randn(m1,1,6,6), requires_grad=True)\n self.strd1 = 2;\n self.W2 = nn.Parameter(torch.randn(m2,m1,6,6), requires_grad=True)\n self.strd2 = 2;\n self.W3 = nn.Parameter(torch.randn(m3,m2,4,4), requires_grad=True)\n self.strd3 = 1;\n \n # Biases / Thresholds\n self.b1 = nn.Parameter(torch.zeros(1,m1,1,1), requires_grad=True)\n self.b2 = nn.Parameter(torch.zeros(1,m2,1,1), requires_grad=True)\n self.b3 = nn.Parameter(torch.zeros(1,m3,1,1), requires_grad=True)\n \n # Classifier\n self.Wclass = nn.Linear(m3, 10)\n \n # Initialization\n self.W1.data = 0.01 * self.W1.data\n self.W2.data = 0.01 * self.W2.data\n self.W3.data = 0.01 * self.W3.data\n \n def forward(self, x,T=0,RHO=1):\n \n t = 1\n t_prv = t\n \n # Encoding\n gamma1 = F.relu(F.conv2d(x,self.W1, stride = self.strd1) + self.b1) \n gamma2 = F.relu(F.conv2d(gamma1,self.W2, stride = self.strd2) + self.b2) \n gamma3 = F.relu(F.conv2d(gamma2,self.W3, stride = self.strd3) + self.b3) \n gamma3_prv = gamma3\n \n for _ in range(T):\n \n t_prv = t\n t = float((1+np.sqrt(1+4*t_prv**2))/2) \n \n Z = gamma3 + (t_prv-1)/t * (gamma3 - gamma3_prv)\n gamma3_prv = gamma3\n \n # backward computation\n gamma2_ml = F.conv_transpose2d(Z,self.W3, stride=self.strd3)\n gamma1_ml = F.conv_transpose2d(gamma2_ml,self.W2, stride=self.strd2)\n \n gamma1 = (1-RHO) * gamma1 + RHO * gamma1_ml\n gamma2 = (1-RHO) * gamma2 + RHO * gamma2_ml\n \n # forward computation\n gamma1 = F.relu( (gamma1 - F.conv2d( F.conv_transpose2d(gamma1,self.W1, stride = self.strd1) - x ,self.W1, stride = self.strd1)) + self.b1)\n gamma2 = F.relu( (gamma2 - F.conv2d( F.conv_transpose2d(gamma2,self.W2, stride = self.strd2) - gamma1, self.W2, stride = self.strd2)) + self.b2) \n gamma3 = F.relu( (Z - F.conv2d( F.conv_transpose2d(Z,self.W3, stride = self.strd3) - gamma2, self.W3, stride = self.strd3)) + self.b3) \n \n # classifier\n gamma = gamma3.view(gamma3.shape[0],gamma3.shape[1]*gamma3.shape[2]*gamma3.shape[3])\n out = self.Wclass(gamma)\n out = F.log_softmax(out,dim = 1)\n \n return gamma, out\n \n \n " ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.cat", "numpy.empty", "torch.stack", "torch.nn.functional.conv_transpose2d", "torch.nn.functional.log_softmax", "torch.nn.functional.relu", "torch.from_numpy", "torch.squeeze", "torch.t", "numpy.sqrt", "torch.diag", "torch.nn.functional.conv2d", "torch.randn", "torch.sum" ] ]
baruchel/apl
[ "238247a24106c80aa57ad8aa797afca71550425f" ]
[ "apl/__init__.py" ]
[ "\"\"\"\nAn implementation of the APL programming language.\n\"\"\"\n\n__version__ = '0.1'\n\nimport numpy as np\n\nfrom .internal import (\n _apl, AplArray,\n # Exceptions:\n DomainError, RankError, InvalidAxisError\n )\n\nfrom .core import (\n index, rho\n )\n\nfrom .arithmetic import (\n add, sub, mul, div, residue,\n min, max, power, log\n )\n\nfrom .parse import parse_line\n\ndef APL(x):\n \"\"\"\n Return an array to be used with the APL module.\n This type is basically a Numpy array with some internal\n new features.\n \"\"\"\n if isinstance(x, AplArray):\n return _apl(np.array(x), stops = x.__apl_stops__)\n if isinstance(x, (np.integer, int,\n np.floating, float,\n np.complexfloating, complex)):\n return _apl(np.array([x])) # scalar\n return _apl(np.array(x))\n\n__all__ = ['APL', 'index', 'rho',\n 'add', 'sub', 'mul', 'div', 'residue',\n 'min', 'max', 'power', 'log',\n # to be removed later\n 'parse_line',\n # Exceptions:\n 'DomainError', 'RankError', 'InvalidAxisError'\n ]\n" ]
[ [ "numpy.array" ] ]
DTRademaker/DL_tutorials_2020
[ "aaae2833c084547e027e51ebeb0abf6bcd336897" ]
[ "20200513_cnn_week5/hw/FM/HW5_Farzaneh.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 19 02:44:54 2020\n\n@author: farzanehmeimandi\n\"\"\"\n\n# Problem 1\n# As we are using Max-pooling, it does not differ in the order of Max-pooling and the activation function\n\n# Problem 2\n# 2d & 1d (in the following code) - It seems to be a debate whether using BatchNormalization before or after activation\n# Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom Bio import SeqIO\n'''\nclass Net(nn.Module):\n def __init__(self)\t:\n super(Net , self).__init__()\n self.conv1 = nn.Conv2d(1,8,5)\n self.pool = nn.MaxPool2d(2,2)\n self.bn2d = nn.BatchNorm2d(8)\n self.conv2 = nn.Conv2d(8, 16, 5)\n self.bn2d = nn.BatchNorm2d(16)\n self.fc1 = nn.Linear(16 * 4 * 4, 256)\n self.bn1d = nn.BatchNorm1d(256)\n self.fc2 = nn.Linear(256, 64)\n self.bn1d = nn.BatchNorm1d(64)\n self.fc3 = nn.Linear(64, 10)\n def forward(self,x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.bn2d(x)\n x = self.pool(F.relu(self.conv2(x)))\n x = self.bn2d(x)\n x = x.view(-1, 16 * 4 * 4)\n x = F.relu(self.fc1(x))\n x = self.bn1d(x)\n x = F.relu(self.fc2(x))\n x = self.bn1d(x)\n x = self.fc3(x)\n return F.softmax(x,dim=1)\n\nNet = nn.Sequential(nn.Conv2d(1,8,5),\n nn.ReLU(),\n nn.MaxPool2d(2,2),\n nn.BatchNorm2d(8),\n nn.Conv2d(8,16,5),\n nn.ReLU(),\n nn.MaxPool2d(2,2),\n nn.BatchNorm2d(16),\n nn.Flatten(),\n nn.Linear(16 * 4 * 4, 256),\n nn.ReLU(),\n nn.BatchNorm1d(256),\n nn.Linear(256, 64),\n nn.ReLU(),\n nn.BatchNorm1d(64),\n nn.Linear(64, 10),\n nn.Softmax(dim=1)\n ) \n''' \n# Problem 3a \n# With convolution sliding a window through the data, the edges are not covered. \n# If we have N x N image size and F x F filter size then after convolution result will be\n# (N x N) * (F x F) = (N-F+1)x(N-F+1) --> For our case : (10-5+1) x (10-5+1) = 6 x 6\nconv = nn.Conv2d(1,1,5)\ninput = torch.rand(1,1,10,10)\noutput = conv(input)\n\n \n# Problem 3b --> no it does not differ using kernel-size 1\n \n# Problem 3c\n# To maintain the dimension of output as in input , we use padding. Padding is a process of adding zeros to the input matrix symmetrically to allow for more space for the kernel to cover the image. 
\n# https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529\n\n# Problem 4\n\nnet = nn.Sequential(\n nn.Conv1d(4, 10 , 3, stride=3),\n nn.BatchNorm1d(10),\n nn.ReLU(),\n nn.Conv1d(10, 20, 1),\n nn.BatchNorm1d(20), \n nn.ReLU(), \n nn.Conv1d(20,21, 1), \n nn.Softmax(dim=1))\n \n\n# One hot encoding\nNAs = torch.eye(4, dtype=torch.int)\nAAs = torch.eye(21, dtype=torch.int)\n\n\ndef decode2AA(enocded_res_seq):\n AA_codes= ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y','*']\n decoded_seq = []\n for i in range(len(enocded_res_seq)):\n decoded_seq.append(AA_codes[enocded_res_seq[i].argmax(0)])\n decoded_seq=''.join(decoded_seq)\n return decoded_seq\n\ndef encodeAA(prot_seq):\n prot_dict = {'A':1, 'C':2, 'D':3, 'E':4, 'F':5, 'G':6, 'H':7, 'I':8, 'K':9, 'L':10, 'M':11, 'N':12, 'P':13, 'Q':14, 'R':15, 'S':16, 'T':17, 'V':18, 'W':19, 'Y':20,'*':21}\n i=0\n encoded_prot_seq=torch.zeros(len(prot_seq),len(prot_dict))\n for k in prot_seq:\n encoded_prot_seq[i,:]=AAs[prot_dict[k]-1]\n i+=1\n return encoded_prot_seq\n \ndef encodeNA(DNA_seq):\n NA_dict = {'A': 1, 'T':2 , 'C':3, 'G':4}\n i=0\n encoded_DNA_seq=torch.zeros(len(DNA_seq),len(NA_dict))\n for k in DNA_seq:\n encoded_DNA_seq[i,:]=NAs[NA_dict[k]-1]\n i+=1\n return encoded_DNA_seq\n\ndef calcAccuracy(prediction, labels, reduce=True):\n\toverlap = (prediction.argmax(1)==labels.argmax(1)).sum()\n\tif reduce:\n\t\treturn overlap/float(labels.size(2))\n\treturn overlap\n\noptim = torch.optim.Adam(net.parameters()) # the optimizer for the gradients\n\ntrain_sequences = SeqIO.parse(open('sequences.fasta'),'fasta')\nDNAs=[]\nproteins=[]\nfor record in SeqIO.parse(\"sequences.fasta\", \"fasta\"):\n if (\"DNA\" in record.description):\n DNAs.append(str(record.seq))\n elif (\"PROTEIN\" in record.description):\n proteins.append(str(record.seq))\n\nfor epoch in range(int(400)):\n for DNA, prot in zip(DNAs, proteins):\n optim.zero_grad()\n DNA_train=encodeNA(DNA)\n labels_train=encodeAA(prot).T.unsqueeze(0)\n prediction=net(DNA_train.T.unsqueeze(0))\n net.eval()\n loss = F.binary_cross_entropy(prediction, labels_train)\n loss.backward() # Calculate gradients\n optim.step() # Update gradients\n loss = float(loss) \n accuracy = calcAccuracy(prediction, labels_train, True)\n if ((epoch+1)%10)==0:\n print(decode2AA(prediction.squeeze(0).T))\n print(prot)\n \n if ((epoch+1)%10)==0:\n print('\\nEpoch: %i\\t loss: %.4f\\t accuracy %.2f%%' % (epoch+1,loss, accuracy*100))\n\n\n# Problem 4c:\n#Stride is the number of pixels a convolutional filter moves, like a sliding window \n#https://avvineed.com/convolutional-neural-network-numpy.html \n \n# Problem 4d:\n \n# PRoblem 4e: B and D and F\n " ]
[ [ "torch.rand", "torch.nn.Softmax", "torch.nn.Conv1d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.BatchNorm1d", "torch.eye", "torch.nn.functional.binary_cross_entropy" ] ]
giangnguyen2412/dissect_catastrophic_forgetting
[ "b0dea59e9cf6521c7b43b2ab53b6c3258902d6ae" ]
[ "train.py" ]
[ "import torch.nn as nn\nfrom data_loader import get_loader\nfrom model import EncoderCNN, DecoderRNN\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom torchvision import transforms\nfrom utils import *\nfrom prepro.build_vocab import *\nfrom prepro.pick_image import make_dir\nfrom infer import infer_caption_by_master, infer_caption\nimport numpy as np\nimport pickle\nimport argparse\nimport json\n\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ncfg = load_and_print_cfg('config.yaml')\n\n\ndef main(args):\n print(args)\n epochs_since_improvement = 0\n\n # Create model directory\n make_dir(args.model_path)\n\n # Image pre-processing, normalization for the pre-trained res-net\n transform = transforms.Compose([\n transforms.RandomCrop(args.crop_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),\n (0.229, 0.224, 0.225))])\n\n # Load vocabulary wrapper\n vocab_path = args.vocab_path\n with open(vocab_path, 'rb') as f:\n vocab = pickle.load(f)\n\n # Build data loader\n train_root = args.image_dir + cfg['train']['TRAIN_DIR']\n train_json = args.caption_path + cfg['train']['train_annotation']\n\n val_root = args.image_dir + cfg['train']['VAL_DIR']\n val_json = args.caption_path + cfg['train']['valid_annotation']\n\n # After patience epochs without improvement, break training\n patience = cfg['train']['patience']\n early_stopping = EarlyStopping(patience=patience, verbose=True)\n\n if args.check_point and os.path.isfile(args.check_point):\n checkpoint = torch.load(args.check_point)\n\n old_vocab_size = 0\n if args.fine_tuning:\n encoder = checkpoint['encoder']\n decoder = checkpoint['decoder']\n print(\"Fine tuning with check point is {}\".format(args.check_point))\n\n vocab, old_vocab_size = append_vocab(args.check_point_vocab, vocab)\n\n with open(vocab_path, 'wb') as v:\n print(\"Dump {} entries to vocab {}\".format(vocab.idx, vocab_path))\n pickle.dump(vocab, v)\n vocab_size = len(vocab)\n\n # Get decoder's previous state\n old_embed = decoder.embed.weight.data[:4124]\n old_weight = decoder.linear.weight.data[:4124]\n old_bias = decoder.linear.bias.data[:4124]\n\n # Initialize new embedding and linear layers\n decoder.embed = nn.Embedding(vocab_size, args.embed_size)\n decoder.linear = nn.Linear(args.hidden_size, vocab_size)\n\n if args.freeze_cri or args.lwf or args.distill:\n # Assign old neurons to the newly-initialized layer, fine-tuning only should ignore this\n print(\"Assigning old neurons of embedding and linear layer to new decoder...\")\n\n # Init by decoder's params\n decoder.embed.weight.data[:4124, :] = old_embed # 4124 is the vocab size of S19\n decoder.linear.weight.data[:4124] = old_weight\n decoder.linear.bias.data[:4124] = old_bias\n\n encoder.to(device)\n decoder.to(device)\n\n else:\n # Normal training procedure\n encoder = EncoderCNN(args.embed_size).to(device)\n decoder = DecoderRNN(args.embed_size, args.hidden_size, len(vocab), args.num_layers).to(device)\n\n if args.freeze_enc:\n args.task_name += '_freeze_enc'\n elif args.freeze_dec:\n args.task_name += '_freeze_dec'\n elif args.freeze_cri:\n args.task_name += '_freeze_cri'\n elif args.lwf:\n args.task_name += '_lwf'\n elif args.distill and args.kd1:\n args.task_name += '_kd1'\n elif args.distill and args.kd2:\n args.task_name += '_kd2'\n\n if args.task_type == 'seq':\n args.model_path = cfg['model']['model_path_format'].format(args.task_type, args.task_name + '_seq', 'models')\n args.cpkt_path = 
cfg['model']['model_path_format'].format(args.task_type, args.task_name + '_seq', 'best')\n else:\n args.model_path = cfg['model']['model_path_format'].format(args.task_type, args.task_name, 'models')\n args.cpkt_path = cfg['model']['model_path_format'].format(args.task_type, args.task_name, 'best')\n\n # Create model directory\n make_dir(args.model_path)\n\n # Pseudo-labeling option\n if args.lwf:\n print(\"Running pseudo-labeling option...\")\n # Infer pseudo-labels using previous model\n pseudo_labels = infer_caption(img_path=train_root,\n json_path=train_json,\n model=args.check_point,\n vocab_path=vocab_path,\n prediction_path=None,\n id2class_path=None)\n\n # Freeze LSTM and decoder for later joint optimization\n for param in decoder.lstm.parameters():\n param.requires_grad_(False)\n for param in encoder.parameters():\n param.requires_grad_(False)\n\n data = append_json(pseudo_labels, train_json)\n\n # Create a new json file from the train_json\n train_json = args.caption_path + 'captions_train_lwf.json'\n with open(train_json, 'w') as file:\n json.dump(data, file)\n\n # Knowledge distillation option\n if args.distill:\n print(\"Running knowledge distillation...\")\n # Teacher\n teacher_cnn = checkpoint['encoder']\n teacher_lstm = checkpoint['decoder']\n teacher_cnn.train()\n teacher_lstm.train()\n\n # Initialize a totally new captioning model - Student\n encoder = EncoderCNN(args.embed_size).to(device)\n decoder = DecoderRNN(args.embed_size, args.hidden_size, len(vocab), args.num_layers).to(device)\n\n # Student\n student_cnn = encoder\n student_lstm = decoder\n\n # Move teacher to cuda\n teacher_cnn.to(device)\n teacher_lstm.to(device)\n\n # Loss between GT caption and the prediction\n criterion_lstm = nn.CrossEntropyLoss()\n # Loss between predictions of teacher and student\n criterion_distill = nn.MSELoss()\n\n # Params of student\n params_st = list(student_lstm.parameters()) + list(student_cnn.parameters())\n\n optimizer_lstm = torch.optim.Adam(params_st, lr=1e-4)\n optimizer_distill = torch.optim.Adam(student_cnn.parameters(), lr=1e-5)\n\n if args.freeze_enc:\n print(\"Freeze encoder technique!\")\n for param in encoder.parameters():\n param.requires_grad_(False)\n\n if args.freeze_dec:\n print(\"Freeze decoder technique!\")\n for param in decoder.lstm.parameters():\n param.requires_grad_(False)\n\n if args.freeze_cri:\n print(\"Critical Freezing technique!\")\n layer_idx = -1\n for child in encoder.resnet.children():\n layer_idx += 1\n if layer_idx == 0 or layer_idx == 4: # blk 1 & 2\n for param in child.parameters():\n param.requires_grad = False\n\n train_loader = get_loader(root=train_root, json=train_json, vocab=vocab,\n transform=transform, batch_size=args.batch_size,\n shuffle=True, num_workers=args.num_workers)\n\n val_loader = get_loader(root=val_root, json=val_json, vocab=vocab,\n transform=transform, batch_size=args.batch_size,\n shuffle=True, num_workers=args.num_workers)\n\n # Loss and optimizer\n criterion = nn.CrossEntropyLoss()\n params = list(decoder.parameters()) + list(encoder.parameters())\n optimizer = torch.optim.Adam(params, lr=args.learning_rate)\n\n # Theses vars are for plotting\n avg_train_losses = []\n avg_val_losses = []\n\n for epoch in range(args.num_epochs):\n\n if args.distill:\n print(\"Training with distillation option!\")\n train_step, train_loss_step = train_distill(epoch, train_loader=train_loader,\n student_cnn=student_cnn,\n student_lstm=student_lstm,\n teacher_cnn=teacher_cnn,\n teacher_lstm=teacher_lstm,\n 
criterion_lstm=criterion_lstm,\n criterion_distill=criterion_distill,\n optimizer_lstm=optimizer_lstm,\n optimizer_distill=optimizer_distill)\n # Validate after an epoch\n recent_val_loss, val_step, val_loss_step = validate(epoch, val_loader=val_loader,\n encoder=student_cnn,\n decoder=student_lstm,\n criterion=criterion)\n else:\n\n train_step, train_loss_step = train(epoch, train_loader=train_loader,\n encoder=encoder,\n decoder=decoder,\n criterion=criterion,\n optimizer=optimizer,\n first_training=True,\n old_vocab_size=old_vocab_size)\n # Validate after an epoch\n recent_val_loss, val_step, val_loss_step = validate(epoch, val_loader=val_loader,\n encoder=encoder,\n decoder=decoder,\n criterion=criterion)\n train_loss = np.average(train_loss_step)\n val_loss = np.average(val_loss_step)\n\n avg_train_losses.append(train_loss)\n avg_val_losses.append(val_loss)\n\n # Save checkpoint\n make_dir(args.cpkt_path)\n early_stopping(args.cpkt_path, cfg['train']['data_name'], epoch, epochs_since_improvement, encoder, decoder, optimizer,\n optimizer, val_loss)\n\n if early_stopping.early_stop:\n print(\"Early Stopping!\")\n break\n\n if args.lwf:\n # Make all trainable\n for param in decoder.linear.parameters():\n param.requires_grad_(True)\n for param in decoder.embed.parameters():\n param.requires_grad_(True)\n for param in decoder.lstm.parameters():\n param.requires_grad_(True)\n for param in encoder.parameters():\n param.requires_grad_(True)\n\n print(\"Unfreezing parameters ...\")\n\n print(\"Critical Freezing technique!\")\n layer_idx = -1\n for child in encoder.resnet.children():\n layer_idx += 1\n if layer_idx == 0 or layer_idx == 4: # blk 1 & 2\n for param in child.parameters():\n param.requires_grad = False\n\n # Joint optimization starts\n\n early_stopping = EarlyStopping(patience=patience, verbose=True)\n for epoch in range(args.num_epochs):\n train_step, train_loss_step = train(epoch, train_loader=train_loader,\n encoder=encoder,\n decoder=decoder,\n criterion=criterion,\n optimizer=optimizer,\n first_training=False,\n old_vocab_size=old_vocab_size)\n # Validate after an epoch\n recent_val_loss, val_step, val_loss_step = validate(epoch, val_loader=val_loader,\n encoder=encoder,\n decoder=decoder,\n criterion=criterion)\n\n train_loss = np.average(train_loss_step)\n val_loss = np.average(val_loss_step)\n\n avg_train_losses.append(train_loss)\n avg_val_losses.append(val_loss)\n\n # Save checkpoint\n make_dir(args.cpkt_path)\n early_stopping(args.cpkt_path, cfg['train']['data_name'], epoch, epochs_since_improvement, encoder, decoder, optimizer,\n optimizer, val_loss)\n\n if early_stopping.early_stop:\n print(\"Early Stopping!\")\n break\n\n\ndef train_distill(epoch, train_loader, student_cnn, student_lstm, teacher_cnn, teacher_lstm,\n criterion_lstm, criterion_distill, optimizer_lstm, optimizer_distill):\n \"\"\"\n Train function for distillation option\n :param epoch: num of epoch for training\n :param train_loader: training loader\n :param student_cnn: student encoder\n :param student_lstm: student decoder\n :param teacher_cnn: teacher encoder\n :param teacher_lstm: teacher decoder\n :param criterion_lstm: normal loss calculation\n :param criterion_distill: loss calculation for distill part\n :param optimizer_lstm: normal optimizer\n :param optimizer_distill: optimizer for distill part\n :return:\n \"\"\"\n\n step = []\n loss_step = []\n # Train mode on\n total_step = len(train_loader)\n student_cnn.to(device)\n student_lstm.to(device)\n student_cnn.train()\n student_lstm.train()\n\n 
for param in student_cnn.parameters():\n param.requires_grad_(True)\n\n for i, (images, captions, lengths) in enumerate(train_loader):\n # Set mini-batch dataset\n images = images.to(device)\n captions = captions.to(device)\n targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]\n\n # Forward, backward and optimize\n optimizer_lstm.zero_grad()\n optimizer_distill.zero_grad()\n\n features_tr, _, _ = teacher_cnn(images)\n features_st, _, _ = student_cnn(images)\n\n outputs = student_lstm(features_st, captions, lengths)\n outputs_tr = teacher_lstm(features_tr, captions, lengths)\n\n # Add CNN distillation loss here\n lstm_loss = criterion_lstm(outputs, targets)\n if args.kd2:\n dis_loss = criterion_distill(outputs, outputs_tr)\n #print(\"Running KD2\")\n elif args.kd1:\n dis_loss = criterion_distill(features_tr, features_st)\n #print(\"Running KD1\")\n else:\n assert False, \"Choose KD1 or KD2 option! Terminating............\"\n loss = lstm_loss + dis_loss\n\n loss.backward()\n\n optimizer_lstm.step()\n optimizer_distill.step()\n\n # Print log info\n if i % args.log_step == 0:\n print('Training: Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, LSTM Loss: {:.4f}, Distillation Loss: {:.4f}'\n .format(epoch + 1, args.num_epochs, i, total_step, loss.item(), lstm_loss.item(), dis_loss.item()))\n step.append(i)\n loss_step.append(loss.item())\n\n torch.save(student_lstm.state_dict(), os.path.join(\n args.model_path, 'decoder-{}.ckpt'.format(epoch + 1)))\n torch.save(student_cnn.state_dict(), os.path.join(\n args.model_path, 'encoder-{}.ckpt'.format(epoch + 1)))\n\n return step, loss_step\n\n\ndef train(epoch, train_loader, encoder, decoder, criterion, optimizer, first_training, old_vocab_size):\n \"\"\"\n Train function\n :param epoch: epoch\n :param train_loader: training loader\n :param encoder: encoder model\n :param decoder: decoder model\n :param criterion: loss calculation\n :param optimizer: optimizer\n :param first_training: this flag is used for pseudo-labeling, we train 2 times\n :param old_vocab_size: size of the old vocab\n :return:\n \"\"\"\n\n step = []\n loss_step = []\n\n # Train mode on\n total_step = len(train_loader)\n encoder.train()\n decoder.train()\n print(first_training)\n\n for i, (images, captions, lengths) in enumerate(train_loader):\n # Set mini-batch dataset\n images = images.to(device)\n captions = captions.to(device)\n targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]\n\n # Forward, backward and optimize\n features, _, _ = encoder(images)\n outputs = decoder(features, captions, lengths)\n loss = criterion(outputs, targets)\n decoder.zero_grad()\n encoder.zero_grad()\n loss.backward()\n\n optimizer.step()\n\n # Print log info\n if i % args.log_step == 0:\n print('Training: Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'\n .format(epoch + 1, args.num_epochs, i, total_step, loss.item()))\n step.append(i)\n loss_step.append(loss.item())\n\n return step, loss_step\n\n\ndef validate(epoch, val_loader, encoder, decoder, criterion):\n \"\"\"\n Performs one epoch's validation.\n\n :param val_loader: DataLoader for validation data.\n :param encoder: encoder model\n :param decoder: decoder model\n :param criterion: loss layer\n :param epoch\n :return:\n \"\"\"\n\n step = []\n loss_step = []\n loss_over_validation = 0\n\n decoder.eval() # eval mode (no dropout or batchnorm)\n if encoder is not None:\n encoder.eval()\n\n total_step = len(val_loader)\n for i, (images, captions, lengths) in enumerate(val_loader):\n images = images.to(device)\n captions = 
captions.to(device)\n targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]\n\n # Forward, backward and optimize\n features, _, _ = encoder(images)\n outputs = decoder(features, captions, lengths)\n loss = criterion(outputs, targets)\n loss_over_validation += loss.item()\n\n # Print log info\n if i % args.log_step == 0:\n print('Validation: Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'\n .format(epoch + 1, args.num_epochs, i, total_step, loss.item()))\n step.append(i)\n loss_step.append(loss.item())\n\n return loss_over_validation, step, loss_step\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # task type is one | once | seq\n parser.add_argument('--task_type', type=str, default='one', help='Add classes one by one or once')\n\n parser.add_argument('--log_step', type=int, default=10, help='step size for printing log info')\n parser.add_argument('--save_step', type=int, default=400, help='step size for saving trained models')\n parser.add_argument('--crop_size', type=int, default=224, help='size for randomly cropping images')\n\n # Model parameters\n parser.add_argument('--embed_size', type=int, default=256, help='dimension of word embedding vectors')\n parser.add_argument('--hidden_size', type=int, default=512, help='dimension of lstm hidden states')\n parser.add_argument('--num_layers', type=int, default=1, help='number of layers in lstm')\n\n parser.add_argument('--num_epochs', type=int, default=50)\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--num_workers', type=int, default=2)\n\n parser.add_argument('--task_name', type=str, default='2to21')\n parser.add_argument('--check_point', type=str,\n default='models/one/2to21/best/BEST_checkpoint_ms-coco.pth.tar')\n parser.add_argument('--check_point_vocab', type=str,\n default='data/vocab/2to21/vocab.pkl')\n\n # Technique options\n parser.add_argument('--fine_tuning', action=\"store_true\", help=\"use Fine-tuning from a check point\")\n parser.add_argument('--freeze_enc', action=\"store_true\", help=\"use Freezing the encoder\")\n parser.add_argument('--freeze_dec', action=\"store_true\", help=\"use Freezing the decoder\")\n parser.add_argument('--freeze_cri', action=\"store_true\", help=\"use Critical Freezing method\")\n parser.add_argument('--lwf', action=\"store_true\", help=\"use Learning without forgetting\")\n parser.add_argument('--distill', action=\"store_true\", help=\"use KD\")\n parser.add_argument('--kd1', action=\"store_true\", help=\"use Knowledge distillation on intermediate space\")\n parser.add_argument('--kd2', action=\"store_true\", help=\"use Knowledge distillation on output layer\")\n\n # As Karpathy, 3e-4 is the best learning rate for Adam\n parser.add_argument('--learning_rate', type=float, default=5e-4)\n args = parser.parse_args()\n\n args.vocab_path = cfg['dataset']['vocab_format'].format(args.task_name)\n args.image_dir = cfg['dataset']['image_dir_format'].format(args.task_name)\n args.caption_path = cfg['dataset']['caption_path_format'].format(args.task_name)\n args.model_path = cfg['model']['model_path_format'].format(args.task_type, args.task_name, 'models')\n args.cpkt_path = cfg['model']['model_path_format'].format(args.task_type, args.task_name, 'best')\n\n if args.task_type == 'seq':\n print(\"Running sequentially!\")\n task_list = cfg['train']['seq_task_list']\n for i, task_name in enumerate(task_list):\n # First task (i=0) will get checkpoint from 2to21\n if i >= 1:\n if args.freeze_enc:\n args.check_point = 
cfg['model']['check_point_format_seq'].format(\n task_list[i - 1] + '_freeze_enc_seq')\n elif args.freeze_dec:\n args.check_point = cfg['model']['check_point_format_seq'].format(\n task_list[i - 1] + '_freeze_dec_seq')\n elif args.freeze_cri:\n args.check_point = cfg['model']['check_point_format_seq'].format(\n task_list[i - 1] + '_freeze_cri_seq')\n elif args.lwf:\n args.check_point = cfg['model']['check_point_format_seq'].format(\n task_list[i - 1] + '_lwf_seq')\n elif args.kd1:\n args.check_point = cfg['model']['check_point_format_seq'].format(\n task_list[i - 1] + '_kd1_seq')\n elif args.kd2:\n args.check_point = cfg['model']['check_point_format_seq'].format(\n task_list[i - 1] + '_kd2_seq')\n\n args.check_point_vocab = cfg['dataset']['vocab_format'].format(task_list[i-1])\n args.task_name = task_name\n args.vocab_path = cfg['dataset']['vocab_format'].format(args.task_name)\n args.image_dir = cfg['dataset']['image_dir_format'].format(args.task_name)\n args.caption_path = cfg['dataset']['caption_path_format'].format(args.task_name)\n args.model_path = cfg['model']['model_path_format'].format(args.task_type, args.task_name + '_seq', 'models')\n args.cpkt_path = cfg['model']['model_path_format'].format(args.task_type, args.task_name + '_seq', 'best')\n main(args)\n else:\n main(args)\n" ]
[ [ "torch.nn.Linear", "torch.nn.MSELoss", "torch.nn.CrossEntropyLoss", "torch.nn.utils.rnn.pack_padded_sequence", "numpy.average", "torch.nn.Embedding" ] ]
KKVANONYMOUS/Rubiks-Cube
[ "df15104fcd6b9da536e2879a6bba8778edbfc0a8" ]
[ "CubeInput.py" ]
[ "import cv2\nimport numpy as np \n\ndef face_color(image):\n\thsv=cv2.cvtColor(crop,cv2.COLOR_BGR2HSV)\n\n\tlower_red = np.array([0,70,50])\n\tupper_red = np.array([10,255,255])\n\tlowerred = np.array([170,20,50])\n\tupperred = np.array([180,255,255])\n\n\n\tmask1 = cv2.inRange(hsv, lower_red,upper_red)\n\tmask2 = cv2.inRange(hsv, lowerred,upperred)\n\tmaskr = mask1 + mask2\n\tmask(maskr, 'red')\n\n\tmasky = cv2.inRange(hsv, np.array([22,40,40]),np.array([35,255,255]))\n\tmask(masky , 'yellow')\n\n\tmasko = cv2.inRange(hsv , np.array([12,40,40]),np.array([20,255,255]))\n\tmask(masko , 'orange')\n\n\tmaskg = cv2.inRange(hsv , np.array([35,40,40]),np.array([85,255,255]))\n\tmask(maskg , 'green')\n\n\tmaskb = cv2.inRange(hsv , np.array([90,40,40]),np.array([125,255,255]))\n\tmask(maskb , 'blue')\n\n\tmaskw = cv2.inRange(hsv , np.array([0,0,215]),np.array([1,1,255]))\n\tmask(maskw , 'white')\n\n\n\n\ndef mask(image , color)\t:\n\tkernel = np.ones((5,5),np.uint8)\n\timage = cv2.erode(image,kernel,iterations = 1)\n\tcontour,_=cv2.findContours(image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n\tfor cnt in contour:\n\t\tarea=cv2.contourArea(cnt)\n\t\tif (area<1000):\n\t\t\tcontinue\n\t\tM = cv2.moments(cnt)\n\t\tcx =0\n\t\tcy=0\n\t\tif M[\"m00\"] != 0:\n\t\t\tcx=int(M[\"m10\"]/M[\"m00\"])\n\t\t\tcy=int(M[\"m01\"]/M[\"m00\"])\n\t\tp =cy//200\n\t\tq=cx//200\n\t\tif (color == 'red'):\n\t\t\tb[p][q]=1\n\t\tif (color == 'white'):\n\t\t\tb[p][q]=0\n\t\tif (color == 'green'):\n\t\t\tb[p][q]=2\n\t\tif (color == 'blue'):\n\t\t\tb[p][q]=3\n\t\tif (color == 'orange'):\n\t\t\tb[p][q]=4\n\t\tif (color == 'yellow'):\n\t\t\tb[p][q]=5\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\nface_images= [\"1.jpeg\",\"2.jpeg\",\"3.jpeg\",\"4.jpeg\",\"5.jpeg\",\"6.jpeg\"]\nfor i in face_images:\n\tf = cv2.imread(i)\n\tr = cv2.selectROI(f)\n\tf = f[int(r[1]):int(r[1]+r[3]),int(r[0]):int(r[0]+r[2])]\n\tcv2.imshow('image',f)\n\tkey = cv2.waitKey(0)\n\tif(key == 27):\n\t\tcv2.destroyAllWindows()\n\tcrop = cv2.resize(f,(600,600))\n\tb =np.zeros((3,3),dtype=int)\n\tface_color(crop)\n\tprint(b)\n\tcontent=str(b)\n\twith open('color.txt','a+') as file:\n\t\tfile.write(content)\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.zeros" ] ]
lwneal/starcraft-rl
[ "fb60b28d8b635e0666fe20276b87ece60ccaa291" ]
[ "sc2env/environments/fog_of_war.py" ]
[ "import time\nimport os\nimport random\nimport numpy as np\nimport imutil\n\nfrom pysc2.env import sc2_env\nfrom pysc2.lib import actions\n\nfrom sc2env.pysc2_util import register_map\n\n\n# These parameters are adjustable without changing the .SC2Map\nMAP_NAME = 'FogOfWar'\nMAP_SIZE = 64\nRGB_SCREEN_WIDTH = 400\nRGB_SCREEN_HEIGHT = 240\nFIVE_SECONDS = 85\n\n# These parameters are based on the .SC2Map triggers\n# Open the map in the Galaxy editor for details\nMAX_STEPS = 10\nunit_id_to_name = {\n 1922: \"CustomUnitCommandCenter\",\n 1923: \"CustomUnitPaper\",\n 1924: \"CustomUnitRock\",\n 1925: \"CustomUnitScissors\",\n}\naction_to_ability_id = {\n 1: 3771,\n 2: 3773,\n 3: 3775,\n 4: 3777,\n 5: 3779,\n 6: 3781,\n 7: 3783,\n 8: 3785,\n}\naction_to_name = {\n 0: \"No-Op\",\n 1: \"Paper (reserves)\",\n 2: \"Paper (front)\",\n 3: \"Rock (reserves)\",\n 4: \"Rock (front)\",\n 5: \"Scissors (reserves)\",\n 6: \"Scissors (front)\",\n 7: \"Scout\",\n 8: \"Counterintelligence\",\n}\n\n\nclass FogOfWarMultiplayerEnvironment():\n \"\"\"\n This environment pits two agents against each other in a game of\n incomplete information. Each agent trades off between building more\n units, and gathering information about the enemy's units.\n \"\"\"\n def __init__(self, render=False, video_filename=None, verbose=False, num_players=2):\n if video_filename:\n render = True\n self.render = render\n self.num_players = num_players\n self.sc2env = make_sc2env(num_players, render=render)\n self.video = None\n if video_filename:\n self.video = imutil.VideoMaker(filename=video_filename)\n self.verbose = verbose\n\n def reset(self):\n self.step_sc2env()\n self.steps = 0\n state, reward, done, info = self.unpack_state()\n return state\n\n def action_space(self):\n from gym.spaces.discrete import Discrete\n return Discrete(len(action_to_name))\n\n # Step: Take an action and play the game out for ~10 seconds\n def step(self, action_player1, action_player2=None):\n if self.game_over():\n print('Game is over, cannot take further actions')\n return\n\n if self.verbose:\n print('Taking action_player1={}, action_player2={} at t={}'.format(\n action_player1, action_player2, self.steps))\n self.steps += 1\n\n if self.steps >= MAX_STEPS:\n if self.verbose:\n print('Game has reached limit of {} actions: simulating endgame'.format(MAX_STEPS))\n self.step_until_endgame()\n else:\n if action_player1 > 0:\n player1_ability_id = action_to_ability_id[action_player1]\n self.use_custom_ability(player1_ability_id, 1)\n if self.num_players > 1 and action_player2 > 0:\n player2_ability_id = action_to_ability_id[action_player2]\n self.use_custom_ability(player2_ability_id, 2)\n if self.render:\n # Move forward 5 ticks at a time\n self.sc2env._step_mul = 5\n for i in range(FIVE_SECONDS // self.sc2env._step_mul):\n self.step_sc2env()\n filename = 'demo_fog_of_war_frame_output_{:06d}_{:04d}.jpg'.format(self.steps, i)\n imutil.show(self.unpack_state()[0][3], filename=filename, resize_to=(2*800,2*480))\n print('Saving file {}'.format(filename))\n else:\n # Move forward 5 seconds in time (in a single step)\n self.step_sc2env()\n\n if self.video:\n screenshot = self.unpack_state()[0][3]\n for _ in range(10):\n self.video.write_frame(screenshot)\n return self.unpack_state()\n\n # Convert the SC2Env timestep into a Gym-style tuple\n def unpack_state(self):\n obs = self.last_timestep.observation\n feature_map = np.array(obs.feature_minimap)\n feature_screen = np.array(obs.feature_screen)\n rgb_map = None\n rgb_screen = None\n if self.render:\n rgb_map 
= np.array(obs.rgb_minimap)\n rgb_screen = np.array(obs.rgb_screen)\n state = (feature_map, feature_screen, rgb_map, rgb_screen)\n\n reward = 0\n done = self.game_over()\n if done:\n reward = 1 if self.first_player_victory() else -1\n print('Finishing game at step {}'.format(self.steps))\n info = {}\n return state, reward, done, info\n\n def game_over(self):\n # SC2Env Game States\n # 0: first timestep, 1: other, 2: last timestep\n return self.sc2env._state == 2\n\n def first_player_victory(self):\n return self.sc2env._obs[0].player_result[0].result != 2\n\n def step_sc2env(self):\n if self.verbose:\n print('step_sc2env() state={}'.format(self.sc2env._state))\n # Step forward to synchronize clients\n start_time = time.time()\n for i in range(self.num_players):\n self.sc2env._controllers[i].step(count=1)\n self.sc2env._controllers[i].observe()\n\n noop = actions.FUNCTIONS.no_op()\n action_list = [noop] * self.num_players\n timesteps = self.sc2env.step(action_list)\n self.last_timestep = timesteps[0]\n if self.verbose:\n print('SC2Env step took {:.02f} sec'.format(time.time() - start_time))\n\n def step_until_endgame(self):\n if self.video:\n self.sc2env._step_mul = 3\n while not self.game_over():\n self.step_sc2env()\n if self.video:\n screenshot = self.unpack_state()[0][3]\n self.video.write_frame(screenshot)\n\n def use_custom_ability(self, ability_id, player_id=1):\n # Sends a command directly to the SC2 protobuf API\n # Can cause the pysc2 client to desync, unless step_sc2env() is called afterward\n from s2clientprotocol import sc2api_pb2\n from s2clientprotocol import common_pb2\n from s2clientprotocol import spatial_pb2\n\n def get_action_spatial(ability_id):\n target_point = common_pb2.PointI()\n target_point.x = 0\n target_point.y = 0\n\n action_spatial_unit_command = spatial_pb2.ActionSpatialUnitCommand(target_minimap_coord=target_point)\n action_spatial_unit_command.ability_id = ability_id\n\n action_spatial = spatial_pb2.ActionSpatial(unit_command=action_spatial_unit_command)\n action = sc2api_pb2.Action(action_feature_layer=action_spatial)\n return action\n\n player_action = get_action_spatial(ability_id)\n request_action = sc2api_pb2.RequestAction(actions=[player_action])\n request = sc2api_pb2.Request(action=request_action)\n\n # Bypass pysc2 and send the proto directly\n client = self.sc2env._controllers[player_id - 1]._client\n if self.verbose:\n print('Calling client.send_req for player_id {}'.format(player_id))\n if self.sc2env._state == 2:\n print('Game is over, cannot send action')\n return\n client.send_req(request)\n\n\n# Create the low-level SC2Env object, which we wrap with\n# a high level Gym-style environment\ndef make_sc2env(num_players, render=False):\n if num_players == 1:\n players = [sc2_env.Agent(sc2_env.Race.terran)]\n else:\n players = [sc2_env.Agent(sc2_env.Race.terran), sc2_env.Agent(sc2_env.Race.terran)]\n\n if render:\n interface = sc2_env.AgentInterfaceFormat(\n feature_dimensions=sc2_env.Dimensions(\n screen=(MAP_SIZE, MAP_SIZE),\n minimap=(MAP_SIZE, MAP_SIZE)\n ),\n rgb_dimensions=sc2_env.Dimensions(\n screen=(RGB_SCREEN_WIDTH, RGB_SCREEN_HEIGHT),\n minimap=(RGB_SCREEN_WIDTH, RGB_SCREEN_HEIGHT),\n ),\n action_space=actions.ActionSpace.FEATURES)\n else:\n interface = sc2_env.AgentInterfaceFormat(\n feature_dimensions=sc2_env.Dimensions(\n screen=(MAP_SIZE, MAP_SIZE),\n minimap=(MAP_SIZE, MAP_SIZE)\n ), action_space=actions.ActionSpace.FEATURES)\n\n env_args = {\n 'agent_interface_format': interface,\n 'map_name': MAP_NAME,\n 'step_mul': FIVE_SECONDS, 
# 17 is ~1 action per second\n 'players': players,\n }\n maps_dir = os.path.join(os.path.dirname(__file__), '..', 'maps')\n register_map(maps_dir, env_args['map_name'], players=num_players)\n return sc2_env.SC2Env(**env_args)\n\n\nclass FogOfWarEnvironment(FogOfWarMultiplayerEnvironment):\n \"\"\"\n The single-player version, against a scripted opponent\n \"\"\"\n def __init__(self, *args, **kwargs):\n kwargs['num_players'] = 1\n super().__init__(*args, **kwargs)\n\n" ]
[ [ "numpy.array" ] ]
Fassty/best
[ "3bb1be1cdf021596ea64107063789c940fdde9b3" ]
[ "best/model.py" ]
[ "import pymc3 as pm\nimport numpy as np\n\nimport logging\nlogger = logging.getLogger('pymc3')\nlogger.setLevel(logging.ERROR)\n\n\nclass Model:\n PRECISION_SCALING = 1e-6\n SIGMA_SCALING = 1e3\n NORMALITY_THRESHOLD = 29 # since 30 the distribution is considered normal\n\n def __init__(self, model: pm.Model):\n super().__init__()\n\n self.model = model\n\n @classmethod\n def from_custom_model(cls, model: pm.Model):\n \"\"\"\n As of now this method assumes that the posterior distribution has a name `posterior_dist` and\n that it contains 4 parameters: Normality, Mean, Std. dev, Effect size.\n\n Passing a different model is fine for sampling but the plotting will fail\n \"\"\"\n return cls(model)\n\n @classmethod\n def from_two_groups(cls, group_1: np.ndarray, group_2: np.ndarray):\n diff = group_1 - group_2\n return Model.from_one_group(diff)\n\n @classmethod\n def from_one_group(cls, diff: np.ndarray):\n diff_std = np.std(diff).item()\n diff_mean = np.mean(diff).item()\n mu = diff_mean\n tau = cls.PRECISION_SCALING / np.power(diff_std, 2)\n sigma_low = diff_std / cls.SIGMA_SCALING\n sigma_high = diff_std * cls.SIGMA_SCALING\n\n with pm.Model() as model:\n mu = pm.Normal(name='prior_mu', mu=mu, tau=tau)\n sigma = pm.Uniform(name='prior_sigma', lower=sigma_low, upper=sigma_high)\n nu = pm.Exponential(name='Normality', lam=1 / cls.NORMALITY_THRESHOLD) + 1\n lam = sigma ** -2\n\n r = pm.StudentT('posterior_dist', mu=mu, lam=lam, nu=nu, observed=diff)\n\n mean = pm.Deterministic('Mean', mu)\n std = pm.Deterministic('Std. dev', sigma)\n effect_size = pm.Deterministic('Effect size', mu / sigma)\n\n return cls(model)\n\n def sample(self, it: int = 110000):\n with self.model:\n trace = pm.sample(it, chains=1, progressbar=False)\n\n return self.model, trace\n" ]
[ [ "numpy.std", "numpy.power", "numpy.mean" ] ]
Yasushi-Shinohara/GPR_Note
[ "cfb909f659b62fed81e4e5dd904ec8233034dfc9" ]
[ "common/functions.py" ]
[ "import numpy as np\nfrom scipy.stats import norm\n\ndef PF(_xsample, _ysample, _x, _deg, normalization = True):\n if (normalization):\n xmin = np.amin(_x)\n xmax = np.amax(_x)\n xmid = 0.5*(xmin + xmax)\n _x = (_x - xmid)/(xmax - xmin)\n _xsample = (_xsample - xmid)/(xmax - xmin)\n ymin = np.amin(_ysample)\n ymax = np.amax(_ysample)\n ymid = 0.5*(ymin + ymax)\n _ysample = (_ysample - ymid)/(ymax - ymin)\n Nsample = len(_xsample)\n polyinfo = np.polyfit(_xsample, _ysample, _deg)\n p = np.poly1d(polyinfo)\n mean = p(_x)\n if (normalization):\n mean = mean*(ymax - ymin) + ymid\n return mean\n" ]
[ [ "numpy.amin", "numpy.amax", "numpy.polyfit", "numpy.poly1d" ] ]
ElLorans/PredictSocialMediaTrend
[ "527a407429c3dcead402f19d4e07149e8b1040c9" ]
[ "code/old_models/matrix_of_precision_for_var_and_file.py" ]
[ "# Obtain matrix for each column and each file\r\n# stating success & reliability of Baseline Model\r\n\r\n# Baseline Model is a model that divides a dataset in time windows of 4 weeks (the parameter can be changed when calling the function), \r\n# does linear regression on each time window's\r\n# first 3 weeks and use the regression to understand if 4 week will see an increase or a decrease.\r\n\r\n# So Baseline Model gets first 3 weeks to predict fourth, gets weeks 2-5 to predict the sixth ... and so on.\r\n\r\n# Baseline Model forecasts only whether there will be growth in the last week of each period!\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom sklearn.model_selection import train_test_split # splits data for training and testing\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\ncount_ig_csv = 'emerging_risks_doc_count_instagram.csv'\r\ncount_tw_csv = 'emerging_risks_doc_count_twitter.csv'\r\n\r\nengagement_fb_csv = 'emerging_risks_local_engagement_facebook.csv'\r\nengagement_ig_csv = 'emerging_risks_local_engagement_instagram.csv'\r\nengagement_tw_csv = 'emerging_risks_local_engagement_twitter.csv'\r\n\r\nfiles = [count_ig_csv, count_tw_csv, engagement_fb_csv, engagement_ig_csv, engagement_tw_csv]\r\n\r\ndfs = {'count_ig_csv': pd.read_csv(count_ig_csv), 'count_tw_csv': pd.read_csv(count_tw_csv), \r\n 'engagement_fb_csv': pd.read_csv(engagement_fb_csv), \r\n 'engagement_ig_csv': pd.read_csv(engagement_ig_csv), 'engagement_tw_csv': pd.read_csv(engagement_tw_csv)}\r\n\r\n\r\ndef is_up_real_target(ser: pd.Series, mean_previous_week) -> bool: \r\n \"\"\"\r\n Evaluates whether the last value of a pandas Series is higher than the first one.\r\n Goal: Understand if during the test period we have a positive or negative trend.\r\n \r\n :param ser: column of dataset to predict (REAL value of the part of the dataset selected for test).\r\n : param mean_previous_week: UNUSED right now \r\n TODO: IMPROVE. 
Maybe compare mean_previous_week to ser.mean() ??\r\n \"\"\" \r\n if ser.values[0] < ser.values[-1]:\r\n return 1\r\n \r\n return 0\r\n\r\ndef is_trending_up_real_target(ser: pd.Series) -> bool:\r\n \"\"\"\r\n Improvement of previous version: estimate sign of linear regression coefficient for real data in week 4.\r\n \"\"\"\r\n model = LinearRegression()\r\n x = ser.index.values.reshape(-1,1)\r\n y = ser\r\n model.fit(x, y)\r\n if model.coef_ > 0:\r\n return 1\r\n return 0\r\n \r\ndef is_up_predicted_target(coefficients: np.array) -> bool:\r\n \"\"\"\r\n Evaluates if slope of the linear regression is positive.\r\n Goal: Understand if during the trend period we have a positive or negative trend (calculated as slope/coefficient of a \r\n regression)\r\n :param coefficients: coefficients of regression on column of dataset used for training.\r\n \"\"\"\r\n if coefficients[0] > 0:\r\n return 1\r\n return 0\r\n\r\n\r\ndef update_eval_values(tp: int, tn: int, fp: int, fn: int, predicted_target:bool, real_target:bool):\r\n \"\"\"\r\n Updates matrix of\r\n _________________________________\r\n | True Positive | False Positive |\r\n ---------------------------------\r\n | False Negative | True Negative |\r\n _________________________________\r\n \r\n depending on the difference \r\n Goal: Considering one train/test, understand if the model is correctly predicting if the test period had a positive or negative trend.\r\n \"\"\"\r\n if predicted_target == 1 and real_target == 1:\r\n tp += 1\r\n elif predicted_target == 0 and real_target == 0:\r\n tn += 1\r\n elif predicted_target == 1 and real_target == 0:\r\n fp += 1\r\n elif predicted_target == 0 and real_target == 1:\r\n fn += 1\r\n return (tp, tn, fp, fn)\r\n\r\ndef confusion_matrix_baseline_model(column: pd.Series, step_days=7, month_length=28, evaluate_trend=True):\r\n \"\"\"\r\n Goal: apply a linear regression model to one variable in one file and return the confusion matrix\r\n _________________________________\r\n | True Positive | False Positive |\r\n ---------------------------------\r\n | False Negative | True Negative |\r\n _________________________________ \r\n \r\n The time series of the variable is split into moving time windows of a length of \"month_length\". 
To each time window, \r\n a linear regression is applied on a \"train\" (3/4 of \"month_length\") period and then tested on a \"test\" \r\n period (1/4 of \"month length\").\r\n \r\n calls is_up_real_target and is_up_predicted_target to check if regression corretcly predicts the following \"test\" period.\r\n :param evaluate_trend: if set to True, calls is_trending_up_real_target in place of is_up_real_target\r\n \"\"\"\r\n #breakpoint()\r\n tp, tn, fp, fn = 0, 0, 0, 0\r\n\r\n for day in range(0, 364, step_days):\r\n month = column[day:(day + month_length)]\r\n train, test = train_test_split(month, test_size=0.25, shuffle=False)\r\n\r\n model = LinearRegression()\r\n X_train = train.index.values.reshape(-1,1)\r\n y_train = train\r\n model.fit(X_train, y_train)\r\n \r\n last_train_period = train_test_split(train, test_size=0.64, shuffle=False)[0] # get last wk of train\r\n \r\n if evaluate_trend is False:\r\n real_target = is_up_real_target(test, last_train_period.mean())\r\n elif evaluate_trend is True:\r\n real_target = is_trending_up_real_target(test)\r\n \r\n predicted_target = is_up_predicted_target(model.coef_)\r\n\r\n tp, tn, fp, fn = update_eval_values(tp, tn, fp, fn, predicted_target, real_target)\r\n\r\n return {\"tp\": tp, \"tn\": tn, \"fp\": fp, \"fn\": fn}\r\n\r\n\r\ndef get_df_matrix(data_table: pd.DataFrame, confusion=False, accuracy=False, threshold=10, evaluate_trend=True) -> dict:\r\n \"\"\"\r\n Return the confusion matrix or the accuracy matrix for an entire df.\r\n Confusion matrix for entire df is a dict of dicts.\r\n Accuracy matrix for entire df is a dict of floats.\r\n \r\n :param threshold: min of # of values different from 0. \r\n \"\"\"\r\n if confusion == accuracy:\r\n raise TypeError('Set either confusion or accuracy to True.'\r\n '\\nUse either get_file_matrix(df, confusion=True) or get_file_matrix(df, accuracy=true)')\r\n \r\n matrix = dict()\r\n for colonna in data_table:\r\n # do regression only if at least threshold non-zero values, as column with few values get 100% prediction success!\r\n if colonna != 'date' and sum(data_table[colonna] != 0) >= threshold:\r\n conf = confusion_matrix_baseline_model(data_table[colonna], evaluate_trend=evaluate_trend)\r\n \r\n if confusion is True:\r\n matrix[colonna] = conf\r\n \r\n elif accuracy is True:\r\n matrix[colonna] = (conf['tp'] + conf['tn']) / sum(conf.values())\r\n return matrix\r\n\r\n# confusion matrixes not suited to csv export: each cell is a dictionary with the confusion matrix!\r\nconfusion_matrixes = {df: get_df_matrix(dfs[df], confusion=True) for df in dfs} # takes 30-60 seconds\r\n\r\naccuracy_matrix = {df: get_df_matrix(dfs[df], accuracy=True) for df in dfs }\r\n \r\nacc_df = pd.DataFrame(accuracy_matrix)\r\n# acc_df.to_csv(\"accuracy_matrix.csv\")\r\n\r\nsource_accuracies = acc_df.mean(axis=0) # by column\r\ntopic_accuracies = acc_df.mean(axis=1) # by row\r\nprint(source_accuracies)\r\nsource_accuracies.to_csv(\"BaselineModelAccuracyByDoc.csv\", header=[\"Baseline Model Avg Accuracy\"])\r\n\r\n# Threshold of 10 non-0 values caused a loss in accuracy of \r\n\r\n# count_ig_csv 7 % points\r\n# count_tw_csv 0 % points\r\n# engagement_fb_csv 7 % points\r\n# engagement_ig_csv 14 % points\r\n# engagement_tw_csv 0 % points\r\n\r\nprint(topic_accuracies)\r\n# topic_accuracies.to_csv(\"BaselineModelAccuracyByTopic.csv\", header=[\"Baseline Model Avg Accuracy\"])\r\n" ]
[ [ "sklearn.model_selection.train_test_split", "pandas.DataFrame", "pandas.read_csv", "sklearn.linear_model.LinearRegression" ] ]
Czworldy/GP_traj
[ "96261f39a5a322092e3a6be98938bb4601f0f746" ]
[ "dataset.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport glob\nimport random\nimport numpy as np\nfrom PIL import Image\n\nimport torch\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\n\nclass CostMapDataset(Dataset):\n def __init__(self, data_index, opt, dataset_path='/media/wang/DATASET/CARLA/town01/', evalmode=False):\n self.points_num = opt.points_num\n self.evalmode = evalmode\n self.data_index = data_index\n self.weights = []\n self.max_dist = opt.max_dist\n self.max_speed = opt.max_speed\n self.max_t = opt.max_t\n self.img_step = opt.img_step\n transforms_ = [ transforms.Resize((200, 400), Image.BICUBIC),\n transforms.ToTensor(),\n transforms.Normalize((0.5), (0.5)),\n ]\n \n self.transform = transforms.Compose(transforms_)\n self.dataset_path = dataset_path\n self.pose_dict = {}\n self.vel_dict = {}\n #self.acc_dict = {}\n self.files_dict = {}\n self.balance_dict = {}\n self.total_len = 0\n self.eval_index = None # eval mode\n self.eval_cnt = 0 # eval mode\n \n for index in self.data_index:\n self.read_pose(index)\n self.read_vel(index)\n #self.read_acc(index)\n self.read_img(index)\n self.weights.append(len(self.files_dict[index]))\n self.data_balance()\n \n def data_balance(self):\n for data_index in list(self.files_dict.keys()):\n balance_dict = []\n for index in range(300, len(self.files_dict[data_index])-120):\n #for ts in self.files_dict[data_index][300:-120]:\n ts = self.files_dict[data_index][index]\n after_ts = self.files_dict[data_index][index+20]\n x_0 = self.pose_dict[data_index][ts][0]\n y_0 = self.pose_dict[data_index][ts][1]\n yaw = np.deg2rad(self.pose_dict[data_index][ts][3])\n x, y = self.tf_pose(data_index, after_ts, yaw, x_0, y_0)\n \n #if abs(self.pose_dict[data_index][after_ts][1]) > 1.0 and abs(self.vel_dict[data_index][after_ts][1]) > 1.0:\n if abs(y) > 0.5:\n balance_dict.append(ts)\n self.balance_dict[data_index] = balance_dict\n \n def read_pose(self, index):\n file_path = self.dataset_path+str(index)+'/state/pos.txt'\n ts_dict = {}\n with open(file_path, 'r') as file:\n lines = file.readlines()\n for line in lines:\n sp_line = line.split()\n ts = sp_line[0]\n x = float(sp_line[1])\n y = float(sp_line[2])\n z = float(sp_line[3])\n yaw = float(sp_line[5])\n ts_dict[ts] = [x, y, z, yaw]\n self.pose_dict[index] = ts_dict\n \n def read_vel(self, index):\n file_path = self.dataset_path+str(index)+'/state/vel.txt'\n ts_dict = {}\n with open(file_path, 'r') as file:\n lines = file.readlines()\n for line in lines:\n sp_line = line.split()\n ts = sp_line[0]\n vx = float(sp_line[1])\n vy = float(sp_line[2])\n vz = float(sp_line[3])\n ts_dict[ts] = [vx, vy, vz]\n self.vel_dict[index] = ts_dict\n \n def read_acc(self, index):\n file_path = self.dataset_path+str(index)+'/state/acc.txt'\n ts_dict = {}\n with open(file_path, 'r') as file:\n lines = file.readlines()\n for line in lines:\n sp_line = line.split()\n ts = sp_line[0]\n ax = float(sp_line[1])\n ay = float(sp_line[2])\n az = float(sp_line[3])\n ts_dict[ts] = [ax, ay, az]\n self.acc_dict[index] = ts_dict\n \n def read_img(self, index):\n files = glob.glob(self.dataset_path+str(index)+'/ipm/*.png')\n file_names = []\n for file in files:\n file_name = file.split('/')[-1][:-4]\n file_names.append(file_name)\n file_names.sort()\n self.files_dict[index] = file_names\n\n def tf_pose(self, data_index, ts, yaw, x_0, y_0):\n x_t = self.pose_dict[data_index][ts][0]\n y_t = self.pose_dict[data_index][ts][1]\n dx = x_t - x_0\n dy = y_t - y_0\n x = np.cos(yaw)*dx + np.sin(yaw)*dy\n y = 
np.cos(yaw)*dy - np.sin(yaw)*dx\n return x, y\n \n def __len__(self):\n return 100000000000\n \n def __getitem__(self, index):\n while True:\n if self.evalmode:\n if self.eval_index == None:\n self.eval_index = random.sample(self.data_index,1)[0]\n self.cnt = 300\n data_index = self.eval_index\n file_name = self.files_dict[data_index][self.cnt]\n self.cnt += 20\n if self.cnt > len(self.files_dict[data_index])-20:\n self.eval_index = random.sample(self.data_index,1)[0]\n self.cnt = 300\n else:\n data_index = random.choices(self.data_index, self.weights)[0]\n file_name = random.sample(self.files_dict[data_index][300:-120], 1)[0]\n if random.random() < 0.5:\n file_name = random.sample(self.balance_dict[data_index], 1)[0]\n \n ts_index = self.files_dict[data_index].index(file_name)\n \n x_0 = self.pose_dict[data_index][file_name][0]\n y_0 = self.pose_dict[data_index][file_name][1]\n yaw = np.deg2rad(self.pose_dict[data_index][file_name][3])\n \n ts_list = []\n relative_t_list = []\n x_list = []\n y_list = []\n vx_list = []\n vy_list = []\n a_list = []\n for i in range(ts_index, len(self.files_dict[data_index])-100):\n ts = self.files_dict[data_index][i]\n if float(ts)-float(file_name) > self.max_t:\n break\n else:\n x_, y_ = self.tf_pose(data_index, ts, yaw, x_0, y_0)\n x_list.append(x_)\n y_list.append(y_)\n vx_ = self.vel_dict[data_index][ts][0]\n vy_ = self.vel_dict[data_index][ts][1]\n vx = np.cos(yaw)*vx_ + np.sin(yaw)*vy_\n vy = np.cos(yaw)*vy_ - np.sin(yaw)*vx_\n \"\"\"\n ax_ = self.acc_dict[data_index][ts][0]\n ay_ = self.acc_dict[data_index][ts][1]\n ax = ax_*np.cos(yaw) + ay_*np.sin(yaw)\n ay = ay_*np.cos(yaw) - ax_*np.sin(yaw)\n theta_a = np.arctan2(ay, ax)\n theta_v = np.arctan2(vy, vx)\n sign = np.sign(np.cos(theta_a-theta_v))\n a = sign*np.sqrt(ax*ax + ay*ay)\n a_list.append(a)\n \"\"\"\n vx_list.append(vx)\n vy_list.append(vy)\n ts_list.append(ts)\n relative_t_list.append(float(ts)-float(file_name))\n \n if len(ts_list) == 0:\n continue\n else:\n #ts = random.sample(ts_list, 1)[0]\n #ts_array = random.sample(ts_list, self.points_num)\n #print(len(ts_list))\n if len(ts_list) < 6*(self.points_num-1)+1:\n continue\n ts_array = [ts_list[6*item] for item in range(self.points_num)]\n \n #ts_array.sort()\n #weights = [np.exp(-0.23*(float(ts)-float(file_name))) for ts in ts_list]\n #sample_ts = random.choices(ts_list, weights)[0]\n #print(weights/sum(weights))\n break\n \n #ts = sample_ts\n # [0 ~ 1]\n # v0\n _vx_0 = self.vel_dict[data_index][file_name][0]\n _vy_0 = self.vel_dict[data_index][file_name][1]\n v_0 = np.sqrt(_vx_0*_vx_0 + _vy_0*_vy_0)\n v_0 = torch.FloatTensor([v_0])\n v0_array = [v_0]*self.points_num\n\n t_array = []\n xy_array = []\n vxy_array = []\n \"\"\"\n axy_array = []\n a_array = []\n \"\"\"\n mirror = random.random() < 0.5\n for ts in ts_array:\n t = torch.FloatTensor([float(ts)/self.max_t - float(file_name)/self.max_t])\n t_array.append(t)\n # x, y\n x, y = self.tf_pose(data_index, ts, yaw, x_0, y_0)\n if not mirror:\n xy = torch.FloatTensor([x/self.max_dist, y/self.max_dist])# [-1, 1]\n else:\n xy = torch.FloatTensor([x/self.max_dist, -y/self.max_dist])\n xy_array.append(xy)\n #print('xy', xy, t)\n # yaw_t\n #yaw_t = angle_normal(np.deg2rad(self.pose_dict[data_index][ts][3]) - yaw)\n #yaw_t = torch.FloatTensor([yaw_t/np.pi])# [-1, 1]\n \n # vx, vy\n _vx = self.vel_dict[data_index][ts][0]\n _vy = self.vel_dict[data_index][ts][1]\n vx = np.cos(yaw)*_vx + np.sin(yaw)*_vy\n vy = np.cos(yaw)*_vy - np.sin(yaw)*_vx\n if not mirror:\n 
vxy_array.append(torch.FloatTensor([vx, vy]))\n else:\n vxy_array.append(torch.FloatTensor([vx, -vy]))\n \n # ax, ay\n \"\"\"\n _ax = self.acc_dict[data_index][ts][0]\n _ay = self.acc_dict[data_index][ts][1]\n ax = _ax*np.cos(yaw) + _ay*np.sin(yaw)\n ay = _ay*np.cos(yaw) - _ax*np.sin(yaw)\n axy_array.append(torch.FloatTensor([ax, ay]))\n \n theta_a = np.arctan2(_ay, _ax)\n\n theta_v = np.arctan2(_vy, _vx)\n sign = np.sign(np.cos(theta_a-theta_v))\n a = sign*np.sqrt(ax*ax + ay*ay)\n a_array.append(a)\n \"\"\"\n t = torch.FloatTensor(t_array)\n v_0 = torch.FloatTensor(v_0/self.max_speed)\n v0_array = torch.FloatTensor(v0_array)/self.max_speed\n xy = torch.stack(xy_array)\n #print(xy)\n vxy = torch.stack(vxy_array)\n \"\"\"\n axy = torch.stack(axy_array)\n a = torch.FloatTensor(a_array)\n \"\"\"\n \n #vxy = torch.FloatTensor([vx, vy])\n #axy = torch.FloatTensor([ax, ay])\n x_list = torch.FloatTensor(x_list)\n y_list = torch.FloatTensor(y_list)\n vx_list = torch.FloatTensor(vx_list)\n vy_list = torch.FloatTensor(vy_list)\n \"\"\"\n a_list = torch.FloatTensor(a_list)\n \"\"\"\n relative_t_list = torch.FloatTensor(relative_t_list)\n \n if self.evalmode:\n return {'t': t, 'xy':xy, 'vxy':vxy, 'v_0':v_0,\n #'a_list':a_list, 'axy':axy, 'a':a,\n 'x_list':x_list, 'y_list':y_list,\n 'vx_list':vx_list, 'vy_list':vy_list,\n 'ts_list':relative_t_list}\n else:\n #return {'t': t, 'xy':xy, 'vxy':vxy, 'axy':axy, 'v_0':v_0, 'v0_array':v0_array}\n return {'t': t, 'xy':xy, 'vxy':vxy, 'v_0':v_0, 'v0_array':v0_array}\n \n\n\"\"\"\nclass CostMapDataset_ORG(Dataset):\n def __init__(self, data_index, opt, dataset_path='/media/wang/DATASET/CARLA/town01/', evalmode=False):\n self.traj_steps = 8\n self.evalmode = evalmode\n self.data_index = data_index\n self.weights = []\n self.max_dist = opt.max_dist\n self.max_t = opt.max_t\n self.img_step = opt.img_step\n transforms_ = [ transforms.Resize((200, 400), Image.BICUBIC),\n transforms.ToTensor(),\n transforms.Normalize((0.5), (0.5)),\n ]\n \n self.transform = transforms.Compose(transforms_)\n self.dataset_path = dataset_path\n self.pose_dict = {}\n self.vel_dict = {}\n self.acc_dict = {}\n self.files_dict = {}\n self.total_len = 0\n self.eval_index = None # eval mode\n self.eval_cnt = 0 # eval mode\n \n for index in self.data_index:\n self.read_pose(index)\n self.read_vel(index)\n self.read_acc(index)\n self.read_img(index)\n self.weights.append(len(self.files_dict[index]))\n \n def read_pose(self, index):\n file_path = self.dataset_path+str(index)+'/state/pos.txt'\n ts_dict = {}\n with open(file_path, 'r') as file:\n lines = file.readlines()\n for line in lines:\n sp_line = line.split()\n ts = sp_line[0]\n x = float(sp_line[1])\n y = float(sp_line[2])\n z = float(sp_line[3])\n yaw = float(sp_line[5])\n ts_dict[ts] = [x, y, z, yaw]\n self.pose_dict[index] = ts_dict\n \n def read_vel(self, index):\n file_path = self.dataset_path+str(index)+'/state/vel.txt'\n ts_dict = {}\n with open(file_path, 'r') as file:\n lines = file.readlines()\n for line in lines:\n sp_line = line.split()\n ts = sp_line[0]\n vx = float(sp_line[1])\n vy = float(sp_line[2])\n vz = float(sp_line[3])\n ts_dict[ts] = [vx, vy, vz]\n self.vel_dict[index] = ts_dict\n \n def read_acc(self, index):\n file_path = self.dataset_path+str(index)+'/state/acc.txt'\n ts_dict = {}\n with open(file_path, 'r') as file:\n lines = file.readlines()\n for line in lines:\n sp_line = line.split()\n ts = sp_line[0]\n ax = float(sp_line[1])\n ay = float(sp_line[2])\n az = float(sp_line[3])\n ts_dict[ts] = [ax, ay, az]\n 
self.acc_dict[index] = ts_dict\n \n def read_img(self, index):\n files = glob.glob(self.dataset_path+str(index)+'/ipm/*.png')\n file_names = []\n for file in files:\n file_name = file.split('/')[-1][:-4]\n file_names.append(file_name)\n file_names.sort()\n self.files_dict[index] = file_names\n\n def tf_pose(self, data_index, ts, yaw, x_0, y_0):\n x_t = self.pose_dict[data_index][ts][0]\n y_t = self.pose_dict[data_index][ts][1]\n dx = x_t - x_0\n dy = y_t - y_0\n x = np.cos(yaw)*dx + np.sin(yaw)*dy\n y = np.cos(yaw)*dy - np.sin(yaw)*dx\n return x, y\n \n def __len__(self):\n return 100000000000\n \n def __getitem__(self, index):\n while True:\n if self.evalmode:\n if self.eval_index == None:\n self.eval_index = random.sample(self.data_index,1)[0]\n self.cnt = 300\n data_index = self.eval_index\n file_name = self.files_dict[data_index][self.cnt]\n self.cnt += 20\n if self.cnt > len(self.files_dict[data_index])-50:\n self.eval_index = random.sample(self.data_index,1)[0]\n self.cnt = 300\n else:\n data_index = random.choices(self.data_index, self.weights)[0]\n file_name = random.sample(self.files_dict[data_index][300:-120], 1)[0]\n ts_index = self.files_dict[data_index].index(file_name)\n imgs = []\n try:\n for i in range(-9,1):\n _file_name = self.files_dict[data_index][ts_index + self.img_step*i]\n image_path = self.dataset_path + str(data_index)+'/ipm/'+_file_name+'.png'\n img = Image.open(image_path).convert('L')\n img = self.transform(img)\n imgs.append(img)\n except:\n print('get img error:', image_path)\n continue\n imgs = torch.stack(imgs)\n x_0 = self.pose_dict[data_index][file_name][0]\n y_0 = self.pose_dict[data_index][file_name][1]\n yaw = np.deg2rad(self.pose_dict[data_index][file_name][3])\n \n ts_list = []\n relative_t_list = []\n x_list = []\n y_list = []\n vx_list = []\n vy_list = []\n a_list = []\n for i in range(ts_index, len(self.files_dict[data_index])-100):\n ts = self.files_dict[data_index][i]\n if float(ts)-float(file_name) > self.max_t:\n break\n else:\n x_, y_ = self.tf_pose(data_index, ts, yaw, x_0, y_0)\n x_list.append(x_)\n y_list.append(y_)\n vx_ = self.vel_dict[data_index][ts][0]\n vy_ = self.vel_dict[data_index][ts][1]\n vx = np.cos(yaw)*vx_ + np.sin(yaw)*vy_\n vy = np.cos(yaw)*vy_ - np.sin(yaw)*vx_\n \n ax_ = self.acc_dict[data_index][ts][0]\n ay_ = self.acc_dict[data_index][ts][1]\n ax = ax_*np.cos(yaw) + ay_*np.sin(yaw)\n ay = ay_*np.cos(yaw) - ax_*np.sin(yaw)\n theta_a = np.arctan2(ay, ax)\n theta_v = np.arctan2(vy, vx)\n sign = np.sign(np.cos(theta_a-theta_v))\n a = sign*np.sqrt(ax*ax + ay*ay)\n a_list.append(a)\n vx_list.append(vx)\n vy_list.append(vy)\n ts_list.append(ts)\n relative_t_list.append(float(ts)-float(file_name))\n \n if len(ts_list) == 0:\n continue\n else:\n #ts = random.sample(ts_list, 1)[0]\n ts_array = random.sample(ts_list, self.traj_steps)\n #weights = [np.exp(-0.23*(float(ts)-float(file_name))) for ts in ts_list]\n #sample_ts = random.choices(ts_list, weights)[0]\n #print(weights/sum(weights))\n break\n \n #ts = sample_ts\n # [0 ~ 1]\n # v0\n _vx_0 = self.vel_dict[data_index][file_name][0]\n _vy_0 = self.vel_dict[data_index][file_name][1]\n v_0 = np.sqrt(_vx_0*_vx_0 + _vy_0*_vy_0)\n v_0 = torch.FloatTensor([v_0])\n v0_array = [v_0]*self.traj_steps\n\n t_array = []\n xy_array = []\n vxy_array = []\n axy_array = []\n a_array = []\n for ts in ts_array:\n t = torch.FloatTensor([float(ts)/self.max_t - float(file_name)/self.max_t])\n t_array.append(t)\n # x, y\n x, y = self.tf_pose(data_index, ts, yaw, x_0, y_0)\n xy = 
torch.FloatTensor([x/self.max_dist, y/self.max_dist])# [-1, 1]\n xy_array.append(xy)\n # yaw_t\n #yaw_t = angle_normal(np.deg2rad(self.pose_dict[data_index][ts][3]) - yaw)\n #yaw_t = torch.FloatTensor([yaw_t/np.pi])# [-1, 1]\n \n # vx, vy\n _vx = self.vel_dict[data_index][ts][0]\n _vy = self.vel_dict[data_index][ts][1]\n vx = np.cos(yaw)*_vx + np.sin(yaw)*_vy\n vy = np.cos(yaw)*_vy - np.sin(yaw)*_vx\n vxy_array.append(torch.FloatTensor([vx, vy]))\n \n # ax, ay\n _ax = self.acc_dict[data_index][ts][0]\n _ay = self.acc_dict[data_index][ts][1]\n ax = _ax*np.cos(yaw) + _ay*np.sin(yaw)\n ay = _ay*np.cos(yaw) - _ax*np.sin(yaw)\n axy_array.append(torch.FloatTensor([ax, ay]))\n \n theta_a = np.arctan2(_ay, _ax)\n theta_v = np.arctan2(_vy, _vx)\n sign = np.sign(np.cos(theta_a-theta_v))\n a = sign*np.sqrt(ax*ax + ay*ay)\n a_array.append(a)\n \n t = torch.FloatTensor(t_array)\n v_0 = torch.FloatTensor(v0_array)\n xy = torch.stack(xy_array)\n vxy = torch.stack(vxy_array)\n axy = torch.stack(axy_array)\n a = torch.FloatTensor(a_array)\n \n #vxy = torch.FloatTensor([vx, vy])\n #axy = torch.FloatTensor([ax, ay])\n x_list = torch.FloatTensor(x_list)\n y_list = torch.FloatTensor(y_list)\n vx_list = torch.FloatTensor(vx_list)\n vy_list = torch.FloatTensor(vy_list)\n a_list = torch.FloatTensor(a_list)\n relative_t_list = torch.FloatTensor(relative_t_list)\n \n if self.evalmode:\n return {'img': imgs, 't': t, 'xy':xy, 'vxy':vxy, 'axy':axy, 'a':a, 'v_0':v_0,\n 'a_list':a_list,\n 'x_list':x_list, 'y_list':y_list,\n 'vx_list':vx_list, 'vy_list':vy_list,\n 'ts_list':relative_t_list}\n else:\n return {'img': imgs, 't': t, 'xy':xy, 'vxy':vxy, 'axy':axy, 'a':a, 'v_0':v_0}\n\"\"\"" ]
[ [ "numpy.sin", "torch.stack", "torch.FloatTensor", "numpy.sqrt", "numpy.cos", "numpy.deg2rad" ] ]
LightSpeedAI-Labs/HPCC_FPGA
[ "d12463757b99a2e5c3b61d015825c55ce6a0034e" ]
[ "scripts/evaluation/parse_raw_to_csv.py" ]
[ "#!/usr/bin/env python3\n\nimport argparse\nimport pandas as pd\nimport os\nfrom os import path\nimport re\nimport io\nimport sys\n\n# Regular expressions for the raw output of all \nfft_regex = \"Version:\\\\s+(?P<version>.+)\\n(.*\\n)+Batch\\\\sSize\\\\s+(?P<batch_size>\\d+)\\n(.*\\n)FFT\\\\sSize\\\\s+(?P<size>\\d+)(.*\\n)+Device\\\\s+(?P<device>.+)\\n(.*\\n)+\\\\s+res\\.\\\\serror\\\\s+mach\\.\\\\seps\\n\\\\s+(?P<error>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<epsilon>(\\d|\\.|\\+|-|e)+)(.*\\n)+\\\\s+avg\\\\s+best\\n\\\\s+Time\\\\s+in\\\\s+s:\\\\s+(?P<avg_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<best_time>(\\d|\\.|\\+|-|e)+)\\n\\\\s+GFLOPS:\\\\s+(?P<avg_flops>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<best_flops>(\\d|\\.|\\+|-|e)+)\"\ngemm_regex = \"Version:\\\\s+(?P<version>.+)\\n(.*\\n)+Matrix\\\\sSize\\\\s+(?P<size>\\d+)(.*\\n)+Device\\\\s+(?P<device>.+)\\n(.*\\n)+\\\\s+norm\\.\\\\sresid\\\\s+resid\\\\s+machep\\n\\\\s+(?P<error>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<resid>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<epsilon>(\\d|\\.|\\+|-|e)+)(.*\\n)+\\\\s+best\\\\s+mean\\\\s+GFLOPS\\n\\\\s+(?P<best_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<avg_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<gflops>(\\d|\\.|\\+|-|e)+)\"\nra_regex = \"Version:\\\\s+(?P<version>.+)\\n(.*\\n)+Array\\\\sSize\\\\s+(?P<size>(\\d|\\.|\\+|-|e)+)(.*\\n)+Kernel\\\\sReplications\\\\s+(?P<replications>\\d+)(.*\\n)+Device\\\\s+(?P<device>.+)\\n(.*\\n)+Error:\\\\s+(?P<error>(\\d|\\.|\\+|-|e)+)(.*\\n)+\\\\s+best\\\\s+mean\\\\s+GUOPS\\n\\\\s+(?P<best_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<avg_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<gops>(\\d|\\.|\\+|-|e)+)\"\ntrans_regex = \"Version:\\\\s+(?P<version>.+)\\n(.*\\n)+Matrix\\\\sSize\\\\s+(?P<size>\\d+)(.*\\n)+Device\\\\s+(?P<device>.+)\\n(.*\\n)+\\\\s*Maximum\\\\serror:\\\\s+(?P<error>(\\d|\\.|\\+|-|e)+)(.*\\n)+\\\\s+total\\\\s\\\\[s\\\\]\\\\s+transfer\\\\s\\\\[s\\\\]\\\\s+calc\\\\s\\\\[s\\\\]\\\\s+calc\\\\s+FLOPS\\\\s+Mem\\\\s+\\\\[B/s\\\\]\\\\s+PCIe\\\\s+\\\\[B/s\\\\]\\n\\\\s*avg:\\\\s+(?P<avg_total_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<avg_transfer_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<avg_calc_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<avg_calc_flops>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<avg_mem_bw>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<avg_trans_bw>(\\d|\\.|\\+|-|e|inf)+)\\n\\\\s*best:\\\\s+(?P<best_total_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<best_transfer_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<best_calc_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<best_calc_flops>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<best_mem_bw>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<best_trans_bw>(\\d|\\.|\\+|-|e|inf)+)\"\nstream_regex = 
\"Version:\\\\s+(?P<version>.+)\\n(.*\\n)+Array\\\\sSize\\\\s+\\\\d+\\\\s+\\\\((?P<size>(\\d|\\.|\\+|-|e)+)(.*\\n)+Data\\\\sType\\\\s+(?P<data_type>.+)\\n(.*\\n)+Kernel\\\\sReplications\\\\s+(?P<replications>\\d+)(.*\\n)+Kernel\\\\sType\\\\s+(?P<type>.+)\\n(.*\\n)+Device\\\\s+(?P<device>.+)\\n(.*\\n)+\\\\s+Function\\\\s+Best\\\\sRate\\\\sMB/s\\\\s+Avg\\\\stime\\\\ss\\\\s+Min\\\\stime\\\\s+Max\\\\stime\\n\\\\s+Add\\\\s+(?P<add_rate>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<add_avg_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<add_min_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<add_max_time>(\\d|\\.|\\+|-|e)+)\\n\\\\s+Copy\\\\s+(?P<copy_rate>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<copy_avg_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<copy_min_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<copy_max_time>(\\d|\\.|\\+|-|e)+)\\n\\\\s+PCI\\\\sread\\\\s+(?P<pcir_rate>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<pcir_avg_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<pcir_min_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<pcir_max_time>(\\d|\\.|\\+|-|e)+)\\n\\\\s+PCI\\\\swrite\\\\s+(?P<pciw_rate>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<pciw_avg_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<pciw_min_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<pciw_max_time>(\\d|\\.|\\+|-|e)+)\\n\\\\s+Scale\\\\s+(?P<scale_rate>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<scale_avg_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<scale_min_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<scale_max_time>(\\d|\\.|\\+|-|e)+)\\n\\\\s+Triad\\\\s+(?P<triad_rate>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<triad_avg_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<triad_min_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<triad_max_time>(\\d|\\.|\\+|-|e)+)\"\nlinpack_regex = \"Version:\\\\s+(?P<version>.+)\\n(.*\\n)+Matrix\\\\sSize\\\\s+(?P<size>\\d+)(.*\\n)+Device\\\\s+(?P<device>.+)\\n(.*\\n)+\\\\s+norm\\.\\\\sresid\\\\s+resid\\\\s+machep.+\\n\\\\s+(?P<error>((\\d|\\.|\\+|-|e)+|nan))\\\\s+(?P<resid>((\\d|\\.|\\+|-|e)+|nan))\\\\s+(?P<epsilon>(\\d|\\.|\\+|-|e)+)(.*\\n)+\\\\s+Method\\\\s+\\\\s+best\\\\s+mean\\\\s+GFLOPS(\\\\s*\\n)\\\\s+total\\\\s+(?P<total_best_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<total_avg_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<total_gflops>(\\d|\\.|\\+|-|e)+)(\\\\s*\\n)\\\\s+GEFA\\\\s+(?P<lu_best_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<lu_avg_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<lu_gflops>(\\d|\\.|\\+|-|e)+)(\\\\s*\\n)\\\\s+GESL\\\\s+(?P<sl_best_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<sl_avg_time>(\\d|\\.|\\+|-|e)+)\\\\s+(?P<sl_gflops>(\\d|\\.|\\+|-|e)+)\"\n \n\ndef parse_network(file_content):\n '''\n The b_eff benchmark uses a special function since the input is just directly parsed as CS.\n\n file_content: Content of the file is parsed\n '''\n df = pd.DataFrame()\n regex = \"(?P<data>\\\\s+MSize\\\\s+looplength\\\\s+transfer\\\\s+B/s\\n(.+\\n)+)\"\n res = re.search(regex, file_content)\n if res is not None:\n d = res.groupdict()\n df = pd.read_csv(io.StringIO(d[\"data\"]), sep=\"\\\\s+\")\n else:\n return None\n return df\n\n\ndef parse_by_regex(file_content, regex, bm_name):\n '''\n Parsing function using a REGEX.\n\n file_content: Content of the file is parsed\n regex: The regular expression that is used to parse the text\n bm_name: Name of the benchmark. 
Will be used as index in the data frame\n '''\n df = pd.DataFrame()\n res = re.search(regex, file_content)\n if res is not None:\n d = res.groupdict()\n df_tmp = pd.DataFrame(d, index=[bm_name])\n df = df.append(df_tmp)\n else:\n return None\n return df\n\n# The parsing functions for each benchmark preconfigured in a map\nparse_map = {\n \"b_eff\": parse_network,\n \"FFT\": lambda f: parse_by_regex(f, fft_regex, \"FFT\"),\n \"GEMM\": lambda f: parse_by_regex(f, gemm_regex, \"GEMM\"),\n \"LINPACK\": lambda f: parse_by_regex(f, linpack_regex, \"LINPACK\"),\n \"PTRANS\": lambda f: parse_by_regex(f, trans_regex, \"PTRANS\"),\n \"RandomAccess\": lambda f: parse_by_regex(f, ra_regex, \"RandomAccess\"),\n \"STREAM\": lambda f: parse_by_regex(f, stream_regex, \"STREAM\")\n}\n\ndef parse_single_file(file_name, used_parse_functions):\n # Read file content from stdin or a given file\n if file_name == \"-\":\n file_content = \"\"\n isopen = True\n while isopen:\n t = sys.stdin.read()\n if t == \"\":\n isopen = False\n file_content += t\n else:\n with open(file_name) as f:\n file_content = f.read()\n\n # Try to parse the file content\n for b in used_parse_functions:\n df = b(file_content)\n if not df is None:\n break\n if df is None:\n print(\"File content could not be parsed: %s\" % file_name, file=sys.stderr)\n df['filename'] = file_name\n return df\n\n\ndef parse_file_or_folder(file_name, used_parse_functions):\n df = pd.DataFrame()\n if os.path.isdir(file_name):\n files_in_dir = os.listdir(file_name)\n for f in files_in_dir:\n df = df.append(parse_file_or_folder(f, used_parse_functions))\n else:\n tmp = parse_single_file(file_name, used_parse_functions)\n if not tmp is None:\n df = df.append(tmp)\n return df\n \ndef parse_raw_inputs(input_paths, recursive=True, parse_functions=parse_map):\n\n if type(input_paths) is not list:\n input_paths = list(input_paths)\n\n df = pd.DataFrame()\n for ifile in input_paths:\n if recursive:\n df = df.append(parse_file_or_folder(ifile, parse_functions))\n elif not os.path.isdir(ifile):\n df = df.append(parse_single_file(ifile, parse_functions))\n else:\n print(\"Directory was specified, but no recursive execution\", file=sys.stderr)\n return df\n\ndef parse_script_called_directly():\n # Define input parameters\n parser = argparse.ArgumentParser(description=\"Parse plain text outputs of HPCC benchmarks to CSV\")\n parser.add_argument('-i', dest=\"input_paths\", nargs='+',\n help=\"Path to a text file containing the output of an HPCC benchmark. If not given, stdin is used.\",\n default=\"-\")\n parser.add_argument('-r', dest='recursive', action='store_const',\n const=True, default=False, help=\"Recursively parse files in a folder\")\n parser.add_argument('-b', dest='benchmark', help=\"Restrict parsing just to the named benchmark. Valid names are: %s\" % list(parse_map.keys()), default=\"-\")\n parser.add_argument('-o', dest='output_file', help=\"Name of the output file. 
If not given stout is used.\", default=\"-\")\n args = parser.parse_args()\n\n # If a benchmark restriction is given just use its parsing function\n used_parse_functions = parse_map.values()\n if args.benchmark in parse_map.keys():\n used_parse_functions = [parse_map[args.benchmark]]\n\n df = parse_raw_inputs(args.input_paths, args.recursive, used_parse_functions)\n\n if df is None:\n print(\"No files could be parsed\", file=sys.stderr)\n exit(1)\n\n # Write the resulting CSV data to stdout or a file\n if args.output_file == \"-\":\n df.to_csv(sys.stdout, header=True)\n else:\n df.to_csv(args.output_file, header=True)\n\n\nif __name__ == \"__main__\":\n parse_script_called_directly()\n" ]
[ [ "pandas.DataFrame" ] ]
iqDF/tsn-pytorch
[ "c734ee040fcb245542866f0a7812cdc6e417cb2c" ]
[ "split_data.py" ]
[ "import os\nimport glob\n\nimport numpy as np\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\nimport os\nfrom pathlib import Path\nfrom multiprocessing import Pool, current_process\nfrom utils import *\n\n\n\n#------------------------\n# Helpers\n#------------------------\n\ndef get_path_label_list(\n dataset_dir\n):\n \"\"\"List paths and labels of all data in a directory.\n This assumes that all data are located at 2 levels below the directory (after class_folder)\n \"\"\"\n all_paths, all_labels = [], [] # place holder for all data in directory\n\n class_folders = filter(\n os.path.isdir,\n glob.glob(dataset_dir + '/*') # get class folders' paths\n )\n\n for label, class_folder in enumerate(class_folders): # label encode using for-loop index\n data_paths = glob.glob(class_folder + '/*') # get subfolders in each class folder\n data_labels = [label] * len(data_paths)\n \n all_paths.extend(data_paths) # add those subfolders to list\n all_labels.extend(data_labels) # add labels to list\n\n return sorted(all_paths), sorted(all_labels)\n\n\ndef train_test_split(\n all_paths,\n all_labels,\n test_size = 0.1,\n random_state = 42\n):\n all_paths = np.array(all_paths) # convert to numpy to ease multi-indexing\n all_labels = np.array(all_labels) # convert to numpy to ease multi-indexing\n\n sss = StratifiedShuffleSplit(n_splits=1, test_size=test_size, random_state=random_state)\n train_idx, test_idx = next(iter(sss.split(all_paths, all_labels)))\n\n return (\n all_paths[train_idx], all_paths[test_idx],\n all_labels[train_idx], all_labels[test_idx]\n )\n\n\ndef write_metadata_to_split_file(\n outfile,\n data_paths,\n data_labels\n):\n \"\"\"Write all metadata to a file\n \"\"\"\n to_tmpl = \"{} {}\\n\"\n to_writes = zip(data_paths, data_labels)\n\n with open(outfile, 'w+') as fp:\n # write or create mode\n for to_write in to_writes:\n fp.write(to_tmpl.format(*to_write))\n\n\n#------------------------\n# Main utilities\n#------------------------\n\ndef generate_skf_split_files(\n all_paths,\n all_labels,\n outdir,\n include_test_split = True,\n split_prefix = \"\",\n n_splits = 5,\n random_seed = 42\n):\n \"\"\"\n Generaterate Shuffled-Stratified K Fold split \n given paths and label to dataset.\n \"\"\"\n if include_test_split:\n # if True, split data to train, val, test. 
\n # otherwise just train and val split\n test_splitname = '{}_test_split.txt'.format(split_prefix)\n test_splitpath = os.path.join(outdir, test_splitname)\n\n all_train_paths, all_test_paths, all_train_labels, all_test_labels = train_test_split(\n all_paths, all_labels, test_size = 0.1, random_state = random_seed\n )\n # writing metadata for test split\n write_metadata_to_split_file(\n test_splitpath,\n all_test_paths,\n all_test_labels,\n )\n\n else:\n # here, we consider train and val as part of training \n # (a.k.a development) phase, hence the naming below for paths & label\n all_train_paths, all_train_labels = all_paths, all_labels\n\n # stratify dataset on train and validation\n skf = StratifiedShuffleSplit(\n n_splits = n_splits,\n test_size = 0.2,\n random_state = random_seed\n )\n\n for i, (train_idx, val_idx) in enumerate(skf.split(all_train_paths, all_train_labels)):\n # train split\n X_train = all_train_paths[train_idx] # X_train is list of train data path \n y_train = all_train_labels[train_idx] # y_train is list of label values\n\n train_splitname = \"{}_train_split_{}.txt\".format(split_prefix, i)\n train_splitpath = os.path.join(outdir, train_splitname) \n\n # writing metadata for training split\n write_metadata_to_split_file(\n train_splitpath,\n X_train,\n y_train,\n )\n\n # validation split\n X_val = all_train_paths[val_idx] # X_val is list of val data path\n y_val = all_train_labels[val_idx] # y_val is list of val data path\n\n val_splitname = \"{}_val_split_{}.txt\".format(split_prefix, i)\n val_splitpath = os.path.join(outdir, val_splitname) \n\n # writing metadata for validation split\n write_metadata_to_split_file(\n val_splitpath,\n X_val,\n y_val,\n )\n\n\ndef skf_split_metadata(\n dataset_dir,\n split_dir,\n n_splits,\n split_prefix,\n random_seed\n):\n \"\"\"Usage:\n > dataset_cli skf-split-metadata {YOUR_DATASET_DIR} \\\n {YOUR_SPLIT_DIR} --n_splits {NUMBER_OF_SPLITS} [--OPTIONS]\n \"\"\"\n safe_mkdir(split_dir)\n paths, labels = get_path_label_list(dataset_dir)\n\n generate_skf_split_files(\n paths,\n labels,\n split_dir,\n include_test_split = True,\n split_prefix = split_prefix,\n n_splits = n_splits,\n random_seed = random_seed\n )\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\"PyTorch implementation of Temporal Segment Networks\")\n parser.add_argument('dataset_dir', type=str)\n parser.add_argument('split_dir', type=str)\n parser.add_argument('-k', '--n_splits', type=int, default=5)\n parser.add_argument('--split_prefix', type=str, default='')\n parser.add_argument('-r', '--random_seed', type=int, default=42)\n args = parser.parse_args()\n\n skf_split_metadata(\n args.dataset_dir,\n args.split_dir,\n args.n_splits,\n args.split_prefix,\n args.random_seed\n )\n \n" ]
[ [ "sklearn.model_selection.StratifiedShuffleSplit", "numpy.array" ] ]
Jaspi10/manim
[ "cfba888ad69a7ccd4a8c4a1c80662078171b73ed" ]
[ "manimlib/mobject/coordinate_systems.py" ]
[ "import numpy as np\n\nfrom manimlib.constants import *\nfrom manimlib.mobject.functions import ParametricFunction\nfrom manimlib.mobject.geometry import Arrow\nfrom manimlib.mobject.geometry import Line\nfrom manimlib.mobject.number_line import NumberLine\nfrom manimlib.mobject.svg.tex_mobject import TexMobject\nfrom manimlib.mobject.types.vectorized_mobject import VGroup\nfrom manimlib.mobject.types.vectorized_mobject import VMobject\nfrom manimlib.utils.config_ops import digest_config\nfrom manimlib.utils.config_ops import merge_config\nfrom manimlib.utils.space_ops import angle_of_vector\n\n# TODO: There should be much more code reuse between Axes, NumberPlane and GraphScene\n\n\nclass Axes(VGroup):\n CONFIG = {\n \"propagate_style_to_family\": True,\n \"three_d\": False,\n \"number_line_config\": {\n \"color\": LIGHT_GREY,\n \"include_tip\": True,\n },\n \"x_axis_config\": {},\n \"y_axis_config\": {\n \"label_direction\": LEFT,\n },\n \"x_min\": -FRAME_X_RADIUS,\n \"x_max\": FRAME_X_RADIUS,\n \"y_min\": -FRAME_Y_RADIUS,\n \"y_max\": FRAME_Y_RADIUS,\n \"default_num_graph_points\": 100,\n }\n\n def __init__(self, **kwargs):\n VGroup.__init__(self, **kwargs)\n x_axis_config = merge_config([\n self.x_axis_config,\n {\"x_min\": self.x_min, \"x_max\": self.x_max},\n self.number_line_config,\n ])\n y_axis_config = merge_config([\n self.y_axis_config,\n {\"x_min\": self.y_min, \"x_max\": self.y_max},\n self.number_line_config,\n ])\n self.x_axis = NumberLine(**x_axis_config)\n self.y_axis = NumberLine(**y_axis_config)\n self.y_axis.rotate(90 * DEGREES, about_point=ORIGIN)\n self.add(self.x_axis, self.y_axis)\n\n def coords_to_point(self, *coords):\n origin = self.x_axis.number_to_point(0)\n result = np.array(origin)\n for axis, coord in zip(self, coords):\n result += (axis.number_to_point(coord) - origin)\n return result\n\n def point_to_coords(self, point):\n return tuple([\n axis.point_to_number(point)\n for axis in self\n if isinstance(axis, NumberLine)\n ])\n\n def get_graph(\n self, function, num_graph_points=None,\n x_min=None,\n x_max=None,\n **kwargs\n ):\n kwargs[\"fill_opacity\"] = kwargs.get(\"fill_opacity\", 0)\n kwargs[\"num_anchor_points\"] = \\\n num_graph_points or self.default_num_graph_points\n x_min = x_min or self.x_min\n x_max = x_max or self.x_max\n graph = ParametricFunction(\n lambda t: self.coords_to_point(t, function(t)),\n t_min=x_min,\n t_max=x_max,\n **kwargs\n )\n graph.underlying_function = function\n return graph\n\n def input_to_graph_point(self, x, graph):\n if hasattr(graph, \"underlying_function\"):\n return self.coords_to_point(x, graph.underlying_function(x))\n else:\n # binary search\n lh, rh = 0, 1\n while abs(lh - rh) > 0.001:\n mh = np.mean([lh, rh])\n hands = [lh, mh, rh]\n points = list(map(graph.point_from_proportion, hands))\n lx, mx, rx = list(map(self.x_axis.point_to_number, points))\n if lx <= x and rx >= x:\n if mx > x:\n rh = mh\n else:\n lh = mh\n elif lx <= x and rx <= x:\n return points[2]\n elif lx >= x and rx >= x:\n return points[0]\n elif lx > x and rx < x:\n lh, rh = rh, lh\n return points[1]\n return self.coords_to_point(x, graph.underlying_function(x))\n\n\nclass ThreeDAxes(Axes):\n CONFIG = {\n \"x_min\": -5.5,\n \"x_max\": 5.5,\n \"y_min\": -5.5,\n \"y_max\": 5.5,\n \"z_axis_config\": {},\n \"z_min\": -3.5,\n \"z_max\": 3.5,\n \"z_normal\": DOWN,\n \"num_axis_pieces\": 20,\n \"light_source\": 9 * DOWN + 7 * LEFT + 10 * OUT,\n }\n\n def __init__(self, **kwargs):\n Axes.__init__(self, **kwargs)\n z_axis = self.z_axis = 
self.get_axis(\n self.z_min, self.z_max, self.z_axis_config\n )\n z_axis.rotate(-np.pi / 2, UP, about_point=ORIGIN)\n z_axis.rotate(\n angle_of_vector(self.z_normal), OUT,\n about_point=ORIGIN\n )\n self.add(z_axis)\n\n self.add_3d_pieces()\n self.set_axis_shading()\n\n def add_3d_pieces(self):\n for axis in self:\n axis.pieces = VGroup(\n *axis.main_line.get_pieces(self.num_axis_pieces)\n )\n axis.add(axis.pieces)\n axis.main_line.set_stroke(width=0, family=False)\n axis.set_shade_in_3d(True)\n\n def set_axis_shading(self):\n def make_func(axis):\n vect = self.light_source\n return lambda: (\n axis.get_edge_center(-vect),\n axis.get_edge_center(vect),\n )\n for axis in self:\n for submob in axis.family_members_with_points():\n submob.get_gradient_start_and_end_points = make_func(axis)\n submob.get_unit_normal = lambda a: np.ones(3)\n submob.set_sheen(0.2)\n\n\nclass NumberPlane(VMobject):\n CONFIG = {\n \"color\": BLUE_D,\n \"secondary_color\": BLUE_E,\n \"axes_color\": WHITE,\n \"secondary_stroke_width\": 1,\n # TODO: Allow coordinate center of NumberPlane to not be at (0, 0)\n \"x_radius\": None,\n \"y_radius\": None,\n \"x_unit_size\": 1,\n \"y_unit_size\": 1,\n \"center_point\": ORIGIN,\n \"x_line_frequency\": 1,\n \"y_line_frequency\": 1,\n \"secondary_line_ratio\": 1,\n \"written_coordinate_height\": 0.2,\n \"propagate_style_to_family\": False,\n \"make_smooth_after_applying_functions\": True,\n }\n\n def generate_points(self):\n if self.x_radius is None:\n center_to_edge = (FRAME_X_RADIUS + abs(self.center_point[0]))\n self.x_radius = center_to_edge / self.x_unit_size\n if self.y_radius is None:\n center_to_edge = (FRAME_Y_RADIUS + abs(self.center_point[1]))\n self.y_radius = center_to_edge / self.y_unit_size\n self.axes = VMobject()\n self.main_lines = VMobject()\n self.secondary_lines = VMobject()\n tuples = [\n (\n self.x_radius,\n self.x_line_frequency,\n self.y_radius * DOWN,\n self.y_radius * UP,\n RIGHT\n ),\n (\n self.y_radius,\n self.y_line_frequency,\n self.x_radius * LEFT,\n self.x_radius * RIGHT,\n UP,\n ),\n ]\n for radius, freq, start, end, unit in tuples:\n main_range = np.arange(0, radius, freq)\n step = freq / float(freq + self.secondary_line_ratio)\n for v in np.arange(0, radius, step):\n line1 = Line(start + v * unit, end + v * unit)\n line2 = Line(start - v * unit, end - v * unit)\n if v == 0:\n self.axes.add(line1)\n elif v in main_range:\n self.main_lines.add(line1, line2)\n else:\n self.secondary_lines.add(line1, line2)\n self.add(self.secondary_lines, self.main_lines, self.axes)\n self.stretch(self.x_unit_size, 0)\n self.stretch(self.y_unit_size, 1)\n self.shift(self.center_point)\n # Put x_axis before y_axis\n y_axis, x_axis = self.axes.split()\n self.axes = VMobject(x_axis, y_axis)\n\n def init_colors(self):\n VMobject.init_colors(self)\n self.axes.set_stroke(self.axes_color, self.stroke_width)\n self.main_lines.set_stroke(self.color, self.stroke_width)\n self.secondary_lines.set_stroke(\n self.secondary_color, self.secondary_stroke_width\n )\n return self\n\n def get_center_point(self):\n return self.coords_to_point(0, 0)\n\n def coords_to_point(self, x, y):\n x, y = np.array([x, y])\n result = self.axes.get_center()\n result += x * self.get_x_unit_size() * RIGHT\n result += y * self.get_y_unit_size() * UP\n return result\n\n def point_to_coords(self, point):\n new_point = point - self.axes.get_center()\n x = new_point[0] / self.get_x_unit_size()\n y = new_point[1] / self.get_y_unit_size()\n return x, y\n\n # Does not recompute center, unit_sizes for each 
call; useful for\n # iterating over large lists of points, but does assume these\n # attributes are kept accurate. (Could alternatively have a method\n # which returns a function dynamically created after a single\n # call to each of get_center(), get_x_unit_size(), etc.)\n def point_to_coords_cheap(self, point):\n new_point = point - self.center_point\n x = new_point[0] / self.x_unit_size\n y = new_point[1] / self.y_unit_size\n return x, y\n\n def get_x_unit_size(self):\n return self.axes.get_width() / (2.0 * self.x_radius)\n\n def get_y_unit_size(self):\n return self.axes.get_height() / (2.0 * self.y_radius)\n\n def get_coordinate_labels(self, x_vals=None, y_vals=None):\n coordinate_labels = VGroup()\n if x_vals is None:\n x_vals = list(range(-int(self.x_radius), int(self.x_radius) + 1))\n if y_vals is None:\n y_vals = list(range(-int(self.y_radius), int(self.y_radius) + 1))\n for index, vals in enumerate([x_vals, y_vals]):\n num_pair = [0, 0]\n for val in vals:\n if val == 0:\n continue\n num_pair[index] = val\n point = self.coords_to_point(*num_pair)\n num = TexMobject(str(val))\n num.add_background_rectangle()\n num.set_height(\n self.written_coordinate_height\n )\n num.next_to(point, DOWN + LEFT, buff=SMALL_BUFF)\n coordinate_labels.add(num)\n self.coordinate_labels = coordinate_labels\n return coordinate_labels\n\n def get_axes(self):\n return self.axes\n\n def get_axis_labels(self, x_label=\"x\", y_label=\"y\"):\n x_axis, y_axis = self.get_axes().split()\n quads = [\n (x_axis, x_label, UP, RIGHT),\n (y_axis, y_label, RIGHT, UP),\n ]\n labels = VGroup()\n for axis, tex, vect, edge in quads:\n label = TexMobject(tex)\n label.add_background_rectangle()\n label.next_to(axis, vect)\n label.to_edge(edge)\n labels.add(label)\n self.axis_labels = labels\n return labels\n\n def add_coordinates(self, x_vals=None, y_vals=None):\n self.add(*self.get_coordinate_labels(x_vals, y_vals))\n return self\n\n def get_vector(self, coords, **kwargs):\n point = coords[0] * RIGHT + coords[1] * UP\n arrow = Arrow(ORIGIN, point, **kwargs)\n return arrow\n\n def prepare_for_nonlinear_transform(self, num_inserted_anchor_points=50):\n for mob in self.family_members_with_points():\n num_anchors = mob.get_num_anchor_points()\n if num_inserted_anchor_points > num_anchors:\n mob.insert_n_anchor_points(\n num_inserted_anchor_points - num_anchors)\n mob.make_smooth()\n return self\n\n\nclass ComplexPlane(NumberPlane):\n CONFIG = {\n \"color\": BLUE,\n \"unit_size\": 1,\n \"line_frequency\": 1,\n \"faded_line_frequency\": 0.5,\n }\n\n def __init__(self, **kwargs):\n digest_config(self, kwargs)\n kwargs.update({\n \"x_unit_size\": self.unit_size,\n \"y_unit_size\": self.unit_size,\n \"x_line_frequency\": self.line_frequency,\n \"x_faded_line_frequency\": self.faded_line_frequency,\n \"y_line_frequency\": self.line_frequency,\n \"y_faded_line_frequency\": self.faded_line_frequency,\n })\n NumberPlane.__init__(self, **kwargs)\n\n def number_to_point(self, number):\n number = complex(number)\n return self.coords_to_point(number.real, number.imag)\n\n def point_to_number(self, point):\n x, y = self.point_to_coords(point)\n return complex(x, y)\n\n def get_coordinate_labels(self, *numbers):\n # TODO: Should merge this with the code from NumberPlane.get_coordinate_labels\n\n result = VGroup()\n if len(numbers) == 0:\n numbers = list(range(-int(self.x_radius), int(self.x_radius) + 1))\n numbers += [\n complex(0, y)\n for y in range(-int(self.y_radius), int(self.y_radius) + 1)\n if y != 0\n ]\n for number in numbers:\n # if 
number == complex(0, 0):\n # continue\n point = self.number_to_point(number)\n num_str = str(number).replace(\"j\", \"i\")\n if num_str.startswith(\"0\"):\n num_str = \"0\"\n elif num_str in [\"1i\", \"-1i\"]:\n num_str = num_str.replace(\"1\", \"\")\n num_mob = TexMobject(num_str)\n num_mob.add_background_rectangle()\n num_mob.set_height(self.written_coordinate_height)\n num_mob.next_to(point, DOWN + LEFT, SMALL_BUFF)\n result.add(num_mob)\n self.coordinate_labels = result\n return result\n\n def add_coordinates(self, *numbers):\n self.coordinate_labels = self.get_coordinate_labels(*numbers)\n self.add(self.coordinate_labels)\n return self\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.arange", "numpy.mean" ] ]
orbanjerbi/auto-sklearn
[ "1c6af59ff61f1d0a3b54b16a35ffbc5d2d3828cd" ]
[ "autosklearn/util/backend.py" ]
[ "import glob\nimport os\nimport tempfile\nimport time\nimport random\nimport lockfile\nimport numpy as np\nimport pickle\nimport shutil\nfrom typing import Union\n\nfrom autosklearn.util import logging_ as logging\n\n\n__all__ = [\n 'Backend'\n]\n\n\ndef create(temporary_directory,\n output_directory,\n delete_tmp_folder_after_terminate=True,\n delete_output_folder_after_terminate=True,\n shared_mode=False):\n context = BackendContext(temporary_directory, output_directory,\n delete_tmp_folder_after_terminate,\n delete_output_folder_after_terminate,\n shared_mode)\n backend = Backend(context)\n\n return backend\n\n\ndef get_randomized_directory_names(\n temporary_directory=None,\n output_directory=None,\n):\n random_number = random.randint(0, 10000)\n pid = os.getpid()\n\n temporary_directory = (\n temporary_directory\n if temporary_directory\n else os.path.join(\n tempfile.gettempdir(),\n 'autosklearn_tmp_%d_%d' % (pid, random_number),\n )\n )\n\n output_directory = (\n output_directory\n if output_directory\n else os.path.join(\n tempfile.gettempdir(),\n 'autosklearn_output_%d_%d' % (pid, random_number),\n )\n )\n\n return temporary_directory, output_directory\n\n\nclass BackendContext(object):\n\n def __init__(self,\n temporary_directory,\n output_directory,\n delete_tmp_folder_after_terminate,\n delete_output_folder_after_terminate,\n shared_mode=False):\n\n # Check that the names of tmp_dir and output_dir is not the same.\n if temporary_directory == output_directory \\\n and temporary_directory is not None:\n raise ValueError(\"The temporary and the output directory \"\n \"must be different.\")\n\n self.delete_tmp_folder_after_terminate = delete_tmp_folder_after_terminate\n self.delete_output_folder_after_terminate = delete_output_folder_after_terminate\n self.shared_mode = shared_mode\n # attributes to check that directories were created by autosklearn.\n self._tmp_dir_created = False\n self._output_dir_created = False\n\n self.__temporary_directory, self.__output_directory = (\n get_randomized_directory_names(\n temporary_directory=temporary_directory,\n output_directory=output_directory,\n )\n )\n self._logger = logging.get_logger(__name__)\n self.create_directories()\n\n @property\n def output_directory(self):\n # make sure that tilde does not appear on the path.\n return os.path.expanduser(os.path.expandvars(self.__output_directory))\n\n @property\n def temporary_directory(self):\n # make sure that tilde does not appear on the path.\n return os.path.expanduser(os.path.expandvars(self.__temporary_directory))\n\n def create_directories(self):\n if self.shared_mode:\n # If shared_mode == True, the tmp and output dir will be shared\n # by different instances of auto-sklearn.\n try:\n os.makedirs(self.temporary_directory)\n except OSError:\n pass\n try:\n os.makedirs(self.output_directory)\n except OSError:\n pass\n\n else:\n # Exception is raised if self.temporary_directory already exists.\n os.makedirs(self.temporary_directory)\n self._tmp_dir_created = True\n\n # Exception is raised if self.output_directory already exists.\n os.makedirs(self.output_directory)\n self._output_dir_created = True\n\n\n def __del__(self):\n self.delete_directories(force=False)\n\n def delete_directories(self, force=True):\n if self.delete_output_folder_after_terminate or force:\n if self._output_dir_created is False and self.shared_mode is False:\n raise ValueError(\"Failed to delete output dir: %s \"\n \"because auto-sklearn did not create it. 
\"\n \"Please make sure that the specified output \"\n \"dir does not exist when instantiating \"\n \"auto-sklearn.\" % self.output_directory)\n try:\n shutil.rmtree(self.output_directory)\n except Exception:\n if self._logger is not None:\n self._logger.warning(\"Could not delete output dir: %s\" %\n self.output_directory)\n else:\n print(\"Could not delete output dir: %s\" %\n self.output_directory)\n\n if self.delete_tmp_folder_after_terminate or force:\n if self._tmp_dir_created is False and self.shared_mode is False:\n raise ValueError(\"Failed to delete tmp dir: % s \"\n \"because auto-sklearn did not create it. \"\n \"Please make sure that the specified tmp \"\n \"dir does not exist when instantiating \"\n \"auto-sklearn.\" % self.temporary_directory)\n try:\n shutil.rmtree(self.temporary_directory)\n except Exception:\n if self._logger is not None:\n self._logger.warning(\"Could not delete tmp dir: %s\" %\n self.temporary_directory)\n pass\n else:\n print(\"Could not delete tmp dir: %s\" %\n self.temporary_directory)\n\n\nclass Backend(object):\n \"\"\"Utility class to load and save all objects to be persisted.\n\n These are:\n * start time of auto-sklearn\n * true targets of the ensemble\n \"\"\"\n\n def __init__(self, context):\n self.logger = logging.get_logger(__name__)\n self.context = context\n\n # Create the temporary directory if it does not yet exist\n try:\n os.makedirs(self.temporary_directory)\n except Exception:\n pass\n # This does not have to exist or be specified\n if self.output_directory is not None:\n if not os.path.exists(self.output_directory):\n raise ValueError(\"Output directory %s does not exist.\" %\n self.output_directory)\n\n self.internals_directory = os.path.join(self.temporary_directory,\n \".auto-sklearn\")\n self._make_internals_directory()\n\n @property\n def output_directory(self):\n return self.context.output_directory\n\n @property\n def temporary_directory(self):\n return self.context.temporary_directory\n\n def _make_internals_directory(self):\n try:\n os.makedirs(self.internals_directory)\n except Exception as e:\n self.logger.debug(\"_make_internals_directory: %s\" % e)\n pass\n\n def _get_start_time_filename(self, seed):\n seed = int(seed)\n return os.path.join(self.internals_directory, \"start_time_%d\" % seed)\n\n def save_start_time(self, seed):\n self._make_internals_directory()\n start_time = time.time()\n\n filepath = self._get_start_time_filename(seed)\n\n if not isinstance(start_time, float):\n raise ValueError(\"Start time must be a float, but is %s.\" %\n type(start_time))\n\n with tempfile.NamedTemporaryFile('w', dir=os.path.dirname(filepath),\n delete=False) as fh:\n fh.write(str(start_time))\n tempname = fh.name\n os.rename(tempname, filepath)\n\n return filepath\n\n def load_start_time(self, seed):\n with open(self._get_start_time_filename(seed), 'r') as fh:\n start_time = float(fh.read())\n return start_time\n\n def get_smac_output_directory(self):\n return os.path.join(self.temporary_directory, 'smac3-output')\n\n def get_smac_output_directory_for_run(self, seed):\n return os.path.join(\n self.temporary_directory,\n 'smac3-output',\n 'run_%d' % seed\n )\n\n def get_smac_output_glob(self, smac_run_id: Union[str, int] = 1) -> str:\n return os.path.join(\n glob.escape(self.temporary_directory),\n 'smac3-output',\n 'run_%s' % str(smac_run_id),\n )\n\n def _get_targets_ensemble_filename(self):\n return os.path.join(self.internals_directory,\n \"true_targets_ensemble.npy\")\n\n def save_targets_ensemble(self, targets):\n 
self._make_internals_directory()\n if not isinstance(targets, np.ndarray):\n raise ValueError('Targets must be of type np.ndarray, but is %s' %\n type(targets))\n\n filepath = self._get_targets_ensemble_filename()\n\n # Try to open the file without locking it, this will reduce the\n # number of times where we erronously keep a lock on the ensemble\n # targets file although the process already was killed\n try:\n existing_targets = np.load(filepath, allow_pickle=True)\n if existing_targets.shape[0] > targets.shape[0] or \\\n (existing_targets.shape == targets.shape and\n np.allclose(existing_targets, targets)):\n\n return filepath\n except Exception:\n pass\n\n lock_path = filepath + '.lock'\n with lockfile.LockFile(lock_path):\n if os.path.exists(filepath):\n with open(filepath, 'rb') as fh:\n existing_targets = np.load(fh, allow_pickle=True)\n if existing_targets.shape[0] > targets.shape[0] or \\\n (existing_targets.shape == targets.shape and\n np.allclose(existing_targets, targets)):\n return filepath\n\n with tempfile.NamedTemporaryFile('wb', dir=os.path.dirname(\n filepath), delete=False) as fh:\n np.save(fh, targets.astype(np.float32))\n tempname = fh.name\n\n os.rename(tempname, filepath)\n\n return filepath\n\n def load_targets_ensemble(self):\n filepath = self._get_targets_ensemble_filename()\n\n lock_path = filepath + '.lock'\n with lockfile.LockFile(lock_path):\n with open(filepath, 'rb') as fh:\n targets = np.load(fh, allow_pickle=True)\n\n return targets\n\n def _get_datamanager_pickle_filename(self):\n return os.path.join(self.internals_directory, 'datamanager.pkl')\n\n def save_datamanager(self, datamanager):\n self._make_internals_directory()\n filepath = self._get_datamanager_pickle_filename()\n\n lock_path = filepath + '.lock'\n with lockfile.LockFile(lock_path):\n if not os.path.exists(filepath):\n with tempfile.NamedTemporaryFile('wb', dir=os.path.dirname(\n filepath), delete=False) as fh:\n pickle.dump(datamanager, fh, -1)\n tempname = fh.name\n os.rename(tempname, filepath)\n\n return filepath\n\n def load_datamanager(self):\n filepath = self._get_datamanager_pickle_filename()\n lock_path = filepath + '.lock'\n with lockfile.LockFile(lock_path):\n with open(filepath, 'rb') as fh:\n return pickle.load(fh)\n\n def get_model_dir(self):\n return os.path.join(self.internals_directory, 'models')\n\n def save_model(self, model, idx, seed):\n # This should fail if no models directory exists\n filepath = os.path.join(self.get_model_dir(),\n '%s.%s.model' % (seed, idx))\n\n with tempfile.NamedTemporaryFile('wb', dir=os.path.dirname(\n filepath), delete=False) as fh:\n pickle.dump(model, fh, -1)\n tempname = fh.name\n\n os.rename(tempname, filepath)\n\n def list_all_models(self, seed):\n model_directory = self.get_model_dir()\n if seed >= 0:\n model_files = glob.glob(\n os.path.join(glob.escape(model_directory), '%s.*.model' % seed)\n )\n else:\n model_files = os.listdir(model_directory)\n model_files = [os.path.join(model_directory, mf)\n for mf in model_files]\n\n return model_files\n\n def load_all_models(self, seed):\n model_files = self.list_all_models(seed)\n models = self.load_models_by_file_names(model_files)\n return models\n\n def load_models_by_file_names(self, model_file_names):\n models = dict()\n\n for model_file in model_file_names:\n # File names are like: {seed}.{index}.model\n if model_file.endswith('/'):\n model_file = model_file[:-1]\n if not model_file.endswith('.model') and \\\n not model_file.endswith('.model'):\n continue\n\n basename = 
os.path.basename(model_file)\n\n basename_parts = basename.split('.')\n seed = int(basename_parts[0])\n idx = int(basename_parts[1])\n\n models[(seed, idx)] = self.load_model_by_seed_and_id(seed, idx)\n\n return models\n\n def load_models_by_identifiers(self, identifiers):\n models = dict()\n\n for identifier in identifiers:\n seed, idx = identifier\n models[identifier] = self.load_model_by_seed_and_id(seed, idx)\n\n return models\n\n def load_model_by_seed_and_id(self, seed, idx):\n model_directory = self.get_model_dir()\n\n model_file_name = '%s.%s.model' % (seed, idx)\n model_file_path = os.path.join(model_directory, model_file_name)\n with open(model_file_path, 'rb') as fh:\n return pickle.load(fh)\n\n def get_ensemble_dir(self):\n return os.path.join(self.internals_directory, 'ensembles')\n\n def load_ensemble(self, seed):\n ensemble_dir = self.get_ensemble_dir()\n\n if not os.path.exists(ensemble_dir):\n self.logger.warning('Directory %s does not exist' % ensemble_dir)\n return None\n\n print(seed)\n if seed >= 0:\n indices_files = glob.glob(\n os.path.join(glob.escape(ensemble_dir), '%s.*.ensemble' % seed)\n )\n indices_files.sort()\n else:\n indices_files = os.listdir(ensemble_dir)\n indices_files = [os.path.join(ensemble_dir, f) for f in indices_files]\n indices_files.sort(key=lambda f: time.ctime(os.path.getmtime(f)))\n\n with open(indices_files[-1], 'rb') as fh:\n ensemble_members_run_numbers = pickle.load(fh)\n print(indices_files)\n\n return ensemble_members_run_numbers\n\n def save_ensemble(self, ensemble, idx, seed):\n try:\n os.makedirs(self.get_ensemble_dir())\n except Exception:\n pass\n\n filepath = os.path.join(\n self.get_ensemble_dir(),\n '%s.%s.ensemble' % (str(seed), str(idx).zfill(10))\n )\n with tempfile.NamedTemporaryFile('wb', dir=os.path.dirname(\n filepath), delete=False) as fh:\n pickle.dump(ensemble, fh)\n tempname = fh.name\n os.rename(tempname, filepath)\n\n def _get_prediction_output_dir(self, subset):\n return os.path.join(self.internals_directory,\n 'predictions_%s' % subset)\n\n def save_predictions_as_npy(self, predictions, subset, automl_seed, idx):\n output_dir = self._get_prediction_output_dir(subset)\n # Make sure an output directory exists\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n filepath = os.path.join(output_dir, 'predictions_%s_%s_%s.npy' %\n (subset, automl_seed, str(idx)))\n\n with tempfile.NamedTemporaryFile('wb', dir=os.path.dirname(\n filepath), delete=False) as fh:\n pickle.dump(predictions.astype(np.float32), fh, -1)\n tempname = fh.name\n os.rename(tempname, filepath)\n\n def save_predictions_as_txt(self, predictions, subset, idx, precision,\n prefix=None):\n # Write prediction scores in prescribed format\n filepath = os.path.join(self.output_directory,\n ('%s_' % prefix if prefix else '') +\n '%s_%s.predict' % (subset, str(idx)))\n\n format_string = '{:.%dg} ' % precision\n with tempfile.NamedTemporaryFile('w', dir=os.path.dirname(\n filepath), delete=False) as output_file:\n for row in predictions:\n if not isinstance(row, np.ndarray) and not isinstance(row, list):\n row = [row]\n for val in row:\n output_file.write(format_string.format(float(val)))\n output_file.write('\\n')\n tempname = output_file.name\n os.rename(tempname, filepath)\n\n def write_txt_file(self, filepath, data, name):\n lock_file = filepath + '.lock'\n with lockfile.LockFile(lock_file):\n if not os.path.exists(lock_file):\n with tempfile.NamedTemporaryFile('w', dir=os.path.dirname(\n filepath), delete=False) as fh:\n fh.write(data)\n 
tempname = fh.name\n os.rename(tempname, filepath)\n self.logger.debug('Created %s file %s' % (name, filepath))\n else:\n self.logger.debug('%s file already present %s' %\n (name, filepath))\n" ]
[ [ "numpy.allclose", "numpy.load" ] ]
VasavanThiru/geopandas
[ "4945a185885b470026e284fa35310db40bff514f" ]
[ "geopandas/tests/test_geom_methods.py" ]
[ "from __future__ import absolute_import\n\nimport string\n\nimport numpy as np\nfrom pandas import Series, DataFrame, MultiIndex\nfrom shapely.geometry import (\n Point, LinearRing, LineString, Polygon, MultiPoint)\nfrom shapely.geometry.collection import GeometryCollection\nfrom shapely.ops import unary_union\n\nfrom geopandas import GeoSeries, GeoDataFrame\nfrom geopandas.base import GeoPandasBase\n\nfrom geopandas.tests.util import (\n geom_equals, geom_almost_equals, assert_geoseries_equal)\n\nimport pytest\nfrom numpy.testing import assert_array_equal\nfrom pandas.util.testing import assert_series_equal, assert_frame_equal\n\n\ndef assert_array_dtype_equal(a, b, *args, **kwargs):\n a = np.asanyarray(a)\n b = np.asanyarray(b)\n assert a.dtype == b.dtype\n assert_array_equal(a, b, *args, **kwargs)\n\n\nclass TestGeomMethods:\n\n def setup_method(self):\n self.t1 = Polygon([(0, 0), (1, 0), (1, 1)])\n self.t2 = Polygon([(0, 0), (1, 1), (0, 1)])\n self.t3 = Polygon([(2, 0), (3, 0), (3, 1)])\n self.sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])\n self.inner_sq = Polygon([(0.25, 0.25), (0.75, 0.25), (0.75, 0.75),\n (0.25, 0.75)])\n self.nested_squares = Polygon(self.sq.boundary,\n [self.inner_sq.boundary])\n self.p0 = Point(5, 5)\n self.p3d = Point(5, 5, 5)\n self.g0 = GeoSeries([self.t1, self.t2, self.sq, self.inner_sq,\n self.nested_squares, self.p0])\n self.g1 = GeoSeries([self.t1, self.sq])\n self.g2 = GeoSeries([self.sq, self.t1])\n self.g3 = GeoSeries([self.t1, self.t2])\n self.g3.crs = {'init': 'epsg:4326', 'no_defs': True}\n self.g4 = GeoSeries([self.t2, self.t1])\n self.g4.crs = {'init': 'epsg:4326', 'no_defs': True}\n self.g_3d = GeoSeries([self.p0, self.p3d])\n self.na = GeoSeries([self.t1, self.t2, Polygon()])\n self.na_none = GeoSeries([self.t1, None])\n self.a1 = self.g1.copy()\n self.a1.index = ['A', 'B']\n self.a2 = self.g2.copy()\n self.a2.index = ['B', 'C']\n self.esb = Point(-73.9847, 40.7484)\n self.sol = Point(-74.0446, 40.6893)\n self.landmarks = GeoSeries([self.esb, self.sol],\n crs={'init': 'epsg:4326', 'no_defs': True})\n self.l1 = LineString([(0, 0), (0, 1), (1, 1)])\n self.l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1)])\n self.g5 = GeoSeries([self.l1, self.l2])\n self.g6 = GeoSeries([self.p0, self.t3])\n self.empty = GeoSeries([])\n self.empty_poly = Polygon()\n\n # Crossed lines\n self.l3 = LineString([(0, 0), (1, 1)])\n self.l4 = LineString([(0, 1), (1, 0)])\n self.crossed_lines = GeoSeries([self.l3, self.l4])\n\n # Placeholder for testing, will just drop in different geometries\n # when needed\n self.gdf1 = GeoDataFrame({'geometry': self.g1,\n 'col0': [1.0, 2.0],\n 'col1': ['geo', 'pandas']})\n self.gdf2 = GeoDataFrame({'geometry': self.g1,\n 'col3': [4, 5],\n 'col4': ['rand', 'string']})\n\n def _test_unary_real(self, op, expected, a):\n \"\"\" Tests for 'area', 'length', 'is_valid', etc. \"\"\"\n fcmp = assert_series_equal\n self._test_unary(op, expected, a, fcmp)\n\n def _test_unary_topological(self, op, expected, a):\n if isinstance(expected, GeoPandasBase):\n fcmp = assert_geoseries_equal\n else:\n def fcmp(a, b): assert a.equals(b)\n self._test_unary(op, expected, a, fcmp)\n\n def _test_binary_topological(self, op, expected, a, b, *args, **kwargs):\n \"\"\" Tests for 'intersection', 'union', 'symmetric_difference', etc. 
\"\"\"\n if isinstance(expected, GeoPandasBase):\n fcmp = assert_geoseries_equal\n else:\n def fcmp(a, b): assert geom_equals(a, b)\n\n if isinstance(b, GeoPandasBase):\n right_df = True\n else:\n right_df = False\n\n self._binary_op_test(op, expected, a, b, fcmp, True, right_df,\n *args, **kwargs)\n\n def _test_binary_real(self, op, expected, a, b, *args, **kwargs):\n fcmp = assert_series_equal\n self._binary_op_test(op, expected, a, b, fcmp, True, False,\n *args, **kwargs)\n\n def _test_binary_operator(self, op, expected, a, b):\n \"\"\"\n The operators only have GeoSeries on the left, but can have\n GeoSeries or GeoDataFrame on the right.\n\n \"\"\"\n if isinstance(expected, GeoPandasBase):\n fcmp = assert_geoseries_equal\n else:\n def fcmp(a, b): assert geom_equals(a, b)\n\n if isinstance(b, GeoPandasBase):\n right_df = True\n else:\n right_df = False\n\n self._binary_op_test(op, expected, a, b, fcmp, False, right_df)\n\n def _binary_op_test(self, op, expected, left, right, fcmp, left_df,\n right_df,\n *args, **kwargs):\n \"\"\"\n This is a helper to call a function on GeoSeries and GeoDataFrame\n arguments. For example, 'intersection' is a member of both GeoSeries\n and GeoDataFrame and can take either GeoSeries or GeoDataFrame inputs.\n This function has the ability to test all four combinations of input\n types.\n\n Parameters\n ----------\n\n expected : str\n The operation to be tested. e.g., 'intersection'\n left: GeoSeries\n right: GeoSeries\n fcmp: function\n Called with the result of the operation and expected. It should\n assert if the result is incorrect\n left_df: bool\n If the left input should also be called with a GeoDataFrame\n right_df: bool\n Indicates whether the right input should be called with a\n GeoDataFrame\n\n \"\"\"\n def _make_gdf(s):\n n = len(s)\n col1 = string.ascii_lowercase[:n]\n col2 = range(n)\n\n return GeoDataFrame({'geometry': s.values,\n 'col1': col1,\n 'col2': col2},\n index=s.index, crs=s.crs)\n\n # Test GeoSeries.op(GeoSeries)\n result = getattr(left, op)(right, *args, **kwargs)\n fcmp(result, expected)\n\n if left_df:\n # Test GeoDataFrame.op(GeoSeries)\n gdf_left = _make_gdf(left)\n result = getattr(gdf_left, op)(right, *args, **kwargs)\n fcmp(result, expected)\n\n if right_df:\n # Test GeoSeries.op(GeoDataFrame)\n gdf_right = _make_gdf(right)\n result = getattr(left, op)(gdf_right, *args, **kwargs)\n fcmp(result, expected)\n\n if left_df:\n # Test GeoDataFrame.op(GeoDataFrame)\n result = getattr(gdf_left, op)(gdf_right, *args, **kwargs)\n fcmp(result, expected)\n\n def _test_unary(self, op, expected, a, fcmp):\n # GeoSeries, (GeoSeries or geometry)\n result = getattr(a, op)\n fcmp(result, expected)\n\n # GeoDataFrame, (GeoSeries or geometry)\n gdf = self.gdf1.set_geometry(a)\n result = getattr(gdf, op)\n fcmp(result, expected)\n\n # TODO reenable for all operations once we use pyproj > 2\n # def test_crs_warning(self):\n # # operations on geometries should warn for different CRS\n # no_crs_g3 = self.g3.copy()\n # no_crs_g3.crs = None\n # with pytest.warns(UserWarning):\n # self._test_binary_topological('intersection', self.g3,\n # self.g3, no_crs_g3)\n\n def test_intersection(self):\n self._test_binary_topological('intersection', self.t1,\n self.g1, self.g2)\n self._test_binary_topological('intersection', self.empty_poly,\n self.g1, self.empty)\n\n def test_union_series(self):\n self._test_binary_topological('union', self.sq, self.g1, self.g2)\n\n def test_union_polygon(self):\n self._test_binary_topological('union', self.sq, self.g1, 
self.t2)\n\n def test_symmetric_difference_series(self):\n self._test_binary_topological('symmetric_difference', self.sq,\n self.g3, self.g4)\n\n def test_symmetric_difference_poly(self):\n expected = GeoSeries([GeometryCollection(), self.sq], crs=self.g3.crs)\n self._test_binary_topological('symmetric_difference', expected,\n self.g3, self.t1)\n\n def test_difference_series(self):\n expected = GeoSeries([GeometryCollection(), self.t2])\n self._test_binary_topological('difference', expected,\n self.g1, self.g2)\n\n def test_difference_poly(self):\n expected = GeoSeries([self.t1, self.t1])\n self._test_binary_topological('difference', expected,\n self.g1, self.t2)\n\n def test_geo_op_empty_result(self):\n l1 = LineString([(0, 0), (1, 1)])\n l2 = LineString([(2, 2), (3, 3)])\n expected = GeoSeries([GeometryCollection()])\n # binary geo resulting in empty geometry\n result = GeoSeries([l1]).intersection(l2)\n assert_geoseries_equal(result, expected)\n # binary geo empty result with right GeoSeries\n result = GeoSeries([l1]).intersection(GeoSeries([l2]))\n assert_geoseries_equal(result, expected)\n # unary geo resulting in emtpy geometry\n result = GeoSeries([GeometryCollection()]).convex_hull\n assert_geoseries_equal(result, expected)\n\n def test_boundary(self):\n l1 = LineString([(0, 0), (1, 0), (1, 1), (0, 0)])\n l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])\n expected = GeoSeries([l1, l2], index=self.g1.index, crs=self.g1.crs)\n\n self._test_unary_topological('boundary', expected, self.g1)\n\n def test_area(self):\n expected = Series(np.array([0.5, 1.0]), index=self.g1.index)\n self._test_unary_real('area', expected, self.g1)\n\n expected = Series(np.array([0.5, np.nan]), index=self.na_none.index)\n self._test_unary_real('area', expected, self.na_none)\n\n def test_bounds(self):\n # Set columns to get the order right\n expected = DataFrame({'minx': [0.0, 0.0], 'miny': [0.0, 0.0],\n 'maxx': [1.0, 1.0], 'maxy': [1.0, 1.0]},\n index=self.g1.index,\n columns=['minx', 'miny', 'maxx', 'maxy'])\n\n result = self.g1.bounds\n assert_frame_equal(expected, result)\n\n gdf = self.gdf1.set_geometry(self.g1)\n result = gdf.bounds\n assert_frame_equal(expected, result)\n\n def test_unary_union(self):\n p1 = self.t1\n p2 = Polygon([(2, 0), (3, 0), (3, 1)])\n expected = unary_union([p1, p2])\n g = GeoSeries([p1, p2])\n\n self._test_unary_topological('unary_union', expected, g)\n\n def test_contains(self):\n expected = [True, False, True, False, False, False]\n assert_array_dtype_equal(expected, self.g0.contains(self.t1))\n\n def test_length(self):\n expected = Series(np.array([2 + np.sqrt(2), 4]), index=self.g1.index)\n self._test_unary_real('length', expected, self.g1)\n\n expected = Series(\n np.array([2 + np.sqrt(2), np.nan]),\n index=self.na_none.index)\n self._test_unary_real('length', expected, self.na_none)\n\n def test_crosses(self):\n expected = [False, False, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.crosses(self.t1))\n\n expected = [False, True]\n assert_array_dtype_equal(expected, self.crossed_lines.crosses(self.l3))\n\n def test_disjoint(self):\n expected = [False, False, False, False, False, True]\n assert_array_dtype_equal(expected, self.g0.disjoint(self.t1))\n\n def test_relate(self):\n expected = Series(['212101212',\n '212101212',\n '212FF1FF2',\n '2FFF1FFF2',\n 'FF2F112F2',\n 'FF0FFF212'],\n index=self.g0.index)\n assert_array_dtype_equal(expected, self.g0.relate(self.inner_sq))\n\n expected = Series(['FF0FFF212',\n None],\n 
index=self.g6.index)\n assert_array_dtype_equal(expected, self.g6.relate(self.na_none))\n\n def test_distance(self):\n expected = Series(np.array([np.sqrt((5 - 1)**2 + (5 - 1)**2), np.nan]),\n self.na_none.index)\n assert_array_dtype_equal(expected, self.na_none.distance(self.p0))\n\n expected = Series(np.array([np.sqrt(4**2 + 4**2), np.nan]),\n self.g6.index)\n assert_array_dtype_equal(expected, self.g6.distance(self.na_none))\n\n def test_intersects(self):\n expected = [True, True, True, True, True, False]\n assert_array_dtype_equal(expected, self.g0.intersects(self.t1))\n\n expected = [True, False]\n assert_array_dtype_equal(expected, self.na_none.intersects(self.t2))\n\n expected = np.array([], dtype=bool)\n assert_array_dtype_equal(expected, self.empty.intersects(self.t1))\n\n expected = np.array([], dtype=bool)\n assert_array_dtype_equal(\n expected, self.empty.intersects(self.empty_poly))\n\n expected = [False] * 6\n assert_array_dtype_equal(expected, self.g0.intersects(self.empty_poly))\n\n def test_overlaps(self):\n expected = [True, True, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.overlaps(self.inner_sq))\n\n expected = [False, False]\n assert_array_dtype_equal(expected, self.g4.overlaps(self.t1))\n\n def test_touches(self):\n expected = [False, True, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.touches(self.t1))\n\n def test_within(self):\n expected = [True, False, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.within(self.t1))\n\n expected = [True, True, True, True, True, False]\n assert_array_dtype_equal(expected, self.g0.within(self.sq))\n\n def test_is_valid(self):\n expected = Series(np.array([True] * len(self.g1)), self.g1.index)\n self._test_unary_real('is_valid', expected, self.g1)\n\n def test_is_empty(self):\n expected = Series(np.array([False] * len(self.g1)), self.g1.index)\n self._test_unary_real('is_empty', expected, self.g1)\n\n def test_is_ring(self):\n expected = Series(np.array([True] * len(self.g1)), self.g1.index)\n self._test_unary_real('is_ring', expected, self.g1)\n\n def test_is_simple(self):\n expected = Series(np.array([True] * len(self.g1)), self.g1.index)\n self._test_unary_real('is_simple', expected, self.g1)\n\n def test_has_z(self):\n expected = Series([False, True], self.g_3d.index)\n self._test_unary_real('has_z', expected, self.g_3d)\n\n def test_xy_points(self):\n expected_x = [-73.9847, -74.0446]\n expected_y = [40.7484, 40.6893]\n\n assert_array_dtype_equal(expected_x, self.landmarks.geometry.x)\n assert_array_dtype_equal(expected_y, self.landmarks.geometry.y)\n\n def test_xy_polygons(self):\n # accessing x attribute in polygon geoseries should raise an error\n with pytest.raises(ValueError):\n _ = self.gdf1.geometry.x\n # and same for accessing y attribute in polygon geoseries\n with pytest.raises(ValueError):\n _ = self.gdf1.geometry.y\n\n def test_centroid(self):\n polygon = Polygon([(-1, -1), (1, -1), (1, 1), (-1, 1)])\n point = Point(0, 0)\n polygons = GeoSeries([polygon for i in range(3)])\n points = GeoSeries([point for i in range(3)])\n assert_geoseries_equal(polygons.centroid, points)\n\n def test_convex_hull(self):\n # the convex hull of a square should be the same as the square\n squares = GeoSeries([self.sq for i in range(3)])\n assert_geoseries_equal(squares, squares.convex_hull)\n\n def test_exterior(self):\n exp_exterior = GeoSeries([LinearRing(p.boundary) for p in self.g3])\n for expected, computed in zip(exp_exterior, self.g3.exterior):\n 
assert computed.equals(expected)\n\n def test_interiors(self):\n original = GeoSeries([self.t1, self.nested_squares])\n\n # This is a polygon with no interior.\n expected = []\n assert original.interiors[0] == expected\n # This is a polygon with an interior.\n expected = LinearRing(self.inner_sq.boundary)\n assert original.interiors[1][0].equals(expected)\n\n def test_interpolate(self):\n expected = GeoSeries([Point(0.5, 1.0), Point(0.75, 1.0)])\n self._test_binary_topological('interpolate', expected, self.g5,\n 0.75, normalized=True)\n\n expected = GeoSeries([Point(0.5, 1.0), Point(1.0, 0.5)])\n self._test_binary_topological('interpolate', expected, self.g5,\n 1.5)\n\n def test_interpolate_distance_array(self):\n expected = GeoSeries([Point(0.0, 0.75), Point(1.0, 0.5)])\n self._test_binary_topological('interpolate', expected, self.g5,\n np.array([0.75, 1.5]))\n\n expected = GeoSeries([Point(0.5, 1.0), Point(0.0, 1.0)])\n self._test_binary_topological('interpolate', expected, self.g5,\n np.array([0.75, 1.5]), normalized=True)\n\n def test_interpolate_distance_wrong_length(self):\n distances = np.array([1, 2, 3])\n with pytest.raises(ValueError):\n self.g5.interpolate(distances)\n\n def test_interpolate_distance_wrong_index(self):\n distances = Series([1, 2], index=[99, 98])\n with pytest.raises(ValueError):\n self.g5.interpolate(distances)\n\n def test_project(self):\n expected = Series([2.0, 1.5], index=self.g5.index)\n p = Point(1.0, 0.5)\n self._test_binary_real('project', expected, self.g5, p)\n\n expected = Series([1.0, 0.5], index=self.g5.index)\n self._test_binary_real('project', expected, self.g5, p,\n normalized=True)\n\n def test_translate_tuple(self):\n trans = self.sol.x - self.esb.x, self.sol.y - self.esb.y\n assert self.landmarks.translate(*trans)[0].equals(self.sol)\n\n res = self.gdf1.set_geometry(self.landmarks).translate(*trans)[0]\n assert res.equals(self.sol)\n\n def test_rotate(self):\n angle = 98\n expected = self.g4\n\n o = Point(0, 0)\n res = self.g4.rotate(angle, origin=o).rotate(-angle, origin=o)\n assert geom_almost_equals(self.g4, res)\n\n res = self.gdf1.set_geometry(self.g4).rotate(angle, origin=Point(0, 0))\n assert geom_almost_equals(expected, res.rotate(-angle, origin=o))\n\n def test_scale(self):\n expected = self.g4\n\n scale = 2., 1.\n inv = tuple(1./i for i in scale)\n\n o = Point(0, 0)\n res = self.g4.scale(*scale, origin=o).scale(*inv, origin=o)\n assert geom_almost_equals(expected, res)\n\n res = self.gdf1.set_geometry(self.g4).scale(*scale, origin=o)\n res = res.scale(*inv, origin=o)\n assert geom_almost_equals(expected, res)\n\n def test_skew(self):\n expected = self.g4\n\n skew = 45.\n o = Point(0, 0)\n\n # Test xs\n res = self.g4.skew(xs=skew, origin=o).skew(xs=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n res = self.gdf1.set_geometry(self.g4).skew(xs=skew, origin=o)\n res = res.skew(xs=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n # Test ys\n res = self.g4.skew(ys=skew, origin=o).skew(ys=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n res = self.gdf1.set_geometry(self.g4).skew(ys=skew, origin=o)\n res = res.skew(ys=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n def test_buffer(self):\n original = GeoSeries([Point(0, 0)])\n expected = GeoSeries([Polygon(((5, 0), (0, -5), (-5, 0), (0, 5),\n (5, 0)))])\n calculated = original.buffer(5, resolution=1)\n assert geom_almost_equals(expected, calculated)\n\n def test_buffer_args(self):\n args = dict(cap_style=3, join_style=2, 
mitre_limit=2.5)\n calculated_series = self.g0.buffer(10, **args)\n for original, calculated in zip(self.g0, calculated_series):\n expected = original.buffer(10, **args)\n assert calculated.equals(expected)\n\n def test_buffer_distance_array(self):\n original = GeoSeries([self.p0, self.p0])\n expected = GeoSeries(\n [Polygon(((6, 5), (5, 4), (4, 5), (5, 6), (6, 5))),\n Polygon(((10, 5), (5, 0), (0, 5), (5, 10), (10, 5))),\n ])\n calculated = original.buffer(np.array([1, 5]), resolution=1)\n assert_geoseries_equal(calculated, expected, check_less_precise=True)\n\n def test_buffer_distance_wrong_length(self):\n original = GeoSeries([self.p0, self.p0])\n distances = np.array([1, 2, 3])\n with pytest.raises(ValueError):\n original.buffer(distances)\n\n def test_buffer_distance_wrong_index(self):\n original = GeoSeries([self.p0, self.p0], index=[0, 1])\n distances = Series(data=[1, 2], index=[99, 98])\n with pytest.raises(ValueError):\n original.buffer(distances)\n\n def test_envelope(self):\n e = self.g3.envelope\n assert np.all(e.geom_equals(self.sq))\n assert isinstance(e, GeoSeries)\n assert self.g3.crs == e.crs\n\n def test_total_bounds(self):\n bbox = self.sol.x, self.sol.y, self.esb.x, self.esb.y\n assert isinstance(self.landmarks.total_bounds, np.ndarray)\n assert tuple(self.landmarks.total_bounds) == bbox\n\n df = GeoDataFrame({'geometry': self.landmarks,\n 'col1': range(len(self.landmarks))})\n assert tuple(df.total_bounds) == bbox\n\n def test_explode_geoseries(self):\n s = GeoSeries([MultiPoint([(0, 0), (1, 1)]),\n MultiPoint([(2, 2), (3, 3), (4, 4)])])\n s.index.name = 'test_index_name'\n expected_index_name = ['test_index_name', None]\n index = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 2)]\n expected = GeoSeries([Point(0, 0), Point(1, 1), Point(2, 2),\n Point(3, 3), Point(4, 4)],\n index=MultiIndex.from_tuples(\n index, names=expected_index_name))\n assert_geoseries_equal(expected, s.explode())\n\n @pytest.mark.parametrize(\"index_name\", [None, 'test'])\n def test_explode_geodataframe(self, index_name):\n s = GeoSeries([MultiPoint([Point(1, 2), Point(2, 3)]), Point(5, 5)])\n df = GeoDataFrame({'col': [1, 2], 'geometry': s})\n df.index.name = index_name\n\n test_df = df.explode()\n\n expected_s = GeoSeries([Point(1, 2), Point(2, 3), Point(5, 5)])\n expected_df = GeoDataFrame({'col': [1, 1, 2], 'geometry': expected_s})\n expected_index = MultiIndex([[0, 1], [0, 1]], # levels\n [[0, 0, 1], [0, 1, 0]], # labels/codes\n names=[index_name, None])\n expected_df = expected_df.set_index(expected_index)\n assert_frame_equal(test_df, expected_df)\n\n #\n # Test '&', '|', '^', and '-'\n # The left can only be a GeoSeries. The right hand side can be a\n # GeoSeries, GeoDataFrame or Shapely geometry\n #\n def test_intersection_operator(self):\n self._test_binary_operator('__and__', self.t1, self.g1, self.g2)\n\n def test_union_operator(self):\n self._test_binary_operator('__or__', self.sq, self.g1, self.g2)\n\n def test_union_operator_polygon(self):\n self._test_binary_operator('__or__', self.sq, self.g1, self.t2)\n\n def test_symmetric_difference_operator(self):\n self._test_binary_operator('__xor__', self.sq, self.g3, self.g4)\n\n def test_difference_series2(self):\n expected = GeoSeries([GeometryCollection(), self.t2])\n self._test_binary_operator('__sub__', expected, self.g1, self.g2)\n\n def test_difference_poly2(self):\n expected = GeoSeries([self.t1, self.t1])\n self._test_binary_operator('__sub__', expected, self.g1, self.t2)\n" ]
[ [ "numpy.array", "pandas.util.testing.assert_frame_equal", "pandas.DataFrame", "numpy.testing.assert_array_equal", "pandas.MultiIndex.from_tuples", "pandas.MultiIndex", "numpy.sqrt", "pandas.Series", "numpy.asanyarray" ] ]
hiskuDN/ensemble-adversarial-training
[ "819ad7c44d7dab4712a450e35237e9e2076cf762" ]
[ "attack_utils.py" ]
[ "import numpy as np\nimport keras.backend as K\n\nfrom tensorflow.python.platform import flags\nFLAGS = flags.FLAGS\n\n\ndef linf_loss(X1, X2):\n return np.max(np.abs(X1 - X2), axis=(1, 2, 3))\n\n\ndef gen_adv_loss(logits, y, loss='logloss', mean=False):\n \"\"\"\n Generate the loss function.\n \"\"\"\n\n if loss == 'training':\n # use the model's output instead of the true labels to avoid\n # label leaking at training time\n y = K.cast(K.equal(logits, K.max(logits, 1, keepdims=True)), \"float32\")\n y = y / K.sum(y, 1, keepdims=True)\n out = K.categorical_crossentropy(logits, y, from_logits=True)\n elif loss == 'logloss':\n out = K.categorical_crossentropy(logits, y, from_logits=True)\n else:\n raise ValueError(\"Unknown loss: {}\".format(loss))\n\n if mean:\n out = K.mean(out)\n else:\n out = K.sum(out)\n return out\n\n\ndef gen_grad(x, logits, y, loss='logloss'):\n \"\"\"\n Generate the gradient of the loss function.\n \"\"\"\n\n adv_loss = gen_adv_loss(logits, y, loss)\n\n # Define gradient of loss wrt input\n grad = K.gradients(adv_loss, [x])[0]\n return grad\n" ]
[ [ "numpy.abs" ] ]
davindratulsi/qiskit-terra
[ "0c8bb3dbf8d688590431ca79a83ba8aede84ed20" ]
[ "qiskit/visualization/gate_map.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"A module for visualizing device coupling maps\"\"\"\n\nimport math\nimport numpy as np\nfrom qiskit.exceptions import QiskitError\nfrom .matplotlib import HAS_MATPLOTLIB\nfrom .exceptions import VisualizationError\n\n\nclass _GraphDist():\n \"\"\"Transform the circles properly for non-square axes.\n \"\"\"\n\n def __init__(self, size, ax, x=True):\n self.size = size\n self.ax = ax # pylint: disable=invalid-name\n self.x = x\n\n @property\n def dist_real(self):\n \"\"\"Compute distance.\n \"\"\"\n x0, y0 = self.ax.transAxes.transform(\n (0, 0))\n x1, y1 = self.ax.transAxes.transform(\n (1, 1))\n value = x1 - x0 if self.x else y1 - y0\n return value\n\n @property\n def dist_abs(self):\n \"\"\"Distance abs\n \"\"\"\n bounds = self.ax.get_xlim() if self.x else self.ax.get_ylim()\n return bounds[0] - bounds[1]\n\n @property\n def value(self):\n \"\"\"Return value.\n \"\"\"\n return (self.size / self.dist_real) * self.dist_abs\n\n def __mul__(self, obj):\n return self.value * obj\n\n\ndef plot_gate_map(backend, figsize=None,\n plot_directed=False,\n label_qubits=True,\n qubit_size=24,\n line_width=4,\n font_size=12,\n qubit_color=None,\n qubit_labels=None,\n line_color=None,\n font_color='w',\n ax=None):\n \"\"\"Plots the gate map of a device.\n\n Args:\n backend (BaseBackend): A backend instance,\n figsize (tuple): Output figure size (wxh) in inches.\n plot_directed (bool): Plot directed coupling map.\n label_qubits (bool): Label the qubits.\n qubit_size (float): Size of qubit marker.\n line_width (float): Width of lines.\n font_size (int): Font size of qubit labels.\n qubit_color (list): A list of colors for the qubits\n qubit_labels (list): A list of qubit labels\n line_color (list): A list of colors for each line from coupling_map.\n font_color (str): The font color for the qubit labels.\n ax (Axes): A Matplotlib axes instance.\n\n Returns:\n Figure: A Matplotlib figure instance.\n\n Raises:\n QiskitError: if tried to pass a simulator.\n ImportError: if matplotlib not installed.\n\n Example:\n .. jupyter-execute::\n :hide-code:\n :hide-output:\n\n from qiskit.test.ibmq_mock import mock_get_backend\n mock_get_backend('FakeVigo')\n\n .. jupyter-execute::\n\n from qiskit import QuantumCircuit, execute, IBMQ\n from qiskit.visualization import plot_gate_map\n %matplotlib inline\n\n provider = IBMQ.load_account()\n accountProvider = IBMQ.get_provider(hub='ibm-q')\n backend = accountProvider.get_backend('ibmq_vigo')\n plot_gate_map(backend)\n \"\"\"\n if not HAS_MATPLOTLIB:\n raise ImportError('Must have Matplotlib installed. 
To install, '\n 'run \"pip install matplotlib\".')\n from matplotlib import get_backend\n import matplotlib.pyplot as plt\n import matplotlib.patches as mpatches\n\n if backend.configuration().simulator:\n raise QiskitError('Requires a device backend, not simulator.')\n\n input_axes = False\n if ax:\n input_axes = True\n\n mpl_data = {}\n\n mpl_data[1] = [[0, 0]]\n\n mpl_data[5] = [[1, 0], [0, 1], [1, 1], [1, 2], [2, 1]]\n\n mpl_data[7] = [[0, 0], [0, 1], [0, 2],\n [1, 1],\n [2, 0], [2, 1], [2, 2]]\n\n mpl_data[20] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],\n [1, 0], [1, 1], [1, 2], [1, 3], [1, 4],\n [2, 0], [2, 1], [2, 2], [2, 3], [2, 4],\n [3, 0], [3, 1], [3, 2], [3, 3], [3, 4]]\n\n mpl_data[15] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],\n [0, 5], [0, 6], [1, 7], [1, 6], [1, 5],\n [1, 4], [1, 3], [1, 2], [1, 1], [1, 0]]\n\n mpl_data[16] = [[1, 0], [1, 1], [2, 1], [3, 1], [1, 2],\n [3, 2], [0, 3], [1, 3], [3, 3], [4, 3],\n [1, 4], [3, 4], [1, 5], [2, 5], [3, 5], [1, 6]]\n\n mpl_data[27] = [[1, 0], [1, 1], [2, 1], [3, 1], [1, 2],\n [3, 2], [0, 3], [1, 3], [3, 3], [4, 3],\n [1, 4], [3, 4], [1, 5], [2, 5], [3, 5],\n [1, 6], [3, 6], [0, 7], [1, 7], [3, 7],\n [4, 7], [1, 8], [3, 8], [1, 9], [2, 9],\n [3, 9], [3, 10]]\n\n mpl_data[28] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6],\n [1, 2], [1, 6],\n [2, 0], [2, 1], [2, 2], [2, 3], [2, 4],\n [2, 5], [2, 6], [2, 7], [2, 8],\n [3, 0], [3, 4], [3, 8],\n [4, 0], [4, 1], [4, 2], [4, 3], [4, 4],\n [4, 5], [4, 6], [4, 7], [4, 8]]\n\n mpl_data[53] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6],\n [1, 2], [1, 6],\n [2, 0], [2, 1], [2, 2], [2, 3], [2, 4],\n [2, 5], [2, 6], [2, 7], [2, 8],\n [3, 0], [3, 4], [3, 8],\n [4, 0], [4, 1], [4, 2], [4, 3], [4, 4],\n [4, 5], [4, 6], [4, 7], [4, 8],\n [5, 2], [5, 6],\n [6, 0], [6, 1], [6, 2], [6, 3], [6, 4],\n [6, 5], [6, 6], [6, 7], [6, 8],\n [7, 0], [7, 4], [7, 8],\n [8, 0], [8, 1], [8, 2], [8, 3], [8, 4],\n [8, 5], [8, 6], [8, 7], [8, 8],\n [9, 2], [9, 6]]\n\n mpl_data[65] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],\n [0, 5], [0, 6], [0, 7], [0, 8], [0, 9],\n [1, 0], [1, 4], [1, 8],\n [2, 0], [2, 1], [2, 2], [2, 3], [2, 4],\n [2, 5], [2, 6], [2, 7], [2, 8], [2, 9], [2, 10],\n [3, 2], [3, 6], [3, 10],\n [4, 0], [4, 1], [4, 2], [4, 3], [4, 4],\n [4, 5], [4, 6], [4, 7], [4, 8], [4, 9], [4, 10],\n [5, 0], [5, 4], [5, 8],\n [6, 0], [6, 1], [6, 2], [6, 3], [6, 4],\n [6, 5], [6, 6], [6, 7], [6, 8], [6, 9], [6, 10],\n [7, 2], [7, 6], [7, 10],\n [8, 1], [8, 2], [8, 3], [8, 4],\n [8, 5], [8, 6], [8, 7], [8, 8], [8, 9], [8, 10]]\n\n config = backend.configuration()\n num_qubits = config.n_qubits\n cmap = config.coupling_map\n\n if qubit_labels is None:\n qubit_labels = list(range(num_qubits))\n else:\n if len(qubit_labels) != num_qubits:\n raise QiskitError('Length of qubit labels '\n 'does not equal number '\n 'of qubits.')\n\n if num_qubits in mpl_data.keys():\n grid_data = mpl_data[num_qubits]\n else:\n if not input_axes:\n fig, ax = plt.subplots(figsize=(5, 5))\n ax.axis('off')\n return fig\n\n x_max = max([d[1] for d in grid_data])\n y_max = max([d[0] for d in grid_data])\n max_dim = max(x_max, y_max)\n\n if figsize is None:\n if num_qubits == 1 or (x_max / max_dim > 0.33 and y_max / max_dim > 0.33):\n figsize = (5, 5)\n else:\n figsize = (9, 3)\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n ax.axis('off')\n\n # set coloring\n if qubit_color is None:\n qubit_color = ['#648fff'] * config.n_qubits\n if line_color is None:\n line_color = ['#648fff'] * len(cmap) if cmap else []\n\n # Add lines for couplings\n if 
num_qubits != 1:\n for ind, edge in enumerate(cmap):\n is_symmetric = False\n if edge[::-1] in cmap:\n is_symmetric = True\n y_start = grid_data[edge[0]][0]\n x_start = grid_data[edge[0]][1]\n y_end = grid_data[edge[1]][0]\n x_end = grid_data[edge[1]][1]\n\n if is_symmetric:\n if y_start == y_end:\n x_end = (x_end - x_start) / 2 + x_start\n\n elif x_start == x_end:\n y_end = (y_end - y_start) / 2 + y_start\n\n else:\n x_end = (x_end - x_start) / 2 + x_start\n y_end = (y_end - y_start) / 2 + y_start\n ax.add_artist(plt.Line2D([x_start, x_end], [-y_start, -y_end],\n color=line_color[ind], linewidth=line_width,\n zorder=0))\n if plot_directed:\n dx = x_end - x_start\n dy = y_end - y_start\n if is_symmetric:\n x_arrow = x_start + dx * 0.95\n y_arrow = -y_start - dy * 0.95\n dx_arrow = dx * 0.01\n dy_arrow = -dy * 0.01\n head_width = 0.15\n else:\n x_arrow = x_start + dx * 0.5\n y_arrow = -y_start - dy * 0.5\n dx_arrow = dx * 0.2\n dy_arrow = -dy * 0.2\n head_width = 0.2\n ax.add_patch(mpatches.FancyArrow(x_arrow,\n y_arrow,\n dx_arrow,\n dy_arrow,\n head_width=head_width,\n length_includes_head=True,\n edgecolor=None,\n linewidth=0,\n facecolor=line_color[ind],\n zorder=1))\n\n # Add circles for qubits\n for var, idx in enumerate(grid_data):\n _idx = [idx[1], -idx[0]]\n width = _GraphDist(qubit_size, ax, True)\n height = _GraphDist(qubit_size, ax, False)\n ax.add_artist(mpatches.Ellipse(\n _idx, width, height, color=qubit_color[var], zorder=1))\n if label_qubits:\n ax.text(*_idx, s=qubit_labels[var],\n horizontalalignment='center',\n verticalalignment='center',\n color=font_color, size=font_size, weight='bold')\n ax.set_xlim([-1, x_max + 1])\n ax.set_ylim([-(y_max + 1), 1])\n if not input_axes:\n if get_backend() in ['module://ipykernel.pylab.backend_inline',\n 'nbAgg']:\n plt.close(fig)\n return fig\n return None\n\n\ndef plot_circuit_layout(circuit, backend, view='virtual'):\n \"\"\"Plot the layout of a circuit transpiled for a given\n target backend.\n\n Args:\n circuit (QuantumCircuit): Input quantum circuit.\n backend (BaseBackend): Target backend.\n view (str): Layout view: either 'virtual' or 'physical'.\n\n Returns:\n Figure: A matplotlib figure showing layout.\n\n Raises:\n QiskitError: Invalid view type given.\n VisualizationError: Circuit has no layout attribute.\n\n Example:\n .. jupyter-execute::\n :hide-code:\n :hide-output:\n\n from qiskit.test.ibmq_mock import mock_get_backend\n mock_get_backend('FakeVigo')\n\n .. jupyter-execute::\n\n import numpy as np\n from qiskit import QuantumCircuit, IBMQ, transpile\n from qiskit.visualization import plot_histogram, plot_gate_map, plot_circuit_layout\n from qiskit.tools.monitor import job_monitor\n import matplotlib.pyplot as plt\n %matplotlib inline\n\n IBMQ.load_account()\n\n ghz = QuantumCircuit(3, 3)\n ghz.h(0)\n for idx in range(1,3):\n ghz.cx(0,idx)\n ghz.measure(range(3), range(3))\n\n provider = IBMQ.get_provider(hub='ibm-q')\n backend = provider.get_backend('ibmq_vigo')\n new_circ_lv3 = transpile(ghz, backend=backend, optimization_level=3)\n plot_circuit_layout(new_circ_lv3, backend)\n \"\"\"\n if circuit._layout is None:\n raise QiskitError('Circuit has no layout. 
'\n 'Perhaps it has not been transpiled.')\n\n num_qubits = backend.configuration().n_qubits\n\n qubits = []\n qubit_labels = [None] * num_qubits\n\n if view == 'virtual':\n for key, val in circuit._layout.get_virtual_bits().items():\n if key.register.name != 'ancilla':\n qubits.append(val)\n qubit_labels[val] = key.index\n\n elif view == 'physical':\n for key, val in circuit._layout.get_physical_bits().items():\n if val.register.name != 'ancilla':\n qubits.append(key)\n qubit_labels[key] = key\n\n else:\n raise VisualizationError(\"Layout view must be 'virtual' or 'physical'.\")\n\n qcolors = ['#648fff'] * num_qubits\n for k in qubits:\n qcolors[k] = 'k'\n\n cmap = backend.configuration().coupling_map\n\n lcolors = ['#648fff'] * len(cmap)\n\n for idx, edge in enumerate(cmap):\n if edge[0] in qubits and edge[1] in qubits:\n lcolors[idx] = 'k'\n\n fig = plot_gate_map(backend,\n qubit_color=qcolors,\n qubit_labels=qubit_labels,\n line_color=lcolors)\n return fig\n\n\ndef plot_error_map(backend, figsize=(12, 9), show_title=True):\n \"\"\"Plots the error map of a given backend.\n\n Args:\n backend (IBMQBackend): Given backend.\n figsize (tuple): Figure size in inches.\n show_title (bool): Show the title or not.\n\n Returns:\n Figure: A matplotlib figure showing error map.\n\n Raises:\n VisualizationError: Input is not IBMQ backend.\n ImportError: If seaborn is not installed\n\n Example:\n .. jupyter-execute::\n :hide-code:\n :hide-output:\n\n from qiskit.test.ibmq_mock import mock_get_backend\n mock_get_backend('FakeVigo')\n\n .. jupyter-execute::\n\n from qiskit import QuantumCircuit, execute, IBMQ\n from qiskit.visualization import plot_error_map\n %matplotlib inline\n\n IBMQ.load_account()\n provider = IBMQ.get_provider(hub='ibm-q')\n backend = provider.get_backend('ibmq_vigo')\n plot_error_map(backend)\n \"\"\"\n try:\n import seaborn as sns\n except ImportError as ex:\n raise ImportError('Must have seaborn installed to use plot_error_map. '\n 'To install, run \"pip install seaborn\".') from ex\n if not HAS_MATPLOTLIB:\n raise ImportError('Must have Matplotlib installed. 
To install, '\n 'run \"pip install matplotlib\".')\n import matplotlib\n from matplotlib import get_backend\n import matplotlib.pyplot as plt\n import matplotlib.gridspec as gridspec\n from matplotlib import ticker\n\n color_map = sns.cubehelix_palette(reverse=True, as_cmap=True)\n\n props = backend.properties().to_dict()\n config = backend.configuration().to_dict()\n\n num_qubits = config['n_qubits']\n\n # U2 error rates\n single_gate_errors = [0]*num_qubits\n for gate in props['gates']:\n if gate['gate'] == 'u2':\n _qubit = gate['qubits'][0]\n single_gate_errors[_qubit] = gate['parameters'][0]['value']\n\n # Convert to percent\n single_gate_errors = 100 * np.asarray(single_gate_errors)\n avg_1q_err = np.mean(single_gate_errors)\n\n single_norm = matplotlib.colors.Normalize(\n vmin=min(single_gate_errors), vmax=max(single_gate_errors))\n q_colors = [color_map(single_norm(err)) for err in single_gate_errors]\n\n cmap = config['coupling_map']\n\n directed = False\n line_colors = []\n if cmap:\n directed = False\n if num_qubits < 20:\n for edge in cmap:\n if not [edge[1], edge[0]] in cmap:\n directed = True\n break\n\n cx_errors = []\n for line in cmap:\n for item in props['gates']:\n if item['qubits'] == line:\n cx_errors.append(item['parameters'][0]['value'])\n break\n else:\n continue\n\n # Convert to percent\n cx_errors = 100 * np.asarray(cx_errors)\n avg_cx_err = np.mean(cx_errors)\n\n cx_norm = matplotlib.colors.Normalize(\n vmin=min(cx_errors), vmax=max(cx_errors))\n line_colors = [color_map(cx_norm(err)) for err in cx_errors]\n\n # Measurement errors\n\n read_err = []\n\n for qubit in range(num_qubits):\n for item in props['qubits'][qubit]:\n if item['name'] == 'readout_error':\n read_err.append(item['value'])\n\n read_err = 100 * np.asarray(read_err)\n avg_read_err = np.mean(read_err)\n max_read_err = np.max(read_err)\n\n fig = plt.figure(figsize=figsize)\n gridspec.GridSpec(nrows=2, ncols=3)\n\n grid_spec = gridspec.GridSpec(12, 12, height_ratios=[1] * 11 + [0.5],\n width_ratios=[2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2])\n\n left_ax = plt.subplot(grid_spec[2:10, :1])\n main_ax = plt.subplot(grid_spec[:11, 1:11])\n right_ax = plt.subplot(grid_spec[2:10, 11:])\n bleft_ax = plt.subplot(grid_spec[-1, :5])\n if cmap:\n bright_ax = plt.subplot(grid_spec[-1, 7:])\n\n plot_gate_map(backend, qubit_color=q_colors,\n line_color=line_colors,\n qubit_size=28,\n line_width=5,\n plot_directed=directed,\n ax=main_ax)\n main_ax.axis('off')\n main_ax.set_aspect(1)\n if cmap:\n single_cb = matplotlib.colorbar.ColorbarBase(bleft_ax, cmap=color_map,\n norm=single_norm,\n orientation='horizontal')\n tick_locator = ticker.MaxNLocator(nbins=5)\n single_cb.locator = tick_locator\n single_cb.update_ticks()\n single_cb.update_ticks()\n bleft_ax.set_title('H error rate (%) [Avg. = {}]'.format(round(avg_1q_err, 3)))\n\n if cmap is None:\n bleft_ax.axis('off')\n bleft_ax.set_title('H error rate (%) = {}'.format(round(avg_1q_err, 3)))\n\n if cmap:\n cx_cb = matplotlib.colorbar.ColorbarBase(bright_ax, cmap=color_map,\n norm=cx_norm,\n orientation='horizontal')\n tick_locator = ticker.MaxNLocator(nbins=5)\n cx_cb.locator = tick_locator\n cx_cb.update_ticks()\n bright_ax.set_title('CNOT error rate (%) [Avg. 
= {}]'.format(round(avg_cx_err, 3)))\n\n if num_qubits < 10:\n num_left = num_qubits\n num_right = 0\n else:\n num_left = math.ceil(num_qubits / 2)\n num_right = num_qubits - num_left\n\n left_ax.barh(range(num_left), read_err[:num_left], align='center', color='#DDBBBA')\n left_ax.axvline(avg_read_err, linestyle='--', color='#212121')\n left_ax.set_yticks(range(num_left))\n left_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])\n left_ax.set_yticklabels([str(kk) for kk in range(num_left)], fontsize=12)\n left_ax.invert_yaxis()\n left_ax.set_title('Readout Error (%)', fontsize=12)\n\n for spine in left_ax.spines.values():\n spine.set_visible(False)\n\n if num_right:\n right_ax.barh(range(num_left, num_qubits), read_err[num_left:],\n align='center', color='#DDBBBA')\n right_ax.axvline(avg_read_err, linestyle='--', color='#212121')\n right_ax.set_yticks(range(num_left, num_qubits))\n right_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])\n right_ax.set_yticklabels([str(kk) for kk in range(num_left, num_qubits)],\n fontsize=12)\n right_ax.invert_yaxis()\n right_ax.invert_xaxis()\n right_ax.yaxis.set_label_position(\"right\")\n right_ax.yaxis.tick_right()\n right_ax.set_title('Readout Error (%)', fontsize=12)\n else:\n right_ax.axis('off')\n\n for spine in right_ax.spines.values():\n spine.set_visible(False)\n\n if show_title:\n fig.suptitle('{name} Error Map'.format(name=backend.name()),\n fontsize=24, y=0.9)\n if get_backend() in ['module://ipykernel.pylab.backend_inline',\n 'nbAgg']:\n plt.close(fig)\n return fig\n" ]
[ [ "numpy.max", "matplotlib.get_backend", "numpy.asarray", "matplotlib.ticker.MaxNLocator", "matplotlib.patches.FancyArrow", "matplotlib.pyplot.close", "numpy.mean", "matplotlib.pyplot.figure", "matplotlib.pyplot.subplots", "matplotlib.patches.Ellipse", "matplotlib.pyplot.Line2D", "matplotlib.colorbar.ColorbarBase", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.subplot" ] ]
Secondtonumb/espnet
[ "f790a7695f07ac3d464d47ffbdef0306354ffd8f" ]
[ "src/asr/asr_pytorch.py" ]
[ "#!/usr/bin/env python\n\n# Copyright 2017 Johns Hopkins University (Shinji Watanabe)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\nimport copy\nimport json\nimport logging\nimport math\nimport os\nimport pickle\n\n# chainer related\nimport chainer\nfrom chainer import reporter as reporter_module\nfrom chainer import training\nfrom chainer.training import extensions\n\nimport torch\n\n# spnet related\nfrom asr_utils import adadelta_eps_decay\nfrom asr_utils import CompareValueTrigger\nfrom asr_utils import converter_kaldi\nfrom asr_utils import delete_feat\nfrom asr_utils import make_batchset\nfrom asr_utils import restore_snapshot\nfrom e2e_asr_attctc_th import E2E\nfrom e2e_asr_attctc_th import Loss\n\n# for kaldi io\nimport kaldi_io_py\nimport lazy_io\n\n# rnnlm\nimport lm_pytorch\n\n# numpy related\nimport matplotlib\nmatplotlib.use('Agg')\n\n\nclass PytorchSeqEvaluaterKaldi(extensions.Evaluator):\n '''Custom evaluater with Kaldi reader for pytorch'''\n\n def __init__(self, model, iterator, target, reader, device):\n super(PytorchSeqEvaluaterKaldi, self).__init__(\n iterator, target, device=device)\n self.reader = reader\n self.model = model\n\n # The core part of the update routine can be customized by overriding.\n def evaluate(self):\n iterator = self._iterators['main']\n\n if self.eval_hook:\n self.eval_hook(self)\n\n if hasattr(iterator, 'reset'):\n iterator.reset()\n it = iterator\n else:\n it = copy.copy(iterator)\n\n summary = reporter_module.DictSummary()\n\n for batch in it:\n observation = {}\n with reporter_module.report_scope(observation):\n # read scp files\n # x: original json with loaded features\n # will be converted to chainer variable later\n # batch only has one minibatch utterance, which is specified by batch[0]\n x = converter_kaldi(batch[0], self.reader)\n self.model.eval()\n self.model(x)\n delete_feat(x)\n\n summary.add(observation)\n\n return summary.compute_mean()\n\n\nclass PytorchSeqUpdaterKaldi(training.StandardUpdater):\n '''Custom updater with Kaldi reader for pytorch'''\n\n def __init__(self, model, grad_clip_threshold, train_iter, optimizer, reader, device):\n super(PytorchSeqUpdaterKaldi, self).__init__(\n train_iter, optimizer, device=None)\n self.model = model\n self.reader = reader\n self.grad_clip_threshold = grad_clip_threshold\n\n # The core part of the update routine can be customized by overriding.\n def update_core(self):\n # When we pass one iterator and optimizer to StandardUpdater.__init__,\n # they are automatically named 'main'.\n train_iter = self.get_iterator('main')\n optimizer = self.get_optimizer('main')\n\n # Get the next batch ( a list of json files)\n batch = train_iter.__next__()\n\n # read scp files\n # x: original json with loaded features\n # will be converted to chainer variable later\n # batch only has one minibatch utterance, which is specified by batch[0]\n x = converter_kaldi(batch[0], self.reader)\n\n # Compute the loss at this time step and accumulate it\n loss = self.model(x)\n optimizer.zero_grad() # Clear the parameter gradients\n loss.backward() # Backprop\n loss.detach() # Truncate the graph\n # compute the gradient norm to check if it is normal or not\n grad_norm = torch.nn.utils.clip_grad_norm(\n self.model.parameters(), self.grad_clip_threshold)\n logging.info('grad norm={}'.format(grad_norm))\n if math.isnan(grad_norm):\n logging.warning('grad norm is nan. 
Do not update model.')\n else:\n optimizer.step()\n delete_feat(x)\n\n\ndef train(args):\n '''Run training'''\n # seed setting\n torch.manual_seed(args.seed)\n\n # debug mode setting\n # 0 would be fastest, but 1 seems to be reasonable\n # by considering reproducability\n # revmoe type check\n if args.debugmode < 2:\n chainer.config.type_check = False\n logging.info('torch type check is disabled')\n # use determinisitic computation or not\n if args.debugmode < 1:\n torch.backends.cudnn.deterministic = False\n logging.info('torch cudnn deterministic is disabled')\n else:\n torch.backends.cudnn.deterministic = True\n\n # check cuda availability\n if not torch.cuda.is_available():\n logging.warning('cuda is not available')\n\n # get input and output dimension info\n with open(args.valid_label, 'rb') as f:\n valid_json = json.load(f)['utts']\n utts = list(valid_json.keys())\n idim = int(valid_json[utts[0]]['idim'])\n odim = int(valid_json[utts[0]]['odim'])\n logging.info('#input dims : ' + str(idim))\n logging.info('#output dims: ' + str(odim))\n\n # specify model architecture\n e2e = E2E(idim, odim, args)\n model = Loss(e2e, args.mtlalpha)\n\n # write model config\n if not os.path.exists(args.outdir):\n os.makedirs(args.outdir)\n model_conf = args.outdir + '/model.conf'\n with open(model_conf, 'wb') as f:\n logging.info('writing a model config file to' + model_conf)\n # TODO(watanabe) use others than pickle, possibly json, and save as a text\n pickle.dump((idim, odim, args), f)\n for key in sorted(vars(args).keys()):\n logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))\n\n # Set gpu\n if args.ngpu > 1:\n logging.warn(\"currently, pytorch does not support multi-gpu. use single gpu.\")\n if args.ngpu > 0:\n gpu_id = 0\n # Make a specified GPU current\n model.cuda(gpu_id) # Copy the model to the GPU\n else:\n gpu_id = -1\n\n # Setup an optimizer\n if args.opt == 'adadelta':\n optimizer = torch.optim.Adadelta(\n model.parameters(), rho=0.95, eps=args.eps)\n elif args.opt == 'adam':\n optimizer = torch.optim.Adam(model.parameters())\n\n # FIXME: TOO DIRTY HACK\n setattr(optimizer, \"target\", model.reporter)\n setattr(optimizer, \"serialize\", lambda s: model.reporter.serialize(s))\n\n # read json data\n with open(args.train_label, 'rb') as f:\n train_json = json.load(f)['utts']\n with open(args.valid_label, 'rb') as f:\n valid_json = json.load(f)['utts']\n\n # make minibatch list (variable length)\n train = make_batchset(train_json, args.batch_size,\n args.maxlen_in, args.maxlen_out, args.minibatches)\n valid = make_batchset(valid_json, args.batch_size,\n args.maxlen_in, args.maxlen_out, args.minibatches)\n # hack to make batchsze argument as 1\n # actual bathsize is included in a list\n train_iter = chainer.iterators.SerialIterator(train, 1)\n valid_iter = chainer.iterators.SerialIterator(\n valid, 1, repeat=False, shuffle=False)\n\n # prepare Kaldi reader\n train_reader = lazy_io.read_dict_scp(args.train_feat)\n valid_reader = lazy_io.read_dict_scp(args.valid_feat)\n\n # Set up a trainer\n updater = PytorchSeqUpdaterKaldi(\n model, args.grad_clip, train_iter, optimizer, train_reader, gpu_id)\n trainer = training.Trainer(\n updater, (args.epochs, 'epoch'), out=args.outdir)\n\n # Resume from a snapshot\n if args.resume:\n chainer.serializers.load_npz(args.resume, trainer)\n model = trainer.updater.model\n\n # Evaluate the model with the test dataset for each epoch\n trainer.extend(PytorchSeqEvaluaterKaldi(\n model, valid_iter, model.reporter, valid_reader, device=gpu_id))\n\n # Take a 
snapshot for each specified epoch\n trainer.extend(extensions.snapshot(), trigger=(1, 'epoch'))\n\n # Make a plot for training and validation values\n trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss',\n 'main/loss_ctc', 'validation/main/loss_ctc',\n 'main/loss_att', 'validation/main/loss_att'],\n 'epoch', file_name='loss.png'))\n trainer.extend(extensions.PlotReport(['main/acc', 'validation/main/acc'],\n 'epoch', file_name='acc.png'))\n\n # Save best models\n def torch_save(path, _):\n torch.save(model.state_dict(), path)\n torch.save(model, path + \".pkl\")\n\n trainer.extend(extensions.snapshot_object(model, 'model.loss.best', savefun=torch_save),\n trigger=training.triggers.MinValueTrigger('validation/main/loss'))\n trainer.extend(extensions.snapshot_object(model, 'model.acc.best', savefun=torch_save),\n trigger=training.triggers.MaxValueTrigger('validation/main/acc'))\n\n # epsilon decay in the optimizer\n def torch_load(path, obj):\n model.load_state_dict(torch.load(path))\n return obj\n if args.opt == 'adadelta':\n if args.criterion == 'acc':\n trainer.extend(restore_snapshot(model, args.outdir + '/model.acc.best', load_fn=torch_load),\n trigger=CompareValueTrigger(\n 'validation/main/acc',\n lambda best_value, current_value: best_value > current_value))\n trainer.extend(adadelta_eps_decay(args.eps_decay),\n trigger=CompareValueTrigger(\n 'validation/main/acc',\n lambda best_value, current_value: best_value > current_value))\n elif args.criterion == 'loss':\n trainer.extend(restore_snapshot(model, args.outdir + '/model.loss.best', load_fn=torch_load),\n trigger=CompareValueTrigger(\n 'validation/main/loss',\n lambda best_value, current_value: best_value < current_value))\n trainer.extend(adadelta_eps_decay(args.eps_decay),\n trigger=CompareValueTrigger(\n 'validation/main/loss',\n lambda best_value, current_value: best_value < current_value))\n\n # Write a log of evaluation statistics for each epoch\n trainer.extend(extensions.LogReport(trigger=(100, 'iteration')))\n report_keys = ['epoch', 'iteration', 'main/loss', 'main/loss_ctc', 'main/loss_att',\n 'validation/main/loss', 'validation/main/loss_ctc', 'validation/main/loss_att',\n 'main/acc', 'validation/main/acc', 'elapsed_time']\n if args.opt == 'adadelta':\n trainer.extend(extensions.observe_value(\n 'eps', lambda trainer: trainer.updater.get_optimizer('main').param_groups[0][\"eps\"]),\n trigger=(100, 'iteration'))\n report_keys.append('eps')\n trainer.extend(extensions.PrintReport(\n report_keys), trigger=(100, 'iteration'))\n\n trainer.extend(extensions.ProgressBar())\n\n # Run the training\n trainer.run()\n\n\ndef recog(args):\n '''Run recognition'''\n # seed setting\n torch.manual_seed(args.seed)\n\n # read training config\n with open(args.model_conf, \"rb\") as f:\n logging.info('reading a model config file from' + args.model_conf)\n idim, odim, train_args = pickle.load(f)\n\n for key in sorted(vars(args).keys()):\n logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))\n\n # specify model architecture\n logging.info('reading model parameters from' + args.model)\n e2e = E2E(idim, odim, train_args)\n model = Loss(e2e, train_args.mtlalpha)\n\n def cpu_loader(storage, location):\n return storage\n model.load_state_dict(torch.load(args.model, map_location=cpu_loader))\n\n # read rnnlm\n if args.rnnlm:\n rnnlm = lm_pytorch.ClassifierWithState(\n lm_pytorch.RNNLM(len(train_args.char_list), 650))\n rnnlm.load_state_dict(torch.load(args.rnnlm, map_location=cpu_loader))\n else:\n rnnlm = None\n\n # 
prepare Kaldi reader\n reader = kaldi_io_py.read_mat_ark(args.recog_feat)\n\n # read json data\n with open(args.recog_label, 'rb') as f:\n recog_json = json.load(f)['utts']\n\n new_json = {}\n for name, feat in reader:\n if args.beam_size == 1:\n y_hat = e2e.recognize(feat, args, train_args.char_list, rnnlm=rnnlm)\n else:\n nbest_hyps = e2e.recognize(feat, args, train_args.char_list, rnnlm=rnnlm)\n # get 1best and remove sos\n y_hat = nbest_hyps[0]['yseq'][1:]\n\n y_true = map(int, recog_json[name]['tokenid'].split())\n\n # print out decoding result\n seq_hat = [train_args.char_list[int(idx)] for idx in y_hat]\n seq_true = [train_args.char_list[int(idx)] for idx in y_true]\n seq_hat_text = \"\".join(seq_hat).replace('<space>', ' ')\n seq_true_text = \"\".join(seq_true).replace('<space>', ' ')\n logging.info(\"groundtruth[%s]: \" + seq_true_text, name)\n logging.info(\"prediction [%s]: \" + seq_hat_text, name)\n\n # copy old json info\n new_json[name] = recog_json[name]\n\n # added recognition results to json\n logging.debug(\"dump token id\")\n # TODO(karita) make consistent to chainer as idx[0] not idx\n new_json[name]['rec_tokenid'] = \" \".join([str(idx) for idx in y_hat])\n logging.debug(\"dump token\")\n new_json[name]['rec_token'] = \" \".join(seq_hat)\n logging.debug(\"dump text\")\n new_json[name]['rec_text'] = seq_hat_text\n\n # add n-best recognition results with scores\n if args.beam_size > 1 and len(nbest_hyps) > 1:\n for i, hyp in enumerate(nbest_hyps):\n y_hat = hyp['yseq'][1:]\n seq_hat = [train_args.char_list[int(idx)] for idx in y_hat]\n seq_hat_text = \"\".join(seq_hat).replace('<space>', ' ')\n new_json[name]['rec_tokenid' + '[' + '{:05d}'.format(i) + ']'] = \" \".join([str(idx) for idx in y_hat])\n new_json[name]['rec_token' + '[' + '{:05d}'.format(i) + ']'] = \" \".join(seq_hat)\n new_json[name]['rec_text' + '[' + '{:05d}'.format(i) + ']'] = seq_hat_text\n new_json[name]['score' + '[' + '{:05d}'.format(i) + ']'] = hyp['score']\n\n # TODO(watanabe) fix character coding problems when saving it\n with open(args.result_label, 'wb') as f:\n f.write(json.dumps({'utts': new_json}, indent=4, sort_keys=True).encode('utf_8'))\n" ]
[ [ "matplotlib.use", "torch.save", "torch.manual_seed", "torch.cuda.is_available", "torch.load" ] ]
aiyo-labs/antispoofing-cnn
[ "b52cf30658681f93c909d1925978555cffda8da1" ]
[ "graphs/models/antispoofing.py" ]
[ "\"\"\"\nAntiSpoofing Depth model\n\"\"\"\n\nfrom __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n#from ..weights_initializer import weights_init\nfrom torchsummary import summary\n\nclass AntiSpoofing(nn.Module):\n def __init__(self,resolution_inp = 256, resolution_op = 256, channel = 6):\n super(AntiSpoofing,self).__init__()\n\n self.resolution_inp = resolution_inp\n self.resolution_op = resolution_op\n self.channel = channel\n # define layers\n self.zeropad2d = nn.ZeroPad2d((1,1,1,1))\n self.elu = nn.ELU()\n self.conv1 = nn.Conv2d(in_channels=6, out_channels=64, kernel_size=3, stride=1,bias=False)\n self.bn1 = nn.BatchNorm2d(num_features=64)\n self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, bias=False)\n self.bn2 = nn.BatchNorm2d(num_features=128)\n self.conv3 = nn.Conv2d(in_channels=128, out_channels=196, kernel_size=3, stride=1, bias=False)\n self.bn3 = nn.BatchNorm2d(num_features=196)\n self.conv4 = nn.Conv2d(in_channels=196, out_channels=128, kernel_size=3, stride=1, bias=False)\n self.bn4 = nn.BatchNorm2d(num_features=128)\n self.pool1 = nn.MaxPool2d(kernel_size=2,stride=2)\n\n self.conv5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, bias=False)\n self.bn5 = nn.BatchNorm2d(num_features=128)\n self.conv6 = nn.Conv2d(in_channels=128, out_channels=196, kernel_size=3, stride=1, bias=False)\n self.bn6 = nn.BatchNorm2d(num_features=196)\n self.conv7 = nn.Conv2d(in_channels=196, out_channels=128, kernel_size=3, stride=1, bias=False)\n self.bn7 = nn.BatchNorm2d(num_features=128)\n self.pool2 = nn.MaxPool2d(kernel_size=2,stride=2)\n\n self.conv8 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, bias=False)\n self.bn8 = nn.BatchNorm2d(num_features=128)\n self.conv9 = nn.Conv2d(in_channels=128, out_channels=196, kernel_size=3, stride=1, bias=False)\n self.bn9 = nn.BatchNorm2d(num_features=196)\n self.conv10 = nn.Conv2d(in_channels=196, out_channels=128, kernel_size=3, stride=1, bias=False)\n self.bn10 = nn.BatchNorm2d(num_features=128)\n self.pool3 = nn.MaxPool2d(kernel_size=2,stride=2)\n\n\n self.conv11 = nn.Conv2d(in_channels=384, out_channels=128, kernel_size=3, stride=1, bias=False)\n self.bn11 = nn.BatchNorm2d(num_features=128)\n self.conv12 = nn.Conv2d(in_channels=128, out_channels=3, kernel_size=3, stride=1, bias=False)\n self.bn12 = nn.BatchNorm2d(num_features=3)\n self.conv13 = nn.Conv2d(in_channels=3, out_channels=1, kernel_size=3, stride=1, bias=False)\n self.bn13 = nn.BatchNorm2d(num_features=1)\n\n\n\n def forward(self, x):\n \n x = self.bn1(self.elu(self.conv1(self.zeropad2d(x))))\n x = self.bn2(self.elu(self.conv2(self.zeropad2d(x))))\n x = self.bn3(self.elu(self.conv3(self.zeropad2d(x))))\n x = self.bn4(self.elu(self.conv4(self.zeropad2d(x))))\n x = self.pool1(x)\n resize1 = F.interpolate(x,size=32)\n\n x = self.bn5(self.elu(self.conv5(self.zeropad2d(x))))\n x = self.bn6(self.elu(self.conv6(self.zeropad2d(x))))\n x = self.bn7(self.elu(self.conv7(self.zeropad2d(x))))\n x = self.pool2(x)\n resize2 = F.interpolate(x,size=32)\n\n x = self.bn8(self.elu(self.conv8(self.zeropad2d(x))))\n x = self.bn9(self.elu(self.conv9(self.zeropad2d(x))))\n x = self.bn10(self.elu(self.conv10(self.zeropad2d(x))))\n x = self.pool3(x)\n \n x = torch.cat((resize1,resize2,x),1)\n\n x = self.bn11(self.elu(self.conv11(self.zeropad2d(x))))\n x = self.bn12(self.elu(self.conv12(self.zeropad2d(x))))\n x = self.bn13(self.elu(self.conv13(self.zeropad2d(x))))\n\n x = 
torch.sigmoid(x)" ]
[ [ "torch.sigmoid", "torch.cat", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.functional.interpolate", "torch.nn.Conv2d", "torch.nn.ZeroPad2d", "torch.nn.ELU" ] ]
tair-ai/deephtc
[ "ec11f88bf1af3583ecd9e351790e766f1f18b56f" ]
[ "HTC_lib/Error_checker.py" ]
[ "\n# coding: utf-8\n\n# # created on Feb 18 2018\n\n# In[1]:\n\n\nimport os, time, shutil\nimport re\nimport subprocess\n\nfrom pymatgen.io.vasp.outputs import Oszicar, Vasprun\nfrom pymatgen import Structure\n\nimport numpy as np\n\nfrom Query_from_OUTCAR import find_incar_tag_from_OUTCAR\nfrom Utilities import get_time_str, search_file, decorated_os_rename\nfrom Write_VASP_INCAR import modify_vasp_incar, get_current_firework_from_cal_loc, get_bader_charge_tags\n\n\n# In[2]:\n\n\ndef Vasp_Error_checker(error_type, cal_loc, workflow): \n \"\"\"\n Input error_type and return the instance of the associated error checker class which havs two methods, i.e. check, correct\n input arguments:\n - error_type (str or list of length 1): \n - str: error_type is an error type, and an instance of the associated error checker will be returned.\n - list of length 1: the only entry is a string, \"on_the_fly\" or \"after_cal\"\n - on_the_fly: all on-the-fly error checkers will be called one by one to check errors.\n If found, return True; Otherwise return False.\n - after_cal: all error checkers will be called one by one to check errors.\n If found, return True; Otherwise return False.\n - cal_loc: the location of the calculation.\n - workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow\n \"\"\"\n error_checker_dict = {\"__unfinished_OUTCAR__\": OUTCAR_status, \n \"__electronic_divergence__\": Electronic_divergence, \n \"__ionic_divergence__\": Ionic_divergence, \n \"__pricel__\":Vasp_out_pricel, \n \"__posmap__\": Vasp_out_posmap,\n \"__positive_energy__\": Positive_energy, \n \"__bad_termination__\": Vasp_out_bad_termination, \n \"__zbrent__\":Vasp_out_zbrent, \n \"__invgrp__\": Vasp_out_invgrp, \n \"__too_few_bands__\": Vasp_out_too_few_bands, \n \"__too_few_kpoints__\":Vasp_out_too_few_kpoints, \n \"__rhosyg__\":Vasp_out_rhosyg, \n \"__edddav__\":Vasp_out_edddav, \n \"__zpotrf__\": Vasp_out_zpotrf, \n \"__real_optlay__\": Vasp_out_real_optlay, \n \"__bader_charge__\": Bader_Charge, \n \"__pzunmtr_or_pzstein__\": Vasp_out_pzunmtr_or_pzstein}\n \n on_the_fly = [\"__too_few_bands__\", \"__electronic_divergence__\", \"__bader_charge__\"]\n after_cal = on_the_fly + [\"__pricel__\", \"__posmap__\", \"__bad_termination__\", \"__zbrent__\", \"__invgrp__\"]\n after_cal += [\"__too_few_kpoints__\", \"__rhosyg__\", \"__edddav__\", \"__zpotrf__\", \"__real_optlay__\"]\n after_cal += [\"__pzunmtr_or_pzstein__\"]\n after_cal += [\"__positive_energy__\", \"__ionic_divergence__\", \"__unfinished_OUTCAR__\"]\n \n if isinstance(error_type, str): \n if error_type in error_checker_dict:\n return error_checker_dict[error_type](cal_loc=cal_loc, workflow=workflow)\n else:\n return Null_error_checker(cal_loc=cal_loc, workflow=workflow)\n elif isinstance(error_type, list):\n if error_type[0] == \"on_the_fly\":\n error_type_list = on_the_fly\n elif error_type[0] == \"after_cal\":\n error_type_list = after_cal\n else:\n raise Exception(\"The argument error_type of func Vasp_Error_checker must be a str or a list consisting of a str\")\n if error_type[0] == \"on_the_fly\":\n for error_checker in on_the_fly:\n if not error_checker_dict[error_checker](cal_loc=cal_loc, workflow=workflow).check():\n return False\n return True\n \n for error in error_type_list:\n if not error_checker_dict[error](cal_loc=cal_loc, workflow=workflow).check():\n return False\n return True\n\n\n# In[3]:\n\n\nclass Write_and_read_error_tag(object):\n \"\"\"\n Write or read error tag from file __error__ under folder cal_loc.\n 
input argument:\n -cal_loc (str): the location of the to-be-checked calculation\n \"\"\"\n \n def __init__(self, cal_loc):\n self.cal_loc = cal_loc\n \n def write_error_tag(self, error_tag, file=\"__error__\"):\n with open(os.path.join(self.cal_loc, file), \"w\") as f:\n f.write(error_tag)\n \n def read_error_tag(self, file=\"__killed__\"):\n \"\"\"\n input argument:\n -file (str): the file from which the error_tag is read.\n \"\"\"\n with open(os.path.join(self.cal_loc, file), \"r\") as f:\n error_tag = f.read().strip()\n return error_tag\n\n\n# In[4]:\n\n\nclass Queue_std_files():\n \"\"\"\n Check if the queue stdout and stderr file exist, which have certain suffixes or prefixes as defined in workflow.\n The presence of the two files indicate that the calculation under cal_loc has completed either successfully or unsuccessfully.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n - workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow in which either the suffixes \n or prefixes are pointed out for the queue stdout and stderr files.\n Two methods are provided, either of which requires any additional input parameters.\n -find_std_files:\n - If found, return [stdout_filename, stderr_filename]\n - If not found, return [None, None]\n Note that func Utilities.search_file will be called to search for the file with the given prefix or suffix.\n If more than one files are found, it will raise an Exception.\n -remove_std_files:\n If stdout and stderr files are present under cal_loc, remove them.\n Note that the find_std_files is called in __init__, thereby providing two data, i.e. stdout_file, stderr_file\n \"\"\"\n \n def __init__(self, cal_loc, workflow):\n self.cal_loc = cal_loc\n self.workflow = workflow\n self.stdout_file, self.stderr_file = self.find_std_files()\n \n def find_std_files(self):\n stdout_prefix, stdout_suffix = self.workflow[0][\"queue_stdout_file_prefix\"], self.workflow[0][\"queue_stdout_file_suffix\"]\n stderr_prefix, stderr_suffix = self.workflow[0][\"queue_stderr_file_prefix\"], self.workflow[0][\"queue_stderr_file_suffix\"]\n \n stdout_file = search_file(cal_loc=self.cal_loc, prefix=stdout_prefix, suffix=stdout_suffix)\n stderr_file = search_file(cal_loc=self.cal_loc, prefix=stderr_prefix, suffix=stderr_suffix)\n \n return [stdout_file, stderr_file]\n \n def remove_std_files(self):\n if self.stdout_file != None:\n os.remove(os.path.join(self.cal_loc, self.stdout_file))\n if self.stderr_file != None:\n os.remove(os.path.join(self.cal_loc, self.stderr_file))\n\n\n# def file_existence_decorator(filename, true=True):\n# def Func_wrapper(func):\n# def func_wrapper(*args):\n# file_loc = args[0].cal_loc\n# if os.path.isfile(os.path.join(file_loc, filename)):\n# return func(*args)\n# else:\n# if true:\n# return true_func(*args) #<---decorate method check of Check_xxx classes below.\n# else:\n# return false_func(*args) #<--- decorate method correct of Check_xxx classes below.\n# \n# return func_wrapper\n# \n# def true_func(*args):\n# return True\n# \n# def false_func(*args):\n# return False\n# \n# return Func_wrapper\n\n# In[5]:\n\n\ndef find_target_str(cal_loc, target_file, target_str):\n \"\"\"\n input arguments:\n -cal_loc (str): the location of the calculation.\n -target_file (str): the filename of the target file under cal_loc\n -target_str (str)\n output:\n - If target_str is found, return True.\n - If target_str is not found, return False\n Note that if the target_file is not existent, return False\n \"\"\"\n 
found_target_str = False\n if os.path.isfile(os.path.join(cal_loc, target_file)):\n with open(os.path.join(cal_loc, target_file), \"r\") as f:\n for line in f:\n if target_str in line:\n found_target_str = True\n break\n return found_target_str\n\n\n# In[6]:\n\n\nclass Vasp_Error_Saver(object):\n \"\"\"\n Backup INCAR, POSCAR, KPOINTS, OUTCAR, XDATCAR, vasp.out and queue stdout & stderr so as to facilitate the manual error repair.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n methods:\n - backup: create a sub-sub-folder under sub-folder error_folder under cal_loc and save aforementioned VASP files.\n - find_error_times: return the number of times that errors have been detected for the cal under cal_loc\n \"\"\"\n def __init__(self, cal_loc, workflow):\n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.error_folder = os.path.join(self.cal_loc, \"error_folder\")\n \n def backup(self):\n if not os.path.isdir(self.error_folder):\n os.mkdir(self.error_folder)\n with open(self.log_txt, \"a\") as f:\n f.write(\"{} Backup: Create error_folder under {}\\n\".format(get_time_str(), self.firework_name))\n \n file_list = [\"INCAR\", \"POSCAR\", \"KPOINTS\", \"XDATCAR\", \"OUTCAR\", \"OSZICAR\", self.workflow[0][\"vasp.out\"], \"__killed__\"]\n stdout, stderr = Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files()\n for std_file in [stdout, stderr]:\n if std_file:\n file_list.append(std_file)\n \n sub_error_folder_name = self.find_next_sub_error_folder_name()\n new_sub_error_folder = os.path.join(self.error_folder, sub_error_folder_name)\n if not os.path.isdir(new_sub_error_folder):\n os.mkdir(new_sub_error_folder)\n non_existent_file_list = []\n for file in file_list:\n src_file = os.path.join(self.cal_loc, file)\n dst_file = os.path.join(new_sub_error_folder, file)\n if os.path.isfile(src_file):\n shutil.copyfile(src=src_file, dst=dst_file)\n else:\n non_existent_file_list.append(file)\n \n with open(self.log_txt, \"a\") as f:\n f.write(\"{} Backup: at {}\\n\".format(get_time_str(), self.firework_name))\n f.write(\"\\t\\t\\tcopy files below to {}:\\n\".format(os.path.join(\"error_folder\", sub_error_folder_name)))\n f.write(\"\\t\\t\\t\\t\")\n [f.write(\"{}\\t\".format(file_)) for file_ in file_list]\n f.write(\"\\n\")\n \n for file in non_existent_file_list:\n f.write(\"\\t\\t\\tno {} to backup\\n\".format(file))\n \n \n def find_error_times(self):\n if not os.path.isdir(self.error_folder):\n return 0\n else:\n return len(os.listdir(self.error_folder))\n \n def find_next_sub_error_folder_name(self):\n error_times = self.find_error_times()\n if error_times == 0:\n return \"error_1\"\n else:\n latest_sub_error_folder = os.path.join(self.error_folder, \"error_\"+str(error_times))\n latest_std = Queue_std_files(cal_loc=latest_sub_error_folder, workflow=self.workflow).find_std_files()\n curr_std = Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files()\n if latest_std == curr_std:\n return \"error_\"+str(error_times)\n else:\n return \"error_\"+str(error_times+1)\n\n\n# In[7]:\n\n\nclass Vasp_Error_Checker_Logger(Write_and_read_error_tag):\n \"\"\"\n This class provides two methods:\n -write_error_log: writes down the error information into log.txt for a material and\n changes file __running__ to file 
__error__, and writes down the error type into file __error__.\n -write_correction_log: write the correction info into log.txt\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n write_error_log method:\n input arguments:\n -target_error_str (list or str): an error string or a list of error strings.\n -error_type (str): the error type that will be written into file __error__\n \"\"\"\n \n def __init__(self, cal_loc, workflow):\n self.cal_loc = cal_loc\n self.workflow = workflow\n self.lot_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt, \"log.txt\")\n \n def write_error_log(self, target_error_str, error_type):\n error_type = error_type.strip()\n if isinstance(target_error_str, str):\n target_error_str_list = [target_error_str]\n elif isinstance(target_error_str, list):\n target_error_str_list = target_error_str\n else:\n raise Exception(\"target_error_str for Vasp_Error_Checker_Logger.write_error_log must be a string or a list of strings.\")\n \n with open(self.log_txt, \"a\") as f:\n f.write(\"{} Error: {}\\n\".format(get_time_str(), self.firework_name))\n for error_str in target_error_str_list:\n f.write(\"\\t\\t{}\\n\".format(error_str))\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n f.write(\"\\t\\t\\t__running__ --> __error__\\n\")\n f.write(\"\\t\\t\\t write {} into __error__\\n\".format(error_type))\n super(Vasp_Error_Checker_Logger, self).write_error_tag(error_type)\n \n def write_file_absence_log(self, filename_list = [], initial_signal_file=\"\", final_signal_file=\"\"):\n \"\"\"\n Write the log for the absence of files listed in the input filename_list and the name change from initial_signal_file \n to final_signal_file if provided.\n input arguments:\n filename_list (list of str): a list of filenames. Default: empty list\n initial_signal_file (str): Default: \"\"\n final_signal_file (str): Default: \"\"\n Note that the log for the name change of the signal file will be written only when filename_list is not empty. \n initial_signal_file and final_signal_file must be provided at the same time.\n \"\"\"\n if filename_list:\n with open(self.log_txt, \"a\") as f:\n f.write(\"{} File Missing: {}\\n\".format(get_time_str(), self.firework_name))\n f.write(\"\\t\\t\\tThe file(s) listed below is(are) missing.\\n\\t\\t\\t\\t\")\n for filename in filename_list:\n f.write(\"{}\\t\".format(filename))\n f.write(\"\\n\")\n if initial_signal_file != \"\" and final_signal_file != \"\":\n f.write(\"\\t\\t\\tchange the signal file name:\\n\")\n f.write(\"\\t\\t\\t\\t{} --> {}\\n\".format(initial_signal_file, final_signal_file))\n \n \n \n \n def write_correction_log(self, new_incar_tags={}, comment_incar_tags=[], remove_incar_tags=[], new_filenames={}, remove_files=[]):\n \"\"\"\n write the correction log\n input arguments:\n new_incar_tags (dict): key-INCAR tags, value-corresponding values. Default: empty dictionary\n comment_incar_tags (list): a list of INCAR tags. Default: empty list\n remove_incar_tags (list): a list of INCAR tags. Default: empty list\n new_filenames (dict): key-old filename, value-new filename. 
Default: empty dictionary\n remove_files (list): file list that will be removed\n \"\"\"\n with open(self.log_txt, \"a\") as f:\n f.write(\"{} Correction: {}\\n\".format(get_time_str(), self.firework_name))\n if new_incar_tags:\n f.write(\"\\t\\tnew incar tags:\\n\")\n for tag, value in new_incar_tags.items():\n f.write(\"\\t\\t\\t{} = {}\\n\".format(tag, value))\n if comment_incar_tags:\n f.write(\"\\t\\t\\comment incar tags:\\n\\t\\t\\t\")\n for tag in comment_incar_tags:\n f.write(\"{}\\t\".format(tag))\n f.write(\"\\n\")\n if remove_incar_tags:\n f.write(\"\\t\\tremove incar tags:\\n\\t\\t\\t\")\n for tag in remove_incar_tags:\n f.write(\"{}\\t\".format(tag))\n f.write(\"\\n\")\n if new_filenames:\n f.write(\"\\t\\trename files:\\n\")\n for old_name, new_name in new_filenames.items():\n f.write(\"\\t\\t\\t{} --> {}\\n\".format(old_name, new_name))\n if remove_files:\n f.write(\"t\\t\\tremove files below:\\n\")\n for file in remove_files:\n if os.path.isfile(os.path.join(self.cal_loc, file)):\n os.remove(os.path.join(self.cal_loc, file))\n f.write(\"\\t\\t\\t{}\\n\".format(file))\n else:\n f.write(\"\\t\\t\\t{} isn't present --> no need to remove\\n\".format(file))\n\n\n# # For all error checkers, the check method will return False if an error is found. Otherwise return True\n\n# In[8]:\n\n\nclass OUTCAR_status(Vasp_Error_Checker_Logger):\n \"\"\"\n Error chekcing type: after the calculation.\n If the calculation successfully completes, \"General timing and accounting informations for this job:\" will \n be found at the end of OUTCAR\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n check method: return True, if found; return False and write error logs otherwise.\n \"\"\"\n def __init__(self, cal_loc, workflow):\n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.target_str = \"General timing and accounting informations for this job:\"\n self.target_file = \"OUTCAR\"\n \n def check(self):\n \n #This if statement deactivates the check method until the calculation is done.\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow) == [None, None]:\n return True\n \n #Since the job is done, OUTCAR must exist.\n if not os.path.isfile(os.path.join(self.cal_loc, self.target_file)):\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(OUTCAR_status, self).write_file_absence_log(filename_list = [self.target_file], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n if find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=self.target_str):\n return True\n else:\n self.write_error_log()\n return False\n \n \n def write_error_log(self):\n target_str_list = [\"\\t\\tcannot find the critical line in OUTCAR, which indicates the job successfully finished:\"]\n target_str_list.append(self.target_str)\n super(OUTCAR_status, self).write_error_log(target_error_str=target_str_list, error_type=\"__unfinished_OUTCAR__\")\n \n def correct(self):\n return False\n \n\n\n# In[9]:\n\n\nclass Vasp_out_pricel(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n 
\"\"\"\n Error checking type: after the calculation.\n Target file: vasp.out or the one specified by tag vasp.out\n Target error string: \"internal error in subroutine PRICEL\"\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n check method: return True, if not found; return False and write error logs otherwise.\n \"\"\"\n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.target_file = self.workflow[0][\"vasp.out\"]\n self.target_str = \"internal error in subroutine PRICEL\"\n \n \n def check(self):\n \n #This method will be active only when the job is done.\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files() == [None, None]:\n return True\n \n #Since the job is done, vasp.out must exist\n if not os.path.isfile(os.path.join(self.cal_loc, self.target_file)):\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(Vasp_out_pricel, self).write_file_absence_log(filename_list = [self.target_file], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n \n if find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=self.target_str):\n self.write_error_log()\n return False\n else:\n return True\n\n \n def write_error_log(self):\n super(Vasp_out_pricel, self).write_error_log(target_error_str=self.target_str, error_type=\"__pricel__\")\n \n def correct(self):\n \"\"\"\n This correction is borrowed from custodian and modified.\n https://materialsproject.github.io/custodian/_modules/custodian/vasp/handlers.html#VaspErrorHandler.correct\n \"\"\"\n incar_dict = modify_vasp_incar(cal_loc=self.cal_loc)\n SYMPREC = float(incar_dict.get(\"SYMPREC\", 1.0e-5))\n ISYM = int(incar_dict.get(\"ISYM\", 2))\n \n if ISYM != 0 or SYMPREC > 1.1e-9:\n super(Vasp_out_pricel, self).backup()\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags={\"SYMPREC\": 1e-8, \"ISYM\": 0}, rename_old_incar=False)\n super(Vasp_out_pricel, self).write_correction_log(new_incar_tags={\"SYMPREC\": 1e-8, \"ISYM\": 0})\n return True\n else:\n return False\n\n\n\n# In[10]:\n\n\nclass Vasp_out_too_few_bands(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: on the fly.\n Target file: vasp.out or the one specified by tag vasp.out\n Target error string: \"TOO FEW BANDS\"\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n check method: return True, if not found; return False and write error logs otherwise.\n \"\"\"\n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.target_file = 
self.workflow[0][\"vasp.out\"]\n self.target_str = \"TOO FEW BANDS\"\n \n \n def check(self):\n \n #This method will be active only when the job is done.\n #if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files() == [None, None]:\n #return True\n if not os.path.isfile(os.path.join(self.cal_loc, self.workflow[0][\"vasp.out\"])):\n return True\n \n ##Since the job is done, vasp.out must exist\n #if not os.path.isfile(os.path.join(self.cal_loc, self.target_file)):\n # decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n # #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n # super(Vasp_out_too_few_bands, self).write_file_absence_log(filename_list = [self.target_file], \n # initial_signal_file=\"__running__\", \n # final_signal_file=\"__error__\")\n # return False\n \n if find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=self.target_str):\n self.write_error_log()\n return False\n else:\n return True\n \n \n def write_error_log(self):\n super(Vasp_out_too_few_bands, self).write_error_log(target_error_str=self.target_str, error_type=\"__too_few_bands__\")\n \n def correct(self):\n \"\"\"\n This correction is borrowed from custodian and modified.\n https://materialsproject.github.io/custodian/_modules/custodian/vasp/handlers.html#VaspErrorHandler.correct\n \"\"\"\n if not os.path.isfile(os.path.join(self.cal_loc, \"OUTCAR\")):\n open(os.path.join(self.cal_loc, \"__cannot_find_OUTCAR_for_correction__\"), \"w\").close()\n super(Vasp_out_too_few_bands, self).write_file_absence_log(filename_list = [\"OUTCAR\"])\n return False\n \n NBANDS = find_incar_tag_from_OUTCAR(cal_loc=self.cal_loc, tag=\"NBANDS\")\n NBANDS_ = int(NBANDS*1.1)\n \n super(Vasp_out_too_few_bands, self).backup()\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags={\"NBANDS\": NBANDS_}, rename_old_incar=False)\n super(Vasp_out_too_few_bands, self).write_correction_log(new_incar_tags={\"NBANDS\": NBANDS_})\n return True\n\n\n\n# In[11]:\n\n\nclass Vasp_out_too_few_kpoints(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: after the calculation.\n Target file: vasp.out or the one specified by tag vasp.out\n Target error string: \"Tetrahedron method fails for NKPT<4.\"\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n check method: return True, if not found; return False and write error logs otherwise.\n \"\"\"\n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.target_file = self.workflow[0][\"vasp.out\"]\n self.target_str = \"Tetrahedron method fails for NKPT<4.\"\n \n \n def check(self):\n \n #This method will be active only when the job is done.\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files() == [None, None]:\n return True\n \n #Since the job is done, vasp.out must exist\n if not os.path.isfile(os.path.join(self.cal_loc, self.target_file)):\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, 
\"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(Vasp_out_too_few_kpoints, self).write_file_absence_log(filename_list = [self.target_file], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n if find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=self.target_str):\n self.write_error_log()\n return False\n else:\n return True\n \n \n def write_error_log(self):\n super(Vasp_out_too_few_kpoints, self).write_error_log(target_error_str=self.target_str, error_type=\"__too_few_kpoints__\")\n \n def correct(self):\n incar_dict = modify_vasp_incar(cal_loc=self.cal_loc)\n ISMEAR = int(incar_dict.get(\"ISMEAR\", 1))\n SIGMA = float(incar_dict.get(\"SIGMA\", 0.2))\n \n if ISMEAR == -5: \n super(Vasp_out_too_few_kpoints, self).backup()\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags={\"ISMEAR\": 0, \"SIGMA\":0.05}, rename_old_incar=False)\n super(Vasp_out_too_few_kpoints, self).write_correction_log(new_incar_tags={\"ISMEAR\": 0, \"SIGMA\":0.05})\n return True\n else:\n return False\n\n\n\n# In[12]:\n\n\nclass Vasp_out_posmap(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: after the calculation.\n Target file: vasp.out or the one specified by tag vasp.out\n Target error string: \"POSMAP internal error: symmetry equivalent atom not found\"\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n check method: return True, if not found; return False and write error logs otherwise.\n \"\"\"\n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.target_file = self.workflow[0][\"vasp.out\"]\n #super(Vasp_out_posmap, self).__init__(cal_loc, workflow)\n self.target_str = \"POSMAP internal error: symmetry equivalent atom not found\"\n \n \n \n def check(self):\n #This method is deactive until the job is done\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files() == [None, None]:\n return True\n \n #Since the job is done, vasp.out must exist\n if not os.path.isfile(os.path.join(self.cal_loc, self.target_file)):\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(Vasp_out_posmaps, self).write_file_absence_log(filename_list = [self.target_file], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n if find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=self.target_str):\n self.write_error_log()\n return False\n else:\n return True\n \n \n def write_error_log(self):\n super(Vasp_out_posmap, self).write_error_log(target_error_str=self.target_str, error_type=\"__posmap__\")\n \n def correct(self):\n \"\"\"\n This correction is borrowed from custodian and modified.\n https://materialsproject.github.io/custodian/_modules/custodian/vasp/handlers.html#VaspErrorHandler.correct\n \"\"\"\n incar_dict = modify_vasp_incar(cal_loc=self.cal_loc)\n SYMPREC = float(incar_dict.get(\"SYMPREC\", 1.0e-5))\n \n if SYMPREC > 1e-7:\n 
super(Vasp_out_posmap, self).backup()\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags={\"SYMPREC\": SYMPREC/10.}, rename_old_incar=False)\n super(Vasp_out_posmap, self).write_correction_log(new_incar_tags={\"SYMPREC\": SYMPREC/10.})\n return True\n else:\n return False\n \n\n\n# In[13]:\n\n\nclass Vasp_out_bad_termination(Vasp_Error_Checker_Logger):\n \"\"\"\n Error checking type: after the calculation.\n Target file: vasp.out or the one specified by tag vasp.out\n Target error string: \"= BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES\"\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n check method: return True, if not found; return False and write error logs otherwise.\n \"\"\"\n def __init__(self, cal_loc, workflow):\n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.target_file = self.workflow[0][\"vasp.out\"]\n self.target_str = \"= BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES\"\n \n \n \n def check(self):\n #this method is not active until the job is done\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files() == [None, None]:\n return True\n \n #Since the job is done, vasp.out must exist\n if not os.path.isfile(os.path.join(self.cal_loc, self.target_file)):\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(Vasp_out_bad_termination, self).write_file_absence_log(filename_list = [self.target_file], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n if find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=self.target_str):\n self.write_error_log()\n return False\n else:\n return True\n \n \n def write_error_log(self):\n super(Vasp_out_bad_termination, self).write_error_log(target_error_str=self.target_str, error_type=\"__bad_termination__\")\n \n def correct(self):\n if os.path.isfile(os.path.join(self.cal_loc, \"__bad_termination__\")):\n with open(self.log_txt, \"a\") as f:\n f.write(\"{} Correction: {}\\n\".format(get_time_str(), self.firework_name))\n f.write(\"\\t\\t\\t{}\\n\".format(self.target_str))\n f.write(\"\\t\\t\\tfile __bad_termination__ is detected in this folder\\n\")\n f.write(\"\\t\\t\\tThis is the second time to encounter such error\\n\")\n return False\n else:\n open(os.path.join(self.cal_loc, \"__bad_termination__\"), \"w\").close()\n Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).remove_std_files()\n with open(self.log_txt, \"a\") as f:\n f.write(\"{} Correction: {}\\n\".format(get_time_str(), self.firework_name))\n f.write(\"\\t\\t\\t{}\\n\".format(self.target_str))\n f.write(\"\\t\\t\\tThis is the first time to encounter such error --> Input set remains unchanged.\\n\")\n f.write(\"\\t\\t\\tremove queue stdout and stderr.\\n\")\n f.write(\"\\t\\t\\tcreate file __bad_termination__\\n\")\n return True\n\n\n\n# In[14]:\n\n\nclass Vasp_out_invgrp(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: after the calculation.\n Target file: vasp.out or the one specified by tag vasp.out\n Target error string: \"VERY BAD NEWS! 
internal error in subroutine INVGRP:\" \n && \"inverse of rotation matrix was not found (increase SYMPREC)\"\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n check method: return True, if not found; return False and write error logs otherwise.\n correct method: if SYMPREC*5 < 0.9e-4, SYMPREC = SYMPREC*5 and return True; Otherwise return False\n \"\"\"\n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.target_file = self.workflow[0][\"vasp.out\"]\n self.target_str_list = [\" VERY BAD NEWS! internal error in subroutine INVGRP:\", \n \"inverse of rotation matrix was not found (increase SYMPREC)\"]\n \n \n \n def check(self):\n #this method is not active until the job is done\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files() == [None, None]:\n return True\n \n #Since the job is done, vasp.out must exist\n if not os.path.isfile(os.path.join(self.cal_loc, self.target_file)):\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(Vasp_out_invgrp, self).write_file_absence_log(filename_list = [self.target_file], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n no_error_list = []\n for target_str in self.target_str_list:\n no_error_list.append(find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=target_str))\n \n if False in no_error_list:\n return True\n else:\n self.write_error_log()\n return False\n\n \n \n def write_error_log(self):\n super(Vasp_out_invgrp, self).write_error_log(target_error_str=self.target_str_list, error_type=\"__invgrp__\")\n \n def correct(self):\n \"\"\"\n This correction is borrowed from custodian and modified.\n https://materialsproject.github.io/custodian/_modules/custodian/vasp/handlers.html#VaspErrorHandler.correct\n \"\"\"\n incar_dict = modify_vasp_incar(cal_loc=self.cal_loc)\n SYMPREC = incar_dict.get(\"SYMPREC\", 1.0e-5)\n SYMPREC_ = SYMPREC * 5\n \n if SYMPREC_ < 0.9e-4:\n super(Vasp_out_invgrp, self).backup()\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags={\"SYMPREC\": SYMPREC_}, rename_old_incar=False)\n super(Vasp_out_invgrp, self).write_correction_log(new_incar_tags={\"SYMPREC\": SYMPREC_})\n return True\n else:\n with open(self.log_txt, \"a\") as f:\n f.write(\"{} Correction: {}\\n\".format(get_time_str(), self.firework_name))\n f.write(\"\\t\\t\\tSYMPREC={} is already too big.\\n\".format(SYMPREC))\n return False\n \n\n\n# In[15]:\n\n\nclass Vasp_out_zbrent(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: after the calculation.\n Target file: vasp.out or the one specified by tag vasp.out\n Target error string: \"ZBRENT: fatal error in bracketing\" && \"please rerun with smaller EDIFF, or copy CONTCAR\"\n && \"to POSCAR and continue\"\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func 
Parse_calculation_workflow.parse_calculation_workflow.\n check method: return True, if not found; return False and write error logs otherwise.\n correct method: IBRION --> 1 & EDIFF --> 0.5*EDIFF & CONTCAR --> POSCAR\n \"\"\"\n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.target_file = self.workflow[0][\"vasp.out\"]\n self.target_str_list = [\"ZBRENT: fatal error in bracketing\", \n \"please rerun with smaller EDIFF, or copy CONTCAR\", \n \"to POSCAR and continue\"]\n \n \n \n def check(self):\n #this method is not active until the job is done\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files() == [None, None]:\n return True\n \n #Since the job is done, vasp.out must exist\n if not os.path.isfile(os.path.join(self.cal_loc, self.target_file)):\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(Vasp_out_zbrent, self).write_file_absence_log(filename_list = [self.target_file], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n no_error_list = []\n for target_str in self.target_str_list:\n no_error_list.append(find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=target_str))\n \n if False in no_error_list:\n return True\n else:\n self.write_error_log()\n return False\n \n \n def write_error_log(self):\n super(Vasp_out_zbrent, self).write_error_log(target_error_str=self.target_str_list, error_type=\"__zbrent__\")\n \n def correct(self):\n \"\"\"\n This correction is borrowed from custodian and modified.\n https://materialsproject.github.io/custodian/_modules/custodian/vasp/handlers.html#VaspErrorHandler.correct\n \"\"\"\n if not os.path.isfile(os.path.join(self.cal_loc, \"OUTCAR\")):\n open(os.path.join(self.cal_loc, \"__cannot_find_OUTCAR_for_corrections__\"), \"w\").close()\n super(Vasp_out_zbrent, self).write_file_absence_log(filename_list = [\"OUTCAR\"])\n return False\n \n EDIFF = find_incar_tag_from_OUTCAR(tag=\"EDIFF\", cal_loc=self.cal_loc)\n IBRION = find_incar_tag_from_OUTCAR(tag=\"IBRION\", cal_loc=self.cal_loc)\n \n super(Vasp_out_zbrent, self).backup()\n new_tags = {}\n if EDIFF * 0.5 >= 1.0e-6:\n new_tags[\"EDIFF\"] = EDIFF * 0.5\n if IBRION != 1:\n new_tags[\"IBRION\"] = 1\n \n modify_vasp_incar(cal_loc=self.cal_loc, new_tags=new_tags, rename_old_incar=False)\n \n shutil.copyfile(os.path.join(self.cal_loc, \"CONTCAR\"), os.path.join(self.cal_loc, \"POSCAR\"))\n \n super(Vasp_out_zbrent, self).write_correction_log(new_incar_tags=new_tags, new_filenames={\"CONTCAR\": \"POSCAR\"})\n\n return True\n \n\n\n# In[16]:\n\n\nclass Vasp_out_rhosyg(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: after the calculation.\n Target file: vasp.out or the one specified by tag vasp.out\n Target error string: \"RHOSYG internal error: stars are not distinct, try to increase SYMPREC to e.g.\"\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n check method: return True, if 
not found; return False and write error logs otherwise.\n correct method: if SYMPREC < 1.0E-4, SYMPREC --> 1.0E-4 & return True;\n elif SYMPREC >= 1.0e-4 and ISYM != 0, ISYM --> 0 & return True;\n else: return False\n \"\"\"\n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.target_file = self.workflow[0][\"vasp.out\"]\n self.target_str = \"RHOSYG internal error: stars are not distinct, try to increase SYMPREC to e.g.\"\n \n \n \n def check(self):\n #this method is not active until the job is done\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files() == [None, None]:\n return True\n \n #Since the job is done, vasp.out must exist\n if not os.path.isfile(os.path.join(self.cal_loc, self.target_file)):\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(Vasp_out_rhosyg, self).write_file_absence_log(filename_list = [self.target_file], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n if find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=self.target_str):\n self.write_error_log()\n return False\n else:\n return True\n \n \n def write_error_log(self):\n super(Vasp_out_rhosyg, self).write_error_log(target_error_str=self.target_str, error_type=\"__rhosyg__\")\n \n def correct(self):\n \"\"\"\n This correction is borrowed from custodian and modified.\n https://materialsproject.github.io/custodian/_modules/custodian/vasp/handlers.html#VaspErrorHandler.correct\n \"\"\"\n incar_dict = modify_vasp_incar(cal_loc=self.cal_loc)\n SYMPREC = float(incar_dict.get(\"SYMPREC\", 1.0e-5))\n ISYM = int(incar_dict.get(\"ISYM\", 2))\n \n if 1.0e-4 > SYMPREC:\n super(Vasp_out_rhosyg, self).backup()\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags={\"SYMPREC\": 1.0e-4}, rename_old_incar=False)\n super(Vasp_out_rhosyg, self).write_correction_log(new_incar_tags={\"SYMPREC\": 1.0e-4})\n return True\n elif ISYM != 0:\n super(Vasp_out_rhosyg, self).backup()\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags={\"ISYM\": 0}, rename_old_incar=False)\n super(Vasp_out_rhosyg, self).write_correction_log(new_incar_tags={\"ISYM\": 0})\n return True\n else:\n return False \n\n\n# In[17]:\n\n\nclass Vasp_out_zpotrf(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: after the calculation.\n Target file: vasp.out or the one specified by tag vasp.out\n Target error string: \"LAPACK: Routine ZPOTRF failed\"\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n check method: return True, if not found; return False and write error logs otherwise.\n correct method: decrease POTIM and switch off symmetry. 
The lower bound for POTIM is 0.05\n \"\"\"\n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.target_file = self.workflow[0][\"vasp.out\"]\n self.target_str = \"LAPACK: Routine ZPOTRF failed\"\n \n \n def check(self):\n #this method is not active until the job is done\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files() == [None, None]:\n return True\n \n #Since the job is done, vasp.out must exist\n if not os.path.isfile(os.path.join(self.cal_loc, self.target_file)):\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(Vasp_out_zpotrf, self).write_file_absence_log(filename_list = [self.target_file], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n if find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=self.target_str):\n self.write_error_log()\n return False\n else:\n return True\n \n \n def write_error_log(self):\n super(Vasp_out_zpotrf, self).write_error_log(target_error_str=self.target_str, error_type=\"__zpotrf__\")\n \n def correct(self):\n \"\"\"\n This correction is borrowed from custodian and modified.\n https://materialsproject.github.io/custodian/_modules/custodian/vasp/handlers.html#VaspErrorHandler.correct\n \"\"\"\n if not os.path.isfile(os.path.join(self.cal_loc, \"OUTCAR\")):\n open(os.path.join(self.cal_loc, \"__cannot_find_OUTCAR_for_corrections__\"), \"w\").close()\n super(Vasp_out_zpotrf, self).write_file_absence_log(filename_list = [\"OUTCAR\"])\n return False\n \n if not os.path.isfile(os.path.join(self.cal_loc, \"OSZICAR\")):\n open(os.path.join(self.cal_loc, \"__cannot_find_OSZICAR_for_corrections__\"), \"w\").close()\n super(Vasp_out_zpotrf, self).write_file_absence_log(filename_list = [\"OSZICAR\"])\n return False\n \n incar_dict = modify_vasp_incar(cal_loc=self.cal_loc)\n ISYM = int(incar_dict.get(\"ISYM\", 2))\n POTIM = float(incar_dict.get(\"POTIM\", 0.5))\n ICHARG = find_incar_tag_from_OUTCAR(tag=\"ICHARG\", cal_loc=self.cal_loc)\n NSW = find_incar_tag_from_OUTCAR(tag=\"NSW\", cal_loc=self.cal_loc)\n IBRION = find_incar_tag_from_OUTCAR(tag=\"IBRION\", cal_loc=self.cal_loc)\n \n new_tags = {}\n if ISYM != 0:\n new_tags[\"ISYM\"] = 0\n \n if NSW != 0 and IBRION != -1:\n if POTIM*0.5 >= 0.05:\n new_tags[\"POTIM\"] = POTIM * 0.5\n \n if new_tags == {}:\n return False\n else:\n delete_files = []\n if ICHARG < 10:\n for file_ in [\"WAVECAR\", \"CHGCAR\", \"CHG\"]:\n if os.path.isfile(os.path.join(self.cal_loc, file_)):\n os.remove(os.path.join(self.cal_loc, file_))\n delete_files.append(file_)\n super(Vasp_out_zpotrf, self).backup()\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags=new_tags, rename_old_incar=False)\n super(Vasp_out_zpotrf, self).write_correction_log(new_incar_tags=new_tags, remove_files=delete_files)\n return True\n \n\n\n# In[18]:\n\n\nclass Vasp_out_edddav(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: after the calculation.\n Target file: vasp.out or the one specified by tag vasp.out\n Target error string: \"Error EDDDAV: Call to ZHEGV failed\"\n inherit methods write_error_tag and read_error_tag from class 
Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n check method: return True, if not found; return False and write error logs otherwise.\n correct method: if ICHARG < 10, remove CHGCAR; \n if ALGO != All, set it to All and return True;\n if ALGO == All, return False\n \"\"\"\n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.target_file = self.workflow[0][\"vasp.out\"]\n self.target_str = \"Error EDDDAV: Call to ZHEGV failed\"\n \n \n \n def check(self):\n #this method is not active until the job is done\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files() == [None, None]:\n return True\n \n #Since the job is done, vasp.out must exist\n if not os.path.isfile(os.path.join(self.cal_loc, self.target_file)):\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(Vasp_out_edddav, self).write_file_absence_log(filename_list = [self.target_file], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n if find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=self.target_str):\n self.write_error_log()\n return False\n else:\n return True\n \n \n def write_error_log(self):\n super(Vasp_out_edddav, self).write_error_log(target_error_str=self.target_str, error_type=\"__edddav__\")\n \n \n def correct(self):\n \"\"\"\n This correction is borrowed from custodian and modified.\n https://materialsproject.github.io/custodian/_modules/custodian/vasp/handlers.html#VaspErrorHandler.correct\n \"\"\"\n if not os.path.isfile(os.path.join(self.cal_loc, \"OUTCAR\")):\n open(os.path.join(self.cal_loc, \"__cannot_find_OUTCAR_for_corrections__\"), \"w\").close()\n super(Vasp_out_edddav, self).write_file_absence_log(filename_list = [\"OUTCAR\"])\n return False\n \n ICHARG = find_incar_tag_from_OUTCAR(cal_loc=self.cal_loc, tag=\"ICHARG\")\n incar_dict = modify_vasp_incar(cal_loc=self.cal_loc)\n ALGO = incar_dict.get(\"ALGO\", \"Normal\").lower()\n \n if ICHARG < 10:\n if os.path.isfile(os.path.join(self.cal_loc, \"CHGCAR\")):\n os.remove(os.path.join(self.cal_loc, \"CHGCAR\"))\n if ALGO != \"all\":\n super(Vasp_out_edddav, self).backup()\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags={\"ALGO\": \"All\"}, remove_tags=[\"AMIX\", \"BMIX\", \"AMIN\"])\n super(Vasp_out_edddav, self).write_correction_log(new_incar_tags={\"ALGO\": \"All\"}, remove_incar_tags=[\"AMIX\", \"BMIX\", \"AMIN\"], \n remove_files=[\"CHGCAR\"])\n return True\n \n return False \n\n\n# In[19]:\n\n\nclass Vasp_out_real_optlay(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: after the calculation.\n Target file: vasp.out or the one specified by tag vasp.out\n Target error string: \"REAL_OPTLAY: internal error\"\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n check method: return True, if not found; 
return False and write error logs otherwise.\n correct method: if LREAL = .TRUE., reset it to .FALSE. and return True; Otherwise, return False\n \"\"\"\n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.target_file = self.workflow[0][\"vasp.out\"]\n self.target_str = \"REAL_OPTLAY: internal error\"\n \n \n \n def check(self):\n #this method is not active until the job is done\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files() == [None, None]:\n return True\n \n #Since the job is done, vasp.out must exist\n if not os.path.isfile(os.path.join(self.cal_loc, self.target_file)):\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(Vasp_out_real_optlay, self).write_file_absence_log(filename_list = [self.target_file], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n if find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=self.target_str):\n self.write_error_log()\n return False\n else:\n return True\n \n \n def write_error_log(self):\n super(Vasp_out_real_optlay, self).write_error_log(target_error_str=self.target_str, error_type=\"__real_optlay__\")\n \n \n def correct(self):\n target_str = \"Therefore set LREAL=.FALSE. in the INCAR file\"\n if find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=target_str):\n super(Vasp_out_real_optlay, self).backup()\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags={\"LREAL\": \".FALSE.\"}, rename_old_incar=False)\n super(Vasp_out_real_optlay, self).write_correction_log(new_incar_tags={\"LREAL\": \".FALSE.\"})\n return True\n \n \n return False \n\n\n# In[20]:\n\n\nclass Vasp_out_pzunmtr_or_pzstein(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: after the calculation.\n Target file: vasp.out or the one specified by tag vasp.out\n Target error string: \"PZUNMTR parameter number\" or \"PZSTEIN parameter number\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow.\n check method: return True, if not found; return False and write error logs otherwise.\n correct method: if ALGO != Normal, reset it to Normal and return True; Otherwise, return False\n \"\"\"\n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.target_file = self.workflow[0][\"vasp.out\"]\n self.target_str_list = [\"PZUNMTR parameter number\", \"PZSTEIN parameter number\"]\n \n \n \n def check(self):\n #this method is not active until the job is done\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).find_std_files() == [None, None]:\n return True\n \n #Since the job is done, vasp.out must exist\n if not os.path.isfile(os.path.join(self.cal_loc, self.target_file)):\n 
decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(Vasp_out_pzunmtr_or_pzstein, self).write_file_absence_log(filename_list = [self.target_file], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n for target_str in self.target_str_list:\n if find_target_str(cal_loc=self.cal_loc, target_file=self.target_file, target_str=target_str):\n self.target_str = target_str\n self.write_error_log()\n return False\n else:\n return True\n \n \n def write_error_log(self):\n super(Vasp_out_pzunmtr_or_pzstein, self).write_error_log(target_error_str=self.target_str, error_type=\"__pzunmtr_or_pzstein__\")\n \n \n def correct(self):\n IALGO = find_incar_tag_from_OUTCAR(tag=\"IALGO\", cal_loc=self.cal_loc) # IALGO=38 <--> ALGO=Normal\n if IALGO != 38:\n super(Vasp_out_pzunmtr_or_pzstein, self).backup()\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags={\"ALGO\": \"Normal\"}, rename_old_incar=False)\n super(Vasp_out_pzunmtr_or_pzstein, self).write_correction_log(new_incar_tags={\"ALGO\": \"Normal\"})\n return True\n \n return False \n\n\n# In[21]:\n\n\nclass Electronic_divergence(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: on the fly & after the calculation.\n Check if electonic cal divergences and the max ionoic step is reached.\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow\n check method: return True if reahced; return False and write error logs otherwise.\n \"\"\"\n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n #super(Electronic_divergence, self).__init__(cal_loc)\n #Write_and_read_error_tag.__init__(self, cal_loc=self.cal_loc)\n \n \n def check(self):\n if not os.path.isfile(os.path.join(self.cal_loc, \"OUTCAR\")) or not os.path.isfile(os.path.join(self.cal_loc, \"OSZICAR\")):\n return True\n \n try:\n NELM = find_incar_tag_from_OUTCAR(tag=\"NELM\", cal_loc=self.cal_loc)\n EDIFF = find_incar_tag_from_OUTCAR(tag=\"EDIFF\", cal_loc=self.cal_loc)\n except:\n return True\n \n #print(NELM, EDIFF)\n oszicar = Oszicar(os.path.join(self.cal_loc, \"OSZICAR\"))\n for electronic_steps in oszicar.electronic_steps:\n #print(len(electronic_steps))\n if len(electronic_steps) == NELM:\n last_step = electronic_steps[-1]\n #print(last_step[\"dE\"], last_step[\"deps\"])\n if abs(last_step[\"dE\"]) > EDIFF or abs(last_step[\"deps\"]) > EDIFF:\n self.write_error_log()\n return False\n return True\n \n \n def write_error_log(self):\n error_str = \"Electronic divergence happens\"\n super(Electronic_divergence, self).write_error_log(target_error_str=error_str, error_type=\"__electronic_divergence__\")\n \n def correct(self):\n \"\"\"\n Orders of corrections:\n 1st option: if ALGO != Normal, set ALGO = Normal and NELM = 200 if original NELM < 200; \n If the dipole correction is on, try to set DIPOL if not present\n 2nd option: if the dipole correction is on, try to set DIPOL if not present.\n 3rd option: AMIX=0.1, BMIX = 0.01, ICHARG = 2 and NELM = 300 if 
original NELM < 300\n 4th option: AMIN=0.01, BMIX=3.0, ICHARG =2 and NELM = 400 if original NELM < 400\n 5th option: return False <-- fail to automatically recover.\n Note that for the 1st, 2nd, 3rd, 4th options, if EDIFF*5 <= 1.0E-4, we also set EDIFF = EDIFF*5\n This correction is borrowed from custodian and modified.\n https://materialsproject.github.io/custodian/_modules/custodian/vasp/handlers.html#VaspErrorHandler.correct\n \"\"\"\n if not os.path.isfile(os.path.join(self.cal_loc, \"OUTCAR\")):\n open(os.path.join(self.cal_loc, \"__cannot_find_OUTCAR_for_corrections__\"), \"w\").close()\n super(Electronic_divergence, self).write_file_absence_log(filename_list = [\"OUTCAR\"])\n return False\n \n NELM = find_incar_tag_from_OUTCAR(tag=\"NELM\", cal_loc=self.cal_loc)\n EDIFF = find_incar_tag_from_OUTCAR(tag=\"EDIFF\", cal_loc=self.cal_loc)\n IALGO = find_incar_tag_from_OUTCAR(tag=\"IALGO\", cal_loc=self.cal_loc) # IALGO=38 <--> ALGO=Normal\n incar = modify_vasp_incar(cal_loc=self.cal_loc)\n AMIX = float(incar.get(\"AMIX\", 0.4))\n BMIX = float(incar.get(\"BMIX\", 1.0))\n AMIN = float(incar.get(\"AMIN\", 0.1))\n #according to vaspwiki, IDIPOL will be switched on if it 1, 2, 3, or 4. \n #Here we use 0 to denote the absence of the dipole correction\n IDIPOL = int(incar.get(\"IDIPOL\", 0)) \n DIPOL = incar.get(\"DIPOL\", \"\")\n \n new_incar_tags = {\"LREAL\": \".FALSE.\"}\n \n if EDIFF*5 <= 1.0E-4:\n new_incar_tags[\"EDIFF\"] = EDIFF * 5\n\n \n if IALGO != 38: \n super(Electronic_divergence, self).backup()\n new_incar_tags[\"ALGO\"] = \"Normal\"\n new_incar_tags[\"NELM\"] = NELM if NELM > 200 else 200\n #For the calculations involved in the dipole correction, set the dipol center.\n #Note that 0.5 is set along x and y directions, while the geometrical center is adopted along the z direction.\n if IDIPOL != 0:\n if DIPOL == \"\":\n struct = Structure.from_file(os.path.join(self.cal_loc, \"POSCAR\"))\n mean_c = np.mean(struct.frac_coords[:, 2])\n new_incar_tags[\"DIPOL\"] = \"0.5 0.5 {:.3}\".format(mean_c)\n new_incar_tags[\"ICHARG\"] = 2\n \n modify_vasp_incar(cal_loc=self.cal_loc, new_tags=new_incar_tags, rename_old_incar=False)\n super(Electronic_divergence, self).write_correction_log(new_incar_tags=new_incar_tags)\n return True\n \n #For the calculations involved in the dipole correction, set the dipol center.\n #Note that 0.5 is set along x and y directions, while the geometrical center is adopted along the z direction. 
\n if IDIPOL != 0: \n if DIPOL == \"\":\n super(Electronic_divergence, self).backup()\n struct = Structure.from_file(os.path.join(self.cal_loc, \"POSCAR\"))\n mean_c = np.mean(struct.frac_coords[:, 2])\n new_incar_tags[\"DIPOL\"] = \"0.5 0.5 {:.3}\".format(mean_c)\n new_incar_tags[\"ICHARG\"] = 2\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags=new_incar_tags, rename_old_incar=False)\n super(Electronic_divergence, self).write_correction_log(new_incar_tags=new_incar_tags)\n return True\n \n if BMIX == 3.0:\n return False\n \n if AMIX > 0.1 and BMIX > 0.01:\n super(Electronic_divergence, self).backup()\n new_incar_tags[\"NELM\"] = NELM if NELM > 300 else 300\n new_incar_tags[\"AMIX\"] = 0.1\n new_incar_tags[\"BMIX\"] = 0.01\n new_incar_tags[\"ICHARG\"] = 2\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags=new_incar_tags, rename_old_incar=False)\n super(Electronic_divergence, self).write_correction_log(new_incar_tags=new_incar_tags)\n return True\n \n if BMIX < 3.0 and AMIN > 0.01:\n super(Electronic_divergence, self).backup()\n new_incar_tags[\"NELM\"] = NELM if NELM > 400 else 400\n new_incar_tags[\"AMIN\"] = 0.01\n new_incar_tags[\"BMIX\"] = 3.0\n new_incar_tags[\"ICHARG\"] = 2\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags=new_incar_tags, remove_tags=[\"AMIX\"], rename_old_incar=False)\n super(Electronic_divergence, self).write_correction_log(new_incar_tags=new_incar_tags, remove_incar_tags=[\"AMIX\"])\n return True\n \n return False\n \n\n\n# In[22]:\n\n\nclass Ionic_divergence(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: after the calculation.\n Check if the ionic convergence is reached.\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation.\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow\n check method: return True if reached; return False and write error logs otherwise.\n \"\"\"\n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n \n #Write_and_read_error_tag.__init__(self, cal_loc=self.cal_loc)\n \n def check(self):\n \n #This if statement deactivates the check method until the calculation is done.\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).stdout_file == [None, None]:\n return True\n \n #Since the job is done, vasp.out must exist\n if not os.path.isfile(os.path.join(self.cal_loc, \"OUTCAR\")):\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(Ionic_divergence, self).write_file_absence_log(filename_list = [\"OUTCAR\"], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n NSW = find_incar_tag_from_OUTCAR(tag=\"NSW\", cal_loc=self.cal_loc)\n IBRION = find_incar_tag_from_OUTCAR(tag=\"IBRION\", cal_loc=self.cal_loc)\n #EDIFFG = find_incar_tag_from_OUTCAR(tag=\"EDIFFG\", cal_loc=self.cal_loc)\n #This if statement deactivates the check method unless the calculation is the structural optimization\n if NSW == 0 or IBRION == -1:\n return True\n \n target_str = \"reached required accuracy - stopping structural energy minimisation\"\n if 
find_target_str(cal_loc=self.cal_loc, target_file=\"OUTCAR\", target_str=target_str):\n return True\n \n self.write_error_log()\n return False\n \n \n def write_error_log(self):\n error_str = \"Ionic divergence happens\"\n super(Ionic_divergence, self).write_error_log(target_error_str=error_str, error_type=\"__ionic_divergence__\")\n \n def correct(self):\n if not os.path.isfile(os.path.join(self.cal_loc, \"OUTCAR\")):\n open(os.path.join(self.cal_loc, \"__cannot_find_OUTCAR_for_corrections__\"), \"w\").close()\n super(Ionic_divergence, self).write_file_absence_log(filename_list = [\"OUTCAR\"])\n return False\n \n if not os.path.isfile(os.path.join(self.cal_loc, \"OSZICAR\")):\n open(os.path.join(self.cal_loc, \"__cannot_find_OSZICAR_for_corrections__\"), \"w\").close()\n super(Ionic_divergence, self).write_file_absence_log(filename_list = [\"OSZICAR\"])\n return False\n \n EDIFF = find_incar_tag_from_OUTCAR(cal_loc=self.cal_loc, tag=\"EDIFF\")\n EDIFFG = find_incar_tag_from_OUTCAR(cal_loc=self.cal_loc, tag=\"EDIFFG\")\n NSW = find_incar_tag_from_OUTCAR(cal_loc=self.cal_loc, tag=\"NSW\")\n IBRION = find_incar_tag_from_OUTCAR(cal_loc=self.cal_loc, tag=\"IBRION\")\n \n oszicar = Oszicar(filename=os.path.join(self.cal_loc, \"OSZICAR\"))\n if len(oszicar.electronic_steps) < NSW:\n #check if CONTCAR is empty.\n with open(os.path.join(self.cal_loc, \"CONTCAR\"), \"r\") as f:\n lines = [line for line in f if line.strip()]\n if lines == []:\n with open(self.log_txt, \"a\") as f:\n f.write(\"{} Correction: {}\\n\".format(get_time_str(), self.firework_name))\n f.write(\"\\t\\t\\tCONTCAR is empty, so the error may not be triggered by the limited walltime.\\n\")\n return False\n \n super(Ionic_divergence, self).backup()\n shutil.move(os.path.join(self.cal_loc, \"CONTCAR\"), os.path.join(self.cal_loc, \"POSCAR\"))\n with open(self.log_txt, \"a\") as f:\n f.write(\"{} Correction: {}\\n\".format(get_time_str(), self.firework_name))\n f.write(\"\\t\\t\\tThis error may be due to that the walltime is reached.\\n\")\n f.write(\"\\t\\t\\tCONTCAR --> POSCAR\\n\")\n return True\n elif IBRION in [2, 3]:\n super(Ionic_divergence, self).backup()\n shutil.move(os.path.join(self.cal_loc, \"CONTCAR\"), os.path.join(self.cal_loc, \"POSCAR\"))\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags={\"IBRION\": 1}, rename_old_incar=False)\n with open(self.log_txt, \"a\") as f:\n f.write(\"{} Correction: {}\\n\".format(get_time_str(), self.firework_name))\n f.write(\"\\t\\t\\tThe ionic step reaches the preset maximum step ({})\\n\".format(NSW))\n f.write(\"\\t\\t\\tBut IBRION is {}, not 1. 
So try one more round.\\n\".format(IBRION))\n f.write(\"\\t\\t\\tIBRION = 1, CONTCAR --> POSCAR.\\n\")\n return True\n else:\n return False\n \n\n\n# In[23]:\n\n\nclass Positive_energy(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: after the calculation.\n Check if a electronic run has positive energy.\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation.\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow\n check method: return True if negative; return False and write error logs otherwise.\n \"\"\"\n \n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n #super(Positive_energy, self).__init__(cal_loc)\n #Write_and_read_error_tag.__init__(self, cal_loc=self.cal_loc)\n \n def check(self):\n \n #This if statement deactivates the check method until the calculation is done.\n if Queue_std_files(cal_loc=self.cal_loc, workflow=self.workflow).stdout_file == [None, None]:\n return True\n \n #Since the job is done, vasp.out must exist\n if not os.path.isfile(os.path.join(self.cal_loc, \"OSZICAR\")):\n decorated_os_rename(loc=self.cal_loc, old_filename=\"__running__\", new_filename=\"__error__\")\n #os.rename(os.path.join(self.cal_loc, \"__running__\"), os.path.join(self.cal_loc, \"__error__\"))\n super(Positive_energy, self).write_file_absence_log(filename_list = [\"OSZICAR\"], \n initial_signal_file=\"__running__\", \n final_signal_file=\"__error__\")\n return False\n \n oszicar = Oszicar(os.path.join(self.cal_loc, \"OSZICAR\"))\n try:\n if oszicar.final_energy > 0:\n self.write_error_log()\n return False\n except:\n pass\n return True\n \n \n def write_error_log(self):\n error_str = \"Positive energy has been found\"\n super(Positive_energy, self).write_error_log(target_error_str=error_str, error_type=\"__positive_energy__\")\n \n def correct(self):\n \"\"\"\n This correction is borrowed from custodian and modified.\n https://materialsproject.github.io/custodian/_modules/custodian/vasp/handlers.html#VaspErrorHandler.correct\n \"\"\"\n if not os.path.isfile(os.path.join(self.cal_loc, \"OUTCAR\")):\n open(os.path.join(self.cal_loc, \"__cannot_find_OUTCAR_for_corrections__\"), \"w\").close()\n super(Positive_energy, self).write_file_absence_log(filename_list = [\"OUTCAR\"])\n return False\n \n IALGO = find_incar_tag_from_OUTCAR(cal_loc=self.cal_loc, tag=\"IALGO\")\n \n if IALGO != 38:\n super(Positive_energy, self).backup()\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags={\"ALGO\": \"Normal\"}, rename_old_incar=False)\n super(Positive_energy, self).write_correction_log(new_incar_tags={\"ALGO\": \"Normal\"})\n #with open(self.log_txt, \"a\") as f:\n # f.write(\"{} Correction: ALGO --> Normal\\n\".format(get_time_str()))\n return True\n \n return False\n\n\n# In[24]:\n\n\nclass Bader_Charge(Vasp_Error_Checker_Logger, Vasp_Error_Saver):\n \"\"\"\n Error checking type: on the fly.\n If Bader Charge is going to be calculated (bader_charge tag is on), check LAECHG, NGXF, NGYF, NGZF, LCHARG.\n inherit methods write_error_tag and read_error_tag from class Write_and_read_error__.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation.\n -workflow: the output of func 
Parse_calculation_workflow.parse_calculation_workflow\n check method: if bader_charge is on and any of LAECHG, NGXF, NGYF, NGZF, LCHARG is not set properly, trigger an error named __bader_charge__\n correction: set LAECHG, NGXF, NGYF, NGZF, LCHARG properly\n \"\"\"\n \n def __init__(self, cal_loc, workflow):\n Vasp_Error_Saver.__init__(self, cal_loc=cal_loc, workflow=workflow)\n \n self.workflow = workflow\n self.cal_loc = cal_loc\n self.log_txt_loc, self.firework_name = os.path.split(cal_loc)\n self.log_txt = os.path.join(self.log_txt_loc, \"log.txt\")\n self.firework = get_current_firework_from_cal_loc(cal_loc=cal_loc, workflow=workflow)\n\n \n def check(self):\n if self.firework[\"bader_charge\"] == False:\n return True\n \n incar_dict = modify_vasp_incar(cal_loc=self.cal_loc)\n all_in = True\n for tag in [\"LAECHG\", \"NGXF\", \"NGYF\", \"NGZF\"]:\n if tag not in incar_dict.keys():\n all_in = False\n break\n \n if \"LCHARG\" in incar_dict.keys():\n if \"t\" not in incar_dict[\"LCHARG\"].lower():\n all_in = False\n \n if all_in == False:\n if self.firework[\"step_no\"] == 1:\n if os.path.isfile(os.path.join(self.cal_loc, \"OUTCAR\")):\n if find_target_str(cal_loc=self.cal_loc, target_file=\"OUTCAR\", target_str=\"dimension x,y,z NGXF=\"):\n self.write_error_log()\n return False\n else:\n self.write_error_log()\n return False\n return True \n \n def write_error_log(self):\n error_str = \"INCAR tags related to the Bader Charge Calculation are not set properly\"\n super(Bader_Charge, self).write_error_log(target_error_str=error_str, error_type=\"__bader_charge__\")\n \n def correct(self):\n \"\"\"\n Please refer to http://theory.cm.utexas.edu/henkelman/code/bader/ for the INCAR tag settings for Bader Charge Analysis\n LCHARG = .TRUE.\n LAECHG = .TRUE.\n NGXF = 2 * default value\n NGYF = 2 * default value\n NGZF = 2 * default value\n \"\"\"\n if self.firework[\"step_no\"] == 1:\n new_incar_tags = get_bader_charge_tags(cal_loc=self.cal_loc)\n else:\n prev_cal = os.path.join(os.path.split(self.cal_loc)[0], self.workflow[self.firework[\"copy_which_step\"]-1][\"firework_folder_name\"])\n new_incar_tags = get_bader_charge_tags(cal_loc=prev_cal)\n \n super(Bader_Charge, self).backup()\n modify_vasp_incar(cal_loc=self.cal_loc, new_tags=new_incar_tags, rename_old_incar=\"INCAR.no_bader_charge\")\n super(Bader_Charge, self).write_correction_log(new_incar_tags=new_incar_tags)\n return True\n \n\n\n# In[25]:\n\n\nclass Null_error_checker(object):\n \"\"\"\n This class deals with any exceptional cases where the error_type in __error__ under cal_loc is not written\n by our defined Error Checker classes in this sript.\n This class makes the wrapper Vasp_Error_checker robust to deal with any cases.\n input arguments:\n -cal_loc: the location of the to-be-checked calculation.\n -workflow: the output of func Parse_calculation_workflow.parse_calculation_workflow\n \n When method check is called, return True.\n When method correct is called, return False.\n \"\"\"\n \n def __init__(self, cal_loc, workflow):\n pass\n \n def check(self):\n return True\n \n def correct(self):\n return False\n \n def write_error_log(self):\n pass\n\n" ]
[ [ "numpy.mean" ] ]
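Each checker catalogued above exposes the same check()/correct() pair, so a driver can probe them in sequence; the sketch below illustrates only that pattern — DummyChecker and run_checkers are hypothetical stand-ins, not part of the catalogued module.

class DummyChecker:
    """Stand-in with the same interface as the VASP error checkers above."""
    def __init__(self, cal_loc, workflow):
        self.cal_loc = cal_loc
        self.workflow = workflow

    def check(self):
        # True means "no error found"; the real checkers grep vasp.out / OUTCAR.
        return False

    def correct(self):
        # True means "an automatic correction was applied"; real checkers edit INCAR, move files, etc.
        return True


def run_checkers(checker_classes, cal_loc, workflow):
    """Return (name of first failing checker, corrected?) or (None, True) if all pass."""
    for cls in checker_classes:
        checker = cls(cal_loc=cal_loc, workflow=workflow)
        if not checker.check():
            return cls.__name__, checker.correct()
    return None, True


print(run_checkers([DummyChecker], cal_loc="cal_0", workflow=[{"vasp.out": "vasp.out"}]))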
metasyn/scikeras
[ "cb422fdd4f5c592feb71ce155fa25f26b25cdd82" ]
[ "scikeras/utils/transformers.py" ]
[ "from typing import Any, Dict, List, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import FunctionTransformer, OneHotEncoder, OrdinalEncoder\nfrom sklearn.utils.multiclass import type_of_target\nfrom tensorflow.keras.losses import (\n CategoricalCrossentropy,\n Loss,\n categorical_crossentropy,\n)\n\n\ndef _is_categorical_crossentropy(loss):\n return (\n isinstance(loss, CategoricalCrossentropy)\n or loss == categorical_crossentropy\n or getattr(loss, \"__name__\", None) == \"categorical_crossentropy\"\n or loss in (\"categorical_crossentropy\", \"cce\", \"CategoricalCrossentropy\")\n )\n\n\nclass TargetReshaper(BaseEstimator, TransformerMixin):\n \"\"\"Convert 1D targets to 2D and back.\n\n For use in pipelines with transformers that only accept\n 2D inputs, like OneHotEncoder and OrdinalEncoder.\n\n Attributes\n ----------\n ndim_ : int\n Dimensions of y that the transformer was trained on.\n \"\"\"\n\n def fit(self, y: np.ndarray) -> \"TargetReshaper\":\n \"\"\"Fit the transformer to a target y.\n\n Returns\n -------\n TargetReshaper\n A reference to the current instance of TargetReshaper.\n \"\"\"\n self.ndim_ = y.ndim\n return self\n\n @staticmethod\n def transform(y: np.ndarray) -> np.ndarray:\n \"\"\"Makes 1D y 2D.\n\n Parameters\n ----------\n y : np.ndarray\n Target y to be transformed.\n\n Returns\n -------\n np.ndarray\n A numpy array, of dimension at least 2.\n \"\"\"\n if y.ndim == 1:\n return y.reshape(-1, 1)\n return y\n\n def inverse_transform(self, y: np.ndarray) -> np.ndarray:\n \"\"\"Revert the transformation of transform.\n\n Parameters\n ----------\n y : np.ndarray\n Transformed numpy array.\n\n Returns\n -------\n np.ndarray\n If the transformer was fit to a 1D numpy array,\n and a 2D numpy array with a singleton second dimension\n is passed, it will be squeezed back to 1D. Otherwise, it\n will eb left untouched.\n \"\"\"\n if not hasattr(self, \"ndim_\"):\n raise NotFittedError(\n f\"This {self.__class__.__name__} is not initialized.\"\n \" You must call ``fit`` before using ``inverse_transform``.\"\n )\n if self.ndim_ == 1 and y.ndim == 2:\n return np.squeeze(y, axis=1)\n return y\n\n\nclass ClassifierLabelEncoder(BaseEstimator, TransformerMixin):\n \"\"\"Default target transformer for KerasClassifier.\n\n Parameters\n ----------\n loss : Union[None, str, Loss], default None\n Keras Model's loss function. Used to automatically\n one-hot encode the target if the loss function is\n categorical crossentropy.\n categories : Union[str, List[np.ndarray]], default \"auto\"\n All of the categories present in the target for the entire\n dataset. 
\"auto\" will infer the categories from the\n data passed to fit.\n\n Attributes\n ----------\n classes_ : Iterable\n The classes seen during fit.\n n_classes_ : int\n The number of classes seen during fit.\n n_outputs_ : int\n Dimensions of y that the transformer was trained on.\n n_outputs_expected_ : int\n Number of outputs the Keras Model is expected to have.\n \"\"\"\n\n def __init__(\n self,\n loss: Union[None, str, Loss] = None,\n categories: Union[str, List[np.ndarray]] = \"auto\",\n ):\n self.loss = loss\n self.categories = categories\n\n def _type_of_target(self, y: np.ndarray) -> str:\n \"\"\"Determine the type of target accounting for the self.categories param.\"\"\"\n target_type = type_of_target(y)\n if target_type == \"binary\" and self.categories != \"auto\":\n # check that this is not a multiclass problem missing categories\n # if not \"auto\", categories is expected to be a list with a single np.ndarray\n target_type = type_of_target(self.categories[0])\n return target_type\n\n def fit(self, y: np.ndarray) -> \"ClassifierLabelEncoder\":\n \"\"\"Fit the estimator to the target y.\n\n For all targets, this transforms classes into ordinal numbers.\n If the loss function is categorical_crossentropy, the target\n will be one-hot encoded.\n\n Parameters\n ----------\n y : np.ndarray\n The target data to be transformed.\n\n Returns\n -------\n ClassifierLabelEncoder\n A reference to the current instance of ClassifierLabelEncoder.\n \"\"\"\n target_type = self._type_of_target(y)\n keras_dtype = np.dtype(tf.keras.backend.floatx())\n self._y_shape = y.shape\n encoders = {\n \"binary\": make_pipeline(\n TargetReshaper(),\n OrdinalEncoder(dtype=keras_dtype, categories=self.categories),\n ),\n \"multiclass\": make_pipeline(\n TargetReshaper(),\n OrdinalEncoder(dtype=keras_dtype, categories=self.categories),\n ),\n \"multiclass-multioutput\": FunctionTransformer(),\n \"multilabel-indicator\": FunctionTransformer(),\n }\n if _is_categorical_crossentropy(self.loss):\n encoders[\"multiclass\"] = make_pipeline(\n TargetReshaper(),\n OneHotEncoder(\n sparse=False, dtype=keras_dtype, categories=self.categories\n ),\n )\n if target_type not in encoders:\n raise ValueError(\n f\"Unknown label type: {target_type}.\"\n \"\\n\\nTo implement support, subclass KerasClassifier and override\"\n \" ``target_encoder`` with a transformer that supports this\"\n \" label type.\"\n \"\\n\\nFor information on sklearn target types, see:\"\n \" * https://scikit-learn.org/stable/modules/generated/sklearn.utils.multiclass.type_of_target.html\"\n \" * https://scikit-learn.org/stable/modules/multiclass.html\"\n \"\\n\\nFor information on the SciKeras data transformation interface, see:\"\n \" * https://www.adriangb.com/scikeras/stable/advanced.html#data-transformers\"\n )\n self._final_encoder = encoders[target_type].fit(y)\n\n if (\n target_type == \"multilabel-indicator\"\n and y.min() == 0\n and (y.sum(axis=1) == 1).all()\n ):\n target_type = \"multiclass-onehot\"\n\n self.n_outputs_ = 1\n self.n_outputs_expected_ = 1\n self._y_dtype = y.dtype\n self._target_type = target_type\n\n if target_type in (\"binary\", \"multiclass\"):\n self.classes_ = self._final_encoder[1].categories_[0]\n self.n_classes_ = self.classes_.size\n elif target_type in (\"multiclass-onehot\", \"multilabel-indicator\"):\n self.classes_ = np.arange(0, y.shape[1])\n self.n_classes_ = y.shape[1]\n elif target_type == \"multiclass-multioutput\":\n self.classes_ = None\n self.n_classes_ = None\n\n return self\n\n def transform(self, y: 
np.ndarray) -> np.ndarray:\n \"\"\"Transform the target y to the format expected by the Keras Model.\n\n If the loss function is categorical_crossentropy, the target\n will be one-hot encoded.\n For other types of target, this transforms classes into ordinal numbers.\n\n Returns\n -------\n np.ndarray\n Transformed target.\n \"\"\"\n # no need to validate n_outputs_ or n_outputs_expected_, those are hardcoded\n # self.classes_ and self.n_classes_ are validated by the transformers themselves\n return self._final_encoder.transform(y)\n\n def inverse_transform(\n self, y: np.ndarray, return_proba: bool = False\n ) -> np.ndarray:\n \"\"\"Restore the data types, shape and classes of the input y\n to the output of the Keras Model.\n\n Parameters\n ----------\n y : np.ndarray\n Raw probability predictions from the Keras Model.\n return_proba : bool, default False\n If True, return the prediction probabilites themselves.\n If False, return the class predictions.\n\n Returns\n -------\n np.ndarray\n Class predictions (of the same shape as the y to fit/transform), \\\n or class prediction probabilities.\n \"\"\"\n if self._target_type == \"binary\":\n # array([0.9, 0.1], [.2, .8]) -> array(['yes', 'no'])\n if y.ndim == 1 or (y.shape[1] == 1 and self.n_classes_ == 2):\n # result from a single sigmoid output\n # reformat so that we have 2 columns\n y = np.column_stack([1 - y, y])\n class_predictions = np.argmax(y, axis=1).reshape(-1, 1)\n class_predictions = self._final_encoder.inverse_transform(class_predictions)\n elif self._target_type == \"multiclass\":\n # array([0.8, 0.1, 0.1], [.1, .8, .1]) -> array(['apple', 'orange'])\n idx = np.argmax(y, axis=-1)\n if not _is_categorical_crossentropy(self.loss):\n class_predictions = idx.reshape(-1, 1)\n else:\n class_predictions = np.zeros(y.shape, dtype=int)\n class_predictions[np.arange(len(idx)), idx] = 1\n class_predictions = self._final_encoder.inverse_transform(class_predictions)\n elif self._target_type == \"multiclass-onehot\":\n # array([.8, .1, .1], [.1, .8, .1]) -> array([[1, 0, 0], [0, 1, 0]])\n idx = np.argmax(y, axis=-1)\n class_predictions = np.zeros(y.shape, dtype=int)\n class_predictions[np.arange(len(idx)), idx] = 1\n elif self._target_type == \"multilabel-indicator\":\n class_predictions = np.around(y).astype(int, copy=False)\n else:\n if not return_proba:\n raise NotImplementedError(\n \"Class-predictions are not clearly defined for\"\n \" 'multiclass-multioutput' target types.\"\n \"\\n\\nTo implement support, subclass KerasClassifier and override\"\n \" ``target_encoder`` with a transformer that supports this\"\n \" label type.\"\n \"\\n\\nFor information on sklearn target types, see:\"\n \" * https://scikit-learn.org/stable/modules/generated/sklearn.utils.multiclass.type_of_target.html\"\n \" * https://scikit-learn.org/stable/modules/multiclass.html\"\n \"\\n\\nFor information on the SciKeras data transformation interface, see:\"\n \" * https://www.adriangb.com/scikeras/stable/advanced.html#data-transformers\"\n )\n\n if return_proba:\n return np.squeeze(y)\n return class_predictions.reshape(-1, *self._y_shape[1:])\n\n def get_metadata(self) -> Dict[str, Any]:\n \"\"\"Returns a dictionary of meta-parameters generated when this transfromer\n was fitted.\n\n Used by SciKeras to bind these parameters to the SciKeras estimator itself\n and make them available as inputs to the Keras model.\n\n Returns\n -------\n Dict[str, Any]\n Dictionary of meta-parameters generated when this transfromer\n was fitted.\n \"\"\"\n return {\n \"classes_\": 
self.classes_,\n \"n_classes_\": self.n_classes_,\n \"n_outputs_\": self.n_outputs_,\n \"n_outputs_expected_\": self.n_outputs_expected_,\n }\n\n\nclass RegressorTargetEncoder(BaseEstimator, TransformerMixin):\n \"\"\"Default target transformer for KerasRegressor.\n\n Attributes\n ----------\n n_outputs_ : int\n Dimensions of y that the transformer was trained on.\n n_outputs_expected_ : int\n Number of outputs the Keras Model is expected to have.\n \"\"\"\n\n def fit(self, y: np.ndarray) -> \"RegressorTargetEncoder\":\n \"\"\"Fit the transformer to the target y.\n\n For RegressorTargetEncoder, this just records the dimensions\n of y as the expected number of outputs and saves the dtype.\n\n Returns\n -------\n RegressorTargetEncoder\n A reference to the current instance of RegressorTargetEncoder.\n \"\"\"\n self._y_dtype = y.dtype\n self._y_shape = y.shape\n self.n_outputs_ = 1 if y.ndim == 1 else y.shape[1]\n self.n_outputs_expected_ = 1\n return self\n\n def transform(self, y: np.ndarray) -> np.ndarray:\n \"\"\"Transform the target y to the format expected by the Keras Model.\n\n For RegressorTargetEncoder, this simply checks that the shape passed to\n fit matches the shape passed to transform.\n\n Returns\n -------\n np.ndarray\n Untouched input y.\n \"\"\"\n n_outputs_ = 1 if y.ndim == 1 else y.shape[1]\n if n_outputs_ != self.n_outputs_:\n raise ValueError(\n f\"Detected ``y`` to have {n_outputs_} outputs\"\n f\" with ``y.shape = {y.shape}``\",\n f\" but this {self.__class__.__name__} has\"\n f\" {self.n_outputs_} outputs.\",\n )\n return y\n\n def inverse_transform(self, y: np.ndarray) -> np.ndarray:\n \"\"\"Restore the data types and shape of the input y\n to the output of the Keras Model.\n\n Parameters\n ----------\n y : np.ndarray\n Raw predictions from the Keras Model.\n\n Returns\n -------\n np.ndarray\n Keras Model predictions cast to the dtype and shape of the input\n targets.\n \"\"\"\n y = y.reshape(-1, *self._y_shape[1:])\n return y\n\n def get_metadata(self):\n \"\"\"Returns a dictionary of meta-parameters generated when this transfromer\n was fitted.\n\n Used by SciKeras to bind these parameters to the SciKeras estimator itself\n and make them available as inputs to the Keras model.\n\n Returns\n -------\n Dict[str, Any]\n Dictionary of meta-parameters generated when this transfromer\n was fitted.\n \"\"\"\n return {\n \"n_outputs_\": self.n_outputs_,\n \"n_outputs_expected_\": self.n_outputs_expected_,\n }\n" ]
[ [ "numpy.column_stack", "sklearn.preprocessing.FunctionTransformer", "numpy.zeros", "sklearn.utils.multiclass.type_of_target", "sklearn.preprocessing.OneHotEncoder", "tensorflow.keras.backend.floatx", "numpy.arange", "sklearn.preprocessing.OrdinalEncoder", "numpy.argmax", "numpy.around", "sklearn.exceptions.NotFittedError", "numpy.squeeze" ] ]
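A minimal usage sketch for the ClassifierLabelEncoder defined above, assuming a scikeras install whose scikit-learn/TensorFlow versions match this file; the toy target and probabilities are made up for illustration.

import numpy as np
from scikeras.utils.transformers import ClassifierLabelEncoder

y = np.array(["cat", "dog", "cat", "bird"])
enc = ClassifierLabelEncoder(loss="categorical_crossentropy").fit(y)

y_enc = enc.transform(y)                   # one-hot encoded because the loss is categorical crossentropy
print(y_enc.shape)                         # (4, 3)
print(list(enc.classes_))                  # ['bird', 'cat', 'dog']
print(enc.get_metadata()["n_classes_"])    # 3

proba = np.array([[0.1, 0.8, 0.1],         # fake model-output probabilities
                  [0.2, 0.1, 0.7]])
print(enc.inverse_transform(proba))        # ['cat' 'dog']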
rikenbit/MTL4MHC2
[ "1b0279ed7fb2e16fd1ca33e52b897637b08b5027" ]
[ "script/model_creation/bi_lstm_main.py" ]
[ "#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\nimport random\r\nimport warnings\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nimport tensorflow as tf\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Embedding, Dense, LSTM\r\nfrom keras.layers import Bidirectional\r\nimport matplotlib.pyplot as plt\r\nfrom keras.layers import Input, Dense, BatchNormalization, Dropout, GaussianNoise, GaussianDropout\r\nfrom keras.models import Model\r\nfrom keras.utils import np_utils\r\nfrom keras.callbacks import CSVLogger, History\r\nimport keras.backend as backend\r\nfrom tensorflow.python.keras.utils.vis_utils import plot_model\r\nfrom datetime import datetime\r\nfrom tensorflow.keras.callbacks import Callback, TensorBoard\r\nfrom tensorflow.keras.metrics import Precision, Recall\r\nfrom tensorflow.python.keras.utils.vis_utils import plot_model\r\nfrom tensorflow.keras import layers\r\nfrom keras_multi_head import MultiHead\r\nimport datetime\r\nimport pickle\r\n\r\n# input file\r\n\r\nc1_train = \"{class I MHC train data set path}\"\r\nc1_val =\"{class I MHC validation data set path}\"\r\n\r\nc2_train = \"{class II MHC train data set path}\"\r\nc2_val = \"{class II MHC validation data set path}\"\r\n\r\n# pkl file are available from https://github.com/rikenbit/MTL4MHC2/tree/main/dict\r\n\r\nwith open(\"{Path_to_pkl_file}/monovec.pkl\",\"rb\") as f:\r\n monovec = pickle.load(f)\r\n \r\nwith open(\"{Path_to_pkl_file}/trigram_to_idx_MHC.pkl\",\"rb\") as f:\r\n trigram_to_idx_MHC = pickle.load(f)\r\n\r\nwith open(\"{Path_to_pkl_file}/monogram_to_idx.pkl\",\"rb\") as f:\r\n monogram_to_idx = pickle.load(f)\r\n\r\nwith open(\"{Path_to_pkl_file}/trivec1_MHC.pkl\",\"rb\") as f:\r\n trivec1_MHC = pickle.load(f) \r\n\r\n# function\r\n\r\n# normalization\r\ndef replace(raw_seq_0):\r\n B_aa = 'DN'\r\n J_aa = 'IL'\r\n Z_aa = 'EQ'\r\n X_aa = 'ACDEFGHIKLMNPQRSTVWY'\r\n \r\n seq = raw_seq_0.str.replace('B', random.choice(B_aa))\r\n seq = seq.str.replace('J', random.choice(J_aa))\r\n seq = seq.str.replace('Z', random.choice(Z_aa))\r\n seq = seq.str.replace('X', random.choice(X_aa))\r\n raw_seq_0 = seq\r\n \r\n return raw_seq_0\r\n\r\n# monogram\r\ndef monogram(raw_seq_0):\r\n feature_0 = []\r\n for i in range(0, len(raw_seq_0)):\r\n strain_embedding = []\r\n for j in range(0, len(raw_seq_0[i])):\r\n monogram = raw_seq_0[i][j]\r\n mono_embedding = monogram_to_idx[\"\".join(monogram)]\r\n strain_embedding.append(mono_embedding)\r\n \r\n feature_0.append(strain_embedding)\r\n return feature_0\r\n\r\n# trigram\r\ndef trigram(raw_seq_0):\r\n feature_0 = []\r\n for i in range(0, len(raw_seq_0)):\r\n strain_embedding = []\r\n for j in range(0, len(raw_seq_0[i]) - 2):\r\n trigram = raw_seq_0[i][j:j + 3]\r\n tri_embedding = trigram_to_idx_MHC[\"\".join(trigram)]\r\n strain_embedding.append(tri_embedding)\r\n \r\n feature_0.append(strain_embedding)\r\n return feature_0\r\n\r\n\r\n# model\r\n\r\ndef multimodal_bilstm(out_dim, dropoutrate, out_dim2):\r\n pep_input = Input(shape=(None,)) \r\n mhc_input = Input(shape=(None,)) \r\n \r\n pep_emb = Embedding(47, 100, weights=[monovec], trainable=False)(pep_input)\r\n mhc_emb = Embedding(9419, 100, weights=[trivec1_MHC], trainable=False)(mhc_input)\r\n \r\n # peptide\r\n pep_output1 = Bidirectional(LSTM(out_dim,dropout=dropoutrate), merge_mode='concat')(pep_emb)\r\n pep_output2 = Dense(64, activation='relu')(pep_output1)\r\n \r\n # mhc\r\n mhc_output1 = 
Bidirectional(LSTM(out_dim2,dropout=dropoutrate), merge_mode='concat')(mhc_emb)\r\n mhc_output2 = Dense(64, activation='relu')(mhc_output1)\r\n \r\n conc = layers.concatenate([pep_output2, mhc_output2], axis=-1)\r\n out = Dense(2, activation='softmax')(conc)\r\n \r\n model = Model([pep_input, mhc_input], out) \r\n model.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=['accuracy'])\r\n return model\r\n \r\n \r\n\r\n\r\n# pretreatment\r\n\r\nd = [0]*100\r\np = np.array(['-'])\r\nlp = np.array(d, dtype=float)\r\nlps = np.append(p, lp)\r\nlpsd = pd.DataFrame(lps).T\r\n\r\n\r\n# class II\r\nraw_seq_al_2 = pd.read_csv(c2_train)\r\nraw_seq_al_2 = raw_seq_al_2.sample(frac=1).reset_index(drop=True)\r\nraw_seq_2 = raw_seq_al_2[\"peptide\"]\r\nraw_seq_2MHC = raw_seq_al_2[\"mhc_amino_acid\"]\r\nraw_seq_al_2v = pd.read_csv(c2_val)\r\nraw_seq_al_2v = raw_seq_al_2v.sample(frac=1).reset_index(drop=True)\r\nraw_seq_2v = raw_seq_al_2v[\"peptide\"]\r\nraw_seq_2MHCv = raw_seq_al_2v[\"mhc_amino_acid\"]\r\n\r\n\r\n# Normalization\r\nraw_seq_2 = replace(raw_seq_2)\r\nraw_seq_2v = replace(raw_seq_2v)\r\nraw_seq_2MHC = replace(raw_seq_2MHC)\r\nraw_seq_2MHCv = replace(raw_seq_2MHCv)\r\n\r\n\r\n\r\nfeature_2 = monogram(raw_seq_2)\r\nfeature_2v = monogram(raw_seq_2v)\r\nfeature_2MHC = trigram(raw_seq_2MHC)\r\nfeature_2MHCv = trigram(raw_seq_2MHCv)\r\n \r\n \r\nlabel_2 = raw_seq_al_2[\"bind\"]\r\nlabel_2v = raw_seq_al_2v[\"bind\"]\r\n\r\nlabel_2 = pd.get_dummies(label_2, sparse=True)\r\nlabel_2v = pd.get_dummies(label_2v, sparse=True) \r\n\r\n\r\nlength_2 = []\r\n\r\nfor i in range(0, 45468):\r\n g = len(feature_2[i])\r\n length_2.append(g)\r\n \r\nMAX_LEN_2 = max(length_2)\r\n\r\ntrain2_x = feature_2[:45468]\r\ntrain2_y = label_2[:45468]\r\n\r\n\r\ntrain2_x = pad_sequences(train2_x, maxlen=MAX_LEN_2)\r\n\r\n\r\ntrain2_x = np.array(train2_x)\r\ntrain2_y = np.array(train2_y)\r\n\r\n\r\n\r\ntest2_x = feature_2v[:9743]\r\ntest2_y = label_2v[:9743]\r\n\r\ntest2_x = pad_sequences(test2_x, maxlen=MAX_LEN_2)\r\n\r\ntest2_x = np.array(test2_x)\r\ntest2_y = np.array(test2_y)\r\n\r\nlength_MHC = []\r\n\r\nfor i in range(0, 45468):\r\n g = len(feature_2MHC[i])\r\n length_MHC.append(g)\r\n \r\nMAX_LEN_MHC = max(length_MHC)\r\n\r\ntrain_x_MHC = feature_2MHC[:45468]\r\n\r\n\r\ntrain_x_MHC = pad_sequences(train_x_MHC, maxlen=MAX_LEN_MHC)\r\n\r\n\r\ntrain_x_MHC = np.array(train_x_MHC)\r\n\r\n\r\ntest_x_MHC = feature_2MHCv[:9743]\r\ntest_x_MHC = pad_sequences(test_x_MHC, maxlen=MAX_LEN_MHC)\r\ntest_x_MHC = np.array(test_x_MHC)\r\n\r\n\r\n\r\n# model6 (neoantigen dim=256, HLA dim = 64, dropout=0.7)\r\n\r\nmodel = multimodal_bilstm(out_dim=256, dropoutrate=0.7, out_dim2=64)\r\nH = model.fit([train2_x, train_x_MHC], train2_y, validation_data=([test2_x, test_x_MHC], test2_y), batch_size=32, verbose=1, epochs=100)\r\nmodel.save('{Path_of_directory}/modal_lstm_model6.hdf5')\r\nmodel.save_weights('{Path_of_directory}/modal_lstm_model6_weights.h5')\r\nH = pd.DataFrame(H.history)\r\nH.to_csv('{Path_of_directory}/modal_lstm_model6.csv', sep=\",\")\r\n\r\n\r\n# model11 (neoantigen dim=256, HLA dim = 64, dropout=0.7)\r\n\r\nmodel = multimodal_bilstm(out_dim=512, dropoutrate=0.6, out_dim2=128)\r\nH = model.fit([train2_x, train_x_MHC], train2_y, validation_data=([test2_x, test_x_MHC], test2_y), batch_size=32, verbose=1, epochs=100)\r\nmodel.save('{Path_of_directory}/modal_lstm_model11.hdf5')\r\nmodel.save_weights('{Path_of_directory}/modal_lstm_model11_weights.h5')\r\nH = 
pd.DataFrame(H.history)\r\nH.to_csv('{Path_of_directory}/modal_lstm_model11.csv', sep=\",\")\r\n" ]
[ [ "numpy.array", "pandas.DataFrame", "numpy.append", "pandas.read_csv", "tensorflow.keras.layers.concatenate", "pandas.get_dummies" ] ]
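The monogram()/trigram() helpers above map sliding n-grams to integer ids through pickled dictionaries that are not shipped with this file; the self-contained sketch below reproduces that indexing with a toy dictionary built on the fly.

def trigrams(seq):
    # Same sliding window as trigram() above: positions 0 .. len(seq) - 3.
    return [seq[i:i + 3] for i in range(len(seq) - 2)]

toy_idx = {}

def encode(seq, idx=toy_idx):
    # The real code looks n-grams up in trigram_to_idx_MHC; here ids are assigned on first sight.
    return [idx.setdefault(tri, len(idx)) for tri in trigrams(seq)]

print(trigrams("ACDEFG"))  # ['ACD', 'CDE', 'DEF', 'EFG']
print(encode("ACDEFG"))    # [0, 1, 2, 3]
print(encode("CDEFGH"))    # [1, 2, 3, 4] -- shared trigrams reuse their ids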
feiyangsb/out_of_distribution_detector_aebs
[ "b1238729863fab7d4147677baae00abc6af42831" ]
[ "scripts/vae_svdd_trainer.py" ]
[ "from scripts.network import VAE, SVDD\r\nimport torch\r\nimport numpy as np\r\nimport torch.optim as optim\r\nimport time\r\nfrom torch.utils.data import TensorDataset, DataLoader\r\n\r\n\r\nclass Trainer():\r\n def __init__(self, X_train, y_train):\r\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n tensor_x = torch.Tensor(np.rollaxis(X_train, 3, 1))\r\n tensor_y = torch.Tensor(y_train)\r\n dataset = TensorDataset(tensor_x, tensor_y)\r\n self.data_loader = DataLoader(dataset, batch_size=32, shuffle=True) \r\n\r\n self.nu = 0.1\r\n self.R = 0.0\r\n self.c = None\r\n self.episode_vae = 350\r\n self.lr_milestones_vae = [250]\r\n self.episode_svdd = 350\r\n self.lr_milestones_svdd = [250]\r\n\r\n def train_vae(self):\r\n optimizer = optim.Adam(self.vae.parameters(), lr=0.0001, weight_decay=0.5e-6, amsgrad=False) \r\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestones_vae, gamma=0.1)\r\n self.vae.train()\r\n for epoch in range(self.episode_vae):\r\n loss_epoch = 0.0\r\n reconstruction_loss_epoch = 0.0\r\n kl_loss_epoch = 0.0\r\n n_batches = 0\r\n epoch_start_time = time.time()\r\n for batch_idx, (inputs, targets) in enumerate(self.data_loader):\r\n inputs = inputs.to(self.device)\r\n optimizer.zero_grad()\r\n\r\n x, mu, logvar = self.vae(inputs)\r\n reconstruction_loss = torch.sum((x-inputs)**2, dim=tuple(range(1, x.dim())))\r\n kl_loss = 1 + logvar - (mu).pow(2) - logvar.exp()\r\n kl_loss = torch.sum(kl_loss, axis=-1) * -0.5\r\n loss = reconstruction_loss + kl_loss\r\n reconstruction_loss_mean = torch.mean(reconstruction_loss)\r\n kl_loss_mean = torch.mean(kl_loss)\r\n loss = torch.mean(loss)\r\n loss.backward()\r\n optimizer.step()\r\n loss_epoch += loss.item()\r\n reconstruction_loss_epoch += reconstruction_loss_mean.item()\r\n kl_loss_epoch += kl_loss_mean.item()\r\n n_batches += 1\r\n \r\n scheduler.step()\r\n epoch_train_time = time.time() - epoch_start_time\r\n print('Epoch {}/{}\\t Time: {:.3f}\\t Loss: {:.8f}'.format(epoch+1, self.episode_vae, epoch_train_time, loss_epoch/n_batches))\r\n\r\n def train_svdd(self):\r\n c = torch.tensor(self.c, device='cuda') if self.c is not None else None\r\n optimizer = optim.Adam(self.svdd.parameters(), lr=0.0001, weight_decay=0.5e-6, amsgrad=False)\r\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestones_svdd, gamma=0.1)\r\n self.svdd.train()\r\n\r\n if c == None:\r\n c = self.init_center_c()\r\n\r\n for epoch in range(self.episode_svdd):\r\n print('LR is: {}'.format(float(scheduler.get_lr()[0])))\r\n if epoch in self.lr_milestones_svdd:\r\n print(' LR scheduler: new learning rate is %g' % float(scheduler.get_lr()[0]))\r\n loss_epoch = 0.0\r\n n_batches = 0\r\n epoch_start_time = time.time()\r\n for batch_idx, (inputs, target) in enumerate(self.data_loader):\r\n inputs = inputs.to(self.device)\r\n optimizer.zero_grad()\r\n outputs = self.svdd(inputs)\r\n dist = torch.sum((outputs - c)**2, dim=1)\r\n loss = torch.mean(dist)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n loss_epoch += loss.item()\r\n n_batches += 1\r\n \r\n scheduler.step()\r\n epoch_train_time = time.time() - epoch_start_time\r\n print('Epoch {}/{}\\t Time: {:.3f}\\t Loss: {:.8f}'.format(epoch+1, self.episode_svdd, epoch_train_time, loss_epoch/n_batches))\r\n return c\r\n\r\n def init_center_c(self, eps=0.1):\r\n n_sample = 0\r\n c = torch.zeros(self.svdd.rep_dim, device=self.device)\r\n\r\n self.svdd.eval()\r\n with torch.no_grad():\r\n for batch_idx, (inputs, _) in 
enumerate(self.data_loader):\r\n inputs = inputs.to(self.device)\r\n outputs = self.svdd(inputs)\r\n n_sample += outputs.shape[0]\r\n c += torch.sum(outputs, dim=0)\r\n \r\n c /= n_sample\r\n\r\n c[(abs(c) < eps) & (c < 0)] = -eps\r\n c[(abs(c) < eps) & (c > 0)] = eps\r\n\r\n return c \r\n \r\n \r\n def fit(self):\r\n # vae \r\n self.vae = VAE()\r\n self.vae = self.vae.to(self.device)\r\n\r\n # trying to load pretrained VAE model\r\n try:\r\n self.vae.load_state_dict(torch.load(\"./models/vae.pt\"))\r\n self.vae.eval()\r\n print(\"loaded pretrained VAE model\")\r\n\r\n # train VAE model\r\n except:\r\n print(\"cannot find the pretrained VAE model, retrain it\")\r\n self.train_vae()\r\n torch.save(self.vae.state_dict(), \"./models/vae.pt\")\r\n \r\n self.svdd = SVDD()\r\n self.svdd = self.svdd.to(self.device)\r\n self.init_network_weights_from_pretraining()\r\n\r\n c = self.train_svdd()\r\n torch.save(self.svdd.state_dict(), \"./models/svdd.pt\")\r\n np.save(\"./models/svdd_c.npy\", c.cpu().data.numpy())\r\n \r\n def init_network_weights_from_pretraining(self):\r\n svdd_net_dict = self.svdd.state_dict()\r\n vae_net_dict = self.vae.state_dict()\r\n\r\n vae_net_dict = {k: v for k, v in vae_net_dict.items() if k in svdd_net_dict}\r\n svdd_net_dict.update(vae_net_dict)\r\n self.svdd.load_state_dict(svdd_net_dict)" ]
[ [ "torch.zeros", "numpy.rollaxis", "torch.no_grad", "torch.optim.lr_scheduler.MultiStepLR", "torch.cuda.is_available", "torch.tensor", "torch.utils.data.DataLoader", "torch.load", "torch.mean", "torch.Tensor", "torch.utils.data.TensorDataset", "torch.sum" ] ]
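A tiny, self-contained sketch of the Deep SVDD objective used in train_svdd() and init_center_c() above: embeddings are pulled toward a fixed center by minimising their mean squared distance to it. Only PyTorch is assumed, and the random embeddings stand in for self.svdd(inputs).

import torch

torch.manual_seed(0)
emb = torch.randn(8, 4, requires_grad=True)   # stand-in for network outputs self.svdd(inputs)

c = emb.detach().mean(dim=0)                  # init_center_c: average embedding over the data
eps = 0.1
c[(c.abs() < eps) & (c < 0)] = -eps           # push near-zero coordinates away from the origin
c[(c.abs() < eps) & (c > 0)] = eps

dist = torch.sum((emb - c) ** 2, dim=1)       # per-sample squared distance to the center
loss = torch.mean(dist)                       # the quantity minimised each epoch
loss.backward()                               # gradients flow back into the embeddings/network
print(float(loss))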
sunutf/TVQA
[ "72ede1892b7ce79641ae697ff3dc6ba5ff32506e" ]
[ "config.py" ]
[ "__author__ = \"Jie Lei\"\n\nimport os\nimport time\nimport torch\nimport argparse\nfrom utils import mkdirp, load_json, save_json, save_json_pretty\n\n\nclass BaseOptions(object):\n def __init__(self):\n self.parser = argparse.ArgumentParser()\n self.initialized = False\n self.opt = None\n\n def initialize(self):\n self.parser.add_argument(\"--debug\", action=\"store_true\", help=\"debug mode, break all loops\")\n self.parser.add_argument(\"--results_dir_base\", type=str, default=\"results/results\")\n self.parser.add_argument(\"--log_freq\", type=int, default=400, help=\"print, save training info\")\n self.parser.add_argument(\"--lr\", type=float, default=3e-4, help=\"learning rate\")\n self.parser.add_argument(\"--wd\", type=float, default=1e-5, help=\"weight decay\")\n self.parser.add_argument(\"--n_epoch\", type=int, default=100, help=\"number of epochs to run\")\n self.parser.add_argument(\"--max_es_cnt\", type=int, default=3, help=\"number of epochs to early stop\")\n self.parser.add_argument(\"--bsz\", type=int, default=32, help=\"mini-batch size\")\n self.parser.add_argument(\"--test_bsz\", type=int, default=100, help=\"mini-batch size for testing\")\n self.parser.add_argument(\"--device\", type=int, default=0, help=\"gpu ordinal, -1 indicates cpu\")\n self.parser.add_argument(\"--no_core_driver\", action=\"store_true\",\n help=\"hdf5 driver, default use `core` (load into RAM), if specified, use `None`\")\n self.parser.add_argument(\"--word_count_threshold\", type=int, default=2, help=\"word vocabulary threshold\")\n\n # model config\n self.parser.add_argument(\"--no_glove\", action=\"store_true\", help=\"not use glove vectors\")\n self.parser.add_argument(\"--no_ts\", action=\"store_true\", help=\"no timestep annotation, use full length feature\")\n self.parser.add_argument(\"--input_streams\", type=str, nargs=\"+\", choices=[\"vcpt\", \"sub\", \"imagenet\"],\n help=\"input streams for the model, will use both `vcpt` and `sub` streams\")\n self.parser.add_argument(\"--n_layers_cls\", type=int, default=1, help=\"number of layers in classifier\")\n self.parser.add_argument(\"--hsz1\", type=int, default=150, help=\"hidden size for the first lstm\")\n self.parser.add_argument(\"--hsz2\", type=int, default=300, help=\"hidden size for the second lstm\")\n self.parser.add_argument(\"--embedding_size\", type=int, default=300, help=\"word embedding dim\")\n self.parser.add_argument(\"--max_sub_l\", type=int, default=300, help=\"max length for subtitle\")\n self.parser.add_argument(\"--max_vcpt_l\", type=int, default=300, help=\"max length for visual concepts\")\n self.parser.add_argument(\"--max_vid_l\", type=int, default=480, help=\"max length for video feature\")\n self.parser.add_argument(\"--vocab_size\", type=int, default=0, help=\"vocabulary size\")\n self.parser.add_argument(\"--no_normalize_v\", action=\"store_true\", help=\"do not normalize video featrue\")\n\n # path config\n self.parser.add_argument(\"--train_path\", type=str, default=\"./data/tvqa_train_processed.json\",\n help=\"train set path\")\n self.parser.add_argument(\"--valid_path\", type=str, default=\"./data/tvqa_val_processed.json\",\n help=\"valid set path\")\n self.parser.add_argument(\"--test_path\", type=str, default=\"./data/tvqa_test_public_processed.json\",\n help=\"test set path\")\n self.parser.add_argument(\"--glove_path\", type=str, default=\"./data/glove.6B.300d.txt\",\n help=\"GloVe pretrained vector path\")\n self.parser.add_argument(\"--vcpt_path\", type=str, 
default=\"./data/det_visual_concepts_hq.pickle\",\n help=\"visual concepts feature path\")\n self.parser.add_argument(\"--vid_feat_path\", type=str, default=\"./data/tvqa_imagenet_pool5.h5\",\n help=\"imagenet feature path\")\n self.parser.add_argument(\"--vid_feat_size\", type=int, default=2048,\n help=\"visual feature dimension\")\n self.parser.add_argument(\"--word2idx_path\", type=str, default=\"./cache/word2idx.pickle\",\n help=\"word2idx cache path\")\n self.parser.add_argument(\"--idx2word_path\", type=str, default=\"./cache/idx2word.pickle\",\n help=\"idx2word cache path\")\n self.parser.add_argument(\"--vocab_embedding_path\", type=str, default=\"./cache/vocab_embedding.pickle\",\n help=\"vocab_embedding cache path\")\n self.initialized = True\n\n def display_save(self, options, results_dir):\n \"\"\"save config info for future reference, and print\"\"\"\n args = vars(options) # type == dict\n # Display settings\n print('------------ Options -------------')\n for k, v in sorted(args.items()):\n print('%s: %s' % (str(k), str(v)))\n print('-------------- End ----------------')\n\n # Save settings\n if not isinstance(self, TestOptions):\n option_file_path = os.path.join(results_dir, 'opt.json') # not yaml file indeed\n save_json_pretty(args, option_file_path)\n\n def parse(self):\n \"\"\"parse cmd line arguments and do some preprocessing\"\"\"\n if not self.initialized:\n self.initialize()\n opt = self.parser.parse_args()\n results_dir = opt.results_dir_base + time.strftime(\"_%Y_%m_%d_%H_%M_%S\")\n\n if isinstance(self, TestOptions):\n options = load_json(os.path.join(\"results\", opt.model_dir, \"opt.json\"))\n for arg in options:\n setattr(opt, arg, options[arg])\n else:\n\n os.makedirs(results_dir)\n self.display_save(opt, results_dir)\n\n opt.normalize_v = not opt.no_normalize_v\n opt.device = torch.device(\"cuda:%d\" % opt.device if opt.device >= 0 else \"cpu\")\n opt.with_ts = not opt.no_ts\n opt.input_streams = [] if opt.input_streams is None else opt.input_streams\n opt.vid_feat_flag = True if \"imagenet\" in opt.input_streams else False\n opt.h5driver = None if opt.no_core_driver else \"core\"\n opt.results_dir = results_dir\n\n self.opt = opt\n return opt\n\n\nclass TestOptions(BaseOptions):\n \"\"\"add additional options for evaluating\"\"\"\n def initialize(self):\n BaseOptions.initialize(self)\n self.parser.add_argument(\"--model_dir\", type=str, help=\"dir contains the model file\")\n self.parser.add_argument(\"--mode\", type=str, default=\"valid\", help=\"valid/test\")\n\n\nif __name__ == \"__main__\":\n import sys\n sys.argv[1:] = [\"--input_streams\", \"vcpt\"]\n opt = BaseOptions().parse()\n\n" ]
[ [ "torch.device" ] ]
dcastf01/ImageSemanticSegmentation
[ "892be6da952e25277f14200f5d0d652b6f47848a" ]
[ "callbacks/show_prediction_on_epoch_end.py" ]
[ "import tensorflow as tf \nimport numpy as np\nfrom ..utils_visualization import display\n\n#pendiente de introducir correctamente\ndef create_mask(pred_mask):\n\n pred_mask = tf.argmax(pred_mask, axis=-1)\n \n pred_mask = pred_mask[..., tf.newaxis]\n return pred_mask[0]\ndef show_predictions(model,dataset=None,NUM_CLASSES=256, num=1):\n if dataset:\n for image, mask in dataset.take(num):\n pred_mask = model.predict(image)\n \n display([image[0], mask[0], create_mask(pred_mask)],NUM_CLASSES)\n else:\n print(\"please give a dataset to can show\")\n\n\nclass DisplayCallback(tf.keras.callbacks.Callback):\n def __init__(self,model,train_dataset,NUM_CLASSES):\n super().__init__()\n self.model=model\n self.train_dataset=train_dataset\n self.NUM_CLASSES=NUM_CLASSES\n\n\n def on_epoch_end(self, epoch, logs=None):\n # clear_output(wait=True)\n show_predictions(self.model,self.train_dataset,self.NUM_CLASSES)\n print ('\\nSample Prediction after epoch {}\\n'.format(epoch+1))" ]
[ [ "tensorflow.argmax" ] ]
chiaolun/vaex
[ "36872e8d37a7e1b728ade872a86c9d301a5cb406" ]
[ "tests/datetime_test.py" ]
[ "import pytest\nfrom common import *\nimport numpy as np\n\n\ndef test_datetime_operations():\n date = np.array([np.datetime64('2009-10-12T03:31:00'),\n np.datetime64('2016-02-11T10:17:34'),\n np.datetime64('2015-11-12T11:34:22'),\n np.datetime64('2003-03-03T00:33:15'),\n np.datetime64('2014-07-23T15:08:05'),\n np.datetime64('2011-01-01T07:02:01')], dtype='<M8[ns]')\n\n df = vaex.from_arrays(date=date)._readonly()\n pandas_df = df.to_pandas_df()\n\n assert df.date.dt.hour.tolist() == pandas_df.date.dt.hour.values.tolist()\n assert df.date.dt.minute.tolist() == pandas_df.date.dt.minute.values.tolist()\n assert df.date.dt.second.tolist() == pandas_df.date.dt.second.values.tolist()\n assert df.date.dt.day.tolist() == pandas_df.date.dt.day.values.tolist()\n assert df.date.dt.day_name.tolist() == pandas_df.date.dt.day_name().values.tolist()\n assert df.date.dt.month.tolist() == pandas_df.date.dt.month.values.tolist()\n assert df.date.dt.month_name.tolist() == pandas_df.date.dt.month_name().values.tolist()\n assert df.date.dt.quarter.tolist() == pandas_df.date.dt.quarter.values.tolist()\n assert df.date.dt.year.tolist() == pandas_df.date.dt.year.values.tolist()\n assert df.date.dt.is_leap_year.tolist() == pandas_df.date.dt.is_leap_year.values.tolist()\n assert any(df.date.dt.is_leap_year.tolist())\n assert df.date.dt.weekofyear.tolist() == pandas_df.date.dt.weekofyear.values.tolist()\n assert df.date.dt.dayofyear.tolist() == pandas_df.date.dt.dayofyear.values.tolist()\n assert df.date.dt.dayofweek.tolist() == pandas_df.date.dt.dayofweek.values.tolist()\n assert df.date.dt.floor('H').tolist() == pandas_df.date.dt.floor('H').values.tolist()\n assert df.date.dt.date.tolist() == pandas_df.date.dt.date.values.tolist()\n assert df.date.dt.quarter.tolist() == pandas_df.date.dt.quarter.tolist()\n assert df.date.dt.halfyear.tolist() == [2, 1, 2, 1, 2, 1] # this method does not exist in pandas yet\n\n\ndef test_datetime_agg():\n date = [np.datetime64('2009-10-12T03:31:00'),\n np.datetime64('2016-02-11T10:17:34'),\n np.datetime64('2015-11-12T11:34:22'),\n np.datetime64('2003-03-03T00:33:15'),\n np.datetime64('2014-07-23T15:08:05'),\n np.datetime64('2011-01-01T07:02:01')]\n\n df = vaex.from_arrays(date=date)\n assert df.count(df.date) == len(date)\n assert df.max(df.date) == np.datetime64('2016-02-11T10:17:34')\n assert df.mean(df.date) < np.datetime64('2016-02-11T10:17:34')\n assert df.mean(df.date) > date[0]\n\n\ndef test_datetime_stats():\n x1 = np.datetime64('2005-01-01')\n x2 = np.datetime64('2015-02-01')\n x = np.arange(x1, x2, dtype=np.datetime64)\n y = np.arange(len(x))\n df = vaex.from_arrays(x=x, y=y)\n d1, d2 = df.x.minmax()\n assert d1 == x1\n assert d2 == x[-1]\n\n # TODO: we may want to support storing objects in the variables automatically\n # df['deltax'] = df.x - x1\n # assert df['deltax'].astype('datetime64[D]') == []\n # print(repr(df['deltax'])) # coverage\n\n\ndef test_timedelta_arithmetics():\n x = np.array(['2019-01-04T21:23:00', '2019-02-04T05:00:10', '2019-03-04T15:15:15', '2019-06-21T10:31:15'],\n dtype=np.datetime64)\n y = np.array(['2018-06-14T12:11:00', '2019-02-02T22:19:00', '2017-11-18T10:11:19', '2019-07-12T11:00:00'],\n dtype=np.datetime64)\n\n df = vaex.from_arrays(x=x, y=y)\n df['diff'] = df.x-df.y\n df['diff_dev_hours'] = df['diff'] / np.timedelta64(1, 'h')\n df['diff_add_days'] = df['diff'] + np.timedelta64(5, 'D')\n\n # normal numerical/numpy values\n diff = df.x.values-df.y.values\n diff_dev_hours = diff / np.timedelta64(1, 'h')\n diff_add_days = diff + 
np.timedelta64(5, 'D')\n\n # compare vaex to numerical results\n assert diff_dev_hours.tolist() == df['diff_dev_hours'].values.tolist()\n assert diff_add_days.tolist() == df['diff_add_days'].values.tolist()\n\n # check the min/max values for the TimeDelta column\n assert df['diff'].min() == df['diff'].values.min()\n assert df['diff'].max() == df['diff'].values.max()\n\n\n\[email protected](\"as_string\", [True, False])\ndef test_datetime_binary_operations(as_string):\n x = np.array(['2019-01-04T21:23:00', '2019-02-04T05:00:10', '2019-03-04T15:15:15', '2019-06-21T10:31:15'],\n dtype=np.datetime64)\n y = np.array(['2018-06-14T12:11:00', '2019-02-02T22:19:00', '2017-11-18T10:11:19', '2019-07-12T11:00:00'],\n dtype=np.datetime64)\n\n sample_date = '2019-03-15'\n if not as_string:\n sample_date = np.datetime64(sample_date)\n df = vaex.from_arrays(x=x, y=y)\n\n # Try simple binary operations\n assert (df.x > sample_date).tolist() == list(df.x.values > np.datetime64(sample_date))\n assert (df.x <= sample_date).tolist() == list(df.x.values <= np.datetime64(sample_date))\n assert (df.x > df.y).tolist() == list(df.x.values > df.y.values)\n\n\[email protected](vaex.utils.osname == 'windows',\n reason=\"windows' snprintf seems buggy\")\ndef test_create_datetime64_column_from_ints():\n year = np.array([2015, 2015, 2017])\n month = np.array([1, 2, 10])\n day = np.array([1, 3, 22])\n time = np.array([945, 1015, 30])\n df = vaex.from_arrays(year=year, month=month, day=day, time=time)\n\n df['hour'] = (df.time // 100 % 24).format('%02d')\n df['minute'] = (df.time % 100).format('%02d')\n\n expr = df.year.format('%4d') + '-' + df.month.format('%02d') + '-' + df.day.format('%02d') + 'T' + df.hour + ':' + df.minute\n assert expr.to_numpy().astype(np.datetime64).tolist() == expr.astype('datetime64').tolist()\n\n\ndef test_create_datetime64_column_from_str():\n year = np.array(['2015', '2015', '2017'])\n month = np.array(['01', '02', '10'])\n day = np.array(['01', '03', '22'])\n hour = np.array(['09', '10', '00'])\n minute = np.array(['45', '15', '30'])\n df = vaex.from_arrays(year=year, month=month, day=day, hour=hour, minute=minute)\n\n expr = df.year + '-' + df.month + '-' + df.day + 'T' + df.hour + ':' + df.minute\n assert expr.to_numpy().astype(np.datetime64).tolist() == expr.astype('datetime64').tolist()\n assert expr.to_numpy().astype('datetime64[ns]').tolist() == expr.astype('datetime64[ns]').to_numpy().tolist()\n\ndef test_create_str_column_from_datetime64():\n date = np.array([np.datetime64('2009-10-12T03:31:00'),\n np.datetime64('2016-02-11T10:17:34'),\n np.datetime64('2015-11-12T11:34:22'),\n np.datetime64('2003-03-03T00:33:15'),\n np.datetime64('2014-07-23T15:08:05'),\n np.datetime64('2011-01-01T07:02:01')], dtype='<M8[ns]')\n\n df = vaex.from_arrays(date=date)\n pandas_df = df.to_pandas_df()\n\n date_format = \"%Y/%m/%d\"\n\n assert df.date.dt.strftime(date_format).values.tolist() == pandas_df.date.dt.strftime(date_format).values.tolist()\n\n\ndef test_non_ns_units():\n date1 = np.datetime64('1900-10-12T03:31:00')\n date2 = np.datetime64('2011-01-01T07:02:01')\n dates = np.array([date1, date2], dtype='M8[ms]')\n df = vaex.from_arrays(dates=pa.array(dates))\n assert np.all(df.dates.to_numpy() == dates)\n\ndef test_datetime_operations_after_astype(df_factory):\n df = df_factory(x=[\n '2009-10-12T03:31:00',\n '2016-02-11T10:17:34',\n '2015-11-12T11:34:22',\n ])\n df['x_dt'] = df.x.astype('datetime64')\n df['x_hour'] = df.x_dt.dt.hour\n assert df.x_hour.tolist() == [3, 10, 11]\n\n\ndef 
test_no_change_fingerprint():\n # before this would introduce a variable into the dataframe, thus mutate it\n x = np.array(['2019-01-04T21:23:00', '2019-02-04T05:00:10'], dtype=np.datetime64)\n sample_date = np.datetime64('2019-03-15')\n df = vaex.from_arrays(x=x)\n fp = df.fingerprint()\n\n answer = df.x > sample_date\n assert df.fingerprint() == fp\n" ]
[ [ "numpy.array", "numpy.arange", "numpy.timedelta64", "numpy.datetime64" ] ]
leesusu/Paddle
[ "0ecf441af14d554c85f69a206e3e3a9bdd86fb13" ]
[ "python/paddle/fluid/tests/unittests/test_tensor.py" ]
[ "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nimport unittest\nimport numpy\nimport numbers\n\n\nclass TestTensor(unittest.TestCase):\n def setUp(self):\n self.support_dtypes = [\n 'bool', 'uint8', 'int8', 'int16', 'int32', 'int64', 'float16',\n 'float32', 'float64'\n ]\n\n def test_int_tensor(self):\n scope = core.Scope()\n var = scope.var(\"test_tensor\")\n place = core.CPUPlace()\n\n tensor = var.get_tensor()\n\n tensor._set_dims([1000, 784])\n tensor._alloc_int(place)\n tensor_array = numpy.array(tensor)\n self.assertEqual((1000, 784), tensor_array.shape)\n tensor_array[3, 9] = 1\n tensor_array[19, 11] = 2\n tensor.set(tensor_array, place)\n\n tensor_array_2 = numpy.array(tensor)\n self.assertEqual(1, tensor_array_2[3, 9])\n self.assertEqual(2, tensor_array_2[19, 11])\n\n def test_float_tensor(self):\n scope = core.Scope()\n var = scope.var(\"test_tensor\")\n place = core.CPUPlace()\n\n tensor = var.get_tensor()\n\n tensor._set_dims([1000, 784])\n tensor._alloc_float(place)\n\n tensor_array = numpy.array(tensor)\n self.assertEqual((1000, 784), tensor_array.shape)\n tensor_array[3, 9] = 1.0\n tensor_array[19, 11] = 2.0\n tensor.set(tensor_array, place)\n\n tensor_array_2 = numpy.array(tensor)\n self.assertAlmostEqual(1.0, tensor_array_2[3, 9])\n self.assertAlmostEqual(2.0, tensor_array_2[19, 11])\n\n def test_int8_tensor(self):\n scope = core.Scope()\n var = scope.var(\"int8_tensor\")\n cpu_tensor = var.get_tensor()\n tensor_array = numpy.random.randint(\n -127, high=128, size=[100, 200], dtype=numpy.int8)\n place = core.CPUPlace()\n cpu_tensor.set(tensor_array, place)\n cpu_tensor_array_2 = numpy.array(cpu_tensor)\n self.assertAlmostEqual(cpu_tensor_array_2.all(), tensor_array.all())\n\n if core.is_compiled_with_cuda():\n cuda_tensor = var.get_tensor()\n tensor_array = numpy.random.randint(\n -127, high=128, size=[100, 200], dtype=numpy.int8)\n place = core.CUDAPlace(0)\n cuda_tensor.set(tensor_array, place)\n cuda_tensor_array_2 = numpy.array(cuda_tensor)\n self.assertAlmostEqual(cuda_tensor_array_2.all(),\n tensor_array.all())\n\n def test_int_lod_tensor(self):\n place = core.CPUPlace()\n scope = core.Scope()\n var_lod = scope.var(\"test_lod_tensor\")\n lod_tensor = var_lod.get_tensor()\n\n lod_tensor._set_dims([4, 4, 6])\n lod_tensor._alloc_int(place)\n array = numpy.array(lod_tensor)\n array[0, 0, 0] = 3\n array[3, 3, 5] = 10\n lod_tensor.set(array, place)\n lod_tensor.set_recursive_sequence_lengths([[2, 2]])\n\n lod_v = numpy.array(lod_tensor)\n self.assertTrue(numpy.alltrue(array == lod_v))\n\n lod = lod_tensor.recursive_sequence_lengths()\n self.assertEqual(2, lod[0][0])\n self.assertEqual(2, lod[0][1])\n\n def test_float_lod_tensor(self):\n place = core.CPUPlace()\n scope = core.Scope()\n var_lod = scope.var(\"test_lod_tensor\")\n\n lod_tensor = var_lod.get_tensor()\n lod_tensor._set_dims([5, 2, 3, 4])\n 
lod_tensor._alloc_float(place)\n\n tensor_array = numpy.array(lod_tensor)\n self.assertEqual((5, 2, 3, 4), tensor_array.shape)\n tensor_array[0, 0, 0, 0] = 1.0\n tensor_array[0, 0, 0, 1] = 2.0\n lod_tensor.set(tensor_array, place)\n\n lod_v = numpy.array(lod_tensor)\n self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])\n self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])\n self.assertEqual(len(lod_tensor.recursive_sequence_lengths()), 0)\n\n lod_py = [[2, 1], [1, 2, 2]]\n lod_tensor.set_recursive_sequence_lengths(lod_py)\n lod = lod_tensor.recursive_sequence_lengths()\n self.assertListEqual(lod_py, lod)\n\n def test_lod_tensor_init(self):\n place = core.CPUPlace()\n lod_py = [[2, 1], [1, 2, 2]]\n lod_tensor = core.LoDTensor()\n\n lod_tensor._set_dims([5, 2, 3, 4])\n lod_tensor.set_recursive_sequence_lengths(lod_py)\n lod_tensor._alloc_float(place)\n tensor_array = numpy.array(lod_tensor)\n tensor_array[0, 0, 0, 0] = 1.0\n tensor_array[0, 0, 0, 1] = 2.0\n lod_tensor.set(tensor_array, place)\n\n lod_v = numpy.array(lod_tensor)\n self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])\n self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])\n self.assertListEqual(lod_py, lod_tensor.recursive_sequence_lengths())\n\n def test_lod_tensor_gpu_init(self):\n if not core.is_compiled_with_cuda():\n return\n place = core.CUDAPlace(0)\n lod_py = [[2, 1], [1, 2, 2]]\n lod_tensor = core.LoDTensor()\n\n lod_tensor._set_dims([5, 2, 3, 4])\n lod_tensor.set_recursive_sequence_lengths(lod_py)\n lod_tensor._alloc_float(place)\n tensor_array = numpy.array(lod_tensor)\n tensor_array[0, 0, 0, 0] = 1.0\n tensor_array[0, 0, 0, 1] = 2.0\n lod_tensor.set(tensor_array, place)\n\n lod_v = numpy.array(lod_tensor)\n self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])\n self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])\n self.assertListEqual(lod_py, lod_tensor.recursive_sequence_lengths())\n\n def test_empty_tensor(self):\n place = core.CPUPlace()\n scope = core.Scope()\n var = scope.var(\"test_tensor\")\n\n tensor = var.get_tensor()\n tensor._set_dims([0, 1])\n tensor._alloc_float(place)\n\n tensor_array = numpy.array(tensor)\n self.assertEqual((0, 1), tensor_array.shape)\n\n if core.is_compiled_with_cuda():\n gpu_place = core.CUDAPlace(0)\n tensor._alloc_float(gpu_place)\n tensor_array = numpy.array(tensor)\n self.assertEqual((0, 1), tensor_array.shape)\n\n def run_slice_tensor(self, place, dtype):\n tensor = fluid.Tensor()\n shape = [3, 3, 3]\n tensor._set_dims(shape)\n\n tensor_array = numpy.array(\n [[[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n [[10, 11, 12], [13, 14, 15], [16, 17, 18]],\n [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]).astype(dtype)\n\n tensor.set(tensor_array, place)\n n1 = tensor[1]\n t1 = tensor_array[1]\n self.assertTrue((numpy.array(n1) == numpy.array(t1)).all())\n\n n2 = tensor[1:]\n t2 = tensor_array[1:]\n self.assertTrue((numpy.array(n2) == numpy.array(t2)).all())\n\n n3 = tensor[0:2:]\n t3 = tensor_array[0:2:]\n self.assertTrue((numpy.array(n3) == numpy.array(t3)).all())\n\n n4 = tensor[2::-2]\n t4 = tensor_array[2::-2]\n self.assertTrue((numpy.array(n4) == numpy.array(t4)).all())\n\n n5 = tensor[2::-2][0]\n t5 = tensor_array[2::-2][0]\n self.assertTrue((numpy.array(n5) == numpy.array(t5)).all())\n\n n6 = tensor[2:-1:-1]\n t6 = tensor_array[2:-1:-1]\n self.assertTrue((numpy.array(n6) == numpy.array(t6)).all())\n\n n7 = tensor[0:, 0:]\n t7 = tensor_array[0:, 0:]\n self.assertTrue((numpy.array(n7) == numpy.array(t7)).all())\n\n n8 = tensor[0::1, 0::-1, 2:]\n t8 = tensor_array[0::1, 0::-1, 2:]\n self.assertTrue((numpy.array(n8) == 
numpy.array(t8)).all())\n\n def test_slice_tensor(self):\n for dtype in self.support_dtypes:\n # run cpu first\n place = core.CPUPlace()\n self.run_slice_tensor(place, dtype)\n\n if core.is_compiled_with_cuda():\n place = core.CUDAPlace(0)\n self.run_slice_tensor(place, dtype)\n\n def test_print_tensor(self):\n scope = core.Scope()\n var = scope.var(\"test_tensor\")\n place = core.CPUPlace()\n tensor = var.get_tensor()\n tensor._set_dims([10, 10])\n tensor._alloc_int(place)\n tensor_array = numpy.array(tensor)\n self.assertEqual((10, 10), tensor_array.shape)\n tensor_array[0, 0] = 1\n tensor_array[2, 2] = 2\n tensor.set(tensor_array, place)\n print(tensor)\n self.assertTrue(isinstance(str(tensor), str))\n\n if core.is_compiled_with_cuda():\n tensor.set(tensor_array, core.CUDAPlace(0))\n print(tensor)\n self.assertTrue(isinstance(str(tensor), str))\n\n def test_tensor_poiter(self):\n place = core.CPUPlace()\n scope = core.Scope()\n var = scope.var(\"test_tensor\")\n place = core.CPUPlace()\n tensor = var.get_tensor()\n dtype = core.VarDesc.VarType.FP32\n self.assertTrue(\n isinstance(tensor._mutable_data(place, dtype), numbers.Integral))\n\n if core.is_compiled_with_cuda():\n place = core.CUDAPlace(0)\n self.assertTrue(\n isinstance(\n tensor._mutable_data(place, dtype), numbers.Integral))\n place = core.CUDAPinnedPlace()\n self.assertTrue(\n isinstance(\n tensor._mutable_data(place, dtype), numbers.Integral))\n places = fluid.cuda_pinned_places()\n self.assertTrue(\n isinstance(\n tensor._mutable_data(places[0], dtype), numbers.Integral))\n\n def test_tensor_set_fp16(self):\n array = numpy.random.random((300, 500)).astype(\"float16\")\n tensor = fluid.Tensor()\n place = core.CPUPlace()\n tensor.set(array, place)\n self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP16)\n self.assertTrue(numpy.array_equal(numpy.array(tensor), array))\n\n if core.is_compiled_with_cuda():\n place = core.CUDAPlace(0)\n tensor.set(array, place)\n self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP16)\n self.assertTrue(numpy.array_equal(numpy.array(tensor), array))\n\n place = core.CUDAPinnedPlace()\n tensor.set(array, place)\n self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP16)\n self.assertTrue(numpy.array_equal(numpy.array(tensor), array))\n\n def test_tensor_set_int16(self):\n array = numpy.random.randint(100, size=(300, 500)).astype(\"int16\")\n tensor = fluid.Tensor()\n place = core.CPUPlace()\n tensor.set(array, place)\n self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16)\n self.assertTrue(numpy.array_equal(numpy.array(tensor), array))\n\n if core.is_compiled_with_cuda():\n place = core.CUDAPlace(0)\n tensor.set(array, place)\n self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16)\n self.assertTrue(numpy.array_equal(numpy.array(tensor), array))\n\n place = core.CUDAPinnedPlace()\n tensor.set(array, place)\n self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16)\n self.assertTrue(numpy.array_equal(numpy.array(tensor), array))\n\n def test_tensor_set_from_array_list(self):\n array = numpy.random.randint(1000, size=(200, 300))\n list_array = [array, array]\n tensor = fluid.Tensor()\n place = core.CPUPlace()\n tensor.set(list_array, place)\n self.assertEqual([2, 200, 300], tensor.shape())\n self.assertTrue(numpy.array_equal(numpy.array(tensor), list_array))\n\n if core.is_compiled_with_cuda():\n place = core.CUDAPlace(0)\n tensor.set(list_array, place)\n self.assertEqual([2, 200, 300], tensor.shape())\n self.assertTrue(numpy.array_equal(numpy.array(tensor), 
list_array))\n\n place = core.CUDAPinnedPlace()\n tensor.set(list_array, place)\n self.assertEqual([2, 200, 300], tensor.shape())\n self.assertTrue(numpy.array_equal(numpy.array(tensor), list_array))\n\n def test_tensor_set_error(self):\n scope = core.Scope()\n var = scope.var(\"test_tensor\")\n place = core.CPUPlace()\n\n tensor = var.get_tensor()\n\n exception = None\n try:\n error_array = [\"1\", \"2\"]\n tensor.set(error_array, place)\n except core.EnforceNotMet as ex:\n exception = ex\n\n self.assertIsNotNone(exception)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.alltrue", "numpy.array", "numpy.random.randint", "numpy.random.random" ] ]
ThanThoai/nanodet
[ "57a20ad622b3704244e3fdb1360a70a802281441" ]
[ "tools/test.py" ]
[ "# Copyright 2021 RangiLyu.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport torch\nimport json\nimport datetime\nimport argparse\nimport warnings\nimport pytorch_lightning as pl\n\nfrom nanodet.util import mkdir, Logger, cfg, load_config, convert_old_model\nfrom nanodet.data.collate import collate_function\nfrom nanodet.data.dataset import build_dataset\nfrom nanodet.trainer.task import TrainingTask\nfrom nanodet.evaluator import build_evaluator\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--task', type=str, default='val', help='task to run, test or val')\n parser.add_argument('--config', type=str, help='model config file(.yml) path')\n parser.add_argument('--model', type=str, help='ckeckpoint file(.ckpt) path')\n args = parser.parse_args()\n return args\n\n\ndef main(args):\n load_config(cfg, args.config)\n local_rank = -1\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = True\n cfg.defrost()\n timestr = datetime.datetime.now().__format__('%Y%m%d%H%M%S')\n cfg.save_dir = os.path.join(cfg.save_dir, timestr)\n mkdir(local_rank, cfg.save_dir)\n logger = Logger(local_rank, cfg.save_dir)\n\n assert args.task in ['val', 'test']\n cfg.update({'test_mode': args.task})\n\n logger.log('Setting up data...')\n val_dataset = build_dataset(cfg.data.val, args.task)\n val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False,\n num_workers=cfg.device.workers_per_gpu,\n pin_memory=True, collate_fn=collate_function, drop_last=True)\n evaluator = build_evaluator(cfg, val_dataset)\n\n logger.log('Creating model...')\n task = TrainingTask(cfg, evaluator)\n\n ckpt = torch.load(args.model)\n if 'pytorch-lightning_version' not in ckpt:\n warnings.warn('Warning! Old .pth checkpoint is deprecated. '\n 'Convert the checkpoint with tools/convert_old_checkpoint.py ')\n ckpt = convert_old_model(ckpt)\n task.load_state_dict(ckpt['state_dict'])\n\n trainer = pl.Trainer(default_root_dir=cfg.save_dir,\n gpus=cfg.device.gpu_ids,\n accelerator='ddp',\n log_every_n_steps=cfg.log.interval,\n num_sanity_val_steps=0,\n )\n logger.log('Starting testing...')\n trainer.test(task, val_dataloader)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n" ]
[ [ "torch.utils.data.DataLoader", "torch.load" ] ]
FloodFinJava/LossGame
[ "9d1d22937457ccec5536058bd08e0ff4d64a94a1" ]
[ "src/game.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"Provide the functions to run the flood risk game\n\"\"\"\n\nimport numpy as np\n\ndef distribute_points(extent, n_points):\n \"\"\"Randomly select a n number of points inside a given extent\n return two lists of lat/long coordinates\n \"\"\"\n lat = np.random.uniform(low=extent['latmin'], high=extent['latmax'], size=n_points)\n lon = np.random.uniform(low=extent['lonmin'], high=extent['lonmax'], size=n_points)\n return lat, lon\n" ]
[ [ "numpy.random.uniform" ] ]
YummyLoop/nrn
[ "ceaf140e49b53213c69ab89cb0d0fd85bf85b2d3" ]
[ "share/lib/python/neuron/rxd/rxd.py" ]
[ "from neuron import h, nrn, nrn_dll_sym \nfrom . import species, node, section1d, region, generalizedReaction, constants\nfrom .nodelist import NodeList\nfrom .node import _point_indices\nimport weakref\nimport numpy\nimport ctypes\nimport atexit\nfrom . import options\nfrom .rxdException import RxDException\nfrom . import initializer \nimport collections\nimport os\nfrom distutils import sysconfig\nimport uuid\nimport sys\nimport itertools\nfrom numpy.ctypeslib import ndpointer\nimport re\nimport platform\nfrom warnings import warn\n\n# aliases to avoid repeatedly doing multiple hash-table lookups\n_numpy_array = numpy.array\n_numpy_zeros = numpy.zeros\n_species_get_all_species = species._get_all_species\n_node_get_states = node._get_states\n_section1d_transfer_to_legacy = section1d._transfer_to_legacy\n_ctypes_c_int = ctypes.c_int\n_weakref_ref = weakref.ref\n\n_external_solver = None\n_external_solver_initialized = False\n_windows_dll_files = []\n_windows_dll = []\n\n\n\nmake_time_ptr = nrn_dll_sym('make_time_ptr')\nmake_time_ptr.argtypes = [ctypes.py_object, ctypes.py_object]\nmake_time_ptr(h._ref_dt, h._ref_t)\n\n_double_ptr = ctypes.POINTER(ctypes.c_double)\n_int_ptr = ctypes.POINTER(_ctypes_c_int)\n_long_ptr = ctypes.POINTER(ctypes.c_long)\n\n\nfptr_prototype = ctypes.CFUNCTYPE(None)\nset_nonvint_block = nrn_dll_sym('set_nonvint_block')\nset_nonvint_block(nrn_dll_sym('rxd_nonvint_block'))\n\nset_setup = nrn_dll_sym('set_setup')\nset_setup.argtypes = [fptr_prototype]\nset_initialize = nrn_dll_sym('set_initialize')\nset_initialize.argtypes = [fptr_prototype]\n\nscatter_concentrations = nrn_dll_sym('scatter_concentrations')\n\n# Transfer extracellular concentrations to NEURON\n_fih_transfer_ecs = h.FInitializeHandler(1, scatter_concentrations)\n\n\nrxd_set_no_diffusion = nrn_dll_sym('rxd_set_no_diffusion')\n\nsetup_solver = nrn_dll_sym('setup_solver')\nsetup_solver.argtypes = [ndpointer(ctypes.c_double), ctypes.c_int, numpy.ctypeslib.ndpointer(numpy.int_, flags='contiguous'), ctypes.c_int]\n\n#states = None\n_set_num_threads = nrn_dll_sym('set_num_threads')\n_set_num_threads.argtypes = [ctypes.c_int]\n_get_num_threads = nrn_dll_sym('get_num_threads')\n_get_num_threads.restype = ctypes.c_int\n\nfree_conc_ptrs = nrn_dll_sym('free_conc_ptrs')\nfree_curr_ptrs = nrn_dll_sym('free_curr_ptrs')\nclear_rates = nrn_dll_sym('clear_rates')\nregister_rate = nrn_dll_sym('register_rate')\nregister_rate.argtypes = [ \n ctypes.c_int, #num species\n ctypes.c_int, #num parameters\n ctypes.c_int, #num regions\n ctypes.c_int, #num seg\n numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #species ids\n ctypes.c_int, #num ecs species\n ctypes.c_int, #num ecs parameters\n numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #ecs species ids\n numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #ecs indices\n ctypes.c_int, #num multicompartment reactions\n numpy.ctypeslib.ndpointer(ctypes.c_double, flags='contiguous'), #multicompartment multipliers\n ctypes.POINTER(ctypes.py_object), #voltage pointers\n ] #Reaction rate function\n\nsetup_currents = nrn_dll_sym('setup_currents')\nsetup_currents.argtypes = [\n ctypes.c_int, #number of membrane currents\n ctypes.c_int, #number induced currents\n _int_ptr, #number of species involved in each membrane current\n _int_ptr, #node indices\n _double_ptr, #scaling (areas) of the fluxes\n ctypes.POINTER(ctypes.py_object), #hoc pointers\n _int_ptr, #maps for membrane fluxes\n _int_ptr #maps for ecs fluxes\n]\n \n\nics_register_reaction = 
nrn_dll_sym('ics_register_reaction')\nics_register_reaction.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int,\n _int_ptr,\n numpy.ctypeslib.ndpointer(dtype=numpy.int64),\n ctypes.c_int,\n numpy.ctypeslib.ndpointer(dtype=numpy.float),\n]\n\necs_register_reaction = nrn_dll_sym('ecs_register_reaction')\necs_register_reaction.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int,\n _int_ptr,\n]\n\n\nset_hybrid_data = nrn_dll_sym('set_hybrid_data')\nset_hybrid_data.argtypes = [ \n numpy.ctypeslib.ndpointer(dtype=numpy.int64),\n numpy.ctypeslib.ndpointer(dtype=numpy.int64),\n numpy.ctypeslib.ndpointer(dtype=numpy.int64),\n numpy.ctypeslib.ndpointer(dtype=numpy.int64),\n numpy.ctypeslib.ndpointer(dtype=numpy.int64),\n numpy.ctypeslib.ndpointer(dtype=numpy.int64),\n numpy.ctypeslib.ndpointer(dtype=numpy.float_),\n numpy.ctypeslib.ndpointer(dtype=numpy.float_),\n numpy.ctypeslib.ndpointer(dtype=numpy.float_),\n numpy.ctypeslib.ndpointer(dtype=numpy.float_),\n]\n\n#ics_register_reaction = nrn_dll_sym('ics_register_reaction')\n#ics_register_reaction.argtype = [ctypes.c_int, ctypes.c_int, _int_ptr, fptr_prototype]\n\nset_euler_matrix = nrn_dll_sym('rxd_set_euler_matrix')\nset_euler_matrix.argtypes = [\n ctypes.c_int,\n ctypes.c_int,\n _long_ptr,\n _long_ptr,\n _double_ptr,\n numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),\n]\nrxd_setup_curr_ptrs = nrn_dll_sym('rxd_setup_curr_ptrs')\nrxd_setup_curr_ptrs.argtypes = [\n ctypes.c_int,\n _int_ptr,\n numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),\n ctypes.POINTER(ctypes.py_object),\n]\n\nrxd_setup_conc_ptrs = nrn_dll_sym('rxd_setup_conc_ptrs')\nrxd_setup_conc_ptrs.argtypes = [\n ctypes.c_int,\n _int_ptr,\n ctypes.POINTER(ctypes.py_object)\n]\n\nrxd_include_node_flux1D = nrn_dll_sym('rxd_include_node_flux1D')\nrxd_include_node_flux1D.argtypes = [ctypes.c_int, _long_ptr, _double_ptr,\n ctypes.POINTER(ctypes.py_object)]\n\nrxd_include_node_flux3D = nrn_dll_sym('rxd_include_node_flux3D')\nrxd_include_node_flux3D.argtypes = [ctypes.c_int, _int_ptr, _int_ptr, _long_ptr,\n _double_ptr,\n ctypes.POINTER(ctypes.py_object)]\n\n_c_headers = \"\"\"#include <math.h>\n/*Some functions supported by numpy that aren't included in math.h\n * names and arguments match the wrappers used in rxdmath.py\n */\ndouble factorial(const double);\ndouble degrees(const double);\nvoid radians(const double, double*);\ndouble log1p(const double);\ndouble vtrap(const double, const double);\n\"\"\"\n\ndef _list_to_cint_array(data):\n if data is None or len(data) == 0:\n return None\n else:\n return (ctypes.c_int * len(data))(*tuple(data))\n\ndef _list_to_cdouble_array(data):\n if data is None or len(data) == 0:\n return None\n else:\n return (ctypes.c_double * len(data))(*tuple(data))\n\ndef _list_to_clong_array(data):\n if data is None or len(data) == 0:\n return None\n else:\n return (ctypes.c_long * len(data))(*tuple(data))\n\ndef _list_to_pyobject_array(data):\n if data is None or len(data) == 0:\n return None\n else:\n return (ctypes.py_object * len(data))(*tuple(data))\n\ndef byeworld():\n # do not call __del__ that rearrange memory for states\n species.Species.__del__ = lambda x: None\n species._ExtracellularSpecies.__del__ = lambda x: None\n species._IntracellularSpecies.__del__ = lambda x: None\n section1d.Section1D.__del__ = lambda x: None\n generalizedReaction.GeneralizedReaction.__del__ = lambda x: None\n\n # needed to prevent a seg-fault error at shutdown in at least some\n # combinations of NEURON and Python, which I think is due to objects\n # 
getting deleted out-of-order\n global _react_matrix_solver\n try:\n del _react_matrix_solver\n except NameError:\n # # if it already didn't exist, that's fine\n pass\n _windows_remove_dlls()\n \natexit.register(byeworld)\n\n_cvode_object = h.CVode()\n\nlast_diam_change_cnt = None\nlast_structure_change_cnt = None\nlast_nrn_legacy_units = h.nrnunit_use_legacy()\n\n\n_all_reactions = []\n\nnrn_tree_solve = nrn_dll_sym('nrn_tree_solve')\nnrn_tree_solve.restype = None\n\n_dptr = _double_ptr\n\n_dimensions = {1: h.SectionList(), 3: h.SectionList()}\n_dimensions_default = 1\n_default_dx = 0.25\n_default_method = 'deterministic'\n\n#CRxD\n_diffusion_d = None\n_diffusion_a = None\n_diffusion_b = None\n_diffusion_p = None\n_diffusion_a_ptr, _diffusion_b_ptr, _diffusion_p_ptr = None, None, None\n\ndef _domain_lookup(sec, dim=None):\n for d, sl in _dimensions.items():\n if sec in sl:\n if dim is not None and d != dim:\n sl.remove(sec)\n return _domain_lookup(sec, dim)\n return d\n dimension = dim if dim else _dimensions_default\n _dimensions[dimension].append(sec)\n return dimension\n\ndef set_solve_type(domain=None, dimension=None, dx=None, nsubseg=None, method=None):\n \"\"\"Specify the numerical discretization and solver options.\n \n domain -- a section or Python iterable of sections\"\"\"\n\n global _dimensions_default, _dimensions\n setting_default = False\n if domain is None:\n domain = h.allsec()\n setting_default = True\n elif isinstance(domain, nrn.Section):\n domain = [domain]\n \n # NOTE: These attributes are set on a per-nrn.Section basis; they cannot \n # assume Section1D objects exist because they might be specified before\n # those objects are created\n \n # domain is now always an iterable (or invalid)\n if method is not None:\n raise RxDException('using set_solve_type to specify method is not yet implemented')\n if dimension is not None:\n if dimension not in (1, 3):\n raise RxDException('invalid option to set_solve_type: dimension must be 1 or 3')\n if setting_default:\n _dimensions_default = dimension\n for sec in domain:\n _domain_lookup(sec, dimension)\n if dx is not None:\n raise RxDException('using set_solve_type to specify dx is not yet implemented')\n if nsubseg is not None:\n raise RxDException('using set_solve_type to specify nsubseg is not yet implemented')\n \n\ndef _unregister_reaction(r):\n global _all_reactions\n react = r() if isinstance(r, weakref.ref) else r\n with initializer._init_lock:\n _all_reactions = list(filter(lambda x: x() is not None and x() != react, _all_reactions))\n\ndef _register_reaction(r):\n # TODO: should we search to make sure that (a weakref to) r hasn't already been added?\n global _all_reactions, _external_solver_initialized\n with initializer._init_lock:\n _all_reactions.append(_weakref_ref(r))\n _external_solver_initialized = False\n\n \ndef _after_advance():\n global last_diam_change_cnt\n last_diam_change_cnt = _diam_change_count.value\n \ndef re_init():\n \"\"\"reinitializes all rxd concentrations to match HOC values, updates matrices\"\"\"\n global _external_solver_initialized\n h.define_shape()\n \n if not species._has_3d:\n # TODO: if we do have 3D, make sure that we do the necessary parts of this\n \n # update current pointers\n section1d._purge_cptrs()\n for sr in _species_get_all_species():\n s = sr()\n if s is not None:\n s._register_cptrs()\n \n # update matrix equations\n _setup_matrices()\n for sr in _species_get_all_species():\n s = sr()\n if s is not None: s.re_init()\n # TODO: is this safe? 
\n _cvode_object.re_init()\n\n _external_solver_initialized = False\n \ndef _setup_memb_currents():\n initializer._do_init()\n # setup membrane fluxes from our stuff\n # TODO: cache the memb_cur_ptrs, memb_cur_charges, memb_net_charges, memb_cur_mapped\n # because won't change very often\n # need this; think it's because of initialization of mod files\n # setup for induced membrane currents\n cur_node_indices = []\n cur_map = {}\n curr_indices = []\n curr_scales = []\n curr_ptrs = []\n for sr in _species_get_all_species():\n s = sr()\n if s is not None: s._setup_currents(curr_indices, curr_scales, curr_ptrs, cur_map)\n num = len(curr_ptrs)\n if num:\n curr_ptr_vector = _h_ptrvector(num)\n curr_ptr_vector.ptr_update_callback(_donothing)\n for i, ptr in enumerate(curr_ptrs):\n curr_ptr_vector.pset(i, ptr)\n curr_ptr_storage_nrn = _h_vector(num)\n else:\n curr_ptr_vector = None\n curr_ptr_storage_nrn = None\n for rptr in _all_reactions:\n r = rptr()\n if r is not None:\n r._update_indices()\n r._setup_membrane_fluxes(cur_node_indices, cur_map)\n if not curr_indices:\n free_curr_ptrs()\n return\n rxd_setup_curr_ptrs(len(curr_indices),\n _list_to_cint_array(curr_indices),\n numpy.concatenate(curr_scales),\n _list_to_pyobject_array(curr_ptrs))\n\n SPECIES_ABSENT = -1\n # TODO: change so that this is only called when there are in fact currents\n rxd_memb_scales = []\n memb_cur_ptrs = []\n memb_cur_mapped = []\n memb_cur_mapped_ecs = []\n memb_cur_ptrs= []\n for rptr in _all_reactions:\n r = rptr()\n if r and r._membrane_flux:\n r._do_memb_scales(cur_map)\n scales = r._memb_scales\n rxd_memb_scales.extend(scales)\n memb_cur_ptrs += r._cur_ptrs\n memb_cur_mapped += r._cur_mapped\n memb_cur_mapped_ecs += r._cur_mapped_ecs\n ecs_map = [SPECIES_ABSENT if i is None else i for i in list(itertools.chain.from_iterable(itertools.chain.from_iterable(memb_cur_mapped_ecs)))]\n ics_map = [SPECIES_ABSENT if i is None else i for i in list(itertools.chain.from_iterable(itertools.chain.from_iterable(memb_cur_mapped)))]\n if memb_cur_ptrs:\n cur_counts = [len(x) for x in memb_cur_mapped] #TODO: is len(x) the same for all x?\n num_fluxes = numpy.array(cur_counts).sum()\n num_currents = len(memb_cur_ptrs)\n memb_cur_ptrs = list(itertools.chain.from_iterable(memb_cur_ptrs))\n \"\"\"print(\"num_currents\",num_currents)\n print(\"num_fluxes\",num_fluxes)\n print(\"num_nodes\",curr_indices)\n print(\"num_species\",cur_counts)\n print(\"cur_idxs\",curr_indices)\n print(\"node_idxs\",cur_node_indices)\n print(\"scales\",rxd_memb_scales)\n print(\"ptrs\",memb_cur_ptrs)\n print(\"mapped\",ics_map,min(abs(numpy.array(ics_map))),max(ics_map))\n print(\"mapped_ecs\",ecs_map,max(ecs_map))\"\"\"\n setup_currents(num_currents,\n num_fluxes,\n _list_to_cint_array(cur_counts),\n _list_to_cint_array(cur_node_indices),\n _list_to_cdouble_array(rxd_memb_scales),\n _list_to_pyobject_array(memb_cur_ptrs),\n _list_to_cint_array(ics_map),\n _list_to_cint_array(ecs_map))\n \n\n\ndef _setup():\n from . 
import initializer \n if not initializer.is_initialized(): initializer._do_init()\n # TODO: this is when I should resetup matrices (structure changed event)\n global _external_solver_initialized, last_diam_change_cnt, last_structure_change_cnt\n _external_solver_initialized = False\n \n\n # Using C-code for reactions\n options.use_reaction_contribution_to_jacobian = False\n with initializer._init_lock:\n _update_node_data()\n\n\ndef _find_librxdmath():\n import glob\n # cmake doesn't create x86_64 directory under install prefix\n base_path = os.path.join(h.neuronhome(), \"..\", \"..\", platform.machine())\n if not os.path.exists(base_path):\n base_path = os.path.join(h.neuronhome(), \"..\", \"..\")\n base_path = os.path.join(base_path, \"lib\", \"librxdmath\")\n success = False \n for extension in ['', '.dll', '.so', '.dylib']:\n dll = base_path + extension\n try:\n success = os.path.exists(dll) \n except:\n pass\n if success: break\n if not success:\n if sys.platform.lower().startswith(\"win\"):\n dll = os.path.join(h.neuronhome(), 'bin', 'librxdmath.dll')\n success = os.path.exists(dll)\n if not success:\n raise RxDException('unable to connect to the librxdmath library')\n return dll\n \ndef _c_compile(formula):\n filename = 'rxddll' + str(uuid.uuid1())\n with open(filename + '.c', 'w') as f:\n f.write(formula)\n math_library = '-lm'\n fpic = '-fPIC'\n try:\n gcc = os.environ[\"CC\"]\n except:\n #when running on windows try and used the gcc included with NEURON\n if sys.platform.lower().startswith(\"win\"):\n math_library = ''\n fpic = ''\n gcc = os.path.join(h.neuronhome(),\"mingw\",\"mingw64\",\"bin\",\"x86_64-w64-mingw32-gcc.exe\")\n if not os.path.isfile(gcc):\n raise RxDException(\"unable to locate a C compiler. Please `set CC=<path to C compiler>`\")\n else:\n gcc = \"gcc\"\n #TODO: Check this works on non-Linux machines\n gcc_cmd = \"%s -I%s -I%s \" % (gcc, sysconfig.get_python_inc(), os.path.join(h.neuronhome(), \"..\", \"..\", \"include\", \"nrn\"))\n gcc_cmd += \"-shared %s %s.c %s \" % (fpic, filename, _find_librxdmath())\n gcc_cmd += \"-o %s.so %s\" % (filename, math_library)\n if sys.platform.lower().startswith(\"win\"):\n my_path = os.getenv('PATH')\n os.putenv('PATH', my_path + ';' + os.path.join(h.neuronhome(),\"mingw\",\"mingw64\",\"bin\"))\n os.system(gcc_cmd)\n os.putenv('PATH', my_path)\n else:\n os.system(gcc_cmd)\n #TODO: Find a better way of letting the system locate librxdmath.so.0\n rxdmath_dll = ctypes.cdll[_find_librxdmath()]\n dll = ctypes.cdll['%s.so' % os.path.abspath(filename)]\n reaction = dll.reaction\n reaction.argtypes = [ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double)] \n reaction.restype = ctypes.c_double\n os.remove(filename + '.c')\n if sys.platform.lower().startswith(\"win\"):\n #cannot remove dll that are in use\n _windows_dll.append(weakref.ref(dll))\n _windows_dll_files.append(filename + \".so\")\n else:\n os.remove(filename + '.so')\n return reaction\n\n\n\n_h_ptrvector = h.PtrVector\n_h_vector = h.Vector\n\n_structure_change_count = nrn_dll_sym('structure_change_cnt', _ctypes_c_int)\n_diam_change_count = nrn_dll_sym('diam_change_cnt', _ctypes_c_int)\n\ndef _donothing(): pass\n\ndef _setup_units(force=False):\n global last_nrn_legacy_units\n if initializer.is_initialized():\n if(force or last_nrn_legacy_units != h.nrnunit_use_legacy()):\n last_nrn_legacy_units = h.nrnunit_use_legacy()\n clear_rates()\n _setup_memb_currents()\n _compile_reactions()\n if _cvode_object.active():\n _cvode_object.re_init()\n \n\ndef 
_update_node_data(force=False, newspecies=False):\n global last_diam_change_cnt, last_structure_change_cnt\n if last_diam_change_cnt != _diam_change_count.value or _structure_change_count.value != last_structure_change_cnt or force:\n \n last_diam_change_cnt = _diam_change_count.value\n last_structure_change_cnt = _structure_change_count.value\n #if not species._has_3d:\n # TODO: merge this with the 3d/hybrid case?\n if initializer.is_initialized():\n nsegs_changed = 0\n for sr in _species_get_all_species():\n s = sr()\n if s is not None: nsegs_changed += s._update_node_data()\n if nsegs_changed or newspecies:\n section1d._purge_cptrs()\n for sr in _species_get_all_species():\n s = sr()\n if s is not None:\n s._update_region_indices(True)\n s._register_cptrs()\n #if species._has_1d and species._1d_submatrix_n():\n _setup_matrices()\n # TODO: separate compiling reactions -- so the indices can be updated without recompiling\n _include_flux(True)\n _setup_units(force=True)\n\n #end#if\n\n #_curr_scales = _numpy_array(_curr_scales) \n\n\ndef _matrix_to_rxd_sparse(m):\n \"\"\"precondition: assumes m a numpy array\"\"\"\n nonzero_i, nonzero_j = list(zip(*list(m.keys())))\n nonzero_values = numpy.ascontiguousarray(list(m.values()), dtype=numpy.float64)\n\n # number of rows\n n = m.shape[1]\n\n return n, len(nonzero_i), numpy.ascontiguousarray(nonzero_i, dtype=numpy.int_), numpy.ascontiguousarray(nonzero_j, dtype=numpy.int_), nonzero_values\n\n\n# TODO: make sure this does the right thing when the diffusion constant changes between two neighboring nodes\ndef _setup_matrices():\n\n with initializer._init_lock:\n\n # update _node_fluxes in C\n _include_flux()\n \n # TODO: this sometimes seems to get called twice. Figure out why and fix, if possible.\n \n n = len(_node_get_states())\n \n #TODO: Replace with ADI version \n \"\"\"\n if species._has_3d:\n _euler_matrix = _scipy_sparse_dok_matrix((n, n), dtype=float)\n \n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None: s._setup_matrices3d(_euler_matrix)\n \n _diffusion_matrix = -_euler_matrix\n \n _euler_matrix = _euler_matrix.tocsr()\n _update_node_data(True)\n \n # NOTE: if we also have 1D, this will be replaced with the correct values below\n _zero_volume_indices = []\n _nonzero_volume_indices = list(range(len(_node_get_states())))\n \n \"\"\"\n volumes = node._get_data()[0]\n zero_volume_indices = (numpy.where(volumes == 0)[0]).astype(numpy.int_)\n if species._has_1d:\n # TODO: initialization is slow. 
track down why\n \n _last_dt = None\n for sr in _species_get_all_species():\n s = sr()\n if s is not None:\n s._assign_parents()\n \n \n # remove old linearmodeladdition\n _linmodadd_cur = None\n n = species._1d_submatrix_n()\n if n: \n # create sparse matrix for C in cy'+gy=b\n c_diagonal = numpy.zeros(n,dtype=ctypes.c_double)\n # most entries are 1 except those corresponding to the 0 and 1 ends\n \n # create the matrix G\n #if not species._has_3d:\n # # if we have both, then put the 1D stuff into the matrix that already exists for 3D\n from collections import OrderedDict\n diffusion_matrix = [OrderedDict() for idx in range(n)]\n for sr in _species_get_all_species():\n s = sr()\n if s is not None:\n s._setup_diffusion_matrix(diffusion_matrix)\n s._setup_c_matrix(c_diagonal)\n #print '_diffusion_matrix.shape = %r, n = %r, species._has_3d = %r' % (_diffusion_matrix.shape, n, species._has_3d)\n euler_matrix_i, euler_matrix_j, euler_matrix_nonzero = [], [], []\n for i in range(n):\n mat_i = diffusion_matrix[i]\n euler_matrix_i.extend(itertools.repeat(i,len(mat_i)))\n euler_matrix_j.extend(mat_i.keys())\n euler_matrix_nonzero.extend(mat_i.values())\n euler_matrix_nnonzero = len(euler_matrix_nonzero)\n assert(len(euler_matrix_i) == len(euler_matrix_j) == len(euler_matrix_nonzero))\n # modify C for cases where no diffusive coupling of 0, 1 ends\n # TODO: is there a better way to handle no diffusion?\n #for i in range(n):\n # if not _diffusion_matrix[i, i]:\n # _linmodadd_c[i, i] = 1\n \n \n #_cvode_object.re_init() \n \n #if species._has_3d:\n # _euler_matrix = -_diffusion_matrix\n \n #Hybrid logic\n if species._has_1d and species._has_3d:\n hybrid_neighbors = collections.defaultdict(lambda: [])\n hybrid_vols = collections.defaultdict(lambda: [])\n hybrid_diams = {}\n grid_id_dc = {}\n hybrid_index1d_grid_ids = {}\n grid_id_species = {}\n index1d_sec1d = {}\n hybrid_vols1d = {}\n dxs = set()\n for sr in _species_get_all_species():\n s = sr()\n if s is not None:\n if s._intracellular_instances and s._secs:\n # have both 1D and 3D, so find the neighbors\n # for each of the 3D sections, find the parent sections\n for r in s._regions:\n if r in s._intracellular_instances:\n grid_id = s._intracellular_instances[r]._grid_id\n grid_id_species.setdefault(grid_id, s._intracellular_instances[r])\n grid_id_dc[grid_id] = s.d\n dxs.add(r._dx)\n for sec in r._secs3d:\n parent_seg = sec.trueparentseg()\n parent_sec = None if not parent_seg else parent_seg.sec\n # are any of these a match with a 1d section?\n if s._has_region_section(r, parent_sec):\n #this section has a 1d section that is a parent\n index1d, indices3d, vols1d, vols3d = _get_node_indices(s, r, sec, sec.orientation(), parent_sec, h.parent_connection(sec=sec))\n hybrid_neighbors[index1d] += indices3d\n hybrid_vols[index1d] += vols3d\n hybrid_diams[index1d] = parent_sec(h.parent_connection(sec=sec)).diam\n hybrid_index1d_grid_ids[index1d] = grid_id\n index1d_sec1d[index1d] = parent_sec\n hybrid_vols1d[index1d] = vols1d\n \n for sec1d in r._secs1d:\n parent_1d_seg = sec1d.trueparentseg()\n parent_1d = None if not parent_1d_seg else parent_1d_seg.sec \n if parent_1d == sec:\n # it is the parent of a 1d section\n index1d, indices3d, vols1d, vols3d = _get_node_indices(s, r, sec, parent_1d_seg.x , sec1d, sec1d.orientation())\n hybrid_neighbors[index1d] += indices3d\n hybrid_vols[index1d] += vols3d\n hybrid_diams[index1d] = sec1d(h.section_orientation(sec=sec1d)).diam\n hybrid_index1d_grid_ids[index1d] = grid_id\n index1d_sec1d[index1d] = sec1d\n 
hybrid_vols1d[index1d] = vols1d\n \n \n \n if len(dxs) > 1:\n raise RxDException('currently require a unique value for dx')\n dx = dxs.pop()\n rates = []\n volumes3d = []\n volumes1d = []\n grids_dx = []\n hybrid_indices1d = []\n hybrid_indices3d = []\n num_3d_indices_per_1d_seg = []\n \n num_1d_indices_per_grid = []\n num_3d_indices_per_grid = []\n \n \n grid_id_indices1d = collections.defaultdict(lambda: [])\n for index1d in hybrid_neighbors:\n grid_id = hybrid_index1d_grid_ids[index1d]\n grid_id_indices1d[grid_id].append(index1d)\n hybrid_grid_ids = sorted(grid_id_indices1d.keys())\n for grid_id in hybrid_grid_ids:\n sp = grid_id_species[grid_id]\n # TODO: use 3D anisotropic diffusion coefficients\n dc = grid_id_dc[grid_id]\n grids_dx.append(sp._dx**3)\n num_1d_indices_per_grid.append(len(grid_id_indices1d[grid_id]))\n grid_3d_indices_cnt = 0\n for index1d in grid_id_indices1d[grid_id]:\n neighbors3d = []\n vols3d = []\n for neigh, vol in zip(hybrid_neighbors[index1d], hybrid_vols[index1d]):\n if neigh not in neighbors3d:\n neighbors3d.append(neigh)\n vols3d.append(vol)\n if len(neighbors3d) < 1:\n raise RxDException('No 3D neighbors detected for 1D segment. Try perturbing dx')\n sec1d = index1d_sec1d[index1d]\n seg_length1d = sec1d.L/sec1d.nseg\n if neighbors3d:\n hybrid_indices1d.append(index1d)\n cnt_neighbors_3d = len(neighbors3d) \n num_3d_indices_per_1d_seg.append(cnt_neighbors_3d)\n grid_3d_indices_cnt += cnt_neighbors_3d\n area = (numpy.pi * 0.25 * hybrid_diams[index1d] ** 2)\n areaT = sum([v**(2.0/3.0) for v in vols3d])\n volumes1d.append(hybrid_vols1d[index1d])\n for i, vol in zip(neighbors3d, vols3d):\n sp._region._vol[i] = vol\n ratio = vol**(2.0/3.0) / areaT\n rate = ratio * dc * area / (vol * (dx + seg_length1d) / 2)\n rates.append(rate)\n volumes3d.append(vol)\n hybrid_indices3d.append(i)\n \n \n num_3d_indices_per_grid.append(grid_3d_indices_cnt)\n \n num_1d_indices_per_grid = numpy.asarray(num_1d_indices_per_grid, dtype=numpy.int64)\n num_3d_indices_per_grid = numpy.asarray(num_3d_indices_per_grid, dtype=numpy.int64)\n \n \n hybrid_indices1d = numpy.asarray(hybrid_indices1d, dtype=numpy.int64)\n num_3d_indices_per_1d_seg = numpy.asarray(num_3d_indices_per_1d_seg, dtype=numpy.int64)\n hybrid_grid_ids = numpy.asarray(hybrid_grid_ids, dtype=numpy.int64)\n \n hybrid_indices3d = numpy.asarray(hybrid_indices3d, dtype=numpy.int64)\n rates = numpy.asarray(rates, dtype=numpy.float_)\n volumes1d = numpy.asarray(volumes1d, dtype=numpy.float_)\n volumes3d = numpy.asarray(volumes3d, dtype=numpy.float_)\n dxs = numpy.asarray(grids_dx, dtype=numpy.float_)\n set_hybrid_data(num_1d_indices_per_grid, num_3d_indices_per_grid, hybrid_indices1d, hybrid_indices3d, num_3d_indices_per_1d_seg, hybrid_grid_ids, rates, volumes1d, volumes3d, dxs)\n \n \n \n \n \n #TODO: Replace this this to handle 1d/3d hybrid models\n \"\"\"\n if species._has_1d and species._has_3d:\n # TODO: add connections to matrix; for now: find them\n hybrid_neighbors = collections.defaultdict(lambda: [])\n hybrid_diams = {}\n dxs = set()\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n if s._nodes and s._secs:\n # have both 1D and 3D, so find the neighbors\n # for each of the 3D sections, find the parent sections\n for r in s._regions:\n dxs.add(r._dx)\n for sec in r._secs3d:\n parent_seg = sec.trueparentseg()\n parent_sec = None if not parent_seg else parent_seg.sec\n # are any of these a match with a 1d section?\n if s._has_region_section(r, parent_sec):\n # this section has a 1d 
section that is a parent\n index1d, indices3d = _get_node_indices(s, r, sec, h.section_orientation(sec=sec), parent_sec, h.parent_connection(sec=sec))\n hybrid_neighbors[index1d] += indices3d\n hybrid_diams[index1d] = parent_seg.diam\n else:\n for sec1d in r._secs1d:\n parent_1d_seg = sec1d.trueparentseg()\n parent_1d = None if not parent_seg else parent_seg.sec\n if parent_1d == sec:\n # it is the parent of a 1d section\n index1d, indices3d = _get_node_indices(s, r, sec, h.parent_connection(sec=sec1d), sec1d, sec1d.orientation())\n hybrid_neighbors[index1d] += indices3d\n hybrid_diams[index1d] = parent_1d_seg.diam\n break\n elif parent_1d == parent_sec:\n # it connects to the parent of a 1d section\n index1d, indices3d = _get_node_indices(s, r, sec, h.section_orientation(sec=sec), sec1d, sec1d.orientation())\n hybrid_neighbors[index1d] += indices3d\n hybrid_diams[index1d] = parent_1d_seg.diam\n break\n if len(dxs) > 1:\n raise RxDException('currently require a unique value for dx')\n dx = dxs.pop()\n diffs = node._diffs\n n = len(_node_get_states())\n # TODO: validate that we're doing the right thing at boundaries\n for index1d in list(hybrid_neighbors.keys()):\n neighbors3d = set(hybrid_neighbors[index1d])\n # NOTE: splitting the connection area equally across all the connecting nodes\n area = (numpy.pi * 0.25 * hybrid_diams[index1d] ** 2) / len(neighbors3d)\n for i in neighbors3d:\n d = diffs[i]\n vol = node._volumes[i]\n rate = d * area / (vol * dx / 2.)\n # make the connections on the 3d side\n _euler_matrix[i, i] -= rate\n _euler_matrix[i, index1d] += rate\n # make the connections on the 1d side (scale by vol because conserving mass not volume)\n _euler_matrix[index1d, index1d] -= rate * vol\n _euler_matrix[index1d, i] += rate * vol\n #print 'index1d row sum:', sum(_euler_matrix[index1d, j] for j in xrange(n))\n #print 'index1d col sum:', sum(_euler_matrix[j, index1d] for j in xrange(n))\n \"\"\"\n #CRxD\n setup_solver(_node_get_states(), len(_node_get_states()), zero_volume_indices, len(zero_volume_indices))\n if species._has_1d and n and euler_matrix_nnonzero > 0:\n section1d._transfer_to_legacy()\n set_euler_matrix(n, euler_matrix_nnonzero,\n _list_to_clong_array(euler_matrix_i),\n _list_to_clong_array(euler_matrix_j),\n _list_to_cdouble_array(euler_matrix_nonzero),\n c_diagonal)\n else:\n rxd_set_no_diffusion()\n \n \n if section1d._all_cindices is not None and len(section1d._all_cindices) > 0:\n rxd_setup_conc_ptrs(len(section1d._all_cindices), \n _list_to_cint_array(section1d._all_cindices), \n _list_to_pyobject_array(section1d._all_cptrs))\n else:\n free_conc_ptrs()\n \n # we do this last because of performance issues with changing sparsity of csr matrices\n \"\"\"\n if _diffusion_matrix is not None:\n _diffusion_matrix = _diffusion_matrix.tocsr()\n if _euler_matrix is not None:\n _euler_matrix = _euler_matrix.tocsr()\n\n if species._has_1d:\n if species._has_3d:\n _diffusion_matrix = -_euler_matrix\n n = species._1d_submatrix_n()\n if n:\n matrix = _diffusion_matrix[_zero_volume_indices].tocsr()\n indptr = matrix.indptr\n matrixdata = matrix.data\n count = len(_zero_volume_indices)\n for row, i in enumerate(_zero_volume_indices):\n d = _diffusion_matrix[i, i]\n if d:\n matrixdata[indptr[row] : indptr[row + 1]] /= -d\n matrix[row, i] = 0\n else:\n matrixdata[indptr[row] : indptr[row + 1]] = 0\n global _mat_for_zero_volume_nodes\n _mat_for_zero_volume_nodes = matrix\n # TODO: _mat_for_zero_volume_nodes is used for CVode.\n # Figure out if/how it has to be changed for hybrid 
1D/3D sims (probably just augment with identity? or change how its used to avoid multiplying by I)\n \n \"\"\"\n \n \"\"\"\n if pt1 in indices:\n ileft = indices[pt1]\n dleft = (d + diffs[ileft]) * 0.5\n left = dleft * areal / (vol * dx)\n euler_matrix[index, ileft] += left\n euler_matrix[index, index] -= left\n if pt2 in indices:\n iright = indices[pt2]\n dright = (d + diffs[iright]) * 0.5\n right = dright * arear / (vol * dx)\n euler_matrix[index, iright] += right\n euler_matrix[index, index] -= right\n\"\"\" \n \n\n\ndef _get_node_indices(species, region, sec3d, x3d, sec1d, x1d):\n #Recalculate the volumes \n xlo, xhi = region._mesh_grid['xlo'], region._mesh_grid['xhi']\n ylo, yhi = region._mesh_grid['ylo'], region._mesh_grid['yhi']\n zlo, zhi = region._mesh_grid['zlo'], region._mesh_grid['zhi']\n from . import geometry3d \n\n p3d = int((sec3d.n3d()-1)*x3d)\n p1d = int((sec1d.n3d()-1)*x1d)\n pt3d = [p3d, p3d + 1] if p3d == 0 else [p3d - 1, p3d]\n pt1d = [p1d, p1d + 1] if p1d == 0 else [p1d - 1, p1d]\n\n inter, surf, mesh = geometry3d.voxelize2([sec1d, sec3d], region._dx,\n mesh_grid=region._mesh_grid,\n relevant_pts=[pt1d, pt3d])\n\n # TODO: remove need for this assumption\n assert(x1d in (0, 1))\n disc_indices = region._indices_from_sec_x(sec3d, x3d)\n #print '%r(%g) connects to the 1d section %r(%g)' % (sec3d, x3d, sec1d, x1d)\n #print 'disc indices: %r' % disc_indices\n indices3d = []\n vols3d = []\n for point in disc_indices:\n if point in _point_indices[region] and _point_indices[region][point] not in indices3d:\n indices3d.append(_point_indices[region][point])\n vols3d.append(surf[point][0] if point in surf else region.dx**3)\n #print 'found node %d with coordinates (%g, %g, %g)' % (node._index, node.x3d, node.y3d, node.z3d)\n # discard duplicates...\n # TODO: really, need to figure out all the 3d nodes connecting to a given 1d endpoint, then unique that\n #print '3d matrix indices: %r' % indices3d\n # TODO: remove the need for this assertion\n if x1d == sec1d.orientation():\n # TODO: make this whole thing more efficient\n # the parent node is the nonzero index on the first row before the diagonal\n #first_row = min([node._index for node in species.nodes(region)(sec1d)])\n index_1d, vol1d = min([(node._index, node.volume) for node in \n species.nodes(region)(sec1d)],\n key=lambda x: x[0])\n \"\"\"for j in range(first_row):\n if _euler_matrix[first_row, j] != 0:\n index_1d = j\n break\n else:\n raise RxDException('should never get here; could not find parent')\"\"\"\n elif x1d == 1 - sec1d.orientation():\n # the ending zero-volume node is the one after the last node\n # TODO: make this more efficient\n index_1d, vol1d = max([(node._index, node.volume) for node in \n species.nodes(region)(sec1d)],\n key=lambda x: x[0])\n index_1d + 1\n else:\n raise RxDException('should never get here; _get_node_indices apparently only partly converted to allow connecting to 1d in middle')\n return index_1d, indices3d, vol1d, vols3d\n\ndef _compile_reactions():\n #clear all previous reactions (intracellular & extracellular) and the\n #supporting indexes\n #_windows_remove_dlls()\n \n regions_inv = dict() #regions -> reactions that occur there\n species_by_region = dict()\n all_species_involed = set()\n location_count = 0\n \n ecs_regions_inv = dict()\n ecs_species_by_region = dict()\n ecs_all_species_involed = set()\n ecs_mc_species_involved = set()\n from . 
import rate, multiCompartmentReaction\n\n #Find sets of sections that contain the same regions\n from .region import _c_region\n matched_regions = [] # the different combinations of regions that arise in different sections\n rxd_sec_lookup = section1d._SectionLookup()\n for nrnsec in rxd_sec_lookup:\n set_of_regions = set() # a set of the regions that occur in a given section\n for sec in rxd_sec_lookup[nrnsec]:\n if sec: set_of_regions.add(sec._region)\n if set_of_regions not in matched_regions:\n matched_regions.append(set_of_regions)\n region._c_region_lookup = dict()\n \n #create a c_region instance for each of the unique sets of regions\n c_region_list = []\n for sets in matched_regions:\n c_region_list.append(_c_region(sets))\n \n for rptr in _all_reactions:\n r = rptr()\n if not r:\n continue\n\n #Find all the species involved\n if isinstance(r,rate.Rate):\n if not r._species():\n continue\n sptrs = set([r._species])\n else:\n sptrs = set(r._dests + r._sources)\n\n if hasattr(r,'_involved_species') and r._involved_species:\n sptrs = sptrs.union(set(r._involved_species))\n if hasattr(r,'_involved_species_ecs') and r._involved_species_ecs:\n sptrs = sptrs.union(set(r._involved_species_ecs)) \n #Find all the regions involved\n if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):\n if not hasattr(r._mult, 'flatten'):\n r._update_indices()\n react_regions = [s()._extracellular()._region for s in r._sources + r._dests if isinstance(s(),species.SpeciesOnExtracellular)] + [s()._region() for s in r._sources + r._dests if not isinstance(s(),species.SpeciesOnExtracellular)]\n react_regions += [sptr()._region() for sptr in sptrs if isinstance(sptr(),species.SpeciesOnRegion)]\n react_regions += [r._regions[0]]\n react_regions = list(set(react_regions))\n\n #if regions are specified - use those\n elif hasattr(r,'_active_regions'):\n react_regions = r._active_regions\n #Otherwise use all the regions where the species are\n else:\n react_regions = set()\n nsp = 0\n for sp in sptrs:\n s = sp()\n nsp += 1\n if isinstance(s,species.SpeciesOnRegion):\n react_regions.add(s._region())\n elif isinstance(s,species.SpeciesOnExtracellular):\n react_regions.add(s._extracellular()._region)\n elif isinstance(s,species._ExtracellularSpecies):\n react_regions.add(s._region)\n elif None not in s._regions:\n [react_regions.add(reg) for reg in s._regions + s._extracellular_regions]\n react_regions = list(react_regions)\n #Only regions where ALL the species are present -- unless it is a membrane\n #from collections import Counter\n #from . 
import geometry as geo\n #react_regions = [reg for reg, count in Counter(react_regions).iteritems() if count == nsp or isinstance(reg.geometry,geo.ScalableBorder)]\n #Any intracellular regions\n if not all([isinstance(x, region.Extracellular) for x in react_regions]):\n species_involved = []\n for sp in sptrs:\n s = sp()\n if not isinstance(s, species.SpeciesOnExtracellular) and not isinstance(s, species._ExtracellularSpecies):\n all_species_involed.add(s)\n species_involved.append(s)\n for reg in react_regions:\n if isinstance(reg, region.Extracellular):\n continue\n if reg in regions_inv:\n regions_inv[reg].append(rptr)\n else:\n regions_inv[reg] = [rptr]\n if reg in species_by_region:\n species_by_region[reg] = species_by_region[reg].union(species_involved)\n else:\n species_by_region[reg] = set(species_involved)\n for sec in reg._secs:\n location_count += sec.nseg\n #Any extracellular regions\n if any([isinstance(x, region.Extracellular) for x in react_regions]):\n #MultiCompartment - so can have both extracellular and intracellular regions\n if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):\n for sp in sptrs:\n s = sp()\n if isinstance(s, species.SpeciesOnExtracellular):\n ecs_mc_species_involved.add(s)\n if isinstance(s, species.Species) and s._extracellular_instances:\n for ecs in s._extracellular_instances.keys():\n ecs_mc_species_involved.add(s[ecs])\n for reg in react_regions:\n if reg in list(ecs_species_by_region.keys()):\n ecs_species_by_region[reg] = ecs_species_by_region[reg].union(ecs_mc_species_involved)\n else:\n ecs_species_by_region[reg] = set(ecs_mc_species_involved)\n #Otherwise - reaction can only have extracellular regions\n else:\n ecs_species_involved = []\n for sp in sptrs:\n s = sp()\n ecs_all_species_involed.add(s)\n ecs_species_involved.append(s)\n for reg in react_regions:\n if not isinstance(reg, region.Extracellular):\n continue\n if reg in ecs_regions_inv:\n ecs_regions_inv[reg].append(rptr)\n else:\n ecs_regions_inv[reg] = [rptr]\n if reg in ecs_species_by_region:\n ecs_species_by_region[reg] = ecs_species_by_region[reg].union(ecs_species_involved)\n else:\n ecs_species_by_region[reg] = set(ecs_species_involved)\n #Create lists of indexes for intracellular reactions and rates\n # a table for location,species -> state index\n regions_inv_1d = [reg for reg in regions_inv if reg._secs1d]\n regions_inv_1d.sort(key=lambda r: r._id)\n all_regions_inv_3d = [reg for reg in regions_inv if reg._secs3d]\n #remove extra regions from multicompartment reactions. 
We only want the membrane\n regions_inv_3d = set()\n for reg in all_regions_inv_3d:\n for rptr in regions_inv[reg]:\n r = rptr()\n if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):\n regions_inv_3d.add(r._regions[0])\n else:\n regions_inv_3d.add(reg)\n regions_inv_3d = list(regions_inv_3d)\n\n for reg in regions_inv_1d:\n rptr = weakref.ref(reg)\n if rptr in region._c_region_lookup:\n for c_region in region._c_region_lookup[rptr]:\n for react in regions_inv[reg]:\n c_region.add_reaction(react, rptr)\n c_region.add_species(species_by_region[reg])\n if reg in ecs_species_by_region:\n c_region.add_ecs_species(ecs_species_by_region[reg])\n\n # now setup the reactions\n #if there are no reactions\n if location_count == 0 and len(ecs_regions_inv) == 0:\n return None\n\n def localize_index(creg, rate):\n rate_str = re.sub(r'species\\[(\\d+)\\]\\[(\\d+)\\]',\n lambda m: \"species[%i][%i]\" % \n (creg._species_ids.get(int(m.groups()[0])),\n creg._region_ids.get(int(m.groups()[1]))), rate)\n rate_str = re.sub(r'params\\[(\\d+)\\]\\[(\\d+)\\]', \n lambda m: \"params[%i][%i]\" % \n (creg._params_ids.get(int(m.groups()[0])),\n creg._region_ids.get(int(m.groups()[1]))), rate_str)\n rate_str = re.sub(r'species_3d\\[(\\d+)\\]',\n lambda m: \"species_3d[%i]\" % \n creg._ecs_species_ids.get(int(m.groups()[0])), rate_str)\n rate_str = re.sub(r'params_3d\\[(\\d+)\\]',\n lambda m: \"params_3d[%i]\" % \n creg._ecs_params_ids.get(int(m.groups()[0])), rate_str)\n return rate_str\n\n #Setup intracellular and multicompartment reactions\n if location_count > 0:\n from . import rate, multiCompartmentReaction, Parameter\n for creg in c_region_list:\n if not creg._react_regions:\n continue\n creg._initalize()\n mc_mult_count = 0\n mc_mult_list = []\n species_ids_used = numpy.zeros((creg.num_species,creg.num_regions),bool)\n flux_ids_used = numpy.zeros((creg.num_species,creg.num_regions),bool)\n ecs_species_ids_used = numpy.zeros((creg.num_ecs_species),bool)\n fxn_string = _c_headers \n fxn_string += 'void reaction(double** species, double** params, double** rhs, double* mult, double* species_3d, double* params_3d, double* rhs_3d, double** flux, double v)\\n{'\n # declare the \"rate\" variable if any reactions (non-rates)\n for rprt in creg._react_regions:\n if not isinstance(rprt(),rate.Rate):\n fxn_string += '\\n\\tdouble rate;'\n break\n for rptr in _all_reactions:\n if rptr not in creg._react_regions:\n continue\n r = rptr()\n if isinstance(r, rate.Rate):\n s = r._species()\n species_id = creg._species_ids[s._id]\n for reg in creg._react_regions[rptr]:\n if reg() in r._rate:\n try:\n region_id = creg._region_ids[reg()._id]\n rate_str = localize_index(creg, r._rate[reg()][0])\n except KeyError:\n warn(\"Species not on the region specified, %r will be ignored.\\n\" % r)\n continue\n operator = '+=' if species_ids_used[species_id][region_id] else '='\n fxn_string += \"\\n\\trhs[%d][%d] %s %s;\" % (species_id, region_id, operator, rate_str)\n species_ids_used[species_id][region_id] = True\n elif isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):\n #Lookup the region_id for the reaction\n try:\n for reg in r._rate:\n rate_str = localize_index(creg, r._rate[reg][0])\n fxn_string += \"\\n\\trate = %s;\" % rate_str\n break\n except KeyError:\n warn(\"Species not on the region specified, %r will be ignored.\\n\" % r)\n continue\n\n for i, sptr in enumerate(r._sources + r._dests):\n s = sptr()\n if isinstance(s, species.SpeciesOnExtracellular):\n if not isinstance(s, 
species.ParameterOnExtracellular):\n species_id = creg._ecs_species_ids[s._extracellular()._grid_id]\n operator = '+=' if ecs_species_ids_used[species_id] else '='\n fxn_string += \"\\n\\trhs_3d[%d] %s mult[%d] * rate;\" % (species_id, operator, mc_mult_count)\n ecs_species_ids_used[species_id] = True\n elif not isinstance(s, species.Parameter) and not isinstance(s, species.ParameterOnRegion): \n species_id = creg._species_ids[s._id]\n region_id = creg._region_ids[s._region()._id]\n operator = '+=' if species_ids_used[species_id][region_id] else '='\n fxn_string += \"\\n\\trhs[%d][%d] %s mult[%d] * rate;\" % (species_id, region_id, operator, mc_mult_count)\n species_ids_used[species_id][region_id] = True\n if r._membrane_flux:\n operator = '+=' if flux_ids_used[species_id][region_id] else '='\n fxn_string += \"\\n\\tif(flux) flux[%d][%d] %s %1.1f * rate;\" % (species_id, region_id, operator, r._cur_charges[i])\n flux_ids_used[species_id][region_id] = True\n #TODO: Fix problem if the whole region isn't part of the same aggregate c_region\n mc_mult_count += 1\n mc_mult_list.extend(r._mult.flatten())\n else:\n for reg in creg._react_regions[rptr]:\n try:\n region_id = creg._region_ids[reg()._id]\n rate_str = localize_index(creg, r._rate[reg()][0])\n except KeyError:\n warn(\"Species not on the region specified, %r will be ignored.\\n\" % r)\n continue\n fxn_string += \"\\n\\trate = %s;\" % rate_str\n summed_mults = collections.defaultdict(lambda: 0)\n for (mult, sp) in zip(r._mult, r._sources + r._dests):\n summed_mults[creg._species_ids.get(sp()._id)] += mult\n for idx in sorted(summed_mults.keys()):\n operator = '+=' if species_ids_used[idx][region_id] else '='\n species_ids_used[idx][region_id] = True\n fxn_string += \"\\n\\trhs[%d][%d] %s (%g) * rate;\" % (idx, region_id, operator, summed_mults[idx])\n fxn_string += \"\\n}\\n\"\n register_rate(creg.num_species, creg.num_params, creg.num_regions,\n creg.num_segments, creg.get_state_index(),\n creg.num_ecs_species, creg.num_ecs_params,\n creg.get_ecs_species_ids(), creg.get_ecs_index(),\n mc_mult_count,\n numpy.array(mc_mult_list, dtype=ctypes.c_double),\n _list_to_pyobject_array(creg._vptrs),\n _c_compile(fxn_string))\n\n #Setup intracellular 3D reactions\n molecules_per_mM_um3 = constants.molecules_per_mM_um3()\n if regions_inv_3d:\n for reg in regions_inv_3d:\n ics_grid_ids = []\n all_ics_gids = set()\n ics_param_gids = set()\n fxn_string = _c_headers\n fxn_string += 'void reaction(double* species_3d, double* params_3d, double*rhs, double* mc3d_mults)\\n{'\n for rptr in [r for rlist in list(regions_inv.values()) for r in rlist]:\n if not isinstance(rptr(), rate.Rate):\n fxn_string += '\\n\\tdouble rate;\\n'\n break\n #if any rates on this region have SpeciesOnRegion, add their grid_ids\n #do this in loop above if it is correct\n for rptr in [r for rlist in list(regions_inv.values()) for r in rlist]:\n r = rptr()\n if isinstance(r, rate.Rate):\n if reg in r._regions:\n for spec_involved in r._involved_species:\n #probably should do parameters/states here as well\n if isinstance(spec_involved(), species.SpeciesOnRegion):\n all_ics_gids.add(spec_involved()._species()._intracellular_instances[spec_involved()._region()]._grid_id)\n elif isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):\n if reg in r._rate:\n for spec_involved in r._involved_species + r._sources + r._dests:\n all_ics_gids.add(spec_involved()._species()._intracellular_instances[spec_involved()._region()]._grid_id) \n\n for s in species_by_region[reg]:\n spe = 
s._species() if isinstance(s,species.SpeciesOnRegion) else s\n if hasattr(spe, '_intracellular_instances') and spe._intracellular_instances and reg in spe._intracellular_instances:\n if isinstance(s, species.Parameter) or isinstance(s, species.ParameterOnRegion):\n sp = spe._intracellular_instances[reg]\n ics_param_gids.add(sp._grid_id)\n else:\n ###TODO is this correct? are there any other cases I should worry about? Do I always make a species the intracellular instance for the region we are looping through?\n sp = spe._intracellular_instances[reg]\n all_ics_gids.add(sp._grid_id)\n all_ics_gids = list(all_ics_gids)\n ics_param_gids = list(ics_param_gids)\n if any([isinstance(rptr(), multiCompartmentReaction.MultiCompartmentReaction) for rptr in regions_inv[reg]]):\n #the elements in each list contain the indices into the states vector for the intracellular instance that need to be updated\n mc3d_region_size = len(reg._xs)\n mc3d_indices_start = [species._defined_species_by_gid[index]._mc3d_indices_start(reg) for index in all_ics_gids + ics_param_gids]\n else:\n mc3d_region_size = 0\n mc3d_indices_start = [0 for i in range(len(all_ics_gids + ics_param_gids))]\n mults = [[] for i in range(len(all_ics_gids + ics_param_gids))]\n for rptr in regions_inv[reg]:\n r = rptr()\n if reg not in r._rate:\n continue\n rate_str = re.sub(r'species_3d\\[(\\d+)\\]',lambda m: \"species_3d[%i]\" % [pid for pid,gid in enumerate(all_ics_gids) if gid == int(m.groups()[0])][0], r._rate[reg][-1])\n rate_str = re.sub(r'params_3d\\[(\\d+)\\]',lambda m: \"params_3d[%i]\" % [pid for pid, gid in enumerate(ics_param_gids) if gid == int(m.groups()[0])][0], rate_str)\n if isinstance(r,rate.Rate):\n s = r._species()\n #Get underlying rxd._IntracellularSpecies for the grid_id\n if isinstance(s, species.Parameter) or isinstance(s, species.ParameterOnRegion):\n continue\n elif isinstance(s, species.Species):\n s = s._intracellular_instances[reg]\n elif isinstance(s, species.SpeciesOnRegion):\n s = s._species()._intracellular_instances[s._region()]\n if s._grid_id in ics_grid_ids:\n operator = '+=' \n else:\n operator = '='\n ics_grid_ids.append(s._grid_id)\n pid = [pid for pid,gid in enumerate(all_ics_gids) if gid == s._grid_id][0]\n fxn_string += \"\\n\\trhs[%d] %s %s;\" % (pid, operator, rate_str)\n elif isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):\n if reg in r._regions:\n from . 
import geometry\n fxn_string += '\\n\\trate = ' + rate_str + \";\"\n for sptr in r._sources:\n s = sptr()\n if not isinstance(s, species.Parameter) and not isinstance(s, species.ParameterOnRegion):\n s3d = s.instance3d\n if s3d._grid_id in ics_grid_ids:\n operator = '+='\n else:\n operator = '='\n ics_grid_ids.append(s3d._grid_id)\n #Find mult for this grid\n for sec in reg._secs3d:\n sas = reg._vol\n s3d_reg = s3d._region\n for seg in sec:\n for index in reg._nodes_by_seg[seg]:\n #Change this to be by volume\n #membrane area / compartment volume / molecules_per_mM_um3\n mults[s3d._grid_id].append(sas[index] / (s3d._region._vol[index]) / molecules_per_mM_um3)\n pid = [pid for pid,gid in enumerate(all_ics_gids) if gid == s3d._grid_id][0]\n fxn_string += \"\\n\\trhs[%d] %s -mc3d_mults[%d] * rate;\" % (pid, operator, pid)\n for sptr in r._dests:\n s = sptr()\n if not isinstance(s, species.Parameter) and not isinstance(s, species.ParameterOnRegion):\n s3d = s.instance3d\n if s3d._grid_id in ics_grid_ids:\n operator = '+='\n else:\n operator = '='\n ics_grid_ids.append(s3d._grid_id)\n #Find mult for this grid\n for sec in reg._secs3d:\n sas = reg._vol\n s3d_reg = s3d._region \n for seg in sec:\n for index in reg._nodes_by_seg[seg]:\n #Change this to be by volume\n mults[s3d._grid_id].append(sas[index] / (s3d._region._vol[index]) / molecules_per_mM_um3)\n pid = [pid for pid,gid in enumerate(all_ics_gids) if gid == s3d._grid_id][0]\n fxn_string += \"\\n\\trhs[%d] %s mc3d_mults[%d] * rate;\" % (pid, operator, pid) \n \n else:\n idx=0\n fxn_string += \"\\n\\trate = %s;\" % rate_str\n for sp in r._sources + r._dests:\n s = sp()\n #Get underlying rxd._IntracellularSpecies for the grid_id\n if isinstance(s, species.Parameter) or isinstance(s, species.ParameterOnRegion):\n idx += 1\n continue\n if isinstance(s, species.Species):\n s = s._intracellular_instances[reg]\n elif isinstance(s, species.SpeciesOnRegion):\n if s._region() in s._species()._intracellular_instances:\n s = s._species()._intracellular_instances[s._region()]\n else:\n continue\n if s._grid_id in ics_grid_ids:\n operator = '+=' \n else:\n operator = '='\n ics_grid_ids.append(s._grid_id)\n pid = [pid for pid,gid in enumerate(all_ics_gids) if gid == s._grid_id][0]\n fxn_string += \"\\n\\trhs[%d] %s (%s)*rate;\" % (pid, operator, r._mult[idx])\n idx += 1\n fxn_string += \"\\n}\\n\"\n for i, ele in enumerate(mults):\n if ele == []:\n mults[i] = numpy.ones(len(reg._xs))\n mults = list(itertools.chain.from_iterable(mults))\n ics_register_reaction(0, len(all_ics_gids), len(ics_param_gids), _list_to_cint_array(all_ics_gids + ics_param_gids), numpy.asarray(mc3d_indices_start), mc3d_region_size, numpy.asarray(mults), _c_compile(fxn_string)) \n #Setup extracellular reactions\n if len(ecs_regions_inv) > 0:\n for reg in ecs_regions_inv:\n grid_ids = []\n all_gids = set()\n param_gids = set()\n fxn_string = _c_headers\n #TODO: find the nrn include path in python\n #It is necessary for a couple of function in python that are not in math.h\n fxn_string += 'void reaction(double* species_3d, double* params_3d, double* rhs)\\n{'\n # declare the \"rate\" variable if any reactions (non-rates)\n for rptr in [r for rlist in list(ecs_regions_inv.values()) for r in rlist]:\n if not isinstance(rptr(),rate.Rate):\n fxn_string += '\\n\\tdouble rate;'\n break\n #get a list of all grid_ids involved\n for s in ecs_species_by_region[reg]:\n if isinstance(s, species.Parameter) or isinstance(s, species.ParameterOnExtracellular):\n sp = s[reg] if isinstance(s, 
species.Species) else s\n param_gids.add(sp._extracellular()._grid_id if isinstance(sp, species.SpeciesOnExtracellular) else sp._grid_id)\n else:\n sp = s[reg] if isinstance(s, species.Species) else s\n all_gids.add(sp._extracellular()._grid_id if isinstance(sp, species.SpeciesOnExtracellular) else sp._grid_id)\n all_gids = list(all_gids)\n param_gids = list(param_gids)\n for rptr in ecs_regions_inv[reg]:\n r = rptr()\n rate_str = re.sub(r'species_3d\\[(\\d+)\\]',lambda m: \"species_3d[%i]\" % [pid for pid, gid in enumerate(all_gids) if gid == int(m.groups()[0])][0], r._rate_ecs[reg][-1])\n rate_str = re.sub(r'params_3d\\[(\\d+)\\]',lambda m: \"params_3d[%i]\" % [pid for pid, gid in enumerate(param_gids) if gid == int(m.groups()[0])][0], rate_str)\n if isinstance(r,rate.Rate):\n s = r._species()\n #Get underlying rxd._ExtracellularSpecies for the grid_id\n if isinstance(s, species.Parameter) or isinstance(s, species.ParameterOnExtracellular):\n continue\n elif isinstance(s, species.Species):\n s = s[reg]._extracellular()\n elif isinstance(s, species.SpeciesOnExtracellular):\n s = s._extracellular()\n if s._grid_id in grid_ids:\n operator = '+=' \n else:\n operator = '='\n grid_ids.append(s._grid_id)\n pid = [pid for pid,gid in enumerate(all_gids) if gid == s._grid_id][0]\n fxn_string += \"\\n\\trhs[%d] %s %s;\" % (pid, operator, rate_str)\n else:\n idx=0\n fxn_string += \"\\n\\trate = %s;\" % rate_str\n for sp in r._sources + r._dests:\n s = sp()\n #Get underlying rxd._ExtracellularSpecies for the grid_id\n if isinstance(s, species.Parameter) or isinstance(s, species.ParameterOnExtracellular):\n idx += 1\n continue\n if isinstance(s, species.Species):\n s = s[reg]._extracellular()\n elif isinstance(s, species.SpeciesOnExtracellular):\n s = s._extracellular()\n if s._grid_id in grid_ids:\n operator = '+=' \n else:\n operator = '='\n grid_ids.append(s._grid_id)\n pid = [pid for pid,gid in enumerate(all_gids) if gid == s._grid_id][0]\n fxn_string += \"\\n\\trhs[%d] %s (%s)*rate;\" % (pid, operator, r._mult[idx])\n idx += 1\n fxn_string += \"\\n}\\n\"\n ecs_register_reaction(0, len(all_gids), len(param_gids),\n _list_to_cint_array(all_gids + param_gids),\n _c_compile(fxn_string))\n\ndef _init():\n if len(species._all_species) == 0:\n return None\n initializer._do_init()\n # TODO: check about the 0<x<1 problem alluded to in the documentation\n h.define_shape()\n\n # if the shape has changed update the nodes\n _update_node_data()\n \n if species._has_1d:\n section1d._purge_cptrs()\n \n for sr in _species_get_all_species():\n s = sr()\n if s is not None:\n # TODO: are there issues with hybrid or 3D here? (I don't think so, but here's a bookmark just in case)\n s._register_cptrs()\n s._finitialize()\n _setup_matrices()\n #if species._has_1d and species._1d_submatrix_n():\n #volumes = node._get_data()[0]\n #zero_volume_indices = (numpy.where(volumes == 0)[0]).astype(numpy.int_)\n #setup_solver(_node_get_states(), len(_node_get_states()), zero_volume_indices, len(zero_volume_indices), h._ref_t, h._ref_dt)\n clear_rates()\n _setup_memb_currents()\n _compile_reactions()\n\n\ndef _include_flux(force=False):\n from .node import _node_fluxes\n from . 
import node\n if force or node._has_node_fluxes:\n index1D = []\n source1D = []\n scale1D = []\n grids = dict()\n for idx, t, src, sc, rptr in zip(_node_fluxes['index'], \n _node_fluxes['type'],\n _node_fluxes['source'],\n _node_fluxes['scale'],\n _node_fluxes['region']):\n if t == -1:\n index1D.append(idx)\n source1D.append(src)\n scale1D.append(sc * node._volumes[idx])\n else:\n gid = t\n if gid not in grids:\n grids[gid] = {'index':[], 'source': [], 'scale':[]}\n grids[gid]['index'].append(idx)\n grids[gid]['source'].append(src)\n grids[gid]['scale'].append(sc * rptr().volume(idx))\n counts3D = []\n grids3D = sorted(grids.keys())\n index3D = []\n source3D = []\n scale3D = []\n for gid in grids3D: \n counts3D.append(len(grids[gid]['index']))\n index3D.extend(grids[gid]['index'])\n source3D.extend(grids[gid]['source'])\n scale3D.extend(grids[gid]['scale'])\n rxd_include_node_flux1D(len(index1D), _list_to_clong_array(index1D),\n _list_to_cdouble_array(scale1D),\n _list_to_pyobject_array(source1D))\n \n rxd_include_node_flux3D(len(grids3D), _list_to_cint_array(counts3D),\n _list_to_cint_array(grids3D),\n _list_to_clong_array(index3D),\n _list_to_cdouble_array(scale3D),\n _list_to_pyobject_array(source3D))\n node._has_node_fluxes = False\n\ndef _init_concentration():\n if len(species._all_species) == 0:\n return None\n for sr in _species_get_all_species():\n s = sr()\n if s is not None:\n # TODO: are there issues with hybrid or 3D here? (I don't think so, but here's a bookmark just in case)\n s._finitialize()\n\n\n\n_has_nbs_registered = False\n_nbs = None\ndo_setup_matrices_fptr = None\ndo_setup_units_fptr = None\ndef _do_nbs_register():\n global _has_nbs_registered, _nbs, _fih, _fih2, _fih3, do_setup_matrices_fptr, do_setup_units_fptr\n \n if not _has_nbs_registered:\n #from neuron import nonvint_block_supervisor as _nbs\n\n _has_nbs_registered = True\n #_nbs.register(_callbacks) not used\n \n #\n # register the initialization handler and the ion register handler\n #\n _fih = h.FInitializeHandler(_init_concentration)\n _fih3 = h.FInitializeHandler(3, _init)\n\n set_setup_matrices = nrn_dll_sym('set_setup_matrices')\n set_setup_matrices.argtypes = [fptr_prototype]\n do_setup_matrices_fptr = fptr_prototype(_setup_matrices)\n set_setup_matrices(do_setup_matrices_fptr)\n\n\n set_setup_units = nrn_dll_sym('set_setup_units')\n set_setup_units.argtypes = [fptr_prototype]\n do_setup_units_fptr = fptr_prototype(_setup_units)\n set_setup_units(do_setup_units_fptr)\n\n _fih2 = h.FInitializeHandler(3, initializer._do_ion_register)\n\n\n #\n # register scatter/gather mechanisms\n #\n _cvode_object.extra_scatter_gather(0, _after_advance)\n \n\n# register the Python callbacks\ndo_setup_fptr = fptr_prototype(_setup)\ndo_initialize_fptr = fptr_prototype(_init)\nset_setup(do_setup_fptr)\nset_initialize(do_initialize_fptr)\n\ndef _windows_remove_dlls():\n global _windows_dll_files, _windows_dll\n for (dll_ptr,filepath) in zip(_windows_dll,_windows_dll_files):\n dll = dll_ptr()\n if dll:\n handle = dll._handle\n del dll\n ctypes.windll.kernel32.FreeLibrary(handle)\n os.remove(filepath)\n _windows_dll_files = []\n _windows_dll = []\n \n \ndef nthread(n=None):\n if(n):\n _set_num_threads(n)\n return _get_num_threads()\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.asarray", "numpy.zeros", "numpy.ascontiguousarray", "numpy.ctypeslib.ndpointer", "numpy.where" ] ]
amodas/adversarial-robustness-toolbox
[ "3a957076d0df87203e1056b442a59d4ff56a8810" ]
[ "art/attacks/poisoning/feature_collision_attack.py" ]
[ "# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements clean-label attacks on Neural Networks.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom functools import reduce\nimport logging\nfrom typing import Optional, Tuple, Union, TYPE_CHECKING\n\nimport numpy as np\nfrom tqdm.auto import trange\n\nfrom art.attacks.attack import PoisoningAttackWhiteBox\nfrom art.estimators import BaseEstimator, NeuralNetworkMixin\nfrom art.estimators.classification.classifier import ClassifierMixin\nfrom art.estimators.classification.keras import KerasClassifier\n\nif TYPE_CHECKING:\n from art.utils import CLASSIFIER_NEURALNETWORK_TYPE\n\nlogger = logging.getLogger(__name__)\n\n\nclass FeatureCollisionAttack(PoisoningAttackWhiteBox):\n \"\"\"\n Close implementation of Feature Collision Poisoning Attack by Shafahi, Huang, et al 2018.\n \"Poison Frogs! Targeted Clean-Label Poisoning Attacks on Neural Networks\"\n\n This implementation dynamically calculates the dimension of the feature layer, and doesn't hardcode this\n value to 2048 as done in the paper. 
Thus we recommend using larger values for the similarity_coefficient.\n\n | Paper link: https://arxiv.org/abs/1804.00792\n \"\"\"\n\n attack_params = PoisoningAttackWhiteBox.attack_params + [\n \"target\",\n \"feature_layer\",\n \"learning_rate\",\n \"decay_coeff\",\n \"stopping_tol\",\n \"obj_threshold\",\n \"num_old_obj\",\n \"max_iter\",\n \"similarity_coeff\",\n \"watermark\",\n \"verbose\",\n ]\n\n _estimator_requirements = (BaseEstimator, NeuralNetworkMixin, ClassifierMixin, KerasClassifier)\n\n def __init__(\n self,\n classifier: \"CLASSIFIER_NEURALNETWORK_TYPE\",\n target: np.ndarray,\n feature_layer: Union[str, int],\n learning_rate: float = 500 * 255.0,\n decay_coeff: float = 0.5,\n stopping_tol: float = 1e-10,\n obj_threshold: Optional[float] = None,\n num_old_obj: int = 40,\n max_iter: int = 120,\n similarity_coeff: float = 256.0,\n watermark: Optional[float] = None,\n verbose: bool = True,\n ):\n \"\"\"\n Initialize an Feature Collision Clean-Label poisoning attack\n\n :param classifier: A trained neural network classifier.\n :param target: The target input to misclassify at test time.\n :param feature_layer: The name of the feature representation layer.\n :param learning_rate: The learning rate of clean-label attack optimization.\n :param decay_coeff: The decay coefficient of the learning rate.\n :param stopping_tol: Stop iterations after changes in attacks in less than this threshold.\n :param obj_threshold: Stop iterations after changes in objectives values are less than this threshold.\n :param num_old_obj: The number of old objective values to store.\n :param max_iter: The maximum number of iterations for the attack.\n :param similarity_coeff: The maximum number of iterations for the attack.\n :param watermark: Whether The opacity of the watermarked target image.\n :param verbose: Show progress bars.\n \"\"\"\n super().__init__(classifier=classifier) # type: ignore\n self.target = target\n self.feature_layer = feature_layer\n self.learning_rate = learning_rate\n self.decay_coeff = decay_coeff\n self.stopping_tol = stopping_tol\n self.obj_threshold = obj_threshold\n self.num_old_obj = num_old_obj\n self.max_iter = max_iter\n self.similarity_coeff = similarity_coeff\n self.watermark = watermark\n self.verbose = verbose\n self._check_params()\n\n self.target_placeholder, self.target_feature_rep = self.estimator.get_activations(\n self.target, self.feature_layer, 1, framework=True\n )\n self.poison_placeholder, self.poison_feature_rep = self.estimator.get_activations(\n self.target, self.feature_layer, 1, framework=True\n )\n self.attack_loss = tensor_norm(self.poison_feature_rep - self.target_feature_rep)\n\n def poison(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Iteratively finds optimal attack points starting at values at x\n\n :param x: The base images to begin the poison process.\n :param y: Not used in this attack (clean-label).\n :return: An tuple holding the (poisoning examples, poisoning labels).\n \"\"\"\n num_poison = len(x)\n final_attacks = []\n if num_poison == 0:\n raise ValueError(\"Must input at least one poison point\")\n\n target_features = self.estimator.get_activations(self.target, self.feature_layer, 1)\n for init_attack in x:\n old_attack = np.expand_dims(np.copy(init_attack), axis=0)\n poison_features = self.estimator.get_activations(old_attack, self.feature_layer, 1)\n old_objective = self.objective(poison_features, target_features, init_attack, old_attack)\n last_m_objectives = 
[old_objective]\n\n for i in trange(self.max_iter, desc=\"Feature collision\", disable=not self.verbose):\n # forward step\n new_attack = self.forward_step(old_attack)\n\n # backward step\n new_attack = self.backward_step(np.expand_dims(init_attack, axis=0), poison_features, new_attack)\n\n rel_change_val = np.linalg.norm(new_attack - old_attack) / np.linalg.norm(new_attack)\n if rel_change_val < self.stopping_tol or self.obj_threshold and old_objective <= self.obj_threshold:\n logger.info(\"stopped after %d iterations due to small changes\", i)\n break\n\n np.expand_dims(new_attack, axis=0)\n new_feature_rep = self.estimator.get_activations(new_attack, self.feature_layer, 1)\n new_objective = self.objective(new_feature_rep, target_features, init_attack, new_attack)\n\n avg_of_last_m = sum(last_m_objectives) / float(min(self.num_old_obj, i + 1))\n\n # Increasing objective means then learning rate is too big. Chop it, and throw out the latest iteration\n if new_objective >= avg_of_last_m and (i % self.num_old_obj / 2 == 0):\n self.learning_rate *= self.decay_coeff\n else:\n old_attack = new_attack\n old_objective = new_objective\n\n if i < self.num_old_obj - 1:\n last_m_objectives.append(new_objective)\n else:\n # first remove the oldest obj then append the new obj\n del last_m_objectives[0]\n last_m_objectives.append(new_objective)\n\n # Watermarking\n watermark = self.watermark * self.target if self.watermark else 0\n final_poison = np.clip(old_attack + watermark, *self.estimator.clip_values)\n final_attacks.append(final_poison)\n\n return np.vstack(final_attacks), self.estimator.predict(x)\n\n def forward_step(self, poison: np.ndarray) -> np.ndarray:\n \"\"\"\n Forward part of forward-backward splitting algorithm.\n\n :param poison: the current poison samples.\n :return: poison example closer in feature representation to target space.\n \"\"\"\n (attack_grad,) = self.estimator.custom_loss_gradient(\n self.attack_loss,\n [self.poison_placeholder, self.target_placeholder],\n [poison, self.target],\n name=\"feature_collision_\" + str(self.feature_layer),\n )\n poison -= self.learning_rate * attack_grad[0]\n\n return poison\n\n def backward_step(self, base: np.ndarray, feature_rep: np.ndarray, poison: np.ndarray) -> np.ndarray:\n \"\"\"\n Backward part of forward-backward splitting algorithm\n\n :param base: The base image that the poison was initialized with.\n :param feature_rep: Numpy activations at the target layer.\n :param poison: The current poison samples.\n :return: Poison example closer in feature representation to target space.\n \"\"\"\n num_features = reduce(lambda x, y: x * y, base.shape)\n dim_features = feature_rep.shape[-1]\n beta = self.similarity_coeff * (dim_features / num_features) ** 2\n poison = (poison + self.learning_rate * beta * base) / (1 + beta * self.learning_rate)\n low, high = self.estimator.clip_values\n return np.clip(poison, low, high)\n\n def objective(\n self, poison_feature_rep: np.ndarray, target_feature_rep: np.ndarray, base_image: np.ndarray, poison: np.ndarray\n ) -> float:\n \"\"\"\n Objective function of the attack\n\n :param poison_feature_rep: The numpy activations of the poison image.\n :param target_feature_rep: The numpy activations of the target image.\n :param base_image: The initial image used to poison.\n :param poison: The current poison image.\n :return: The objective of the optimization.\n \"\"\"\n num_features = base_image.size\n num_activations = poison_feature_rep.size\n beta = self.similarity_coeff * (num_activations / num_features) 
** 2\n return np.linalg.norm(poison_feature_rep - target_feature_rep) + beta * np.linalg.norm(poison - base_image)\n\n def _check_params(self) -> None:\n if self.learning_rate <= 0:\n raise ValueError(\"Learning rate must be strictly positive\")\n\n if self.max_iter < 1:\n raise ValueError(\"Value of max_iter at least 1\")\n\n if not isinstance(self.feature_layer, (str, int)):\n raise TypeError(\"Feature layer should be a string or int\")\n\n if self.decay_coeff <= 0:\n raise ValueError(\"Decay coefficient must be positive\")\n\n if self.stopping_tol <= 0:\n raise ValueError(\"Stopping tolerance must be positive\")\n\n if self.obj_threshold and self.obj_threshold <= 0:\n raise ValueError(\"Objective threshold must be positive\")\n\n if self.num_old_obj <= 0:\n raise ValueError(\"Number of old stored objectives must be positive\")\n\n if self.max_iter <= 0:\n raise ValueError(\"Number of old stored objectives must be positive\")\n\n if self.watermark and not (isinstance(self.watermark, float) and 0 <= self.watermark < 1):\n raise ValueError(\"Watermark must be between 0 and 1\")\n\n if not isinstance(self.verbose, bool):\n raise ValueError(\"The argument `verbose` has to be of type bool.\")\n\n\ndef get_class_name(obj: object) -> str:\n \"\"\"\n Get the full class name of an object.\n\n :param obj: A Python object.\n :return: A qualified class name.\n \"\"\"\n module = obj.__class__.__module__\n\n if module is None or module == str.__class__.__module__:\n return obj.__class__.__name__\n\n return module + \".\" + obj.__class__.__name__\n\n\ndef tensor_norm(tensor, norm_type: Union[int, float, str] = 2):\n \"\"\"\n Compute the norm of a tensor.\n\n :param tensor: A tensor from a supported ART neural network.\n :param norm_type: Order of the norm.\n :return: A tensor with the norm applied.\n \"\"\"\n tf_tensor_types = (\"tensorflow.python.framework.ops.Tensor\", \"tensorflow.python.framework.ops.EagerTensor\")\n torch_tensor_types = ()\n mxnet_tensor_types = ()\n supported_types = tf_tensor_types + torch_tensor_types + mxnet_tensor_types\n tensor_type = get_class_name(tensor)\n if tensor_type not in supported_types:\n raise TypeError(\"Tensor type `\" + tensor_type + \"` is not supported\")\n\n if tensor_type in tf_tensor_types:\n import tensorflow as tf\n\n return tf.norm(tensor, ord=norm_type)\n\n if tensor_type in torch_tensor_types:\n import torch\n\n return torch.norm(tensor, p=norm_type)\n\n if tensor_type in mxnet_tensor_types:\n import mxnet\n\n return mxnet.ndarray.norm(tensor, ord=norm_type)\n" ]
[ [ "numpy.linalg.norm", "torch.norm", "numpy.copy", "tensorflow.norm", "numpy.clip", "numpy.expand_dims", "numpy.vstack" ] ]
Jingyu-Peng/ray
[ "5ed3f0ce35cb3c66447858b4fc66573a13421d9a" ]
[ "rllib/policy/sample_batch.py" ]
[ "import collections\nimport numpy as np\nimport sys\nimport itertools\nimport tree # pip install dm_tree\nfrom typing import Dict, List, Optional, Set, Union\n\nfrom ray.util import log_once\nfrom ray.rllib.utils.annotations import Deprecated, DeveloperAPI, \\\n PublicAPI\nfrom ray.rllib.utils.compression import pack, unpack, is_compressed\nfrom ray.rllib.utils.deprecation import deprecation_warning\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\nfrom ray.rllib.utils.memory import concat_aligned\nfrom ray.rllib.utils.typing import PolicyID, TensorType\n\ntf1, tf, tfv = try_import_tf()\ntorch, _ = try_import_torch()\n\n# Default policy id for single agent environments\nDEFAULT_POLICY_ID = \"default_policy\"\n\n\n@PublicAPI\nclass SampleBatch(dict):\n \"\"\"Wrapper around a dictionary with string keys and array-like values.\n\n For example, {\"obs\": [1, 2, 3], \"reward\": [0, -1, 1]} is a batch of three\n samples, each with an \"obs\" and \"reward\" attribute.\n \"\"\"\n\n # Outputs from interacting with the environment\n OBS = \"obs\"\n CUR_OBS = \"obs\"\n NEXT_OBS = \"new_obs\"\n ACTIONS = \"actions\"\n REWARDS = \"rewards\"\n PREV_ACTIONS = \"prev_actions\"\n PREV_REWARDS = \"prev_rewards\"\n DONES = \"dones\"\n INFOS = \"infos\"\n\n # Extra action fetches keys.\n ACTION_DIST_INPUTS = \"action_dist_inputs\"\n ACTION_PROB = \"action_prob\"\n ACTION_LOGP = \"action_logp\"\n\n # Uniquely identifies an episode.\n EPS_ID = \"eps_id\"\n\n # Uniquely identifies a sample batch. This is important to distinguish RNN\n # sequences from the same episode when multiple sample batches are\n # concatenated (fusing sequences across batches can be unsafe).\n UNROLL_ID = \"unroll_id\"\n\n # Uniquely identifies an agent within an episode.\n AGENT_INDEX = \"agent_index\"\n\n # Value function predictions emitted by the behaviour policy.\n VF_PREDS = \"vf_preds\"\n\n @PublicAPI\n def __init__(self, *args, **kwargs):\n \"\"\"Constructs a sample batch (same params as dict constructor).\"\"\"\n\n # Possible seq_lens (TxB or BxT) setup.\n self.time_major = kwargs.pop(\"_time_major\", None)\n\n self.max_seq_len = kwargs.pop(\"_max_seq_len\", None)\n self.zero_padded = kwargs.pop(\"_zero_padded\", False)\n self.is_training = kwargs.pop(\"_is_training\", None)\n\n # Call super constructor. This will make the actual data accessible\n # by column name (str) via e.g. 
self[\"some-col\"].\n dict.__init__(self, *args, **kwargs)\n\n self.accessed_keys = set()\n self.added_keys = set()\n self.deleted_keys = set()\n self.intercepted_values = {}\n self.get_interceptor = None\n\n # Clear out None seq-lens.\n seq_lens_ = self.get(\"seq_lens\")\n if seq_lens_ is None or \\\n (isinstance(seq_lens_, list) and len(seq_lens_) == 0):\n self.pop(\"seq_lens\", None)\n # Numpyfy seq_lens if list.\n elif isinstance(seq_lens_, list):\n self[\"seq_lens\"] = seq_lens_ = np.array(seq_lens_, dtype=np.int32)\n\n if self.max_seq_len is None and seq_lens_ is not None and \\\n not (tf and tf.is_tensor(seq_lens_)) and \\\n len(seq_lens_) > 0:\n self.max_seq_len = max(seq_lens_)\n\n if self.is_training is None:\n self.is_training = self.pop(\"is_training\", False)\n\n lengths = []\n copy_ = {k: v for k, v in self.items() if k != \"seq_lens\"}\n for k, v in copy_.items():\n assert isinstance(k, str), self\n\n # TODO: Drop support for lists as values.\n # Convert lists of int|float into numpy arrays make sure all data\n # has same length.\n if isinstance(v, list):\n self[k] = np.array(v)\n\n # Try to infer the \"length\" of the SampleBatch by finding the first\n # value that is actually a ndarray/tensor. This would fail if\n # all values are nested dicts/tuples of more complex underlying\n # structures.\n len_ = len(v) if isinstance(\n v,\n (list, np.ndarray)) or (torch and torch.is_tensor(v)) else None\n if len_:\n lengths.append(len_)\n\n if self.get(\"seq_lens\") is not None and \\\n not (tf and tf.is_tensor(self[\"seq_lens\"])) and \\\n len(self[\"seq_lens\"]) > 0:\n self.count = sum(self[\"seq_lens\"])\n else:\n self.count = lengths[0] if lengths else 0\n\n # A convenience map for slicing this batch into sub-batches along\n # the time axis. This helps reduce repeated iterations through the\n # batch's seq_lens array to find good slicing points. Built lazily\n # when needed.\n self._slice_map = []\n\n @PublicAPI\n def __len__(self):\n \"\"\"Returns the amount of samples in the sample batch.\"\"\"\n return self.count\n\n @staticmethod\n @PublicAPI\n def concat_samples(\n samples: Union[List[\"SampleBatch\"], List[\"MultiAgentBatch\"]],\n ) -> Union[\"SampleBatch\", \"MultiAgentBatch\"]:\n \"\"\"Concatenates n SampleBatches or MultiAgentBatches.\n\n Args:\n samples (Union[List[SampleBatch], List[MultiAgentBatch]]): List of\n SampleBatches or MultiAgentBatches to be concatenated.\n\n Returns:\n Union[SampleBatch, MultiAgentBatch]: A new (concatenated)\n SampleBatch or MultiAgentBatch.\n\n Examples:\n >>> b1 = SampleBatch({\"a\": np.array([1, 2]),\n ... \"b\": np.array([10, 11])})\n >>> b2 = SampleBatch({\"a\": np.array([3]),\n ... 
\"b\": np.array([12])})\n >>> print(SampleBatch.concat_samples([b1, b2]))\n {\"a\": np.array([1, 2, 3]), \"b\": np.array([10, 11, 12])}\n \"\"\"\n if isinstance(samples[0], MultiAgentBatch):\n return MultiAgentBatch.concat_samples(samples)\n concatd_seq_lens = []\n concat_samples = []\n zero_padded = samples[0].zero_padded\n max_seq_len = samples[0].max_seq_len\n time_major = samples[0].time_major\n for s in samples:\n if s.count > 0:\n assert s.zero_padded == zero_padded\n assert s.time_major == time_major\n if zero_padded:\n assert s.max_seq_len == max_seq_len\n concat_samples.append(s)\n if s.get(\"seq_lens\") is not None:\n concatd_seq_lens.extend(s[\"seq_lens\"])\n\n # If we don't have any samples (no or only empty SampleBatches),\n # return an empty SampleBatch here.\n if len(concat_samples) == 0:\n return SampleBatch()\n\n # Collect the concat'd data.\n concatd_data = {}\n\n def concat_key(*values):\n return concat_aligned(values, time_major)\n\n try:\n for k in concat_samples[0].keys():\n if k == \"infos\":\n concatd_data[k] = concat_aligned(\n [s[k] for s in concat_samples], time_major=time_major)\n else:\n concatd_data[k] = tree.map_structure(\n concat_key, *[c[k] for c in concat_samples])\n except Exception:\n raise ValueError(f\"Cannot concat data under key '{k}', b/c \"\n \"sub-structures under that key don't match. \"\n f\"`samples`={samples}\")\n\n # Return a new (concat'd) SampleBatch.\n return SampleBatch(\n concatd_data,\n seq_lens=concatd_seq_lens,\n _time_major=time_major,\n _zero_padded=zero_padded,\n _max_seq_len=max_seq_len,\n )\n\n @PublicAPI\n def concat(self, other: \"SampleBatch\") -> \"SampleBatch\":\n \"\"\"Concatenates `other` to this one and returns a new SampleBatch.\n\n Args:\n other (SampleBatch): The other SampleBatch object to concat to this\n one.\n\n Returns:\n SampleBatch: The new SampleBatch, resulting from concating `other`\n to `self`.\n\n Examples:\n >>> b1 = SampleBatch({\"a\": np.array([1, 2])})\n >>> b2 = SampleBatch({\"a\": np.array([3, 4, 5])})\n >>> print(b1.concat(b2))\n {\"a\": np.array([1, 2, 3, 4, 5])}\n \"\"\"\n return self.concat_samples([self, other])\n\n @PublicAPI\n def copy(self, shallow: bool = False) -> \"SampleBatch\":\n \"\"\"Creates a deep or shallow copy of this SampleBatch and returns it.\n\n Args:\n shallow (bool): Whether the copying should be done shallowly.\n\n Returns:\n SampleBatch: A deep or shallow copy of this SampleBatch object.\n \"\"\"\n copy_ = {k: v for k, v in self.items()}\n data = tree.map_structure(\n lambda v: (np.array(v, copy=not shallow) if\n isinstance(v, np.ndarray) else v),\n copy_,\n )\n copy_ = SampleBatch(data)\n copy_.set_get_interceptor(self.get_interceptor)\n return copy_\n\n @PublicAPI\n def rows(self) -> Dict[str, TensorType]:\n \"\"\"Returns an iterator over data rows, i.e. dicts with column values.\n\n Note that if `seq_lens` is set in self, we set it to [1] in the rows.\n\n Yields:\n Dict[str, TensorType]: The column values of the row in this\n iteration.\n\n Examples:\n >>> batch = SampleBatch({\n ... \"a\": [1, 2, 3],\n ... \"b\": [4, 5, 6],\n ... \"seq_lens\": [1, 2]\n ... 
})\n >>> for row in batch.rows():\n print(row)\n {\"a\": 1, \"b\": 4, \"seq_lens\": [1]}\n {\"a\": 2, \"b\": 5, \"seq_lens\": [1]}\n {\"a\": 3, \"b\": 6, \"seq_lens\": [1]}\n \"\"\"\n\n # Do we add seq_lens=[1] to each row?\n seq_lens = None if self.get(\"seq_lens\") is None else np.array([1])\n\n self_as_dict = {k: v for k, v in self.items()}\n\n for i in range(self.count):\n yield tree.map_structure_with_path(\n lambda p, v: v[i] if p[0] != \"seq_lens\" else seq_lens,\n self_as_dict,\n )\n\n @PublicAPI\n def columns(self, keys: List[str]) -> List[any]:\n \"\"\"Returns a list of the batch-data in the specified columns.\n\n Args:\n keys (List[str]): List of column names fo which to return the data.\n\n Returns:\n List[any]: The list of data items ordered by the order of column\n names in `keys`.\n\n Examples:\n >>> batch = SampleBatch({\"a\": [1], \"b\": [2], \"c\": [3]})\n >>> print(batch.columns([\"a\", \"b\"]))\n [[1], [2]]\n \"\"\"\n\n # TODO: (sven) Make this work for nested data as well.\n out = []\n for k in keys:\n out.append(self[k])\n return out\n\n @PublicAPI\n def shuffle(self) -> None:\n \"\"\"Shuffles the rows of this batch in-place.\n\n Returns:\n SampleBatch: This very (now shuffled) SampleBatch.\n\n Raises:\n ValueError: If self[\"seq_lens\"] is defined.\n\n Examples:\n >>> batch = SampleBatch({\"a\": [1, 2, 3, 4]})\n >>> print(batch.shuffle())\n {\"a\": [4, 1, 3, 2]}\n \"\"\"\n\n # Shuffling the data when we have `seq_lens` defined is probably\n # a bad idea!\n if self.get(\"seq_lens\") is not None:\n raise ValueError(\n \"SampleBatch.shuffle not possible when your data has \"\n \"`seq_lens` defined!\")\n\n # Get a permutation over the single items once and use the same\n # permutation for all the data (otherwise, data would become\n # meaningless).\n permutation = np.random.permutation(self.count)\n\n def _permutate_in_place(path, value):\n curr = self\n for i, p in enumerate(path):\n if i == len(path) - 1:\n curr[p] = value[permutation]\n curr = curr[p]\n\n tree.map_structure_with_path(_permutate_in_place, self)\n\n return self\n\n @PublicAPI\n def split_by_episode(self) -> List[\"SampleBatch\"]:\n \"\"\"Splits by `eps_id` column and returns list of new batches.\n\n Returns:\n List[SampleBatch]: List of batches, one per distinct episode.\n\n Raises:\n KeyError: If the `eps_id` AND `dones` columns are not present.\n\n Examples:\n >>> batch = SampleBatch({\"a\": [1, 2, 3], \"eps_id\": [0, 0, 1]})\n >>> print(batch.split_by_episode())\n [{\"a\": [1, 2], \"eps_id\": [0, 0]}, {\"a\": [3], \"eps_id\": [1]}]\n \"\"\"\n\n # No eps_id in data -> Make sure there are no \"dones\" in the middle\n # and add eps_id automatically.\n if SampleBatch.EPS_ID not in self:\n # TODO: (sven) Shouldn't we rather split by DONEs then and not\n # add fake eps-ids (0s) at all?\n if SampleBatch.DONES in self:\n assert not any(self[SampleBatch.DONES][:-1])\n self[SampleBatch.EPS_ID] = np.repeat(0, self.count)\n return [self]\n\n # Produce a new slice whenever we find a new episode ID.\n slices = []\n cur_eps_id = self[SampleBatch.EPS_ID][0]\n offset = 0\n for i in range(self.count):\n next_eps_id = self[SampleBatch.EPS_ID][i]\n if next_eps_id != cur_eps_id:\n slices.append(self[offset:i])\n offset = i\n cur_eps_id = next_eps_id\n # Add final slice.\n slices.append(self[offset:self.count])\n\n # TODO: (sven) Are these checks necessary? 
Should be all ok according\n # to above logic.\n for s in slices:\n slen = len(set(s[SampleBatch.EPS_ID]))\n assert slen == 1, (s, slen)\n assert sum(s.count for s in slices) == self.count, (slices, self.count)\n\n return slices\n\n @Deprecated(new=\"SampleBatch[start:stop]\", error=False)\n def slice(self, start: int, end: int, state_start=None,\n state_end=None) -> \"SampleBatch\":\n \"\"\"Returns a slice of the row data of this batch (w/o copying).\n\n Args:\n start (int): Starting index. If < 0, will left-zero-pad.\n end (int): Ending index.\n\n Returns:\n SampleBatch: A new SampleBatch, which has a slice of this batch's\n data.\n \"\"\"\n if self.get(\"seq_lens\") is not None and len(self[\"seq_lens\"]) > 0:\n if start < 0:\n data = {\n k: np.concatenate([\n np.zeros(\n shape=(-start, ) + v.shape[1:], dtype=v.dtype),\n v[0:end]\n ])\n for k, v in self.items()\n if k != \"seq_lens\" and not k.startswith(\"state_in_\")\n }\n else:\n data = {\n k: v[start:end]\n for k, v in self.items()\n if k != \"seq_lens\" and not k.startswith(\"state_in_\")\n }\n if state_start is not None:\n assert state_end is not None\n state_idx = 0\n state_key = \"state_in_{}\".format(state_idx)\n while state_key in self:\n data[state_key] = self[state_key][state_start:state_end]\n state_idx += 1\n state_key = \"state_in_{}\".format(state_idx)\n seq_lens = list(self[\"seq_lens\"][state_start:state_end])\n # Adjust seq_lens if necessary.\n data_len = len(data[next(iter(data))])\n if sum(seq_lens) != data_len:\n assert sum(seq_lens) > data_len\n seq_lens[-1] = data_len - sum(seq_lens[:-1])\n else:\n # Fix state_in_x data.\n count = 0\n state_start = None\n seq_lens = None\n for i, seq_len in enumerate(self[\"seq_lens\"]):\n count += seq_len\n if count >= end:\n state_idx = 0\n state_key = \"state_in_{}\".format(state_idx)\n if state_start is None:\n state_start = i\n while state_key in self:\n data[state_key] = self[state_key][state_start:i +\n 1]\n state_idx += 1\n state_key = \"state_in_{}\".format(state_idx)\n seq_lens = list(self[\"seq_lens\"][state_start:i]) + [\n seq_len - (count - end)\n ]\n if start < 0:\n seq_lens[0] += -start\n diff = sum(seq_lens) - (end - start)\n if diff > 0:\n seq_lens[0] -= diff\n assert sum(seq_lens) == (end - start)\n break\n elif state_start is None and count > start:\n state_start = i\n\n return SampleBatch(\n data,\n seq_lens=seq_lens,\n _time_major=self.time_major,\n )\n else:\n return SampleBatch(\n {k: v[start:end]\n for k, v in self.items()},\n _is_training=self.is_training,\n _time_major=self.time_major)\n\n @PublicAPI\n def timeslices(self,\n size: Optional[int] = None,\n num_slices: Optional[int] = None,\n k: Optional[int] = None) -> List[\"SampleBatch\"]:\n \"\"\"Returns SampleBatches, each one representing a k-slice of this one.\n\n Will start from timestep 0 and produce slices of size=k.\n\n Args:\n size (Optional[int]): The size (in timesteps) of each returned\n SampleBatch.\n num_slices (Optional[int]): The number of slices to produce.\n k (int): Obsoleted: Use size or num_slices instead!\n The size (in timesteps) of each returned SampleBatch.\n\n Returns:\n List[SampleBatch]: The list of `num_slices` (new) SampleBatches\n or n (new) SampleBatches each one of size `size`.\n \"\"\"\n if size is None and num_slices is None:\n deprecation_warning(\"k\", \"size or num_slices\")\n assert k is not None\n size = k\n\n if size is None:\n assert isinstance(num_slices, int)\n\n slices = []\n left = len(self)\n start = 0\n while left:\n len_ = left // (num_slices - 
len(slices))\n stop = start + len_\n slices.append(self[start:stop])\n left -= len_\n start = stop\n\n return slices\n\n else:\n assert isinstance(size, int)\n\n slices = []\n left = len(self)\n start = 0\n while left:\n stop = start + size\n slices.append(self[start:stop])\n left -= size\n start = stop\n\n return slices\n\n @Deprecated(new=\"SampleBatch.right_zero_pad\", error=False)\n def zero_pad(self, max_seq_len, exclude_states=True):\n return self.right_zero_pad(max_seq_len, exclude_states)\n\n def right_zero_pad(self, max_seq_len: int, exclude_states: bool = True):\n \"\"\"Right (adding zeros at end) zero-pads this SampleBatch in-place.\n\n This will set the `self.zero_padded` flag to True and\n `self.max_seq_len` to the given `max_seq_len` value.\n\n Args:\n max_len (int): The max (total) length to zero pad to.\n exclude_states (bool): If True, also right-zero-pad all\n `state_in_x` data. If False, leave `state_in_x` keys\n as-is.\n\n Returns:\n SampleBatch: This very (now right-zero-padded) SampleBatch.\n\n Raises:\n ValueError: If self.seq_lens is None (not defined).\n\n Examples:\n >>> batch = SampleBatch({\"a\": [1, 2, 3], \"seq_lens\": [1, 2]})\n >>> print(batch.right_zero_pad(max_seq_len=4))\n {\"a\": [1, 0, 0, 0, 2, 3, 0, 0], \"seq_lens\": [1, 2]}\n\n >>> batch = SampleBatch({\"a\": [1, 2, 3],\n ... \"state_in_0\": [1.0, 3.0],\n ... \"seq_lens\": [1, 2]})\n >>> print(batch.right_zero_pad(max_seq_len=5))\n {\"a\": [1, 0, 0, 0, 0, 2, 3, 0, 0, 0],\n \"state_in_0\": [1.0, 3.0], # <- all state-ins remain as-is\n \"seq_lens\": [1, 2]}\n \"\"\"\n seq_lens = self.get(\"seq_lens\")\n if seq_lens is None:\n raise ValueError(\n \"Cannot right-zero-pad SampleBatch if no `seq_lens` field \"\n \"present! SampleBatch={self}\")\n\n length = len(seq_lens) * max_seq_len\n\n def _zero_pad_in_place(path, value):\n # Skip \"state_in_...\" columns and \"seq_lens\".\n if (exclude_states is True and path[0].startswith(\"state_in_\")) \\\n or path[0] == \"seq_lens\":\n return\n # Generate zero-filled primer of len=max_seq_len.\n if value.dtype == np.object or value.dtype.type is np.str_:\n f_pad = [None] * length\n else:\n # Make sure type doesn't change.\n f_pad = np.zeros(\n (length, ) + np.shape(value)[1:], dtype=value.dtype)\n # Fill primer with data.\n f_pad_base = f_base = 0\n for len_ in self[\"seq_lens\"]:\n f_pad[f_pad_base:f_pad_base + len_] = value[f_base:f_base +\n len_]\n f_pad_base += max_seq_len\n f_base += len_\n assert f_base == len(value), value\n\n # Update our data in-place.\n curr = self\n for i, p in enumerate(path):\n if i == len(path) - 1:\n curr[p] = f_pad\n curr = curr[p]\n\n self_as_dict = {k: v for k, v in self.items()}\n tree.map_structure_with_path(_zero_pad_in_place, self_as_dict)\n\n # Set flags to indicate, we are now zero-padded (and to what extend).\n self.zero_padded = True\n self.max_seq_len = max_seq_len\n\n return self\n\n # Experimental method.\n def to_device(self, device, framework=\"torch\"):\n \"\"\"TODO: transfer batch to given device as framework tensor.\"\"\"\n if framework == \"torch\":\n assert torch is not None\n for k, v in self.items():\n if isinstance(v, np.ndarray) and v.dtype != np.object:\n self[k] = torch.from_numpy(v).to(device)\n else:\n raise NotImplementedError\n return self\n\n @PublicAPI\n def size_bytes(self) -> int:\n \"\"\"Returns sum over number of bytes of all data buffers.\n\n For numpy arrays, we use `.nbytes`. 
For all other value types, we use\n sys.getsizeof(...).\n\n Returns:\n int: The overall size in bytes of the data buffer (all columns).\n \"\"\"\n return sum(\n v.nbytes if isinstance(v, np.ndarray) else sys.getsizeof(v)\n for v in tree.flatten(self))\n\n def get(self, key, default=None):\n try:\n return self.__getitem__(key)\n except KeyError:\n return default\n\n @PublicAPI\n def __getitem__(self, key: Union[str, slice]) -> TensorType:\n \"\"\"Returns one column (by key) from the data or a sliced new batch.\n\n Args:\n key (Union[str, slice]): The key (column name) to return or\n a slice object for slicing this SampleBatch.\n\n Returns:\n TensorType: The data under the given key or a sliced version of\n this batch.\n \"\"\"\n if isinstance(key, slice):\n return self._slice(key)\n\n if not hasattr(self, key) and key in self:\n self.accessed_keys.add(key)\n\n # Backward compatibility for when \"input-dicts\" were used.\n if key == \"is_training\":\n if log_once(\"SampleBatch['is_training']\"):\n deprecation_warning(\n old=\"SampleBatch['is_training']\",\n new=\"SampleBatch.is_training\",\n error=False)\n return self.is_training\n\n value = dict.__getitem__(self, key)\n if self.get_interceptor is not None:\n if key not in self.intercepted_values:\n self.intercepted_values[key] = self.get_interceptor(value)\n value = self.intercepted_values[key]\n return value\n\n @PublicAPI\n def __setitem__(self, key, item) -> None:\n \"\"\"Inserts (overrides) an entire column (by key) in the data buffer.\n\n Args:\n key (str): The column name to set a value for.\n item (TensorType): The data to insert.\n \"\"\"\n # Defend against creating SampleBatch via pickle (no property\n # `added_keys` and first item is already set).\n if not hasattr(self, \"added_keys\"):\n dict.__setitem__(self, key, item)\n return\n\n if key not in self:\n self.added_keys.add(key)\n\n dict.__setitem__(self, key, item)\n if key in self.intercepted_values:\n self.intercepted_values[key] = item\n\n @PublicAPI\n def __delitem__(self, key):\n self.deleted_keys.add(key)\n dict.__delitem__(self, key)\n\n @DeveloperAPI\n def compress(self,\n bulk: bool = False,\n columns: Set[str] = frozenset([\"obs\", \"new_obs\"])) -> None:\n \"\"\"Compresses the data buffers (by column) in place.\n\n Args:\n bulk (bool): Whether to compress across the batch dimension (0)\n as well. If False will compress n separate list items, where n\n is the batch size.\n columns (Set[str]): The columns to compress. Default: Only\n compress the obs and new_obs columns.\n\n Returns:\n SampleBatch: This very (now compressed) SampleBatch.\n \"\"\"\n\n def _compress_in_place(path, value):\n if path[0] not in columns:\n return\n curr = self\n for i, p in enumerate(path):\n if i == len(path) - 1:\n if bulk:\n curr[p] = pack(value)\n else:\n curr[p] = np.array([pack(o) for o in value])\n curr = curr[p]\n\n tree.map_structure_with_path(_compress_in_place, self)\n\n return self\n\n @DeveloperAPI\n def decompress_if_needed(self,\n columns: Set[str] = frozenset(\n [\"obs\", \"new_obs\"])) -> \"SampleBatch\":\n \"\"\"Decompresses data buffers (per column if not compressed) in place.\n\n Args:\n columns (Set[str]): The columns to decompress. 
Default: Only\n decompress the obs and new_obs columns.\n\n Returns:\n SampleBatch: This very (now uncompressed) SampleBatch.\n \"\"\"\n\n def _decompress_in_place(path, value):\n if path[0] not in columns:\n return\n curr = self\n for p in path[:-1]:\n curr = curr[p]\n # Bulk compressed.\n if is_compressed(value):\n curr[path[-1]] = unpack(value)\n # Non bulk compressed.\n elif len(value) > 0 and is_compressed(value[0]):\n curr[path[-1]] = np.array([unpack(o) for o in value])\n\n tree.map_structure_with_path(_decompress_in_place, self)\n\n return self\n\n @DeveloperAPI\n def set_get_interceptor(self, fn):\n # If get-interceptor changes, must erase old intercepted values.\n if fn is not self.get_interceptor:\n self.intercepted_values = {}\n self.get_interceptor = fn\n\n def __repr__(self):\n keys = list(self.keys())\n if self.get(\"seq_lens\") is None:\n return f\"SampleBatch({self.count}: {keys})\"\n else:\n keys.remove(\"seq_lens\")\n return f\"SampleBatch({self.count} \" \\\n f\"(seqs={len(self['seq_lens'])}): {keys})\"\n\n def _slice(self, slice_: slice):\n \"\"\"Helper method to handle SampleBatch slicing using a slice object.\n\n The returned SampleBatch uses the same underlying data object as\n `self`, so changing the slice will also change `self`.\n\n Note that only zero or positive bounds are allowed for both start\n and stop values. The slice step must be 1 (or None, which is the\n same).\n\n Args:\n slice_ (slice): The python slice object to slice by.\n\n Returns:\n SampleBatch: A new SampleBatch, however \"linking\" into the same\n data (sliced) as self.\n \"\"\"\n start = slice_.start or 0\n stop = slice_.stop or len(self)\n assert start >= 0 and stop >= 0 and slice_.step in [1, None]\n\n if self.get(\"seq_lens\") is not None and len(self[\"seq_lens\"]) > 0:\n # Build our slice-map, if not done already.\n if not self._slice_map:\n sum_ = 0\n for i, l in enumerate(self[\"seq_lens\"]):\n for _ in range(l):\n self._slice_map.append((i, sum_))\n sum_ += l\n self._slice_map.append((len(self[\"seq_lens\"]), sum_))\n\n start_seq_len, start = self._slice_map[start]\n stop_seq_len, stop = self._slice_map[stop]\n if self.zero_padded:\n start = start_seq_len * self.max_seq_len\n stop = stop_seq_len * self.max_seq_len\n\n def map_(path, value):\n if path[0] != \"seq_lens\" and not path[0].startswith(\n \"state_in_\"):\n return value[start:stop]\n else:\n return value[start_seq_len:stop_seq_len]\n\n data = tree.map_structure_with_path(map_, self)\n return SampleBatch(\n data,\n _is_training=self.is_training,\n _time_major=self.time_major,\n _zero_padded=self.zero_padded,\n )\n else:\n data = tree.map_structure(lambda value: value[start:stop], self)\n return SampleBatch(\n data,\n _is_training=self.is_training,\n _time_major=self.time_major,\n )\n\n @Deprecated(error=False)\n def _get_slice_indices(self, slice_size):\n data_slices = []\n data_slices_states = []\n if self.get(\"seq_lens\") is not None and len(self[\"seq_lens\"]) > 0:\n assert np.all(self[\"seq_lens\"] < slice_size), \\\n \"ERROR: `slice_size` must be larger than the max. 
seq-len \" \\\n \"in the batch!\"\n start_pos = 0\n current_slize_size = 0\n actual_slice_idx = 0\n start_idx = 0\n idx = 0\n while idx < len(self[\"seq_lens\"]):\n seq_len = self[\"seq_lens\"][idx]\n current_slize_size += seq_len\n actual_slice_idx += seq_len if not self.zero_padded else \\\n self.max_seq_len\n # Complete minibatch -> Append to data_slices.\n if current_slize_size >= slice_size:\n end_idx = idx + 1\n # We are not zero-padded yet; all sequences are\n # back-to-back.\n if not self.zero_padded:\n data_slices.append((start_pos, start_pos + slice_size))\n start_pos += slice_size\n if current_slize_size > slice_size:\n overhead = current_slize_size - slice_size\n start_pos -= (seq_len - overhead)\n idx -= 1\n # We are already zero-padded: Cut in chunks of max_seq_len.\n else:\n data_slices.append((start_pos, actual_slice_idx))\n start_pos = actual_slice_idx\n\n data_slices_states.append((start_idx, end_idx))\n current_slize_size = 0\n start_idx = idx + 1\n idx += 1\n else:\n i = 0\n while i < self.count:\n data_slices.append((i, i + slice_size))\n i += slice_size\n return data_slices, data_slices_states\n\n # TODO: deprecate\n @property\n def data(self):\n deprecation_warning(\n old=\"SampleBatch.data[..]\", new=\"SampleBatch[..]\", error=True)\n return self\n\n # TODO: (sven) Experimental method.\n def get_single_step_input_dict(self, view_requirements, index=\"last\"):\n \"\"\"Creates single ts SampleBatch at given index from `self`.\n\n For usage as input-dict for model calls.\n\n Args:\n sample_batch (SampleBatch): A single-trajectory SampleBatch object\n to generate the compute_actions input dict from.\n index (Union[int, str]): An integer index value indicating the\n position in the trajectory for which to generate the\n compute_actions input dict. Set to \"last\" to generate the dict\n at the very end of the trajectory (e.g. 
for value estimation).\n Note that \"last\" is different from -1, as \"last\" will use the\n final NEXT_OBS as observation input.\n\n Returns:\n SampleBatch: The (single-timestep) input dict for ModelV2 calls.\n \"\"\"\n last_mappings = {\n SampleBatch.OBS: SampleBatch.NEXT_OBS,\n SampleBatch.PREV_ACTIONS: SampleBatch.ACTIONS,\n SampleBatch.PREV_REWARDS: SampleBatch.REWARDS,\n }\n\n input_dict = {}\n for view_col, view_req in view_requirements.items():\n # Create batches of size 1 (single-agent input-dict).\n data_col = view_req.data_col or view_col\n if index == \"last\":\n data_col = last_mappings.get(data_col, data_col)\n # Range needed.\n if view_req.shift_from is not None:\n data = self[view_col][-1]\n traj_len = len(self[data_col])\n missing_at_end = traj_len % view_req.batch_repeat_value\n obs_shift = -1 if data_col in [\n SampleBatch.OBS, SampleBatch.NEXT_OBS\n ] else 0\n from_ = view_req.shift_from + obs_shift\n to_ = view_req.shift_to + obs_shift + 1\n if to_ == 0:\n to_ = None\n input_dict[view_col] = np.array([\n np.concatenate(\n [data,\n self[data_col][-missing_at_end:]])[from_:to_]\n ])\n # Single index.\n else:\n data = self[data_col][-1]\n input_dict[view_col] = np.array([data])\n else:\n # Index range.\n if isinstance(index, tuple):\n data = self[data_col][index[0]:index[1] +\n 1 if index[1] != -1 else None]\n input_dict[view_col] = np.array([data])\n # Single index.\n else:\n input_dict[view_col] = self[data_col][\n index:index + 1 if index != -1 else None]\n\n return SampleBatch(input_dict, seq_lens=np.array([1], dtype=np.int32))\n\n\n@PublicAPI\nclass MultiAgentBatch:\n \"\"\"A batch of experiences from multiple agents in the environment.\n\n Attributes:\n policy_batches (Dict[PolicyID, SampleBatch]): Mapping from policy\n ids to SampleBatches of experiences.\n count (int): The number of env steps in this batch.\n \"\"\"\n\n @PublicAPI\n def __init__(self, policy_batches: Dict[PolicyID, SampleBatch],\n env_steps: int):\n \"\"\"Initialize a MultiAgentBatch object.\n\n Args:\n policy_batches (Dict[PolicyID, SampleBatch]): Mapping from policy\n ids to SampleBatches of experiences.\n env_steps (int): The number of environment steps in the environment\n this batch contains. 
This will be less than the number of\n transitions this batch contains across all policies in total.\n \"\"\"\n\n for v in policy_batches.values():\n assert isinstance(v, SampleBatch)\n self.policy_batches = policy_batches\n # Called \"count\" for uniformity with SampleBatch.\n # Prefer to access this via the `env_steps()` method when possible\n # for clarity.\n self.count = env_steps\n\n @PublicAPI\n def env_steps(self) -> int:\n \"\"\"The number of env steps (there are >= 1 agent steps per env step).\n\n Returns:\n int: The number of environment steps contained in this batch.\n \"\"\"\n return self.count\n\n @PublicAPI\n def agent_steps(self) -> int:\n \"\"\"The number of agent steps (there are >= 1 agent steps per env step).\n\n Returns:\n int: The number of agent steps total in this batch.\n \"\"\"\n ct = 0\n for batch in self.policy_batches.values():\n ct += batch.count\n return ct\n\n @PublicAPI\n def timeslices(self, k: int) -> List[\"MultiAgentBatch\"]:\n \"\"\"Returns k-step batches holding data for each agent at those steps.\n\n For examples, suppose we have agent1 observations [a1t1, a1t2, a1t3],\n for agent2, [a2t1, a2t3], and for agent3, [a3t3] only.\n\n Calling timeslices(1) would return three MultiAgentBatches containing\n [a1t1, a2t1], [a1t2], and [a1t3, a2t3, a3t3].\n\n Calling timeslices(2) would return two MultiAgentBatches containing\n [a1t1, a1t2, a2t1], and [a1t3, a2t3, a3t3].\n\n This method is used to implement \"lockstep\" replay mode. Note that this\n method does not guarantee each batch contains only data from a single\n unroll. Batches might contain data from multiple different envs.\n \"\"\"\n from ray.rllib.evaluation.sample_batch_builder import \\\n SampleBatchBuilder\n\n # Build a sorted set of (eps_id, t, policy_id, data...)\n steps = []\n for policy_id, batch in self.policy_batches.items():\n for row in batch.rows():\n steps.append((row[SampleBatch.EPS_ID], row[\"t\"],\n row[\"agent_index\"], policy_id, row))\n steps.sort()\n\n finished_slices = []\n cur_slice = collections.defaultdict(SampleBatchBuilder)\n cur_slice_size = 0\n\n def finish_slice():\n nonlocal cur_slice_size\n assert cur_slice_size > 0\n batch = MultiAgentBatch(\n {k: v.build_and_reset()\n for k, v in cur_slice.items()}, cur_slice_size)\n cur_slice_size = 0\n finished_slices.append(batch)\n\n # For each unique env timestep.\n for _, group in itertools.groupby(steps, lambda x: x[:2]):\n # Accumulate into the current slice.\n for _, _, _, policy_id, row in group:\n cur_slice[policy_id].add_values(**row)\n cur_slice_size += 1\n # Slice has reached target number of env steps.\n if cur_slice_size >= k:\n finish_slice()\n assert cur_slice_size == 0\n\n if cur_slice_size > 0:\n finish_slice()\n\n assert len(finished_slices) > 0, finished_slices\n return finished_slices\n\n @staticmethod\n @PublicAPI\n def wrap_as_needed(\n policy_batches: Dict[PolicyID, SampleBatch],\n env_steps: int) -> Union[SampleBatch, \"MultiAgentBatch\"]:\n \"\"\"Returns SampleBatch or MultiAgentBatch, depending on given policies.\n\n Args:\n policy_batches (Dict[PolicyID, SampleBatch]): Mapping from policy\n ids to SampleBatch.\n env_steps (int): Number of env steps in the batch.\n\n Returns:\n Union[SampleBatch, MultiAgentBatch]: The single default policy's\n SampleBatch or a MultiAgentBatch (more than one policy).\n \"\"\"\n if len(policy_batches) == 1 and DEFAULT_POLICY_ID in policy_batches:\n return policy_batches[DEFAULT_POLICY_ID]\n return MultiAgentBatch(\n policy_batches=policy_batches, env_steps=env_steps)\n\n 
@staticmethod\n @PublicAPI\n def concat_samples(samples: List[\"MultiAgentBatch\"]) -> \"MultiAgentBatch\":\n \"\"\"Concatenates a list of MultiAgentBatches into a new MultiAgentBatch.\n\n Args:\n samples (List[MultiAgentBatch]): List of MultiagentBatch objects\n to concatenate.\n\n Returns:\n MultiAgentBatch: A new MultiAgentBatch consisting of the\n concatenated inputs.\n \"\"\"\n policy_batches = collections.defaultdict(list)\n env_steps = 0\n for s in samples:\n if not isinstance(s, MultiAgentBatch):\n raise ValueError(\n \"`MultiAgentBatch.concat_samples()` can only concat \"\n \"MultiAgentBatch types, not {}!\".format(type(s).__name__))\n for key, batch in s.policy_batches.items():\n policy_batches[key].append(batch)\n env_steps += s.env_steps()\n out = {}\n for key, batches in policy_batches.items():\n out[key] = SampleBatch.concat_samples(batches)\n return MultiAgentBatch(out, env_steps)\n\n @PublicAPI\n def copy(self) -> \"MultiAgentBatch\":\n \"\"\"Deep-copies self into a new MultiAgentBatch.\n\n Returns:\n MultiAgentBatch: The copy of self with deep-copied data.\n \"\"\"\n return MultiAgentBatch(\n {k: v.copy()\n for (k, v) in self.policy_batches.items()}, self.count)\n\n @PublicAPI\n def size_bytes(self) -> int:\n \"\"\"\n Returns:\n int: The overall size in bytes of all policy batches (all columns).\n \"\"\"\n return sum(b.size_bytes() for b in self.policy_batches.values())\n\n @DeveloperAPI\n def compress(self,\n bulk: bool = False,\n columns: Set[str] = frozenset([\"obs\", \"new_obs\"])) -> None:\n \"\"\"Compresses each policy batch (per column) in place.\n\n Args:\n bulk (bool): Whether to compress across the batch dimension (0)\n as well. If False will compress n separate list items, where n\n is the batch size.\n columns (Set[str]): Set of column names to compress.\n \"\"\"\n for batch in self.policy_batches.values():\n batch.compress(bulk=bulk, columns=columns)\n\n @DeveloperAPI\n def decompress_if_needed(self,\n columns: Set[str] = frozenset(\n [\"obs\", \"new_obs\"])) -> \"MultiAgentBatch\":\n \"\"\"Decompresses each policy batch (per column), if already compressed.\n\n Args:\n columns (Set[str]): Set of column names to decompress.\n\n Returns:\n MultiAgentBatch: This very MultiAgentBatch.\n \"\"\"\n for batch in self.policy_batches.values():\n batch.decompress_if_needed(columns)\n return self\n\n def __str__(self):\n return \"MultiAgentBatch({}, env_steps={})\".format(\n str(self.policy_batches), self.count)\n\n def __repr__(self):\n return \"MultiAgentBatch({}, env_steps={})\".format(\n str(self.policy_batches), self.count)\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.random.permutation", "numpy.shape", "numpy.all", "numpy.repeat" ] ]
Dizzy-cell/HOUV
[ "f7ed05d1b0bb775b22b682c82607252a7a734850" ]
[ "registration/model_utils_completion.py" ]
[ "import torch\nimport math\nimport os\nimport sys\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# proj_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n# sys.path.append(os.path.join(proj_dir, \"utils/Pointnet2.PyTorch/pointnet2\"))\n# import pointnet2_utils as pn2\n# sys.path.append(os.path.join(proj_dir, \"utils/emd\"))\n# import emd_module as emd\n# sys.path.append(os.path.join(proj_dir, \"utils/ChamferDistancePytorch\"))\n# from chamfer3D import dist_chamfer_3D\n# from fscore import fscore\n\nsys.path.append(\"../utils\")\nfrom metrics import cd, fscore, emd\nfrom mm3d_pn2 import furthest_point_sample, gather_points, grouping_operation, ball_query, three_nn\n\n# from ..utils import cd, fscore, emd, furthest_point_sample, gather_points, grouping_operation, ball_query, three_nn\n\n\nclass EF_expansion(nn.Module):\n def __init__(self, input_size, output_size=64, step_ratio=2, k=4):\n super(EF_expansion, self).__init__()\n self.step_ratio = step_ratio\n self.k = k\n self.input_size = input_size\n self.output_size = output_size\n\n self.conv1 = nn.Conv2d(input_size * 2, output_size, 1)\n self.conv2 = nn.Conv2d(input_size * 2 + output_size,\n output_size * step_ratio, 1)\n self.conv3 = nn.Conv2d(output_size, output_size, 1)\n\n def forward(self, x):\n batch_size, _, num_points = x.size()\n\n input_edge_feature = get_graph_feature(x, self.k,\n minus_center=False).permute(\n 0, 1, 3,\n 2).contiguous() # B C K N\n edge_feature = self.conv1(input_edge_feature)\n edge_feature = F.relu(torch.cat((edge_feature, input_edge_feature), 1))\n\n edge_feature = F.relu(self.conv2(edge_feature)) # B C K N\n edge_feature = edge_feature.permute(0, 2, 3, 1).contiguous().view(\n batch_size, self.k, num_points * self.step_ratio,\n self.output_size).permute(0, 3, 1, 2) # B C K N\n\n edge_feature = self.conv3(edge_feature)\n edge_feature, _ = torch.max(edge_feature, 2)\n\n return edge_feature\n\n\ndef attention(query, key, value, mask=None):\n d_k = query.size(-1)\n scores = torch.matmul(query,\n key.transpose(-2, -1).contiguous()) / math.sqrt(\n d_k) # B x 4 x points x points\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n p_attn = F.softmax(scores, dim=-1)\n return torch.matmul(p_attn, value), p_attn\n\n\ndef calc_cd(output, gt, calc_f1=False):\n # cham_loss = dist_chamfer_3D.chamfer_3DDist()\n cham_loss = cd()\n dist1, dist2, _, _ = cham_loss(gt, output)\n\n cd_p = (torch.sqrt(dist1).mean(1) + torch.sqrt(dist2).mean(1)) / 2\n cd_t = (dist1.mean(1) + dist2.mean(1))\n if calc_f1:\n f1, _, _ = fscore(dist1, dist2)\n return cd_p, cd_t, f1\n else:\n return cd_p, cd_t\n\n\ndef calc_cd_percent(output, gt, calc_f1=False, percent = 1):\n # cham_loss = dist_chamfer_3D.chamfer_3DDist()\n point = output.shape[1]\n k = int(point * percent)\n\n cham_loss = cd()\n dist1, dist2, _, _ = cham_loss(gt, output)\n\n dist1, _ = dist1.topk(k, dim=1, largest = False, sorted=True)\n dist2, _ = dist2.topk(k, dim=1, largest = False, sorted=True)\n\n cd_p = torch.sqrt(dist1).mean(1) \n cd_t = torch.sqrt(dist2).mean(1)\n if calc_f1:\n f1, _, _ = fscore(dist1, dist2)\n return cd_p, cd_t, f1\n else:\n return cd_p, cd_t\n\n\ndef calc_cd_percent_aligned(output, gt, percent = 1):\n point = output.shape[1]\n k = int(point * percent)\n cham_loss = cd()\n\n dist1, dist2, idx1, idx2 = cham_loss(gt, output)\n\n dist1, idxx1 = dist1.topk(k, dim=1, largest = False, sorted=True)\n dist2, idxx2 = dist2.topk(k, dim=1, largest = False, sorted=True)\n\n cd_p = torch.sqrt(dist1).mean(1) \n cd_t = 
torch.sqrt(dist2).mean(1)\n\n return cd_p, cd_t, idx1, idx2, idxx1, idxx2\n\n\ndef generate_sent_masks(batch_size, max_seq_length, source_lengths):\n \"\"\" Generate sentence masks for encoder hidden states.\n returns enc_masks (Tensor): Tensor of sentence masks of shape (b, max_seq_length),where max_seq_length = max source length \"\"\"\n enc_masks = torch.zeros(batch_size, max_seq_length, dtype=torch.float)\n for e_id in range(batch_size):\n enc_masks[e_id, :source_lengths[e_id]] = 1\n return enc_masks\n\ndef calc_cd_percent_len(output, gt, enc1, enc2, calc_f1=False, percent = 1):\n # cham_loss = dist_chamfer_3D.chamfer_3DDist()\n\n k = int(2048 * percent)\n\n cham_loss = cd()\n\n dist1, dist2, _, _ = cham_loss(gt, output)\n\n # enc1 = generate_sent_masks(output.shape[0], 2048, output_len.cpu().numpy()).cuda()\n # enc2 = generate_sent_masks(gt.shape[0], 2048, gt_len.cpu().numpy()).cuda()\n\n # dist1 = dist1 * enc1\n # dist2 = dist2 * enc2\n\n # from IPython import embed\n # embed()\n # dist1 = = dist1\n # dist1, _ = dist1.topk(k, dim=1, largest = False, sorted=True)\n # dist2, _ = dist1.topk(k, dim=1, largest = False, sorted=True)\n\n cd_p = (torch.sqrt(dist1).mean(1) + torch.sqrt(dist2).mean(1)) / 2\n cd_t = (dist1.mean(1) + dist2.mean(1))\n if calc_f1:\n f1, _, _ = fscore(dist1, dist2)\n return cd_p, cd_t, f1\n else:\n return cd_p, cd_t\n\n\ndef loss_view(src, tgt, dim = 0, percent = 1):\n mask = torch.zeros_like(src)\n ones = torch.ones((1,1,3)).cuda()\n ones[:,:,dim] = 0\n mask = mask + ones\n a = src * mask\n tgt = tgt * mask\n cd_t, cd_p = calc_cd_percent(a, tgt, percent = percent)\n\n return cd_t, cd_p\n\n\n\ndef calc_emd(output, gt, eps=0.005, iterations=50):\n # emd_loss = emd.emdModule()\n emd_loss = emd()\n dist, _ = emd_loss(output, gt, eps, iterations)\n emd_out = torch.sqrt(dist).mean(1)\n return emd_out\n\n\ndef edge_preserve_sampling(feature_input, point_input, num_samples, k=10):\n batch_size = feature_input.size()[0]\n feature_size = feature_input.size()[1]\n num_points = feature_input.size()[2]\n\n p_idx = furthest_point_sample(point_input, num_samples)\n point_output = gather_points(\n point_input.transpose(1, 2).contiguous(),\n p_idx).transpose(1, 2).contiguous()\n\n pk = int(min(k, num_points))\n _, pn_idx = knn_point(pk, point_input, point_output)\n pn_idx = pn_idx.detach().int()\n neighbor_feature = gather_points(feature_input,\n pn_idx.view(batch_size,\n num_samples * pk)).view(\n batch_size, feature_size,\n num_samples, pk)\n neighbor_feature, _ = torch.max(neighbor_feature, 3)\n\n center_feature = grouping_operation(feature_input,\n p_idx.unsqueeze(2)).view(\n batch_size, -1, num_samples)\n\n net = torch.cat((center_feature, neighbor_feature), 1)\n\n return net, p_idx, pn_idx, point_output\n\n\ndef get_edge_features(x, idx):\n batch_size, num_points, k = idx.size()\n device = torch.device('cuda')\n idx_base = torch.arange(0, batch_size, device=device).view(-1, 1,\n 1) * num_points\n idx = idx + idx_base\n idx = idx.view(-1)\n x = x.squeeze(2)\n _, num_dims, _ = x.size()\n x = x.transpose(2, 1).contiguous()\n feature = x.view(batch_size * num_points, -1)[idx, :]\n feature = feature.view(batch_size, num_points, k,\n num_dims).permute(0, 3, 2, 1) # B, C, K, N\n return feature\n\n\ndef gen_grid(num_grid_point):\n x = torch.linspace(-0.05, 0.05, steps=num_grid_point)\n x, y = torch.meshgrid(x, x)\n grid = torch.stack([x, y], axis=-1).view(2, num_grid_point**2)\n return grid\n\n\ndef gen_1d_grid(num_grid_point):\n x = torch.linspace(-0.05, 0.05, num_grid_point)\n 
grid = x.view(1, num_grid_point)\n return grid\n\n\ndef gen_grid_up(up_ratio, grid_size=0.2):\n sqrted = int(math.sqrt(up_ratio)) + 1\n for i in range(1, sqrted + 1).__reversed__():\n if (up_ratio % i) == 0:\n num_x = i\n num_y = up_ratio // i\n break\n\n grid_x = torch.linspace(-grid_size, grid_size, steps=num_x)\n grid_y = torch.linspace(-grid_size, grid_size, steps=num_y)\n\n x, y = torch.meshgrid(grid_x, grid_y) # x, y shape: (2, 1)\n grid = torch.stack([x, y], dim=-1).view(-1, 2).transpose(0, 1).contiguous()\n return grid\n\n\ndef get_graph_feature(x, k=20, minus_center=True):\n idx = knn(x, k=k)\n batch_size, num_points, _ = idx.size()\n device = torch.device('cuda')\n\n idx_base = torch.arange(0, batch_size, device=device).view(-1, 1,\n 1) * num_points\n\n idx = idx + idx_base\n\n idx = idx.view(-1)\n\n _, num_dims, _ = x.size()\n\n x = x.transpose(2, 1).contiguous()\n feature = x.view(batch_size * num_points, -1)[idx, :]\n feature = feature.view(batch_size, num_points, k, num_dims)\n x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)\n\n if minus_center:\n feature = torch.cat((x, feature - x), dim=3).permute(0, 3, 1, 2)\n else:\n feature = torch.cat((x, feature), dim=3).permute(0, 3, 1, 2)\n return feature\n\n\ndef get_repulsion_loss(pred, nsample=20, radius=0.07):\n # pred: (batch_size, npoint,3)\n # idx = pn2.ball_query(radius, nsample, pred, pred)\n idx = knn(pred.transpose(1, 2).contiguous(), nsample).int()\n pred_flipped = pred.transpose(1, 2).contiguous()\n grouped_pred = grouping_operation(pred_flipped,\n idx) # (B, C, npoint, nsample)\n grouped_pred -= pred_flipped.unsqueeze(-1)\n\n # get the uniform loss\n h = 0.03\n dist_square = torch.sum(grouped_pred**2, dim=1)\n dist_square, idx = torch.topk(-dist_square, 5)\n dist_square = -dist_square[:, :, 1:] # remove the first one\n dist_square = torch.max(\n torch.FloatTensor([1e-12]).expand_as(dist_square).cuda(), dist_square)\n dist = torch.sqrt(dist_square)\n weight = torch.exp(-dist_square / h**2)\n uniform_loss = torch.mean(radius - dist * weight)\n return uniform_loss\n\n\ndef get_uniform_loss(pcd,\n percentages=[0.004, 0.006, 0.008, 0.010, 0.012],\n radius=1.0):\n B, N, C = pcd.size()\n npoint = int(N * 0.05)\n loss = 0\n for p in percentages:\n nsample = int(N * p)\n r = math.sqrt(p * radius)\n disk_area = math.pi * (radius**2) * p / nsample\n new_xyz = gather_points(\n pcd.transpose(1, 2).contiguous(),\n furthest_point_sample(pcd, npoint)).transpose(1, 2).contiguous()\n idx = ball_query(0, r, nsample, pcd, new_xyz)\n expect_len = math.sqrt(disk_area)\n\n grouped_pcd = grouping_operation(pcd.transpose(1, 2).contiguous(), idx)\n grouped_pcd = grouped_pcd.permute(0, 2, 3,\n 1).contiguous().view(-1, nsample, 3)\n\n var, _ = knn_point(2, grouped_pcd, grouped_pcd)\n uniform_dis = -var[:, :, 1:]\n\n uniform_dis = torch.sqrt(torch.abs(uniform_dis + 1e-8))\n uniform_dis = torch.mean(uniform_dis, dim=-1)\n uniform_dis = ((uniform_dis - expect_len)**2 / (expect_len + 1e-8))\n\n mean = torch.mean(uniform_dis)\n mean = mean * math.pow(p * 100, 2)\n loss += mean\n return loss / len(percentages)\n\n\ndef index_points(points, idx):\n device = points.device\n B = points.shape[0]\n view_shape = list(idx.shape)\n view_shape[1:] = [1] * (len(view_shape) - 1)\n repeat_shape = list(idx.shape)\n repeat_shape[0] = 1\n batch_indices = torch.arange(\n B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)\n new_points = points[batch_indices, idx, :]\n return new_points\n\n\ndef knn(x, k):\n inner = -2 * 
torch.matmul(x.transpose(2, 1).contiguous(), x)\n xx = torch.sum(x**2, dim=1, keepdim=True)\n pairwise_distance = -xx - inner - xx.transpose(2, 1).contiguous()\n idx = pairwise_distance.topk(k=k, dim=-1)[1]\n return idx\n\n\ndef knn_point(pk, point_input, point_output):\n m = point_output.size()[1]\n n = point_input.size()[1]\n\n inner = -2 * torch.matmul(point_output,\n point_input.transpose(2, 1).contiguous())\n xx = torch.sum(point_output**2, dim=2, keepdim=True).repeat(1, 1, n)\n yy = torch.sum(point_input**2, dim=2,\n keepdim=False).unsqueeze(1).repeat(1, m, 1)\n pairwise_distance = -xx - inner - yy\n dist, idx = pairwise_distance.topk(k=pk, dim=-1)\n return dist, idx\n\n\ndef knn_point_all(pk, point_input, point_output):\n m = point_output.size()[1]\n n = point_input.size()[1]\n\n inner = -2 * torch.matmul(point_output,\n point_input.transpose(2, 1).contiguous())\n xx = torch.sum(point_output**2, dim=2, keepdim=True).repeat(1, 1, n)\n yy = torch.sum(point_input**2, dim=2,\n keepdim=False).unsqueeze(1).repeat(1, m, 1)\n pairwise_distance = -xx - inner - yy\n dist, idx = pairwise_distance.topk(k=pk, dim=-1)\n\n return dist, idx\n\n\ndef symmetric_sample(points, num=512):\n p1_idx = furthest_point_sample(points, num)\n input_fps = gather_points(points.transpose(1, 2).contiguous(),\n p1_idx).transpose(1, 2).contiguous()\n x = torch.unsqueeze(input_fps[:, :, 0], dim=2)\n y = torch.unsqueeze(input_fps[:, :, 1], dim=2)\n z = torch.unsqueeze(-input_fps[:, :, 2], dim=2)\n input_fps_flip = torch.cat([x, y, z], dim=2)\n input_fps = torch.cat([input_fps, input_fps_flip], dim=1)\n return input_fps\n\n\ndef three_nn_upsampling(target_points, source_points):\n dist, idx = three_nn(target_points, source_points)\n dist = torch.max(dist, torch.ones(1).cuda() * 1e-10)\n norm = torch.sum((1.0 / dist), 2, keepdim=True)\n norm = norm.repeat(1, 1, 3)\n weight = (1.0 / dist) / norm\n\n return idx, weight\n" ]
[ [ "torch.cat", "torch.stack", "torch.ones", "torch.meshgrid", "torch.exp", "torch.topk", "torch.sum", "torch.sqrt", "torch.FloatTensor", "torch.unsqueeze", "torch.abs", "torch.zeros_like", "torch.zeros", "torch.device", "torch.max", "torch.linspace", "torch.nn.Conv2d", "torch.nn.functional.softmax", "torch.matmul", "torch.arange", "torch.mean" ] ]
alphadl/hiersumm
[ "58c4baeb709bc0550f85253cc12192054b0c7901" ]
[ "src/abstractive/data_loader.py" ]
[ "import gc\nimport glob\nimport random\n\nimport torch\n\nfrom others.logging import logger\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\nclass AbstractiveBatch(object):\n def _pad(self, data, height, width, pad_id):\n \"\"\" ? \"\"\"\n rtn_data = [d + [pad_id] * (width - len(d)) for d in data]\n rtn_length = [len(d) for d in data]\n rtn_data = rtn_data + [[pad_id] * width] * (height - len(data))\n rtn_length = rtn_length + [0] * (height - len(data))\n\n return rtn_data, rtn_length\n\n def __init__(self, data=None, hier=False, pad_id=None, device=None, is_test=False):\n \"\"\"Create a Batch from a list of examples.\"\"\"\n if data is not None:\n self.batch_size = len(data)\n src = [x[0] for x in data]\n tgt = [x[1] for x in data]\n\n if (hier):\n max_nblock = max([len(e) for e in src])\n max_ntoken = max([max([len(p) for p in e]) for e in src])\n _src = [self._pad(e, max_nblock, max_ntoken, pad_id) for e in src]\n src = torch.stack([torch.tensor(e[0]) for e in _src])\n\n\n else:\n _src = self._pad(src, width=max([len(d) for d in src]), height=len(src), pad_id=pad_id)\n src = torch.tensor(_src[0]) # batch_size, src_len\n\n setattr(self, 'src', src.to(device))\n\n _tgt = self._pad(tgt, width=max([len(d) for d in tgt]), height=len(tgt), pad_id=pad_id)\n tgt = torch.tensor(_tgt[0]).transpose(0, 1)\n setattr(self, 'tgt', tgt.to(device))\n\n if (is_test):\n tgt_str = [x[2] for x in data]\n setattr(self, 'tgt_str', tgt_str)\n\n def __len__(self):\n return self.batch_size\n\n\n\n\ndef load_dataset(args, corpus_type, shuffle):\n \"\"\"\n Dataset generator. Don't do extra stuff here, like printing,\n because they will be postponed to the first loading time.\n Args:\n corpus_type: 'train' or 'valid'\n Returns:\n A list of dataset, the dataset(s) are lazily loaded.\n \"\"\"\n assert corpus_type in [\"train\", \"valid\", \"test\"]\n\n def _lazy_dataset_loader(pt_file, corpus_type):\n dataset = torch.load(pt_file)\n logger.info('Loading %s dataset from %s, number of examples: %d' %\n (corpus_type, pt_file, len(dataset)))\n return dataset\n\n # Sort the glob output by file name (by increasing indexes).\n pts = sorted(glob.glob(args.data_path + '.' + corpus_type + '.[0-9]*.pt'))\n if pts:\n if (shuffle):\n random.shuffle(pts)\n\n for pt in pts:\n yield _lazy_dataset_loader(pt, corpus_type)\n else:\n # Only one inputters.*Dataset, simple!\n pt = args.data_path + '.' 
+ corpus_type + '.pt'\n yield _lazy_dataset_loader(pt, corpus_type)\n\n\nclass AbstractiveDataloader(object):\n def __init__(self, args, datasets, symbols, batch_size,\n device, shuffle, is_test):\n self.args = args\n self.datasets = datasets\n self.symbols = symbols\n self.batch_size = batch_size\n self.device = device\n self.shuffle = shuffle\n self.is_test = is_test\n self.cur_iter = self._next_dataset_iterator(datasets)\n assert self.cur_iter is not None\n\n def __iter__(self):\n dataset_iter = (d for d in self.datasets)\n while self.cur_iter is not None:\n for batch in self.cur_iter:\n yield batch\n self.cur_iter = self._next_dataset_iterator(dataset_iter)\n\n def _next_dataset_iterator(self, dataset_iter):\n try:\n # Drop the current dataset for decreasing memory\n if hasattr(self, \"cur_dataset\"):\n self.cur_dataset = None\n gc.collect()\n del self.cur_dataset\n gc.collect()\n\n self.cur_dataset = next(dataset_iter)\n except StopIteration:\n return None\n\n return AbstracticeIterator(args = self.args,\n dataset=self.cur_dataset, symbols=self.symbols, batch_size=self.batch_size,\n device=self.device, shuffle=self.shuffle, is_test=self.is_test)\n\n\nclass AbstracticeIterator(object):\n def __init__(self, args, dataset, symbols, batch_size, device=None, is_test=False,\n shuffle=True):\n self.args = args\n self.batch_size, self.is_test, self.dataset = batch_size, is_test, dataset\n self.iterations = 0\n self.device = device\n self.shuffle = shuffle\n\n # self.secondary_sort_key = lambda x: len(x[0])\n # self.secondary_sort_key = lambda x: sum([len(xi) for xi in x[0]])\n # self.prime_sort_key = lambda x: len(x[1])\n self.secondary_sort_key = lambda x: sum([len(xi) for xi in x[0]])\n self.prime_sort_key = lambda x: len(x[1])\n self._iterations_this_epoch = 0\n\n\n self.symbols = symbols\n\n def data(self):\n if self.shuffle:\n random.shuffle(self.dataset)\n xs = self.dataset\n return xs\n\n def preprocess(self, ex):\n\n sos_id = self.symbols['BOS']\n eos_id = self.symbols['EOS']\n eot_id = self.symbols['EOT']\n eop_id = self.symbols['EOP']\n eoq_id = self.symbols['EOQ']\n src, tgt, tgt_str = ex['src'], ex['tgt'], ex['tgt_str']\n if (not self.args.hier):\n src = sum([p + [eop_id] for p in src], [])[:-1][:self.args.trunc_src_ntoken] + [\n eos_id]\n return src, tgt, tgt_str\n\n return src[:self.args.trunc_src_nblock], tgt, tgt_str\n\n def simple_batch_size_fn(self, new, count):\n src, tgt = new[0], new[1]\n\n global max_src_in_batch, max_tgt_in_batch\n if count == 1:\n max_src_in_batch = 0\n if (self.args.hier):\n max_src_in_batch = max(max_src_in_batch, sum([len(p) for p in src]))\n else:\n max_src_in_batch = max(max_src_in_batch, len(src))\n src_elements = count * max_src_in_batch\n return src_elements\n\n def get_batch(self, data, batch_size):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = self.simple_batch_size_fn(ex, len(minibatch))\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], self.simple_batch_size_fn(ex, 1)\n if minibatch:\n yield minibatch\n\n def batch_buffer(self, data, batch_size):\n minibatch, size_so_far = [], 0\n for ex in data:\n ex = self.preprocess(ex)\n minibatch.append(ex)\n size_so_far = self.simple_batch_size_fn(ex, len(minibatch))\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif 
size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], self.simple_batch_size_fn(ex, 1)\n if minibatch:\n yield minibatch\n\n def create_batches(self):\n \"\"\" Create batches \"\"\"\n data = self.data()\n for buffer in self.batch_buffer(data, self.batch_size * 100):\n if (self.args.mode != 'train'):\n p_batch = self.get_batch(\n sorted(sorted(buffer, key=self.prime_sort_key), key=self.secondary_sort_key),\n self.batch_size)\n else:\n p_batch = self.get_batch(\n sorted(sorted(buffer, key=self.secondary_sort_key), key=self.prime_sort_key),\n self.batch_size)\n\n p_batch = list(p_batch)\n\n if (self.shuffle):\n random.shuffle(p_batch)\n for b in p_batch:\n if(len(b)==0):\n continue\n yield b\n\n def __iter__(self):\n\n while True:\n self.batches = self.create_batches()\n for idx, minibatch in enumerate(self.batches):\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n batch = AbstractiveBatch(minibatch, self.args.hier, self.symbols['PAD'], self.device, self.is_test)\n\n yield batch\n return" ]
[ [ "torch.tensor", "torch.load" ] ]
pbuslaev/freud
[ "ed83bdc63ee8fddba78f070ce9ddf2a0021d67b8" ]
[ "tests/test_density_RDF.py" ]
[ "import numpy as np\nimport numpy.testing as npt\nimport freud\nimport matplotlib\nimport unittest\nimport util\nfrom test_managedarray import TestManagedArray\nmatplotlib.use('agg')\n\n\nclass TestRDF(unittest.TestCase):\n def test_generateR(self):\n r_max = 5\n for r_min in [0, 0.05, 0.1, 1.0, 3.0]:\n bins = round((r_max-r_min)/0.1)\n dr = (r_max - r_min) / bins\n\n # make sure the radius for each bin is generated correctly\n r_list = np.array([r_min + dr*(i+1/2) for i in range(bins) if\n r_min + dr*(i+1/2) < r_max])\n rdf = freud.density.RDF(bins, r_max, r_min=r_min)\n npt.assert_allclose(rdf.bin_centers, r_list, rtol=1e-4, atol=1e-4)\n npt.assert_allclose((rdf.bin_edges+dr/2)[:-1], r_list, rtol=1e-4,\n atol=1e-4)\n\n def test_attribute_access(self):\n r_max = 10.0\n bins = 10\n num_points = 100\n box_size = r_max*3.1\n box, points = freud.data.make_random_system(\n box_size, num_points, is2D=True)\n rdf = freud.density.RDF(r_max=r_max, bins=bins)\n\n # Test protected attribute access\n with self.assertRaises(AttributeError):\n rdf.rdf\n with self.assertRaises(AttributeError):\n rdf.box\n with self.assertRaises(AttributeError):\n rdf.n_r\n\n rdf.compute((box, points), reset=False)\n\n # Test if accessible now\n rdf.rdf\n rdf.box\n rdf.n_r\n\n rdf.compute((box, points))\n\n # Test if accessible now\n rdf.rdf\n rdf.box\n rdf.n_r\n\n def test_invalid_rdf(self):\n # Make sure that invalid RDF objects raise errors\n with self.assertRaises(ValueError):\n freud.density.RDF(r_max=-1, bins=10)\n with self.assertRaises(ValueError):\n freud.density.RDF(r_max=1, bins=0)\n with self.assertRaises(ValueError):\n freud.density.RDF(r_max=1, bins=10, r_min=2)\n\n def test_random_point(self):\n r_max = 10.0\n bins = 10\n num_points = 10000\n tolerance = 0.1\n box_size = r_max*3.1\n\n for i, r_min in enumerate([0, 0.05, 0.1, 1.0, 3.0]):\n box, points = freud.data.make_random_system(box_size, num_points)\n test_set = util.make_raw_query_nlist_test_set(\n box, points, points, \"ball\", r_max, 0, True)\n for nq, neighbors in test_set:\n rdf = freud.density.RDF(bins, r_max, r_min)\n\n if i < 3:\n rdf.compute(nq, neighbors=neighbors, reset=False)\n else:\n rdf.compute(nq, neighbors=neighbors)\n self.assertTrue(rdf.box == box)\n correct = np.ones(bins, dtype=np.float32)\n npt.assert_allclose(rdf.rdf, correct, atol=tolerance)\n\n # Numerical integration to compute the running coordination\n # number will be highly inaccurate, so we can only test up to\n # a limited precision. 
Also, since dealing with nonzero r_min\n # values requires extrapolation, we only test when r_min=0.\n ndens = points.shape[0]/box.volume\n dr = (r_max - r_min) / bins\n bin_boundaries = np.array([r_min + dr*i for i in range(bins+1)\n if r_min + dr*i <= r_max])\n bin_volumes = 4/3*np.pi*np.diff(bin_boundaries**3)\n avg_counts = rdf.rdf*ndens*bin_volumes\n npt.assert_allclose(rdf.n_r, np.cumsum(avg_counts),\n rtol=tolerance)\n\n def test_repr(self):\n rdf = freud.density.RDF(r_max=10, bins=100, r_min=0.5)\n self.assertEqual(str(rdf), str(eval(repr(rdf))))\n\n def test_repr_png(self):\n r_max = 10.0\n bins = 10\n num_points = 10\n box_size = r_max*3.1\n box, points = freud.data.make_random_system(box_size, num_points)\n rdf = freud.density.RDF(bins, r_max)\n\n with self.assertRaises(AttributeError):\n rdf.plot()\n self.assertEqual(rdf._repr_png_(), None)\n\n rdf.compute((box, points), reset=False)\n rdf.plot()\n rdf._repr_png_()\n\n def test_points_ne_query_points(self):\n r_max = 100.0\n bins = 100\n box_size = r_max*5\n box = freud.box.Box.square(box_size)\n\n rdf = freud.density.RDF(bins, r_max)\n\n query_points = []\n supposed_RDF = [0]\n N = 100\n\n # With points closely centered around the origin,\n # the cumulative average bin counts should be same as\n # having a single point at the origin.\n # Also, we can check for whether points are not considered against\n # each other.\n dr = r_max/bins\n points = [[dr/4, 0, 0], [-dr/4, 0, 0], [0, dr/4, 0], [0, -dr/4, 0]]\n for r in rdf.bin_centers:\n for k in range(N):\n query_points.append([r * np.cos(2*np.pi*k/N),\n r * np.sin(2*np.pi*k/N), 0])\n supposed_RDF.append(supposed_RDF[-1] + N)\n supposed_RDF = np.array(supposed_RDF[1:])\n\n test_set = util.make_raw_query_nlist_test_set(\n box, points, query_points, \"ball\", r_max, 0, False)\n for nq, neighbors in test_set:\n rdf = freud.density.RDF(bins, r_max)\n rdf.compute(nq, query_points, neighbors=neighbors)\n\n npt.assert_allclose(rdf.n_r, supposed_RDF, atol=1e-6)\n\n def test_empty_histogram(self):\n r_max = 0.5\n bins = 10\n box_size = 5\n box = freud.box.Box.cube(box_size)\n rdf = freud.density.RDF(bins, r_max)\n points = [[0, 0, 0], [2, 2, 2]]\n rdf.compute(system=(box, points))\n\n # Test that properties are accessible even though there's no data\n npt.assert_array_equal(rdf.rdf, np.zeros(bins))\n npt.assert_array_equal(rdf.n_r, np.zeros(bins))\n\n def test_bin_precision(self):\n # Ensure bin edges are precise\n bins = 500\n r_min = 0\n r_max = 50\n rdf = freud.density.RDF(bins=bins, r_max=r_max, r_min=r_min)\n expected_bin_edges = np.histogram_bin_edges(\n np.array([0], dtype=np.float32), bins=bins, range=[r_min, r_max])\n npt.assert_allclose(rdf.bin_edges, expected_bin_edges, atol=1e-6)\n\n\nclass TestRDFManagedArray(TestManagedArray, unittest.TestCase):\n def build_object(self):\n self.obj = freud.density.RDF(50, 3)\n\n @property\n def computed_properties(self):\n return ['rdf', 'n_r', 'bin_counts']\n\n def compute(self):\n box = freud.box.Box.cube(10)\n num_points = 100\n points = np.random.rand(\n num_points, 3)*box.L - box.L/2\n self.obj.compute((box, points), neighbors={'r_max': 2})\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "matplotlib.use", "numpy.testing.assert_allclose", "numpy.array", "numpy.sin", "numpy.random.rand", "numpy.zeros", "numpy.ones", "numpy.diff", "numpy.cos", "numpy.cumsum" ] ]
abefetterman/rl-testing
[ "557eadd5895e629632e701a27cdafde96e210215" ]
[ "buffer.py" ]
[ "import numpy as np\nimport random\n\nclass ReplayBuffer(object):\n def __init__(self, len):\n self.buf = []\n self.len = len\n def __len__(self):\n return len(self.buf)\n def add(self, new):\n if len(self.buf) >= self.len:\n self.buf.pop(0)\n self.buf.append(new)\n def sample(self, count):\n s = random.sample(self.buf, count)\n return [x for x in zip(*s)]\n\nclass PriorityBuffer(object):\n def __init__(self, len, alpha=1, beta=1):\n self.buf = []\n self.priorities = []\n self.priorities_max = 1\n self.alpha = alpha\n self.beta = beta\n self.len = len\n def __len__(self):\n return len(self.buf)\n def add(self, new):\n if len(self.buf) >= self.len:\n self.buf.pop(0)\n self.priorities.pop(0)\n self.buf.append(new)\n self.priorities.append(self.priorities_max)\n def sample(self, count):\n buffer_size = len(self.buf)\n p = np.array(self.priorities) ** self.alpha\n p = p / np.sum(p)\n idxs = np.random.choice(buffer_size, count, p=p)\n p_choice = p[idxs]\n buf_choice = [self.buf[x] for x in idxs]\n is_weights = (count * p_choice) ** ( - self.beta)\n is_weights = is_weights / np.max(is_weights)\n sample = [x for x in zip(*buf_choice)]\n return sample + [idxs, is_weights]\n def update_priorities(self, idxs, td_error):\n for i,e in zip(idxs,td_error):\n self.priorities[i] = e\n self.priorities_max = max(self.priorities)\n" ]
[ [ "numpy.max", "numpy.sum", "numpy.array", "numpy.random.choice" ] ]
PlasmaControl/bes-edge-ml
[ "a6540df4f0bdb7d39c24f23de36fbeba4c1aeb18" ]
[ "bes_data_tools/bes_data.py" ]
[ "\"\"\"\nBES_Data class\n\nFetches and stores BES metadata, relevant signals, and (optionally) BES signals\n\"\"\"\n\nfrom pathlib import Path\nimport time\nimport numpy as np\nimport h5py\nimport MDSplus\n\n\nclass BES_Data(object):\n _points = ['ip',\n 'bt',\n 'pinj',\n 'pinj_15l',\n 'vinj_15l',\n 'pinj_15r',\n 'vinj_15r',\n ]\n\n def __init__(self,\n shot=None,\n channels=None,\n verbose=False,\n get_signals=False):\n t1 = time.time()\n self.connection = MDSplus.Connection('atlas.gat.com')\n if shot is None:\n shot = 176778\n if channels is None:\n channels = np.arange(1, 65)\n channels = np.array(channels)\n self.shot = shot\n self.channels = channels\n self.verbose = verbose\n self.time = None\n self.signals = None\n self.metadata = None\n print(f'{self.shot}: start')\n # get time array\n ptdata = f'ptdata(\"besfu01\", {self.shot})'\n try:\n sigtime = self.connection.get(f'dim_of({ptdata})')\n self.time = np.array(sigtime).round(4)\n except:\n self.time = None\n print(f'{self.shot}: ERROR no time data')\n return\n n_time = self.connection.get(f'size({ptdata})')\n self.n_time = n_time.data()\n assert (self.n_time == self.time.size)\n try:\n # get metadata\n self.connection.openTree('bes', self.shot)\n r_position = np.array(self.connection.get(r'\\bes_r')).round(2)\n z_position = np.array(self.connection.get(r'\\bes_z')).round(2)\n start_time = self.connection.get(r'\\bes_ts')\n self.connection.closeTree('bes', self.shot)\n except:\n print(f'{self.shot}: ERROR getting metadata')\n self.time = None\n return\n if not start_time == self.time[0]:\n print(f'{self.shot}: ALERT inconsistent start times: ',\n start_time, self.time[0])\n self.metadata = {'shot': self.shot,\n 'delta_time': np.diff(self.time[0:100]).mean().round(\n 4),\n 'start_time': self.time[0],\n 'stop_time': self.time[-1],\n 'n_time': self.n_time,\n 'time_units': 'ms',\n 'r_position': r_position,\n 'z_position': z_position,\n 'rz_units': 'cm',\n 'date': ''}\n # get ip, beams, etc.\n for point_name in self._points:\n try:\n if 'inj' in point_name:\n self.connection.openTree('nb', self.shot)\n data = np.array(self.connection.get(f'\\\\{point_name}'))\n data_time = np.array(\n self.connection.get(f'dim_of(\\\\{point_name})'))\n if point_name == 'pinj':\n date = self.connection.get(\n f'getnci(\\\\{point_name}, \"time_inserted\")')\n self.metadata['date'] = date.date.decode('utf-8')\n self.connection.closeTree('nb', self.shot)\n else:\n ptdata = f'_n = ptdata(\"{point_name}\", {self.shot})'\n data = np.array(self.connection.get(ptdata))\n data_time = np.array(self.connection.get('dim_of(_n)'))\n time_mask = np.logical_and(data_time >= self.time[0],\n data_time <= self.time[-1])\n data = data[time_mask]\n data_time = data_time[time_mask]\n except:\n if point_name == 'pinj_15l':\n self.time = None\n print(f'{self.shot}: ERROR missing pinj_15l')\n return\n print(f'{self.shot}: INVALID data node for {point_name}')\n data = h5py.Empty(dtype='f')\n data_time = h5py.Empty(dtype='f')\n assert (data.shape == data_time.shape)\n setattr(self, point_name, data)\n if point_name == 'pinj' or 'inj' not in point_name:\n setattr(self, f'{point_name}_time', data_time)\n if point_name =='pinj_15l':\n if data.max() < 500e3:\n self.time = None\n print(f'{self.shot}: ERROR invalid pinj_15l')\n return\n print(f'{self.shot}: {self.n_time} time points')\n t2 = time.time()\n print(f'{self.shot}: Metadata time = {t2 - t1:.2f} s')\n if get_signals:\n self.get_signals()\n\n def get_signals(self):\n t1 = time.time()\n print(f'{self.shot}: fetching 
{self.channels.size} signals')\n tdi_vars = []\n tdi_assignments = []\n for channel in self.channels:\n var = f'_n{channel:02d}_{self.shot}'\n tdi_vars.append(var)\n tmp = f'{var} = ptdata(\"besfu{channel:02d}\", {self.shot})'\n tdi_assignments.append(tmp)\n self.signals = np.empty([self.channels.size, self.n_time])\n try:\n self.connection.get(', '.join(tdi_assignments))\n for i, tdi_var in enumerate(tdi_vars):\n self.signals[i, :] = self.connection.get(tdi_var)\n except:\n print(f'{self.shot}: ERROR fetching signals')\n self.time = None\n self.signals = None\n return\n t2 = time.time()\n print(f'{self.shot}: Signal time = {t2 - t1:.2f} s')\n\n\nif __name__=='__main__':\n bes_data = BES_Data(shot=184800,\n channels=[1,2,3,4],\n get_signals=True,\n verbose=True)" ]
[ [ "numpy.array", "numpy.empty", "numpy.logical_and", "numpy.diff", "numpy.arange" ] ]
dand-oss/scikit-criteria
[ "1ca7667e08e79d551f8241278c939f604800d81b" ]
[ "skcriteria/norm.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2017, Cabral, Juan; Luczywo, Nadia\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\n# =============================================================================\n# DOCS\n# =============================================================================\n\n\"\"\"Several implementations of normalization methods\n\n\"\"\"\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\nimport numpy as np\nfrom numpy import linalg\n\nfrom .validate import MIN, MAX, criteriarr\n\n\n# =============================================================================\n# EXCEPTIONS\n# =============================================================================\n\nclass DuplicatedNameError(ValueError):\n pass\n\n\nclass NormalizerNotFound(AttributeError):\n pass\n\n\nclass FunctionNotRegisteredAsNormalizer(ValueError):\n pass\n\n\n# =============================================================================\n# REGISTERS\n# =============================================================================\n\nNORMALIZERS = {}\n\n\ndef register(name, func=None):\n if name in NORMALIZERS:\n raise DuplicatedNameError(name)\n if func is None:\n def _dec(func):\n NORMALIZERS[name] = func\n return func\n return _dec\n else:\n NORMALIZERS[name] = func\n return func\n\n\ndef get(name, d=None):\n try:\n return NORMALIZERS[name]\n except KeyError:\n if d is not None:\n return d\n raise NormalizerNotFound(name)\n\n\ndef nameof(normalizer):\n for k, v in NORMALIZERS.items():\n if v == normalizer:\n return k\n raise FunctionNotRegisteredAsNormalizer(str(normalizer))\n\n\ndef norm(name, arr, *args, **kwargs):\n normalizer = get(name)\n return normalizer(arr, *args, **kwargs)\n\n\n# =============================================================================\n# IMPLEMENTATIONS\n# =============================================================================\n\n@register(\"none\")\ndef 
none(arr, criteria=None, axis=None):\n \"\"\"This do not nothing and only try to return an numpy.ndarray\n of the given data\n\n \"\"\"\n return np.asarray(arr)\n\n\n@register(\"sum\")\ndef sum(arr, criteria=None, axis=None):\n r\"\"\"Divide of every value on the array by sum of values along an\n axis.\n\n .. math::\n\n \\overline{X}_{ij} = \\frac{X_{ij}}{\\sum\\limits_{j=1}^m X_{ij}}\n\n Parameters\n ----------\n\n arr : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n A array with values\n\n axis : :py:class:`int` optional\n Axis along which to operate. By default, flattened input is used.\n\n criteria : Not used\n\n Returns\n -------\n\n narray : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n array of ratios\n\n Examples\n --------\n\n >>> from skcriteria import norm\n >>> mtx = [[1, 2], [3, 4]]\n >>> norm.sum(mtx) # ratios with the sum of the array\n aarray([[ 0.1 , 0.2 ],\n [ 0.30000001, 0.40000001]], dtype=float64)\n >>> norm.sum(mtx, axis=0) # ratios with the sum of the array by column\n array([[ 0.25 , 0.33333334],\n [ 0.75 , 0.66666669]], dtype=float64)\n >>> norm.sum(mtx, axis=1) # ratios with the sum of the array by row\n array([[ 0.33333334, 0.66666669],\n [ 0.42857143, 0.5714286 ]], dtype=float64)\n\n \"\"\"\n arr = np.asarray(arr, dtype=float)\n sumval = np.sum(arr, axis=axis, keepdims=True)\n return arr / sumval\n\n\n@register(\"max\")\ndef max(arr, criteria=None, axis=None):\n r\"\"\"Divide of every value on the array by max value along an axis.\n\n .. math::\n\n \\overline{X}_{ij} = \\frac{X_{ij}}{\\max_{X_{ij}}}\n\n Parameters\n ----------\n\n arr : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n A array with values\n\n axis : :py:class:`int` optional\n Axis along which to operate. By default, flattened input is used.\n\n criteria : Not used\n\n Returns\n -------\n\n narray : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n array of ratios\n\n Examples\n --------\n\n >>> from skcriteria import norm\n >>> mtx = [[1, 2], [3, 4]]\n >>> norm.max(mtx) # ratios with the max value of the array\n array([[ 0.25, 0.5 ],\n [ 0.75, 1. ]], dtype=float64)\n >>> norm.max(mtx, axis=0) # ratios with the max value of the arr by column\n array([[ 0.33333334, 0.5 ],\n [ 1. , 1. ]], dtype=float64)\n >>> norm.max(mtx, axis=1) # ratios with the max value of the array by row\n array([[ 0.5 , 1. ],\n [ 0.75, 1. ]], dtype=float64)\n\n \"\"\"\n arr = np.asarray(arr, dtype=float)\n maxval = np.max(arr, axis=axis, keepdims=True)\n return arr / maxval\n\n\n@register(\"vector\")\ndef vector(arr, criteria=None, axis=None):\n r\"\"\"Caculates the set of ratios as the square roots of the sum of squared\n responses of a given axis as denominators. If *axis* is *None* sum all\n the array.\n\n .. math::\n\n \\overline{X}_{ij} =\n \\frac{X_{ij}}{\\sqrt{\\sum\\limits_{j=1}^m X_{ij}^{2}}}\n\n Parameters\n ----------\n\n arr : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n A array with values\n\n axis : :py:class:`int` optional\n Axis along which to operate. 
By default, flattened input is used.\n\n criteria : Not used\n\n Returns\n -------\n\n narray : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n array of ratios\n\n Examples\n --------\n\n >>> from skcriteria import norm\n >>> mtx = [[1, 2], [3, 4]]\n >>> norm.vector(mtx) # ratios with the vector value of the array\n array([[ 0.18257418, 0.36514837],\n [ 0.54772252, 0.73029673]], dtype=float64)\n >>> norm.vector(mtx, axis=0) # ratios by column\n array([[ 0.31622776, 0.44721359],\n [ 0.94868326, 0.89442718]], dtype=float64)\n >>> norm.vector(mtx, axis=1) # ratios by row\n array([[ 0.44721359, 0.89442718],\n [ 0.60000002, 0.80000001]], dtype=float64)\n\n \"\"\"\n arr = np.asarray(arr, dtype=float)\n frob = linalg.norm(arr, None, axis=axis)\n return arr / frob\n\n\n@register(\"push_negatives\")\ndef push_negatives(arr, criteria=None, axis=None):\n r\"\"\"If an array has negative values this function increment the values\n proportionally to made all the array positive along an axis.\n\n .. math::\n\n \\overline{X}_{ij} =\n \\begin{cases}\n X_{ij} + min_{X_{ij}} & \\text{if } X_{ij} < 0\\\\\n X_{ij} & \\text{otherwise}\n \\end{cases}\n\n Parameters\n ----------\n\n arr : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n A array with values\n\n axis : :py:class:`int` optional\n Axis along which to operate. By default, flattened input is used.\n\n criteria : Not used\n\n Returns\n -------\n\n narray : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n array of ratios\n\n Examples\n --------\n\n >>> from skcriteria import norm\n >>> mtx = [[1, 2], [3, 4]]\n >>> mtx_lt0 = [[-1, 2], [3, 4]] # has a negative value\n >>> norm.push_negatives(mtx) # array without negatives don't be affected\n array([[1, 2],\n [3, 4]])\n >>> # all the array is incremented by 1 to eliminate the negative\n >>> norm.push_negatives(mtx_lt0)\n array([[0, 3],\n [4, 5]])\n >>> # by column only the first one (with the negative value) is affected\n >>> norm.push_negatives(mtx_lt0, axis=0)\n array([[0, 2],\n [4, 4]])\n >>> # by row only the first row (with the negative value) is affected\n >>> norm.push_negatives(mtx_lt0, axis=1)\n array([[0, 3],\n [3, 4]])\n\n \"\"\"\n arr = np.asarray(arr)\n mins = np.min(arr, axis=axis, keepdims=True)\n delta = (mins < 0) * mins\n return arr - delta\n\n\n@register(\"add1to0\")\ndef add1to0(arr, criteria=None, axis=None):\n r\"\"\"If a value in the array is 0, then an :math:`1` is added to\n all the values\n\n .. math::\n\n \\overline{X}_{ij} = X_{ij} + 1\n\n Parameters\n ----------\n\n arr : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n A array with values\n\n axis : :py:class:`int` optional\n Axis along which to operate. By default, flattened input is used.\n\n criteria : Not used\n\n Returns\n -------\n\n narray : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n array of ratios\n\n Examples\n --------\n\n >>> from skcriteria import norm\n >>> mtx = [[1, 2], [3, 4]]\n >>> mtx_w0 = [[0,1], [2,3]]\n >>> norm.add1to0(mtx)\n array([[1, 2],\n [3, 4]])\n >>> # added 1\n >>> norm.add1to0(mtx_w0)\n array([[ 1, 2],\n [ 3, 4]])\n\n \"\"\"\n arr = np.asarray(arr)\n if 0 in arr:\n if len(arr.shape) == 1 or axis is None:\n return arr + 1\n else:\n zeros = np.any(arr == 0, axis=axis)\n increment = np.zeros(zeros.shape)\n increment[zeros] = 1\n return arr + increment\n return arr\n\n\n@register(\"addepsto0\")\ndef addepsto0(arr, criteria=None, axis=None):\n r\"\"\"If a value in the array is 0, then an :math:`\\epsilon` is\n added to all the values\n\n .. 
math::\n\n \\overline{X}_{ij} = X_{ij} + \\epsilon\n\n Parameters\n ----------\n\n arr : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n A array with values\n\n axis : :py:class:`int` optional\n Axis along which to operate. By default, flattened input is used.\n\n criteria : Not used\n\n Returns\n -------\n\n narray : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n array of ratios\n\n Examples\n --------\n\n >>> from skcriteria import norm\n >>> mtx = [[1, 2], [3, 4]]\n >>> mtx_w0 = [[0, 1], [2,3]]\n >>> norm.addepsto0(mtx)\n array([[1, 2],\n [3, 4]])\n >>> # added epsilon\n >>> norm.addepsto0(mtx_w0)\n array([[ 2.22e-16, 1],\n [ 2, 3]])\n\n \"\"\"\n arr = np.asarray(arr)\n if 0 in arr:\n arr_type = arr.dtype.type\n if not issubclass(arr_type, (np.floating, float)):\n arr_type = float\n eps = np.finfo(arr_type).eps\n if len(arr.shape) == 1 or axis is None:\n return arr + eps\n else:\n zeros = np.any(arr == 0, axis=axis)\n increment = np.zeros(zeros.shape[0])\n increment[zeros] = eps\n return arr + increment\n return arr\n\n\n@register(\"ideal_point\")\ndef ideal_point(arr, criteria=None, axis=None):\n \"\"\"This transformation is based on the concept of the ideal\n point. So, the value :math:`x_{aj}` below, expresses the degree to which\n the alternative a is close to the ideal value :math:`f_j^*`, which is the\n best performance in criterion :math:`j`, and far from the anti-ideal\n value :math:`f_{j^*}`., which is the worst performance in criterion\n :math:`j`. Both :math:`f_j^*` and :math:`f_{j^*}`, are achieved by at\n least one of the alternatives under consideration.\n\n .. math::\n\n x_{aj} = \\\\frac{ f_j(a) - f_{j^*} }{ f_j^* - f_{j^*} }\n\n Parameters\n ----------\n\n arr : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n A array with values\n\n axis : :py:class:`int` optional\n Axis along which to operate. By default, flattened input is used.\n\n criteria : :py:class:`numpy.ndarray`\n Criteria array to determine the ideal and nadir points of every\n criteria.\n\n Returns\n -------\n\n narray : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n array of ratios\n\n\n Examples\n --------\n\n >>> from skcriteria import norm\n >>> mtx = [[1, 2], [3, 4]]\n >>> norm.ideal_point(mtx, axis=0)\n array([[ 0., 0.],\n [ 1., 1.]])\n\n \"\"\"\n\n if criteria is None:\n raise TypeError(\"you must provide a criteria\")\n\n if axis not in (0, 1, None):\n msg = \"'axis' must be 0, 1 or None. Found: {}\"\n raise ValueError(msg.format(axis))\n\n arr = np.asarray(arr, dtype=float)\n criteria = criteriarr(criteria)\n if axis is None:\n if len(set(criteria)) != 1:\n msg = \"If 'axis' is None all the 'criteria' must be the same\"\n raise ValueError(msg)\n criteria = criteria[0]\n idealf, nadirf = (\n (np.max, np.min)\n if criteria == MAX\n else (np.min, np.max))\n ideal, nadir = idealf(arr), nadirf(arr)\n elif axis == 1:\n arr = arr.T\n\n maxs = np.max(arr, axis=0)\n mins = np.min(arr, axis=0)\n\n ideal = np.where(criteria == MAX, maxs, mins)\n nadir = np.where(criteria == MAX, mins, maxs)\n\n result = (arr - nadir) / (ideal - nadir)\n\n if axis == 1:\n result = result.T\n\n return result\n\n\n@register(\"invert_min\")\ndef invert_min(arr, criteria=None, axis=None):\n \"\"\"Invert all the axis whith minimizartion criteria\n\n Parameters\n ----------\n\n arr : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n A array with values\n\n axis : :py:class:`int` optional\n Axis along which to operate. 
By default, flattened input is used.\n\n criteria : :py:class:`numpy.ndarray`\n Criteria array.\n\n Returns\n -------\n\n narray : (:py:class:`numpy.ndarray`, :py:class:`numpy.ndarray`)\n array of ratios\n\n\n Examples\n --------\n\n >>> from skcriteria import norm\n >>> mtx = [[1, 2], [3, 4]]\n >>> norm.ideal_point(mtx, criteria=[1, -1], axis=0)\n array([[ 1., 0.5],\n [ 3., 0.25]])\n\n \"\"\"\n\n if criteria is None:\n raise TypeError(\"you must provide a criteria\")\n\n if axis not in (0, 1, None):\n msg = \"'axis' must be 0, 1 or None. Found: {}\"\n raise ValueError(msg.format(axis))\n\n arr = np.asarray(arr, dtype=float)\n criteria = criteriarr(criteria)\n\n if axis is None and len(set(criteria)) != 1:\n msg = \"If 'axis' is None all the 'criteria' must be the same\"\n raise ValueError(msg)\n elif axis == 1:\n arr = arr.T\n\n if MIN in criteria:\n mincrits = np.squeeze(np.where(criteria == MIN))\n\n if np.ndim(arr) == 1:\n mincrits_inverted = 1.0 / arr[mincrits]\n arr = arr.astype(mincrits_inverted.dtype.type)\n arr[mincrits] = mincrits_inverted\n else:\n mincrits_inverted = 1.0 / arr[:, mincrits]\n arr = arr.astype(mincrits_inverted.dtype.type)\n arr[:, mincrits] = mincrits_inverted\n\n if axis == 1:\n arr = arr.T\n\n return arr\n" ]
[ [ "numpy.max", "numpy.linalg.norm", "numpy.asarray", "numpy.zeros", "numpy.sum", "numpy.min", "numpy.where", "numpy.any", "numpy.finfo", "numpy.ndim" ] ]
vdankers/OpenNMT-py
[ "611f80428b6dad7726d3626e43515f7d05e2309e" ]
[ "onmt/utils/loss.py" ]
[ "\"\"\"\nThis file handles the details of the loss function during training.\n\nThis includes: LossComputeBase and the standard NMTLossCompute, and\n sharded loss compute stuff.\n\"\"\"\nfrom __future__ import division\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport onmt\nimport onmt.inputters as inputters\nfrom onmt.modules.sparse_losses import SparsemaxLoss\n\n\ndef build_loss_compute(model, tgt_vocab, opt, train=True):\n \"\"\"\n This returns user-defined LossCompute object, which is used to\n compute loss in train/validate process. You can implement your\n own *LossCompute class, by subclassing LossComputeBase.\n \"\"\"\n device = torch.device(\"cuda\" if onmt.utils.misc.use_gpu(opt) else \"cpu\")\n\n if opt.copy_attn:\n compute = onmt.modules.CopyGeneratorLossCompute(\n model.generator, tgt_vocab, opt.copy_attn_force,\n opt.copy_loss_by_seqlength)\n else:\n compute = NMTLossCompute(\n model.generator, tgt_vocab,\n label_smoothing=opt.label_smoothing if train else 0.0)\n compute.to(device)\n\n return compute\n\n\nclass LossComputeBase(nn.Module):\n \"\"\"\n Class for managing efficient loss computation. Handles\n sharding next step predictions and accumulating mutiple\n loss computations\n\n\n Users can implement their own loss computation strategy by making\n subclass of this one. Users need to implement the _compute_loss()\n and make_shard_state() methods.\n\n Args:\n generator (:obj:`nn.Module`) :\n module that maps the output of the decoder to a\n distribution over the target vocabulary.\n tgt_vocab (:obj:`Vocab`) :\n torchtext vocab object representing the target output\n normalzation (str): normalize by \"sents\" or \"tokens\"\n \"\"\"\n\n def __init__(self, generator, tgt_vocab):\n super(LossComputeBase, self).__init__()\n self.generator = generator\n self.tgt_vocab = tgt_vocab\n self.padding_idx = tgt_vocab.stoi[inputters.PAD_WORD]\n\n def _make_shard_state(self, batch, output, range_, attns=None):\n \"\"\"\n Make shard state dictionary for shards() to return iterable\n shards for efficient loss computation. Subclass must define\n this method to match its own _compute_loss() interface.\n Args:\n batch: the current batch.\n output: the predict output from the model.\n range_: the range of examples for computing, the whole\n batch or a trunc of it?\n attns: the attns dictionary returned from the model.\n \"\"\"\n return NotImplementedError\n\n def _compute_loss(self, batch, output, target, **kwargs):\n \"\"\"\n Compute the loss. 
Subclass must define this method.\n\n Args:\n\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n **kwargs(optional): additional info for computing loss.\n \"\"\"\n return NotImplementedError\n\n def monolithic_compute_loss(self, batch, output, attns):\n \"\"\"\n Compute the forward loss for the batch.\n\n Args:\n batch (batch): batch of labeled examples\n output (:obj:`FloatTensor`):\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict of :obj:`FloatTensor`) :\n dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n Returns:\n :obj:`onmt.utils.Statistics`: loss statistics\n \"\"\"\n range_ = (0, batch.tgt.size(0))\n shard_state = self._make_shard_state(batch, output, range_, attns)\n _, batch_stats = self._compute_loss(batch, **shard_state)\n\n return batch_stats\n\n def sharded_compute_loss(self, batch, output, attns,\n cur_trunc, trunc_size, shard_size,\n normalization):\n \"\"\"Compute the forward loss and backpropagate. Computation is done\n with shards and optionally truncation for memory efficiency.\n\n Also supports truncated BPTT for long sequences by taking a\n range in the decoder output sequence to back propagate in.\n Range is from `(cur_trunc, cur_trunc + trunc_size)`.\n\n Note sharding is an exact efficiency trick to relieve memory\n required for the generation buffers. Truncation is an\n approximate efficiency trick to relieve the memory required\n in the RNN buffers.\n\n Args:\n batch (batch) : batch of labeled examples\n output (:obj:`FloatTensor`) :\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict) : dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n cur_trunc (int) : starting position of truncation window\n trunc_size (int) : length of truncation window\n shard_size (int) : maximum number of examples in a shard\n normalization (int) : Loss is divided by this number\n\n Returns:\n :obj:`onmt.utils.Statistics`: validation loss statistics\n\n \"\"\"\n batch_stats = onmt.utils.Statistics()\n range_ = (cur_trunc, cur_trunc + trunc_size)\n shard_state = self._make_shard_state(batch, output, range_, attns)\n for shard in shards(shard_state, shard_size):\n loss, stats = self._compute_loss(batch, **shard)\n loss.div(float(normalization)).backward()\n batch_stats.update(stats)\n return batch_stats\n\n def _stats(self, loss, scores, target):\n \"\"\"\n Args:\n loss (:obj:`FloatTensor`): the loss computed by the loss criterion.\n scores (:obj:`FloatTensor`): a score for each possible output\n target (:obj:`FloatTensor`): true targets\n\n Returns:\n :obj:`onmt.utils.Statistics` : statistics for this batch.\n \"\"\"\n pred = scores.max(1)[1]\n gtruth = target.view(-1)\n non_padding = gtruth.ne(self.padding_idx)\n \n num_correct = pred.eq(gtruth) \\\n .masked_select(non_padding) \\\n .sum() \\\n .item()\n\n # Now compute statistics needed for sequence accuracy,\n # to do so, reshape the target\n pred = pred.reshape(target.shape)\n\n non_padding = non_padding.reshape(target.shape)\n #pred = target\n sents_correct = pred.eq(target) * non_padding # torch.max(pred.eq(target), padding)\n sents_correct = torch.sum(torch.sum(sents_correct, 0).eq(torch.sum(non_padding, 0))).item()\n n_sents = pred.shape[1]\n\n num_non_padding = non_padding.sum().item()\n return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct, n_sents, sents_correct)\n\n def _bottle(self, _v):\n return _v.view(-1, _v.size(2))\n\n def _unbottle(self, _v, 
batch_size):\n return _v.view(-1, batch_size, _v.size(1))\n\n\nclass LabelSmoothingLoss(nn.Module):\n \"\"\"\n With label smoothing,\n KL-divergence between q_{smoothed ground truth prob.}(w)\n and p_{prob. computed by model}(w) is minimized.\n \"\"\"\n def __init__(self, label_smoothing, tgt_vocab_size, ignore_index=-100):\n assert 0.0 < label_smoothing <= 1.0\n self.padding_idx = ignore_index\n super(LabelSmoothingLoss, self).__init__()\n\n smoothing_value = label_smoothing / (tgt_vocab_size - 2)\n one_hot = torch.full((tgt_vocab_size,), smoothing_value)\n one_hot[self.padding_idx] = 0\n self.register_buffer('one_hot', one_hot.unsqueeze(0))\n\n self.confidence = 1.0 - label_smoothing\n\n def forward(self, output, target):\n \"\"\"\n output (FloatTensor): batch_size x n_classes\n target (LongTensor): batch_size\n \"\"\"\n model_prob = self.one_hot.repeat(target.size(0), 1)\n model_prob.scatter_(1, target.unsqueeze(1), self.confidence)\n model_prob.masked_fill_((target == self.padding_idx).unsqueeze(1), 0)\n\n return F.kl_div(output, model_prob, reduction='sum')\n\n\nclass NMTLossCompute(LossComputeBase):\n \"\"\"\n Standard NMT Loss Computation.\n \"\"\"\n\n def __init__(self, generator, tgt_vocab, normalization=\"sents\",\n label_smoothing=0.0):\n super(NMTLossCompute, self).__init__(generator, tgt_vocab)\n self.sparse = not isinstance(generator[1], nn.LogSoftmax)\n if label_smoothing > 0:\n self.criterion = LabelSmoothingLoss(\n label_smoothing, len(tgt_vocab), ignore_index=self.padding_idx\n )\n elif self.sparse:\n self.criterion = SparsemaxLoss(\n ignore_index=self.padding_idx, size_average=False\n )\n else:\n self.criterion = nn.NLLLoss(\n ignore_index=self.padding_idx, reduction='sum'\n )\n\n def _make_shard_state(self, batch, output, range_, attns=None):\n return {\n \"output\": output,\n \"target\": batch.tgt[range_[0] + 1: range_[1]],\n }\n\n def _compute_loss(self, batch, output, target):\n bottled_output = self._bottle(output)\n if self.sparse:\n # for sparsemax loss, the loss function operates on the raw output\n # vector, not a probability vector. Hence it's only necessary to\n # apply the first part of the generator here.\n scores = self.generator[0](bottled_output)\n else:\n scores = self.generator(bottled_output)\n gtruth = target.view(-1)\n loss = self.criterion(scores, gtruth)\n stats = self._stats(loss.clone(), scores, target)\n\n return loss, stats\n\n\ndef filter_shard_state(state, shard_size=None):\n \"\"\" ? \"\"\"\n for k, v in state.items():\n if shard_size is None:\n yield k, v\n\n if v is not None:\n v_split = []\n if isinstance(v, torch.Tensor):\n for v_chunk in torch.split(v, shard_size):\n v_chunk = v_chunk.data.clone()\n v_chunk.requires_grad = v.requires_grad\n v_split.append(v_chunk)\n yield k, (v, v_split)\n\n\ndef shards(state, shard_size, eval_only=False):\n \"\"\"\n Args:\n state: A dictionary which corresponds to the output of\n *LossCompute._make_shard_state(). 
The values for\n those keys are Tensor-like or None.\n shard_size: The maximum size of the shards yielded by the model.\n eval_only: If True, only yield the state, nothing else.\n Otherwise, yield shards.\n\n Yields:\n Each yielded shard is a dict.\n\n Side effect:\n After the last shard, this function does back-propagation.\n \"\"\"\n if eval_only:\n yield filter_shard_state(state)\n else:\n # non_none: the subdict of the state dictionary where the values\n # are not None.\n non_none = dict(filter_shard_state(state, shard_size))\n\n # Now, the iteration:\n # state is a dictionary of sequences of tensor-like but we\n # want a sequence of dictionaries of tensors.\n # First, unzip the dictionary into a sequence of keys and a\n # sequence of tensor-like sequences.\n keys, values = zip(*((k, [v_chunk for v_chunk in v_split])\n for k, (_, v_split) in non_none.items()))\n\n # Now, yield a dictionary for each shard. The keys are always\n # the same. values is a sequence of length #keys where each\n # element is a sequence of length #shards. We want to iterate\n # over the shards, not over the keys: therefore, the values need\n # to be re-zipped by shard and then each shard can be paired\n # with the keys.\n for shard_tensors in zip(*values):\n yield dict(zip(keys, shard_tensors))\n\n # Assumed backprop'd\n variables = []\n for k, (v, v_split) in non_none.items():\n if isinstance(v, torch.Tensor) and state[k].requires_grad:\n variables.extend(zip(torch.split(state[k], shard_size),\n [v_chunk.grad for v_chunk in v_split]))\n inputs, grads = zip(*variables)\n torch.autograd.backward(inputs, grads)\n" ]
[ [ "torch.nn.NLLLoss", "torch.autograd.backward", "torch.split", "torch.nn.functional.kl_div", "torch.full", "torch.sum" ] ]
megnashah/pytorch-CycleGAN-and-pix2pix
[ "aa3d9c13a8e4e94b04fccc634ead149f486d8d46" ]
[ "post_processing/plots_test.py" ]
[ "import pandas as pd\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport numpy\nimport os\nimport math\nimport pickle\nfrom datetime import date\n\n\n# TO BE CHANGED --> list of fake feature data to be analyzed\ntrial_list = [\"trial_12_14_20\", 'train_12_21_20', 'train_12_28v2_20', 'train_12_30_20'] #packets2blocks\nlabel_list = ['B1, smooth', 'B1, sharp', 'B50, smooth', 'B16, smooth'] #packets2blocks\n#trial_list = ['trial_12_05_20', 'train_12_23_20', 'train_12_26_20', 'train_12_28_20', 'train_12_29_20'] #grains2packets\n#label_list = ['B1, smooth', 'B1, sharp', 'B50, sharp', 'B50, smooth', 'B16, smooth'] #grains2packets\nproject = 'packets2blocks'\n\n# these will be 3D arrays of all fake data collected\nall_FAKE = list(range(len(trial_list)))\nlog_all_FAKE = list(range(len(trial_list)))\n\n#GCP PATHS \n# # directory paths for feature data, both real and fake\n# feature_data_dir = \"/home/tom_phelan_ext/Documents/microstructure_analysis/\" + project + \"/feature_data/\"\n# feature_data = os.listdir(feature_data_dir)\n# feature_data_FAKE_dir = \"/home/tom_phelan_ext/Documents/microstructure_analysis/\" + project + \"/feature_data_FAKE/\"\n\n# # directory path for output graphs/plots; outputs in new latest trial graph folder\n# graphs_folder = \"/home/tom_phelan_ext/Documents/graphs/\" + project + \"/\" + trial_list[len(trial_list) - 1] + \"/\"\n# if (not(os.path.exists(graphs_folder))): os.makedirs(graphs_folder)\n\n#Megna PATHS \n# directory paths for feature data, both real and fake\nfeature_data_dir = r'D:\\steelGAN\\12292020\\microstructure_analysis\\microstructure_analysis' + '\\\\' + project + \"\\\\feature_data\\\\\"\nfeature_data = os.listdir(feature_data_dir)\nfeature_data_FAKE_dir = r'D:\\steelGAN\\12292020\\microstructure_analysis\\microstructure_analysis' + '\\\\' + project + \"\\\\feature_data_FAKE\\\\\"\n\n# directory path for output graphs/plots; outputs in new latest trial graph folder\ngraphs_folder = r'D:\\steelGAN\\12292020\\microstructure_analysis\\microstructure_analysis' + '\\\\' + project + \"\\\\plots\\\\\" \nif (not(os.path.exists(graphs_folder))): os.makedirs(graphs_folder)\n\n# arrays of attributes to be normalized and log normalized\nattributes = ['AspectRatios_0', 'AxisEulerAngles_0','Neighborhoods']\nlog_attributes = ['AxisLengths_0', 'AxisLengths_1', 'EquivalentDiameters', 'NumNeighbors']\n\ndef build_attr_array(image, attr, log_bool):\n new_data = image[attr].values\n if attr == 'Neighborhoods':\n # DEBUGGING\n if (max(new_data) >= 580): print(\"csv file with >=580 neighborhoods: \" + str(csv_file))\n\n if (attr == 'AxisEulerAngles_0'):\n # convert radians to degrees.\n i = 0\n for i in range(len(new_data)):\n new_data[i] = math.degrees(new_data[i])\n \n # checks whether attribute needs log distr.\n if (log_bool):\n new_data = numpy.log(numpy.asarray(new_data))\n # return new_data array, to be appended to its respective attr. 
array\n return new_data\n\ndef create_histogram(real_attr_arr, fake, attr, attr_index, num_bins, log_bool):\n if attr == 'Neighborhoods' or attr == 'NumNeighbors':\n num_bins=10\n # alpha determines transparency, density normalizes the dataset\n #plt.hist(real_attr_arr, bins=num_bins, label='real', density=True, alpha=0.5, edgecolor='blue')\n y, binEdges = numpy.histogram(real_attr_arr, bins=num_bins, density = True)\n bincenters = 0.5*(binEdges[1:]+binEdges[:-1]) \n plt.plot(bincenters, y, label='real', linestyle='dashed', linewidth=4)\n for i in range(0,len(fake)):\n date = label_list[i] #trial_list[i][6:-3] # truncate trial name down to MM-DD for legend labeling\n fake_label = 'fake (' + date + ')'\n sub_arr = fake[i]\n # i = index of current fake dataset\n #plt.hist(sub_arr[attr_index], bins=num_bins, label=fake_label, density=True, alpha=0.5)\n y, binEdges = numpy.histogram(sub_arr[attr_index], bins=binEdges, density=True)\n bincenters = 0.5*(binEdges[1:]+binEdges[:-1]) \n plt.plot(bincenters, y, '-', label=fake_label)\n \n plt.ylabel(\"Number of Features, normalized\")\n plt.legend(loc='upper right')\n # labeling for histogram and image (normal or log normal)\n if (log_bool):\n plt.xlabel('ln(' + attr + ')')\n plt.title('ln(' + attr + ')')\n plt.savefig(graphs_folder + attr + \"_log_HIST.png\")\n else:\n plt.xlabel(attr)\n plt.title(attr)\n plt.savefig(graphs_folder + attr + \"_HIST.png\")\n plt.clf()\n\ndef read_images(image, data_arr, log_data_arr):\n # NORMAL DATA\n sub_arr = 0\n for attr in attributes:\n data_arr[sub_arr] = numpy.concatenate([data_arr[sub_arr], build_attr_array(image=image, attr=attr, log_bool=False)], axis=0)\n sub_arr += 1\n\n # LOG DATA\n sub_arr = 0\n for attr in log_attributes:\n log_data_arr[sub_arr] = numpy.concatenate([log_data_arr[sub_arr], build_attr_array(image=image, attr=attr, log_bool=True)], axis=0)\n sub_arr += 1\n\n # return arrays of image attributes\n return data_arr, log_data_arr\n\ndef get_min_and_max(final_min, final_max, data):\n data = [float(i) for i in data]\n current_min = min(data)\n current_max = max(data)\n if(final_min > current_min): final_min = current_min\n if(final_max < current_max): final_max = current_max\n return final_min, final_max\n\n# NOTE: To gather all attribute data without having to reread image .csv files numerous times, we will use 2-D arrays/lists.\n# NOTE: Each index of the total image data array will be a sub-array that represents a certain image attribute.\n\n# REAL IMAGE DATA --------------------------------------------------------------------------------- #\ndata_real = [[],[],[]]\nlog_data_real = [[],[],[],[]]\n\nprint(\"Processing real image data for \" + project + \"...\")\nfor csv_file in feature_data:\n image = pd.read_csv(feature_data_dir + csv_file, skiprows=0, header=1)\n data_real, log_data_real = read_images(image, data_real, log_data_real)\n \nprint(project + \" real image data collected.\")\nprint()\n\n# FAKE IMAGE DATA --------------------------------------------------------------------------------- #\ni = 0\nfor trial in trial_list:\n # selects a trial run of fake data to be analyzed\n feature_data_FAKE = feature_data_FAKE_dir + trial + '/'\n print(\"Processing \" + str(trial) + \" fake data...\")\n\n data_FAKE = [[],[],[]]\n log_data_FAKE = [[],[],[],[]]\n\n for csv_file in os.listdir(feature_data_FAKE):\n image = pd.read_csv(feature_data_FAKE + csv_file, skiprows=0, header=1)\n read_images(image, data_FAKE, log_data_FAKE)\n\n # add fake feature data from current trial to final arrays (all)\n 
all_FAKE[i] = data_FAKE\n log_all_FAKE[i] = log_data_FAKE\n i += 1\n print(str(trial) + \" fake image data collected.\")\n print()\n\n# HISTOGRAMS -------------------------------------------------------------------------------------- #\ni = 0\nfor attr in attributes:\n print(\"Generating histogram for \" + attr + \" data...\")\n create_histogram(real_attr_arr=data_real[i], fake=all_FAKE, attr=attr, attr_index=i, num_bins=25, log_bool=False)\n i += 1\n print(attr + \" histogram saved.\")\n print()\ni = 0\nfor attr in log_attributes:\n print(\"Generating histogram for \" + attr + \" data...\")\n create_histogram(real_attr_arr=log_data_real[i], fake=log_all_FAKE, attr=attr, attr_index=i, num_bins=25, log_bool=True)\n i += 1\n print(attr + \" histogram saved.\")\n print()\n\n# SCATTER PLOTS ----------------------------------------------------------------------------------- #\n\n# NOTE: in the log arrays, index 2 is EquivalentDiameters. This will be the x-axis on scatter plots.\n# plotting all attr in attributes array against EquivalentDiameters\ni = 0\nxmin = 10000\nxmax = 0\nfor attr in attributes:\n if attr == 'Neighborhoods' or attr == 'NumNeighbors':\n print(\"Generating Equivalent Diameters vs. \" + attr + \" scatter plot...\")\n xmin, xmax = get_min_and_max(xmin, xmax, data_real[i])\n plt.scatter(log_data_real[2], data_real[i], label='real', alpha=0.5)\n for j in range(len(all_FAKE)):\n # plot each set of fake data\n current_fake = all_FAKE[j]\n temp = log_all_FAKE[j]\n fake_diam = temp[2] # equivalent diameters of current fake dataset\n date = label_list[j] #trial_list[j][6:-3] # truncate trial name down to MM-DD for legend labeling\n fake_label = 'fake (' + date + ')'\n plt.scatter(fake_diam, current_fake[i], label=fake_label, alpha=0.25)\n # check min and max again\n xmin, xmax = get_min_and_max(xmin, xmax, current_fake[i])\n \n print(attr + \" xmin: \", xmin)\n print(attr + \" xmax: \", xmax)\n # plot image layout and save figure\n plt.title(\"Relationship in \" + attr + \" and EquivalentDiameters\")\n plt.xlabel('Equivalent Diameters (log)')\n plt.ylabel(attr)\n plt.ylim(xmin, xmax) # sclae x-axis to better fit the data\n plt.legend(loc='upper left')\n plt.savefig(graphs_folder + attr + \"_diameters_scatter.png\")\n plt.clf()\n print(attr + \" scatter plot saved.\")\n print()\n\n #reset xmin and xmax for next attribute\n xmin = 10000\n xmax = 0\n i += 1\n\nprint(\"Program completed.\")" ]
[ [ "matplotlib.use", "numpy.histogram", "numpy.asarray", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf", "pandas.read_csv" ] ]
ShaunZac/Neural-Network
[ "34f69f8f6daa0e0f9c85ebb19db401d436b1e346" ]
[ "MNIST_keras.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 21 09:02:13 2020\n\n@author: Shaun Zacharia\n\"\"\"\n\n\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nfrom keras import layers\nfrom keras import losses\nfrom keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\nfrom keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D\nfrom keras.models import Model\nfrom keras.preprocessing import image\nfrom keras.utils import layer_utils\nfrom keras.utils.data_utils import get_file\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nfrom keras.utils import plot_model\nimport keras.backend as K\nK.set_image_data_format('channels_last')\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import imshow\n\n(X_train_orig, Y_train), (X_test_orig, Y_test) = tf.keras.datasets.mnist.load_data()\nY_train = pd.get_dummies(pd.Series(Y_train).astype('category')).to_numpy()\nY_test = pd.get_dummies(pd.Series(Y_test).astype('category')).to_numpy()\n\nX_train = X_train_orig/255.\nX_test = X_test_orig/255.\n\ntrain_ex = X_train.shape[0]\ntest_ex = X_test.shape[0]\nX_train = X_train.reshape(train_ex, 28, 28, 1)\nX_test = X_test.reshape(test_ex, 28, 28, 1)\n\n# print (\"number of training examples = \" + str(X_train.shape[0]))\n# print (\"number of test examples = \" + str(X_test.shape[0]))\n# print (\"X_train shape: \" + str(X_train.shape))\n# print (\"Y_train shape: \" + str(Y_train.shape))\n# print (\"X_test shape: \" + str(X_test.shape))\n# print (\"Y_test shape: \" + str(Y_test.shape))\n\n\ndef myModel(input_shape):\n \"\"\"\n Implementation of the HappyModel.\n \n Arguments:\n input_shape -- shape of the images of the dataset\n (height, width, channels) as a tuple. \n Note that this does not include the 'batch' as a dimension.\n If you have a batch like 'X_train', \n then you can provide the input_shape using\n X_train.shape[1:]\n\n Returns:\n model -- a Model() instance in Keras\n \"\"\"\n \n # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!\n X_input = Input(input_shape)\n\n # Zero-Padding: pads the border of X_input with zeroes\n X = ZeroPadding2D((3, 3))(X_input)\n\n # CONV -> BN -> RELU Block applied to X\n X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)\n X = BatchNormalization(axis = 3, name = 'bn0')(X)\n X = Activation('relu')(X)\n\n # MAXPOOL\n X = MaxPooling2D((2, 2), name='max_pool')(X)\n\n # FLATTEN X (means convert it to a vector) + FULLYCONNECTED\n X = Flatten()(X)\n X = Dense(10, activation='sigmoid', name='fc')(X)\n\n # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.\n model = Model(inputs = X_input, outputs = X, name='myModel') \n \n return model\n\nmodel = myModel(X_train.shape[1:])\nmodel.compile(loss=losses.categorical_crossentropy, optimizer='Adam', metrics=['accuracy'])\nhistory = model.fit(X_train, Y_train, validation_data = (X_test, Y_test), epochs = 5, batch_size = 64)\n\nplt.plot(history.history['loss'], label='train')\nplt.plot(history.history['val_loss'], label='test')\nplt.legend()\nplt.show()\n\npreds = model.evaluate(X_test, Y_test)\nprint()\nprint (\"Loss = \" + str(preds[0]))\nprint (\"Test Accuracy = \" + str(preds[1]))\n" ]
[ [ "tensorflow.keras.datasets.mnist.load_data", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "pandas.Series", "matplotlib.pyplot.show" ] ]
konpsar/Numerical-Methods-for-PDEs-course
[ "74e323ee9916fbbe59a3031aee623c44eb5d88a2" ]
[ "Ex2-Finite Difference Methods for the heat equation-Theta Methods/Code+Report/theta.py" ]
[ "import numpy as np\n\n# Initial conditon\n\ndef u0(x): return (x*(1-x))\n\n#Exact Solution\n\ndef u(x, t): return 8*np.sin(np.pi*x)*np.exp(-t*np.pi**2)/np.pi**3\n\n# Discretization parameters\n\nJ = 24; dx = 1.0 / J; x = np.linspace(0, 1, J+1)\nT = 0.1; N = 24; dt = T / N; mu = dt / dx**2\ntheta =1./2\n\n# Build the system matrix\n\na = (1-theta)*mu*np.ones(J-2); b = (1-2*(1-theta)*mu)*np.ones(J-1); #theta=1/2 a = 1/2*np one\nA = np.diag(a, -1) + np.diag(b, 0) + np.diag(a, 1)\n\nc = -(theta*mu)*np.ones(J-2); d = (1+2*theta*mu)*np.ones(J-1);\nB = np.diag(c, -1) + np.diag(d, 0) + np.diag(c, 1)\n\n# Initialize the solution vectors\n\nUold = u0(x[1:J]); Unew = np.zeros(J-1)\n\n# Time stepping\n#B*Unew = A*Uold\n\nfor n in range(N):\n Unew = np.linalg.solve(B, A.dot(Uold))\n for j in range(J-1):\n Uold[j] = Unew[j]\n print(Unew)\n" ]
[ [ "numpy.sin", "numpy.zeros", "numpy.ones", "numpy.exp", "numpy.linspace", "numpy.diag" ] ]
Yaoc0009/BRVFL_MR
[ "3865d780953541231825ba1fcd9c2043f80e6d97" ]
[ "old/BRVFL.py" ]
[ "import numpy as np\r\nfrom sklearn.model_selection import train_test_split, KFold\r\nfrom scipy.io import loadmat\r\nimport pymc3 as pm\r\n\r\nn_node = 10 # num of nodes in hidden layer\r\nw_range = [-1, 1] # range of random weights\r\nb_range = [0, 1] # range of random biases\r\n\r\nclass BRVFL:\r\n \"\"\" BRVFL Classifier \"\"\"\r\n\r\n def __init__(self, n_node, w_range, b_range, alpha_1=10**(-5), alpha_2=10**(-5), alpha_3=10**(-5), alpha_4=10**(-5), n_iter=1000, tol=1.0e-3, activation='relu', same_feature=False):\r\n self.n_node = n_node\r\n self.w_range = w_range\r\n self.b_range = b_range\r\n \r\n self.alpha_1 = alpha_1 # Gamma distribution parameter\r\n self.alpha_2 = alpha_2\r\n self.alpha_3 = alpha_3\r\n self.alpha_4 = alpha_4\r\n self.n_iter = n_iter\r\n self.tol = tol\r\n\r\n self.weight = None\r\n self.bias = None\r\n self.beta = None\r\n self.prec = None\r\n self.var = None\r\n a = Activation()\r\n self.activation_function = getattr(a, activation)\r\n self.data_std = None\r\n self.data_mean = None\r\n self.same_feature = same_feature\r\n\r\n def train(self, data, label, n_class):\r\n assert len(data.shape) > 1\r\n assert len(data) == len(label)\r\n assert len(label.shape) == 1\r\n\r\n data = self.standardize(data)\r\n n_sample, n_feature = np.shape(data)\r\n self.weight = (self.w_range[1] - self.w_range[0]) * np.random.random([n_feature, self.n_node]) + self.w_range[0]\r\n self.bias = (self.b_range[1] - self.b_range[0]) * np.random.random([1, self.n_node]) + self.b_range[0]\r\n \r\n h = self.activation_function(np.dot(data, self.weight) + np.dot(np.ones([n_sample, 1]), self.bias))\r\n d = np.concatenate([h, data], axis=1)\r\n d = np.concatenate([d, np.ones_like(d[:, 0:1])], axis=1)\r\n y = self.one_hot_encoding(label, n_class)\r\n dT_y = np.dot(d.T, y)\r\n dT_d = np.dot(d.T, d)\r\n eigen_val = np.linalg.eigvalsh(dT_d)\r\n\r\n # Initialize variance and precision using Evidence approximation\r\n model = pm.Model()\r\n with model:\r\n p = pm.Gamma('p', alpha=self.alpha_1, beta=self.alpha_2)\r\n v = pm.Gamma('v', alpha=self.alpha_3, beta=self.alpha_4)\r\n b = pm.Normal('b', mu=0, tau=p, shape=(len(d[0]), n_class))\r\n y_obs = pm.Normal('y_obs', mu=pm.math.dot(d, b), tau=v, observed=y)\r\n \r\n map_estimate = pm.find_MAP(model=model)\r\n self.prec, self.var, self.beta = map_estimate['p'].item(0), map_estimate['v'].item(0), map_estimate['b']\r\n\r\n # Iterate to meet convergence criteria\r\n mean_prev = None\r\n for iter_ in range(self.n_iter):\r\n # Posterior update\r\n # update posterior covariance\r\n covar = np.linalg.inv(self.prec * np.identity(dT_d.shape[1]) + dT_d / self.var)\r\n # update posterior mean\r\n mean = np.dot(covar, dT_y) / self.var\r\n\r\n # Hyperparameters update\r\n # update eigenvalues\r\n lam = eigen_val / self.var\r\n # update precision and variance \r\n delta = np.sum(np.divide(lam, lam + self.prec))\r\n self.prec = (delta + 2 * self.alpha_1) / (np.sum(np.square(mean)) + 2 * self.alpha_2)\r\n self.var = (np.sum(np.square(y - np.dot(d, self.beta))) + self.alpha_4) / (n_sample + delta + 2 * self.alpha_3)\r\n\r\n # Check for convergence\r\n if iter_ != 0 and np.sum(np.abs(mean_prev - mean)) < self.tol:\r\n print(\"Convergence after \", str(iter_), \" iterations\")\r\n break\r\n mean_prev = np.copy(mean)\r\n\r\n # Final Posterior update\r\n # update posterior covariance\r\n covar = np.linalg.inv(self.prec * np.identity(dT_d.shape[1]) + dT_d / self.var)\r\n # update posterior mean\r\n self.beta = np.dot(covar, dT_y) / self.var\r\n\r\n def predict(self, data, 
raw_output=False):\r\n data = self.standardize(data) # Normalize\r\n h = self.activation_function(np.dot(data, self.weight) + self.bias)\r\n d = np.concatenate([h, data], axis=1)\r\n d = np.concatenate([d, np.ones_like(d[:, 0:1])], axis=1)\r\n result = self.softmax(np.dot(d, self.beta))\r\n if not raw_output:\r\n result = np.argmax(result, axis=1)\r\n return result\r\n\r\n def eval(self, data, label):\r\n assert len(data.shape) > 1\r\n assert len(data) == len(label)\r\n assert len(label.shape) == 1\r\n \r\n result = self.predict(data, False)\r\n acc = np.sum(np.equal(result, label))/len(label)\r\n return acc\r\n\r\n def one_hot_encoding(self, label, n_class):\r\n y = np.zeros([len(label), n_class])\r\n for i in range(len(label)):\r\n y[i, label[i]] = 1\r\n return y\r\n\r\n def standardize(self, x):\r\n if self.same_feature is True:\r\n if self.data_std is None:\r\n self.data_std = np.maximum(np.std(x), 1/np.sqrt(len(x)))\r\n if self.data_mean is None:\r\n self.data_mean = np.mean(x)\r\n return (x - self.data_mean) / self.data_std\r\n else:\r\n if self.data_std is None:\r\n self.data_std = np.maximum(np.std(x, axis=0), 1/np.sqrt(len(x)))\r\n if self.data_mean is None:\r\n self.data_mean = np.mean(x, axis=0)\r\n return (x - self.data_mean) / self.data_std\r\n\r\n def softmax(self, x):\r\n return np.exp(x) / np.repeat((np.sum(np.exp(x), axis=1))[:, np.newaxis], len(x[0]), axis=1)\r\n\r\nclass Activation:\r\n def sigmoid(self, x):\r\n return 1 / (1 + np.e ** (-x))\r\n \r\n def sine(self, x):\r\n return np.sin(x)\r\n \r\n def sign(self, x):\r\n return np.sign(x)\r\n \r\n def relu(self, x):\r\n return np.maximum(0, x)\r\n\r\nif __name__==\"__main__\":\r\n dataset = loadmat('coil20.mat')\r\n label = np.array([dataset['Y'][i][0] - 1 for i in range(len(dataset['Y']))])\r\n data = dataset['X']\r\n n_class = len(np.unique(label))\r\n\r\n # train-test-split\r\n X_train, X_test, y_train, y_test = train_test_split(data, label, test_size=0.2)\r\n kf = KFold(n_splits=10, shuffle=True)\r\n val_acc = []\r\n max_index = -1\r\n \r\n for i, kf_values in enumerate(kf.split(X_train, y_train)):\r\n # print(f'train: {train_index}, val: {val_index}')\r\n print('Validation: {}'.format(i + 1))\r\n train_index, val_index = kf_values\r\n X_val_train, X_val_test = X_train[train_index], X_train[val_index]\r\n y_val_train, y_val_test = y_train[train_index], y_train[val_index]\r\n brvfl = BRVFL(n_node, w_range, b_range)\r\n brvfl.train(X_val_train, y_val_train, n_class)\r\n prediction = brvfl.predict(X_val_test, True)\r\n acc = brvfl.eval(X_val_test, y_val_test)\r\n print(f'Validation accuracy: {acc}')\r\n val_acc.append(acc)\r\n if acc >= max(val_acc):\r\n max_index = train_index\r\n\r\n X_train, y_train = X_train[max_index], y_train[max_index]\r\n brvfl = BRVFL(n_node, w_range, b_range)\r\n brvfl.train(X_train, y_train, n_class)\r\n prediction = brvfl.predict(X_test, True)\r\n acc = brvfl.eval(X_test, y_test)\r\n print(f'\\nTest accuracy: {acc}')" ]
[ [ "numpy.dot", "numpy.ones_like", "numpy.copy", "numpy.exp", "numpy.mean", "numpy.sign", "numpy.random.random", "numpy.concatenate", "numpy.divide", "numpy.sin", "numpy.argmax", "numpy.square", "numpy.equal", "numpy.shape", "numpy.identity", "numpy.std", "sklearn.model_selection.KFold", "sklearn.model_selection.train_test_split", "scipy.io.loadmat", "numpy.ones", "numpy.linalg.eigvalsh", "numpy.abs", "numpy.unique", "numpy.maximum" ] ]
chanmadeleine/Stint
[ "ec8e6c16cae1603a361e89d09ec92d7e9630afeb" ]
[ "app/main/Pointillism/pointillism_image.py" ]
[ "import pandas as pd\nfrom PIL import Image\n\nclass PointillismImage(object):\n \"\"\"\n Opens an image and provides accessors for aspect ratio and\n JSON formatted pixel data.\n\n \"\"\"\n def __init__(self, f_name):\n \"\"\"\n Initializes with provided filename.\n\n \"\"\"\n self.f_name = f_name\n self.im = self.open_image()\n self.pixel_matrix = self.set_pixel_data()\n\n def open_image(self):\n \"\"\"\n Opens image and sets the Image object to self.im\n\n \"\"\"\n return Image.open(self.f_name)\n\n def set_pixel_data(self):\n \"\"\"\n Gets pixel colors (R,G,B) for all (X, Y)'s. Sets self.pixel_matrix\n with the resulting Data Frame.\n\n \"\"\"\n pix = [[self.im.getpixel((x, y)) for x in range(self.im.size[0])] \\\n for y in range(self.im.size[1])]\n pix_frame = pd.DataFrame(pix)\n return pix_frame\n\n def get_pixel_json(self, height):\n \"\"\"\n Uses height and sets skip to determine which pixels to take,\n then formats the the points needed to plot the image in a list\n of dicts that will be parseable from D3.\n\n \"\"\"\n skip = round(self.im.size[1]/height)\n colors = [{\"x\": x, \"y\": y, \"color\": \"rgba({0}, {1}, {2}, 0.75)\".\\\n format(self.pixel_matrix[x][y][0], self.pixel_matrix[x][y][1], \\\n self.pixel_matrix[x][y][2])} for y in self.pixel_matrix.index \\\n for x in self.pixel_matrix.columns if y % skip == 0 \\\n and x % skip == 0]\n return colors\n \n def get_aspect(self):\n \"\"\"\n Floating point aspect ratio of image.\n \"\"\"\n return self.im.size[0] / float(self.im.size[1])" ]
[ [ "pandas.DataFrame" ] ]
JRyanShue/ldif
[ "fd2cbfbcb752d0e5e19e80b98760bf5bf9d1661f" ]
[ "ldif/training/metrics.py" ]
[ "# Copyright 2020 Google LLC\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# https://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# Lint as: python3\r\n\"\"\"Metrics for evaluating structured implicit functions.\"\"\"\r\n\r\nimport tensorflow as tf\r\n\r\n# LDIF is an internal package, should be imported last.\r\n# pylint: disable=g-bad-import-order\r\nfrom ldif.util import sdf_util\r\n# pylint: enable=g-bad-import-order\r\n\r\n\r\ndef point_iou(structured_implicit, sample_locations, sample_gt, model_config):\r\n \"\"\"Estimates the mesh iou by taking the iou from uniform point samples.\"\"\"\r\n assert model_config.hparams.bs == 1 # Otherwise the result would be wrong\r\n\r\n pred_class, _ = structured_implicit.class_at_samples(sample_locations)\r\n\r\n gt_is_inside = tf.logical_not(sdf_util.apply_class_transfer(\r\n sample_gt, model_config, soft_transfer=False, offset=0.0,\r\n dtype=tf.bool))\r\n pred_is_inside = pred_class < 0.5\r\n intersection = tf.cast(\r\n tf.logical_and(gt_is_inside, pred_is_inside), dtype=tf.float32)\r\n union = tf.cast(tf.logical_or(gt_is_inside, pred_is_inside), dtype=tf.float32)\r\n\r\n iou = tf.divide(tf.reduce_sum(intersection), tf.reduce_sum(union) + 1e-05)\r\n return iou\r\n" ]
[ [ "tensorflow.logical_or", "tensorflow.reduce_sum", "tensorflow.logical_and" ] ]
XiaoyuHuang96/mmdetection
[ "e2ff08b68e2f5907a59976dcedb055036c03eecf" ]
[ "mmdet/datasets/xml_style.py" ]
[ "import os.path as osp\nimport xml.etree.ElementTree as ET\n\nimport mmcv\nimport numpy as np\n\nfrom .custom import CustomDataset\nfrom .registry import DATASETS\n\n\[email protected]_module\nclass XMLDataset(CustomDataset):\n\n def __init__(self, min_size=None, **kwargs):\n super(XMLDataset, self).__init__(**kwargs)\n self.cat2label = {cat: i + 1 for i, cat in enumerate(self.CLASSES)}\n self.min_size = min_size\n\n def load_annotations(self, ann_file):\n img_infos = []\n img_ids = mmcv.list_from_file(ann_file)\n for img_id in img_ids:\n \n xml_path = osp.join(self.img_prefix, 'Annotations',\n '{}.xml'.format(img_id))\n tree = ET.parse(xml_path)\n root = tree.getroot()\n size = root.find('size')\n width = int(size.find('width').text)\n height = int(size.find('height').text)\n\n fn=root.find('filename').text\n\n filename = 'JPEGImages/{}'.format(fn)\n img_infos.append(\n dict(id=img_id, filename=filename, width=width, height=height))\n return img_infos\n\n def get_ann_info(self, idx):\n img_id = self.img_infos[idx]['id']\n xml_path = osp.join(self.img_prefix, 'Annotations',\n '{}.xml'.format(img_id))\n tree = ET.parse(xml_path)\n root = tree.getroot()\n bboxes = []\n labels = []\n bboxes_ignore = []\n labels_ignore = []\n for obj in root.findall('object'):\n name = obj.find('name').text\n label = self.cat2label[name]\n difficult = int(obj.find('difficult').text)\n bnd_box = obj.find('bndbox')\n bbox = [\n int(bnd_box.find('xmin').text),\n int(bnd_box.find('ymin').text),\n int(bnd_box.find('xmax').text),\n int(bnd_box.find('ymax').text)\n ]\n ignore = False\n if self.min_size:\n assert not self.test_mode\n w = bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n if w < self.min_size or h < self.min_size:\n ignore = True\n if difficult or ignore:\n bboxes_ignore.append(bbox)\n labels_ignore.append(label)\n else:\n bboxes.append(bbox)\n labels.append(label)\n if not bboxes:\n bboxes = np.zeros((0, 4))\n labels = np.zeros((0, ))\n else:\n bboxes = np.array(bboxes, ndmin=2) - 1\n labels = np.array(labels)\n if not bboxes_ignore:\n bboxes_ignore = np.zeros((0, 4))\n labels_ignore = np.zeros((0, ))\n else:\n bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1\n labels_ignore = np.array(labels_ignore)\n ann = dict(\n bboxes=bboxes.astype(np.float32),\n labels=labels.astype(np.int64),\n bboxes_ignore=bboxes_ignore.astype(np.float32),\n labels_ignore=labels_ignore.astype(np.int64))\n return ann\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
higlu/sdc-01-findlane
[ "e92336c28d10e548fdf82b68c29d370d48127efd" ]
[ "find_lane.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n@author: Simone Diolaiuti\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nfrom uda_utils import *\nfrom geom_utils import *\n\nfrom sklearn.cluster import KMeans\n\ndef findLane(img):\n \"\"\"\n Args:\n img: a colored img as opened by matplotlib.image.read function\n\n Returns:\n the image overlayed with the identification of the lane \n \"\"\"\n ysize = img.shape[0]\n xsize = img.shape[1]\n gscale_img = grayscale(img)\n blurred_img = gaussianBlur(gscale_img,5)\n canny_img = canny(blurred_img,low_threshold=10,high_threshold=180)\n \n # create a crop region to isolate the area of the image where the road is expected to be\n crop_region = np.array([[(0,ysize), \n (0, ysize*0.95),\n (xsize*0.50, ysize*0.60), \n (xsize, ysize*0.95), \n (xsize,ysize)]],dtype=np.int32)\n \n cropped_img = keepRegionOnly(canny_img,crop_region)\n \n # find the lines through Hough transformation\n hlines = computeHoughLines(img=cropped_img,rho=1,theta=np.pi/180,threshold=5,min_line_len=10,max_line_gap=10) \n drawLines(cropped_img,hlines)\n \n # get the equivalent list of Line2D instances\n elines = enrichLines(hlines)\n \n # remove segments that are too horizontal\n minslope = np.deg2rad(15)\n relevant_lines = [l for l in elines \n if (l.arc2 > minslope or l.arc2 < -minslope)] \n \n \n # from set of Line2D to set of points\n if len(relevant_lines) > 0:\n line_points = np.array(relevant_lines[0].getSamplePoints())\n for l in relevant_lines[1:]:\n # choosing to pick a number of points that is proportional to the length\n # of the line to let the longer lines count more than the short ones\n n_points = l.length/10\n points = l.getSamplePoints(n_points)\n line_points = np.append(line_points,points,axis=0)\n \n # I'm applaying clustering here using two fictitious line to guide the classification\n # of the real points towards left and right lanes\n # The fictitious (anchor) points are then removed after the K-Means clustering \n # has completed\n \n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n left_points = np.empty(shape=(0,2),dtype=np.float32)\n right_points = np.empty_like(left_points)\n \n if len(line_points) > 0:\n anchor_line_left = Line2D.fromAB(0,0,0,ysize-1).getSamplePoints(50)\n anchor_line_right = Line2D.fromAB(xsize-1,0,xsize-1,ysize-1).getSamplePoints(50)\n n_anchor_points = len(anchor_line_left)+len(anchor_line_right)\n line_points = np.append(line_points,anchor_line_left,axis=0)\n line_points = np.append(line_points,anchor_line_right,axis=0)\n\n\n # allowing K-Means start with the initial centroids at the opposite sides\n starting_centroids = np.array([[0,ysize/2],[xsize-1,ysize/2]])\n\n clustered = KMeans(2,starting_centroids).fit(line_points)\n\n\n for i,p in enumerate(line_points[:-n_anchor_points]):\n if clustered.labels_[i] == 0:\n left_points = np.append(left_points,[p],axis=0)\n else:\n right_points = np.append(right_points,[p],axis=0)\n\n drawPoints(line_img,left_points.astype(np.uint32),[255,0,0])\n drawPoints(line_img,right_points.astype(np.uint32),[0,0,255])\n \n # compute polinomial regression over left points and draw it (in red)\n rpl = computeRegressionPoints(left_points,(0,xsize/2))\n drawPolyline(line_img,rpl.astype(np.uint32),[255,0,0],4)\n \n # compute polinomial regression over right points and draw it (in blue)\n rpl = computeRegressionPoints(right_points,(xsize/2,xsize))\n drawPolyline(line_img,rpl.astype(np.uint32),[0,0,255],4)\n \n #overlap lane lines over original image\n blended_img = 
np.zeros_like(line_img)\n blended_img = blendImg(img,line_img,0.8,0.5)\n \n final_img = blended_img\n \n return final_img\n\n\n\n\n\n\n" ]
[ [ "numpy.array", "numpy.zeros_like", "numpy.empty", "numpy.zeros", "sklearn.cluster.KMeans", "numpy.append", "numpy.deg2rad", "numpy.empty_like" ] ]
001honi/vc-cycle-gan
[ "a03f8e3cd08d8d9c3ace551464ac96d2c20eed28" ]
[ "convert.py" ]
[ "import argparse\nimport os\nimport numpy as np\n\nfrom model import CycleGAN\nfrom preprocess import *\nfrom hyparams import *\n\ndef conversion(model_dir, model_name, data_dir, conversion_direction, output_dir):\n\n num_features = 24\n sampling_rate = 16000\n frame_period = 5.0\n\n model = CycleGAN(num_features = num_mcep, mode = 'test')\n\n model.load(filepath = os.path.join(model_dir, model_name))\n\n mcep_normalization_params = np.load(os.path.join(model_dir, 'mcep_normalization.npz'))\n mcep_mean_A = mcep_normalization_params['mean_A']\n mcep_std_A = mcep_normalization_params['std_A']\n mcep_mean_B = mcep_normalization_params['mean_B']\n mcep_std_B = mcep_normalization_params['std_B']\n\n logf0s_normalization_params = np.load(os.path.join(model_dir, 'logf0s_normalization.npz'))\n logf0s_mean_A = logf0s_normalization_params['mean_A']\n logf0s_std_A = logf0s_normalization_params['std_A']\n logf0s_mean_B = logf0s_normalization_params['mean_B']\n logf0s_std_B = logf0s_normalization_params['std_B']\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n for file in os.listdir(data_dir):\n\n filepath = os.path.join(data_dir, file)\n wav, _ = librosa.load(filepath, sr = sampling_rate, mono = True)\n wav = wav_padding(wav = wav, sr = sampling_rate, frame_period = frame_period, multiple = 4)\n f0, timeaxis, sp, ap = world_decompose(wav = wav, fs = sampling_rate, frame_period = frame_period)\n coded_sp = world_encode_spectral_envelop(sp = sp, fs = sampling_rate, dim = num_features)\n coded_sp_transposed = coded_sp.T\n\n if conversion_direction == 'A2B':\n f0_converted = pitch_conversion(f0 = f0, mean_log_src = logf0s_mean_A, std_log_src = logf0s_std_A, mean_log_target = logf0s_mean_B, std_log_target = logf0s_std_B)\n #f0_converted = f0\n coded_sp_norm = (coded_sp_transposed - mcep_mean_A) / mcep_std_A\n coded_sp_converted_norm = model.test(inputs = np.array([coded_sp_norm]), direction = conversion_direction)[0]\n coded_sp_converted = coded_sp_converted_norm * mcep_std_B + mcep_mean_B\n else:\n f0_converted = pitch_conversion(f0 = f0, mean_log_src = logf0s_mean_B, std_log_src = logf0s_std_B, mean_log_target = logf0s_mean_A, std_log_target = logf0s_std_A)\n #f0_converted = f0\n coded_sp_norm = (coded_sp_transposed - mcep_mean_B) / mcep_std_B\n coded_sp_converted_norm = model.test(inputs = np.array([coded_sp_norm]), direction = conversion_direction)[0]\n coded_sp_converted = coded_sp_converted_norm * mcep_std_A + mcep_mean_A\n\n coded_sp_converted = coded_sp_converted.T\n coded_sp_converted = np.ascontiguousarray(coded_sp_converted)\n decoded_sp_converted = world_decode_spectral_envelop(coded_sp = coded_sp_converted, fs = sampling_rate)\n wav_transformed = world_speech_synthesis(f0 = f0_converted, decoded_sp = decoded_sp_converted, ap = ap, fs = sampling_rate, frame_period = frame_period)\n librosa.output.write_wav(os.path.join(output_dir, os.path.splitext(os.path.basename(file))[0]+\"-CONV.mp3\"), wav_transformed, sampling_rate)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description = 'Convert voices using pre-trained CycleGAN model.')\n\n parser.add_argument('--model_dir', type = str, help = 'Directory for the pre-trained model.', default = model_dir_default)\n parser.add_argument('--model_name', type = str, help = 'Filename for the pre-trained model.', default = model_name_default)\n parser.add_argument('--data_dir', type = str, help = 'Directory for the voices for conversion.', default = data_dir_default)\n parser.add_argument('--conversion_direction', 
type = str, help = 'Conversion direction for CycleGAN. A2B or B2A. The first object in the model file name is A, and the second object in the model file name is B.', default = conversion_direction_default)\n parser.add_argument('--output_dir', type = str, help = 'Directory for the converted voices.', default = conv_output_dir_default)\n\n argv = parser.parse_args()\n\n model_dir = argv.model_dir\n model_name = argv.model_name\n data_dir = argv.data_dir\n conversion_direction = argv.conversion_direction\n output_dir = argv.output_dir\n\n conversion(model_dir = model_dir, model_name = model_name, data_dir = data_dir, conversion_direction = conversion_direction, output_dir = output_dir)\n\n\n" ]
[ [ "numpy.ascontiguousarray", "numpy.array" ] ]
wbaek/pytorch
[ "26fd4647a777beeef81cce78eaf6279739e4aeb9" ]
[ "torch/distributed/optim/zero_redundancy_optimizer.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport collections\nimport copy\nimport io\nfrom collections import OrderedDict\nfrom itertools import chain\nfrom typing import Any, Callable, Dict, List, Optional, Type\n\nimport torch\nimport torch.distributed as dist\nfrom torch.nn import Parameter\nfrom torch.optim import Optimizer\nimport logging\n\n__all__ = [\"ZeroRedundancyOptimizer\"]\n\n\n# Credits: classy_vision/generic/distributed_util.py\ndef _recursive_copy_to_device(value: Any, non_blocking: bool, device: torch.device) -> Any:\n r\"\"\"\n Recursively searches lists, tuples, dicts and copies tensors to device if\n possible. Non-tensor values are passed as-is in the result.\n\n .. note: These are all copies, so if there are two objects that reference\n the same object, then after this call, there will be two different objects\n referenced on the device.\n \"\"\"\n\n if isinstance(value, torch.Tensor):\n return value.to(device, non_blocking=non_blocking)\n\n if isinstance(value, (list, tuple)):\n values = [_recursive_copy_to_device(val, non_blocking=non_blocking, device=device) for val in value]\n return values if isinstance(value, list) else tuple(values)\n\n if isinstance(value, collections.abc.Mapping):\n return {\n key: _recursive_copy_to_device(val, non_blocking=non_blocking, device=device) for key, val in value.items()\n }\n\n return value\n\n\ndef _is_trainable(param: torch.Tensor) -> bool:\n return param.requires_grad\n\n\ndef _broadcast_object(\n obj: Any,\n src_rank: int,\n group: object = dist.group.WORLD,\n dist_device: torch.device = torch.device(\"cpu\"),\n) -> Any:\n r\"\"\"\n Either broadcast from master to the fleet (default),\n or use the src setting as the original rank.\n \"\"\"\n\n if dist.get_rank() == src_rank:\n # Emit data\n buffer = io.BytesIO()\n torch.save(obj, buffer)\n data = bytearray(buffer.getbuffer())\n length_tensor = torch.LongTensor([len(data)]).to(dist_device)\n data_send_tensor = torch.ByteTensor(data).to(dist_device)\n dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False)\n dist.broadcast(data_send_tensor, src=src_rank, group=group, async_op=False)\n else:\n # Fetch from the source\n length_tensor = torch.LongTensor([0]).to(dist_device)\n dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False)\n data_recv_tensor = torch.empty([int(length_tensor.item())], dtype=torch.uint8, device=dist_device)\n dist.broadcast(data_recv_tensor, src=src_rank, group=group, async_op=False)\n buffer = io.BytesIO(data_recv_tensor.cpu().numpy())\n obj = torch.load(buffer, map_location=dist_device)\n return obj\n\n\ndef _get_global_rank(group: Any, rank: int) -> int:\n return rank if group is dist.group.WORLD else dist.distributed_c10d._get_global_rank(group, rank)\n\n\nclass ZeroRedundancyOptimizer(Optimizer):\n r\"\"\"\n This class wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`\n and shards its states across ranks in the group as described by\n ZeRO_. The optimizer instance in each rank is only responsible for\n updating ``1 / world_size`` parameters and hence only needs to keep\n ``1 / world_size`` optimizer states. After parameters are updated locally,\n each rank will broadcast its parameters to all other peers to keep all\n model replicas in the same state. 
``ZeroRedundancyOptimizer`` can be used\n in conjunction with :class:`torch.nn.parallel.DistributedDataparallel` to\n reduce per-rank peak memory consumption.\n\n ``ZeroRedundancyOptimizer`` use a greedy algorithm to pack a number of\n parameters at each rank. Each parameter belongs to a single rank and is not\n divided among ranks. The partition is arbitrary and might not match the\n the parameter registration or usage order.\n\n Arguments:\n params (``Iterable``): an ``Iterable`` of :class:`torch.Tensor` s\n\n Keyword Args:\n optimizer_class (:class:`torch.nn.Optimizer`): the class of the local\n optimizer.\n group (``ProcessGroup``, optional): ``torch.distributed``\n ``ProcessGroup`` (default: ``group.WORLD`` initialized by\n :meth:`torch.distributed.init_process_group`).\n parameters_as_bucket_views (bool): when enabled, parameters will\n be packed into larger buckets to speed up communication and\n ``param.data`` fields will point to bucket views at different\n offsets. When disabled, each individual parameter will be\n communicated separately, but ``params.data`` will stay intact.\n **default: all trailing arguments will be forwarded to the given optimizer.\n\n Example::\n\n >>> import torch.nn as nn\n >>> from torch.distributed.optim import ZeroRedundancyOptimizer\n >>> from torch.nn.parallel import DistributedDataParallel as DDP\n\n >>> model = nn.Sequential(*[nn.Linear(2000, 2000).to(rank) for _ in range(20)])\n >>> ddp = DDP(model, device_ids=[rank])\n >>> opt = ZeroRedundancyOptimizer(\n >>> ddp.parameters(),\n >>> optimizer_class=torch.optim.Adam,\n >>> lr=0.01\n >>> )\n >>> ddp(inputs).sum().backward()\n >>> opt.step()\n\n .. warning: ZeroRedundancyOptimizer is experimental and subject to change.\n\n .. _ZeRO: https://arxiv.org/abs/1910.02054\n\n \"\"\"\n\n def __init__(\n self,\n params,\n optimizer_class: Type[Optimizer],\n group: Optional[Any] = None,\n parameters_as_bucket_view: bool = False,\n **default: Any,\n ):\n # Hold all the model params in the root .param_groups\n # NOTE: the default constructor uses `add_param_group` which is partially overloaded here\n # we introduce the `initialized` flag for be able to dissociate the behaviour of\n # `add_param_group` in between super() and ZeroRedundancyOptimizer\n self.initialized = False\n super().__init__(params, default)\n\n # Partition information. 
lazy evaluation, computed if requested\n self._per_device_params_cache: \"OrderedDict[torch.device, List[List[Parameter]]]\" = (\n OrderedDict()\n ) # device, rank, params\n self._param_rank_cache: Dict[torch.Tensor, int] = {}\n self._param_to_index_cache: Dict[int, int] = {}\n self._partition_parameters_cache: List[List[Dict]] = []\n self._index_to_param_cache: Dict[int, torch.Tensor] = {}\n self._all_params = params\n self._reference_is_trainable_mask = list(map(_is_trainable, self._all_params))\n\n # Build the wrapped optimizer, responsible for a shard of the params\n self.group = group if group is not None else dist.group.WORLD\n self.world_size = dist.get_world_size(self.group)\n self.rank = dist.get_rank(self.group)\n self.global_rank = _get_global_rank(self.group, self.rank)\n self.parameters_as_bucket_view = parameters_as_bucket_view\n\n self._optim_defaults = default\n self._optim_constructor = optimizer_class\n\n # Optional consolidated optimizer state\n self._all_states: List[Dict[str, Any]] = []\n\n # Current default device is set by the parameters allocated to this rank\n self._device = list(self._per_device_params.keys())[0]\n self.buckets: Dict[torch.device, List[torch.Tensor]] = {}\n\n self._update_trainable()\n self.initialized = True\n\n def _clear_cache(self) -> None:\n self._partition_parameters_cache.clear()\n self._per_device_params_cache.clear()\n self._param_rank_cache.clear()\n self._index_to_param_cache.clear()\n self._param_to_index_cache.clear()\n\n def add_param_group(self, param_group: dict) -> None:\n r\"\"\"\n Add a param group to the :class:`Optimizer` s ``param_groups``.\n\n This can be useful when fine tuning a pre-trained network, as frozen\n layers can be made trainable and added to the :class:`Optimizer` as\n training progresses.\n\n Arguments:\n param_group (dict): Specifies what Tensors should be optimized\n along with group specific optimization options.\n\n .. warning: This method handles updating the shards on all partitions,\n but needs to be called on all ranks. Calling this on a subset of the\n ranks will cause the training to hang, because communication\n primitives are called depending on the managed parameters, and\n expect all the ranks to participate on the sane set of parameters.\n \"\"\"\n\n super().add_param_group(param_group)\n if self.initialized:\n # Force a re-partitioning\n self._clear_cache()\n\n param_groups = self.partition_parameters()[self.rank]\n if len(param_groups) == len(self.optim.param_groups) + 1:\n self.optim.add_param_group(param_groups[-1])\n\n # Update the bucketing strategy accordingly\n if self.parameters_as_bucket_view:\n self._setup_flat_buffers()\n\n def consolidate_state_dict(self, to: int = 0) -> None:\n r\"\"\"\n Update the consolidated state_dict list, one per rank.\n\n Arguments:\n to (int): the rank that receives the global states. (default: 0)\n\n .. 
warning: This needs to be called on all replicas\n \"\"\"\n\n # Sync lr and other attributes in case its been updated\n self._sync_param_groups(self.param_groups, self.optim.param_groups)\n\n empty_messenger = torch.tensor([0], dtype=torch.uint8, device=self._device)\n\n # Pull the sharded state from all the other replicas\n # Store all the states in order, rank by rank\n\n # NOTE: In practice, `broadcast` is used, which is wasteful (gather would have been appropriate)\n # compatibility issues with some backends make the use of broadcast mandatory for now.\n # a possible follow up would be to move all sharded state management to RPC RRef\n\n self._all_states = []\n for rank in range(self.world_size):\n global_rank = _get_global_rank(self.group, rank)\n\n # This rank collects the whole state\n if self.rank == to:\n if rank == self.rank:\n self._all_states.append(\n _recursive_copy_to_device(\n self.local_state_dict(),\n non_blocking=True,\n device=torch.device(\"cpu\"),\n )\n )\n else:\n # Fetch the optim state from the other replicas\n replica_state = _broadcast_object(\n empty_messenger,\n src_rank=global_rank,\n group=self.group,\n dist_device=self._device,\n )\n\n self._all_states.append(\n _recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device(\"cpu\"))\n )\n else:\n # Acknowledge broadcasts, and send this rank's shard when needed\n # Default to CPU space to gain some memory headroom\n if rank == self.rank:\n # Send the state to the reference replica\n _ = _broadcast_object(\n self.local_state_dict(),\n src_rank=self.global_rank,\n group=self.group,\n dist_device=self._device,\n )\n\n elif rank != to:\n # Discard this tensor/rank, broadcast was being use for compatibility reasons\n _ = _broadcast_object(\n empty_messenger,\n src_rank=global_rank,\n group=self.group,\n dist_device=self._device,\n )\n\n def partition_parameters(self) -> List[List[Dict]]:\n r\"\"\"\n Partitions parameters across distributed data parallel ranks.\n\n Returns:\n a list of ``param_groups`` (which is a list of dict) where each\n element of the list contains the param_groups for a rank. Element 0\n corresponds to rank 0, etc. 
We need all the ranks for the broadcast\n inside ``step()``.\n \"\"\"\n if len(self._partition_parameters_cache) == 0:\n self._partition_parameters_cache = [list() for _ in range(self.world_size)]\n sizes = [0] * self.world_size\n for param_group in self.param_groups:\n param_lists: List[List] = [list() for _ in range(self.world_size)]\n # Sort the params by size (largest first)\n params_sorted = sorted(param_group[\"params\"], key=lambda t: t.size()[0], reverse=True)\n for param in params_sorted:\n # Add this param to rank with smallest size.\n rank = sizes.index(min(sizes))\n param_lists[rank].append(param)\n sizes[rank] += param.numel()\n\n for rank, params in enumerate(param_lists):\n param_group_rank = copy.copy(param_group)\n param_group_rank[\"params\"] = params\n self._partition_parameters_cache[rank].append(param_group_rank)\n\n return self._partition_parameters_cache\n\n @property\n def _per_device_params(self) -> Dict[torch.device, List[List[Parameter]]]:\n r\"\"\"\n Sorted list of all the params, first per device then per rank.\n\n Within a list params are sorted per number of elements to allow for an easy bucketing.\n \"\"\"\n if len(self._per_device_params_cache) == 0:\n # Go through all params, log them per device\n # The ordering is important here, needs to be the same on all ranks\n # So that ulterior broadcast calls are matching\n for param_group in self.param_groups:\n for param in param_group[\"params\"]:\n device = param.device\n if self._per_device_params_cache.get(device) is None:\n self._per_device_params_cache[device] = [[] for _ in range(self.world_size)]\n self._per_device_params_cache[device][self._param_to_rank[param]] += [param]\n\n # Sort param_lists by size\n for k in self._per_device_params_cache.keys():\n for r in self._per_device_params_cache[k]:\n r.sort(key=lambda x: x.numel())\n\n return self._per_device_params_cache\n\n @property\n def _param_to_rank(self) -> Dict[torch.Tensor, int]:\n r\"\"\"Look up table to match a given param with a data parallel rank\"\"\"\n if len(self._param_rank_cache) == 0:\n for rank, param_groups in enumerate(self.partition_parameters()):\n for param_group in param_groups:\n for param in param_group[\"params\"]:\n self._param_rank_cache[param] = rank\n return self._param_rank_cache\n\n @property\n def _param_to_index(self) -> Dict[int, int]:\n r\"\"\"\n Hash table in between parameter indices in the global optimizer scheme,\n and the actual params.\n \"\"\"\n if len(self._param_to_index_cache) == 0:\n self._param_to_index_cache = {\n id(p): i for i, p in enumerate(chain(*(g[\"params\"] for g in self.param_groups)))\n }\n\n return self._param_to_index_cache\n\n @property\n def _index_to_param(self) -> Dict[int, torch.Tensor]:\n r\"\"\"\n Hash table in between parameter indices in the global optimizer scheme,\n and the actual params.\n \"\"\"\n if len(self._index_to_param_cache) == 0:\n self._index_to_param_cache = {i: p for i, p in enumerate(chain(*(g[\"params\"] for g in self.param_groups)))}\n\n return self._index_to_param_cache\n\n def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:\n r\"\"\"\n Performs a single optimization step (parameter update).\n\n Arguments:\n closure (callable): A closure that reevaluates the model and\n returns the loss. Optional for most optimizers.\n Returns:\n optional loss, depends on the underlying optimizer\n\n .. 
note: Any extra parameter is passed to the base optimizer as-is\n \"\"\"\n\n # Check whether the model trainability graph changed\n trainable_mask = list(map(_is_trainable, self._all_params))\n if trainable_mask != self._reference_is_trainable_mask:\n logging.warning(\n \"ZeroRedundancyOptimizer detected that the trainable params changed, updating the partitioning\"\n )\n self._update_trainable()\n self._reference_is_trainable_mask = trainable_mask\n\n # Sync oss param_groups attributes in case they've been updated by a scheduler.\n self._sync_param_groups(self.param_groups, self.optim.param_groups)\n\n # Run the optimizer step on this shard only:\n if closure is not None:\n loss = self.optim.step(closure=closure, **kwargs) # type: ignore[call-arg]\n else:\n loss = self.optim.step(**kwargs)\n\n # Sync all the updated shards in between the ranks\n handles = []\n if self.parameters_as_bucket_view:\n for device in self.buckets.keys():\n for src_rank, bucket in enumerate(self.buckets[device]):\n global_src_rank = _get_global_rank(self.group, src_rank)\n handles.append(dist.broadcast(tensor=bucket, src=global_src_rank, group=self.group, async_op=True))\n else:\n for device, per_rank_params in self._per_device_params.items():\n for dst_rank, params in enumerate(per_rank_params):\n global_dst_rank = _get_global_rank(self.group, dst_rank)\n for param in params:\n handles.append(\n dist.broadcast(tensor=param.data, src=global_dst_rank, group=self.group, async_op=True)\n )\n\n _ = list(map(lambda x: x.wait(), handles))\n\n # Sync hypothethical new results from the wrapped optimizer to the exposed param_groups\n self._sync_param_groups(self.optim.param_groups, self.param_groups)\n\n return loss\n\n def load_state_dict(self, state_dict: Dict[str, Any]) -> None:\n r\"\"\"\n Restore the global parameter groups as well as the shard.\n\n Arguments:\n state_dict (dict): optimizer state. Should be an object returned\n from a call to :meth:`state_dict`\n \"\"\"\n\n for key, value in state_dict[\"state\"].items():\n param = self._index_to_param[key]\n\n # Populate the sharded optimizer state on the fly\n if self._param_to_rank[param] != self.rank:\n state_dict[\"state\"][key] = None\n else:\n self.optim.state[param] = _recursive_copy_to_device(value, non_blocking=True, device=param.device)\n\n super().load_state_dict(state_dict)\n\n # Sync with the optimizer param groups\n ZeroRedundancyOptimizer._sync_param_groups(state_dict[\"param_groups\"], self.param_groups)\n ZeroRedundancyOptimizer._sync_param_groups(self.param_groups, self.optim.param_groups)\n\n def local_state_dict(self) -> Dict:\n r\"\"\"\n Gets this rank's ``state_dict``.\n\n Returns:\n The state of the optimizer as a :class:`dict`.\n It contains two entries:\n\n * state - a dict holding current optimization state. Its content\n differs between optimizer classes.\n * param_groups - a dict containing all parameter groups\n \"\"\"\n return self.optim.state_dict()\n\n def state_dict(self) -> Dict[str, Any]:\n r\"\"\"\n Returns:\n the last known global optimizer state, which consist of a list of\n the shards.\n\n .. warning:\n If the state has not been consolidated, this returns a shard's worth,\n not the global state.\n\n .. warning:\n Returning the global state is limited to the replica which was\n responsible for the consolidation. 
The state may also not be up to\n date, depending on when :meth:`consolidate_state_dict` was last called.\n \"\"\"\n\n if len(self._all_states) == 0:\n raise RuntimeError(\n \"Optimizer state has not been consolidated on this rank. \\\n Please call `consolidate_state_dict()` on all ranks beforehand if you meant to save the global state\"\n )\n\n # Unify the shard states and the state that pytorch would expect, given the model.\n # Indexation needs several redirections, since each shard only knows a limited scope of the model\n # - get the pytorch compliant parameter indexing\n state_dict = super().state_dict()\n\n # - go through the per-shard states, which are all indexed locally\n for rank, s in enumerate(self._all_states):\n # -- match the local indexing and the global partition, update the corresponding saved state globally\n for local_pg, global_pg in zip(s[\"param_groups\"], self.partition_parameters()[rank]):\n local_index_to_param_id = {\n i_param: id(global_pg[\"params\"][i]) for i, i_param in enumerate(local_pg[\"params\"])\n }\n\n for local_param_index in local_pg[\"params\"]:\n # Update the state, if any\n if local_param_index in s[\"state\"].keys():\n global_id = self._param_to_index[local_index_to_param_id[local_param_index]]\n state_dict[\"state\"][global_id] = s[\"state\"][local_param_index]\n\n # Make sure that the parameters are sorted in the state, as expected\n state_dict[\"state\"] = dict(sorted(state_dict[\"state\"].items()))\n return state_dict\n\n @staticmethod\n def rank_local_state_dict(rank: int, state_dict: dict) -> dict:\n r\"\"\"\n Returns the local_state_dict for a given rank.\n\n Arguments:\n rank (int): rank to get ``local_state_dict`` for\n state_dict (dict): global ``state_dict``\n \"\"\"\n param_groups = state_dict[\"param_groups\"][state_dict[\"partition\"][rank][0] : state_dict[\"partition\"][rank][1]]\n return {\"state\": state_dict[\"state\"][rank], \"param_groups\": param_groups}\n\n @staticmethod\n def _sync_param_groups(source: List[Dict[Any, Any]], destination: List[Dict[Any, Any]]) -> None:\n r\"\"\"Sync learning rate and other optimizer attributes (needed to support schedulers).\"\"\"\n\n for source_group, destination_group in zip(source, destination):\n # Sync everything but the parameters\n for k in filter(lambda x: x != \"params\", source_group.keys()):\n destination_group[k] = source_group[k]\n\n def _setup_flat_buffers(self) -> None:\n r\"\"\"\n Make all params which are on the same device and tied to the same rank\n views of a single buffer. 
This is used at construction time, and anytime\n parameter trainability is changed (frozen or unfrozen) and\n ``_update_trainable`` is called.\n \"\"\"\n\n for device, per_rank_params in self._per_device_params.items():\n # Only wipe the existing buckets if there are none\n # (could be that this is called twice, when trainability changes)\n if device not in self.buckets.keys():\n self.buckets[device] = []\n\n # Make parameters a view of the bucket\n for dst_rank, params in enumerate(per_rank_params):\n if len(params) > 0:\n\n # Clone the non-trainable params, if in a bucket it will get destroyed\n for param in filter(lambda x: not x.requires_grad, params):\n param.data = param.data.detach().clone()\n\n # Merge all the trainable params in a single bucket\n trainable_params = list(filter(_is_trainable, params))\n buffer_size = sum(map(lambda x: x.numel(), trainable_params))\n bucket = torch.empty(buffer_size, dtype=params[0].dtype, device=device)\n offset = 0\n\n for param in trainable_params:\n offset_next = offset + param.numel()\n bucket[offset:offset_next].copy_(param.data.flatten())\n param.data = bucket[offset:offset_next].view_as(param.data)\n offset = offset_next\n\n # Either replace the existing bucket, or create it\n if len(self.buckets[device]) == dst_rank:\n self.buckets[device].append(bucket)\n else:\n self.buckets[device][dst_rank] = bucket\n else:\n self.buckets[device].append(torch.zeros(1, device=device))\n\n def _update_trainable(self) -> None:\n r\"\"\"\n Updates the partitioning and communication patterns if the trainability\n (``requires_grad``) of some parameters changed.\n \"\"\"\n\n # Create the optim which will work on the param shard\n if not hasattr(self, \"optim\"):\n self._clear_cache()\n self._default_device = list(self._per_device_params.keys())[0]\n self.optim = self._optim_constructor(self.partition_parameters()[self.rank], **self._optim_defaults)\n self._sync_param_groups(self.optim.param_groups, self.param_groups)\n\n if self.parameters_as_bucket_view:\n self._setup_flat_buffers()\n" ]
[ [ "torch.zeros", "torch.device", "torch.distributed.get_world_size", "torch.save", "torch.ByteTensor", "torch.tensor", "torch.LongTensor", "torch.load", "torch.distributed.get_rank", "torch.empty", "torch.distributed.distributed_c10d._get_global_rank", "torch.distributed.broadcast" ] ]
gmijenes/autogoal
[ "916b0eb4d1aa1a222d0ff1b0f6f202bf56458ef5" ]
[ "autogoal/datasets/wine_quality.py" ]
[ "# coding: utf-8\r\n\r\nimport numpy as np\r\nimport os\r\n\r\nfrom autogoal.datasets import download, datapath\r\n\r\n\r\ndef load(white=True, red=True, max_examples=None):\r\n if not red and not white:\r\n raise ValueError(\"Either red or white must be selected\")\r\n\r\n download(\"wine_quality\")\r\n\r\n f_white = open(datapath(\"wine_quality\") / \"winequality-white.csv\", \"r\")\r\n f_red = open(datapath(\"wine_quality\") / \"winequality-red.csv\", \"r\")\r\n\r\n X = []\r\n y = []\r\n\r\n if white:\r\n title_line = True\r\n for i in f_white.readlines():\r\n\r\n if max_examples and len(X) >= max_examples:\r\n break\r\n\r\n if title_line == True:\r\n title_line = False\r\n continue\r\n\r\n clean_line = i.strip().split(\";\")\r\n\r\n X.append([1, 0] + [float(i) for i in clean_line[:-1]])\r\n y.append(float(clean_line[-1]))\r\n\r\n if red:\r\n title_line = True\r\n for i in f_red.readlines():\r\n\r\n if max_examples and len(X) >= max_examples:\r\n break\r\n\r\n if title_line == True:\r\n title_line = False\r\n continue\r\n\r\n clean_line = i.strip().split(\";\")\r\n\r\n X.append([0, 1] + [float(i) for i in clean_line[:-1]])\r\n y.append(float(clean_line[-1]))\r\n\r\n return np.asarray(X), np.asarray(y)\r\n" ]
[ [ "numpy.asarray" ] ]
foamliu/MobileFaceNet-v2
[ "9f84e3191539eb7c1f71b54816d686c2ed058994" ]
[ "lfw_eval.py" ]
[ "import math\nimport os\nimport pickle\nimport tarfile\nimport time\n\nimport cv2 as cv\nimport numpy as np\nimport scipy.stats\nimport torch\nfrom matplotlib import pyplot as plt\nfrom torchvision import transforms\nfrom tqdm import tqdm\nfrom mobilefacenet import MobileFaceNet\nfrom config import device\nfrom data_gen import data_transforms\nfrom utils import align_face, get_central_face_attributes, get_all_face_attributes, draw_bboxes\n\nangles_file = 'data/angles.txt'\nlfw_pickle = 'data/lfw_funneled.pkl'\n\n\ndef extract(filename):\n with tarfile.open(filename, 'r') as tar:\n tar.extractall('data')\n\n\ndef process():\n subjects = [d for d in os.listdir('data/lfw_funneled') if os.path.isdir(os.path.join('data/lfw_funneled', d))]\n assert (len(subjects) == 5749), \"Number of subjects is: {}!\".format(len(subjects))\n\n file_names = []\n for i in tqdm(range(len(subjects))):\n sub = subjects[i]\n folder = os.path.join('data/lfw_funneled', sub)\n files = [f for f in os.listdir(folder) if\n os.path.isfile(os.path.join(folder, f)) and f.lower().endswith('.jpg')]\n for file in files:\n filename = os.path.join(folder, file)\n file_names.append({'filename': filename, 'class_id': i, 'subject': sub})\n\n assert (len(file_names) == 13233), \"Number of files is: {}!\".format(len(file_names))\n\n samples = []\n for item in tqdm(file_names):\n filename = item['filename']\n class_id = item['class_id']\n sub = item['subject']\n\n try:\n bboxes, landmarks = get_central_face_attributes(filename)\n\n samples.append(\n {'class_id': class_id, 'subject': sub, 'full_path': filename, 'bounding_boxes': bboxes,\n 'landmarks': landmarks})\n except KeyboardInterrupt:\n raise\n except Exception as err:\n print(err)\n\n with open(lfw_pickle, 'wb') as file:\n save = {\n 'samples': samples\n }\n pickle.dump(save, file, pickle.HIGHEST_PROTOCOL)\n\n\ndef get_image(samples, transformer, file):\n filtered = [sample for sample in samples if file in sample['full_path'].replace('\\\\', '/')]\n assert (len(filtered) == 1), 'len(filtered): {} file:{}'.format(len(filtered), file)\n sample = filtered[0]\n full_path = sample['full_path']\n landmarks = sample['landmarks']\n img = align_face(full_path, landmarks) # BGR\n img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n img = transforms.ToPILImage()(img)\n img = transformer(img)\n img = img.to(device)\n return img\n\n\ndef evaluate(model):\n model.eval()\n\n with open(lfw_pickle, 'rb') as file:\n data = pickle.load(file)\n\n samples = data['samples']\n\n filename = 'data/lfw_test_pair.txt'\n with open(filename, 'r') as file:\n lines = file.readlines()\n\n transformer = data_transforms['val']\n\n angles = []\n\n start = time.time()\n with torch.no_grad():\n for line in tqdm(lines):\n tokens = line.split()\n file0 = tokens[0]\n img0 = get_image(samples, transformer, file0)\n file1 = tokens[1]\n img1 = get_image(samples, transformer, file1)\n imgs = torch.zeros([2, 1, 112, 112], dtype=torch.float, device=device)\n imgs[0] = img0\n imgs[1] = img1\n\n output = model(imgs)\n\n feature0 = output[0].cpu().numpy()\n feature1 = output[1].cpu().numpy()\n x0 = feature0 / np.linalg.norm(feature0)\n x1 = feature1 / np.linalg.norm(feature1)\n cosine = np.dot(x0, x1)\n cosine = np.clip(cosine, -1.0, 1.0)\n theta = math.acos(cosine)\n theta = theta * 180 / math.pi\n is_same = tokens[2]\n angles.append('{} {}\\n'.format(theta, is_same))\n\n elapsed_time = time.time() - start\n print('elapsed time(sec) per image: {}'.format(elapsed_time / (6000 * 2)))\n\n with open('data/angles.txt', 'w') as file:\n 
file.writelines(angles)\n\n\ndef visualize(threshold):\n with open(angles_file) as file:\n lines = file.readlines()\n\n ones = []\n zeros = []\n\n for line in lines:\n tokens = line.split()\n angle = float(tokens[0])\n type = int(tokens[1])\n if type == 1:\n ones.append(angle)\n else:\n zeros.append(angle)\n\n bins = np.linspace(0, 180, 181)\n\n plt.hist(zeros, bins, density=True, alpha=0.5, label='0', facecolor='red')\n plt.hist(ones, bins, density=True, alpha=0.5, label='1', facecolor='blue')\n\n mu_0 = np.mean(zeros)\n sigma_0 = np.std(zeros)\n y_0 = scipy.stats.norm.pdf(bins, mu_0, sigma_0)\n plt.plot(bins, y_0, 'r--')\n mu_1 = np.mean(ones)\n sigma_1 = np.std(ones)\n y_1 = scipy.stats.norm.pdf(bins, mu_1, sigma_1)\n plt.plot(bins, y_1, 'b--')\n plt.xlabel('theta')\n plt.ylabel('theta j Distribution')\n plt.title(\n r'Histogram : mu_0={:.4f},sigma_0={:.4f}, mu_1={:.4f},sigma_1={:.4f}'.format(mu_0, sigma_0, mu_1, sigma_1))\n\n print('threshold: ' + str(threshold))\n print('mu_0: ' + str(mu_0))\n print('sigma_0: ' + str(sigma_0))\n print('mu_1: ' + str(mu_1))\n print('sigma_1: ' + str(sigma_1))\n\n plt.legend(loc='upper right')\n plt.plot([threshold, threshold], [0, 0.05], 'k-', lw=2)\n plt.savefig('images/theta_dist.png')\n plt.show()\n\n\ndef accuracy(threshold):\n with open(angles_file) as file:\n lines = file.readlines()\n\n wrong = 0\n for line in lines:\n tokens = line.split()\n angle = float(tokens[0])\n type = int(tokens[1])\n if type == 1:\n if angle > threshold:\n wrong += 1\n else:\n if angle <= threshold:\n wrong += 1\n\n accuracy = 1 - wrong / 6000\n return accuracy\n\n\ndef show_bboxes(folder):\n with open(lfw_pickle, 'rb') as file:\n data = pickle.load(file)\n\n samples = data['samples']\n for sample in tqdm(samples):\n full_path = sample['full_path']\n bounding_boxes = sample['bounding_boxes']\n landmarks = sample['landmarks']\n img = cv.imread(full_path)\n img = draw_bboxes(img, bounding_boxes, landmarks)\n filename = os.path.basename(full_path)\n filename = os.path.join(folder, filename)\n cv.imwrite(filename, img)\n\n\ndef error_analysis(threshold):\n with open(angles_file) as file:\n angle_lines = file.readlines()\n\n fp = []\n fn = []\n for i, line in enumerate(angle_lines):\n tokens = line.split()\n angle = float(tokens[0])\n type = int(tokens[1])\n if angle <= threshold and type == 0:\n fp.append(i)\n if angle > threshold and type == 1:\n fn.append(i)\n\n print('len(fp): ' + str(len(fp)))\n print('len(fn): ' + str(len(fn)))\n\n num_fp = len(fp)\n num_fn = len(fn)\n\n filename = 'data/lfw_test_pair.txt'\n with open(filename, 'r') as file:\n pair_lines = file.readlines()\n\n for i in range(num_fp):\n fp_id = fp[i]\n fp_line = pair_lines[fp_id]\n tokens = fp_line.split()\n file0 = tokens[0]\n copy_file(file0, '{}_fp_0.jpg'.format(i))\n save_aligned(file0, '{}_fp_0_aligned.jpg'.format(i))\n file1 = tokens[1]\n copy_file(file1, '{}_fp_1.jpg'.format(i))\n save_aligned(file1, '{}_fp_1_aligned.jpg'.format(i))\n\n for i in range(num_fn):\n fn_id = fn[i]\n fn_line = pair_lines[fn_id]\n tokens = fn_line.split()\n file0 = tokens[0]\n copy_file(file0, '{}_fn_0.jpg'.format(i))\n save_aligned(file0, '{}_fn_0_aligned.jpg'.format(i))\n file1 = tokens[1]\n copy_file(file1, '{}_fn_1.jpg'.format(i))\n save_aligned(file1, '{}_fn_1_aligned.jpg'.format(i))\n\n\ndef save_aligned(old_fn, new_fn):\n old_fn = os.path.join('data/lfw_funneled', old_fn)\n _, landmarks = get_central_face_attributes(old_fn)\n img = align_face(old_fn, landmarks)\n new_fn = os.path.join('images', new_fn)\n 
cv.imwrite(new_fn, img)\n\n\ndef copy_file(old, new):\n old_fn = os.path.join('data/lfw_funneled', old)\n img = cv.imread(old_fn)\n bboxes, landmarks = get_all_face_attributes(old_fn)\n draw_bboxes(img, bboxes, landmarks)\n cv.resize(img, (224, 224))\n new_fn = os.path.join('images', new)\n cv.imwrite(new_fn, img)\n\n\ndef get_threshold():\n with open(angles_file, 'r') as file:\n lines = file.readlines()\n\n data = []\n\n for line in lines:\n tokens = line.split()\n angle = float(tokens[0])\n type = int(tokens[1])\n data.append({'angle': angle, 'type': type})\n\n min_error = 6000\n min_threshold = 0\n\n for d in data:\n threshold = d['angle']\n type1 = len([s for s in data if s['angle'] <= threshold and s['type'] == 0])\n type2 = len([s for s in data if s['angle'] > threshold and s['type'] == 1])\n num_errors = type1 + type2\n if num_errors < min_error:\n min_error = num_errors\n min_threshold = threshold\n\n # print(min_error, min_threshold)\n return min_threshold\n\n\ndef lfw_test(model):\n filename = 'data/lfw-funneled.tgz'\n if not os.path.isdir('data/lfw_funneled'):\n print('Extracting {}...'.format(filename))\n extract(filename)\n\n # if not os.path.isfile(lfw_pickle):\n print('Processing {}...'.format(lfw_pickle))\n process()\n\n # if not os.path.isfile(angles_file):\n print('Evaluating {}...'.format(angles_file))\n evaluate(model)\n\n print('Calculating threshold...')\n # threshold = 70.36\n thres = get_threshold()\n print('Calculating accuracy...')\n acc = accuracy(thres)\n print('Accuracy: {}%, threshold: {}'.format(acc * 100, thres))\n return acc, thres\n\n\nif __name__ == \"__main__\":\n checkpoint = 'BEST_checkpoint.tar'\n checkpoint = torch.load(checkpoint)\n model = checkpoint['model'].module\n model = model.to(device)\n model.eval()\n\n # model = MobileFaceNet()\n # model = model.to(device)\n # model.eval()\n\n acc, threshold = lfw_test(model)\n\n print('Visualizing {}...'.format(angles_file))\n visualize(threshold)\n\n print('error analysis...')\n error_analysis(threshold)\n" ]
[ [ "torch.zeros", "numpy.clip", "numpy.dot", "numpy.linalg.norm", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "torch.no_grad", "numpy.mean", "numpy.std", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel", "torch.load", "matplotlib.pyplot.show", "numpy.linspace" ] ]
Darleen2019/Nabu-MSSS
[ "5e862cbf846d45b8a317f87588533f3fde9f0726" ]
[ "nabu/postprocessing/scorers/sdr_scorer.py" ]
[ "\"\"\"@file sdr_scorer.py\ncontains the scorer using SDR\"\"\"\n\nimport scorer\nimport numpy as np\nimport bss_eval\n\n\nclass SdrScorer(scorer.Scorer):\n\t\"\"\"the SDR scorer class. Uses the script from\n\tC. Raffel, B. McFee, E. J. Humphrey, J. Salamon, O. Nieto, D. Liang, and D. P. W. Ellis,\n\t'mir_eval: A Transparent Implementation of Common MIR Metrics', Proceedings of the 15th\n\tInternational Conference on Music Information Retrieval, 2014\n\n\ta scorer using SDR\"\"\"\n\n\tscore_metrics = ('SDR', 'SIR', 'SAR', 'perm')\n\tscore_metrics_to_summarize = ('SDR', 'SIR', 'SAR')\n\tscore_scenarios = ('SS', 'base')\n\tscore_expects = 'data'\n\n\tdef __init__(self, conf, evalconf, dataconf, rec_dir, numbatches, task, scorer_name, checkpoint_file):\n\t\t\"\"\"SdrScorer constructor\n\n\t\tArgs:\n\t\t\tconf: the scorer configuration as a dictionary\n\t\t\tevalconf: the evaluator configuration as a ConfigParser\n\t\t\tdataconf: the database configuration\n\t\t\trec_dir: the directory where the reconstructions are\n\t\t\tnumbatches: the number of batches to process\n\t\t\"\"\"\n\n\t\tsuper(SdrScorer, self).__init__(conf, evalconf, dataconf, rec_dir, numbatches, task, scorer_name, checkpoint_file)\n\n\tdef _get_score(self, org_src_signals, base_signals, rec_src_signals, utt_rate=None):\n\t\t\"\"\"score the reconstructed utterances with respect to the original source signals\n\n\t\tArgs:\n\t\t\torg_src_signals: the original source signals, as a list of numpy arrarys\n\t\t\tbase_signals: the duplicated base signal (original mixture), as a list of numpy arrarys\n\t\t\trec_src_signals: the reconstructed source signals, as a list of numpy arrarys\n\n\t\tReturns:\n\t\t\tthe score\"\"\"\n\n\t\t# convert to numpy arrays\n\t\torg_src_signals = org_src_signals[:, :, 0]\n\t\tbase_signals = np.array(base_signals)[:, :, 0]\n\t\trec_src_signals = np.array(rec_src_signals)\n\n\t\t#\n\t\tcollect_outputs = dict()\n\t\tcollect_outputs[self.score_scenarios[1]] = bss_eval.bss_eval_sources(org_src_signals, base_signals)\n\t\tcollect_outputs[self.score_scenarios[0]] = bss_eval.bss_eval_sources(org_src_signals, rec_src_signals)\n\n\t\tnr_spk = len(org_src_signals)\n\n\t\t# convert the outputs to a single dictionary\n\t\tscore_dict = dict()\n\t\tfor i, metric in enumerate(self.score_metrics):\n\t\t\tscore_dict[metric] = dict()\n\n\t\t\tfor j, scen in enumerate(self.score_scenarios):\n\t\t\t\tscore_dict[metric][scen] = []\n\n\t\t\t\tfor spk in range(nr_spk):\n\t\t\t\t\tscore_dict[metric][scen].append(collect_outputs[scen][i][spk])\n\n\t\treturn score_dict\n" ]
[ [ "numpy.array" ] ]
sherbold/replication-kit-2020-smoke-testing
[ "17aa9858ab693e5c1b5b20c9b2d277802109600e" ]
[ "generated-tests/sklearn/test_SKLEARN_ComplementNB.py" ]
[ "import unittest\nimport xmlrunner\nimport pandas as pd\nimport numpy as np\nimport threading\nimport functools\nimport inspect\nimport math\nimport traceback\nimport warnings\n\nfrom parameterized import parameterized\nfrom scipy.io.arff import loadarff\nfrom scipy.stats import chisquare, ks_2samp\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.naive_bayes import ComplementNB\n\n\nclass TestTimeoutException(Exception):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\n# thanks to https://gist.github.com/vadimg/2902788\ndef timeout(duration, default=None):\n def decorator(func):\n class InterruptableThread(threading.Thread):\n def __init__(self, args, kwargs):\n threading.Thread.__init__(self)\n self.args = args\n self.kwargs = kwargs\n self.result = default\n self.daemon = True\n self.exception = None\n\n def run(self):\n try:\n self.result = func(*self.args, **self.kwargs)\n except Exception as e:\n self.exception = e\n\n @functools.wraps(func)\n def wrap(*args, **kwargs):\n it = InterruptableThread(args, kwargs)\n it.start()\n it.join(duration)\n if it.is_alive():\n raise TestTimeoutException('timeout after %i seconds for test %s' % (duration, func))\n if it.exception:\n raise it.exception\n return it.result\n return wrap\n return decorator\n\nclass test_SKLEARN_ComplementNB(unittest.TestCase):\n\n params = [(\"{'fit_prior':True,'alpha':0.0,'norm':False,}\", {'fit_prior':True,'alpha':0.0,'norm':False,}),\n (\"{'fit_prior':True,'alpha':0.5,'norm':False,}\", {'fit_prior':True,'alpha':0.5,'norm':False,}),\n (\"{'fit_prior':True,'alpha':1.0,'norm':False,}\", {'fit_prior':True,'alpha':1.0,'norm':False,}),\n (\"{'fit_prior':True,'alpha':1.5,'norm':False,}\", {'fit_prior':True,'alpha':1.5,'norm':False,}),\n (\"{'fit_prior':False,'alpha':1.0,'norm':False,}\", {'fit_prior':False,'alpha':1.0,'norm':False,}),\n (\"{'fit_prior':True,'alpha':1.0,'norm':True,}\", {'fit_prior':True,'alpha':1.0,'norm':True,}),\n ]\n\n def assert_morphtest(self, evaluation_type, testcase_name, iteration, deviations_class, deviations_score, pval_chisquare, pval_kstest):\n if evaluation_type=='score_exact':\n self.assertEqual(deviations_score, 0)\n elif evaluation_type=='class_exact':\n self.assertEqual(deviations_class, 0)\n elif evaluation_type=='score_stat':\n self.assertTrue(pval_kstest>0.05)\n elif evaluation_type=='class_stat':\n self.assertTrue(pval_chisquare>0.05)\n else:\n raise ValueError('invalid evaluation_type: %s (allowed: score_exact, class_exact, score_stat, class_stat' % evaluation_type)\n\n @parameterized.expand(params)\n @timeout(21600)\n def test_Uniform(self, name, kwargs):\n for iter in range(1,1+1):\n data, meta = loadarff('smokedata/Uniform_%i_training.arff' % iter)\n testdata, testmeta = loadarff('smokedata/Uniform_%i_test.arff' % iter)\n lb_make = LabelEncoder()\n data_df = pd.DataFrame(data)\n data_df[\"classAtt\"] = lb_make.fit_transform(data_df[\"classAtt\"])\n data_df = pd.get_dummies(data_df)\n \n testdata_df = pd.DataFrame(data)\n testdata_df[\"classAtt\"] = lb_make.fit_transform(testdata_df[\"classAtt\"])\n testdata_df = pd.get_dummies(testdata_df, sparse=True)\n \n classIndex = -1\n for i, s in enumerate(data_df.columns):\n if 'classAtt' in s:\n classIndex = i\n \n classifier = ComplementNB(**kwargs)\n np.random.seed(42)\n classifier.fit(np.delete(data_df.values, classIndex, axis=1),data_df.values[:,classIndex])\n classifier.predict(np.delete(testdata_df.values, classIndex, axis=1))\n \n\n @parameterized.expand(params)\n 
@timeout(21600)\n def test_MinFloat(self, name, kwargs):\n for iter in range(1,1+1):\n data, meta = loadarff('smokedata/MinFloat_%i_training.arff' % iter)\n testdata, testmeta = loadarff('smokedata/MinFloat_%i_test.arff' % iter)\n lb_make = LabelEncoder()\n data_df = pd.DataFrame(data)\n data_df[\"classAtt\"] = lb_make.fit_transform(data_df[\"classAtt\"])\n data_df = pd.get_dummies(data_df)\n \n testdata_df = pd.DataFrame(data)\n testdata_df[\"classAtt\"] = lb_make.fit_transform(testdata_df[\"classAtt\"])\n testdata_df = pd.get_dummies(testdata_df, sparse=True)\n \n classIndex = -1\n for i, s in enumerate(data_df.columns):\n if 'classAtt' in s:\n classIndex = i\n \n classifier = ComplementNB(**kwargs)\n np.random.seed(42)\n classifier.fit(np.delete(data_df.values, classIndex, axis=1),data_df.values[:,classIndex])\n classifier.predict(np.delete(testdata_df.values, classIndex, axis=1))\n \n\n @parameterized.expand(params)\n @timeout(21600)\n def test_VerySmall(self, name, kwargs):\n for iter in range(1,1+1):\n data, meta = loadarff('smokedata/VerySmall_%i_training.arff' % iter)\n testdata, testmeta = loadarff('smokedata/VerySmall_%i_test.arff' % iter)\n lb_make = LabelEncoder()\n data_df = pd.DataFrame(data)\n data_df[\"classAtt\"] = lb_make.fit_transform(data_df[\"classAtt\"])\n data_df = pd.get_dummies(data_df)\n \n testdata_df = pd.DataFrame(data)\n testdata_df[\"classAtt\"] = lb_make.fit_transform(testdata_df[\"classAtt\"])\n testdata_df = pd.get_dummies(testdata_df, sparse=True)\n \n classIndex = -1\n for i, s in enumerate(data_df.columns):\n if 'classAtt' in s:\n classIndex = i\n \n classifier = ComplementNB(**kwargs)\n np.random.seed(42)\n classifier.fit(np.delete(data_df.values, classIndex, axis=1),data_df.values[:,classIndex])\n classifier.predict(np.delete(testdata_df.values, classIndex, axis=1))\n \n\n @parameterized.expand(params)\n @timeout(21600)\n def test_MinDouble(self, name, kwargs):\n for iter in range(1,1+1):\n data, meta = loadarff('smokedata/MinDouble_%i_training.arff' % iter)\n testdata, testmeta = loadarff('smokedata/MinDouble_%i_test.arff' % iter)\n lb_make = LabelEncoder()\n data_df = pd.DataFrame(data)\n data_df[\"classAtt\"] = lb_make.fit_transform(data_df[\"classAtt\"])\n data_df = pd.get_dummies(data_df)\n \n testdata_df = pd.DataFrame(data)\n testdata_df[\"classAtt\"] = lb_make.fit_transform(testdata_df[\"classAtt\"])\n testdata_df = pd.get_dummies(testdata_df, sparse=True)\n \n classIndex = -1\n for i, s in enumerate(data_df.columns):\n if 'classAtt' in s:\n classIndex = i\n \n classifier = ComplementNB(**kwargs)\n np.random.seed(42)\n classifier.fit(np.delete(data_df.values, classIndex, axis=1),data_df.values[:,classIndex])\n classifier.predict(np.delete(testdata_df.values, classIndex, axis=1))\n \n\n @parameterized.expand(params)\n @timeout(21600)\n def test_MaxFloat(self, name, kwargs):\n for iter in range(1,1+1):\n data, meta = loadarff('smokedata/MaxFloat_%i_training.arff' % iter)\n testdata, testmeta = loadarff('smokedata/MaxFloat_%i_test.arff' % iter)\n lb_make = LabelEncoder()\n data_df = pd.DataFrame(data)\n data_df[\"classAtt\"] = lb_make.fit_transform(data_df[\"classAtt\"])\n data_df = pd.get_dummies(data_df)\n \n testdata_df = pd.DataFrame(data)\n testdata_df[\"classAtt\"] = lb_make.fit_transform(testdata_df[\"classAtt\"])\n testdata_df = pd.get_dummies(testdata_df, sparse=True)\n \n classIndex = -1\n for i, s in enumerate(data_df.columns):\n if 'classAtt' in s:\n classIndex = i\n \n classifier = ComplementNB(**kwargs)\n np.random.seed(42)\n 
classifier.fit(np.delete(data_df.values, classIndex, axis=1),data_df.values[:,classIndex])\n classifier.predict(np.delete(testdata_df.values, classIndex, axis=1))\n \n\n @parameterized.expand(params)\n @timeout(21600)\n def test_MaxDouble(self, name, kwargs):\n for iter in range(1,1+1):\n data, meta = loadarff('smokedata/MaxDouble_%i_training.arff' % iter)\n testdata, testmeta = loadarff('smokedata/MaxDouble_%i_test.arff' % iter)\n lb_make = LabelEncoder()\n data_df = pd.DataFrame(data)\n data_df[\"classAtt\"] = lb_make.fit_transform(data_df[\"classAtt\"])\n data_df = pd.get_dummies(data_df)\n \n testdata_df = pd.DataFrame(data)\n testdata_df[\"classAtt\"] = lb_make.fit_transform(testdata_df[\"classAtt\"])\n testdata_df = pd.get_dummies(testdata_df, sparse=True)\n \n classIndex = -1\n for i, s in enumerate(data_df.columns):\n if 'classAtt' in s:\n classIndex = i\n \n classifier = ComplementNB(**kwargs)\n np.random.seed(42)\n classifier.fit(np.delete(data_df.values, classIndex, axis=1),data_df.values[:,classIndex])\n classifier.predict(np.delete(testdata_df.values, classIndex, axis=1))\n \n\n @parameterized.expand(params)\n @timeout(21600)\n def test_OneClass(self, name, kwargs):\n for iter in range(1,1+1):\n data, meta = loadarff('smokedata/OneClass_%i_training.arff' % iter)\n testdata, testmeta = loadarff('smokedata/OneClass_%i_test.arff' % iter)\n lb_make = LabelEncoder()\n data_df = pd.DataFrame(data)\n data_df[\"classAtt\"] = lb_make.fit_transform(data_df[\"classAtt\"])\n data_df = pd.get_dummies(data_df)\n \n testdata_df = pd.DataFrame(data)\n testdata_df[\"classAtt\"] = lb_make.fit_transform(testdata_df[\"classAtt\"])\n testdata_df = pd.get_dummies(testdata_df, sparse=True)\n \n classIndex = -1\n for i, s in enumerate(data_df.columns):\n if 'classAtt' in s:\n classIndex = i\n \n classifier = ComplementNB(**kwargs)\n np.random.seed(42)\n classifier.fit(np.delete(data_df.values, classIndex, axis=1),data_df.values[:,classIndex])\n classifier.predict(np.delete(testdata_df.values, classIndex, axis=1))\n \n\n @parameterized.expand(params)\n @timeout(21600)\n def test_Bias(self, name, kwargs):\n for iter in range(1,1+1):\n data, meta = loadarff('smokedata/Bias_%i_training.arff' % iter)\n testdata, testmeta = loadarff('smokedata/Bias_%i_test.arff' % iter)\n lb_make = LabelEncoder()\n data_df = pd.DataFrame(data)\n data_df[\"classAtt\"] = lb_make.fit_transform(data_df[\"classAtt\"])\n data_df = pd.get_dummies(data_df)\n \n testdata_df = pd.DataFrame(data)\n testdata_df[\"classAtt\"] = lb_make.fit_transform(testdata_df[\"classAtt\"])\n testdata_df = pd.get_dummies(testdata_df, sparse=True)\n \n classIndex = -1\n for i, s in enumerate(data_df.columns):\n if 'classAtt' in s:\n classIndex = i\n \n classifier = ComplementNB(**kwargs)\n np.random.seed(42)\n classifier.fit(np.delete(data_df.values, classIndex, axis=1),data_df.values[:,classIndex])\n classifier.predict(np.delete(testdata_df.values, classIndex, axis=1))\n \n\n @parameterized.expand(params)\n @timeout(21600)\n def test_Zeroes(self, name, kwargs):\n for iter in range(1,1+1):\n data, meta = loadarff('smokedata/Zeroes_%i_training.arff' % iter)\n testdata, testmeta = loadarff('smokedata/Zeroes_%i_test.arff' % iter)\n lb_make = LabelEncoder()\n data_df = pd.DataFrame(data)\n data_df[\"classAtt\"] = lb_make.fit_transform(data_df[\"classAtt\"])\n data_df = pd.get_dummies(data_df)\n \n testdata_df = pd.DataFrame(data)\n testdata_df[\"classAtt\"] = lb_make.fit_transform(testdata_df[\"classAtt\"])\n testdata_df = pd.get_dummies(testdata_df, 
sparse=True)\n \n classIndex = -1\n for i, s in enumerate(data_df.columns):\n if 'classAtt' in s:\n classIndex = i\n \n classifier = ComplementNB(**kwargs)\n np.random.seed(42)\n classifier.fit(np.delete(data_df.values, classIndex, axis=1),data_df.values[:,classIndex])\n classifier.predict(np.delete(testdata_df.values, classIndex, axis=1))\n \n\n @parameterized.expand(params)\n @timeout(21600)\n def test_RandomNumeric(self, name, kwargs):\n for iter in range(1,1+1):\n data, meta = loadarff('smokedata/RandomNumeric_%i_training.arff' % iter)\n testdata, testmeta = loadarff('smokedata/RandomNumeric_%i_test.arff' % iter)\n lb_make = LabelEncoder()\n data_df = pd.DataFrame(data)\n data_df[\"classAtt\"] = lb_make.fit_transform(data_df[\"classAtt\"])\n data_df = pd.get_dummies(data_df)\n \n testdata_df = pd.DataFrame(data)\n testdata_df[\"classAtt\"] = lb_make.fit_transform(testdata_df[\"classAtt\"])\n testdata_df = pd.get_dummies(testdata_df, sparse=True)\n \n classIndex = -1\n for i, s in enumerate(data_df.columns):\n if 'classAtt' in s:\n classIndex = i\n \n classifier = ComplementNB(**kwargs)\n np.random.seed(42)\n classifier.fit(np.delete(data_df.values, classIndex, axis=1),data_df.values[:,classIndex])\n classifier.predict(np.delete(testdata_df.values, classIndex, axis=1))\n \n\n @parameterized.expand(params)\n @timeout(21600)\n def test_DisjointNumeric(self, name, kwargs):\n for iter in range(1,1+1):\n data, meta = loadarff('smokedata/DisjointNumeric_%i_training.arff' % iter)\n testdata, testmeta = loadarff('smokedata/DisjointNumeric_%i_test.arff' % iter)\n lb_make = LabelEncoder()\n data_df = pd.DataFrame(data)\n data_df[\"classAtt\"] = lb_make.fit_transform(data_df[\"classAtt\"])\n data_df = pd.get_dummies(data_df)\n \n testdata_df = pd.DataFrame(data)\n testdata_df[\"classAtt\"] = lb_make.fit_transform(testdata_df[\"classAtt\"])\n testdata_df = pd.get_dummies(testdata_df, sparse=True)\n \n classIndex = -1\n for i, s in enumerate(data_df.columns):\n if 'classAtt' in s:\n classIndex = i\n \n classifier = ComplementNB(**kwargs)\n np.random.seed(42)\n classifier.fit(np.delete(data_df.values, classIndex, axis=1),data_df.values[:,classIndex])\n classifier.predict(np.delete(testdata_df.values, classIndex, axis=1))\n \n\n\n\nif __name__ == '__main__':\n unittest.main()\n# with open('results.xml', 'wb') as output:\n# unittest.main(\n# testRunner=xmlrunner.XMLTestRunner(output=output),\n# failfast=False, buffer=False, catchbreak=False)" ]
[ [ "sklearn.preprocessing.LabelEncoder", "numpy.delete", "sklearn.naive_bayes.ComplementNB", "scipy.io.arff.loadarff", "pandas.DataFrame", "numpy.random.seed", "pandas.get_dummies" ] ]
hossen-code/DESlib
[ "77d0ccb491c522e7505d9ac6081b72cac001cb9e" ]
[ "deslib/static/static_selection.py" ]
[ "# coding=utf-8\n\n# Author: Rafael Menelau Oliveira e Cruz <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numpy as np\nfrom sklearn.metrics import check_scoring\nfrom sklearn.utils.validation import check_is_fitted, check_X_y, check_array\n\nfrom deslib.util.aggregation import majority_voting_rule\nfrom deslib.util.aggregation import predict_proba_ensemble\nfrom .base import BaseStaticEnsemble\n\n\nclass StaticSelection(BaseStaticEnsemble):\n \"\"\"Ensemble model that selects N classifiers with the best performance in a\n dataset\n\n Parameters\n ----------\n pool_classifiers : list of classifiers (Default = None)\n The generated_pool of classifiers trained for the corresponding\n classification problem. Each base classifiers should support the method\n \"predict\". If None, then the pool of classifiers is a bagging\n classifier.\n\n scoring : string, callable (default = None)\n A single string or a callable to evaluate the predictions on the\n validation set.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n pct_classifiers : float (Default = 0.5)\n Percentage of base classifier that should be selected by the selection\n scheme.\n\n References\n ----------\n Britto, Alceu S., Robert Sabourin, and Luiz ES Oliveira. \"Dynamic selection\n of classifiers—a comprehensive review.\"\n Pattern Recognition 47.11 (2014): 3665-3680.\n\n Kuncheva, Ludmila I. Combining pattern classifiers: methods and algorithms.\n John Wiley & Sons, 2004.\n\n R. M. O. Cruz, R. Sabourin, and G. D. Cavalcanti, “Dynamic classifier\n selection: Recent advances and perspectives,”\n Information Fusion, vol. 41, pp. 
195 – 216, 2018.\n \"\"\"\n\n def __init__(self, pool_classifiers=None,\n pct_classifiers=0.5,\n scoring=None,\n random_state=None):\n super(StaticSelection, self).__init__(\n pool_classifiers=pool_classifiers, random_state=random_state)\n self.pct_classifiers = pct_classifiers\n self.scoring = scoring\n\n def fit(self, X, y):\n \"\"\"Fit the static selection model by select an ensemble of classifier\n containing the base classifiers with highest accuracy in the given\n dataset.\n\n Parameters\n ----------\n X : array of shape (n_samples, n_features)\n Data used to fit the model.\n\n y : array of shape (n_samples)\n class labels of each example in X.\n\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n self._validate_parameters()\n\n X, y = check_X_y(X, y)\n\n super(StaticSelection, self).fit(X, y)\n\n self.n_classifiers_ensemble_ = int(\n self.n_classifiers_ * self.pct_classifiers)\n\n performances = np.zeros(self.n_classifiers_)\n\n if not self.base_already_encoded_:\n y_encoded = y\n else:\n y_encoded = self.enc_.transform(y)\n\n for clf_idx, clf in enumerate(self.pool_classifiers_):\n scorer = check_scoring(clf, self.scoring)\n performances[clf_idx] = scorer(clf, X, y_encoded)\n\n self.clf_indices_ = np.argsort(performances)[::-1][\n 0:self.n_classifiers_ensemble_]\n self.ensemble_ = [self.pool_classifiers_[clf_idx] for clf_idx in\n self.clf_indices_]\n\n return self\n\n def predict(self, X):\n \"\"\"Predict the label of each sample in X and returns the predicted\n label.\n\n Parameters\n ----------\n X : array of shape (n_samples, n_features)\n The data to be classified\n\n Returns\n -------\n predicted_labels : array of shape (n_samples)\n Predicted class for each sample in X.\n \"\"\"\n X = check_array(X)\n self._check_is_fitted()\n\n votes = np.zeros((X.shape[0], self.n_classifiers_ensemble_))\n for clf_index, clf in enumerate(self.ensemble_):\n votes[:, clf_index] = self._encode_base_labels(clf.predict(X))\n\n predicted_labels = majority_voting_rule(votes).astype(int)\n\n return self.classes_.take(predicted_labels)\n\n def predict_proba(self, X):\n \"\"\"Estimates the posterior probabilities for sample in X.\n\n Parameters\n ----------\n X : array of shape (n_samples, n_features)\n The input data.\n\n Returns\n -------\n predicted_proba : array of shape (n_samples, n_classes)\n Probabilities estimates for each sample in X.\n \"\"\"\n self._check_is_fitted()\n self._check_predict_proba()\n proba = predict_proba_ensemble(self.ensemble_, X)\n return proba\n\n def _validate_parameters(self):\n\n if not isinstance(self.pct_classifiers, float):\n raise TypeError('pct_classifiers should be a float.')\n if self.pct_classifiers > 1 or self.pct_classifiers < 0:\n raise ValueError(\n 'The parameter pct_classifiers should be a number '\n 'between 0 and 1.')\n\n def _check_is_fitted(self):\n \"\"\"Verify if the estimator algorithm was fitted. Raises an error if it\n is not fitted.\n \"\"\"\n check_is_fitted(self, \"ensemble_\")\n\n def _check_predict_proba(self):\n \"\"\" Checks if each base classifier in the ensemble (selected models)\n implements the predict_proba method.\n\n Raises\n -------\n ValueError\n If the base classifiers do not implements the predict_proba method.\n \"\"\"\n for clf in self.ensemble_:\n if \"predict_proba\" not in dir(clf):\n raise ValueError(\n \"All base classifiers should output probability estimates\")\n" ]
[ [ "sklearn.utils.validation.check_is_fitted", "sklearn.utils.validation.check_X_y", "numpy.zeros", "sklearn.utils.validation.check_array", "sklearn.metrics.check_scoring", "numpy.argsort" ] ]
payne911/SR-ResCNN-Keras-
[ "2e5aa7320b0cec83db8d9dee5011e8380987b30b" ]
[ "predict.py" ]
[ "import argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport skimage.io\n\nfrom keras.models import load_model\n\nfrom constants import verbosity, save_dir, overlap, \\\n model_name, tests_path, input_width, input_height, scale_fact\nfrom utils import float_im\n\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument('image', type=str,\n help='image name (example: \"bird.png\") that must be inside the \"./input/\" folder')\nparser.add_argument('-m', '--model', type=str, default=model_name,\n help='model name (in the \"./save/\" folder), followed by \".h5\"')\nparser.add_argument('-s', '--save', type=str, default='your_image.png',\n help='the name of the saved image which will appear inside the \"output\" folder')\n\nargs = parser.parse_args()\n\n\n\ndef predict(args):\n \"\"\"\n Super-resolution on the input image using the model.\n\n :param args:\n :return:\n 'predictions' contains an array of every single cropped sub-image once enhanced (the outputs of the model).\n 'image' is the original image, untouched.\n 'crops' is the array of every single cropped sub-image that will be used as input to the model.\n \"\"\"\n model = load_model(save_dir + '/' + args.model)\n\n image = skimage.io.imread(tests_path + args.image)[:, :, :3] # removing possible extra channels (Alpha)\n print(\"Image shape:\", image.shape)\n\n predictions = []\n images = []\n\n # Padding and cropping the image\n overlap_pad = (overlap, overlap) # padding tuple\n pad_width = (overlap_pad, overlap_pad, (0, 0)) # assumes color channel as last\n padded_image = np.pad(image, pad_width, 'constant') # padding the border\n crops = seq_crop(padded_image) # crops into multiple sub-parts the image based on 'input_' constants\n\n # Arranging the divided image into a single-dimension array of sub-images\n for i in range(len(crops)): # amount of vertical crops\n for j in range(len(crops[0])): # amount of horizontal crops\n current_image = crops[i][j]\n images.append(current_image)\n\n print(\"Moving on to predictions. 
Amount:\", len(images))\n upscaled_overlap = overlap * 2\n for p in range(len(images)):\n if p % 3 == 0 and verbosity == 2:\n print(\"--prediction #\", p)\n\n # Hack due to some GPUs that can only handle one image at a time\n input_img = (np.expand_dims(images[p], 0)) # Add the image to a batch where it's the only member\n pred = model.predict(input_img)[0] # returns a list of lists, one for each image in the batch\n\n # Cropping the useless parts of the overlapped predictions (to prevent the repeated erroneous edge-prediction)\n pred = pred[upscaled_overlap:pred.shape[0]-upscaled_overlap, upscaled_overlap:pred.shape[1]-upscaled_overlap]\n\n predictions.append(pred)\n return predictions, image, crops\n\n\ndef show_pred_output(input, pred):\n plt.figure(figsize=(20, 20))\n plt.suptitle(\"Results\")\n\n plt.subplot(1, 2, 1)\n plt.title(\"Input : \" + str(input.shape[1]) + \"x\" + str(input.shape[0]))\n plt.imshow(input, cmap=plt.cm.binary).axes.get_xaxis().set_visible(False)\n\n plt.subplot(1, 2, 2)\n plt.title(\"Output : \" + str(pred.shape[1]) + \"x\" + str(pred.shape[0]))\n plt.imshow(pred, cmap=plt.cm.binary).axes.get_xaxis().set_visible(False)\n\n plt.show()\n\n\n# adapted from https://stackoverflow.com/a/52463034/9768291\ndef seq_crop(img):\n \"\"\"\n To crop the whole image in a list of sub-images of the same size.\n Size comes from \"input_\" variables in the 'constants' (Evaluation).\n Padding with 0 the Bottom and Right image.\n\n :param img: input image\n :return: list of sub-images with defined size (as per 'constants')\n \"\"\"\n sub_images = [] # will contain all the cropped sub-parts of the image\n j, shifted_height = 0, 0\n while shifted_height < (img.shape[0] - input_height):\n horizontal = []\n shifted_height = j * (input_height - overlap)\n i, shifted_width = 0, 0\n while shifted_width < (img.shape[1] - input_width):\n shifted_width = i * (input_width - overlap)\n horizontal.append(crop_precise(img,\n shifted_width,\n shifted_height,\n input_width,\n input_height))\n i += 1\n sub_images.append(horizontal)\n j += 1\n\n return sub_images\n\n\ndef crop_precise(img, coord_x, coord_y, width_length, height_length):\n \"\"\"\n To crop a precise portion of an image.\n When trying to crop outside of the boundaries, the input to padded with zeros.\n\n :param img: image to crop\n :param coord_x: width coordinate (top left point)\n :param coord_y: height coordinate (top left point)\n :param width_length: width of the cropped portion starting from coord_x (toward right)\n :param height_length: height of the cropped portion starting from coord_y (toward bottom)\n :return: the cropped part of the image\n \"\"\"\n tmp_img = img[coord_y:coord_y + height_length, coord_x:coord_x + width_length]\n return float_im(tmp_img) # From [0,255] to [0.,1.]\n\n\n# adapted from https://stackoverflow.com/a/52733370/9768291\ndef reconstruct(predictions, crops):\n \"\"\"\n Used to reconstruct a whole image from an array of mini-predictions.\n The image had to be split in sub-images because the GPU's memory\n couldn't handle the prediction on a whole image.\n\n :param predictions: an array of upsampled images, from left to right, top to bottom.\n :param crops: 2D array of the cropped images\n :return: the reconstructed image as a whole\n \"\"\"\n\n # unflatten predictions\n def nest(data, template):\n data = iter(data)\n return [[next(data) for _ in row] for row in template]\n\n if len(crops) != 0:\n predictions = nest(predictions, crops)\n\n # At this point \"predictions\" is a 3D image of the individual 
outputs\n H = np.cumsum([x[0].shape[0] for x in predictions])\n W = np.cumsum([x.shape[1] for x in predictions[0]])\n D = predictions[0][0]\n recon = np.empty((H[-1], W[-1], D.shape[2]), D.dtype)\n for rd, rs in zip(np.split(recon, H[:-1], 0), predictions):\n for d, s in zip(np.split(rd, W[:-1], 1), rs):\n d[...] = s\n\n # Removing the pad from the reconstruction\n tmp_overlap = overlap * (scale_fact - 1) # using \"-2\" leaves the outer edge-prediction error\n return recon[tmp_overlap:recon.shape[0]-tmp_overlap, tmp_overlap:recon.shape[1]-tmp_overlap]\n\n\nif __name__ == '__main__':\n print(\" - \", args)\n\n preds, original, crops = predict(args) # returns the predictions along with the original\n enhanced = reconstruct(preds, crops) # reconstructs the enhanced image from predictions\n\n # Save and display the result\n enhanced = np.clip(enhanced, 0, 1)\n plt.imsave('output/' + args.save, enhanced, cmap=plt.cm.gray)\n show_pred_output(original, enhanced)\n" ]
[ [ "numpy.pad", "numpy.clip", "numpy.empty", "matplotlib.pyplot.imsave", "matplotlib.pyplot.suptitle", "numpy.split", "matplotlib.pyplot.figure", "numpy.cumsum", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow", "numpy.expand_dims", "matplotlib.pyplot.subplot" ] ]
papamarkou/eeyore
[ "4cd9b5a619cd095035aa93f348d1c937629aa8a3" ]
[ "eeyore/constants/constants.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom eeyore.stats import binary_cross_entropy\n\ntorch_to_np_types = {torch.float32: np.float32, torch.float64: np.float64}\n\n# Built-in function for binary classification\n# https://github.com/pytorch/pytorch/issues/18945\n# Second order automatic differentiation does not work after the pytorch issue has been merged\n# import torch.nn.functional as F\n# lambda x, y: F.binary_cross_entropy(x, y, reduction='sum')\n\nloss_functions = {\n 'binary_classification': lambda x, y: binary_cross_entropy(x, y, reduction='sum'),\n 'multiclass_classification': lambda x, y: nn.CrossEntropyLoss(reduction='sum')(x, torch.argmax(y, 1))\n}\n" ]
[ [ "torch.argmax", "torch.nn.CrossEntropyLoss" ] ]
trankha1655/pan_pp.origin
[ "aa4774b1bf360d0a8e54d520483514d57521bf34" ]
[ "models/post_processing/pse/setup.py" ]
[ "from distutils.core import Extension, setup\n\nimport numpy\nfrom Cython.Build import cythonize\n\nsetup(ext_modules=cythonize(\n Extension('pse',\n sources=['pse.pyx'],\n language='c++',\n include_dirs=[numpy.get_include()],\n library_dirs=[],\n libraries=[],\n extra_compile_args=['-O3'],\n extra_link_args=[])))\n" ]
[ [ "numpy.get_include" ] ]
aramis-lab/pac2019
[ "200681eb0441baa69a386ef1ac8cf6f0d2ab01e0" ]
[ "src/deep/models/resnet_model.py" ]
[ "import torch.nn as nn\n\nfrom structures.modules import Flatten, AddingNodes\n\n\nclass ResBlock(nn.Module):\n def __init__(self, block_number, input_size):\n super(ResBlock, self).__init__()\n\n layer_in = input_size if input_size is not None else 2 ** (block_number + 1)\n layer_out = 2 ** (block_number + 2)\n\n self.conv1 = nn.Conv3d(layer_in, layer_out, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm3d(layer_out)\n self.act1 = nn.ELU()\n\n self.conv2 = nn.Conv3d(layer_out, layer_out, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm3d(layer_out)\n\n # shortcut\n self.shortcut = nn.Sequential(\n nn.Conv3d(layer_in, layer_out, kernel_size=1, stride=1, padding=0, bias=False)\n )\n\n self.act2 = nn.ELU()\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.act1(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out += self.shortcut(x)\n out = self.act2(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, n_covars, input_size, **kwargs):\n super(ResNet, self).__init__()\n assert len(input_size) == 4, \"input must be in 3d with the corresponding number of channels\"\n self.nb_covars = n_covars\n\n # self.conv1 = nn.Conv3d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n # self.bn1 = nn.BatchNorm3d(64)\n\n self.layer1 = self._make_block(1, input_size[0])\n self.layer2 = self._make_block(2)\n self.layer3 = self._make_block(3)\n self.layer4 = self._make_block(4)\n self.layer5 = self._make_block(5)\n\n d, h, w = ResNet._maxpool_output_size(input_size[1::], nb_layers=5)\n\n self.fc = nn.Sequential(\n Flatten(),\n # nn.Linear(128*2*3*2, 256), # wm/gm 128 = 2 ** (5 + 2) # 5 is for 5 blocks\n # nn.Linear(128*4*5*4, 256), # t1 image\n nn.Linear(128 * d * h * w, 256), # t1 image\n nn.ELU(),\n nn.Dropout(p=0.8),\n AddingNodes(),\n nn.Linear(256 + self.nb_covars, 1)\n )\n\n @staticmethod\n def _make_block(block_number, input_size=None):\n return nn.Sequential(\n ResBlock(block_number, input_size),\n nn.MaxPool3d(3, stride=2)\n )\n\n @staticmethod\n def _maxpool_output_size(input_size, kernel_size=(3, 3, 3), stride=(2, 2, 2), nb_layers=1):\n import math\n\n d = math.floor((input_size[0] - kernel_size[0]) / stride[0] + 1)\n h = math.floor((input_size[1] - kernel_size[1]) / stride[1] + 1)\n w = math.floor((input_size[2] - kernel_size[2]) / stride[2] + 1)\n\n if nb_layers == 1:\n return d, h, w\n return ResNet._maxpool_output_size((d, h, w), kernel_size=kernel_size, stride=stride, nb_layers=nb_layers-1)\n\n def forward(self, x, covars=None):\n\n out = self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.layer5(out)\n\n for layer in self.fc:\n if isinstance(layer, AddingNodes):\n out = layer(out, covars)\n else:\n out = layer(out)\n\n # out = self.fc(out)\n return out\n\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.MaxPool3d", "torch.nn.Conv3d", "torch.nn.BatchNorm3d", "torch.nn.ELU" ] ]
ITRI-AIdea/CTSP-job-shop-scheduling
[ "b1ccc847aea9388f05a0c399b2609dcb444604c4" ]
[ "util.py" ]
[ "# Copyright (c) 2020 Industrial Technology Research Institute.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Tuple, Any\nfrom bisect import bisect\n\nimport json\nfrom pprint import pprint\nfrom datetime import datetime, timedelta\nfrom dateutil import parser\n\nimport pandas as pd\nfrom attend import (Attend, ManHourOneDay)\n\ncols_bom = [\n \"productCode\",\n \"sequence\",\n \"operationCode\",\n \"resourceCode\",\n \"prepareTime\",\n \"operationTime\",\n]\n\ncols_submit = [\n \"orderCode\",\n \"productCode\",\n \"sequence\",\n \"operationCode\",\n \"resourceCode\",\n \"resourceUsage\",\n \"start\",\n \"end\",\n]\n\ntab_unit = {\"H\": 60, \"M\": 1}\n\n\nclass JobShop(object):\n def __init__(self, jobs_data):\n # convert to DataFrame\n self.df_resource = pd.DataFrame.from_dict(jobs_data[\"resource\"])\n # [\"resourceCode\", \"weekday\", \"attendanceCode\", \"quantity\", \"usageMin\", \"usageMax\"]\n\n self.df_attend = pd.DataFrame.from_dict(jobs_data[\"attendance\"])\n # [\"attendanceCode\", \"start\", \"end\"]\n\n self.df_order = pd.DataFrame.from_dict(jobs_data[\"order\"])\n # [\"orderCode\", \"productCode\", \"notBefore\", \"notAfter\", \"quantity\"]\n\n self.df_op = pd.DataFrame.from_dict(jobs_data[\"BOM\"])\n\n self.df_op = self.df_op[cols_bom]\n\n self.df_op_rc = {}\n for rc, _df in self.df_op.groupby(['resourceCode']):\n idx = pd.MultiIndex.from_frame(_df[['productCode', 'sequence']])\n df = _df.set_index(idx)\n self.df_op_rc[rc] = df\n\n # table to lookup op count for each product\n self.tab_prod_op_count = self.df_op[\"productCode\"].value_counts().to_dict()\n # print(tab_prod_op_count)\n # {'PD004': 32, 'PD005': 31, 'PD002': 20, 'PD001': 19, 'PD003': 14, 'PD000': 8}\n\n self.tab_prod_op_count_nodupe = count_product_ops(self.df_op)\n # print(tab_prod_op_count_nodupe)\n # {'PD000': 8, 'PD001': 15, 'PD002': 15, 'PD003': 13, 'PD004': 24, 'PD005': 24}\n\n self.tab_order_qty = pd.Series(self.df_order.quantity.values, index=self.df_order.orderCode).to_dict()\n self.tab_rc_qty = {\n rc: _df.quantity.max() for rc, _df in self.df_resource.groupby([\"resourceCode\"])\n } # 資源總人數\n\n self.tab_time_constraint = self.df_order[[\"notBefore\", \"notAfter\"]].set_index(self.df_order.orderCode).to_dict(\n 'index')\n # {'C183-1': {'notBefore': '2020-08-01T00:00', 'notAfter': '2020-11-23T00:00'}, \n # 'C183-2': {'notBefore': '2020-08-01T00:00', 'notAfter': '2020-11-23T00:00'},\n # }\n\n self.df_up = None\n self.opcnt_total = -1\n\n def set_submit(self, submit_data):\n self.df_up = submit_data\n self.opcnt_total = count_ops_total(self)\n # print(f\"ops needed {self.opcnt_total}\")\n\n\ndef to_minute_of_day(s):\n \"\"\" from hh:mm string to minute of day, no check \"\"\"\n\n _ = s.partition(':')\n return int(_[0]) * 60 + int(_[2])\n\n\ndef import_data(jobs: str) -> Tuple[bool, Any]:\n result = None\n try:\n with open(\"jobs.json\", \"r\") as fin:\n result = json.load(fin)\n return True, result\n except Exception:\n msg = \"load environment failed.\"\n return False, 
msg\n\n\ndef calc_rest(df):\n rest_accu = 0\n last_end = 0\n _bag = []\n for _, r in df.iterrows():\n if r['weekday'] == 1:\n _bag.append(90) # compensate for adjust\n continue\n rest = r['mod_start'] - last_end\n last_end = r['mod_end']\n rest_accu += rest\n _bag.append(rest_accu)\n df['rest'] = _bag\n\n\nworktime_daily = {}\nworktime_daily_debug = {}\nattends = {}\n\n\ndef init_daily_template(attendances):\n attendances['mod_start'] = attendances['start'].apply(to_minute_of_day)\n attendances['mod_end'] = attendances['end'].apply(to_minute_of_day)\n global worktime_daily, worktime_daily_debug, attends\n # >>> new code\n attends = {}\n attends['dummy'] = Attend([]).prepare()\n for n, g in attendances.groupby(['attendanceCode']):\n zones = list(g[['mod_start', 'mod_end']].itertuples(index=False, name=None))\n attends[n] = Attend(zones).prepare()\n\n worktime_daily = {}\n worktime_daily_debug = {}\n # attendanceCode, start, end\n for n, g in attendances.groupby(['attendanceCode']):\n zones = list(g[['mod_start', 'mod_end']].itertuples(index=False, name=None))\n attends[n] = Attend(zones).prepare()\n d = g.copy()\n d.columns = ['weekday', 'start', 'end', 'mod_start', 'mod_end']\n d['weekday'] = 0\n d.reset_index(inplace=True, drop=True)\n if n == 'TM00':\n r = d.iloc[-1]\n r2 = [[0, r[1], '24:00', r[3], 1440], [1, '00:00', r[2], 0, r[4]]]\n d = d.drop(labels=[2]).append(pd.DataFrame(r2, columns=d.columns)).reset_index(drop=True)\n\n # print(d.add([1,'',''])) # adjust to week day\n\n calc_rest(d)\n # print(d)\n worktime_daily_debug[n] = d\n worktime_daily[n] = d.iloc[:, [0, 3, 4, 5]]\n\n\nrc_weekly = {}\nrc_week_total = {}\nonedays = {}\nworktime_weekly = {}\n\n\ndef init_worktime(cf):\n resources = cf.df_resource\n attendances = cf.df_attend\n init_daily_template(attendances)\n # print(worktime_daily.keys())\n\n global worktime_weekly, rc_weekly, onedays, rc_week_total\n\n rc_weekly = {}\n # resourceCode, weekday, attendanceCode\n\n for n, g in resources.groupby(['resourceCode']):\n # print(n) # groupby resourceCode\n modes_weekly = {i: 'dummy' for i in range(8)}\n for _, r in g.iterrows():\n mode = r.attendanceCode\n\n # expand workday to list contains workday\n workday = r.weekday # '1-5', '1-6', '6'\n workday = workday.partition('-')\n if workday[2]:\n workday = list(range(int(workday[0]), int(workday[2]) + 1))\n else:\n workday = [int(workday[0])]\n for i in workday:\n modes_weekly[i] = mode\n week_table = rc_weekly.setdefault(n, {})\n total = 0\n for wd in range(1, 8):\n k = (modes_weekly[wd], modes_weekly[wd - 1])\n if k not in onedays:\n onedays[k] = ManHourOneDay(attends.get(k[0]), attends.get(k[1]))\n day_routine = onedays[k]\n total += day_routine.total\n week_table[wd] = day_routine\n rc_week_total[n] = total\n\n worktime_weekly = {}\n\n # resourceCode, weekday, attendanceCode\n for n, g in resources.groupby(['resourceCode']):\n # print(n) # groupby resourceCode\n _bag_whole_code = []\n compensate = -90 if 'TM00' in g['attendanceCode'].unique() else 0\n # print(compensate)\n for _, r in g.iterrows():\n mode = r.attendanceCode\n\n # expand workday to list contains workday\n workday = r.weekday # '1-5', '1-6', '6'\n workday = workday.partition('-')\n if workday[2]:\n workday = list(range(int(workday[0]), int(workday[2]) + 1))\n else:\n workday = [int(workday[0])]\n _bag_a_record = []\n template = worktime_daily[mode]\n for i in workday:\n if i == 1:\n d = template.copy().add([i, 0, 0, 0])\n if mode == 'TM00':\n d.iat[-1, 3] = 0\n _bag_a_record.append(d)\n continue\n 
_bag_a_record.append(template.copy().add([i, 0, 0, compensate]))\n df_worktime = pd.concat(_bag_a_record)\n _bag_whole_code.append(df_worktime)\n\n dfnew = pd.concat(_bag_whole_code).reset_index(drop=True)\n for w, partial in dfnew.groupby(['weekday']):\n _df = worktime_weekly.setdefault(n, {})\n _df[w] = partial.reset_index(drop=True)\n # worktime_weekly[n] = pd.concat(_bag_whole_code).reset_index(drop=True)\n\n\nday_worktime = {}\n\n\ndef calc_day_worktime():\n global day_worktime\n for rc, tab_rc in worktime_weekly.items():\n wt = day_worktime.setdefault(rc, {})\n for wd in range(1, 8):\n tt = tab_rc.get(wd)\n if not isinstance(tt, pd.DataFrame):\n wt[wd] = 0\n continue\n wt[wd] = tt.iat[-1, 2] - tt.iat[-1, 3]\n\n\ndef get_worktime(rc: str):\n return day_worktime.get(rc)\n\n\ndef calc_midday_worktime(worktime, first_day: int, count_mid_day: int) -> int:\n week = list(range(1, 8))\n mh_day = [worktime[wd] for wd in week]\n days_first_week = 7 - first_day # first_day 當週,之後還有幾天\n cnt = count_mid_day\n if cnt < days_first_week: # 不到一週\n return sum(mh_day[first_day:first_day + cnt])\n parts = [0] * 3\n parts[0] = sum(mh_day[first_day:]) # 第一週\n cnt -= days_first_week\n if cnt >= 7: # 超過一週\n weeks = cnt // 7\n parts[1] = sum(mh_day) * weeks\n cnt = cnt % 7\n parts[2] = sum(mh_day[:cnt])\n return sum(parts)\n\n\ndef valid(resource: str, weekday: int, timestamp: datetime) -> Tuple[bool, int, int]:\n \"\"\"return Tuple[valid:bool, minute_of_day:int, rest_time:int]\"\"\"\n\n timetable = worktime_weekly.get(resource).get(weekday)\n\n minod = timestamp.hour * 60 + timestamp.minute\n if not isinstance(timetable, pd.DataFrame):\n print(f\"timetable not dataframe\")\n return False, minod, -1\n\n # print(minod)\n section = [p for _, sub in timetable.iloc[:, [1, 2]].iterrows() for p in sub]\n\n periods = bisect(section, minod)\n # print((minod, periods, periods//2, self._section))\n\n # after last valid section\n if periods == len(section) and minod != section[-1]:\n return False, minod, -1\n\n # before first valid section\n if periods == 0:\n return False, minod, -1\n # print(periods//2)\n if periods % 2 == 1:\n return True, minod, timetable.iloc[(periods // 2), 3]\n # check section right open point \n if section[periods - 1] == minod:\n return True, minod, timetable.iloc[(periods // 2) - 1, 3]\n return False, minod, -1\n\n\ndef calc_diff_old(resource: str, t1: datetime, t2: datetime) -> int:\n \"\"\"return -1 for error, assume t2>t1 ordered\"\"\"\n\n weektable = worktime_weekly.get(resource)\n if not weektable:\n print(\"no weektable\")\n return -1\n tstamp = [t1, t2]\n wday = [t.isoweekday() for t in tstamp]\n timetable = [weektable.get(w) for w in wday]\n minofday = []\n restinday = []\n\n for wd, ts in zip(wday, tstamp):\n ok, mod, rest = valid(resource, wd, ts)\n if not ok:\n print(f\"not ok {wd}, {ts}\")\n return -1\n minofday.append(mod)\n restinday.append(rest)\n\n # print(minofday)\n # print(restinday)\n if t1.date() == t2.date():\n return (minofday[1] - restinday[1]) - (minofday[0] - restinday[0])\n\n partition = [0] * 3\n # calc firstday left over\n # print(timetable[0].iloc[-1])\n tt = timetable[0]\n partition[0] = (tt.iat[-1, 2] - tt.iat[-1, 3]) - (minofday[0] - restinday[0])\n # calc lastday works\n partition[2] = minofday[1] - restinday[1]\n # calc midday\n cnt_mid_day = (t2.date() - t1.date()).days - 1\n # print(cnt_mid_day)\n wt = get_worktime(resource)\n partition[1] = calc_midday_worktime(wt, wday[0], cnt_mid_day)\n # print(partition)\n return sum(partition)\n\n\ndef 
calc_diff_fullday(resource: str, t1: datetime, t2: datetime) -> int:\n \"\"\"return -1 for error, assume t2>t1 ordered\"\"\"\n rc_routines = rc_weekly.get(resource)\n\n if not rc_routines:\n print(\"no weektable\")\n return -1\n tstamp = [t1, t2]\n wday = [t.isoweekday() for t in tstamp]\n # print(wday)\n timetable = [rc_routines.get(w) for w in wday]\n mod = [t.hour * 60 + t.minute for t in tstamp]\n mhcoord = [timetable[0].to_mh(mod[0]), timetable[1].to_mh(mod[1])]\n restinday = [mh.get_leftover() for mh in mhcoord]\n minofday = [mh.get_mhcoord() for mh in mhcoord]\n\n if any(mh == -1 for mh in minofday):\n return -1\n\n days = (t2.date() - t1.date()).days\n\n if minofday[0] == 0: # 開工前\n days += 1\n if restinday[1] > 0: # 收工前,不滿一天\n days -= 1\n return days\n\n\ndef calc_diff(resource: str, t1: datetime, t2: datetime) -> int:\n \"\"\"return -1 for error, assume t2>t1 ordered\"\"\"\n rc_routines = rc_weekly.get(resource)\n\n if not rc_routines:\n print(\"no weektable\")\n return -1\n tstamp = [t1, t2]\n wday = [t.isoweekday() for t in tstamp]\n # print(wday)\n timetable = [rc_routines.get(w) for w in wday]\n mod = [t.hour * 60 + t.minute for t in tstamp]\n mhcoord = [timetable[0].to_mh(mod[0]), timetable[1].to_mh(mod[1])]\n restinday = [mh.get_leftover() for mh in mhcoord]\n minofday = [mh.get_mhcoord() for mh in mhcoord]\n\n if any(mh == -1 for mh in minofday):\n return -1\n\n if t1.date() == t2.date():\n return (minofday[1] - minofday[0])\n\n partition = [0] * 3\n # calc firstday left over\n # print(timetable[0].iloc[-1])\n partition[0] = restinday[0]\n # calc lastday works\n partition[2] = minofday[1]\n # calc midday\n cnt_mid_day = (t2.date() - t1.date()).days - 1\n # print(cnt_mid_day)\n wt = get_worktime(resource)\n partition[1] = calc_midday_worktime(wt, wday[0], cnt_mid_day)\n # print(partition)\n return sum(partition)\n\n\ndef calc_diff_available(resource: str, t1: datetime, t2: datetime) -> int:\n \"\"\"return -1 for error, assume t2>t1 ordered\"\"\"\n rc_routines = rc_weekly.get(resource)\n\n if not rc_routines:\n print(\"no weektable\")\n return -1\n tstamp = [t1, t2]\n wday = [t.isoweekday() for t in tstamp]\n # print(wday)\n timetable = [rc_routines.get(w) for w in wday]\n mod = [t.hour * 60 + t.minute for t in tstamp]\n mhcoord = [timetable[0].to_mh(mod[0]), timetable[1].to_mh(mod[1])]\n while not mhcoord[0].to_available_next():\n t1 = datetime.combine(t1.date(), datetime.min.time()) + timedelta(days=1)\n tbl = rc_routines.get(t1.isoweekday())\n mhcoord[0] = tbl.to_mh(0)\n if t2 < t1:\n print(f\"rc={resource}, start={str(t1)}, end={str(t2)}\")\n print(\"wrong order\")\n return -1\n while not mhcoord[1].to_available_prev():\n # 今天前面沒時間\n if t2.time() != datetime.min.time():\n t2 = datetime.combine(t2.date(), datetime.min.time()) # 今天開始\n t2 += timedelta(minutes=-1) # 昨天 23:59\n else:\n t2 += timedelta(days=-1) # 往前一天 \n tbl = rc_routines.get(t2.isoweekday())\n mhcoord[1] = tbl.to_mh(t2.hour * 60 + t2.minute)\n tstamp = [t1, t2]\n wday = [t.isoweekday() for t in tstamp]\n mod = [t.hour * 60 + t.minute for t in tstamp]\n\n restinday = [mh.get_leftover() for mh in mhcoord]\n minofday = [mh.get_mhcoord() for mh in mhcoord]\n\n if any(mh == -1 for mh in minofday):\n return -1\n\n if t1.date() == t2.date():\n return (minofday[1] - minofday[0])\n\n partition = [0] * 3\n # calc firstday left over\n # print(timetable[0].iloc[-1])\n partition[0] = restinday[0]\n # calc lastday works\n partition[2] = minofday[1]\n # calc midday\n cnt_mid_day = (t2.date() - t1.date()).days - 1\n # 
print(cnt_mid_day)\n    wt = get_worktime(resource)\n    partition[1] = calc_midday_worktime(wt, wday[0], cnt_mid_day)\n    # print(partition)\n    return sum(partition)\n\n\ndef debug_calc_diff(resource: str, t1: datetime, t2: datetime) -> int:\n    \"\"\"return -1 for error, assume t2>t1 ordered\"\"\"\n    weektable = worktime_weekly.get(resource)\n    if not weektable:\n        print(\"no weektable\")\n        return -1\n    tstamp = [t1, t2]\n    wday = [t.isoweekday() for t in tstamp]\n    timetable = [weektable.get(w) for w in wday]\n    minofday = []\n    restinday = []\n\n    for wd, ts in zip(wday, tstamp):\n        ok, mod, rest = valid(resource, wd, ts)\n        if not ok:\n            print(f\"not ok {wd}, {ts}\")\n            return -1\n        minofday.append(mod)\n        restinday.append(rest)\n\n    # print(minofday)\n    # print(restinday)\n    if t1.date() == t2.date():\n        return (minofday[1] - restinday[1]) - (minofday[0] - restinday[0])\n\n    partition = [0] * 3\n    # calc firstday left over\n    # print(timetable[0].iloc[-1])\n    tt = timetable[0]\n    partition[0] = (tt.iat[-1, 2] - tt.iat[-1, 3]) - (minofday[0] - restinday[0])\n    # calc lastday works\n    partition[2] = minofday[1] - restinday[1]\n    # calc midday\n    cnt_mid_day = (t2.date() - t1.date()).days - 1\n    # print(cnt_mid_day)\n    wt = get_worktime(resource)\n    partition[1] = calc_midday_worktime(wt, wday[0], cnt_mid_day)\n    # print(partition)\n    return sum(partition)\n\n\ndef check_monotonic(lst) -> bool:\n    v1 = lst[0]\n    for v2 in lst[1:]:\n        if v2 < v1:\n            return False\n        v1 = v2\n    return True\n\n\ndef count_product_ops(df_op):\n    \"\"\"Count the number of distinct operations for each product, return dict[productCode:str, op_count:int]\"\"\"\n    _df = df_op.sequence\n    _df.index = df_op.productCode\n    _df = _df.groupby(_df.index).nunique()\n    # {'PD000': 8, 'PD001': 15, 'PD002': 15, 'PD003': 13, 'PD004': 24, 'PD005': 24}\n    return _df.to_dict()\n\n\ndef count_ops_total(cf) -> int:\n    df_order = cf.df_order\n    df_op_tbl = df_order[[\"orderCode\", \"productCode\"]]\n    srs_opcnt = df_op_tbl.productCode.apply(cf.tab_prod_op_count_nodupe.get)\n    return df_order.quantity.dot(srs_opcnt)\n\n\ndef check_product_op_count(cf, early_stop=False) -> Tuple[bool, str]:\n    op_cnt_up = cf.df_up[\"orderCode\"].value_counts().to_dict()\n    # print(op_cnt_up)\n    errors = []\n\n    for order, df_up_order in cf.df_up.groupby([\"orderCode\"]):\n        prod = df_up_order.productCode.iat[0]\n        # print(f\"{order}:{prod}\")\n        order_qty = cf.tab_order_qty.get(order)\n        prod_op_count = cf.tab_prod_op_count_nodupe.get(prod)\n        prod_up_count = op_cnt_up.get(order)\n        # compare total ops in order\n        needed = order_qty * prod_op_count\n        found = prod_up_count\n        if found != needed:\n            # found a mismatch\n            msg = f\"find {found}/{needed} ops, invalid.\"\n            if early_stop:\n                return False, msg\n            errors.append(f\"{order} {msg}\")\n        # compare qty\n        op_count = df_up_order[\"sequence\"].value_counts().values\n        if not (op_count == order_qty).all():\n            msg = f\"contains wrong number of records.\"\n            if early_stop:\n                return False, msg\n            errors.append(f\"{order} {msg}\")\n\n    if not errors:\n        return True, \"record count OK\"\n    return False, errors\n\n\ndef dump_routine(r) -> str:\n    msg = []\n    for k, v in r.items():\n        msg.append(f\"{k}:{v.__dict__}\")\n    return str(msg)\n\n\ndef check_op_time(cf, early_stop=False):\n    errors = []\n\n    for _, r in cf.df_up.iterrows():\n        # print(r.columns)\n        # [\"orderCode\", \"productCode\", \"sequence\", \"operationCode\",\n        #  \"resourceCode\", \"resourceUsage\", \"start\", \"end\"]\n\n        prod = r.productCode\n        seq = r.sequence\n        rc = r.resourceCode\n        usage = r.resourceUsage\n        start_u = r.start\n        end_u = r.end\n        (ok, op_sol) = get_operationData(cf, 
prod, seq, rc) # from sol\n if not ok:\n # print(time_prep)\n errors.append(str(op_sol))\n msg = f\"bad record. {str(r.to_dict())}\"\n if early_stop:\n return False, msg\n errors.append(msg)\n continue\n dt_start_u = parser.isoparse(start_u)\n dt_end_u = parser.isoparse(end_u)\n time_op_raw = op_sol.get(\"operationTime\")\n time_prepare = op_sol.get(\"prepareTime\")\n if time_op_raw.endswith(\"D\"):\n # 處理 \"D\" 資料\n if time_prepare > 0:\n # TODO\n raise Exception(\"D days\")\n\n _days = int(time_op_raw[:-1]) # from sol\n\n days_diff = calc_diff_fullday(rc, dt_start_u, dt_end_u)\n if days_diff < _days:\n # errors.append(time_op_raw)\n msg = f\"not enough time {str(r.to_dict())}\"\n if early_stop:\n return False, msg\n errors.append(msg)\n # errors.append(f\"start {dt_start_u.isoweekday()} {datetime2minod(dt_start_u)}\")\n # errors.append(f\"end {dt_end_u.isoweekday()} {datetime2minod(dt_end_u)}\")\n # routine = rc_weekly.get(rc)\n # errors.append(dump_routine(routine))\n continue\n # 其他正常資料 (\"H\", \"M\")\n # print(time_op_raw)\n totalTime = int(time_op_raw[:-1]) * tab_unit.get(time_op_raw[-1]) + time_prepare\n diffTime = calc_diff(rc, dt_start_u, dt_end_u)\n\n if diffTime < 0:\n msg = f\"invalid start/end time: {str(r.to_dict())}\"\n if early_stop:\n return False, msg\n errors.append(msg)\n # errors.append(f\"start {dt_start_u.isoweekday()} {datetime2minod(dt_start_u)}\")\n # errors.append(f\"end {dt_end_u.isoweekday()} {datetime2minod(dt_end_u)}\")\n # routine = rc_weekly.get(rc)\n # errors.append(dump_routine(routine))\n # errors.append(f\"diffTime {diffTime}, totalTime {totalTime}\")\n continue\n\n if diffTime * usage < totalTime:\n msg = f\"allocate not enough time {str(r.to_dict())}\"\n if early_stop:\n return False, msg\n errors.append(msg)\n # errors.append(f\"start {dt_start_u.isoweekday()} {datetime2minod(dt_start_u)}\")\n # errors.append(f\"end {dt_end_u.isoweekday()} {datetime2minod(dt_end_u)}\")\n # routine = rc_weekly.get(rc)\n # errors.append(dump_routine(routine))\n # errors.append(f\"diffTime {diffTime}, totalTime {totalTime}\")\n continue\n\n if not errors:\n return True, \"OK\"\n return False, errors\n\n\ndef get_operationData(cf, prod: str, seq: int, rc: str) -> dict:\n # r = df_op[df_op[\"productCode\"] == prod]\n # r = r[r[\"sequence\"] == seq]\n # r = r[r[\"resourceCode\"] == rc]\n df = cf.df_op_rc.get(rc)\n if (prod, seq) not in df.index:\n return False, {}\n r = df.loc[(prod, seq)]\n # [{'productCode': 'PD001', 'sequence': 20, 'operationCode': 'PC024',\n # 'resourceCode': 'RC010', 'prepareTime': 0, 'TimeRaw': '90M'}]\n result = r.to_dict()\n return True, result\n\n\ndef check_overlap_inside_product(cf, early_stop=False):\n errors = []\n for order, df_up_order in cf.df_up.groupby([\"orderCode\"]):\n\n order_qty = cf.tab_order_qty.get(order)\n data = df_up_order[[\"orderCode\", \"sequence\", \"resourceCode\", \"start\", \"end\"]]\n seq_first = data.sequence.min()\n seq_cur = 0\n seq_last = 0\n fin_seq = data.sort_values([\"sequence\", \"end\"]).reset_index(drop=True)\n fin_seq['fin'] = fin_seq.index % order_qty\n\n data = fin_seq.sort_values([\"sequence\", \"start\"]).reset_index(drop=True)\n data['startseq'] = data.index % order_qty\n idx = pd.MultiIndex.from_frame(fin_seq[['sequence', 'fin']])\n fin_seq = fin_seq.set_index(idx)\n\n # cnt = 30\n for idx, r in data.iterrows():\n dt_start = parser.isoparse(r.start)\n if r.sequence == seq_first:\n seq_cur = r.sequence\n seq_last = r.sequence\n continue\n if r.sequence != seq_cur:\n seq_last = seq_cur\n seq_cur = 
r.sequence\n\n            rlast = fin_seq.loc[(seq_last, r.startseq)]\n            dt_end = parser.isoparse(rlast.end)\n            if dt_end > dt_start: # the previous operation ends after this one starts\n                print(df_up_order.loc[(df_up_order.sequence.isin([seq_last, seq_cur]))])\n                msg = f\"worktime overlapped {str(rlast.to_dict())}:{str(r.to_dict())}\"\n                if early_stop:\n                    return False, msg\n                errors.append('worktime overlapped')\n                errors.append(f\"last:{str(rlast.to_dict())}\")\n                errors.append(f\"curr:{str(r.to_dict())}\")\n\n    if errors:\n        return False, errors\n    return True, \"\"\n\n\ndef check_submit(cf):\n    df_up = cf.df_up\n    if df_up.shape[1] != len(cols_submit):\n        msg = \"submit column mismatch.\"\n        # print(msg)\n        raise Exception(msg)\n\n    if df_up.shape[0] != cf.opcnt_total:\n        msg = f\"submit find {df_up.shape[0]}/{cf.opcnt_total} operations.\"\n        # print(msg)\n        raise Exception(msg)\n    # print(df_up.columns.to_list())\n\n    if set(df_up.columns.to_list()) != set(cols_submit):\n        msg = \"submit find invalid columns.\"\n        raise Exception(msg)\n\n\ndef check_rc_block(bag: list, quantity: int, early_stop=False) -> Tuple[bool, list]:\n    \"\"\"Check parallel resource usage and whether it exceeds the available quantity\"\"\"\n    if quantity == 0: # infinite capacity\n        return True, \"\"\n\n    if not bag:\n        return True, \"\"\n\n    tsq = {} # time sequence\n    for r in bag:\n        usage = r.resourceUsage\n        v = tsq.setdefault(r.start, 0)\n        tsq[r.start] = v + usage\n        v = tsq.setdefault(r.end, 0)\n        tsq[r.end] = v - usage\n\n    cnt = 0\n    err = []\n    for _, update in sorted(tsq.items()):\n        cnt += update\n        if cnt > quantity:\n            msg = f\"run out of resource {quantity}\"\n            if early_stop:\n                return False, f\"{msg}:{str(bag[0].to_dict())}\"\n            err.append(msg)\n            for r in bag:\n                err.append(r.to_dict())\n            return False, err\n\n    return True, []\n\n\ndef check_rc_usage(cf, early_stop=False):\n    err = []\n    for rc, df_up_rc in cf.df_up.groupby([\"resourceCode\"]):\n        rc_data = cf.df_resource[cf.df_resource.resourceCode == rc]\n        rc_qty = rc_data.quantity.values[0]\n        rc_min = rc_data.usageMin.values[0]\n        rc_max = rc_data.usageMax.values[0]\n        data = df_up_rc.sort_values([\"start\"]).reset_index(drop=True)\n        r_last = None\n        bag_connected = []\n        for _, r in data.iterrows():\n            if r.resourceUsage < rc_min:\n                msg = f\"usage less than {rc_min}:{str(r.to_dict())}\"\n                if early_stop:\n                    return False, msg\n                err.append(msg)\n            elif r.resourceUsage > rc_max:\n                msg = f\"usage larger than {rc_max}:{str(r.to_dict())}\"\n                if early_stop:\n                    return False, msg\n                err.append(msg)\n            if r_last is None:\n                r_last = r\n                continue\n            if r.start > r_last.end: # not parallel\n                ok, msg = check_rc_block(bag_connected, rc_qty, early_stop)\n                if not ok:\n                    if early_stop:\n                        return False, msg\n                    err.append(msg)\n                bag_connected = []\n                r_last = r\n                continue\n            # parallel\n            bag_connected.append(r_last)\n            r_last = r\n\n    if not err:\n        return True, []\n    return False, err\n\n\ndef check_orders(cf, early_stop=False) -> Tuple[bool, str]:\n    msg = f\"df_up {cf.df_up.shape}\"\n    # print(msg)\n    order_up = cf.df_up[\"orderCode\"].unique()\n    order_sol = cf.df_order[\"orderCode\"].unique()\n    msg = f\"find {len(order_up)} orders.\"\n    # print(msg)\n    if len(order_sol) != len(order_up):\n        order_up = set(order_up)\n        order_sol = set(order_sol)\n        msg = f\"order mismatch. 
order not found: {order_sol - order_up}, order not expected: {order_up - order_sol}\"\n return False, msg\n return True, \"\"\n\n\ndef prepare(cf, submit):\n init_worktime(cf)\n calc_day_worktime()\n cf.set_submit(submit)\n try:\n check_submit(cf)\n except Exception as e:\n return False, str(e.args)\n return True, \"OK\"\n\n\ndef check(cf):\n errs = []\n ok, err = check_orders(cf, False)\n if not ok:\n errs += err\n # return ok, err\n ok, err = check_product_op_count(cf, False)\n if not ok:\n errs += err\n # return ok, err\n ok, err = check_notBefore(cf, False)\n if not ok:\n errs += err\n # return ok, err\n ok, err = check_op_time(cf, False)\n if not ok:\n errs += err\n # return ok, err\n ok, err = check_overlap_inside_product(cf, False)\n if not ok:\n errs += err\n # return ok, err\n\n ok, err = check_rc_usage(cf, False)\n if not ok:\n errs += err\n # return ok, err\n if errs:\n return False, errs\n else:\n return True, ['OK']\n\n\ndef check_notBefore(cf, early_stop=False):\n errors = []\n for order, df_up_order in cf.df_up.groupby([\"orderCode\"]):\n boundary = cf.tab_time_constraint.get(order)\n notbefore = boundary.get(\"notBefore\")\n # print(notbefore)\n if notbefore:\n start = df_up_order[\"start\"].min()\n # print(start)\n if start < notbefore:\n msg = f\"too early to start: {order}\"\n if early_stop:\n return False, msg\n errors.append(msg)\n\n if not errors:\n return True, \"OK\"\n return False, errors\n" ]
[ [ "pandas.DataFrame.from_dict", "pandas.DataFrame", "pandas.concat", "pandas.Series", "pandas.MultiIndex.from_frame" ] ]
ahuang11/ahh
[ "59f124c3aa04cde58db0ec2e81025eb63a92404e" ]
[ "ahh/vis.py" ]
[ "import os\nimport copy\nimport datetime\nimport numpy as np\nimport xarray as xr\nimport pandas as pd\nfrom collections import Counter\nfrom ahh.ext import (round_to, get_order_mag, report_err, lonw2e)\nfrom ahh.sci import get_stats, get_norm_anom, get_anom, get_norm\nfrom ahh.era import td2dict\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nimport matplotlib.patches as mpatches\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom matplotlib.dates import YearLocator, MonthLocator, DayLocator,\\\n HourLocator, MinuteLocator, AutoDateLocator, \\\n DateFormatter, AutoDateFormatter\nfrom matplotlib.ticker import MultipleLocator, \\\n FormatStrFormatter\nimport matplotlib.dates as mdates\n\n\n__author__ = '[email protected]'\n__copyright__ = 'Andrew Huang'\n\n\nclass MissingInput(Exception):\n pass\n\n\nclass Unsupported(Exception):\n pass\n\n\nTHIS_DIR = os.path.dirname(os.path.realpath(__file__))\n\nDEFAULT = {\n 'scale': 1,\n 'projection': None,\n 'dpi': 105,\n 'sizes': {\n 'figure': {'smallest': 6,\n 'smaller': 9,\n 'small': 12,\n 'medium': 14,\n 'large': 16,\n 'larger': 20,\n 'largest': 24\n },\n 'text': {'smallest': 5.5,\n 'smaller': 7.5,\n 'small': 12,\n 'medium': 14,\n 'large': 16,\n 'larger': 20,\n 'largest': 24\n },\n 'line': {'smallest': 0.4,\n 'smaller': 0.65,\n 'small': 1,\n 'medium': 1.15,\n 'large': 1.3,\n 'larger': 1.5,\n 'largest': 2\n },\n 'tick': {'smallest': 0.05,\n 'smaller': 0.15,\n 'small': 0.2,\n 'medium': 0.55,\n 'large': 1.0,\n 'larger': 1.25,\n 'largest': 1.5\n },\n 'bar': {'smallest': 6,\n 'smaller': 9,\n 'small': 12,\n 'medium': 14,\n 'large': 16,\n 'larger': 20,\n 'largest': 24\n },\n 'marker': {'smallest': 6,\n 'smaller': 9,\n 'small': 12,\n 'medium': 14,\n 'large': 16,\n 'larger': 20,\n 'largest': 24\n },\n 'title pad': {'smallest': 0.985,\n 'smaller': 0.995,\n 'small': 1.0,\n 'medium': 1.01,\n 'large': 1.03,\n 'larger': 1.05,\n 'largest': 1.07\n },\n 'pad': {'smallest': 0.15,\n 'smaller': 0.2,\n 'small': 0.3,\n 'medium': 0.45,\n 'large': 0.6,\n 'larger': 0.85,\n 'largest': 1.0\n }\n },\n 'styles': {\n 'color': {'green': '#145222',\n 'red': '#DF0909',\n 'orange': '#E68D00',\n 'pink': '#CE5F5F',\n 'magenta': '#9E005D',\n 'teal': '#66A7C5',\n 'yellow': '#E0D962',\n 'stone': '#6462E0',\n 'blue': '#2147B1',\n 'purple': '#630460',\n 'black': '#202020',\n 'light gray': '#DADADA',\n 'gray': '#5B5B5B',\n 'white': '#FFFFFF',\n },\n 'tc_color': {'dep': '#7EC6FF',\n 'storm': '#00F9F3',\n 'one': '#FFFFC6',\n 'two': '#FFFF5A',\n 'three': '#FFD97E',\n 'four': '#FF9C00',\n 'five': '#FF5454'\n },\n 'alpha': {'transparent': 0.2,\n 'translucid': 0.3,\n 'translucent': 0.5,\n 'semi opaque': 0.75,\n 'opaque': 0.95,\n }\n },\n 'figtext': {'loc': 'bottom right',\n 'center bottom': {\n 'xy_loc': (0.5, 0.05),\n 'ha': 'center',\n 'va': 'center',\n 'lef_marg': 0.05,\n 'rig_marg': 0.95,\n 'bot_marg': 0.15,\n 'top_marg': 0.95\n },\n 'center left': {'xy_loc': (0.1, 0.5),\n 'ha': 'right',\n 'va': 'center',\n 'lef_marg': 0.175,\n 'rig_marg': 0.95,\n 'bot_marg': 0.15,\n 'top_marg': 0.95\n },\n 'center right': {'xy_loc': (0.9, 0.5),\n 'ha': 'left',\n 'va': 'center',\n 'lef_marg': 0.05,\n 'rig_marg': 0.85,\n 'bot_marg': 0.05,\n 'top_marg': 0.95\n },\n 'bottom left': {'xy_loc': (0.1, 0.075),\n 'ha': 'right',\n 'va': 'bottom',\n 'lef_marg': 0.175,\n 'rig_marg': 0.95,\n 'bot_marg': 0.05,\n 'top_marg': 0.95\n },\n 'bottom right': {'xy_loc': (0.9, 0.075),\n 'ha': 'left',\n 'va': 'bottom',\n 'lef_marg': 0.05,\n 'rig_marg': 0.85,\n 'bot_marg': 
0.05,\n 'top_marg': 0.95\n },\n 'upper left': {'xy_loc': (0.1, 0.925),\n 'ha': 'right',\n 'va': 'top',\n 'lef_marg': 0.175,\n 'rig_marg': 0.95,\n 'bot_marg': 0.05,\n 'top_marg': 0.95\n },\n 'upper right': {'xy_loc': (0.9, 0.925),\n 'ha': 'left',\n 'va': 'top',\n 'lef_marg': 0.05,\n 'rig_marg': 0.85,\n 'bot_marg': 0.05,\n 'top_marg': 0.95\n },\n }\n }\n\nSIZES = DEFAULT['sizes']\nSTYLES = DEFAULT['styles']\nCOLORS = STYLES['color']\nALPHAS = STYLES['alpha']\n\nCOLOR_LIST = [COLORS['red'], COLORS['teal'], COLORS['magenta'],\n COLORS['stone'], COLORS['green'], COLORS['purple'],\n COLORS['blue'], COLORS['light gray'], COLORS['pink'],\n COLORS['orange'], COLORS['gray'], COLORS['yellow'],\n COLORS['black']]\n\nMISC_COLOR_LIST = [\n '#fb2424',\n '#24d324',\n '#2139d5',\n '#21bdbd',\n '#cf0974',\n '#f96710',\n '#ccc506',\n '#780e96',\n '#32a26e',\n '#f89356'\n ]\n\nWARM_COLOR_LIST = [\n '#82050b',\n '#d50303',\n '#f33f00',\n '#f38f00',\n '#f0d073'\n ]\n\nCOOL_COLOR_LIST = [\n '#b9ddb4',\n '#65c2a5',\n '#3287bd',\n '#4f32bd',\n '#84038c'\n ]\n\nHOT_COLOR_LIST = [\n '#641502',\n '#ab0b0b',\n '#c03210',\n '#e27123',\n '#ffbb3e',\n '#f6cb7b'\n ]\n\nWET_COLOR_LIST = [\n '#badbee',\n '#6cb8d0',\n '#59ba85',\n '#3d9e3a',\n '#008000',\n '#003333'\n ]\n\nDRY_COLOR_LIST = [\n '#480505',\n '#7d3e14',\n '#ac6434',\n '#cf9053',\n '#c9c85b',\n '#ebe696'\n ]\n\nNEON_COLOR_LIST = [\n '#7bfc73',\n '#b0cd42',\n '#cd7842',\n '#9a3d5a',\n '#46224b'\n ]\n\nDIV_COLOR_LIST = (WARM_COLOR_LIST + COOL_COLOR_LIST)[::-1]\n\n# https://www.ncl.ucar.edu/Document/Graphics/color_tables.shtml\nNCL_CMAPS = pd.read_pickle(os.path.join(THIS_DIR, 'data', 'ncl_cmaps.pkl'))\nNCL_CMAP_NAMES = NCL_CMAPS.columns.tolist()\n\n\ndef prettify_ax(ax,\n alpha=0.75,\n xlabel=None,\n ylabel=None,\n title=None,\n suptitle=False,\n matchcolor=True,\n legend='best',\n title_pad=1.025,\n length_scale=False,\n ticks=True):\n \"\"\"\n Beautify a plot axis.\n\n :param ax: (matplotlib.axes) - original axis\n :param alpha: (float) - how transparent it is\n :param xlabel: (str) - label of x axis\n :param ylabel: (str) - label of y axis\n :param title: (str) - title of subplot\n :param suptitle: (boolean) - whether to make a figure title\n :param matchcolor: (boolean) - whether to match edgecolor with facecolor\n :param legend: (str) - location of legend\n :param title_pad: (scalar) - distance between box and title\n :param length_scale: (scalar) - whether to scale the labels based on length\n :param ticks: (boolean) - whether to modify ticks\n :return ax: (matplotlib.axes) - prettified axis\n \"\"\"\n if xlabel is None:\n xlabel = plt.getp(ax, 'xlabel')\n\n if ylabel is None:\n ylabel = plt.getp(ax, 'ylabel')\n\n if title is None:\n title = plt.getp(ax, 'title')\n\n set_labels(ax, xlabel=xlabel, ylabel=ylabel, suptitle=suptitle,\n title=title, title_pad=title_pad, length_scale=length_scale)\n\n plots = plt.getp(ax, 'children')\n for plot in plots:\n if plot.axes is not None:\n try:\n if matchcolor:\n edgecolor = plt.getp(plot, 'facecolor')\n plt.setp(plot,\n edgecolor=edgecolor,\n alpha=alpha)\n except:\n plt.setp(plot, alpha=alpha)\n\n set_legend(ax, loc=legend)\n set_borders(ax)\n\n if ticks:\n set_major_grid(ax)\n set_major_ticks(ax)\n set_major_tick_labels(ax)\n\n set_minor_grid(ax)\n set_minor_ticks(ax)\n set_minor_tick_labels(ax)\n\n return ax\n\n\ndef prettify_bokeh(p,\n title_size=15,\n xlabel_size=15,\n ylabel_size=15,\n ytick_label_size=10,\n xtick_label_size=10,\n legend_size=10,\n font='century gothic'):\n \"\"\"\n Scales bokeh plot's label 
sizes based on figure size\n\n :param p: (bokeh.figure) - bokeh figure\n :param title_size: (scalar) - title size\n :param xlabel_size: (scalar) - x label size\n :param ylabel_size: (scalar) - y label size\n :param xtick_label_size: (scalar) - x tick label size\n :param ytick_label_size: (scalar) - y tick label size\n :param legend: (scalar) - size of legend labels\n :param font: (str) - font of labels\n :return p: (bokeh.figure) - bokeh figure\n \"\"\"\n\n title_size = str(scale_it_bokeh(p, title_size, 1)) + 'pt'\n\n xlabel_size = str(scale_it_bokeh(p, xlabel_size, 1)) + 'pt'\n ylabel_size = str(scale_it_bokeh(p, ylabel_size, 1)) + 'pt'\n\n xtick_label_size = str(scale_it_bokeh(p, xtick_label_size, 1)) + 'pt'\n ytick_label_size = str(scale_it_bokeh(p, ytick_label_size, 1)) + 'pt'\n\n legend_size = str(scale_it_bokeh(p, legend_size, 1)) + 'pt'\n\n p.title.text_font_size = title_size\n p.title.text_font_style = 'normal'\n p.title.text_font = font\n p.title.align = 'left'\n p.title.offset = 5\n\n p.xaxis.axis_label_text_font_style = 'normal'\n p.xaxis.axis_label_text_font = font\n p.xaxis.axis_label_text_font_size = xlabel_size\n p.xaxis.major_tick_line_color = 'white'\n p.xaxis.major_label_text_font_size = xtick_label_size\n p.xaxis.axis_line_width = 0.01\n p.xaxis.minor_tick_line_color = 'white'\n\n p.yaxis.axis_label_standoff = 16\n p.yaxis.axis_label_text_font_style = 'normal'\n p.yaxis.axis_label_text_font = font\n p.yaxis.axis_label_text_font_size = ylabel_size\n p.yaxis.major_tick_line_color = 'white'\n p.yaxis.major_label_text_font_size = ytick_label_size\n p.yaxis.minor_tick_line_color = 'white'\n p.yaxis.axis_line_width = 0.01\n\n p.grid.grid_line_dash = 'solid'\n\n p.legend.location = 'top_left'\n p.legend.background_fill_alpha = 0\n p.legend.border_line_alpha = 0\n p.legend.label_text_font_size = legend_size\n\n return p\n\n\ndef plot_map(data, lats=None, lons=None, figsize=None, ax=None, stipple=None,\n cmap='BlueWhiteOrangeRed', orientation='horizontal', wrap=True,\n data_lim=None, vmin=None, vmax=None, balance=True,\n lat1=-90, lat2=90, lon1=-180, lon2=180,\n latlim=None, lonlim=None, region=None,\n title='', title_pad=1.025, suptitle=False,\n lat_labels='auto', lon_labels='auto', length_scale=True,\n rows=1, cols=1, pos=1, fmt=None,\n cbar=True, cbar_label='', shrink=0.25,\n contourf=True, interval=None, tick_locs=None,\n data2=None, lats2=None, lons2=None,\n contour=None, contour2=None,\n clabel=True, clabel2=True,\n mask_land=False, mask_ocean=False,\n land=False, ocean=False, coastlines=True, rivers=False,\n countries=False, states=False, lakes=False,\n projection=None, central_longitude=0, tight_layout='auto',\n dpi=DEFAULT['dpi'], save='', close=True, returnplot=False,\n **kwargs\n ):\n \"\"\"\n Makes a map on a subplot.\n\n :param data: (array) - data to be mapped\n :param lats: (array) - array of latitudes\n :param lons: (array) - array of longitudes\n :param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig\n :param ax: (mpl.axes) - plot axis\n :param stipple: (array) - array of values to be stippled\n :param cmap: (str) - color map\n :param orientation: (str) - orientation of color bar\n :param wrap: (boolean) - fill missing data at prime meridian\n :param data_lim: (tup) - shortcut for vmin and vmax\n :param vmin: (scalar) - lower limit of color bar\n :param vmax: (scalar) - upper limit of color bar\n :param lat1: (scalar) lower limit of latitude\n :param lat2: (scalar) upper limit of latitude\n :param lon1: (scalar) left limit of longitude\n :param 
lon2: (scalar) right limit of longitude\n :param latlim: (tuple) shortcut for lat1 and lat2\n :param lonlim: (tuple) shortcut for lon1 and lon2\n :param region: (str) region to quickly subset lat and lon extent (na or us)\n :param title: (str) - title of subplot\n :param title_pad: (scalar) - distance between box and title\n :param suptitle: (boolean) - whether to make a figure title\n :param lat_labels: (array) - list of latitudes to show on map\n :param lon_labels: (array) - list of longitudes to show on map\n :param length_scale: (scalar) - whether to scale the labels based on length\n :param rows: (int) - number of rows for subplots\n :param cols: (int) - number of columns for subplots\n :param pos: (int) - position of current subplot\n :param fmt: (str) - format of color bar labels\n :param cbar: (boolean) - whether to show color bar\n :param cbar_label: (str) - label of color bar\n :param shrink: (scalar) - how much to shrink the color bar\n :param contourf: (boolean) - whether to cartoonize colormap\n :param interval: (scalar) - interval of tick marks on color bar\n :param tick_locs: (array) - input own tick marks on color bar\n :param data2: (array) - contours to be mapped\n :param lats2: (array) - array of contour latitudes\n :param lons2: (array) - array of contour longitudes\n :param contour: (array) - list of values to contour with solid line\n :param contour2: (array) - list of values to contour with dashed line\n :param clabel: (boolean) - whether to show value on solid contours\n :param clabel2: (boolean) - whether to show value on dashed contours\n :param mask_land: (boolean) - whether to mask land\n :param mask_ocean: (boolean) - whether to mask ocean\n :param land: (boolean) - whether to color fill land\n :param ocean: (boolean) - whether to color fill land\n :param coastlines: (boolean) - whether to draw coastline\n :param rivers: (boolean) - whether to draw rivers\n :param countries: (boolean) - whether to draw country borders\n :param states: (boolean) - whether to draw state borders\n :param lakes: (boolean) - whether to color fill lakes\n :param projection: (cartopy.crs) - projection of map\n :param central_longitude: (scalar) - longitude to center the map on\n :param tight_layout: (str) - on or auto adjust layout of subplots\n :param dpi: (int) - dots per inch to save the figure\n :param save: (str) - if filename is input, will save an image file\n :param close: (boolean) - whether to close figure after saving\n :param returnplot: (boolean) - whether to return plotted line\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n :return plot: (mpl.axes) - optional image plot\n \"\"\"\n from ahh.ext import get_ocean_mask\n import cartopy.util\n\n if isinstance(data, xr.Dataset):\n raise Exception('Please subselect a variable from xr.Dataset!')\n\n if isinstance(data, xr.DataArray):\n if lats is None:\n lats = data.lat.values\n if lons is None:\n lons = data.lon.values\n data = data.to_masked_array()\n\n if isinstance(lons, xr.DataArray):\n lons = lons.values\n\n if isinstance(lons, xr.DataArray):\n lats = lats.values\n\n if lons is None or lats is None:\n raise Exception('Missing lats and lons!')\n\n if data2 is None:\n data2 = data\n\n ndim = data.ndim\n if ndim > 2:\n raise Exception('Data must be 2D, {0}D data was input!'.format(ndim))\n\n if mask_ocean:\n data, lons = get_ocean_mask(data, lats, lons, apply_mask=True)\n elif mask_land:\n data, lons = get_ocean_mask(data, lats, lons,\n reverse=True, apply_mask=True)\n\n 
projection = _get_projection_logic(projection, lons, central_longitude)\n\n if lons2 is None and lats2 is None:\n lats2, lons2 = lats, lons\n else:\n lons2 -= central_longitude\n\n lat1, lat2, lon1, lon2 = _get_lat_lon_lim_logic(latlim, lonlim,\n lat1, lat2, lon1, lon2,\n region=region,\n central_longitude=\n central_longitude)\n\n _set_figsize_logic(figsize=figsize, rows=rows,\n cols=cols, pos=pos, dpi=dpi)\n\n if ax is None:\n ax = plt.subplot(rows, cols, pos, projection=projection)\n\n if wrap:\n try:\n data, lons = cartopy.util.add_cyclic_point(data, coord=lons)\n except:\n print('Unable to wrap!')\n\n ax.set_extent([lon1, lon2, lat1, lat2], projection)\n\n _add_features(ax, land, ocean, coastlines,\n states, countries, lakes, rivers)\n\n set_latlons(ax, central_longitude=central_longitude,\n lat_labels=lat_labels, lon_labels=lon_labels)\n\n if contourf:\n try:\n contourf[0]\n base, base2 = _get_bases_logic(contourf)\n vmin, vmax = _get_vmin_vmax_logic(data=contourf,\n base=base2,\n vmin=vmin,\n vmax=vmax,\n data_lim=data_lim)\n if tick_locs is None:\n tick_locs = contourf\n except:\n base, base2 = _get_bases_logic(data)\n vmin, vmax = _get_vmin_vmax_logic(data=data,\n base=base2,\n vmin=vmin,\n vmax=vmax,\n data_lim=data_lim)\n vmin, vmax = _balance_logic(balance, vmin, vmax)\n\n if interval is None:\n interval = base\n\n oom = get_order_mag(np.abs(vmax) - np.abs(vmin))\n interval = _get_interval_logic(interval=interval,\n vmin=vmin, vmax=vmax,\n base=base, oom=oom)\n\n try:\n contourf[0]\n except:\n contourf = np.arange(vmin, vmax + interval, interval)\n vmin, vmax = _fix_vmin_vmax_logic(vmin=vmin,\n vmax=vmax,\n data=contourf,\n interval=interval)\n contourf, interval = _fix_contourf_logic(contourf=contourf,\n interval=interval,\n vmin=vmin,\n vmax=vmax)\n\n fmt = _get_fmt_logic(fmt=fmt, interval=interval)\n\n cmap = get_cmap(cmap, n=len(contourf))\n (tick_locs,\n cbar_count) = _get_tick_locs_cbar_count_logic(tick_locs=tick_locs,\n vmin=vmin,\n vmax=vmax,\n interval=interval)\n\n im = ax.contourf(lons, lats, data, levels=contourf, extend='both',\n transform=projection, cmap=cmap,\n vmin=vmin, vmax=vmax, **kwargs)\n drawedges = True\n else:\n base, base2 = _get_bases_logic(data)\n vmin, vmax = _get_vmin_vmax_logic(data=data,\n base=base2,\n vmin=vmin,\n vmax=vmax,\n data_lim=data_lim)\n\n vmin, vmax = _balance_logic(balance, vmin, vmax)\n\n cmap = get_cmap(cmap, n=100)\n im = ax.pcolormesh(lons, lats, data, transform=projection,\n cmap=cmap, vmin=vmin, vmax=vmax, **kwargs)\n drawedges = False\n\n if cbar:\n set_cbar(ax, im, label=cbar_label, drawedges=drawedges,\n shrink=shrink, orientation=orientation,\n fmt=fmt, tick_locs=tick_locs)\n\n if stipple:\n ax.contourf(lons2, lats2, data2, stipple, colors='none',\n hatches=['.', '.', ' '],\n transform=projection, **kwargs)\n\n _set_contour_logic(ax, lons2, lats2, data2, contour,\n projection, fmt, clabel)\n _set_contour_logic(ax, lons2, lats2, data2, contour2,\n projection, fmt, clabel2)\n\n set_labels(ax, title=title, title_pad=title_pad,\n length_scale=length_scale, suptitle=suptitle)\n set_borders(ax)\n\n _save_logic(save=save, tight_layout=tight_layout, close=close,\n dpi=dpi, pos=pos, rows=rows, cols=cols)\n\n if returnplot:\n return ax, im\n else:\n return ax\n\n\ndef plot_bounds(ax, lat1=-90, lat2=90, lon1=-180, lon2=180,\n latlim=None, lonlim=None,\n color='k', linestyle='solid', linewidth=1.25,\n fill=False, alpha=0.75, projection=None,\n tight_layout='on', dpi=DEFAULT['dpi'], save='',\n close=True, **kwargs):\n \"\"\"\n Plot a 
bounded region on a map. Default is a rectangle with black outlines.\n\n :param ax: (matplotlib.axes) - original axis\n :param lat1: (float) - a latitudinal bound (can be any order)\n :param lat2: (float) - another latitudinal bound (can be any order)\n :param lon1: (float) - a longitudinal bound (can be any order)\n :param lon2: (float) - another longitudinal bound (can be any order)\n :param latlim: (tuple) shortcut for lat1 and lat2\n :param lonlim: (tuple) shortcut for lon1 and lon2\n :param color: (str) - matplotlib abbrieviation of color\n :param linestyle: (str) - solid, dashed, dashdot, or dotted linestyle\n :param linewidth: (scalar) - how thick line is\n :param fill: (boolean) - whether to color in the region\n :param alpha: (float) - how transparent it is\n :param projection: (cartopy.crs) - map projection\n :param tight_layout: (str) - on or auto adjust layout of subplots\n :param dpi: (int) - dots per inch to save the figure\n :param save: (str) - save figure if string is specified\n :param kwargs: (kwargs) - additional keyword arguments\n :param close: (boolean) - whether to close figure after saving\n \"\"\"\n projection = _get_projection_logic(projection)\n\n lat1, lat2, lon1, lon2 = _get_lat_lon_lim_logic(latlim, lonlim,\n lat1, lat2, lon1, lon2)\n\n width = lon2 - lon1\n height = lat2 - lat1\n\n ax.add_patch(mpatches.Rectangle(xy=[lon1, lat1],\n width=width,\n height=height,\n facecolor=color,\n edgecolor=color,\n linestyle=linestyle,\n linewidth=linewidth,\n alpha=alpha,\n transform=projection,\n fill=fill, **kwargs\n )\n )\n\n _save_logic(save=save, tight_layout=tight_layout, close=close,\n dpi=dpi, pos=1, rows=1, cols=1)\n\n\ndef plot_line(x, y=None, figsize=None,\n ax=None, xlim=None, ylim=None,\n stats=False,\n norm=False, anom=False, norm_anom=False, cumsum=False,\n color=COLORS['red'], alpha=ALPHAS['translucent'],\n inherit=True, label='', xlabel='', ylabel='', title='',\n suptitle=False,\n title_pad=0.965, length_scale=True, linewidth=1, linestyle='-',\n xscale='linear', yscale='linear', minor_date_ticks=True,\n rows=1, cols=1, pos=1, label_inline=False,\n sharex=None, sharey=None,\n twinx=None, twiny=None, aligned=True,\n xinvert=False, yinvert=False, legend=None,\n projection=DEFAULT['projection'],\n tight_layout='auto', dpi=DEFAULT['dpi'],\n save='', close=True, returnplot=False, **kwargs):\n \"\"\"\n Draw a line on a subplot. 
Use other functions for full customizability.\n\n :param x: (arr) - input x array\n :param y: (arr) - input y array\n :param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig\n :param ax: (mpl.axes) - plot axis\n :param xlim: (tup) - left and right x axis limit in a tuple, respectively\n :param ylim: (tup) - left and right y axis limit in a tuple, respectively\n :param stats: (boolean/str) - whether to show stats and if str, the loc\n :param norm: (boolean) - whether to normalize the y\n :param anom: (boolean) - whether to subtract the average of y from y\n :param norm_anom: (boolean) - whether to get the normalized anomaly of y\n :param cumsum: (boolean) - whether to take the cumulative sum of y\n :param color: (str) - color of the plotted line\n :param alpha: (scalar/str) - transparency of the plotted line\n :param inherit: (boolean) - whether to inherit previous labels\n :param label: (str) - label of line to be used in legend\n :param xlabel: (str) - label of x axis\n :param ylabel: (str) - label of y axis\n :param title: (str) - title of subplot\n :param title_pad: (scalar) - distance between box and title\n :param suptitle: (boolean) - whether to make a figure title\n :param length_scale: (scalar) - whether to scale the labels based on length\n :param linewidth: (scalar) - width of the plotted line\n :param linestyle: (str) - style of the plotted line\n :param xscale: (str) - linear or log scale of x axis\n :param yscale: (str) - linear or log scale of y axis\n :param minor_date_ticks: (str) - whether to have date ticks on top axis\n :param rows: (int) - number of rows for subplots\n :param cols: (int) - number of columns for subplots\n :param pos: (int) - position of current subplot\n :param label_inline: (scalar) - whether to label in line; x-value of label\n :param sharex: (mpl.axes) - share x axis ticks with another subplot\n :param sharey: (mpl.axes) - share y axis ticks with another subplot\n :param twinx: (mpl.axes) - share x axis and have another y axis\n :param twiny: (mpl.axes) - share x axis and have another x axis\n :param aligned: (boolean) - whether to keep left and right ticks aligned\n :param xinvert: (boolean) - whether to flip x axis\n :param yinvert: (boolean) - whether to flip y axis\n :param legend: (str) - location of legend\n :param projection: (cartopy.crs) - projection of plotted line\n :param tight_layout: (str) - on or auto adjust layout of subplots\n :param dpi: (int) - dots per inch to save the figure\n :param save: (str) - if filename is input, will save an image file\n :param close: (boolean) - whether to close figure after saving\n :param returnplot: (boolean) - whether to return plotted line\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n :return plot: (mpl.axes) - optional line plot\n \"\"\"\n _set_figsize_logic(figsize=figsize, rows=rows,\n cols=cols, pos=pos, dpi=dpi)\n\n x = _get_dt_from_pd_logic(x)\n x, xtext, xticklabels = _get_xtext_logic(x=x)\n x, y = _get_x_to_y_logic(x=x, y=y)\n y = _get_stats_logic(ax, y, norm=norm, anom=anom,\n norm_anom=norm_anom, cumsum=cumsum)\n\n origin_xlim, xlim = _get_xlim_logic(x, xlim)\n origin_ylim, ylim = _get_ylim_logic(y, ylim)\n\n ax, rows, cols = _get_ax_logic(ax=ax, twinx=twinx, twiny=twiny,\n rows=rows, cols=cols, pos=pos,\n projection=projection)\n\n plot = ax.plot(x, y, **kwargs)\n\n if inherit:\n ax, xlabel, ylabel, title, xlim, ylim = \\\n set_inherited(ax, xlabel, ylabel, title,\n xlim, ylim, origin_xlim, origin_ylim)\n\n linewidth = 
scale_it(ax, linewidth, 0.2)\n\n plt.setp(plot, color=color, alpha=alpha, label=label,\n linewidth=linewidth, linestyle=linestyle,\n solid_capstyle='round', solid_joinstyle='round',\n dash_capstyle='round', dash_joinstyle='round')\n\n # must be after label\n if label is not None and label_inline:\n if not isinstance(label_inline, bool):\n set_inline_label(ax, plot, xval=label_inline)\n else:\n set_inline_label(ax, plot)\n\n if projection is not None:\n plt.setp(plot, transform=projection)\n\n set_axes(ax, xlim=xlim, ylim=ylim,\n xscale=xscale, yscale=yscale,\n xinvert=xinvert, yinvert=yinvert)\n\n # need ax and ylim set\n _show_stats_logic(ax, y, stats)\n\n _settings_logic(ax=ax,\n x=x,\n twinx=twinx,\n twiny=twiny,\n xticks=None,\n xlabel=xlabel,\n ylabel=ylabel,\n title=title,\n title_pad=title_pad,\n suptitle=suptitle,\n aligned=aligned,\n length_scale=length_scale,\n xtext=xtext,\n xticklabels=xticklabels,\n minor_date_ticks=minor_date_ticks)\n\n set_legend(ax, loc=legend)\n\n rows, cols = _set_share_logic(ax=ax, rows=rows, cols=cols,\n sharex=sharex, sharey=sharey,\n xlabel=xlabel, ylabel=ylabel)\n\n _save_logic(save=save, tight_layout=tight_layout, close=close,\n dpi=dpi, pos=pos, rows=rows, cols=cols)\n\n if returnplot:\n return ax, plot\n else:\n return ax\n\n\ndef plot_bar(x, y=None, figsize=None, ax=None, xlim=None, ylim=None,\n stats=False,\n norm=False, anom=False, norm_anom=False, cumsum=False,\n matchcolor=True, color=None, facecolor=COLORS['red'],\n edgecolor=COLORS['red'], alpha=ALPHAS['semi opaque'],\n linewidth=0.25, linestyle='-', title_pad=0.965, length_scale=True,\n inherit=True, label='', xlabel='', ylabel='', title='',\n suptitle=False,\n width='auto', height=None, align='edge',\n xscale='linear', yscale='linear', minor_date_ticks=True,\n rows=1, cols=1, pos=1, orientation='vertical',\n sidebar_count=0, sidebar_pos=1, bar_vals=None,\n sharex=None, sharey=None,\n twinx=None, twiny=None, aligned=True,\n xinvert=False, yinvert=False, legend=None,\n tight_layout='auto', dpi=DEFAULT['dpi'],\n save='', close=True, returnplot=False, **kwargs):\n \"\"\"\n Draw bars on a subplot. 
Use other functions for full customizability.\n :param x: (arr) - input x array\n :param y: (arr) - input y array\n :param xlim: (tup) - left and right x axis limit in a tuple, respectively\n :param ylim: (tup) - left and right y axis limit in a tuple, respectively\n :param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig\n :param ax: (mpl.axes) - plot axis\n :param stats: (boolean/str) - whether to show stats and if str, the loc\n :param norm: (boolean) - whether to normalize the y\n :param anom: (boolean) - whether to subtract the average of y from y\n :param norm_anom: (boolean) - whether to get the normalized anomaly of y\n :param cumsum: (boolean) - whether to take the cumulative sum of y\n :param matchcolor: (boolean) - whether to match edgecolor with facecolor\n :param color: (str) - facecolor and edgecolor of plotted bar\n :param facecolor: (str) - facecolor of plotted bar\n :param edgecolor: (str) - edgecolor of plotted bar\n :param alpha: (scalar/str) - transparency of the plotted bar\n :param linewidth: (scalar) - width of plotted bar edges\n :param linestyle: (str) - style of the plotted bar edges\n :param title_pad: (scalar) - distance between box and title\n :param suptitle: (boolean) - whether to make a figure title\n :param inherit: (boolean) - whether to inherit previous labels\n :param length_scale: (scalar) - whether to scale the labels based on length\n :param label: (str) - label of line to be used in legend\n :param xlabel: (str) - label of x axis\n :param ylabel: (str) - label of y axis\n :param title: (str) - title of subplot\n :param width: (str/scalar) - width of plotted bars when vertical\n :param height: (str/scalar) - height of plotted bars when horizontal\n :param align: (str) - whether to align plotted bar on center or edge\n :param xscale: (str) - linear or log scale of x axis\n :param yscale: (str) - linear or log scale of y axis\n :param minor_date_ticks: (str) - whether to have date ticks on top axis\n :param rows: (int) - number of rows for subplots\n :param cols: (int) - number of columns for subplots\n :param pos: (int) - position of current subplot\n :param orientation: (str) - whether to have horizontal or vertical bars\n :param sidebar_count: (int) - how many bars per x\n :param sidebar_pos: (int) - the location of the side bar\n :param bar_vals: (str) - format of bar vals\n :param sharex: (mpl.axes) - share x axis ticks with another subplot\n :param sharey: (mpl.axes) - share y axis ticks with another subplot\n :param twinx: (mpl.axes) - share x axis and have another y axis\n :param twiny: (mpl.axes) - share x axis and have another x axis\n :param aligned: (boolean) - whether to keep left and right ticks aligned\n :param xinvert: (boolean) - whether to flip x axis\n :param yinvert: (boolean) - whether to flip y axis\n :param legend: (str) - location of legend\n :param tight_layout: (str) - on or auto adjust layout of subplots\n :param dpi: (int) - dots per inch to save the figure\n :param save: (str) - if filename is input, will save an image file\n :param close: (boolean) - whether to close figure after saving\n :param returnplot: (boolean) - whether to return plotted bar\n :return ax: (mpl.axes) - plot axis\n :return plot: (mpl.axes) - optional bar plot\n \"\"\"\n _set_figsize_logic(figsize=figsize, rows=rows,\n cols=cols, pos=pos, dpi=dpi,\n sidebar_pos=sidebar_pos)\n\n x = _get_dt_from_pd_logic(x)\n x, xtext, xticklabels = _get_xtext_logic(x=x)\n x, y = _get_x_to_y_logic(x=x, y=y)\n y = _get_stats_logic(ax, y, norm=norm, 
anom=anom,\n norm_anom=norm_anom, cumsum=cumsum)\n origin_ylim, ylim = _get_ylim_logic(y, ylim)\n\n facecolor, edgecolor = _get_color_logic(color,\n facecolor,\n edgecolor,\n matchcolor)\n\n if width == 'auto':\n width = _get_width_logic(x)\n\n if sidebar_count > 1:\n if facecolor is not COLORS['red']:\n (width, align, x_list) = get_side_bars_recs(x,\n sidebar_count,\n colors=False)\n else:\n (width, align,\n x_list, colors) = get_side_bars_recs(x,\n sidebar_count,\n colors=True)\n if facecolor is COLORS['red']:\n color = colors[sidebar_pos - 1]\n x = x_list[sidebar_pos - 1]\n\n ax, rows, cols = _get_ax_logic(ax=ax, twinx=twinx, twiny=twiny,\n rows=rows, cols=cols, pos=pos)\n\n # set width first\n if xtext:\n align = 'center'\n\n origin_xlim, xlim = _get_xlim_logic(x, xlim, pad=width, align=align)\n\n if sidebar_count > 1 and sidebar_count % 2 == 0:\n xlim = (xlim[0] - width * sidebar_count,\n xlim[1] + width * (sidebar_count - 1))\n elif sidebar_count > 1 and sidebar_count % 2 != 0:\n xlim = (xlim[0] - width * sidebar_count,\n xlim[1])\n\n if 'vertical' in orientation:\n plot = ax.bar(x, y, align=align, label=label, **kwargs)\n elif 'horizontal' in orientation:\n plot = ax.barh(x, y, height=height, align=align,\n label=label, **kwargs)\n\n if inherit:\n ax, xlabel, ylabel, title, xlim, ylim = \\\n set_inherited(ax, xlabel, ylabel, title,\n xlim, ylim, origin_xlim, origin_ylim)\n\n linewidth = scale_it(ax, linewidth, 0.2)\n\n plt.setp(plot, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha,\n linestyle=linestyle, width=width, linewidth=linewidth)\n\n set_axes(ax,\n xlim=xlim,\n ylim=ylim,\n xscale=xscale,\n yscale=yscale,\n xinvert=xinvert,\n yinvert=yinvert)\n\n if bar_vals != False:\n if sidebar_count == 0:\n sidebar_count = 1\n if (len(x) < (50 / sidebar_count * 1.7) and\n sidebar_pos == sidebar_count):\n if bar_vals is None:\n interval = np.median(y)\n bar_vals = _get_fmt_logic(fmt=bar_vals, interval=interval)\n set_bar_vals(ax, fmt=bar_vals, orientation='auto',\n yinvert=yinvert)\n\n _settings_logic(ax=ax,\n x=x,\n twinx=twinx,\n twiny=twiny,\n xticks=None,\n xlabel=xlabel,\n ylabel=ylabel,\n title=title,\n title_pad=title_pad,\n suptitle=suptitle,\n aligned=aligned,\n length_scale=length_scale,\n xtext=xtext,\n xticklabels=xticklabels,\n minor_date_ticks=minor_date_ticks)\n\n rows, cols = _set_share_logic(ax=ax, rows=rows, cols=cols,\n sharex=sharex, sharey=sharey,\n xlabel=xlabel, ylabel=ylabel)\n set_legend(ax, loc=legend)\n\n # need ax and ylim set and bar vals shifted\n _show_stats_logic(ax, y, stats)\n\n _save_logic(save=save, tight_layout=tight_layout, close=close,\n dpi=dpi, pos=pos, rows=rows, cols=cols)\n\n if returnplot:\n return ax, plot\n else:\n return ax\n\n\ndef plot_scatter(x, y=None, figsize=None, ax=None,\n xlim=None, ylim=None,\n stats=False,\n norm=False, anom=False, norm_anom=False, cumsum=False,\n matchcolor=True,\n data_lim=None, vmin=None, vmax=None,\n color=None, facecolor=COLORS['red'], edgecolor=COLORS['red'],\n alpha=ALPHAS['translucent'],\n linewidth=0.25, size=5, marker='o', s=None,\n c=None, cbar=True, cbar_label='', shrink=0.35, cmap=None,\n orientation='horizontal', interval=None, tick_locs=None,\n inherit=True, label='', xlabel='', ylabel='',\n title='', title_pad=0.965, suptitle=False, length_scale=True,\n xscale='linear', yscale='linear', minor_date_ticks=True,\n rows=1, cols=1, pos=1, fmt=None, pad=0.225,\n sharex=None, sharey=None,\n twinx=None, twiny=None, aligned=True,\n xinvert=False, yinvert=False, legend=None,\n 
projection=DEFAULT['projection'],\n tight_layout='auto', dpi=DEFAULT['dpi'],\n save='', close=True, returnplot=False, **kwargs):\n \"\"\"\n Draw markers on a subplot. Use other functions for full customizability.\n\n :param x: (arr) - input x array\n :param y: (arr) - input y array\n :param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig\n :param ax: (mpl.axes) - plot axis\n :param stats: (boolean/str) - whether to show stats and if str, the loc\n :param xlim: (tup) - left and right x axis limit in a tuple, respectively\n :param ylim: (tup) - left and right y axis limit in a tuple, respectively\n :param norm: (boolean) - whether to normalize the y\n :param anom: (boolean) - whether to subtract the average of y from y\n :param norm_anom: (boolean) - whether to get the normalized anomaly of y\n :param cumsum: (boolean) - whether to take the cumulative sum of y\n :param data_lim: (tup) - shortcut for vmin and vmax\n :param vmin: (scalar) - lower limit of color bar\n :param vmax: (scalar) - upper limit of color bar\n :param matchcolor: (boolean) - whether to match edgecolor with facecolor\n :param color: (str) - facecolor and edgecolor of plotted scatter marker\n :param facecolor: (str) - facecolor of plotted scatter marker\n :param edgecolor: (str) - edgecolor of plotted scatter marker\n :param alpha: (scalar/str) - transparency of the plotted scatter marker\n :param linewidth: (scalar) - width of plotted scatter marker edges\n :param size: (scalar) - size of plotted scatter marker\n :param marker: (scalar) - style of plotted scatter marker\n :param s: (arr) - array to map size to\n :param c: (arr) - array to map color to\n :param cbar: (boolean) - whether to show color bar\n :param cbar_label: (str) - label of color bar\n :param shrink: (scalar) - size of color bar\n :param cmap: (str) - color map\n :param orientation: (str) - orientation of color bar\n :param interval: (scalar) - interval of tick marks on color bar\n :param tick_locs: (array) - input own tick marks on color bar\n :param inherit: (boolean) - whether to inherit previous labels\n :param label: (str) - label of line to be used in legend\n :param xlabel: (str) - label of x axis\n :param ylabel: (str) - label of y axis\n :param title: (str) - title of subplot\n :param title_pad: (scalar) - distance between box and title\n :param suptitle: (boolean) - whether to make a figure title\n :param length_scale: (scalar) - whether to scale the labels based on length\n :param xscale: (str) - linear or log scale of x axis\n :param yscale: (str) - linear or log scale of y axis\n :param minor_date_ticks: (str) - whether to have date ticks on top axis\n :param rows: (int) - number of rows for subplots\n :param cols: (int) - number of columns for subplots\n :param pos: (int) - position of current subplot\n :param fmt: (str) - format of color bar labels\n :param pad: (scalar) - padding of color bar from plot\n :param sharex: (mpl.axes) - share x axis ticks with another subplot\n :param sharey: (mpl.axes) - share y axis ticks with another subplot\n :param twinx: (mpl.axes) - share x axis and have another y axis\n :param twiny: (mpl.axes) - share x axis and have another x axis\n :param aligned: (boolean) - whether to keep left and right ticks aligned\n :param xinvert: (boolean) - whether to flip x axis\n :param yinvert: (boolean) - whether to flip y axis\n :param legend: (str) - location of legend\n :param projection: (cartopy.crs) - projection of plotted scatter\n :param tight_layout: (str) - on or auto adjust layout of 
subplots\n :param dpi: (int) - dots per inch to save the figure\n :param save: (str) - if filename is input, will save an image file\n :param close: (boolean) - whether to close figure after saving\n :param returnplot: (boolean) - whether to return plotted scatter\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n :return plot: (mpl.axes) - optional scatter plot\n \"\"\"\n _set_figsize_logic(figsize=figsize, rows=rows,\n cols=cols, pos=pos, dpi=dpi)\n\n x = _get_dt_from_pd_logic(x)\n x, xtext, xticklabels = _get_xtext_logic(x=x)\n x, y = _get_x_to_y_logic(x, y)\n y = _get_stats_logic(ax, y, norm=norm, anom=anom,\n norm_anom=norm_anom, cumsum=cumsum)\n origin_ylim, ylim = _get_ylim_logic(y, ylim)\n origin_xlim, xlim = _get_xlim_logic(x, xlim)\n\n ax, rows, cols = _get_ax_logic(ax=ax, twinx=twinx, twiny=twiny,\n rows=rows, cols=cols, pos=pos,\n projection=projection)\n\n if c is not None:\n base, base2 = _get_bases_logic(c)\n vmin, vmax = _get_vmin_vmax_logic(data=c, base=base2,\n vmin=vmin, vmax=vmax,\n data_lim=data_lim)\n\n oom = get_order_mag(vmax - vmin)\n interval = _get_interval_logic(interval=interval,\n vmin=vmin, vmax=vmax,\n base=base, oom=oom)\n fmt = _get_fmt_logic(fmt=fmt, interval=interval)\n vmin, vmax = _fix_vmin_vmax_logic(vmin=vmin, vmax=vmax, data=c,\n interval=interval)\n (tick_locs,\n cbar_count) = _get_tick_locs_cbar_count_logic(tick_locs=tick_locs,\n vmin=vmin, vmax=vmax,\n interval=interval)\n if cmap is None:\n cmap = 'viridis'\n\n cmap = get_cmap(cmap, cbar_count)\n edgecolor = None\n facecolor = COLORS['gray']\n\n if s is not None:\n size = np.abs(s)\n else:\n size = scale_it(ax, np.abs(size), 25, exp=False)\n\n plot = ax.scatter(x, y, marker=marker,\n linewidths=linewidth,\n s=size, c=c, cmap=cmap,\n vmin=vmin, vmax=vmax,\n **kwargs\n )\n\n if cbar and cmap is not None:\n set_cbar(ax, plot, label=cbar_label, fmt=fmt,\n pad=pad, shrink=shrink,\n tick_size=8, label_size=10,\n orientation=orientation,\n tick_locs=tick_locs)\n else:\n if color is not None:\n facecolor = color\n edgecolor = color\n if matchcolor:\n edgecolor = facecolor\n\n if inherit:\n ax, xlabel, ylabel, title, xlim, ylim = \\\n set_inherited(ax, xlabel, ylabel, title,\n xlim, ylim, origin_xlim, origin_ylim)\n\n linewidth = scale_it(ax, linewidth, 0.2)\n\n if projection is not None:\n plt.setp(plot, transform=projection)\n\n plt.setp(plot, facecolor=facecolor, edgecolor=edgecolor,\n alpha=alpha, label=label)\n\n set_axes(ax, xlim=xlim, ylim=ylim,\n xscale=xscale, yscale=yscale,\n xinvert=xinvert, yinvert=yinvert)\n\n # need ax and ylim set\n _show_stats_logic(ax, y, stats)\n\n _settings_logic(ax=ax,\n x=x,\n twinx=twinx,\n twiny=twiny,\n xticks=None,\n xlabel=xlabel,\n ylabel=ylabel,\n title=title,\n title_pad=title_pad,\n suptitle=suptitle,\n aligned=aligned,\n length_scale=length_scale,\n xtext=xtext,\n xticklabels=xticklabels,\n minor_date_ticks=minor_date_ticks)\n\n rows, cols = _set_share_logic(ax=ax, rows=rows, cols=cols,\n sharex=sharex, sharey=sharey,\n xlabel=xlabel, ylabel=ylabel)\n\n set_legend(ax, loc=legend)\n\n _save_logic(save=save, tight_layout=tight_layout, close=close,\n dpi=dpi, pos=pos, rows=rows, cols=cols)\n\n if returnplot:\n return ax, plot\n else:\n return ax\n\n\ndef plot(*plot_args, **plot_kwargs):\n \"\"\"\n Plot multiple line/bar/scatter plots at once using this syntax\n x, y, 'label', 'ptype/color/linestyle/marker'\n\n Example - plot a red dashed line with circle marker and a black bar plot\n plot(x, y, 'line plot', 
'line/red/--/o', x2, y2, 'bar plot', 'bar/black')\n\n Equivalent shorthand\n plot(x, y, 'line plot', 'l/r/--/o', x2, y2, 'bar plot', 'b/k')\n\n Example 2 - plot a green solid line, blue bar plot, yellow scatter plot\n with a title, ylabel, and xlabel\n plot(x, y, 'labl', 'l/r', x2, y2, 'labl2', 'b/b', x3, y3, 'labl3', 's/y',\n title='title', ylabel='a ylabel', xlabel='one xlabel')\n\n Example 3 - adjust figsize while still stacking all the plots\n plot(x, y, 'labl', 'l', x2, y2, 'labl2', 'b', figsize=(8, 5), stack=True)\n\n Example 4 - plot two separate figures\n plot(x, y, 'labl', 'l', x2, y2, 'labl2', 'b', stack=False)\n\n :param stack: (bool) whether to keep stacking if figsize input is provided\n :return ax_list: (list) - list of axes\n \"\"\"\n plot_inputs = zip(plot_args[::4],\n plot_args[1::4],\n plot_args[2::4],\n plot_args[3::4])\n\n figsize = plot_kwargs.get('figsize', 'na')\n stack = plot_kwargs.get('stack', True)\n\n if figsize == 'na':\n set_figsize()\n\n ax_list = []\n\n for i, plot_input in enumerate(plot_inputs):\n if stack and i > 0:\n plot_kwargs['figsize'] = 'na'\n x, y, label, style = plot_input\n ptype, color, linestyle, marker = _parse_style(style)\n\n vis_dict = dict(label=label, color=color,\n linestyle=linestyle, marker=marker,\n **plot_kwargs)\n\n if ptype in ['b', 'bar']:\n _pop_keys(vis_dict, 'bar')\n ax = plot_bar(x, y, **vis_dict)\n elif ptype in ['s', 'scatter']:\n _pop_keys(vis_dict, 'scatter')\n if vis_dict['marker'] == '':\n vis_dict['marker'] = 'o'\n ax = plot_scatter(x, y, **vis_dict)\n else:\n _pop_keys(vis_dict, 'line')\n ax = plot_line(x, y, **vis_dict)\n\n ax_list.append(ax)\n\n return ax_list\n\n\ndef plot_hist(x=None, y=None, ptype='bar', align='edge', bar_vals=None,\n width='auto', norm=False, cumsum=False, **kwargs):\n \"\"\"\n Plot histogram using plot line/bar/scatter.\n\n :param x: (int/arr) - number of bins or array of bin edges\n :param y: (arr) - array of items\n :param ptype: (str) - whether to plot line, bar, or scatter\n :param align: (str) - whether to align bars on edge or center\n :param bar_vals: (str) - format of bar vals\n :param width: (str/scalar) - width of plotted bars when vertical\n :param norm: (boolean) - whether to normalize the y\n :param cumsum: (boolean) - whether to take the cumulative sum of y\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n if y is None:\n y = x\n x = None\n\n try:\n int(x)\n refresh_x = x + 1\n except:\n refresh_x = 0\n\n if norm:\n weights = np.ones_like(y) / float(len(y))\n normed = 0\n else:\n weights = None\n normed = False\n\n try:\n if x is None or refresh_x:\n if not refresh_x:\n ymin = np.min(y)\n ymax = np.max(y)\n oom = get_order_mag(ymax - ymin)\n base = np.power(5, oom)\n ymin = round_to(ymin, base=base)\n ymax = round_to(ymax, base=base)\n x = np.arange(ymin, ymax, base)\n if ymin == ymax or refresh_x:\n ymin = np.min(y) # refresh it\n ymax = np.max(y)\n if refresh_x == 0:\n refresh_x += 7\n x = np.linspace(ymin, ymax, refresh_x)\n\n y = np.clip(y, np.min(x), np.max(x))\n hist_counts, bin_edges = np.histogram(y, x,\n normed=normed,\n weights=weights)\n x, y = bin_edges[:-1], hist_counts\n if width == 'auto':\n width = np.average(np.diff(x))\n except:\n text_hist = Counter(y)\n y = list(text_hist.values())\n x = list(text_hist.keys())\n align = 'center'\n\n if bar_vals is None:\n if not norm:\n bar_vals = '%1d'\n else:\n bar_vals = '%.2f'\n\n if ptype == 'bar':\n plot_bar(x, y, align=align, width=width, bar_vals=bar_vals,\n 
cumsum=cumsum, **kwargs)\n elif ptype == 'scatter':\n plot_scatter(x, y, cumsum=cumsum, **kwargs)\n else:\n plot_line(x, y, cumsum=cumsum, **kwargs)\n\n\ndef plot_heatmap(df, figsize=None, ax=None, mask=None, mask2=None,\n size=12, cmap='RdBu_r', orientation='vertical',\n edgecolor=COLORS['black'],\n xrotation=0, yrotation=0,\n data_lim=None, vmin=None, vmax=None,\n inherit=True, label='', xlabel='', ylabel='',\n title='', title_pad=1.025, suptitle=False, length_scale=True,\n xticklabels=None, yticklabels=None,\n rows=1, cols=1, pos=1, fmt=None, pad=0.3,\n cbar=True, cbar_label='', shrink=0.2,\n interval=None, tick_locs=None,\n xinvert=False, yinvert=True,\n tight_layout='auto', dpi=DEFAULT['dpi'],\n save='', close=True, returnplot=False, **kwargs):\n \"\"\"\n Draw a heatmap on a subplot. Use other functions for full customizability.\n\n :param df: (pd.DataFrame) - dataframe to be converted into heatmap\n :param mask: (pd.DataFrame) - dataframe containing booleans to show text\n :param mask2: (pd.DataFrame) - dataframe containing booleans to show text\n :param size: (scalar) - size of text over masks\n :param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig\n :param ax: (mpl.axes) - plot axis\n :param cmap: (str) - color map\n :param orientation: (str) - orientation of color bar\n :param data_lim: (tup) - shortcut for vmin and vmax\n :param vmin: (scalar) - lower limit of color bar\n :param vmax: (scalar) - upper limit of color bar\n :param xrotation: (scalar) - degrees to rotate x major tick labels\n :param yrotation: (scalar) - degrees to rotate y major tick labels\n :param inherit: (boolean) - whether to inherit previous labels\n :param label: (str) - label of line to be used in legend\n :param xlabel: (str) - label of x axis\n :param ylabel: (str) - label of y axis\n :param title: (str) - title of subplot\n :param title_pad: (scalar) - distance between box and title\n :param suptitle: (boolean) - whether to make a figure title\n :param length_scale: (scalar) - whether to scale the labels based on length\n :param xticklabels: (list) - manually set x major tick labels\n :param yticklabels: (list) - manually set y major tick labels\n :param rows: (int) - number of rows for subplots\n :param cols: (int) - number of columns for subplots\n :param pos: (int) - position of current subplot\n :param fmt: (str) - format of color bar labels\n :param pad: (scalar) - padding of color bar\n :param cbar: (boolean) - whether to show color bar\n :param cbar_label: (str) - label of color bar\n :param shrink: (scalar) - size of color bar\n :param interval: (scalar) - interval of tick marks on color bar\n :param tick_locs: (array) - input own tick marks on color bar\n :param xinvert: (boolean) - whether to flip x axis\n :param yinvert: (boolean) - whether to flip y axis\n :param tight_layout: (str) - on or auto adjust layout of subplots\n :param dpi: (int) - dots per inch to save the figure\n :param save: (str) - if filename is input, will save an image file\n :param close: (boolean) - whether to close figure after saving\n :param returnplot: (boolean) - whether to return plotted heatmap\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n :return plot: (mpl.axes) - optional line plot\n \"\"\"\n _set_figsize_logic(figsize=figsize, rows=rows,\n cols=cols, pos=pos, dpi=dpi)\n\n if ax is None:\n ax = plt.subplot(rows, cols, pos)\n\n base, base2 = _get_bases_logic(df)\n vmin, vmax = _get_vmin_vmax_logic(data=df,\n base=base2,\n vmin=vmin,\n 
vmax=vmax,\n data_lim=data_lim)\n oom = get_order_mag(vmax - vmin)\n interval = _get_interval_logic(interval=interval,\n vmin=vmin, vmax=vmax,\n base=base, oom=oom)\n fmt = _get_fmt_logic(fmt=fmt, interval=interval)\n vmin, vmax = _fix_vmin_vmax_logic(vmin=vmin, vmax=vmax, data=df,\n interval=interval)\n (tick_locs,\n cbar_count) = _get_tick_locs_cbar_count_logic(tick_locs=tick_locs,\n vmin=vmin, vmax=vmax,\n interval=interval)\n\n cmap = get_cmap(cmap, cbar_count)\n im = ax.pcolor(df,\n cmap=cmap,\n vmin=vmin,\n vmax=vmax,\n edgecolors=edgecolor,\n **kwargs)\n\n ax.set_yticks(np.arange(df.shape[0]) + 0.5, minor=False)\n ax.set_xticks(np.arange(df.shape[1]) + 0.5, minor=False)\n\n ax.patch.set(hatch='+',\n edgecolor=COLORS['gray'],\n color=COLORS['gray'],\n alpha=0.45, lw=0.25)\n\n if xinvert:\n ax.invert_yaxis()\n\n if yinvert:\n ax.invert_yaxis()\n\n if xticklabels is None:\n xticklabels = df.columns\n\n if yticklabels is None:\n yticklabels = df.index\n\n set_major_tick_labels(ax,\n xticklabels=xticklabels,\n yticklabels=yticklabels,\n xrotation=xrotation,\n yrotation=yrotation)\n\n set_labels(ax, xlabel=xlabel, ylabel=ylabel, suptitle=suptitle,\n title=title, title_pad=title_pad, length_scale=length_scale)\n\n ax.grid(False)\n\n if cbar:\n set_cbar(ax, im, label=cbar_label, fmt=fmt,\n pad=pad, shrink=shrink,\n tick_size=8, label_size=10,\n orientation=orientation,\n tick_locs=tick_locs)\n\n df_nan = np.ma.masked_invalid(df)\n\n if mask is not None:\n _set_heatmap_mask(ax, df_nan, mask, size)\n\n if mask2 is not None:\n _set_heatmap_mask(ax, df_nan, mask2, size)\n\n _save_logic(save=save, tight_layout=tight_layout, close=close,\n dpi=dpi, pos=pos, rows=rows, cols=cols)\n\n if returnplot:\n return ax, im\n else:\n return ax\n\n\ndef plot_cbar(cmap,\n fig=None,\n left=0.05,\n bottom=0.95,\n width=0.95,\n height=0.05,\n label='',\n fmt='%1.0f',\n label_size=12,\n drawedges=True,\n label_color=COLORS['gray'],\n ticks=None,\n boundaries=None,\n tick_size=8,\n tick_color=COLORS['gray'],\n color=COLORS['black'],\n pad=0.075,\n aspect=25.5,\n shrink=0.2,\n length=0,\n tick_width=0.25,\n direction='out',\n orientation='horizontal',\n cax=None,\n **kwargs):\n \"\"\"\n Plot lone color bar.\n\n :param cmap: (list/str) - a list containing RGB or Python/NCL cmap name\n :param fig: (boolean) - input figure\n :param left: (scalar) - left padding from figure edge\n :param bottom: (scalar) - bottom padding from figure left edge\n :param width: (scalar) - percent width of figure\n :param height: (scalar) - percent height of figure\n :param fmt: (str) - format of color bar labels\n :param label_size: (scalar) - size of color bar label\n :param label_color: (scalar) - color of color bar label\n :param ticks: (array) - input own tick marks on color bar\n :param tick_size: (scalar) - size of color bar tick labels\n :param tick_color: (scalar) - color of color bar tick labels\n :param color: (scalar) - color of color bar tick marks\n :param drawedges: (scalar) - whether to draw color edges\n :param pad: (scalar) - padding of color bar from plot\n :param aspect: (int) - aspect ratio of color bar\n :param shrink: (scalar) - size of color bar\n :param length: (scalar) - length of color bar tick marks\n :param tick_width: (scalar) - width of color bar tick marks\n :param direction: (str) - direction of color bar tick marks\n :param orientation: (str) - orientation of color bar\n :param cax: (mpl.axes) - plot axis to attach to\n :param kwargs: (kwargs) - additional keyword arguments\n :return cbar: (mpl.ColorBar) - 
matplotlib color bar\n \"\"\"\n if fig is None:\n fig = set_figsize(8, 4)\n\n if boundaries is None and ticks is not None:\n boundaries = ticks\n\n ax = fig.add_axes([left, bottom, width, height])\n\n cmap = get_cmap(cmap)\n\n cbar = mpl.colorbar.ColorbarBase(ax, ticks=ticks,\n boundaries=boundaries,\n cmap=cmap,\n orientation=orientation)\n cbar.ax.tick_params(labelsize=tick_size,\n direction=direction,\n length=length,\n width=tick_width,\n tick2On=True,\n labelcolor=label_color,\n color=color)\n cbar.set_label(label,\n size=label_size,\n color=label_color)\n return cbar\n\n\ndef init_map(lat1=-90, lat2=90, lon1=-180, lon2=180,\n latlim=None, lonlim=None, region=None,\n rows=1, cols=1, pos=1, figsize=None, ax=None,\n title='', suptitle=False,\n length_scale=True, lat_labels='auto', lon_labels='auto',\n projection=DEFAULT['projection'], central_longitude=0,\n land=False, ocean=False, lakes=True,\n coastlines=True, states=True, countries=True, rivers=False,\n tight_layout='auto', dpi=DEFAULT['dpi'], save='', close=True):\n \"\"\"\n Initialize a projected map.\n\n :param lat1: (scalar) lower limit of latitude\n :param lat2: (scalar) upper limit of latitude\n :param lon1: (scalar) left limit of longitude\n :param lon2: (scalar) right limit of longitude\n :param latlim: (tuple) shortcut for lat1 and lat2\n :param lonlim: (tuple) shortcut for lon1 and lon2\n :param region: (str) region to quickly subset lat and lon extent (na or us)\n :param rows: (int) - number of rows for subplots\n :param cols: (int) - number of columns for subplots\n :param pos: (int) - position of current subplot\n :param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig\n :param ax: (mpl.axes) - plot axis\n :param title: (str) - title of subplot\n :param length_scale: (scalar) - whether to scale the labels based on length\n :param lat_labels: (array) - list of latitudes to show on map\n :param lon_labels: (array) - list of longitudes to show on map\n :param projection: (cartopy.crs) - projection of map\n :param central_longitude: (scalar) - longitude to center the map on\n :param land: (boolean) - whether to color fill land\n :param ocean: (boolean) - whether to color fill land\n :param lakes: (boolean) - whether to color fill lakes\n :param coastlines: (boolean) - whether to draw coastline\n :param states: (boolean) - whether to draw state borders\n :param countries: (boolean) - whether to draw country borders\n :param rivers: (boolean) - whether to draw rivers\n :param tight_layout: (str) - on or auto adjust layout of subplots\n :param dpi: (int) - dots per inch to save the figure\n :param save: (str) - if filename is input, will save an image file\n :param close: (boolean) - whether to close figure after saving\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n\n _set_figsize_logic(figsize=figsize, rows=rows,\n cols=cols, pos=pos, dpi=dpi)\n\n projection = _get_projection_logic(projection)\n\n if ax is None:\n ax = plt.subplot(rows, cols, pos, projection=projection)\n\n lat1, lat2, lon1, lon2 = _get_lat_lon_lim_logic(latlim, lonlim,\n lat1, lat2, lon1, lon2,\n region=region,\n central_longitude=\n central_longitude)\n\n ax.set_extent([lon1, lon2, lat1, lat2], projection)\n\n _add_features(ax, land, ocean, coastlines,\n states, countries, lakes, rivers)\n\n set_latlons(ax,\n lat_labels=lat_labels, lon_labels=lon_labels,\n central_longitude=central_longitude)\n\n set_labels(ax, title=title, length_scale=length_scale)\n\n _save_logic(save=save, tight_layout=tight_layout, close=close,\n dpi=dpi, pos=pos, 
rows=rows, cols=cols)\n\n return ax\n\n\ndef get_side_bars_recs(x, sidebar_count, colors=True):\n \"\"\"\n Output some recommended values to show side by side bars.\n\n :param x: (arr) - input x array\n :param sidebar_count: (int) - how many bars side by side\n :param colors: (boolean) - whether to return colors\n :return width: (scalar) - adjusted width of color bars\n :return align: (str) - edge or center based on sidebar_count\n :return x_list: (list) - adjusted x values\n :return colors: (list) - list of colors\n \"\"\"\n if sidebar_count == 0:\n raise IOError('Unable to have 0 side bars per x!')\n if sidebar_count == 1:\n if colors:\n return 0.833333333, 'center', [x], [COLOR_LIST[0]]\n else:\n return 0.833333333, 'center', [x]\n\n if sidebar_count % 2 == 0:\n align = 'edge'\n else:\n align = 'center'\n\n width = _get_width_logic(x) / sidebar_count\n\n x_shift_end = sidebar_count // 2\n x_shift_start = -(sidebar_count - x_shift_end)\n x_shifts = np.arange(x_shift_start, x_shift_end)\n if align is 'center':\n extra_x_shift = len(x_shifts) // 2 + 1\n x_shifts += extra_x_shift\n\n x_list = []\n for x_shift in x_shifts:\n try:\n x_list.append(mdates.date2num(x) + width * x_shift)\n except:\n x_list.append(x + width * x_shift)\n\n if colors:\n colors = COLOR_LIST[0:sidebar_count]\n return width, align, x_list, colors\n else:\n return width, align, x_list\n\n\ndef set_bar_vals(ax, size=7.5,\n color=COLORS['black'],\n alpha=ALPHAS['translucent'],\n orientation='auto',\n inherit_color=False,\n pad_remover=1,\n fmt='%d',\n yinvert=False):\n \"\"\"\n Label the rectangles in bar plots with its respective values.\n Adaptation of: \"http://composition.al/blog/2015/11/29/a-better-way-to-\\\n add-labels-to-bar-charts-with-matplotlib/\"\n\n :param ax: (mpl.axes) - plot axis\n :param size: (scalar) - size of bar labels\n :param color: (str) - color of bar labels\n :param alpha: (scalar/str) - transparency of bar labels\n :param orientation: (str) - orientation of the labels\n :param inherit_color: (boolean) - whether to inherit color for labels\n :param pad_remover: (scalar): - space to remove between ylim and labels\n :param fmt: (str) - format of color bar labels\n :param yinvert (boolean) - whether to invert the y values of labels\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n try:\n pad_remover = scale_it(ax, pad_remover, 0.1, exp=True)\n\n xmin, xmax = ax.get_xlim()\n ymin, ymax = ax.get_ylim()\n\n if xmin > xmax:\n xmin, xmax = xmax, xmin\n\n if ymin > ymax:\n ymin, ymax = ymax, ymin\n\n y_height = ymax - ymin\n\n rects = ax.patches\n size = scale_it(ax, size, 1, exp=True) / np.log(len(rects))\n if len(rects) > 5:\n size *= 3\n\n if orientation is 'auto':\n if len(str(int(ymax))) > 2:\n orientation = 'vertical'\n else:\n orientation = 'horizontal'\n\n if orientation is 'vertical':\n rotation = 90\n height_mult = 0.02\n limit_mult = 2\n else:\n rotation = 0\n height_mult = 0.015\n limit_mult = 1\n\n pos_ct = 1 # to dampen future\n neg_ct = 1 # ylim increases\n orient_add = 0\n\n for rect in rects:\n x = plt.getp(rect, 'x')\n y = rect.get_height()\n if plt.getp(ax, 'yscale') is 'log':\n label_position = y\n if orientation is 'vertical':\n label_position += y / 50\n else:\n label_position = y + (y_height * height_mult)\n\n if y < 0:\n va = 'top'\n if orientation is 'horizontal':\n orient_add = label_position / 60\n label_position += (y_height * -2 * height_mult)\n else:\n va = 'bottom'\n\n if label_position >= (ymax - ymax / 5):\n ymax += (ymax * pad_remover / 6.5 /\n pos_ct * limit_mult + 
orient_add)\n pos_ct += 15\n if label_position <= (ymin - ymin / 5):\n ymin += (ymin * pad_remover / 8 /\n neg_ct * limit_mult + orient_add)\n neg_ct += 15\n\n if inherit_color:\n color = plt.getp(rect, 'facecolor')\n\n ax.set_ylim(ymin, ymax)\n\n if yinvert:\n label_position *= -1\n\n if (ymin <= y < ymax) and (xmin < x < xmax):\n ax.text(rect.get_x() + rect.get_width() / 2., label_position,\n fmt % y, size=size, alpha=alpha, color=color,\n ha='center', va=va, rotation=rotation)\n\n except:\n print('Unable to set bar vals!')\n\n return ax\n\n\ndef set_inline_label(ax, line, label=None,\n xval=None, size=6, alpha=ALPHAS['translucent'],\n color=None, ha='center', va='center',\n bbox=dict(facecolor=COLORS['white'],\n edgecolor=COLORS['white'],\n alpha=ALPHAS['transparent']),\n **kwargs):\n \"\"\"\n Automatically adds an inline label to line\n https://github.com/cphyc/matplotlib-label-lines\n\n :param ax: (mpl.axes) - plot axis\n :param line: (mpl.Line2D) - line to be labeled\n :param label: (str) - label of line\n :param xval: (scalar) - x value of label; defaults to median\n :param size: (scalar) - size of label\n :param alpha: (scalar) - opacity of label\n :param ha: (str) - horizontal alignment of label\n :param va: (str) - vertical alignment of label\n :param bbox: (dict) - dictionary of box surrounding label\n :param kwargs: (kwargs) - additional keyword arguments\n \"\"\"\n if isinstance(line, list):\n line = line[0]\n\n xdata = line.get_xdata()\n ydata = line.get_ydata()\n\n try:\n if xval is None:\n xval = np.median(xdata)\n except:\n xval = xdata[int(len(xdata) / 2)]\n\n if isinstance(xval, datetime.datetime):\n xdata = pd.to_datetime(xdata).to_pydatetime()\n elif isinstance(xval, str):\n xval = pd.to_datetime(xval).to_pydatetime()\n xdata = pd.to_datetime(xdata).to_pydatetime()\n\n x_idx = np.where(xdata == xval)[0]\n\n if not x_idx:\n print('xval outside range of x in set_label_inline!')\n return\n\n yval = ydata[x_idx]\n\n if not label:\n label = line.get_label()\n\n size = scale_it(ax, size, 2, exp=True)\n\n try:\n if xval is None:\n xval = np.median(xdata)\n except:\n xval = xdata[int(len(xdata) / 2)]\n\n if color is None:\n color = plt.getp(line, 'color')\n\n ax.text(xval, yval, label,\n color=color,\n alpha=alpha,\n size=size,\n ha=ha,\n va=va,\n bbox=bbox,\n **kwargs\n )\n\n\ndef annotate_point(ax, x, y, label='', xytext=(0, 0),\n size=SIZES['marker']['smaller'],\n textcoords='offset points', transform=False,\n projection=DEFAULT['projection'],\n bbox=dict(boxstyle='round, pad=0.3',\n facecolor=COLORS['black'],\n alpha=ALPHAS['transparent']),\n **kwargs\n ):\n \"\"\"\n Annotate a point on a subplot.\n\n :param ax: (mpl.axes) - plot axis\n :param x: (scalar) - input x location to annotate\n :param y: (scalar) - input y location to annotate\n :param label: (str) - label of line to be used in legend\n :param xytext: (tup) - x, y offset from input x and y for annotation\n :param size: (scalar) - size of annotation\n :param textcoords: (str) - type of coordinates\n :param transform: (boolean) - whether to use input projection\n :param projection: (cartopy.crs) - projection of plotted scatter\n :param bbox: (dict) - dictionary of boxstyle, facecolor, and alpha of box\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n if transform:\n x, y = ax.projection.transform_point(x, y, src_crs=projection)\n ax.annotate(label, xy=(x, y), xytext=xytext, ha='left', va='center',\n textcoords=textcoords, size=size, bbox=bbox, **kwargs)\n 
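# Editor note: the marker at (x, y) gets its text nudged by xytext in offset points; a hypothetical call such as annotate_point(ax, 2000, 1.5, label='peak', xytext=(4, 4)) would place 'peak' slightly up and to the right of the point.\n    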
return ax\n\n\ndef set_figsize(width=None, height=None, figsize='wide',\n rows=1, cols=1, pos=1, dpi=DEFAULT['dpi'], **kwargs):\n \"\"\"\n Set figure size; can be wide, tall, auto, or input tuple.\n\n :param width: (scalar) - width of figure\n :param height: (scalar) - height of figure\n :param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig\n :param rows: (int) - number of rows for subplots\n :param cols: (int) - number of columns for subplots\n :param pos: (int) - position of current subplot\n :param dpi: (int) - dots per inch to save the figure\n :param kwargs: (kwargs) - additional keyword arguments\n \"\"\"\n if width is not None and height is not None:\n figsize = (width, height)\n else:\n if figsize is 'wide' and pos == 1:\n fig_width = 10 + rows * 1.75\n fig_height = 3.5 + cols * 1.25\n figsize = (fig_width, fig_height)\n elif figsize is 'tall' and pos == 1:\n fig_width = 3.5 + rows * 1.25\n fig_height = 12 + cols * 1.75\n figsize = (fig_width, fig_height)\n elif figsize is 'auto' and pos == 1:\n fig_width = 8 + rows * 1.5\n fig_height = 4.5 + cols * 1.5\n figsize = (fig_width, fig_height)\n\n if isinstance(figsize, tuple):\n fig = plt.figure(figsize=figsize, dpi=dpi, **kwargs)\n return fig\n\n\ndef set_ax(rows=1, cols=1, pos=1, **kwargs):\n \"\"\"\n Create plot axis\n\n :param rows: (int) - number of rows for subplots\n :param cols: (int) - number of columns for subplots\n :param pos: (int) - position of current subplot\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n return plt.subplot(rows, cols, pos, **kwargs)\n\n\ndef set_date_ticks(ax, minor_date_ticks=True):\n \"\"\"\n Use logic on the length of date range to decide the tick marks.\n\n :param ax: (mpl.axes) - plot axis\n :param minor_date_ticks: (boolean) - whether to show the top date ticks\n :return major_xlocator: (str) - locator of major tick\n :return major_xinterval: (str) - interval between each major tick\n :return major_xformatter: (str) - formatter of major tick\n :return minor_xlocator: (str) - locator of minor tick\n :return minor_xinterval: (str) - interval between each minor tick\n :return minor_xformatter: (str) - formatter of minor tick\n :return dt_bool: (boolean) - whether the x axis is datetimes\n \"\"\"\n geom = plt.getp(ax, 'geometry')\n nrows = geom[0]\n ncols = geom[1]\n\n xlim = plt.getp(ax, 'xlim')\n if xlim[0] < 700000:\n dt_bool = False\n return [None] * 6 + [dt_bool]\n else:\n dt_bool = True\n\n xlim_dts = mdates.num2date(xlim)\n\n dt_dict = td2dict(xlim_dts[-1] - xlim_dts[0])\n ndays = dt_dict['days']\n\n if ndays < 0:\n dt_dict = td2dict(xlim_dts[0] - xlim_dts[-1])\n ndays = dt_dict['days']\n\n if ndays > 10950:\n major_xlocator = 'years'\n major_xformatter = '%Y'\n major_xinterval = int(ndays / 2000)\n\n major_xlocator2 = None\n major_xformatter2 = None\n major_xinterval2 = None\n\n minor_xlocator = 'years'\n minor_xformatter = '\\'%y'\n minor_xinterval = int(ndays / 8000)\n minor_xshow = int(ndays / 8000)\n\n for i in range(0, 10):\n if major_xinterval % minor_xinterval != 0:\n major_xinterval += 1\n else:\n break\n\n if minor_xshow >= minor_xinterval / 2:\n minor_xshow -= int(minor_xinterval / 1.75)\n\n if minor_xshow <= minor_xinterval:\n minor_xshow += 1\n\n elif 3000 < ndays <= 10950:\n major_xlocator = 'years'\n major_xformatter = '%Y'\n major_xinterval = 1 + int(ndays / 3000)\n\n major_xlocator2 = None\n major_xformatter2 = None\n major_xinterval2 = None\n\n minor_xlocator = 'years'\n minor_xformatter = '\\'%y'\n 
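# Heuristic for spans of roughly 8-30 years (3000-10950 days): major ticks carry full '%Y' year labels, minor ticks the abbreviated '%y form, and the intervals below grow with ndays so the labels stay sparse.\n        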
minor_xinterval = 1 + int(ndays / 3300)\n minor_xshow = 1 + int(ndays / 3300)\n\n if major_xinterval >= minor_xinterval:\n minor_xinterval -= 1\n\n for i in range(0, 10):\n if major_xinterval % minor_xinterval != 0:\n major_xinterval += 1\n else:\n break\n\n if minor_xshow >= minor_xinterval / 2:\n minor_xshow -= int(minor_xshow / 1.3)\n\n if minor_xshow == 0:\n minor_xshow = 1\n\n elif 1825 < ndays <= 3000:\n major_xlocator = 'months'\n major_xformatter = '%B'\n major_xinterval = 10 + int(ndays / 1850)\n\n major_xlocator2 = 'months'\n major_xformatter2 = '%Y'\n major_xinterval2 = 8\n\n minor_xlocator = 'months'\n minor_xformatter = '%b'\n minor_xinterval = 1 + int(ndays / 600)\n minor_xshow = 1 + int(ndays / 725)\n\n if minor_xshow >= minor_xinterval / 2:\n minor_xshow -= int(minor_xshow / 1.25)\n\n for i in range(0, 10):\n if major_xinterval % minor_xinterval != 0:\n major_xinterval += 1\n else:\n break\n\n for i in range(0, 10):\n if (major_xinterval2 % major_xinterval != 0\n or major_xinterval2 == 0):\n major_xinterval2 += 1\n else:\n break\n\n elif 217 < ndays <= 1825:\n major_xlocator = 'months'\n major_xformatter = '%b %d'\n major_xinterval = 3 + int(ndays / 1000) * 2\n\n major_xlocator2 = 'months'\n major_xformatter2 = '%Y'\n major_xinterval2 = 4 + int(ndays / 800)\n\n minor_xlocator = 'months'\n minor_xformatter = '%b'\n minor_xinterval = 1 + int(ndays / 600)\n minor_xshow = 1 + int(ndays / 725)\n\n if minor_xshow >= minor_xinterval / 2:\n minor_xshow -= int(minor_xshow / 1.5)\n\n for i in range(0, 10):\n if major_xinterval % minor_xinterval != 0:\n major_xinterval += 1\n else:\n break\n\n for i in range(0, 10):\n if (major_xinterval2 % major_xinterval != 0\n or major_xinterval2 == 0):\n major_xinterval2 += 1\n else:\n break\n\n elif 6 < ndays <= 217:\n major_xlocator = 'days'\n major_xformatter = '%b %d'\n major_xinterval = 2 + int(ndays / 15) * 2\n\n major_xlocator2 = None\n major_xformatter2 = None\n major_xinterval2 = None\n\n minor_xlocator = 'days'\n minor_xformatter = '%d'\n minor_xinterval = 1 + int(ndays / 50)\n minor_xshow = 1 + int(ndays / 35)\n\n if minor_xshow >= minor_xinterval:\n minor_xshow -= int(minor_xshow / 2.25)\n\n for i in range(0, 10):\n if major_xinterval % minor_xinterval != 0:\n major_xinterval += 1\n else:\n break\n\n elif 1 < ndays <= 6:\n major_xlocator = 'hours'\n major_xformatter = '%H:%M'\n major_xinterval = ndays * 5\n\n major_xlocator2 = 'hours'\n major_xformatter2 = '%m/%d'\n major_xinterval2 = 24\n\n minor_xlocator = 'hours'\n minor_xformatter = '%H'\n minor_xinterval = int(ndays / 1.5)\n minor_xshow = 1 + int(minor_xinterval / 2)\n\n if minor_xshow >= minor_xinterval:\n minor_xshow -= int(minor_xshow / 2.25)\n\n for i in range(0, 10):\n if major_xinterval % minor_xinterval != 0:\n major_xinterval += 1\n else:\n break\n\n for i in range(0, 25):\n if (major_xinterval2 % major_xinterval != 0\n or major_xinterval2 == 0):\n major_xinterval2 -= 1\n else:\n break\n\n if minor_xshow <= minor_xinterval:\n minor_xshow += 1\n\n elif 0 <= ndays <= 1:\n nminutes = (dt_dict['days'] * 1440\n + dt_dict['hours'] * 60\n + dt_dict['minutes']\n )\n\n major_xlocator = 'minutes'\n major_xformatter = '%I:%M %p'\n major_xinterval = int(nminutes / 3)\n\n major_xlocator2 = 'minutes'\n major_xformatter2 = '%b %d'\n major_xinterval2 = int(nminutes / 1.5)\n\n minor_xlocator = 'minutes'\n minor_xformatter = '%H:%M'\n minor_xinterval = int(nminutes / 12)\n minor_xshow = 1\n\n if minor_xshow >= 3 and major_xlocator != 'years':\n minor_xshow = int(minor_xshow / 1.5)\n elif 
minor_xshow >= 3 and major_xlocator == 'years':\n minor_xshow -= int(minor_xshow / 1.5)\n\n if nminutes > 360:\n major_xinterval = round_to(major_xinterval, base=15)\n minor_xinterval = round_to(minor_xinterval, base=15)\n major_xinterval2 = round_to(major_xinterval2, base=15)\n\n if major_xinterval % minor_xinterval != 0:\n minor_xinterval = int(major_xinterval / 3)\n\n for i in range(0, 60):\n if major_xinterval % minor_xinterval != 0:\n minor_xinterval += 1\n else:\n break\n\n if major_xinterval2 % major_xinterval != 0:\n major_xinterval2 = major_xinterval\n\n if minor_xshow <= 0:\n minor_xshow = 1\n\n if major_xinterval2 is not None:\n if major_xinterval2 <= 0:\n major_xinterval2 = major_xinterval\n\n set_major_ticks(ax,\n xlocator=major_xlocator,\n xformatter=major_xformatter,\n xinterval=major_xinterval)\n set_major_tick_labels(ax, size=8)\n\n ax2 = ax.twiny()\n ax2.set_xlim(ax.get_xlim())\n prettify_ax(ax2, ticks=False)\n\n if major_xlocator2 is not None and nrows == 1:\n set_major_ticks(ax2,\n xlocator=major_xlocator2,\n xformatter=major_xformatter2,\n xinterval=major_xinterval2)\n set_major_tick_labels(ax2, bottom=True, top=False,\n pad=24, size=6)\n else:\n set_major_tick_labels(ax2, xticklabels=[])\n set_major_ticks(ax2, xticks=[])\n\n if minor_date_ticks:\n set_minor_ticks(ax2,\n xlocator=minor_xlocator,\n xformatter=minor_xformatter,\n xinterval=minor_xinterval,\n top=True, bottom=False)\n set_minor_tick_labels(ax2, top=True, size=7.5)\n set_minor_grid(ax2, xalpha=0.25)\n\n for label in ax2.get_xminorticklabels():\n label.set_visible(False) # find a better way?\n for label in ax2.get_xminorticklabels()[0::minor_xshow * ncols]:\n label.set_visible(True)\n\n return (major_xlocator, major_xinterval, major_xformatter,\n minor_xlocator, minor_xinterval, minor_xformatter, dt_bool)\n\n\ndef set_cbar(ax, im,\n fig=False,\n label='',\n fmt='%1.0f',\n label_size=7.5,\n drawedges=True,\n label_color=COLORS['gray'],\n tick_locs=None,\n tick_size=5,\n tick_color=COLORS['gray'],\n color=COLORS['black'],\n pad=0.1,\n aspect=25.5,\n shrink=0.2,\n length=0,\n width=0.25,\n direction='out',\n orientation='horizontal',\n cax=None,\n **kwargs):\n \"\"\"\n Set color bar for a map.\n\n :param ax: (mpl.axes) - plot axis\n :param im: (mpl.collections/contour) - plotted map\n :param fig: (boolean) - whether to plot a figure wide colorbar\n :param fmt: (str) - format of color bar labels\n :param label_size: (scalar) - size of color bar label\n :param label_color: (scalar) - color of color bar label\n :param tick_locs: (array) - input own tick marks on color bar\n :param tick_size: (scalar) - size of color bar tick labels\n :param tick_color: (scalar) - color of color bar tick labels\n :param color: (scalar) - color of color bar tick marks\n :param drawedges: (scalar) - whether to draw color edges\n :param pad: (scalar) - padding of color bar from plot\n :param aspect: (int) - aspect ratio of color bar\n :param shrink: (scalar) - size of color bar\n :param length: (scalar) - length of color bar tick marks\n :param width: (scalar) - width of color bar tick marks\n :param direction: (str) - direction of color bar tick marks\n :param orientation: (str) - orientation of color bar\n :param cax: (mpl.axes) - plot axis to attach to\n :param kwargs: (kwargs) - additional keyword arguments\n :return cbar: (mpl.ColorBar) - matplotlib color bar\n \"\"\"\n try:\n pad = scale_it(ax, pad, 0.00075, exp=True)\n label_size = scale_it(ax, label_size, 1.25, exp=True)\n tick_size = scale_it(ax, tick_size, 1.25, exp=True)\n 
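# Padding and font sizes are scaled relative to the parent axis via scale_it so the colorbar stays legible across figure and grid sizes; a hypothetical call like set_cbar(ax, im, label='mm/day', fmt='%.1f', orientation='vertical') would attach a vertical bar to ax.\n        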
width = scale_it(ax, width, 0.05, exp=True)\n shrink = scale_it(ax, shrink, 0.075)\n aspect = scale_it(ax, aspect, 1.25)\n\n geom = plt.getp(plt.getp(ax, 'subplotspec'), 'geometry')\n nrows = geom[0]\n ncols = geom[1]\n\n shrink *= (nrows + 0.5) / 1.5\n tick_size += (nrows + ncols)\n\n if orientation == 'vertical':\n shrink *= 2\n pad /= 3\n\n if fmt == '%.2f':\n rotation = 45\n else:\n rotation = 0\n\n try:\n if not fig:\n cbar = plt.colorbar(im, orientation=orientation,\n pad=pad,\n drawedges=drawedges,\n shrink=shrink,\n format=fmt,\n ticks=tick_locs,\n aspect=aspect,\n cax=cax,\n **kwargs)\n else:\n figure = plt.getp(ax, 'figure')\n cbar = figure.colorbar(im, ax=plt.getp(figure, 'axes'),\n orientation=orientation,\n pad=pad,\n drawedges=drawedges,\n shrink=shrink * 1.75,\n format=fmt,\n ticks=tick_locs,\n aspect=aspect,\n cax=cax,\n **kwargs)\n except:\n cbar = plt.colorbar(im,\n orientation=orientation,\n drawedges=drawedges,\n format=fmt,\n ticks=tick_locs,\n cax=cax,\n **kwargs)\n\n cbar.ax.tick_params(labelsize=tick_size,\n rotation=rotation,\n direction=direction,\n length=length,\n width=width,\n tick2On=True,\n labelcolor=label_color,\n color=color)\n\n cbar.set_label(label, size=label_size, color=label_color)\n\n return cbar\n\n except:\n report_err(comment='Could not set color bar; please set manually!')\n\n\ndef get_cmap(colors, n=None, r=False, start=0, stop=1, **kwargs):\n \"\"\"\n Converts a list of colors into a color map or discretizes a registered cmap\n http://matplotlib.org/examples/color/colormaps_reference.html\n http://www.ncl.ucar.edu/Document/Graphics/color_table_gallery.shtml\n\n :param colors: (list/str) - a list containing RGB or Python/NCL cmap name\n :param n: (int) - number of colors in cmap\n :param r: (boolean) - reverse colormap\n :param start: (scalar) - value to start on the cmap between 0 and 1\n :param stop: (scalar) - value to end on the cmap between 0 and 1\n :param kwargs: (kwargs) - additional keyword arguments\n :return cmap: (mpl.cmap) - color map\n \"\"\"\n try:\n if '_r' in colors:\n colors = colors[:-2]\n r = True\n except:\n pass\n\n if colors in NCL_CMAP_NAMES:\n if r:\n color_list = get_color_list(NCL_CMAPS[colors].values[0])[::-1]\n cmap = LinearSegmentedColormap.from_list('cmap',\n colors=color_list)\n else:\n cmap = NCL_CMAPS[colors].values[0]\n if n is None:\n n = NCL_CMAPS[colors].values[1]\n else:\n if isinstance(colors, str):\n if r:\n colors += '_r'\n if n is None:\n n = 10\n cmap = plt.get_cmap(colors, **kwargs)\n elif isinstance(colors, mpl.colors.LinearSegmentedColormap):\n return colors\n else:\n if r:\n colors = colors[::-1]\n if n is None and len(colors) > 2:\n n = len(colors)\n elif n is None:\n n = 10\n if not isinstance(colors[0], str):\n if (np.array(colors) > 1).any():\n for i, tup in enumerate(colors):\n colors[i] = np.array(tup) / 255.\n cmap = LinearSegmentedColormap.from_list('mycmap', colors=colors,\n **kwargs)\n colors = cmap(np.linspace(start, stop, cmap.N))\n\n return LinearSegmentedColormap.from_list('mycmap', colors=colors, N=n)\n\n\ndef get_color_list(cmap, hexcodes=False, **kwargs):\n \"\"\"\n Converts a registered colormap into a list of RGB tuples or hexcodes\n\n :param cmap_name: (mpl.cmap/str) - actual colormap or name of color\n :param hexcodes: (boolean) - whether to return a list of hexcodes\n :param kwargs: (kwargs) - additional keyword arguments\n :return cmap: (list) - list of RGB tuples or hexcodes\n \"\"\"\n if isinstance(cmap, str):\n if cmap in NCL_CMAP_NAMES:\n cmap = 
NCL_CMAPS[cmap].values[0]\n else:\n cmap = plt.get_cmap(cmap)\n\n if not hexcodes:\n color_list = [cmap(i)[:3] for i in range(cmap.N)]\n else:\n color_list = [mpl.colors.rgb2hex(cmap(i)[:3])\n for i in range(cmap.N)]\n\n return color_list\n\n\ndef set_latlons(ax,\n color=COLORS['black'],\n alpha=ALPHAS['semi opaque'],\n size=4,\n top=False,\n bottom=True,\n left=True,\n right=False,\n lat_labels='auto',\n lon_labels='auto',\n central_longitude=0,\n **kwargs):\n \"\"\"\n Set lat lon labels for a map.\n\n :param ax: (mpl.axes) - plot axis\n :param color: (scalar) - color of lat lon labels\n :param alpha: (scalar/str) - transparency of lat lon labels\n :param size: (scalar) - size of lat lon labels\n :param bottom: (boolean) - whether to show bottom lon labels\n :param top: (boolean) - whether to show top lon labels\n :param left: (boolean) - whether to show left lat labels\n :param right: (boolean) - whether to show right lat labels\n :param lat_labels: (array) - list of latitudes to show on map\n :param lon_labels: (array) - list of longitudes to show on map\n :param kwargs: (kwargs) - additional keyword arguments\n :return gl: (ax.gridlines) - gridlines\n \"\"\"\n from cartopy.mpl.gridliner import (LONGITUDE_FORMATTER,\n LATITUDE_FORMATTER\n )\n size = scale_it(ax, size, 1, exp=True)\n\n geom = plt.getp(plt.getp(ax, 'subplotspec'), 'geometry')\n nplots = geom[0] * geom[1]\n\n size += nplots\n linewidth = np.log(nplots + 1) / 85 + 0.35\n\n gl = ax.gridlines(draw_labels=True,\n linewidth=linewidth,\n color=COLORS['black'],\n alpha=ALPHAS['translucid'],\n linestyle=(0, (16, 4)), **kwargs) # length, how often\n\n if lon_labels is not None and lon_labels is not 'auto':\n gl.xlocator = mticker.FixedLocator(lon_labels)\n elif not lon_labels:\n gl.xlabels_top = False\n gl.xlabels_bottom = False\n if lat_labels is not None and lat_labels is not 'auto':\n gl.ylocator = mticker.FixedLocator(lat_labels)\n elif not lat_labels:\n gl.ylabels_left = False\n gl.ylabels_right = False\n else:\n if central_longitude != 0:\n base_range = np.arange(-360, 420, 60)\n base_range -= central_longitude\n base_range = np.delete(base_range,\n np.where(base_range == -180)[0])\n gl.xlocator = mticker.FixedLocator(base_range)\n\n gl.xformatter = LONGITUDE_FORMATTER\n gl.yformatter = LATITUDE_FORMATTER\n\n gl.xlabels_top = top\n gl.ylabels_bottom = bottom\n gl.xlabels_left = left\n gl.ylabels_right = right\n\n gl.xlabel_style = {'size': size, 'color': color, 'alpha': alpha}\n gl.ylabel_style = {'size': size, 'color': color, 'alpha': alpha}\n\n return gl\n\n\ndef set_figtext(ax, text, size=12, pad=0,\n loc='bottom center',\n color=COLORS['black'],\n alpha=ALPHAS['translucent'],\n fha=None, fva=None, **kwargs):\n \"\"\"\n Add text to the side of a figure.\n\n loc choices - center, center bottom, center left, center right,\n upper left, upper right, bottom left, bottom right.\n\n :param ax: (mpl.axes) - plot axis\n :param text: (str) - text to put on the figure\n :param loc: (str) - location of the text\n :param size: (int) - size in points\n :param color: (str) - color of text\n :param alpha: (scalar/str) - transparency of text\n :param fha: (boolean) - force the horizontal alignment to be input str\n :param fva: (boolean) - force the vertical alignment to be input str\n :param kwargs: (kwargs) - additional keyword arguments\n \"\"\"\n size = scale_it(ax, size, 1, exp=True)\n pad = scale_it(ax, pad, 0.005, exp=True)\n\n loc_keywords = get_loc_keywords(loc)\n\n if 'lower' in loc_keywords:\n if 'center' in loc_keywords: # 
lower center\n ha = 'center'\n va = 'top'\n x = 0.5\n y = -0.09 + pad\n elif 'right' in loc_keywords:\n ha = 'left'\n if 'corner' in loc_keywords: # lower corner right\n va = 'center'\n x = 0.925\n y = -0.04 + pad\n else: # lower right\n va = 'bottom'\n x = 0.925 + pad\n y = 0.125\n elif 'left' in loc_keywords:\n ha = 'right'\n if 'corner' in loc_keywords: # lower corner left\n va = 'center'\n x = 0.855\n y = -0.04 + pad\n else: # lower left\n va = 'bottom'\n x = 0.05\n y = 0.125\n\n elif 'upper' in loc_keywords:\n if 'center' in loc_keywords:\n ha = 'center'\n va = 'center'\n x = 0.5\n y = 0.975 - pad\n elif 'right' in loc_keywords:\n ha = 'left'\n if 'corner' in loc_keywords:\n va = 'center'\n x = 0.925\n y = 0.975 - pad\n else:\n va = 'top'\n x = 0.925 + pad\n y = 0.9\n elif 'left' in loc_keywords:\n ha = 'right'\n if 'corner' in loc_keywords:\n va = 'center'\n x = 0.855\n y = 0.975 - pad\n else:\n va = 'top'\n x = 0.05\n y = 0.9\n else:\n va = 'center'\n if 'right' in loc_keywords:\n x = 0.925 + pad\n y = 0.5\n ha = 'left'\n elif 'left' in loc_keywords:\n x = 0.05\n y = 0.5\n ha = 'right'\n else:\n x = 0.5\n y = 0.5\n ha = 'center'\n\n if fva is not None:\n va = fva\n if fha is not None:\n ha = fha\n\n plt.figtext(x, y, text,\n ha=ha, va=va,\n wrap=True,\n size=size,\n color=color,\n alpha=alpha,\n **kwargs)\n\n\ndef set_axtext(ax, text, loc='bottom center', xy=None,\n size=12, color=COLORS['black'],\n xpad=None, ypad=None,\n alpha=ALPHAS['translucent'],\n fha=None, fva=None,\n **kwargs):\n \"\"\"\n :param ax: (mpl.axes) - plot axis\n :param text: (str) - text to put on the subplot\n :param loc: (str) - location of the text\n :param xy: (tup) - coordinate to set text\n :param size: (int) - size in points\n :param color: (str) - color of text\n :param xpad: (scalar) - padding in the x axis direction\n :param ypad: (scalar) - padding in the y axis direction\n :param alpha: (scalar/str) - transparency of text\n :param fha: (boolean) - force the horizontal alignment to be input str\n :param fva: (boolean) - force the vertical alignment to be input str\n :param kwargs: (kwargs) - additional keyword arguments\n \"\"\"\n size = scale_it(ax, size, 1, exp=True)\n\n if xy is None:\n loc_keywords = get_loc_keywords(loc)\n xtick_diff = np.average(np.diff(plt.getp(ax, 'xticks')))\n ytick_diff = np.average(np.diff(plt.getp(ax, 'yticks')))\n\n if ax.get_xlim()[0] > 700000:\n if 'lower' in loc_keywords:\n loc_keywords.remove('lower')\n va = 'bottom'\n ha = ''.join(loc_keywords)\n if ha is 'left':\n xy = (ax.get_xlim()[0] + xtick_diff * 0.025,\n ax.get_ylim()[0] + ytick_diff * 0.025)\n elif ha is 'right':\n xy = (ax.get_xlim()[1] - xtick_diff * 0.025,\n ax.get_ylim()[0] + ytick_diff * 0.025)\n else:\n xy = ((ax.get_xlim()[0] + ax.get_xlim()[1]) / 2,\n ax.get_ylim()[0] + ytick_diff * 0.025)\n\n elif 'upper' in loc_keywords:\n loc_keywords.remove('upper')\n va = 'top'\n ha = ''.join(loc_keywords)\n if ha is 'left':\n xy = (ax.get_xlim()[0] + xtick_diff * 0.025,\n ax.get_ylim()[1])\n elif ha is 'right':\n xy = (ax.get_xlim()[1] - xtick_diff * 0.025,\n ax.get_ylim()[1])\n else:\n xy = ((ax.get_xlim()[0] + ax.get_xlim()[1]) / 2,\n ax.get_ylim()[1])\n\n else:\n loc_keywords.remove('center')\n va = 'center'\n ha = ''.join(loc_keywords)\n if ha is 'left':\n xy = (ax.get_xlim()[0] + xtick_diff * 0.025,\n ax.get_ylim()[1] / 2)\n elif ha is 'right':\n xy = (ax.get_xlim()[1] - xtick_diff * 0.025,\n ax.get_ylim()[1] / 2)\n else:\n ha = 'center'\n xy = ((ax.get_xlim()[0] + ax.get_xlim()[1]) / 2,\n ax.get_ylim()[1] / 
2)\n\n xy = (mdates.num2date(xy[0]), xy[1])\n\n else:\n if 'lower' in loc_keywords:\n loc_keywords.remove('lower')\n va = 'bottom'\n ha = ''.join(loc_keywords)\n if ha is 'left':\n xy = (ax.get_xlim()[0] + ax.get_xlim()[1] * 0.025,\n ax.get_ylim()[0] + ytick_diff * 0.025)\n elif ha is 'right':\n xy = (ax.get_xlim()[1] * 0.985,\n ax.get_ylim()[0] + ytick_diff * 0.025)\n else:\n xy = (ax.get_xlim()[1] / 2,\n ax.get_ylim()[0] + ytick_diff * 0.025)\n\n elif 'upper' in loc_keywords:\n loc_keywords.remove('upper')\n va = 'top'\n ha = ''.join(loc_keywords)\n if ha is 'left':\n xy = (ax.get_xlim()[0] + ax.get_xlim()[1] * 0.025,\n ax.get_ylim()[1])\n elif ha is 'right':\n xy = (ax.get_xlim()[1] * 0.985,\n ax.get_ylim()[1])\n else:\n xy = (ax.get_xlim()[1] / 2,\n ax.get_ylim()[1])\n\n else:\n loc_keywords.remove('center')\n va = 'center'\n ha = ''.join(loc_keywords)\n if ha is 'left':\n xy = (ax.get_xlim()[0] + ax.get_xlim()[1] * 0.025,\n ax.get_ylim()[1] / 2)\n elif ha is 'right':\n xy = (ax.get_xlim()[1] * 0.985,\n ax.get_ylim()[1] / 2)\n else:\n ha = 'center'\n xy = (ax.get_xlim()[1] / 2,\n ax.get_ylim()[1] / 2)\n else:\n ha = 'left'\n va = 'center'\n\n if isinstance(xy[0], str):\n xy = (pd.to_datetime(xy[0]).to_pydatetime(), xy[1])\n\n if fva is not None:\n va = fva\n if fha is not None:\n ha = fha\n\n if xpad is not None:\n xy = (xy[0] + xpad, xy[1])\n\n if ypad is not None:\n xy = (xy[0], xy[1] + ypad)\n\n ax.annotate(text, xy=xy, size=size,\n color=color, alpha=alpha,\n ha=ha, va=va, **kwargs)\n\n\ndef get_loc_keywords(loc):\n \"\"\"\n Return the location keywords based on input loc.\n\n :param loc: (str) - location of the text\n :return loc_keywords: (list) - list of the location keywords\n \"\"\"\n loc = loc.lower()\n loc_keywords = []\n if 'top' in loc or 'upper' in loc or 'north' in loc:\n loc_keywords.append('upper')\n elif 'bottom' in loc or 'lower' in loc or 'south' in loc:\n loc_keywords.append('lower')\n if 'right' in loc or 'east' in loc or 'east' in loc:\n loc_keywords.append('right')\n elif 'left' in loc or 'west' in loc:\n loc_keywords.append('left')\n if 'center' in loc or 'middle' in loc:\n loc_keywords.append('center')\n if 'corner' in loc:\n loc_keywords.append('corner')\n return loc_keywords\n\n\ndef set_share(ax1, ax2, axis='x', xlabel='', ylabel=''):\n \"\"\"\n Match the tick locations of another axis and hide the current tick labels.\n\n :param ax1: (mpl.axes) - plot axis to adapt\n :param ax2: (mpl.axes) - plot axis to mimic ticks\n :param axis: (str) - share x or y axis\n :param xlabel: (str) - label of x axis\n :param ylabel: (str) - label of y axis\n :return ax1, ax2: (mpl.axes) - plot axes\n \"\"\"\n if 'x' in axis:\n xlim = plt.getp(ax2, 'xlim')\n ax1.set_xlim(xlim)\n plt.setp(ax1.get_xticklabels(), visible=False)\n ax1.set_xlabel(xlabel, labelpad=12)\n if 'y' in axis:\n ylim = plt.getp(ax2, 'ylim')\n ax1.set_ylim(ylim)\n plt.setp(ax1.get_yticklabels(), visible=False)\n ax1.set_ylabel(ylabel, labelpad=12)\n return ax1, ax2\n\n\ndef get_region_latlim(region, lat1=-90, lat2=90, lon1=-180, lon2=180,\n tup=False, sliceit=False, w2e=False):\n \"\"\"\n Get latitudinal and longitudinal extents of select regions.\n\n :param region: (str) - acronym of region [us/na/nino34/nh/sh]\n :param lat1: (scalar) lower limit of latitude\n :param lat2: (scalar) upper limit of latitude\n :param lon1: (scalar) left limit of longitude\n :param lon2: (scalar) right limit of longitude\n :param central_longitude: (scalar) - longitude to center the map on\n :param tup: (bool) - whether to 
return a tuple of extents\n :param sliceit: (bool) - whether to return a slice type of extents\n :return lat1, lat2, lon1, lon2: (scalar) - individual extents\n :return latlim, lonlim: (tuple) - tuple extents\n :return lat_slice, lon_slice: (slice) - slice extents\n \"\"\"\n if region == 'us':\n lat1 = 50\n lat2 = 22\n lon1 = -128\n lon2 = -65\n elif region == 'na':\n lat1 = 73\n lat2 = 10\n lon1 = -176\n lon2 = -65\n elif region == 'nino34':\n lat1 = -5\n lat2 = 5\n lon1 = -120\n lon2 = -170\n elif region == 'nh':\n lat1 = 0\n lat2 = 90\n elif region == 'sh':\n lat1 = -90\n lat2 = 0\n elif region == 'wh':\n lon1 = -180\n lon2 = 0\n elif region == 'eh':\n lon1 = 0\n lon2 = 180\n elif region == None or region == '':\n pass\n else:\n print('Region not found!')\n\n if w2e:\n lon1 = lonw2e(lon1)\n lon2 = lonw2e(lon2)\n\n if tup:\n return (lat1, lat2), (lon1, lon2)\n elif sliceit:\n return slice(lat1, lat2), slice(lon1, lon2)\n else:\n return lat1, lat2, lon1, lon2\n\n\ndef set_twin(ax1, ax2, axis='x', title_pad=1.09,\n xlabel='', ylabel='', title='', suptitle=False,\n aligned=True, length_scale=False):\n \"\"\"\n Create another y axis on the same subplot.\n\n :param ax1: (mpl.axes) - plot axis on the right to adapt\n :param ax2: (mpl.axes) - plot axis on the left or the one to mimic\n :param axis: (str) - twin x or y axis\n :param xlabel: (str) - label of x axis\n :param ylabel: (str) - label of y axis\n :param title: (str) - title of subplot\n :param suptitle: (boolean) - whether to make a figure title\n :param aligned: (boolean) - whether to keep left and right ticks aligned\n :param scale: (scalar) - scaling exponent\n :return ax1, ax2: (mpl.axes) - plot axes\n \"\"\"\n children1 = ax1.get_children()\n children2 = ax2.get_children()\n ylabel2 = plt.getp(ax2, 'ylabel')\n xlabel = plt.getp(ax2, 'xlabel')\n title = plt.getp(ax2, 'title')\n\n try:\n plotlist1 = list(filter(lambda x:\n isinstance(x, mpl.lines.Line2D),\n children1\n )\n )\n if len(plotlist1) == 1:\n plot1 = plotlist1[0]\n color1 = plt.getp(plot1, 'color')\n else:\n plot1 = list(filter(lambda x:\n isinstance(x, mpl.patches.Rectangle),\n children1)\n )[0]\n color1 = plt.getp(plot1, 'facecolor')\n\n plotlist2 = list(filter(lambda x:\n isinstance(x, mpl.lines.Line2D),\n children2)\n )\n if len(plotlist2) == 1:\n plot2 = plotlist2[0]\n color2 = plt.getp(plot2, 'color')\n else:\n plot2 = list(filter(lambda x:\n isinstance(x, mpl.patches.Rectangle),\n children2)\n )[0]\n color2 = plt.getp(plot2, 'facecolor')\n except Exception:\n color1 = COLORS['gray']\n color2 = COLORS['gray']\n print('Unable to get color for twinx.')\n\n if 'x' in axis:\n set_borders(ax2, spines=['left'], color=color2)\n set_borders(ax2, spines=['right'], color=color1)\n\n if aligned:\n yticks2 = np.linspace(ax2.get_yticks()[0],\n ax2.get_yticks()[-1],\n len(ax2.get_yticks())\n )\n yticks1 = np.linspace(ax1.get_yticks()[0],\n ax1.get_yticks()[-1],\n len(ax2.get_yticks())\n )\n set_major_grid(ax2)\n else:\n yticks2 = None\n yticks1 = None\n set_major_grid(ax1, ycolor=color1, yalpha=ALPHAS['translucent'])\n set_major_grid(ax2, ycolor=color2, yalpha=ALPHAS['translucent'])\n\n set_borders(ax1, all_=False)\n\n set_major_ticks(ax2,\n yticks=yticks2,\n axes=['y'],\n bottom=True,\n left=True,\n right=False,\n top=True,\n color=color2)\n set_minor_ticks(ax2,\n axes=['y'],\n bottom=True,\n left=True,\n right=False,\n top=True,\n color=color2)\n set_major_tick_labels(ax2,\n axes=['y'],\n left=True,\n right=False,\n color=color2)\n set_minor_tick_labels(ax2,\n axes=['y'],\n 
left=True,\n right=False,\n color=color2)\n\n set_labels(ax2, xlabel=xlabel, ylabel=ylabel2, title_pad=title_pad,\n title=title, suptitle=suptitle, ylabel_color=color2)\n\n set_major_ticks(ax1,\n yticks=yticks1,\n axes=['y'],\n bottom=False,\n left=False,\n right=True,\n top=False,\n color=color1)\n set_minor_ticks(ax1,\n axes=['y'],\n bottom=False,\n left=False,\n right=True,\n top=False,\n color=color1)\n set_major_tick_labels(ax1,\n axes=['y'],\n left=False,\n right=True,\n color=color1)\n set_minor_tick_labels(ax1,\n axes=['y'],\n left=False,\n right=True,\n color=color1)\n\n set_labels(ax1, ylabel=ylabel, ylabel_color=color1,\n length_scale=length_scale)\n\n return ax1, ax2\n\n\ndef set_axes(ax, xlim=None, ylim=None,\n xscale=None, yscale=None,\n xinvert=False, yinvert=False, **kwargs):\n \"\"\"\n Modify subplot axes settings.\n\n :param ax: (mpl.axes) - plot axis\n :param xlim: (tup) - left and right x axis limit in a tuple, respectively\n :param ylim: (tup) - left and right y axis limit in a tuple, respectively\n :param xscale: (str) - linear or log scale of x axis\n :param yscale: (str) - linear or log scale of y axis\n :param xinvert: (boolean) - whether to flip x axis\n :param yinvert: (boolean) - whether to flip y axis\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n if xlim is None:\n xlim = plt.getp(ax, 'xlim')\n if ylim is None:\n ylim = plt.getp(ax, 'ylim')\n\n if xscale is None:\n xscale = plt.getp(ax, 'xscale')\n if yscale is None:\n yscale = plt.getp(ax, 'yscale')\n\n if isinstance(xlim[0], str):\n xlim = pd.to_datetime(xlim).to_pydatetime()\n\n ax.set(xlim=xlim, ylim=ylim, xscale=xscale, yscale=yscale, **kwargs)\n\n if xinvert:\n ax.invert_xaxis()\n\n if yinvert:\n ax.invert_yaxis()\n\n return ax\n\n\ndef set_major_tick_labels(ax, axes='both',\n xticklabels=None,\n yticklabels=None,\n pad=1.25, size=10,\n color=COLORS['gray'],\n bottom=True, top=False,\n left=True, right=False,\n xrotation=0, yrotation=0, **kwargs):\n \"\"\"\n Modify major tick label settings.\n\n :param ax: (mpl.axes) - plot axis\n :param axes: (list) - x and/or y axis to change\n :param xticklabels: (list) - manually set x major tick labels\n :param yticklabels: (list) - manually set y major tick labels\n :param pad: (scalar) - distance between ticks and major tick labels\n :param size: (scalar) - size of major tick labels\n :param color: (str) - color of major tick labels\n :param bottom: (boolean) - whether to show bottom major tick labels\n :param top: (boolean) - whether to show top major tick labels\n :param left: (boolean) - whether to show left major tick labels\n :param right: (boolean) - whether to show right major tick labels\n :param xrotation: (scalar) - degrees to rotate x major tick labels\n :param yrotation: (scalar) - degrees to rotate y major tick labels\n :param scale: (scalar) - scaling exponent\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n size = scale_it(ax, size, 1.25, exp=True)\n pad = scale_it(ax, pad, 0.1)\n\n if xticklabels is not None:\n ax.set_xticklabels(xticklabels)\n\n if yticklabels is not None:\n ax.set_yticklabels(yticklabels)\n\n if color is 'xinherit':\n color = plt.getp(plt.getp(ax, 'xmajorticklabels')[0], 'color')\n\n if color is 'yinherit':\n color = plt.getp(plt.getp(ax, 'ymajorticklabels')[0], 'color')\n\n if axes is 'both':\n axes = ['x', 'y']\n\n for axis in axes:\n ax.tick_params(axis=axis,\n which='major',\n labelsize=size,\n labelcolor=color,\n 
labelleft=left,\n labelright=right,\n labeltop=top,\n labelbottom=bottom, **kwargs)\n\n ax.tick_params(axis=axis, which='major',\n pad=pad) # this doesn't play well with others...\n\n plt.setp(ax.xaxis.get_majorticklabels(), rotation=xrotation)\n plt.setp(ax.yaxis.get_majorticklabels(), rotation=yrotation)\n\n return ax\n\n\ndef set_minor_tick_labels(ax, axes='both',\n xticklabels=None, yticklabels=None,\n pad=1.25,\n size=9,\n color=COLORS['gray'],\n bottom=False, top=False,\n left=False, right=False,\n xrotation=0, yrotation=0, **kwargs):\n \"\"\"\n Modify minor tick label settings.\n\n :param ax: (mpl.axes) - plot axis\n :param axes: (list) - x and/or y axis to change\n :param xticklabels: (list) - manually set x minor tick labels\n :param yticklabels: (list) - manually set y minor tick labels\n :param pad: (scalar) - distance between ticks and minor tick labels\n :param size: (scalar) - size of minor tick labels\n :param color: (str) - color of minor tick labels\n :param bottom: (boolean) - whether to show bottom minor tick labels\n :param top: (boolean) - whether to show top minor tick labels\n :param left: (boolean) - whether to show left minor tick labels\n :param right: (boolean) - whether to show right minor tick labels\n :param xrotation: (scalar) - degrees to rotate x minor tick labels\n :param yrotation: (scalar) - degrees to rotate y minor tick labels\n :param scale: (scalar) - scaling exponent\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n\n size = scale_it(ax, size, 1.25, exp=True)\n pad = scale_it(ax, pad, 0.1)\n\n if xticklabels is not None:\n ax.set_xticklabels(xticklabels, minor=True)\n\n if yticklabels is not None:\n ax.set_yticklabels(yticklabels, minor=True)\n\n if axes is 'both':\n axes = ['x', 'y']\n\n for axis in axes:\n ax.tick_params(axis=axis,\n which='minor',\n labelsize=size,\n labelcolor=color,\n labelleft=left,\n labelright=right,\n labeltop=top,\n labelbottom=bottom, **kwargs)\n\n ax.tick_params(axis=axis, which='minor',\n pad=pad) # this doesn't play well with others...\n\n plt.setp(ax.xaxis.get_minorticklabels(), rotation=xrotation)\n plt.setp(ax.yaxis.get_minorticklabels(), rotation=yrotation)\n\n return ax\n\n\ndef set_major_ticks(ax, axes='both',\n xticks=None, yticks=None, direction='out', width=0.1,\n size=2, color=COLORS['light gray'],\n left=True, right=False, bottom=True, top=False,\n xlocator=None, xinterval=None, xformatter=None, **kwargs):\n \"\"\"\n Modify major tick settings.\n\n :param ax: (mpl.axes) - plot axis\n :param axes: (list) - x and/or y axis to change\n :param xticks: (list) - manually set x major ticks\n :param yticks: (list) - manually set y major ticks\n :param direction: (str) - direction of tick\n :param width: (str) - width of major ticks\n :param size: (str) - length of major ticks\n :param color: (str) - color of major ticks\n :param bottom: (boolean) - whether to show bottom major tick\n :param top: (boolean) - whether to show top major tick\n :param left: (boolean) - whether to show left major tick\n :param right: (boolean) - whether to show right major tick\n :param xlocator: (str) - auto, years, months, days, hours, interval of tick\n :param xinterval: (str) - interval of date ticks\n :param xformatter: (str) - how to display the major tick labels\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n size = scale_it(ax, size, 1, exp=True)\n width = scale_it(ax, width, 0.1)\n\n date = False\n locator = None\n\n if 
xticks is not None:\n ax.set_xticks(xticks)\n elif isinstance(xlocator, str):\n if xlocator == 'auto':\n locator = AutoDateLocator()\n elif xlocator == 'years':\n locator = YearLocator(base=int(xinterval))\n elif xlocator == 'months':\n locator = MonthLocator(interval=xinterval)\n elif xlocator == 'days':\n locator = DayLocator(interval=xinterval)\n elif xlocator == 'hours':\n locator = HourLocator(interval=xinterval)\n elif xlocator == 'minutes':\n locator = MinuteLocator(interval=xinterval)\n date = True\n elif isinstance(xlocator, int) or isinstance(xlocator, float):\n locator = MultipleLocator(xlocator)\n\n if xformatter is not None:\n if date:\n if xformatter is 'auto'and locator is not None:\n ax.xaxis.set_major_formatter(AutoDateFormatter(locator))\n else:\n ax.xaxis.set_major_formatter(DateFormatter(xformatter))\n else:\n ax.xaxis.set_major_formatter(FormatStrFormatter(xformatter))\n\n if locator is not None:\n ax.xaxis.set_major_locator(locator)\n\n if yticks is not None:\n ax.set_yticks(yticks)\n\n if axes is 'both':\n axes = ['x', 'y']\n\n for axis in axes:\n ax.tick_params(axis=axis,\n which='major',\n direction=direction,\n left=left,\n right=right,\n bottom=bottom,\n top=top,\n size=size,\n width=width,\n color=color,\n **kwargs)\n return ax\n\n\ndef set_minor_ticks(ax, axes='both',\n xticks=None, yticks=None, direction='out', width=0.1,\n size=2, color=COLORS['light gray'],\n left=False, right=False, bottom=True, top=False,\n xlocator=None, xinterval=3, xformatter=None, **kwargs):\n \"\"\"\n Modify minor tick settings.\n\n :param ax: (mpl.axes) - plot axis\n :param axes: (list) - x and/or y axis to change\n :param xticks: (list) - manually set x minor ticks\n :param yticks: (list) - manually set y minor ticks\n :param direction: (str) - direction of tick\n :param width: (str) - width of minor ticks\n :param size: (str) - length of minor ticks\n :param color: (str) - color of minor ticks\n :param bottom: (boolean) - whether to show bottom minor tick\n :param top: (boolean) - whether to show top minor tick\n :param left: (boolean) - whether to show left minor tick\n :param right: (boolean) - whether to show right minor tick\n :param xlocator: (str) - auto, years, months, days, hours, interval of tick\n :param xinterval: (str) - interval of date ticks\n :param xformatter: (str) - how to display the minor tick labels\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n # size = scale_it(ax, size, scale=scale, what='tick')\n\n size = scale_it(ax, size, 1, exp=True)\n width = scale_it(ax, width, 0.1)\n\n date = False\n locator = None\n\n if xticks is not None:\n ax.set_xticks(xticks)\n elif isinstance(xlocator, str):\n if xlocator == 'years':\n locator = YearLocator(base=int(xinterval))\n elif xlocator == 'months':\n locator = MonthLocator(interval=xinterval)\n elif xlocator == 'days':\n locator = DayLocator(interval=xinterval)\n elif xlocator == 'hours':\n locator = HourLocator(interval=xinterval)\n elif xlocator == 'minutes':\n locator = MinuteLocator(interval=xinterval)\n date = True\n elif isinstance(xlocator, int) or isinstance(xlocator, float):\n locator = MultipleLocator(xlocator)\n\n if locator is not None:\n ax.xaxis.set_minor_locator(locator)\n\n if xformatter is not None:\n if date:\n if xformatter is 'auto' and locator is not None:\n ax.xaxis.set_minor_formatter(AutoDateFormatter(locator))\n else:\n ax.xaxis.set_minor_formatter(DateFormatter(xformatter))\n else:\n ax.xaxis.set_minor_formatter(FormatStrFormatter(xformatter))\n\n if yticks is not None:\n 
ax.set_yticks(yticks)\n\n if axes is 'both':\n axes = ['x', 'y']\n\n for axis in axes:\n ax.tick_params(axis=axis,\n which='minor',\n direction=direction,\n left=left,\n right=right,\n bottom=bottom,\n top=top,\n size=size,\n width=width,\n color=color,\n **kwargs)\n return ax\n\n\ndef set_major_grid(ax, xgrid=True, ygrid=True, linestyle=(0, (16, 4)),\n linewidth=0.095,\n xcolor=COLORS['black'], ycolor=COLORS['black'],\n xalpha=ALPHAS['translucid'], yalpha=ALPHAS['translucid'],\n **kwargs):\n \"\"\"\n Modify major grid settings.\n\n :param ax: (mpl.axes) - plot axis\n :param xgrid: (boolean) - whether to show vertical major grid\n :param ygrid: (boolean) - whether to show horizontal major grid\n :param linestyle: (str) - linestyle of major grid\n :param xcolor: (str) - color of vertical major grid\n :param ycolor: (str) - color of horizontal major grid\n :param xalpha: (scalar) - transparency of vertical major grid\n :param yalpha: (scalar) - transparency of horizontal major grid\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n geom = plt.getp(plt.getp(ax, 'subplotspec'), 'geometry')\n nplots = geom[0] * geom[1] / 100\n\n linewidth = scale_it(ax, linewidth, 0.15, exp=True)\n linewidth = np.log(nplots + 1) + linewidth\n\n if xgrid:\n ax.xaxis.grid(b=xgrid,\n which='major',\n linestyle=linestyle,\n linewidth=linewidth,\n alpha=xalpha,\n **kwargs\n )\n else:\n ax.yaxis.grid(b=xgrid,\n which='major',\n alpha=0,\n **kwargs\n )\n\n if ygrid:\n ax.yaxis.grid(b=ygrid,\n which='major',\n color=ycolor,\n linestyle=linestyle,\n linewidth=linewidth,\n alpha=yalpha,\n **kwargs\n )\n else:\n ax.yaxis.grid(b=ygrid,\n which='major',\n alpha=0,\n **kwargs\n )\n\n return ax\n\n\ndef set_minor_grid(ax, xgrid=True, ygrid=True, linestyle=(0, (5, 5)),\n linewidth=0.095,\n xcolor=COLORS['black'], ycolor=COLORS['black'],\n xalpha=ALPHAS['translucent'], yalpha=ALPHAS['translucent'],\n **kwargs):\n \"\"\"\n Modify minor grid settings.\n\n :param ax: (mpl.axes) - plot axis\n :param xgrid: (boolean) - whether to show vertical minor grid\n :param ygrid: (boolean) - whether to show horizontal minor grid\n :param linestyle: (str) - linestyle of minor grid\n :param xcolor: (str) - color of vertical minor grid\n :param ycolor: (str) - color of horizontal minor grid\n :param xalpha: (scalar) - transparency of vertical minor grid\n :param yalpha: (scalar) - transparency of horizontal minor grid\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n linewidth = scale_it(ax, linewidth, 0.45)\n\n if xgrid:\n ax.xaxis.grid(b=xgrid,\n which='minor',\n linestyle=linestyle,\n linewidth=linewidth,\n alpha=xalpha,\n **kwargs\n )\n else:\n ax.yaxis.grid(b=xgrid,\n which='minor',\n alpha=0,\n **kwargs\n )\n\n if ygrid:\n ax.yaxis.grid(b=ygrid,\n which='minor',\n color=ycolor,\n linestyle=linestyle,\n linewidth=linewidth,\n alpha=yalpha,\n **kwargs\n )\n else:\n ax.yaxis.grid(b=ygrid,\n which='minor',\n alpha=0,\n **kwargs\n )\n\n return ax\n\n\ndef set_borders(ax, all_=True,\n bottom=True, top=True, left=True, right=True,\n spines='all', color=COLORS['light gray'],\n alpha=ALPHAS['semi opaque']):\n \"\"\"\n Modify border settings.\n\n :param ax: (mpl.axes) - plot axis\n :param all: (boolean) - whether to show all borders\n :param bottom: (boolean) - whether to show bottom border\n :param top: (boolean) - whether to show top border\n :param left: (boolean) - whether to show left border\n :param right: (boolean) - whether to show 
right border\n :param spines: (list) - borders to be affected\n :param color: (str) - color of borders\n :param alpha: (str) - transparency of borders\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n if spines is 'all':\n spines = ['top', 'bottom', 'left', 'right']\n\n for spine in spines:\n ax.spines[spine].set_color(color)\n ax.spines[spine].set_alpha(alpha)\n if not all_:\n ax.spines[spine].set_visible(False)\n\n if not left:\n ax.spines['left'].set_visible(False)\n if not right:\n ax.spines['right'].set_visible(False)\n if not bottom:\n ax.spines['bottom'].set_visible(False)\n if not top:\n ax.spines['top'].set_visible(False)\n\n return ax\n\n\ndef set_labels(ax,\n xlabel=None,\n ylabel=None,\n title=None,\n title_size=13.5,\n title_color=COLORS['black'],\n title_alpha=ALPHAS['semi opaque'],\n title_pad=0.965,\n xlabel_size=11,\n xlabel_color=COLORS['black'],\n xlabel_alpha=ALPHAS['semi opaque'],\n xlabel_pad=0.05,\n ylabel_size=11,\n ylabel_color=COLORS['black'],\n ylabel_alpha=ALPHAS['semi opaque'],\n ylabel_pad=0.05,\n length_scale=True,\n suptitle=False,\n ):\n \"\"\"\n Add and modify title and label settings.\n\n :param ax: (mpl.axes) - plot axis\n :param xlabel: (str) - label of x axis\n :param ylabel: (str) - label of y axis\n :param title: (str) - title of subplot\n :param title_size: (scalar) - size of title\n :param title_color: (str) - color of title\n :param title_alpha: (scalar) - transparency of title\n :param title_pad: (scalar) - distance between box and title\n :param xlabel_size: (scalar) - size of x label\n :param xlabel_color: (str) - color of x label\n :param xlabel_alpha: (scalar) - transparency of x label\n :param xlabel_pad: (scalar) - distance between ticks and x label\n :param ylabel_size: (scalar) - size of y label\n :param ylabel_color: (str) - color of y label\n :param ylabel_alpha: (scalar) - transparency of y label\n :param ylabel_pad: (scalar) - distance between ticks and y label\n :param length_scale: (scalar) - whether to scale the labels based on length\n :param suptitle: (boolean) - whether to make a figure title\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n title_size = scale_it(ax, title_size, 3, exp=True)\n title_pad = 2 * title_pad - scale_it(ax, title_pad, 0.005)\n if suptitle:\n title_pad += title_pad / 35\n title_size += title_size / 3\n\n xlabel_size = scale_it(ax, xlabel_size, 3, exp=True)\n xlabel_pad = scale_it(ax, xlabel_pad, 1)\n\n ylabel_size = scale_it(ax, ylabel_size, 3, exp=True)\n ylabel_pad = scale_it(ax, ylabel_pad, 1)\n\n if title is None:\n title = plt.getp(ax, 'title')\n\n if ylabel is None:\n ylabel = plt.getp(ax, 'ylabel')\n\n if xlabel is None:\n xlabel = plt.getp(ax, 'xlabel')\n\n if length_scale:\n length_scale = len(str(title)) / 3\n title_size -= length_scale\n ylabel_size -= length_scale / 2\n xlabel_size -= length_scale / 2\n\n if suptitle:\n plt.suptitle(title,\n size=title_size,\n color=title_color,\n alpha=title_alpha,\n y=title_pad)\n else:\n ax.set_title(title,\n size=title_size,\n color=title_color,\n alpha=title_alpha,\n y=title_pad)\n\n xlabel_pad += 2\n ax.set_xlabel(xlabel,\n size=xlabel_size,\n color=xlabel_color,\n alpha=xlabel_alpha,\n labelpad=xlabel_pad)\n\n ylabel_pad += 5\n ax.set_ylabel(ylabel,\n size=ylabel_size,\n color=ylabel_color,\n alpha=ylabel_alpha,\n labelpad=ylabel_pad)\n\n return ax\n\n\ndef set_legend(ax, size=12,\n color=COLORS['black'], alpha=ALPHAS['translucent'],\n loc='best', frame=False, ncol=1, nscatter=1, **kwargs):\n \"\"\"\n Add and modify legend settings.\n\n :param ax: 
(mpl.axes) - plot axis\n :param size: (str) - size of legend labels\n :param color: (str) - color of legend labels\n :param alpha: (str) - transparency of legend labels\n :param loc: (str) - location of legend\n :param frame: (boolean) - whether to have a box around legend\n :param ncol: (int) - number of legend columns\n :param nscatter: (int) - number of scatter points to show in legend\n :param kwargs: (kwargs) - additional keyword arguments\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n if plt.getp(ax, 'legend_handles_labels')[1]:\n size = scale_it(ax, size, 1, exp=True)\n start_locs = ['lower', 'upper', 'center']\n\n try:\n if loc is not 'best':\n loc_keywords = get_loc_keywords(loc)\n if loc_keywords[0] not in start_locs:\n loc_keywords = loc_keywords[::-1]\n loc = ' '.join(loc_keywords)\n except:\n pass\n\n legend = ax.legend(loc=loc,\n ncol=ncol,\n fontsize=size,\n frameon=frame,\n scatterpoints=nscatter,\n **kwargs\n )\n\n for text in legend.get_texts():\n plt.setp(text, size=size, alpha=alpha, color=color)\n\n return ax\n\n\ndef set_inherited(ax, xlabel='', ylabel='', title='',\n xlim=None, ylim=None, origin_xlim=None, origin_ylim=None):\n \"\"\"\n Get inputs from existing plot and set them again.\n\n :param ax: (mpl.axes) - plot axis\n :param xlabel: (str) - label of x axis\n :param ylabel: (str) - label of y axis\n :param title: (str) - title of subplot\n :param xlim: (tup) - left and right x axis limit in a tuple, respectively\n :param ylim: (tup) - left and right y axis limit in a tuple, respectively\n :return ax: (mpl.axes) - plot axis\n \"\"\"\n if xlabel is '':\n xlabel = plt.getp(ax, 'xlabel')\n if ylabel is '':\n ylabel = plt.getp(ax, 'ylabel')\n if title is '':\n title = plt.getp(ax, 'title')\n\n prev_xlim = plt.getp(ax, 'xlim')\n try:\n if origin_xlim is None:\n origin_xlim = xlim\n try:\n int(xlim[0])\n except:\n xlim = mdates.date2num(xlim)\n\n if xlim is None:\n xlim = prev_xlim\n elif xlim[0] > prev_xlim[0] and xlim[1] < prev_xlim[1]:\n xlim = (prev_xlim[0], prev_xlim[1])\n elif xlim[0] > prev_xlim[0]:\n xlim = (prev_xlim[0], xlim[1])\n elif xlim[1] < prev_xlim[1]:\n xlim = (xlim[0], prev_xlim[1])\n except:\n print('Unable to inherit xlim!')\n\n prev_ylim = plt.getp(ax, 'ylim')\n if ylim is None:\n ylim = prev_ylim\n elif ylim[0] > prev_ylim[0] and ylim[1] < prev_ylim[1]:\n ylim = (prev_ylim[0], prev_ylim[1])\n elif ylim[0] > prev_ylim[0]:\n ylim = (prev_ylim[0], ylim[1])\n elif ylim[1] < prev_ylim[1]:\n ylim = (ylim[0], prev_ylim[1])\n\n if origin_xlim is not None:\n xlim = origin_xlim\n\n if origin_ylim is not None:\n ylim = origin_ylim\n\n return ax, xlabel, ylabel, title, xlim, ylim\n\n\ndef savefig(file_path, tight_layout=True, both=False, dpi=DEFAULT['dpi'],\n suffix='auto', close=True, **kwargs):\n \"\"\"\n Save figure.\n\n :param file_path: (str) - path to file\n :param tight_layout: (boolean) - whether to save with tight layout\n :param both: (boolean) - whether to save both png and pdf\n :param suffix: (str/boolean) - whether to append png if not exist\n :param close: (boolean) - whether to close figure after saving\n :param kwargs: (kwargs) - additional keyword arguments\n :return file_path: (str) - path to file\n \"\"\"\n if tight_layout:\n bbox_inches = 'tight'\n else:\n bbox_inches = None\n\n if both:\n png_path = file_path + '.png'\n plt.savefig(png_path, bbox_inches=bbox_inches, dpi=dpi)\n pdf_path = file_path + '.pdf'\n plt.savefig(pdf_path, bbox_inches=bbox_inches)\n elif suffix is 'auto':\n if not file_path.endswith('.png') and not 
file_path.endswith('.pdf'):\n file_path += '.png'\n print('Suffix .png was appended to filepath!')\n plt.savefig(file_path, bbox_inches=bbox_inches, dpi=dpi, **kwargs)\n\n utils(close=close)\n\n return file_path\n\n\ndef utils(close=False, figsize=None, ax=False, rows=1, cols=1, pos=1,\n tight_layout=False, projection=None):\n \"\"\"\n :param close: (boolean) - whether to close subplots\n :param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig\n :param ax: (boolean) - whether to return subplot\n :param rows: (boolean) - whether to close subplots\n :param cols: (boolean) - whether to close subplots\n :param pos: (boolean) - whether to close subplots\n :param tight_layout: (boolean) - whether to save with tight layout\n :param projection: (cartopy.crs) - projection of map\n \"\"\"\n if figsize is not None:\n fig = plt.figure(figsize=figsize)\n return fig\n if ax:\n projection = _get_projection_logic(projection)\n return plt.subplot(rows, cols, pos, projection=projection)\n if tight_layout:\n try:\n plt.tight_layout()\n except:\n print('Unable to set tight layout!')\n if close:\n plt.close('all')\n\n\ndef scale_it(ax, value, order_mag=1, exp=False):\n \"\"\"\n Scale matplotlib plots\n\n :param ax: (mpl.axes) - plot axis\n :param value: (scalar) - value to be scaled\n :param order_mag: (scalar) - the order of magnitude to scale by\n :param exp: (boolean) - whether scaling should be exponential\n :return scaled_value: (scalar) - scaled value\n \"\"\"\n bbox = ax.get_window_extent()\n width, height = bbox.width / 72., bbox.height / 72.\n\n geom = plt.getp(ax, 'geometry')\n nrows = geom[0]\n ncols = geom[1]\n\n nplots = nrows * ncols\n size_drop = np.power(nplots, 0.2)\n\n if height > width:\n major = height\n minor = width\n else:\n major = width\n minor = height\n base_term = height + width\n power_term = (minor + (major / 2) / major)\n factor = np.sqrt(np.log(np.power(base_term, power_term)) / 2)\n if exp:\n return np.power(value + factor * order_mag, 1.05) / size_drop\n else:\n return (value + factor * order_mag)\n\n\ndef scale_it_bokeh(p, value, order_mag=1, exp=False):\n \"\"\"\n Scale bokeh plots\n\n :param p: (bokeh.figure) - bokeh figure\n :param value: (scalar) - value to be scaled\n :param order_mag: (scalar) - the order of magnitude to scale by\n :param exp: (boolean) - whether scaling should be exponential\n :return scaled_value: (scalar) - scaled value\n \"\"\"\n width, height = p.plot_width / 36, p.plot_height / 36\n\n if height > width:\n major = height\n minor = width\n else:\n major = width\n minor = height\n base_term = height + width\n power_term = (minor + (major / 2) / major)\n\n factor = np.sqrt(np.log(base_term * power_term) / 2)\n\n if exp:\n return np.power(value + factor * order_mag, 1.05)\n else:\n return value + factor * order_mag\n\n############################################################################\n# Private utils to clean up code...\n\n\ndef _set_figsize_logic(sidebar_pos=None,\n figsize=None,\n rows=1, cols=1,\n pos=1, dpi=None):\n if sidebar_pos is None:\n if figsize is not None and pos == 1:\n set_figsize(figsize=figsize, rows=rows,\n cols=cols, pos=pos, dpi=dpi)\n elif figsize is None and pos == 1:\n set_figsize(figsize='wide', rows=rows,\n cols=cols, pos=pos, dpi=dpi)\n else:\n if figsize is not None and pos == 1 and sidebar_pos == 1:\n set_figsize(figsize=figsize, rows=rows,\n cols=cols, pos=pos, dpi=dpi)\n elif figsize is None and pos == 1 and sidebar_pos == 1:\n set_figsize(figsize='wide', rows=rows,\n cols=cols, pos=pos, 
dpi=dpi)\n\n\ndef _get_xtext_logic(x=None):\n xtext = False\n xstr = False\n xticklabels = None\n\n try:\n xstr = isinstance(x[0], str)\n except Exception:\n xstr = isinstance(x, str)\n\n if xstr:\n xticklabels = copy.copy(x)\n x = np.arange(len(xticklabels))\n xtext = True\n\n return x, xtext, xticklabels\n\n\ndef _get_x_to_y_logic(x, y):\n if y is None:\n y = copy.copy(x)\n x = range(len(y))\n return x, y\n\n\ndef _get_xlim_logic(x, xlim, pad=0, align='edge'):\n origin_xlim = xlim # store past xlim\n try:\n if xlim is None:\n if align == 'edge':\n xlim = (np.min(x) - pad / 3, np.max(x) + pad * 1.25)\n else:\n xlim = (np.min(x) - pad * 1.25, np.max(x) + pad * 1.25)\n except TypeError:\n if xlim is None:\n xdiff = x[1] - x[0]\n if align == 'edge':\n xlim = (x[0] - xdiff / 5, x[-1] + xdiff)\n else:\n xlim = (x[0] - xdiff / 2, x[-1] + xdiff / 1.5)\n return origin_xlim, xlim\n\n\ndef _get_ylim_logic(y, ylim):\n origin_ylim = ylim\n try:\n if ylim is None:\n ylim = (np.min(y), np.max(y))\n except TypeError:\n ylim = None\n return origin_ylim, ylim\n\n\ndef _get_ax_logic(ax=None, twinx=None, twiny=None,\n rows=1, cols=1, pos=1,\n transform=False, projection=None):\n if twinx is None and twiny is None:\n if ax is None:\n if projection is not None:\n ax = plt.subplot(rows, cols, pos, projection=projection)\n else:\n ax = plt.subplot(rows, cols, pos)\n else:\n if twinx is not None:\n ax = twinx.twinx()\n rows = plt.getp(twinx, 'geometry')[0]\n cols = plt.getp(twinx, 'geometry')[1]\n\n return ax, rows, cols\n\n\ndef _get_bases_logic(data=None):\n if isinstance(data, pd.DataFrame):\n data = data.values\n\n try:\n maxmin_diff = (np.percentile(data, 95) -\n np.abs(np.percentile(data, 5))\n )\n except:\n maxmin_diff = np.max(data) - np.abs(np.min(data))\n\n maxmin_diff = np.abs(maxmin_diff)\n\n if maxmin_diff <= 0.005:\n base = 0.0125\n base2 = 0.025\n elif maxmin_diff <= 0.05:\n base = 0.025\n base2 = 0.05\n elif maxmin_diff <= 0.35:\n base = 0.05\n base2 = 0.1\n elif maxmin_diff <= 1:\n base = 0.1\n base2 = 0.2\n else:\n base = 5\n base2 = 5\n\n return base, base2\n\n\ndef _get_vmin_vmax_logic(data=None, base=1,\n vmin=None, vmax=None,\n data_lim=None):\n if isinstance(data, pd.DataFrame):\n data = data.values\n\n if data_lim is None:\n if vmin is None:\n try:\n vmin = round_to(np.percentile(data, 7.5),\n prec=5,\n base=base)\n except ValueError:\n vmin = round_to(np.nanpercentile(data, 7.5),\n prec=5,\n base=base)\n if vmax is None:\n try:\n vmax = round_to(np.percentile(data, 97.5),\n prec=5,\n base=base)\n except ValueError:\n vmax = round_to(np.nanpercentile(data, 97.5),\n prec=5,\n base=base)\n if vmin == vmax:\n vmin = np.percentile(data, 10)\n vmax = np.percentile(data, 97.5)\n else:\n vmin = data_lim[0]\n vmax = data_lim[1]\n\n return vmin, vmax\n\n\ndef _get_stats_logic(ax, y,\n norm=False,\n anom=False,\n norm_anom=False,\n cumsum=False,\n ):\n if anom:\n y = get_anom(y)\n if norm:\n y = get_norm(y)\n if norm_anom:\n y = get_norm_anom(y)\n if cumsum:\n y = np.cumsum(y)\n return y\n\n\ndef _show_stats_logic(ax, y, stats):\n if stats:\n stats_str = get_stats(y, show=False)\n if isinstance(stats, str):\n set_axtext(ax, stats_str, loc=stats)\n else:\n set_axtext(ax, stats_str, loc='top left')\n\n\ndef _get_interval_logic(interval=None,\n vmin=None, vmax=None,\n base=1, oom=1):\n if interval is None:\n interval = (np.absolute(vmin) + np.absolute(vmax)) / 5.\n if interval > 1:\n interval = round_to(interval, base=base)\n if interval == 0:\n interval = 1\n if (vmax - vmin) % interval != 0:\n 
interval = np.power(10, oom) / 2\n vmin += (vmax - vmin) % interval\n elif interval < 0.1:\n interval = round_to(interval, base=base)\n if interval == 0:\n interval = 0.01\n elif interval < 1:\n interval = round_to(interval, base=base)\n if interval == 0:\n interval = 0.1\n vmax_vmin_total = np.abs(vmax) + np.abs(vmin)\n if interval < vmax_vmin_total / 150.:\n interval = round_to(vmax_vmin_total / 2., 5) / interval\n return interval\n\n\ndef _get_fmt_logic(fmt=None, interval=None):\n interval = np.abs(interval)\n if fmt is None:\n if interval < 0.25:\n fmt = '%.2f'\n elif interval < 1:\n fmt = '%.1f'\n elif interval >= 1:\n fmt = '%1d'\n return fmt\n\n\ndef _fix_vmin_vmax_logic(vmin=None, vmax=None,\n data=None, interval=1):\n if isinstance(data, pd.DataFrame):\n data = data.values\n\n if vmin == vmax:\n vmin = np.min(data)\n vmax = vmin + interval * 10\n\n return vmin, vmax\n\n\ndef _get_tick_locs_cbar_count_logic(tick_locs=None, vmin=None,\n vmax=None, interval=1):\n if tick_locs is None:\n tick_locs = np.arange(vmin, vmax + interval, interval)\n\n cbar_count = len(tick_locs) - 1\n\n if len(tick_locs) > 20:\n tick_locs = tick_locs[::4]\n elif len(tick_locs) > 10:\n tick_locs = tick_locs[::2]\n\n return tick_locs, cbar_count\n\n\ndef _save_logic(save='', tight_layout='auto', close=False,\n dpi=None, pos=1, rows=1, cols=1):\n if not close and pos == (rows * cols):\n utils(tight_layout=True)\n if save is not '':\n if tight_layout == 'on' or (tight_layout == 'auto' and\n pos == (rows * cols)):\n savefig(save, close=close, tight_layout=True, dpi=dpi)\n else:\n savefig(save, close=close, tight_layout=False, dpi=dpi)\n else:\n if tight_layout == 'on' or (tight_layout == 'auto' and\n pos == (rows * cols)):\n utils(tight_layout=True)\n\n\ndef _set_share_logic(ax=None, rows=1, cols=1,\n sharex=None, sharey=None,\n xlabel=None, ylabel=None):\n if sharex is not None:\n set_share(ax, sharex, axis='x', xlabel=xlabel)\n rows = plt.getp(sharex, 'geometry')[0]\n cols = plt.getp(sharex, 'geometry')[1]\n if sharey is not None:\n set_share(ax, sharey, axis='y', ylabel=ylabel)\n rows = plt.getp(sharey, 'geometry')[0]\n cols = plt.getp(sharey, 'geometry')[1]\n return rows, cols\n\n\ndef _set_datetime_logic(ax=None,\n minor_date_ticks=False,\n title_pad=0):\n major_xlocator, major_xinterval, major_xformatter, \\\n minor_xlocator, minor_xinterval, minor_xformatter, dt_bool = \\\n set_date_ticks(ax, minor_date_ticks=minor_date_ticks)\n\n if minor_date_ticks:\n title_pad += 0.14\n\n if dt_bool:\n return (major_xlocator, major_xinterval, major_xformatter,\n minor_xlocator, minor_xinterval, minor_xformatter,\n title_pad)\n else:\n return [None] * 6 + [title_pad]\n\n\ndef _settings_logic(ax=None, x=None, twinx=None, twiny=None, xticks=None,\n major_xlocator=None, major_xinterval=None,\n major_xformatter=None, xlabel='',\n ylabel='', title='', suptitle=False, title_pad=0.965,\n aligned=True, length_scale=True,\n xtext=False, xticklabels=None,\n minor_date_ticks=True):\n (major_xlocator, major_xinterval, major_xformatter,\n minor_xlocator, minor_xinterval, minor_xformatter, title_pad) = \\\n _set_datetime_logic(ax=ax,\n minor_date_ticks=minor_date_ticks,\n title_pad=title_pad)\n\n if twinx is None and twiny is None:\n set_major_tick_labels(ax)\n try:\n set_major_ticks(ax, xticks=xticks,\n xlocator=major_xlocator,\n xinterval=major_xinterval,\n xformatter=major_xformatter)\n except:\n set_major_ticks(ax)\n set_major_grid(ax)\n set_borders(ax)\n set_labels(ax, xlabel=xlabel, ylabel=ylabel, suptitle=suptitle,\n 
title=title, title_pad=title_pad, length_scale=length_scale)\n else:\n if twinx is not None:\n set_twin(ax, twinx, axis='x', suptitle=suptitle,\n title_pad=title_pad,\n xlabel=xlabel, ylabel=ylabel, title=title,\n aligned=aligned, length_scale=length_scale)\n\n if xtext:\n plt.xticks(x, xticklabels)\n\n\ndef _get_color_logic(color, facecolor, edgecolor, matchcolor):\n if color is not None:\n facecolor = color\n edgecolor = color\n if matchcolor:\n edgecolor = facecolor\n return facecolor, edgecolor\n\n\ndef _get_dt_from_pd_logic(x):\n if isinstance(x, pd.DatetimeIndex):\n return x.to_pydatetime()\n else:\n return x\n\n\ndef _get_width_logic(x):\n\n try:\n try:\n width = np.round(np.average(np.diff(x)), 0) / 1.2\n except (ValueError, IndexError, TypeError):\n width = 0.833333333\n except:\n days_multiplier = (x[1] - x[0]).total_seconds() / 3600. / 24.\n width = 0.833333333 * days_multiplier\n\n return width\n\n\ndef _set_heatmap_mask(ax, df, mask, size):\n for j, i in np.column_stack(np.where(mask)):\n try:\n ax.text(i + 0.5, j + 0.5, '{:.2f}'.format(df[j, i]),\n color=COLORS['light gray'], alpha=0.75,\n va='center', ha='center', size=size)\n except:\n pass\n\n\ndef _fix_contourf_logic(contourf, interval, vmin, vmax):\n if len(contourf) < 6:\n contourf = np.linspace(vmin, vmax, 8)\n interval = np.diff(contourf).mean()\n return contourf, interval\n else:\n return contourf, interval\n\n\ndef _balance_logic(balance, vmin, vmax):\n vmin_vmax_signs = np.sign([vmin, vmax])\n if balance:\n if 1 in vmin_vmax_signs and -1 in vmin_vmax_signs:\n if np.abs(vmin) >= np.abs(vmax):\n vmax = -vmin\n else:\n vmin = -vmax\n print('vmin and vmax were balanced!')\n return vmin, vmax\n\n\ndef _add_features(ax, land, ocean, coastlines, states,\n countries, lakes, rivers):\n import cartopy.feature as cfeature\n\n if land:\n ax.add_feature(cfeature.LAND, zorder=100)\n if ocean:\n ax.add_feature(cfeature.OCEAN, zorder=100)\n if coastlines:\n ax.add_feature(cfeature.COASTLINE, linestyle='-',\n alpha=.85, edgecolor='black')\n if states:\n feature_name = 'admin_1_states_provinces_lines'\n states_provinces = cfeature.NaturalEarthFeature(category='cultural',\n name=feature_name,\n scale='10m',\n facecolor='none')\n ax.add_feature(states_provinces,\n edgecolor='black',\n linestyle='-',\n alpha=.95)\n if countries:\n ax.add_feature(cfeature.BORDERS,\n edgecolor='black',\n linestyle='-',\n alpha=.95)\n if lakes:\n ax.add_feature(cfeature.LAKES, alpha=0.95)\n\n if rivers:\n ax.add_feature(cfeature.RIVERS, alpha=0.95)\n\n\ndef _get_lat_lon_lim_logic(latlim, lonlim,\n lat1, lat2, lon1, lon2,\n central_longitude=0, region=None):\n if latlim is not None:\n lat1 = latlim[0]\n lat2 = latlim[1]\n if lonlim is not None:\n lon1 = lonlim[0] - central_longitude\n lon2 = lonlim[1] - central_longitude\n\n lat1, lat2, lon1, lon2 = get_region_latlim(region,\n lat1=lat1,\n lat2=lat2,\n lon1=lon1,\n lon2=lon2)\n\n return lat1, lat2, lon1, lon2\n\n\ndef _get_projection_logic(projection, lons=None, central_longitude=0):\n import cartopy.crs as ccrs\n if projection is None:\n if central_longitude != 0:\n lons -= central_longitude\n projection = ccrs.PlateCarree(central_longitude=central_longitude)\n else:\n projection = ccrs.PlateCarree()\n return projection\n\n\ndef _set_contour_logic(ax, lons2, lats2, data2, contour,\n projection, fmt, clabel):\n if contour is not None:\n im1 = ax.contour(lons2,\n lats2,\n data2,\n contour, linewidths=0.7, alpha=0.85,\n colors='k', linestyles='solid',\n transform=projection)\n if clabel:\n clabels = 
plt.clabel(im1, fontsize=8, inline=1, fmt=fmt)\n [txt.set_bbox(dict(facecolor='white',\n edgecolor='none',\n boxstyle='round',\n pad=0, alpha=0.3)\n ) for txt in clabels]\n\n\ndef _parse_style(style):\n style = str(style)\n styles = style.split('/')\n ptype = styles[0]\n\n try:\n color = styles[1]\n except:\n color = COLORS['red']\n\n try:\n linestyle = styles[2]\n except:\n linestyle = '-'\n\n try:\n marker = styles[3]\n except:\n marker = ''\n\n if ptype == '':\n ptype = 'line'\n\n if linestyle == '':\n linestyle = '-'\n\n return ptype, color, linestyle, marker\n\n\ndef _pop_keys(vis_dict, ptype):\n drop_dict = {'line': ['matchcolor', 'facecolor', 'edgecolor',\n 'width', 'height', 'align', 'sidebar_count',\n 'sidebar_pos', 'bar_vals', 'stack',\n 's', 'c', 'cbar', 'cmap', 'orientation',\n 'interval', 'tick_locs', 'fmt', 'pad', 'size'\n ],\n 'bar': ['s', 'c', 'cbar', 'cmap', 'projection'\n 'interval', 'tick_locs', 'fmt', 'pad',\n 'marker', 'stack', 'size'\n ],\n 'scatter': ['matchcolor', 'width', 'height',\n 'align', 'sidebar_count', 'stack',\n 'sidebar_pos', 'bar_vals'\n ]\n }\n\n for key in drop_dict[ptype]:\n vis_dict.pop(key, None)\n return vis_dict\n" ]
[ [ "numpy.ones_like", "numpy.median", "matplotlib.dates.DateFormatter", "numpy.min", "numpy.sign", "numpy.where", "matplotlib.dates.num2date", "numpy.cumsum", "matplotlib.patches.Rectangle", "matplotlib.colorbar.ColorbarBase", "matplotlib.dates.DayLocator", "matplotlib.pyplot.xticks", "matplotlib.colors.LinearSegmentedColormap.from_list", "numpy.max", "numpy.histogram", "matplotlib.pyplot.colorbar", "matplotlib.dates.AutoDateLocator", "numpy.log", "numpy.nanpercentile", "matplotlib.pyplot.savefig", "matplotlib.pyplot.get_cmap", "matplotlib.dates.HourLocator", "matplotlib.dates.AutoDateFormatter", "numpy.arange", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.clabel", "matplotlib.pyplot.subplot", "pandas.to_datetime", "numpy.array", "matplotlib.ticker.MultipleLocator", "numpy.percentile", "numpy.ma.masked_invalid", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "numpy.diff", "matplotlib.ticker.FormatStrFormatter", "numpy.power", "matplotlib.pyplot.getp", "numpy.absolute", "matplotlib.pyplot.setp", "matplotlib.dates.MinuteLocator", "matplotlib.pyplot.suptitle", "matplotlib.dates.MonthLocator", "matplotlib.pyplot.figtext", "numpy.abs", "numpy.linspace", "matplotlib.dates.date2num", "matplotlib.ticker.FixedLocator" ] ]
johnwlambert/argoverse-api
[ "ae7d7ad392695b2b9ad52cb7b5ad9145c13a0955" ]
[ "argoverse/evaluation/eval_utils.py" ]
[ "# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>\n\"\"\"Utilities used in evaluation of performance.\"\"\"\n\nimport copy\nfrom typing import Any, Dict, List, Tuple\n\nimport numpy as np\nfrom argoverse.map_representation.map_api import ArgoverseMap\nfrom argoverse.utils.transform import quat2rotmat\n\n# Label dictionary should be of the form {\"center\": {\"x\": 0.0, \"y\": 0.0, \"z\": 0.0},\n# \"rotation\": {\"x\": 0.0, \"y\": 0.0, \"z\": 0.0},\n# \"height\": 0.0, \"width\": 0.0, \"depth\": 0.0}\n_LabelType = Dict[str, Any]\n\n\ndef get_pc_inside_bbox(pc_raw: np.ndarray, bbox: np.ndarray) -> np.ndarray:\n \"\"\"Get part of raw point cloud inside a given bounding box.\n\n Args:\n pc_raw: The raw point cloud\n bbox: The bounding box to restrict into\n\n Returns:\n The part of the point cloud inside the bounding box\n\n \"\"\"\n\n U_lst = []\n V_lst = []\n W_lst = []\n P1_lst = []\n P2_lst = []\n P4_lst = []\n P5_lst = []\n\n u = bbox[1] - bbox[0]\n v = bbox[2] - bbox[0]\n w = np.zeros((3, 1))\n w[2, 0] += bbox[3]\n\n p5 = w + bbox[0]\n\n U_lst.append(u[0:3, 0])\n if len(U_lst) == 0:\n return np.array([])\n\n V_lst.append(v[0:3, 0])\n W_lst.append(w[0:3, 0])\n P1_lst.append(bbox[0][0:3, 0])\n P2_lst.append(bbox[1][0:3, 0])\n P4_lst.append(bbox[2][0:3, 0])\n P5_lst.append(p5[0:3, 0])\n\n U = np.array(U_lst)\n W = np.array(W_lst)\n V = np.array(V_lst)\n P1 = np.array(P1_lst)\n P2 = np.array(P2_lst)\n P4 = np.array(P4_lst)\n P5 = np.array(P5_lst)\n\n dot1 = np.matmul(U, pc_raw.transpose(1, 0))\n dot2 = np.matmul(V, pc_raw.transpose(1, 0))\n dot3 = np.matmul(W, pc_raw.transpose(1, 0))\n u_p1 = np.tile((U * P1).sum(axis=1), (len(pc_raw), 1)).transpose(1, 0)\n v_p1 = np.tile((V * P1).sum(axis=1), (len(pc_raw), 1)).transpose(1, 0)\n w_p1 = np.tile((W * P1).sum(axis=1), (len(pc_raw), 1)).transpose(1, 0)\n u_p2 = np.tile((U * P2).sum(axis=1), (len(pc_raw), 1)).transpose(1, 0)\n v_p4 = np.tile((V * P4).sum(axis=1), (len(pc_raw), 1)).transpose(1, 0)\n w_p5 = np.tile((W * P5).sum(axis=1), (len(pc_raw), 1)).transpose(1, 0)\n\n flag = np.logical_and(\n np.logical_and(in_between_matrix(dot1, u_p1, u_p2), in_between_matrix(dot2, v_p1, v_p4)),\n in_between_matrix(dot3, w_p1, w_p5),\n )\n\n return pc_raw[flag[0, :]]\n\n\ndef label_to_bbox(label: _LabelType) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Convert a label into a bounding box.\n\n Args:\n label: _LabelType\n\n Returns:\n bbox: nupmy array for bounding box itself\n orientation: numpy array for bounding box orientation\n\n \"\"\"\n\n length = label[\"length\"]\n width = label[\"width\"]\n height = label[\"height\"]\n\n p0 = np.array([-length / 2, -width / 2, -height / 2])[:, np.newaxis]\n p1 = np.array([+length / 2, -width / 2, -height / 2])[:, np.newaxis]\n p2 = np.array([-length / 2, +width / 2, -height / 2])[:, np.newaxis]\n\n bbox = np.array([p0, p1, p2, height])\n\n R = quat2rotmat((label[\"rotation\"][\"w\"], label[\"rotation\"][\"x\"], label[\"rotation\"][\"y\"], label[\"rotation\"][\"z\"]))\n t = np.array([label[\"center\"][\"x\"], label[\"center\"][\"y\"], label[\"center\"][\"z\"]])[:, np.newaxis]\n\n v = np.array([1, 0, 0])[:, np.newaxis]\n orientation = np.matmul(R, v)\n orientation = np.arctan2(orientation[1, 0], orientation[0, 0])\n\n return transform_bounding_box_3d(bbox, R, t), orientation\n\n\ndef transform_bounding_box_3d(bbox: np.ndarray, R: np.ndarray, t: np.ndarray) -> List[np.ndarray]:\n \"\"\"Transform bounding box with rotation and translation.\n\n Args:\n bbox: The bounding box\n R: The rotation 
transformation\n t: The translation transformation\n\n Returns:\n The transformed bounding box\n\n \"\"\"\n\n p0 = np.matmul(R, bbox[0]) + t\n p1 = np.matmul(R, bbox[1]) + t\n p2 = np.matmul(R, bbox[2]) + t\n\n return [p0, p1, p2, bbox[3]]\n\n\ndef in_between_matrix(x: np.ndarray, v1: np.ndarray, v2: np.ndarray) -> np.ndarray:\n \"\"\"Element-by-element check to see if x_ij is between v1_ij and v2_ij, without knowing v1 > v2 order.\n\n Args:\n x: matrix to check if is in bounds\n v1: elements comprising one side of range check\n v2: elements comprising other side of range check\n\n Returns:\n Matrix of whether x_ij is between v1_ij and v2_ij\n\n \"\"\"\n\n return np.logical_or(np.logical_and(x <= v1, x >= v2), np.logical_and(x <= v2, x >= v1))\n\n\ndef leave_only_roi_region(\n lidar_pts: np.ndarray, egovehicle_to_city_se3: np.ndarray, ground_removal_method: str, city_name: str = \"MIA\"\n) -> np.ndarray:\n \"\"\"Return points that are on driveable area, and (optionally) are not ground.\n\n Args:\n lidar_pts: The lidar points\n egovehicle_to_city_se3: Transformation from vehicle to map (city) frame\n ground_removal_method: \"map\" is only supported value currently, otherwise will not run ground removal\n city_name: city name, either 'PIT' or 'MIA'\n\n Returns:\n Modified point cloud reduced by driveable area and ground.\n\n \"\"\"\n\n avm = ArgoverseMap()\n\n driveable_area_pts = copy.deepcopy(lidar_pts)\n driveable_area_pts = egovehicle_to_city_se3.transform_point_cloud(driveable_area_pts) # put into city coords\n\n driveable_area_pts = avm.remove_non_roi_points(driveable_area_pts, city_name)\n\n if ground_removal_method == \"map\":\n driveable_area_pts = avm.remove_ground_surface(driveable_area_pts, city_name)\n driveable_area_pts = egovehicle_to_city_se3.inverse_transform_point_cloud(\n driveable_area_pts\n ) # put back into ego-vehicle coords\n return driveable_area_pts\n" ]
[ [ "numpy.array", "numpy.matmul", "numpy.zeros", "numpy.logical_and", "numpy.arctan2" ] ]
petteriTeikari/crank-voiceconversion-wrapper
[ "8d34a76a8a4949cb1780a4770922f755dd04000c" ]
[ "crank/net/trainer/trainer_vqvae.py" ]
[ "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright (c) 2020 K. Kobayashi <[email protected]>\n#\n# Distributed under terms of the MIT license.\n\"\"\"\nVQVAE trainer\n\n\"\"\"\n\nimport random\n\nimport torch\nfrom crank.net.trainer import BaseTrainer\nfrom torch.nn.utils import clip_grad_norm\n\n\nclass VQVAETrainer(BaseTrainer):\n def __init__(\n self,\n model,\n optimizer,\n criterion,\n dataloader,\n writer,\n expdir,\n conf,\n feat_conf,\n scheduler=None,\n scaler=None,\n resume=0,\n device=\"cuda\",\n n_jobs=-1,\n ):\n super().__init__(\n model,\n optimizer,\n criterion,\n dataloader,\n writer,\n expdir,\n conf,\n feat_conf,\n scheduler=scheduler,\n scaler=scaler,\n resume=resume,\n device=device,\n n_jobs=n_jobs,\n )\n self.cycle_flag = False\n self._check_cycle_start()\n\n def check_custom_start(self):\n self._check_cycle_start()\n\n def train(self, batch, phase=\"train\"):\n loss = self._get_loss_dict()\n if self.cycle_flag:\n loss = self.forward_cycle(batch, loss, phase=phase)\n else:\n loss = self.forward_vqvae(batch, loss, phase=phase)\n loss = self.forward_spkradv(batch, loss, phase=phase)\n loss = self.forward_spkrclassifier(batch, loss, phase=phase)\n loss_values = self._parse_loss(loss)\n self._flush_writer(loss, phase)\n return loss_values\n\n @torch.no_grad()\n def dev(self, batch):\n loss_values = self.train(batch, phase=\"dev\")\n for cv_spkr_name in random.sample(list(self.spkrs.keys()), self.n_cv_spkrs):\n enc_h = self._get_enc_h(batch)\n dec_h, spkrvec = self._get_dec_h(batch, cv_spkr_name=cv_spkr_name)\n outputs = self.model[\"G\"](batch[\"in_feats\"], enc_h, dec_h, spkrvec=spkrvec)\n self._generate_cvwav(\n batch,\n outputs,\n cv_spkr_name,\n tdir=\"dev_wav\",\n save_hdf5=False,\n n_samples=self.n_dev_samples,\n )\n return loss_values\n\n @torch.no_grad()\n def reconstruction(self, batch, tdir=\"reconstruction\"):\n enc_h = self._get_enc_h(batch)\n dec_h, spkrvec = self._get_dec_h(batch, cv_spkr_name=None)\n outputs = self.model[\"G\"].forward(\n batch[\"in_feats\"], enc_h, dec_h, spkrvec=spkrvec\n )\n self._generate_cvwav(\n batch,\n outputs,\n None,\n tdir=tdir,\n save_hdf5=True,\n save_decoded=False,\n n_samples=-1,\n )\n\n @torch.no_grad()\n def eval(self, batch):\n for cv_spkr_name in self.spkrs.keys():\n enc_h = self._get_enc_h(batch)\n dec_h, spkrvec = self._get_dec_h(batch, cv_spkr_name=cv_spkr_name)\n outputs = self.model[\"G\"](batch[\"in_feats\"], enc_h, dec_h, spkrvec=spkrvec)\n self._generate_cvwav(\n batch,\n outputs,\n cv_spkr_name,\n tdir=\"eval_wav\",\n save_hdf5=True,\n save_decoded=False,\n n_samples=-1,\n )\n\n def forward_vqvae(self, batch, loss, phase=\"train\"):\n enc_h = self._get_enc_h(batch)\n dec_h, spkrvec = self._get_dec_h(batch)\n feats = batch[\"in_feats\"]\n outputs = self.model[\"G\"].forward(feats, enc_h, dec_h, spkrvec=spkrvec)\n loss = self.calculate_vqvae_loss(batch, outputs, loss)\n\n if self.conf[\"use_spkradv_training\"]:\n loss = self.calculate_spkradv_loss(\n batch, outputs, loss, label=\"org\", phase=phase\n )\n\n loss[\"objective\"] += loss[\"G\"]\n if phase == \"train\":\n self.step_model(loss, model=\"G\")\n\n return loss\n\n def forward_cycle(self, batch, loss, phase=\"train\"):\n enc_h = self._get_enc_h(batch)\n enc_h_cv = self._get_enc_h(batch, use_cvfeats=True)\n dec_h, spkrvec = self._get_dec_h(batch)\n dec_h_cv, spkrvec_cv = self._get_dec_h(batch, use_cvfeats=True)\n feats = batch[\"in_feats\"]\n cycle_outputs = self.model[\"G\"].cycle_forward(\n feats, enc_h, dec_h, enc_h_cv, 
dec_h_cv, spkrvec, spkrvec_cv\n )\n if self.conf[\"use_vqvae_loss\"]:\n loss = self.calculate_vqvae_loss(batch, cycle_outputs[0][\"org\"], loss)\n loss = self.calculate_cyclevqvae_loss(batch, cycle_outputs, loss)\n\n if self.conf[\"use_spkradv_training\"]:\n for label in [\"cv\", \"recon\"]:\n loss = self.calculate_spkradv_loss(\n batch, cycle_outputs[0][label], loss, label=label, phase=phase\n )\n\n loss[\"objective\"] += loss[\"G\"]\n if phase == \"train\":\n self.step_model(loss, model=\"G\")\n return loss\n\n def forward_spkradv(self, batch, loss, phase=\"train\"):\n if self.conf[\"use_spkradv_training\"]:\n enc_h = self._get_enc_h(batch)\n dec_h, spkrvec = self._get_dec_h(batch)\n feats = batch[\"in_feats\"]\n outputs = self.model[\"G\"].forward(feats, enc_h, dec_h, spkrvec=spkrvec)\n if self.conf[\"causal\"]:\n # discard causal area\n er = self.model[\"G\"].encoder_receptive_size\n encoded = [e[:, er:] for e in outputs[\"encoded_unmod\"]]\n else:\n er = 0\n encoded = outputs[\"encoded_unmod\"]\n advspkr_class = self.model[\"SPKRADV\"].forward(encoded, detach=True)\n spkradv_loss = self.criterion[\"ce\"](\n advspkr_class.reshape(-1, advspkr_class.size(2)),\n batch[\"org_h\"][:, er:].reshape(-1),\n )\n loss[\"SPKRADV\"] = self.conf[\"alpha\"][\"ce\"] * spkradv_loss\n if phase == \"train\":\n self.step_model(loss, model=\"SPKRADV\")\n return loss\n\n def forward_spkrclassifier(self, batch, loss, phase=\"train\"):\n def return_sample(x):\n return self.model[\"C\"](x.transpose(1, 2)).transpose(1, 2)\n\n if self.conf[\"use_spkr_classifier\"]:\n real = return_sample(batch[\"in_feats\"])\n real = real.reshape(-1, real.size(2))\n h = batch[\"org_h\"].reshape(-1)\n loss[\"C_real\"] = self.criterion[\"ce\"](real, h)\n loss[\"C\"] += self.conf[\"alpha\"][\"ce\"] * loss[\"C_real\"]\n if phase == \"train\":\n self.step_model(loss, model=\"C\")\n return loss\n\n def step_model(self, loss, model=\"G\"):\n self.optimizer[model].zero_grad()\n loss[model].backward()\n if self.conf[\"optim\"][model][\"clip_grad_norm\"] != 0:\n clip_grad_norm(\n self.model[model].parameters(),\n self.conf[\"optim\"][model][\"clip_grad_norm\"],\n )\n self.optimizer[model].step()\n\n def calculate_vqvae_loss(self, batch, outputs, loss):\n emask = batch[\"encoder_mask\"]\n dmask = batch[\"decoder_mask\"]\n target = batch[\"out_feats\"]\n decoded = outputs[\"decoded\"]\n loss[\"G_l1\"] = self.criterion[\"fl1\"](\n decoded, target, mask=dmask, causal_size=self.conf[\"causal_size\"]\n )\n loss[\"G_mse\"] = self.criterion[\"fmse\"](\n decoded, target, mask=dmask, causal_size=self.conf[\"causal_size\"]\n )\n loss[\"G_stft\"] = self.criterion[\"fstft\"](\n decoded, target, causal_size=self.conf[\"causal_size\"]\n )\n\n # loss for vq\n encoded = outputs[\"encoded\"]\n emb_idx = outputs[\"emb_idx\"]\n for n in range(self.conf[\"n_vq_stacks\"]):\n loss[f\"G_commit{n}\"] = self.criterion[\"mse\"](\n encoded[n].masked_select(emask),\n emb_idx[n].masked_select(emask).detach(),\n )\n if not self.conf[\"ema_flag\"]:\n loss[f\"G_dict{n}\"] = self.criterion[\"mse\"](\n emb_idx[n].masked_select(emask),\n encoded[n].masked_select(emask).detach(),\n )\n loss = self._parse_vqvae_loss(loss)\n return loss\n\n def calculate_cyclevqvae_loss(self, batch, outputs, loss):\n def calculate_spkrcls_loss(batch, outputs):\n def return_sample(x):\n return self.model[\"C\"](x.transpose(1, 2)).transpose(1, 2)\n\n fake = return_sample(outputs[\"decoded\"])\n fake = fake.reshape(-1, fake.size(2))\n h = batch[\"cv_h\"].reshape(-1)\n return 
self.criterion[\"ce\"](fake, h)\n\n for c in range(self.conf[\"n_cycles\"]):\n for io in [\"cv\", \"recon\"]:\n lbl = f\"{c}cyc_{io}\"\n o = outputs[c][io]\n if io == \"cv\":\n emask = batch[\"encoder_mask\"]\n dmask = batch[\"decoder_mask\"]\n loss[f\"C_fake_{lbl}\"] = calculate_spkrcls_loss(batch, o)\n else:\n emask = batch[\"cycle_encoder_mask\"]\n dmask = batch[\"cycle_decoder_mask\"]\n target = batch[\"in_feats\"]\n decoded = o[\"decoded\"]\n cs = self.conf[\"causal_size\"] * 2 if self.conf[\"causal\"] else 0\n loss[f\"G_l1_{lbl}\"] = self.criterion[\"fl1\"](\n decoded,\n target,\n mask=dmask,\n causal_size=cs,\n )\n loss[f\"G_mse_{lbl}\"] = self.criterion[\"fmse\"](\n decoded,\n target,\n mask=dmask,\n causal_size=cs,\n )\n loss[f\"G_stft_{lbl}\"] = self.criterion[\"fstft\"](\n decoded, target, causal_size=cs\n )\n\n for n in range(self.conf[\"n_vq_stacks\"]):\n loss[f\"G_commit{n}_{lbl}\"] = self.criterion[\"mse\"](\n o[\"encoded\"][n].masked_select(emask),\n o[\"emb_idx\"][n].masked_select(emask).detach(),\n )\n if not self.conf[\"ema_flag\"]:\n loss[f\"G_dict{n}_{lbl}\"] = self.criterion[\"mse\"](\n o[\"emb_idx\"][n].masked_select(emask),\n o[\"encoded\"][n].masked_select(emask).detach(),\n )\n loss = self._parse_cyclevqvae_loss(loss)\n return loss\n\n def calculate_spkradv_loss(self, batch, outputs, loss, label=\"org\", phase=\"train\"):\n if self.conf[\"causal\"]:\n # discard causal area\n er = self.model[\"G\"].encoder_receptive_size\n encoded = [e[:, er:] for e in outputs[\"encoded_unmod\"]]\n else:\n er = 0\n encoded = outputs[\"encoded_unmod\"]\n advspkr_class = self.model[\"SPKRADV\"].forward(encoded)\n loss[f\"G_spkradv_{label}\"] = self.criterion[\"ce\"](\n advspkr_class.reshape(-1, advspkr_class.size(2)),\n batch[\"org_h\"][:, er:].reshape(-1),\n )\n if not label == \"recon\":\n loss[\"G\"] += self.conf[\"alpha\"][\"ce\"] * loss[f\"G_spkradv_{label}\"]\n else:\n loss[\"G\"] += (\n self.conf[\"alpha\"][\"cycle\"]\n * self.conf[\"alpha\"][\"ce\"]\n * loss[f\"G_spkradv_{label}\"]\n )\n return loss\n\n def _parse_vqvae_loss(self, loss):\n def _parse_vq(k):\n for n in range(self.conf[\"n_vq_stacks\"]):\n loss[\"G\"] += self.conf[\"alpha\"][k] * loss[f\"G_{k}{n}\"]\n return loss\n\n for k in [\"l1\", \"mse\", \"stft\"]:\n loss[\"G\"] += self.conf[\"alpha\"][k] * loss[f\"G_{k}\"]\n loss = _parse_vq(\"commit\")\n if not self.conf[\"ema_flag\"]:\n loss = _parse_vq(\"dict\")\n return loss\n\n def _parse_cyclevqvae_loss(self, loss):\n for c in range(self.conf[\"n_cycles\"]):\n alpha_cycle = self.conf[\"alpha\"][\"cycle\"]\n for io in [\"cv\", \"recon\"]:\n lbl = f\"{c}cyc_{io}\"\n for n in range(self.conf[\"n_vq_stacks\"]):\n loss[\"G\"] += (\n alpha_cycle\n * self.conf[\"alpha\"][\"commit\"]\n * loss[f\"G_commit{n}_{lbl}\"]\n )\n if not self.conf[\"ema_flag\"]:\n loss[\"G\"] += (\n alpha_cycle\n * self.conf[\"alpha\"][\"dict\"]\n * loss[f\"G_dict{n}_{lbl}\"]\n )\n\n if io == \"recon\":\n for k in [\"l1\", \"mse\", \"stft\"]:\n loss[\"G\"] += (\n alpha_cycle * self.conf[\"alpha\"][k] * loss[f\"G_{k}_{lbl}\"]\n )\n elif io == \"cv\":\n loss[\"G\"] += (\n alpha_cycle * self.conf[\"alpha\"][\"ce\"] * loss[f\"C_fake_{lbl}\"]\n )\n return loss\n\n def _check_cycle_start(self):\n if (\n self.conf[\"use_cyclic_training\"]\n and self.steps > self.conf[\"n_steps_cycle_start\"]\n ):\n self.cycle_flag = True\n\n if self.conf[\"use_cyclic_training\"] and not self.conf[\"use_spkr_classifier\"]:\n raise ValueError(\n \"use_cyclic_training requires use_spkr_classifier to be true\"\n )\n" ]
[ [ "torch.no_grad" ] ]
changyu98/VGGNet-PyTorch
[ "20041ce7415f38498d61977b7814db90c63d9969" ]
[ "examples/cifar/main.py" ]
[ "# Copyright 2020 Lorna Authors. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"\nEvaluate on cifar. Note that at the moment, training is not implemented (I am working on it).\nthat being said, evaluation is working. \n\"\"\"\n\nimport argparse\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\nimport PIL\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\n\nfrom apex import amp\nfrom vggnet import VGGNet\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR', default='data',\n help='path to dataset')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n help='model architecture (default: resnet18)')\nparser.add_argument('-j', '--workers', default=0, type=int, metavar='N',\n help='number of data loading workers (default: 0)')\nparser.add_argument('--epochs', default=300, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.05, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--opt_level', default=\"O1\", type=str,\n help=\"Choose which accuracy to train. 
(default: 'O1')\")\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://172.168.1.1:11111', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--image_size', default=32, type=int,\n help='image size')\nparser.add_argument('--num_classes', type=int, default=10,\n help=\"number of dataset category.\")\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n\nbest_acc1 = 0\nargs = parser.parse_args()\n\n\ndef main():\n args = parser.parse_args()\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. 
This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n global best_acc1\n args.gpu = gpu\n\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n # create model\n if 'vgg' in args.arch: # NEW\n if args.pretrained:\n model = VGGNet.from_pretrained(args.arch, num_classes=args.num_classes)\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n else:\n print(\"=> creating model '{}'\".format(args.arch))\n model = VGGNet.from_name(args.arch)\n\n else:\n if args.pretrained:\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=True)\n else:\n print(\"=> creating model '{}'\".format(args.arch))\n model = models.__dict__[args.arch]()\n\n if args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int(args.workers / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = model.cuda(args.gpu)\n else:\n # DataParallel will divide and allocate batch_size to all available GPUs\n if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda(args.gpu)\n\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level)\n\n # optionally resume from a checkpoint\n if args.resume:\n if 
os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n if args.gpu is not None:\n # best_acc1 may be from a checkpoint from a different GPU\n best_acc1 = best_acc1.to(args.gpu)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n amp.load_state_dict(checkpoint['amp'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n cudnn.benchmark = True\n\n # Data loading code\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.CIFAR10(\n root=args.data,\n train=True,\n download=True,\n transform=transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=True, sampler=train_sampler)\n\n test_dataset = datasets.CIFAR10(\n root=args.data,\n train=False,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ]))\n\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.evaluate:\n top1, top5 = validate(test_loader, model, criterion, args)\n with open(\"res.txt\", \"w\") as f:\n print(f\"Acc@1: {top1}\\tAcc@5: {top5}\", file=f)\n return\n\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch, args)\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, args)\n\n # evaluate on validation set\n acc1, _ = validate(test_loader, model, criterion, args)\n\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer': optimizer.state_dict(),\n 'amp': amp.state_dict(),\n }, is_best)\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.6f')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1,\n top5, prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n 
top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward() \n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.print(i)\n\n\ndef validate(test_loader, model, criterion, args):\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.6f')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(len(test_loader), batch_time, losses, top1, top5,\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(test_loader):\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.print(i)\n\n # TODO: this should also be done with the ProgressMeter\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, top5.avg\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth'):\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, \"model_best.pth\")\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, *meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def print(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.5 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return 
res\n\n\ndef get_parameter_number(model, image_size, channels=3):\n from torchstat import stat\n stat(model, (channels, image_size, image_size))\n\n\ndef print_state_dict(model):\n print(\"----------------------------------------------------------\")\n print(\"| state dict pram |\")\n print(\"----------------------------------------------------------\")\n for param_tensor in model.state_dict():\n print(param_tensor, '\\t', model.state_dict()[param_tensor].size())\n print(\"----------------------------------------------------------\")\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.distributed.init_process_group", "torch.save", "torch.no_grad", "torch.multiprocessing.spawn", "torch.nn.parallel.DistributedDataParallel", "torch.cuda.device_count", "torch.manual_seed", "torch.cuda.set_device", "torch.utils.data.DataLoader", "torch.utils.data.distributed.DistributedSampler", "torch.load", "torch.nn.CrossEntropyLoss", "torch.nn.DataParallel" ] ]
dfm/one-datum-pipeline
[ "ee37b35aaf05e4b8c006c7384baca4c7bf50cae8" ]
[ "workflow/scripts/figures/sigma_cmd.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom astropy.io import fits\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--input\", required=True, type=str)\nparser.add_argument(\"-o\", \"--output\", required=True, type=str)\nargs = parser.parse_args()\n\nwith fits.open(args.input) as f:\n data = f[1].data\n\nsigma = data[\"rv_est_uncert\"]\nnb_transits = data[\"dr2_rv_nb_transits\"].astype(np.int32)\nm = np.isfinite(sigma) & (nb_transits >= 3)\nsigma = sigma[m]\nnb_transits = nb_transits[m]\ncolor = data[\"bp_rp\"][m]\nmag = data[\"phot_g_mean_mag\"][m]\ndm = 5 * np.log10(1000 / data[\"parallax\"][m]) - 5\n\nrng = (color < 2.75) & (color > 0.15)\nrng &= (mag - dm > -4.0) & (mag - dm < 10.0)\ndenom, bx, by = np.histogram2d(color[rng], mag[rng] - dm[rng], bins=(80, 95))\n\nnum, bx, by = np.histogram2d(\n color[rng], mag[rng] - dm[rng], bins=(bx, by), weights=sigma[rng]\n)\n\nplt.figure(figsize=(7, 6))\nplt.contour(\n 0.5 * (bx[1:] + bx[:-1]),\n 0.5 * (by[1:] + by[:-1]),\n denom.T,\n cmap=\"Greys\",\n linewidths=[1],\n)\n\nv = np.zeros_like(num, dtype=np.float64)\nv[:] = np.nan\nm = denom > 50\nv[m] = num[m] / denom[m]\nplt.pcolor(\n bx,\n by,\n v.T,\n cmap=\"Reds\",\n alpha=1.0,\n edgecolor=\"none\",\n rasterized=True,\n)\n\nplt.ylim(plt.ylim()[::-1])\nplt.colorbar(label=\"average per-transit uncertainty [km/s]\")\nplt.xlabel(\"$G_\\mathrm{BP} - G_\\mathrm{RP}$\")\nplt.ylabel(\"$m_\\mathrm{G} +$ DM\")\n\nplt.savefig(args.output, bbox_inches=\"tight\")\n" ]
[ [ "numpy.histogram2d", "numpy.zeros_like", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.pcolor", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylim", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "numpy.isfinite", "numpy.log10", "matplotlib.pyplot.contour" ] ]
andrewnc/geometric-neural-processes
[ "56f6fb150685cb13393457beb275441e9fe268ba" ]
[ "tile_np.py" ]
[ "import matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\n\nimport pickle\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n# from scipy.stats import wasserstein_distance\n# from geomloss import SamplesLoss\nfrom utils import sliced_wasserstein_distance\nfrom tqdm import tqdm\n\n\nimport os\n\nimport utils\n\nuse_cuda = True\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n \n\nclass Encoder(nn.Module):\n \"\"\"takes in context points and returns a fixed length aggregation\"\"\"\n def __init__(self):\n super(Encoder, self).__init__()\n self.conv_tile = nn.Conv2d(3,16, kernel_size=1)\n self.fc_tile = nn.Linear(256, 256)\n self.fc1 = nn.Linear(256+16, 128)\n self.fc2 = nn.Linear(128, 128)\n # self.fc3 = nn.Linear(128, 128)\n self.fc4 = nn.Linear(128, 128)\n \n\n def forward(self, x, tile):\n \"\"\"x = coord of upper left of tile\n this returns the aggregated r value\n \"\"\"\n tile = tile.transpose(3,2).transpose(2,1)\n tile = self.conv_tile(tile)\n y = self.fc_tile(tile.view(tile.shape[0], tile.shape[1]*tile.shape[2]*tile.shape[3]))\n x = torch.stack(([coords.flatten() for coords in x]))\n x = torch.cat((x,y), -1)\n # original_x = x\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n # x = F.relu(self.fc3(x))\n output = self.fc4(x)\n return output.mean(0).view(1, 128)\n\nclass Decoder(nn.Module):\n def __init__(self, m, n):\n super(Decoder, self).__init__()\n self.m = m\n self.n = n\n self.fc1 = nn.Linear(144, 144)\n self.fc2 = nn.Linear(144, 144)\n self.fc3 = nn.Linear(144, 144)\n self.fc4 = nn.Linear(144, 48)\n\n def forward(self, r, inds):\n \"\"\"r is the aggregated data used to condition\"\"\"\n #x = torch.tensor([[i, j] for i in range(0,self.m) for j in range(0,self.n)]).float().to(device)\n #out = torch.cat((x, r.view(1,-1).repeat(1,self.m*self.n).view(self.m*self.n,128)), 1)\n inds = torch.stack(([coords.flatten() for coords in inds]))\n x = torch.cat((inds, r.repeat(inds.shape[0], 1)), -1)\n # original_x = x\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n # x = self.fc2(x) + original_x\n x = F.relu(self.fc3(x))\n out = torch.sigmoid(self.fc4(x))\n return out.view(64,4,4,3)\n\n\n# we probably want a better critic\nclass Critic(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(32*32, 28*28) \n self.fc2 = nn.Linear(28*28, 20*20) \n self.fc3 =nn.Linear(20*20, 16*16) \n self.fc4 =nn.Linear(16*16, 10*10) \n self.output = nn.Linear(10*10, 1)\n\n def forward(self, x):\n x = x.contiguous().view(-1, 32*32)\n out = self.fc4(F.relu(self.fc3(F.relu(self.fc2(F.relu(self.fc1(x)))))))\n out = self.output(out)\n return out.mean()\n\ndef plot_grad_flow(named_parameters):\n '''Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n \n Usage: Plug this function in Trainer class after loss.backwards() as \n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow'''\n ave_grads = []\n max_grads= []\n layers = []\n for n, p in named_parameters:\n if(p.requires_grad) and (\"bias\" not in n):\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color=\"c\")\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color=\"b\")\n plt.hlines(0, 0, len(ave_grads)+1, lw=2, color=\"k\" )\n 
plt.xticks(range(0,len(ave_grads), 1), layers, rotation=\"vertical\")\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom = -0.001, top=0.02) # zoom in on the lower gradient regions\n plt.xlabel(\"Layers\")\n plt.ylabel(\"average gradient\")\n plt.title(\"Gradient flow\")\n plt.grid(True)\n plt.legend([Line2D([0], [0], color=\"c\", lw=4),\n Line2D([0], [0], color=\"b\", lw=4),\n Line2D([0], [0], color=\"k\", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])\n plt.show()\nif __name__ == \"__main__\":\n\n batch_size=64\n test_batch_size=1\n\n c, m, n = 3, 32,32\n single_class = 0\n\n\n kwargs = {'num_workers': 10, 'pin_memory': True} if use_cuda else {}\n # kwargs = {}\n\n train_loader = torch.utils.data.DataLoader(\n datasets.cifar.CIFAR10('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.0,), (1.0,)),\n ])),\n batch_size=batch_size, shuffle=True, **kwargs)\n\n\n test_loader = torch.utils.data.DataLoader(\n datasets.cifar.CIFAR10('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.0,), (1.0,)),\n ])),\n batch_size=test_batch_size, shuffle=True, **kwargs)\n\n epochs = 10\n log_interval = 50\n\n encoder = Encoder().to(device)\n decoder = Decoder(m, n).to(device)\n # critic = Critic().to(device)\n\n optimizer = optim.Adam(list(encoder.parameters()) + list(decoder.parameters()), lr=1e-3)\n # criterion = nn.MSELoss()\n # criterion = SamplesLoss(\"sinkhorn\", p=2, blur=1.)\n # optimizer_critic = optim.RMSprop(critic.parameters(), lr=0.1)\n loss_values = []\n \n for epoch in range(1, epochs+1):\n encoder.train()\n decoder.train()\n # critic.train()\n\n progress = tqdm(train_loader)\n # progress = tqdm(range(100))\n # for _ in progress: \n for (ground_truth_images, targets) in progress:\n for image, target in zip(ground_truth_images, targets):\n if target != single_class:\n continue\n optimizer.zero_grad()\n num_tiles = np.random.randint(4, 64)\n tiles, inds, _, all_inds, tiled_ground_truth = utils.get_tiles(image, num_tiles=num_tiles)\n\n # tiles = tiled_ground_truth[26].repeat(len(tiles),1,1,1)\n # weird_ground_truth = tiled_ground_truth[26].repeat(len(tiled_ground_truth),1,1,1)\n weird_ground_truth = tiled_ground_truth\n image = image.to(device)\n tiles = tiles.float().to(device)\n tiled_ground_truth = tiled_ground_truth.float().to(device)\n weird_ground_truth = weird_ground_truth.float().to(device)\n one_hot_identity = torch.eye(8) # avoid magic numbers\n inds = inds / 4\n all_inds = all_inds / 4\n\n one_hot = one_hot_identity[inds].float().to(device)\n\n r = encoder(one_hot, tiles)\n fake_image = decoder(r, one_hot_identity[all_inds].float().to(device))\n # loss = torch.mean(torch.tensor([wasserstein_distance(x, y) for (x,y) in zip(fake_image.view(64,3*4*4).detach().cpu(), tiled_ground_truth.view(64,3*4*4).detach().cpu())], requires_grad=True))\n\n\n # loss = criterion(fake_image, weird_ground_truth)\n fake_image = fake_image.view(64,4*4*3)\n weird_ground_truth = weird_ground_truth.view(64,4*4*3)\n loss = 0.0\n for tile in range(64):\n loss += sliced_wasserstein_distance(fake_image[tile].unsqueeze(0), weird_ground_truth[tile].unsqueeze(0),num_projections=5, device=device)\n \n loss.backward()\n optimizer.step()\n # plot_grad_flow(encoder.named_parameters())\n # plot_grad_flow(decoder.named_parameters())\n\n\n # for t in range(5):\n # optimizer_critic.zero_grad()\n # for image, target in zip(ground_truth_images, targets):\n # num_tiles = 
np.random.randint(4, 64)\n # tiles, inds, _, all_inds, tiled_ground_truth = utils.get_tiles(image, num_tiles=num_tiles)\n\n # image = image.to(device)\n # tiles = tiles.float().to(device)\n # inds = inds.float().to(device)\n # all_inds = all_inds.float().to(device)\n # tiled_ground_truth = tiled_ground_truth.float().to(device)\n\n # # run the model to get r which will be concatenated onto every node pair in the decoder\n # r = encoder(inds, tiles)\n\n # fake_image = decoder(r, all_inds)\n\n # disc_real = critic(tiled_ground_truth) # we made need to change this to tiles?\n # disc_fake = critic(fake_image)\n # # gradient_penalty = utils.calc_gradient_penalty(critic, tiled_ground_truth, fake_image)\n # loss = disc_fake - disc_real# + gradient_penalty\n # loss.backward()\n # # w_dist = disc_real - disc_fake\n # optimizer_critic.step()\n # for p in critic.parameters():\n # p.data.clamp_(-0.01, 0.01)\n\n\n # optimizer.zero_grad()\n # for image, target in zip(ground_truth_images, targets):\n # num_tiles = np.random.randint(4, 64)\n # tiles, inds, _, all_inds, tiled_ground_truth = utils.get_tiles(image, num_tiles=num_tiles)\n\n # image = image.to(device)\n # tiles = tiles.float().to(device)\n # inds = inds.float().to(device)\n # all_inds = all_inds.float().to(device)\n # tiled_ground_truth = tiled_ground_truth.float().to(device)\n\n # # run the model to get r which will be concatenated onto every node pair in the decoder\n # r = encoder(inds, tiles)\n\n # fake_image = decoder(r, all_inds)\n # disc_fake = critic(fake_image)\n # # disc_fake.backward()\n # gen_loss = - disc_fake\n # gen_loss.backward()\n # progress.set_description(\"E{} - L{:.4f}\".format(epoch, loss.item()))\n progress.set_description(\"E{} - L{:.4f} - EMin{:.4f} - EMax{:.4f} - DMin{:.4f} - DMax{:.4f}\".format(epoch, loss.item(),\n torch.min(torch.tensor([torch.min(p.grad) for p in encoder.parameters()])), \n torch.max(torch.tensor([torch.max(p.grad) for p in encoder.parameters()])), \n torch.min(torch.tensor([torch.min(p.grad) for p in decoder.parameters()])), \n torch.max(torch.tensor([torch.max(p.grad) for p in decoder.parameters()])) \n ))\n loss_values.append(loss.item())\n\n if loss.item() <= 0.0001:\n break\n with open(\"encoder.pkl\", \"wb\") as of:\n pickle.dump(encoder, of)\n\n with open(\"decoder.pkl\", \"wb\") as of:\n pickle.dump(decoder, of)\n\n with open(\"loss.pkl\", \"wb\") as of:\n pickle.dump(loss_values, of)\n encoder.eval()\n decoder.eval()\n with torch.no_grad():\n\n for i, (ground_truth_image, target) in enumerate(train_loader):\n ground_truth_image = ground_truth_image[0].view(3, 32, 32)\n\n if target[0] != single_class:\n continue\n\n tiles, inds, frame, all_inds, tiled_ground_truth = utils.get_tiles(ground_truth_image, num_tiles=32)\n\n\n # tiles = tiled_ground_truth[26].repeat(len(tiles),1,1,1)\n # weird_ground_truth = tiled_ground_truth[26].repeat(len(tiled_ground_truth),1,1,1)\n weird_ground_truth = tiled_ground_truth\n image = image.to(device)\n tiles = tiles.float().to(device)\n tiled_ground_truth = tiled_ground_truth.float().to(device)\n weird_ground_truth = weird_ground_truth.float().to(device)\n one_hot_identity = torch.eye(8) # avoid magic numbers\n _inds = inds /4\n _all_inds = all_inds /4\n\n one_hot = one_hot_identity[_inds].float().to(device)\n\n r = encoder(one_hot, tiles)\n generated_image = decoder(r, one_hot_identity[_all_inds].float().to(device))\n\n for j, tile in enumerate(tiled_ground_truth):\n for ind in inds.detach().cpu():\n if all_inds[j][0].cpu().long() == ind[0].long() and 
all_inds[j][1].cpu().long() == ind[1].long():\n frame[j] = tiled_ground_truth[j]\n\n fig, ax = plt.subplots(nrows=8, ncols=8)\n count = 0\n for row in range(8):\n for col in range(8):\n ax[row][col].axis(\"off\")\n ax[row][col].imshow(frame[count].detach().cpu())\n count += 1\n plt.axis(\"off\")\n plt.savefig(\"{}sparse{}.png\".format(epoch, i))\n plt.close()\n\n\n fig, ax = plt.subplots(nrows=8, ncols=8)\n count = 0\n for row in range(8):\n for col in range(8):\n ax[row][col].axis(\"off\")\n ax[row][col].imshow(generated_image[count].detach().cpu())\n count += 1\n plt.axis(\"off\")\n plt.savefig(\"{}fake{}.png\".format(epoch, i))\n plt.close()\n\n\n fig, ax = plt.subplots(nrows=8, ncols=8)\n count = 0\n for row in range(8):\n for col in range(8):\n ax[row][col].axis(\"off\")\n ax[row][col].imshow(weird_ground_truth[count].detach().cpu())\n count += 1\n plt.axis(\"off\")\n plt.savefig(\"{}groundtruth{}.png\".format(epoch,i))\n plt.close()\n\n if i >= 20:\n break\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.eye", "matplotlib.pyplot.subplots", "numpy.random.randint", "matplotlib.pyplot.axis", "torch.device", "matplotlib.lines.Line2D", "torch.min", "torch.max", "matplotlib.pyplot.title", "matplotlib.pyplot.close", "torch.nn.Conv2d", "matplotlib.pyplot.show", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.grid", "torch.no_grad", "matplotlib.pyplot.ylabel" ] ]
thomasreolon/DeepfakeDetection
[ "5bbeefdeeb05b2abd45336ad3382da6ce90c2fef" ]
[ "src/videoanalizer/classifier.py" ]
[ "from sklearn import svm\nimport numpy as np\nfrom .video_edits import save_video_landmarks\n\n\nclass OneClassRbf():\n def __init__(self, video_analizer, rich_features=0, person=None, config=None, outliers=True, custom_params=None):\n self.video_analizer = video_analizer\n self.rich = rich_features\n self.config = config or self.video_analizer._get_config({'interval':[(0,1e10)], 'frames_per_sample':300})\n self.outliers = outliers\n custom_params = custom_params or dict(kernel='rbf', gamma=1e-05, degree=20, nu=0.1, shrinking=True)\n self.clf = svm.OneClassSVM(**custom_params)\n\n def set_video_analizer(self, video_analizer):\n \"\"\"save association between classifier output and real class behind (eg. 1->Real, -1->Fake)\"\"\"\n self.video_analizer = video_analizer\n\n def get_sklearn_clf(self):\n return self.clf\n\n def predict_video(self, path_to_video, return_label=False, landmark_video=False):\n \"\"\"\n input:\n - video path\n oputput:\n - integer that maps 'labels_map' into person is real or not\n \"\"\"\n x, _ = self.video_analizer.process_video(files=[path_to_video], config=self.config, rich_features=1)\n if len(x)==0: raise Exception(f'0 samples have been extracted from {path_to_video}')\n\n y = self.predict(x)\n y = [max(0,v) for v in y]\n y = sum(y)/len(y)\n label = (y>0.5 and 'real' or 'fake')\n\n if landmark_video:\n save_video_landmarks(path_to_video, label, self.video_analizer)\n\n return return_label and label or y\n\n def _get_x(self, x, rich_features, use_outliers=False):\n num = int(len(x)/20)\n if rich_features==0:\n tmp = [s[:190] for s in outliers[:num]]\n x = [s[:190] for s in x]\n elif rich_features==2:\n tmp = [s[190:] for s in outliers[:num]]\n x = [s[190:] for s in x]\n else:\n tmp = outliers[:num]\n if use_outliers:\n x += tmp\n return x\n\n def fit(self, x):\n x = self._get_x(x, self.rich, self.outliers)\n self.clf.fit(x)\n return self\n\n def predict(self, x):\n x = self._get_x(x, self.rich)\n return self.clf.predict(x)\n\n\nclass BoostedOneClassRbf(OneClassRbf):\n def __init__(self, video_analizer, rich_features=None, config=None, person=None):\n self.config = config or self.video_analizer._get_config({'interval':[(0,1e10)], 'frames_per_sample':300})\n self.video_analizer = video_analizer\n self.predictions = []\n self.person = person\n\n self.params = [\n # rich_features, outliers, params, weight\n (0, True, {'kernel':'poly', 'gamma':0.005, 'degree':3, 'nu':0.4, 'shrinking':True}, 0.6),\n (0, True, {'kernel':'rbf', 'gamma':1e-05, 'degree':20, 'nu':0.3, 'shrinking':True}, 0.95),\n (0, True, {'kernel':'rbf', 'gamma':0.01, 'degree':5, 'nu':0.05, 'shrinking':False}, 0.75),\n (1, True, {'kernel':'rbf', 'gamma':1e-05, 'degree':20, 'nu':0.3, 'shrinking':True}, 0.95),\n (1, True, {'kernel':'poly', 'gamma':'scale', 'degree':2, 'nu':0.8, 'shrinking':False}, 0.5),\n (2, True, {'kernel':'rbf', 'gamma':0.1, 'degree':10, 'nu':0.3, 'shrinking':True}, 0.75),\n (2, True, {'kernel':'poly', 'gamma':'scale', 'degree':2, 'nu':0.8, 'shrinking':False}, 0.6),\n (0, False, {'kernel':'rbf', 'gamma':1e-05, 'degree':20, 'nu':0.3, 'shrinking':True}, 0.85),\n (0, False, {'kernel':'rbf', 'gamma':0.01, 'degree':5, 'nu':0.05, 'shrinking':False}, 0.75),\n (0, False, {'kernel':'poly', 'gamma':0.005, 'degree':3, 'nu':0.4, 'shrinking':True}, 0.6),\n (0, False, {'kernel':'sigmoid', 'gamma':1e-05, 'degree':3, 'nu':0.2, 'shrinking':False}, 0.75),\n (1, False, {'kernel':'rbf', 'gamma':1e-05, 'degree':20, 'nu':0.1, 'shrinking':True}, 0.85),\n (1, True, {'kernel':'rbf', 'gamma':1e-05, 'degree':20, 
'nu':0.1, 'shrinking':True}, 0.85),\n (0, False, {'kernel':'rbf', 'gamma':1e-05, 'degree':20, 'nu':0.1, 'shrinking':True}, 0.85),\n (0, True, {'kernel':'rbf', 'gamma':1e-05, 'degree':20, 'nu':0.1, 'shrinking':True}, 0.75)\n ]\n self.clfs = [\n OneClassRbf(video_analizer, rich_features=r, outliers=o, custom_params=p) for r, o, p, w in self.params\n ]\n\n def get_sklearn_clf(self):\n return self.clfs[0]\n\n\n def fit(self, x):\n for clf in self.clfs:\n clf.fit(x)\n return self\n\n def predict(self, x):\n res=np.array([0]*len(x), dtype=\"float64\")\n for index, clf in enumerate(self.clfs):\n p = np.array(clf.predict(x), dtype=\"float64\")\n p *= [self.params[index][3]]\n res += p\n\n return [r>0 and 1 or 0 for r in res]\n\n\noutliers = [[0.5668, 0.0, -0.0157, 0.0159, -0.0128, -0.0108, 0.0352, 0.0, -0.0119, -0.0419, -0.0559, -0.0499, -0.0184, -0.0287, -0.0027, -0.2077, -0.1195, -0.1129, 0.067, 0.0, -0.0157, -0.0175, -0.1148, -0.001, -0.0149, 0.0, -0.0066, 0.0031, -0.0136, -0.0333, -0.0102, -0.0888, 0.01, 0.1644, -0.1268, 0.0773, -0.14, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.005, 0.068, -0.0158, 0.0013, 0.0, -0.0148, 0.0026, -0.0491, -0.044, -0.0244, 0.0719, 0.2119, -0.0618, 0.0447, -0.1166, 0.0815, 0.0654, -0.0058, 0.2217, 0.0, -0.0064, 0.037, 0.0263, -0.005, -0.0099, 0.0598, 0.0356, -0.0604, -0.0357, 0.0942, 0.0652, 0.0617, 0.0748, 0.0, -0.0051, -0.0415, -0.0099, -0.0119, -0.0643, 0.0718, 0.1946, -0.0796, -0.0051, -0.1582, 0.1313, 0.0993, 0.0, -0.0021, 0.281, 0.0098, 0.0054, 0.0402, 0.005, -0.0213, 0.1622, 0.0973, 0.1201, 0.0709, 0.0, -0.0052, -0.0252, -0.0162, 0.0064, -0.007, 0.1887, 0.0966, 0.0271, -0.0932, 0.0577, 0.1739, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0665, -0.0282, 0.1797, 0.0749, -0.0335, -0.0234, 0.0261, 0.0096, 0.1, -0.0854, 0.1831, 0.122, 0.0717, -0.1545, -0.0928, 0.1395, -0.0024, 0.2918, -0.2926, -0.1026, 0.023, -0.3698, -0.1945, 0.0023, 0.2053, 0.06, -0.3314, -0.0182, 0.1919, 0.0051, 0.1663, -0.1056, 0.1835, 0.0567, -0.0476, -0.0215, 0.1246, -0.056, 0.1493, -0.0944, 0.4661, -0.0183, -0.1699, -0.1496, 0.7366, 0.0444, -0.1673, -0.3661, 0.4099, -0.2899, 0.4021, -0.3306, 0.0177, -0.0941, -0.4351, 0.04096300733862513, 0.1651941224057379, 3.09, 0.01790474764115621, 0.13071123021028144, 2.62, 0.0, 0.0, 0.0, 0.025755578852778196, 0.07140381141131658, 0.77, 0.013258948629624083, 0.09924583468002997, 1.12, 0.8847865807997604, 0.38513136404597004, 2.24, 0.0019514752134192, 0.04468127567875761, 1.46, 0.004401677400029953, 0.040583092227565704, 1.16, 0.0, 0.0, 0.0, 0.0020682941440766816, 0.043004116430106404, 1.57, 0.050404373221506656, 0.21714876966537514, 3.54, 0.29953571963456643, 0.511564709015184, 2.85, 0.027073536019170282, 0.10682310242870138, 1.0, 0.004883929908641605, 0.06584303709030516, 1.38, 0.5023828066496929, 0.7206147207693282, 2.69, 0.31602965403624383, 0.6486453258470481, 2.76, -0.28748494833008836, 0.0575680279316928, 0.023, 0.050322450202186605, 0.024143624356788837, 0.17300000000000001, 50.8400273133606, 2.063035944966046, 69.68995623474017, 20.693244918392917, 3.2719748822137813, 30.804058174208155], [0.6711, -0.1493, 0.0, -0.0656, -0.1673, -0.04, 0.0328, -0.1032, 0.0397, 0.0042, -0.1259, -0.0548, -0.0858, -0.1219, -0.0069, 0.1284, 0.26, -0.1796, 0.0919, -0.0425, 0.0, 0.0457, -0.0637, -0.0297, -0.0574, 0.0059, 0.063, 0.013, -0.0127, 0.0488, -0.0636, 0.0122, 0.1106, 0.0965, 0.0625, -0.0359, 0.0889, 0.0, 0.0936, 0.1861, -0.0793, 0.4882, 0.0178, 0.3629, -0.0233, 0.34, -0.0371, -0.1102, 0.2411, 0.0021, 
0.6132, -0.6469, 0.3316, 0.1159, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5336, -0.0181, 0.2415, 0.7633, -0.0341, -0.0418, 0.0276, -0.0256, 0.0498, 0.4847, 0.147, -0.0448, -0.0054, 0.4367, 0.1982, -0.0555, 0.2712, 0.4536, 0.0434, 0.0342, 0.332, 0.1732, 0.2231, 0.4081, 0.1446, 0.0135, -0.0582, 0.4888, 0.0857, 0.0828, 0.0173, -0.1316, -0.0219, -0.0634, -0.0331, -0.0204, 0.1612, 0.0992, -0.1754, 0.0408, 0.012, 0.1243, 0.2895, 0.1768, -0.0369, -0.0062, -0.1626, -0.2076, 0.5249, 0.1487, 0.478, -0.2397, 0.5716, 0.5592, -0.0813, -0.0523, -0.0755, -0.0678, 0.0111, 0.5262, 0.1542, -0.0631, 0.0086, 0.4531, 0.2636, 0.0844, 0.1611, 0.3212, 0.0623, -0.0772, -0.2062, 0.4351, -0.3982, 0.2898, -0.2788, 0.3213, 0.0225, 0.0821, -0.0715, -0.1175, 0.0154, -0.1399, 0.0317, -0.1914, 0.2137, 0.2834, -0.0535, -0.1627, 0.173, -0.2616, 0.2421, -0.421, 0.416, -0.0437, 0.0155, -0.0494, 0.0988, 0.092, -0.2675, -0.0116, -0.0429, -0.1159, 0.0975, 0.026, -0.2276, 0.4329, 0.1221, -0.0934, 0.4089, 0.6441, -0.0319, 0.0439, -0.1681, 0.5979, -0.366, 0.2663, 0.2186, -0.1617, 0.0461, 0.0953, 0.32237777777777776, 0.7855161866608306, 3.68, 0.07697777777777778, 0.25317929429897773, 1.86, 0.3645555555555556, 0.4481293491618764, 1.43, 0.0, 0.0, 0.0, 0.05486666666666666, 0.29531957078542714, 2.24, 0.22319999999999998, 0.3925528754193503, 1.89, 0.00791111111111111, 0.08120257856393537, 0.98, 0.8197333333333333, 0.7105002431776892, 2.32, 0.09764444444444444, 0.3515556005056861, 2.57, 0.9024555555555556, 0.6521707966665568, 2.41, 0.03256666666666667, 0.14499414164410615, 1.32, 0.4325444444444444, 0.6650873570209674, 2.55, 0.048, 0.1412326685532305, 0.94, 0.03514444444444445, 0.1682652708018642, 1.71, 0.3052888888888889, 0.5293482626871241, 3.01, 0.2893444444444444, 0.5535195602508071, 2.08, 0.07009666666666667, 0.047453399245997126, 0.204, -0.016556666666666667, 0.11641756315569876, 0.212, 59.602499589957766, 3.7370202577538416, 71.04653404635586, 22.538487290264968, 4.273633842775984, 36.083514241270905], [0.0604, -0.2092, 0.0, -0.1145, 0.0176, 0.0, 0.3706, -0.0728, -0.1666, -0.0393, 0.0059, -0.0887, 0.0, 0.1192, -0.0579, 0.1488, -0.1503, 0.1561, 0.2253, 0.1459, 0.0, -0.1383, 0.0356, 0.0, -0.0919, -0.0479, -0.1482, -0.0257, 0.0472, -0.0579, 0.0, -0.0435, 0.194, 0.0902, 0.1956, -0.352, -0.0303, 0.0, 0.5518, -0.2754, 0.0, -0.5802, -0.1717, -0.1496, -0.0997, -0.4293, 0.0483, 0.0, 0.0798, 0.1364, 0.0847, 0.812, -0.6585, -0.1052, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.077, 0.0, -0.1922, 0.0144, 0.1697, -0.2015, -0.5292, 0.3215, 0.0, 0.2462, -0.1541, -0.2192, 0.5318, -0.0501, 0.1113, 0.0, 0.2724, 0.3726, 0.3077, -0.0237, 0.2124, -0.0721, 0.0, -0.2894, -0.186, 0.4776, -0.5881, 0.4923, -0.0042, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1991, 0.0966, -0.0757, -0.0315, -0.1709, 0.0, 0.203, 0.0549, 0.1465, -0.5405, 0.662, 0.4547, 0.2514, -0.0305, 0.1133, -0.0446, 0.0, -0.065, -0.0868, 0.229, -0.2927, 0.3576, 0.0011, -0.0899, 0.279, 0.4974, 0.0, -0.311, -0.1909, 0.2391, -0.2996, 0.4124, -0.3951, 0.1435, -0.0288, 0.0, -0.0637, -0.0363, -0.1502, -0.0397, -0.0616, -0.0563, 0.1203, 0.0, -0.3806, -0.217, 0.233, -0.491, 0.2202, -0.5365, 0.0, -0.1396, -0.0819, -0.3198, 0.1586, -0.0662, -0.4151, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2985, -0.1467, 0.2208, 0.0104, 0.6458, -0.019, 0.2235, -0.3859, 0.271, -0.3629, 0.2145, 0.0096, -0.7343, 0.0182, 0.1756, 0.03946015424164525, 0.11353160700971149, 0.76, 0.027429305912596392, 0.12092009414018179, 0.95, 
1.6944473007712082, 0.66615075485933, 2.88, 0.0, 0.0, 0.0, 0.876375321336761, 0.31342534649073966, 1.55, 2.0053213367609257, 0.3421957107486421, 2.89, 0.0, 0.0, 0.0, 0.2320051413881748, 0.34648004497719126, 1.2, 0.02344473007712082, 0.08690307311868854, 0.52, 0.1596401028277635, 0.2008679726921638, 0.86, 0.004061696658097687, 0.036001375269368203, 0.36, 0.35000000000000003, 0.5191212666228906, 2.05, 0.026118251928020562, 0.10259910952889222, 0.66, 0.0, 0.0, 0.0, 0.14724935732647815, 0.26150914994947894, 1.41, 0.07997429305912596, 0.24906973318122796, 1.26, 0.18956555269922878, 0.04344744123817184, 0.303, 0.15573521850899744, 0.08926117375499466, 0.309, 60.5111533915968, 3.964483264938189, 68.31149244453673, 18.13703051112263, 2.669339543020563, 25.413775791881072], [0.8283, -0.3906, 0.4145, 0.0119, -0.0753, -0.1646, 0.0026, 0.168, -0.0531, 0.0214, -0.3464, -0.0738, -0.1096, 0.0899, -0.0906, 0.0784, -0.2467, 0.1063, 0.1582, -0.3116, 0.4365, 0.017, -0.0925, -0.1138, 0.0158, 0.1604, -0.0584, -0.0696, -0.2612, -0.0433, -0.094, 0.1458, -0.0276, 0.0543, -0.1361, 0.0673, 0.1885, -0.1472, -0.0332, 0.1567, 0.2396, 0.0067, -0.1967, -0.0384, 0.0781, 0.4411, 0.0275, 0.2368, 0.0882, 0.1249, -0.0242, 0.1335, -0.1425, -0.0059, 0.033, 0.0272, -0.0805, 0.08, 0.1943, 0.0207, -0.0383, -0.1142, -0.0015, -0.0091, 0.143, 0.017, -0.0956, 0.0226, -0.0413, 0.1622, 0.2822, -0.0265, 0.3522, 0.6002, 0.4577, 0.0248, -0.1309, 0.2525, 0.0247, 0.3263, 0.031, 0.195, -0.1232, 0.3813, 0.1659, 0.0948, 0.146, 0.0961, 0.0822, 0.0335, 0.0188, 0.0459, 0.2733, 0.0623, 0.0327, -0.0361, -0.0629, 0.0796, 0.0607, 0.0938, -0.0881, -0.005, -0.0264, 0.1529, -0.0373, 0.0333, -0.0011, -0.0281, -0.0152, 0.1102, 0.045, -0.0475, 0.2935, 0.3417, -0.004, 0.0023, 0.1202, 0.0713, 0.2511, 0.1157, 0.1038, -0.0472, 0.185, 0.1576, 0.6059, 0.0746, -0.3107, 0.1881, -0.084, 0.3256, -0.0636, 0.4157, -0.3164, 0.5624, 0.1312, 0.197, -0.1433, 0.1843, -0.0056, 0.0504, -0.1031, 0.4522, -0.311, 0.5488, -0.1659, -0.0689, -0.037, -0.0516, -0.0861, -0.0402, 0.282, -0.1364, 0.1069, -0.1375, 0.0033, 0.3338, -0.1674, 0.0813, -0.2315, 0.3464, -0.2836, -0.2976, 0.0727, 0.0731, -0.0101, 0.0477, -0.0087, 0.2264, -0.0468, -0.076, 0.0477, -0.1451, 0.0471, -0.0682, -0.1109, 0.3954, 0.1755, -0.0542, 0.1322, 0.6936, -0.2043, 0.1708, -0.3604, 0.4517, -0.4989, 0.7104, -0.1268, -0.4846, -0.0391, -0.2125, 0.51826130943778, 0.8039979758243925, 3.77, 0.2160803584332996, 0.48471018742424565, 3.26, 0.5217806041335453, 0.7202355308979005, 3.24, 0.08902442549501373, 0.275590187713772, 2.09, 0.02212602977308859, 0.10983624838842163, 1.64, 0.10430409018644314, 0.3781682034286345, 3.8, 0.04292527821939587, 0.16813816521780403, 1.88, 0.021241508888567715, 0.1632993023662222, 2.71, 0.14182685359155947, 0.3862180777863591, 2.79, 0.14693163751987282, 0.3351967146323123, 1.99, 0.0802066772655008, 0.20197807913306665, 1.53, 0.9173276485041191, 0.9422008909425978, 3.68, 0.05328804740569447, 0.18454785977701238, 1.83, 0.08386327503974562, 0.30207530837393454, 2.81, 0.379748518572048, 0.6926469212668469, 4.1, 0.3044182685359156, 0.6556080281853044, 4.06, -0.012605434311316662, 0.14125467802358013, 0.364, 0.012115768174591705, 0.20750933135654942, 0.48200000000000004, 49.4334732902914, 6.385549361122664, 75.08448574772287, 19.66082626727762, 4.243230304901102, 44.04815546648918], [0.2558, 0.0757, -0.0849, -0.0901, -0.0252, -0.0708, -0.0609, -0.0971, -0.2166, -0.01, 0.0313, -0.0639, -0.0857, -0.087, -0.0243, -0.0835, 0.0079, -0.0713, 0.0541, 0.2177, 0.1856, -0.0111, -0.0082, -0.0556, -0.0596, 
-0.0934, 0.0671, 0.2071, 0.2493, -0.0117, -0.0594, -0.0222, -0.0384, 0.3403, -0.139, -0.1717, -0.0721, 0.0307, 0.1262, 0.2338, 0.0452, 0.0791, -0.0272, 0.2133, 0.1824, 0.3069, 0.0661, 0.0408, 0.011, -0.1213, 0.3361, -0.1077, 0.0886, -0.1253, -0.0961, -0.087, -0.0424, 0.0054, -0.0491, 0.1696, 0.18, 0.1733, 0.0752, -0.0253, -0.0055, -0.0715, 0.1941, -0.0642, -0.0327, -0.1205, 0.2175, 0.1564, 0.4385, 0.5823, 0.3846, 0.0544, 0.043, 0.0602, 0.2853, 0.1926, -0.0469, 0.2298, -0.1435, 0.4639, -0.0406, 0.1554, 0.0369, 0.1123, 0.0112, -0.001, -0.029, 0.0078, 0.0998, 0.0267, -0.007, 0.0366, 0.083, 0.1442, 0.0552, 0.232, 0.1213, 0.1519, 0.0759, 0.0153, 0.0154, 0.0758, 0.0889, -0.0354, 0.0104, -0.0162, 0.1688, 0.0115, 0.4652, 0.4439, 0.1269, 0.0198, -0.0134, 0.0892, 0.3382, -0.0676, 0.1816, -0.0941, 0.4789, 0.0495, 0.3557, -0.0008, -0.0939, -0.0134, 0.1884, 0.2454, 0.02, 0.0621, -0.1433, 0.4953, 0.0238, 0.3538, 0.3118, 0.2169, 0.2421, 0.1628, -0.1768, 0.4622, -0.2386, 0.4968, -0.2899, 0.4228, 0.105, 0.0383, -0.0517, -0.119, 0.431, -0.0742, 0.1033, -0.2183, 0.1332, 0.0899, -0.1004, -0.2203, 0.5707, -0.1508, 0.0653, -0.3785, 0.1322, -0.0153, -0.0315, 0.0829, 0.0405, 0.185, -0.0658, -0.0655, -0.0544, 0.0038, 0.0113, 0.2831, -0.1621, 0.183, 0.1372, -0.1617, 0.2164, 0.402, -0.194, 0.1034, -0.1124, 0.4221, -0.3285, 0.1455, -0.3261, -0.063, 0.1124, -0.0804, 0.2567942519473543, 0.534612752275683, 2.97, 0.15767727639000806, 0.3999414875440235, 3.75, 0.20451786193929625, 0.37132168045232783, 2.35, 0.06933319903303788, 0.22034446676999517, 1.87, 0.08959172710179965, 0.24985514440233625, 2.01, 0.10627652430835346, 0.36215873126842835, 3.94, 0.014255976363148, 0.09669576477386017, 1.35, 0.11425530486167071, 0.2982300101530305, 2.28, 0.11835347837765244, 0.29447023630665214, 1.8, 0.36162234756916467, 0.537825091369842, 2.96, 0.15579170024174052, 0.3284885258992645, 4.88, 0.5593654311039484, 0.7178688532919654, 3.36, 0.045218909481600865, 0.180521446474728, 2.17, 0.06924321783507924, 0.2198472675500452, 1.83, 0.39065404243889335, 0.6446846258672656, 3.82, 0.15600053720118184, 0.4364847024972141, 3.03, 0.2645349180768197, 0.2242810698892728, 1.078, -0.004470185334407736, 0.1452708358580356, 0.461, 53.76621779435901, 3.252069717848224, 65.32908999825426, 18.71242447856594, 2.295241191461679, 30.127064244628958], [0.2558, 0.0757, -0.0849, -0.0901, -0.0252, -0.0708, -0.0609, -0.0971, -0.2166, -0.01, 0.0313, -0.0639, -0.0857, -0.087, -0.0243, -0.0835, 0.0079, -0.0713, 0.0541, 0.2177, 0.1856, -0.0111, -0.0082, -0.0556, -0.0596, -0.0934, 0.0671, 0.2071, 0.2493, -0.0117, -0.0594, -0.0222, -0.0384, 0.3403, -0.139, -0.1717, -0.0721, 0.0307, 0.1262, 0.2338, 0.0452, 0.0791, -0.0272, 0.2133, 0.1824, 0.3069, 0.0661, 0.0408, 0.011, -0.1213, 0.3361, -0.1077, 0.0886, -0.1253, -0.0961, -0.087, -0.0424, 0.0054, -0.0491, 0.1696, 0.18, 0.1733, 0.0752, -0.0253, -0.0055, -0.0715, 0.1941, -0.0642, -0.0327, -0.1205, 0.2175, 0.1564, 0.4385, 0.5823, 0.3846, 0.0544, 0.043, 0.0602, 0.2853, 0.1926, -0.0469, 0.2298, -0.1435, 0.4639, -0.0406, 0.1554, 0.0369, 0.1123, 0.0112, -0.001, -0.029, 0.0078, 0.0998, 0.0267, -0.007, 0.0366, 0.083, 0.1442, 0.0552, 0.232, 0.1213, 0.1519, 0.0759, 0.0153, 0.0154, 0.0758, 0.0889, -0.0354, 0.0104, -0.0162, 0.1688, 0.0115, 0.4652, 0.4439, 0.1269, 0.0198, -0.0134, 0.0892, 0.3382, -0.0676, 0.1816, -0.0941, 0.4789, 0.0495, 0.3557, -0.0008, -0.0939, -0.0134, 0.1884, 0.2454, 0.02, 0.0621, -0.1433, 0.4953, 0.0238, 0.3538, 0.3118, 0.2169, 0.2421, 0.1628, -0.1768, 0.4622, -0.2386, 0.4968, -0.2899, 0.4228, 0.105, 0.0383, 
-0.0517, -0.119, 0.431, -0.0742, 0.1033, -0.2183, 0.1332, 0.0899, -0.1004, -0.2203, 0.5707, -0.1508, 0.0653, -0.3785, 0.1322, -0.0153, -0.0315, 0.0829, 0.0405, 0.185, -0.0658, -0.0655, -0.0544, 0.0038, 0.0113, 0.2831, -0.1621, 0.183, 0.1372, -0.1617, 0.2164, 0.402, -0.194, 0.1034, -0.1124, 0.4221, -0.3285, 0.1455, -0.3261, -0.063, 0.1124, -0.0804, 0.2567942519473543, 0.534612752275683, 2.97, 0.15767727639000806, 0.3999414875440235, 3.75, 0.20451786193929625, 0.37132168045232783, 2.35, 0.06933319903303788, 0.22034446676999517, 1.87, 0.08959172710179965, 0.24985514440233625, 2.01, 0.10627652430835346, 0.36215873126842835, 3.94, 0.014255976363148, 0.09669576477386017, 1.35, 0.11425530486167071, 0.2982300101530305, 2.28, 0.11835347837765244, 0.29447023630665214, 1.8, 0.36162234756916467, 0.537825091369842, 2.96, 0.15579170024174052, 0.3284885258992645, 4.88, 0.5593654311039484, 0.7178688532919654, 3.36, 0.045218909481600865, 0.180521446474728, 2.17, 0.06924321783507924, 0.2198472675500452, 1.83, 0.39065404243889335, 0.6446846258672656, 3.82, 0.15600053720118184, 0.4364847024972141, 3.03, 0.2645349180768197, 0.2242810698892728, 1.078, -0.004470185334407736, 0.1452708358580356, 0.461, 53.76621779435901, 3.252069717848224, 65.32908999825426, 18.71242447856594, 2.295241191461679, 30.127064244628958], [0.8877, 0.7684, -0.0648, -0.059, -0.0537, -0.0488, 0.3206, 0.2029, 0.0, -0.0368, -0.1304, -0.0547, -0.0742, 0.8137, 0.4681, -0.4201, 0.226, 0.0741, 0.6479, 0.8781, -0.0561, -0.047, -0.0697, -0.0423, 0.1677, -0.0547, 0.0, -0.0692, -0.103, -0.0473, -0.0642, 0.7557, 0.7032, -0.6473, 0.3842, -0.023, 0.6505, -0.0345, -0.1949, -0.0451, -0.026, -0.0742, -0.0493, 0.0, -0.0725, -0.0744, -0.0221, -0.0394, 0.6232, 0.6646, -0.7753, 0.4375, -0.0169, 0.4622, 0.1666, -0.0401, -0.0231, -0.066, -0.0438, 0.0, -0.0355, 0.2513, -0.0258, -0.0351, -0.0525, -0.0576, -0.0965, 0.098, -0.059, -0.1636, 0.1192, 0.0104, 0.3492, 0.068, 0.0, 0.158, 0.6682, -0.1282, 0.252, -0.0556, 0.1571, -0.0738, 0.0067, -0.3494, -0.304, 0.0228, -0.0863, 0.0323, 0.0, -0.0843, -0.1272, -0.0338, -0.0459, -0.0686, -0.0753, 0.1452, -0.1493, -0.0985, -0.0349, -0.0497, -0.033, 0.0, -0.0485, -0.0732, -0.0195, -0.0264, -0.0395, -0.0434, 0.0593, -0.0152, -0.1155, -0.0518, -0.0944, 0.0, 0.3323, 0.2756, -0.0446, -0.0154, 0.3923, -0.1241, 0.1549, 0.0147, -0.0284, 0.2615, 0.0, -0.0921, -0.139, -0.0369, -0.0501, -0.075, -0.0823, 0.226, -0.2677, 0.289, -0.1145, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2147, 0.6653, 0.0695, -0.1104, -0.1211, -0.057, 0.0314, 0.1971, -0.1184, -0.0371, 0.1806, -0.1666, 0.1792, -0.2609, 0.2023, -0.254, -0.4234, -0.0286, -0.0443, -0.0486, -0.1293, -0.0589, 0.3297, -0.0408, -0.0601, -0.0659, -0.0753, 0.076, -0.0597, -0.1363, 0.3807, -0.2974, 0.179, 0.0016, 0.6748, -0.7175, 0.3812, -0.1492, 0.2503, -0.5838, 0.0021, -0.1143, 0.2841, 0.2821, 0.257, 0.2690677966101695, 0.7297143477429539, 3.96, 0.095, 0.2976254898414949, 1.9, 0.049067796610169495, 0.2503116270928239, 1.64, 0.006016949152542373, 0.03452191223832531, 0.23, 0.6968644067796611, 0.6154379715850459, 1.91, 0.028474576271186443, 0.12490421495272304, 0.91, 0.005084745762711864, 0.038724270029816474, 0.3, 0.11076271186440678, 0.29494658171815125, 1.39, 0.05974576271186441, 0.2398250134125225, 1.24, 0.0, 0.0, 0.0, 0.05686440677966102, 0.15515273586046452, 0.85, 0.4641525423728814, 0.8392029372870206, 2.34, 0.03576271186440678, 0.2433154010175, 2.15, 0.02093220338983051, 0.10492925209869997, 0.71, 0.12050847457627117, 0.4036063014106086, 2.29, 0.22423728813559324, 
0.6843435637060279, 3.22, 0.04338135593220339, 0.149987339549977, 0.281, -0.5637288135593223, 0.08524507650347839, -0.075, 31.818927214988065, 5.9516218874086375, 62.3394738508435, 15.537185555071273, 5.14507504616545, 31.693532463264507], [0.5795, -0.4229, -0.0324, -0.0411, -0.0305, -0.0631, -0.0282, 0.1959, -0.033, -0.0667, -0.0733, -0.0868, 0.0202, 0.1863, 0.1307, -0.0623, 0.1887, -0.0455, 0.2132, -0.1768, 0.0503, -0.0362, -0.0672, -0.0468, -0.1921, 0.0336, -0.0905, -0.06, -0.1397, -0.0549, 0.0906, 0.0964, 0.084, -0.0585, 0.0118, -0.1222, 0.1383, -0.1081, -0.146, 0.0286, -0.1025, -0.1859, -0.2062, -0.2631, -0.1258, -0.008, 0.0195, -0.1131, -0.2886, -0.1521, 0.2221, -0.2215, -0.1502, -0.3543, -0.0196, -0.028, -0.0084, -0.1229, -0.0342, 0.0693, -0.0457, -0.0692, 0.0073, 0.1819, -0.074, -0.0582, -0.0898, 0.0521, -0.1418, 0.04, 0.106, 0.2402, 0.0271, 0.1633, 0.0411, 0.0625, -0.0948, 0.0165, 0.011, 0.1544, 0.0796, -0.0137, -0.0405, 0.095, 0.1779, 0.0091, 0.0554, 0.2082, 0.0042, -0.0334, 0.0433, 0.0404, -0.0223, 0.0794, 0.114, 0.0008, 0.0547, 0.0388, 0.0409, 0.0302, -0.0032, 0.011, -0.0041, -0.0899, -0.0382, -0.0187, 0.1263, -0.0105, -0.0791, -0.0138, 0.0126, 0.149, 0.2263, 0.2912, 0.2178, 0.2866, -0.0422, -0.1077, 0.1182, -0.2061, -0.0012, 0.2561, 0.4803, 0.0466, 0.0513, -0.0416, -0.018, -0.0265, -0.0163, 0.2515, 0.0182, -0.0077, 0.1326, 0.209, 0.2042, 0.196, 0.1432, 0.1629, 0.0771, -0.159, -0.126, 0.1289, -0.034, 0.3002, -0.1878, 0.1885, 0.0611, 0.0919, -0.1455, -0.1137, 0.0064, 0.0513, 0.2123, -0.133, 0.1732, 0.1447, -0.2986, -0.1481, -0.1231, 0.3518, 0.0858, -0.4861, 0.1725, -0.1971, -0.1344, 0.1602, 0.0123, 0.0733, -0.2904, -0.089, -0.0457, -0.0659, -0.0069, -0.0182, -0.0932, 0.3374, -0.0231, 0.0209, -0.0273, 0.7311, -0.0291, -0.0708, -0.409, 0.3767, 0.0349, -0.0675, -0.0824, 0.1121, -0.0458, -0.0358, 0.3241590152355968, 0.7704945208228933, 4.17, 0.13203612148118254, 0.4464872606070887, 3.63, 1.47509333064272, 1.0164897770870722, 3.89, 0.016821713247906368, 0.1352445902039258, 2.37, 0.024226616890323883, 0.1463353325925585, 1.66, 0.07193118756936737, 0.24566121726195447, 2.11, 0.009989910200787004, 0.06315790764947872, 0.98, 1.0571788921400462, 1.0016255439947759, 3.47, 0.10894864292200584, 0.3949950023068219, 3.17, 0.34268085965089295, 0.6150284061941337, 3.14, 0.18087175865200283, 0.4479130260471995, 5.0, 1.147568358389668, 1.035901655894913, 5.0, 0.14849561093734234, 0.3789856661653856, 3.26, 0.018728685299162547, 0.14352179220077343, 2.99, 0.5073373019876904, 0.7098009565941604, 3.4, 0.3234032892745434, 0.650504605097015, 3.56, 0.13043325597820601, 0.11993065867241953, 2.9989999999999997, -0.028887700534759364, 0.13901639350911685, 0.34299999999999997, 57.90918922392609, 5.614093190709228, 82.66014759241605, 19.745951931539217, 4.784299252431439, 45.376315407930605], [0.78, -0.0542, 0.0787, -0.0792, -0.0097, -0.09, -0.014, 0.3441, 0.0929, -0.0664, -0.1552, -0.134, -0.0495, -0.1367, -0.026, -0.041, -0.1698, -0.0068, 0.0719, -0.0845, 0.1146, -0.1033, -0.0729, -0.0804, -0.0302, 0.2075, 0.0403, -0.0761, -0.0855, -0.092, -0.0305, -0.0206, 0.1041, -0.0792, -0.032, 0.0384, 0.0643, -0.0337, 0.3532, 0.1816, 0.3904, -0.0023, 0.0173, 0.0731, 0.0287, -0.0748, -0.0411, -0.0245, -0.0287, -0.0652, 0.0929, -0.1772, -0.0577, 0.0617, -0.0582, -0.0458, -0.0234, -0.0308, 0.0385, 0.0253, 0.0286, 0.1308, -0.0131, -0.0108, 0.0899, -0.0568, -0.0369, -0.0285, 0.0473, -0.0113, 0.4722, 0.1751, 0.2953, 0.3919, 0.323, -0.0024, -0.1156, 0.0173, -0.0331, 0.1058, -0.0736, -0.0778, -0.0496, 0.2118, 0.1404, 0.1047, 
0.2695, 0.4279, 0.3553, 0.0443, -0.0701, -0.0312, 0.0409, 0.0902, -0.1098, -0.1615, -0.0313, 0.1895, 0.1527, -0.0271, -0.0592, 0.0526, -0.0075, -0.0113, -0.0774, -0.0207, -0.0321, -0.0716, -0.0926, 0.1061, 0.0843, 0.0163, 0.3339, 0.2026, 0.048, -0.0689, 0.0901, -0.0459, 0.4005, 0.1389, -0.0888, 0.0188, 0.2926, 0.3469, 0.4927, 0.0009, -0.1188, -0.0904, -0.0267, 0.0876, -0.1492, -0.2307, -0.1, 0.3287, 0.1447, 0.0847, -0.0036, 0.0255, -0.023, 0.0465, -0.111, -0.1362, -0.1603, 0.3167, 0.0124, 0.5747, 0.4583, 0.1231, -0.0891, -0.109, -0.1801, 0.0567, 0.1179, -0.222, 0.4726, 0.2861, -0.037, -0.0996, -0.1274, 0.168, 0.0892, -0.4033, 0.1618, -0.0313, 0.0153, 0.0585, 0.0564, 0.1372, -0.2812, -0.0654, -0.0005, 0.0663, 0.1556, -0.0689, -0.1899, 0.2584, -0.187, 0.1201, 0.1514, 0.5246, 0.0994, 0.0498, -0.0834, 0.3235, -0.4899, -0.2923, -0.0789, 0.1635, -0.0833, -0.031, 0.3560188464457374, 0.6983008309405547, 4.41, 0.18280356691120667, 0.45897595912787253, 3.33, 0.06283265874019732, 0.2606247268445417, 2.47, 0.027983809764735645, 0.15370780490955913, 1.67, 0.09197508221603845, 0.2813238483515082, 2.35, 0.08066658234252466, 0.2888214113055279, 2.26, 0.032736529218315207, 0.16125564454568683, 2.25, 0.39064571211737925, 0.6657909342507513, 2.98, 0.23387806729066532, 0.5981779187425298, 3.2, 0.12171009359979763, 0.33331459420745224, 2.21, 0.09141917530989122, 0.2650118194799636, 3.66, 0.38772008601062485, 0.8078036619155623, 5.0, 0.13089362509486466, 0.33201436131093703, 3.92, 0.014900708322792817, 0.14590015266593281, 2.43, 0.46602643561851753, 0.709377095740277, 3.35, 0.1983474576271186, 0.42705284035004026, 3.52, 0.2920082216038452, 0.13020077194520474, 0.675, -0.0011249051353402464, 0.14531048832143142, 0.732, 54.51910070482117, 3.617179786098553, 73.67496182557545, 18.483467468526293, 3.1347960705090947, 34.71224567785841], [0.78, -0.0542, 0.0787, -0.0792, -0.0097, -0.09, -0.014, 0.3441, 0.0929, -0.0664, -0.1552, -0.134, -0.0495, -0.1367, -0.026, -0.041, -0.1698, -0.0068, 0.0719, -0.0845, 0.1146, -0.1033, -0.0729, -0.0804, -0.0302, 0.2075, 0.0403, -0.0761, -0.0855, -0.092, -0.0305, -0.0206, 0.1041, -0.0792, -0.032, 0.0384, 0.0643, -0.0337, 0.3532, 0.1816, 0.3904, -0.0023, 0.0173, 0.0731, 0.0287, -0.0748, -0.0411, -0.0245, -0.0287, -0.0652, 0.0929, -0.1772, -0.0577, 0.0617, -0.0582, -0.0458, -0.0234, -0.0308, 0.0385, 0.0253, 0.0286, 0.1308, -0.0131, -0.0108, 0.0899, -0.0568, -0.0369, -0.0285, 0.0473, -0.0113, 0.4722, 0.1751, 0.2953, 0.3919, 0.323, -0.0024, -0.1156, 0.0173, -0.0331, 0.1058, -0.0736, -0.0778, -0.0496, 0.2118, 0.1404, 0.1047, 0.2695, 0.4279, 0.3553, 0.0443, -0.0701, -0.0312, 0.0409, 0.0902, -0.1098, -0.1615, -0.0313, 0.1895, 0.1527, -0.0271, -0.0592, 0.0526, -0.0075, -0.0113, -0.0774, -0.0207, -0.0321, -0.0716, -0.0926, 0.1061, 0.0843, 0.0163, 0.3339, 0.2026, 0.048, -0.0689, 0.0901, -0.0459, 0.4005, 0.1389, -0.0888, 0.0188, 0.2926, 0.3469, 0.4927, 0.0009, -0.1188, -0.0904, -0.0267, 0.0876, -0.1492, -0.2307, -0.1, 0.3287, 0.1447, 0.0847, -0.0036, 0.0255, -0.023, 0.0465, -0.111, -0.1362, -0.1603, 0.3167, 0.0124, 0.5747, 0.4583, 0.1231, -0.0891, -0.109, -0.1801, 0.0567, 0.1179, -0.222, 0.4726, 0.2861, -0.037, -0.0996, -0.1274, 0.168, 0.0892, -0.4033, 0.1618, -0.0313, 0.0153, 0.0585, 0.0564, 0.1372, -0.2812, -0.0654, -0.0005, 0.0663, 0.1556, -0.0689, -0.1899, 0.2584, -0.187, 0.1201, 0.1514, 0.5246, 0.0994, 0.0498, -0.0834, 0.3235, -0.4899, -0.2923, -0.0789, 0.1635, -0.0833, -0.031, 0.3560188464457374, 0.6983008309405547, 4.41, 0.18280356691120667, 0.45897595912787253, 3.33, 
0.06283265874019732, 0.2606247268445417, 2.47, 0.027983809764735645, 0.15370780490955913, 1.67, 0.09197508221603845, 0.2813238483515082, 2.35, 0.08066658234252466, 0.2888214113055279, 2.26, 0.032736529218315207, 0.16125564454568683, 2.25, 0.39064571211737925, 0.6657909342507513, 2.98, 0.23387806729066532, 0.5981779187425298, 3.2, 0.12171009359979763, 0.33331459420745224, 2.21, 0.09141917530989122, 0.2650118194799636, 3.66, 0.38772008601062485, 0.8078036619155623, 5.0, 0.13089362509486466, 0.33201436131093703, 3.92, 0.014900708322792817, 0.14590015266593281, 2.43, 0.46602643561851753, 0.709377095740277, 3.35, 0.1983474576271186, 0.42705284035004026, 3.52, 0.2920082216038452, 0.13020077194520474, 0.675, -0.0011249051353402464, 0.14531048832143142, 0.732, 54.51910070482117, 3.617179786098553, 73.67496182557545, 18.483467468526293, 3.1347960705090947, 34.71224567785841], [0.8877, 0.7684, -0.0648, -0.059, -0.0537, -0.0488, 0.3206, 0.2029, 0.0, -0.0368, -0.1304, -0.0547, -0.0742, 0.8137, 0.4681, -0.4201, 0.226, 0.0741, 0.6479, 0.8781, -0.0561, -0.047, -0.0697, -0.0423, 0.1677, -0.0547, 0.0, -0.0692, -0.103, -0.0473, -0.0642, 0.7557, 0.7032, -0.6473, 0.3842, -0.023, 0.6505, -0.0345, -0.1949, -0.0451, -0.026, -0.0742, -0.0493, 0.0, -0.0725, -0.0744, -0.0221, -0.0394, 0.6232, 0.6646, -0.7753, 0.4375, -0.0169, 0.4622, 0.1666, -0.0401, -0.0231, -0.066, -0.0438, 0.0, -0.0355, 0.2513, -0.0258, -0.0351, -0.0525, -0.0576, -0.0965, 0.098, -0.059, -0.1636, 0.1192, 0.0104, 0.3492, 0.068, 0.0, 0.158, 0.6682, -0.1282, 0.252, -0.0556, 0.1571, -0.0738, 0.0067, -0.3494, -0.304, 0.0228, -0.0863, 0.0323, 0.0, -0.0843, -0.1272, -0.0338, -0.0459, -0.0686, -0.0753, 0.1452, -0.1493, -0.0985, -0.0349, -0.0497, -0.033, 0.0, -0.0485, -0.0732, -0.0195, -0.0264, -0.0395, -0.0434, 0.0593, -0.0152, -0.1155, -0.0518, -0.0944, 0.0, 0.3323, 0.2756, -0.0446, -0.0154, 0.3923, -0.1241, 0.1549, 0.0147, -0.0284, 0.2615, 0.0, -0.0921, -0.139, -0.0369, -0.0501, -0.075, -0.0823, 0.226, -0.2677, 0.289, -0.1145, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2147, 0.6653, 0.0695, -0.1104, -0.1211, -0.057, 0.0314, 0.1971, -0.1184, -0.0371, 0.1806, -0.1666, 0.1792, -0.2609, 0.2023, -0.254, -0.4234, -0.0286, -0.0443, -0.0486, -0.1293, -0.0589, 0.3297, -0.0408, -0.0601, -0.0659, -0.0753, 0.076, -0.0597, -0.1363, 0.3807, -0.2974, 0.179, 0.0016, 0.6748, -0.7175, 0.3812, -0.1492, 0.2503, -0.5838, 0.0021, -0.1143, 0.2841, 0.2821, 0.257, 0.2690677966101695, 0.7297143477429539, 3.96, 0.095, 0.2976254898414949, 1.9, 0.049067796610169495, 0.2503116270928239, 1.64, 0.006016949152542373, 0.03452191223832531, 0.23, 0.6968644067796611, 0.6154379715850459, 1.91, 0.028474576271186443, 0.12490421495272304, 0.91, 0.005084745762711864, 0.038724270029816474, 0.3, 0.11076271186440678, 0.29494658171815125, 1.39, 0.05974576271186441, 0.2398250134125225, 1.24, 0.0, 0.0, 0.0, 0.05686440677966102, 0.15515273586046452, 0.85, 0.4641525423728814, 0.8392029372870206, 2.34, 0.03576271186440678, 0.2433154010175, 2.15, 0.02093220338983051, 0.10492925209869997, 0.71, 0.12050847457627117, 0.4036063014106086, 2.29, 0.22423728813559324, 0.6843435637060279, 3.22, 0.04338135593220339, 0.149987339549977, 0.281, -0.5637288135593223, 0.08524507650347839, -0.075, 31.818927214988065, 5.9516218874086375, 62.3394738508435, 15.537185555071273, 5.14507504616545, 31.693532463264507], [0.0604, -0.2092, 0.0, -0.1145, 0.0176, 0.0, 0.3706, -0.0728, -0.1666, -0.0393, 0.0059, -0.0887, 0.0, 0.1192, -0.0579, 0.1488, -0.1503, 0.1561, 0.2253, 0.1459, 0.0, -0.1383, 0.0356, 0.0, -0.0919, 
-0.0479, -0.1482, -0.0257, 0.0472, -0.0579, 0.0, -0.0435, 0.194, 0.0902, 0.1956, -0.352, -0.0303, 0.0, 0.5518, -0.2754, 0.0, -0.5802, -0.1717, -0.1496, -0.0997, -0.4293, 0.0483, 0.0, 0.0798, 0.1364, 0.0847, 0.812, -0.6585, -0.1052, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.077, 0.0, -0.1922, 0.0144, 0.1697, -0.2015, -0.5292, 0.3215, 0.0, 0.2462, -0.1541, -0.2192, 0.5318, -0.0501, 0.1113, 0.0, 0.2724, 0.3726, 0.3077, -0.0237, 0.2124, -0.0721, 0.0, -0.2894, -0.186, 0.4776, -0.5881, 0.4923, -0.0042, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1991, 0.0966, -0.0757, -0.0315, -0.1709, 0.0, 0.203, 0.0549, 0.1465, -0.5405, 0.662, 0.4547, 0.2514, -0.0305, 0.1133, -0.0446, 0.0, -0.065, -0.0868, 0.229, -0.2927, 0.3576, 0.0011, -0.0899, 0.279, 0.4974, 0.0, -0.311, -0.1909, 0.2391, -0.2996, 0.4124, -0.3951, 0.1435, -0.0288, 0.0, -0.0637, -0.0363, -0.1502, -0.0397, -0.0616, -0.0563, 0.1203, 0.0, -0.3806, -0.217, 0.233, -0.491, 0.2202, -0.5365, 0.0, -0.1396, -0.0819, -0.3198, 0.1586, -0.0662, -0.4151, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2985, -0.1467, 0.2208, 0.0104, 0.6458, -0.019, 0.2235, -0.3859, 0.271, -0.3629, 0.2145, 0.0096, -0.7343, 0.0182, 0.1756, 0.03946015424164525, 0.11353160700971149, 0.76, 0.027429305912596392, 0.12092009414018179, 0.95, 1.6944473007712082, 0.66615075485933, 2.88, 0.0, 0.0, 0.0, 0.876375321336761, 0.31342534649073966, 1.55, 2.0053213367609257, 0.3421957107486421, 2.89, 0.0, 0.0, 0.0, 0.2320051413881748, 0.34648004497719126, 1.2, 0.02344473007712082, 0.08690307311868854, 0.52, 0.1596401028277635, 0.2008679726921638, 0.86, 0.004061696658097687, 0.036001375269368203, 0.36, 0.35000000000000003, 0.5191212666228906, 2.05, 0.026118251928020562, 0.10259910952889222, 0.66, 0.0, 0.0, 0.0, 0.14724935732647815, 0.26150914994947894, 1.41, 0.07997429305912596, 0.24906973318122796, 1.26, 0.18956555269922878, 0.04344744123817184, 0.303, 0.15573521850899744, 0.08926117375499466, 0.309, 60.5111533915968, 3.964483264938189, 68.31149244453673, 18.13703051112263, 2.669339543020563, 25.413775791881072], [0.7317, 0.7628, 0.1062, 0.0666, 0.0669, -0.0262, 0.1691, 0.0, -0.0001, 0.094, 0.7715, -0.0656, 0.1248, 0.2641, 0.2737, 0.0863, 0.1181, -0.0508, -0.2088, 0.7597, 0.146, 0.0438, 0.0949, -0.0187, 0.0995, 0.0, 0.0942, -0.0201, 0.6204, -0.0029, 0.1385, 0.1076, 0.2921, -0.1441, 0.256, -0.0278, -0.1658, -0.036, 0.0551, 0.1363, -0.0215, 0.146, 0.0, 0.1134, 0.0805, 0.7912, -0.0216, 0.1519, 0.3288, 0.4597, -0.0595, 0.2751, -0.084, -0.2295, -0.0176, -0.0183, -0.0046, -0.0269, 0.0, -0.0231, -0.0285, -0.0286, -0.0276, -0.0055, -0.0114, -0.0301, -0.0585, -0.0085, -0.0964, 0.0619, 0.2591, 0.3478, 0.1294, 0.0, 0.1836, 0.1316, 0.0656, 0.0199, 0.1691, 0.1644, 0.1598, 0.0361, -0.0212, 0.122, 0.1437, -0.0109, 0.0031, 0.0, 0.3054, -0.0511, 0.0787, -0.0554, -0.013, 0.0383, 0.156, -0.2583, 0.3292, 0.0053, 0.0142, 0.0163, 0.0, 0.1972, -0.017, -0.0295, -0.0164, -0.0033, 0.0702, 0.0185, -0.003, -0.0299, 0.0393, 0.0634, 0.0, 0.0157, 0.0731, 0.1509, 0.1929, 0.1168, 0.1681, -0.0469, 0.0704, 0.0802, 0.2693, 0.19, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0539, 0.0492, 0.126, -0.0164, 0.0522, 0.1889, -0.3631, 0.3811, 0.1233, -0.0624, 0.2106, 0.0815, 0.0834, -0.106, -0.1151, 0.0402, 0.039, 0.0262, -0.1936, -0.0331, 0.1205, 0.2208, 0.2358, 0.0083, 0.2379, -0.0102, -0.3858, 0.0259, 0.0801, 0.046, 0.005, 0.083, 0.2217, 0.0817, -0.0318, 0.0687, 0.0016, 0.0302, -0.0637, -0.0714, 0.6096, 0.2437, -0.1156, -0.0682, 0.3935, -0.019, 0.1222, -0.1347, 0.2651, 
-0.7763, 0.0098, 0.0111, 0.1201, -0.1322, 0.1642, 0.6748916064480267, 1.3515142833794158, 4.6, 0.30613118399110617, 0.8597946524493698, 4.37, 0.34497498610339083, 0.841333404573434, 4.66, 0.007565314063368538, 0.08620056268379304, 1.53, 0.02067815453029461, 0.10302747020708797, 1.19, 0.04360755975541968, 0.20941604259899735, 2.56, 0.001406336853807671, 0.026847954405996636, 0.58, 0.12804335742078932, 0.41762058775709043, 2.09, 0.0, 0.0, 0.0, 0.04490272373540856, 0.17052245841901906, 1.27, 0.06677598665925515, 0.20582444572591205, 1.35, 0.6983490828237909, 1.2409424140213634, 4.86, 0.072751528627015, 0.23188676763347207, 1.74, 0.010144524735964425, 0.16261441420201295, 2.87, 0.4222178988326848, 0.8274729139598583, 4.12, 0.32228460255697616, 0.6579199414876901, 2.96, 0.17993385214007784, 0.20630254464463313, 1.0170000000000001, -0.03597887715397443, 0.18760252293959925, 2.667, 55.09723741071316, 3.6650535589084186, 66.91262960009867, 20.175824474323793, 2.999441869474871, 31.205608470273425], [0.6066, -0.4102, 0.309, -0.1595, -0.1705, -0.2546, -0.4064, 0.1103, -0.0738, 0.3515, 0.0656, 0.0808, -0.0857, -0.2845, -0.0074, 0.1906, -0.253, -0.2949, -0.4365, -0.2125, 0.6677, -0.078, -0.047, -0.14, -0.2365, 0.0559, 0.0343, -0.0385, -0.1029, -0.078, -0.041, -0.0951, 0.2283, 0.1134, -0.1223, -0.3824, -0.2232, -0.0624, -0.0252, -0.074, 0.2257, 0.1311, -0.2553, -0.2236, -0.2368, 0.0326, 0.1221, -0.0253, 0.2768, 0.0361, -0.3586, 0.5241, -0.1428, 0.3425, -0.0555, -0.0691, -0.1091, -0.1871, -0.021, 0.1125, -0.113, -0.124, -0.1339, -0.0442, -0.0854, 0.354, -0.048, 0.0255, -0.4925, -0.1553, 0.3521, 0.0378, 0.2041, 0.5398, 0.5316, -0.0403, 0.0568, -0.0071, -0.0035, -0.0633, 0.0255, 0.1377, -0.1702, 0.4046, 0.0149, -0.0687, 0.2074, 0.4519, 0.3422, -0.0748, -0.0448, -0.1231, -0.0394, -0.1291, 0.0285, 0.2143, -0.218, 0.2484, -0.0952, 0.422, -0.139, -0.061, -0.1308, 0.1061, -0.1068, 0.0231, 0.2845, 0.2989, 0.1931, -0.1524, 0.1693, 0.0893, 0.1021, 0.2544, -0.2569, 0.1305, -0.1911, 0.0003, 0.2278, 0.3442, 0.3777, -0.2817, 0.4226, 0.1092, 0.5719, -0.0233, -0.0902, -0.1127, -0.0502, -0.2081, -0.1257, 0.1833, -0.2605, 0.3511, -0.2084, -0.1623, -0.0625, -0.1577, -0.041, -0.1217, 0.1818, 0.1554, -0.228, 0.2507, -0.0887, 0.4344, 0.3257, 0.0785, -0.2448, -0.2421, 0.1057, -0.1965, 0.0236, -0.2947, 0.3798, 0.0892, -0.3002, -0.0054, 0.1996, -0.2834, 0.0777, -0.2754, 0.0108, -0.2328, -0.1494, -0.2246, -0.0105, 0.0252, -0.1164, 0.1107, -0.0699, -0.1029, 0.1132, 0.0095, 0.1303, 0.1813, -0.2287, 0.3628, 0.1102, 0.6812, 0.2637, -0.2939, -0.0688, -0.0729, -0.8205, 0.2653, -0.4155, -0.3426, 0.5606, 0.1495, 0.9173029045643153, 1.409198205265933, 5.0, 0.2804580274497287, 0.7647249667236484, 5.0, 0.5000606447494415, 0.692522344107577, 2.86, 0.13268751994893074, 0.45142706340968103, 3.79, 0.09676029364826047, 0.37978407735647757, 2.43, 0.11943185445260135, 0.3884155118723334, 4.11, 0.2176077242259815, 0.553614453046046, 2.75, 0.9761346951803382, 1.2684791619679194, 4.47, 0.20543887647622092, 0.5460921500126225, 2.99, 0.1819182891797, 0.5368818221855781, 3.03, 0.6090999042451325, 1.2720493682203944, 5.0, 0.8416677306096394, 1.0047596489493889, 5.0, 0.4428758378550909, 0.9422431298597673, 5.0, 0.03301468241302266, 0.21798439485680643, 3.39, 0.4973763166294287, 0.8784948233128742, 5.0, 0.7977146504947336, 1.257151497352541, 5.0, -0.14595451643791893, 0.37666001259401827, 3.1060000000000003, 0.2359168528566869, 0.5464358864981677, 1.736, 51.56691309888132, 9.484393960540368, 91.28072085604944, 13.623145403904337, 5.9026312074075635, 
48.06433188966635], [0.8877, 0.7684, -0.0648, -0.059, -0.0537, -0.0488, 0.3206, 0.2029, 0.0, -0.0368, -0.1304, -0.0547, -0.0742, 0.8137, 0.4681, -0.4201, 0.226, 0.0741, 0.6479, 0.8781, -0.0561, -0.047, -0.0697, -0.0423, 0.1677, -0.0547, 0.0, -0.0692, -0.103, -0.0473, -0.0642, 0.7557, 0.7032, -0.6473, 0.3842, -0.023, 0.6505, -0.0345, -0.1949, -0.0451, -0.026, -0.0742, -0.0493, 0.0, -0.0725, -0.0744, -0.0221, -0.0394, 0.6232, 0.6646, -0.7753, 0.4375, -0.0169, 0.4622, 0.1666, -0.0401, -0.0231, -0.066, -0.0438, 0.0, -0.0355, 0.2513, -0.0258, -0.0351, -0.0525, -0.0576, -0.0965, 0.098, -0.059, -0.1636, 0.1192, 0.0104, 0.3492, 0.068, 0.0, 0.158, 0.6682, -0.1282, 0.252, -0.0556, 0.1571, -0.0738, 0.0067, -0.3494, -0.304, 0.0228, -0.0863, 0.0323, 0.0, -0.0843, -0.1272, -0.0338, -0.0459, -0.0686, -0.0753, 0.1452, -0.1493, -0.0985, -0.0349, -0.0497, -0.033, 0.0, -0.0485, -0.0732, -0.0195, -0.0264, -0.0395, -0.0434, 0.0593, -0.0152, -0.1155, -0.0518, -0.0944, 0.0, 0.3323, 0.2756, -0.0446, -0.0154, 0.3923, -0.1241, 0.1549, 0.0147, -0.0284, 0.2615, 0.0, -0.0921, -0.139, -0.0369, -0.0501, -0.075, -0.0823, 0.226, -0.2677, 0.289, -0.1145, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2147, 0.6653, 0.0695, -0.1104, -0.1211, -0.057, 0.0314, 0.1971, -0.1184, -0.0371, 0.1806, -0.1666, 0.1792, -0.2609, 0.2023, -0.254, -0.4234, -0.0286, -0.0443, -0.0486, -0.1293, -0.0589, 0.3297, -0.0408, -0.0601, -0.0659, -0.0753, 0.076, -0.0597, -0.1363, 0.3807, -0.2974, 0.179, 0.0016, 0.6748, -0.7175, 0.3812, -0.1492, 0.2503, -0.5838, 0.0021, -0.1143, 0.2841, 0.2821, 0.257, 0.2690677966101695, 0.7297143477429539, 3.96, 0.095, 0.2976254898414949, 1.9, 0.049067796610169495, 0.2503116270928239, 1.64, 0.006016949152542373, 0.03452191223832531, 0.23, 0.6968644067796611, 0.6154379715850459, 1.91, 0.028474576271186443, 0.12490421495272304, 0.91, 0.005084745762711864, 0.038724270029816474, 0.3, 0.11076271186440678, 0.29494658171815125, 1.39, 0.05974576271186441, 0.2398250134125225, 1.24, 0.0, 0.0, 0.0, 0.05686440677966102, 0.15515273586046452, 0.85, 0.4641525423728814, 0.8392029372870206, 2.34, 0.03576271186440678, 0.2433154010175, 2.15, 0.02093220338983051, 0.10492925209869997, 0.71, 0.12050847457627117, 0.4036063014106086, 2.29, 0.22423728813559324, 0.6843435637060279, 3.22, 0.04338135593220339, 0.149987339549977, 0.281, -0.5637288135593223, 0.08524507650347839, -0.075, 31.818927214988065, 5.9516218874086375, 62.3394738508435, 15.537185555071273, 5.14507504616545, 31.693532463264507], [0.5795, -0.4229, -0.0324, -0.0411, -0.0305, -0.0631, -0.0282, 0.1959, -0.033, -0.0667, -0.0733, -0.0868, 0.0202, 0.1863, 0.1307, -0.0623, 0.1887, -0.0455, 0.2132, -0.1768, 0.0503, -0.0362, -0.0672, -0.0468, -0.1921, 0.0336, -0.0905, -0.06, -0.1397, -0.0549, 0.0906, 0.0964, 0.084, -0.0585, 0.0118, -0.1222, 0.1383, -0.1081, -0.146, 0.0286, -0.1025, -0.1859, -0.2062, -0.2631, -0.1258, -0.008, 0.0195, -0.1131, -0.2886, -0.1521, 0.2221, -0.2215, -0.1502, -0.3543, -0.0196, -0.028, -0.0084, -0.1229, -0.0342, 0.0693, -0.0457, -0.0692, 0.0073, 0.1819, -0.074, -0.0582, -0.0898, 0.0521, -0.1418, 0.04, 0.106, 0.2402, 0.0271, 0.1633, 0.0411, 0.0625, -0.0948, 0.0165, 0.011, 0.1544, 0.0796, -0.0137, -0.0405, 0.095, 0.1779, 0.0091, 0.0554, 0.2082, 0.0042, -0.0334, 0.0433, 0.0404, -0.0223, 0.0794, 0.114, 0.0008, 0.0547, 0.0388, 0.0409, 0.0302, -0.0032, 0.011, -0.0041, -0.0899, -0.0382, -0.0187, 0.1263, -0.0105, -0.0791, -0.0138, 0.0126, 0.149, 0.2263, 0.2912, 0.2178, 0.2866, -0.0422, -0.1077, 0.1182, -0.2061, -0.0012, 0.2561, 0.4803, 0.0466, 0.0513, -0.0416, 
-0.018, -0.0265, -0.0163, 0.2515, 0.0182, -0.0077, 0.1326, 0.209, 0.2042, 0.196, 0.1432, 0.1629, 0.0771, -0.159, -0.126, 0.1289, -0.034, 0.3002, -0.1878, 0.1885, 0.0611, 0.0919, -0.1455, -0.1137, 0.0064, 0.0513, 0.2123, -0.133, 0.1732, 0.1447, -0.2986, -0.1481, -0.1231, 0.3518, 0.0858, -0.4861, 0.1725, -0.1971, -0.1344, 0.1602, 0.0123, 0.0733, -0.2904, -0.089, -0.0457, -0.0659, -0.0069, -0.0182, -0.0932, 0.3374, -0.0231, 0.0209, -0.0273, 0.7311, -0.0291, -0.0708, -0.409, 0.3767, 0.0349, -0.0675, -0.0824, 0.1121, -0.0458, -0.0358, 0.3241590152355968, 0.7704945208228933, 4.17, 0.13203612148118254, 0.4464872606070887, 3.63, 1.47509333064272, 1.0164897770870722, 3.89, 0.016821713247906368, 0.1352445902039258, 2.37, 0.024226616890323883, 0.1463353325925585, 1.66, 0.07193118756936737, 0.24566121726195447, 2.11, 0.009989910200787004, 0.06315790764947872, 0.98, 1.0571788921400462, 1.0016255439947759, 3.47, 0.10894864292200584, 0.3949950023068219, 3.17, 0.34268085965089295, 0.6150284061941337, 3.14, 0.18087175865200283, 0.4479130260471995, 5.0, 1.147568358389668, 1.035901655894913, 5.0, 0.14849561093734234, 0.3789856661653856, 3.26, 0.018728685299162547, 0.14352179220077343, 2.99, 0.5073373019876904, 0.7098009565941604, 3.4, 0.3234032892745434, 0.650504605097015, 3.56, 0.13043325597820601, 0.11993065867241953, 2.9989999999999997, -0.028887700534759364, 0.13901639350911685, 0.34299999999999997, 57.90918922392609, 5.614093190709228, 82.66014759241605, 19.745951931539217, 4.784299252431439, 45.376315407930605], [0.0604, -0.2092, 0.0, -0.1145, 0.0176, 0.0, 0.3706, -0.0728, -0.1666, -0.0393, 0.0059, -0.0887, 0.0, 0.1192, -0.0579, 0.1488, -0.1503, 0.1561, 0.2253, 0.1459, 0.0, -0.1383, 0.0356, 0.0, -0.0919, -0.0479, -0.1482, -0.0257, 0.0472, -0.0579, 0.0, -0.0435, 0.194, 0.0902, 0.1956, -0.352, -0.0303, 0.0, 0.5518, -0.2754, 0.0, -0.5802, -0.1717, -0.1496, -0.0997, -0.4293, 0.0483, 0.0, 0.0798, 0.1364, 0.0847, 0.812, -0.6585, -0.1052, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.077, 0.0, -0.1922, 0.0144, 0.1697, -0.2015, -0.5292, 0.3215, 0.0, 0.2462, -0.1541, -0.2192, 0.5318, -0.0501, 0.1113, 0.0, 0.2724, 0.3726, 0.3077, -0.0237, 0.2124, -0.0721, 0.0, -0.2894, -0.186, 0.4776, -0.5881, 0.4923, -0.0042, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1991, 0.0966, -0.0757, -0.0315, -0.1709, 0.0, 0.203, 0.0549, 0.1465, -0.5405, 0.662, 0.4547, 0.2514, -0.0305, 0.1133, -0.0446, 0.0, -0.065, -0.0868, 0.229, -0.2927, 0.3576, 0.0011, -0.0899, 0.279, 0.4974, 0.0, -0.311, -0.1909, 0.2391, -0.2996, 0.4124, -0.3951, 0.1435, -0.0288, 0.0, -0.0637, -0.0363, -0.1502, -0.0397, -0.0616, -0.0563, 0.1203, 0.0, -0.3806, -0.217, 0.233, -0.491, 0.2202, -0.5365, 0.0, -0.1396, -0.0819, -0.3198, 0.1586, -0.0662, -0.4151, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2985, -0.1467, 0.2208, 0.0104, 0.6458, -0.019, 0.2235, -0.3859, 0.271, -0.3629, 0.2145, 0.0096, -0.7343, 0.0182, 0.1756, 0.03946015424164525, 0.11353160700971149, 0.76, 0.027429305912596392, 0.12092009414018179, 0.95, 1.6944473007712082, 0.66615075485933, 2.88, 0.0, 0.0, 0.0, 0.876375321336761, 0.31342534649073966, 1.55, 2.0053213367609257, 0.3421957107486421, 2.89, 0.0, 0.0, 0.0, 0.2320051413881748, 0.34648004497719126, 1.2, 0.02344473007712082, 0.08690307311868854, 0.52, 0.1596401028277635, 0.2008679726921638, 0.86, 0.004061696658097687, 0.036001375269368203, 0.36, 0.35000000000000003, 0.5191212666228906, 2.05, 0.026118251928020562, 0.10259910952889222, 0.66, 0.0, 0.0, 0.0, 0.14724935732647815, 0.26150914994947894, 1.41, 
0.07997429305912596, 0.24906973318122796, 1.26, 0.18956555269922878, 0.04344744123817184, 0.303, 0.15573521850899744, 0.08926117375499466, 0.309, 60.5111533915968, 3.964483264938189, 68.31149244453673, 18.13703051112263, 2.669339543020563, 25.413775791881072], [0.6066, -0.4102, 0.309, -0.1595, -0.1705, -0.2546, -0.4064, 0.1103, -0.0738, 0.3515, 0.0656, 0.0808, -0.0857, -0.2845, -0.0074, 0.1906, -0.253, -0.2949, -0.4365, -0.2125, 0.6677, -0.078, -0.047, -0.14, -0.2365, 0.0559, 0.0343, -0.0385, -0.1029, -0.078, -0.041, -0.0951, 0.2283, 0.1134, -0.1223, -0.3824, -0.2232, -0.0624, -0.0252, -0.074, 0.2257, 0.1311, -0.2553, -0.2236, -0.2368, 0.0326, 0.1221, -0.0253, 0.2768, 0.0361, -0.3586, 0.5241, -0.1428, 0.3425, -0.0555, -0.0691, -0.1091, -0.1871, -0.021, 0.1125, -0.113, -0.124, -0.1339, -0.0442, -0.0854, 0.354, -0.048, 0.0255, -0.4925, -0.1553, 0.3521, 0.0378, 0.2041, 0.5398, 0.5316, -0.0403, 0.0568, -0.0071, -0.0035, -0.0633, 0.0255, 0.1377, -0.1702, 0.4046, 0.0149, -0.0687, 0.2074, 0.4519, 0.3422, -0.0748, -0.0448, -0.1231, -0.0394, -0.1291, 0.0285, 0.2143, -0.218, 0.2484, -0.0952, 0.422, -0.139, -0.061, -0.1308, 0.1061, -0.1068, 0.0231, 0.2845, 0.2989, 0.1931, -0.1524, 0.1693, 0.0893, 0.1021, 0.2544, -0.2569, 0.1305, -0.1911, 0.0003, 0.2278, 0.3442, 0.3777, -0.2817, 0.4226, 0.1092, 0.5719, -0.0233, -0.0902, -0.1127, -0.0502, -0.2081, -0.1257, 0.1833, -0.2605, 0.3511, -0.2084, -0.1623, -0.0625, -0.1577, -0.041, -0.1217, 0.1818, 0.1554, -0.228, 0.2507, -0.0887, 0.4344, 0.3257, 0.0785, -0.2448, -0.2421, 0.1057, -0.1965, 0.0236, -0.2947, 0.3798, 0.0892, -0.3002, -0.0054, 0.1996, -0.2834, 0.0777, -0.2754, 0.0108, -0.2328, -0.1494, -0.2246, -0.0105, 0.0252, -0.1164, 0.1107, -0.0699, -0.1029, 0.1132, 0.0095, 0.1303, 0.1813, -0.2287, 0.3628, 0.1102, 0.6812, 0.2637, -0.2939, -0.0688, -0.0729, -0.8205, 0.2653, -0.4155, -0.3426, 0.5606, 0.1495, 0.9173029045643153, 1.409198205265933, 5.0, 0.2804580274497287, 0.7647249667236484, 5.0, 0.5000606447494415, 0.692522344107577, 2.86, 0.13268751994893074, 0.45142706340968103, 3.79, 0.09676029364826047, 0.37978407735647757, 2.43, 0.11943185445260135, 0.3884155118723334, 4.11, 0.2176077242259815, 0.553614453046046, 2.75, 0.9761346951803382, 1.2684791619679194, 4.47, 0.20543887647622092, 0.5460921500126225, 2.99, 0.1819182891797, 0.5368818221855781, 3.03, 0.6090999042451325, 1.2720493682203944, 5.0, 0.8416677306096394, 1.0047596489493889, 5.0, 0.4428758378550909, 0.9422431298597673, 5.0, 0.03301468241302266, 0.21798439485680643, 3.39, 0.4973763166294287, 0.8784948233128742, 5.0, 0.7977146504947336, 1.257151497352541, 5.0, -0.14595451643791893, 0.37666001259401827, 3.1060000000000003, 0.2359168528566869, 0.5464358864981677, 1.736, 51.56691309888132, 9.484393960540368, 91.28072085604944, 13.623145403904337, 5.9026312074075635, 48.06433188966635]]\n" ]
[ [ "sklearn.svm.OneClassSVM" ] ]
shepherdp/csc486-assignment-2
[ "bc28073857d888ffb24d3144c81a20b668368200" ]
[ "asgn2.py" ]
[ "# CSC486 - Spring 2022\n# Author: Dr. Patrick Shepherd\n\n# NOTE: This file contains several functions, some of which already do something\n# when run, even before you start writing code. For your convenience, in the\n# main function, you may want to comment out the functions you are not currently\n# using so they are not running each time you modify the code.\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n# A convenient function to create an undirected scale free graph.\ndef undirected_scale_free_graph(n):\n H = nx.scale_free_graph(n)\n G = nx.Graph()\n for (u, v) in H.edges():\n G.add_edge(u, v)\n del H\n return G\n\ndef task1():\n # Task 1: Examine some Erdos Renyi random graphs (named G1)\n # to see how the parameter 'p' affects them.\n n = 100\n\n # Modify this parameter and run the code again\n p = .05\n G1 = nx.erdos_renyi_graph(n, p)\n\n nx.draw_networkx(G1)\n plt.show()\n\ndef task2():\n # Task 2: Create a small world graph named G2\n # The function you will call is nx.watts_strogatz_graph\n # Call the function with parameters n, k, and p, in that order.\n n = 100\n k = 10\n p = .3\n\n # Create the variable G2 here, then plot the network as above.\n\ndef task3():\n # Task 3: Create a scale free network named G3.\n # The function you will call is nx.scale_free_graph\n # The function only takes the parameter n.\n n = 100\n\n # Create the variable G3 here, then plot the network as above.\n\ndef task4():\n # Task 4: Fill in the for loop below.\n # Inside the loop, create a new random network, collect graph metric values,\n # and plot them.\n n = 100\n \n for i in range(21):\n\n # Set the current iteration's value of p\n p = i*.05\n\n # Create a new random network here\n\n # Gather metric values here\n\n # The x-coordinate list is already made for you.\n # Pass it as the first argument to plt.scatter().\n x = [p for j in range(n)]\n\n # Plot the current set of points here\n\n\n # Show the network\n plt.show()\n\ndef task5and6():\n # Task 5, 6: Fill in the for loop below.\n # Inside the loop, create a new small world network, collect graph metric values,\n # and plot them.\n n = 100\n\n for i in range(21):\n\n # Task 6: after completing task 5, modify this parameter and \n k = 3\n\n # Set the current iteration's value of p\n p = i*.05\n\n # Create a new small world network here\n\n # Gather metric values here\n\n # The x-coordinate list is already made for you.\n # Pass it as the first argument to plt.scatter().\n x = [p for j in range(n)]\n\n # Plot the current set of points here\n\n # Show the network\n plt.show()\n\ndef worked_example():\n \n ###############################################################\n # WORKED EXAMPLE #\n ###############################################################\n\n n = 100\n p = .2\n k = 4\n \n # First, we create one of each network, using most of the parameters above.\n G4 = nx.erdos_renyi_graph(n, p)\n G5 = nx.watts_strogatz_graph(n, k, p)\n G6 = undirected_scale_free_graph(n)\n\n # Then, we collect the closeness centrality scores for all vertices in\n # each network.\n # These are dictionaries, in which the keys are vertex indices (0, 1, 2, ...)\n # and values are the corresponding centrality scores for each vertex.\n close4 = nx.closeness_centrality(G4)\n close5 = nx.closeness_centrality(G5)\n close6 = nx.closeness_centrality(G6)\n\n # A handy way to get the values from a dictionary as a 1D list.\n # NOTE: This is all we need to do here, as we don't need to know which\n # score corresponds to which vertex in this case. 
We are just plotting\n # all the scores from each network as a group.\n y4 = close4.values()\n y5 = close5.values()\n y6 = close6.values()\n\n # We will plot the scores out in such a way that all vertex scores from the\n # random graph are on the left, all small world scores are in the middle,\n # and all scale free scores are on the right. These lists are just meant\n # to hold the x-values of each score so that we can plot them together.\n # This way, all random network scores will be displayed vertically above\n # x=1, all small world scores above x=2, and all scale free scores above\n # x=3.\n x4 = [1 for i in range(n)]\n x5 = [2 for i in range(n)]\n x6 = [3 for i in range(n)]\n\n # Finally, we can use the function plt.scatter(x, y), where x and y are\n # either numbers or lists of numbers, and are the coordinates of the points\n # to plot. In other words, to plot three points, one at (1, 3), one at\n # (2, 4), and one at (6, 5), you would call\n # plt.scatter( [1, 2, 6], [3, 4, 5] )\n \n # You can call plt.scatter as many times as you like before displaying the\n # plot, and each call will place dots on the screen of a different color.\n # Since there are three calls made below, the dots on the plot show up in\n # three differently colored groups.\n plt.scatter(x4, y4)\n plt.scatter(x5, y5)\n plt.scatter(x6, y6)\n\n # Once you have plotted all your points, call plt.show() to display the plot.\n plt.show()\n\ndef main():\n\n task1()\n task2()\n task3()\n worked_example()\n task5and6()\n \n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.scatter" ] ]
maestrojeong/Deep-Hash-Table-ICML18-
[ "0c7efa230f950d5a2cd1928ac9f5d99f4276d2b5" ]
[ "utils/sklearn_op.py" ]
[ "from sklearn.cluster import KMeans\nfrom tqdm import tqdm\n\nimport tensorflow as tf\nimport numpy as np\n\nclass KMeansClustering:\n def __init__(self, X_samples, n_clusters):\n '''\n Args:\n X_samples - Numpy 2D array\n [n_sample, n_features]\n n_clusters - int\n \n '''\n print(\"Fitting X_samples starts\")\n self.nfeatures = X_samples.shape[1]\n self.nclusters = n_clusters\n self.manager = KMeans(n_clusters=self.nclusters, random_state=0).fit(X_samples)\n print(\"Fitting X_samples done\")\n\n @property\n def centers(self):\n return self.manager.cluster_centers_\n\n def predict(self, predict_x):\n '''\n Args:\n predict_x - Numpy 2D array\n [n_predict, nfeatures]\n Return:\n label - Numpy 1D array\n [n_predict, ], whose values is [0, self.clusters)\n '''\n assert predict_x.shape[1] == self.nfeatures, \"x should have the same features %d but %d\"%(self.nfeatures, predict_x.shape[1])\n\n return self.manager.predict(predict_x)\n\n def k_hash(self, predict_x, session): \n '''\n accerelated with tensorflow\n\n Bigger closer implemented with just simple negative\n\n Args:\n predict_x - Numpy 2D array [npredict, nfeatures]\n Return:\n k_hash - Numpy 2D array [npredict, n_clusters]\n '''\n assert predict_x.shape[1] == self.nfeatures, \"x should have the same features %d but %d\"%(self.nfeatures, predict_x.shape[1])\n npredict = predict_x.shape[0]\n batch_size = 2\n if npredict%batch_size!=0:\n predict_x = np.concatenate([predict_x, np.zeros([batch_size-npredict%batch_size, self.nfeatures])], axis=0) \n \n inputs = tf.placeholder(tf.float32, [batch_size, self.nfeatures]) # [batch_size, nfeatures]\n centers = tf.convert_to_tensor(self.centers, dtype=tf.float32) # [n_clusters, nfeatures]\n \n negdist = tf.negative(\n tf.reduce_sum(\n tf.square( \n tf.subtract(\n tf.expand_dims(inputs, axis=1),\n tf.expand_dims(centers, axis=0))),\n axis=-1)) # [batch_size, 1, nfeatures] [1, nclusters, nfeatures] => [bath_size, ncluster, nfeatures] => [batch_size, n_clusters]\n \n nbatch = len(predict_x)//batch_size\n k_hash = list()\n for b in tqdm(range(nbatch), ascii = True, desc=\"batch\"):\n feed_dict = {inputs : predict_x[b*batch_size:(b+1)*batch_size]}\n k_hash.append(session.run(negdist, feed_dict=feed_dict))\n k_hash = np.concatenate(k_hash, axis=0)\n k_hash = k_hash[:npredict]\n\n return k_hash\n\n" ]
[ [ "numpy.concatenate", "tensorflow.convert_to_tensor", "numpy.zeros", "tensorflow.expand_dims", "sklearn.cluster.KMeans", "tensorflow.placeholder" ] ]
liuhyCV/instance-segmentation-based-part-seg
[ "9f946e20672d591130b1cd91b149e277315382dc" ]
[ "faster_rcnn/config/config.py" ]
[ "# --------------------------------------------------------\n# Deformable Convolutional Networks\n# Copyright (c) 2016 by Contributors\n# Copyright (c) 2017 Microsoft\n# Licensed under The Apache-2.0 License [see LICENSE for details]\n# Modified by Yuwen Xiong, Bin Xiao\n# --------------------------------------------------------\n\nimport yaml\nimport numpy as np\nfrom easydict import EasyDict as edict\n\nconfig = edict()\n\nconfig.MXNET_VERSION = ''\nconfig.output_path = ''\nconfig.symbol = ''\nconfig.gpus = ''\nconfig.CLASS_AGNOSTIC = True\nconfig.SCALES = [(600, 1000)] # first is scale (the shorter side); second is max size\n\n# default training\nconfig.default = edict()\nconfig.default.frequent = 20\nconfig.default.kvstore = 'device'\n\n# network related params\nconfig.network = edict()\nconfig.network.pretrained = ''\nconfig.network.pretrained_epoch = 0\nconfig.network.PIXEL_MEANS = np.array([0, 0, 0])\nconfig.network.IMAGE_STRIDE = 0\nconfig.network.RPN_FEAT_STRIDE = 16\nconfig.network.RCNN_FEAT_STRIDE = 16\nconfig.network.FIXED_PARAMS = ['gamma', 'beta']\nconfig.network.FIXED_PARAMS_SHARED = ['gamma', 'beta']\nconfig.network.ANCHOR_SCALES = (8, 16, 32)\nconfig.network.ANCHOR_RATIOS = (0.5, 1, 2)\nconfig.network.NUM_ANCHORS = len(config.network.ANCHOR_SCALES) * len(config.network.ANCHOR_RATIOS)\n\n# dataset related params\nconfig.dataset = edict()\nconfig.dataset.dataset = 'PascalVOC'\nconfig.dataset.image_set = '2012_trainval'\nconfig.dataset.test_image_set = '2012_test'\nconfig.dataset.root_path = './data'\nconfig.dataset.dataset_path = './data/VOCdevkit2012'\nconfig.dataset.NUM_CLASSES = 21\n\n\nconfig.TRAIN = edict()\n\nconfig.TRAIN.lr = 0\nconfig.TRAIN.lr_step = ''\nconfig.TRAIN.lr_factor = 0.1\nconfig.TRAIN.warmup = False\nconfig.TRAIN.warmup_lr = 0\nconfig.TRAIN.warmup_step = 0\nconfig.TRAIN.momentum = 0.9\nconfig.TRAIN.wd = 0.0005\nconfig.TRAIN.begin_epoch = 0\nconfig.TRAIN.end_epoch = 0\nconfig.TRAIN.model_prefix = ''\n\nconfig.TRAIN.ALTERNATE = edict()\nconfig.TRAIN.ALTERNATE.RPN_BATCH_IMAGES = 0\nconfig.TRAIN.ALTERNATE.RCNN_BATCH_IMAGES = 0\nconfig.TRAIN.ALTERNATE.rpn1_lr = 0\nconfig.TRAIN.ALTERNATE.rpn1_lr_step = '' # recommend '2'\nconfig.TRAIN.ALTERNATE.rpn1_epoch = 0 # recommend 3\nconfig.TRAIN.ALTERNATE.rfcn1_lr = 0\nconfig.TRAIN.ALTERNATE.rfcn1_lr_step = '' # recommend '5'\nconfig.TRAIN.ALTERNATE.rfcn1_epoch = 0 # recommend 8\nconfig.TRAIN.ALTERNATE.rpn2_lr = 0\nconfig.TRAIN.ALTERNATE.rpn2_lr_step = '' # recommend '2'\nconfig.TRAIN.ALTERNATE.rpn2_epoch = 0 # recommend 3\nconfig.TRAIN.ALTERNATE.rfcn2_lr = 0\nconfig.TRAIN.ALTERNATE.rfcn2_lr_step = '' # recommend '5'\nconfig.TRAIN.ALTERNATE.rfcn2_epoch = 0 # recommend 8\n# optional\nconfig.TRAIN.ALTERNATE.rpn3_lr = 0\nconfig.TRAIN.ALTERNATE.rpn3_lr_step = '' # recommend '2'\nconfig.TRAIN.ALTERNATE.rpn3_epoch = 0 # recommend 3\n\n# whether resume training\nconfig.TRAIN.RESUME = False\n# whether flip image\nconfig.TRAIN.FLIP = True\n# whether shuffle image\nconfig.TRAIN.SHUFFLE = True\n# whether use OHEM\nconfig.TRAIN.ENABLE_OHEM = False\n# size of images for each device, 2 for rcnn, 1 for rpn and e2e\nconfig.TRAIN.BATCH_IMAGES = 2\n# e2e changes behavior of anchor loader and metric\nconfig.TRAIN.END2END = False\n# group images with similar aspect ratio\nconfig.TRAIN.ASPECT_GROUPING = True\n\n# R-CNN\n# rcnn rois batch size\nconfig.TRAIN.BATCH_ROIS = 128\nconfig.TRAIN.BATCH_ROIS_OHEM = 128\n# rcnn rois sampling params\nconfig.TRAIN.FG_FRACTION = 0.25\nconfig.TRAIN.FG_THRESH = 0.5\nconfig.TRAIN.BG_THRESH_HI = 
0.5\nconfig.TRAIN.BG_THRESH_LO = 0.0\n# rcnn bounding box regression params\nconfig.TRAIN.BBOX_REGRESSION_THRESH = 0.5\nconfig.TRAIN.BBOX_WEIGHTS = np.array([1.0, 1.0, 1.0, 1.0])\n\n# RPN anchor loader\n# rpn anchors batch size\nconfig.TRAIN.RPN_BATCH_SIZE = 256\n# rpn anchors sampling params\nconfig.TRAIN.RPN_FG_FRACTION = 0.5\nconfig.TRAIN.RPN_POSITIVE_OVERLAP = 0.7\nconfig.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3\nconfig.TRAIN.RPN_CLOBBER_POSITIVES = False\n# rpn bounding box regression params\nconfig.TRAIN.RPN_BBOX_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\nconfig.TRAIN.RPN_POSITIVE_WEIGHT = -1.0\n\n# used for end2end training\n# RPN proposal\nconfig.TRAIN.CXX_PROPOSAL = True\nconfig.TRAIN.RPN_NMS_THRESH = 0.7\nconfig.TRAIN.RPN_PRE_NMS_TOP_N = 12000\nconfig.TRAIN.RPN_POST_NMS_TOP_N = 2000\nconfig.TRAIN.RPN_MIN_SIZE = config.network.RPN_FEAT_STRIDE\n# approximate bounding box regression\nconfig.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED = False\nconfig.TRAIN.BBOX_MEANS = (0.0, 0.0, 0.0, 0.0)\nconfig.TRAIN.BBOX_STDS = (0.1, 0.1, 0.2, 0.2)\n\nconfig.TEST = edict()\n\n# R-CNN testing\n# use rpn to generate proposal\nconfig.TEST.HAS_RPN = False\n# size of images for each device\nconfig.TEST.BATCH_IMAGES = 1\n\n# RPN proposal\nconfig.TEST.CXX_PROPOSAL = True\nconfig.TEST.RPN_NMS_THRESH = 0.7\nconfig.TEST.RPN_PRE_NMS_TOP_N = 6000\nconfig.TEST.RPN_POST_NMS_TOP_N = 300\nconfig.TEST.RPN_MIN_SIZE = config.network.RPN_FEAT_STRIDE\n\n# RPN generate proposal\nconfig.TEST.PROPOSAL_NMS_THRESH = 0.7\nconfig.TEST.PROPOSAL_PRE_NMS_TOP_N = 20000\nconfig.TEST.PROPOSAL_POST_NMS_TOP_N = 2000\nconfig.TEST.PROPOSAL_MIN_SIZE = config.network.RPN_FEAT_STRIDE\n\n# RCNN nms\nconfig.TEST.NMS = 0.3\n\nconfig.TEST.max_per_image = 300\n\n# Test Model Epoch\nconfig.TEST.test_epoch = 0\n\n\ndef update_config(config_file):\n exp_config = None\n with open(config_file) as f:\n exp_config = edict(yaml.load(f))\n for k, v in exp_config.items():\n if k in config:\n if isinstance(v, dict):\n if k == 'TRAIN':\n if 'BBOX_WEIGHTS' in v:\n v['BBOX_WEIGHTS'] = np.array(v['BBOX_WEIGHTS'])\n elif k == 'network':\n if 'PIXEL_MEANS' in v:\n v['PIXEL_MEANS'] = np.array(v['PIXEL_MEANS'])\n for vk, vv in v.items():\n config[k][vk] = vv\n else:\n if k == 'SCALES':\n config[k][0] = (tuple(v))\n else:\n config[k] = v\n else:\n raise ValueError(\"key must exist in config.py\")\n" ]
[ [ "numpy.array" ] ]
WillKoehrsen/pandas
[ "2efb60717bda9fc64344c5f6647d58564930808e" ]
[ "pandas/tests/groupby/test_whitelist.py" ]
[ "\"\"\"\ntest methods relating to generic function evaluation\nthe so-called white/black lists\n\"\"\"\n\nfrom string import ascii_lowercase\n\nimport numpy as np\nimport pytest\n\nfrom pandas import DataFrame, Index, MultiIndex, Series, date_range\nfrom pandas.util import testing as tm\n\nAGG_FUNCTIONS = [\n \"sum\",\n \"prod\",\n \"min\",\n \"max\",\n \"median\",\n \"mean\",\n \"skew\",\n \"mad\",\n \"std\",\n \"var\",\n \"sem\",\n]\nAGG_FUNCTIONS_WITH_SKIPNA = [\"skew\", \"mad\"]\n\ndf_whitelist = [\n \"quantile\",\n \"fillna\",\n \"mad\",\n \"take\",\n \"idxmax\",\n \"idxmin\",\n \"tshift\",\n \"skew\",\n \"plot\",\n \"hist\",\n \"dtypes\",\n \"corrwith\",\n \"corr\",\n \"cov\",\n \"diff\",\n]\n\n\[email protected](params=df_whitelist)\ndef df_whitelist_fixture(request):\n return request.param\n\n\ns_whitelist = [\n \"quantile\",\n \"fillna\",\n \"mad\",\n \"take\",\n \"idxmax\",\n \"idxmin\",\n \"tshift\",\n \"skew\",\n \"plot\",\n \"hist\",\n \"dtype\",\n \"corr\",\n \"cov\",\n \"diff\",\n \"unique\",\n \"nlargest\",\n \"nsmallest\",\n \"is_monotonic_increasing\",\n \"is_monotonic_decreasing\",\n]\n\n\[email protected](params=s_whitelist)\ndef s_whitelist_fixture(request):\n return request.param\n\n\[email protected]\ndef mframe():\n index = MultiIndex(\n levels=[[\"foo\", \"bar\", \"baz\", \"qux\"], [\"one\", \"two\", \"three\"]],\n codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=[\"first\", \"second\"],\n )\n return DataFrame(np.random.randn(10, 3), index=index, columns=[\"A\", \"B\", \"C\"])\n\n\[email protected]\ndef df():\n return DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"three\", \"two\", \"two\", \"one\", \"three\"],\n \"C\": np.random.randn(8),\n \"D\": np.random.randn(8),\n }\n )\n\n\[email protected]\ndef df_letters():\n letters = np.array(list(ascii_lowercase))\n N = 10\n random_letters = letters.take(np.random.randint(0, 26, N))\n df = DataFrame(\n {\n \"floats\": N / 10 * Series(np.random.random(N)),\n \"letters\": Series(random_letters),\n }\n )\n return df\n\n\[email protected](\"whitelist\", [df_whitelist, s_whitelist])\ndef test_groupby_whitelist(df_letters, whitelist):\n df = df_letters\n if whitelist == df_whitelist:\n # dataframe\n obj = df_letters\n else:\n obj = df_letters[\"floats\"]\n\n gb = obj.groupby(df.letters)\n\n assert set(whitelist) == set(gb._apply_whitelist)\n\n\ndef check_whitelist(obj, df, m):\n # check the obj for a particular whitelist m\n\n gb = obj.groupby(df.letters)\n\n f = getattr(type(gb), m)\n\n # name\n try:\n n = f.__name__\n except AttributeError:\n return\n assert n == m\n\n # qualname\n try:\n n = f.__qualname__\n except AttributeError:\n return\n assert n.endswith(m)\n\n\ndef test_groupby_series_whitelist(df_letters, s_whitelist_fixture):\n m = s_whitelist_fixture\n df = df_letters\n check_whitelist(df.letters, df, m)\n\n\ndef test_groupby_frame_whitelist(df_letters, df_whitelist_fixture):\n m = df_whitelist_fixture\n df = df_letters\n check_whitelist(df, df, m)\n\n\[email protected]\ndef raw_frame():\n index = MultiIndex(\n levels=[[\"foo\", \"bar\", \"baz\", \"qux\"], [\"one\", \"two\", \"three\"]],\n codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=[\"first\", \"second\"],\n )\n raw_frame = DataFrame(\n np.random.randn(10, 3), index=index, columns=Index([\"A\", \"B\", \"C\"], name=\"exp\")\n )\n raw_frame.iloc[1, [1, 2]] = np.nan\n raw_frame.iloc[7, [0, 1]] = np.nan\n 
return raw_frame\n\n\[email protected](\"op\", AGG_FUNCTIONS)\[email protected](\"level\", [0, 1])\[email protected](\"axis\", [0, 1])\[email protected](\"skipna\", [True, False])\[email protected](\"sort\", [True, False])\ndef test_regression_whitelist_methods(raw_frame, op, level, axis, skipna, sort):\n # GH6944\n # GH 17537\n # explicitly test the whitelist methods\n\n if axis == 0:\n frame = raw_frame\n else:\n frame = raw_frame.T\n\n if op in AGG_FUNCTIONS_WITH_SKIPNA:\n grouped = frame.groupby(level=level, axis=axis, sort=sort)\n result = getattr(grouped, op)(skipna=skipna)\n expected = getattr(frame, op)(level=level, axis=axis, skipna=skipna)\n if sort:\n expected = expected.sort_index(axis=axis, level=level)\n tm.assert_frame_equal(result, expected)\n else:\n grouped = frame.groupby(level=level, axis=axis, sort=sort)\n result = getattr(grouped, op)()\n expected = getattr(frame, op)(level=level, axis=axis)\n if sort:\n expected = expected.sort_index(axis=axis, level=level)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_blacklist(df_letters):\n df = df_letters\n s = df_letters.floats\n\n blacklist = [\n \"eval\",\n \"query\",\n \"abs\",\n \"where\",\n \"mask\",\n \"align\",\n \"groupby\",\n \"clip\",\n \"astype\",\n \"at\",\n \"combine\",\n \"consolidate\",\n \"convert_objects\",\n ]\n to_methods = [method for method in dir(df) if method.startswith(\"to_\")]\n\n blacklist.extend(to_methods)\n\n # e.g., to_csv\n defined_but_not_allowed = (\n \"(?:^Cannot.+{0!r}.+{1!r}.+try using the \" \"'apply' method$)\"\n )\n\n # e.g., query, eval\n not_defined = \"(?:^{1!r} object has no attribute {0!r}$)\"\n fmt = defined_but_not_allowed + \"|\" + not_defined\n for bl in blacklist:\n for obj in (df, s):\n gb = obj.groupby(df.letters)\n msg = fmt.format(bl, type(gb).__name__)\n with pytest.raises(AttributeError, match=msg):\n getattr(gb, bl)\n\n\ndef test_tab_completion(mframe):\n grp = mframe.groupby(level=\"second\")\n results = {v for v in dir(grp) if not v.startswith(\"_\")}\n expected = {\n \"A\",\n \"B\",\n \"C\",\n \"agg\",\n \"aggregate\",\n \"apply\",\n \"boxplot\",\n \"filter\",\n \"first\",\n \"get_group\",\n \"groups\",\n \"hist\",\n \"indices\",\n \"last\",\n \"max\",\n \"mean\",\n \"median\",\n \"min\",\n \"ngroups\",\n \"nth\",\n \"ohlc\",\n \"plot\",\n \"prod\",\n \"size\",\n \"std\",\n \"sum\",\n \"transform\",\n \"var\",\n \"sem\",\n \"count\",\n \"nunique\",\n \"head\",\n \"describe\",\n \"cummax\",\n \"quantile\",\n \"rank\",\n \"cumprod\",\n \"tail\",\n \"resample\",\n \"cummin\",\n \"fillna\",\n \"cumsum\",\n \"cumcount\",\n \"ngroup\",\n \"all\",\n \"shift\",\n \"skew\",\n \"take\",\n \"tshift\",\n \"pct_change\",\n \"any\",\n \"mad\",\n \"corr\",\n \"corrwith\",\n \"cov\",\n \"dtypes\",\n \"ndim\",\n \"diff\",\n \"idxmax\",\n \"idxmin\",\n \"ffill\",\n \"bfill\",\n \"pad\",\n \"backfill\",\n \"rolling\",\n \"expanding\",\n \"pipe\",\n }\n assert results == expected\n\n\ndef test_groupby_function_rename(mframe):\n grp = mframe.groupby(level=\"second\")\n for name in [\"sum\", \"prod\", \"min\", \"max\", \"first\", \"last\"]:\n f = getattr(grp, name)\n assert f.__name__ == name\n\n\ndef test_groupby_selection_with_methods(df):\n # some methods which require DatetimeIndex\n rng = date_range(\"2014\", periods=len(df))\n df.index = rng\n\n g = df.groupby([\"A\"])[[\"C\"]]\n g_exp = df[[\"C\"]].groupby(df[\"A\"])\n # TODO check groupby with > 1 col ?\n\n # methods which are called as .foo()\n methods = [\n \"count\",\n \"corr\",\n \"cummax\",\n \"cummin\",\n 
\"cumprod\",\n \"describe\",\n \"rank\",\n \"quantile\",\n \"diff\",\n \"shift\",\n \"all\",\n \"any\",\n \"idxmin\",\n \"idxmax\",\n \"ffill\",\n \"bfill\",\n \"pct_change\",\n \"tshift\",\n ]\n\n for m in methods:\n res = getattr(g, m)()\n exp = getattr(g_exp, m)()\n\n # should always be frames!\n tm.assert_frame_equal(res, exp)\n\n # methods which aren't just .foo()\n tm.assert_frame_equal(g.fillna(0), g_exp.fillna(0))\n tm.assert_frame_equal(g.dtypes, g_exp.dtypes)\n tm.assert_frame_equal(g.apply(lambda x: x.sum()), g_exp.apply(lambda x: x.sum()))\n\n tm.assert_frame_equal(g.resample(\"D\").mean(), g_exp.resample(\"D\").mean())\n tm.assert_frame_equal(g.resample(\"D\").ohlc(), g_exp.resample(\"D\").ohlc())\n\n tm.assert_frame_equal(\n g.filter(lambda x: len(x) == 3), g_exp.filter(lambda x: len(x) == 3)\n )\n" ]
[ [ "pandas.Index", "pandas.util.testing.assert_frame_equal", "numpy.random.randn", "numpy.random.randint", "pandas.MultiIndex", "pandas.Series", "numpy.random.random" ] ]
xuan-wang/funcat
[ "c4b184942564ab8a4092acb4907ab069fc44683c" ]
[ "funcat/data/crypto_backend.py" ]
[ "# -*- coding: utf-8 -*-\n#\n\nfrom cached_property import cached_property\n\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport time\nimport os\n\nfrom .okex.spot_api import SpotAPI\nfrom pandas.core.frame import DataFrame\n\nfrom .backend import DataBackend\nfrom ..utils import lru_cache, get_str_date_from_int, get_int_date\n\n\nclass CryptoBackend(DataBackend):\n def __init__(self, api_key, seceret_key, passphrase, freq, url='wss://real.okex.com:8443/ws/v3'):\n self.api_key = api_key\n self.seceret_key = seceret_key\n self.passphrase = passphrase\n self.freq = int(freq)\n\n @cached_property\n def ts(self):\n try:\n import tushare as ts\n return ts\n except ImportError:\n print(\"-\" * 50)\n print(\">>> Missing tushare. Please run `pip install tushare`\")\n print(\"-\" * 50)\n raise\n\n @lru_cache(maxsize=4096)\n def get_price(self, ts_code, start, end, freq):\n tmp_end = end\n if len(str(start)) == 14:\n start = datetime.datetime.strptime(str(start), \"%Y%m%d%H%M%S\").strftime(\"%Y-%m-%dT%H:%M:%S\")\n else:\n start = get_str_date_from_int(start)+'T00:00:00'\n\n if len(str(end)) == 14:\n end = datetime.datetime.strptime(str(end), \"%Y%m%d%H%M%S\").strftime(\"%Y-%m-%dT%H:%M:%S\")\n else:\n end = get_str_date_from_int(end)+'T23:59:59'\n\n nt = datetime.datetime.now().strftime('%Y-%m-%d') + 'T23:59:59'\n filename = ts_code + '-' + freq + '-' + end + '.csv'\n update_to_date = ts_code + '-' + freq + '-' + nt + '.csv'\n if os.path.isfile(update_to_date) and end <= nt:\n data = pd.read_csv(update_to_date)\n data = data.loc[data['datetime'] <= tmp_end]\n self.data = data.to_records()\n return self.data\n\n sp = SpotAPI(self.api_key, self.seceret_key, self.passphrase, use_server_time=True)\n data0 = sp.get_kline(ts_code, start+'.000Z', end+'.000Z', freq)\n\n data0 = DataFrame(data0)\n pd_list = [data0]\n for i in range(5):\n e1 = (datetime.datetime.strptime(data0.iloc[-1, 0].replace('T', ' ')[:-5], '%Y-%m-%d %H:%M:%S') + datetime.timedelta(seconds=-int(freq))).strftime('%Y-%m-%dT%H:%M:%S')\n data1 = sp.get_kline(ts_code, start+'.000Z', e1+'.000Z', freq)\n data1 = DataFrame(data1)\n data1.reset_index(drop=True, inplace=True)\n pd_list.append(data1)\n data0 = data1\n \n data = pd.concat(pd_list, axis=0, ignore_index=True)\n data.rename(columns={0:'trade_time', 1:'open', 2:'high', 3:'low', 4:'close', 5:'vol'}, inplace=True)\n data = data.sort_index(ascending=False)\n\n if freq != '86400':\n data[\"datetime\"] = data.apply(\n lambda row: int(row['trade_time'].replace(\"T\", \" \").split(\" \")[0].replace(\"-\", \"\")) * 1000000 + int(row[\"trade_time\"].replace(\"T\", \" \").split(\" \")[1][:-5].replace(\":\", \"\")), axis=1)\n else:\n data[\"datetime\"] = data[\"trade_time\"].apply(lambda x: int(x.replace(\"T\", \" \").split(\" \")[0].replace(\"-\", \"\")) * 1000000)\n\n data.to_csv(update_to_date, index=False)\n self.data = data.to_records()\n return self.data\n \n def get_order_book_id_list(self):\n \"\"\"获取所有的股票代码列表\n \"\"\"\n pass\n\n def get_trading_dates(self, start, end):\n \"\"\"获取所有的交易日\n\n :param start: 20160101\n :param end: 20160201\n \"\"\"\n if len(str(end)) == 14:\n start = datetime.datetime.strptime(str(end), \"%Y%m%d%H%M%S\") + datetime.timedelta(seconds=-self.freq*200)\n end = datetime.datetime.strptime(str(end), \"%Y%m%d%H%M%S\")\n else:\n end = get_str_date_from_int(end)+'T23:59:59'\n\n date_list = []\n be = start.strftime(\"%Y%m%d%H%M%S\")\n en = end.strftime(\"%Y%m%d%H%M%S\")\n while be <= en:\n date_list.append(int(be))\n be = 
datetime.datetime.strptime(str(be), \"%Y%m%d%H%M%S\") + datetime.timedelta(seconds=self.freq)\n be = be.strftime(\"%Y%m%d%H%M%S\")\n\n return date_list\n\n\n def symbol(self, order_book_id):\n \"\"\"获取order_book_id对应的名字\n :param order_book_id str: 股票代码\n :returns: 名字\n :rtype: str\n \"\"\"\n pass\n" ]
[ [ "pandas.core.frame.DataFrame", "pandas.read_csv", "pandas.concat" ] ]
murilopontes/aditof_sdk
[ "c67fc31d52971b8652e39da50d66260fa66f71c7" ]
[ "tools/calibration-96tof1/cal_eeprom/cal_map.py" ]
[ "#\n# BSD 3-Clause License\n#\n# Copyright (c) 2019, Analog Devices, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\nimport pandas as pd\nimport os\nimport re\nfrom natsort import natsorted, ns\nimport sys\nimport struct\nfrom collections import namedtuple\nimport firmware_gen as lf\nimport cal_map_consts\nimport logging\n\n'''\nClass for managing the calibration map\nConsist functions to:\n generate calibration map\n store calibration map binary to file\n read calibration map from binary file\n parse binary back to calibration map\n display calibration map\n---------------------------------\n'''\n\n\ndef setup_logging():\n with open('./../logger.json', 'r') as f:\n config = json.load(f)\n logging.config.dictConfig(config)\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass cal_map(object):\n\n def __init__(self):\n setup_logging()\n self.calibration_map = {}\n header_packet = {\n TOTAL_SIZE: self.param_struct([8]),\n CHECKSUM: self.param_struct([8])\n }\n self.calibration_map = {\n HEADER: [self.get_packet_size(header_packet), header_packet],\n }\n\n # calculates size of value and returns list[size, value]\n def param_struct(self, param_value):\n size = len(param_value) * 4 # len * 4(each element is float)\n param_value = [int(size), [float(i) for i in param_value]]\n return param_value\n\n # calculates and returns size of packet\n def get_packet_size(self, packet):\n packet_size = 0\n for nested_key, nested_value in packet.items():\n param_size, param_value = nested_value\n # added size 8 for key and size of each parameter\n packet_size = packet_size + param_size + 8\n return int(packet_size)\n\n # calculates and returns size of map\n def get_map_size(self):\n map_size = 0\n for key, list_params in self.calibration_map.items():\n size, nested_dict = list_params\n map_size = map_size + size\n # Size of each key(4) and packet size(4) is added(4+4=8)\n map_size = map_size + 8\n return map_size\n\n def update_packet_checksum(self, packet):\n checksum = 0\n for nested_key, nested_value in packet.items():\n param_size, param_value 
= nested_value\n for i in range(int(param_size/4)):\n checksum = int(checksum) ^ int(param_value[i])\n packet[CHECKSUM] = self.param_struct([checksum])\n\n def update_map_header(self):\n # Update Header Total Size\n total_size = self.get_map_size()\n self.calibration_map[HEADER][VALUE][TOTAL_SIZE] = self.param_struct([\n total_size])\n # Update Header Checksum\n self.update_packet_checksum(self.calibration_map[HEADER][VALUE])\n\n # Generates Default Dictionary\n def init_default_cal_map(self):\n header_packet = {\n EEPROM_VERSION: self.param_struct([0]),\n TOTAL_SIZE: self.param_struct([1000]),\n NUMBER_OF_MODES: self.param_struct([3]),\n }\n self.update_packet_checksum(header_packet)\n\n camera_intrinsic_packet = {\n EEPROM_VERSION: self.param_struct([0]),\n CAL_SER_NUM: self.param_struct([0]),\n CAL_DATE: self.param_struct([12042019]),\n INTRINSIC: self.param_struct([0, 0, 0, 0, 0, 0, 0, 0, 0])\n }\n self.update_packet_checksum(camera_intrinsic_packet)\n\n self.calibration_map = {\n HEADER: [self.get_packet_size(header_packet), header_packet],\n CAMERA_INTRINSIC: [self.get_packet_size(\n camera_intrinsic_packet), camera_intrinsic_packet]\n }\n # Update Header\n self.update_map_header()\n\n # Parses through dictionary and prints the key and value\n\n def display_cal_map(self):\n # Printing just the value of Calibration Dictionary\n for key, list_params in self.calibration_map.items():\n # print the primary key (for Packet Type)\n print(\"Packet Key: \", (key), end=\"\")\n size, nested_dict = list_params\n print(\"\\tPacket Size: \", size) # print the size of pimary packet\n for nested_key, nested_value in nested_dict.items():\n # print the nested key (Parameter key)\n print(\"\\tParam Key: \", nested_key, end=\"\")\n param_size, param_value = nested_value\n # print the size of Param\n print(\"\\tParam Size: \", param_size, end=\"\")\n value = []\n for i in range(int(param_size/4)):\n value.append(param_value[i])\n print(\"\\tParam Value: \", value) # print the value of Param\n\n # Generates the binary file for writing to EEPROM\n\n def save_cal_map(self, filename):\n # writing float values\n f = open(filename, \"wb\")\n f.write(struct.pack('<f', self.get_map_size()))\n for key, list_params in self.calibration_map.items():\n # write the primary key (for Packet Type)\n f.write(struct.pack('<f', key))\n struct.pack('<f', key)\n size, nested_dict = list_params\n # write the size of pimary packet size\n f.write(struct.pack('<f', size))\n for nested_key, nested_value in nested_dict.items():\n # write the nested key (Parameter key)\n f.write(struct.pack('<f', nested_key))\n param_size, param_value = nested_value\n # write the size of Param\n f.write(struct.pack('<f', param_size))\n for i in range(int(param_size/4)):\n # write the value of Param\n f.write(struct.pack('<f', param_value[i]))\n f.close()\n\n '''Reads the binary file and parses it back to map,\n replaces the value if already exist'''\n\n def read_cal_map(self, filename):\n # open the file\n with open(filename, \"rb\") as f:\n size = f.read(4)\n while True:\n key = f.read(4)\n if not key:\n break\n key = struct.unpack('<f', key)\n key = int(key[0])\n sub_packet_size = struct.unpack('<f', f.read(4))\n sub_packet_size = int(sub_packet_size[0])\n sub_packet_map = {}\n i = 0\n while i < (sub_packet_size/4): # 4:size of float\n sub_packet_value = struct.unpack('<f', f.read(4))\n sub_packet_value = int(sub_packet_value[0])\n i = i + 1\n parameter_key = sub_packet_value\n\n sub_packet_value = struct.unpack('<f', f.read(4))\n sub_packet_value 
= int(sub_packet_value[0])\n i = i + 1\n parameter_size = sub_packet_value\n\n number_of_elements = int(\n parameter_size/4) # 4:size of float\n\n value = []\n for j in range(number_of_elements):\n sub_packet_value = struct.unpack('<f', f.read(4))\n value.append(sub_packet_value[0])\n i = i + 1\n sub_packet_map.update(\n {parameter_key: [parameter_size, value]})\n self.calibration_map[key] = [sub_packet_size, sub_packet_map]\n f.close()\n\n # Add Load files to map, if existing map consist load files, it overwrites it, otherwise adds it\n def add_load_files_to_map(self, packet_type, lf_path):\n lf_map = {}\n lf_list = []\n file_list = natsorted(os.listdir(\n \"./\"+lf_path+\"/\"), alg=ns.IGNORECASE)[:13]\n logger.debug(file_list)\n for file_name in file_list:\n if file_name.endswith(\".lf\"):\n addr, data, mode_locations = lf.extract_code_block(\n \"./\"+lf_path+\"/\"+file_name)\n for i in range(len(addr)):\n lf_list.append(addr[i])\n lf_list.append(data[i])\n logger.debug(\"Parsed File\", file_name,\n \"\\n\", lf_list)\n\n lf_map[ADDR_DATA_LIST] = self.param_struct(lf_list)\n self.update_packet_checksum(lf_map)\n self.calibration_map[packet_type] = [\n self.get_packet_size(lf_map), lf_map]\n # Update Header\n self.update_map_header()\n\n def add_linear_offset_csv_to_map(self, packet_type, linear_offset_csv_file):\n linear_df = pd.read_csv(linear_offset_csv_file)\n linear_correct_offset_list = (linear_df.to_dict(\n orient='list')[\"reg_offset_value_hex\"])\n linear_correct_xpwr_list = (\n linear_df.to_dict(orient='list')[\"xcorr\"][1:])\n linear_map = {}\n linear_map[LINEAR_CORRECT_OFFSET] = self.param_struct(\n [int(i, 16) for i in linear_correct_offset_list])\n linear_map[LINEAR_CORRECT_XPWR] = self.param_struct(\n linear_correct_xpwr_list)\n self.calibration_map[packet_type] = [\n self.get_packet_size(linear_map), linear_map]\n # Update Header\n self. 
update_map_header()\n\n def add_json_to_map(self, packet_type, json_file):\n with open(json_file, 'r') as f:\n json_read = json.load(f)\n json_map = {}\n for key, value in json_read.items():\n for sub_key, sub_value in json_read[key].items():\n if(type(sub_value) is list):\n json_map[int(sub_key)] = self.param_struct(sub_value)\n else:\n json_map[int(sub_key)] = self.param_struct([sub_value])\n self.update_packet_checksum(json_map)\n self.calibration_map[packet_type] = [\n self.get_packet_size(json_map), json_map]\n self.update_map_header()\n\n # Function to replace calibration mode block\n def replace_eeprom_mode(self, mode, linear_cal_json_file, load_file_path):\n self.add_json_to_map((get_cal_key(mode)), linear_cal_json_file)\n self.add_load_files_to_map((get_lf_key(mode)), load_file_path)\n\n def write_eeprom_cal_map(self, eeprom):\n logger.debug(\"\\n\\nWriting EEPROM\")\n eeprom_write_bytearray = bytes()\n for key, list_params in self.calibration_map.items():\n eeprom_write_bytearray = eeprom_write_bytearray + \\\n (struct.pack('<f', key)) # write the primary key (for Packet Type)\n struct.pack('<f', key)\n size, nested_dict = list_params\n eeprom_write_bytearray = eeprom_write_bytearray + \\\n (struct.pack('<f', size)) # write the size of pimary packet size\n for nested_key, nested_value in nested_dict.items():\n eeprom_write_bytearray = eeprom_write_bytearray + \\\n (struct.pack('<f', nested_key)\n ) # write the nested key (Parameter key)\n param_size, param_value = nested_value\n eeprom_write_bytearray = eeprom_write_bytearray + \\\n (struct.pack('<f', param_size)) # write the size of Param\n for i in range(int(param_size/4)):\n eeprom_write_bytearray = eeprom_write_bytearray + \\\n (struct.pack(\n '<f', param_value[i])) # write the value of Param\n eeprom_map_size = [self.get_map_size()]\n\n eeprom_write_list = []\n size = eeprom_write_bytearray.__len__()\n for index in range(0, size):\n eeprom_write_list.append(eeprom_write_bytearray[index])\n logger.debug(\"EEPROM WRITE List\\n\", eeprom_write_list)\n\n size_list = []\n size_byte = bytes()\n size_byte = struct.pack('<f', size)\n for index in range(0, 4):\n size_list.append(size_byte[index])\n eeprom.write(int(0), np.array(size_list, dtype='uint8'), 4)\n eeprom.write(int(4), np.array(eeprom_write_list,\n dtype='uint8'), eeprom_write_list.__len__())\n\n def read_eeprom_cal_map(self, eeprom):\n logger.debug(\"Reading EEPROM\")\n data_array = np.zeros(4, dtype='uint8')\n eeprom.read(int(0), data_array, 4)\n read_size = struct.unpack('<f', data_array)\n logger.debug(\"Read Size\", read_size)\n\n data_array = np.zeros(int(read_size[0]), dtype='uint8')\n eeprom.read(int(4), data_array, int(read_size[0]))\n r_b = data_array.tobytes()\n\n j = 0\n while j < r_b.__len__():\n key = r_b[j:j+4]\n j = j+4\n if not key:\n break\n key = struct.unpack('<f', key)\n key = int(key[0])\n logger.debug(\"Primary Key\", key)\n sub_packet_size = struct.unpack('<f', r_b[j:j+4])\n j = j+4\n sub_packet_size = int(sub_packet_size[0])\n logger.debug(\"Sub Size\", sub_packet_size)\n sub_packet_map = {}\n i = 0\n while i < (sub_packet_size/4): # 4:size of float\n sub_packet_value = struct.unpack('<f', r_b[j:j+4])\n j = j+4\n sub_packet_value = int(sub_packet_value[0])\n i = i + 1\n parameter_key = sub_packet_value\n logger.debug(\"Param Key\", parameter_key)\n\n sub_packet_value = struct.unpack('<f', r_b[j:j+4])\n j = j+4\n sub_packet_value = int(sub_packet_value[0])\n i = i + 1\n parameter_size = sub_packet_value\n logger.debug(\"Param Size\", 
parameter_size)\n\n number_of_elements = int(parameter_size/4) # 4:size of float\n logger.debug(\"Number of elements\", number_of_elements)\n value = []\n for k in range(number_of_elements):\n logger.debug(r_b[j:j+4])\n sub_packet_value = struct.unpack('<f', r_b[j:j+4])\n j = j+4\n value.append(sub_packet_value[0])\n i = i + 1\n sub_packet_map.update({parameter_key: [parameter_size, value]})\n self.calibration_map[key] = [sub_packet_size, sub_packet_map]\n" ]
[ [ "pandas.read_csv" ] ]
Olllom/openmmtools
[ "8ca9fb07627f889e531e9d6f0d52c2f81a33a8e1" ]
[ "openmmtools/tests/test_alchemy.py" ]
[ "#!/usr/bin/python\n\n# =============================================================================\n# MODULE DOCSTRING\n# =============================================================================\n\n\"\"\"\nTests for alchemical factory in `alchemy.py`.\n\n\"\"\"\n\n\n# =============================================================================\n# GLOBAL IMPORTS\n# =============================================================================\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport zlib\nimport pickle\nimport itertools\nfrom functools import partial\n\nimport nose\nimport scipy\nfrom nose.plugins.attrib import attr\n\nfrom openmmtools import testsystems, forces\nfrom openmmtools.constants import kB\nfrom openmmtools.alchemy import *\n\nlogger = logging.getLogger(__name__)\n\n\n# =============================================================================\n# CONSTANTS\n# =============================================================================\n\ntemperature = 300.0 * unit.kelvin # reference temperature\n# MAX_DELTA = 0.01 * kB * temperature # maximum allowable deviation\nMAX_DELTA = 1.0 * kB * temperature # maximum allowable deviation\nGLOBAL_ENERGY_UNIT = unit.kilojoules_per_mole # controls printed units\nGLOBAL_ALCHEMY_PLATFORM = None # This is used in every energy calculation.\n# GLOBAL_ALCHEMY_PLATFORM = openmm.Platform.getPlatformByName('OpenCL') # DEBUG: Use OpenCL over CPU platform for testing since OpenCL is deterministic, while CPU is not\n\n\n# =============================================================================\n# TESTING UTILITIES\n# =============================================================================\n\ndef create_context(system, integrator, platform=None):\n \"\"\"Create a Context.\n\n If platform is None, GLOBAL_ALCHEMY_PLATFORM is used.\n\n \"\"\"\n if platform is None:\n platform = GLOBAL_ALCHEMY_PLATFORM\n if platform is not None:\n context = openmm.Context(system, integrator, platform)\n else:\n context = openmm.Context(system, integrator)\n return context\n\n\ndef compute_energy(system, positions, platform=None, force_group=-1):\n \"\"\"Compute energy of the system in the given positions.\n\n Parameters\n ----------\n platform : simtk.openmm.Platform or None, optional\n If None, the global GLOBAL_ALCHEMY_PLATFORM will be used.\n force_group : int flag or set of int, optional\n Passed to the groups argument of Context.getState().\n\n \"\"\"\n timestep = 1.0 * unit.femtoseconds\n integrator = openmm.VerletIntegrator(timestep)\n context = create_context(system, integrator, platform)\n context.setPositions(positions)\n state = context.getState(getEnergy=True, groups=force_group)\n potential = state.getPotentialEnergy()\n del context, integrator, state\n return potential\n\n\ndef minimize(system, positions, platform=None, tolerance=1.0*unit.kilocalories_per_mole/unit.angstroms, maxIterations=50):\n \"\"\"Minimize the energy of the given system.\n\n Parameters\n ----------\n platform : simtk.openmm.Platform or None, optional\n If None, the global GLOBAL_ALCHEMY_PLATFORM will be used.\n tolerance : simtk.unit.Quantity with units compatible with energy/distance, optional, default = 1*kilocalories_per_mole/angstroms\n Minimization tolerance\n maxIterations : int, optional, default=50\n Maximum number of iterations for minimization\n\n Returns\n -------\n minimized_positions : simtk.openmm.Quantity with shape [nparticle,3] with units compatible with distance\n The energy-minimized positions.\n\n \"\"\"\n timestep = 
1.0 * unit.femtoseconds\n integrator = openmm.VerletIntegrator(timestep)\n context = create_context(system, integrator, platform)\n context.setPositions(positions)\n openmm.LocalEnergyMinimizer.minimize(context, tolerance, maxIterations)\n minimized_positions = context.getState(getPositions=True).getPositions(asNumpy=True)\n del context, integrator\n return minimized_positions\n\n\ndef compute_force_energy(system, positions, force_name):\n \"\"\"Compute the energy of the force with the given name.\"\"\"\n system = copy.deepcopy(system) # Copy to avoid modifications\n force_name_index = 1\n found_force = False\n\n # Separate force group of force_name from all others.\n for force in system.getForces():\n if force.__class__.__name__ == force_name:\n force.setForceGroup(force_name_index)\n found_force = True\n else:\n force.setForceGroup(0)\n\n if not found_force:\n return None\n\n force_energy = compute_energy(system, positions, force_group=2**force_name_index)\n del system\n return force_energy\n\n\ndef assert_almost_equal(energy1, energy2, err_msg):\n delta = energy1 - energy2\n err_msg += ' interactions do not match! Reference {}, alchemical {},' \\\n ' difference {}'.format(energy1, energy2, delta)\n assert abs(delta) < MAX_DELTA, err_msg\n\n\ndef turn_off_nonbonded(system, sterics=False, electrostatics=False,\n exceptions=False, only_atoms=frozenset()):\n \"\"\"Turn off sterics and/or electrostatics interactions.\n\n This affects only NonbondedForce and non-alchemical CustomNonbondedForces.\n\n If `exceptions` is True, only the exceptions are turned off.\n Support also system that have gone through replace_reaction_field.\n The `system` must have only nonbonded forces.\n If `only_atoms` is specified, only the those atoms will be turned off.\n\n \"\"\"\n if len(only_atoms) == 0: # if empty, turn off all particles\n only_atoms = set(range(system.getNumParticles()))\n epsilon_coeff = 0.0 if sterics else 1.0\n charge_coeff = 0.0 if electrostatics else 1.0\n\n if exceptions: # Turn off exceptions\n force_idx, nonbonded_force = forces.find_forces(system, openmm.NonbondedForce, only_one=True)\n\n # Exceptions.\n for exception_index in range(nonbonded_force.getNumExceptions()):\n iatom, jatom, charge, sigma, epsilon = nonbonded_force.getExceptionParameters(exception_index)\n if iatom in only_atoms or jatom in only_atoms:\n nonbonded_force.setExceptionParameters(exception_index, iatom, jatom,\n charge_coeff*charge, sigma, epsilon_coeff*epsilon)\n\n # Offset exceptions.\n for offset_index in range(nonbonded_force.getNumExceptionParameterOffsets()):\n (parameter, exception_index, chargeprod_scale,\n sigma_scale, epsilon_scale) = nonbonded_force.getExceptionParameterOffset(offset_index)\n iatom, jatom, _, _, _ = nonbonded_force.getExceptionParameters(exception_index)\n if iatom in only_atoms or jatom in only_atoms:\n nonbonded_force.setExceptionParameterOffset(offset_index, parameter, exception_index,\n charge_coeff*chargeprod_scale, sigma_scale,\n epsilon_coeff*epsilon_scale)\n\n else:\n # Turn off particle interactions\n for force in system.getForces():\n # Handle only a Nonbonded and a CustomNonbonded (for RF).\n if not (isinstance(force, openmm.CustomNonbondedForce) and 'lambda' not in force.getEnergyFunction() or\n isinstance(force, openmm.NonbondedForce)):\n continue\n\n # Particle interactions.\n for particle_index in range(force.getNumParticles()):\n if particle_index in only_atoms:\n # Convert tuple parameters to list to allow changes.\n parameters = 
list(force.getParticleParameters(particle_index))\n parameters[0] *= charge_coeff # charge\n try: # CustomNonbondedForce\n force.setParticleParameters(particle_index, parameters)\n except TypeError: # NonbondedForce\n parameters[2] *= epsilon_coeff # epsilon\n force.setParticleParameters(particle_index, *parameters)\n\n # Offset particle interactions.\n if isinstance(force, openmm.NonbondedForce):\n for offset_index in range(force.getNumParticleParameterOffsets()):\n (parameter, particle_index, charge_scale,\n sigma_scale, epsilon_scale) = force.getParticleParameterOffset(offset_index)\n if particle_index in only_atoms:\n force.setParticleParameterOffset(offset_index, parameter, particle_index,\n charge_coeff*charge_scale, sigma_scale,\n epsilon_coeff*epsilon_scale)\n\n\ndef dissect_nonbonded_energy(reference_system, positions, alchemical_atoms, other_alchemical_atoms):\n \"\"\"Dissect the nonbonded energy contributions of the reference system\n by atom group and sterics/electrostatics.\n\n This works also for systems objects whose CutoffPeriodic force\n has been replaced by a CustomNonbondedForce to set c_rf = 0.\n\n Parameters\n ----------\n reference_system : simtk.openmm.System\n The reference system with the NonbondedForce to dissect.\n positions : simtk.openmm.unit.Quantity of dimension [nparticles,3] with units compatible with Angstroms\n The positions to test.\n alchemical_atoms : set of int\n The indices of the alchemical atoms.\n other_alchemical_atoms : set of int\n The indices of the alchemical atoms in other alchemical regions\n\n Returns\n -------\n tuple of simtk.openmm.unit.Quantity with units compatible with kJ/mol\n All contributions to the potential energy of NonbondedForce in the order:\n nn_particle_sterics: particle sterics interactions between nonalchemical atoms\n aa_particle_sterics: particle sterics interactions between alchemical atoms\n na_particle_sterics: particle sterics interactions between nonalchemical-alchemical atoms\n nn_particle_electro: (direct space) particle electrostatics interactions between nonalchemical atoms\n aa_particle_electro: (direct space) particle electrostatics interactions between alchemical atoms\n na_particle_electro: (direct space) particle electrostatics interactions between nonalchemical-alchemical atoms\n nn_exception_sterics: particle sterics 1,4 exceptions between nonalchemical atoms\n aa_exception_sterics: particle sterics 1,4 exceptions between alchemical atoms\n na_exception_sterics: particle sterics 1,4 exceptions between nonalchemical-alchemical atoms\n nn_exception_electro: particle electrostatics 1,4 exceptions between nonalchemical atoms\n aa_exception_electro: particle electrostatics 1,4 exceptions between alchemical atoms\n na_exception_electro: particle electrostatics 1,4 exceptions between nonalchemical-alchemical atoms\n nn_reciprocal_energy: electrostatics of reciprocal space between nonalchemical atoms\n aa_reciprocal_energy: electrostatics of reciprocal space between alchemical atoms\n na_reciprocal_energy: electrostatics of reciprocal space between nonalchemical-alchemical atoms\n\n \"\"\"\n all_alchemical_atoms = set(alchemical_atoms).union(other_alchemical_atoms)\n nonalchemical_atoms = set(range(reference_system.getNumParticles())).difference(all_alchemical_atoms)\n\n # Remove all forces but NonbondedForce and eventually the\n # CustomNonbondedForce used to model reaction field.\n reference_system = copy.deepcopy(reference_system) # don't modify original system\n forces_to_remove = list()\n for force_index, 
force in enumerate(reference_system.getForces()):\n force.setForceGroup(0)\n if isinstance(force, openmm.NonbondedForce):\n force.setReciprocalSpaceForceGroup(30) # separate PME reciprocal from direct space\n # We keep only CustomNonbondedForces that are not alchemically modified.\n elif not (isinstance(force, openmm.CustomNonbondedForce) and\n 'lambda' not in force.getEnergyFunction()):\n forces_to_remove.append(force_index)\n\n for force_index in reversed(forces_to_remove):\n reference_system.removeForce(force_index)\n assert len(reference_system.getForces()) <= 2\n\n # Compute particle interactions between different groups of atoms\n # ----------------------------------------------------------------\n # Turn off other alchemical regions\n if len(other_alchemical_atoms) > 0:\n turn_off_nonbonded(reference_system, sterics=True, electrostatics=True, only_atoms=other_alchemical_atoms)\n turn_off_nonbonded(reference_system, sterics=True, electrostatics=True, exceptions=True, only_atoms=other_alchemical_atoms)\n\n system = copy.deepcopy(reference_system)\n\n # Compute total energy from nonbonded interactions\n tot_energy = compute_energy(system, positions)\n tot_reciprocal_energy = compute_energy(system, positions, force_group={30})\n\n # Compute contributions from particle sterics\n turn_off_nonbonded(system, sterics=True, only_atoms=alchemical_atoms)\n tot_energy_no_alchem_particle_sterics = compute_energy(system, positions)\n system = copy.deepcopy(reference_system) # Restore alchemical sterics\n turn_off_nonbonded(system, sterics=True, only_atoms=nonalchemical_atoms)\n tot_energy_no_nonalchem_particle_sterics = compute_energy(system, positions)\n turn_off_nonbonded(system, sterics=True)\n tot_energy_no_particle_sterics = compute_energy(system, positions)\n\n tot_particle_sterics = tot_energy - tot_energy_no_particle_sterics\n nn_particle_sterics = tot_energy_no_alchem_particle_sterics - tot_energy_no_particle_sterics\n aa_particle_sterics = tot_energy_no_nonalchem_particle_sterics - tot_energy_no_particle_sterics\n na_particle_sterics = tot_particle_sterics - nn_particle_sterics - aa_particle_sterics\n\n # Compute contributions from particle electrostatics\n system = copy.deepcopy(reference_system) # Restore sterics\n turn_off_nonbonded(system, electrostatics=True, only_atoms=alchemical_atoms)\n tot_energy_no_alchem_particle_electro = compute_energy(system, positions)\n nn_reciprocal_energy = compute_energy(system, positions, force_group={30})\n system = copy.deepcopy(reference_system) # Restore alchemical electrostatics\n turn_off_nonbonded(system, electrostatics=True, only_atoms=nonalchemical_atoms)\n tot_energy_no_nonalchem_particle_electro = compute_energy(system, positions)\n aa_reciprocal_energy = compute_energy(system, positions, force_group={30})\n turn_off_nonbonded(system, electrostatics=True)\n tot_energy_no_particle_electro = compute_energy(system, positions)\n\n na_reciprocal_energy = tot_reciprocal_energy - nn_reciprocal_energy - aa_reciprocal_energy\n tot_particle_electro = tot_energy - tot_energy_no_particle_electro\n\n nn_particle_electro = tot_energy_no_alchem_particle_electro - tot_energy_no_particle_electro\n aa_particle_electro = tot_energy_no_nonalchem_particle_electro - tot_energy_no_particle_electro\n na_particle_electro = tot_particle_electro - nn_particle_electro - aa_particle_electro\n nn_particle_electro -= nn_reciprocal_energy\n aa_particle_electro -= aa_reciprocal_energy\n na_particle_electro -= na_reciprocal_energy\n\n # Compute exceptions between different 
groups of atoms\n # -----------------------------------------------------\n\n # Compute contributions from exceptions sterics\n system = copy.deepcopy(reference_system) # Restore particle interactions\n turn_off_nonbonded(system, sterics=True, exceptions=True, only_atoms=alchemical_atoms)\n tot_energy_no_alchem_exception_sterics = compute_energy(system, positions)\n system = copy.deepcopy(reference_system) # Restore alchemical sterics\n turn_off_nonbonded(system, sterics=True, exceptions=True, only_atoms=nonalchemical_atoms)\n tot_energy_no_nonalchem_exception_sterics = compute_energy(system, positions)\n turn_off_nonbonded(system, sterics=True, exceptions=True)\n tot_energy_no_exception_sterics = compute_energy(system, positions)\n\n tot_exception_sterics = tot_energy - tot_energy_no_exception_sterics\n nn_exception_sterics = tot_energy_no_alchem_exception_sterics - tot_energy_no_exception_sterics\n aa_exception_sterics = tot_energy_no_nonalchem_exception_sterics - tot_energy_no_exception_sterics\n na_exception_sterics = tot_exception_sterics - nn_exception_sterics - aa_exception_sterics\n\n # Compute contributions from exceptions electrostatics\n system = copy.deepcopy(reference_system) # Restore exceptions sterics\n turn_off_nonbonded(system, electrostatics=True, exceptions=True, only_atoms=alchemical_atoms)\n tot_energy_no_alchem_exception_electro = compute_energy(system, positions)\n system = copy.deepcopy(reference_system) # Restore alchemical electrostatics\n turn_off_nonbonded(system, electrostatics=True, exceptions=True, only_atoms=nonalchemical_atoms)\n tot_energy_no_nonalchem_exception_electro = compute_energy(system, positions)\n turn_off_nonbonded(system, electrostatics=True, exceptions=True)\n tot_energy_no_exception_electro = compute_energy(system, positions)\n\n tot_exception_electro = tot_energy - tot_energy_no_exception_electro\n nn_exception_electro = tot_energy_no_alchem_exception_electro - tot_energy_no_exception_electro\n aa_exception_electro = tot_energy_no_nonalchem_exception_electro - tot_energy_no_exception_electro\n na_exception_electro = tot_exception_electro - nn_exception_electro - aa_exception_electro\n\n assert tot_particle_sterics == nn_particle_sterics + aa_particle_sterics + na_particle_sterics\n assert_almost_equal(tot_particle_electro, nn_particle_electro + aa_particle_electro +\n na_particle_electro + nn_reciprocal_energy + aa_reciprocal_energy + na_reciprocal_energy,\n 'Inconsistency during dissection of nonbonded contributions:')\n assert tot_exception_sterics == nn_exception_sterics + aa_exception_sterics + na_exception_sterics\n assert tot_exception_electro == nn_exception_electro + aa_exception_electro + na_exception_electro\n assert_almost_equal(tot_energy, tot_particle_sterics + tot_particle_electro +\n tot_exception_sterics + tot_exception_electro,\n 'Inconsistency during dissection of nonbonded contributions:')\n\n return nn_particle_sterics, aa_particle_sterics, na_particle_sterics,\\\n nn_particle_electro, aa_particle_electro, na_particle_electro,\\\n nn_exception_sterics, aa_exception_sterics, na_exception_sterics,\\\n nn_exception_electro, aa_exception_electro, na_exception_electro,\\\n nn_reciprocal_energy, aa_reciprocal_energy, na_reciprocal_energy\n\n\ndef compute_direct_space_correction(nonbonded_force, alchemical_atoms, positions):\n \"\"\"\n Compute the correction added by OpenMM to the direct space to account for\n exception in reciprocal space energy.\n\n Parameters\n ----------\n nonbonded_force : simtk.openmm.NonbondedForce\n The 
nonbonded force to compute the direct space correction.\n alchemical_atoms : set\n Set of alchemical particles in the force.\n positions : numpy.array\n Position of the particles.\n\n Returns\n -------\n aa_correction : simtk.openmm.unit.Quantity with units compatible with kJ/mol\n The correction to the direct spaced caused by exceptions between alchemical atoms.\n na_correction : simtk.openmm.unit.Quantity with units compatible with kJ/mol\n The correction to the direct spaced caused by exceptions between nonalchemical-alchemical atoms.\n\n \"\"\"\n energy_unit = unit.kilojoule_per_mole\n aa_correction = 0.0\n na_correction = 0.0\n\n # Convert quantity positions into floats.\n if isinstance(positions, unit.Quantity):\n positions = positions.value_in_unit_system(unit.md_unit_system)\n\n # If there is no reciprocal space, the correction is 0.0\n if nonbonded_force.getNonbondedMethod() not in [openmm.NonbondedForce.Ewald, openmm.NonbondedForce.PME]:\n return aa_correction * energy_unit, na_correction * energy_unit\n\n # Get alpha ewald parameter\n alpha_ewald, _, _, _ = nonbonded_force.getPMEParameters()\n if alpha_ewald / alpha_ewald.unit == 0.0:\n cutoff_distance = nonbonded_force.getCutoffDistance()\n tolerance = nonbonded_force.getEwaldErrorTolerance()\n alpha_ewald = (1.0 / cutoff_distance) * np.sqrt(-np.log(2.0*tolerance))\n alpha_ewald = alpha_ewald.value_in_unit_system(unit.md_unit_system)\n assert alpha_ewald != 0.0\n\n for exception_id in range(nonbonded_force.getNumExceptions()):\n # Get particles parameters in md unit system\n iatom, jatom, _, _, _ = nonbonded_force.getExceptionParameters(exception_id)\n icharge, _, _ = nonbonded_force.getParticleParameters(iatom)\n jcharge, _, _ = nonbonded_force.getParticleParameters(jatom)\n icharge = icharge.value_in_unit_system(unit.md_unit_system)\n jcharge = jcharge.value_in_unit_system(unit.md_unit_system)\n\n # Compute the correction and take care of numerical instabilities\n r = np.linalg.norm(positions[iatom] - positions[jatom]) # distance between atoms\n alpha_r = alpha_ewald * r\n if alpha_r > 1e-6:\n correction = ONE_4PI_EPS0 * icharge * jcharge * scipy.special.erf(alpha_r) / r\n else: # for small alpha_r we linearize erf()\n correction = ONE_4PI_EPS0 * alpha_ewald * icharge * jcharge * 2.0 / np.sqrt(np.pi)\n\n # Assign correction to correct group\n if iatom in alchemical_atoms and jatom in alchemical_atoms:\n aa_correction += correction\n elif iatom in alchemical_atoms or jatom in alchemical_atoms:\n na_correction += correction\n\n return aa_correction * energy_unit, na_correction * energy_unit\n\n\ndef is_alchemical_pme_treatment_exact(alchemical_system):\n \"\"\"Return True if the given alchemical system models PME exactly.\"\"\"\n # If exact PME is here, the NonbondedForce defines a\n # lambda_electrostatics variable.\n _, nonbonded_force = forces.find_forces(alchemical_system, openmm.NonbondedForce,\n only_one=True)\n for parameter_idx in range(nonbonded_force.getNumGlobalParameters()):\n parameter_name = nonbonded_force.getGlobalParameterName(parameter_idx)\n # With multiple alchemical regions, lambda_electrostatics might have a suffix.\n if parameter_name.startswith('lambda_electrostatics'):\n return True\n return False\n\n\n# =============================================================================\n# SUBROUTINES FOR TESTING\n# =============================================================================\n\ndef compare_system_energies(reference_system, alchemical_system, alchemical_regions, positions):\n \"\"\"Check that 
the energies of reference and alchemical systems are close.\n\n This takes care of ignoring the reciprocal space when the nonbonded\n method is an Ewald method.\n\n \"\"\"\n if not isinstance(alchemical_regions, list):\n alchemical_regions = [alchemical_regions]\n\n # Default we compare the energy of all groups.\n force_group = -1\n\n # Check nonbonded method. Comparing with PME is more complicated\n # because the alchemical system with direct-space treatment of PME\n # does not take into account the reciprocal space.\n force_idx, nonbonded_force = forces.find_forces(reference_system, openmm.NonbondedForce, only_one=True)\n nonbonded_method = nonbonded_force.getNonbondedMethod()\n is_direct_space_pme = (nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald] and\n not is_alchemical_pme_treatment_exact(alchemical_system))\n\n if is_direct_space_pme:\n # Separate the reciprocal space force in a different group.\n reference_system = copy.deepcopy(reference_system)\n alchemical_system = copy.deepcopy(alchemical_system)\n for system in [reference_system, alchemical_system]:\n for force in system.getForces():\n force.setForceGroup(0)\n if isinstance(force, openmm.NonbondedForce):\n force.setReciprocalSpaceForceGroup(31)\n\n # We compare only the direct space energy\n force_group = {0}\n\n # Compute the reciprocal space correction added to the direct space\n # energy due to the exceptions of the alchemical atoms.\n aa_correction = 0.0 * unit.kilojoule_per_mole\n na_correction = 0.0 * unit.kilojoule_per_mole\n for region in alchemical_regions:\n alchemical_atoms = region.alchemical_atoms\n aa, na = compute_direct_space_correction(nonbonded_force, alchemical_atoms, positions)\n aa_correction += aa\n na_correction += na\n\n # Compute potential of the direct space.\n potentials = [compute_energy(system, positions, force_group=force_group)\n for system in [reference_system, alchemical_system]]\n\n # Add the direct space correction.\n if is_direct_space_pme:\n potentials.append(aa_correction + na_correction)\n else:\n potentials.append(0.0 * GLOBAL_ENERGY_UNIT)\n\n # Check that error is small.\n delta = potentials[1] - potentials[2] - potentials[0]\n if abs(delta) > MAX_DELTA:\n print(\"========\")\n for description, potential in zip(['reference', 'alchemical', 'PME correction'], potentials):\n print(\"{}: {} \".format(description, potential))\n print(\"delta : {}\".format(delta))\n err_msg = \"Maximum allowable deviation exceeded (was {:.8f} kcal/mol; allowed {:.8f} kcal/mol).\"\n raise Exception(err_msg.format(delta / unit.kilocalories_per_mole, MAX_DELTA / unit.kilocalories_per_mole))\n\n\ndef check_multi_interacting_energy_components(reference_system, alchemical_system, alchemical_regions, positions):\n \"\"\"wrapper around check_interacting_energy_components for multiple regions\n\n Parameters\n ----------\n reference_system : simtk.openmm.System\n The reference system.\n alchemical_system : simtk.openmm.System\n The alchemically modified system to test.\n alchemical_regions : AlchemicalRegion.\n The alchemically modified region.\n positions : n_particlesx3 array-like of simtk.openmm.unit.Quantity\n The positions to test (units of length).\n\n Note\n ----------\n Interactions between alchemical regions are not tested here.\n Alchemical regions are assumed to be non interacting.\n \"\"\"\n\n all_alchemical_atoms = set()\n for region in alchemical_regions:\n for atom in region.alchemical_atoms:\n all_alchemical_atoms.add(atom)\n for region in alchemical_regions:\n 
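# Each alchemical region is checked independently against the reference: the union of\n        # atoms from all the other regions is passed along so that their interactions can be\n        # switched off before the comparison (regions are assumed to be non-interacting).\n        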
check_interacting_energy_components(\n reference_system, alchemical_system, region, positions,\n all_alchemical_atoms, multi_regions=True)\n\n\ndef check_interacting_energy_components(reference_system, alchemical_system, alchemical_regions, positions,\n all_alchemical_atoms=None, multi_regions=False):\n \"\"\"Compare full and alchemically-modified system energies by energy component.\n\n Parameters\n ----------\n reference_system : simtk.openmm.System\n The reference system.\n alchemical_system : simtk.openmm.System\n The alchemically modified system to test.\n alchemical_regions : AlchemicalRegion.\n The alchemically modified region.\n positions : n_particlesx3 array-like of simtk.openmm.unit.Quantity\n The positions to test (units of length).\n multi_regions : boolean\n Indicates if mutiple regions are being tested\n\n \"\"\"\n energy_unit = unit.kilojoule_per_mole\n reference_system = copy.deepcopy(reference_system)\n alchemical_system = copy.deepcopy(alchemical_system)\n is_exact_pme = is_alchemical_pme_treatment_exact(alchemical_system)\n\n # Find nonbonded method\n _, nonbonded_force = forces.find_forces(reference_system, openmm.NonbondedForce, only_one=True)\n nonbonded_method = nonbonded_force.getNonbondedMethod()\n\n # Get energy components of reference system's nonbonded force\n if multi_regions:\n other_alchemical_atoms = all_alchemical_atoms.difference(alchemical_regions.alchemical_atoms)\n print(\"Dissecting reference system's nonbonded force for region {}\".format(alchemical_regions.name))\n else:\n other_alchemical_atoms = set()\n print(\"Dissecting reference system's nonbonded force\")\n\n energy_components = dissect_nonbonded_energy(reference_system, positions,\n alchemical_regions.alchemical_atoms, other_alchemical_atoms)\n nn_particle_sterics, aa_particle_sterics, na_particle_sterics,\\\n nn_particle_electro, aa_particle_electro, na_particle_electro,\\\n nn_exception_sterics, aa_exception_sterics, na_exception_sterics,\\\n nn_exception_electro, aa_exception_electro, na_exception_electro,\\\n nn_reciprocal_energy, aa_reciprocal_energy, na_reciprocal_energy = energy_components\n\n # Dissect unmodified nonbonded force in alchemical system\n if multi_regions:\n print(\"Dissecting alchemical system's unmodified nonbonded force for region {}\".format(alchemical_regions.name))\n else:\n print(\"Dissecting alchemical system's unmodified nonbonded force\")\n energy_components = dissect_nonbonded_energy(alchemical_system, positions,\n alchemical_regions.alchemical_atoms, other_alchemical_atoms)\n unmod_nn_particle_sterics, unmod_aa_particle_sterics, unmod_na_particle_sterics,\\\n unmod_nn_particle_electro, unmod_aa_particle_electro, unmod_na_particle_electro,\\\n unmod_nn_exception_sterics, unmod_aa_exception_sterics, unmod_na_exception_sterics,\\\n unmod_nn_exception_electro, unmod_aa_exception_electro, unmod_na_exception_electro,\\\n unmod_nn_reciprocal_energy, unmod_aa_reciprocal_energy, unmod_na_reciprocal_energy = energy_components\n\n # Get alchemically-modified energy components\n if multi_regions:\n print(\"Computing alchemical system components energies for region {}\".format(alchemical_regions.name))\n else:\n print(\"Computing alchemical system components energies\")\n alchemical_state = AlchemicalState.from_system(alchemical_system, parameters_name_suffix=alchemical_regions.name)\n alchemical_state.set_alchemical_parameters(1.0)\n energy_components = AbsoluteAlchemicalFactory.get_energy_components(alchemical_system, alchemical_state,\n positions, 
platform=GLOBAL_ALCHEMY_PLATFORM)\n if multi_regions:\n region_label = ' for region {}'.format(alchemical_regions.name)\n else:\n region_label = ''\n\n # Sterics particle and exception interactions are always modeled with a custom force.\n na_custom_particle_sterics = energy_components['alchemically modified NonbondedForce for non-alchemical/alchemical sterics' + region_label]\n aa_custom_particle_sterics = energy_components['alchemically modified NonbondedForce for alchemical/alchemical sterics' + region_label]\n na_custom_exception_sterics = energy_components['alchemically modified BondForce for non-alchemical/alchemical sterics exceptions' + region_label]\n aa_custom_exception_sterics = energy_components['alchemically modified BondForce for alchemical/alchemical sterics exceptions' + region_label]\n\n # With exact treatment of PME, we use the NonbondedForce offset for electrostatics.\n try:\n na_custom_particle_electro = energy_components['alchemically modified NonbondedForce for non-alchemical/alchemical electrostatics' + region_label]\n aa_custom_particle_electro = energy_components['alchemically modified NonbondedForce for alchemical/alchemical electrostatics' + region_label]\n na_custom_exception_electro = energy_components['alchemically modified BondForce for non-alchemical/alchemical electrostatics exceptions' + region_label]\n aa_custom_exception_electro = energy_components['alchemically modified BondForce for alchemical/alchemical electrostatics exceptions' + region_label]\n except KeyError:\n assert is_exact_pme\n\n # Test that all NonbondedForce contributions match\n # -------------------------------------------------\n\n # All contributions from alchemical atoms in unmodified nonbonded force are turned off\n err_msg = 'Non-zero contribution from unmodified NonbondedForce alchemical atoms: '\n assert_almost_equal(unmod_aa_particle_sterics, 0.0 * energy_unit, err_msg)\n assert_almost_equal(unmod_na_particle_sterics, 0.0 * energy_unit, err_msg)\n assert_almost_equal(unmod_aa_exception_sterics, 0.0 * energy_unit, err_msg)\n assert_almost_equal(unmod_na_exception_sterics, 0.0 * energy_unit, err_msg)\n if not is_exact_pme:\n # With exact PME treatment these are tested below.\n assert_almost_equal(unmod_aa_particle_electro, 0.0 * energy_unit, err_msg)\n assert_almost_equal(unmod_na_particle_electro, 0.0 * energy_unit, err_msg)\n assert_almost_equal(unmod_aa_reciprocal_energy, 0.0 * energy_unit, err_msg)\n assert_almost_equal(unmod_na_reciprocal_energy, 0.0 * energy_unit, err_msg)\n assert_almost_equal(unmod_aa_exception_electro, 0.0 * energy_unit, err_msg)\n assert_almost_equal(unmod_na_exception_electro, 0.0 * energy_unit, err_msg)\n\n # Check sterics interactions match\n assert_almost_equal(nn_particle_sterics, unmod_nn_particle_sterics,\n 'Non-alchemical/non-alchemical atoms particle sterics' + region_label)\n assert_almost_equal(nn_exception_sterics, unmod_nn_exception_sterics,\n 'Non-alchemical/non-alchemical atoms exceptions sterics' + region_label)\n assert_almost_equal(aa_particle_sterics, aa_custom_particle_sterics,\n 'Alchemical/alchemical atoms particle sterics' + region_label)\n assert_almost_equal(aa_exception_sterics, aa_custom_exception_sterics,\n 'Alchemical/alchemical atoms exceptions sterics' + region_label)\n assert_almost_equal(na_particle_sterics, na_custom_particle_sterics,\n 'Non-alchemical/alchemical atoms particle sterics' + region_label)\n assert_almost_equal(na_exception_sterics, na_custom_exception_sterics,\n 'Non-alchemical/alchemical atoms exceptions 
sterics' + region_label)\n\n # Check electrostatics interactions\n assert_almost_equal(nn_particle_electro, unmod_nn_particle_electro,\n 'Non-alchemical/non-alchemical atoms particle electrostatics' + region_label)\n assert_almost_equal(nn_exception_electro, unmod_nn_exception_electro,\n 'Non-alchemical/non-alchemical atoms exceptions electrostatics' + region_label)\n # With exact treatment of PME, the electrostatics of alchemical-alchemical\n # atoms is modeled with NonbondedForce offsets.\n if is_exact_pme:\n # Reciprocal space.\n assert_almost_equal(aa_reciprocal_energy, unmod_aa_reciprocal_energy,\n 'Alchemical/alchemical atoms reciprocal space energy' + region_label)\n assert_almost_equal(na_reciprocal_energy, unmod_na_reciprocal_energy,\n 'Non-alchemical/alchemical atoms reciprocal space energy' + region_label)\n # Direct space.\n assert_almost_equal(aa_particle_electro, unmod_aa_particle_electro,\n 'Alchemical/alchemical atoms particle electrostatics' + region_label)\n assert_almost_equal(na_particle_electro, unmod_na_particle_electro,\n 'Non-alchemical/alchemical atoms particle electrostatics' + region_label)\n # Exceptions.\n assert_almost_equal(aa_exception_electro, unmod_aa_exception_electro,\n 'Alchemical/alchemical atoms exceptions electrostatics' + region_label)\n assert_almost_equal(na_exception_electro, unmod_na_exception_electro,\n 'Non-alchemical/alchemical atoms exceptions electrostatics' + region_label)\n # With direct space PME, the custom forces model only the\n # direct space of alchemical-alchemical interactions.\n else:\n # Get direct space correction due to reciprocal space exceptions\n aa_correction, na_correction = compute_direct_space_correction(nonbonded_force,\n alchemical_regions.alchemical_atoms,\n positions)\n aa_particle_electro += aa_correction\n na_particle_electro += na_correction\n\n # Check direct space energy\n assert_almost_equal(aa_particle_electro, aa_custom_particle_electro,\n 'Alchemical/alchemical atoms particle electrostatics' + region_label)\n assert_almost_equal(na_particle_electro, na_custom_particle_electro,\n 'Non-alchemical/alchemical atoms particle electrostatics' + region_label)\n # Check exceptions.\n assert_almost_equal(aa_exception_electro, aa_custom_exception_electro,\n 'Alchemical/alchemical atoms exceptions electrostatics' + region_label)\n assert_almost_equal(na_exception_electro, na_custom_exception_electro,\n 'Non-alchemical/alchemical atoms exceptions electrostatics' + region_label)\n\n # With Ewald methods, the NonbondedForce should always hold the\n # reciprocal space energy of nonalchemical-nonalchemical atoms.\n if nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]:\n # Reciprocal space.\n assert_almost_equal(nn_reciprocal_energy, unmod_nn_reciprocal_energy,\n 'Non-alchemical/non-alchemical atoms reciprocal space energy')\n else:\n # Reciprocal space energy should be null in this case\n assert nn_reciprocal_energy == unmod_nn_reciprocal_energy == 0.0 * energy_unit\n assert aa_reciprocal_energy == unmod_aa_reciprocal_energy == 0.0 * energy_unit\n assert na_reciprocal_energy == unmod_na_reciprocal_energy == 0.0 * energy_unit\n\n # Check forces other than nonbonded\n # ----------------------------------\n for force_name in ['HarmonicBondForce', 'HarmonicAngleForce', 'PeriodicTorsionForce',\n 'GBSAOBCForce', 'CustomGBForce']:\n alchemical_forces_energies = [energy for label, energy in energy_components.items() if force_name in label]\n reference_force_energy = compute_force_energy(reference_system, 
positions, force_name)\n\n # There should be no force in the alchemical system if force_name is missing from the reference\n if reference_force_energy is None:\n assert len(alchemical_forces_energies) == 0, str(alchemical_forces_energies)\n continue\n\n # Check that the energies match\n tot_alchemical_forces_energies = 0.0 * energy_unit\n for energy in alchemical_forces_energies:\n tot_alchemical_forces_energies += energy\n assert_almost_equal(reference_force_energy, tot_alchemical_forces_energies,\n '{} energy '.format(force_name))\n\n\ndef check_multi_noninteracting_energy_components(reference_system, alchemical_system, alchemical_regions, positions):\n \"\"\"wrapper around check_noninteracting_energy_components for multiple regions\n Parameters\n ----------\n reference_system : simtk.openmm.System\n The reference system (not alchemically modified).\n alchemical_system : simtk.openmm.System\n The alchemically modified system to test.\n alchemical_regions : AlchemicalRegion.\n The alchemically modified region.\n positions : n_particlesx3 array-like of simtk.openmm.unit.Quantity\n The positions to test (units of length).\n \"\"\"\n for region in alchemical_regions:\n check_noninteracting_energy_components(reference_system, alchemical_system, region, positions, True)\n\n\ndef check_noninteracting_energy_components(reference_system, alchemical_system, alchemical_regions, positions, multi_regions=False):\n \"\"\"Check non-interacting energy components are zero when appropriate.\n Parameters\n ----------\n reference_system : simtk.openmm.System\n The reference system (not alchemically modified).\n alchemical_system : simtk.openmm.System\n The alchemically modified system to test.\n alchemical_regions : AlchemicalRegion.\n The alchemically modified region.\n positions : n_particlesx3 array-like of simtk.openmm.unit.Quantity\n The positions to test (units of length).\n multi_regions : boolean\n Indicates if mutiple regions are being tested\n \"\"\"\n alchemical_system = copy.deepcopy(alchemical_system)\n is_exact_pme = is_alchemical_pme_treatment_exact(alchemical_system)\n\n # Set state to non-interacting.\n alchemical_state = AlchemicalState.from_system(alchemical_system, parameters_name_suffix=alchemical_regions.name)\n alchemical_state.set_alchemical_parameters(0.0)\n energy_components = AbsoluteAlchemicalFactory.get_energy_components(alchemical_system, alchemical_state,\n positions, platform=GLOBAL_ALCHEMY_PLATFORM)\n\n def assert_zero_energy(label):\n # Handle multiple alchemical regions.\n if multi_regions:\n label = label + ' for region ' + alchemical_regions.name\n # Testing energy component of each region.\n print('testing {}'.format(label))\n value = energy_components[label]\n assert abs(value / GLOBAL_ENERGY_UNIT) == 0.0, (\"'{}' should have zero energy in annihilated alchemical\"\n \" state, but energy is {}\").format(label, str(value))\n\n # Check that non-alchemical/alchemical particle interactions and 1,4 exceptions have been annihilated\n assert_zero_energy('alchemically modified BondForce for non-alchemical/alchemical sterics exceptions')\n assert_zero_energy('alchemically modified NonbondedForce for non-alchemical/alchemical sterics')\n if is_exact_pme:\n assert 'alchemically modified NonbondedForce for non-alchemical/alchemical electrostatics' not in energy_components\n assert 'alchemically modified BondForce for non-alchemical/alchemical electrostatics exceptions' not in energy_components\n else:\n assert_zero_energy('alchemically modified NonbondedForce for 
non-alchemical/alchemical electrostatics')\n assert_zero_energy('alchemically modified BondForce for non-alchemical/alchemical electrostatics exceptions')\n\n # Check that alchemical/alchemical particle interactions and 1,4 exceptions have been annihilated\n if alchemical_regions.annihilate_sterics:\n assert_zero_energy('alchemically modified NonbondedForce for alchemical/alchemical sterics')\n assert_zero_energy('alchemically modified BondForce for alchemical/alchemical sterics exceptions')\n if alchemical_regions.annihilate_electrostatics:\n if is_exact_pme:\n assert 'alchemically modified NonbondedForce for alchemical/alchemical electrostatics' not in energy_components\n assert 'alchemically modified BondForce for alchemical/alchemical electrostatics exceptions' not in energy_components\n else:\n assert_zero_energy('alchemically modified NonbondedForce for alchemical/alchemical electrostatics')\n assert_zero_energy('alchemically modified BondForce for alchemical/alchemical electrostatics exceptions')\n\n # Check valence terms\n for force_name in ['HarmonicBondForce', 'HarmonicAngleForce', 'PeriodicTorsionForce']:\n force_label = 'alchemically modified ' + force_name\n if force_label in energy_components:\n assert_zero_energy(force_label)\n\n # Check implicit solvent force.\n for force_name in ['CustomGBForce', 'GBSAOBCForce']:\n label = 'alchemically modified ' + force_name\n\n # Check if the system has an implicit solvent force.\n try:\n alchemical_energy = energy_components[label]\n except KeyError: # No implicit solvent.\n continue\n\n # If all alchemical particles are modified, the alchemical energy should be zero.\n if len(alchemical_regions.alchemical_atoms) == reference_system.getNumParticles():\n assert_zero_energy(label)\n continue\n\n # Otherwise compare the alchemical energy with a\n # reference system with only non-alchemical particles.\n # Find implicit solvent force in reference system.\n for reference_force in reference_system.getForces():\n if reference_force.__class__.__name__ == force_name:\n break\n\n system = openmm.System()\n force = reference_force.__class__()\n\n # For custom GB forces, we need to copy all computed values,\n # energy terms, parameters, tabulated functions and exclusions.\n if isinstance(force, openmm.CustomGBForce):\n for index in range(reference_force.getNumPerParticleParameters()):\n name = reference_force.getPerParticleParameterName(index)\n force.addPerParticleParameter(name)\n for index in range(reference_force.getNumComputedValues()):\n computed_value = reference_force.getComputedValueParameters(index)\n force.addComputedValue(*computed_value)\n for index in range(reference_force.getNumEnergyTerms()):\n energy_term = reference_force.getEnergyTermParameters(index)\n force.addEnergyTerm(*energy_term)\n for index in range(reference_force.getNumGlobalParameters()):\n name = reference_force.getGlobalParameterName(index)\n default_value = reference_force.getGlobalParameterDefaultValue(index)\n force.addGlobalParameter(name, default_value)\n for function_index in range(reference_force.getNumTabulatedFunctions()):\n name = reference_force.getTabulatedFunctionName(function_index)\n function = reference_force.getTabulatedFunction(function_index)\n function_copy = copy.deepcopy(function)\n force.addTabulatedFunction(name, function_copy)\n for exclusion_index in range(reference_force.getNumExclusions()):\n particles = reference_force.getExclusionParticles(exclusion_index)\n force.addExclusion(*particles)\n\n # Create a system with only the 
non-alchemical particles.\n for particle_index in range(reference_system.getNumParticles()):\n if particle_index not in alchemical_regions.alchemical_atoms:\n # Add particle to System.\n mass = reference_system.getParticleMass(particle_index)\n system.addParticle(mass)\n\n # Add particle to Force..\n parameters = reference_force.getParticleParameters(particle_index)\n try: # GBSAOBCForce\n force.addParticle(*parameters)\n except NotImplementedError: # CustomGBForce\n force.addParticle(parameters)\n\n system.addForce(force)\n\n # Get positions for all non-alchemical particles.\n non_alchemical_positions = [pos for i, pos in enumerate(positions)\n if i not in alchemical_regions.alchemical_atoms]\n\n # Compute reference force energy.\n reference_force_energy = compute_force_energy(system, non_alchemical_positions, force_name)\n assert_almost_equal(reference_force_energy, alchemical_energy,\n 'reference {}, alchemical {}'.format(reference_force_energy, alchemical_energy))\n\n\ndef check_split_force_groups(system, region_names=None):\n \"\"\"Check that force groups are split correctly.\"\"\"\n\n if region_names is None:\n region_names = []\n # Separate forces groups by lambda parameters that AlchemicalState supports.\n for region in region_names:\n force_groups_by_lambda = {}\n lambdas_by_force_group = {}\n for force, lambda_name, _ in AlchemicalState._get_system_controlled_parameters(\n system, parameters_name_suffix=region):\n force_group = force.getForceGroup()\n try:\n force_groups_by_lambda[lambda_name].add(force_group)\n except KeyError:\n force_groups_by_lambda[lambda_name] = {force_group}\n try:\n lambdas_by_force_group[force_group].add(lambda_name)\n except KeyError:\n lambdas_by_force_group[force_group] = {lambda_name}\n\n # Check that force group 0 doesn't hold alchemical forces.\n assert 0 not in force_groups_by_lambda\n\n # There are as many alchemical force groups as not-None lambda variables.\n alchemical_state = AlchemicalState.from_system(system, parameters_name_suffix=region)\n valid_lambdas = {lambda_name for lambda_name in alchemical_state._get_controlled_parameters(parameters_name_suffix=region)\n if getattr(alchemical_state, lambda_name) is not None}\n assert valid_lambdas == set(force_groups_by_lambda.keys())\n\n # Check that force groups and lambda variables are in 1-to-1 correspondence.\n assert len(force_groups_by_lambda) == len(lambdas_by_force_group)\n for d in [force_groups_by_lambda, lambdas_by_force_group]:\n for value in d.values():\n assert len(value) == 1\n\n # With exact treatment of PME, the NonbondedForce must\n # be in the lambda_electrostatics force group.\n if is_alchemical_pme_treatment_exact(system):\n force_idx, nonbonded_force = forces.find_forces(system, openmm.NonbondedForce, only_one=True)\n assert force_groups_by_lambda['lambda_electrostatics_{}'.format(region)] == {nonbonded_force.getForceGroup()}\n\n\n# =============================================================================\n# BENCHMARKING AND DEBUG FUNCTIONS\n# =============================================================================\n\ndef benchmark(reference_system, alchemical_regions, positions, nsteps=500,\n timestep=1.0*unit.femtoseconds):\n \"\"\"\n Benchmark performance of alchemically modified system relative to original system.\n\n Parameters\n ----------\n reference_system : simtk.openmm.System\n The reference System object to compare with.\n alchemical_regions : AlchemicalRegion\n The region to alchemically modify.\n positions : n_particlesx3 array-like of 
simtk.unit.Quantity\n The initial positions (units of distance).\n nsteps : int, optional\n Number of molecular dynamics steps to use for benchmarking (default is 500).\n timestep : simtk.unit.Quantity, optional\n Timestep to use for benchmarking (units of time, default is 1.0*unit.femtoseconds).\n\n \"\"\"\n timer = utils.Timer()\n\n # Create the perturbed system.\n factory = AbsoluteAlchemicalFactory()\n timer.start('Create alchemical system')\n alchemical_system = factory.create_alchemical_system(reference_system, alchemical_regions)\n timer.stop('Create alchemical system')\n\n # Create an alchemically-perturbed state corresponding to nearly fully-interacting.\n # NOTE: We use a lambda slightly smaller than 1.0 because the AbsoluteAlchemicalFactory\n # may not use Custom*Force softcore versions if lambda = 1.0 identically.\n alchemical_state = AlchemicalState.from_system(alchemical_system)\n alchemical_state.set_alchemical_parameters(1.0 - 1.0e-6)\n\n # Create integrators.\n reference_integrator = openmm.VerletIntegrator(timestep)\n alchemical_integrator = openmm.VerletIntegrator(timestep)\n\n # Create contexts for sampling.\n if GLOBAL_ALCHEMY_PLATFORM:\n reference_context = openmm.Context(reference_system, reference_integrator, GLOBAL_ALCHEMY_PLATFORM)\n alchemical_context = openmm.Context(alchemical_system, alchemical_integrator, GLOBAL_ALCHEMY_PLATFORM)\n else:\n reference_context = openmm.Context(reference_system, reference_integrator)\n alchemical_context = openmm.Context(alchemical_system, alchemical_integrator)\n reference_context.setPositions(positions)\n alchemical_context.setPositions(positions)\n\n # Make sure all kernels are compiled.\n reference_integrator.step(1)\n alchemical_integrator.step(1)\n\n # Run simulations.\n print('Running reference system...')\n timer.start('Run reference system')\n reference_integrator.step(nsteps)\n timer.stop('Run reference system')\n\n print('Running alchemical system...')\n timer.start('Run alchemical system')\n alchemical_integrator.step(nsteps)\n timer.stop('Run alchemical system')\n print('Done.')\n\n timer.report_timing()\n\n\ndef benchmark_alchemy_from_pdb():\n \"\"\"CLI entry point for benchmarking alchemical performance from a PDB file.\n \"\"\"\n logging.basicConfig(level=logging.DEBUG)\n\n import mdtraj\n import argparse\n from simtk.openmm import app\n\n parser = argparse.ArgumentParser(description='Benchmark performance of alchemically-modified system.')\n parser.add_argument('-p', '--pdb', metavar='PDBFILE', type=str, action='store', required=True,\n help='PDB file to benchmark; only protein forcefields supported for now (no small molecules)')\n parser.add_argument('-s', '--selection', metavar='SELECTION', type=str, action='store', default='not water',\n help='MDTraj DSL describing alchemical region (default: \"not water\")')\n parser.add_argument('-n', '--nsteps', metavar='STEPS', type=int, action='store', default=1000,\n help='Number of benchmarking steps (default: 1000)')\n args = parser.parse_args()\n # Read the PDB file\n print('Loading PDB file...')\n pdbfile = app.PDBFile(args.pdb)\n print('Loading forcefield...')\n forcefield = app.ForceField('amber99sbildn.xml', 'tip3p.xml')\n print('Adding missing hydrogens...')\n modeller = app.Modeller(pdbfile.topology, pdbfile.positions)\n modeller.addHydrogens(forcefield)\n print('Creating System...')\n reference_system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.PME)\n # Minimize\n print('Minimizing...')\n positions = minimize(reference_system, 
modeller.positions)\n # Select alchemical regions\n mdtraj_topology = mdtraj.Topology.from_openmm(modeller.topology)\n alchemical_atoms = mdtraj_topology.select(args.selection)\n alchemical_region = AlchemicalRegion(alchemical_atoms=alchemical_atoms)\n print('There are %d atoms in the alchemical region.' % len(alchemical_atoms))\n # Benchmark\n print('Benchmarking...')\n benchmark(reference_system, alchemical_region, positions, nsteps=args.nsteps, timestep=1.0*unit.femtoseconds)\n\n\ndef overlap_check(reference_system, alchemical_system, positions, nsteps=50, nsamples=200,\n cached_trajectory_filename=None, name=\"\"):\n \"\"\"\n Test overlap between reference system and alchemical system by running a short simulation.\n\n Parameters\n ----------\n reference_system : simtk.openmm.System\n The reference System object to compare with.\n alchemical_system : simtk.openmm.System\n Alchemically-modified system.\n positions : n_particlesx3 array-like of simtk.unit.Quantity\n The initial positions (units of distance).\n nsteps : int, optional\n Number of molecular dynamics steps between samples (default is 50).\n nsamples : int, optional\n Number of samples to collect (default is 100).\n cached_trajectory_filename : str, optional, default=None\n If not None, this file will be used to cache intermediate results with pickle.\n name : str, optional, default=None\n Name of test system being evaluated.\n\n \"\"\"\n temperature = 300.0 * unit.kelvin\n pressure = 1.0 * unit.atmospheres\n collision_rate = 5.0 / unit.picoseconds\n timestep = 2.0 * unit.femtoseconds\n kT = kB * temperature\n\n # Minimize\n positions = minimize(reference_system, positions)\n\n # Add a barostat if possible.\n reference_system = copy.deepcopy(reference_system)\n if reference_system.usesPeriodicBoundaryConditions():\n reference_system.addForce(openmm.MonteCarloBarostat(pressure, temperature))\n\n # Create integrators.\n reference_integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep)\n alchemical_integrator = openmm.VerletIntegrator(timestep)\n\n # Create contexts.\n reference_context = create_context(reference_system, reference_integrator)\n alchemical_context = create_context(alchemical_system, alchemical_integrator)\n\n # Initialize data structure or load if from cache.\n # du_n[n] is the potential energy difference of sample n.\n if cached_trajectory_filename is not None:\n try:\n with open(cached_trajectory_filename, 'rb') as f:\n data = pickle.load(f)\n except FileNotFoundError:\n data = dict(du_n=[])\n # Create directory if it doesn't exist.\n directory = os.path.dirname(cached_trajectory_filename)\n if not os.path.exists(directory):\n os.makedirs(directory)\n else:\n positions = data['positions']\n reference_context.setPeriodicBoxVectors(*data['box_vectors'])\n else:\n data = dict(du_n=[])\n\n # Collect simulation data.\n iteration = len(data['du_n'])\n reference_context.setPositions(positions)\n print()\n for sample in range(iteration, nsamples):\n print('\\rSample {}/{}'.format(sample+1, nsamples), end='')\n sys.stdout.flush()\n\n # Run dynamics.\n reference_integrator.step(nsteps)\n\n # Get reference energies.\n reference_state = reference_context.getState(getEnergy=True, getPositions=True)\n reference_potential = reference_state.getPotentialEnergy()\n if np.isnan(reference_potential/kT):\n raise Exception(\"Reference potential is NaN\")\n\n # Get alchemical energies.\n alchemical_context.setPeriodicBoxVectors(*reference_state.getPeriodicBoxVectors())\n 
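# Evaluate the alchemical system at the configuration just sampled from the reference\n        # ensemble; the reduced potential difference du = (u_alchemical - u_reference)/kT\n        # appended below is what the EXP (Zwanzig) estimator uses for the overlap check.\n        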
alchemical_context.setPositions(reference_state.getPositions(asNumpy=True))\n alchemical_state = alchemical_context.getState(getEnergy=True)\n alchemical_potential = alchemical_state.getPotentialEnergy()\n if np.isnan(alchemical_potential/kT):\n raise Exception(\"Alchemical potential is NaN\")\n\n # Update and cache data.\n data['du_n'].append((alchemical_potential - reference_potential) / kT)\n if cached_trajectory_filename is not None:\n # Save only last iteration positions and vectors.\n data['positions'] = reference_state.getPositions()\n data['box_vectors'] = reference_state.getPeriodicBoxVectors()\n with open(cached_trajectory_filename, 'wb') as f:\n pickle.dump(data, f)\n\n # Discard data to equilibration and subsample.\n du_n = np.array(data['du_n'])\n from pymbar import timeseries, EXP\n t0, g, Neff = timeseries.detectEquilibration(du_n)\n indices = timeseries.subsampleCorrelatedData(du_n, g=g)\n du_n = du_n[indices]\n\n # Compute statistics.\n DeltaF, dDeltaF = EXP(du_n)\n\n # Raise an exception if the error is larger than 3kT.\n MAX_DEVIATION = 3.0 # kT\n report = ('\\nDeltaF = {:12.3f} +- {:12.3f} kT ({:3.2f} samples, g = {:3.1f}); '\n 'du mean {:.3f} kT stddev {:.3f} kT').format(DeltaF, dDeltaF, Neff, g, du_n.mean(), du_n.std())\n print(report)\n if dDeltaF > MAX_DEVIATION:\n raise Exception(report)\n\n\ndef rstyle(ax):\n \"\"\"Styles x,y axes to appear like ggplot2\n\n Must be called after all plot and axis manipulation operations have been\n carried out (needs to know final tick spacing)\n\n From:\n http://nbviewer.ipython.org/github/wrobstory/climatic/blob/master/examples/ggplot_styling_for_matplotlib.ipynb\n\n \"\"\"\n import pylab\n import matplotlib\n import matplotlib.pyplot as plt\n\n #Set the style of the major and minor grid lines, filled blocks\n ax.grid(True, 'major', color='w', linestyle='-', linewidth=1.4)\n ax.grid(True, 'minor', color='0.99', linestyle='-', linewidth=0.7)\n ax.patch.set_facecolor('0.90')\n ax.set_axisbelow(True)\n\n #Set minor tick spacing to 1/2 of the major ticks\n ax.xaxis.set_minor_locator((pylab.MultipleLocator((plt.xticks()[0][1] - plt.xticks()[0][0]) / 2.0)))\n ax.yaxis.set_minor_locator((pylab.MultipleLocator((plt.yticks()[0][1] - plt.yticks()[0][0]) / 2.0)))\n\n #Remove axis border\n for child in ax.get_children():\n if isinstance(child, matplotlib.spines.Spine):\n child.set_alpha(0)\n\n #Restyle the tick lines\n for line in ax.get_xticklines() + ax.get_yticklines():\n line.set_markersize(5)\n line.set_color(\"gray\")\n line.set_markeredgewidth(1.4)\n\n #Remove the minor tick lines\n for line in (ax.xaxis.get_ticklines(minor=True) +\n ax.yaxis.get_ticklines(minor=True)):\n line.set_markersize(0)\n\n #Only show bottom left ticks, pointing out of axis\n plt.rcParams['xtick.direction'] = 'out'\n plt.rcParams['ytick.direction'] = 'out'\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n\ndef lambda_trace(reference_system, alchemical_regions, positions, nsteps=100):\n \"\"\"\n Compute potential energy as a function of lambda.\n\n \"\"\"\n\n # Create a factory to produce alchemical intermediates.\n factory = AbsoluteAlchemicalFactory()\n alchemical_system = factory.create_alchemical_system(reference_system, alchemical_regions)\n alchemical_state = AlchemicalState.from_system(alchemical_system)\n\n # Take equally-sized steps.\n delta = 1.0 / nsteps\n\n # Compute unmodified energy.\n u_original = compute_energy(reference_system, positions)\n\n # Scan through lambda values.\n lambda_i = np.zeros([nsteps+1], 
np.float64) # lambda values for u_i\n\n # u_i[i] is the potential energy for lambda_i[i]\n u_i = unit.Quantity(np.zeros([nsteps+1], np.float64), unit.kilocalories_per_mole)\n for i in range(nsteps+1):\n lambda_i[i] = 1.0-i*delta\n alchemical_state.set_alchemical_parameters(lambda_i[i])\n alchemical_state.apply_to_system(alchemical_system)\n u_i[i] = compute_energy(alchemical_system, positions)\n logger.info(\"{:12.9f} {:24.8f} kcal/mol\".format(lambda_i[i], u_i[i] / GLOBAL_ENERGY_UNIT))\n\n # Write figure as PDF.\n from matplotlib.backends.backend_pdf import PdfPages\n import matplotlib.pyplot as plt\n with PdfPages('lambda-trace.pdf') as pdf:\n fig = plt.figure(figsize=(10, 5))\n ax = fig.add_subplot(111)\n plt.plot(1, u_original / unit.kilocalories_per_mole, 'ro', label='unmodified')\n plt.plot(lambda_i, u_i / unit.kilocalories_per_mole, 'k.', label='alchemical')\n plt.title('T4 lysozyme L99A + p-xylene : AMBER96 + OBC GBSA')\n plt.ylabel('potential (kcal/mol)')\n plt.xlabel('lambda')\n ax.legend()\n rstyle(ax)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n\ndef generate_trace(test_system):\n lambda_trace(test_system['test'].system, test_system['test'].positions, test_system['receptor_atoms'], test_system['ligand_atoms'])\n\n\n# =============================================================================\n# TEST ALCHEMICAL FACTORY SUITE\n# =============================================================================\n\ndef test_resolve_alchemical_region():\n \"\"\"Test the method AbsoluteAlchemicalFactory._resolve_alchemical_region.\"\"\"\n test_cases = [\n (testsystems.AlanineDipeptideVacuum(), range(22), 9, 36, 48),\n (testsystems.AlanineDipeptideVacuum(), range(11, 22), 4, 21, 31),\n (testsystems.LennardJonesCluster(), range(27), 0, 0, 0)\n ]\n\n for i, (test_case, atoms, n_bonds, n_angles, n_torsions) in enumerate(test_cases):\n system = test_case.system\n\n # Default arguments are converted to empty list.\n alchemical_region = AlchemicalRegion(alchemical_atoms=atoms)\n resolved_region = AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)\n for region in ['bonds', 'angles', 'torsions']:\n assert getattr(resolved_region, 'alchemical_' + region) == set()\n\n # Numpy arrays are converted to sets.\n alchemical_region = AlchemicalRegion(alchemical_atoms=np.array(atoms),\n alchemical_bonds=np.array(range(n_bonds)),\n alchemical_angles=np.array(range(n_angles)),\n alchemical_torsions=np.array(range(n_torsions)))\n resolved_region = AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)\n for region in ['atoms', 'bonds', 'angles', 'torsions']:\n assert isinstance(getattr(resolved_region, 'alchemical_' + region), frozenset)\n\n # Bonds, angles and torsions are inferred correctly.\n alchemical_region = AlchemicalRegion(alchemical_atoms=atoms, alchemical_bonds=True,\n alchemical_angles=True, alchemical_torsions=True)\n resolved_region = AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)\n for j, region in enumerate(['bonds', 'angles', 'torsions']):\n assert len(getattr(resolved_region, 'alchemical_' + region)) == test_cases[i][j+2]\n\n # An exception is if indices are not part of the system.\n alchemical_region = AlchemicalRegion(alchemical_atoms=[10000000])\n with nose.tools.assert_raises(ValueError):\n AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)\n\n # An exception is raised if nothing is defined.\n alchemical_region = AlchemicalRegion()\n with 
nose.tools.assert_raises(ValueError):\n AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)\n\nclass TestAbsoluteAlchemicalFactory(object):\n \"\"\"Test AbsoluteAlchemicalFactory class.\"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\"Create test systems and shared objects.\"\"\"\n cls.define_systems()\n cls.define_regions()\n cls.generate_cases()\n\n @classmethod\n def define_systems(cls):\n \"\"\"Create shared test systems in cls.test_systems for the test suite.\"\"\"\n cls.test_systems = dict()\n\n # Basic test systems: Lennard-Jones and water particles only.\n # Test also dispersion correction and switch off (\"on\" values\n # for these options are tested in HostGuestExplicit system).\n cls.test_systems['LennardJonesCluster'] = testsystems.LennardJonesCluster()\n cls.test_systems['LennardJonesFluid with dispersion correction'] = \\\n testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=True)\n cls.test_systems['TIP3P WaterBox with reaction field, no switch, no dispersion correction'] = \\\n testsystems.WaterBox(dispersion_correction=False, switch=False, nonbondedMethod=openmm.app.CutoffPeriodic)\n cls.test_systems['TIP4P-EW WaterBox and NaCl with PME'] = \\\n testsystems.WaterBox(nonbondedMethod=openmm.app.PME, model='tip4pew', ionic_strength=200*unit.millimolar)\n\n # Vacuum and implicit.\n cls.test_systems['AlanineDipeptideVacuum'] = testsystems.AlanineDipeptideVacuum()\n cls.test_systems['AlanineDipeptideImplicit'] = testsystems.AlanineDipeptideImplicit()\n cls.test_systems['TolueneImplicitOBC2'] = testsystems.TolueneImplicitOBC2()\n cls.test_systems['TolueneImplicitGBn'] = testsystems.TolueneImplicitGBn()\n\n # Explicit test system: PME and CutoffPeriodic.\n #cls.test_systems['AlanineDipeptideExplicit with CutoffPeriodic'] = \\\n # testsystems.AlanineDipeptideExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)\n cls.test_systems['HostGuestExplicit with PME'] = \\\n testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.PME)\n cls.test_systems['HostGuestExplicit with CutoffPeriodic'] = \\\n testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)\n\n @classmethod\n def define_regions(cls):\n \"\"\"Create shared AlchemicalRegions for test systems in cls.test_regions.\"\"\"\n cls.test_regions = dict()\n cls.test_regions['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(2))\n cls.test_regions['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10))\n cls.test_regions['Toluene'] = AlchemicalRegion(alchemical_atoms=range(6)) # Only partially modified.\n cls.test_regions['AlanineDipeptide'] = AlchemicalRegion(alchemical_atoms=range(22))\n cls.test_regions['HostGuestExplicit'] = AlchemicalRegion(alchemical_atoms=range(126, 156))\n cls.test_regions['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(0,3)) \n\n # Modify ions.\n for atom in cls.test_systems['TIP4P-EW WaterBox and NaCl with PME'].topology.atoms():\n if atom.name in ['Na', 'Cl']:\n cls.test_regions['TIP4P-EW WaterBox and NaCl'] = AlchemicalRegion(alchemical_atoms=range(atom.index, atom.index+1))\n break\n\n @classmethod\n def generate_cases(cls):\n \"\"\"Generate all test cases in cls.test_cases combinatorially.\"\"\"\n cls.test_cases = dict()\n direct_space_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='direct-space',\n alchemical_rf_treatment='switched')\n exact_pme_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='exact')\n\n # We generate all possible combinations of 
annihilate_sterics/electrostatics\n # for each test system. We also annihilate bonds, angles and torsions every\n # 3 test cases so that we test it at least one for each test system and for\n # each combination of annihilate_sterics/electrostatics.\n n_test_cases = 0\n for test_system_name, test_system in cls.test_systems.items():\n\n # Find standard alchemical region.\n for region_name, region in cls.test_regions.items():\n if region_name in test_system_name:\n break\n assert region_name in test_system_name, test_system_name\n\n # Find nonbonded method.\n force_idx, nonbonded_force = forces.find_forces(test_system.system, openmm.NonbondedForce, only_one=True)\n nonbonded_method = nonbonded_force.getNonbondedMethod()\n\n # Create all combinations of annihilate_sterics/electrostatics.\n for annihilate_sterics, annihilate_electrostatics in itertools.product((True, False), repeat=2):\n # Create new region that we can modify.\n test_region = region._replace(annihilate_sterics=annihilate_sterics,\n annihilate_electrostatics=annihilate_electrostatics)\n\n # Create test name.\n test_case_name = test_system_name[:]\n if annihilate_sterics:\n test_case_name += ', annihilated sterics'\n if annihilate_electrostatics:\n test_case_name += ', annihilated electrostatics'\n\n # Annihilate bonds and angles every three test_cases.\n if n_test_cases % 3 == 0:\n test_region = test_region._replace(alchemical_bonds=True, alchemical_angles=True,\n alchemical_torsions=True)\n test_case_name += ', annihilated bonds, angles and torsions'\n\n # Add different softcore parameters every five test_cases.\n if n_test_cases % 5 == 0:\n test_region = test_region._replace(softcore_alpha=1.0, softcore_beta=1.0, softcore_a=1.0, softcore_b=1.0,\n softcore_c=1.0, softcore_d=1.0, softcore_e=1.0, softcore_f=1.0)\n test_case_name += ', modified softcore parameters'\n\n # Pre-generate alchemical system.\n alchemical_system = direct_space_factory.create_alchemical_system(test_system.system, test_region)\n\n # Add test case.\n cls.test_cases[test_case_name] = (test_system, alchemical_system, test_region)\n n_test_cases += 1\n\n # If we don't use softcore electrostatics and we annihilate charges\n # we can test also exact PME treatment. 
We don't increase n_test_cases\n # purposely to keep track of which tests are added above.\n if (test_region.softcore_beta == 0.0 and annihilate_electrostatics and\n nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]):\n alchemical_system = exact_pme_factory.create_alchemical_system(test_system.system, test_region)\n test_case_name += ', exact PME'\n cls.test_cases[test_case_name] = (test_system, alchemical_system, test_region)\n\n # If the test system uses reaction field replace reaction field\n # of the reference system to allow comparisons.\n if nonbonded_method == openmm.NonbondedForce.CutoffPeriodic:\n forcefactories.replace_reaction_field(test_system.system, return_copy=False,\n switch_width=direct_space_factory.switch_width)\n\n def filter_cases(self, condition_func, max_number=None):\n \"\"\"Return the list of test cases that satisfy condition_func(test_case_name).\"\"\"\n if max_number is None:\n max_number = len(self.test_cases)\n\n test_cases = {}\n for test_name, test_case in self.test_cases.items():\n if condition_func(test_name):\n test_cases[test_name] = test_case\n if len(test_cases) >= max_number:\n break\n return test_cases\n\n def test_split_force_groups(self):\n \"\"\"Forces having different lambda variables should have a different force group.\"\"\"\n # Select 1 implicit, 1 explicit, and 1 exact PME explicit test case randomly.\n test_cases = self.filter_cases(lambda x: 'Implicit' in x, max_number=1)\n test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' in x, max_number=1))\n test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' not in x, max_number=1))\n for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():\n f = partial(check_split_force_groups, alchemical_system)\n f.description = \"Testing force splitting among groups of {}\".format(test_name)\n yield f\n\n def test_fully_interacting_energy(self):\n \"\"\"Compare the energies of reference and fully interacting alchemical system.\"\"\"\n for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():\n f = partial(compare_system_energies, test_system.system,\n alchemical_system, alchemical_region, test_system.positions)\n f.description = \"Testing fully interacting energy of {}\".format(test_name)\n yield f\n\n def test_noninteracting_energy_components(self):\n \"\"\"Check all forces annihilated/decoupled when their lambda variables are zero.\"\"\"\n for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():\n f = partial(check_noninteracting_energy_components, test_system.system, alchemical_system,\n alchemical_region, test_system.positions)\n f.description = \"Testing non-interacting energy of {}\".format(test_name)\n yield f\n\n @attr('slow')\n def test_fully_interacting_energy_components(self):\n \"\"\"Test interacting state energy by force component.\"\"\"\n # This is a very expensive but very informative test. 
We can\n # run this locally when test_fully_interacting_energies() fails.\n test_cases = self.filter_cases(lambda x: 'Explicit' in x)\n for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():\n f = partial(check_interacting_energy_components, test_system.system, alchemical_system,\n alchemical_region, test_system.positions)\n f.description = \"Testing energy components of %s...\" % test_name\n yield f\n\n @attr('slow')\n def test_platforms(self):\n \"\"\"Test interacting and noninteracting energies on all platforms.\"\"\"\n global GLOBAL_ALCHEMY_PLATFORM\n old_global_platform = GLOBAL_ALCHEMY_PLATFORM\n\n # Do not repeat tests on the platform already tested.\n if old_global_platform is None:\n default_platform_name = utils.get_fastest_platform().getName()\n else:\n default_platform_name = old_global_platform.getName()\n platforms = [platform for platform in utils.get_available_platforms()\n if platform.getName() != default_platform_name]\n\n # Test interacting and noninteracting energies on all platforms.\n for platform in platforms:\n GLOBAL_ALCHEMY_PLATFORM = platform\n for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():\n f = partial(compare_system_energies, test_system.system, alchemical_system,\n alchemical_region, test_system.positions)\n f.description = \"Test fully interacting energy of {} on {}\".format(test_name, platform.getName())\n yield f\n f = partial(check_noninteracting_energy_components, test_system.system, alchemical_system,\n alchemical_region, test_system.positions)\n f.description = \"Test non-interacting energy of {} on {}\".format(test_name, platform.getName())\n yield f\n\n # Restore global platform\n GLOBAL_ALCHEMY_PLATFORM = old_global_platform\n\n @attr('slow')\n def test_overlap(self):\n \"\"\"Tests overlap between reference and alchemical systems.\"\"\"\n for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():\n #cached_trajectory_filename = os.path.join(os.environ['HOME'], '.cache', 'alchemy', 'tests',\n # test_name + '.pickle')\n cached_trajectory_filename = None\n f = partial(overlap_check, test_system.system, alchemical_system, test_system.positions,\n cached_trajectory_filename=cached_trajectory_filename, name=test_name)\n f.description = \"Testing reference/alchemical overlap for {}\".format(test_name)\n yield f\n\nclass TestMultiRegionAbsoluteAlchemicalFactory(TestAbsoluteAlchemicalFactory):\n \"\"\"Test AbsoluteAlchemicalFactory class using multiple regions.\"\"\"\n\n @classmethod\n def define_systems(cls):\n \"\"\"Create shared test systems in cls.test_systems for the test suite.\"\"\"\n cls.test_systems = dict()\n\n # Basic test systems: Lennard-Jones and water particles only.\n # Test also dispersion correction and switch off (\"on\" values\n # for these options are tested in HostGuestExplicit system).\n cls.test_systems['LennardJonesCluster'] = testsystems.LennardJonesCluster()\n cls.test_systems['LennardJonesFluid with dispersion correction'] = \\\n testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=True)\n cls.test_systems['TIP3P WaterBox with reaction field, no switch, no dispersion correction'] = \\\n testsystems.WaterBox(dispersion_correction=False, switch=False, nonbondedMethod=openmm.app.CutoffPeriodic)\n cls.test_systems['HostGuestExplicit with PME'] = \\\n testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.PME)\n cls.test_systems['HostGuestExplicit with CutoffPeriodic'] = \\\n 
testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)\n\n @classmethod\n def define_regions(cls):\n \"\"\"Create shared AlchemicalRegions for test systems in cls.test_regions.\"\"\"\n cls.test_region_zero = dict()\n cls.test_region_one = dict()\n cls.test_region_two = dict()\n\n cls.test_region_zero['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(2), name='zero')\n cls.test_region_one['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(2,4), name='one')\n cls.test_region_two['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(4,6), name='two')\n cls.test_region_zero['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10), name='zero')\n cls.test_region_one['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10,20), name='one')\n cls.test_region_two['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(20,30), name='two')\n cls.test_region_zero['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(3), name='zero')\n cls.test_region_one['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(3,6), name='one')\n cls.test_region_two['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(6,9), name='two')\n #Three regions push HostGuest system beyond 32 force groups\n cls.test_region_zero['HostGuestExplicit'] = AlchemicalRegion(alchemical_atoms=range(126, 156), name='zero')\n cls.test_region_one['HostGuestExplicit'] = AlchemicalRegion(alchemical_atoms=range(156,160), name='one')\n cls.test_region_two['HostGuestExplicit'] = None\n\n @classmethod\n def generate_cases(cls):\n \"\"\"Generate all test cases in cls.test_cases combinatorially.\"\"\"\n cls.test_cases = dict()\n direct_space_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='direct-space',\n alchemical_rf_treatment='switched')\n exact_pme_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='exact')\n\n # We generate all possible combinations of annihilate_sterics/electrostatics\n # for each test system. 
We also annihilate bonds, angles and torsions every\n # 3 test cases so that we test it at least one for each test system and for\n # each combination of annihilate_sterics/electrostatics.\n n_test_cases = 0\n for test_system_name, test_system in cls.test_systems.items():\n\n # Find standard alchemical region zero.\n for region_name_zero, region_zero in cls.test_region_zero.items():\n if region_name_zero in test_system_name:\n break\n assert region_name_zero in test_system_name, test_system_name\n\n # Find standard alchemical region one.\n for region_name_one, region_one in cls.test_region_one.items():\n if region_name_one in test_system_name:\n break\n assert region_name_one in test_system_name, test_system_name\n\n # Find standard alchemical region two.\n for region_name_two, region_two in cls.test_region_two.items():\n if region_name_two in test_system_name:\n break\n assert region_name_two in test_system_name, test_system_name\n\n assert region_name_zero == region_name_one and region_name_one == region_name_two\n #We only want two regions for HostGuest or we get too many force groups\n if 'HostGuestExplicit' in region_name_one:\n test_regions = [region_zero, region_one]\n else:\n test_regions = [region_zero, region_one, region_two]\n\n # Find nonbonded method.\n force_idx, nonbonded_force = forces.find_forces(test_system.system, openmm.NonbondedForce, only_one=True)\n nonbonded_method = nonbonded_force.getNonbondedMethod()\n\n # Create all combinations of annihilate_sterics/electrostatics.\n for annihilate_sterics, annihilate_electrostatics in itertools.product((True, False), repeat=2):\n # Create new region that we can modify.\n for i, test_region in enumerate(test_regions):\n test_regions[i] = test_region._replace(annihilate_sterics=annihilate_sterics,\n annihilate_electrostatics=annihilate_electrostatics)\n\n # Create test name.\n test_case_name = test_system_name[:]\n if annihilate_sterics:\n test_case_name += ', annihilated sterics'\n if annihilate_electrostatics:\n test_case_name += ', annihilated electrostatics'\n\n # Annihilate bonds and angles every three test_cases.\n if n_test_cases % 3 == 0:\n for i, test_region in enumerate(test_regions):\n test_regions[i] = test_region._replace(alchemical_bonds=True, alchemical_angles=True,\n alchemical_torsions=True)\n test_case_name += ', annihilated bonds, angles and torsions'\n\n # Add different softcore parameters every five test_cases.\n if n_test_cases % 5 == 0:\n for i, test_region in enumerate(test_regions):\n test_regions[i] = test_region._replace(softcore_alpha=1.0, softcore_beta=1.0, softcore_a=1.0, softcore_b=1.0,\n softcore_c=1.0, softcore_d=1.0, softcore_e=1.0, softcore_f=1.0)\n test_case_name += ', modified softcore parameters'\n\n #region_interactions = frozenset(itertools.combinations(range(len(test_regions)), 2))\n # Pre-generate alchemical system.\n alchemical_system = direct_space_factory.create_alchemical_system(test_system.system, alchemical_regions = test_regions)\n\n # Add test case.\n cls.test_cases[test_case_name] = (test_system, alchemical_system, test_regions)\n\n n_test_cases += 1\n\n # If we don't use softcore electrostatics and we annihilate charges\n # we can test also exact PME treatment. 
We don't increase n_test_cases\n # purposely to keep track of which tests are added above.\n if (test_regions[1].softcore_beta == 0.0 and annihilate_electrostatics and\n nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]):\n alchemical_system = exact_pme_factory.create_alchemical_system(test_system.system, alchemical_regions = test_regions)\n test_case_name += ', exact PME'\n cls.test_cases[test_case_name] = (test_system, alchemical_system, test_regions)\n\n # If the test system uses reaction field replace reaction field\n # of the reference system to allow comparisons.\n if nonbonded_method == openmm.NonbondedForce.CutoffPeriodic:\n forcefactories.replace_reaction_field(test_system.system, return_copy=False,\n switch_width=direct_space_factory.switch_width)\n\n def test_split_force_groups(self):\n \"\"\"Forces having different lambda variables should have a different force group.\"\"\"\n # Select 1 implicit, 1 explicit, and 1 exact PME explicit test case randomly.\n test_cases = self.filter_cases(lambda x: 'Implicit' in x, max_number=1)\n test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' in x, max_number=1))\n test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' not in x, max_number=1))\n for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():\n region_names = []\n for region in alchemical_region:\n region_names.append(region.name)\n f = partial(check_split_force_groups, alchemical_system, region_names)\n f.description = \"Testing force splitting among groups of {}\".format(test_name)\n yield f\n\n def test_noninteracting_energy_components(self):\n \"\"\"Check all forces annihilated/decoupled when their lambda variables are zero.\"\"\"\n for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():\n f = partial(check_multi_noninteracting_energy_components, test_system.system, alchemical_system,\n alchemical_region, test_system.positions)\n f.description = \"Testing non-interacting energy of {}\".format(test_name)\n yield f\n\n @attr('slow')\n def test_platforms(self):\n \"\"\"Test interacting and noninteracting energies on all platforms.\"\"\"\n global GLOBAL_ALCHEMY_PLATFORM\n old_global_platform = GLOBAL_ALCHEMY_PLATFORM\n\n # Do not repeat tests on the platform already tested.\n if old_global_platform is None:\n default_platform_name = utils.get_fastest_platform().getName()\n else:\n default_platform_name = old_global_platform.getName()\n platforms = [platform for platform in utils.get_available_platforms()\n if platform.getName() != default_platform_name]\n\n # Test interacting and noninteracting energies on all platforms.\n for platform in platforms:\n GLOBAL_ALCHEMY_PLATFORM = platform\n for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():\n f = partial(compare_system_energies, test_system.system, alchemical_system,\n alchemical_region, test_system.positions)\n f.description = \"Test fully interacting energy of {} on {}\".format(test_name, platform.getName())\n yield f\n f = partial(check_multi_noninteracting_energy_components, test_system.system, alchemical_system,\n alchemical_region, test_system.positions)\n f.description = \"Test non-interacting energy of {} on {}\".format(test_name, platform.getName())\n yield f\n\n # Restore global platform\n GLOBAL_ALCHEMY_PLATFORM = old_global_platform\n\n @attr('slow')\n def test_fully_interacting_energy_components(self):\n \"\"\"Test interacting state 
energy by force component.\"\"\"\n # This is a very expensive but very informative test. We can\n # run this locally when test_fully_interacting_energies() fails.\n test_cases = self.filter_cases(lambda x: 'Explicit' in x)\n for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():\n f = partial(check_multi_interacting_energy_components, test_system.system, alchemical_system,\n alchemical_region, test_system.positions)\n f.description = \"Testing energy components of %s...\" % test_name\n yield f\n\n\nclass TestDispersionlessAlchemicalFactory(object):\n \"\"\"\n Only test overlap for dispersionless alchemical factory, since energy agreement\n will be poor.\n \"\"\"\n @classmethod\n def setup_class(cls):\n \"\"\"Create test systems and shared objects.\"\"\"\n cls.define_systems()\n cls.define_regions()\n cls.generate_cases()\n\n @classmethod\n def define_systems(cls):\n \"\"\"Create test systems and shared objects.\"\"\"\n cls.test_systems = dict()\n cls.test_systems['LennardJonesFluid with dispersion correction'] = \\\n testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=True)\n\n @classmethod\n def define_regions(cls):\n \"\"\"Create shared AlchemicalRegions for test systems in cls.test_regions.\"\"\"\n cls.test_regions = dict()\n cls.test_regions['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10))\n\n @classmethod\n def generate_cases(cls):\n \"\"\"Generate all test cases in cls.test_cases combinatorially.\"\"\"\n cls.test_cases = dict()\n factory = AbsoluteAlchemicalFactory(disable_alchemical_dispersion_correction=True)\n\n # We generate all possible combinations of annihilate_sterics/electrostatics\n # for each test system. We also annihilate bonds, angles and torsions every\n # 3 test cases so that we test it at least one for each test system and for\n # each combination of annihilate_sterics/electrostatics.\n n_test_cases = 0\n for test_system_name, test_system in cls.test_systems.items():\n\n # Find standard alchemical region.\n for region_name, region in cls.test_regions.items():\n if region_name in test_system_name:\n break\n assert region_name in test_system_name\n\n # Create all combinations of annihilate_sterics.\n for annihilate_sterics in itertools.product((True, False), repeat=1):\n region = region._replace(annihilate_sterics=annihilate_sterics,\n annihilate_electrostatics=True)\n\n # Create test name.\n test_case_name = test_system_name[:]\n if annihilate_sterics:\n test_case_name += ', annihilated sterics'\n\n # Pre-generate alchemical system\n alchemical_system = factory.create_alchemical_system(test_system.system, region)\n cls.test_cases[test_case_name] = (test_system, alchemical_system, region)\n\n n_test_cases += 1\n\n def test_overlap(self):\n \"\"\"Tests overlap between reference and alchemical systems.\"\"\"\n for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():\n #cached_trajectory_filename = os.path.join(os.environ['HOME'], '.cache', 'alchemy', 'tests',\n # test_name + '.pickle')\n cached_trajectory_filename = None\n f = partial(overlap_check, test_system.system, alchemical_system, test_system.positions,\n cached_trajectory_filename=cached_trajectory_filename, name=test_name)\n f.description = \"Testing reference/alchemical overlap for no alchemical dispersion {}\".format(test_name)\n yield f\n\n\n@attr('slow')\nclass TestAbsoluteAlchemicalFactorySlow(TestAbsoluteAlchemicalFactory):\n \"\"\"Test AbsoluteAlchemicalFactory class with a more comprehensive set of 
systems.\"\"\"\n\n @classmethod\n def define_systems(cls):\n \"\"\"Create test systems and shared objects.\"\"\"\n cls.test_systems = dict()\n cls.test_systems['LennardJonesFluid without dispersion correction'] = \\\n testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=False)\n cls.test_systems['DischargedWaterBox with reaction field, no switch, no dispersion correction'] = \\\n testsystems.DischargedWaterBox(dispersion_correction=False, switch=False,\n nonbondedMethod=openmm.app.CutoffPeriodic)\n cls.test_systems['WaterBox with reaction field, no switch, dispersion correction'] = \\\n testsystems.WaterBox(dispersion_correction=False, switch=True, nonbondedMethod=openmm.app.CutoffPeriodic)\n cls.test_systems['WaterBox with reaction field, switch, no dispersion correction'] = \\\n testsystems.WaterBox(dispersion_correction=False, switch=True, nonbondedMethod=openmm.app.CutoffPeriodic)\n cls.test_systems['WaterBox with PME, switch, dispersion correction'] = \\\n testsystems.WaterBox(dispersion_correction=True, switch=True, nonbondedMethod=openmm.app.PME)\n\n # Big systems.\n cls.test_systems['LysozymeImplicit'] = testsystems.LysozymeImplicit()\n cls.test_systems['DHFRExplicit with reaction field'] = \\\n testsystems.DHFRExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)\n cls.test_systems['SrcExplicit with PME'] = \\\n testsystems.SrcExplicit(nonbondedMethod=openmm.app.PME)\n cls.test_systems['SrcExplicit with reaction field'] = \\\n testsystems.SrcExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)\n cls.test_systems['SrcImplicit'] = testsystems.SrcImplicit()\n\n @classmethod\n def define_regions(cls):\n super(TestAbsoluteAlchemicalFactorySlow, cls).define_regions()\n cls.test_regions['WaterBox'] = AlchemicalRegion(alchemical_atoms=range(3))\n cls.test_regions['LysozymeImplicit'] = AlchemicalRegion(alchemical_atoms=range(2603, 2621))\n cls.test_regions['DHFRExplicit'] = AlchemicalRegion(alchemical_atoms=range(0, 2849))\n cls.test_regions['Src'] = AlchemicalRegion(alchemical_atoms=range(0, 21))\n\n\n# =============================================================================\n# TEST ALCHEMICAL STATE\n# =============================================================================\n\nclass TestAlchemicalState(object):\n \"\"\"Test AlchemicalState compatibility with CompoundThermodynamicState.\"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\"Create test systems and shared objects.\"\"\"\n alanine_vacuum = testsystems.AlanineDipeptideVacuum()\n alanine_explicit = testsystems.AlanineDipeptideExplicit()\n factory = AbsoluteAlchemicalFactory()\n factory_exact_pme = AbsoluteAlchemicalFactory(alchemical_pme_treatment='exact')\n\n cls.alanine_alchemical_atoms = list(range(22))\n cls.alanine_test_system = alanine_explicit\n\n # System with only lambda_sterics and lambda_electrostatics.\n alchemical_region = AlchemicalRegion(alchemical_atoms=cls.alanine_alchemical_atoms)\n alchemical_alanine_system = factory.create_alchemical_system(alanine_vacuum.system, alchemical_region)\n cls.alanine_state = states.ThermodynamicState(alchemical_alanine_system,\n temperature=300*unit.kelvin)\n\n # System with lambda_sterics and lambda_electrostatics and exact PME treatment.\n alchemical_alanine_system_exact_pme = factory_exact_pme.create_alchemical_system(alanine_explicit.system,\n alchemical_region)\n cls.alanine_state_exact_pme = states.ThermodynamicState(alchemical_alanine_system_exact_pme,\n temperature=300*unit.kelvin,\n pressure=1.0*unit.atmosphere)\n\n # System with all lambdas.\n 
alchemical_region = AlchemicalRegion(alchemical_atoms=cls.alanine_alchemical_atoms,\n alchemical_torsions=True, alchemical_angles=True,\n alchemical_bonds=True)\n fully_alchemical_alanine_system = factory.create_alchemical_system(alanine_vacuum.system, alchemical_region)\n cls.full_alanine_state = states.ThermodynamicState(fully_alchemical_alanine_system,\n temperature=300*unit.kelvin)\n\n # Test case: (ThermodynamicState, defined_lambda_parameters)\n cls.test_cases = [\n (cls.alanine_state, {'lambda_sterics', 'lambda_electrostatics'}),\n (cls.alanine_state_exact_pme, {'lambda_sterics', 'lambda_electrostatics'}),\n (cls.full_alanine_state, {'lambda_sterics', 'lambda_electrostatics', 'lambda_bonds',\n 'lambda_angles', 'lambda_torsions'})\n ]\n\n @staticmethod\n def test_constructor():\n \"\"\"Test AlchemicalState constructor behave as expected.\"\"\"\n # Raise an exception if parameter is not recognized.\n with nose.tools.assert_raises(AlchemicalStateError):\n AlchemicalState(lambda_electro=1.0)\n\n # Properties are initialized correctly.\n test_cases = [{},\n {'lambda_sterics': 0.5, 'lambda_angles': 0.5},\n {'lambda_electrostatics': 1.0}]\n for test_kwargs in test_cases:\n alchemical_state = AlchemicalState(**test_kwargs)\n for parameter in AlchemicalState._get_controlled_parameters():\n if parameter in test_kwargs:\n assert getattr(alchemical_state, parameter) == test_kwargs[parameter]\n else:\n assert getattr(alchemical_state, parameter) is None\n\n def test_from_system_constructor(self):\n \"\"\"Test AlchemicalState.from_system constructor.\"\"\"\n # A non-alchemical system raises an error.\n with nose.tools.assert_raises(AlchemicalStateError):\n AlchemicalState.from_system(testsystems.AlanineDipeptideVacuum().system)\n\n # Valid parameters are 1.0 by default in AbsoluteAlchemicalFactory,\n # and all the others must be None.\n for state, defined_lambdas in self.test_cases:\n alchemical_state = AlchemicalState.from_system(state.system)\n for parameter in AlchemicalState._get_controlled_parameters():\n property_value = getattr(alchemical_state, parameter)\n if parameter in defined_lambdas:\n assert property_value == 1.0, '{}: {}'.format(parameter, property_value)\n else:\n assert property_value is None, '{}: {}'.format(parameter, property_value)\n\n @staticmethod\n def test_equality_operator():\n \"\"\"Test equality operator between AlchemicalStates.\"\"\"\n state1 = AlchemicalState(lambda_electrostatics=1.0)\n state2 = AlchemicalState(lambda_electrostatics=1.0)\n state3 = AlchemicalState(lambda_electrostatics=0.9)\n state4 = AlchemicalState(lambda_electrostatics=0.9, lambda_sterics=1.0)\n assert state1 == state2\n assert state2 != state3\n assert state3 != state4\n\n def test_apply_to_system(self):\n \"\"\"Test method AlchemicalState.apply_to_system().\"\"\"\n # Do not modify cached test cases.\n test_cases = copy.deepcopy(self.test_cases)\n\n # Test precondition: all parameters are 1.0.\n for state, defined_lambdas in test_cases:\n kwargs = dict.fromkeys(defined_lambdas, 1.0)\n alchemical_state = AlchemicalState(**kwargs)\n assert alchemical_state == AlchemicalState.from_system(state.system)\n\n # apply_to_system() modifies the state.\n for state, defined_lambdas in test_cases:\n kwargs = dict.fromkeys(defined_lambdas, 0.5)\n alchemical_state = AlchemicalState(**kwargs)\n system = state.system\n alchemical_state.apply_to_system(system)\n system_state = AlchemicalState.from_system(system)\n assert system_state == alchemical_state\n\n # Raise an error if an extra parameter is defined in 
the system.\n for state, defined_lambdas in test_cases:\n defined_lambdas = set(defined_lambdas) # Copy\n defined_lambdas.pop() # Remove one element.\n kwargs = dict.fromkeys(defined_lambdas, 1.0)\n alchemical_state = AlchemicalState(**kwargs)\n with nose.tools.assert_raises(AlchemicalStateError):\n alchemical_state.apply_to_system(state.system)\n\n # Raise an error if an extra parameter is defined in the state.\n for state, defined_lambdas in test_cases:\n if 'lambda_bonds' in defined_lambdas:\n continue\n defined_lambdas = set(defined_lambdas) # Copy\n defined_lambdas.add('lambda_bonds') # Add extra parameter.\n kwargs = dict.fromkeys(defined_lambdas, 1.0)\n alchemical_state = AlchemicalState(**kwargs)\n with nose.tools.assert_raises(AlchemicalStateError):\n alchemical_state.apply_to_system(state.system)\n\n def test_check_system_consistency(self):\n \"\"\"Test method AlchemicalState.check_system_consistency().\"\"\"\n # A system is consistent with itself.\n alchemical_state = AlchemicalState.from_system(self.alanine_state.system)\n alchemical_state.check_system_consistency(self.alanine_state.system)\n\n # Raise error if system has MORE lambda parameters.\n with nose.tools.assert_raises(AlchemicalStateError):\n alchemical_state.check_system_consistency(self.full_alanine_state.system)\n\n # Raise error if system has LESS lambda parameters.\n alchemical_state = AlchemicalState.from_system(self.full_alanine_state.system)\n with nose.tools.assert_raises(AlchemicalStateError):\n alchemical_state.check_system_consistency(self.alanine_state.system)\n\n # Raise error if system has different lambda values.\n alchemical_state.lambda_bonds = 0.5\n with nose.tools.assert_raises(AlchemicalStateError):\n alchemical_state.check_system_consistency(self.full_alanine_state.system)\n\n def test_apply_to_context(self):\n \"\"\"Test method AlchemicalState.apply_to_context.\"\"\"\n integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)\n\n # Raise error if Context has more parameters than AlchemicalState.\n alchemical_state = AlchemicalState.from_system(self.alanine_state.system)\n context = self.full_alanine_state.create_context(copy.deepcopy(integrator))\n with nose.tools.assert_raises(AlchemicalStateError):\n alchemical_state.apply_to_context(context)\n del context\n\n # Raise error if AlchemicalState is applied to a Context with missing parameters.\n alchemical_state = AlchemicalState.from_system(self.full_alanine_state.system)\n context = self.alanine_state.create_context(copy.deepcopy(integrator))\n with nose.tools.assert_raises(AlchemicalStateError):\n alchemical_state.apply_to_context(context)\n del context\n\n # Correctly sets Context's parameters.\n for state in [self.full_alanine_state, self.alanine_state_exact_pme]:\n alchemical_state = AlchemicalState.from_system(state.system)\n context = state.create_context(copy.deepcopy(integrator))\n alchemical_state.set_alchemical_parameters(0.5)\n alchemical_state.apply_to_context(context)\n for parameter_name, parameter_value in context.getParameters().items():\n if parameter_name in alchemical_state._parameters:\n assert parameter_value == 0.5\n del context\n\n def test_standardize_system(self):\n \"\"\"Test method AlchemicalState.standardize_system.\"\"\"\n test_cases = [self.full_alanine_state, self.alanine_state_exact_pme]\n\n for state in test_cases:\n # First create a non-standard system.\n system = copy.deepcopy(state.system)\n alchemical_state = AlchemicalState.from_system(system)\n alchemical_state.set_alchemical_parameters(0.5)\n 
alchemical_state.apply_to_system(system)\n\n # Test pre-condition: The state of the System has been changed.\n assert AlchemicalState.from_system(system).lambda_electrostatics == 0.5\n\n # Check that _standardize_system() sets all parameters back to 1.0.\n alchemical_state._standardize_system(system)\n standard_alchemical_state = AlchemicalState.from_system(system)\n assert alchemical_state != standard_alchemical_state\n for parameter_name, value in alchemical_state._parameters.items():\n standard_value = getattr(standard_alchemical_state, parameter_name)\n assert (value is None and standard_value is None) or (standard_value == 1.0)\n\n def test_find_force_groups_to_update(self):\n \"\"\"Test method AlchemicalState._find_force_groups_to_update.\"\"\"\n test_cases = [self.full_alanine_state, self.alanine_state_exact_pme]\n\n for thermodynamic_state in test_cases:\n system = copy.deepcopy(thermodynamic_state.system)\n alchemical_state = AlchemicalState.from_system(system)\n alchemical_state2 = copy.deepcopy(alchemical_state)\n\n # Each lambda should be separated in its own force group.\n expected_force_groups = {}\n for force, lambda_name, _ in AlchemicalState._get_system_controlled_parameters(\n system, parameters_name_suffix=None):\n expected_force_groups[lambda_name] = force.getForceGroup()\n\n integrator = openmm.VerletIntegrator(2.0*unit.femtoseconds)\n context = create_context(system, integrator)\n\n # No force group should be updated if we don't move.\n assert alchemical_state._find_force_groups_to_update(context, alchemical_state2, memo={}) == set()\n\n # Change the lambdas one by one and check that the method\n # recognize that the force group energy must be updated.\n for lambda_name in AlchemicalState._get_controlled_parameters():\n # Check that the system defines the global variable.\n if getattr(alchemical_state, lambda_name) is None:\n continue\n\n # Change the current state.\n setattr(alchemical_state2, lambda_name, 0.0)\n force_group = expected_force_groups[lambda_name]\n assert alchemical_state._find_force_groups_to_update(context, alchemical_state2, memo={}) == {force_group}\n setattr(alchemical_state2, lambda_name, 1.0) # Reset current state.\n del context\n\n def test_alchemical_functions(self):\n \"\"\"Test alchemical variables and functions work correctly.\"\"\"\n system = copy.deepcopy(self.full_alanine_state.system)\n alchemical_state = AlchemicalState.from_system(system)\n\n # Add two alchemical variables to the state.\n alchemical_state.set_function_variable('lambda', 1.0)\n alchemical_state.set_function_variable('lambda2', 0.5)\n assert alchemical_state.get_function_variable('lambda') == 1.0\n assert alchemical_state.get_function_variable('lambda2') == 0.5\n\n # Cannot call an alchemical variable as a supported parameter.\n with nose.tools.assert_raises(AlchemicalStateError):\n alchemical_state.set_function_variable('lambda_sterics', 0.5)\n\n # Assign string alchemical functions to parameters.\n alchemical_state.lambda_sterics = AlchemicalFunction('lambda')\n alchemical_state.lambda_electrostatics = AlchemicalFunction('(lambda + lambda2) / 2.0')\n assert alchemical_state.lambda_sterics == 1.0\n assert alchemical_state.lambda_electrostatics == 0.75\n\n # Setting alchemical variables updates alchemical parameter as well.\n alchemical_state.set_function_variable('lambda2', 0)\n assert alchemical_state.lambda_electrostatics == 0.5\n\n # ---------------------------------------------------\n # Integration tests with CompoundThermodynamicStates\n # 
---------------------------------------------------\n\n def test_constructor_compound_state(self):\n \"\"\"The AlchemicalState is set on construction of the CompoundState.\"\"\"\n test_cases = copy.deepcopy(self.test_cases)\n\n # Test precondition: the original systems are in fully interacting state.\n for state, defined_lambdas in test_cases:\n system_state = AlchemicalState.from_system(state.system)\n kwargs = dict.fromkeys(defined_lambdas, 1.0)\n assert system_state == AlchemicalState(**kwargs)\n\n # CompoundThermodynamicState set the system state in constructor.\n for state, defined_lambdas in test_cases:\n kwargs = dict.fromkeys(defined_lambdas, 0.5)\n alchemical_state = AlchemicalState(**kwargs)\n compound_state = states.CompoundThermodynamicState(state, [alchemical_state])\n system_state = AlchemicalState.from_system(compound_state.system)\n assert system_state == alchemical_state\n\n def test_lambda_properties_compound_state(self):\n \"\"\"Lambda properties setters/getters work in the CompoundState system.\"\"\"\n test_cases = copy.deepcopy(self.test_cases)\n\n for state, defined_lambdas in test_cases:\n alchemical_state = AlchemicalState.from_system(state.system)\n compound_state = states.CompoundThermodynamicState(state, [alchemical_state])\n\n # Defined properties can be assigned and read.\n for parameter_name in defined_lambdas:\n assert getattr(compound_state, parameter_name) == 1.0\n setattr(compound_state, parameter_name, 0.5)\n assert getattr(compound_state, parameter_name) == 0.5\n\n # System global variables are updated correctly\n system_alchemical_state = AlchemicalState.from_system(compound_state.system)\n for parameter_name in defined_lambdas:\n assert getattr(system_alchemical_state, parameter_name) == 0.5\n\n # Same for parameters setters.\n compound_state.set_alchemical_parameters(1.0)\n system_alchemical_state = AlchemicalState.from_system(compound_state.system)\n for parameter_name in defined_lambdas:\n assert getattr(compound_state, parameter_name) == 1.0\n assert getattr(system_alchemical_state, parameter_name) == 1.0\n\n # Same for alchemical variables setters.\n compound_state.set_function_variable('lambda', 0.25)\n for parameter_name in defined_lambdas:\n setattr(compound_state, parameter_name, AlchemicalFunction('lambda'))\n system_alchemical_state = AlchemicalState.from_system(compound_state.system)\n for parameter_name in defined_lambdas:\n assert getattr(compound_state, parameter_name) == 0.25\n assert getattr(system_alchemical_state, parameter_name) == 0.25\n\n def test_set_system_compound_state(self):\n \"\"\"Setting inconsistent system in compound state raise errors.\"\"\"\n alanine_state = copy.deepcopy(self.alanine_state)\n alchemical_state = AlchemicalState.from_system(alanine_state.system)\n compound_state = states.CompoundThermodynamicState(alanine_state, [alchemical_state])\n\n # We create an inconsistent state that has different parameters.\n incompatible_state = copy.deepcopy(alchemical_state)\n incompatible_state.lambda_electrostatics = 0.5\n\n # Setting an inconsistent alchemical system raise an error.\n system = compound_state.system\n incompatible_state.apply_to_system(system)\n with nose.tools.assert_raises(AlchemicalStateError):\n compound_state.system = system\n\n # Same for set_system when called with default arguments.\n with nose.tools.assert_raises(AlchemicalStateError):\n compound_state.set_system(system)\n\n # This doesn't happen if we fix the state.\n compound_state.set_system(system, fix_state=True)\n assert 
AlchemicalState.from_system(compound_state.system) != incompatible_state\n\n def test_method_compatibility_compound_state(self):\n \"\"\"Compatibility between states is handled correctly in compound state.\"\"\"\n test_cases = [self.alanine_state, self.alanine_state_exact_pme]\n\n # An incompatible state has a different set of defined lambdas.\n full_alanine_state = copy.deepcopy(self.full_alanine_state)\n alchemical_state_incompatible = AlchemicalState.from_system(full_alanine_state.system)\n compound_state_incompatible = states.CompoundThermodynamicState(full_alanine_state,\n [alchemical_state_incompatible])\n\n for state in test_cases:\n state = copy.deepcopy(state)\n alchemical_state = AlchemicalState.from_system(state.system)\n compound_state = states.CompoundThermodynamicState(state, [alchemical_state])\n\n # A compatible state has the same defined lambda parameters,\n # but their values can be different.\n alchemical_state_compatible = copy.deepcopy(alchemical_state)\n assert alchemical_state.lambda_electrostatics != 0.5 # Test pre-condition.\n alchemical_state_compatible.lambda_electrostatics = 0.5\n compound_state_compatible = states.CompoundThermodynamicState(copy.deepcopy(state),\n [alchemical_state_compatible])\n\n # Test states compatibility.\n assert compound_state.is_state_compatible(compound_state_compatible)\n assert not compound_state.is_state_compatible(compound_state_incompatible)\n\n # Test context compatibility.\n integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)\n context = compound_state_compatible.create_context(copy.deepcopy(integrator))\n assert compound_state.is_context_compatible(context)\n\n context = compound_state_incompatible.create_context(copy.deepcopy(integrator))\n assert not compound_state.is_context_compatible(context)\n\n @staticmethod\n def _check_compatibility(state1, state2, context_state1, is_compatible):\n \"\"\"Check the compatibility of states and contexts between 2 states.\"\"\"\n # Compatibility should be commutative\n assert state1.is_state_compatible(state2) is is_compatible\n assert state2.is_state_compatible(state1) is is_compatible\n\n # Test context incompatibility is commutative.\n context_state2 = state2.create_context(openmm.VerletIntegrator(1.0*unit.femtosecond))\n assert state2.is_context_compatible(context_state1) is is_compatible\n assert state1.is_context_compatible(context_state2) is is_compatible\n del context_state2\n\n def test_method_reduced_potential_compound_state(self):\n \"\"\"Test CompoundThermodynamicState.reduced_potential_at_states() method.\n\n Computing the reduced potential singularly and with the class\n method should give the same result.\n \"\"\"\n # Build a mixed collection of compatible and incompatible thermodynamic states.\n thermodynamic_states = [\n copy.deepcopy(self.alanine_state),\n copy.deepcopy(self.alanine_state_exact_pme)\n ]\n\n alchemical_states = [\n AlchemicalState(lambda_electrostatics=1.0, lambda_sterics=1.0),\n AlchemicalState(lambda_electrostatics=0.5, lambda_sterics=1.0),\n AlchemicalState(lambda_electrostatics=0.5, lambda_sterics=0.0),\n AlchemicalState(lambda_electrostatics=1.0, lambda_sterics=1.0)\n ]\n\n compound_states = []\n for thermo_state in thermodynamic_states:\n for alchemical_state in alchemical_states:\n compound_states.append(states.CompoundThermodynamicState(\n copy.deepcopy(thermo_state), [copy.deepcopy(alchemical_state)]))\n\n # Group thermodynamic states by compatibility.\n compatible_groups, _ = states.group_by_compatibility(compound_states)\n assert 
len(compatible_groups) == 2\n\n # Compute the reduced potentials.\n expected_energies = []\n obtained_energies = []\n for compatible_group in compatible_groups:\n # Create context.\n integrator = openmm.VerletIntegrator(2.0*unit.femtoseconds)\n context = compatible_group[0].create_context(integrator)\n context.setPositions(self.alanine_test_system.positions[:compatible_group[0].n_particles])\n\n # Compute with single-state method.\n for state in compatible_group:\n state.apply_to_context(context)\n expected_energies.append(state.reduced_potential(context))\n\n # Compute with multi-state method.\n compatible_energies = states.ThermodynamicState.reduced_potential_at_states(context, compatible_group)\n\n # The first and the last state must be equal.\n assert np.isclose(compatible_energies[0], compatible_energies[-1])\n obtained_energies.extend(compatible_energies)\n\n assert np.allclose(np.array(expected_energies), np.array(obtained_energies))\n\n def test_serialization(self):\n \"\"\"Test AlchemicalState serialization alone and in a compound state.\"\"\"\n alchemical_state = AlchemicalState(lambda_electrostatics=0.5, lambda_angles=None)\n alchemical_state.set_function_variable('lambda', 0.0)\n alchemical_state.lambda_sterics = AlchemicalFunction('lambda')\n\n # Test serialization/deserialization of AlchemicalState.\n serialization = utils.serialize(alchemical_state)\n deserialized_state = utils.deserialize(serialization)\n original_pickle = pickle.dumps(alchemical_state)\n deserialized_pickle = pickle.dumps(deserialized_state)\n assert original_pickle == deserialized_pickle\n\n # Test serialization/deserialization of AlchemicalState in CompoundState.\n test_cases = [copy.deepcopy(self.alanine_state), copy.deepcopy(self.alanine_state_exact_pme)]\n for thermodynamic_state in test_cases:\n compound_state = states.CompoundThermodynamicState(thermodynamic_state, [alchemical_state])\n\n # The serialized system is standard.\n serialization = utils.serialize(compound_state)\n serialized_standard_system = serialization['thermodynamic_state']['standard_system']\n # Decompress the serialized_system\n serialized_standard_system = zlib.decompress(serialized_standard_system).decode(\n states.ThermodynamicState._ENCODING)\n assert serialized_standard_system.__hash__() == compound_state._standard_system_hash\n\n # The object is deserialized correctly.\n deserialized_state = utils.deserialize(serialization)\n assert pickle.dumps(compound_state) == pickle.dumps(deserialized_state)\n\n\n# =============================================================================\n# MAIN FOR MANUAL DEBUGGING\n# =============================================================================\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.backends.backend_pdf.PdfPages", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.close", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "scipy.special.erf", "matplotlib.pyplot.xticks" ] ]
DanielKyr/DeepPurpose
[ "b6379c9946c9b88de344ba7276175271b0e70b42" ]
[ "DeepPurpose/PPI.py" ]
[ "import torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom torch.utils import data\nfrom torch.utils.data import SequentialSampler\nfrom torch import nn \n\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom time import time\nfrom sklearn.metrics import mean_squared_error, roc_auc_score, average_precision_score, f1_score, log_loss\nfrom lifelines.utils import concordance_index\nfrom scipy.stats import pearsonr\nimport pickle \ntorch.manual_seed(2)\nnp.random.seed(3)\nimport copy\nfrom prettytable import PrettyTable\n\nimport os\n\nfrom DeepPurpose.utils import *\nfrom DeepPurpose.model_helper import Encoder_MultipleLayers, Embeddings \nfrom DeepPurpose.encoders import *\n\nclass Classifier(nn.Sequential):\n\tdef __init__(self, model_protein, **config):\n\t\tsuper(Classifier, self).__init__()\n\t\tself.input_dim_protein = config['hidden_dim_protein']\n\t\tself.model_protein = model_protein\n\t\tself.dropout = nn.Dropout(0.1)\n\n\t\tself.hidden_dims = config['cls_hidden_dims']\n\t\tlayer_size = len(self.hidden_dims) + 1\n\t\tdims = [2*self.input_dim_protein] + self.hidden_dims + [1]\n\t\t\n\t\tself.predictor = nn.ModuleList([nn.Linear(dims[i], dims[i+1]) for i in range(layer_size)])\n\n\tdef forward(self, v_D, v_P):\n\t\t# each encoding\n\t\tv_D = self.model_protein(v_D)\n\t\tv_P = self.model_protein(v_P)\n\t\t# concatenate and classify\n\t\tv_f = torch.cat((v_D, v_P), 1)\n\t\tfor i, l in enumerate(self.predictor):\n\t\t\tif i==(len(self.predictor)-1):\n\t\t\t\tv_f = l(v_f)\n\t\t\telse:\n\t\t\t\tv_f = F.relu(self.dropout(l(v_f)))\n\t\treturn v_f\n\ndef model_initialize(**config):\n\tmodel = PPI_Model(**config)\n\treturn model\n\ndef model_pretrained(path_dir = None, model = None):\n\tif model is not None:\n\t\tpath_dir = download_pretrained_model(model)\n\tconfig = load_dict(path_dir)\n\tmodel = PPI_Model(**config)\n\tmodel.load_pretrained(path_dir + '/model.pt') \n\treturn model\n\n# def mpnn_feature_collate_func(x): \n# \treturn [torch.cat([x[j][i] for j in range(len(x))], 0) for i in range(len(x[0]))]\n\n# def mpnn_feature_collate_func(x):\n# \tassert len(x[0]) == 5\n# \tN_atoms_N_bonds = [i[-1] for i in x]\n# \tN_atoms_scope = []\n# \tf_a = torch.cat([x[j][0] for j in range(len(x))], 0)\n# \tf_b = torch.cat([x[j][1] for j in range(len(x))], 0)\n# \tagraph_lst, bgraph_lst = [], []\n# \tNa, Nb = 0, 0\n# \tfor j in range(len(x)):\n# \t\tagraph_lst.append(x[j][2] + Na)\n# \t\tbgraph_lst.append(x[j][3] + Nb)\n# \t\tN_atoms_scope.append([Na, x[j][2].shape[0]])\n# \t\tNa += x[j][2].shape[0]\n# \t\tNb += x[j][3].shape[0]\n# \tagraph = torch.cat(agraph_lst, 0)\n# \tbgraph = torch.cat(bgraph_lst, 0)\n# \treturn [f_a, f_b, agraph, bgraph, N_atoms_scope]\n\n# def mpnn_collate_func(x):\n# \tmpnn_feature = [i[0] for i in x]\n# \tmpnn_feature = mpnn_feature_collate_func(mpnn_feature)\n# \tfrom torch.utils.data.dataloader import default_collate\n# \tx_remain = [[i[1], i[2]] for i in x]\n# \tx_remain_collated = default_collate(x_remain)\n# \treturn [mpnn_feature] + x_remain_collated\n\nclass PPI_Model:\n\n\tdef __init__(self, **config):\n\t\ttarget_encoding = config['target_encoding']\n\n\t\tif target_encoding == 'AAC' or target_encoding == 'PseudoAAC' or target_encoding == 'Conjoint_triad' or target_encoding == 'Quasi-seq' or target_encoding == 'ESPF':\n\t\t\tself.model_protein = MLP(config['input_dim_protein'], config['hidden_dim_protein'], config['mlp_hidden_dims_target'])\n\t\telif target_encoding == 
'CNN':\n\t\t\tself.model_protein = CNN('protein', **config)\n\t\telif target_encoding == 'CNN_RNN':\n\t\t\tself.model_protein = CNN_RNN('protein', **config)\n\t\telif target_encoding == 'Transformer':\n\t\t\tself.model_protein = transformer('protein', **config)\n\t\telse:\n\t\t\traise AttributeError('Please use one of the available encoding method.')\n\n\t\tself.model = Classifier(self.model_protein, **config)\n\t\tself.config = config\n\t\tself.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\t\t\n\t\tself.target_encoding = target_encoding\n\t\tself.result_folder = config['result_folder']\n\t\tif not os.path.exists(self.result_folder):\n\t\t\tos.mkdir(self.result_folder) \n\t\tself.binary = False\n\t\tif 'num_workers' not in self.config.keys():\n\t\t\tself.config['num_workers'] = 0\n\t\tif 'decay' not in self.config.keys():\n\t\t\tself.config['decay'] = 0\n\n\tdef test_(self, data_generator, model, repurposing_mode = False, test = False):\n\t\ty_pred = []\n\t\ty_label = []\n\t\tmodel.eval()\n\t\tfor i, (v_d, v_p, label) in enumerate(data_generator):\n\t\t\tif self.target_encoding == 'Transformer':\n\t\t\t\tv_d = v_d\n\t\t\t\tv_p = v_p\n\t\t\telse:\n\t\t\t\tv_d = v_d.float().to(self.device) \n\t\t\t\tv_p = v_p.float().to(self.device) \n\t\t\tscore = self.model(v_d, v_p)\n\t\t\tif self.binary:\n\t\t\t\tm = torch.nn.Sigmoid()\n\t\t\t\tlogits = torch.squeeze(m(score)).detach().cpu().numpy()\n\t\t\telse:\n\t\t\t\tlogits = torch.squeeze(score).detach().cpu().numpy()\n\t\t\tlabel_ids = label.to('cpu').numpy()\n\t\t\ty_label = y_label + label_ids.flatten().tolist()\n\t\t\ty_pred = y_pred + logits.flatten().tolist()\n\t\t\toutputs = np.asarray([1 if i else 0 for i in (np.asarray(y_pred) >= 0.5)])\n\t\tmodel.train()\n\t\tif self.binary:\n\t\t\tif repurposing_mode:\n\t\t\t\treturn y_pred\n\t\t\t## ROC-AUC curve\n\t\t\tif test:\n\t\t\t\troc_auc_file = os.path.join(self.result_folder, \"roc-auc.jpg\")\n\t\t\t\tplt.figure(0)\n\t\t\t\troc_curve(y_pred, y_label, roc_auc_file, self.target_encoding)\n\t\t\t\tplt.figure(1)\n\t\t\t\tpr_auc_file = os.path.join(self.result_folder, \"pr-auc.jpg\")\n\t\t\t\tprauc_curve(y_pred, y_label, pr_auc_file, self.target_encoding)\n\n\t\t\treturn roc_auc_score(y_label, y_pred), average_precision_score(y_label, y_pred), f1_score(y_label, outputs), log_loss(y_label, outputs), y_pred\n\t\telse:\n\t\t\tif repurposing_mode:\n\t\t\t\treturn y_pred\n\t\t\treturn mean_squared_error(y_label, y_pred), pearsonr(y_label, y_pred)[0], pearsonr(y_label, y_pred)[1], concordance_index(y_label, y_pred), y_pred\n\n\tdef train(self, train, val, test = None, verbose = True):\n\t\tif len(train.Label.unique()) == 2:\n\t\t\tself.binary = True\n\t\t\tself.config['binary'] = True\n\n\t\tlr = self.config['LR']\n\t\tdecay = self.config['decay']\n\t\tBATCH_SIZE = self.config['batch_size']\n\t\ttrain_epoch = self.config['train_epoch']\n\t\tif 'test_every_X_epoch' in self.config.keys():\n\t\t\ttest_every_X_epoch = self.config['test_every_X_epoch']\n\t\telse: \n\t\t\ttest_every_X_epoch = 40\n\t\tloss_history = []\n\n\t\tself.model = self.model.to(self.device)\n\n\t\t# support multiple GPUs\n\t\tif torch.cuda.device_count() > 1:\n\t\t\tif verbose:\n\t\t\t\tprint(\"Let's use \" + str(torch.cuda.device_count()) + \" GPUs!\")\n\t\t\tself.model = nn.DataParallel(self.model, dim = 0)\n\t\telif torch.cuda.device_count() == 1:\n\t\t\tif verbose:\n\t\t\t\tprint(\"Let's use \" + str(torch.cuda.device_count()) + \" GPU!\")\n\t\telse:\n\t\t\tif verbose:\n\t\t\t\tprint(\"Let's use 
CPU/s!\")\n\t\t# Future TODO: support multiple optimizers with parameters\n\t\topt = torch.optim.Adam(self.model.parameters(), lr = lr, weight_decay = decay)\n\t\tif verbose:\n\t\t\tprint('--- Data Preparation ---')\n\n\t\tparams = {'batch_size': BATCH_SIZE,\n\t \t\t'shuffle': True,\n\t \t\t'num_workers': self.config['num_workers'],\n\t \t\t'drop_last': False}\n\n\t\ttraining_generator = data.DataLoader(data_process_PPI_loader(train.index.values, train.Label.values, train, **self.config), **params)\n\t\tvalidation_generator = data.DataLoader(data_process_PPI_loader(val.index.values, val.Label.values, val, **self.config), **params)\n\t\t\n\t\tif test is not None:\n\t\t\tinfo = data_process_loader(test.index.values, test.Label.values, test, **self.config)\n\t\t\tparams_test = {'batch_size': BATCH_SIZE,\n\t\t\t\t\t'shuffle': False,\n\t\t\t\t\t'num_workers': self.config['num_workers'],\n\t\t\t\t\t'drop_last': False,\n\t\t\t\t\t'sampler':SequentialSampler(info)}\n \n\t\t\ttesting_generator = data.DataLoader(data_process_PPI_loader(test.index.values, test.Label.values, test, **self.config), **params_test)\n\n\t\t# early stopping\n\t\tif self.binary:\n\t\t\tmax_auc = 0\n\t\telse:\n\t\t\tmax_MSE = 10000\n\t\tmodel_max = copy.deepcopy(self.model)\n\n\t\tvalid_metric_record = []\n\t\tvalid_metric_header = [\"# epoch\"] \n\t\tif self.binary:\n\t\t\tvalid_metric_header.extend([\"AUROC\", \"AUPRC\", \"F1\"])\n\t\telse:\n\t\t\tvalid_metric_header.extend([\"MSE\", \"Pearson Correlation\", \"with p-value\", \"Concordance Index\"])\n\t\ttable = PrettyTable(valid_metric_header)\n\t\tfloat2str = lambda x:'%0.4f'%x\n\t\tif verbose:\n\t\t\tprint('--- Go for Training ---')\n\t\tt_start = time() \n\t\tfor epo in range(train_epoch):\n\t\t\tfor i, (v_d, v_p, label) in enumerate(training_generator):\n\t\t\t\tif self.target_encoding == 'Transformer':\n\t\t\t\t\tv_d = v_d\n\t\t\t\t\tv_p = v_p\n\t\t\t\telse:\n\t\t\t\t\tv_d = v_d.float().to(self.device)\n\t\t\t\t\tv_p = v_p.float().to(self.device) \n \n\t\t\t\tscore = self.model(v_d, v_p)\n\t\t\t\tlabel = Variable(torch.from_numpy(np.array(label)).float()).to(self.device)\n\n\t\t\t\tif self.binary:\n\t\t\t\t\tloss_fct = torch.nn.BCELoss()\n\t\t\t\t\tm = torch.nn.Sigmoid()\n\t\t\t\t\tn = torch.squeeze(m(score), 1)\n\t\t\t\t\tloss = loss_fct(n, label)\n\t\t\t\telse:\n\t\t\t\t\tloss_fct = torch.nn.MSELoss()\n\t\t\t\t\tn = torch.squeeze(score, 1)\n\t\t\t\t\tloss = loss_fct(n, label)\n\t\t\t\tloss_history.append(loss.item())\n\n\t\t\t\topt.zero_grad()\n\t\t\t\tloss.backward()\n\t\t\t\topt.step()\n\n\t\t\t\tif verbose:\n\t\t\t\t\tif (i % 100 == 0):\n\t\t\t\t\t\tt_now = time()\n\t\t\t\t\t\tprint('Training at Epoch ' + str(epo + 1) + ' iteration ' + str(i) + \\\n\t\t\t\t\t\t\t' with loss ' + str(loss.cpu().detach().numpy())[:7] +\\\n\t\t\t\t\t\t\t\". 
Total time \" + str(int(t_now - t_start)/3600)[:7] + \" hours\") \n\t\t\t\t\t\t### record total run time\n\n\t\t\t##### validate, select the best model up to now \n\t\t\twith torch.set_grad_enabled(False):\n\t\t\t\tif self.binary: \n\t\t\t\t\t## binary: ROC-AUC, PR-AUC, F1, cross-entropy loss\n\t\t\t\t\tauc, auprc, f1, loss, logits = self.test_(validation_generator, self.model)\n\t\t\t\t\tlst = [\"epoch \" + str(epo)] + list(map(float2str,[auc, auprc, f1]))\n\t\t\t\t\tvalid_metric_record.append(lst)\n\t\t\t\t\tif auc > max_auc:\n\t\t\t\t\t\tmodel_max = copy.deepcopy(self.model)\n\t\t\t\t\t\tmax_auc = auc \n\t\t\t\t\tif verbose:\n\t\t\t\t\t\tprint('Validation at Epoch '+ str(epo + 1) + ' , AUROC: ' + str(auc)[:7] + \\\n\t\t\t\t\t\t ' , AUPRC: ' + str(auprc)[:7] + ' , F1: '+str(f1)[:7] + ' , Cross-entropy Loss: ' + \\\n\t\t\t\t\t\t str(loss)[:7])\n\t\t\t\telse: \n\t\t\t\t\t### regression: MSE, Pearson Correlation, with p-value, Concordance Index \n\t\t\t\t\tmse, r2, p_val, CI, logits = self.test_(validation_generator, self.model)\n\t\t\t\t\tlst = [\"epoch \" + str(epo)] + list(map(float2str,[mse, r2, p_val, CI]))\n\t\t\t\t\tvalid_metric_record.append(lst)\n\t\t\t\t\tif mse < max_MSE:\n\t\t\t\t\t\tmodel_max = copy.deepcopy(self.model)\n\t\t\t\t\t\tmax_MSE = mse\n\t\t\t\t\tif verbose:\n\t\t\t\t\t\tprint('Validation at Epoch '+ str(epo + 1) + ' , MSE: ' + str(mse)[:7] + ' , Pearson Correlation: '\\\n\t\t\t\t\t\t + str(r2)[:7] + ' with p-value: ' + str(p_val)[:7] +' , Concordance Index: '+str(CI)[:7])\n\t\t\ttable.add_row(lst)\n\n\n\t\t# load early stopped model\n\t\tself.model = model_max\n\n\t\t#### after training \n\t\tprettytable_file = os.path.join(self.result_folder, \"valid_markdowntable.txt\")\n\t\twith open(prettytable_file, 'w') as fp:\n\t\t\tfp.write(table.get_string())\n\n\t\tif test is not None:\n\t\t\tif verbose:\n\t\t\t\tprint('--- Go for Testing ---')\n\t\t\tif self.binary:\n\t\t\t\tauc, auprc, f1, loss, logits = self.test_(testing_generator, model_max, test = True)\n\t\t\t\ttest_table = PrettyTable([\"AUROC\", \"AUPRC\", \"F1\"])\n\t\t\t\ttest_table.add_row(list(map(float2str, [auc, auprc, f1])))\n\t\t\t\tif verbose:\n\t\t\t\t\tprint('Validation at Epoch '+ str(epo + 1) + ' , AUROC: ' + str(auc)[:7] + \\\n\t\t\t\t\t ' , AUPRC: ' + str(auprc)[:7] + ' , F1: '+str(f1)[:7] + ' , Cross-entropy Loss: ' + \\\n\t\t\t\t\t str(loss)[:7])\t\t\t\t\n\t\t\telse:\n\t\t\t\tmse, r2, p_val, CI, logits = self.test_(testing_generator, model_max)\n\t\t\t\ttest_table = PrettyTable([\"MSE\", \"Pearson Correlation\", \"with p-value\", \"Concordance Index\"])\n\t\t\t\ttest_table.add_row(list(map(float2str, [mse, r2, p_val, CI])))\n\t\t\t\tif verbose:\n\t\t\t\t\tprint('Testing MSE: ' + str(mse) + ' , Pearson Correlation: ' + str(r2) \n\t\t\t\t\t + ' with p-value: ' + str(p_val) +' , Concordance Index: '+str(CI))\n\t\t\tnp.save(os.path.join(self.result_folder, str(self.target_encoding)\n\t\t\t\t + '_logits.npy'), np.array(logits)) \n\t\n\t\t\t######### learning record ###########\n\n\t\t\t### 1. test results\n\t\t\tprettytable_file = os.path.join(self.result_folder, \"test_markdowntable.txt\")\n\t\t\twith open(prettytable_file, 'w') as fp:\n\t\t\t\tfp.write(test_table.get_string())\n\n\t\t### 2. 
learning curve \n\t\tfontsize = 16\n\t\titer_num = list(range(1,len(loss_history)+1))\n\t\tplt.figure(3)\n\t\tplt.plot(iter_num, loss_history, \"bo-\")\n\t\tplt.xlabel(\"iteration\", fontsize = fontsize)\n\t\tplt.ylabel(\"loss value\", fontsize = fontsize)\n\t\tpkl_file = os.path.join(self.result_folder, \"loss_curve_iter.pkl\")\n\t\twith open(pkl_file, 'wb') as pck:\n\t\t\tpickle.dump(loss_history, pck)\n\n\t\tfig_file = os.path.join(self.result_folder, \"loss_curve.png\")\n\t\tplt.savefig(fig_file)\n\t\tif verbose:\n\t\t\tprint('--- Training Finished ---')\n \n\n\tdef predict(self, df_data):\n\t\t'''\n\t\t\tutils.data_process_repurpose_virtual_screening \n\t\t\tpd.DataFrame\n\t\t'''\n\t\tprint('predicting...')\n\t\tinfo = data_process_loader(df_data.index.values, df_data.Label.values, df_data, **self.config)\n\t\tself.model.to(device)\n\t\tparams = {'batch_size': self.config['batch_size'],\n\t\t\t\t'shuffle': False,\n\t\t\t\t'num_workers': self.config['num_workers'],\n\t\t\t\t'drop_last': False,\n\t\t\t\t'sampler':SequentialSampler(info)}\n\n\t\tgenerator = data.DataLoader(info, **params)\n\n\t\tscore = self.test_(generator, self.model, repurposing_mode = True)\n\t\treturn score\n\n\tdef save_model(self, path_dir):\n\t\tif not os.path.exists(path_dir):\n\t\t\tos.makedirs(path_dir)\n\t\ttorch.save(self.model.state_dict(), path_dir + '/model.pt')\n\t\tsave_dict(path_dir, self.config)\n\n\tdef load_pretrained(self, path):\n\t\tif not os.path.exists(path):\n\t\t\tos.makedirs(path)\n\n\t\tif self.device == 'cuda':\n\t\t\tstate_dict = torch.load(path)\n\t\telse:\n\t\t\tstate_dict = torch.load(path, map_location = torch.device('cpu'))\n\t\t# to support training from multi-gpus data-parallel:\n \n\t\tif next(iter(state_dict))[:7] == 'module.':\n\t\t\t# the pretrained model is from data-parallel module\n\t\t\tfrom collections import OrderedDict\n\t\t\tnew_state_dict = OrderedDict()\n\t\t\tfor k, v in state_dict.items():\n\t\t\t\tname = k[7:] # remove `module.`\n\t\t\t\tnew_state_dict[name] = v\n\t\t\tstate_dict = new_state_dict\n\n\t\tself.model.load_state_dict(state_dict)\n\n\t\tself.binary = self.config['binary']\n\n\n" ]
[ [ "torch.nn.Linear", "torch.cat", "scipy.stats.pearsonr", "torch.squeeze", "torch.cuda.is_available", "sklearn.metrics.average_precision_score", "torch.load", "sklearn.metrics.f1_score", "torch.nn.DataParallel", "matplotlib.pyplot.savefig", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.nn.BCELoss", "torch.device", "numpy.array", "torch.utils.data.SequentialSampler", "matplotlib.pyplot.figure", "torch.cuda.device_count", "sklearn.metrics.roc_auc_score", "torch.nn.Dropout", "sklearn.metrics.mean_squared_error", "torch.nn.MSELoss", "numpy.asarray", "torch.nn.Sigmoid", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "sklearn.metrics.log_loss", "matplotlib.pyplot.ylabel", "torch.set_grad_enabled" ] ]