repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list) |
---|---|---|---|---|---|
shiomasa1218/zi2zi
|
[
"546025f59690b7c7c85de0146aba6f24f26f6b9d"
] |
[
"export.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport tensorflow as tf\nimport argparse\nfrom model.unet import UNet\n\nparser = argparse.ArgumentParser(description='Export generator weights from the checkpoint file')\nparser.add_argument('--model_dir', dest='model_dir', required=True,\n help='directory that saves the model checkpoints')\nparser.add_argument('--batch_size', dest='batch_size', type=int, default=16, help='number of examples in batch')\nparser.add_argument('--inst_norm', dest='inst_norm', type=bool, default=False,\n help='use conditional instance normalization in your model')\nparser.add_argument('--save_dir', default='save_dir', type=str, help='path to save inferred images')\nargs = parser.parse_args()\n\n\ndef main(_):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n with tf.Session(config=config) as sess:\n model = UNet(batch_size=args.batch_size)\n model.register_session(sess)\n model.build_model(is_training=False, inst_norm=args.inst_norm)\n model.export_generator(save_dir=args.save_dir, model_dir=args.model_dir)\n\n\nif __name__ == '__main__':\n tf.app.run()\n"
] |
[
[
"tensorflow.ConfigProto",
"tensorflow.Session",
"tensorflow.app.run"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
BingLiHanShuang/AutoGluon_IntegrateSimpleFeedforward
|
[
"12089bfeb66a77b207547d13bd5a5b4f584f01cc"
] |
[
"autogluon/utils/tabular/ml/utils.py"
] |
[
"import logging\nimport multiprocessing\nimport os\nfrom collections import defaultdict\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\nfrom sklearn.model_selection import KFold, StratifiedKFold, RepeatedKFold, RepeatedStratifiedKFold, train_test_split\n\nfrom .constants import BINARY, REGRESSION, MULTICLASS, SOFTCLASS\nfrom ..metrics import accuracy, root_mean_squared_error, Scorer\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_pred_from_proba(y_pred_proba, problem_type=BINARY):\n if problem_type == BINARY:\n y_pred = [1 if pred >= 0.5 else 0 for pred in y_pred_proba]\n elif problem_type == REGRESSION:\n y_pred = y_pred_proba\n else:\n y_pred = np.argmax(y_pred_proba, axis=1)\n return y_pred\n\n\ndef generate_kfold(X, y=None, n_splits=5, random_state=0, stratified=False, n_repeats=1):\n if stratified and (y is not None):\n if n_repeats > 1:\n kf = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=random_state)\n else:\n kf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state)\n\n kf.get_n_splits(X, y)\n return [[train_index, test_index] for train_index, test_index in kf.split(X, y)]\n else:\n if n_repeats > 1:\n kf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=random_state)\n else:\n kf = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)\n\n kf.get_n_splits(X)\n return [[train_index, test_index] for train_index, test_index in kf.split(X)]\n\n\ndef generate_train_test_split(X: DataFrame, y: Series, problem_type: str, test_size: float = 0.1, random_state=0) -> (DataFrame, DataFrame, Series, Series):\n if (test_size <= 0.0) or (test_size >= 1.0):\n raise ValueError(\"fraction of data to hold-out must be specified between 0 and 1\")\n\n if problem_type in [REGRESSION, SOFTCLASS]:\n stratify = None\n else:\n stratify = y\n\n # TODO: Enable stratified split when y class would result in 0 samples in test.\n # One approach: extract low frequency classes from X/y, add back (1-test_size)% to X_train, y_train, rest to X_test\n # Essentially stratify the high frequency classes, random the low frequency (While ensuring at least 1 example stays for each low frequency in train!)\n # Alternatively, don't test low frequency at all, trust it to work in train set. Risky, but highest quality for predictions.\n X_train, X_test, y_train, y_test = train_test_split(X, y.values, test_size=test_size, shuffle=True, random_state=random_state, stratify=stratify)\n if problem_type != SOFTCLASS:\n y_train = pd.Series(y_train, index=X_train.index)\n y_test = pd.Series(y_test, index=X_test.index)\n else:\n y_train = pd.DataFrame(y_train, index=X_train.index)\n y_test = pd.DataFrame(y_test, index=X_test.index)\n return X_train, X_test, y_train, y_test\n\n\ndef convert_categorical_to_int(X):\n X = X.copy()\n cat_columns = X.select_dtypes(include=['category']).columns\n X[cat_columns] = X[cat_columns].apply(lambda x: x.cat.codes)\n return X\n\n\ndef setup_outputdir(output_directory):\n if output_directory is None:\n utcnow = datetime.utcnow()\n timestamp = utcnow.strftime(\"%Y%m%d_%H%M%S\")\n output_directory = f\"AutogluonModels/ag-{timestamp}{os.path.sep}\"\n os.makedirs(output_directory)\n logger.log(25, f\"No output_directory specified. 
Models will be saved in: {output_directory}\")\n output_directory = os.path.expanduser(output_directory) # replace ~ with absolute path if it exists\n if output_directory[-1] != os.path.sep:\n output_directory = output_directory + os.path.sep\n return output_directory\n\n\ndef setup_compute(nthreads_per_trial, ngpus_per_trial):\n if nthreads_per_trial is None:\n nthreads_per_trial = multiprocessing.cpu_count() # Use all of processing power / trial by default. To use just half: # int(np.floor(multiprocessing.cpu_count()/2))\n\n if ngpus_per_trial is None:\n ngpus_per_trial = 0 # do not use GPU by default\n elif ngpus_per_trial > 1:\n ngpus_per_trial = 1\n logger.debug(\"tabular_prediction currently doesn't use >1 GPU per training run. ngpus_per_trial set = 1\")\n return nthreads_per_trial, ngpus_per_trial\n\n\ndef setup_trial_limits(time_limits, num_trials, hyperparameters={'NN': None}):\n \"\"\" Adjust default time limits / num_trials \"\"\"\n if num_trials is None:\n if time_limits is None:\n time_limits = 10 * 60 # run for 10min by default\n time_limits /= float(len(hyperparameters)) # each model type gets half the available time\n num_trials = 1000 # run up to 1000 trials (or as you can within the given time_limits)\n elif time_limits is None:\n time_limits = int(1e6) # user only specified num_trials, so run all of them regardless of time-limits\n else:\n time_limits /= float(len(hyperparameters)) # each model type gets half the available time\n\n if time_limits <= 10: # threshold = 10sec, ie. too little time to run >1 trial.\n num_trials = 1\n time_limits *= 0.9 # reduce slightly to account for extra time overhead\n return time_limits, num_trials\n\n\ndef dd_list():\n return defaultdict(list)\n\n\ndef get_leaderboard_pareto_frontier(leaderboard: DataFrame, score_col='score_val', inference_time_col='pred_time_val_full') -> DataFrame:\n \"\"\"\n Given a set of models, returns in ranked order from best score to worst score models which satisfy the criteria:\n 1. 
No other model in the set has both a lower inference time and a better or equal score.\n\n :param leaderboard: Leaderboard DataFrame of model info containing score_col and inference_time_col\n :param score_col: Column name in leaderboard of model score values\n :param inference_time_col: Column name in leaderboard of model inference times\n :return: Subset of the original leaderboard DataFrame containing only models that are a valid optimal choice at different valuations of score and inference time.\n \"\"\"\n leaderboard = leaderboard.sort_values(by=[score_col, inference_time_col], ascending=[False, True]).reset_index(drop=True)\n leaderboard_unique = leaderboard.drop_duplicates(subset=[score_col])\n\n pareto_frontier = []\n inference_time_min = None\n for index, row in leaderboard_unique.iterrows():\n if row[inference_time_col] is None or row[score_col] is None:\n pass\n elif (inference_time_min is None) or (row[inference_time_col] < inference_time_min):\n inference_time_min = row[inference_time_col]\n pareto_frontier.append(index)\n leaderboard_pareto_frontier = leaderboard_unique.loc[pareto_frontier].reset_index(drop=True)\n return leaderboard_pareto_frontier\n\n\ndef shuffle_df_rows(X: DataFrame, seed=0, reset_index=True):\n \"\"\"Returns DataFrame with rows shuffled based on seed value.\"\"\"\n row_count = X.shape[0]\n np.random.seed(seed)\n rand_shuffle = np.random.randint(0, row_count, size=row_count)\n X_shuffled = X.iloc[rand_shuffle]\n if reset_index:\n X_shuffled.reset_index(inplace=True, drop=True)\n return X_shuffled\n\n\ndef normalize_pred_probas(y_predprob, problem_type, eps=1e-7):\n \"\"\" Remaps the predicted probabilities to ensure there are no zeros (needed for certain metrics like log-loss)\n and that no predicted probability exceeds [0,1] (eg. 
in distillation when classification is treated as regression).\n Args:\n y_predprob: 1D (for binary classification) or 2D (for multiclass) numpy array of predicted probabilities\n problem_type: We only consider normalization if the problem_type is one of: [BINARY, MULTICLASS, SOFTCLASS]\n eps: controls around how far from 0 remapped predicted probabilities should be (larger `eps` means predicted probabilities will lie further from 0).\n \"\"\"\n if (problem_type == REGRESSION) and (len(y_predprob.shape) > 1) and (y_predprob.shape[1] > 1):\n problem_type = SOFTCLASS # this was MULTICLASS problem converted to REGRESSION (as done in distillation)\n\n if problem_type in [BINARY, REGRESSION]:\n if len(y_predprob.shape) > 1 and min(y_predprob.shape) > 1:\n raise ValueError(f\"cannot call normalize_pred_probas with problem_type={problem_type} and y_predprob.shape=={y_predprob.shape}\")\n return normalize_binary_probas(y_predprob, eps)\n elif problem_type in [MULTICLASS, SOFTCLASS]: # clip all probs below at eps and then renormalize\n if len(y_predprob.shape) == 1:\n return normalize_binary_probas(y_predprob, eps)\n else:\n return normalize_multi_probas(y_predprob, eps)\n else:\n raise ValueError(f\"Invalid problem_type\")\n\n\ndef normalize_binary_probas(y_predprob, eps):\n \"\"\" Remaps the predicted probabilities to open interval (0,1) while maintaining rank order \"\"\"\n (pmin,pmax) = (eps, 1-eps) # predicted probs outside this range will be remapped into (0,1)\n which_toobig = y_predprob > pmax\n if np.sum(which_toobig) > 0: # remap overly large probs\n y_predprob = np.logical_not(which_toobig)*y_predprob + which_toobig*(1-(eps*np.exp(-(y_predprob-pmax))))\n which_toosmall = y_predprob < pmin\n if np.sum(which_toosmall) > 0: # remap overly small probs\n y_predprob = np.logical_not(which_toosmall)*y_predprob + which_toosmall*eps*np.exp(-(pmin-y_predprob))\n return y_predprob\n\n\ndef normalize_multi_probas(y_predprob, eps):\n \"\"\" Remaps the predicted probabilities to lie in (0,1) where eps controls how far from 0 smallest class-probability lies \"\"\"\n min_predprob = np.min(y_predprob)\n if min_predprob < 0: # ensure nonnegative rows\n most_negative_rowvals = np.clip(np.min(y_predprob, axis=1), a_min=None, a_max=0)\n y_predprob = y_predprob - most_negative_rowvals[:,None]\n if min_predprob < eps:\n y_predprob = np.clip(y_predprob, a_min=eps, a_max=None) # ensure no entries < eps\n y_predprob = y_predprob / y_predprob.sum(axis=1, keepdims=1) # renormalize\n return y_predprob\n\n\ndef infer_problem_type(y: Series):\n \"\"\" Identifies which type of prediction problem we are interested in (if user has not specified).\n Ie. 
binary classification, multi-class classification, or regression.\n \"\"\"\n if len(y) == 0:\n raise ValueError(\"provided labels cannot have length = 0\")\n y = y.dropna() # Remove missing values from y (there should not be any though as they were removed in Learner.general_data_processing())\n num_rows = len(y)\n\n unique_values = y.unique()\n unique_count = len(unique_values)\n if unique_count > 10:\n logger.log(20, f'Here are the first 10 unique label values in your data: {list(unique_values[:10])}')\n else:\n logger.log(20, f'Here are the {unique_count} unique label values in your data: {list(unique_values)}')\n\n MULTICLASS_LIMIT = 1000 # if numeric and class count would be above this amount, assume it is regression\n if num_rows > 1000:\n REGRESS_THRESHOLD = 0.05 # if the unique-ratio is less than this, we assume multiclass classification, even when labels are integers\n else:\n REGRESS_THRESHOLD = 0.1\n\n if unique_count == 2:\n problem_type = BINARY\n reason = \"only two unique label-values observed\"\n elif unique_values.dtype == 'object':\n problem_type = MULTICLASS\n reason = \"dtype of label-column == object\"\n elif np.issubdtype(unique_values.dtype, np.floating):\n unique_ratio = unique_count / float(num_rows)\n if (unique_ratio <= REGRESS_THRESHOLD) and (unique_count <= MULTICLASS_LIMIT):\n try:\n can_convert_to_int = np.array_equal(y, y.astype(int))\n if can_convert_to_int:\n problem_type = MULTICLASS\n reason = \"dtype of label-column == float, but few unique label-values observed and label-values can be converted to int\"\n else:\n problem_type = REGRESSION\n reason = \"dtype of label-column == float and label-values can't be converted to int\"\n except:\n problem_type = REGRESSION\n reason = \"dtype of label-column == float and label-values can't be converted to int\"\n else:\n problem_type = REGRESSION\n reason = \"dtype of label-column == float and many unique label-values observed\"\n elif np.issubdtype(unique_values.dtype, np.integer):\n unique_ratio = unique_count / float(num_rows)\n if (unique_ratio <= REGRESS_THRESHOLD) and (unique_count <= MULTICLASS_LIMIT):\n problem_type = MULTICLASS # TODO: Check if integers are from 0 to n-1 for n unique values, if they have a wide spread, it could still be regression\n reason = \"dtype of label-column == int, but few unique label-values observed\"\n else:\n problem_type = REGRESSION\n reason = \"dtype of label-column == int and many unique label-values observed\"\n else:\n raise NotImplementedError('label dtype', unique_values.dtype, 'not supported!')\n logger.log(25, f\"AutoGluon infers your prediction problem is: {problem_type} (because {reason}).\")\n logger.log(25, f\"If this is wrong, please specify `problem_type` argument in fit() instead \"\n f\"(You may specify problem_type as one of: {[BINARY, MULTICLASS, REGRESSION]})\\n\")\n return problem_type\n\n\ndef infer_eval_metric(problem_type: str) -> Scorer:\n \"\"\"Infers appropriate default eval metric based on problem_type. 
Useful when no eval_metric was provided.\"\"\"\n if problem_type == BINARY:\n return accuracy\n elif problem_type == MULTICLASS:\n return accuracy\n else:\n return root_mean_squared_error\n\n\ndef default_holdout_frac(num_train_rows, hyperparameter_tune=False):\n \"\"\" Returns default holdout_frac used in fit().\n Between row count 5,000 and 25,000 keep 0.1 holdout_frac, as we want to grow validation set to a stable 2500 examples.\n \"\"\"\n if num_train_rows < 5000:\n holdout_frac = max(0.1, min(0.2, 500.0 / num_train_rows))\n else:\n holdout_frac = max(0.01, min(0.1, 2500.0 / num_train_rows))\n\n if hyperparameter_tune:\n holdout_frac = min(0.2, holdout_frac * 2) # We want to allocate more validation data for HPO to avoid overfitting\n\n return holdout_frac\n\n\ndef augment_rare_classes(X, label, threshold):\n \"\"\" Use this method when using certain eval_metrics like log_loss, for which no classes may be filtered out.\n This method will augment dataset with additional examples of rare classes.\n \"\"\"\n class_counts = X[label].value_counts()\n class_counts_invalid = class_counts[class_counts < threshold]\n if len(class_counts_invalid) == 0:\n logger.debug(\"augment_rare_classes did not need to duplicate any data from rare classes\")\n return X\n\n aug_df = None\n for clss, n_clss in class_counts_invalid.iteritems():\n n_toadd = threshold - n_clss\n clss_df = X.loc[X[label] == clss]\n if aug_df is None:\n aug_df = clss_df[:0].copy()\n duplicate_times = int(np.floor(n_toadd / n_clss))\n remainder = n_toadd % n_clss\n new_df = clss_df.copy()\n new_df = new_df[:remainder]\n while duplicate_times > 0:\n logger.debug(f\"Duplicating data from rare class: {clss}\")\n duplicate_times -= 1\n new_df = new_df.append(clss_df.copy())\n aug_df = aug_df.append(new_df.copy())\n\n X = X.append(aug_df)\n class_counts = X[label].value_counts()\n class_counts_invalid = class_counts[class_counts < threshold]\n if len(class_counts_invalid) > 0:\n raise RuntimeError(\"augment_rare_classes failed to produce enough data from rare classes\")\n logger.log(15, \"Replicated some data from rare classes in training set because eval_metric requires all classes\")\n return X\n"
] |
[
[
"numpy.logical_not",
"sklearn.model_selection.RepeatedKFold",
"pandas.Series",
"numpy.random.seed",
"numpy.min",
"numpy.clip",
"numpy.issubdtype",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.model_selection.StratifiedKFold",
"sklearn.model_selection.KFold",
"numpy.argmax",
"sklearn.model_selection.RepeatedStratifiedKFold",
"numpy.floor",
"numpy.exp",
"numpy.sum",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
vik748/contrast_manipulation_toolbox
|
[
"ccf04cdf323e0cf16475a8873090fa87b5fdf6f3"
] |
[
"analyse_image_contrast.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 21 23:10:35 2020\n\n@author: vik748\n\"\"\"\nfrom cmtpy.histogram_warping_ace import HistogramWarpingACE\nfrom cmtpy import contrast_measurement as cm\nimport cv2\nimport numpy as np\nimport sys, os\nfrom matplotlib import pyplot as plt\n\ndef calculate_best_screen_packing(N, img_resolution = (800,600), screen_resolution = (1920, 1080)):\n screen_x = screen_resolution[0]\n screen_y = screen_resolution[1]\n screen_area = screen_x * screen_y\n\n img_x = img_resolution[0]\n img_y = img_resolution[1]\n img_aspect = img_x / img_y\n\n best_dims = (None,None)\n best_eff = 0.0\n\n for n_rows in range(1,N//2 +1):\n #print(i)\n n_cols = N // n_rows\n if N % n_rows != 0: n_cols = n_cols+1\n\n #print(n_rows, n_cols)\n\n # Test by maximising image height\n img_y_scaled = screen_y / n_rows\n img_x_scaled = img_y_scaled * img_aspect\n img_area_scaled = img_x_scaled * img_y_scaled\n eff = img_area_scaled * N / screen_area\n #print(img_x_scaled, img_y_scaled, eff)\n\n if eff <= 1.0 and eff > best_eff:\n best_eff = eff\n best_dims = (n_rows, n_cols)\n\n # Test by maximising image width\n img_x_scaled = screen_x / n_cols\n img_y_scaled = img_x_scaled / img_aspect\n img_area_scaled = img_x_scaled * img_y_scaled\n eff = img_area_scaled * N / screen_area\n #print(img_x_scaled, img_y_scaled, eff)\n if eff <= 1.0 and eff > best_eff:\n best_eff = eff\n best_dims = (n_rows, n_cols)\n\n #print(\"Best dims:\",best_dims,best_eff)\n return best_dims\n\ndef read_grimage(img_name, resize_scale = None, normalize=False, image_depth=8):\n '''\n Read image from file, convert to grayscale and resize if required\n Parameters\n ----------\n img_name : String\n Filename\n resize_scale : float, optional\n float scale factor for image < 1 downsamples and > 1 upsamples. The default is None.\n normalize : bool, optional\n Return normalized float image. The default is False.\n image_depth : int, optional\n Bit depth of image being read. 
The default is 8.\n\n Raises\n ------\n FileNotFoundError\n Raisees FileNotFound Error if unable to read image\n\n Returns\n -------\n gr : MxN uint8 or flat32 numpy array\n grayscale image\n '''\n img = cv2.imread(img_name, cv2.IMREAD_COLOR)\n if img is None:\n raise FileNotFoundError (\"Could not read image from: {}\".format(img_name))\n gr_full = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n if not resize_scale is None:\n gr = cv2.resize(gr_full, (0,0), fx=resize_scale, fy=resize_scale, interpolation=cv2.INTER_AREA)\n else:\n gr = gr_full\n\n if normalize:\n levels = 2 ** image_depth - 1\n gr = np.divide(gr, levels, dtype=np.float32)\n\n return gr\n\n\ndef show_plot(fig=None):\n if fig is None:\n fig = plt.gcf()\n\n #plt.show()\n plt.pause(1e-3)\n fig.canvas.manager.window.activateWindow()\n fig.canvas.manager.window.raise_()\n\ndef analyze_contrast(gr_name, graph_axes, iceberg_slice=np.s_[:,:], set_name=None):\n gr_full = cv2.imread(gr_name, cv2.IMREAD_GRAYSCALE)\n gr = cv2.resize(gr_full, (0,0), fx=1/5, fy=1/5, interpolation=cv2.INTER_AREA)\n\n adjs = np.arange(0,-1.05,-.2)\n sfs = np.arange(-1.0,0.2,.2)\n sfs_g, adj_g = np.meshgrid(sfs, adjs)\n\n ace_obj = HistogramWarpingACE(no_bits=8, tau=0.01, lam=5, adjustment_factor=-1.0, stretch_factor=-1.0,\n min_stretch_bits=4, downsample_for_kde=True,debug=False, plot_histograms=False)\n v_k, a_k = ace_obj.compute_vk_and_ak(gr)\n\n warped_images = np.empty(adj_g.shape,dtype=object)\n\n fig,axes = plt.subplots(*adj_g.shape, sharex=True, sharey=True)\n for axi in axes.ravel():\n axi.get_xaxis().set_ticks ([])\n axi.get_yaxis().set_ticks ([])\n axi.spines['left'].set_visible(False)\n axi.spines['right'].set_visible(False)\n axi.spines['bottom'].set_visible(False)\n axi.spines['top'].set_visible(False)\n\n for (i,j),adj in np.ndenumerate(adj_g):\n print(i,adj)\n outputs = ace_obj.compute_bk_and_dk(v_k, a_k, adjustment_factor=adj, stretch_factor=sfs_g[i,j])\n warped_images[i,j], Tx = ace_obj.transform_image(*outputs, gr)\n\n axes[i,j].imshow(warped_images[i,j], cmap='gray', vmin=0, vmax=255)\n #ax.set_title(\"Adj factor = {:.2f}\".format(adj))\n\n for i, sf in enumerate(sfs):\n axes[-1,i].set_xlabel('Stretch: {:.2f}'.format(sf))\n\n\n for j, adj in enumerate(adjs):\n axes[j,0].set_ylabel('Adj: {:.2f}'.format(adj))\n\n\n fig.subplots_adjust(left=0.025, bottom=0.025, right=0.99, top=.9, wspace=0.00, hspace=0.00)\n fig.suptitle(set_name)\n show_plot()\n\n\n\nif sys.platform == 'darwin':\n data_fold=os.path.expanduser('~/Google Drive/data')\nelse:\n data_fold=os.path.expanduser('~/data')\n\ndata_fold=os.path.expanduser('~/data')\n\ngr1_name = os.path.join(data_fold,'Lars1_080818','G0287250.JPG')\ngr2_name = os.path.join(data_fold,'Lars2_081018','G0029490.JPG')\ngr3_name = os.path.join(data_fold, 'chess_board','GOPR1488.JPG')\n\ngr1_iceberg_slice = np.s_[205:310,:]\ngr2_iceberg_slice = np.s_[130:280,:]\n\n\ngr_name = gr1_name\ngr_slice = gr1_iceberg_slice\ngr = read_grimage(gr_name, resize_scale = 1/5)\nset_name = 'Lars1 '\n\n\ncontrast_estimators = {'Global Contrast Factor': lambda gr: cm.compute_global_contrast_factor(gr),\n 'RMS Contrast': lambda gr: cm.compute_rms_contrast(gr,debug=False),\n 'Local box filt': lambda gr: cm.compute_box_filt_contrast(gr, kernel_size=17, debug=False),\n 'Local gaussian filt': lambda gr: cm.compute_gaussian_filt_contrast(gr, sigma=5.0, debug=False),\n 'Local bilateral filt': lambda gr: cm.compute_bilateral_filt_contrast(gr, sigmaSpace=5.0, sigmaColor=0.05, debug=False)}\n\n\nace_obj = HistogramWarpingACE(no_bits=8, 
tau=0.01, lam=5, adjustment_factor=-1.0, stretch_factor=-1.0,\n min_stretch_bits=4, downsample_for_kde=True,debug=False, plot_histograms=False)\nv_k, a_k = ace_obj.compute_vk_and_ak(gr)\n\nadjs = np.arange(0,-1.05,-.05)\ncontrast_estimates=np.zeros((len(adjs),len(contrast_estimators)))\ncontrast_estimates_slice=np.zeros((len(adjs),len(contrast_estimators)))\n\nwarped_images = np.empty(adjs.shape,dtype=object)\n\nfor i,adj in enumerate(adjs):\n print(i,adj)\n outputs = ace_obj.compute_bk_and_dk(v_k, a_k, adjustment_factor=adj, stretch_factor=adj )\n warped_images[i], Tx = ace_obj.transform_image(*outputs, gr)\n contrast_estimates[i,:] =np.array([ce(warped_images[i]) for nm, ce in contrast_estimators.items()])\n contrast_estimates_slice[i,:] =np.array([ce(warped_images[i][gr_slice]) for nm, ce in contrast_estimators.items()])\n\nfig_imgs,axes_imgs = plt.subplots(*calculate_best_screen_packing(len(adjs),img_resolution=gr.shape), sharex=True, sharey=True)\nfig_imgs.suptitle(gr_name)\n[ax.set_axis_off() for ax in axes_imgs.ravel()]\nfig_imgs.subplots_adjust(left=0.01, bottom=0.1, right=0.99, top=.9, wspace=0.01, hspace=0.01)\n\n\nfor wimg, ax, adj in zip(warped_images, axes_imgs.ravel(), adjs):\n ax.imshow(wimg, cmap='gray', vmin=0, vmax=255)\n ax.set_title('Adj fact: {:.2f}'.format(adj))\n\nfig_plot,axes_plot = plt.subplots(2,3, sharex=True)\nfig_plot.suptitle(gr_name)\n\n\nfor col, col_slice, ax, est in zip(contrast_estimates.T, contrast_estimates_slice.T, axes_plot.ravel(), contrast_estimators.keys()):\n ax.plot(adjs, col, '.', label=set_name+'Full')\n ax.plot(adjs, col_slice, '.', label=set_name+'Iceberg only')\n ax.set_title(est)\n ax.set_xlabel(\"Adjustment/stretch factor\")\n ax.set_ylabel(\"Calculated contrast\")\n ax.legend()\n\n\n #axes[i,j].imshow(warped_images[i,j], cmap='gray', vmin=0, vmax=255)\n #ax.set_title(\"Adj factor = {:.2f}\".format(adj))\n"
] |
[
[
"numpy.arange",
"numpy.divide",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.gcf",
"numpy.ndenumerate",
"numpy.meshgrid",
"matplotlib.pyplot.pause",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
deeptavker/pysph
|
[
"6a9014fe225879b6f81f514f4db1270ad669c1ce"
] |
[
"pysph/tools/tests/test_geometry_stl.py"
] |
[
"import numpy as np\nimport unittest\nimport pytest\nimport tempfile\n\npytest.importorskip(\"stl\")\n\nimport pysph.tools.geometry_stl as G\nfrom pysph.base.utils import get_particle_array\n\ncube_stl = \"\"\"solid cube\n facet normal 0 0 0\n outer loop\n vertex 0 0 0\n vertex 0 1 0\n vertex 1 1 0\n endloop\n endfacet\n facet normal 0 0 0\n outer loop\n vertex 0 0 0\n vertex 1 1 0\n vertex 1 0 0\n endloop\n endfacet\n facet normal 0 0 0\n outer loop\n vertex 0 0 0\n vertex 0 0 1\n vertex 0 1 1\n endloop\n endfacet\n facet normal 0 0 0\n outer loop\n vertex 0 0 0\n vertex 0 1 1\n vertex 0 1 0\n endloop\n endfacet\n facet normal 0 0 0\n outer loop\n vertex 0 0 0\n vertex 1 0 0\n vertex 1 0 1\n endloop\n endfacet\n facet normal 0 0 0\n outer loop\n vertex 0 0 0\n vertex 1 0 1\n vertex 0 0 1\n endloop\n endfacet\n facet normal 0 0 0\n outer loop\n vertex 0 0 1\n vertex 1 0 1\n vertex 1 1 1\n endloop\n endfacet\n facet normal 0 0 0\n outer loop\n vertex 0 0 1\n vertex 1 1 1\n vertex 0 1 1\n endloop\n endfacet\n facet normal 0 0 0\n outer loop\n vertex 1 0 0\n vertex 1 1 0\n vertex 1 1 1\n endloop\n endfacet\n facet normal 0 0 0\n outer loop\n vertex 1 0 0\n vertex 1 1 1\n vertex 1 0 1\n endloop\n endfacet\n facet normal 0 0 0\n outer loop\n vertex 0 1 0\n vertex 0 1 1\n vertex 1 1 1\n endloop\n endfacet\n facet normal 0 0 0\n outer loop\n vertex 0 1 0\n vertex 1 1 1\n vertex 1 1 0\n endloop\n endfacet\nendsolid cube\"\"\"\n\n\nclass TestGeometry(unittest.TestCase):\n def test_in_triangle(self):\n assert(G._in_triangle(0.5, 0.5, 0.0, 0.0, 1.5, 0.0, 0.0, 1.5) is True)\n assert(G._in_triangle(1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0) is False)\n\n def test_interp_2d(self):\n # Check interpolation between two points on line y=x\n dx = 0.1\n r = G._interp_2d(np.array([0., 0.]), np.array([1., 1.]), dx)\n # Check if all points satisfy y=x\n np.testing.assert_array_almost_equal(\n r[:, 0] - r[:, 1], np.zeros(r.shape[0]))\n # Check if distance between consecutive points is lesser than dx\n np.testing.assert_array_less(np.linalg.norm(r[1:] - r[0:-1], axis=1),\n np.ones(r.shape[0] - 1) * dx)\n\n def test_fill_triangle(self):\n triangle = np.array([[0., 0., 0.],\n [1., 0., 0.],\n [0., 1., 0.]])\n dx_triangle = 0.1\n x, y, z = G._fill_triangle(triangle, dx_triangle)\n EPS = np.finfo(float).eps\n np.testing.assert_array_less(-x, np.zeros(x.shape[0]) + EPS)\n np.testing.assert_array_less(-y, np.zeros(x.shape[0]) + EPS)\n np.testing.assert_array_less(-(x + y), np.ones(x.shape[0]) + EPS)\n np.testing.assert_almost_equal(z, np.zeros(x.shape[0]))\n\n def test_fill_triangle_throws_zero_area_triangle_exception(self):\n self.assertRaises(G.ZeroAreaTriangleException, G._fill_triangle,\n np.zeros((3, 3)), 0.5)\n\n def test_fill_triangle_throws_polygon_mesh_error(self):\n self.assertRaises(G.PolygonMeshError, G._fill_triangle,\n np.zeros((4, 3)), 0.5)\n\n def test_get_neighbouring_particles(self):\n \"\"\"Find neighbouring particles around a unit sphere\"\"\"\n h = 0.1\n x1, y1, z1 = np.mgrid[-1.1:1.1:0.05, -1.1:1.1:0.05, -1.1:1.1:0.05]\n r2 = (x1 ** 2 + y1 ** 2 + z1 ** 2)\n mask = (r2 < (1. + h) ** 2) & (r2 > (1. - h) ** 2)\n x2, y2, z2 = x1[mask], y1[mask], z1[mask]\n p1 = get_particle_array(x=x1, y=y1, z=z1, h=h)\n p2 = get_particle_array(x=x2, y=y2, z=z2, h=h)\n x, y, z = G._get_neighbouring_particles(p2, p1, h)\n\n for i in range(x.shape[0]):\n assert((1. - 2 * h) ** 2 < (x[i] ** 2 + y[i] ** 2 + z[i] ** 2) <\n (1. 
+ 2 * h) ** 2)\n\n def _generate_cube_stl(self):\n f = tempfile.NamedTemporaryFile(mode='w', delete=False)\n f.write(cube_stl)\n f.close()\n return f.name\n\n def _cube_assert(self, x, y, z, h):\n \"\"\"Check if x,y,z lie within surface of thickness `h` of a unit cube\"\"\"\n def surface1(x, y, z): return min(abs(x), abs(1 - x)) < h and \\\n y > -h and y < 1 + h and z > -h and z < 1 + h\n\n def on_surface(x, y, z): return surface1(x, y, z) or \\\n surface1(y, x, z) or surface1(z, x, y)\n\n for i in range(x.shape[0]):\n assert on_surface(x[i], y[i], z[i])\n\n def test_get_stl_mesh(self):\n \"\"\"Check if mesh is generated correctly for unit cube\"\"\"\n cube_fname = self._generate_cube_stl()\n x, y, z = G._get_stl_mesh(cube_fname, 0.1)\n h = np.finfo(float).eps\n self._cube_assert(x, y, z, h)\n\n def test_get_stl_surface(self):\n \"\"\"Check if stl surface is generated correctly for unit cube\"\"\"\n cube_fname = self._generate_cube_stl()\n h = 0.1\n x, y, z = G.get_stl_surface(cube_fname, h, h, 1)\n self._cube_assert(x, y, z, h)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.linalg.norm",
"numpy.ones",
"numpy.finfo",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RDxR10/Linear_Feedback_Shift_Register
|
[
"9ab797b91f191e49d9a9fc3f77ecad339b3e7ae9"
] |
[
"pylfsr/pylfsr.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n'''\nAuthor @ Nikesh Bajaj\nfirst created : Date: 22 Oct 2017\nUpdated on : 29 Apr 2021\n : fixed bugs (1) not counting first outbit correctly (2) Exception in info method\n\t\t : added test properties (1) Balance (2) Runlength (3) Autocorrelation\n : improved functionalities\n : added Viz function\n : added A5/1 and Geffe Generator\nVersion : 1.0.6\nContact: [email protected]\n : http://nikeshbajaj.in\n'''\n\nclass LFSR():\n\t'''\n\tLinear Feedback Shift Register\n\n\tclass LFSR(fpoly=[5,2],initstate='ones',verbose=False)\n\n\tParameters\n\t----------\n\tinitstate : binary np.array (row vector) or str ='ones' or 'random', optional (default = 'ones'))\n\t\tInitial state of LFSR. initstate can not be all zeros.\n\t\tdefault ='ones'\n\t\t\tInitial state is intialized with ones and length of register is equal to\n\t\t\tdegree of feedback polynomial\n\t\tif state='rand'\n\t\t\tInitial state is intialized with random binary sequence of length equal to\n\t\t\tdegree of feedback polynomial\n\n\tfpoly : List, optional (default=[5,2])\n\t\tFeedback polynomial, it has to be primitive polynomial of GF(2) field, for valid output of LFSR\n\t\tto get the list of feedback polynomials check method 'get_fpolyList'\n\t\tor check Refeferece:\n\t\tRef: List of some primitive polynomial over GF(2)can be found at\n\t\thttp://www.partow.net/programming/polynomials/index.html\n\t\thttp://www.ams.org/journals/mcom/1962-16-079/S0025-5718-1962-0148256-1/S0025-5718-1962-0148256-1.pdf\n\t\thttp://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf\n\tcounter_start_zero: bool (default = True), whether to start counter with 0 or 1. If True, initial outbit is\n\t set to -1, so is feedbackbit, until first .next() clock is excecuted. This initial output is not stacked in\n\t\tseq. 
The output sequence should be same, in anycase, for example if you need run 10 cycles, using runKCycle(10) methed.\n\tVerbose : boolean, optional (default=False)\n\t\tif True, state of LFSR will be printed at every cycle(iteration)\n\n\tAttributes\n\t----------\n\tcount : int\n\t\tCount the cycle, starts with 0 if counter_start_zero True, else starts with 1\n\tseq : np.array shape =(count,)\n\t\tOutput sequence stored in seq since first cycle\n\t\tif -1, no cycle has been excecuted, count=0 when counter_start_zero is True\n\t\telse last bit of initial state\n\n\toutbit : binary bit\n\t\tCurrent output bit,\n\t\tLast bit of current state\n\t\tif -1, no cycle has been excecuted, count =0, when counter_start_zero is True\n\n\tfeedbackbit : binary bit\n\t\tif -1, no cycle has been excecuted, count =0, when counter_start_zero is True\n\tM : int\n\t\tlength of LFSR, M-bit LFSR\n\n\texpectedPeriod : int (also saved as T)\n\t\tExpected period of sequence\n\t\tif feedback polynomial is primitive and irreducible (as per reference)\n\t\tperiod will be 2^M -1\n\tT : int (also saved as expectedPeriod)\n\t\tExpected period of sequence\n\t\tif feedback polynomial is primitive and irreducible (as per reference)\n\t\tperiod will be 2^M -1\n\tfeedpoly : str\n\t\tfeedback polynomial\n\n\n\tExamples\n\t--------\n\t>>> import numpy as np\n\t>>> from pylfsr import LFSR\n\n\t## Example 1 ## 5 bit LFSR with x^5 + x^2 + 1\n\t>>> L = LFSR()\n\t>>> L.info()\n\t5 bit LFSR with feedback polynomial x^5 + x^2 + 1\n\tExpected Period (if polynomial is primitive) = 31\n\tCurrent :\n\t\tState : [1 1 1 1 1]\n\t\tCount : 0\n\t\tOutput bit : -1\n\t\tfeedback bit : -1\n\n\t>>> L.next()\n\t1\n\n\t>>> L.runKCycle(10)\n\tarray([1, 1, 1, 1, 0, 0, 1, 1, 0, 1])\n\n\t>>> L.runFullCycle() # doctest: +NORMALIZE_WHITESPACE\n\tarray([1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0,\n 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1])\n\n\t>>> L.info() # doctest: +NORMALIZE_WHITESPACE\n\t5 bit LFSR with feedback polynomial x^5 + x^2 + 1\n\tExpected Period (if polynomial is primitive) = 31\n\tCurrent :\n\t State : [0 0 1 0 0]\n\t Count : 42\n\t Output bit : 1\n\t feedback bit : 0\n\t Output Sequence 111110011010010000101011101100011111001101\n\n\t>>> tempseq = L.runKCycle(10000) # generate 10000 bits from current state\n\n\t## Example 2 ## 5 bit LFSR with custum state and feedback polynomial\n\t>>> state = np.array([0,0,0,1,0])\n\t>>> fpoly = [5,4,3,2]\n\t>>> L1 = LFSR(fpoly=fpoly,initstate =state, verbose=True)\n\t>>> L1.info() # doctest: +NORMALIZE_WHITESPACE\n\t5 bit LFSR with feedback polynomial x^5 + x^4 + x^3 + x^2 + 1\n\tExpected Period (if polynomial is primitive) = 31\n\tCurrent :\n\t\tState : [0 0 0 1 0]\n\t\tCount : 0\n\t\tOutput bit : -1\n\t\tfeedback bit : -1\n\t>>> tempseq = L1.runKCycle(10)\n\tS: [0 0 0 1 0]\n\tS: [1 0 0 0 1]\n\tS: [1 1 0 0 0]\n\tS: [1 1 1 0 0]\n\tS: [0 1 1 1 0]\n\tS: [1 0 1 1 1]\n\tS: [1 1 0 1 1]\n\tS: [1 1 1 0 1]\n\tS: [1 1 1 1 0]\n\tS: [1 1 1 1 1]\n\t>>> tempseq\n\tarray([0, 1, 0, 0, 0, 1, 1, 1, 0, 1])\n\n\t>>>L1.set(fpoly=[5,3])\n\n\t## Example 3 ## TO visualize the process with 3-bit LFSR, with default counter_start_zero = True\n\t>>> state = [1,1,1]\n\t>>> fpoly = [3,2]\n\t>>> L = LFSR(initstate=state,fpoly=fpoly,counter_start_zero=True)\n\t>>> print('count \\t state \\t\\toutbit \\t seq')\n\t>>> print('-'*50)\n\t>>> for _ in range(15):\n\t>>> print(L.count,L.state,'',L.outbit,L.seq,sep='\\t')\n\t>>> L.next()\n\t>>> print('-'*50)\n\t>>> print('Output: ',L.seq)\n\tcount \t 
state \t\toutbit \t seq\n\t--------------------------------------------------\n\t0\t\t[1 1 1]\t\t-1\t[-1]\n\t1\t\t[0 1 1]\t\t1\t[1]\n\t2\t\t[0 0 1]\t\t1\t[1 1]\n\t3\t\t[1 0 0]\t\t1\t[1 1 1]\n\t4\t\t[0 1 0]\t\t0\t[1 1 1 0]\n\t5\t\t[1 0 1]\t\t0\t[1 1 1 0 0]\n\t6\t\t[1 1 0]\t\t1\t[1 1 1 0 0 1]\n\t7\t\t[1 1 1]\t\t0\t[1 1 1 0 0 1 0]\n\t8\t\t[0 1 1]\t\t1\t[1 1 1 0 0 1 0 1]\n\t9\t\t[0 0 1]\t\t1\t[1 1 1 0 0 1 0 1 1]\n\t10\t\t[1 0 0]\t\t1\t[1 1 1 0 0 1 0 1 1 1]\n\t11\t\t[0 1 0]\t\t0\t[1 1 1 0 0 1 0 1 1 1 0]\n\t12\t\t[1 0 1]\t\t0\t[1 1 1 0 0 1 0 1 1 1 0 0]\n\t13\t\t[1 1 0]\t\t1\t[1 1 1 0 0 1 0 1 1 1 0 0 1]\n\t14\t\t[1 1 1]\t\t0\t[1 1 1 0 0 1 0 1 1 1 0 0 1 0]\n\t--------------------------------------------------\n\tOutput: [1 1 1 0 0 1 0 1 1 1 0 0 1 0 1]\n\n\t## Example 4.1 ## To visualize the process with 3-bit LFSR, with counter_start_zero = False\n\t>>> state = [1,1,1]\n\t>>> fpoly = [3,2]\n\t>>> L = LFSR(initstate=state,fpoly=fpoly,counter_start_zero=False)\n\t>>> print('count \\t state \\t\\toutbit \\t seq')\n\t>>> print('-'*50)\n\t>>> for _ in range(15):\n\t>>> print(L.count,L.state,'',L.outbit,L.seq,sep='\\t')\n\t>>> L.next()\n\t>>> print('-'*50)\n\t>>> print('Output: ',L.seq)\n\tcount \t state \t\toutbit \t seq\n\t--------------------------------------------------\n\t1\t[1 1 1]\t\t1\t[1]\n\t2\t[0 1 1]\t\t1\t[1 1]\n\t3\t[0 0 1]\t\t1\t[1 1 1]\n\t4\t[1 0 0]\t\t0\t[1 1 1 0]\n\t5\t[0 1 0]\t\t0\t[1 1 1 0 0]\n\t6\t[1 0 1]\t\t1\t[1 1 1 0 0 1]\n\t7\t[1 1 0]\t\t0\t[1 1 1 0 0 1 0]\n\t8\t[1 1 1]\t\t1\t[1 1 1 0 0 1 0 1]\n\t9\t[0 1 1]\t\t1\t[1 1 1 0 0 1 0 1 1]\n\t10\t[0 0 1]\t\t1\t[1 1 1 0 0 1 0 1 1 1]\n\t11\t[1 0 0]\t\t0\t[1 1 1 0 0 1 0 1 1 1 0]\n\t12\t[0 1 0]\t\t0\t[1 1 1 0 0 1 0 1 1 1 0 0]\n\t13\t[1 0 1]\t\t1\t[1 1 1 0 0 1 0 1 1 1 0 0 1]\n\t14\t[1 1 0]\t\t0\t[1 1 1 0 0 1 0 1 1 1 0 0 1 0]\n\t--------------------------------------------------\n\tOutput: [1 1 1 0 0 1 0 1 1 1 0 0 1 0 1]\n\n\t## Example 4.2 ## To visualize LFSR\n\tL.Viz(show=False, show_labels=False,title='R1')\n\n\t## Example 5 ## 23 bit LFSR with custum state and feedback polynomial\n\n\t>>> fpoly = [23,19]\n\t>>> L1 = LFSR(fpoly=fpoly,initstate ='ones', verbose=False)\n\t>>> L1.info()\n\t23 bit LFSR with feedback polynomial x^23 + x^19 + 1\n\tExpected Period (if polynomial is primitive) = 8388607\n\tCurrent :\n\t State : [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]\n\t Count : 0\n\t Output bit : -1\n\t feedback bit : -1\n\n\t>>> seq = L1.runKCycle(100)\n\t>>> seq\n\tarray([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,\n 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,\n 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,\n 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1])\n\n\t>>> L.changeFpoly(newfpoly =[23,21])\n\t>>> seq1 = L.runKCycle(20)\n\n\t## Example 6 ## testing the properties\n\t>>> state = [1,1,1,1,0]\n\t>>> fpoly = [5,3]\n\t>>> L = LFSR(initstate=state,fpoly=fpoly)\n\t>>> L.info()\n\t5 bit LFSR with feedback polynomial x^5 + x^3 + 1\n\tExpected Period (if polynomial is primitive) = 31\n\tCurrent :\n\t State : [1 1 1 1 0]\n\t Count : 0\n\t Output bit : -1\n\t feedback bit : -1\n\n\t>>>result = L.test_properties(verbose=1)\n\t1. Periodicity\n\t------------------\n\t - Expected period = 2^M-1 = 31\n\t - Pass?: True\n\n\t2. Balance Property\n\t-------------------\n\t - Number of 1s = Number of 0s+1 (in a period): (N1s,N0s) = (16, 15)\n\t - Pass?: True\n\n\t3. 
Runlength Property\n\t-------------------\n\t - Number of Runs in a period should be of specific order, e.g. [4,2,1,1]\n\t - Runs: [8 4 2 1 1]\n\t - Pass?: True\n\n\t4. Autocorrelation Property\n\t-------------------\n\t - Autocorrelation of a period should be noise-like, specifically, 1 at k=0, -1/m everywhere else\n\t - Pass?: True\n\n\t==================\n\tPassed all the tests\n\t==================\n\n\n\t>>> p = L.getFullPeriod()\n\t>>> p\n\tarray([0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0,\n 0, 1, 0, 0, 1, 0, 1, 1, 0])\n\n\t>>> L.balance_property(p.copy())\n\t(True, (16, 15))\n\n\t>>> L.runlength_property(p.copy())\n\t(True, array([8, 4, 2, 1, 1]))\n\n\t>>> L.autocorr_property(p.copy())[0]\n\tTrue\n\n\t## Example 7 ## testing the properties for non-primitive polynomial\n\t>>> state = [1,1,1,1,0]\n\t>>> fpoly = [5,1]\n\t>>> L = LFSR(initstate=state,fpoly=fpoly)\n\t>>> result = L.test_properties(verbose=1)\n\t1. Periodicity\n\t------------------\n\t - Expected period = 2^M-1 = 31\n\t - Pass?: False\n\n\t2. Balance Property\n\t-------------------\n\t - Number of 1s = Number of 0s+1 (in a period): (N1s,N0s) = (17, 14)\n\t - Pass?: False\n\n\t3. Runlength Property\n\t-------------------\n\t - Number of Runs in a period should be of specific order, e.g. [4,2,1,1]\n\t - Runs: [10 2 1 1 2]\n\t - Pass?: False\n\n\t4. Autocorrelation Property\n\t-------------------\n\t - Autocorrelation of a period should be noise-like, specifically, 1 at k=0, -1/m everywhere else\n\t - Pass?: False\n\n\t==================\n\tFailed one or more tests, check if feedback polynomial is primitive polynomial\n\t==================\n\t'''\n\n\tdef __init__(self, fpoly=[5, 2], initstate='ones', verbose=False,counter_start_zero=True):\n\t\tif isinstance(initstate, str):\n\t\t\tif initstate == 'ones':\n\t\t\t\tinitstate = np.ones(np.max(fpoly))\n\t\t\telif initstate == 'random':\n\t\t\t\tinitstate = np.random.randint(0, 2, np.max(fpoly))\n\t\t\telse:\n\t\t\t\traise Exception('Unknown intial state')\n\t\tif isinstance(initstate, list):\n\t\t\tinitstate = np.array(initstate)\n\n\t\tself.initstate = initstate\n\t\tself.fpoly = fpoly\n\t\tself.state = initstate.astype(int)\n\t\t#self.skip_first = skip_first\n\t\tself.counter_start_zero = counter_start_zero\n\t\tself.count = 0 if counter_start_zero else 1\n\t\tself.seq = np.array([-1]) if counter_start_zero else np.array([self.state[-1]])\n\t\tself.outbit = -1 if counter_start_zero else self.state[-1]\n\t\tself.feedbackbit = -1 if counter_start_zero else self.state[-1]\n\t\tself.verbose = verbose\n\t\tself.M = self.initstate.shape[0]\n\t\tself.expectedPeriod = 2**self.M - 1\n\t\tself.T = 2**self.M - 1\n\t\tself.fpoly.sort(reverse=True)\n\t\tfeed = ' '\n\t\tfor i in range(len(self.fpoly)):\n\t\t\tfeed = feed + 'x^' + str(self.fpoly[i]) + ' + '\n\t\tfeed = feed + '1'\n\t\tself.feedpoly = feed\n\n\t\tself.check()\n\n\tdef info(self):\n\t\t'''\n\t\tDisplay the information about LFSR with current state of variables\n\t\t'''\n\t\tprint('%d bit LFSR with feedback polynomial %s' % (self.M, self.feedpoly))\n\t\tprint('Expected Period (if polynomial is primitive) = ', self.expectedPeriod)\n\t\tprint('Current :')\n\t\tprint(' State : ', self.state)\n\t\tprint(' Count : ', self.count)\n\t\tprint(' Output bit : ', self.outbit)\n\t\tprint(' feedback bit : ', self.feedbackbit)\n\t\tif self.count > 0 and self.count < 1000:\n\t\t\tprint(' Output Sequence %s' % (''.join([str(int(x)) for x in self.seq])))\n\n\tdef check(self):\n\t\t'''\n\t\tCheck if\n\t\t- degree of 
feedback polynomial <= length of LFSR >=1\n\t\t- given intistate of LFSR is correct\n\t\t'''\n\t\tif np.max(self.fpoly) > self.initstate.shape[0] or np.min(self.fpoly) < 1 or len(self.fpoly) < 2:\n\t\t\traise ValueError('Wrong feedback polynomial')\n\t\tif len(self.initstate.shape) > 1 and (self.initstate.shape[0] != 1 or self.initstate.shape[1] != 1):\n\t\t\traise ValueError('Size of intial state vector should be one diamensional')\n\t\telse:\n\t\t\tself.initstate = np.squeeze(self.initstate)\n\t\tassert np.sum(self.initstate>1) + np.sum(self.initstate<0)==0 # test if initial state is binary, 1s and 0s\n\n\tdef set(self, fpoly, state='ones'):\n\t\t'''\n\t\tSet feedback polynomial and state\n\n\t\tParameters\n\t\t----------\n\t\tfpoly : list\n\t\t\tfeedback polynomial like [5,4,3,2]\n\n\t\tstate : np.array, like np.array([1,0,0,1,1])\n\t\t\tdefault ='ones'\n\t\t\t\tInitial state is intialized with ones and length of register is equal to\n\t\t\t\tdegree of feedback polynomial\n\t\t\tif state='rand'\n\t\t\t\tInitial state is intialized with random binary sequence of length equal to\n\t\t\t\tdegree of feedback polynomial\n\t\t'''\n\t\tself.__init__(fpoly=fpoly, initstate=state)\n\n\tdef reset(self):\n\t\t'''\n\t\tReseting LFSR to its initial state and count\n\t\t'''\n\t\tself.__init__(initstate=self.initstate, fpoly=self.fpoly,counter_start_zero=self.counter_start_zero )\n\n\tdef changeFpoly(self, newfpoly, reset=False):\n\t\t'''\n\t\tChanging Feedback polynomial : Useful to change feedback polynomial in between as in A5/1 stream cipher\n\n\t\tParameters\n\t\t----------\n\t\tnewfpoly : list like, [5,4,2,1]\n\t\t\tchanging the feedback polynomial\n\n\t\treset : boolean default=False\n\t\t\tif True, reset all the Parameters: count and seq etc ....\n\t\t\tif False, leave the LFSR as it is only change the feedback polynomial\n\t\t\tfor further use, as used in\n\t\t\t'Enhancement of A5/1: Using variable feedback polynomials of LFSR'\n\t\t\t https://doi.org/10.1109/ETNCC.2011.5958486\n\t\t'''\n\t\tnewfpoly.sort(reverse=True)\n\t\tself.fpoly = newfpoly\n\t\tfeed = ' '\n\t\tfor i in range(len(self.fpoly)):\n\t\t\tfeed = feed + 'x^' + str(self.fpoly[i]) + ' + '\n\t\tfeed = feed + '1'\n\t\tself.feedpoly = feed\n\n\t\tself.check()\n\t\tif reset:\n\t\t\tself.reset()\n\n\tdef next(self):\n\t\t'''\n\t\tRun one cycle on LFSR with given feedback polynomial and\n\t\tupdate the count, state, feedback bit, output bit and seq\n\n\t\tReturns\n\t\t-------\n\t\toutput bit : binary\n\t\t'''\n\t\tif self.verbose:\n\t\t\tprint('S: ', self.state)\n\t\tif self.counter_start_zero:\n\t\t\tself.outbit = self.state[-1]\n\t\t\tif self.count ==0:\n\t\t\t\tself.seq = np.array([self.state[-1]])\n\t\t\telse:\n\t\t\t\tself.seq = np.append(self.seq, self.state[-1])\n\n\t\tb = np.logical_xor(self.state[self.fpoly[0] - 1], self.state[self.fpoly[1] - 1])\n\t\tif len(self.fpoly) > 2:\n\t\t\tfor i in range(2, len(self.fpoly)):\n\t\t\t\tb = np.logical_xor(self.state[self.fpoly[i] - 1], b)\n\n\t\t#self.outbit = self.state[-1]\n\t\tself.state = np.roll(self.state, 1)\n\t\tself.feedbackbit = b * 1\n\t\tself.state[0] = self.feedbackbit\n\n\t\tif not(self.counter_start_zero):\n\t\t\tself.outbit = self.state[-1]\n\t\t\tif self.count ==0:\n\t\t\t\tself.seq = np.array([self.state[-1]])\n\t\t\telse:\n\t\t\t\tself.seq = np.append(self.seq, self.state[-1])\n\n\t\tself.count += 1\n\n\t\treturn self.outbit\n\n\tdef runFullCycle(self):\n\t\t'''\n\t\tRun a full cycle (T = 2^M-1) on LFSR from current state\n\n\t\tReturns\n\t\t-------\n\t\tseq : binary 
output sequence since start: shape = (count,)\n\t\t'''\n\t\ttemp = [self.next() for i in range(self.expectedPeriod)]\n\t\treturn self.seq\n\n\tdef getFullPeriod(self):\n\t\t'''\n\t\tGet a seq of a full period from LSFR, by executing next() method T times.\n\t\tThe current state of LFSR is used to generate T bits.\n\n\t\tReturns\n\t\t-------\n\t\tseq (T bits), binary output sequence of last T bits\n\t\t'''\n\t\tseq = np.array([self.next() for i in range(self.expectedPeriod)])\n\t\treturn seq\n\n\tdef runKCycle(self, k):\n\t\t'''\n\t\tRun k cycles and update all the Parameters\n\n\t\tParameters\n\t\t----------\n\t\tk : int\n\n\t\tReturns\n\t\t-------\n\t\ttempseq : shape =(k,), output binary sequence of k cycles\n\t\t'''\n\t\ttempseq = [self.next() for i in range(k)]\n\t\treturn np.array(tempseq)\n\n\tdef _loadFpolyList(self):\n\t\timport os\n\t\tfname = 'primitive_polynomials_GF2_dict.txt'\n\t\tfname = os.path.join(os.path.dirname(__file__), fname)\n\t\ttry:\n\t\t\tf = open(fname, \"rb\")\n\t\t\tlines = f.readlines()\n\t\t\tf.close()\n\t\t\tself.fpolyList = eval(lines[0].decode())\n\t\texcept:\n\t\t\traise Exception(\"File named:'{}' Not Found!!! \\n try again, after downloading file from github save it in lfsr directory\".format(fname))\n\n\tdef get_fpolyList(self,m=None):\n\t\t'''\n\t\tGet the list of primitive polynomials as feedback polynomials for m-bit LFSR.\n\t\tOnly list of primary primitive polynomials are retuned, not full list (half list), since for each primary primitive polynomial\n\t\tan image polymial can be computed using 'get_Ifpoly' method\n\n\t\tParameters\n\t\t----------\n\t\tm: 1<int<32, if None, list of feedback polynomials for 1 < m < 32 is return as a dictionary\n\n\t\tReturns\n\t\t-------\n\t\tfpoly_list: list of polynomial if m is not None else a dictionary\n\n\t\t'''\n\t\tself._loadFpolyList()\n\t\tif m is None:\n\t\t\treturn self.fpolyList\n\t\telif type(m)== int and m > 1 and m < 32:\n\t\t\treturn self.fpolyList[m]\n\t\telse:\n\t\t\tprint('Wrong input m. m should be int 1 < m < 32 or None')\n\n\tdef get_Ifpoly(self,fpoly):\n\t\t'''\n\t\tGet image of feebback polynomial\n\t\tGet the image of primitive polynomial\n\t\tParameters\n\t\t----------\n\t\tfpoly: polynomial as list e.g. [5,2] for x^5 + x^2 + 1\n\t\t : should be a valid primitive polynomial\n\n\t\tReturns\n\t\t-------\n\t\tifpoly: polynomial as list e.g. [5,3] for x^5 + x^3 + 1\n\n\t\t'''\n\t\tif isinstance(fpoly, list) or (isinstance(fpoly, np.ndarray) and len(fpoly.shape)==1):\n\t\t\tfpoly = list(fpoly)\n\t\t\tfpoly.sort(reverse=True)\n\t\t\tifpoly = [fpoly[0]] +[fpoly[0]-ff for ff in fpoly[1:]]\n\t\t\tifpoly.sort(reverse=True)\n\t\t\treturn ifpoly\n\t\telse:\n\t\t\tprint('Not a valid form of feedback polynomial')\n\n\tdef test_properties(self,verbose=1):\n\t\tp1 = self.getFullPeriod()\n\t\tp2 = self.getFullPeriod()\n\t\tr1 = np.mean(p1==p2)==1\n\n\t\tr2, (N1s,N0s) = self.balance_property(p1.copy())\n\n\t\tr3,runs = self.runlength_property(p1.copy(),verbose=0)\n\n\t\tr4,(shift,rxx) = self.autocorr_property(p1.copy(),plot=False)\n\n\t\tresult = bool(np.prod([r1,r2,r3,r4]))\n\n\t\tif verbose:\n\t\t\tprint('1. Periodicity')\n\t\t\tprint('------------------')\n\t\t\tprint(' - Expected period = 2^M-1 =',self.expectedPeriod)\n\t\t\tprint(' - Pass?: ',r1)\n\t\t\tprint('')\n\t\t\tprint('2. Balance Property')\n\t\t\tprint('-------------------')\n\t\t\tprint(' - Number of 1s = Number of 0s+1 (in a period): (N1s,N0s) = ',(N1s, N0s))\n\t\t\tprint(' - Pass?: ',r2)\n\t\t\tprint('')\n\t\t\tprint('3. 
Runlength Property')\n\t\t\tprint('-------------------')\n\t\t\tprint(' - Number of Runs in a period should be of specific order, e.g. [4,2,1,1]')\n\t\t\tprint(' - Runs: ',runs)\n\t\t\tprint(' - Pass?: ',r3)\n\t\t\tprint('')\n\t\t\tprint('4. Autocorrelation Property')\n\t\t\tprint('-------------------')\n\t\t\tprint(' - Autocorrelation of a period should be noise-like, specifically, 1 at k=0, -1/m everywhere else')\n\t\t\tif verbose>1:\n\t\t\t print(' - Rxx(k): ',rxx)\n\t\t\t try:\n\t\t\t import matplotlib.pyplot as plt\n\t\t\t except:\n\t\t\t raise('Error loading matplotlib, either install it or set verbose<2')\n\t\t\t plt.plot(shift,rxx)\n\t\t\t plt.xlabel('shift (k)')\n\t\t\t plt.ylabel(r'$R_{xx}(k)$')\n\t\t\t plt.axhline(y=0,color='k',ls=':',lw=0.5)\n\t\t\t plt.xlim(shift[0],shift[-1])\n\t\t\t plt.title('Autocorrelation')\n\t\t\t plt.grid(alpha=0.4)\n\t\t\t plt.show()\n\t\t\tprint(' - Pass?: ',r4)\n\t\t\tprint('\\n\\n')\n\t\t\tprint('==================')\n\t\t\tif result:\n\t\t\t\tprint('Passed all the tests')\n\t\t\telse:\n\t\t\t\tprint('Failed one or more tests, check if feedback polynomial is primitive polynomial')\n\t\t\tprint('==================')\n\t\treturn result\n\n\tdef test_p(self,p,verbose=1):\n\t\t'''\n\t\tTest all the three properties for seq p :\n\t\t\t(1) Balance Property\n\t\t\t(2) Runlegth Property\n\t\t\t(3) Autocorrelation Property\n\n\n\t\tParameters\n\t\t----------\n\t\tp : array-like, a sequence of a period from LFSR\n\t\tverbose = 0 : no printing details\n\t\t = 1 : print details\n\t\t\t\t= 2 : print and plot more details\n\t\tReturns\n\t\t-------\n\t\tresult: bool, True if all three are satisfied else False\n\t\t'''\n\t\tr1,(N1s,N0s) = self.balance_property(p.copy())\n\t\tr2,runs = self.runlength_property(p.copy(),verbose=0)\n\t\tr3,(shift,rxx) = self.autocorr_property(p.copy(),plot=False)\n\t\tresult = bool(np.prod([r1,r2,r3]))\n\t\tif verbose:\n\t\t\tprint('1. Balance Property')\n\t\t\tprint('-------------------')\n\t\t\tprint(' - Number of 1s = Number of 0s+1 (in a period): (N1s,N0s) = ',(N1s, N0s))\n\t\t\tprint(' - Pass?: ',r1)\n\t\t\tprint('')\n\t\t\tprint('2. Runlength Property')\n\t\t\tprint('-------------------')\n\t\t\tprint(' - Number of Runs in a period should be of specific order, e.g. [4,2,1,1]')\n\t\t\tprint(' - Runs: ',runs)\n\t\t\tprint(' - Pass?: ',r2)\n\t\t\tprint('')\n\t\t\tprint('3. 
Autocorrelation Property')\n\t\t\tprint('-------------------')\n\t\t\tprint(' - Autocorrelation of a period should be noise-like, specifically, 1 at k=0, -1/m everywhere else')\n\t\t\tif verbose>1:\n\t\t\t\tprint(' - Rxx(k): ',rxx)\n\t\t\t\ttry:\n\t\t\t\t\timport matplotlib.pyplot as plt\n\t\t\t\texcept:\n\t\t\t\t\traise('Error loading matplotlib, either install it or set verbose<2')\n\t\t\t\tplt.plot(shift,rxx)\n\t\t\t\tplt.xlabel('shift (k)')\n\t\t\t\tplt.ylabel(r'$R_{xx}(k)$')\n\t\t\t\tplt.axhline(y=0,color='k',ls=':',lw=0.5)\n\t\t\t\tplt.xlim(shift[0],shift[-1])\n\t\t\t\tplt.title('Autocorrelation')\n\t\t\t\tplt.grid(alpha=0.4)\n\t\t\t\tplt.show()\n\t\t\tprint(' - Pass?: ',r3)\n\t\t\tprint('\\n\\n')\n\t\t\tprint('==================')\n\t\t\tif result:\n\t\t\t\tprint('Passed all three tests')\n\t\t\telse:\n\t\t\t\tprint('Failed one or more tests')\n\t\t\tprint('==================')\n\n\t\treturn result\n\n\tdef balance_property(self,p):\n\t\t'''\n\t\tBalance Property: In a period of LFSR with a valid feedback polynomial,\n\t\tthe number of 1s should be equal to number of 0s +1\n\t\t\t\t\t\t\t'' N1s == N0s + 1 ''\n\t\tTest balance property for a given full period of seq, p.\n\n\t\tParameters\n\t\t----------\n\t\tp: array-like, a sequence of a period from LFSR\n\n\n\t\tReturns\n\t\t-------\n\t\tresult: bool, True if seq p satisfies Balance Property else False\n\t\t(N1s, N0s): tuple, number of 1s and number of 0s\n\t\t'''\n\t\tN1s = np.sum(p==1)\n\t\tN0s = np.sum(p==0)\n\t\tresult = N1s == N0s+1\n\t\treturn result, (N1s,N0s)\n\n\tdef runlength_property(self,p,verbose=0):\n\t\t'''\n\t\tRun Length Property: In a period of LSFR with valid feedback polynomial,\n\t\tthe number of runs of different length are in specific order.\n\t\t\t''\n\t\t\tnumber of (M-k) bit runs = ⌈ 2^(k-1) ⌉ , for k = 0 to M-1\n\t\t\t''\n\t\twhere ⌈ ⌉ is a ceiling function\n\t\tThat is, for M bit LFSR,\n\t\t - number of M bit runs : 1\n\t\t\t- number of (M-1) bit runs : 1\n\t\t\t- number of (M-2) bit runs : 2\n\t\t\t- number of (M-3) bit runs : 4\n\t\t\t...\n\t\t\tso on\n\n\t\tParameters\n\t\t----------\n\t\tp: array-like, a sequence of a period from LFSR\n\n\t\tReturns\n\t\t-------\n\t\tresult: bool, True if seq p satisfies Run Length Property else False\n\t\truns: list, list of runs\n\t\t'''\n\t\tT = len(p)\n\t\tif verbose>1: print(p)\n\t\twhile p[0]==p[-1]: p = np.roll(p,1)\n\n\t\tif verbose>1: print(p)\n\t\tif p[-1]==0:\n\t\t p = np.append(p,1)\n\t\telse:\n\t\t p = np.append(p,0)\n\t\tif verbose>1: print(p)\n\n\t\ti=0\n\t\truns = np.zeros(T).astype(int)\n\t\tfor k in range(T):\n\t\t if p[k]==p[k+1]:\n\t\t i=i+1\n\t\t else:\n\t\t runs[i]=runs[i]+1\n\t\t i=0\n\t\tif verbose>1: print(runs)\n\t\truns = runs[:max(np.where(runs)[0])+1]\n\t\tif verbose: print('Runs : ',runs)\n\n\t\tl = len(runs)\n\t\tpp=0\n\t\tfor k in range(len(runs)-2):\n\t\t if runs[k]==2*runs[k+1]:\n\t\t pp=pp+1\n\t\tif runs[-2]==runs[-1]: pp=pp+1\n\n\t\tresult = False\n\n\t\tif pp==len(runs)-1: result = True\n\t\tif verbose>1:\n\t\t if result: print('Pass')\n\t\t else: print('Fail')\n\t\treturn result, runs\n\n\tdef autocorr_property(self,p,plot=False):\n\t\t'''\n\t\tAutocorrelation Property: For sequence of period T of LSFR with valid feedback polynomial,\n\t\tthe autocorrelation is a noise like, that is, 1 with zero (or T) lag (shift), -1/T (almost zero) else.\n\n\t\tunlike usual, for binary, the correlation value between two sequence of same length bx, by is computed as follow;\n\t\tmatch = sum(bx == by) (number of mataches)\n\t\tmismatch = sum(bx!= 
by) (number of mismatches)\n\t\t''\n\t\trxy = (match - mismatch)/ length(bx)\n\t\t''\n\n\t\tParameters\n\t\t----------\n\t\tp: array-like, a sequence of a period from LFSR\n\n\t\tplot: bool (default False), if True, it will plot the autocorrelation function,\n\t\t which will require matplotlib library. Turn it of if matplotlib is not installed\n\n\t\tReturns\n\t\t-------\n\t\tresult: bool, True if seq p satisfies Autocorrelation Property else False\n\t\t(shift, rxx): tuple of sequence of shift corresponding autocorrelation values\n\t\t'''\n\t\tT = len(p)\n\t\tpx = p.copy()\n\t\trxx = np.zeros(2*T+1)\n\t\tfor k in range(2*T+1):\n\t\t py = np.roll(p.copy(),k)\n\t\t r = px==py\n\t\t rxx[k] = (np.sum(r==1) - np.sum(r==0))/T\n\n\t\tresult = False\n\t\tif np.prod(np.isclose(rxx[1:T],-1/T)):\n\t\t result = True\n\n\t\tshift = np.arange(-T,T+1)\n\t\tif plot:\n\t\t try:\n\t\t import matplotlib.pyplot as plt\n\t\t except:\n\t\t raise('Error loading matplotlib, either install it or set plot=False')\n\t\t plt.plot(shift,rxx)\n\t\t plt.xlabel('shift (k)')\n\t\t plt.ylabel(r'$R_{xx}(k)$')\n\t\t plt.axhline(y=0,color='k',ls=':',lw=0.5)\n\t\t plt.xlim(shift[0],shift[-1])\n\t\t plt.title('Autocorrelation')\n\t\t plt.grid(alpha=0.4)\n\t\t plt.show()\n\t\treturn result, (shift,rxx)\n\n\tdef getSeq(self):\n\t\treturn ''.join(self.seq.copy().astype(str))\n\tdef getState(self):\n\t return ''.join(self.state.copy().astype(str))\n\tdef arr2str(self,arr):\n\t\treturn ''.join(arr.copy().astype(str))\n\tdef Viz(self,ax=None,show=True,fs=25,show_labels=False,title='',title_loc='left',box_color='lightblue',alpha=0.5):\n\t\t'''\n\t\tax: axis to plot, if None, new axis will be created, (default None)\n\t\tshow: if True, plt.show() will be excecuted, (default True)\n\t\tfs: fontsize (default 25)\n\t\tshow_label: if true, will display names\n\t\ttitle: str, title of figure, default '',\n\t\ttitle_loc, alignment of title, 'left', 'right', 'center', (default 'left')\n\t\t'''\n\t\tstate = self.state\n\t\tfpoly = self.fpoly\n\t\tseq = self.getSeq()\n\t\toutbit = self.outbit\n\t\tfeedbit = self.feedbackbit\n\t\tPlotLFSR(state,fpoly,seq=seq,ob=outbit,fb=feedbit,fs=fs,ax=ax,show_labels=show_labels,title=title,title_loc=title_loc,box_color=box_color,alpha=alpha)\n\t\tif show: plt.show()\n\n\n\ndef drawR(ax,x=0,y=0,s=1,alpha=0.5,color='lightblue'):\n rect = patches.Rectangle((x-s/2, y-s/2), s, s, linewidth=1, edgecolor='k', facecolor=color,alpha=alpha)\n ax.add_patch(rect)\ndef PlotLFSR(state,fpoly,seq='',ob=None,fb=None,fs=25,ax=None,show_labels=False,title='',title_loc='left',box_color='lightblue',alpha=0.5):\n\t'''\n ----- Plot LFSR ----\n state: current state of LFSR\n fpoly: feedback polynomial of LFSR\n seq: str, output sequence\n ob: output bit\n fb: feedback bit\n ax: axis to plot, if None, new axis will be created, (default None)\n\n\tshow: if True, plt.show() will be excecuted, (default True)\n\tfs: fontsize (default 25)\n\tshow_label: if true, will display names\n\ttitle: str, title of figure, default '',\n\ttitle_loc, alignment of title, 'left', 'right', 'center', (default 'left')\n\tbox_color: color of register box, default='lightblue'\n\t'''\n\tM = len(state)\n\tym = 3.5\n\tif ax is None:\n\t fig, ax = plt.subplots(figsize=(M+5,ym))\n\n\ts=1\n\txs = 3\n\tys = ym-1\n\tlast_x= xs\n\n\tfor k in range(M):\n\t x,y = xs+k,ys\n\t ax.text(x,y,str(state[k]),ha='center',va = 'center',fontsize=fs)\n\n\t if k==0:\n\t x1, y1 = [x-1.5*s, x-s/2], [y, y]\n\t ax.plot(x1, y1,marker = '>',color='k',markevery=(1,1),ms=10)\n\n\t if k+1 
in fpoly:\n\t x1, y1 = [x, x], [y-s/2, y-1.5*s]\n\t ax.plot(x1, y1,marker = '.',color='k')\n\t ax.plot(x,y-1.5*s,marker = '+',color='b',ms=15,mew=3)\n\t ax.plot(x,y-1.5*s,marker = 'o',color='b',ms=15,mfc='none',mew=2)\n\t if last_x<x: last_x=x\n\t drawR(ax,x=x,y=y,s=s,alpha=alpha,color=box_color)\n\n #if fb is not None: ax.text(xs-1.7*s,y,'fb = '+str(fb),fontsize=fs-7,va = 'bottom')\n\tif fb is not None:\n\t if show_labels:\n\t ax.text(xs-1.7*s,y,'fb = '+str(fb),fontsize=fs-7,va = 'bottom')\n\t else:\n\t ax.text(xs-1.7*s,y,str(fb),fontsize=fs,va = 'bottom',color='b')\n\n\tx1, y1 = [last_x,xs-1.5*s ], [y-1.5*s, y-1.5*s]\n\tax.plot(x1, y1,marker = '<',color='k',markevery=(1,1),ms=10)\n\n\tx2, y2 = [xs-1.5*s,xs-1.5*s], [y-1.5*s,y]\n\tax.plot(x2, y2,marker = 'd',color='k',markevery=(1,1))\n\n\tx1, y1 = [x+s/2, x+1.5*s], [y, y]\n\tax.plot(x1, y1,marker = '>',color='k',markevery=(1,1),ms=10)\n\t#if ob is not None: ax.text(x+1.7*s,y,'ob = '+str(ob),fontsize=fs-7,va = 'bottom')\n\tif ob is not None:\n\t if show_labels:\n\t ax.text(x+1.7*s,y,'ob = '+str(ob),fontsize=fs-7,va = 'bottom')\n\t else:\n\t ax.text(x+1.7*s,y,str(ob),fontsize=fs,va = 'bottom',color='b')\n\n\tif len(seq):\n\t ax.text(0,0,'Output seq = '+seq,fontsize=0.7*fs,color='b',ha='left')\n\tax.axis('off')\n\tif title!='': plt.title(title,fontsize=fs,loc=title_loc)\n\nclass A5_1():\n\t'''\n\tA5/1 GSM Stream Cipher\n\t----------------------\n\t#TODO\n\t\t1.doc\n\t\t2.check the output sequence\n\tRef: https://en.wikipedia.org/wiki/A5/1\n\n\tExample\n\t--------\n\n\timport numpy as np\n\timport matplotlib.pyplot as plt\n\tfrom pylfsr import A5_1\n\n\tA5 = A5_1(key='random')\n\tprint('key: ',A5.key)\n\tA5.R1.Viz(title='R1')\n\tA5.R2.Viz(title='R2')\n\tA5.R3.Viz(title='R3')\n\n\tprint('key: ',A5.key)\n\tprint()\n\tprint('count \\t cbit\\t\\tclk\\t R1_R2_R3\\toutbit \\t seq')\n\tprint('-'*80)\n\tfor _ in range(15):\n\t print(A5.count,A5.getCbits(),A5.clock_bit,A5.getLastbits(),A5.outbit,A5.getSeq(),sep='\\t')\n\t A5.next()\n\tprint('-'*80)\n\tprint('Output: ',A5.seq)\n\n\tA5.runKCycle(1000)\n\tA5.getSeq()\n\n\t'''\n\tdef __init__(self,key='random',k1='ones',k2='random',k3='ones',counter_start_zero=True):\n\n\t self.M1,self.M2,self.M3 =19,22,23\n\t self.M = self.M1+self.M2+self.M3\n\t self.counter_start_zero = counter_start_zero\n\n\n\t if key is not None:\n\t key = self.key_frmt(self.M,key)\n\t assert len(key)==self.M\n\t #key should be of length = 19+22+23 for three LFSRs\n\n\t self.k1 = key[:self.M1]\n\t self.k2 = key[self.M1:self.M1+self.M2]\n\t self.k3 = key[self.M1+self.M2:]\n\n\t assert len(self.k1)==self.M1 and len(self.k2)==self.M2 and len(self.k3)==self.M3\n\t self.key = ''.join(key.copy().astype(str))\n\n\t else:\n\t self.k1 = self.key_frmt(n=self.M1,ktype=k1)\n\t self.k2 = self.key_frmt(n=self.M2,ktype=k2)\n\t self.k3 = self.key_frmt(n=self.M3,ktype=k3)\n\t assert len(self.k1)==self.M1 and len(self.k2)==self.M2 and len(self.k3)==self.M3\n\n\t self.key = ''.join([''.join(k.copy().astype(str)) for k in [self.k1, self.k2, self.k3]])\n\n\n\t self.R1 = LFSR(initstate=self.k1, fpoly = [19,18,17,14],counter_start_zero=counter_start_zero)\n\t self.R2 = LFSR(initstate=self.k2, fpoly = [22,21],counter_start_zero=counter_start_zero)\n\t self.R3 = LFSR(initstate=self.k3, fpoly = [23,22,21,8],counter_start_zero=counter_start_zero)\n\t self.state = np.r_[self.R1.state, self.R2.state,self.R3.state]\n\n\t # clocking bits\n\t self.c1 = self.R1.state[8]\n\t self.c2 = self.R2.state[10]\n\t self.c3 = self.R3.state[10]\n\n\t self.count = 0\n\t self.seq 
= np.array([])\n\t    self.outbit = -1\n\t    self.clock_bit = -1\n\n\n\n\tdef key_frmt(self,n,ktype):\n\t\tif isinstance(ktype, str):\n\t\t    if ktype == 'ones':\n\t\t        ikey = np.ones(n).astype(int)\n\t\t    elif ktype == 'random':\n\t\t        ikey = np.random.randint(0, 2,n).astype(int)\n\t\t    elif len(ktype)==n:\n\t\t        ikey = np.array([int(b) for b in ktype]).astype(int)\n\t\t    else: raise Exception('Unknown initial state')\n\t\t    return ikey\n\t\telif isinstance(ktype,list):\n\t\t    return np.array(ktype)\n\t\telif isinstance(ktype,np.ndarray):\n\t\t    return ktype\n\t\telse:\n\t\t    raise Exception('Unknown key type, must be one of [binary string, list, np.array]')\n\n\tdef next(self):\n\t\t'''\n\t\t#TODO check the output sequence\n\t\t'''\n\t\t# clocking bits\n\n\t\tif self.count:\n\t\t\tif self.c1==self.clock_bit: self.R1.next()\n\t\t\tif self.c2==self.clock_bit: self.R2.next()\n\t\t\tif self.c3==self.clock_bit: self.R3.next()\n\n\t\tself.state = np.r_[self.R1.state, self.R2.state,self.R3.state]\n\t\tself.outbit = np.logical_xor(np.logical_xor(self.R1.state[-1],self.R2.state[-1]),self.R3.state[-1])*1\n\n\t\tself.seq = np.append(self.seq, self.outbit).astype(int)\n\n\t\tself.count+=1\n\n\t\tself.c1 = self.R1.state[8]\n\t\tself.c2 = self.R2.state[10]\n\t\tself.c3 = self.R3.state[10]\n\t\tself.clock_bit = (self.c1+self.c2+self.c3 > 1)*1\n\t\treturn self.outbit\n\n\tdef getLastbits(self):\n\t    return [self.R1.state[-1],self.R2.state[-1],self.R3.state[-1]]\n\tdef getCbits(self):\n\t    return [self.R1.state[8],self.R2.state[10],self.R3.state[10]]\n\tdef getSeq(self):\n\t    return ''.join(self.seq.copy().astype(str))\n\tdef getState(self):\n\t    return ''.join(self.state.copy().astype(str))\n\tdef arr2str(self,arr):\n\t\treturn ''.join(arr.copy().astype(str))\n\tdef runKCycle(self, k):\n\t    '''\n\t    Run k cycles and update all the parameters\n\n\t    Parameters\n\t    ----------\n\t    k : int\n\n\t    Returns\n\t    -------\n\t    tempseq : shape =(k,), output binary sequence of k cycles\n\t    '''\n\t    tempseq = [self.next() for i in range(k)]\n\t    return np.array(tempseq)\n\nclass Geffe():\n\t'''\n\tGeffe Generator\n\t---------------\n\tCombining K LFSRs in a non-linear manner to increase the linear complexity\n\n\tParameters\n\t----------\n\tK+1 LFSRs\n\n\tkLFSR_list: list of K LFSRs, output of one of these is chosen at any time, depending on cLFSR\n\tcLFSR: clocking LFSR\n\n\tK should be a power of 2: 2,4,8,... 128\n\n\tRef: Schneier, Bruce. Applied cryptography: protocols, algorithms, and source code in C. 
john wiley & sons, 2007.\n\tChapter 16\n\n\n\tExample\n\t--------\n\n\timport numpy as np\n\timport matplotlib.pyplot as plt\n\tfrom pylfsr import Geffe, LFSR\n\n\tkLFSR = [LFSR(initstate='random') for _ in range(8)]\n\tcLFSR = LFSR(initstate='random')\n\n\tGG = Geffe(kLFSR_list=kLFSR, cLFSR=cLFSR)\n\n\tprint('key: ',GG.getState())\n\tprint()\n\tfor _ in range(50):\n\t    print(GG.count,GG.m_count,GG.outbit_k,GG.sel_k,GG.outbit,GG.getSeq(),sep='\\t')\n\t    GG.next()\n\n\tGG.runKCycle(1000)\n\tGG.getSeq()\n\t'''\n\tdef __init__(self,kLFSR_list,cLFSR):\n\n\t    self.K = len(kLFSR_list)\n\t    assert isinstance(cLFSR,LFSR)\n\t    assert [isinstance(Rk,LFSR) for Rk in kLFSR_list]\n\t    assert self.K>1\n\t    assert (self.K & (self.K-1) == 0) and self.K != 0\n\t    #K (list of LFSR) should be power of 2\n\n\t    self.m = np.log2(self.K).astype(int)\n\n\t    self.kLFSR_list = kLFSR_list\n\t    self.cLFSR = cLFSR\n\n\t    self.count=0\n\t    self.m_count =0\n\t    self.seq =np.array([])\n\t    self.outbit = -1\n\t    self.sel_k = -1\n\t    self.outbit_k = [Rk.state[-1] for Rk in self.kLFSR_list]\n\t    self.state = np.hstack([R.state for R in self.kLFSR_list+[self.cLFSR]])\n\t    self.state_k = np.hstack([R.state for R in self.kLFSR_list])\n\t    self.state_c = self.cLFSR.state\n\n\n\tdef getSel(self):\n\t    sel = self.cLFSR.runKCycle(self.m)\n\t    self.m_count+=self.m\n\t    sel = ''.join(sel.astype(str))\n\t    return int(sel, 2)\n\tdef next(self):\n\t    if self.count:\n\t        _ = [Rk.next() for Rk in self.kLFSR_list]\n\n\t    self.outbit_k = [Rk.state[-1] for Rk in self.kLFSR_list]\n\t    self.sel_k = self.getSel()\n\t    self.outbit = self.outbit_k[self.sel_k]\n\n\t    self.seq = np.append(self.seq,self.outbit).astype(int)\n\n\t    self.state = np.hstack([R.state for R in self.kLFSR_list+[self.cLFSR]])\n\t    self.state_k = np.hstack([R.state for R in self.kLFSR_list])\n\t    self.state_c = self.cLFSR.state\n\n\t    self.count+=1\n\t    return self.outbit\n\n\tdef getSeq(self):\n\t    return ''.join(self.seq.copy().astype(str))\n\tdef getState(self):\n\t    return ''.join(self.state.copy().astype(str))\n\tdef arr2str(self,arr):\n\t\treturn ''.join(arr.copy().astype(str))\n\n\tdef runKCycle(self, k):\n\t    '''\n\t    Run k cycles and update all the parameters\n\n\t    Parameters\n\t    ----------\n\t    k : int\n\n\t    Returns\n\t    -------\n\t    tempseq : shape =(k,), output binary sequence of k cycles\n\t    '''\n\t    tempseq = [self.next() for i in range(k)]\n\t    return np.array(tempseq)\n\nclass Geffe3():\n    '''\n    Geffe Generator\n    ---------------\n    Combining three LFSRs in a non-linear manner\n    linear complexity: If the LFSRs have lengths n1, n2, and n3, respectively, then the linear\n    complexity of the generator is (n1 + 1)n2 + n1n3\n\n    output bit at any time is\n\n        b = (r1 AND r2) XOR ((NOT r1) AND r3)\n\n    where r1, r2, r3 are the output bits of the three LFSRs respectively\n\n    Ref: Schneier, Bruce. Applied cryptography: protocols, algorithms, and source code in C. 
john wiley & sons, 2007.\n    Chapter 16\n\n    '''\n    def __init__(self,R1,R2,R3):\n\n        assert isinstance(R1,LFSR)\n        assert isinstance(R2,LFSR)\n        assert isinstance(R3,LFSR)\n\n        self.R1 = R1\n        self.R2 = R2\n        self.R3 = R3\n        self.count=0\n        self.seq =[]\n        self.state = np.r_[self.R1.state, self.R2.state,self.R3.state]\n        self.next()\n\n    def next(self):\n        if self.count:\n            self.R1.next()\n            self.R2.next()\n            self.R3.next()\n        self.r1 = self.R1.state[-1]\n        self.r2 = self.R2.state[-1]\n        self.r3 = self.R3.state[-1]\n\n        b1 = np.logical_and(self.r1,self.r2)\n        b2 = np.logical_and(not(self.r1),self.r3)\n        self.outbit = np.logical_xor(b1,b2)*1\n\n        self.seq = np.append(self.seq,self.outbit).astype(int)\n\n        self.state = np.r_[self.R1.state, self.R2.state,self.R3.state]\n        self.count+=1\n        return self.outbit\n    def getSeq(self):\n        return ''.join(self.seq.copy().astype(str))\n    def getState(self):\n        return ''.join(self.state.copy().astype(str))\n    def arr2str(self,arr):\n    \treturn ''.join(arr.copy().astype(str))\n\n    def runKCycle(self, k):\n        '''\n        Run k cycles and update all the parameters\n\n        Parameters\n        ----------\n        k : int\n\n        Returns\n        -------\n        tempseq : shape =(k,), output binary sequence of k cycles\n        '''\n        tempseq = [self.next() for i in range(k)]\n        return np.array(tempseq)\n\n\nif __name__ == '__main__':\n\timport doctest\n\tdoctest.testmod()\n"
] |
[
[
"numpy.logical_xor",
"numpy.squeeze",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.mean",
"numpy.where",
"numpy.roll",
"numpy.random.randint",
"numpy.hstack",
"numpy.arange",
"numpy.zeros",
"numpy.isclose",
"matplotlib.pyplot.title",
"numpy.min",
"matplotlib.patches.Rectangle",
"numpy.append",
"numpy.logical_and",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axhline",
"numpy.log2",
"matplotlib.pyplot.subplots",
"numpy.ones",
"matplotlib.pyplot.xlim",
"numpy.prod",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
imatiach-msft/EconML
|
[
"289c5412f4492035b794c2833e7f6f6f48807dd1",
"289c5412f4492035b794c2833e7f6f6f48807dd1"
] |
[
"econml/score/ensemble_cate.py",
"prototypes/dynamic_dml/coverage_panel_hetero.py"
] |
[
"# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nimport numpy as np\r\nfrom sklearn.utils.validation import check_array\r\nfrom .._cate_estimator import BaseCateEstimator, LinearCateEstimator\r\n\r\n\r\nclass EnsembleCateEstimator:\r\n    \"\"\" A CATE estimator that represents a weighted ensemble of many\r\n    CATE estimators. Returns their weighted effect prediction.\r\n\r\n    Parameters\r\n    ----------\r\n    cate_models : list of BaseCateEstimator objects\r\n        A list of fitted cate estimator objects that will be used in the ensemble.\r\n        The models are passed by reference, and not copied internally, because we\r\n        need the fitted objects, so any change to the passed models will affect\r\n        the internal predictions (e.g. if the input models are refitted).\r\n    weights : np.ndarray of shape (len(cate_models),)\r\n        The weight placed on each model. Weights must be non-negative. The\r\n        ensemble will predict effects based on the weighted average predictions\r\n        of the cate_models estimators, weighted by the corresponding weight in `weights`.\r\n    \"\"\"\r\n\r\n    def __init__(self, *, cate_models, weights):\r\n        self.cate_models = cate_models\r\n        self.weights = weights\r\n\r\n    def effect(self, X=None, *, T0=0, T1=1):\r\n        return np.average([mdl.effect(X=X, T0=T0, T1=T1) for mdl in self.cate_models],\r\n                          weights=self.weights, axis=0)\r\n    effect.__doc__ = BaseCateEstimator.effect.__doc__\r\n\r\n    def marginal_effect(self, T, X=None):\r\n        return np.average([mdl.marginal_effect(T, X=X) for mdl in self.cate_models],\r\n                          weights=self.weights, axis=0)\r\n    marginal_effect.__doc__ = BaseCateEstimator.marginal_effect.__doc__\r\n\r\n    def const_marginal_effect(self, X=None):\r\n        if np.any([not hasattr(mdl, 'const_marginal_effect') for mdl in self.cate_models]):\r\n            raise ValueError(\"One of the base CATE models in parameter `cate_models` does not support \"\r\n                             \"the `const_marginal_effect` method.\")\r\n        return np.average([mdl.const_marginal_effect(X=X) for mdl in self.cate_models],\r\n                          weights=self.weights, axis=0)\r\n    const_marginal_effect.__doc__ = LinearCateEstimator.const_marginal_effect.__doc__\r\n\r\n    @property\r\n    def cate_models(self):\r\n        return self._cate_models\r\n\r\n    @cate_models.setter\r\n    def cate_models(self, value):\r\n        if (not isinstance(value, list)) or (not np.all([isinstance(model, BaseCateEstimator) for model in value])):\r\n            raise ValueError('Parameter `cate_models` should be a list of `BaseCateEstimator` objects.')\r\n        self._cate_models = value\r\n\r\n    @property\r\n    def weights(self):\r\n        return self._weights\r\n\r\n    @weights.setter\r\n    def weights(self, value):\r\n        weights = check_array(value, accept_sparse=False, ensure_2d=False, allow_nd=False, dtype='numeric',\r\n                              force_all_finite=True)\r\n        if np.any(weights < 0):\r\n            raise ValueError(\"All weights in parameter `weights` must be non-negative.\")\r\n        self._weights = weights\r\n",
"import os\r\nimport numpy as np\r\nimport joblib\r\nfrom joblib import Parallel, delayed\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.linear_model import Lasso, LassoCV, MultiTaskLasso, MultiTaskLassoCV, LinearRegression\r\nimport warnings\r\nfrom dynamic_panel_dgp import DynamicPanelDGP, LongRangeDynamicPanelDGP\r\nfrom hetero_panel_dynamic_dml import HeteroDynamicPanelDML\r\nimport scipy\r\n\r\n\r\ndef exp(exp_id, dgp, n_units, gamma, s_t, sigma_t, hetero_inds, test_policies):\r\n np.random.seed(exp_id)\r\n if exp_id % 100 == 0:\r\n print(exp_id)\r\n\r\n warnings.simplefilter('ignore')\r\n\r\n Y, T, X, groups = dgp.observational_data(\r\n n_units, gamma, s_t, sigma_t, random_seed=exp_id)\r\n\r\n # alpha_regs = [5e-3, 1e-2, 5e-2]\r\n\r\n # def lasso_model(): return LassoCV(cv=3, alphas=alpha_regs)\r\n\r\n # def mlasso_model(): return MultiTaskLassoCV(cv=3, alphas=alpha_regs)\r\n\r\n def lasso_model(): return Lasso(alpha=0.05)\r\n\r\n def mlasso_model(): return MultiTaskLasso(alpha=0.05)\r\n\r\n est = HeteroDynamicPanelDML(model_t=mlasso_model(),\r\n model_y=lasso_model(),\r\n n_cfit_splits=5).fit(Y, T, X, groups, hetero_inds=hetero_inds)\r\n\r\n param_hat = est.param\r\n conf_ints = est.param_interval(alpha=.05)\r\n stderrs = est.param_stderr\r\n\r\n policy_effect_hat = np.zeros(test_policies.shape[0])\r\n policy_effect_lower = np.zeros(test_policies.shape[0])\r\n policy_effect_upper = np.zeros(test_policies.shape[0])\r\n policy_effect_stderr = np.zeros(test_policies.shape[0])\r\n for q in range(test_policies.shape[0]):\r\n policy_point, policy_ints, policy_std = est.policy_effect(\r\n test_policies[q], X[:, hetero_inds], groups)\r\n policy_effect_hat[q] = policy_point\r\n policy_effect_lower[q] = policy_ints[0]\r\n policy_effect_upper[q] = policy_ints[1]\r\n policy_effect_stderr[q] = policy_std\r\n\r\n return (param_hat, conf_ints[:, 0], conf_ints[:, 1], stderrs), (policy_effect_hat, policy_effect_lower, policy_effect_upper, policy_effect_stderr)\r\n\r\n\r\ndef add_vlines(n_periods, n_treatments, hetero_inds):\r\n locs, labels = plt.xticks([], [])\r\n locs += [- .5 + (len(hetero_inds) + 1) / 2]\r\n labels += [\"\\n\\n$\\\\tau_{{{}}}$\".format(0)]\r\n locs += [qx for qx in np.arange(len(hetero_inds) + 1)]\r\n labels += [\"$1$\"] + [\"$x_{{{}}}$\".format(qx) for qx in hetero_inds]\r\n for q in np.arange(1, n_treatments):\r\n plt.axvline(x=q * (len(hetero_inds) + 1) - .5,\r\n linestyle='--', color='red', alpha=.2)\r\n locs += [q * (len(hetero_inds) + 1) - .5 + (len(hetero_inds) + 1) / 2]\r\n labels += [\"\\n\\n$\\\\tau_{{{}}}$\".format(q)]\r\n locs += [(q * (len(hetero_inds) + 1) + qx)\r\n for qx in np.arange(len(hetero_inds) + 1)]\r\n labels += [\"$1$\"] + [\"$x_{{{}}}$\".format(qx) for qx in hetero_inds]\r\n locs += [- .5 + (len(hetero_inds) + 1) * n_treatments / 2]\r\n labels += [\"\\n\\n\\n\\n$\\\\theta_{{{}}}$\".format(0)]\r\n for t in np.arange(1, n_periods):\r\n plt.axvline(x=t * (len(hetero_inds) + 1) *\r\n n_treatments - .5, linestyle='-', alpha=.6)\r\n locs += [t * (len(hetero_inds) + 1) * n_treatments - .5 +\r\n (len(hetero_inds) + 1) * n_treatments / 2]\r\n labels += [\"\\n\\n\\n\\n$\\\\theta_{{{}}}$\".format(t)]\r\n locs += [t * (len(hetero_inds) + 1) *\r\n n_treatments - .5 + (len(hetero_inds) + 1) / 2]\r\n labels += [\"\\n\\n$\\\\tau_{{{}}}$\".format(0)]\r\n locs += [t * (len(hetero_inds) + 1) * n_treatments +\r\n qx for qx in np.arange(len(hetero_inds) + 1)]\r\n labels += [\"$1$\"] + [\"$x_{{{}}}$\".format(qx) for qx in hetero_inds]\r\n for q in np.arange(1, 
n_treatments):\r\n plt.axvline(x=t * (len(hetero_inds) + 1) * n_treatments + q * (len(hetero_inds) + 1) - .5,\r\n linestyle='--', color='red', alpha=.2)\r\n locs += [t * (len(hetero_inds) + 1) * n_treatments + q *\r\n (len(hetero_inds) + 1) - .5 + (len(hetero_inds) + 1) / 2]\r\n labels += [\"\\n\\n$\\\\tau_{{{}}}$\".format(q)]\r\n locs += [t * (len(hetero_inds) + 1) * n_treatments + (q * (len(hetero_inds) + 1) + qx)\r\n for qx in np.arange(len(hetero_inds) + 1)]\r\n labels += [\"$1$\"] + [\"$x_{{{}}}$\".format(qx) for qx in hetero_inds]\r\n plt.xticks(locs, labels)\r\n plt.tight_layout()\r\n\r\n\r\ndef run_mc(n_exps, n_units, n_x, s_x, n_periods, n_treatments, s_t, sigma_x, sigma_t, sigma_y, gamma):\r\n print(\"Running {} MC experiments with: n_units={}, n_dimensions_x={}, non_zero_coefs={}\".format(n_exps,\r\n n_units, n_x, s_x))\r\n random_seed = 123\r\n np.random.seed(random_seed)\r\n conf_str = 1\r\n # subset of features that are exogenous and create heterogeneity\r\n true_hetero_inds = np.arange(n_x - 2 * s_x, n_x - s_x)\r\n # strength of heterogeneity wrt the exogenous variables (assumed to be the last s_x features)\r\n hetero_strength = 1\r\n # subset of features wrt we estimate heterogeneity\r\n hetero_inds = np.arange(n_x - 2 * s_x, n_x)\r\n n_test_policies = 10\r\n test_policies = np.random.binomial(1, .5, size=(\r\n n_test_policies, n_periods, n_treatments))\r\n\r\n if not os.path.exists('results'):\r\n os.makedirs('results')\r\n if not os.path.exists(os.path.join('results', 'long_range_hetero')):\r\n os.makedirs(os.path.join('results', 'long_range_hetero'))\r\n dirname = os.path.join('results', 'long_range_hetero')\r\n\r\n param_str = (\"n_exps_{}_n_units_{}_n_periods_{}_n_t_{}_n_x_{}_s_x_{}_s_t_{}\"\r\n \"_sigma_x_{}_sigma_t_{}_sigma_y_{}_conf_str_{}_gamma_{}_het_str_{}\").format(\r\n n_exps, n_units, n_periods, n_treatments, n_x, s_x, s_t, sigma_x, sigma_t,\r\n sigma_y, conf_str, gamma, hetero_strength)\r\n\r\n joblib.dump(hetero_inds, os.path.join(\r\n dirname, \"hetero_hetero_inds_{}.jbl\".format(param_str)))\r\n joblib.dump(test_policies, os.path.join(\r\n dirname, \"hetero_test_policies_{}.jbl\".format(param_str)))\r\n\r\n dgp = LongRangeDynamicPanelDGP(n_periods, n_treatments, n_x).create_instance(s_x, sigma_x, sigma_y,\r\n conf_str, hetero_strength, true_hetero_inds,\r\n random_seed=random_seed)\r\n joblib.dump(dgp, os.path.join(\r\n dirname, \"hetero_dgp_obj_{}.jbl\".format(param_str)))\r\n\r\n results = Parallel(n_jobs=-1, max_nbytes=None)(delayed(exp)(i, dgp, n_units, gamma, s_t, sigma_t, hetero_inds, test_policies)\r\n for i in range(n_exps))\r\n joblib.dump(results, os.path.join(\r\n dirname, \"hetero_results_{}.jbl\".format(param_str)))\r\n\r\n param_results = np.array([r[0] for r in results])\r\n points = param_results[:, 0]\r\n lowers = param_results[:, 1]\r\n uppers = param_results[:, 2]\r\n stderrs = param_results[:, 3]\r\n policy_results = np.array([r[1] for r in results])\r\n policy_effect_hat = policy_results[:, 0]\r\n policy_effect_lowers = policy_results[:, 1]\r\n policy_effect_uppers = policy_results[:, 2]\r\n policy_effect_stderrs = policy_results[:, 3]\r\n\r\n true_effect_inds = []\r\n for t in range(n_treatments):\r\n true_effect_inds += [t * (1 + n_x)] + \\\r\n list(t * (1 + n_x) + 1 + hetero_inds)\r\n true_effect_params = dgp.true_hetero_effect[:, true_effect_inds].flatten()\r\n\r\n true_policy_effect = np.array([dgp.static_policy_effect(\r\n tau, mc_samples=1000) for tau in test_policies])\r\n\r\n plt.figure(figsize=(15, 5))\r\n inds = 
np.arange(points.shape[1])\r\n plt.violinplot(points, positions=inds, showmeans=True)\r\n plt.scatter(inds, true_effect_params, marker='o',\r\n color='#D43F3A', s=10, zorder=3, alpha=.5)\r\n add_vlines(n_periods, n_treatments, hetero_inds)\r\n plt.savefig(os.path.join(dirname, \"hetero_dists_{}.png\".format(param_str)))\r\n\r\n plt.figure(figsize=(15, 5))\r\n inds = np.arange(points.shape[1])\r\n plt.violinplot(stderrs, positions=inds, showmeans=True)\r\n true_std = np.std(points, axis=0)\r\n true_std_error = (true_std * (np.sqrt((n_exps - 1) / scipy.stats.chi2.ppf((1 - .05 / 2), n_exps - 1)) - 1),\r\n true_std * (1 - np.sqrt((n_exps - 1) / scipy.stats.chi2.ppf((.05 / 2), n_exps - 1))))\r\n plt.errorbar(inds, true_std, yerr=true_std_error, fmt='o',\r\n color='#D43F3A', elinewidth=2, alpha=.9, capthick=.5, uplims=True, lolims=True)\r\n add_vlines(n_periods, n_treatments, hetero_inds)\r\n plt.savefig(os.path.join(\r\n dirname, \"hetero_stderrs_{}.png\".format(param_str)))\r\n\r\n coverage = np.mean((true_effect_params.reshape(1, -1) <= uppers) & (\r\n true_effect_params.reshape(1, -1) >= lowers), axis=0)\r\n plt.figure(figsize=(15, 5))\r\n inds = np.arange(points.shape[1])\r\n plt.scatter(inds, coverage)\r\n add_vlines(n_periods, n_treatments, hetero_inds)\r\n plt.savefig(os.path.join(\r\n dirname, \"hetero_coverage_{}.png\".format(param_str)))\r\n\r\n for kappa in range(n_periods):\r\n for t in range(n_treatments * (len(hetero_inds) + 1)):\r\n param_ind = kappa * (len(hetero_inds) + 1) * n_treatments + t\r\n coverage = np.mean((true_effect_params[param_ind] <= uppers[:, param_ind]) & (\r\n true_effect_params[param_ind] >= lowers[:, param_ind]))\r\n print(\"Effect Lag={}, TX={}: Mean={:.3f}, Std={:.3f}, Mean-Stderr={:.3f}, Coverage={:.3f}, (Truth={:.3f})\".format(kappa, t,\r\n np.mean(\r\n points[:, param_ind]),\r\n np.std(\r\n points[:, param_ind]),\r\n np.mean(\r\n stderrs[:, param_ind]),\r\n coverage,\r\n true_effect_params[param_ind]))\r\n\r\n plt.figure(figsize=(15, 5))\r\n inds = np.arange(policy_effect_hat.shape[1])\r\n plt.violinplot(policy_effect_hat, positions=inds, showmeans=True)\r\n plt.scatter(inds, true_policy_effect, marker='o',\r\n color='#D43F3A', s=10, zorder=3, alpha=.5)\r\n plt.savefig(os.path.join(\r\n dirname, \"hetero_policy_dists_{}.png\".format(param_str)))\r\n\r\n plt.figure(figsize=(15, 5))\r\n inds = np.arange(policy_effect_hat.shape[1])\r\n plt.violinplot(policy_effect_stderrs, positions=inds, showmeans=True)\r\n true_std = np.std(policy_effect_hat, axis=0)\r\n true_std_error = (true_std * (np.sqrt((n_exps - 1) / scipy.stats.chi2.ppf((1 - .05 / 2), n_exps - 1)) - 1),\r\n true_std * (1 - np.sqrt((n_exps - 1) / scipy.stats.chi2.ppf((.05 / 2), n_exps - 1))))\r\n plt.errorbar(inds, true_std, yerr=true_std_error, fmt='o',\r\n color='#D43F3A', elinewidth=2, alpha=.9, capthick=.5, uplims=True, lolims=True)\r\n plt.savefig(os.path.join(\r\n dirname, \"hetero_policy_stderrs_{}.png\".format(param_str)))\r\n\r\n policy_coverage = np.mean((true_policy_effect.reshape(1, -1) <= policy_effect_uppers) & (\r\n true_policy_effect.reshape(1, -1) >= policy_effect_lowers), axis=0)\r\n plt.figure(figsize=(15, 5))\r\n inds = np.arange(policy_coverage.shape[0])\r\n plt.scatter(inds, policy_coverage)\r\n plt.savefig(os.path.join(\r\n dirname, \"hetero_policy_coverage_{}.png\".format(param_str)))\r\n\r\n for q in range(test_policies.shape[0]):\r\n print(\"Policy effect for treatment seq: \\n {}\\n Mean={:.3f}, Std={:.3f}, Mean-Stderr={:.3f}, Coverage={:.3f}, 
(Truth={:.3f})\".format(test_policies[q],\r\n np.mean(\r\n policy_effect_hat[:, q]),\r\n np.std(\r\n policy_effect_hat[:, q]),\r\n np.mean(\r\n policy_effect_stderrs[:, q]),\r\n policy_coverage[\r\n q],\r\n true_policy_effect[q]))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_mc(n_exps=1000, n_units=500, n_x=450, s_x=2, n_periods=3,\r\n n_treatments=2, s_t=2, sigma_x=1, sigma_t=1, sigma_y=1, gamma=.2)\r\n run_mc(n_exps=1000, n_units=1000, n_x=450, s_x=2, n_periods=3,\r\n n_treatments=2, s_t=2, sigma_x=1, sigma_t=1, sigma_y=1, gamma=.2)\r\n run_mc(n_exps=1000, n_units=2000, n_x=450, s_x=2, n_periods=3,\r\n n_treatments=2, s_t=2, sigma_x=1, sigma_t=1, sigma_y=1, gamma=.2)\r\n run_mc(n_exps=1000, n_units=500, n_x=450, s_x=2, n_periods=3,\r\n n_treatments=2, s_t=2, sigma_x=1, sigma_t=.5, sigma_y=1, gamma=.2)\r\n run_mc(n_exps=1000, n_units=2000, n_x=450, s_x=2, n_periods=3,\r\n n_treatments=2, s_t=2, sigma_x=1, sigma_t=.5, sigma_y=1, gamma=.2)\r\n run_mc(n_exps=1000, n_units=1000, n_x=450, s_x=2, n_periods=3,\r\n n_treatments=2, s_t=2, sigma_x=1, sigma_t=.5, sigma_y=.1, gamma=.2)\r\n run_mc(n_exps=1000, n_units=2000, n_x=450, s_x=2, n_periods=3,\r\n n_treatments=2, s_t=2, sigma_x=1, sigma_t=.5, sigma_y=.1, gamma=.2)\r\n"
] |
[
[
"numpy.any",
"sklearn.utils.validation.check_array"
],
[
"scipy.stats.chi2.ppf",
"matplotlib.pyplot.tight_layout",
"sklearn.linear_model.MultiTaskLasso",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.arange",
"sklearn.linear_model.Lasso",
"numpy.std",
"numpy.mean",
"matplotlib.pyplot.violinplot",
"matplotlib.pyplot.errorbar",
"numpy.random.binomial",
"matplotlib.pyplot.xticks",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
markmorr/side
|
[
"90e9f822ea7976f402798af3bdf2901a21b31f15"
] |
[
"start_to_elections.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nfrom __future__ import unicode_literals, print_function, division\nfrom io import open\nimport glob\nimport os\nimport torch\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import tree\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report, accuracy_score, balanced_accuracy_score\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\n\n\nfrom sklearn import tree\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\n\n# X_selected_df = pd.DataFrame(X_selected, columns=[X_train.columns[i] for i in range(len(X_train.columns)) if feature_selector.get_support()[i]])\n# X_imputed_df = pd.DataFrame(X_imputed, columns = X_train.columns)\n\n\n# try with grus\n# try encoding name--language of origin\n# name--gender\n# and use those as hard-coded features while also adding the rnn strictly trained to\n# predict outcome\n# try and combine the signal from those different rnn outputs by inputting them as features\n# into a broader modell--use catboost maybe? could run on gpu for instance\n# see https://catboost.ai/ video is good\n# moved state to region to rnn_elections file\n#decision trees are stupid and don't even compare variables\n#there are data issues? check what's the one that doesn't fit\n#maybe they all fit?\n#gonna have to trim names down? give a score to how many times they won, etc.?\n# stratify by time and do bag of races--test the performance in both train/test settings\n# see if there are any time periods when names perform well, for instance using bag of races\n\n\n\ndef plotCM(cm_, title_):\n fig, ax = plt.subplots(figsize=(8, 8))\n ax.imshow(cm_)\n ax.grid(False)\n ax.xaxis.set(ticks=(0, 1), ticklabels=('Predicted Positive', 'Predicted Negative'))\n ax.yaxis.set(ticks=(0, 1), ticklabels=('Actual Positive', 'Actual Negative'))\n ax.set_ylim(1.5, -0.5)\n for i in range(2):\n for j in range(2):\n ax.text(j, i, cm_[i, j], ha='center', va='center', color='red')\n plt.title(title_)\n plt.show()\n \n\ndef runModels(X,y):\n \n X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=.2, random_state=10)\n clf_list = [DecisionTreeClassifier(random_state=10,),\n RandomForestClassifier(random_state=10, max_depth=15),\n LogisticRegression(random_state=10, max_iter=10000)]\n for clf in clf_list:\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n training_pred = clf.predict(X_train)\n cm = confusion_matrix(y_test, clf.predict(X_test))\n title = clf.__class__.__name__\n print(title)\n # plotCM(cm, title + ' on Testing Data')\n print(\"Accuracy score: \",accuracy_score(y_test,y_pred))\n print(\"Balanced accuracy score: \",balanced_accuracy_score(y_test,y_pred))\n print(classification_report(y_test, y_pred))\n print(accuracy_score(y_train,training_pred))\n print(accuracy_score(y_test,y_pred))\n # tree.plot_tree(clf()\n return\n\n\ndf = pd.read_csv(r'C:\\Users\\16028\\Downloads\\house_76_thru_2020\\1976-2020-house.csv', encoding='unicode_escape')\n\nwith open(r'C:\\Users\\16028\\Downloads\\house_76_thru_2020\\1976-2020-house.csv') as f:\n print(f)\n\nda[['office', 'stage', 'runoff', 'special', 'writein']].value_counts()\n\nda.writein.value_counts()\n\ndf_filtered = df.groupby(['year', 'state', 'district'])\n\ni = 
0\nfor name, group in df_filtered:\n if i < 10:\n print(name)\n print(group)\n i += 1\n\n\n\ndf = df[['year', 'state', 'district', 'office', 'stage', 'runoff', 'special', 'candidate', \n 'party', 'candidatevotes', 'totalvotes', 'unofficial', 'version', 'writein', 'fusion_ticket']]\ndf['state'] = df['state'].str.lower()\n# df['winner'] = df_filtered['candidatevotes'].transform('max')['candidate']\nidx = df_filtered['candidatevotes'].transform('max') == df['candidatevotes']\ndf['winners'] = df[idx]['candidate']\n\ndf['winners_fixed'] = df.groupby(['year', 'state', 'district'])['winners'].apply(lambda x: x.ffill().bfill())\n\n\ndf['vote_pct'] = df['candidatevotes']/df['totalvotes']\n\ndf = df[df.stage == 'gen']\ndf = df[df['vote_pct'] > .05]\n\nduo = df.groupby(['year', 'state', 'district']).filter(lambda x:len(x)==2)\nduo.reset_index(inplace=True)\n\nda.fusion_ticket.value_counts()\n\ndf = duo.copy()\ndf_dems = df[df['party'] == 'DEMOCRAT']\ndf_non_dems = df[df['party'] != 'DEMOCRAT']\ndf_m1_alt = df_dems.merge(df_non_dems, left_on=['year', 'state', 'district'], right_on = ['year', 'state', 'district'])\n\ndf2 = df_m1_alt[['year', 'state', 'district', 'writein_x', 'writein_y', 'fusion_ticket_x', 'fusion_ticket_y',\n 'candidate_x', 'candidate_y', 'party_x',\n 'party_y', \n 'candidatevotes_x', 'candidatevotes_y', 'totalvotes_x','vote_pct_x','vote_pct_y',\n 'winners_fixed_x']]\n\n\ndef binarize_result(row):\n x_won_flag = False\n if row['candidatevotes_x'] > row['candidatevotes_y']:\n x_won_flag = True\n else:\n x_won_flag = False\n \n if (row['party_x'] == 'DEMOCRAT') & (x_won_flag):\n return 1\n elif (row['party_x'] == 'REPUBLICAN') & (x_won_flag):\n return 0\n elif (row['party_y'] == 'DEMOCRAT') & ( not x_won_flag):\n return 1\n elif (row['party_y'] == 'REPUBLICAN') & (not x_won_flag):\n return 0\n else:\n return 2\n# df['y'] = df[['party_x', 'party_y', 'candidatevotes_x', 'candidatevotes_y']].apply(binarize_result)\n\ndf2['y']= df2.apply(binarize_result, axis=1)\n# df.columns\ndf2 = df2[df2.y != 2] #for now we'll ignore error cases, again filtering\ndf = df2.copy()\n\n\ndef binarize_party(x):\n if x =='DEMOCRAT':\n return 1\n elif x == 'REPUBLICAN':\n return 0\n else:\n return 0 #DEFAULTING TO NON-STANDARD MEANS REPUBLICAN\nprint('here')\ndf['party_x_bin']= df['party_x'].apply(binarize_party)\ndf['party_y_bin']= df['party_y'].apply(binarize_party)\n\n\ndf[(df.party_x_bin == 1) & (df.y == 1)].shape\ndf.shape \n\ndf = df[(df['candidate_x'].notnull()) & (df['candidate_y'].notnull())]\n\n\ndf['vote_diff'] = df['candidatevotes_x'] - df['candidatevotes_y']\nX = df[['candidatevotes_x', 'candidatevotes_y', 'vote_diff','party_x_bin', 'party_y_bin']]\ny = df['y']\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size=.2, random_state=0)\nrunModels(X,y)\n\n\ndf.writein_x.value_counts()\ndf.writein_y.value_counts()\ndf.fusion_ticket_x.value_counts()\ndf.fusion_ticket_y.value_counts()\n175/7900\n# state_to_region = {k.lower(): v for k, v in state_to_region.items()}\n# df['region'] = df['state'].map(state_to_region)\n#state only is doing best? 
logistic regression is learning; deicsion tree is not\n#set up a logging system for testing?\n#region hurt\n# abbrev_to_us_state = dict(map(reversed, us_state_to_abbrev.items()))\n\n\n #DOES THIS MATTER?\n# should i scramble the order so its not always x is democrat right is republican??\n\ndf['year_delta'] = df['year'] - 1976\none_hot_data = pd.get_dummies(df[['state', ]],drop_first=False) #probably negligible\nnumeric_data = df[['year_delta',]]\nnumeric_data.shape\nX = pd.concat([numeric_data, one_hot_data], axis=1)\nrunModels(X,y)\n#588, 616, 621\n#588, 614, 621\ndf.y.value_counts()[1]/(df.y.value_counts()[0]+df.y.value_counts()[1])\n\n\none_hot_data = pd.get_dummies(df[['state', 'fusion_ticket_x', 'fusion_ticket_y' ]],drop_first=False) #probably negligible\nnumeric_data = df[['year_delta', ]]\nnumeric_data.shape\nX = pd.concat([numeric_data, one_hot_data], axis=1)\nrunModels(X,y)\n\ndf2 = df.copy()\ndf = pd.concat([df2.drop('state', axis=1), one_hot_data], axis=1)\n\ndf.shape\ndf2.shape\ndef getWinningName(row):\n if row['y'] == 1:\n return row['candidate_x']\n else:\n return row['candidate_y']\n \ndef getLosingName(row):\n if row['y'] == 1:\n return row['candidate_y']\n else:\n return row['candidate_x']\n \n# X = pd.concat(df[['candidate_x', 'candidate_y']])\n# X = df[['candidate_x', 'candidate_y',]']\nX_train, X_test, y_train, y_test = train_test_split(df.drop(columns=['y']),df['y'], test_size=.2, random_state=0)\nold_train_index, old_test_index = X_train.index, X_test.index\nX_train.reset_index(drop=True, inplace=True)\ny_train.reset_index(drop=True, inplace=True)\n\nvadded = pd.concat([X_train, y_train], axis=1)\n\ndef categorizeNames(df__, y_values):\n df__ = pd.concat([df__, y_values], axis=1)\n df__['winning_name'] = df__.apply(getWinningName, axis=1)\n df__['losing_name'] = df__.apply(getLosingName, axis=1)\n\n df__ = df__[(df__['winning_name'].notna()) & df__['losing_name'].notna()]\n wn = df__['winning_name'].tolist()\n ln = df__['losing_name'].tolist()\n \n \n wn = df__['winning_name'].str.title()\n ln = df__['losing_name'].str.title()\n df__.drop(columns=['y'], axis=1, inplace=True)\n return wn, ln\nw1, l1 = categorizeNames(X_train, y_train)\n\n\n\n##################################################################################\n# credit to PyTorch tutorial--relied on heavily to perform the character classification\n# https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial\n##################################################################################\n\n#################################################################################\ndef findFiles(path):\n return glob.glob(path)\n\nprint(findFiles('data/names/*.txt'))\n\nimport unicodedata\nimport string\n\nall_letters = string.ascii_letters + \" .,;'\"\nn_letters = len(all_letters)\n\n# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427\ndef unicodeToAscii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n and c in all_letters\n )\n\nprint(unicodeToAscii('Ślusàrski'))\n\n# Build the category_lines dictionary, a list of names per language\ncategory_lines = {}\nall_categories = []\n\n\n# Read a file and split into lines\ndef readLines(filename):\n lines = open(filename, encoding='utf-8').read().strip().split('\\n')\n return [unicodeToAscii(line) for line in lines]\n\n# =============================================================================\n# for filename in findFiles('data/names/*.txt'):\n# 
category = os.path.splitext(os.path.basename(filename))[0]\n# all_categories.append(category)\n# lines = readLines(filename)\n# category_lines[category] = lines\n# =============================================================================\n\n###################################################\ncategory_lines['win'] = w1\ncategory_lines['lose'] = l1\nall_categories = ['win', 'lose']\nn_categories = len(all_categories)\n###################################################\n\n#################################################################################\n\nprint(category_lines['win'][:5])\n\n#################################################################################\n\n\n# Find letter index from all_letters, e.g. \"a\" = 0\ndef letterToIndex(letter):\n return all_letters.find(letter)\n\n# Just for demonstration, turn a letter into a <1 x n_letters> Tensor\ndef letterToTensor(letter):\n tensor = torch.zeros(1, n_letters)\n tensor[0][letterToIndex(letter)] = 1\n return tensor\n\n# Turn a line into a <line_length x 1 x n_letters>,\n# or an array of one-hot letter vectors\ndef lineToTensor(line):\n tensor = torch.zeros(len(line), 1, n_letters)\n for li, letter in enumerate(line):\n tensor[li][0][letterToIndex(letter)] = 1\n return tensor\n\nprint(letterToTensor('J'))\n\nprint(lineToTensor('Jones').size())\n\n###################################################################################\nimport torch.nn as nn\n\nclass RNN(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(RNN, self).__init__()\n\n self.hidden_size = hidden_size\n\n self.i2h = nn.Linear(input_size + hidden_size, hidden_size)\n self.i2o = nn.Linear(input_size + hidden_size, output_size)\n self.softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, input, hidden):\n combined = torch.cat((input, hidden), 1)\n hidden = self.i2h(combined)\n output = self.i2o(combined)\n output = self.softmax(output)\n return output, hidden\n\n def initHidden(self):\n return torch.zeros(1, self.hidden_size)\n\nn_hidden = 128\nrnn = RNN(n_letters, n_hidden, n_categories)\n\n##################################################################################\ninput = letterToTensor('A')\nhidden = torch.zeros(1, n_hidden)\n\noutput, next_hidden = rnn(input, hidden)\n####################################################################################\ninput = lineToTensor('Albert')\nhidden = torch.zeros(1, n_hidden)\n\noutput, next_hidden = rnn(input[0], hidden)\nprint(output)\n\n\n#################################################################################\ndef categoryFromOutput(output):\n top_n, top_i = output.topk(1)\n category_i = top_i[0].item()\n return all_categories[category_i], category_i\n\nprint(categoryFromOutput(output))\n\n\n#################################################################################\nimport random\n\ndef randomChoice(l):\n return l[random.randint(0, len(l) - 1)]\n\nay = category_lines['lose']\n\ndef randomTrainingExample():\n category = randomChoice(all_categories)\n line = randomChoice(category_lines[category])\n # print(line)\n category_tensor = torch.tensor([all_categories.index(category)], dtype=torch.long)\n line_tensor = lineToTensor(line)\n return category, line, category_tensor, line_tensor\n\nfor i in range(10):\n print\n category, line, category_tensor, line_tensor = randomTrainingExample()\n print('category =', category, '/ line =', line)\n \n \n#################################################################################\ncriterion = 
nn.NLLLoss()\n\n#################################################################################\nlearning_rate = 0.005 # If you set this too high, it might explode. If too low, it might not learn\n\ndef train(category_tensor, line_tensor):\n hidden = rnn.initHidden()\n\n rnn.zero_grad()\n\n for i in range(line_tensor.size()[0]):\n output, hidden = rnn(line_tensor[i], hidden)\n\n loss = criterion(output, category_tensor)\n loss.backward()\n\n # Add parameters' gradients to their values, multiplied by learning rate\n for p in rnn.parameters():\n p.data.add_(p.grad.data, alpha=-learning_rate)\n\n return output, loss.item()\n\n\n\n\n\n#################################################################################\nimport time\nimport math\n\nn_iters = 100000\nprint_every = 5000\nplot_every = 1000\n\n\n\n# Keep track of losses for plotting\ncurrent_loss = 0\nall_losses = []\n\ndef timeSince(since):\n now = time.time()\n s = now - since\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\nstart = time.time()\n\nfor iter in range(1, n_iters + 1):\n category, line, category_tensor, line_tensor = randomTrainingExample()\n output, loss = train(category_tensor, line_tensor)\n current_loss += loss\n\n # Print iter number, loss, name and guess\n if iter % print_every == 0:\n guess, guess_i = categoryFromOutput(output)\n correct = '✓' if guess == category else '✗ (%s)' % category\n print('%d %d%% (%s) %.4f %s / %s %s' % (iter, iter / n_iters * 100, timeSince(start), loss, line, guess, correct))\n\n # Add current loss avg to list of losses\n if iter % plot_every == 0:\n all_losses.append(current_loss / plot_every)\n current_loss = 0\n \n################################################################################\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nplt.figure()\nplt.plot(all_losses)\n\n\n\n\n#################################################################################\n# Keep track of correct guesses in a confusion matrix\nconfusion = torch.zeros(n_categories, n_categories)\nn_confusion = 10000\n\n# Just return an output given a line\ndef evaluate(line_tensor):\n hidden = rnn.initHidden()\n\n for i in range(line_tensor.size()[0]):\n output, hidden = rnn(line_tensor[i], hidden)\n\n return output\n\n# Go through a bunch of examples and record which are correctly guessed\nfor i in range(n_confusion):\n category, line, category_tensor, line_tensor = randomTrainingExample()\n output = evaluate(line_tensor)\n guess, guess_i = categoryFromOutput(output)\n category_i = all_categories.index(category)\n confusion[category_i][guess_i] += 1\n\n# Normalize by dividing every row by its sum\nfor i in range(n_categories):\n confusion[i] = confusion[i] / confusion[i].sum()\n\n# Set up plot\nfig = plt.figure()\nax = fig.add_subplot(111)\ncax = ax.matshow(confusion.numpy())\nfig.colorbar(cax)\n\n# Set up axes\nax.set_xticklabels([''] + all_categories, rotation=90)\nax.set_yticklabels([''] + all_categories)\n\n# Force label at every tick\nax.xaxis.set_major_locator(ticker.MultipleLocator(1))\nax.yaxis.set_major_locator(ticker.MultipleLocator(1))\n\n# sphinx_gallery_thumbnail_number = 2\nplt.show()\n\n####################################################################################\n\n\ndef predict(input_line, n_predictions=2): #changed n_predictions from 3 to 2\n print('\\n> %s' % input_line)\n with torch.no_grad():\n output = evaluate(lineToTensor(input_line))\n\n # Get top N categories\n topv, topi = output.topk(n_predictions, 1, True)\n predictions = 
[]\n\n for i in range(n_predictions):\n value = topv[0][i].item()\n category_index = topi[0][i].item()\n print('(%.2f) %s' % (value, all_categories[category_index]))\n predictions.append([value, all_categories[category_index]])\n \n \n \n \n \nimport numpy as np \n \ndef getPrediction(input_line, n_predictions=2): #changed n_predictions from 3 to 2\n # print('\\n> %s' % input_line)\n with torch.no_grad():\n output = evaluate(lineToTensor(input_line))\n # print(output)\n # Get top N categories\n topv, topi = output.topk(n_predictions, 1, True)\n # print(topi)\n predictions = []\n return (np.exp(output[0][0].item()))#, np.exp(output[0][1].item()))\n \n \ndef collectAllPredictions(test_set, n_predictions=2):\n tuples_list = []\n for index,row in test_set.iterrows():\n tuples_list.append((row['candidate_x'], getPrediction(row['candidate_x']),row['candidate_y'], getPrediction(row['candidate_y']),\n ))\n return tuples_list\n\n\ntest_set = X_test\nrnn_preds = collectAllPredictions(test_set)\n#check if order of names still matches\nrnn_preds\n\ndf_output = pd.DataFrame(rnn_preds, columns=['candidate_x', 'candidate_x_name_score', 'candidate_y', 'candidate_y_name_score'])\ndf_output['name_score_diff'] = df_output['candidate_x_name_score'] - df_output['candidate_y_name_score']\ndf_output['y'] = y_train\n\ndf_output.shape\ny_test.shape\nsubset_name = df_output[['name_score_diff', 'candidate_x_name_score', 'candidate_y_name_score', 'year_delta']] \nrunModels(subset_name, y_test)\nX_test.reset_index(drop=True).head()\nX_test.shape\nfinalized_subset = pd.concat([subset_name, X_test.reset_index(drop=True)], axis=1)\n\ndf_output.shape\ndf.shape\nlen(rnn_preds)\nX_test.shape\n\ntrain_set = X_train\nrnn_train_preds = collectAllPredictions(X_train)\n\ndf_train_output = pd.DataFrame(rnn_train_preds, columns=['candidate_x', 'candidate_x_name_score', 'candidate_y', 'candidate_y_name_score'])\ndf_train_output['name_score_diff'] = df_train_output['candidate_x_name_score'] - df_train_output['candidate_y_name_score']\n# df_train_output['y'] = y_train\n\nsubset_train_name = df_train_output[['name_score_diff', 'candidate_x_name_score', 'candidate_y_name_score']] \nfinalized_train_subset = pd.concat([subset_train_name, X_train.reset_index(drop=True)], axis=1)\n\n\nfinalized_subset.columns\n# checking_this = pd.concat(subset_name, X_train[['state']])\n\n# runModel(X,y)\npredict('Jackson')\npredict('Satoshi')\npredict('Krcatovic')\n\nmy_data_dir = 'model_states/'\ntorch.save(rnn.state_dict(), os.path.join(my_data_dir, \"rnn_model.pt\"))\n# caption_model.load_state_dict(torch.load(os.path.join(my_data_dir, \"outputs/rnn_model.pt\")))\n\n# caption_model = caption_model.to(device).eval()\n\nfinalized_train_subset.drop(columns=['candidate_x', 'candidate_y'], inplace=True)\nfinalized_subset.drop(columns=['candidate_x', 'candidate_y'], inplace=True)\n\nX_test_subsetted = finalized_subset.drop(columns=['year', 'district', 'writein_x', 'writein_y',\n 'party_x',\n 'party_y', 'party_x_bin', 'party_y_bin',\n 'candidatevotes_x', 'candidatevotes_y',\n 'vote_diff', 'totalvotes_x','vote_pct_x',\n 'vote_pct_y', 'winners_fixed_x'])\n\nX_test_subsetted.columns\nX_train_subsetted = finalized_train_subset.drop(columns=['year', 'district', 'writein_x', 'writein_y',\n 'party_x',\n 'party_y', 'party_x_bin', 'party_y_bin',\n 'candidatevotes_x', 'candidatevotes_y',\n 'vote_diff', 'totalvotes_x','vote_pct_x',\n 'vote_pct_y', 'winners_fixed_x'])\n\n# X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=.2, random_state=10)\nclf_list 
= [DecisionTreeClassifier(random_state=10,),\nRandomForestClassifier(random_state=10, max_depth=15)]\n# LogisticRegression(random_state=10, max_iter=10000)]\n\nX_train_subsetted_2 = X_train_subsetted.drop(columns=[ 'candidate_y_name_score','name_score_diff' ]) #\nX_test_subsetted_2 = X_test_subsetted.drop(columns=[ 'candidate_y_name_score', 'name_score_diff']) #\n\n#baseline for only giving them name_score_diff: 69.9 DT and 69.4 RF\n#giving them name_score_diff and candidate_x_score: \n#keeping all 3: 66 DT and 67 RF\n# keep name_diff and one of them: about ~65 for both cases, both models\n# keep candidate_x and candidate_y (but not diff): 69.9 DT and 69.4 RF #why would this not be better?\n#doesn't make a ton of sense that having both candidate scores brings it down as compared\n# to having just one of them (and both individual adds perform a little better?)\n#check--i guess that means they're highly correlated? \n#perform dataset normalization?\n# keep name_diff only: 60.9 DT and 62.4 RF\n#keep only candidate_y_score: weirdly high. 78.5 DT and 68 RF.\n#keep only candidate_x_score: weirdly high. 75 DT and 69.7 RF.\n#keeping none: 60.6 DT and 62.9 RF\n\n\nfor clf in clf_list:\n clf.fit(X_train_subsetted_2, y_train)\n y_pred = clf.predict(X_test_subsetted_2)\n training_pred = clf.predict(X_train_subsetted_2)\n cm = confusion_matrix(y_test, clf.predict(X_test_subsetted_2))\n title = clf.__class__.__name__\n print(title)\n # plotCM(cm, title + ' on Testing Data')\n print(\"Balanced accuracy score: \",balanced_accuracy_score(y_test,y_pred))\n print('Training score: ', accuracy_score(y_train,training_pred))\n print('Test Score: ', accuracy_score(y_test,y_pred))\n print('\\n')\n # tree.plot_tree(clf()\nprint('\\n') \n \n \n################################################################################\nfor clf in clf_list:\n clf.fit(X_train_subsetted, y_train)\n y_pred = clf.predict(X_test_subsetted)\n training_pred = clf.predict(X_train_subsetted)\n cm = confusion_matrix(y_test, clf.predict(X_test_subsetted))\n title = clf.__class__.__name__\n print(title)\n # plotCM(cm, title + ' on Testing Data')\n print(\"Accuracy score: \",accuracy_score(y_test,y_pred))\n print(\"Balanced accuracy score: \",balanced_accuracy_score(y_test,y_pred))\n print('Training score: ', accuracy_score(y_train,training_pred))\n print('Test Score: ', accuracy_score(y_test,y_pred))\n print('\\n')\n # tree.plot_tree(clf()\n\n\n###################################################################################\n\n"
] |
[
[
"matplotlib.ticker.MultipleLocator",
"torch.zeros",
"torch.cat",
"sklearn.metrics.accuracy_score",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"sklearn.tree.DecisionTreeClassifier",
"torch.no_grad",
"sklearn.metrics.classification_report",
"pandas.read_csv",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.figure",
"pandas.concat",
"torch.nn.NLLLoss",
"torch.nn.LogSoftmax",
"matplotlib.pyplot.title",
"sklearn.model_selection.train_test_split",
"torch.nn.Linear",
"matplotlib.pyplot.show",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.balanced_accuracy_score",
"matplotlib.pyplot.subplots",
"pandas.get_dummies"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
koso019003/espnet
|
[
"7735c992b3d71fabbc0f0c48c1d8f78d72785e17"
] |
[
"espnet/nets/pytorch_backend/rnn/decoders.py"
] |
[
"from distutils.version import LooseVersion\nimport logging\nimport math\nimport random\nimport six\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom argparse import Namespace\n\nfrom espnet.nets.ctc_prefix_score import CTCPrefixScore\nfrom espnet.nets.ctc_prefix_score import CTCPrefixScoreTH\nfrom espnet.nets.e2e_asr_common import end_detect\n\nfrom espnet.nets.pytorch_backend.rnn.attentions import att_to_numpy\n\nfrom espnet.nets.pytorch_backend.nets_utils import mask_by_length\nfrom espnet.nets.pytorch_backend.nets_utils import pad_list\nfrom espnet.nets.pytorch_backend.nets_utils import th_accuracy\nfrom espnet.nets.pytorch_backend.nets_utils import to_device\nfrom espnet.nets.scorer_interface import ScorerInterface\n\nMAX_DECODER_OUTPUT = 5\nCTC_SCORING_RATIO = 1.5\n\n\nclass Decoder(torch.nn.Module, ScorerInterface):\n    \"\"\"Decoder module\n\n    :param int eprojs: # encoder projection units\n    :param int odim: dimension of outputs\n    :param str dtype: gru or lstm\n    :param int dlayers: # decoder layers\n    :param int dunits: # decoder units\n    :param int sos: start of sequence symbol id\n    :param int eos: end of sequence symbol id\n    :param torch.nn.Module att: attention module\n    :param int verbose: verbose level\n    :param list char_list: list of character strings\n    :param ndarray labeldist: distribution of label smoothing\n    :param float lsm_weight: label smoothing weight\n    :param float sampling_probability: scheduled sampling probability\n    :param float dropout: dropout rate\n    :param bool context_residual: if True, use context vector for token generation\n    :param bool replace_sos: use for multilingual (speech/text) translation\n    \"\"\"\n\n    def __init__(self, eprojs, odim, dtype, dlayers, dunits, sos, eos, att, verbose=0,\n                 char_list=None, labeldist=None, lsm_weight=0., sampling_probability=0.0,\n                 dropout=0.0, context_residual=False, replace_sos=False, num_encs=1):\n\n        torch.nn.Module.__init__(self)\n        self.dtype = dtype\n        self.dunits = dunits\n        self.dlayers = dlayers\n        self.context_residual = context_residual\n        self.embed = torch.nn.Embedding(odim, dunits)\n        self.dropout_emb = torch.nn.Dropout(p=dropout)\n\n        self.decoder = torch.nn.ModuleList()\n        self.dropout_dec = torch.nn.ModuleList()\n        self.decoder += [\n            torch.nn.LSTMCell(dunits + eprojs, dunits) if self.dtype == \"lstm\" else torch.nn.GRUCell(dunits + eprojs,\n                                                                                                     dunits)]\n        self.dropout_dec += [torch.nn.Dropout(p=dropout)]\n        for _ in six.moves.range(1, self.dlayers):\n            self.decoder += [\n                torch.nn.LSTMCell(dunits, dunits) if self.dtype == \"lstm\" else torch.nn.GRUCell(dunits, dunits)]\n            self.dropout_dec += [torch.nn.Dropout(p=dropout)]\n        # NOTE: dropout is applied only for the vertical connections\n        # see https://arxiv.org/pdf/1409.2329.pdf\n        self.ignore_id = -1\n\n        if context_residual:\n            self.output = torch.nn.Linear(dunits + eprojs, odim)\n        else:\n            self.output = torch.nn.Linear(dunits, odim)\n\n        self.loss = None\n        self.att = att\n        self.dunits = dunits\n        self.sos = sos\n        self.eos = eos\n        self.odim = odim\n        self.verbose = verbose\n        self.char_list = char_list\n        # for label smoothing\n        self.labeldist = labeldist\n        self.vlabeldist = None\n        self.lsm_weight = lsm_weight\n        self.sampling_probability = sampling_probability\n        self.dropout = dropout\n        self.num_encs = num_encs\n\n        # for multilingual translation\n        self.replace_sos = replace_sos\n\n        self.logzero = -10000000000.0\n\n    def zero_state(self, hs_pad):\n        return hs_pad.new_zeros(hs_pad.size(0), self.dunits)\n\n    def rnn_forward(self, ey, z_list, c_list, z_prev, 
c_prev):\n if self.dtype == \"lstm\":\n z_list[0], c_list[0] = self.decoder[0](ey, (z_prev[0], c_prev[0]))\n for l in six.moves.range(1, self.dlayers):\n z_list[l], c_list[l] = self.decoder[l](\n self.dropout_dec[l - 1](z_list[l - 1]), (z_prev[l], c_prev[l]))\n else:\n z_list[0] = self.decoder[0](ey, z_prev[0])\n for l in six.moves.range(1, self.dlayers):\n z_list[l] = self.decoder[l](self.dropout_dec[l - 1](z_list[l - 1]), z_prev[l])\n return z_list, c_list\n\n def forward(self, hs_pad, hlens, ys_pad, strm_idx=0, tgt_lang_ids=None):\n \"\"\"Decoder forward\n\n :param torch.Tensor hs_pad: batch of padded hidden state sequences (B, Tmax, D)\n [in multi-encoder case,\n list of torch.Tensor, [(B, Tmax_1, D), (B, Tmax_2, D), ..., ] ]\n :param torch.Tensor hlens: batch of lengths of hidden state sequences (B)\n [in multi-encoder case, list of torch.Tensor, [(B), (B), ..., ]\n :param torch.Tensor ys_pad: batch of padded character id sequence tensor (B, Lmax)\n :param int strm_idx: stream index indicates the index of decoding stream.\n :param torch.Tensor tgt_lang_ids: batch of target language id tensor (B, 1)\n :return: attention loss value\n :rtype: torch.Tensor\n :return: accuracy\n :rtype: float\n \"\"\"\n # to support mutiple encoder asr mode, in single encoder mode, convert torch.Tensor to List of torch.Tensor\n if self.num_encs == 1:\n hs_pad = [hs_pad]\n hlens = [hlens]\n\n # TODO(kan-bayashi): need to make more smart way\n ys = [y[y != self.ignore_id] for y in ys_pad] # parse padded ys\n # attention index for the attention module\n # in SPA (speaker parallel attention), att_idx is used to select attention module. In other cases, it is 0.\n att_idx = min(strm_idx, len(self.att) - 1)\n\n # hlens should be list of list of integer\n hlens = [list(map(int, hlens[idx])) for idx in range(self.num_encs)]\n\n self.loss = None\n # prepare input and output word sequences with sos/eos IDs\n eos = ys[0].new([self.eos])\n sos = ys[0].new([self.sos])\n if self.replace_sos:\n ys_in = [torch.cat([idx, y], dim=0) for idx, y in zip(tgt_lang_ids, ys)]\n else:\n ys_in = [torch.cat([sos, y], dim=0) for y in ys]\n ys_out = [torch.cat([y, eos], dim=0) for y in ys]\n\n # padding for ys with -1\n # pys: utt x olen\n ys_in_pad = pad_list(ys_in, self.eos)\n ys_out_pad = pad_list(ys_out, self.ignore_id)\n\n # get dim, length info\n batch = ys_out_pad.size(0)\n olength = ys_out_pad.size(1)\n for idx in range(self.num_encs):\n logging.info(\n self.__class__.__name__ + 'Number of Encoder:{}; enc{}: input lengths: {}.'.format(self.num_encs,\n idx + 1, hlens[idx]))\n logging.info(self.__class__.__name__ + ' output lengths: ' + str([y.size(0) for y in ys_out]))\n\n # initialization\n c_list = [self.zero_state(hs_pad[0])]\n z_list = [self.zero_state(hs_pad[0])]\n for _ in six.moves.range(1, self.dlayers):\n c_list.append(self.zero_state(hs_pad[0]))\n z_list.append(self.zero_state(hs_pad[0]))\n z_all = []\n if self.num_encs == 1:\n att_w = None\n self.att[att_idx].reset() # reset pre-computation of h\n else:\n att_w_list = [None] * (self.num_encs + 1) # atts + han\n att_c_list = [None] * (self.num_encs) # atts\n for idx in range(self.num_encs + 1):\n self.att[idx].reset() # reset pre-computation of h in atts and han\n\n # pre-computation of embedding\n eys = self.dropout_emb(self.embed(ys_in_pad)) # utt x olen x zdim\n\n # loop for an output sequence\n for i in six.moves.range(olength):\n if self.num_encs == 1:\n att_c, att_w = self.att[att_idx](hs_pad[0], hlens[0], self.dropout_dec[0](z_list[0]), att_w)\n else:\n for idx 
in range(self.num_encs):\n att_c_list[idx], att_w_list[idx] = self.att[idx](hs_pad[idx], hlens[idx],\n self.dropout_dec[0](z_list[0]), att_w_list[idx])\n hs_pad_han = torch.stack(att_c_list, dim=1)\n hlens_han = [self.num_encs] * len(ys_in)\n att_c, att_w_list[self.num_encs] = self.att[self.num_encs](hs_pad_han, hlens_han,\n self.dropout_dec[0](z_list[0]),\n att_w_list[self.num_encs])\n if i > 0 and random.random() < self.sampling_probability:\n logging.info(' scheduled sampling ')\n z_out = self.output(z_all[-1])\n z_out = np.argmax(z_out.detach().cpu(), axis=1)\n z_out = self.dropout_emb(self.embed(to_device(self, z_out)))\n ey = torch.cat((z_out, att_c), dim=1) # utt x (zdim + hdim)\n else:\n ey = torch.cat((eys[:, i, :], att_c), dim=1) # utt x (zdim + hdim)\n z_list, c_list = self.rnn_forward(ey, z_list, c_list, z_list, c_list)\n if self.context_residual:\n z_all.append(torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1)) # utt x (zdim + hdim)\n else:\n z_all.append(self.dropout_dec[-1](z_list[-1])) # utt x (zdim)\n\n z_all = torch.stack(z_all, dim=1).view(batch * olength, -1)\n # compute loss\n y_all = self.output(z_all)\n if LooseVersion(torch.__version__) < LooseVersion('1.0'):\n reduction_str = 'elementwise_mean'\n else:\n reduction_str = 'mean'\n self.loss = F.cross_entropy(y_all, ys_out_pad.view(-1),\n ignore_index=self.ignore_id,\n reduction=reduction_str)\n # compute perplexity\n ppl = math.exp(self.loss.item())\n # -1: eos, which is removed in the loss computation\n self.loss *= (np.mean([len(x) for x in ys_in]) - 1)\n acc = th_accuracy(y_all, ys_out_pad, ignore_label=self.ignore_id)\n logging.info('att loss:' + ''.join(str(self.loss.item()).split('\\n')))\n\n # show predicted character sequence for debug\n if self.verbose > 0 and self.char_list is not None:\n ys_hat = y_all.view(batch, olength, -1)\n ys_true = ys_out_pad\n for (i, y_hat), y_true in zip(enumerate(ys_hat.detach().cpu().numpy()),\n ys_true.detach().cpu().numpy()):\n if i == MAX_DECODER_OUTPUT:\n break\n idx_hat = np.argmax(y_hat[y_true != self.ignore_id], axis=1)\n idx_true = y_true[y_true != self.ignore_id]\n seq_hat = [self.char_list[int(idx)] for idx in idx_hat]\n seq_true = [self.char_list[int(idx)] for idx in idx_true]\n seq_hat = \"\".join(seq_hat)\n seq_true = \"\".join(seq_true)\n logging.info(\"groundtruth[%d]: \" % i + seq_true)\n logging.info(\"prediction [%d]: \" % i + seq_hat)\n\n if self.labeldist is not None:\n if self.vlabeldist is None:\n self.vlabeldist = to_device(self, torch.from_numpy(self.labeldist))\n loss_reg = - torch.sum((F.log_softmax(y_all, dim=1) * self.vlabeldist).view(-1), dim=0) / len(ys_in)\n self.loss = (1. - self.lsm_weight) * self.loss + self.lsm_weight * loss_reg\n\n return self.loss, acc, ppl\n\n def recognize_beam(self, h, lpz, recog_args, char_list, rnnlm=None, strm_idx=0):\n \"\"\"beam search implementation\n\n :param torch.Tensor h: encoder hidden state (T, eprojs)\n [in multi-encoder case, list of torch.Tensor, [(T1, eprojs), (T2, eprojs), ...] ]\n :param torch.Tensor lpz: ctc log softmax output (T, odim)\n [in multi-encoder case, list of torch.Tensor, [(T1, odim), (T2, odim), ...] 
]\n :param Namespace recog_args: argument Namespace containing options\n :param char_list: list of character strings\n :param torch.nn.Module rnnlm: language module\n :param int strm_idx: stream index for speaker parallel attention in multi-speaker case\n :return: N-best decoding results\n :rtype: list of dicts\n \"\"\"\n # to support mutiple encoder asr mode, in single encoder mode, convert torch.Tensor to List of torch.Tensor\n if self.num_encs == 1:\n h = [h]\n lpz = [lpz]\n if self.num_encs > 1 and lpz is None:\n lpz = [lpz] * self.num_encs\n\n for idx in range(self.num_encs):\n logging.info('Number of Encoder:{}; enc{}: input lengths: {}.'.format(self.num_encs, idx + 1, h[0].size(0)))\n att_idx = min(strm_idx, len(self.att) - 1)\n # initialization\n c_list = [self.zero_state(h[0].unsqueeze(0))]\n z_list = [self.zero_state(h[0].unsqueeze(0))]\n for _ in six.moves.range(1, self.dlayers):\n c_list.append(self.zero_state(h[0].unsqueeze(0)))\n z_list.append(self.zero_state(h[0].unsqueeze(0)))\n if self.num_encs == 1:\n a = None\n self.att[att_idx].reset() # reset pre-computation of h\n else:\n a = [None] * (self.num_encs + 1) # atts + han\n att_w_list = [None] * (self.num_encs + 1) # atts + han\n att_c_list = [None] * (self.num_encs) # atts\n for idx in range(self.num_encs + 1):\n self.att[idx].reset() # reset pre-computation of h in atts and han\n\n # search parms\n beam = recog_args.beam_size\n penalty = recog_args.penalty\n ctc_weight = getattr(recog_args, \"ctc_weight\", False) # for NMT\n\n if lpz[0] is not None and self.num_encs > 1:\n # weights-ctc, e.g. ctc_loss = w_1*ctc_1_loss + w_2 * ctc_2_loss + w_N * ctc_N_loss\n weights_ctc_dec = recog_args.weights_ctc_dec / np.sum(recog_args.weights_ctc_dec) # normalize\n logging.info('ctc weights (decoding): ' + ' '.join([str(x) for x in weights_ctc_dec]))\n else:\n weights_ctc_dec = [1.0]\n\n # preprate sos\n if self.replace_sos and recog_args.tgt_lang:\n y = char_list.index(recog_args.tgt_lang)\n else:\n y = self.sos\n logging.info('<sos> index: ' + str(y))\n logging.info('<sos> mark: ' + char_list[y])\n vy = h[0].new_zeros(1).long()\n\n maxlen = np.amin([h[idx].size(0) for idx in range(self.num_encs)])\n if recog_args.maxlenratio != 0:\n # maxlen >= 1\n maxlen = max(1, int(recog_args.maxlenratio * maxlen))\n minlen = int(recog_args.minlenratio * maxlen)\n logging.info('max output length: ' + str(maxlen))\n logging.info('min output length: ' + str(minlen))\n\n # initialize hypothesis\n if rnnlm:\n hyp = {'score': 0.0, 'yseq': [y], 'c_prev': c_list,\n 'z_prev': z_list, 'a_prev': a, 'rnnlm_prev': None}\n else:\n hyp = {'score': 0.0, 'yseq': [y], 'c_prev': c_list, 'z_prev': z_list, 'a_prev': a}\n if lpz[0] is not None:\n ctc_prefix_score = [CTCPrefixScore(lpz[idx].detach().numpy(), 0, self.eos, np) for idx in\n range(self.num_encs)]\n hyp['ctc_state_prev'] = [ctc_prefix_score[idx].initial_state() for idx in range(self.num_encs)]\n hyp['ctc_score_prev'] = [0.0] * self.num_encs\n if ctc_weight != 1.0:\n # pre-pruning based on attention scores\n ctc_beam = min(lpz[0].shape[-1], int(beam * CTC_SCORING_RATIO))\n else:\n ctc_beam = lpz[0].shape[-1]\n hyps = [hyp]\n ended_hyps = []\n\n for i in six.moves.range(maxlen):\n logging.debug('position ' + str(i))\n\n hyps_best_kept = []\n for hyp in hyps:\n vy.unsqueeze(1)\n vy[0] = hyp['yseq'][i]\n ey = self.dropout_emb(self.embed(vy)) # utt list (1) x zdim\n ey.unsqueeze(0)\n if self.num_encs == 1:\n att_c, att_w = self.att[att_idx](h[0].unsqueeze(0), [h[0].size(0)],\n 
self.dropout_dec[0](hyp['z_prev'][0]), hyp['a_prev'])\n else:\n for idx in range(self.num_encs):\n att_c_list[idx], att_w_list[idx] = self.att[idx](h[idx].unsqueeze(0), [h[idx].size(0)],\n self.dropout_dec[0](hyp['z_prev'][0]),\n hyp['a_prev'][idx])\n h_han = torch.stack(att_c_list, dim=1)\n att_c, att_w_list[self.num_encs] = self.att[self.num_encs](h_han, [self.num_encs],\n self.dropout_dec[0](hyp['z_prev'][0]),\n hyp['a_prev'][self.num_encs])\n ey = torch.cat((ey, att_c), dim=1) # utt(1) x (zdim + hdim)\n z_list, c_list = self.rnn_forward(ey, z_list, c_list, hyp['z_prev'], hyp['c_prev'])\n\n # get nbest local scores and their ids\n if self.context_residual:\n logits = self.output(torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1))\n else:\n logits = self.output(self.dropout_dec[-1](z_list[-1]))\n local_att_scores = F.log_softmax(logits, dim=1)\n if rnnlm:\n rnnlm_state, local_lm_scores = rnnlm.predict(hyp['rnnlm_prev'], vy)\n local_scores = local_att_scores + recog_args.lm_weight * local_lm_scores\n else:\n local_scores = local_att_scores\n\n if lpz[0] is not None:\n local_best_scores, local_best_ids = torch.topk(\n local_att_scores, ctc_beam, dim=1)\n ctc_scores, ctc_states = [None] * self.num_encs, [None] * self.num_encs\n for idx in range(self.num_encs):\n ctc_scores[idx], ctc_states[idx] = ctc_prefix_score[idx](\n hyp['yseq'], local_best_ids[0], hyp['ctc_state_prev'][idx])\n local_scores = \\\n (1.0 - ctc_weight) * local_att_scores[:, local_best_ids[0]]\n if self.num_encs == 1:\n local_scores += ctc_weight * torch.from_numpy(ctc_scores[0] - hyp['ctc_score_prev'][0])\n else:\n for idx in range(self.num_encs):\n local_scores += ctc_weight * weights_ctc_dec[idx] * torch.from_numpy(\n ctc_scores[idx] - hyp['ctc_score_prev'][idx])\n if rnnlm:\n local_scores += recog_args.lm_weight * local_lm_scores[:, local_best_ids[0]]\n local_best_scores, joint_best_ids = torch.topk(local_scores, beam, dim=1)\n local_best_ids = local_best_ids[:, joint_best_ids[0]]\n else:\n local_best_scores, local_best_ids = torch.topk(local_scores, beam, dim=1)\n\n for j in six.moves.range(beam):\n new_hyp = {}\n # [:] is needed!\n new_hyp['z_prev'] = z_list[:]\n new_hyp['c_prev'] = c_list[:]\n if self.num_encs == 1:\n new_hyp['a_prev'] = att_w[:]\n else:\n new_hyp['a_prev'] = [att_w_list[idx][:] for idx in range(self.num_encs + 1)]\n new_hyp['score'] = hyp['score'] + local_best_scores[0, j]\n new_hyp['yseq'] = [0] * (1 + len(hyp['yseq']))\n new_hyp['yseq'][:len(hyp['yseq'])] = hyp['yseq']\n new_hyp['yseq'][len(hyp['yseq'])] = int(local_best_ids[0, j])\n if rnnlm:\n new_hyp['rnnlm_prev'] = rnnlm_state\n if lpz[0] is not None:\n new_hyp['ctc_state_prev'] = [ctc_states[idx][joint_best_ids[0, j]] for idx in\n range(self.num_encs)]\n new_hyp['ctc_score_prev'] = [ctc_scores[idx][joint_best_ids[0, j]] for idx in\n range(self.num_encs)]\n # will be (2 x beam) hyps at most\n hyps_best_kept.append(new_hyp)\n\n hyps_best_kept = sorted(\n hyps_best_kept, key=lambda x: x['score'], reverse=True)[:beam]\n\n # sort and get nbest\n hyps = hyps_best_kept\n logging.debug('number of pruned hypotheses: ' + str(len(hyps)))\n logging.debug(\n 'best hypo: ' + ''.join([char_list[int(x)] for x in hyps[0]['yseq'][1:]]))\n\n # add eos in the final loop to avoid that there are no ended hyps\n if i == maxlen - 1:\n logging.info('adding <eos> in the last position in the loop')\n for hyp in hyps:\n hyp['yseq'].append(self.eos)\n\n # add ended hypotheses to a final list, and removed them from current hypotheses\n # (this will be a problem, 
number of hyps < beam)\n remained_hyps = []\n for hyp in hyps:\n if hyp['yseq'][-1] == self.eos:\n # only store the sequence that has more than minlen outputs\n # also add penalty\n if len(hyp['yseq']) > minlen:\n hyp['score'] += (i + 1) * penalty\n if rnnlm: # Word LM needs to add final <eos> score\n hyp['score'] += recog_args.lm_weight * rnnlm.final(\n hyp['rnnlm_prev'])\n ended_hyps.append(hyp)\n else:\n remained_hyps.append(hyp)\n\n # end detection\n if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0:\n logging.info('end detected at %d', i)\n break\n\n hyps = remained_hyps\n if len(hyps) > 0:\n logging.debug('remaining hypotheses: ' + str(len(hyps)))\n else:\n logging.info('no hypothesis. Finish decoding.')\n break\n\n for hyp in hyps:\n logging.debug(\n 'hypo: ' + ''.join([char_list[int(x)] for x in hyp['yseq'][1:]]))\n\n logging.debug('number of ended hypotheses: ' + str(len(ended_hyps)))\n\n nbest_hyps = sorted(\n ended_hyps, key=lambda x: x['score'], reverse=True)[:min(len(ended_hyps), recog_args.nbest)]\n\n # check number of hypotheses\n if len(nbest_hyps) == 0:\n logging.warning('there is no N-best results, perform recognition again with smaller minlenratio.')\n # should copy because Namespace will be overwritten globally\n recog_args = Namespace(**vars(recog_args))\n recog_args.minlenratio = max(0.0, recog_args.minlenratio - 0.1)\n if self.num_encs == 1:\n return self.recognize_beam(h[0], lpz[0], recog_args, char_list, rnnlm)\n else:\n return self.recognize_beam(h, lpz, recog_args, char_list, rnnlm)\n\n logging.info('total log probability: ' + str(nbest_hyps[0]['score']))\n logging.info('normalized log probability: ' + str(nbest_hyps[0]['score'] / len(nbest_hyps[0]['yseq'])))\n\n # remove sos\n return nbest_hyps\n\n def recognize_beam_batch(self, h, hlens, lpz, recog_args, char_list, rnnlm=None,\n normalize_score=True, strm_idx=0, tgt_lang_ids=None):\n # to support mutiple encoder asr mode, in single encoder mode, convert torch.Tensor to List of torch.Tensor\n if self.num_encs == 1:\n h = [h]\n hlens = [hlens]\n lpz = [lpz]\n if self.num_encs > 1 and lpz is None:\n lpz = [lpz] * self.num_encs\n\n att_idx = min(strm_idx, len(self.att) - 1)\n for idx in range(self.num_encs):\n logging.info(\n 'Number of Encoder:{}; enc{}: input lengths: {}.'.format(self.num_encs, idx + 1, h[idx].size(1)))\n h[idx] = mask_by_length(h[idx], hlens[idx], 0.0)\n\n # search params\n batch = len(hlens[0])\n beam = recog_args.beam_size\n penalty = recog_args.penalty\n ctc_weight = getattr(recog_args, \"ctc_weight\", 0) # for NMT\n att_weight = 1.0 - ctc_weight\n ctc_margin = getattr(recog_args, \"ctc_window_margin\", 0) # use getattr to keep compatibility\n # weights-ctc, e.g. 
ctc_loss = w_1*ctc_1_loss + w_2 * ctc_2_loss + w_N * ctc_N_loss\n if lpz[0] is not None and self.num_encs > 1:\n weights_ctc_dec = recog_args.weights_ctc_dec / np.sum(recog_args.weights_ctc_dec) # normalize\n logging.info('ctc weights (decoding): ' + ' '.join([str(x) for x in weights_ctc_dec]))\n else:\n weights_ctc_dec = [1.0]\n\n n_bb = batch * beam\n pad_b = to_device(self, torch.arange(batch) * beam).view(-1, 1)\n\n max_hlen = np.amin([max(hlens[idx]) for idx in range(self.num_encs)])\n if recog_args.maxlenratio == 0:\n maxlen = max_hlen\n else:\n maxlen = max(1, int(recog_args.maxlenratio * max_hlen))\n minlen = int(recog_args.minlenratio * max_hlen)\n logging.info('max output length: ' + str(maxlen))\n logging.info('min output length: ' + str(minlen))\n\n # initialization\n c_prev = [to_device(self, torch.zeros(n_bb, self.dunits)) for _ in range(self.dlayers)]\n z_prev = [to_device(self, torch.zeros(n_bb, self.dunits)) for _ in range(self.dlayers)]\n c_list = [to_device(self, torch.zeros(n_bb, self.dunits)) for _ in range(self.dlayers)]\n z_list = [to_device(self, torch.zeros(n_bb, self.dunits)) for _ in range(self.dlayers)]\n vscores = to_device(self, torch.zeros(batch, beam))\n\n rnnlm_state = None\n if self.num_encs == 1:\n a_prev = [None]\n att_w_list, ctc_scorer, ctc_state = [None], [None], [None]\n self.att[att_idx].reset() # reset pre-computation of h\n else:\n a_prev = [None] * (self.num_encs + 1) # atts + han\n att_w_list = [None] * (self.num_encs + 1) # atts + han\n att_c_list = [None] * (self.num_encs) # atts\n ctc_scorer, ctc_state = [None] * (self.num_encs), [None] * (self.num_encs)\n for idx in range(self.num_encs + 1):\n self.att[idx].reset() # reset pre-computation of h in atts and han\n\n if self.replace_sos and recog_args.tgt_lang:\n logging.info('<sos> index: ' + str(char_list.index(recog_args.tgt_lang)))\n logging.info('<sos> mark: ' + recog_args.tgt_lang)\n yseq = [[char_list.index(recog_args.tgt_lang)] for _ in six.moves.range(n_bb)]\n elif tgt_lang_ids is not None:\n # NOTE: used for evaluation during training\n yseq = [[tgt_lang_ids[b // recog_args.beam_size]] for b in six.moves.range(n_bb)]\n else:\n logging.info('<sos> index: ' + str(self.sos))\n logging.info('<sos> mark: ' + char_list[self.sos])\n yseq = [[self.sos] for _ in six.moves.range(n_bb)]\n\n accum_odim_ids = [self.sos for _ in six.moves.range(n_bb)]\n stop_search = [False for _ in six.moves.range(batch)]\n nbest_hyps = [[] for _ in six.moves.range(batch)]\n ended_hyps = [[] for _ in range(batch)]\n\n exp_hlens = [hlens[idx].repeat(beam).view(beam, batch).transpose(0, 1).contiguous() for idx in\n range(self.num_encs)]\n exp_hlens = [exp_hlens[idx].view(-1).tolist() for idx in range(self.num_encs)]\n exp_h = [h[idx].unsqueeze(1).repeat(1, beam, 1, 1).contiguous() for idx in range(self.num_encs)]\n exp_h = [exp_h[idx].view(n_bb, h[idx].size()[1], h[idx].size()[2]) for idx in range(self.num_encs)]\n\n if lpz[0] is not None:\n scoring_ratio = CTC_SCORING_RATIO if att_weight > 0.0 and not lpz[0].is_cuda else 0\n ctc_scorer = [CTCPrefixScoreTH(lpz[idx], hlens[idx], 0, self.eos, beam,\n scoring_ratio, margin=ctc_margin) for idx in range(self.num_encs)]\n\n for i in six.moves.range(maxlen):\n logging.debug('position ' + str(i))\n\n vy = to_device(self, torch.LongTensor(self._get_last_yseq(yseq)))\n ey = self.dropout_emb(self.embed(vy))\n if self.num_encs == 1:\n att_c, att_w = self.att[att_idx](exp_h[0], exp_hlens[0], self.dropout_dec[0](z_prev[0]), a_prev[0])\n att_w_list = [att_w]\n else:\n for idx in 
range(self.num_encs):\n att_c_list[idx], att_w_list[idx] = self.att[idx](exp_h[idx], exp_hlens[idx],\n self.dropout_dec[0](z_prev[0]), a_prev[idx])\n exp_h_han = torch.stack(att_c_list, dim=1)\n att_c, att_w_list[self.num_encs] = self.att[self.num_encs](exp_h_han, [self.num_encs] * n_bb,\n self.dropout_dec[0](z_prev[0]),\n a_prev[self.num_encs])\n ey = torch.cat((ey, att_c), dim=1)\n\n # attention decoder\n z_list, c_list = self.rnn_forward(ey, z_list, c_list, z_prev, c_prev)\n if self.context_residual:\n logits = self.output(torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1))\n else:\n logits = self.output(self.dropout_dec[-1](z_list[-1]))\n local_scores = att_weight * F.log_softmax(logits, dim=1)\n\n # rnnlm\n if rnnlm:\n rnnlm_state, local_lm_scores = rnnlm.buff_predict(rnnlm_state, vy, n_bb)\n local_scores = local_scores + recog_args.lm_weight * local_lm_scores\n\n # ctc\n if ctc_scorer[0]:\n for idx in range(self.num_encs):\n att_w = att_w_list[idx]\n att_w_ = att_w if isinstance(att_w, torch.Tensor) else att_w[0]\n ctc_state[idx], local_ctc_scores = ctc_scorer[idx](yseq, ctc_state[idx], local_scores, att_w_)\n local_scores = local_scores + ctc_weight * weights_ctc_dec[idx] * local_ctc_scores\n\n local_scores = local_scores.view(batch, beam, self.odim)\n if i == 0:\n local_scores[:, 1:, :] = self.logzero\n\n # accumulate scores\n eos_vscores = local_scores[:, :, self.eos] + vscores\n vscores = vscores.view(batch, beam, 1).repeat(1, 1, self.odim)\n vscores[:, :, self.eos] = self.logzero\n vscores = (vscores + local_scores).view(batch, -1)\n\n # global pruning\n accum_best_scores, accum_best_ids = torch.topk(vscores, beam, 1)\n accum_odim_ids = torch.fmod(accum_best_ids, self.odim).view(-1).data.cpu().tolist()\n accum_padded_beam_ids = (torch.div(accum_best_ids, self.odim) + pad_b).view(-1).data.cpu().tolist()\n\n y_prev = yseq[:][:]\n yseq = self._index_select_list(yseq, accum_padded_beam_ids)\n yseq = self._append_ids(yseq, accum_odim_ids)\n vscores = accum_best_scores\n vidx = to_device(self, torch.LongTensor(accum_padded_beam_ids))\n\n a_prev = []\n num_atts = self.num_encs if self.num_encs == 1 else self.num_encs + 1\n for idx in range(num_atts):\n if isinstance(att_w_list[idx], torch.Tensor):\n _a_prev = torch.index_select(att_w_list[idx].view(n_bb, *att_w_list[idx].shape[1:]), 0, vidx)\n elif isinstance(att_w_list[idx], list):\n # handle the case of multi-head attention\n _a_prev = [torch.index_select(att_w_one.view(n_bb, -1), 0, vidx) for att_w_one in att_w_list[idx]]\n else:\n # handle the case of location_recurrent when return is a tuple\n _a_prev_ = torch.index_select(att_w_list[idx][0].view(n_bb, -1), 0, vidx)\n _h_prev_ = torch.index_select(att_w_list[idx][1][0].view(n_bb, -1), 0, vidx)\n _c_prev_ = torch.index_select(att_w_list[idx][1][1].view(n_bb, -1), 0, vidx)\n _a_prev = (_a_prev_, (_h_prev_, _c_prev_))\n a_prev.append(_a_prev)\n z_prev = [torch.index_select(z_list[li].view(n_bb, -1), 0, vidx) for li in range(self.dlayers)]\n c_prev = [torch.index_select(c_list[li].view(n_bb, -1), 0, vidx) for li in range(self.dlayers)]\n\n # pick ended hyps\n if i > minlen:\n k = 0\n penalty_i = (i + 1) * penalty\n thr = accum_best_scores[:, -1]\n for samp_i in six.moves.range(batch):\n if stop_search[samp_i]:\n k = k + beam\n continue\n for beam_j in six.moves.range(beam):\n if eos_vscores[samp_i, beam_j] > thr[samp_i]:\n yk = y_prev[k][:]\n yk.append(self.eos)\n if len(yk) < min(hlens[idx][samp_i] for idx in range(self.num_encs)):\n _vscore = eos_vscores[samp_i][beam_j] + 
penalty_i\n if rnnlm:\n _vscore += recog_args.lm_weight * rnnlm.final(rnnlm_state, index=k)\n _score = _vscore.data.cpu().numpy()\n ended_hyps[samp_i].append({'yseq': yk, 'vscore': _vscore, 'score': _score})\n k = k + 1\n\n # end detection\n stop_search = [stop_search[samp_i] or end_detect(ended_hyps[samp_i], i)\n for samp_i in six.moves.range(batch)]\n stop_search_summary = list(set(stop_search))\n if len(stop_search_summary) == 1 and stop_search_summary[0]:\n break\n\n if rnnlm:\n rnnlm_state = self._index_select_lm_state(rnnlm_state, 0, vidx)\n if ctc_scorer[0]:\n for idx in range(self.num_encs):\n ctc_state[idx] = ctc_scorer[idx].index_select_state(ctc_state[idx], accum_best_ids)\n\n torch.cuda.empty_cache()\n\n dummy_hyps = [{'yseq': [self.sos, self.eos], 'score': np.array([-float('inf')])}]\n ended_hyps = [ended_hyps[samp_i] if len(ended_hyps[samp_i]) != 0 else dummy_hyps\n for samp_i in six.moves.range(batch)]\n if normalize_score:\n for samp_i in six.moves.range(batch):\n for x in ended_hyps[samp_i]:\n x['score'] /= len(x['yseq'])\n\n nbest_hyps = [sorted(ended_hyps[samp_i], key=lambda x: x['score'],\n reverse=True)[:min(len(ended_hyps[samp_i]), recog_args.nbest)]\n for samp_i in six.moves.range(batch)]\n\n return nbest_hyps\n\n def calculate_all_attentions(self, hs_pad, hlen, ys_pad, strm_idx=0, tgt_lang_ids=None):\n \"\"\"Calculate all of attentions\n\n :param torch.Tensor hs_pad: batch of padded hidden state sequences (B, Tmax, D)\n [in multi-encoder case,\n list of torch.Tensor, [(B, Tmax_1, D), (B, Tmax_2, D), ..., ] ]\n :param torch.Tensor hlen: batch of lengths of hidden state sequences (B)\n [in multi-encoder case, list of torch.Tensor, [(B), (B), ..., ]\n :param torch.Tensor ys_pad: batch of padded character id sequence tensor (B, Lmax)\n :param int strm_idx: stream index for parallel speaker attention in multi-speaker case\n :param torch.Tensor tgt_lang_ids: batch of target language id tensor (B, 1)\n :return: attention weights with the following shape,\n 1) multi-head case => attention weights (B, H, Lmax, Tmax),\n 2) multi-encoder case => [(B, Lmax, Tmax1), (B, Lmax, Tmax2), ..., (B, Lmax, NumEncs)]\n 3) other case => attention weights (B, Lmax, Tmax).\n :rtype: float ndarray\n \"\"\"\n # to support mutiple encoder asr mode, in single encoder mode, convert torch.Tensor to List of torch.Tensor\n if self.num_encs == 1:\n hs_pad = [hs_pad]\n hlen = [hlen]\n\n # TODO(kan-bayashi): need to make more smart way\n ys = [y[y != self.ignore_id] for y in ys_pad] # parse padded ys\n att_idx = min(strm_idx, len(self.att) - 1)\n\n # hlen should be list of list of integer\n hlen = [list(map(int, hlen[idx])) for idx in range(self.num_encs)]\n\n self.loss = None\n # prepare input and output word sequences with sos/eos IDs\n eos = ys[0].new([self.eos])\n sos = ys[0].new([self.sos])\n if self.replace_sos:\n ys_in = [torch.cat([idx, y], dim=0) for idx, y in zip(tgt_lang_ids, ys)]\n else:\n ys_in = [torch.cat([sos, y], dim=0) for y in ys]\n ys_out = [torch.cat([y, eos], dim=0) for y in ys]\n\n # padding for ys with -1\n # pys: utt x olen\n ys_in_pad = pad_list(ys_in, self.eos)\n ys_out_pad = pad_list(ys_out, self.ignore_id)\n\n # get length info\n olength = ys_out_pad.size(1)\n\n # initialization\n c_list = [self.zero_state(hs_pad[0])]\n z_list = [self.zero_state(hs_pad[0])]\n for _ in six.moves.range(1, self.dlayers):\n c_list.append(self.zero_state(hs_pad[0]))\n z_list.append(self.zero_state(hs_pad[0]))\n att_ws = []\n if self.num_encs == 1:\n att_w = None\n self.att[att_idx].reset() # 
reset pre-computation of h\n else:\n att_w_list = [None] * (self.num_encs + 1) # atts + han\n att_c_list = [None] * (self.num_encs) # atts\n for idx in range(self.num_encs + 1):\n self.att[idx].reset() # reset pre-computation of h in atts and han\n\n # pre-computation of embedding\n eys = self.dropout_emb(self.embed(ys_in_pad)) # utt x olen x zdim\n\n # loop for an output sequence\n for i in six.moves.range(olength):\n if self.num_encs == 1:\n att_c, att_w = self.att[att_idx](hs_pad[0], hlen[0], self.dropout_dec[0](z_list[0]), att_w)\n att_ws.append(att_w)\n else:\n for idx in range(self.num_encs):\n att_c_list[idx], att_w_list[idx] = self.att[idx](hs_pad[idx], hlen[idx],\n self.dropout_dec[0](z_list[0]), att_w_list[idx])\n hs_pad_han = torch.stack(att_c_list, dim=1)\n hlen_han = [self.num_encs] * len(ys_in)\n att_c, att_w_list[self.num_encs] = self.att[self.num_encs](hs_pad_han, hlen_han,\n self.dropout_dec[0](z_list[0]),\n att_w_list[self.num_encs])\n att_ws.append(att_w_list)\n ey = torch.cat((eys[:, i, :], att_c), dim=1) # utt x (zdim + hdim)\n z_list, c_list = self.rnn_forward(ey, z_list, c_list, z_list, c_list)\n\n if self.num_encs == 1:\n # convert to numpy array with the shape (B, Lmax, Tmax)\n att_ws = att_to_numpy(att_ws, self.att[att_idx])\n else:\n _att_ws = []\n for idx, ws in enumerate(zip(*att_ws)):\n ws = att_to_numpy(ws, self.att[idx])\n _att_ws.append(ws)\n att_ws = _att_ws\n return att_ws\n\n @staticmethod\n def _get_last_yseq(exp_yseq):\n last = []\n for y_seq in exp_yseq:\n last.append(y_seq[-1])\n return last\n\n @staticmethod\n def _append_ids(yseq, ids):\n if isinstance(ids, list):\n for i, j in enumerate(ids):\n yseq[i].append(j)\n else:\n for i in range(len(yseq)):\n yseq[i].append(ids)\n return yseq\n\n @staticmethod\n def _index_select_list(yseq, lst):\n new_yseq = []\n for l in lst:\n new_yseq.append(yseq[l][:])\n return new_yseq\n\n @staticmethod\n def _index_select_lm_state(rnnlm_state, dim, vidx):\n if isinstance(rnnlm_state, dict):\n new_state = {}\n for k, v in rnnlm_state.items():\n new_state[k] = [torch.index_select(vi, dim, vidx) for vi in v]\n elif isinstance(rnnlm_state, list):\n new_state = []\n for i in vidx:\n new_state.append(rnnlm_state[int(i)][:])\n return new_state\n\n # scorer interface methods\n def init_state(self, x):\n # to support mutiple encoder asr mode, in single encoder mode, convert torch.Tensor to List of torch.Tensor\n if self.num_encs == 1:\n x = [x]\n\n c_list = [self.zero_state(x[0].unsqueeze(0))]\n z_list = [self.zero_state(x[0].unsqueeze(0))]\n for _ in six.moves.range(1, self.dlayers):\n c_list.append(self.zero_state(x[0].unsqueeze(0)))\n z_list.append(self.zero_state(x[0].unsqueeze(0)))\n # TODO(karita): support strm_index for `asr_mix`\n strm_index = 0\n att_idx = min(strm_index, len(self.att) - 1)\n if self.num_encs == 1:\n a = None\n self.att[att_idx].reset() # reset pre-computation of h\n else:\n a = [None] * (self.num_encs + 1) # atts + han\n for idx in range(self.num_encs + 1):\n self.att[idx].reset() # reset pre-computation of h in atts and han\n return dict(c_prev=c_list[:], z_prev=z_list[:], a_prev=a, workspace=(att_idx, z_list, c_list))\n\n def score(self, yseq, state, x):\n # to support mutiple encoder asr mode, in single encoder mode, convert torch.Tensor to List of torch.Tensor\n if self.num_encs == 1:\n x = [x]\n\n att_idx, z_list, c_list = state[\"workspace\"]\n vy = yseq[-1].unsqueeze(0)\n ey = self.dropout_emb(self.embed(vy)) # utt list (1) x zdim\n if self.num_encs == 1:\n att_c, att_w = 
self.att[att_idx](\n x[0].unsqueeze(0), [x[0].size(0)],\n self.dropout_dec[0](state['z_prev'][0]), state['a_prev'])\n else:\n att_w = [None] * (self.num_encs + 1) # atts + han\n att_c_list = [None] * (self.num_encs) # atts\n for idx in range(self.num_encs):\n att_c_list[idx], att_w[idx] = self.att[idx](x[idx].unsqueeze(0), [x[idx].size(0)],\n self.dropout_dec[0](state['z_prev'][0]),\n state['a_prev'][idx])\n h_han = torch.stack(att_c_list, dim=1)\n att_c, att_w[self.num_encs] = self.att[self.num_encs](h_han, [self.num_encs],\n self.dropout_dec[0](state['z_prev'][0]),\n state['a_prev'][self.num_encs])\n ey = torch.cat((ey, att_c), dim=1) # utt(1) x (zdim + hdim)\n z_list, c_list = self.rnn_forward(ey, z_list, c_list, state['z_prev'], state['c_prev'])\n if self.context_residual:\n logits = self.output(torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1))\n else:\n logits = self.output(self.dropout_dec[-1](z_list[-1]))\n logp = F.log_softmax(logits, dim=1).squeeze(0)\n return logp, dict(c_prev=c_list[:], z_prev=z_list[:], a_prev=att_w, workspace=(att_idx, z_list, c_list))\n\n\ndef decoder_for(args, odim, sos, eos, att, labeldist):\n return Decoder(args.eprojs, odim, args.dtype, args.dlayers, args.dunits, sos, eos, att, args.verbose,\n args.char_list, labeldist,\n args.lsm_weight, args.sampling_probability, args.dropout_rate_decoder,\n getattr(args, \"context_residual\", False), # use getattr to keep compatibility\n getattr(args, \"replace_sos\", False), # use getattr to keep compatibility\n getattr(args, \"num_encs\", 1)) # use getattr to keep compatibility\n"
] |
[
[
"torch.fmod",
"torch.cat",
"torch.zeros",
"torch.nn.Embedding",
"torch.topk",
"torch.nn.Dropout",
"torch.from_numpy",
"torch.nn.LSTMCell",
"numpy.argmax",
"torch.arange",
"torch.nn.GRUCell",
"torch.index_select",
"torch.div",
"torch.LongTensor",
"torch.nn.ModuleList",
"torch.cuda.empty_cache",
"torch.nn.Linear",
"torch.stack",
"numpy.sum",
"torch.nn.Module.__init__",
"torch.nn.functional.log_softmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
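The row above is ESPnet's attention decoder for RNN-based end-to-end ASR. A minimal forward-pass sketch follows; it is illustrative and not part of the dataset row, it assumes the file is importable as `espnet.nets.pytorch_backend.rnn.decoders` (ESPnet 1.x), and it substitutes a hypothetical `StubAtt` mean-pooling module for a real attention implementation. Shapes follow the docstrings in the row's code.

```python
# Illustrative only -- not part of the dataset row. Assumes the code above is
# importable as espnet.nets.pytorch_backend.rnn.decoders (ESPnet 1.x).
import torch

from espnet.nets.pytorch_backend.rnn.decoders import Decoder


class StubAtt(torch.nn.Module):
    """Hypothetical mean-pooling stand-in for an ESPnet attention module."""

    def reset(self):
        pass  # real attentions drop their cached encoder projections here

    def forward(self, hs_pad, hlens, dec_z, att_prev):
        # uniform attention weights over the time axis -> context is a mean pool
        att_w = hs_pad.new_full(hs_pad.shape[:2], 1.0 / hs_pad.size(1))
        att_c = (hs_pad * att_w.unsqueeze(-1)).sum(dim=1)  # (B, eprojs)
        return att_c, att_w


eprojs, odim = 32, 10
dec = Decoder(eprojs=eprojs, odim=odim, dtype="lstm", dlayers=1, dunits=16,
              sos=odim - 1, eos=odim - 1, att=torch.nn.ModuleList([StubAtt()]))

hs_pad = torch.randn(2, 7, eprojs)            # (B, Tmax, D) as in the docstring
hlens = [7, 5]                                # encoder output lengths per utterance
ys_pad = torch.randint(0, odim - 1, (2, 5))   # (B, Lmax) target token ids
loss, acc, ppl = dec(hs_pad, hlens, ys_pad)
print(loss.item(), acc, ppl)
```

The same object also exposes `recognize_beam` and `recognize_beam_batch` for decoding, but those only become meaningful with trained attention, CTC, and (optionally) RNNLM modules rather than a stub.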
ertanusta/Faust
|
[
"fc47575174756a7e2aca695c42640db356fde98e"
] |
[
"public/script/web.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*- \nimport numpy as np\nimport re\nimport sys\nimport pickle\nfrom sklearn.datasets import load_files\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.metrics import classification_report\nimport pickle\nfrom keras.models import model_from_json\n\nstop_word_list=[\"şer\",\"acaba\",\"altmış\",\"altı\",\"ama\",\"ilk\",\"ancak\",\"arada\",\"aslında\",\"ayrıca\",\"bana\",\n \"bazı\",\"belki\",\"ben\",\"benden\",\"beni\",\"benim\",\"beri\",\"beş\",\"bile\",\"bin\",\"bir\",\"birçok\",\n \"biri\",\"birkaç\",\"birkez\",\"birşey\",\"birşeyi\",\"biz\",\"bize\",\"bizden\",\"bizi\",\"bizim\",\"böyle\",\n \"böylece\",\"bu\",\"buna\",\"bunda\",\"bundan\",\"bunlar\",\"bunları\",\"bunların\",\"bunu\",\"bunun\",\"burada\",\n \"çok\",\"çünkü\",\"da\",\"daha\",\"dahi\",\"de\",\"defa\",\"değil\",\"diğer\",\"diye\",\"doksan\",\"dokuz\",\"dolayı\",\n \"dolayısıyla\",\"dört\",\"edecek\",\"eden\",\"ederek\",\"edilecek\",\"ediliyor\",\"edilmesi\",\"ediyor\",\"eğer\",\n \"elli\",\"en\",\"etmesi\",\"etti\",\"ettiği\",\"ettiğini\",\"gibi\",\"göre\",\"halen\",\"hangi\",\"hatta\",\"hem\",\"henüz\",\n \"hep\",\"hepsi\",\"her\",\"herhangi\",\"herkesin\",\"hiç\",\"hiçbir\",\"için\",\"iki\",\"ile\",\"ilgili\",\"ise\",\"işte\",\n \"itibaren\",\"itibariyle\",\"kadar\",\"karşın\",\"katrilyon\",\"kendi\",\"kendilerine\",\"kendini\",\"kendisi\",\n \"kendisine\",\"kendisini\",\"kez\",\"ki\",\"kim\",\"kimden\",\"kime\",\"kimi\",\"kimse\",\"kırk\",\"milyar\",\"milyon\",\n \"mu\",\"mü\",\"mı\",\"nasıl\",\"ne\",\"neden\",\"nedenle\",\"nerde\",\"nerede\",\"nereye\",\"niye\",\"niçin\",\"o\",\"olan\",\n \"olarak\",\"oldu\",\"olduğu\",\"olduğunu\",\"olduklarını\",\"olmadı\",\"olmadığı\",\"olmak\",\"olması\",\"olmayan\",\n \"olmaz\",\"olsa\",\"olsun\",\"olup\",\"olur\",\"olursa\",\"oluyor\",\"on\",\"ona\",\"ondan\",\"onlar\",\"onlardan\",\"onları\",\n \"onların\",\"onu\",\"onun\",\"otuz\",\"oysa\",\"öyle\",\"pek\",\"rağmen\",\"sadece\",\"sanki\",\"sekiz\",\"seksen\",\"sen\",\n \"senden\",\"seni\",\"senin\",\"siz\",\"sizden\",\"sizi\",\"sizin\",\"şey\",\"şeyden\",\"şeyi\",\"şeyler\",\"şöyle\",\"şu\",\n \"şuna\",\"şunda\",\"şundan\",\"şunları\",\"şunu\",\"tarafından\",\"trilyon\",\"tüm\",\"üç\",\"üzere\",\"var\",\"vardı\",\"ve\",\n \"veya\",\"ya\",\"yani\",\"yapacak\",\"yapılan\",\"yapılması\",\"yapıyor\",\"yapmak\",\"yaptı\",\"yaptığı\",\"yaptığını\",\n \"yaptıkları\",\"yedi\",\"yerine\",\"yetmiş\",\"yine\",\"yirmi\",\"yoksa\",\"yüz\",\"zaten\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\"\n ,\"8\",\"9\",\"10\",\"11\",\"12\",\"13\",\"14\",\"15\",\"16\",\"17\",\"18\",\"19\",\"20\",\"21\",\"22\",\"23\",\"24\",\"25\",\"26\",\"27\",\"28\",\"29\",\"30\"\n ,\"31\",\"ocak\",\"şubat\",\"mart\",\"nisan\",\"mayıs\",\"haziran\",\"temmuz\",\"ağustos\",\"eylül\",\"ekim\",\"kasım\",\"aralık\",\"hafta\",\n \"ay\",\"gün\",\"saat\",\":\",\",\",\";\",\"!\",\"?\",\"-\",\"_\",\"/\",\"*\",\"+\",\"(\",\")\",\"{\",\"}\",\"%\",\"&\",\"#\",'\"',\"'\",\"@\",\".\"]\ndef norm_doc(single_doc):\n \n single_doc = re.sub(\" \\d+\", \" \", single_doc)\n \n single_doc = single_doc.lower()\n single_doc = single_doc.strip()\n \n single_doc=single_doc.split(\" \")\n \n filtered_tokens = [token for token in single_doc if token not in 
stop_word_list]\n \n single_doc = ' '.join(filtered_tokens)\n return single_doc\npath=sys.argv[1]\nmodelSelect=sys.argv[2]\ntext=\"\"\nf = open(\"/var/www/html/Faust/storage/dataTest/\"+path+\".txt\", \"r\", encoding='utf8')\nfor i in f:\n text=text+i+\" \"\nf.close()\ntext=text.replace(\"İ\",\"i\")\ntext=text.replace(\"Ç\",\"ç\")\ntext = text.replace(\"Ö\", \"ö\")\ntext = text.replace(\"Ğ\", \"ğ\")\ntext = text.replace(\"Ş\", \"ş\")\ndocs = np.array(text)\n\nnorm_docs = np.vectorize(norm_doc)\nnormalized_documents = norm_docs(docs)\n# 0=>RandomForest\n# 1=>Naive Bayes Classifier\n# 2=>Linear Support Vector Machine\n# 3=>Logistic Regression\nif(modelSelect==\"0\"):\n with open('/var/www/html/Faust/public/script/keras.json', 'r') as f:\n model = model_from_json(f.read())\n model.load_weights('/var/www/html/Faust/public/script/keras.h5')\n vectorizer = pickle.load(open(\"/var/www/html/Faust/public/script/vectorizer.pickle\", \"rb\"))\n test = vectorizer.transform([str(normalized_documents)])\n result = model.predict(test)\n for i in result[0]:\n print(round(float(i)*100, 3),\",\")\n\nelif(modelSelect==\"1\"):\n with open('/var/www/html/Faust/public/script/bayes', 'rb') as training_model:\n model = pickle.load(training_model)\n result = model.predict_proba([str(normalized_documents)])\n for i in result[0]:\n print(round(i*100,2),\",\")\nelif(modelSelect==\"2\"):\n with open('/var/www/html/Faust/public/script/linear', 'rb') as training_model:\n model = pickle.load(training_model)\n result = model.predict_proba([str(normalized_documents)])\n for i in result[0]:\n print(round(i*100,2),\",\")\nelif(modelSelect==\"3\"):\n with open('/var/www/html/Faust/public/script/logistic', 'rb') as training_model:\n model = pickle.load(training_model)\n result=model.predict_proba([str(normalized_documents)])\n for i in result[0]:\n print(round(i*100,2),\",\")\n\n"
] |
[
[
"numpy.array",
"numpy.vectorize"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
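The `web.py` script in the row above normalizes Turkish text before handing it to one of four pickled classifiers. Below is a standalone sketch of that normalization step, with a tiny stand-in stop-word list; it is illustrative and not taken from the repo.

```python
# Illustrative sketch of web.py's norm_doc cleanup; the three-word
# stop_word_list here is a stand-in for the full Turkish list above.
import re

stop_word_list = ["bu", "ve", "bir"]

def norm_doc(single_doc: str) -> str:
    single_doc = re.sub(r" \d+", " ", single_doc)  # drop free-standing numbers
    single_doc = single_doc.lower().strip()
    tokens = single_doc.split(" ")
    return " ".join(t for t in tokens if t not in stop_word_list)

print(norm_doc("Bu haber 2020 yılında ve bir gazetede yayımlandı"))
# -> haber yılında gazetede yayımlandı
```

Note that `web.py` additionally maps Turkish capitals (`İ` to `i`, `Ç` to `ç`, and so on) by hand before this step; plain `str.lower()` turns `İ` into `i` plus a combining dot rather than the ASCII `i` the stop-word list expects.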
eisen-ai/eisen-deploy
|
[
"ab1cdf0f8726cbfbdc7029616b1c753706b0039c",
"ab1cdf0f8726cbfbdc7029616b1c753706b0039c"
] |
[
"eisen_deploy/serving/handlers.py",
"eisen_deploy/utils.py"
] |
[
"import logging\nimport os\nimport torch\nimport dill\nimport json\nimport pickle\nimport msgpack\n\nfrom eisen.utils import EisenModuleWrapper\nfrom eisen_deploy.utils import encode_data, decode_data\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef json_file_to_dict(json_file):\n if not os.path.exists(json_file):\n raise FileNotFoundError('The JSON file {} cannot be read'.format(json_file))\n\n with open(json_file) as json_file:\n dictionary = json.load(json_file)\n\n return dictionary\n\n\nclass EisenServingHandler(object):\n \"\"\"\n EisenServingHandler is a custom object to handle inference request within TorchServing. It is usually included\n automatically in the MAR.\n \"\"\"\n\n def __init__(self):\n self.model = None\n self.device = None\n self.pre_process_tform = None\n self.post_process_tform = None\n self.metadata = None\n self.initialized = False\n self.input_name_list = []\n self.output_name_list = []\n\n def initialize(self, ctx):\n \"\"\"\n Initializes the fields of the EisenServingHandler object based on the context.\n\n :param ctx: context of an inference request\n :return: None\n \"\"\"\n properties = ctx.system_properties\n\n self.device = torch.device(\"cuda:\" + str(properties.get(\"gpu_id\")) if torch.cuda.is_available() else \"cpu\")\n\n model_dir = properties.get(\"model_dir\")\n\n # Model file\n model_pt_path = os.path.join(model_dir, \"model.pt\")\n\n # Pre processing chain\n pre_processing_pkl = os.path.join(model_dir, \"pre_process_tform.pkl\")\n\n # Post processing chain\n post_processing_pkl = os.path.join(model_dir, \"post_process_tform.pkl\")\n\n # unpickle serialized transform chain\n with open(pre_processing_pkl, \"rb\") as f:\n self.pre_process_tform = dill.load(f)\n\n with open(post_processing_pkl, \"rb\") as f:\n self.post_process_tform = dill.load(f)\n\n # Metadata about the model\n metadata_json = os.path.join(model_dir, \"metadata.json\")\n\n self.metadata = json_file_to_dict(metadata_json)\n\n self.input_name_list = self.metadata['model_input_list']\n\n self.output_name_list = self.metadata['model_output_list']\n\n # deserialize pytorch model\n base_model = torch.load(model_pt_path, map_location=self.device)\n\n self.model = EisenModuleWrapper(base_model, self.input_name_list, self.output_name_list)\n\n # put model in eval mode\n self.model.eval()\n\n logger.debug('Model file {0} loaded successfully'.format(model_pt_path))\n\n self.initialized = True\n\n def get_metadata(self):\n \"\"\"\n This function returns metadata about the model as JSON\n\n :return: list\n \"\"\"\n return [json.dumps(self.metadata)]\n\n def pre_process(self, data):\n \"\"\"\n Applies pre-processing transform using de-pickled transform chain in the MAR.\n\n :param data: dictionary containing a collated batch of data\n :type data: dict\n\n \"\"\"\n input_dict = self.pre_process_tform(data)\n\n return input_dict\n\n def inference(self, input_dict):\n \"\"\"\n Performs prediction using the model. 
Feeds the necessary information to the model starting from the\n received data and creates an output dictionary as a result.\n\n :param input_dict: input batch, in form of a dictionary of collated datapoints\n :type input_dict: dict\n\n :return: dict\n \"\"\"\n\n for name in self.model.input_names:\n input_dict[name] = torch.Tensor(input_dict[name]).to(self.device)\n\n output_dict = self.model(**input_dict)\n\n for name in self.model.output_names:\n output_dict[name] = output_dict[name].data.cpu().numpy()\n\n return output_dict\n\n def post_process(self, output_dict):\n \"\"\"\n Applies post-processing transform using de-pickled transform chain in the MAR.\n\n :param output_dict: dictionary containing the result of inference on a collated batch of data\n :type output_dict: dict\n \"\"\"\n\n prediction = self.post_process_tform(output_dict)\n\n return prediction\n\n def handle(self, data):\n \"\"\"\n Handles one request.\n\n :param data: dictionary of data\n :type data: dict\n\n :return: list\n \"\"\"\n input_data = {}\n for input in self.metadata['inputs']:\n input_data[input['name']] = data[input['name']]\n \n model_input = self.pre_process(input_data)\n \n model_out = self.inference(model_input)\n\n model_out.update(model_input) # output dictionary still contains inputs (which may be useful for tforms)\n\n prediction = self.post_process(model_out)\n \n output_data = {}\n for output in self.metadata['outputs']:\n output_data[output['name']] = prediction[output['name']]\n\n buffer = msgpack.packb(prediction, default=encode_data, use_bin_type=True)\n\n return [buffer]\n\n\n_service = EisenServingHandler()\n\n\ndef handle(data, context):\n\n if not _service.initialized:\n _service.initialize(context)\n\n if data is not None and hasattr(data, '__getitem__') and 'body' in data[0].keys() and len(data[0]['body']) > 0:\n data = data[0]['body']\n else:\n return _service.get_metadata()\n\n data = msgpack.unpackb(data, object_hook=decode_data, raw=False)\n\n if not all([key in data.keys() for key in _service.input_name_list]):\n return _service.get_metadata()\n\n else:\n return _service.handle(data)\n",
"import numpy as np\n\n\ndef encode_data(obj):\n \"\"\"\n Encodes data before serialization. Basic data-types that can be directly serialized are\n returned unchanged. Other data types that cannot be serialized by msgpack, are broken down\n and made compatible as well as recognizable. Numpy array are a perfect example of this need.\n As they are not serializable \"as-is\", they are transferred in a broken down format.\n\n If the passed object is of a type not supported by this function,\n the object is returned unchanged.\n\n This function is called recursively on dictionaries stored within dictionary elements.\n \"\"\"\n if isinstance(obj, (int, float, list, bool, str)):\n return obj\n\n elif isinstance(obj, dict):\n for key in obj.keys():\n obj[key] = encode_data(obj[key])\n\n return obj\n\n elif isinstance(obj, np.ndarray):\n return {\n '__ndarray__': True,\n 'data': obj.tostring(),\n 'shape': obj.shape,\n 'type': str(obj.dtype)\n }\n\n return obj\n\n\ndef decode_data(obj):\n \"\"\"\n Decodes data that has been previously encoded by encode_data (see above).\n\n Data is restored to its original format and type by recognizing what kind of transformation\n has been previously done by encode_data.\n\n Data types that cannot be decoded are returned unchanged.\n \"\"\"\n if isinstance(obj, (int, float, list, bool, str)):\n return obj\n\n elif '__ndarray__' in obj:\n return np.frombuffer(obj['data'], dtype=obj['type']).reshape(*obj['shape'])\n\n elif isinstance(obj, dict):\n for key in obj.keys():\n obj[key] = decode_data(obj[key])\n\n return obj\n\n return obj"
] |
[
[
"torch.Tensor",
"torch.cuda.is_available",
"torch.load"
],
[
"numpy.frombuffer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
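The second file in the row above (`eisen_deploy/utils.py`) defines `encode_data`/`decode_data` so that numpy arrays survive msgpack serialization. A round-trip sketch, mirroring how `handlers.py` packs predictions; it assumes `eisen_deploy` is installed with the code shown in the row, and the payload keys are made up.

```python
# Round-trip check for the msgpack helpers above (illustrative; assumes
# eisen_deploy.utils matches the row's code).
import msgpack
import numpy as np

from eisen_deploy.utils import decode_data, encode_data

payload = {"logits": np.random.rand(2, 3).astype(np.float32), "id": 7}

# ndarray values are not natively serializable, so packb falls back to
# encode_data, which stores raw bytes plus shape and dtype under '__ndarray__'
buffer = msgpack.packb(payload, default=encode_data, use_bin_type=True)

# decode_data spots the '__ndarray__' marker and rebuilds the array
restored = msgpack.unpackb(buffer, object_hook=decode_data, raw=False)

assert restored["id"] == 7
assert np.allclose(restored["logits"], payload["logits"])
```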
holazzer/transformers
|
[
"8e908c8c74f556a82534f4cf1e7a1b4f7b55d24c",
"8e908c8c74f556a82534f4cf1e7a1b4f7b55d24c",
"53191d75ecca21c028077b3227f9ac47379e4690"
] |
[
"src/transformers/models/big_bird/modeling_big_bird.py",
"src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py",
"src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py"
] |
[
"# coding=utf-8\n# Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch BigBird model. \"\"\"\n\n\nimport math\nimport os\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport numpy as np\nimport torch\nimport torch.utils.checkpoint\nfrom packaging import version\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel, apply_chunking_to_forward\nfrom ...utils import logging\nfrom .configuration_big_bird import BigBirdConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"google/bigbird-roberta-base\"\n_CONFIG_FOR_DOC = \"BigBirdConfig\"\n_TOKENIZER_FOR_DOC = \"BigBirdTokenizer\"\n\nBIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"google/bigbird-roberta-base\",\n \"google/bigbird-roberta-large\",\n \"google/bigbird-base-trivia-itc\",\n # See all BigBird models at https://huggingface.co/models?filter=big_bird\n]\n\n_TRIVIA_QA_MAPPING = {\n \"big_bird_attention\": \"attention/self\",\n \"output_layer_norm\": \"output/LayerNorm\",\n \"attention_output\": \"attention/output/dense\",\n \"output\": \"output/dense\",\n \"self_attention_layer_norm\": \"attention/output/LayerNorm\",\n \"intermediate\": \"intermediate/dense\",\n \"word_embeddings\": \"bert/embeddings/word_embeddings\",\n \"position_embedding\": \"bert/embeddings/position_embeddings\",\n \"type_embeddings\": \"bert/embeddings/token_type_embeddings\",\n \"embeddings\": \"bert/embeddings\",\n \"layer_normalization\": \"output/LayerNorm\",\n \"layer_norm\": \"LayerNorm\",\n \"trivia_qa_head\": \"qa_classifier\",\n \"dense\": \"intermediate/dense\",\n \"dense_1\": \"qa_outputs\",\n}\n\n\ndef load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=False):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n\n def load_tf_weights_bert(init_vars, tf_path):\n names = []\n tf_weights = {}\n\n for name, shape in init_vars:\n array = tf.train.load_variable(tf_path, name)\n name = name.replace(\"bert/encoder/LayerNorm\", \"bert/embeddings/LayerNorm\")\n logger.info(f\"Loading TF weight {name} with shape {shape}\")\n names.append(name)\n tf_weights[name] = array\n\n return names, tf_weights\n\n def load_tf_weights_trivia_qa(init_vars):\n names = []\n tf_weights = {}\n\n for i, var in enumerate(init_vars):\n name_items = var.name.split(\"/\")\n\n if \"transformer_scaffold\" in name_items[0]:\n layer_name_items = name_items[0].split(\"_\")\n if 
len(layer_name_items) < 3:\n layer_name_items += [0]\n\n name_items[0] = f\"bert/encoder/layer_{layer_name_items[2]}\"\n\n name = \"/\".join([_TRIVIA_QA_MAPPING[x] if x in _TRIVIA_QA_MAPPING else x for x in name_items])[\n :-2\n ] # remove last :0 in variable\n\n if \"self/attention/output\" in name:\n name = name.replace(\"self/attention/output\", \"output\")\n\n if i >= len(init_vars) - 2:\n name = name.replace(\"intermediate\", \"output\")\n\n logger.info(f\"Loading TF weight {name} with shape {var.shape}\")\n array = var.value().numpy()\n names.append(name)\n tf_weights[name] = array\n\n return names, tf_weights\n\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n\n # Load weights from TF model\n init_vars = tf.saved_model.load(tf_path).variables if is_trivia_qa else tf.train.list_variables(tf_path)\n\n assert len(init_vars) > 0, \"Loaded trained variables cannot be empty.\"\n\n pt_names = list(model.state_dict().keys())\n\n if is_trivia_qa:\n names, tf_weights = load_tf_weights_trivia_qa(init_vars)\n else:\n names, tf_weights = load_tf_weights_bert(init_vars, tf_path)\n\n for txt_name in names:\n array = tf_weights[txt_name]\n name = txt_name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n pointer = model\n pt_name = []\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n pt_name.append(\"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n pt_name.append(\"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n pt_name.append(\"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n pt_name.append(\"classifier\")\n elif scope_names[0] == \"transform\":\n pointer = getattr(pointer, \"transform\")\n pt_name.append(\"transform\")\n if (\"bias\" in name) or (\"kernel\" in name):\n pointer = getattr(pointer, \"dense\")\n pt_name.append(\"dense\")\n elif (\"beta\" in name) or (\"gamma\" in name):\n pointer = getattr(pointer, \"LayerNorm\")\n pt_name.append(\"LayerNorm\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n pt_name.append(f\"{scope_names[0]}\")\n except AttributeError:\n logger.info(f\"Skipping {m_name}\")\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n pt_name.append(f\"{num}\")\n if m_name[-11:] == \"_embeddings\" or m_name == \"embeddings\":\n pointer = getattr(pointer, \"weight\")\n pt_name.append(\"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n if len(array.shape) > len(pointer.shape) and math.prod(array.shape) == math.prod(pointer.shape):\n # print(txt_name, array.shape)\n if (\n 
txt_name.endswith(\"attention/self/key/kernel\")\n or txt_name.endswith(\"attention/self/query/kernel\")\n or txt_name.endswith(\"attention/self/value/kernel\")\n ):\n array = array.transpose(1, 0, 2).reshape(pointer.shape)\n elif txt_name.endswith(\"attention/output/dense/kernel\"):\n array = array.transpose(0, 2, 1).reshape(pointer.shape)\n else:\n array = array.reshape(pointer.shape)\n\n if pointer.shape != array.shape:\n raise ValueError(\n f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched of {txt_name}.\"\n )\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n pt_weight_name = \".\".join(pt_name)\n logger.info(f\"Initialize PyTorch weight {pt_weight_name} from {txt_name}.\")\n pointer.data = torch.from_numpy(array)\n tf_weights.pop(txt_name, None)\n pt_names.remove(pt_weight_name)\n\n logger.info(f\"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.\")\n logger.info(f\"Weights not initialized in PyTorch model: {', '.join(pt_names)}.\")\n return model\n\n\nclass BigBirdEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n if version.parse(torch.__version__) > version.parse(\"1.6.0\"):\n self.register_buffer(\n \"token_type_ids\",\n torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),\n persistent=False,\n )\n # End copy\n\n self.rescale_embeddings = config.rescale_embeddings\n self.hidden_size = config.hidden_size\n\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]\n\n # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs\n # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves\n # issue #5664\n if token_type_ids is None:\n if hasattr(self, \"token_type_ids\"):\n buffered_token_type_ids = self.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds 
is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n if self.rescale_embeddings:\n inputs_embeds = inputs_embeds * (self.hidden_size ** 0.5)\n\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n\n embeddings = self.dropout(embeddings)\n embeddings = self.LayerNorm(embeddings)\n return embeddings\n\n\nclass BigBirdSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = encoder_hidden_states is not None\n\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_layer = past_key_value[0]\n value_layer = past_key_value[1]\n attention_mask = encoder_attention_mask\n elif is_cross_attention:\n key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))\n value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. 
Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BigBirdModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.functional.softmax(attention_scores, dim=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n if self.is_decoder:\n outputs = outputs + (past_key_value,)\n return outputs\n\n\nclass BigBirdBlockSparseAttention(nn.Module):\n def __init__(self, config, seed=None):\n super().__init__()\n\n self.max_seqlen = config.max_position_embeddings\n self.seed = seed\n\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size {config.hidden_size} is not a multiple of the number of attention \"\n f\"heads {config.num_attention_heads}.\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.num_random_blocks = config.num_random_blocks\n self.block_size = config.block_size\n\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n band_mask=None,\n from_mask=None,\n to_mask=None,\n from_blocked_mask=None,\n to_blocked_mask=None,\n output_attentions=None,\n ):\n # Currently this `class` can't be used in decoder.\n\n batch_size, seqlen, _ = hidden_states.size()\n to_seq_length = from_seq_length = seqlen\n from_block_size = to_block_size = self.block_size\n\n assert from_seq_length % from_block_size == 0, \"Query sided sequence length must be multiple of block size\"\n assert to_seq_length % to_block_size == 0, \"Key/Value sided sequence length must be multiple of block size\"\n\n query_layer = self.transpose_for_scores(self.query(hidden_states))\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n context_layer, 
attention_probs = self.bigbird_block_sparse_attention(\n query_layer,\n key_layer,\n value_layer,\n band_mask,\n from_mask,\n to_mask,\n from_blocked_mask,\n to_blocked_mask,\n self.num_attention_heads,\n self.num_random_blocks,\n self.attention_head_size,\n from_block_size,\n to_block_size,\n batch_size,\n from_seq_length,\n to_seq_length,\n seed=self.seed,\n plan_from_length=None,\n plan_num_rand_blocks=None,\n output_attentions=output_attentions,\n )\n\n context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n return outputs\n\n @staticmethod\n def torch_bmm_nd(inp_1, inp_2, ndim=None):\n \"\"\"Fast nd matrix multiplication\"\"\"\n # faster replacement of torch.einsum (\"bhqk,bhkd->bhqd\")\n return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view(\n inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1])\n )\n\n @staticmethod\n def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None):\n \"\"\"Fast nd matrix multiplication with transpose\"\"\"\n # faster replacement of torch.einsum (bhqd,bhkd->bhqk)\n return torch.bmm(\n inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2)\n ).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2]))\n\n def bigbird_block_sparse_attention(\n self,\n query_layer,\n key_layer,\n value_layer,\n band_mask,\n from_mask,\n to_mask,\n from_blocked_mask,\n to_blocked_mask,\n n_heads,\n n_rand_blocks,\n attention_head_size,\n from_block_size,\n to_block_size,\n batch_size,\n from_seq_len,\n to_seq_len,\n seed,\n plan_from_length,\n plan_num_rand_blocks,\n output_attentions,\n ):\n\n # BigBird block-sparse attention as suggested in paper\n\n # ITC:\n # global tokens: 2 x block_size\n # window tokens: 3 x block_size\n # random tokens: num_rand_tokens x block_size\n\n # ETC:\n # global tokens: extra_globals_tokens + 2 x block_size\n # window tokens: 3 x block_size\n # random tokens: num_rand_tokens x block_size\n\n # Note:\n # 1) Currently, ETC is not supported.\n # 2) Window size is fixed to 3 blocks & it can be changed only by\n # changing `block_size`.\n # 3) Number of global blocks are fixed (2 blocks here) & global tokens can be\n # controlled only by `block_size`.\n\n # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of shifting tokens (for calculating sliding attention)\n # hence following code can be divided into 5 parts.\n\n if from_seq_len // from_block_size != to_seq_len // to_block_size:\n raise ValueError(\"Error the number of blocks needs to be same!\")\n\n rsqrt_d = 1 / math.sqrt(attention_head_size)\n bsz = batch_size\n attn_mask_penalty = -10000.0\n\n # generate random attention and corresponding masks\n np.random.seed(seed)\n if from_seq_len in [1024, 3072, 4096]: # old plans used in paper\n rand_attn = [\n self._bigbird_block_rand_mask(\n self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024\n )[: (from_seq_len // from_block_size - 2)]\n for _ in range(n_heads)\n ]\n else:\n if plan_from_length is None:\n plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan(\n from_seq_len, from_block_size, n_rand_blocks\n )\n\n rand_attn = self._bigbird_block_rand_mask_with_head(\n from_seq_length=from_seq_len,\n to_seq_length=to_seq_len,\n from_block_size=from_block_size,\n to_block_size=to_block_size,\n 
num_heads=n_heads,\n plan_from_length=plan_from_length,\n plan_num_rand_blocks=plan_num_rand_blocks,\n )\n\n rand_attn = np.stack(rand_attn, axis=0)\n rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long)\n rand_attn.unsqueeze_(0)\n rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0)\n\n rand_mask = self._create_rand_mask_from_inputs(\n from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size\n )\n\n blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1)\n blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)\n blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)\n\n # preparing block for randn attn\n gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn)\n gathered_key = gathered_key.view(\n bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1\n ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]\n gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn)\n gathered_value = gathered_value.view(\n bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1\n ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]\n\n # 1st PART\n # 1st block (global block) attention scores\n # q[0] x (k[0], k[1], k[2], k[3], k[4] .... )\n\n # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]\n first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4)\n\n first_product = first_product * rsqrt_d\n first_product += (1.0 - to_mask) * attn_mask_penalty\n first_attn_weights = nn.functional.softmax(\n first_product, dim=-1\n ) # [bsz, n_heads, from_block_size, to_seq_len]\n\n # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]\n first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4)\n first_context_layer.unsqueeze_(2)\n\n # 2nd PART\n # 2nd block attention scores\n # q[1] x (sliding_keys, random_keys, global_keys)\n # sliding key blocks -> 2nd, 3rd blocks\n # global key blocks -> 1st block\n\n second_key_mat = torch.cat(\n [\n blocked_key_matrix[:, :, 0],\n blocked_key_matrix[:, :, 1],\n blocked_key_matrix[:, :, 2],\n blocked_key_matrix[:, :, -1],\n gathered_key[:, :, 0],\n ],\n dim=2,\n ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]\n second_value_mat = torch.cat(\n [\n blocked_value_matrix[:, :, 0],\n blocked_value_matrix[:, :, 1],\n blocked_value_matrix[:, :, 2],\n blocked_value_matrix[:, :, -1],\n gathered_value[:, :, 0],\n ],\n dim=2,\n ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]\n\n # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]\n second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4)\n second_seq_pad = torch.cat(\n [\n to_mask[:, :, :, : 3 * to_block_size],\n to_mask[:, :, :, -to_block_size:],\n to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),\n ],\n dim=3,\n )\n second_rand_pad = torch.cat(\n [\n rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),\n rand_mask[:, :, 0],\n ],\n dim=3,\n )\n second_product = second_product * rsqrt_d\n 
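# a key position stays attendable only if both the sequence-padding mask and the\n        # random-block mask allow it, hence the elementwise minimum in the line below\n        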
second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty\n        second_attn_weights = nn.functional.softmax(\n            second_product, dim=-1\n        )  # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]\n\n        # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]\n        second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4)\n\n        second_context_layer.unsqueeze_(2)\n\n        # 3rd PART\n        # Middle blocks attention scores\n        # q[2:-2] x (sliding_keys, random_keys, global_keys)\n        # sliding attn is calculated using special trick of shifting tokens as discussed in paper\n        # random keys are generated by taking random indices as per `rand_attn`\n        # global keys -> 1st & last block\n\n        exp_blocked_key_matrix = torch.cat(\n            [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]\n        exp_blocked_value_matrix = torch.cat(\n            [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]],\n            dim=3,\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]\n        middle_query_matrix = blocked_query_matrix[:, :, 2:-2]\n\n        # sliding attention scores for q[2:-2]\n        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]\n        inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5)\n        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size]\n        inner_band_product = inner_band_product * rsqrt_d\n\n        # random attention scores for q[2:-2]\n        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]\n        rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5)\n        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size]\n        rand_band_product = rand_band_product * rsqrt_d\n\n        # Including 1st block (since it's global)\n        first_band_product = torch.einsum(\n            \"bhlqd,bhkd->bhlqk\", middle_query_matrix, blocked_key_matrix[:, :, 0]\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]\n        first_band_product = first_band_product * rsqrt_d\n\n        # Including last block (since it's global)\n        last_band_product = torch.einsum(\n            \"bhlqd,bhkd->bhlqk\", middle_query_matrix, blocked_key_matrix[:, :, -1]\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]\n        last_band_product = last_band_product * rsqrt_d\n\n        # masking padded tokens\n        inner_band_product += (1.0 - band_mask) * attn_mask_penalty\n        first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty\n        last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty\n        rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty\n\n        # completing attention scores matrix for all q[2:-2]\n        band_product = torch.cat(\n            [first_band_product, inner_band_product, rand_band_product, 
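\n            # NOTE: the concatenation order (first | sliding | random | last) must match the column\n            # slices taken from attn_weights when the context layer is assembled below\n            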
last_band_product], dim=-1\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]\n\n        # safely doing softmax since attention matrix is completed\n        attn_weights = nn.functional.softmax(\n            band_product, dim=-1\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]\n\n        # contribution of sliding keys\n        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]\n        context_layer = self.torch_bmm_nd(\n            attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5\n        )\n        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]\n\n        # adding contribution of random keys\n        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]\n        context_layer += self.torch_bmm_nd(\n            attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5\n        )\n        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]\n\n        # adding contribution of global keys\n        context_layer += torch.einsum(\n            \"bhlqk,bhkd->bhlqd\", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]\n        context_layer += torch.einsum(\n            \"bhlqk,bhkd->bhlqd\", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]\n        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]\n\n        # 4th PART\n        # second-to-last block attention scores\n        # q[-2] x (sliding_keys, random_keys, global_keys)\n        # sliding key blocks -> last 3 blocks\n        # global key block -> 1st block\n        # random key block -> based on indices stored in `rand_attn`\n\n        second_last_key_mat = torch.cat(\n            [\n                blocked_key_matrix[:, :, 0],\n                blocked_key_matrix[:, :, -3],\n                blocked_key_matrix[:, :, -2],\n                blocked_key_matrix[:, :, -1],\n                gathered_key[:, :, -1],\n            ],\n            dim=2,\n        )  # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]\n        second_last_value_mat = torch.cat(\n            [\n                blocked_value_matrix[:, :, 0],\n                blocked_value_matrix[:, :, -3],\n                blocked_value_matrix[:, :, -2],\n                blocked_value_matrix[:, :, -1],\n                gathered_value[:, :, -1],\n            ],\n            dim=2,\n        )  # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]\n\n        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]\n        second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4)\n        second_last_seq_pad = torch.cat(\n            [\n                to_mask[:, :, :, :to_block_size],\n                to_mask[:, :, :, -3 * to_block_size :],\n                to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),\n            ],\n            dim=3,\n        )\n        second_last_rand_pad = torch.cat(\n            [\n                rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),\n                rand_mask[:, :, -1],\n            ],\n            dim=3,\n        )\n        second_last_product = second_last_product * rsqrt_d\n        second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty\n        second_last_attn_weights = nn.functional.softmax(\n            second_last_product, dim=-1\n        )  # [bsz, 
n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]\n\n # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]\n second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4)\n second_last_context_layer.unsqueeze_(2)\n\n # 5th PART\n # last block (global) attention scores\n # q[-1] x (k[0], k[1], k[2], k[3], .... )\n\n # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]\n last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4)\n last_product = last_product * rsqrt_d\n last_product += (1.0 - to_mask) * attn_mask_penalty\n last_attn_weights = nn.functional.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n]\n\n # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]\n last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4)\n last_context_layer.unsqueeze_(2)\n\n # combining representations of all tokens\n context_layer = torch.cat(\n [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer],\n dim=2,\n )\n context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask\n context_layer = torch.transpose(context_layer, 1, 2)\n\n # this is just for visualizing; forward pass doesn't depend on following code\n if output_attentions:\n # TODO(PVP): need to verify if below code is correct\n attention_probs = torch.zeros(\n bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device\n )\n\n # 1st query block\n # corresponding to `first_context_layer`\n attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global\n\n # 2nd query block\n # corresponding to `second_context_layer`\n attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[\n :, :, :, : 3 * to_block_size\n ] # 1st three key blocks (global + sliding)\n attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[\n :, :, :, 3 * to_block_size : 4 * to_block_size\n ] # last key block (global)\n # random keys\n for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights):\n # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch\n for p2, i2, w2 in zip(range(n_heads), i1, w1):\n # p2, i2, w2 corresponds to head_dim i.e. 
following operation is done for each heads\n attn_probs_view = attention_probs.view(\n bsz,\n n_heads,\n from_seq_len // from_block_size,\n from_block_size,\n to_seq_len // to_block_size,\n to_block_size,\n )\n right_slice = w2[:, 4 * to_block_size :]\n attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view(\n from_block_size, n_rand_blocks, to_block_size\n )\n\n # Middle query blocks\n # corresponding to `context_layer`\n # sliding keys\n for q_idx in range(from_seq_len // from_block_size - 4):\n attn_probs_view = attention_probs.view(\n bsz,\n n_heads,\n from_seq_len // from_block_size,\n from_block_size,\n to_seq_len // to_block_size,\n to_block_size,\n )[:, :, 2:-2, :, 1:-1, :]\n right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size]\n attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view(\n bsz, n_heads, from_block_size, 3, to_block_size\n ) # inner_band_product\n # global keys (corresponding to 1st key block)\n attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[\n :, :, :, :, :to_block_size\n ].view(\n bsz, n_heads, -1, to_block_size\n ) # first_band_product\n # global keys (corresponding to last key block)\n attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[\n :, :, :, :, -to_block_size:\n ].view(\n bsz, n_heads, -1, to_block_size\n ) # last_band_product\n # random keys\n for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights):\n # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch\n for p2, i2, w2 in zip(range(n_heads), i1, w1):\n # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads\n for q_idx in range(1, len(i2) - 1):\n attn_probs_view = attention_probs.view(\n bsz,\n n_heads,\n from_seq_len // from_block_size,\n from_block_size,\n to_seq_len // to_block_size,\n to_block_size,\n )\n right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size]\n attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view(\n from_block_size, n_rand_blocks, to_block_size\n )\n\n # Second-last query block\n # corresponding to `second_last_context_layer`\n attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[\n :, :, :, :to_block_size\n ] # 1st key block (global)\n attention_probs[\n :, :, -2 * from_block_size : -from_block_size, -3 * to_block_size :\n ] = second_last_attn_weights[\n :, :, :, to_block_size : 4 * to_block_size\n ] # last three blocks (global + sliding)\n # random keys\n for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights):\n # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch\n for p2, i2, w2 in zip(range(n_heads), i1, w1):\n # p2, i2, w2 corresponds to head_dim i.e. 
the following operation is done for each head\n                    attn_probs_view = attention_probs.view(\n                        bsz,\n                        n_heads,\n                        from_seq_len // from_block_size,\n                        from_block_size,\n                        to_seq_len // to_block_size,\n                        to_block_size,\n                    )\n                    right_slice = w2[:, 4 * to_block_size :]\n                    attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view(\n                        from_block_size, n_rand_blocks, to_block_size\n                    )\n\n            # last query block\n            # corresponding to `last_context_layer`\n            attention_probs[:, :, -from_block_size:, :] = last_attn_weights  # all keys global\n\n        else:\n            attention_probs = None\n\n        return context_layer, attention_probs\n\n    @staticmethod\n    def torch_gather_b2(params, indices):\n        # this operation is equivalent to tf.gather when batch_dims=2\n\n        if params.shape[:2] != indices.shape[:2]:\n            raise ValueError(\n                f\"Make sure that the first two dimensions of params and indices are identical, \\\n                but they are params: {params.shape[:2]} vs. indices: {indices.shape[:2]}\"\n            )\n        num_indices_to_gather = indices.shape[-2] * indices.shape[-1]\n        num_indices_to_pick_from = params.shape[2]\n\n        indices_shift = (\n            torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device)\n            // num_indices_to_gather\n            * num_indices_to_pick_from\n        )\n\n        flattened_indices = indices.view(-1) + indices_shift\n        flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1])\n\n        out_flattened = flattened_params.index_select(0, flattened_indices)\n\n        out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:])\n        return out\n\n    @staticmethod\n    def _create_rand_mask_from_inputs(\n        from_blocked_mask,\n        to_blocked_mask,\n        rand_attn,\n        num_attention_heads,\n        num_rand_blocks,\n        batch_size,\n        from_seq_length,\n        from_block_size,\n    ):\n        \"\"\"\n        Create 5D attention mask from 3D blocked tensor masks.\n\n        Args:\n            from_blocked_mask: 3D Tensor of shape [batch_size,\n                from_seq_length//from_block_size, from_block_size].\n            to_blocked_mask: int32 Tensor of shape [batch_size,\n                to_seq_length//to_block_size, to_block_size].\n            rand_attn: [batch_size, num_attention_heads,\n                from_seq_length//from_block_size-2, num_rand_blocks]\n            num_attention_heads: int. Number of attention heads.\n            num_rand_blocks: int. Number of random chunks per row.\n            batch_size: int. Batch size for computation.\n            from_seq_length: int. length of from sequence.\n            from_block_size: int. size of block in from sequence.\n\n        Returns:\n            float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2,\n            from_block_size, num_rand_blocks*to_block_size].\n        \"\"\"\n        num_windows = from_seq_length // from_block_size - 2\n        rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)])\n        rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size)\n        rand_mask = torch.einsum(\"blq,bhlk->bhlqk\", from_blocked_mask[:, 1:-1], rand_mask)\n        return rand_mask\n\n    @staticmethod\n    def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):\n        \"\"\"\n        Gives the plan of where to put random attention.\n\n        Args:\n            from_seq_length: int. length of from sequence.\n            from_block_size: int. size of block in from sequence.\n            num_rand_blocks: int. 
Number of random chunks per row.\n\n Returns:\n plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for\n each block\n \"\"\"\n\n plan_from_length = []\n plan_num_rand_blocks = []\n if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size):\n plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size))\n plan_num_rand_blocks.append(num_rand_blocks)\n plan_from_length.append(from_seq_length)\n plan_num_rand_blocks.append(0)\n elif (num_rand_blocks + 5) < (from_seq_length // from_block_size):\n plan_from_length.append(int((num_rand_blocks + 5) * from_block_size))\n plan_num_rand_blocks.append(num_rand_blocks // 2)\n plan_from_length.append(from_seq_length)\n plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2))\n else:\n plan_from_length.append(from_seq_length)\n plan_num_rand_blocks.append(num_rand_blocks)\n\n return plan_from_length, plan_num_rand_blocks\n\n @staticmethod\n def _bigbird_block_rand_mask(\n from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1\n ):\n \"\"\"\n Create adjacency list of random attention.\n\n Args:\n from_seq_length: int. length of from sequence.\n to_seq_length: int. length of to sequence.\n from_block_size: int. size of block in from sequence.\n to_block_size: int. size of block in to sequence.\n num_rand_blocks: int. Number of random chunks per row.\n last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence,\n if positive then num_rand_blocks blocks chosen only up to last_idx.\n\n Returns:\n adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks\n \"\"\"\n # using this method when from_seq_length in [1024, 3072, 4096]\n\n assert (\n from_seq_length // from_block_size == to_seq_length // to_block_size\n ), \"Error the number of blocks needs to be same!\"\n\n rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)\n middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)\n last = to_seq_length // to_block_size - 1\n if last_idx > (2 * to_block_size):\n last = (last_idx // to_block_size) - 1\n\n r = num_rand_blocks # shorthand\n for i in range(1, from_seq_length // from_block_size - 1):\n start = i - 2\n end = i\n if i == 1:\n rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]\n elif i == 2:\n rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]\n elif i == from_seq_length // from_block_size - 3:\n rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]\n # Missing -3: should have been sliced till last-3\n elif i == from_seq_length // from_block_size - 2:\n rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]\n # Missing -4: should have been sliced till last-4\n else:\n if start > last:\n start = last\n rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]\n elif (end + 1) == last:\n rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]\n else:\n rand_attn[i - 1, :] = np.random.permutation(\n np.concatenate((middle_seq[:start], middle_seq[end + 1 : last]))\n )[:r]\n return rand_attn\n\n def _bigbird_block_rand_mask_with_head(\n self,\n from_seq_length,\n to_seq_length,\n from_block_size,\n to_block_size,\n num_heads,\n plan_from_length,\n plan_num_rand_blocks,\n window_block_left=1,\n window_block_right=1,\n global_block_top=1,\n global_block_bottom=1,\n global_block_left=1,\n global_block_right=1,\n ):\n \"\"\"\n Create adjacency list of random 
attention.\n\n        Args:\n            from_seq_length: int. length of from sequence.\n            to_seq_length: int. length of to sequence.\n            from_block_size: int. size of block in from sequence.\n            to_block_size: int. size of block in to sequence.\n            num_heads: int. total number of heads.\n            plan_from_length: list. plan from length where num_random_blocks are chosen from.\n            plan_num_rand_blocks: list. number of rand blocks within the plan.\n            window_block_left: int. number of blocks of window to left of a block.\n            window_block_right: int. number of blocks of window to right of a block.\n            global_block_top: int. number of blocks at the top.\n            global_block_bottom: int. number of blocks at the bottom.\n            global_block_left: int. Number of blocks globally used to the left.\n            global_block_right: int. Number of blocks globally used to the right.\n\n        Returns:\n            adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by\n            num_rand_blocks\n        \"\"\"\n        # using this method when from_seq_length not in [1024, 3072, 4096]\n\n        assert (\n            from_seq_length // from_block_size == to_seq_length // to_block_size\n        ), \"Error the number of blocks needs to be same!\"\n\n        assert from_seq_length in plan_from_length, \"Error from sequence length not in plan!\"\n\n        # Total number of blocks in the mask\n        num_blocks = from_seq_length // from_block_size\n        # Number of blocks per plan\n        plan_block_length = np.array(plan_from_length) // from_block_size\n        # till when to follow plan\n        max_plan_idx = plan_from_length.index(from_seq_length)\n        # Random Attention adjacency list\n        rand_attn = [\n            np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32)\n            for i in range(num_heads)\n        ]\n\n        # We will go iteratively over the plan blocks and pick a random number of\n        # attention blocks from the legally allowed blocks\n        for plan_idx in range(max_plan_idx + 1):\n            rnd_r_cnt = 0\n            if plan_idx > 0:\n                # set the row for all from_blocks starting from 0 to\n                # plan_block_length[plan_idx-1]\n                # column indices start from plan_block_length[plan_idx-1] and end at\n                # plan_block_length[plan_idx]\n                if plan_num_rand_blocks[plan_idx] > 0:\n                    rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))\n                    curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))\n                    for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]):\n                        for h in range(num_heads):\n                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(\n                                block_id=blk_rw_idx,\n                                to_start_block_id=plan_block_length[plan_idx - 1],\n                                to_end_block_id=plan_block_length[plan_idx],\n                                num_rand_blocks=plan_num_rand_blocks[plan_idx],\n                                window_block_left=window_block_left,\n                                window_block_right=window_block_right,\n                                global_block_left=global_block_left,\n                                global_block_right=global_block_right,\n                            )\n\n                for pl_id in range(plan_idx):\n                    if plan_num_rand_blocks[pl_id] == 0:\n                        continue\n                    for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]):\n                        rnd_r_cnt = 0\n                        to_start_block_id = 0\n                        if pl_id > 0:\n                            rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id]))\n                            to_start_block_id = plan_block_length[pl_id - 1]\n                        curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1]))\n                        for h in range(num_heads):\n                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(\n                                block_id=blk_rw_idx,\n                                to_start_block_id=to_start_block_id,\n                                to_end_block_id=plan_block_length[pl_id],\n                                num_rand_blocks=plan_num_rand_blocks[pl_id],\n                                window_block_left=window_block_left,\n                                
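# the window/global widths passed here mark the blocks the helper must exclude\n                                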
window_block_right=window_block_right,\n global_block_left=global_block_left,\n global_block_right=global_block_right,\n )\n\n if plan_num_rand_blocks[plan_idx] == 0:\n continue\n curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))\n from_start_block_id = global_block_top\n to_start_block_id = 0\n if plan_idx > 0:\n rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))\n from_start_block_id = plan_block_length[plan_idx - 1]\n to_start_block_id = plan_block_length[plan_idx - 1]\n\n for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):\n for h in range(num_heads):\n rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(\n block_id=blk_rw_idx,\n to_start_block_id=to_start_block_id,\n to_end_block_id=plan_block_length[plan_idx],\n num_rand_blocks=plan_num_rand_blocks[plan_idx],\n window_block_left=window_block_left,\n window_block_right=window_block_right,\n global_block_left=global_block_left,\n global_block_right=global_block_right,\n )\n\n for nh in range(num_heads):\n rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]\n\n return rand_attn\n\n @staticmethod\n def _get_single_block_row_attention(\n block_id,\n to_start_block_id,\n to_end_block_id,\n num_rand_blocks,\n window_block_left=1,\n window_block_right=1,\n global_block_left=1,\n global_block_right=1,\n ):\n \"\"\"\n For a single row block get random row attention.\n\n Args:\n block_id: int. block id of row.\n to_start_block_id: int. random attention column start id.\n to_end_block_id: int. random attention column end id.\n num_rand_blocks: int. number of random blocks to be selected.\n window_block_left: int. number of blocks of window to left of a block.\n window_block_right: int. number of blocks of window to right of a block.\n global_block_left: int. Number of blocks globally used to the left.\n global_block_right: int. 
Number of blocks globally used to the right.\n\n        Returns:\n            row containing the random attention vector of size num_rand_blocks.\n        \"\"\"\n        # list of to_blocks from which to choose random attention\n        to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32)\n        # permute the blocks\n        perm_block = np.random.permutation(to_block_list)\n\n        # illegal blocks for the current block id, using window\n        illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1))\n\n        # Add blocks at the start and at the end\n        illegal_blocks.extend(list(range(global_block_left)))\n        illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id)))\n\n        # The second from_block cannot choose random attention on the second last to_block\n        if block_id == 1:\n            illegal_blocks.append(to_end_block_id - 2)\n\n        # The second last from_block cannot choose random attention on the second to_block\n        if block_id == to_end_block_id - 2:\n            illegal_blocks.append(1)\n\n        selected_random_blocks = []\n\n        for i in range(to_end_block_id - to_start_block_id):\n            if perm_block[i] not in illegal_blocks:\n                selected_random_blocks.append(perm_block[i])\n            if len(selected_random_blocks) == num_rand_blocks:\n                break\n        return np.array(selected_random_blocks, dtype=np.int32)\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BigBird\nclass BigBirdSelfOutput(nn.Module):\n    def __init__(self, config):\n        super().__init__()\n        self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n        self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n    def forward(self, hidden_states, input_tensor):\n        hidden_states = self.dense(hidden_states)\n        hidden_states = self.dropout(hidden_states)\n        hidden_states = self.LayerNorm(hidden_states + input_tensor)\n        return hidden_states\n\n\nclass BigBirdAttention(nn.Module):\n    def __init__(self, config, seed=None):\n        super().__init__()\n        self.attention_type = config.attention_type\n        self.config = config\n        self.seed = seed\n\n        if self.config.attention_type == \"original_full\":\n            self.self = BigBirdSelfAttention(config)\n        elif self.config.attention_type == \"block_sparse\":\n            self.self = BigBirdBlockSparseAttention(config, seed)\n        else:\n            raise ValueError(\n                f\"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}\"\n            )\n\n        self.output = BigBirdSelfOutput(config)\n\n    def set_attention_type(self, value: str):\n        if value not in [\"original_full\", \"block_sparse\"]:\n            raise ValueError(\n                f\"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}\"\n            )\n        # attention type is already correctly set\n        if value == self.attention_type:\n            return\n\n        self.attention_type = value\n        if value == \"original_full\":\n            # copy all weights to new full attention class\n            attn_weights = BigBirdSelfAttention(self.config)\n        else:\n            # copy all weights to new sparse attention class\n            attn_weights = BigBirdBlockSparseAttention(self.config, self.seed)\n\n        attn_weights.query = self.self.query\n        attn_weights.value = self.self.value\n        attn_weights.key = self.self.key\n        self.self = attn_weights\n        self.attention_type = value\n\n        if not self.training:\n            self.self.eval()\n\n    def forward(\n        self,\n        hidden_states,\n        attention_mask=None,\n        head_mask=None,\n        encoder_hidden_states=None,\n        encoder_attention_mask=None,\n        past_key_value=None,\n        output_attentions=False,\n        # block_sparse config\n        band_mask=None,\n        
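# the block-sparse mask arguments here and below are ignored when attention_type == \"original_full\"\n        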
from_mask=None,\n to_mask=None,\n from_blocked_mask=None,\n to_blocked_mask=None,\n ):\n\n if self.attention_type == \"original_full\":\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n else:\n assert (\n encoder_hidden_states is None\n ), \"BigBird cannot be used as a decoder when config.attention_type != 'original_full'\"\n self_outputs = self.self(\n hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions\n )\n\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BigBird\nclass BigBirdIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BigBird\nclass BigBirdOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BigBirdLayer(nn.Module):\n def __init__(self, config, seed=None):\n super().__init__()\n self.config = config\n self.attention_type = config.attention_type\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = BigBirdAttention(config, seed=seed)\n self.is_decoder = config.is_decoder\n self.add_cross_attention = config.add_cross_attention\n if self.add_cross_attention:\n assert self.is_decoder, f\"{self} should be used as a decoder model if cross attention is added\"\n self.crossattention = BigBirdAttention(config)\n self.intermediate = BigBirdIntermediate(config)\n self.output = BigBirdOutput(config)\n\n def set_attention_type(self, value: str):\n if value not in [\"original_full\", \"block_sparse\"]:\n raise ValueError(\n f\"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}\"\n )\n # attention type is already correctly set\n if value == self.attention_type:\n return\n self.attention_type = value\n self.attention.set_attention_type(value)\n\n if self.add_cross_attention:\n self.crossattention.set_attention_type(value)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n band_mask=None,\n from_mask=None,\n to_mask=None,\n blocked_encoder_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n self_attention_outputs = 
self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_value=self_attn_past_key_value,\n output_attentions=output_attentions,\n band_mask=band_mask,\n from_mask=from_mask,\n to_mask=to_mask,\n from_blocked_mask=blocked_encoder_mask,\n to_blocked_mask=blocked_encoder_mask,\n )\n attention_output = self_attention_outputs[0]\n\n # if decoder, the last output is tuple of self-attn cache\n if self.is_decoder:\n outputs = self_attention_outputs[1:-1]\n present_key_value = self_attention_outputs[-1]\n else:\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n cross_attn_present_key_value = None\n if self.is_decoder and encoder_hidden_states is not None:\n if not hasattr(self, \"crossattention\"):\n raise ValueError(\n f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with \\\n cross-attention layers by setting `config.add_cross_attention=True`\"\n )\n\n # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n cross_attention_outputs = self.crossattention(\n attention_output,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n cross_attn_past_key_value,\n output_attentions,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights\n\n # add cross-attn cache to positions 3,4 of present_key_value tuple\n cross_attn_present_key_value = cross_attention_outputs[-1]\n present_key_value = present_key_value + cross_attn_present_key_value\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n\n outputs = (layer_output,) + outputs\n\n # if decoder, return the attn key/values as the last output\n if self.is_decoder:\n outputs = outputs + (present_key_value,)\n\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\nclass BigBirdEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.attention_type = config.attention_type\n\n self.layer = nn.ModuleList(\n [BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)]\n )\n\n def set_attention_type(self, value: str):\n if value not in [\"original_full\", \"block_sparse\"]:\n raise ValueError(\n f\"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}\"\n )\n # attention type is already correctly set\n if value == self.attention_type:\n return\n self.attention_type = value\n for layer in self.layer:\n layer.set_attention_type(value)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n band_mask=None,\n from_mask=None,\n to_mask=None,\n blocked_encoder_mask=None,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else 
None\n\n next_decoder_cache = () if use_cache else None\n\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n past_key_value = past_key_values[i] if past_key_values is not None else None\n\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n if use_cache:\n logger.warning(\n \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting \"\n \"`use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, past_key_value, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n band_mask,\n from_mask,\n to_mask,\n blocked_encoder_mask,\n )\n else:\n\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n band_mask,\n from_mask,\n to_mask,\n blocked_encoder_mask,\n past_key_value,\n output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache += (layer_outputs[-1],)\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n next_decoder_cache,\n all_hidden_states,\n all_self_attentions,\n all_cross_attentions,\n ]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_decoder_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BigBird\nclass BigBirdPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BigBird\nclass BigBirdLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = BigBirdPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = 
self.decoder(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BigBird\nclass BigBirdOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = BigBirdLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->BigBird\nclass BigBirdOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->BigBird\nclass BigBirdPreTrainingHeads(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = BigBirdLMPredictionHead(config)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass BigBirdPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = BigBirdConfig\n load_tf_weights = load_tf_weights_in_big_bird\n base_model_prefix = \"bert\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nBIG_BIRD_START_DOCSTRING = r\"\"\"\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config (:class:`~transformers.BigBirdConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nBIG_BIRD_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.BigBirdTokenizer`. See\n :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? 
<../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n            more detail.\n        return_dict (:obj:`bool`, `optional`):\n            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@dataclass\nclass BigBirdForPreTrainingOutput(ModelOutput):\n    \"\"\"\n    Output type of :class:`~transformers.BigBirdForPreTraining`.\n\n    Args:\n        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):\n            Total loss as the sum of the masked language modeling loss and the next sequence prediction\n            (classification) loss.\n        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation\n            before SoftMax).\n        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n            of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n            Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n            sequence_length, sequence_length)`.\n\n            Attention weights after the attention softmax, used to compute the weighted average in the self-attention\n            heads.\n    \"\"\"\n\n    loss: Optional[torch.FloatTensor] = None\n    prediction_logits: torch.FloatTensor = None\n    seq_relationship_logits: torch.FloatTensor = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n    attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass BigBirdForQuestionAnsweringModelOutput(ModelOutput):\n    \"\"\"\n    Base class for outputs of question answering models.\n\n    Args:\n        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):\n            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n        start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):\n            Span-start scores (before SoftMax).\n        end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):\n            Span-end scores (before SoftMax).\n        pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 1)`):\n            Pooler output from BigBirdModel.\n        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n            of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n            Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n            sequence_length, sequence_length)`.\n\n            Attention weights after the attention softmax, used to compute the weighted average in the self-attention\n            heads.\n    \"\"\"\n\n    loss: Optional[torch.FloatTensor] = None\n    start_logits: torch.FloatTensor = None\n    end_logits: torch.FloatTensor = None\n    pooler_output: torch.FloatTensor = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n    attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@add_start_docstrings(\n    \"The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.\",\n    BIG_BIRD_START_DOCSTRING,\n)\nclass BigBirdModel(BigBirdPreTrainedModel):\n    \"\"\"\n\n    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n    cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`\n    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n    input to the forward pass.\n    \"\"\"\n\n    def __init__(self, config, add_pooling_layer=True):\n        super().__init__(config)\n        self.attention_type = self.config.attention_type\n        self.config = config\n\n        self.block_size = self.config.block_size\n\n        self.embeddings = BigBirdEmbeddings(config)\n        self.encoder = BigBirdEncoder(config)\n\n        if add_pooling_layer:\n            self.pooler = nn.Linear(config.hidden_size, config.hidden_size)\n            self.activation = nn.Tanh()\n        else:\n            self.pooler = None\n            self.activation = None\n\n        if self.attention_type != \"original_full\" and config.add_cross_attention:\n            logger.warning(\n                \"When using `BigBirdForCausalLM` as decoder, then `attention_type` must be `original_full`. 
Setting `attention_type=original_full`\"\n )\n self.set_attention_type(\"original_full\")\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def set_attention_type(self, value: str):\n if value not in [\"original_full\", \"block_sparse\"]:\n raise ValueError(\n f\"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}\"\n )\n # attention type is already correctly set\n if value == self.attention_type:\n return\n self.attention_type = value\n self.encoder.set_attention_type(value)\n\n @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n batch_size, seq_length = input_shape\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n if token_type_ids is None:\n if hasattr(self.embeddings, \"token_type_ids\"):\n buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # in order to use block_sparse attention, sequence_length has to be at least\n # bigger than all global attentions: 2 * block_size\n # + sliding tokens: 3 * block_size\n # + random tokens: 2 * num_random_blocks * block_size\n max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size\n if self.attention_type == \"block_sparse\" and seq_length <= max_tokens_to_attend:\n # change attention_type from block_sparse to original_full\n sequence_length = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)\n logger.warning(\n \"Attention type 'block_sparse' is not possible if sequence_length: \"\n f\"{sequence_length} <= num global tokens: 2 * config.block_size \"\n \"+ min. 
num sliding tokens: 3 * config.block_size \"\n \"+ config.num_random_blocks * config.block_size \"\n \"+ additional buffer: config.num_random_blocks * config.block_size \"\n f\"= {max_tokens_to_attend} with config.block_size \"\n f\"= {self.config.block_size}, config.num_random_blocks \"\n f\"= {self.config.num_random_blocks}.\"\n \"Changing attention type to 'original_full'...\"\n )\n self.set_attention_type(\"original_full\")\n\n if self.attention_type == \"block_sparse\":\n (\n padding_len,\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n inputs_embeds,\n ) = self._pad_to_block_size(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n pad_token_id=self.config.pad_token_id,\n )\n else:\n padding_len = 0\n\n if self.attention_type == \"block_sparse\":\n blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn(\n attention_mask, self.block_size\n )\n extended_attention_mask = None\n\n elif self.attention_type == \"original_full\":\n blocked_encoder_mask = None\n band_mask = None\n from_mask = None\n to_mask = None\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n attention_mask, input_shape, device\n )\n else:\n raise ValueError(\n f\"attention_type can either be original_full or block_sparse, but is {self.attention_type}\"\n )\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n band_mask=band_mask,\n from_mask=from_mask,\n to_mask=to_mask,\n blocked_encoder_mask=blocked_encoder_mask,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n\n pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if (self.pooler is not None) else None\n\n # undo padding\n if padding_len > 0:\n # unpad 
`sequence_output` because the calling function is expecting a length == input_ids.size(1)\n sequence_output = sequence_output[:, :-padding_len]\n\n if not return_dict:\n return (sequence_output, pooler_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooler_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n @staticmethod\n def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int):\n\n batch_size, seq_length = attention_mask.size()\n assert (\n seq_length % block_size == 0\n ), f\"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block size is {block_size}.\"\n\n def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):\n \"\"\"\n Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_blocked_mask: 2D Tensor of shape [batch_size,\n from_seq_length//from_block_size, from_block_size].\n to_blocked_mask: int32 Tensor of shape [batch_size,\n to_seq_length//to_block_size, to_block_size].\n\n Returns:\n float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size,\n 3*to_block_size].\n \"\"\"\n exp_blocked_to_pad = torch.cat(\n [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2\n )\n band_mask = torch.einsum(\"blq,blk->blqk\", from_blocked_mask[:, 2:-2], exp_blocked_to_pad)\n band_mask.unsqueeze_(1)\n return band_mask\n\n blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size)\n band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask)\n\n from_mask = attention_mask.view(batch_size, 1, seq_length, 1)\n to_mask = attention_mask.view(batch_size, 1, 1, seq_length)\n\n return blocked_encoder_mask, band_mask, from_mask, to_mask\n\n def _pad_to_block_size(\n self,\n input_ids: torch.Tensor,\n attention_mask: torch.Tensor,\n token_type_ids: torch.Tensor,\n position_ids: torch.Tensor,\n inputs_embeds: torch.Tensor,\n pad_token_id: int,\n ):\n \"\"\"A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention.\"\"\"\n # padding\n block_size = self.config.block_size\n\n input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape\n batch_size, seq_len = input_shape[:2]\n\n padding_len = (block_size - seq_len % block_size) % block_size\n if padding_len > 0:\n logger.info(\n f\"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of \"\n f\"`config.block_size`: {block_size}\"\n )\n if input_ids is not None:\n input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id)\n if position_ids is not None:\n # pad with position_id = pad_token_id as in modeling_bigbird.BigBirdEmbeddings\n position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id)\n if inputs_embeds is not None:\n input_ids_padding = inputs_embeds.new_full(\n (batch_size, padding_len),\n self.config.pad_token_id,\n dtype=torch.long,\n )\n inputs_embeds_padding = self.embeddings(input_ids_padding)\n inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)\n\n attention_mask = nn.functional.pad(\n attention_mask, (0, padding_len), value=False\n ) # no attention on the padding tokens\n token_type_ids = nn.functional.pad(token_type_ids, 
(0, padding_len), value=0)  # pad with token_type_id = 0\n\n        return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds\n\n\nclass BigBirdForPreTraining(BigBirdPreTrainedModel):\n    def __init__(self, config):\n        super().__init__(config)\n\n        self.bert = BigBirdModel(config, add_pooling_layer=True)\n        self.cls = BigBirdPreTrainingHeads(config)\n\n        self.init_weights()\n\n    def get_output_embeddings(self):\n        return self.cls.predictions.decoder\n\n    def set_output_embeddings(self, new_embeddings):\n        self.cls.predictions.decoder = new_embeddings\n\n    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n    @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n    def forward(\n        self,\n        input_ids=None,\n        attention_mask=None,\n        token_type_ids=None,\n        position_ids=None,\n        head_mask=None,\n        inputs_embeds=None,\n        labels=None,\n        next_sentence_label=None,\n        output_attentions=None,\n        output_hidden_states=None,\n        return_dict=None,\n    ):\n        r\"\"\"\n        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):\n            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):\n            Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be\n            added to masked_lm loss. Input should be a sequence pair (see :obj:`input_ids` docstring) Indices should be\n            in ``[0, 1]``:\n\n            - 0 indicates sequence B is a continuation of sequence A,\n            - 1 indicates sequence B is a random sequence.\n        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n            Used to hide legacy arguments that have been deprecated.\n\n        Returns:\n\n        Example::\n\n            >>> from transformers import BigBirdTokenizer, BigBirdForPreTraining\n            >>> import torch\n\n            >>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')\n            >>> model = BigBirdForPreTraining.from_pretrained('google/bigbird-roberta-base')\n\n            >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n            >>> outputs = model(**inputs)\n\n            >>> prediction_logits = outputs.prediction_logits\n            >>> seq_relationship_logits = outputs.seq_relationship_logits\n        \"\"\"\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        outputs = self.bert(\n            input_ids,\n            attention_mask=attention_mask,\n            token_type_ids=token_type_ids,\n            position_ids=position_ids,\n            head_mask=head_mask,\n            inputs_embeds=inputs_embeds,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n\n        sequence_output, pooled_output = outputs[:2]\n        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n\n        total_loss = None\n        if labels is not None:\n            loss_fct = CrossEntropyLoss()\n            total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n        if next_sentence_label is not None and total_loss is not None:\n            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n            total_loss = total_loss + next_sentence_loss\n\n        if not return_dict:\n            output = (prediction_scores, seq_relationship_score) + outputs[2:]\n            return ((total_loss,) + output) if 
total_loss is not None else output\n\n return BigBirdForPreTrainingOutput(\n loss=total_loss,\n prediction_logits=prediction_scores,\n seq_relationship_logits=seq_relationship_score,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\"\"\"BigBird Model with a `language modeling` head on top. \"\"\", BIG_BIRD_START_DOCSTRING)\nclass BigBirdForMaskedLM(BigBirdPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.bert = BigBirdModel(config)\n self.cls = BigBirdOnlyMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n effective_batch_size = input_shape[0]\n\n # add a dummy token\n assert self.config.pad_token_id is not None, \"The PAD token should be defined for generation\"\n attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)\n dummy_token = torch.full(\n (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device\n )\n input_ids = 
torch.cat([input_ids, dummy_token], dim=1)\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask}\n\n\n@add_start_docstrings(\n \"\"\"BigBird Model with a `language modeling` head on top for CLM fine-tuning. \"\"\", BIG_BIRD_START_DOCSTRING\n)\nclass BigBirdForCausalLM(BigBirdPreTrainedModel):\n\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if not config.is_decoder:\n logger.warning(\"If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`\")\n\n self.bert = BigBirdModel(config)\n self.cls = BigBirdOnlyMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in\n            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are\n            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.\n        use_cache (:obj:`bool`, `optional`):\n            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n            decoding (see :obj:`past_key_values`).\n\n        Returns:\n\n        Example::\n\n            >>> from transformers import BigBirdTokenizer, BigBirdForCausalLM, BigBirdConfig\n            >>> import torch\n\n            >>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')\n            >>> config = BigBirdConfig.from_pretrained(\"google/bigbird-roberta-base\")\n            >>> config.is_decoder = True\n            >>> model = BigBirdForCausalLM.from_pretrained('google/bigbird-roberta-base', config=config)\n\n            >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n            >>> outputs = model(**inputs)\n\n            >>> prediction_logits = outputs.logits\n        \"\"\"\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        outputs = self.bert(\n            input_ids,\n            attention_mask=attention_mask,\n            token_type_ids=token_type_ids,\n            position_ids=position_ids,\n            head_mask=head_mask,\n            inputs_embeds=inputs_embeds,\n            encoder_hidden_states=encoder_hidden_states,\n            encoder_attention_mask=encoder_attention_mask,\n            past_key_values=past_key_values,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n\n        sequence_output = outputs[0]\n        prediction_scores = self.cls(sequence_output)\n\n        lm_loss = None\n        if labels is not None:\n            # we are doing next-token prediction; shift prediction scores and input ids by one\n            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n            labels = labels[:, 1:].contiguous()\n            loss_fct = CrossEntropyLoss()\n            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n        if not return_dict:\n            output = (prediction_scores,) + outputs[2:]\n            return ((lm_loss,) + output) if lm_loss is not None else output\n\n        return CausalLMOutputWithCrossAttentions(\n            loss=lm_loss,\n            logits=prediction_scores,\n            past_key_values=outputs.past_key_values,\n            hidden_states=outputs.hidden_states,\n            attentions=outputs.attentions,\n            cross_attentions=outputs.cross_attentions,\n        )\n\n    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n        input_shape = input_ids.shape\n\n        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n        if attention_mask is None:\n            attention_mask = input_ids.new_ones(input_shape)\n\n        # cut decoder_input_ids if past is used\n        if past is not None:\n            input_ids = input_ids[:, -1:]\n\n        return {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"past_key_values\": past}\n\n    def _reorder_cache(self, past, beam_idx):\n        reordered_past = ()\n        for layer_past in past:\n            reordered_past += (\n                tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n            )\n        return reordered_past\n\n\nclass BigBirdClassificationHead(nn.Module):\n    \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n    def __init__(self, config):\n        super().__init__()\n        self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n        classifier_dropout = (\n            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n        )\n        self.dropout = 
nn.Dropout(classifier_dropout)\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\n\n self.config = config\n\n def forward(self, features, **kwargs):\n x = features[:, 0, :] # take <s> token (equiv. to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = ACT2FN[self.config.hidden_act](x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\n@add_start_docstrings(\n \"\"\"\n BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n \"\"\",\n BIG_BIRD_START_DOCSTRING,\n)\nclass BigBirdForSequenceClassification(BigBirdPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n self.bert = BigBirdModel(config)\n self.classifier = BigBirdClassificationHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled 
output and a\n softmax) e.g. for RocStories/SWAG tasks.\n \"\"\",\n BIG_BIRD_START_DOCSTRING,\n)\nclass BigBirdForMultipleChoice(BigBirdPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.bert = BigBirdModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(\n BIG_BIRD_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\")\n )\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See\n :obj:`input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n BIG_BIRD_START_DOCSTRING,\n)\nclass BigBirdForTokenClassification(BigBirdPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BigBirdModel(config)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass BigBirdForQuestionAnsweringHead(nn.Module):\n \"\"\"Head for question answering tasks.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.intermediate = BigBirdIntermediate(config)\n self.output = BigBirdOutput(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, encoder_output):\n hidden_states = self.dropout(encoder_output)\n hidden_states = self.intermediate(hidden_states)\n hidden_states = self.output(hidden_states, encoder_output)\n hidden_states = self.qa_outputs(hidden_states)\n return hidden_states\n\n\n@add_start_docstrings(\n \"\"\"\n BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n BIG_BIRD_START_DOCSTRING,\n)\nclass 
BigBirdForQuestionAnswering(BigBirdPreTrainedModel):\n def __init__(self, config, add_pooling_layer=False):\n super().__init__(config)\n\n config.num_labels = 2\n self.num_labels = config.num_labels\n self.sep_token_id = config.sep_token_id\n\n self.bert = BigBirdModel(config, add_pooling_layer=add_pooling_layer)\n self.qa_classifier = BigBirdForQuestionAnsweringHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/bigbird-base-trivia-itc\",\n output_type=BigBirdForQuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n question_lengths=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)\n\n if question_lengths is None and input_ids is not None:\n # assuming input_ids format: <cls> <question> <sep> context <sep>\n question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1\n question_lengths.unsqueeze_(1)\n\n logits_mask = None\n if question_lengths is not None:\n # setting lengths logits to `-inf`\n logits_mask = self.prepare_question_mask(question_lengths, seqlen)\n if token_type_ids is None:\n token_type_ids = (~logits_mask).long()\n logits_mask = logits_mask\n logits_mask[:, 0] = False\n logits_mask.unsqueeze_(2)\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n logits = self.qa_classifier(sequence_output)\n\n if logits_mask is not None:\n # removing question tokens from the competition\n logits = logits - logits_mask * 1e6\n\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n 
ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return BigBirdForQuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n pooler_output=outputs.pooler_output,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n @staticmethod\n def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int):\n # q_lengths -> (bz, 1)\n mask = torch.arange(0, maxlen).to(q_lengths.device)\n mask.unsqueeze_(0) # -> (1, maxlen)\n mask = mask < q_lengths\n return mask\n",
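A minimal usage sketch for the BigBird file above, illustrating the sequence-length rule that `BigBirdModel.forward` enforces: block-sparse attention needs more than (5 + 2 * num_random_blocks) * block_size tokens, shorter inputs trigger a permanent fall-back to "original_full" attention, and longer inputs are padded to a multiple of block_size and un-padded before the hidden states are returned. The tiny, randomly initialised config below is an assumption made purely so the sketch runs quickly on CPU; it is not a released checkpoint, and only tensor shapes are checked.

import torch
from transformers import BigBirdConfig, BigBirdModel

# Assumed toy configuration (not a real checkpoint), small enough to run in seconds.
config = BigBirdConfig(
    hidden_size=128,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=256,
    attention_type="block_sparse",
    block_size=16,
    num_random_blocks=3,
)
model = BigBirdModel(config)

# Threshold from the comment in forward(): 2 * block_size global tokens
# + 3 * block_size sliding tokens + 2 * num_random_blocks * block_size random tokens.
max_tokens_to_attend = (5 + 2 * config.num_random_blocks) * config.block_size  # 176

# 193 > 176, so block-sparse attention is kept; _pad_to_block_size() pads the
# batch from 193 to 208 tokens internally, and the padding is stripped from
# last_hidden_state before it is returned.
input_ids = torch.randint(0, config.vocab_size, (1, max_tokens_to_attend + 17))
outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 193, 128])

# A batch of <= 176 tokens would instead log the warning above and switch this
# model instance to attention_type="original_full" for all subsequent calls.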
"# coding=utf-8\n# Copyright 2021 Microsoft and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 DeBERTa-v2 model. \"\"\"\n\n\nfrom typing import Dict, Optional, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ...activations_tf import get_tf_activation\nfrom ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward\nfrom ...modeling_tf_outputs import (\n TFBaseModelOutput,\n TFMaskedLMOutput,\n TFQuestionAnsweringModelOutput,\n TFSequenceClassifierOutput,\n TFTokenClassifierOutput,\n)\nfrom ...modeling_tf_utils import (\n TFMaskedLanguageModelingLoss,\n TFModelInputType,\n TFPreTrainedModel,\n TFQuestionAnsweringLoss,\n TFSequenceClassificationLoss,\n TFTokenClassificationLoss,\n get_initializer,\n input_processing,\n shape_list,\n)\nfrom ...utils import logging\nfrom .configuration_deberta_v2 import DebertaV2Config\n\n\nlogger = logging.get_logger(__name__)\n\n\n_CONFIG_FOR_DOC = \"DebertaV2Config\"\n_TOKENIZER_FOR_DOC = \"DebertaV2Tokenizer\"\n_CHECKPOINT_FOR_DOC = \"kamalkraj/deberta-v2-xlarge\"\n\nTF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"kamalkraj/deberta-v2-xlarge\",\n # See all DeBERTa models at https://huggingface.co/models?filter=deberta-v2\n]\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaContextPooler with Deberta->DebertaV2\nclass TFDebertaV2ContextPooler(tf.keras.layers.Layer):\n def __init__(self, config: DebertaV2Config, **kwargs):\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(config.pooler_hidden_size, name=\"dense\")\n self.dropout = TFDebertaV2StableDropout(config.pooler_dropout, name=\"dropout\")\n self.config = config\n\n def call(self, hidden_states, training: bool = False):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n context_token = hidden_states[:, 0]\n context_token = self.dropout(context_token, training=training)\n pooled_output = self.dense(context_token)\n pooled_output = get_tf_activation(self.config.pooler_hidden_act)(pooled_output)\n return pooled_output\n\n @property\n def output_dim(self) -> int:\n return self.config.hidden_size\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaXSoftmax with Deberta->DebertaV2\nclass TFDebertaV2XSoftmax(tf.keras.layers.Layer):\n \"\"\"\n Masked Softmax which is optimized for saving memory\n\n Args:\n input (:obj:`tf.Tensor`): The input tensor that will apply softmax.\n mask (:obj:`tf.Tensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.\n dim (int): The dimension that will apply softmax\n \"\"\"\n\n def __init__(self, axis=-1, **kwargs):\n super().__init__(**kwargs)\n self.axis = axis\n\n def call(self, inputs: tf.Tensor, mask: tf.Tensor):\n\n rmask = tf.logical_not(tf.cast(mask, tf.bool))\n output = tf.where(rmask, float(\"-inf\"), inputs)\n output = tf.nn.softmax(output, self.axis)\n output = tf.where(rmask, 
0.0, output)\n return output\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.get_mask\ndef get_mask(input, dropout):\n mask = tf.cast(\n 1 - tf.compat.v1.distributions.Bernoulli(probs=1 - dropout).sample(sample_shape=shape_list(input)), tf.bool\n )\n return mask, dropout\n\n\[email protected]_gradient\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaXDropout\ndef TFDebertaV2XDropout(input, local_ctx):\n mask, dropout = get_mask(input, local_ctx)\n scale = tf.convert_to_tensor(1.0 / (1 - dropout), dtype=tf.float32)\n input = tf.cond(dropout > 0, lambda: tf.where(mask, 0.0, input) * scale, lambda: input)\n\n def custom_grad(upstream_grad):\n return tf.cond(\n scale > 1, lambda: (tf.where(mask, 0.0, upstream_grad) * scale, None), lambda: (upstream_grad, None)\n )\n\n return input, custom_grad\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaStableDropout with Deberta->DebertaV2\nclass TFDebertaV2StableDropout(tf.keras.layers.Layer):\n \"\"\"\n Optimized dropout module for stabilizing the training\n\n Args:\n drop_prob (float): the dropout probabilities\n \"\"\"\n\n def __init__(self, drop_prob, **kwargs):\n super().__init__(**kwargs)\n self.drop_prob = tf.convert_to_tensor(drop_prob, dtype=tf.float32)\n\n def call(self, inputs: tf.Tensor, training: tf.Tensor = False):\n if training and self.drop_prob > 0:\n return TFDebertaV2XDropout(inputs, self.drop_prob)\n return inputs\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaSelfOutput with Deberta->DebertaV2\nclass TFDebertaV2SelfOutput(tf.keras.layers.Layer):\n def __init__(self, config: DebertaV2Config, **kwargs):\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(config.hidden_size, name=\"dense\")\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name=\"dropout\")\n\n def call(self, hidden_states, input_tensor, training: bool = False):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaAttention with Deberta->DebertaV2\nclass TFDebertaV2Attention(tf.keras.layers.Layer):\n def __init__(self, config: DebertaV2Config, **kwargs):\n super().__init__(**kwargs)\n self.self = TFDebertaV2DisentangledSelfAttention(config, name=\"self\")\n self.dense_output = TFDebertaV2SelfOutput(config, name=\"output\")\n self.config = config\n\n def call(\n self,\n input_tensor: tf.Tensor,\n attention_mask: tf.Tensor,\n query_states: tf.Tensor = None,\n relative_pos: tf.Tensor = None,\n rel_embeddings: tf.Tensor = None,\n output_attentions: bool = False,\n training: bool = False,\n ) -> Tuple[tf.Tensor]:\n self_outputs = self.self(\n hidden_states=input_tensor,\n attention_mask=attention_mask,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n output_attentions=output_attentions,\n training=training,\n )\n if query_states is None:\n query_states = input_tensor\n attention_output = self.dense_output(\n hidden_states=self_outputs[0], input_tensor=query_states, training=training\n )\n\n output = (attention_output,) + self_outputs[1:]\n\n return output\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaIntermediate with Deberta->DebertaV2\nclass 
TFDebertaV2Intermediate(tf.keras.layers.Layer):\n def __init__(self, config: DebertaV2Config, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = get_tf_activation(config.hidden_act)\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def call(self, hidden_states: tf.Tensor) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n\n return hidden_states\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaOutput with Deberta->DebertaV2\nclass TFDebertaV2Output(tf.keras.layers.Layer):\n def __init__(self, config: DebertaV2Config, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name=\"dropout\")\n\n def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n\n return hidden_states\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaLayer with Deberta->DebertaV2\nclass TFDebertaV2Layer(tf.keras.layers.Layer):\n def __init__(self, config: DebertaV2Config, **kwargs):\n super().__init__(**kwargs)\n\n self.attention = TFDebertaV2Attention(config, name=\"attention\")\n self.intermediate = TFDebertaV2Intermediate(config, name=\"intermediate\")\n self.bert_output = TFDebertaV2Output(config, name=\"output\")\n\n def call(\n self,\n hidden_states: tf.Tensor,\n attention_mask: tf.Tensor,\n query_states: tf.Tensor = None,\n relative_pos: tf.Tensor = None,\n rel_embeddings: tf.Tensor = None,\n output_attentions: bool = False,\n training: bool = False,\n ) -> Tuple[tf.Tensor]:\n attention_outputs = self.attention(\n input_tensor=hidden_states,\n attention_mask=attention_mask,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n output_attentions=output_attentions,\n training=training,\n )\n attention_output = attention_outputs[0]\n intermediate_output = self.intermediate(hidden_states=attention_output)\n layer_output = self.bert_output(\n hidden_states=intermediate_output, input_tensor=attention_output, training=training\n )\n outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them\n\n return outputs\n\n\nclass TFDebertaV2ConvLayer(tf.keras.layers.Layer):\n def __init__(self, config: DebertaV2Config, **kwargs):\n super().__init__(**kwargs)\n\n self.kernel_size = getattr(config, \"conv_kernel_size\", 3)\n # groups = getattr(config, \"conv_groups\", 1)\n self.conv_act = get_tf_activation(getattr(config, \"conv_act\", \"tanh\"))\n self.padding = (self.kernel_size - 1) // 2\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name=\"dropout\")\n self.config = config\n\n def build(self, input_shape):\n with tf.name_scope(\"conv\"):\n self.conv_kernel = 
self.add_weight(\n                name=\"kernel\",\n                shape=[self.kernel_size, self.config.hidden_size, self.config.hidden_size],\n                initializer=get_initializer(self.config.initializer_range),\n            )\n            self.conv_bias = self.add_weight(\n                name=\"bias\", shape=[self.config.hidden_size], initializer=tf.zeros_initializer()\n            )\n        return super().build(input_shape)\n\n    def call(\n        self, hidden_states: tf.Tensor, residual_states: tf.Tensor, input_mask: tf.Tensor, training: bool = False\n    ) -> tf.Tensor:\n        out = tf.nn.conv2d(\n            tf.expand_dims(hidden_states, 1),\n            tf.expand_dims(self.conv_kernel, 0),\n            strides=1,\n            padding=[[0, 0], [0, 0], [self.padding, self.padding], [0, 0]],\n        )\n        out = tf.squeeze(tf.nn.bias_add(out, self.conv_bias), 1)\n        rmask = tf.cast(1 - input_mask, tf.bool)\n        out = tf.where(tf.broadcast_to(tf.expand_dims(rmask, -1), shape_list(out)), 0.0, out)\n        out = self.dropout(out, training=training)\n        out = self.conv_act(out)\n\n        layer_norm_input = residual_states + out\n        output = self.LayerNorm(layer_norm_input)\n\n        if input_mask is None:\n            output_states = output\n        else:\n            if len(shape_list(input_mask)) != len(shape_list(layer_norm_input)):\n                if len(shape_list(input_mask)) == 4:\n                    input_mask = tf.squeeze(tf.squeeze(input_mask, axis=1), axis=1)\n                input_mask = tf.cast(tf.expand_dims(input_mask, axis=2), tf.float32)\n\n            output_states = output * input_mask\n\n        return output_states\n\n\nclass TFDebertaV2Encoder(tf.keras.layers.Layer):\n    def __init__(self, config: DebertaV2Config, **kwargs):\n        super().__init__(**kwargs)\n\n        self.layer = [TFDebertaV2Layer(config, name=f\"layer_._{i}\") for i in range(config.num_hidden_layers)]\n        self.relative_attention = getattr(config, \"relative_attention\", False)\n        self.config = config\n        if self.relative_attention:\n            self.max_relative_positions = getattr(config, \"max_relative_positions\", -1)\n            if self.max_relative_positions < 1:\n                self.max_relative_positions = config.max_position_embeddings\n\n            self.position_buckets = getattr(config, \"position_buckets\", -1)\n            self.pos_ebd_size = self.max_relative_positions * 2\n\n            if self.position_buckets > 0:\n                self.pos_ebd_size = self.position_buckets * 2\n\n            self.norm_rel_ebd = [x.strip() for x in getattr(config, \"norm_rel_ebd\", \"none\").lower().split(\"|\")]\n\n            if \"layer_norm\" in self.norm_rel_ebd:\n                self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n\n        self.conv = TFDebertaV2ConvLayer(config, name=\"conv\") if getattr(config, \"conv_kernel_size\", 0) > 0 else None\n\n    def build(self, input_shape):\n        if self.relative_attention:\n            self.rel_embeddings = self.add_weight(\n                name=\"rel_embeddings.weight\",\n                shape=[self.pos_ebd_size, self.config.hidden_size],\n                initializer=get_initializer(self.config.initializer_range),\n            )\n        return super().build(input_shape)\n\n    def get_rel_embedding(self):\n        rel_embeddings = self.rel_embeddings if self.relative_attention else None\n        if rel_embeddings is not None and (\"layer_norm\" in self.norm_rel_ebd):\n            rel_embeddings = self.LayerNorm(rel_embeddings)\n        return rel_embeddings\n\n    def get_attention_mask(self, attention_mask):\n        if len(shape_list(attention_mask)) <= 2:\n            extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2)\n            attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1)\n            attention_mask = tf.cast(attention_mask, tf.uint8)\n        elif len(shape_list(attention_mask)) == 3:\n            attention_mask = tf.expand_dims(attention_mask, 1)\n\n        return attention_mask\n\n    def get_rel_pos(self, 
hidden_states, query_states=None, relative_pos=None):\n if self.relative_attention and relative_pos is None:\n q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2]\n relative_pos = build_relative_position(\n q,\n shape_list(hidden_states)[-2],\n bucket_size=self.position_buckets,\n max_position=self.max_relative_positions,\n )\n return relative_pos\n\n def call(\n self,\n hidden_states: tf.Tensor,\n attention_mask: tf.Tensor,\n query_states: tf.Tensor = None,\n relative_pos: tf.Tensor = None,\n output_attentions: bool = False,\n output_hidden_states: bool = False,\n return_dict: bool = True,\n training: bool = False,\n ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:\n if len(shape_list(attention_mask)) <= 2:\n input_mask = attention_mask\n else:\n input_mask = tf.cast(tf.math.reduce_sum(attention_mask, axis=-2) > 0, dtype=tf.uint8)\n\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n attention_mask = self.get_attention_mask(attention_mask)\n relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)\n\n next_kv = hidden_states\n\n rel_embeddings = self.get_rel_embedding()\n output_states = next_kv\n for i, layer_module in enumerate(self.layer):\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (output_states,)\n\n layer_outputs = layer_module(\n hidden_states=next_kv,\n attention_mask=attention_mask,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n output_attentions=output_attentions,\n training=training,\n )\n output_states = layer_outputs[0]\n\n if i == 0 and self.conv is not None:\n output_states = self.conv(hidden_states, output_states, input_mask)\n\n next_kv = output_states\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (output_states,)\n\n if not return_dict:\n return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)\n\n return TFBaseModelOutput(\n last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions\n )\n\n\ndef make_log_bucket_position(relative_pos, bucket_size, max_position):\n \"\"\" \"\"\"\n sign = tf.math.sign(relative_pos)\n mid = bucket_size // 2\n abs_pos = tf.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, tf.math.abs(relative_pos))\n log_pos = (\n tf.math.ceil(\n tf.cast(tf.math.log(abs_pos / mid), tf.float32) / tf.math.log((max_position - 1) / mid) * (mid - 1)\n )\n + mid\n )\n bucket_pos = tf.cast(\n tf.where(abs_pos <= mid, tf.cast(relative_pos, tf.float32), log_pos * tf.cast(sign, tf.float32)), tf.int32\n )\n return bucket_pos\n\n\ndef build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):\n \"\"\"\n Build relative position according to the query and key\n\n We assume the absolute position of query :math:`P_q` is range from (0, query_size) and the absolute position of key\n :math:`P_k` is range from (0, key_size), The relative positions from query to key is :math:`R_{q \\\\rightarrow k} =\n P_q - P_k`\n\n Args:\n query_size (int): the length of query\n key_size (int): the length of key\n bucket_size (int): the size of position bucket\n max_position (int): the maximum allowed absolute position\n\n Return:\n :obj:`tf.Tensor`: A tensor with shape [1, query_size, key_size]\n\n \"\"\"\n q_ids = tf.range(query_size, dtype=tf.int32)\n k_ids = 
tf.range(key_size, dtype=tf.int32)\n rel_pos_ids = q_ids[:, None] - tf.tile(tf.expand_dims(k_ids, axis=0), [shape_list(q_ids)[0], 1])\n if bucket_size > 0 and max_position > 0:\n rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)\n rel_pos_ids = rel_pos_ids[:query_size, :]\n rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)\n return tf.cast(rel_pos_ids, tf.int64)\n\n\ndef c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):\n shapes = [\n shape_list(query_layer)[0],\n shape_list(query_layer)[1],\n shape_list(query_layer)[2],\n shape_list(relative_pos)[-1],\n ]\n return tf.broadcast_to(c2p_pos, shapes)\n\n\ndef p2c_dynamic_expand(c2p_pos, query_layer, key_layer):\n shapes = [\n shape_list(query_layer)[0],\n shape_list(query_layer)[1],\n shape_list(key_layer)[-2],\n shape_list(key_layer)[-2],\n ]\n return tf.broadcast_to(c2p_pos, shapes)\n\n\ndef pos_dynamic_expand(pos_index, p2c_att, key_layer):\n shapes = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]]\n return tf.broadcast_to(pos_index, shapes)\n\n\ndef take_along_axis(x, indices, gather_axis):\n if gather_axis < 0:\n gather_axis = tf.rank(x) + gather_axis\n\n if gather_axis != tf.rank(x) - 1:\n pre_roll = tf.rank(x) - 1 - gather_axis\n permutation = tf.roll(tf.range(tf.rank(x)), pre_roll, axis=0)\n x = tf.transpose(x, perm=permutation)\n indices = tf.transpose(indices, perm=permutation)\n else:\n pre_roll = 0\n\n flat_x = tf.reshape(x, (-1, tf.shape(x)[-1]))\n flat_indices = tf.reshape(indices, (-1, tf.shape(indices)[-1]))\n gathered = tf.gather(flat_x, flat_indices, batch_dims=1)\n gathered = tf.reshape(gathered, tf.shape(indices))\n\n if pre_roll != 0:\n permutation = tf.roll(tf.range(tf.rank(x)), -pre_roll, axis=0)\n gathered = tf.transpose(gathered, perm=permutation)\n\n return gathered\n\n\nclass TFDebertaV2DisentangledSelfAttention(tf.keras.layers.Layer):\n \"\"\"\n Disentangled self-attention module\n\n Parameters:\n config (:obj:`DebertaV2Config`):\n A model config class instance with the configuration to build a new model. 
The schema is similar to\n `BertConfig`, for more details, please refer :class:`~transformers.DebertaV2Config`\n\n \"\"\"\n\n def __init__(self, config: DebertaV2Config, **kwargs):\n super().__init__(**kwargs)\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n self.num_attention_heads = config.num_attention_heads\n _attention_head_size = config.hidden_size // config.num_attention_heads\n self.attention_head_size = getattr(config, \"attention_head_size\", _attention_head_size)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n self.query_proj = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"query_proj\",\n use_bias=True,\n )\n self.key_proj = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"key_proj\",\n use_bias=True,\n )\n self.value_proj = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"value_proj\",\n use_bias=True,\n )\n\n self.share_att_key = getattr(config, \"share_att_key\", False)\n self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []\n self.relative_attention = getattr(config, \"relative_attention\", False)\n\n if self.relative_attention:\n self.position_buckets = getattr(config, \"position_buckets\", -1)\n self.max_relative_positions = getattr(config, \"max_relative_positions\", -1)\n if self.max_relative_positions < 1:\n self.max_relative_positions = config.max_position_embeddings\n self.pos_ebd_size = self.max_relative_positions\n if self.position_buckets > 0:\n self.pos_ebd_size = self.position_buckets\n\n self.pos_dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name=\"pos_dropout\")\n\n if not self.share_att_key:\n if \"c2p\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n self.pos_proj = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"pos_proj\",\n use_bias=True,\n )\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n self.pos_q_proj = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"pos_q_proj\",\n )\n self.softmax = TFDebertaV2XSoftmax(axis=-1)\n self.dropout = TFDebertaV2StableDropout(config.attention_probs_dropout_prob, name=\"dropout\")\n\n def transpose_for_scores(self, tensor: tf.Tensor, attention_heads: int) -> tf.Tensor:\n shape = shape_list(tensor)[:-1] + [attention_heads, -1]\n # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]\n tensor = tf.reshape(tensor=tensor, shape=shape)\n x_shape = shape_list(tensor)\n return tf.reshape(tf.transpose(tensor, perm=[0, 2, 1, 3]), shape=[-1, x_shape[1], x_shape[-1]])\n\n def call(\n self,\n hidden_states: tf.Tensor,\n attention_mask: tf.Tensor,\n query_states: tf.Tensor = None,\n relative_pos: tf.Tensor = None,\n rel_embeddings: tf.Tensor = None,\n output_attentions: bool = False,\n training: bool = False,\n ) -> Tuple[tf.Tensor]:\n \"\"\"\n Call the module\n\n Args:\n hidden_states (:obj:`tf.Tensor`):\n Input states to the module usually the output from previous layer, it will be the Q,K and V in\n `Attention(Q,K,V)`\n\n attention_mask 
(:obj:`tf.Tensor`):\n                An attention mask matrix of shape [`B`, `N`, `N`] where `B` is the batch size and `N` is the maximum\n                sequence length; element [i, j] = `1` means the `i` th token in the input can attend to the `j` th\n                token.\n\n            output_attentions (:obj:`bool`, optional):\n                Whether to also return the attention matrix.\n\n            query_states (:obj:`tf.Tensor`, optional):\n                The `Q` state in `Attention(Q,K,V)`.\n\n            relative_pos (:obj:`tf.Tensor`):\n                The relative position encoding between the tokens in the sequence. It's of shape [`B`, `N`, `N`] with\n                values ranging in [`-max_relative_positions`, `max_relative_positions`].\n\n            rel_embeddings (:obj:`tf.Tensor`):\n                The embedding of relative distances. It's a tensor of shape [:math:`2 \\\\times\n                \\\\text{max_relative_positions}`, `hidden_size`].\n\n\n        \"\"\"\n        if query_states is None:\n            query_states = hidden_states\n        query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)\n        key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)\n        value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)\n\n        rel_att = None\n        # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n        scale_factor = 1\n        if \"c2p\" in self.pos_att_type:\n            scale_factor += 1\n        if \"p2c\" in self.pos_att_type:\n            scale_factor += 1\n        if \"p2p\" in self.pos_att_type:\n            scale_factor += 1\n        scale = tf.math.sqrt(tf.cast(shape_list(query_layer)[-1] * scale_factor, tf.float32))\n        attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 2, 1])) / scale\n        if self.relative_attention:\n            rel_embeddings = self.pos_dropout(rel_embeddings)\n            rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)\n\n        if rel_att is not None:\n            attention_scores = attention_scores + rel_att\n        attention_scores = tf.reshape(\n            attention_scores,\n            (-1, self.num_attention_heads, shape_list(attention_scores)[-2], shape_list(attention_scores)[-1]),\n        )\n\n        # bsz x height x length x dimension\n        attention_probs = self.softmax(attention_scores, attention_mask)\n        attention_probs = self.dropout(attention_probs, training=training)\n        context_layer = tf.matmul(\n            tf.reshape(attention_probs, [-1, shape_list(attention_probs)[-2], shape_list(attention_probs)[-1]]),\n            value_layer,\n        )\n        context_layer = tf.transpose(\n            tf.reshape(\n                context_layer,\n                [-1, self.num_attention_heads, shape_list(context_layer)[-2], shape_list(context_layer)[-1]],\n            ),\n            [0, 2, 1, 3],\n        )\n        new_context_layer_shape = shape_list(context_layer)[:-2] + [\n            -1,\n        ]\n        context_layer = tf.reshape(context_layer, new_context_layer_shape)\n        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n        return outputs\n\n    def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):\n\n        if relative_pos is None:\n            q = shape_list(query_layer)[-2]\n            relative_pos = build_relative_position(\n                q,\n                shape_list(key_layer)[-2],\n                bucket_size=self.position_buckets,\n                max_position=self.max_relative_positions,\n            )\n        shape_list_pos = shape_list(relative_pos)\n        if len(shape_list_pos) == 2:\n            relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0)\n        elif len(shape_list_pos) == 3:\n            relative_pos = tf.expand_dims(relative_pos, 1)\n        # bsz x height x query x key\n        elif len(shape_list_pos) != 4:\n            raise ValueError(f\"Relative position ids must be of dim 2 or 3 or 4. 
{len(shape_list_pos)}\")\n\n        att_span = self.pos_ebd_size\n        rel_embeddings = tf.expand_dims(\n            rel_embeddings[self.pos_ebd_size - att_span : self.pos_ebd_size + att_span, :], 0\n        )\n        if self.share_att_key:\n            pos_query_layer = tf.tile(\n                self.transpose_for_scores(self.query_proj(rel_embeddings), self.num_attention_heads),\n                [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],\n            )\n            pos_key_layer = tf.tile(\n                self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads),\n                [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],\n            )\n        else:\n            if \"c2p\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n                pos_key_layer = tf.tile(\n                    self.transpose_for_scores(self.pos_proj(rel_embeddings), self.num_attention_heads),\n                    [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],\n                )\n            if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n                pos_query_layer = tf.tile(\n                    self.transpose_for_scores(self.pos_q_proj(rel_embeddings), self.num_attention_heads),\n                    [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],\n                )\n\n        score = 0\n        # content->position\n        if \"c2p\" in self.pos_att_type:\n            scale = tf.math.sqrt(tf.cast(shape_list(pos_key_layer)[-1] * scale_factor, tf.float32))\n            c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 2, 1]))\n            c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)\n            c2p_att = take_along_axis(\n                c2p_att,\n                tf.broadcast_to(\n                    tf.squeeze(c2p_pos, 0),\n                    [shape_list(query_layer)[0], shape_list(query_layer)[1], shape_list(relative_pos)[-1]],\n                ),\n                -1,\n            )\n            score += c2p_att / scale\n\n        # position->content\n        if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n            scale = tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, tf.float32))\n            if shape_list(key_layer)[-2] != shape_list(query_layer)[-2]:\n                r_pos = build_relative_position(\n                    shape_list(key_layer)[-2],\n                    shape_list(key_layer)[-2],\n                    bucket_size=self.position_buckets,\n                    max_position=self.max_relative_positions,\n                )\n                r_pos = tf.expand_dims(r_pos, 0)\n            else:\n                r_pos = relative_pos\n\n            p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1)\n\n            if \"p2c\" in self.pos_att_type:\n                p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 2, 1]))\n                p2c_att = tf.transpose(\n                    take_along_axis(\n                        p2c_att,\n                        tf.broadcast_to(\n                            tf.squeeze(p2c_pos, 0),\n                            [shape_list(query_layer)[0], shape_list(key_layer)[-2], shape_list(key_layer)[-2]],\n                        ),\n                        -1,\n                    ),\n                    [0, 2, 1],\n                )\n                score += p2c_att / scale\n\n        # position->position\n        if \"p2p\" in self.pos_att_type:\n            pos_query = pos_query_layer[:, :, att_span:, :]\n            p2p_att = tf.matmul(pos_query, tf.transpose(pos_key_layer, [0, 2, 1]))\n            p2p_att = tf.broadcast_to(p2p_att, shape_list(query_layer)[:2] + shape_list(p2p_att)[2:])\n            # the p2p term indexes with the same clipped positions as c2p; recompute them here so that\n            # \"p2p\" also works when \"c2p\" is not in `pos_att_type`\n            c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)\n            p2p_att = take_along_axis(\n                p2p_att,\n                tf.broadcast_to(\n                    c2p_pos,\n                    [\n                        shape_list(query_layer)[0],\n                        shape_list(query_layer)[1],\n                        shape_list(query_layer)[2],\n                        shape_list(relative_pos)[-1],\n                    ],\n                ),\n                -1,\n            )\n            score += p2p_att\n\n        return score\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaEmbeddings Deberta->DebertaV2\nclass TFDebertaV2Embeddings(tf.keras.layers.Layer):\n    \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n    def __init__(self, config, **kwargs):\n        super().__init__(**kwargs)\n\n        self.vocab_size = config.vocab_size\n        self.type_vocab_size = config.type_vocab_size\n        
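# NOTE: the config may set `embedding_size` != `hidden_size` (a factorized embedding); in that case\n        # the `embed_proj` layer defined below projects the embeddings up to the hidden size before they\n        # enter the encoder stack.\n        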
self.embedding_size = getattr(config, \"embedding_size\", config.hidden_size)\n        self.hidden_size = config.hidden_size\n        self.max_position_embeddings = config.max_position_embeddings\n        self.position_biased_input = getattr(config, \"position_biased_input\", True)\n        self.initializer_range = config.initializer_range\n        self.embeddings_sum = tf.keras.layers.Add()\n        if self.embedding_size != config.hidden_size:\n            self.embed_proj = tf.keras.layers.Dense(config.hidden_size, use_bias=False, name=\"embed_proj\")\n        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name=\"dropout\")\n\n    def build(self, input_shape: tf.TensorShape):\n        with tf.name_scope(\"word_embeddings\"):\n            self.weight = self.add_weight(\n                name=\"weight\",\n                shape=[self.vocab_size, self.embedding_size],\n                initializer=get_initializer(self.initializer_range),\n            )\n\n        with tf.name_scope(\"token_type_embeddings\"):\n            if self.type_vocab_size > 0:\n                self.token_type_embeddings = self.add_weight(\n                    name=\"embeddings\",\n                    shape=[self.type_vocab_size, self.embedding_size],\n                    initializer=get_initializer(self.initializer_range),\n                )\n            else:\n                self.token_type_embeddings = None\n\n        with tf.name_scope(\"position_embeddings\"):\n            if self.position_biased_input:\n                self.position_embeddings = self.add_weight(\n                    name=\"embeddings\",\n                    shape=[self.max_position_embeddings, self.hidden_size],\n                    initializer=get_initializer(self.initializer_range),\n                )\n            else:\n                self.position_embeddings = None\n\n        super().build(input_shape)\n\n    def call(\n        self,\n        input_ids: tf.Tensor = None,\n        position_ids: tf.Tensor = None,\n        token_type_ids: tf.Tensor = None,\n        inputs_embeds: tf.Tensor = None,\n        mask: tf.Tensor = None,\n        training: bool = False,\n    ) -> tf.Tensor:\n        \"\"\"\n        Applies embedding based on input tensors.\n\n        Returns:\n            final_embeddings (:obj:`tf.Tensor`): output embedding tensor.\n        \"\"\"\n        assert not (input_ids is None and inputs_embeds is None)\n\n        if input_ids is not None:\n            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n\n        input_shape = shape_list(inputs_embeds)[:-1]\n\n        if token_type_ids is None:\n            token_type_ids = tf.fill(dims=input_shape, value=0)\n\n        if position_ids is None:\n            position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)\n\n        final_embeddings = inputs_embeds\n        if self.position_biased_input:\n            position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n            position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))\n            final_embeddings += position_embeds\n        if self.type_vocab_size > 0:\n            token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n            final_embeddings += token_type_embeds\n\n        if self.embedding_size != self.hidden_size:\n            final_embeddings = self.embed_proj(final_embeddings)\n\n        final_embeddings = self.LayerNorm(final_embeddings)\n\n        if mask is not None:\n            if len(shape_list(mask)) != len(shape_list(final_embeddings)):\n                if len(shape_list(mask)) == 4:\n                    mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)\n                mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32)\n\n            final_embeddings = final_embeddings * mask\n\n        final_embeddings = self.dropout(final_embeddings, training=training)\n\n        return final_embeddings\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaPredictionHeadTransform with Deberta->DebertaV2\nclass TFDebertaV2PredictionHeadTransform(tf.keras.layers.Layer):\n    def __init__(self, config: 
DebertaV2Config, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"dense\",\n )\n\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = get_tf_activation(config.hidden_act)\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n\n def call(self, hidden_states: tf.Tensor) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n\n return hidden_states\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaLMPredictionHead with Deberta->DebertaV2\nclass TFDebertaV2LMPredictionHead(tf.keras.layers.Layer):\n def __init__(self, config: DebertaV2Config, input_embeddings: tf.keras.layers.Layer, **kwargs):\n super().__init__(**kwargs)\n\n self.vocab_size = config.vocab_size\n self.hidden_size = config.hidden_size\n\n self.transform = TFDebertaV2PredictionHeadTransform(config, name=\"transform\")\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.input_embeddings = input_embeddings\n\n def build(self, input_shape: tf.TensorShape):\n self.bias = self.add_weight(shape=(self.vocab_size,), initializer=\"zeros\", trainable=True, name=\"bias\")\n\n super().build(input_shape)\n\n def get_output_embeddings(self) -> tf.keras.layers.Layer:\n return self.input_embeddings\n\n def set_output_embeddings(self, value: tf.Variable):\n self.input_embeddings.weight = value\n self.input_embeddings.vocab_size = shape_list(value)[0]\n\n def get_bias(self) -> Dict[str, tf.Variable]:\n return {\"bias\": self.bias}\n\n def set_bias(self, value: tf.Variable):\n self.bias = value[\"bias\"]\n self.vocab_size = shape_list(value[\"bias\"])[0]\n\n def call(self, hidden_states: tf.Tensor) -> tf.Tensor:\n hidden_states = self.transform(hidden_states=hidden_states)\n seq_length = shape_list(hidden_states)[1]\n hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])\n hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)\n hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])\n hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)\n\n return hidden_states\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaOnlyMLMHead with Deberta->DebertaV2\nclass TFDebertaV2OnlyMLMHead(tf.keras.layers.Layer):\n def __init__(self, config: DebertaV2Config, input_embeddings: tf.keras.layers.Layer, **kwargs):\n super().__init__(**kwargs)\n self.predictions = TFDebertaV2LMPredictionHead(config, input_embeddings, name=\"predictions\")\n\n def call(self, sequence_output: tf.Tensor) -> tf.Tensor:\n prediction_scores = self.predictions(hidden_states=sequence_output)\n\n return prediction_scores\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaMainLayer with Deberta->DebertaV2\nclass TFDebertaV2MainLayer(tf.keras.layers.Layer):\n config_class = DebertaV2Config\n\n def __init__(self, config: DebertaV2Config, **kwargs):\n super().__init__(**kwargs)\n\n self.config = config\n\n self.embeddings = TFDebertaV2Embeddings(config, name=\"embeddings\")\n self.encoder = TFDebertaV2Encoder(config, name=\"encoder\")\n\n def get_input_embeddings(self) -> 
tf.keras.layers.Layer:\n        return self.embeddings\n\n    def set_input_embeddings(self, value: tf.Variable):\n        self.embeddings.weight = value\n        self.embeddings.vocab_size = shape_list(value)[0]\n\n    def _prune_heads(self, heads_to_prune):\n        \"\"\"\n        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n        class PreTrainedModel\n        \"\"\"\n        raise NotImplementedError\n\n    def call(\n        self,\n        input_ids: Optional[TFModelInputType] = None,\n        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n        training: bool = False,\n        **kwargs,\n    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:\n        inputs = input_processing(\n            func=self.call,\n            config=self.config,\n            input_ids=input_ids,\n            attention_mask=attention_mask,\n            token_type_ids=token_type_ids,\n            position_ids=position_ids,\n            inputs_embeds=inputs_embeds,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n            training=training,\n            kwargs_call=kwargs,\n        )\n\n        if inputs[\"input_ids\"] is not None and inputs[\"inputs_embeds\"] is not None:\n            raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n        elif inputs[\"input_ids\"] is not None:\n            input_shape = shape_list(inputs[\"input_ids\"])\n        elif inputs[\"inputs_embeds\"] is not None:\n            input_shape = shape_list(inputs[\"inputs_embeds\"])[:-1]\n        else:\n            raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n        if inputs[\"attention_mask\"] is None:\n            inputs[\"attention_mask\"] = tf.fill(dims=input_shape, value=1)\n\n        if inputs[\"token_type_ids\"] is None:\n            inputs[\"token_type_ids\"] = tf.fill(dims=input_shape, value=0)\n\n        embedding_output = self.embeddings(\n            input_ids=inputs[\"input_ids\"],\n            position_ids=inputs[\"position_ids\"],\n            token_type_ids=inputs[\"token_type_ids\"],\n            inputs_embeds=inputs[\"inputs_embeds\"],\n            mask=inputs[\"attention_mask\"],\n            training=inputs[\"training\"],\n        )\n\n        encoder_outputs = self.encoder(\n            hidden_states=embedding_output,\n            attention_mask=inputs[\"attention_mask\"],\n            output_attentions=inputs[\"output_attentions\"],\n            output_hidden_states=inputs[\"output_hidden_states\"],\n            return_dict=inputs[\"return_dict\"],\n            training=inputs[\"training\"],\n        )\n\n        sequence_output = encoder_outputs[0]\n\n        if not inputs[\"return_dict\"]:\n            return (sequence_output,) + encoder_outputs[1:]\n\n        return TFBaseModelOutput(\n            last_hidden_state=sequence_output,\n            hidden_states=encoder_outputs.hidden_states,\n            attentions=encoder_outputs.attentions,\n        )\n\n\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaPreTrainedModel with Deberta->DebertaV2\nclass TFDebertaV2PreTrainedModel(TFPreTrainedModel):\n    \"\"\"\n    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n    models.\n    \"\"\"\n\n    config_class = DebertaV2Config\n    base_model_prefix = \"deberta\"\n\n\nDEBERTA_START_DOCSTRING = r\"\"\"\n    The DeBERTa model was proposed in `DeBERTa: Decoding-enhanced BERT with Disentangled Attention\n    <https://arxiv.org/abs/2006.03654>`_ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built on top of\n    BERT/RoBERTa with two improvements, i.e. 
disentangled attention and enhanced mask decoder. With those two\n    improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB pretraining data.\n\n    This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use\n    it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage\n    and behavior.\n\n    .. note::\n\n        TF 2.0 models accept two formats as inputs:\n\n        - having all inputs as keyword arguments (like PyTorch models), or\n        - having all inputs as a list, tuple or dict in the first positional argument.\n\n        This second option is useful when using the :meth:`tf.keras.Model.fit` method, which currently requires having\n        all the tensors in the first argument of the model call function: :obj:`model(inputs)`.\n\n        If you choose this second option, there are three possibilities you can use to gather all the input Tensors in\n        the first positional argument:\n\n        - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)`\n        - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n          :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`\n        - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n          :obj:`model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\n\n\n\n    Parameters:\n        config (:class:`~transformers.DebertaV2Config`): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n            weights.\n\"\"\"\n\nDEBERTA_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (:obj:`np.ndarray`, :obj:`tf.Tensor`, :obj:`List[tf.Tensor]` :obj:`Dict[str, tf.Tensor]` or :obj:`Dict[str, np.ndarray]` and each example must have the shape :obj:`({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using :class:`transformers.DebertaV2Tokenizer`. See\n            :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for\n            details.\n\n            `What are input IDs? <../glossary.html#input-ids>`__\n        attention_mask (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\n            Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            `What are attention masks? <../glossary.html#attention-mask>`__\n        token_type_ids (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n            1]``:\n\n            - 0 corresponds to a `sentence A` token,\n            - 1 corresponds to a `sentence B` token.\n\n            `What are token type IDs? <../glossary.html#token-type-ids>`_\n        position_ids (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n            config.max_position_embeddings - 1]``.\n\n            `What are position IDs? 
<../glossary.html#position-ids>`_\n inputs_embeds (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\n DEBERTA_START_DOCSTRING,\n)\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaModel with Deberta->DebertaV2\nclass TFDebertaV2Model(TFDebertaV2PreTrainedModel):\n def __init__(self, config: DebertaV2Config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.deberta = TFDebertaV2MainLayer(config, name=\"deberta\")\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFBaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.deberta(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n position_ids=inputs[\"position_ids\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n\n return outputs\n\n def serving_output(self, output: TFBaseModelOutput) -> TFBaseModelOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\"\"\"DeBERTa Model with a `language modeling` head on top. 
\"\"\", DEBERTA_START_DOCSTRING)\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForMaskedLM with Deberta->DebertaV2\nclass TFDebertaV2ForMaskedLM(TFDebertaV2PreTrainedModel, TFMaskedLanguageModelingLoss):\n def __init__(self, config: DebertaV2Config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `TFDebertaV2ForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.deberta = TFDebertaV2MainLayer(config, name=\"deberta\")\n self.mlm = TFDebertaV2OnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name=\"cls\")\n\n def get_lm_head(self) -> tf.keras.layers.Layer:\n return self.mlm.predictions\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFMaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[Union[np.ndarray, tf.Tensor]] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.deberta(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n position_ids=inputs[\"position_ids\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n sequence_output = outputs[0]\n prediction_scores = self.mlm(sequence_output=sequence_output, training=inputs[\"training\"])\n loss = (\n None if inputs[\"labels\"] is None else self.compute_loss(labels=inputs[\"labels\"], logits=prediction_scores)\n )\n\n if not inputs[\"return_dict\"]:\n output = (prediction_scores,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TFMaskedLMOutput(\n loss=loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. 
for GLUE tasks.\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForSequenceClassification with Deberta->DebertaV2\nclass TFDebertaV2ForSequenceClassification(TFDebertaV2PreTrainedModel, TFSequenceClassificationLoss):\n def __init__(self, config: DebertaV2Config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.num_labels = config.num_labels\n\n self.deberta = TFDebertaV2MainLayer(config, name=\"deberta\")\n self.pooler = TFDebertaV2ContextPooler(config, name=\"pooler\")\n\n drop_out = getattr(config, \"cls_dropout\", None)\n drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out\n self.dropout = TFDebertaV2StableDropout(drop_out, name=\"cls_dropout\")\n self.classifier = tf.keras.layers.Dense(\n units=config.num_labels,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"classifier\",\n )\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFSequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[Union[np.ndarray, tf.Tensor]] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.deberta(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n position_ids=inputs[\"position_ids\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n sequence_output = outputs[0]\n pooled_output = self.pooler(sequence_output, training=inputs[\"training\"])\n pooled_output = self.dropout(pooled_output, training=inputs[\"training\"])\n logits = self.classifier(pooled_output)\n loss = None if inputs[\"labels\"] is None else self.compute_loss(labels=inputs[\"labels\"], logits=logits)\n\n if not inputs[\"return_dict\"]:\n output = (logits,) + outputs[1:]\n\n return ((loss,) + output) if loss is not None else output\n\n return TFSequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForTokenClassification with Deberta->DebertaV2\nclass TFDebertaV2ForTokenClassification(TFDebertaV2PreTrainedModel, TFTokenClassificationLoss):\n def __init__(self, config: DebertaV2Config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.num_labels = config.num_labels\n\n self.deberta = TFDebertaV2MainLayer(config, name=\"deberta\")\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n self.classifier = tf.keras.layers.Dense(\n units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"classifier\"\n )\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFTokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[Union[np.ndarray, tf.Tensor]] = None,\n training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.deberta(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n position_ids=inputs[\"position_ids\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n sequence_output = outputs[0]\n sequence_output = self.dropout(sequence_output, training=inputs[\"training\"])\n logits = self.classifier(inputs=sequence_output)\n loss = None if inputs[\"labels\"] is None else self.compute_loss(labels=inputs[\"labels\"], logits=logits)\n\n if not inputs[\"return_dict\"]:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TFTokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\n# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForQuestionAnswering with Deberta->DebertaV2\nclass TFDebertaV2ForQuestionAnswering(TFDebertaV2PreTrainedModel, TFQuestionAnsweringLoss):\n def __init__(self, config: DebertaV2Config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.num_labels = config.num_labels\n\n self.deberta = TFDebertaV2MainLayer(config, name=\"deberta\")\n self.qa_outputs = tf.keras.layers.Dense(\n units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"qa_outputs\"\n )\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFQuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[TFModelInputType] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,\n end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,\n 
training: Optional[bool] = False,\n **kwargs,\n ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n start_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n start_positions=start_positions,\n end_positions=end_positions,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.deberta(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n position_ids=inputs[\"position_ids\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n sequence_output = outputs[0]\n logits = self.qa_outputs(inputs=sequence_output)\n start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)\n start_logits = tf.squeeze(input=start_logits, axis=-1)\n end_logits = tf.squeeze(input=end_logits, axis=-1)\n loss = None\n\n if inputs[\"start_positions\"] is not None and inputs[\"end_positions\"] is not None:\n labels = {\"start_position\": inputs[\"start_positions\"]}\n labels[\"end_position\"] = inputs[\"end_positions\"]\n loss = self.compute_loss(labels=labels, logits=(start_logits, end_logits))\n\n if not inputs[\"return_dict\"]:\n output = (start_logits, end_logits) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TFQuestionAnsweringModelOutput(\n loss=loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFQuestionAnsweringModelOutput(\n start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns\n )\n",
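# ---------------------------------------------------------------------------
# A minimal, hedged usage sketch for the TF DeBERTa-v2 classes defined above.
# Assumptions (not taken from the source file): the checkpoint name
# "kamalkraj/deberta-v2-xlarge" and the default label count; any DeBERTa-v2
# checkpoint works, and PyTorch-only checkpoints may additionally need
# `from_pt=True` when loading into a TF model.
import tensorflow as tf
from transformers import DebertaV2Tokenizer, TFDebertaV2ForSequenceClassification

tokenizer = DebertaV2Tokenizer.from_pretrained("kamalkraj/deberta-v2-xlarge")
model = TFDebertaV2ForSequenceClassification.from_pretrained("kamalkraj/deberta-v2-xlarge")

inputs = tokenizer("DeBERTa disentangles content and position attention.", return_tensors="tf")
outputs = model(inputs)  # TFSequenceClassifierOutput with a `.logits` field
probs = tf.nn.softmax(outputs.logits, axis=-1)  # shape: (batch_size, num_labels)
# ---------------------------------------------------------------------------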
"# coding=utf-8\n# Copyright 2021 Google Research The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch BigBirdPegasus model. \"\"\"\n\n\nimport copy\nimport math\nimport random\nfrom typing import Optional, Tuple\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_end_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPastAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n Seq2SeqLMOutput,\n Seq2SeqModelOutput,\n Seq2SeqQuestionAnsweringModelOutput,\n Seq2SeqSequenceClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import logging\nfrom .configuration_bigbird_pegasus import BigBirdPegasusConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"google/bigbird-pegasus-large-arxiv\"\n_CONFIG_FOR_DOC = \"BigBirdPegasusConfig\"\n_TOKENIZER_FOR_DOC = \"PegasusTokenizer\"\n\n\nBIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"google/bigbird-pegasus-large-arxiv\",\n \"google/bigbird-pegasus-large-pubmed\",\n \"google/bigbird-pegasus-large-bigpatent\",\n # See all BigBirdPegasus models at https://huggingface.co/models?filter=bigbird_pegasus\n]\n\n\ndef shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):\n \"\"\"\n Shift input ids one token to the right.\n \"\"\"\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()\n shifted_input_ids[:, 0] = decoder_start_token_id\n\n assert pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\"\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n return shifted_input_ids\n\n\ndef _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):\n \"\"\"\n Make causal mask used for bi-directional self-attention.\n \"\"\"\n bsz, tgt_len = input_ids_shape\n mask = torch.full((tgt_len, tgt_len), float(\"-inf\"))\n mask_cond = torch.arange(mask.size(-1))\n mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)\n mask = mask.to(dtype)\n\n if past_key_values_length > 0:\n mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)\n return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)\n\n\ndef _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):\n \"\"\"\n Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.\n \"\"\"\n bsz, src_len = mask.size()\n tgt_len = tgt_len if tgt_len is not None else src_len\n\n expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, 
src_len).to(dtype)\n\n inverted_mask = 1.0 - expanded_mask\n\n return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)\n\n\nclass BigBirdPegasusLearnedPositionalEmbedding(nn.Embedding):\n \"\"\"\n This module learns positional embeddings up to a fixed maximum size.\n \"\"\"\n\n def __init__(self, num_embeddings: int, embedding_dim: int):\n super().__init__(num_embeddings, embedding_dim)\n\n def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):\n \"\"\"`input_ids_shape` is expected to be [bsz x seqlen].\"\"\"\n bsz, seq_len = input_ids_shape[:2]\n positions = torch.arange(\n past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device\n )\n return super().forward(positions)\n\n\n# Copied from transformers.models.big_bird.modeling_big_bird.BigBirdSelfAttention with BigBird->BigBirdPegasus\nclass BigBirdPegasusSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = encoder_hidden_states is not None\n\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_layer = past_key_value[0]\n value_layer = past_key_value[1]\n attention_mask = encoder_attention_mask\n elif is_cross_attention:\n key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))\n value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention 
key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BigBirdPegasusModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.functional.softmax(attention_scores, dim=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n if self.is_decoder:\n outputs = outputs + (past_key_value,)\n return outputs\n\n\n# Copied from transformers.models.big_bird.modeling_big_bird.BigBirdBlockSparseAttention with BigBird->BigBirdPegasus\nclass BigBirdPegasusBlockSparseAttention(nn.Module):\n def __init__(self, config, seed=None):\n super().__init__()\n\n self.max_seqlen = config.max_position_embeddings\n self.seed = seed\n\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size {config.hidden_size} is not a multiple of the number of attention \"\n f\"heads {config.num_attention_heads}.\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.num_random_blocks = config.num_random_blocks\n self.block_size = config.block_size\n\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n band_mask=None,\n from_mask=None,\n to_mask=None,\n from_blocked_mask=None,\n to_blocked_mask=None,\n output_attentions=None,\n ):\n # Currently this `class` can't be used in decoder.\n\n batch_size, seqlen, _ = hidden_states.size()\n to_seq_length = from_seq_length = seqlen\n from_block_size = to_block_size = self.block_size\n\n assert from_seq_length % from_block_size 
== 0, \"Query-side sequence length must be a multiple of the block size\"\n        assert to_seq_length % to_block_size == 0, \"Key/value-side sequence length must be a multiple of the block size\"\n\n        query_layer = self.transpose_for_scores(self.query(hidden_states))\n        key_layer = self.transpose_for_scores(self.key(hidden_states))\n        value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n        context_layer, attention_probs = self.bigbird_block_sparse_attention(\n            query_layer,\n            key_layer,\n            value_layer,\n            band_mask,\n            from_mask,\n            to_mask,\n            from_blocked_mask,\n            to_blocked_mask,\n            self.num_attention_heads,\n            self.num_random_blocks,\n            self.attention_head_size,\n            from_block_size,\n            to_block_size,\n            batch_size,\n            from_seq_length,\n            to_seq_length,\n            seed=self.seed,\n            plan_from_length=None,\n            plan_num_rand_blocks=None,\n            output_attentions=output_attentions,\n        )\n\n        context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)\n\n        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n        return outputs\n\n    @staticmethod\n    def torch_bmm_nd(inp_1, inp_2, ndim=None):\n        \"\"\"Fast nd matrix multiplication\"\"\"\n        # faster replacement of torch.einsum (\"bhqk,bhkd->bhqd\")\n        return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view(\n            inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1])\n        )\n\n    @staticmethod\n    def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None):\n        \"\"\"Fast nd matrix multiplication with transpose\"\"\"\n        # faster replacement of torch.einsum (bhqd,bhkd->bhqk)\n        return torch.bmm(\n            inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2)\n        ).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2]))\n\n    def bigbird_block_sparse_attention(\n        self,\n        query_layer,\n        key_layer,\n        value_layer,\n        band_mask,\n        from_mask,\n        to_mask,\n        from_blocked_mask,\n        to_blocked_mask,\n        n_heads,\n        n_rand_blocks,\n        attention_head_size,\n        from_block_size,\n        to_block_size,\n        batch_size,\n        from_seq_len,\n        to_seq_len,\n        seed,\n        plan_from_length,\n        plan_num_rand_blocks,\n        output_attentions,\n    ):\n\n        # BigBirdPegasus block-sparse attention as suggested in the paper\n\n        # ITC:\n        # global tokens: 2 x block_size\n        # window tokens: 3 x block_size\n        # random tokens: num_rand_tokens x block_size\n\n        # ETC:\n        # global tokens: extra_globals_tokens + 2 x block_size\n        # window tokens: 3 x block_size\n        # random tokens: num_rand_tokens x block_size\n\n        # Note:\n        # 1) Currently, ETC is not supported.\n        # 2) Window size is fixed to 3 blocks & it can be changed only by\n        # changing `block_size`.\n        # 3) The number of global blocks is fixed (2 blocks here) & global tokens can be\n        # controlled only by `block_size`.\n\n        # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use the special trick of shifting tokens (for calculating sliding attention)\n        # hence the following code can be divided into 5 parts.\n\n        if from_seq_len // from_block_size != to_seq_len // to_block_size:\n            raise ValueError(\"Error: the number of blocks needs to be the same!\")\n\n        rsqrt_d = 1 / math.sqrt(attention_head_size)\n        bsz = batch_size\n        attn_mask_penalty = -10000.0\n\n        # generate random attention and corresponding masks\n        np.random.seed(seed)\n        if from_seq_len in [1024, 3072, 4096]:  # old plans used in paper\n            rand_attn = [\n                self._bigbird_block_rand_mask(\n                    self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024\n                )[: (from_seq_len 
// from_block_size - 2)]\n for _ in range(n_heads)\n ]\n else:\n if plan_from_length is None:\n plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan(\n from_seq_len, from_block_size, n_rand_blocks\n )\n\n rand_attn = self._bigbird_block_rand_mask_with_head(\n from_seq_length=from_seq_len,\n to_seq_length=to_seq_len,\n from_block_size=from_block_size,\n to_block_size=to_block_size,\n num_heads=n_heads,\n plan_from_length=plan_from_length,\n plan_num_rand_blocks=plan_num_rand_blocks,\n )\n\n rand_attn = np.stack(rand_attn, axis=0)\n rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long)\n rand_attn.unsqueeze_(0)\n rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0)\n\n rand_mask = self._create_rand_mask_from_inputs(\n from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size\n )\n\n blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1)\n blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)\n blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)\n\n # preparing block for randn attn\n gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn)\n gathered_key = gathered_key.view(\n bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1\n ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]\n gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn)\n gathered_value = gathered_value.view(\n bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1\n ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]\n\n # 1st PART\n # 1st block (global block) attention scores\n # q[0] x (k[0], k[1], k[2], k[3], k[4] .... 
)\n\n # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]\n first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4)\n\n first_product = first_product * rsqrt_d\n first_product += (1.0 - to_mask) * attn_mask_penalty\n first_attn_weights = nn.functional.softmax(\n first_product, dim=-1\n ) # [bsz, n_heads, from_block_size, to_seq_len]\n\n # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]\n first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4)\n first_context_layer.unsqueeze_(2)\n\n # 2nd PART\n # 2nd block attention scores\n # q[1] x (sliding_keys, random_keys, global_keys)\n # sliding key blocks -> 2nd, 3rd blocks\n # global key blocks -> 1st block\n\n second_key_mat = torch.cat(\n [\n blocked_key_matrix[:, :, 0],\n blocked_key_matrix[:, :, 1],\n blocked_key_matrix[:, :, 2],\n blocked_key_matrix[:, :, -1],\n gathered_key[:, :, 0],\n ],\n dim=2,\n ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]\n second_value_mat = torch.cat(\n [\n blocked_value_matrix[:, :, 0],\n blocked_value_matrix[:, :, 1],\n blocked_value_matrix[:, :, 2],\n blocked_value_matrix[:, :, -1],\n gathered_value[:, :, 0],\n ],\n dim=2,\n ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]\n\n # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]\n second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4)\n second_seq_pad = torch.cat(\n [\n to_mask[:, :, :, : 3 * to_block_size],\n to_mask[:, :, :, -to_block_size:],\n to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),\n ],\n dim=3,\n )\n second_rand_pad = torch.cat(\n [\n rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),\n rand_mask[:, :, 0],\n ],\n dim=3,\n )\n second_product = second_product * rsqrt_d\n second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty\n second_attn_weights = nn.functional.softmax(\n second_product, dim=-1\n ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]\n\n # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]\n second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4)\n\n second_context_layer.unsqueeze_(2)\n\n # 3rd PART\n # Middle blocks attention scores\n # q[-2:2] x (sliding_keys, random_keys, global_keys)\n # sliding attn is calculated using special trick of shifting tokens as discussed in paper\n # random keys are generated by taking random indices as per `rand_attn`\n # global keys -> 1st & last block\n\n exp_blocked_key_matrix = torch.cat(\n [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3\n ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]\n exp_blocked_value_matrix = torch.cat(\n [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]],\n dim=3,\n ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]\n middle_query_matrix = blocked_query_matrix[:, :, 2:-2]\n\n # sliding attention scores for q[-2:2]\n # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 
3*to_block_size, -1]\n inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5)\n # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size]\n inner_band_product = inner_band_product * rsqrt_d\n\n # randn attention scores for q[-2:2]\n # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]\n rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5)\n # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size]\n rand_band_product = rand_band_product * rsqrt_d\n\n # Including 1st block (since it's global)\n first_band_product = torch.einsum(\n \"bhlqd,bhkd->bhlqk\", middle_query_matrix, blocked_key_matrix[:, :, 0]\n ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]\n first_band_product = first_band_product * rsqrt_d\n\n # Including last block (since it's global)\n last_band_product = torch.einsum(\n \"bhlqd,bhkd->bhlqk\", middle_query_matrix, blocked_key_matrix[:, :, -1]\n ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]\n last_band_product = last_band_product * rsqrt_d\n\n # masking padded tokens\n inner_band_product += (1.0 - band_mask) * attn_mask_penalty\n first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty\n last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty\n rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty\n\n # completing attention scores matrix for all q[-2:2]\n band_product = torch.cat(\n [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1\n ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]\n\n # safely doing softmax since attention matrix is completed\n attn_weights = nn.functional.softmax(\n band_product, dim=-1\n ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]\n\n # contribution of sliding keys\n # [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]\n context_layer = self.torch_bmm_nd(\n attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5\n )\n # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]\n\n # adding contribution of random keys\n # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]\n context_layer += self.torch_bmm_nd(\n attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5\n )\n # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]\n\n # adding contribution of global keys\n context_layer += torch.einsum(\n \"bhlqk,bhkd->bhlqd\", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]\n ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, 
from_seq_len//from_block_size-4, from_block_size, -1]\n context_layer += torch.einsum(\n \"bhlqk,bhkd->bhlqd\", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]\n ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]\n\n # 4th PART\n # last 2nd token attention scores\n # q[-2] x (sliding_keys, random_keys, global_keys)\n # sliding key blocks -> last 3 blocks\n # global key block -> 1st block\n # random key block -> based on indices stored in `randn_attn`\n\n second_last_key_mat = torch.cat(\n [\n blocked_key_matrix[:, :, 0],\n blocked_key_matrix[:, :, -3],\n blocked_key_matrix[:, :, -2],\n blocked_key_matrix[:, :, -1],\n gathered_key[:, :, -1],\n ],\n dim=2,\n ) # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1]\n second_last_value_mat = torch.cat(\n [\n blocked_value_matrix[:, :, 0],\n blocked_value_matrix[:, :, -3],\n blocked_value_matrix[:, :, -2],\n blocked_value_matrix[:, :, -1],\n gathered_value[:, :, -1],\n ],\n dim=2,\n ) # [bsz, n_heads, (4+r)*to_block_size, -1]\n\n # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]\n second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4)\n second_last_seq_pad = torch.cat(\n [\n to_mask[:, :, :, :to_block_size],\n to_mask[:, :, :, -3 * to_block_size :],\n to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),\n ],\n dim=3,\n )\n second_last_rand_pad = torch.cat(\n [\n rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),\n rand_mask[:, :, -1],\n ],\n dim=3,\n )\n second_last_product = second_last_product * rsqrt_d\n second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty\n second_last_attn_weights = nn.functional.softmax(\n second_last_product, dim=-1\n ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]\n\n # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]\n second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4)\n second_last_context_layer.unsqueeze_(2)\n\n # 5th PART\n # last block (global) attention scores\n # q[-1] x (k[0], k[1], k[2], k[3], .... 
)\n\n # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]\n last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4)\n last_product = last_product * rsqrt_d\n last_product += (1.0 - to_mask) * attn_mask_penalty\n last_attn_weights = nn.functional.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n]\n\n # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]\n last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4)\n last_context_layer.unsqueeze_(2)\n\n # combining representations of all tokens\n context_layer = torch.cat(\n [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer],\n dim=2,\n )\n context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask\n context_layer = torch.transpose(context_layer, 1, 2)\n\n # this is just for visualizing; forward pass doesn't depend on following code\n if output_attentions:\n # TODO(PVP): need to verify if below code is correct\n attention_probs = torch.zeros(\n bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device\n )\n\n # 1st query block\n # corresponding to `first_context_layer`\n attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global\n\n # 2nd query block\n # corresponding to `second_context_layer`\n attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[\n :, :, :, : 3 * to_block_size\n ] # 1st three key blocks (global + sliding)\n attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[\n :, :, :, 3 * to_block_size : 4 * to_block_size\n ] # last key block (global)\n # random keys\n for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights):\n # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch\n for p2, i2, w2 in zip(range(n_heads), i1, w1):\n # p2, i2, w2 corresponds to head_dim i.e. 
following operation is done for each head\n                    attn_probs_view = attention_probs.view(\n                        bsz,\n                        n_heads,\n                        from_seq_len // from_block_size,\n                        from_block_size,\n                        to_seq_len // to_block_size,\n                        to_block_size,\n                    )\n                    right_slice = w2[:, 4 * to_block_size :]\n                    attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view(\n                        from_block_size, n_rand_blocks, to_block_size\n                    )\n\n            # Middle query blocks\n            # corresponding to `context_layer`\n            # sliding keys\n            for q_idx in range(from_seq_len // from_block_size - 4):\n                attn_probs_view = attention_probs.view(\n                    bsz,\n                    n_heads,\n                    from_seq_len // from_block_size,\n                    from_block_size,\n                    to_seq_len // to_block_size,\n                    to_block_size,\n                )[:, :, 2:-2, :, 1:-1, :]\n                right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size]\n                attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view(\n                    bsz, n_heads, from_block_size, 3, to_block_size\n                )  # inner_band_product\n            # global keys (corresponding to 1st key block)\n            attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[\n                :, :, :, :, :to_block_size\n            ].view(\n                bsz, n_heads, -1, to_block_size\n            )  # first_band_product\n            # global keys (corresponding to last key block)\n            attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[\n                :, :, :, :, -to_block_size:\n            ].view(\n                bsz, n_heads, -1, to_block_size\n            )  # last_band_product\n            # random keys\n            for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights):\n                # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch\n                for p2, i2, w2 in zip(range(n_heads), i1, w1):\n                    # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each head\n                    for q_idx in range(1, len(i2) - 1):\n                        attn_probs_view = attention_probs.view(\n                            bsz,\n                            n_heads,\n                            from_seq_len // from_block_size,\n                            from_block_size,\n                            to_seq_len // to_block_size,\n                            to_block_size,\n                        )\n                        right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size]\n                        attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view(\n                            from_block_size, n_rand_blocks, to_block_size\n                        )\n\n            # Second-last query block\n            # corresponding to `second_last_context_layer`\n            attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[\n                :, :, :, :to_block_size\n            ]  # 1st key block (global)\n            attention_probs[\n                :, :, -2 * from_block_size : -from_block_size, -3 * to_block_size :\n            ] = second_last_attn_weights[\n                :, :, :, to_block_size : 4 * to_block_size\n            ]  # last three blocks (global + sliding)\n            # random keys\n            for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights):\n                # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch\n                for p2, i2, w2 in zip(range(n_heads), i1, w1):\n                    # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each head\n
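                    # (illustrative note: i2[-1] holds the random-block column indices chosen for this\n                    # second-to-last query row; the slice below is written back at query-row index -2)\n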
                    attn_probs_view = attention_probs.view(\n                        bsz,\n                        n_heads,\n                        from_seq_len // from_block_size,\n                        from_block_size,\n                        to_seq_len // to_block_size,\n                        to_block_size,\n                    )\n                    right_slice = w2[:, 4 * to_block_size :]\n                    attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view(\n                        from_block_size, n_rand_blocks, to_block_size\n                    )\n\n            # last query block\n            # corresponding to `last_context_layer`\n            attention_probs[:, :, -from_block_size:, :] = last_attn_weights  # all keys global\n\n        else:\n            attention_probs = None\n\n        return context_layer, attention_probs\n\n    @staticmethod\n    def torch_gather_b2(params, indices):\n        # this operation is equivalent to tf.gather when batch_dims=2\n\n        if params.shape[:2] != indices.shape[:2]:\n            raise ValueError(\n                f\"Make sure that the first two dimensions of params and indices are identical, \\\n                but they are params: {params.shape[:2]} vs. indices: {indices.shape[:2]}\"\n            )\n        num_indices_to_gather = indices.shape[-2] * indices.shape[-1]\n        num_indices_to_pick_from = params.shape[2]\n\n        indices_shift = (\n            torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device)\n            // num_indices_to_gather\n            * num_indices_to_pick_from\n        )\n\n        flattened_indices = indices.view(-1) + indices_shift\n        flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1])\n\n        out_flattened = flattened_params.index_select(0, flattened_indices)\n\n        out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:])\n        return out\n\n    @staticmethod\n    def _create_rand_mask_from_inputs(\n        from_blocked_mask,\n        to_blocked_mask,\n        rand_attn,\n        num_attention_heads,\n        num_rand_blocks,\n        batch_size,\n        from_seq_length,\n        from_block_size,\n    ):\n        \"\"\"\n        Create 3D attention mask from a 2D tensor mask.\n\n        Args:\n            from_blocked_mask: 2D Tensor of shape [batch_size,\n                from_seq_length//from_block_size, from_block_size].\n            to_blocked_mask: int32 Tensor of shape [batch_size,\n                to_seq_length//to_block_size, to_block_size].\n            rand_attn: [batch_size, num_attention_heads,\n                from_seq_length//from_block_size-2, num_rand_blocks]\n            num_attention_heads: int. Number of attention heads.\n            num_rand_blocks: int. Number of random chunks per row.\n            batch_size: int. Batch size for computation.\n            from_seq_length: int. length of from sequence.\n            from_block_size: int. size of block in from sequence.\n\n        Returns:\n            float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2,\n            from_block_size, num_rand_blocks*to_block_size].\n        \"\"\"\n        num_windows = from_seq_length // from_block_size - 2\n        rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)])\n        rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size)\n        rand_mask = torch.einsum(\"blq,bhlk->bhlqk\", from_blocked_mask[:, 1:-1], rand_mask)\n        return rand_mask\n\n    @staticmethod\n    def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):\n        \"\"\"\n        Gives the plan of where to put random attention.\n\n        Args:\n            from_seq_length: int. length of from sequence.\n            from_block_size: int. size of block in from sequence.\n            num_rand_blocks: int. Number of random chunks per row.\n\n        Returns:\n            plan_from_length: ending location of each from-block plan segment.\n            plan_num_rand_blocks: number of random blocks within each plan segment.\n
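\n        Example (illustrative): for from_seq_length=2048, from_block_size=64, num_rand_blocks=3, the first\n        branch applies (since 2 * 3 + 5 = 11 < 2048 // 64 = 32), giving plan_from_length=[704, 2048] and\n        plan_num_rand_blocks=[3, 0].\n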
        \"\"\"\n\n        plan_from_length = []\n        plan_num_rand_blocks = []\n        if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size):\n            plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size))\n            plan_num_rand_blocks.append(num_rand_blocks)\n            plan_from_length.append(from_seq_length)\n            plan_num_rand_blocks.append(0)\n        elif (num_rand_blocks + 5) < (from_seq_length // from_block_size):\n            plan_from_length.append(int((num_rand_blocks + 5) * from_block_size))\n            plan_num_rand_blocks.append(num_rand_blocks // 2)\n            plan_from_length.append(from_seq_length)\n            plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2))\n        else:\n            plan_from_length.append(from_seq_length)\n            plan_num_rand_blocks.append(num_rand_blocks)\n\n        return plan_from_length, plan_num_rand_blocks\n\n    @staticmethod\n    def _bigbird_block_rand_mask(\n        from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1\n    ):\n        \"\"\"\n        Create adjacency list of random attention.\n\n        Args:\n            from_seq_length: int. length of from sequence.\n            to_seq_length: int. length of to sequence.\n            from_block_size: int. size of block in from sequence.\n            to_block_size: int. size of block in to sequence.\n            num_rand_blocks: int. Number of random chunks per row.\n            last_idx: if -1, then num_rand_blocks blocks are chosen anywhere in the to sequence;\n                if positive, then num_rand_blocks blocks are chosen only up to last_idx.\n\n        Returns:\n            adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks\n        \"\"\"\n        # using this method when from_seq_length in [1024, 3072, 4096]\n\n        assert (\n            from_seq_length // from_block_size == to_seq_length // to_block_size\n        ), \"Error: the number of blocks needs to be the same!\"\n\n        rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)\n        middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)\n        last = to_seq_length // to_block_size - 1\n        if last_idx > (2 * to_block_size):\n            last = (last_idx // to_block_size) - 1\n\n        r = num_rand_blocks  # shorthand\n        for i in range(1, from_seq_length // from_block_size - 1):\n            start = i - 2\n            end = i\n            if i == 1:\n                rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]\n            elif i == 2:\n                rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]\n            elif i == from_seq_length // from_block_size - 3:\n                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]\n            # Missing -3: should have been sliced till last-3\n            elif i == from_seq_length // from_block_size - 2:\n                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]\n            # Missing -4: should have been sliced till last-4\n            else:\n                if start > last:\n                    start = last\n                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]\n                elif (end + 1) == last:\n                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]\n                else:\n                    rand_attn[i - 1, :] = np.random.permutation(\n                        np.concatenate((middle_seq[:start], middle_seq[end + 1 : last]))\n                    )[:r]\n        return rand_attn\n\n    def _bigbird_block_rand_mask_with_head(\n        self,\n        from_seq_length,\n        to_seq_length,\n        from_block_size,\n        to_block_size,\n        num_heads,\n        plan_from_length,\n        plan_num_rand_blocks,\n        window_block_left=1,\n        window_block_right=1,\n        global_block_top=1,\n        global_block_bottom=1,\n        global_block_left=1,\n        global_block_right=1,\n    ):\n        \"\"\"\n        Create adjacency list of random 
attention.\n\n        Args:\n            from_seq_length: int. length of from sequence.\n            to_seq_length: int. length of to sequence.\n            from_block_size: int. size of block in from sequence.\n            to_block_size: int. size of block in to sequence.\n            num_heads: int. total number of heads.\n            plan_from_length: list. plan from length where num_random_blocks are chosen from.\n            plan_num_rand_blocks: list. number of rand blocks within the plan.\n            window_block_left: int. number of blocks of window to left of a block.\n            window_block_right: int. number of blocks of window to right of a block.\n            global_block_top: int. number of blocks at the top.\n            global_block_bottom: int. number of blocks at the bottom.\n            global_block_left: int. Number of blocks globally used to the left.\n            global_block_right: int. Number of blocks globally used to the right.\n\n        Returns:\n            adjacency list of size num_heads where each element is of size from_seq_length//from_block_size-2 by\n            num_rand_blocks\n        \"\"\"\n        # using this method when from_seq_length not in [1024, 3072, 4096]\n\n        assert (\n            from_seq_length // from_block_size == to_seq_length // to_block_size\n        ), \"Error: the number of blocks needs to be the same!\"\n\n        assert from_seq_length in plan_from_length, \"Error: from sequence length not in plan!\"\n\n        # Total number of blocks in the mask\n        num_blocks = from_seq_length // from_block_size\n        # Number of blocks per plan\n        plan_block_length = np.array(plan_from_length) // from_block_size\n        # till when to follow plan\n        max_plan_idx = plan_from_length.index(from_seq_length)\n        # Random Attention adjacency list\n        rand_attn = [\n            np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32)\n            for i in range(num_heads)\n        ]\n\n        # We will go iteratively over the plan blocks and pick a random number of\n        # attention blocks from the legally allowed blocks\n        for plan_idx in range(max_plan_idx + 1):\n            rnd_r_cnt = 0\n            if plan_idx > 0:\n                # set the row for all from_blocks starting from 0 to\n                # plan_block_length[plan_idx-1]\n                # column index starts from plan_block_length[plan_idx-1] and ends at\n                # plan_block_length[plan_idx]\n                if plan_num_rand_blocks[plan_idx] > 0:\n                    rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))\n                    curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))\n                    for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]):\n                        for h in range(num_heads):\n                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(\n                                block_id=blk_rw_idx,\n                                to_start_block_id=plan_block_length[plan_idx - 1],\n                                to_end_block_id=plan_block_length[plan_idx],\n                                num_rand_blocks=plan_num_rand_blocks[plan_idx],\n                                window_block_left=window_block_left,\n                                window_block_right=window_block_right,\n                                global_block_left=global_block_left,\n                                global_block_right=global_block_right,\n                            )\n\n                for pl_id in range(plan_idx):\n                    if plan_num_rand_blocks[pl_id] == 0:\n                        continue\n                    for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]):\n                        rnd_r_cnt = 0\n                        to_start_block_id = 0\n                        if pl_id > 0:\n                            rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id]))\n                            to_start_block_id = plan_block_length[pl_id - 1]\n                        curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1]))\n                        for h in range(num_heads):\n                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(\n                                block_id=blk_rw_idx,\n                                to_start_block_id=to_start_block_id,\n                                to_end_block_id=plan_block_length[pl_id],\n                                num_rand_blocks=plan_num_rand_blocks[pl_id],\n                                window_block_left=window_block_left,\n                                
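# note: the same window/global block geometry is passed through for every plan segment\n                                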
window_block_right=window_block_right,\n global_block_left=global_block_left,\n global_block_right=global_block_right,\n )\n\n if plan_num_rand_blocks[plan_idx] == 0:\n continue\n curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))\n from_start_block_id = global_block_top\n to_start_block_id = 0\n if plan_idx > 0:\n rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))\n from_start_block_id = plan_block_length[plan_idx - 1]\n to_start_block_id = plan_block_length[plan_idx - 1]\n\n for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):\n for h in range(num_heads):\n rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(\n block_id=blk_rw_idx,\n to_start_block_id=to_start_block_id,\n to_end_block_id=plan_block_length[plan_idx],\n num_rand_blocks=plan_num_rand_blocks[plan_idx],\n window_block_left=window_block_left,\n window_block_right=window_block_right,\n global_block_left=global_block_left,\n global_block_right=global_block_right,\n )\n\n for nh in range(num_heads):\n rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]\n\n return rand_attn\n\n @staticmethod\n def _get_single_block_row_attention(\n block_id,\n to_start_block_id,\n to_end_block_id,\n num_rand_blocks,\n window_block_left=1,\n window_block_right=1,\n global_block_left=1,\n global_block_right=1,\n ):\n \"\"\"\n For a single row block get random row attention.\n\n Args:\n block_id: int. block id of row.\n to_start_block_id: int. random attention column start id.\n to_end_block_id: int. random attention column end id.\n num_rand_blocks: int. number of random blocks to be selected.\n window_block_left: int. number of blocks of window to left of a block.\n window_block_right: int. number of blocks of window to right of a block.\n global_block_left: int. Number of blocks globally used to the left.\n global_block_right: int. 
Number of blocks globally used to the right.\n\n        Returns:\n            row containing the random attention vector of size num_rand_blocks.\n        \"\"\"\n        # list of to_blocks from which to choose random attention\n        to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32)\n        # permute the blocks\n        perm_block = np.random.permutation(to_block_list)\n\n        # illegal blocks for the current block id, using window\n        illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1))\n\n        # Add blocks at the start and at the end\n        illegal_blocks.extend(list(range(global_block_left)))\n        illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id)))\n\n        # The second from_block cannot choose random attention on the second last to_block\n        if block_id == 1:\n            illegal_blocks.append(to_end_block_id - 2)\n\n        # The second last from_block cannot choose random attention on the second to_block\n        if block_id == to_end_block_id - 2:\n            illegal_blocks.append(1)\n\n        selected_random_blocks = []\n\n        for i in range(to_end_block_id - to_start_block_id):\n            if perm_block[i] not in illegal_blocks:\n                selected_random_blocks.append(perm_block[i])\n            if len(selected_random_blocks) == num_rand_blocks:\n                break\n        return np.array(selected_random_blocks, dtype=np.int32)\n\n\nclass BigBirdPegasusEncoderAttention(nn.Module):\n    def __init__(self, config, seed=None):\n        super().__init__()\n        self.config = config\n        self.seed = seed\n\n        self.attention_type = config.attention_type\n\n        if self.attention_type == \"original_full\":\n            self.self = BigBirdPegasusSelfAttention(config)\n        elif self.attention_type == \"block_sparse\":\n            self.self = BigBirdPegasusBlockSparseAttention(config, seed)\n        else:\n            raise ValueError(\n                f\"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}\"\n            )\n\n        self.output = nn.Linear(config.hidden_size, config.hidden_size, bias=config.use_bias)\n\n    def set_attention_type(self, value: str):\n        if value not in [\"original_full\", \"block_sparse\"]:\n            raise ValueError(\n                f\"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}\"\n            )\n        # attention type is already correctly set\n        if value == self.attention_type:\n            return\n\n        self.attention_type = value\n        if value == \"original_full\":\n            # copy all weights to new full attention class\n            attn_weights = BigBirdPegasusSelfAttention(self.config)\n        else:\n            # copy all weights to new sparse attention class\n            attn_weights = BigBirdPegasusBlockSparseAttention(self.config, self.seed)\n\n        attn_weights.query = self.self.query\n        attn_weights.value = self.self.value\n        attn_weights.key = self.self.key\n        self.self = attn_weights\n        self.attention_type = value\n\n        if not self.training:\n            self.self.eval()\n\n    def forward(\n        self,\n        hidden_states,\n        attention_mask=None,\n        head_mask=None,\n        past_key_value=None,\n        output_attentions=False,\n        band_mask=None,\n        from_mask=None,\n        to_mask=None,\n        from_blocked_mask=None,\n        to_blocked_mask=None,\n    ):\n        # Expand dims to enable multiplication in the self-attention module\n        head_mask = head_mask.reshape(1, -1, 1, 1) if head_mask is not None else None\n\n        if self.config.attention_type == \"original_full\":\n            self_outputs = self.self(\n                hidden_states,\n                attention_mask,\n                head_mask,\n                past_key_value=past_key_value,\n                output_attentions=output_attentions,\n            )\n        else:\n            self_outputs = self.self(\n                hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions\n            )\n\n        attention_output = 
self.output(self_outputs[0])\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BigBirdPegasusDecoder\nclass BigBirdPegasusDecoderAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n is_decoder: bool = False,\n bias: bool = True,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert (\n self.head_dim * num_heads == self.embed_dim\n ), f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).\"\n self.scaling = self.head_dim ** -0.5\n self.is_decoder = is_decoder\n\n self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n key_value_states: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.Tensor] = None,\n layer_head_mask: Optional[torch.Tensor] = None,\n output_attentions: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"Input shape: Batch x Time x Channel\"\"\"\n\n # if key_value_states are provided this layer is used as a cross-attention layer\n # for the decoder\n is_cross_attention = key_value_states is not None\n bsz, tgt_len, embed_dim = hidden_states.size()\n\n # get query proj\n query_states = self.q_proj(hidden_states) * self.scaling\n # get key, value proj\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_states = past_key_value[0]\n value_states = past_key_value[1]\n elif is_cross_attention:\n # cross_attentions\n key_states = self._shape(self.k_proj(key_value_states), -1, bsz)\n value_states = self._shape(self.v_proj(key_value_states), -1, bsz)\n elif past_key_value is not None:\n # reuse k, v, self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n key_states = torch.cat([past_key_value[0], key_states], dim=2)\n value_states = torch.cat([past_key_value[1], value_states], dim=2)\n else:\n # self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. 
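(as an illustrative note: at incremental decoding step t the cached key/value states\n            # each have shape (bsz, num_heads, t, head_dim), growing along dim=2 by one position per step)\n            # 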
Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_states, value_states)\n\n proj_shape = (bsz * self.num_heads, -1, self.head_dim)\n query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)\n key_states = key_states.view(*proj_shape)\n value_states = value_states.view(*proj_shape)\n\n src_len = key_states.size(1)\n attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))\n\n if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):\n raise ValueError(\n f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}\"\n )\n\n if attention_mask is not None:\n if attention_mask.size() != (bsz, 1, tgt_len, src_len):\n raise ValueError(\n f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}\"\n )\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n attn_weights = nn.functional.softmax(attn_weights, dim=-1)\n\n if layer_head_mask is not None:\n if layer_head_mask.size() != (self.num_heads,):\n raise ValueError(\n f\"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}\"\n )\n attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if output_attentions:\n # this operation is a bit awkward, but it's required to\n # make sure that attn_weights keeps its gradient.\n # In order to do so, attn_weights have to be reshaped\n # twice and have to be reused in the following\n attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)\n else:\n attn_weights_reshaped = None\n\n attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)\n\n attn_output = torch.bmm(attn_probs, value_states)\n\n if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):\n raise ValueError(\n f\"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}\"\n )\n\n attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)\n attn_output = attn_output.transpose(1, 2)\n attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)\n\n attn_output = self.out_proj(attn_output)\n\n return attn_output, attn_weights_reshaped, past_key_value\n\n\nclass BigBirdPegasusEncoderLayer(nn.Module):\n def __init__(self, config: BigBirdPegasusConfig, seed=None):\n super().__init__()\n self.attention_type = config.attention_type\n self.embed_dim = config.d_model\n self.self_attn = BigBirdPegasusEncoderAttention(config, seed=seed)\n self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n self.dropout = config.dropout\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = config.activation_dropout\n self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)\n self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)\n self.final_layer_norm = nn.LayerNorm(self.embed_dim)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: torch.Tensor,\n layer_head_mask: 
torch.Tensor,\n band_mask=None,\n from_mask=None,\n to_mask=None,\n from_blocked_mask=None,\n to_blocked_mask=None,\n output_attentions: bool = False,\n ):\n \"\"\"\n Args:\n hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape :obj:`(seq_len, batch, embed_dim)`\n attention_mask (:obj:`torch.FloatTensor`): attention mask of size\n :obj:`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n \"\"\"\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n\n self_attention_outputs = self.self_attn(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n head_mask=layer_head_mask,\n output_attentions=output_attentions,\n band_mask=band_mask,\n from_mask=from_mask,\n to_mask=to_mask,\n from_blocked_mask=from_blocked_mask,\n to_blocked_mask=to_blocked_mask,\n )\n hidden_states = self_attention_outputs[0]\n\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n\n if hidden_states.dtype == torch.float16 and (\n torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()\n ):\n clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (self_attention_outputs[1],)\n\n return outputs\n\n def set_attention_type(self, value: str):\n if value not in [\"original_full\", \"block_sparse\"]:\n raise ValueError(\n f\"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}\"\n )\n # attention type is already correctly set\n if value == self.attention_type:\n return\n self.attention_type = value\n self.self_attn.set_attention_type(value)\n\n\nclass BigBirdPegasusDecoderLayer(nn.Module):\n def __init__(self, config: BigBirdPegasusConfig):\n super().__init__()\n self.embed_dim = config.d_model\n self.self_attn = BigBirdPegasusDecoderAttention(\n embed_dim=self.embed_dim,\n num_heads=config.decoder_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=True,\n bias=config.use_bias,\n )\n self.dropout = config.dropout\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = config.activation_dropout\n\n self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n self.encoder_attn = BigBirdPegasusDecoderAttention(\n self.embed_dim,\n config.decoder_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=True,\n bias=config.use_bias,\n )\n self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)\n self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)\n self.final_layer_norm = nn.LayerNorm(self.embed_dim)\n\n # Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer.forward\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n encoder_hidden_states: 
Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.Tensor] = None,\n layer_head_mask: Optional[torch.Tensor] = None,\n cross_attn_layer_head_mask: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = True,\n ):\n \"\"\"\n Args:\n hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`\n attention_mask (:obj:`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size\n `(encoder_attention_heads,)`.\n cross_attn_layer_head_mask (:obj:`torch.FloatTensor`): mask for cross-attention heads in a given layer of\n size `(decoder_attention_heads,)`.\n past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n \"\"\"\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n\n # Self Attention\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n # add present self-attn cache to positions 1,2 of present_key_value tuple\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n past_key_value=self_attn_past_key_value,\n attention_mask=attention_mask,\n layer_head_mask=layer_head_mask,\n output_attentions=output_attentions,\n )\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n\n # Cross-Attention Block\n cross_attn_present_key_value = None\n cross_attn_weights = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n\n # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(\n hidden_states=hidden_states,\n key_value_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n layer_head_mask=cross_attn_layer_head_mask,\n past_key_value=cross_attn_past_key_value,\n output_attentions=output_attentions,\n )\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n\n # add cross-attn to positions 3,4 of present_key_value tuple\n present_key_value = present_key_value + cross_attn_present_key_value\n\n # Fully Connected\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = 
self.fc2(hidden_states)\n        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n        hidden_states = residual + hidden_states\n\n        outputs = (hidden_states,)\n\n        if output_attentions:\n            outputs += (self_attn_weights, cross_attn_weights)\n\n        if use_cache:\n            outputs += (present_key_value,)\n\n        return outputs\n\n\n# Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->BigBirdPegasus\nclass BigBirdPegasusClassificationHead(nn.Module):\n    \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n    def __init__(\n        self,\n        input_dim: int,\n        inner_dim: int,\n        num_classes: int,\n        pooler_dropout: float,\n    ):\n        super().__init__()\n        self.dense = nn.Linear(input_dim, inner_dim)\n        self.dropout = nn.Dropout(p=pooler_dropout)\n        self.out_proj = nn.Linear(inner_dim, num_classes)\n\n    def forward(self, hidden_states: torch.Tensor):\n        hidden_states = self.dropout(hidden_states)\n        hidden_states = self.dense(hidden_states)\n        hidden_states = torch.tanh(hidden_states)\n        hidden_states = self.dropout(hidden_states)\n        hidden_states = self.out_proj(hidden_states)\n        return hidden_states\n\n\nclass BigBirdPegasusPreTrainedModel(PreTrainedModel):\n    config_class = BigBirdPegasusConfig\n    base_model_prefix = \"model\"\n\n    def _init_weights(self, module):\n        std = self.config.init_std\n        if isinstance(module, nn.Linear):\n            module.weight.data.normal_(mean=0.0, std=std)\n            if module.bias is not None:\n                module.bias.data.zero_()\n        elif isinstance(module, nn.Embedding):\n            module.weight.data.normal_(mean=0.0, std=std)\n            if module.padding_idx is not None:\n                module.weight.data[module.padding_idx].zero_()\n\n    @property\n    def dummy_inputs(self):\n        pad_token = self.config.pad_token_id\n        input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)\n        dummy_inputs = {\n            \"attention_mask\": input_ids.ne(pad_token),\n            \"input_ids\": input_ids,\n        }\n        return dummy_inputs\n\n\nBIGBIRD_PEGASUS_START_DOCSTRING = r\"\"\"\n    This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings\n    etc.)\n\n    This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n    subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to\n    general usage and behavior.\n\n    Parameters:\n        config (:class:`~transformers.BigBirdPegasusConfig`):\n            Model configuration class with all the parameters of the model. Initializing with a config file does not\n            load the weights associated with the model, only the configuration. 
Check out the\n :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nBIGBIRD_PEGASUS_GENERATION_EXAMPLE = r\"\"\"\n Summarization example::\n\n >>> from transformers import PegasusTokenizer, BigBirdPegasusForConditionalGeneration, BigBirdPegasusConfig\n\n >>> model = BigBirdPegasusForConditionalGeneration.from_pretrained('bigbird-pegasus-large-arxiv')\n >>> tokenizer = PegasusTokenizer.from_pretrained('bigbird-pegasus-large-arxiv')\n\n >>> ARTICLE_TO_SUMMARIZE = \"My friends are cool but they eat too many carbs.\"\n >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=4096, return_tensors='pt', truncation=True)\n\n >>> # Generate Summary\n >>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)\n >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])\n\"\"\"\n\nBIGBIRD_PEGASUS_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Provide for translation and summarization training. By default, the model will create this tensor by\n shifting the :obj:`input_ids` to the right, following the paper.\n decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will\n also be used by default.\n\n If you want to change padding behavior, you should read\n :func:`modeling_bigbird_pegasus._prepare_decoder_inputs` and modify to your needs. See diagram 1 in `the\n paper <https://arxiv.org/abs/1910.13461>`__ for more information on the default strategy.\n\n decoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):\n Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:\n :obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`,\n `optional`) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the\n            cross-attention of the decoder.\n        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):\n            Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors\n            of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of\n            shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n            blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.\n\n            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n            This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n            vectors than the model's internal embedding lookup matrix.\n        decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):\n            Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded\n            representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`\n            have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert\n            :obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.\n\n            If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`\n            takes the value of :obj:`inputs_embeds`.\n        use_cache (:obj:`bool`, `optional`):\n            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n            decoding (see :obj:`past_key_values`).\n        output_attentions (:obj:`bool`, `optional`):\n            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n            tensors for more detail.\n        output_hidden_states (:obj:`bool`, `optional`):\n            Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n            more detail.\n        return_dict (:obj:`bool`, `optional`):\n            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\nBIGBIRD_PEGASUS_STANDALONE_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n            it.\n\n            Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See\n            :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n            details.\n\n            `What are input IDs? <../glossary.html#input-ids>`__\n        attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n            Mask to avoid performing attention on padding token indices. 
Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\nclass BigBirdPegasusEncoder(BigBirdPegasusPreTrainedModel):\n \"\"\"\n Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a\n :class:`BigBirdPegasusEncoderLayer`.\n\n Args:\n config: BigBirdPegasusConfig\n embed_tokens (nn.Embedding): output embedding\n \"\"\"\n\n def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embedding] = None):\n super().__init__(config)\n\n self.attention_type = config.attention_type\n self.block_size = config.block_size\n\n self.dropout = config.dropout\n self.layerdrop = config.encoder_layerdrop\n\n embed_dim = config.d_model\n self.padding_idx = config.pad_token_id\n self.max_source_positions = config.max_position_embeddings\n self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0\n\n if embed_tokens is not None:\n self.embed_tokens = embed_tokens\n else:\n self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)\n\n self.embed_positions = BigBirdPegasusLearnedPositionalEmbedding(\n config.max_position_embeddings,\n embed_dim,\n )\n self.layers = nn.ModuleList([BigBirdPegasusEncoderLayer(config, seed=i) for i in range(config.encoder_layers)])\n self.layernorm_embedding = nn.LayerNorm(embed_dim)\n\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert :obj:`input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. 
See ``attentions`` under\n returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors\n for more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # retrieve input_ids and inputs_embeds\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n\n embed_pos = self.embed_positions(input_shape)\n\n hidden_states = inputs_embeds + embed_pos\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=hidden_states.device)\n attention_mask = attention_mask.long()\n\n # in order to use block_sparse attention, sequence_length has to be at least\n # bigger than all global attentions: 2 * block_size\n # + sliding tokens: 3 * block_size\n # + random tokens: 2 * num_random_blocks * block_size\n max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size\n if self.attention_type == \"block_sparse\" and input_shape[1] <= max_tokens_to_attend:\n # change attention_type from block_sparse to original_full\n sequence_length = input_shape[1]\n logger.warning(\n \"Attention type 'block_sparse' is not possible if sequence_length: \"\n f\"{sequence_length} <= num global tokens: 2 * config.block_size \"\n \"+ min. 
num sliding tokens: 3 * config.block_size \"\n \"+ config.num_random_blocks * config.block_size \"\n \"+ additional buffer: config.num_random_blocks * config.block_size \"\n f\"= {max_tokens_to_attend} with config.block_size \"\n f\"= {self.config.block_size}, config.num_random_blocks \"\n f\"= {self.config.num_random_blocks}.\"\n \"Changing attention type to 'original_full'...\"\n )\n self.set_attention_type(\"original_full\")\n\n if self.attention_type == \"block_sparse\":\n padding_len, hidden_states, attention_mask = self._pad_to_block_size(hidden_states, attention_mask)\n else:\n padding_len = 0\n\n # expand attention_mask\n if self.attention_type == \"original_full\":\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)\n blocked_encoder_mask = band_mask = from_mask = to_mask = None\n elif self.attention_type == \"block_sparse\":\n blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn(\n attention_mask, self.block_size\n )\n attention_mask = None\n else:\n raise ValueError(\n f\"attention_type can either be original_full or block_sparse, but is {self.attention_type}\"\n )\n\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n # check if head_mask has a correct number of layers specified if desired\n if head_mask is not None:\n assert head_mask.size()[0] == (\n len(self.layers)\n ), f\"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.\"\n\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = random.uniform(0, 1)\n if self.training and (dropout_probability < self.layerdrop): # skip the layer\n layer_outputs = (None, None)\n else:\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(encoder_layer),\n hidden_states,\n attention_mask,\n (head_mask[idx] if head_mask is not None else None),\n band_mask,\n from_mask,\n to_mask,\n blocked_encoder_mask,\n blocked_encoder_mask,\n )\n else:\n layer_outputs = encoder_layer(\n hidden_states,\n attention_mask,\n layer_head_mask=(head_mask[idx] if head_mask is not None else None),\n band_mask=band_mask,\n from_mask=from_mask,\n to_mask=to_mask,\n from_blocked_mask=blocked_encoder_mask,\n to_blocked_mask=blocked_encoder_mask,\n output_attentions=output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n hidden_states = self.layernorm_embedding(hidden_states)\n\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n\n if padding_len > 0:\n # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1)\n hidden_states = hidden_states[:, :-padding_len]\n\n if not return_dict:\n return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)\n\n self.encoder_o = hidden_states\n\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions\n )\n\n def set_attention_type(self, value: str):\n if 
value not in [\"original_full\", \"block_sparse\"]:\n raise ValueError(\n f\"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}\"\n )\n # attention type is already correctly set\n if value == self.attention_type:\n return\n self.attention_type = value\n for layer in self.layers:\n layer.set_attention_type(value)\n\n @staticmethod # Copied from transformers.models.big_bird.modeling_big_bird.BigBirdModel.create_masks_for_block_sparse_attn\n def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int):\n\n batch_size, seq_length = attention_mask.size()\n assert (\n seq_length % block_size == 0\n ), f\"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block size is {block_size}.\"\n\n def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):\n \"\"\"\n Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_blocked_mask: 2D Tensor of shape [batch_size,\n from_seq_length//from_block_size, from_block_size].\n to_blocked_mask: int32 Tensor of shape [batch_size,\n to_seq_length//to_block_size, to_block_size].\n\n Returns:\n float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size,\n 3*to_block_size].\n \"\"\"\n exp_blocked_to_pad = torch.cat(\n [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2\n )\n band_mask = torch.einsum(\"blq,blk->blqk\", from_blocked_mask[:, 2:-2], exp_blocked_to_pad)\n band_mask.unsqueeze_(1)\n return band_mask\n\n blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size)\n band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask)\n\n from_mask = attention_mask.view(batch_size, 1, seq_length, 1)\n to_mask = attention_mask.view(batch_size, 1, 1, seq_length)\n\n return blocked_encoder_mask, band_mask, from_mask, to_mask\n\n def _pad_to_block_size(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor):\n \"\"\"A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention.\"\"\"\n # padding\n block_size = self.config.block_size\n batch_size, seq_len = hidden_states.shape[:2]\n\n padding_len = (block_size - seq_len % block_size) % block_size\n if padding_len > 0:\n logger.info(\n f\"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of \"\n f\"`config.block_size`: {block_size}\"\n )\n pad_id = self.config.pad_token_id\n device = hidden_states.device\n input_ids_padding = torch.ones((batch_size, padding_len), dtype=torch.long, device=device) * pad_id\n inputs_embeds_padding = self.embed_tokens(input_ids_padding)\n hidden_states = torch.cat([hidden_states, inputs_embeds_padding], dim=-2)\n\n attention_mask = nn.functional.pad(\n attention_mask, (0, padding_len), value=0\n ) # no attention on the padding tokens\n\n return padding_len, hidden_states, attention_mask\n\n\nclass BigBirdPegasusDecoder(BigBirdPegasusPreTrainedModel):\n \"\"\"\n Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a\n :class:`BigBirdPegasusDecoderLayer`\n\n Args:\n config: BigBirdPegasusConfig\n embed_tokens (nn.Embedding): output embedding\n \"\"\"\n\n def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embedding] = None):\n super().__init__(config)\n self.dropout = config.dropout\n self.layerdrop = config.decoder_layerdrop\n self.padding_idx = config.pad_token_id\n self.max_target_positions = config.max_position_embeddings\n self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0\n\n if embed_tokens is not None:\n self.embed_tokens = embed_tokens\n else:\n self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)\n\n self.embed_positions = BigBirdPegasusLearnedPositionalEmbedding(\n config.max_position_embeddings,\n config.d_model,\n )\n self.layers = nn.ModuleList([BigBirdPegasusDecoderLayer(config) for _ in range(config.decoder_layers)])\n self.layernorm_embedding = nn.LayerNorm(config.d_model)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, value):\n self.embed_tokens = value\n\n # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask\n def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):\n # create causal mask\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = None\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(\n input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length\n ).to(self.device)\n\n if attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])\n combined_attention_mask = (\n expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask\n )\n\n return combined_attention_mask\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n head_mask=None,\n cross_attn_head_mask=None,\n past_key_values=None,\n inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.BigBirdPegasusTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention\n of the decoder.\n encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values\n selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the cross-attention modules in decoder to avoid performing\n cross-attention on hidden heads. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):\n Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2\n tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional\n tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the\n cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential\n decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last\n :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of\n shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size,\n sequence_length)`.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert :obj:`input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors\n for more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # retrieve input_ids and inputs_embeds\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either decoder_input_ids or decoder_inputs_embeds\")\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n\n attention_mask = self._prepare_decoder_attention_mask(\n attention_mask, input_shape, inputs_embeds, past_key_values_length\n )\n\n # expand encoder attention mask\n if encoder_hidden_states is not None and encoder_attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])\n\n # embed positions\n positions = self.embed_positions(input_shape, past_key_values_length)\n\n hidden_states = inputs_embeds + positions\n\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n\n # decoder layers\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None\n next_decoder_cache = () if use_cache else None\n\n # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired\n for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], [\"head_mask\", \"cross_attn_head_mask\"]):\n if attn_mask is not None:\n assert attn_mask.size()[0] == (\n len(self.layers)\n ), f\"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.\"\n for idx, decoder_layer in enumerate(self.layers):\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n dropout_probability = random.uniform(0, 1)\n if self.training and (dropout_probability < self.layerdrop):\n continue\n\n past_key_value = past_key_values[idx] if past_key_values is not None else None\n\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n if use_cache:\n logger.warning(\n \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. 
Setting \"\n \"`use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for past_key_value\n return module(*inputs, output_attentions, use_cache)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(decoder_layer),\n hidden_states,\n attention_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n head_mask[idx] if head_mask is not None else None,\n cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,\n None,\n )\n else:\n\n layer_outputs = decoder_layer(\n hidden_states,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n layer_head_mask=(head_mask[idx] if head_mask is not None else None),\n cross_attn_layer_head_mask=(\n cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None\n ),\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n use_cache=use_cache,\n )\n hidden_states = layer_outputs[0]\n\n if use_cache:\n next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)\n\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n\n if encoder_hidden_states is not None:\n all_cross_attentions += (layer_outputs[2],)\n\n hidden_states = self.layernorm_embedding(hidden_states)\n\n # add hidden states from the last decoder layer\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n\n next_cache = next_decoder_cache if use_cache else None\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attns,\n cross_attentions=all_cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"The bare BigBirdPegasus Model outputting raw hidden-states without any specific head on top.\",\n BIGBIRD_PEGASUS_START_DOCSTRING,\n)\n# Copied from transformers.models.bart.modeling_bart.BartModel with Bart->BigBirdPegasus, BART->BIGBIRD_PEGASUS\nclass BigBirdPegasusModel(BigBirdPegasusPreTrainedModel):\n def __init__(self, config: BigBirdPegasusConfig):\n super().__init__(config)\n\n padding_idx, vocab_size = config.pad_token_id, config.vocab_size\n self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)\n\n self.encoder = BigBirdPegasusEncoder(config, self.shared)\n self.decoder = BigBirdPegasusDecoder(config, self.shared)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, value):\n self.shared = value\n self.encoder.embed_tokens = self.shared\n self.decoder.embed_tokens = self.shared\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=Seq2SeqModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n 
output_hidden_states=None,\n return_dict=None,\n ):\n\n # different to other models, BigBirdPegasus automatically creates decoder_input_ids from\n # input_ids if no decoder_input_ids are provided\n if decoder_input_ids is None and decoder_inputs_embeds is None:\n decoder_input_ids = shift_tokens_right(\n input_ids, self.config.pad_token_id, self.config.decoder_start_token_id\n )\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=encoder_outputs[0],\n encoder_attention_mask=attention_mask,\n head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"The BigBirdPegasus Model with a language modeling head. 
Can be used for summarization.\",\n BIGBIRD_PEGASUS_START_DOCSTRING,\n)\n# Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration with Bart->BigBirdPegasus, BART->BIGBIRD_PEGASUS\nclass BigBirdPegasusForConditionalGeneration(BigBirdPegasusPreTrainedModel):\n base_model_prefix = \"model\"\n _keys_to_ignore_on_load_missing = [r\"final_logits_bias\", r\"lm_head\\.weight\"]\n\n def __init__(self, config: BigBirdPegasusConfig):\n super().__init__(config)\n self.model = BigBirdPegasusModel(config)\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)\n\n self.init_weights()\n\n def get_encoder(self):\n return self.model.get_encoder()\n\n def get_decoder(self):\n return self.model.get_decoder()\n\n def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:\n new_embeddings = super().resize_token_embeddings(new_num_tokens)\n self._resize_final_logits_bias(new_num_tokens)\n return new_embeddings\n\n def _resize_final_logits_bias(self, new_num_tokens: int) -> None:\n old_num_tokens = self.final_logits_bias.shape[-1]\n if new_num_tokens <= old_num_tokens:\n new_bias = self.final_logits_bias[:, :new_num_tokens]\n else:\n extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)\n new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)\n self.register_buffer(\"final_logits_bias\", new_bias)\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n @add_end_docstrings(BIGBIRD_PEGASUS_GENERATION_EXAMPLE)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,\n config.vocab_size]`` or -100 (see ``input_ids`` docstring). 
Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.\n\n Returns:\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is not None:\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(\n labels, self.config.pad_token_id, self.config.decoder_start_token_id\n )\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n def prepare_inputs_for_generation(\n self,\n decoder_input_ids,\n past=None,\n attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n use_cache=None,\n encoder_outputs=None,\n **kwargs\n ):\n # cut decoder_input_ids if past is used\n if past is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n\n return {\n \"input_ids\": None, # encoder_outputs is defined. 
input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"head_mask\": head_mask,\n \"decoder_head_mask\": decoder_head_mask,\n \"cross_attn_head_mask\": cross_attn_head_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\n return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n )\n return reordered_past\n\n\n@add_start_docstrings(\n \"\"\"\n BigBirdPegasus model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g.\n for GLUE tasks.\n \"\"\",\n BIGBIRD_PEGASUS_START_DOCSTRING,\n)\n# Copied from transformers.models.bart.modeling_bart.BartForSequenceClassification with Bart->BigBirdPegasus, BART->BIGBIRD_PEGASUS\nclass BigBirdPegasusForSequenceClassification(BigBirdPegasusPreTrainedModel):\n def __init__(self, config: BigBirdPegasusConfig, **kwargs):\n super().__init__(config, **kwargs)\n self.model = BigBirdPegasusModel(config)\n self.classification_head = BigBirdPegasusClassificationHead(\n config.d_model,\n config.d_model,\n config.num_labels,\n config.classifier_dropout,\n )\n self.model._init_weights(self.classification_head.dense)\n self.model._init_weights(self.classification_head.out_proj)\n\n @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=Seq2SeqSequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n encoder_outputs=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n\n if input_ids is None and inputs_embeds is not None:\n raise NotImplementedError(\n f\"Passing input embeddings is currently not supported for {self.__class__.__name__}\"\n )\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n encoder_outputs=encoder_outputs,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = outputs[0] # last hidden state\n\n eos_mask = input_ids.eq(self.config.eos_token_id)\n\n if len(torch.unique(eos_mask.sum(1))) > 1:\n raise ValueError(\"All examples must have the same number of <eos> tokens.\")\n sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[\n :, -1, :\n ]\n logits = self.classification_head(sentence_representation)\n\n loss = None\n if labels is not None:\n if self.config.num_labels == 1:\n # regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return Seq2SeqSequenceClassifierOutput(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n BigBirdPegasus Model with a span classification head on top for extractive question-answering tasks like SQuAD (a\n linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n BIGBIRD_PEGASUS_START_DOCSTRING,\n)\n# Copied from transformers.models.bart.modeling_bart.BartForQuestionAnswering with Bart->BigBirdPegasus, BART->BIGBIRD_PEGASUS\nclass BigBirdPegasusForQuestionAnswering(BigBirdPegasusPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n config.num_labels = 2\n self.num_labels = config.num_labels\n\n self.model = BigBirdPegasusModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.model._init_weights(self.qa_outputs)\n\n @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=Seq2SeqQuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n encoder_outputs=None,\n start_positions=None,\n end_positions=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n 
use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if start_positions is not None and end_positions is not None:\n use_cache = False\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n encoder_outputs=encoder_outputs,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (\n start_logits,\n end_logits,\n ) + outputs[1:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return Seq2SeqQuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n\n# Copied from transformers.models.pegasus.modeling_pegasus.PegasusDecoderWrapper with Pegasus->BigBirdPegasus\nclass BigBirdPegasusDecoderWrapper(BigBirdPegasusPreTrainedModel):\n \"\"\"\n This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is\n used in combination with the :class:`~transformers.EncoderDecoderModel` framework.\n \"\"\"\n\n 
def __init__(self, config):\n super().__init__(config)\n self.decoder = BigBirdPegasusDecoder(config)\n\n def forward(self, *args, **kwargs):\n return self.decoder(*args, **kwargs)\n\n\n# Copied from transformers.models.pegasus.modeling_pegasus.PegasusForCausalLM with Pegasus->BigBirdPegasus, 'facebook/bart-large'->\"google/bigbird-pegasus-large-arxiv\"\nclass BigBirdPegasusForCausalLM(BigBirdPegasusPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n config = copy.deepcopy(config)\n config.is_decoder = True\n config.is_encoder_decoder = False\n self.model = BigBirdPegasusDecoderWrapper(config)\n\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.model.decoder.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.decoder.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model.decoder = decoder\n\n def get_decoder(self):\n return self.model.decoder\n\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n head_mask=None,\n cross_attn_head_mask=None,\n past_key_values=None,\n inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.BigBirdPegasusTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n if the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used\n in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):\n Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2\n tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional\n tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two\n additional tensors are only required when the model is used as a decoder in a Sequence to Sequence\n model.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the\n cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential\n decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,\n config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are\n ignored (masked), the loss is only computed for the tokens with labels in ``[0, ...,\n config.vocab_size]``.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors\n for more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\n Returns:\n\n Example::\n\n >>> from transformers import BigBirdPegasusTokenizer, BigBirdPegasusForCausalLM\n\n >>> tokenizer = BigBirdPegasusTokenizer.from_pretrained(\"google/bigbird-pegasus-large-arxiv\")\n >>> model = BigBirdPegasusForCausalLM.from_pretrained(\"google/bigbird-pegasus-large-arxiv\", add_cross_attention=False)\n >>> assert model.config.is_decoder, f\"{model.__class__} has to be configured as a decoder.\"\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n \"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model.decoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n head_mask=head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n logits = self.lm_head(outputs[0])\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_ids.shape)\n\n if past:\n input_ids = input_ids[:, -1:]\n # first step, decoder_cached_states are empty\n return {\n \"input_ids\": input_ids, # encoder_outputs is defined. input_ids not needed\n \"attention_mask\": attention_mask,\n \"past_key_values\": past,\n \"use_cache\": use_cache,\n }\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n return reordered_past\n"
] |
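The encoder in the `code` blob above silently downgrades `block_sparse` attention to `original_full` whenever the input is too short to host all global, sliding, and random tokens, and otherwise pads the input up to a block boundary. A minimal standalone sketch of those two arithmetic rules follows; the helper names here are illustrative, not part of the transformers API:

```python
# Sketch of the two sequence-length rules applied by BigBirdPegasusEncoder
# before running block-sparse attention. Constants mirror the encoder code
# above; function names are illustrative only.

def needs_full_attention(seq_len: int, block_size: int, num_random_blocks: int) -> bool:
    # block_sparse requires seq_len strictly greater than:
    #   2 * block_size global tokens
    # + 3 * block_size sliding tokens
    # + 2 * num_random_blocks * block_size random tokens
    max_tokens_to_attend = (5 + 2 * num_random_blocks) * block_size
    return seq_len <= max_tokens_to_attend

def block_padding(seq_len: int, block_size: int) -> int:
    # mirrors _pad_to_block_size: pad up to the next multiple of block_size
    return (block_size - seq_len % block_size) % block_size

# With the defaults in BigBirdPegasusConfig (block_size=64,
# num_random_blocks=3), anything up to (5 + 6) * 64 = 704 tokens
# falls back to full attention with only a warning:
assert needs_full_attention(704, 64, 3)
assert not needs_full_attention(705, 64, 3)
assert block_padding(705, 64) == 63  # padded from 705 to 768 tokens
```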
[
[
"torch.nn.functional.softmax",
"torch.transpose",
"torch.cat",
"torch.zeros",
"torch.nn.Embedding",
"numpy.concatenate",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.einsum",
"numpy.arange",
"torch.from_numpy",
"numpy.stack",
"torch.tensor",
"tensorflow.train.list_variables",
"torch.arange",
"numpy.zeros",
"torch.nn.functional.pad",
"torch.full",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"torch.minimum",
"numpy.transpose",
"numpy.array",
"numpy.sum",
"tensorflow.saved_model.load",
"numpy.random.seed",
"torch.nn.LayerNorm",
"torch.nn.Tanh",
"torch.matmul",
"numpy.random.permutation",
"torch.nn.MSELoss"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.math.abs",
"tensorflow.math.sign",
"tensorflow.cast",
"tensorflow.where",
"tensorflow.rank",
"tensorflow.squeeze",
"tensorflow.gather",
"tensorflow.math.reduce_sum",
"tensorflow.compat.v1.distributions.Bernoulli",
"tensorflow.name_scope",
"tensorflow.keras.layers.Add",
"tensorflow.tile",
"tensorflow.matmul",
"tensorflow.fill",
"tensorflow.shape",
"tensorflow.keras.layers.Dense",
"tensorflow.zeros_initializer",
"tensorflow.split",
"tensorflow.nn.bias_add",
"tensorflow.clip_by_value",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.broadcast_to",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.math.log",
"tensorflow.keras.layers.Dropout"
],
[
"torch.nn.functional.softmax",
"torch.transpose",
"torch.cat",
"torch.nn.functional.dropout",
"torch.zeros",
"torch.nn.Embedding",
"torch.tanh",
"numpy.concatenate",
"torch.finfo",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.einsum",
"numpy.arange",
"numpy.stack",
"torch.tensor",
"torch.bmm",
"torch.arange",
"numpy.zeros",
"torch.nn.functional.pad",
"torch.isinf",
"torch.nn.Linear",
"torch.minimum",
"numpy.array",
"numpy.sum",
"numpy.random.seed",
"torch.isnan",
"torch.nn.LayerNorm",
"torch.matmul",
"numpy.random.permutation",
"torch.clamp",
"torch.nn.MSELoss"
]
] |
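The `apis` field above records, for each source file in the record, the fully qualified names of the library calls that file makes. How this dataset actually produced the field is not stated here; the sketch below shows how a comparable list could be rebuilt with the standard-library `ast` module, as an illustration of the data's shape rather than the real pipeline. Note that a real extractor would also have to resolve import aliases, since the model code writes `nn.Embedding` while the list records `torch.nn.Embedding`:

```python
# Hedged sketch: collect dotted call names from a Python source file.
# This is NOT the documented extraction pipeline for this dataset.
import ast

def extract_call_names(source: str) -> set:
    """Return the dotted name of every function call found in `source`."""
    def dotted(node):
        # resolve chained attributes, e.g. torch.nn.functional.pad
        if isinstance(node, ast.Attribute):
            base = dotted(node.value)
            return f"{base}.{node.attr}" if base else None
        if isinstance(node, ast.Name):
            return node.id
        return None  # e.g. calls on the result of another call

    names = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call):
            name = dotted(node.func)
            if name:
                names.add(name)
    return names

# e.g. running this over the modeling file above would surface names
# such as "torch.cat" and "nn.functional.pad" (alias-unresolved).
```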
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
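The version field above carries one dict per file, parallel to the `apis` sublists; in this record only the second (all-TensorFlow) file pins a framework version, `tensorflow` `"1.10"`. A minimal sketch of pairing the two parallel lists, assuming the record has been parsed back into Python objects keyed by the dataset's column names (an assumption; the exact access API is not shown in this dump):

```python
# Hedged sketch: find files in a parsed record that pin a framework
# version. Assumes dataset columns are exposed as dict keys.
def files_pinning(record: dict, framework: str):
    """Yield (file_path, versions) for files with a non-empty constraint."""
    for path, versions in zip(record["file_path"], record["possible_versions"]):
        pinned = versions.get(framework, [])
        if pinned:
            yield path, pinned

# For the record above, only the second file would be yielded,
# paired with ["1.10"].
```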
kimandsharp/dockeye_multi
|
[
"f9e987024ba796fd2c12a17ef06b9bf3bd0eb69f"
] |
[
"src/dockeyeM_c.py"
] |
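A new record begins here: repository `kimandsharp/dockeye_multi`, commit `f9e987024ba796fd2c12a17ef06b9bf3bd0eb69f`, file `src/dockeyeM_c.py`. Given those three fields, the pinned revision of a file can usually be retrieved from GitHub's raw endpoint; a hedged sketch, assuming the repository is public and still hosted on GitHub:

```python
# Hedged sketch: fetch the exact revision referenced by a record's
# repo_name / hexsha / file_path triple (assumes a public GitHub repo).
import urllib.request

def fetch_pinned_file(repo_name: str, sha: str, path: str) -> str:
    url = f"https://raw.githubusercontent.com/{repo_name}/{sha}/{path}"
    with urllib.request.urlopen(url) as resp:
        return resp.read().decode("utf-8")

# e.g. fetch_pinned_file("kimandsharp/dockeye_multi",
#                        "f9e987024ba796fd2c12a17ef06b9bf3bd0eb69f",
#                        "src/dockeyeM_c.py")
```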
[
"#############################################\n# Author: Kim Sharp\n# Date: 7/21/2017\n# \n# branch off dockeye_c_v2.3.py to dock multiple conformations\n# of ligand simultaneously\n# usage inside pymol command window: de('protein_target.pdb','ligand.pdb')\n# target protein 1st, then ligand- ligand pdb file must have at least one conformation\n# bracketed by MODEL, ENDMDL records\n# branch off dockeyeM_c_v1.4.py to use fortran energy subroutine\n# interfaced by numpy.f2py\n#\n#############################################\nimport sys\nimport os\nimport math\nfrom pymol.callback import Callback\nfrom pymol.cgo import *\nfrom pymol import cmd\nfrom dockeye_methods import *\nimport dockeyeM_energy\nimport numpy as np\n\n#=======================================\n# defs to create cgo object directly rather than reading from file\ndef pymol_cgo_new(ctype):\n cgo_obj = [LINEWIDTH, 3.0,BEGIN,ctype]\n return cgo_obj\ndef pymol_cgo_end(cgo_obj):\n cgo_obj.append(END)\ndef pymol_cgo_addline(cgo_obj,vbeg,vend,cbeg,cend):\n #print 'in addline: ',vbeg,vend\n # start color\n rbeg = 1.\n gbeg = 1.\n bbeg = 1.\n if((cbeg >= 0.) and (cbeg < 0.5)):\n rbeg = 0.\n gbeg = 2.*cbeg\n bbeg = (1. - 2.*cbeg)\n elif((cbeg >= 0.5) and (cbeg <= 1.0)):\n rbeg = 2.*cbeg - 1.\n gbeg = 2. - 2.*cbeg\n bbeg = 0.\n elif((cbeg > 1.0) and (cbeg <= 1.5)):\n rbeg = 1.\n gbeg = 0.\n bbeg = 2.*(cbeg - 1.0)\n # end color\n rend = 1.\n gend = 1.\n bend = 1.\n if((cend >= 0.) and (cend < 0.5)):\n rend = 0.\n gend = 2.*cend\n bend = (1. - 2.*cend)\n elif((cend >= 0.5) and (cend <= 1.0)):\n rend = 2.*cend - 1.\n gend = 2. - 2.*cend\n bend = 0.\n elif((cend > 1.0) and (cend <= 1.5)):\n rend = 1.\n gend = 0.\n bend = 2.*(cend - 1.0)\n temp = [COLOR,rbeg,gbeg,bbeg,VERTEX,vbeg[0],vbeg[1],vbeg[2],\n COLOR,rend,gend,bend,VERTEX,vend[0],vend[1],vend[2]]\n #print 'temp', temp\n for i in range(len(temp)):\n cgo_obj.append(temp[i])\ndef pymol_cgo_addtri(cgo_obj,v1,v2,v3,color,nm):\n red = 1.\n green = 1.\n blue = 1.\n coords = [v1,v2,v3]\n temp = []\n for k in range(3):\n if((color[k] >= 0.) and (color[k] < 0.5)):\n red = 0.\n green = 2.*color[k]\n blue = (1. - 2.*color[k])\n elif((color[k] >= 0.5) and (color[k] <= 1.0)):\n red = 2.*color[k] - 1.\n green = 2. 
- 2.*color[k]\n blue = 0.\n elif((color[k] > 1.0) and (color[k] <= 1.5)):\n red = 1.\n green = 0.\n blue = 2.*(color[k] - 1.0)\n temp1 = [NORMAL,nm[0],nm[1],nm[2],COLOR,red,green,blue,VERTEX,coords[k][0],coords[k][1],coords[k][2]]\n for i in range(len(temp1)):\n temp.append(temp1[i])\n #print 'temp', temp\n for i in range(len(temp)):\n cgo_obj.append(temp[i])\n#=======================================\n# this is the def executed on line in pymol\n#def de(pdbfile1=\"ag.pdb\",pdbfile2=\"ab.pdb\",charges=True):\n#def de(pdbfile1=\"ab.pdb\",pdbfile2=\"ag.pdb\",charges=True,logscale=True,dielectric=80.,eps=0.1):\n#def de(pdbfile1=\"ab.pdb\",pdbfile2=\"ligand.pdbqt\",charges=True,logscale=True,dielectric=80.,eps=0.1):\ndef de(pdbfile1=\"IL1B.atm\",pdbfile2=\"MIM_tor.atm\",charges=True,logscale=True,dielectric=80.,eps=0.1,handle=False):\n # extract names, and create the 'object' name in the pymol menu window\n print('22 apr 2020 add ability to save/restore poses')\n #pdbobj1 = pdbfile1[:-4]\n pdbobj1 = 'dockeye_prt'\n cmd.load(pdbfile1, pdbobj1)\n #pdbobj2 = pdbfile2[:-4]\n #cmd.load(pdbfile2, pdbobj2)\n #\n # Dockeye class reads pdb file upon init\n pdbobj2 = 'dockeye_lig'\n obj = Dockeye(pdbfile1,pdbfile2,pdbobj1,pdbobj2,charges,logscale,dielectric,eps,handle)\n #\n # but pymol also loads first ligand model for display\n cmd.load('dockeye_lig.pdb',pdbobj2)\n os.system('/bin/rm -f dockeye_lig.pdb')\n # remove any previous temporary files \n os.system('/bin/rm -f dockeye_mark.*')\n os.system('/bin/rm -f dockeye_action')\n cmd.zoom()\n cmd.do(\"set auto_zoom, off\",echo=0)\n name = 'dockeye'\n cmd.load_callback(obj,name)\n return obj\n\n#=======================================\nclass Dockeye(Callback):\n def __init__(self,pdbfile1,pdbfile2,pdbobj1,pdbobj2,charges,logscale,dielectric,eps,handle):\n # calling arguments\n self.pdbfile1 = pdbfile1\n self.pdbfile2 = pdbfile2\n self.pdbobj1 = pdbobj1\n self.pdbobj2 = pdbobj2\n self.logscale = logscale\n self.dielectric = dielectric\n self.eps = eps\n self.nbest = [0,0]\n print('Initializing Dockeye...')\n print('pdb file 1: ',pdbfile1)\n print('pdb file 2: ',pdbfile2)\n print('charges, logscale energy bars: ',charges,logscale)\n print('energy parameters dielectric: %8.3f VDW depth: %8.3f' % (dielectric,eps))\n # read original pdb coords\n self.pdb1 = pdb_struct()\n self.pdb1.readfile(self.pdbfile1)\n self.pdb2 = pdb_struct()\n self.pdb2.readligand(self.pdbfile2,handle)\n self.iconf = self.pdb2.nmodel\n #self.iconf = 2\n self.objmat1 = [1.,0.,0.,1., 0.,1.,0.,0., 0.,0.,1.,0., 0.,0.,0.,1.]\n self.objmat2 = [1.,0.,0.,0., 0.,1.,0.,0., 0.,0.,1.,0., 0.,0.,0.,1.]\n self.my_view = [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]\n #print name\n #print self.pdb1.coords\n #print self.pdb2.coords\n self.gcen1 = [0.,0.,0.]\n self.qtot1 = 0.\n self.qtot2 = 0.\n self.energy = [0.,0.,0.]\n self.energy_min = 0.\n for i in range(self.pdb1.natom):\n for k in range(3):\n self.gcen1[k] += self.pdb1.coords[i][k]\n for k in range(3):\n self.gcen1[k] /= self.pdb1.natom\n self.gcen2 = [0.,0.,0.]\n for i in range(self.pdb2.natom):\n for k in range(3):\n self.gcen2[k] += self.pdb2.coords[i][k]\n for k in range(3):\n self.gcen2[k] /= self.pdb2.natom\n if(not charges):\n for i in range(self.pdb1.natom):\n self.pdb1.bfact[i] = 0.\n for i in range(self.pdb2.natom):\n self.pdb2.bfact[i] = 0.\n if(charges):\n for i in range(self.pdb1.natom):\n self.qtot1 += self.pdb1.bfact[i]\n for i in range(self.pdb2.natom):\n self.qtot2 += self.pdb2.bfact[i]\n print('using fortran version')\n 
print('# of atoms 1: %6d 2: %6d' % (self.pdb1.natom,self.pdb2.natom))\n        print('geometric centers: ')\n        print('1: %8.3f %8.3f %8.3f ' % (self.gcen1[0],self.gcen1[1],self.gcen1[2]))\n        print('2: %8.3f %8.3f %8.3f ' % (self.gcen2[0],self.gcen2[1],self.gcen2[2]))\n        print('net charge 1: %8.3f 2: %8.3f ' % (self.qtot1,self.qtot2))\n        #\n        # open timestamped logfile\n        #\n        os.system('date > dockeye_date.tmp')\n        date_tmp = open('dockeye_date.tmp')\n        date_raw = date_tmp.readline()\n        date_tmp.close()\n        os.system('/bin/rm -f dockeye_date.tmp')\n        #print(date_raw)\n        df = date_raw.split()\n        #print(df)\n        file_name = 'dockeyeM_' + df[2] + df[1] + df[5] + '_' + df[3] + '.log'\n        print('writing to logfile: ',file_name)\n        self.dockeye_log = open(file_name,'w')\n        self.dockeye_log.write('pdbfile 1: '+ pdbfile1+'\\n')\n        self.dockeye_log.write('pdbfile 2: '+ pdbfile2+'\\n')\n        self.dockeye_log.write( '# of atoms 1: %6d 2: %6d\\n' % (self.pdb1.natom,self.pdb2.natom))\n        self.dockeye_log.write( 'geometric centers: \\n')\n        self.dockeye_log.write( '1: %8.3f %8.3f %8.3f \\n' % (self.gcen1[0],self.gcen1[1],self.gcen1[2]))\n        self.dockeye_log.write( '2: %8.3f %8.3f %8.3f \\n' % (self.gcen2[0],self.gcen2[1],self.gcen2[2]))\n        self.dockeye_log.write( 'net charge 1: %8.3f 2: %8.3f \\n' % (self.qtot1,self.qtot2))\n        self.dockeye_log.write('energy parameters dielectric: %8.3f VDW depth: %8.3f\\n' % (dielectric,eps))\n        self.dockeye_log.write('# of ligand conformers: %6d\\n' % (self.pdb2.nmodel))\n\n\n    def __call__(self):\n        # 1st check for a dockeye action flag\n        de_action = 'none'\n        mark_number = 0\n        do_mm = True\n        try:\n            actionfile = open('dockeye_action','r')\n            line = actionfile.readline()\n            fields = line.split()\n            de_action = fields[0]\n            mark_number = int(fields[1])\n            actionfile.close()\n            os.system('/bin/rm -f dockeye_action')\n            #print(de_action,mark_number)\n        except:\n            #\n            de_action = 'none'\n            mark_number = 0\n        # get view on screen\n        my_view = cmd.get_view()\n        delta_mv = 0.\n        for i in range(18):\n            delta_mv = max(delta_mv,abs(my_view[i] - self.my_view[i]))\n            self.my_view[i] = my_view[i] \n        #print my_view\n        # get orientation/position matrices for two molecules\n        # how does pymol define rotation center of molecule? \n        # apparently by geometric average\n        pdbmat1 = cmd.get_object_matrix(self.pdbobj1)\n        pdbmat2 = cmd.get_object_matrix(self.pdbobj2)\n        #\n        if(de_action == 'mark'):\n            # write bookmark\n            print('bookmarking... 
',mark_number)\n et = self.energy[0]\n ee = self.energy[1]\n ev = self.energy[2]\n print('current energies: %12.5g %12.5g %12.5g model %4d \\n' % (ee,ev,et,self.nbest[0]))\n #mark_prt = 'dockeye_prt_mark_%d.pdb' % (mark_number)\n mark_lig = 'dockeye_lig_mark_%d.pdb' % (mark_number)\n ligfile = open(mark_lig,'w')\n ligfile.write('REMARK pdbfile 1: '+ self.pdbfile1+'\\n')\n ligfile.write('REMARK pdbfile 2: '+ self.pdbfile2+'\\n')\n ligfile.write('REMARK # of atoms 1: %6d 2: %6d\\n' % (self.pdb1.natom,self.pdb2.natom))\n ligfile.write('REMARK geometric centers: \\n')\n ligfile.write('REMARK 1: %8.3f %8.3f %8.3f \\n' % (self.gcen1[0],self.gcen1[1],self.gcen1[2]))\n ligfile.write('REMARK 2: %8.3f %8.3f %8.3f \\n' % (self.gcen2[0],self.gcen2[1],self.gcen2[2]))\n ligfile.write('REMARK net charge 1: %8.3f 2: %8.3f \\n' % (self.qtot1,self.qtot2))\n ligfile.write('REMARK energy parameters dielectric: blank VDW depth: blank\\n')\n ligfile.write('REMARK # of ligand conformers: %6d\\n' % (self.pdb2.nmodel))\n ligfile.write('REMARK current energies: %12.5g %12.5g %12.5g model %4d \\n' % (ee,ev,et,self.nbest[0]))\n for i in range(4):\n for j in range(4):\n indx = j + 4*i\n ligfile.write('REMARK %12.5f ' % (pdbmat1[indx]))\n ligfile.write('\\n')\n for i in range(4):\n for j in range(4):\n indx = j + 4*i\n ligfile.write('REMARK %12.5f ' % (pdbmat2[indx]))\n ligfile.write('\\n')\n #\n # extract rot\n rmtp = [[pdbmat1[0],pdbmat1[1],pdbmat1[2]],\n [pdbmat1[4],pdbmat1[5],pdbmat1[6]],\n [pdbmat1[8],pdbmat1[9],pdbmat1[10]]]\n rmtl = [[pdbmat2[0],pdbmat2[1],pdbmat2[2]],\n [pdbmat2[4],pdbmat2[5],pdbmat2[6]],\n [pdbmat2[8],pdbmat2[9],pdbmat2[10]]]\n #\n # extract trans\n trnp = [pdbmat1[3],pdbmat1[7],pdbmat1[11]]\n trnl = [pdbmat2[3],pdbmat2[7],pdbmat2[11]]\n #\n # apply transrot to coords \n # write out transrot to temporary pdb files\n gcenp_rot = rot_vec(rmtp,self.gcen1)\n gcenl_rot = rot_vec(rmtl,self.gcen2)\n for k in range(3):\n trnp[k] = trnp[k] - (self.gcen1[k] - gcenp_rot[k])\n trnl[k] = trnl[k] - (self.gcen2[k] - gcenl_rot[k])\n i1 = 0\n #print(rmtp,trnp)\n #print(rmtl,trnl)\n xyz = [0.,0.,0.]\n tmpfile = 'dockeye_lig_tmp.pdb'\n for n in range(self.pdb2.nmodel):\n ligfile.write('MODEL%4d \\n'%(n+1))\n for i in range(self.pdb2.natom):\n #\n # apply rotations and translations\n # and inverse of protein rot/trans to ligand\n # in case user moved protein too- now ligand should be in\n # coord frame of original protein pdb\n for k in range(3):\n xyz[k] = self.pdb2.coords[i1][k] - self.gcen2[k]\n xyz1 = rot_vec(rmtl,xyz)\n for k in range(3):\n xyz[k] = xyz1[k] + self.gcen2[k] + trnl[k] - self.gcen1[k] - trnp[k]\n xyz2 = rot_vec(rmtp,xyz,inv=1)\n for k in range(3):\n xyz[k] = xyz2[k] + self.gcen1[k]\n string = 'ATOM %6d%6s%4s%1s%4s %8.3f%8.3f%8.3f%6.2f%7.3f \\n' \\\n % (i,self.pdb2.name[i],self.pdb2.res[i], \\\n self.pdb2.chain[i], self.pdb2.resnum[i], xyz[0],xyz[1],xyz[2], \\\n self.pdb2.radius[i],self.pdb2.bfact[i])\n ligfile.write(string)\n i1 += 1\n ligfile.write('ENDMDL\\n')\n ligfile.close()\n mark_pml = 'dockeye_mark_%d.pml' % (mark_number)\n pmlfile = open(mark_pml,'w')\n pmlfile.write('#------------------------------------------------\\n')\n pmlfile.write('run $HOME/source/dockeye_multi/src/dockeyeM_c.py\\n')\n pmlfile.write('de(\"%s\",\"%s\")\\n'%(self.pdbfile1,mark_lig))\n pmlfile.write('#optional view settings\\n')\n pmlfile.write('hide lines\\n')\n pmlfile.write('spectrum b, red_white_blue\\n')\n pmlfile.write('show sticks, dockeye_lig\\n')\n pmlfile.write('show surface, dockeye_prt\\n')\n 
pmlfile.write('set transparency, 0.4\\n')\n pmlfile.write('#------------------------------------------------\\n')\n pmlfile.close()\n # done with bookmarking\n #\n # check for new view or pose\n #\n delta_mm = 0.\n for i in range(12):\n delta_mm = max(delta_mm,abs(pdbmat1[i] - self.objmat1[i]))\n delta_mm = max(delta_mm,abs(pdbmat2[i] - self.objmat2[i]))\n self.objmat1[i] = pdbmat1[i] \n self.objmat2[i] = pdbmat2[i] \n if(delta_mm > 0.01): # we only do expensive energy calc if pose changed\n do_mm = True\n else:\n do_mm = False\n if((delta_mv > 0.01) or do_mm): # we only update if pose or view changed\n cgo_obj = pdb_interaction(pdbmat1,pdbmat2,self.pdb1,self.pdb2,self.gcen1,self.gcen2,\n self.energy,do_mm,self.logscale,self.dielectric,self.eps,self.nbest,self.energy_min)\n if(self.nbest[0] != self.nbest[1]):\n # print('Switching models ',self.nbest)\n self.nbest[1] = self.nbest[0]\n #\n # write new best pose to logfile\n #\n et = self.energy[0]\n ee = self.energy[1]\n ev = self.energy[2]\n if(self.energy[0] < self.energy_min):\n print(' NEW MIN ee: %12.3g ev: %12.3g et: %12.3g model %4d ' % (ee,ev,et,self.nbest[0]))\n self.energy_min = et\n self.dockeye_log.write('new min: %12.5g %12.5g %12.5g model %4d \\n' % (ee,ev,et,self.nbest[0]))\n for i in range(4):\n for j in range(4):\n indx = j + 4*i\n self.dockeye_log.write('%12.5f ' % (pdbmat1[indx]))\n self.dockeye_log.write('\\n')\n for i in range(4):\n for j in range(4):\n indx = j + 4*i\n self.dockeye_log.write('%12.5f ' % (pdbmat2[indx]))\n self.dockeye_log.write('\\n')\n #else:\n # if(do_mm): \n # #print('Current energy: ee: %12.3g ev: %12.3g et: %12.3g' % (ee,ev,et))\n # continue\n if(do_mm):\n cmd.delete('dockeye_obj')\n cmd.load_cgo(cgo_obj,'dockeye_obj')\n draw_ligand(pdbmat2,self.pdb2,self.gcen2,self.nbest[0])\n #if(self.iconf != 0):\n # draw_ligand(pdbmat2,self.pdb2,self.gcen2,self.iconf)\n # self.iconf = 0\n#=======================================\ndef pnl_make(rmt1,rmt2,gcen1,gcen2,trn1,trn2,energy,emin):\n \"\"\"\n refresh and display panel with energy bars and orientations\n 2 sept 2019, move energy bars up for more room for -ve energies\n and dynamically rescale to keep bars in window\n \"\"\"\n my_view = cmd.get_view()\n mod_center = [my_view[12],my_view[13],my_view[14]]\n rmtView = [[my_view[0], my_view[3], my_view[6]],\n [my_view[1], my_view[4], my_view[7]],\n [my_view[2], my_view[5], my_view[8]]] \n cam_center = [my_view[9],my_view[10],my_view[11]]\n d_front = my_view[15]\n d_back = my_view[16]\n scale = d_back - d_front\n xsize = abs(cam_center[2])/22.\n xmove = 3.0\n ymove = 3.0\n if(emin < -4.):\n bscale = abs(emin)//4\n else:\n bscale = 1\n #\n # create pnl objects\n #\n #\n # energy bars\n # +ve energies on log scale, -ve on linear scale now\n #\n ltype = TRIANGLES\n bar_obj = pymol_cgo_new(ltype)\n dy = -0.2\n #ecut = 0.5\n ecut = 0.2\n et = energy[0]\n ee = energy[1]\n ev = energy[2]\n if(et < -ecut):\n et_color = 0. # blue\n #et_size = -1.*xsize\n # log scale\n #et_size = (0.1 -1.*math.log10(-et/ecut))*xsize*2.\n # linear scale\n et_size = 0.5*et*xsize/bscale\n elif(et > ecut):\n et_color = 1. # red\n #et_size = 1.*xsize\n et_size = (0.1 + 1.*math.log10(et/ecut))*xsize*2.\n else:\n et_color = 2. # white\n et_size = 0.1*xsize\n et_size_min = 0.5*emin*xsize/bscale\n #\n if(ee < -ecut):\n ee_color = 0. # blue\n #ee_size = -1.*xsize\n #ee_size = (0.1 -1.*math.log10(-ee/ecut))*xsize*2.\n # linear scale\n ee_size = 0.5*ee*xsize/bscale\n elif(ee > ecut):\n ee_color = 1. 
# red\n #ee_size = 1.*xsize\n ee_size = (0.1 + 1.*math.log10(ee/ecut))*xsize*2.\n else:\n ee_color = 2. # white\n ee_size = 0.1*xsize\n #\n if(ev < -ecut):\n ev_color = 0. # blue\n #ev_size = -1.*xsize\n #ev_size = (0.1 -1.*math.log10(-ev/ecut))*xsize*2.\n # linear scale\n ev_size = 0.5*ev*xsize/bscale\n elif(ev > ecut):\n ev_color = 1. # red\n #ev_size = 1.*xsize\n ev_size = (0.1 + 1.*math.log10(ev/ecut))*xsize*2.\n else:\n ev_color = 2. # white\n ev_size = 0.1*xsize\n #\n # at left, vertical bars\n #\n dx = -0.2 # bar separation\n #-----------------\n # E total\n #-----------------\n dend = [0.85*dx*xsize,0.,0.]\n dend_rot = rot_vec(rmtView,dend,inv=1)\n nm = [0.,0.,1.]\n nm_rot = rot_vec(rmtView,nm,inv=1)\n end1 = [0.,0.,0.]\n beg1 = [0.,0.,0.]\n end2 = [0.,0.,0.]\n #\n et_beg = [0.,0.,0.]\n et_end = [0.,et_size,0.]\n et_end_min = [0.,et_size_min,0.]\n et_end_rot = rot_vec(rmtView,et_end,inv=1)\n et_end_min_rot = rot_vec(rmtView,et_end_min,inv=1)\n #et_offset = [-1.3*xmove*xsize,0.,0.]\n et_offset = [-1.3*xmove*xsize,+1.0*ymove*xsize,0.]\n et_offset_rot = rot_vec(rmtView,et_offset,inv=1)\n for k in range(3):\n et_beg[k] = et_offset_rot[k] + mod_center[k]\n et_end_rot[k] += et_offset_rot[k] + mod_center[k]\n et_end_min_rot[k] += et_offset_rot[k] + mod_center[k]\n end1[k] = et_end_rot[k] - dend_rot[k]\n beg1[k] = et_beg[k] - dend_rot[k]\n color = [2.,et_color,et_color]\n pymol_cgo_addtri(bar_obj,et_beg,et_end_rot,end1,color,nm_rot)\n color = [2.,2.,et_color]\n pymol_cgo_addtri(bar_obj,et_beg,beg1,end1,color,nm_rot)\n #\n # low energy mark\n #\n for k in range(3):\n color[k] = 0.5\n end1[k] = et_end_min_rot[k] - dend_rot[k]\n beg1[k] = et_end_min_rot[k]\n beg1[1] = beg1[1] + 0.2\n pymol_cgo_addtri(bar_obj,beg1,et_end_min_rot,end1,color,nm_rot)\n end2[0] = end1[0]\n end2[1] = end1[1] + 0.2\n end2[2] = end1[2]\n pymol_cgo_addtri(bar_obj,beg1,end2,end1,color,nm_rot)\n #-----------------\n # E electrostatic\n #-----------------\n ee_beg = [0.,0.,0.]\n ee_end = [0.,ee_size,0.]\n ee_end_rot = rot_vec(rmtView,ee_end,inv=1)\n #ee_offset = [(-1.3*xmove+2.*dx)*xsize,0.,0.]\n ee_offset = [(-1.3*xmove+2.*dx)*xsize,+1.0*ymove*xsize,0.]\n ee_offset_rot = rot_vec(rmtView,ee_offset,inv=1)\n for k in range(3):\n ee_beg[k] = ee_offset_rot[k] + mod_center[k]\n ee_end_rot[k] += ee_offset_rot[k] + mod_center[k]\n end1[k] = ee_end_rot[k] - dend_rot[k]\n beg1[k] = ee_beg[k] - dend_rot[k]\n color = [2.,ee_color,ee_color]\n pymol_cgo_addtri(bar_obj,ee_beg,ee_end_rot,end1,color,nm_rot)\n color = [2.,2.,ee_color]\n pymol_cgo_addtri(bar_obj,ee_beg,beg1,end1,color,nm_rot)\n #-----------------\n # E vdw\n #-----------------\n ev_beg = [0.,0.,0.]\n ev_end = [0.,ev_size,0.]\n # apply view angle\n ev_end_rot = rot_vec(rmtView,ev_end,inv=1)\n #ev_offset = [(-1.3*xmove+dx)*xsize,0.,0.]\n ev_offset = [(-1.3*xmove+dx)*xsize,+1.0*ymove*xsize,0.]\n ev_offset_rot = rot_vec(rmtView,ev_offset,inv=1)\n for k in range(3):\n ev_beg[k] = ev_offset_rot[k] + mod_center[k]\n ev_end_rot[k] += ev_offset_rot[k] + mod_center[k]\n end1[k] = ev_end_rot[k] - dend_rot[k]\n end1[k] = ev_end_rot[k] - dend_rot[k]\n beg1[k] = ev_beg[k] - dend_rot[k]\n color = [2.,ev_color,ev_color]\n pymol_cgo_addtri(bar_obj,ev_beg,ev_end_rot,end1,color,nm_rot)\n color = [2.,2.,ev_color]\n pymol_cgo_addtri(bar_obj,ev_beg,beg1,end1,color,nm_rot)\n #\n # finish up & display energy bars\n #\n pymol_cgo_end(bar_obj)\n cmd.delete('bar_obj')\n cmd.load_cgo(bar_obj,'bar_obj')\n\n#=======================================\ndef 
pdb_interaction(pdbmat1,pdbmat2,pdb1,pdb2,gcen1,gcen2,energy,do_mm,logscale,dielectric,eps,\n                    nbest,emin):\n    \"\"\"\n    # this is where we calculate interaction energy between\n    # two molecules\n    #\n    # extract rotation matrices and translation vectors\n    # note that if center of molecule not at origin\n    # then rotation of a molecule moves the origin relative to its\n    # local center, and that this displacement appears in translation so\n    # this apparent trans needs to be removed to get actual translation\n    \"\"\"\n    #=========================================\n    # global parameters, these are the nominal values used by C\n    # subroutine, energies are then post-scaled for actual parameters given as arguments to de()\n    DIEL = 80.\n    efact = 332./DIEL # dielectric constant factor gives kcal/mole for p+ unit charge, Angstroms\n    EPS = 0.1 # depth of vdw potl. kcal/mole\n    maxobjdata = 20000\n    energy_obj = np.zeros(maxobjdata,float)\n    #=========================================\n    # extract rot mat\n    rmt1 = [[pdbmat1[0],pdbmat1[1],pdbmat1[2]],\n            [pdbmat1[4],pdbmat1[5],pdbmat1[6]],\n            [pdbmat1[8],pdbmat1[9],pdbmat1[10]]]\n    rmt2 = [[pdbmat2[0],pdbmat2[1],pdbmat2[2]],\n            [pdbmat2[4],pdbmat2[5],pdbmat2[6]],\n            [pdbmat2[8],pdbmat2[9],pdbmat2[10]]]\n    #\n    # extract trans\n    trn1 = [pdbmat1[3],pdbmat1[7],pdbmat1[11]]\n    trn2 = [pdbmat2[3],pdbmat2[7],pdbmat2[11]]\n    # find rotated origin in molc. local coords\n    gcen1_rot = rot_vec(rmt1,gcen1)\n    gcen2_rot = rot_vec(rmt2,gcen2)\n    # displacement of origin subtracted from apparent trans to get actual trans\n    for k in range(3):\n        trn1[k] = trn1[k] - (gcen1[k] - gcen1_rot[k])\n        trn2[k] = trn2[k] - (gcen2[k] - gcen2_rot[k])\n    #\n    # refresh display panel\n    if(not do_mm):\n        pnl_make(rmt1,rmt2,gcen1,gcen2,trn1,trn2,energy,emin)\n        return\n    #\n    #\n    xyz = [0.,0.,0.]\n    #atom_data = [0.,0.] # stuff # atoms (as floats), coords, radii and charges in one long array to pass to energy_c\n    atom_data = [0.,0.,0.] 
# stuff # atoms (as floats), models, coords, radii and \n # charges in one long array to pass to energy_c\n nat1 = pdb1.natom\n nat2 = pdb2.natom\n nmod = pdb2.nmodel\n #print('data length 1: ',len(pdb1.coords))\n #print('data length 2: ',len(pdb2.coords))\n #\n # molecule 1\n #\n for i in range(nat1):\n atom_data.append(pdb1.radius[i])\n for i in range(nat1):\n atom_data.append(pdb1.bfact[i])\n for i in range(nat1):\n # apply rotations and translations\n for k in range(3):\n xyz[k] = pdb1.coords[i][k] - gcen1[k]\n xyz1 = rot_vec(rmt1,xyz)\n for k in range(3):\n xyz[k] = xyz1[k] + gcen1[k] + trn1[k]\n # store new coords\n for k in range(3):\n atom_data.append(xyz[k])\n #\n # molecule 2\n #\n for i in range(nat2):\n atom_data.append(pdb2.radius[i])\n for i in range(nat2):\n atom_data.append(pdb2.bfact[i])\n i = 0\n for nm in range(nmod):\n for j in range(nat2):\n # apply rotations and translations\n for k in range(3):\n xyz[k] = pdb2.coords[i][k] - gcen2[k]\n xyz2 = rot_vec(rmt2,xyz)\n for k in range(3):\n xyz[k] = xyz2[k] + gcen2[k] + trn2[k]\n # store new coords\n for k in range(3):\n atom_data.append(xyz[k])\n i += 1\n #print('i: ',i)\n #print('beg, end: ',pdb2.coords[0][0],pdb2.coords[i-1][2])\n #\n #==========================================\n # C subroutine version\n #==========================================\n atom_data[0] = float(nat1) # now we know # of atoms, put in front of data array\n atom_data[1] = float(nat2)\n atom_data[2] = float(nmod)\n # now call fortran version, where energy_obj is an argument\n # not a function return pointer\n dockeyeM_energy.energy_f(energy_obj,atom_data)\n ndata = int(energy_obj[0])\n #print('ndata: ',type(ndata))\n #print('ndata: ',ndata)\n # slice energy terms off end of data\n #energy[0] = energy_obj[ndata-3]\n nbest[0] = int(energy_obj[ndata-1])\n #print('from energy_c best model is: ',nbest[0])\n energy[1] = energy_obj[ndata-3]*DIEL/dielectric\n energy[2] = energy_obj[ndata-2]*eps/EPS\n energy[0] = energy[1] + energy[2]\n energy_obj[0] = LINEWIDTH # after we extract length of data, put real cgo 1st keyword back\n emin = min(emin,energy[0])\n # generate true Pymol object- one returned from C doesn't seem to work\n cgo_obj = []\n #for i in range(ndata): # bug- shouldn't pass energy, nbest!\n #for i in range(ndata-3):\n for i in range(ndata-4): # 29 nov 2020 -4 is correct: -3 leave etotal behind\n cgo_obj.append(energy_obj[i])\n pnl_make(rmt1,rmt2,gcen1,gcen2,trn1,trn2,energy,emin)\n return cgo_obj\n\ndef draw_ligand(pdbmat2,pdb2,gcen2,iconf):\n #=============================\n ibeg = iconf*pdb2.natom\n ifin = ibeg + pdb2.natom\n ltype = LINES\n ligand_obj = pymol_cgo_new(ltype)\n # print('drawing conf: ',iconf,ibeg,ifin,pdb2.natom)\n #\n vbeg = [0.,0.,0.]\n vend = [0.,0.,0.]\n cbeg = .5\n cend = .9\n #\n # extract rot mat\n rmt2 = [[pdbmat2[0],pdbmat2[1],pdbmat2[2]],\n [pdbmat2[4],pdbmat2[5],pdbmat2[6]],\n [pdbmat2[8],pdbmat2[9],pdbmat2[10]]]\n #\n # extract trans\n trn2 = [pdbmat2[3],pdbmat2[7],pdbmat2[11]]\n #\n # find rotated origin in molc. 
local coords\n    gcen2_rot = rot_vec(rmt2,gcen2)\n    #\n    # displacement of origin subtracted from apparent trans to get actual trans\n    for k in range(3):\n        trn2[k] = trn2[k] - (gcen2[k] - gcen2_rot[k])\n    #\n    # extract and transform coords from required model\n    xyz = [0.,0.,0.]\n    crd2 = []\n    for i in range(ibeg,ifin):\n        # apply rotations and translations\n        for k in range(3):\n            xyz[k] = pdb2.coords[i][k] \n        for k in range(3):\n            xyz[k] = pdb2.coords[i][k] - gcen2[k]\n        xyz2 = rot_vec(rmt2,xyz)\n        for k in range(3):\n            xyz[k] = xyz2[k] + gcen2[k] + trn2[k]\n        crd2.append([xyz[0],xyz[1],xyz[2]])\n    #\n    # generate bonds\n    nbond = 0\n    margin = 1.4\n    for i in range(pdb2.natom):\n        if(pdb2.name[i][2:3] == 'H'):\n            radi = 1.1\n        else:\n            radi = pdb2.radius[i]\n        for j in range(i+1,pdb2.natom):\n# print 'checking: ',i,j\n            dist2 = 0.\n            if(pdb2.name[j][2:3] == 'H'):\n                radj = 1.1\n            else:\n                radj = pdb2.radius[j]\n            for k in range(3):\n                dist2 += (crd2[i][k] - crd2[j][k])**2\n            dist = math.sqrt(dist2)\n            overlap = dist + margin - radi - radj\n            if (overlap < 0.):\n                nbond +=1\n                for k in range(3):\n                    vbeg[k] = crd2[i][k]\n                    vend[k] = crd2[j][k]\n                if(pdb2.bfact[i] > 0.1):\n                    cbeg = 0.1\n                elif(pdb2.bfact[i] < -0.1):\n                    cbeg = 0.9\n                else:\n                    cbeg = 2.\n                if(pdb2.bfact[j] > 0.1):\n                    cend = 0.1\n                elif(pdb2.bfact[j] < -0.1):\n                    cend = 0.9\n                else:\n                    cend = 2.\n                pymol_cgo_addline(ligand_obj,vbeg,vend,cbeg,cend)\n    # print \"number of bonds: \",nbond\n    pymol_cgo_end(ligand_obj)\n    cmd.delete('ligand_obj')\n    cmd.load_cgo(ligand_obj,'ligand_obj')\n\ndef mark(mark_number=1):\n    if(mark_number <1)or(mark_number>9):\n        print('bookmark must be number 1-9 only')\n        return\n    #print('pose bookmarked to ',mark_number)\n    actionfile = open('dockeye_action','w')\n    actionfile.write('mark %d\\n'%(mark_number))\n    actionfile.close()\n"
] |
[
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
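The dockeye code in the row above repeatedly performs one decomposition: it slices a rotation matrix and a translation out of PyMOL's row-major flattened 4x4 object matrix, then subtracts the displacement of the rotated geometric center, because PyMOL rotates an object about that center rather than about the origin. Below is a minimal standalone sketch of that step, assuming numpy; the helper name split_object_matrix is hypothetical and does not appear in the row's code.

import numpy as np

def split_object_matrix(pdbmat, gcen):
    """Recover (R, t) from a row-major flattened 4x4 PyMOL object matrix.

    The raw fourth column mixes the real translation with the displacement
    of the rotation center `gcen`; subtracting (gcen - R @ gcen) recovers
    the actual translation, as pdb_interaction() above does element-wise.
    """
    m = np.asarray(pdbmat, dtype=float).reshape(4, 4)
    rmt = m[:3, :3]                      # rotation block (elements 0,1,2 / 4,5,6 / 8,9,10)
    trn = m[:3, 3].copy()                # apparent translation (elements 3, 7, 11)
    gcen = np.asarray(gcen, dtype=float)
    trn -= gcen - rmt @ gcen             # remove displacement of the rotation center
    return rmt, trn

# identity pose: whatever the rotation center, the recovered translation is zero
R, t = split_object_matrix(
    [1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.],
    [5., -2., 7.])
assert np.allclose(R, np.eye(3)) and np.allclose(t, 0.)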
nudlesoup/OpticalFlow-Flownet
|
[
"0938756f5ed8dd03b5ba65af3533eea48efa15cd",
"0938756f5ed8dd03b5ba65af3533eea48efa15cd"
] |
[
"book.py",
"warping.py"
] |
[
"#!/usr/bin/env python\n\nimport cv2\nimport numpy as np\ndef getPerspectiveTransformMatrix(p1, p2):\n matrixIndex = 0\n A=[]\n for i in range(0, len(p1)):\n x, y = p1[i][0], p1[i][1]\n u, v = p2[i][0], p2[i][1]\n A.append([x, y, 1, 0, 0, 0, -u * x, -u * y, -u])\n A.append([0, 0, 0, x, y, 1, -v * x, -v * y, -v])\n A = np.asarray(A)\n U, S, Vh = np.linalg.svd(A)\n L = Vh[-1, :] / Vh[-1, -1]\n H = L.reshape(3, 3)\n return H\n\ndef getPerspectiveTransformMatrix2(p1, p2):\n matrixIndex = 0\n A=[]\n for i in range(0, len(p1)):\n x, y = p1[i][0], p1[i][1]\n u, v = p2[i][0], p2[i][1]\n A.append( [-x, -y, -1, 0, 0, 0, u * x, u * y, u])\n\n for i in range(0, len(p1)):\n x, y = p1[i][0], p1[i][1]\n u, v = p2[i][0], p2[i][1]\n A.append([0, 0, 0, -x, -y, -1, v*x, v*y, v])\n\n A = np.asarray(A)\n\n U, S, Vh = np.linalg.svd(A)\n np.set_printoptions(suppress=True)\n #print(Vh)\n L = Vh[-1,:]\n\n H = np.reshape(L,(3, 3))\n # H = np.round_(L, 4)\n print(H)\n # H=np.transpose(H)\n H=H/H[0,0]\n return H\n\nif __name__ == '__main__':\n # Read source image.\n # im_src = cv2.imread('book2.jpg')\n # Four corners of the book in source image\n # pts_src = np.array([[141, 131], [480, 159], [493, 630], [64, 601]])\n pts_src=np.array([[328, 155], [307, 285], [285, 134], [325, 301]])\n # Read destination image.\n # im_dst = cv2.imread('book1.jpg')\n # Four corners of the book in destination image.\n # pts_dst = np.array([[318, 256], [534, 372], [316, 670], [73, 473]])\n pts_dst = np.array([[324, 156], [303, 286], [279, 135], [319, 230]])\n # Calculate Homography\n #h, status = cv2.findHomography(pts_src, pts_dst)\n #h=cv2.findHomography(pts_src, pts_dst, cv2.RANSAC, 5.0)\n h=getPerspectiveTransformMatrix2(pts_src,pts_dst)\n np.set_printoptions(suppress=True)\n print(h)\n corners1=pts_src\n for i in range(len(corners1)):\n pt1 = np.array([corners1[i][0], corners1[i][1], 1])\n pt1 = pt1.reshape(3, 1)\n pt2 = np.dot(h, pt1)\n pt2 = pt2/pt2[2]\n print(pt2)\n # Warp source image to destination based on homography\n #im_out = cv2.warpPerspective(im_src, h, (im_dst.shape[1], im_dst.shape[0]))\n\n # Display images\n # cv2.imshow(\"Source Image\", im_src)\n # cv2.imshow(\"Destination Image\", im_dst)\n # cv2.imshow(\"Warped Source Image\", im_out)\n #\n # cv2.waitKey(0)\n",
"import cv2\nimport numpy as np\nfrom skimage.measure import compare_ssim\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport imutils\n\n\ndef warp_flow(img, flow):\n h, w = flow.shape[:2]\n flow = -flow\n flow[:,:,0] += np.arange(w)\n flow[:,:,1] += np.arange(h)[:,np.newaxis]\n res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)\n return res\n\ndef readFlow(fn):\n \"\"\" Read .flo file in Middlebury format\"\"\"\n with open(fn, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print('Magic number incorrect. Invalid .flo file')\n return None\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n #print('Reading %d x %d flo file\\n' % (w, h))\n data = np.fromfile(f, np.float32, count=2*int(w)*int(h))\n # Reshape data into 3D array (columns, rows, bands)\n # The reshape here is for visualization, the original code is (w,h,2)\n x=np.resize(data, (int(h), int(w), 2))\n x=x\n u = x[:, :, 0]\n v = x[:, :, 1]\n print(\"u mean : \" + str(np.mean(u)))\n print(\"v mean : \" + str(np.mean(v)))\n print(\"u std : \" + str(np.std(u)))\n print(\"v std : \" + str(np.std(v)))\n print(\"u max : \" + str(np.max(u)))\n print(\"u min : \" + str(np.min(u)))\n print(\"v max : \" + str(np.max(v)))\n print(\"v min : \" + str(np.min(v)))\n return x\n\nflow = readFlow(\"/home/nudlesoup/Research/flownet2-pytorch/rangetest/flownet2-catch37/flow/000204.flo\")\nim1 = np.asarray(cv2.imread(\"/home/nudlesoup/Research/flownet2-pytorch/rangetest/inference/imxx205.png\"))\nim2 = np.asarray(cv2.imread(\"/home/nudlesoup/Research/flownet2-pytorch/rangetest/inference/imxx206.png\"))\n\nwrap1 = warp_flow(im1, flow)\nwrap1_fake = warp_flow(im1, np.zeros_like(flow))\n\n# cv2.imwrite(\"/home/nudlesoup/Research/flownet2-pytorch/rangetest/ex4/im1*255.jpg\", im1*255)\n# cv2.imwrite(\"/home/nudlesoup/Research/flownet2-pytorch/rangetest/inference/imxx1-flow-warping.jpg\", wrap1)\n#cv2.imwrite(\"/home/nudlesoup/Research/flownet2-pytorch/rangetest/ex3/ssim/flownetSD400/warping-fake.jpg\", wrap1_fake)\n\n# imageA=wrap1\n# imageB=im2\nimageA=im1\nimageB=im2\n# convert the images to grayscale\ngrayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)\ngrayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)\n\n(score, diff) = compare_ssim(grayA, grayB, full=True)\ndiff = (diff * 255).astype(\"uint8\")\nprint(\"SSIM: {}\".format(score))\n\nthresh = cv2.threshold(diff, 0, 255,\n\tcv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\ncnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,\n\tcv2.CHAIN_APPROX_SIMPLE)\ncnts = imutils.grab_contours(cnts)\nfor c in cnts:\n\t# compute the bounding box of the contour and then draw the\n\t# bounding box on both input images to represent where the two\n\t# images differ\n\t(x, y, w, h) = cv2.boundingRect(c)\n\tcv2.rectangle(imageA, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\tcv2.rectangle(imageB, (x, y), (x + w, y + h), (0, 0, 255), 2)\n# show the output images\n# cv2.imwrite(\"/home/nudlesoup/Research/flownet2-pytorch/rangetest/ex3/ssim/flownetSD400/original-frame2-0.jpg\", imageA)\n# cv2.imwrite(\"/home/nudlesoup/Research/flownet2-pytorch/rangetest/ex3/ssim/flownetSD400/modified-frame2-0.jpg\", imageB)\n# cv2.imwrite(\"/home/nudlesoup/Research/flownet2-pytorch/rangetest/ex3/ssim/flownetSD400/diff-0.jpg\", diff)\n# invert = cv2.bitwise_not(diff)\n# cv2.imwrite(\"/home/nudlesoup/Research/flownet2-pytorch/rangetest/ex3/ssim/flownetSD400/diff-invert-0.jpg\", invert)\n# 
cv2.imwrite(\"/home/nudlesoup/Research/flownet2-pytorch/rangetest/inference/unet.jpg\", thresh)\n"
] |
[
[
"numpy.dot",
"numpy.linalg.svd",
"numpy.asarray",
"numpy.reshape",
"numpy.set_printoptions",
"numpy.array"
],
[
"numpy.fromfile",
"numpy.min",
"numpy.arange",
"numpy.max",
"numpy.std",
"numpy.zeros_like",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
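The warp_flow helper in warping.py above works because cv2.remap expects absolute source coordinates rather than displacements: negating the flow and adding a pixel-index grid converts one into the other. Below is a self-contained check of that behaviour with a synthetic one-pixel shift; the tiny image and flow field are invented for the test.

import cv2
import numpy as np

def warp_flow(img, flow):
    # same logic as the row above: turn per-pixel displacements into the
    # absolute sampling coordinates that cv2.remap expects
    h, w = flow.shape[:2]
    coords = (-flow).astype(np.float32)
    coords[:, :, 0] += np.arange(w, dtype=np.float32)
    coords[:, :, 1] += np.arange(h, dtype=np.float32)[:, np.newaxis]
    return cv2.remap(img, coords, None, cv2.INTER_LINEAR)

img = np.zeros((4, 4), np.float32)
img[:, 1] = 1.0                   # bright column at x = 1
flow = np.zeros((4, 4, 2), np.float32)
flow[:, :, 0] = 1.0               # every pixel moves one step in +x
out = warp_flow(img, flow)
assert out[0, 2] == 1.0           # the bright column lands at x = 2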
mdzh10/covid19_ultrasound
|
[
"8a1192053554e0a08649c35d9686dc842f0fe026",
"3625e95bbf189926dbd12966ef59ee71ed10e453"
] |
[
"pocovidnet/pocovidnet/evaluate_covid19.py",
"pocovidnet/pocovidnet/statistical_tests.py"
] |
[
"\"\"\"\nEvaluation class that performs forward pass through trained models\n\"\"\"\nimport os\n\nimport cv2\nimport numpy as np\n\nfrom pocovidnet import MODEL_FACTORY\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\nNUM_FOLDS = 5\nCLASS_MAPPING = {\n    3: ['covid', 'pneumonia', 'regular'],\n    4: ['covid', 'pneumonia', 'regular', 'uninformative']\n}\n\n\nclass Evaluator(object):\n\n    def __init__(\n        self,\n        weights_dir=None,\n        ensemble=True,\n        split=None,\n        model_id=None,\n        num_classes=3,\n        mc_dropout: bool = False,\n        test_augment: bool = False\n    ):\n        \"\"\"\n        Constructor of COVID model evaluator class.\n        \n        Arguments:\n            ensemble {bool} -- Whether the model ensemble is used.\n            num_classes: must be 3 or 4, how many classes the model was\n                trained on\n        \"\"\"\n        self.root = os.path.join('/', *DIR_PATH.split('/')[:-1])\n        if weights_dir is None:\n            weights_dir = os.path.join(self.root, \"trained_models\")\n        self.split = split\n        self.ensemble = ensemble\n        assert num_classes == 3 or num_classes == 4, \"must be 3 or 4 classes\"\n        if model_id is None:\n            self.model_id = 'vgg_base'\n        elif model_id not in MODEL_FACTORY.keys():\n            raise ValueError(\n                f'Wrong model {model_id}. Options are:{MODEL_FACTORY.keys()}'\n            )\n        else:\n            self.model_id = model_id\n\n        if ensemble:\n            # restores 5 weight paths\n            self.weights_paths = [\n                os.path.join(\n                    weights_dir,\n                    'fold_' + str(fold),\n                    \"best_weights\",\n                    \"variables\",\n                    \"variables\"\n                ) for fold in range(NUM_FOLDS)\n            ]\n        else:\n            if split is None or split < 0 or split > 4:\n                raise ValueError(f'Provide split between 0 and 4, not {split}')\n            fold = split\n            self.weights_paths = [\n                os.path.join(\n                    weights_dir,\n                    'fold_' + str(fold),\n                    \"best_weights\",\n                    \"variables\",\n                    \"variables\"\n                )\n            ]\n\n        self.class_mappings = CLASS_MAPPING[num_classes]\n        self.models = [\n            MODEL_FACTORY[self.model_id]\n            (num_classes=num_classes, mc_dropout=mc_dropout)\n            for _ in range(len(self.weights_paths))\n        ]\n        self.mc_dropout = mc_dropout\n        self.augmentor = ImageDataGenerator(\n            rotation_range=10,\n            fill_mode='nearest',\n            horizontal_flip=True,\n            vertical_flip=True,\n            width_shift_range=0.1,\n            height_shift_range=0.1\n        )\n\n        # restore weights\n        try:\n            for model, path in zip(self.models, self.weights_paths):\n                model.load_weights(path)\n        except Exception:\n            raise Exception('Error in model restoring.')\n\n        print(f'Model restored. Class mappings are {self.class_mappings}')\n\n    def __call__(self, image, augment: bool = False, preprocess: bool = True):\n        \"\"\"Performs a forward pass through the restored model\n\n        Arguments:\n            image {np.array} -- Input image on which prediction is performed.\n                No size requirements, but the image will be reshaped to 224 x\n                224 pixels (aspect ratio is *not* preserved, so quadratic images\n                are preferred).\n\n        Returns:\n            logits {list} -- Length 3 (num_classes). Class probabilities.\n        \"\"\"\n        if preprocess:\n            image = self.preprocess(image)\n        if augment:\n            image = next(self.augmentor.flow(image))\n        predictions = np.squeeze(\n            np.stack([model.predict(image) for model in self.models]), axis=1\n        )\n        return list(np.mean(predictions, axis=0, keepdims=False))\n\n    def preprocess(self, image):\n        \"\"\"Apply image preprocessing pipeline\n\n        Arguments:\n            image {np.array} -- Arbitrary shape, quadratic preferred\n\n        Returns:\n            np.array -- Shape 224,224. 
Normalized to [0, 1].\n        \"\"\"\n\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n        image = cv2.resize(image, (224, 224))\n        image = np.expand_dims(np.array(image), 0) / 255.0\n        return image\n",
"from __future__ import division\nimport numpy as np\nfrom numpy import random\nfrom scipy.spatial.distance import pdist, cdist\nfrom scipy.stats import kstwobign, pearsonr\nfrom scipy.stats import genextreme\n\n__all__ = ['ks2d2s', 'estat', 'estat2d']\n\n\ndef ks2d2s(x1, y1, x2, y2, nboot=None, extra=False):\n    '''Two-dimensional Kolmogorov-Smirnov test on two samples.\n    Parameters\n    ----------\n    x1, y1 : ndarray, shape (n1, )\n        Data of sample 1.\n    x2, y2 : ndarray, shape (n2, )\n        Data of sample 2. Size of two samples can be different.\n    extra: bool, optional\n        If True, KS statistic is also returned. Default is False.\n    Returns\n    -------\n    p : float\n        Two-tailed p-value.\n    D : float, optional\n        KS statistic. Returned if keyword `extra` is True.\n    Notes\n    -----\n    This is the two-sided K-S test. Small p-values mean that the two samples\n    are significantly different. Note that the p-value is only an approximation\n    as the analytic distribution is unknown. The approximation is accurate\n    enough when N > ~20 and p-value < ~0.20 or so. When p-value > 0.20, the\n    value may not be accurate, but it certainly implies that the two samples\n    are not significantly different. (cf. Press 2007)\n    References\n    ----------\n    Peacock, J.A. 1983, Two-Dimensional Goodness-of-Fit Testing in Astronomy,\n    Monthly Notices of the Royal Astronomical Society, vol. 202, pp. 615-627\n    Fasano, G. and Franceschini, A. 1987, A Multidimensional Version of the\n    Kolmogorov-Smirnov Test, Monthly Notices of the Royal Astronomical Society,\n    vol. 225, pp. 155-170\n    Press, W.H. et al. 2007, Numerical Recipes, section 14.8\n    '''\n    assert (len(x1) == len(y1)) and (len(x2) == len(y2))\n    n1, n2 = len(x1), len(x2)\n    D = avgmaxdist(x1, y1, x2, y2)\n\n    if nboot is None:\n        sqen = np.sqrt(n1 * n2 / (n1 + n2))\n        r1 = pearsonr(x1, y1)[0]\n        r2 = pearsonr(x2, y2)[0]\n        r = np.sqrt(1 - 0.5 * (r1**2 + r2**2))\n        d = D * sqen / (1 + r * (0.25 - 0.75 / sqen))\n        p = kstwobign.sf(d)\n    else:\n        n = n1 + n2\n        x = np.concatenate([x1, x2])\n        y = np.concatenate([y1, y2])\n        d = np.empty(nboot, 'f')\n        for i in range(nboot):\n            idx = random.choice(n, n, replace=True)\n            ix1, ix2 = idx[:n1], idx[n1:]\n            #ix1 = random.choice(n, n1, replace=True)\n            #ix2 = random.choice(n, n2, replace=True)\n            d[i] = avgmaxdist(x[ix1], y[ix1], x[ix2], y[ix2])\n        p = np.sum(d > D).astype('f') / nboot\n    if extra:\n        return p, D\n    else:\n        return p\n\n\ndef avgmaxdist(x1, y1, x2, y2):\n    D1 = maxdist(x1, y1, x2, y2)\n    D2 = maxdist(x2, y2, x1, y1)\n    return (D1 + D2) / 2\n\n\ndef maxdist(x1, y1, x2, y2):\n    n1 = len(x1)\n    D1 = np.empty((n1, 4))\n    for i in range(n1):\n        a1, b1, c1, d1 = quadct(x1[i], y1[i], x1, y1)\n        a2, b2, c2, d2 = quadct(x1[i], y1[i], x2, y2)\n        D1[i] = [a1 - a2, b1 - b2, c1 - c2, d1 - d2]\n\n    # re-assign the point to maximize difference,\n    # the discrepancy is significant for N < ~50\n    D1[:, 0] -= 1 / n1\n\n    dmin, dmax = -D1.min(), D1.max() + 1 / n1\n    return max(dmin, dmax)\n\n\ndef quadct(x, y, xx, yy):\n    n = len(xx)\n    ix1, ix2 = xx <= x, yy <= y\n    a = np.sum(ix1 & ix2) / n\n    b = np.sum(ix1 & ~ix2) / n\n    c = np.sum(~ix1 & ix2) / n\n    d = 1 - a - b - c\n    return a, b, c, d\n\n\ndef estat2d(x1, y1, x2, y2, **kwds):\n    return estat(np.c_[x1, y1], np.c_[x2, y2], **kwds)\n\n\ndef estat(x, y, nboot=1000, replace=False, method='log', fitting=False):\n    '''\n    Energy distance statistics test.\n    Reference\n    ---------\n    Aslan, B, Zech, G (2005) Statistical energy as a tool for binning-free\n    multivariate goodness-of-fit tests, two-sample comparison and unfolding.\n    Nuc Instr and Meth in 
Phys Res A 537: 626-636\n Szekely, G, Rizzo, M (2014) Energy statistics: A class of statistics\n based on distances. J Stat Planning & Infer 143: 1249-1272\n Brian Lau, multdist, https://github.com/brian-lau/multdist\n '''\n n, N = len(x), len(x) + len(y)\n stack = np.vstack([x, y])\n stack = (stack - stack.mean(0)) / stack.std(0)\n if replace:\n rand = lambda x: random.randint(x, size=x)\n else:\n rand = random.permutation\n\n en = energy(stack[:n], stack[n:], method)\n en_boot = np.zeros(nboot, 'f')\n for i in range(nboot):\n idx = rand(N)\n en_boot[i] = energy(stack[idx[:n]], stack[idx[n:]], method)\n\n if fitting:\n param = genextreme.fit(en_boot)\n p = genextreme.sf(en, *param)\n return p, en, param\n else:\n p = (en_boot >= en).sum() / nboot\n return p, en, en_boot\n\n\ndef energy(x, y, method='log'):\n dx, dy, dxy = pdist(x), pdist(y), cdist(x, y)\n n, m = len(x), len(y)\n if method == 'log':\n dx, dy, dxy = np.log(dx), np.log(dy), np.log(dxy)\n elif method == 'gaussian':\n raise NotImplementedError\n elif method == 'linear':\n pass\n else:\n raise ValueError\n z = dxy.sum() / (n * m) - dx.sum() / n**2 - dy.sum() / m**2\n # z = ((n*m)/(n+m)) * z # ref. SR\n return z"
] |
[
[
"numpy.array",
"numpy.mean",
"tensorflow.keras.preprocessing.image.ImageDataGenerator"
],
[
"numpy.log",
"numpy.sqrt",
"numpy.random.choice",
"scipy.stats.pearsonr",
"scipy.stats.kstwobign.sf",
"numpy.empty",
"scipy.stats.genextreme.sf",
"numpy.concatenate",
"scipy.spatial.distance.cdist",
"scipy.spatial.distance.pdist",
"scipy.stats.genextreme.fit",
"numpy.zeros",
"numpy.sum",
"numpy.vstack",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
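statistical_tests.py above exposes ks2d2s, a two-sample two-dimensional Kolmogorov-Smirnov test whose statistic D is the largest difference in quadrant fractions taken around each data point (Peacock 1983; Fasano and Franceschini 1987). Below is a short synthetic usage sketch; the import path assumes the row's pocovidnet package is installed, and the data are invented for the demonstration.

import numpy as np
from pocovidnet.statistical_tests import ks2d2s  # module shown in the row above

rng = np.random.RandomState(0)
x1, y1 = rng.randn(200), rng.randn(200)
x2, y2 = rng.randn(200), rng.randn(200)        # drawn from the same law
x3, y3 = rng.randn(200) + 2.0, rng.randn(200)  # shifted two sigma in x

p_same, d_same = ks2d2s(x1, y1, x2, y2, extra=True)   # expect a large p-value
p_diff, d_diff = ks2d2s(x1, y1, x3, y3, extra=True)   # expect a tiny p-value
print('same:    p=%.3f D=%.3f' % (p_same, d_same))
print('shifted: p=%.2g D=%.3f' % (p_diff, d_diff))
assert p_diff < p_same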
stevemats/mne-python
|
[
"732bb1f994e64e41a8e95dcc10dc98c22cac95c0",
"732bb1f994e64e41a8e95dcc10dc98c22cac95c0",
"47051833f21bb372d60afc3adbf4305648ac7f69",
"732bb1f994e64e41a8e95dcc10dc98c22cac95c0",
"732bb1f994e64e41a8e95dcc10dc98c22cac95c0",
"732bb1f994e64e41a8e95dcc10dc98c22cac95c0",
"732bb1f994e64e41a8e95dcc10dc98c22cac95c0",
"47051833f21bb372d60afc3adbf4305648ac7f69",
"47051833f21bb372d60afc3adbf4305648ac7f69"
] |
[
"mne/preprocessing/tests/test_realign.py",
"tutorials/simulation/80_dics.py",
"examples/inverse/psf_ctf_label_leakage.py",
"tutorials/forward/50_background_freesurfer_mne.py",
"tutorials/preprocessing/70_fnirs_processing.py",
"mne/decoding/ems.py",
"mne/io/nedf/tests/test_nedf.py",
"mne/decoding/search_light.py",
"tutorials/clinical/60_sleep.py"
] |
[
"# Author: Mark Wronkiewicz <[email protected]>\n#\n# License: BSD-3-Clause\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom scipy.interpolate import interp1d\nimport pytest\n\nfrom mne import create_info, find_events, Epochs\nfrom mne.io import RawArray\nfrom mne.preprocessing import realign_raw\n\n\[email protected]('ratio_other', (1., 0.999, 1.001)) # drifts\[email protected]('start_raw, start_other', [(0, 0), (0, 3), (3, 0)])\[email protected]('stop_raw, stop_other', [(0, 0), (0, 3), (3, 0)])\ndef test_realign(ratio_other, start_raw, start_other, stop_raw, stop_other):\n \"\"\"Test realigning raw.\"\"\"\n # construct a true signal\n sfreq = 100.\n duration = 50\n stop_raw = duration - stop_raw\n stop_other = duration - stop_other\n signal = np.zeros(int(round((duration + 1) * sfreq)))\n orig_events = np.round(\n np.arange(max(start_raw, start_other) + 2,\n min(stop_raw, stop_other) - 2) * sfreq).astype(int)\n signal[orig_events] = 1.\n n_events = len(orig_events)\n times = np.arange(len(signal)) / sfreq\n stim = np.convolve(signal, np.ones(int(round(0.02 * sfreq))))[:len(times)]\n signal = np.convolve(\n signal, np.hanning(int(round(0.2 * sfreq))))[:len(times)]\n\n # construct our sampled versions of these signals (linear interp is fine)\n sfreq_raw = sfreq\n sfreq_other = ratio_other * sfreq\n raw_times = np.arange(start_raw, stop_raw, 1. / sfreq_raw)\n other_times = np.arange(start_other, stop_other, 1. / sfreq_other)\n assert raw_times[0] >= times[0]\n assert raw_times[-1] <= times[-1]\n assert other_times[0] >= times[0]\n assert other_times[-1] <= times[-1]\n data_raw = np.array(\n [interp1d(times, d, kind)(raw_times)\n for d, kind in ((signal, 'linear'), (stim, 'nearest'))])\n data_other = np.array(\n [interp1d(times, d, kind)(other_times)\n for d, kind in ((signal, 'linear'), (stim, 'nearest'))])\n info_raw = create_info(\n ['raw_data', 'raw_stim'], sfreq, ['eeg', 'stim'])\n info_other = create_info(\n ['other_data', 'other_stim'], sfreq, ['eeg', 'stim'])\n raw = RawArray(data_raw, info_raw, first_samp=111)\n other = RawArray(data_other, info_other, first_samp=222)\n\n # naive processing\n evoked_raw, events_raw, _, events_other = _assert_similarity(\n raw, other, n_events)\n if start_raw == start_other: # can just naively crop\n a, b = data_raw[0], data_other[0]\n n = min(len(a), len(b))\n corr = np.corrcoef(a[:n], b[:n])[0, 1]\n min_, max_ = (0.99999, 1.) 
if sfreq_raw == sfreq_other else (0.8, 0.9)\n assert min_ <= corr <= max_\n\n # realign\n t_raw = (events_raw[:, 0] - raw.first_samp) / other.info['sfreq']\n t_other = (events_other[:, 0] - other.first_samp) / other.info['sfreq']\n assert duration - 10 <= len(events_raw) < duration\n raw_orig, other_orig = raw.copy(), other.copy()\n realign_raw(raw, other, t_raw, t_other)\n\n # old events should still work for raw and produce the same result\n evoked_raw_2, _, _, _ = _assert_similarity(\n raw, other, n_events, events_raw=events_raw)\n assert_allclose(evoked_raw.data, evoked_raw_2.data)\n assert_allclose(raw.times, other.times)\n # raw data now aligned\n corr = np.corrcoef(raw.get_data([0])[0], other.get_data([0])[0])[0, 1]\n assert 0.99 < corr <= 1.\n\n # Degenerate conditions -- only test in one run\n test_degenerate = (start_raw == start_other and\n stop_raw == stop_other and\n ratio_other == 1)\n if not test_degenerate:\n return\n # these alignments will not be correct but it shouldn't matter\n with pytest.warns(RuntimeWarning, match='^Fewer.*may be unreliable.*'):\n realign_raw(raw, other, raw_times[:5], other_times[:5])\n with pytest.raises(ValueError, match='same shape'):\n realign_raw(raw_orig, other_orig, raw_times[:5], other_times)\n rand_times = np.random.RandomState(0).randn(len(other_times))\n with pytest.raises(ValueError, match='cannot resample safely'):\n realign_raw(raw_orig, other_orig, rand_times, other_times)\n with pytest.warns(RuntimeWarning, match='.*computed as R=.*unreliable'):\n realign_raw(\n raw_orig, other_orig, raw_times + rand_times * 1000, other_times)\n\n\ndef _assert_similarity(raw, other, n_events, events_raw=None):\n if events_raw is None:\n events_raw = find_events(raw)\n events_other = find_events(other)\n assert len(events_raw) == n_events\n assert len(events_other) == n_events\n kwargs = dict(baseline=None, tmin=0, tmax=0.2)\n evoked_raw = Epochs(raw, events_raw, **kwargs).average()\n evoked_other = Epochs(other, events_other, **kwargs).average()\n assert evoked_raw.nave == evoked_other.nave == len(events_raw)\n assert len(evoked_raw.data) == len(evoked_other.data) == 1 # just EEG\n corr = np.corrcoef(evoked_raw.data[0], evoked_other.data[0])[0, 1]\n assert 0.9 <= corr <= 1.\n return evoked_raw, events_raw, evoked_other, events_other\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nDICS for power mapping\n======================\n\nIn this tutorial, we'll simulate two signals originating from two\nlocations on the cortex. These signals will be sinusoids, so we'll be looking\nat oscillatory activity (as opposed to evoked activity).\n\nWe'll use dynamic imaging of coherent sources (DICS) :footcite:`GrossEtAl2001`\nto map out spectral power along the cortex. Let's see if we can find our two\nsimulated sources.\n\"\"\"\n# Author: Marijn van Vliet <[email protected]>\n#\n# License: BSD-3-Clause\n\n# %%\n# Setup\n# -----\n# We first import the required packages to run this tutorial and define a list\n# of filenames for various things we'll be using.\nimport os.path as op\nimport numpy as np\nfrom scipy.signal import welch, coherence, unit_impulse\nfrom matplotlib import pyplot as plt\n\nimport mne\nfrom mne.simulation import simulate_raw, add_noise\nfrom mne.datasets import sample\nfrom mne.minimum_norm import make_inverse_operator, apply_inverse\nfrom mne.time_frequency import csd_morlet\nfrom mne.beamformer import make_dics, apply_dics_csd\n\n# We use the MEG and MRI setup from the MNE-sample dataset\ndata_path = sample.data_path(download=False)\nsubjects_dir = op.join(data_path, 'subjects')\n\n# Filenames for various files we'll be using\nmeg_path = op.join(data_path, 'MEG', 'sample')\nraw_fname = op.join(meg_path, 'sample_audvis_raw.fif')\nfwd_fname = op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')\ncov_fname = op.join(meg_path, 'sample_audvis-cov.fif')\nfwd = mne.read_forward_solution(fwd_fname)\n\n# Seed for the random number generator\nrand = np.random.RandomState(42)\n\n# %%\n# Data simulation\n# ---------------\n#\n# The following function generates a timeseries that contains an oscillator,\n# whose frequency fluctuates a little over time, but stays close to 10 Hz.\n# We'll use this function to generate our two signals.\n\nsfreq = 50. # Sampling frequency of the generated signal\nn_samp = int(round(10. * sfreq))\ntimes = np.arange(n_samp) / sfreq # 10 seconds of signal\nn_times = len(times)\n\n\ndef coh_signal_gen():\n \"\"\"Generate an oscillating signal.\n\n Returns\n -------\n signal : ndarray\n The generated signal.\n \"\"\"\n t_rand = 0.001 # Variation in the instantaneous frequency of the signal\n std = 0.1 # Std-dev of the random fluctuations added to the signal\n base_freq = 10. 
# Base frequency of the oscillators in Hertz\n n_times = len(times)\n\n # Generate an oscillator with varying frequency and phase lag.\n signal = np.sin(2.0 * np.pi *\n (base_freq * np.arange(n_times) / sfreq +\n np.cumsum(t_rand * rand.randn(n_times))))\n\n # Add some random fluctuations to the signal.\n signal += std * rand.randn(n_times)\n\n # Scale the signal to be in the right order of magnitude (~100 nAm)\n # for MEG data.\n signal *= 100e-9\n\n return signal\n\n\n# %%\n# Let's simulate two timeseries and plot some basic information about them.\nsignal1 = coh_signal_gen()\nsignal2 = coh_signal_gen()\n\nfig, axes = plt.subplots(2, 2, figsize=(8, 4))\n\n# Plot the timeseries\nax = axes[0][0]\nax.plot(times, 1e9 * signal1, lw=0.5)\nax.set(xlabel='Time (s)', xlim=times[[0, -1]], ylabel='Amplitude (Am)',\n title='Signal 1')\nax = axes[0][1]\nax.plot(times, 1e9 * signal2, lw=0.5)\nax.set(xlabel='Time (s)', xlim=times[[0, -1]], title='Signal 2')\n\n# Power spectrum of the first timeseries\nf, p = welch(signal1, fs=sfreq, nperseg=128, nfft=256)\nax = axes[1][0]\n# Only plot the first 100 frequencies\nax.plot(f[:100], 20 * np.log10(p[:100]), lw=1.)\nax.set(xlabel='Frequency (Hz)', xlim=f[[0, 99]],\n ylabel='Power (dB)', title='Power spectrum of signal 1')\n\n# Compute the coherence between the two timeseries\nf, coh = coherence(signal1, signal2, fs=sfreq, nperseg=100, noverlap=64)\nax = axes[1][1]\nax.plot(f[:50], coh[:50], lw=1.)\nax.set(xlabel='Frequency (Hz)', xlim=f[[0, 49]], ylabel='Coherence',\n title='Coherence between the timeseries')\nfig.tight_layout()\n\n# %%\n# Now we put the signals at two locations on the cortex. We construct a\n# :class:`mne.SourceEstimate` object to store them in.\n#\n# The timeseries will have a part where the signal is active and a part where\n# it is not. The techniques we'll be using in this tutorial depend on being\n# able to contrast data that contains the signal of interest versus data that\n# does not (i.e. it contains only noise).\n\n# The locations on the cortex where the signal will originate from. These\n# locations are indicated as vertex numbers.\nvertices = [[146374], [33830]]\n\n# Construct SourceEstimates that describe the signals at the cortical level.\ndata = np.vstack((signal1, signal2))\nstc_signal = mne.SourceEstimate(\n data, vertices, tmin=0, tstep=1. / sfreq, subject='sample')\nstc_noise = stc_signal * 0.\n\n# %%\n# Before we simulate the sensor-level data, let's define a signal-to-noise\n# ratio. You are encouraged to play with this parameter and see the effect of\n# noise on our results.\nsnr = 1. # Signal-to-noise ratio. Decrease to add more noise.\n\n# %%\n# Now we run the signal through the forward model to obtain simulated sensor\n# data. To save computation time, we'll only simulate gradiometer data. You can\n# try simulating other types of sensors as well.\n#\n# Some noise is added based on the baseline noise covariance matrix from the\n# sample dataset, scaled to implement the desired SNR.\n\n# Read the info from the sample dataset. This defines the location of the\n# sensors and such.\ninfo = mne.io.read_raw(raw_fname).crop(0, 1).resample(50).info\n\n# Only use gradiometers\npicks = mne.pick_types(info, meg='grad', stim=True, exclude=())\nmne.pick_info(info, picks, copy=False)\n\n# Define a covariance matrix for the simulated noise. In this tutorial, we use\n# a simple diagonal matrix.\ncov = mne.cov.make_ad_hoc_cov(info)\ncov['data'] *= (20. 
/ snr) ** 2 # Scale the noise to achieve the desired SNR\n\n# Simulate the raw data, with a lowpass filter on the noise\nstcs = [(stc_signal, unit_impulse(n_samp, dtype=int) * 1),\n (stc_noise, unit_impulse(n_samp, dtype=int) * 2)] # stacked in time\nduration = (len(stc_signal.times) * 2) / sfreq\nraw = simulate_raw(info, stcs, forward=fwd)\nadd_noise(raw, cov, iir_filter=[4, -4, 0.8], random_state=rand)\n\n\n# %%\n# We create an :class:`mne.Epochs` object containing two trials: one with\n# both noise and signal and one with just noise\n\nevents = mne.find_events(raw, initial_event=True)\ntmax = (len(stc_signal.times) - 1) / sfreq\nepochs = mne.Epochs(raw, events, event_id=dict(signal=1, noise=2),\n tmin=0, tmax=tmax, baseline=None, preload=True)\nassert len(epochs) == 2 # ensure that we got the two expected events\n\n# Plot some of the channels of the simulated data that are situated above one\n# of our simulated sources.\npicks = mne.pick_channels(epochs.ch_names,\n mne.read_vectorview_selection('Left-frontal'))\nepochs.plot(picks=picks)\n\n# %%\n# Power mapping\n# -------------\n# With our simulated dataset ready, we can now pretend to be researchers that\n# have just recorded this from a real subject and are going to study what parts\n# of the brain communicate with each other.\n#\n# First, we'll create a source estimate of the MEG data. We'll use both a\n# straightforward MNE-dSPM inverse solution for this, and the DICS beamformer\n# which is specifically designed to work with oscillatory data.\n\n# %%\n# Computing the inverse using MNE-dSPM:\n\n# Compute the inverse operator\nfwd = mne.read_forward_solution(fwd_fname)\ninv = make_inverse_operator(epochs.info, fwd, cov)\n\n# Apply the inverse model to the trial that also contains the signal.\ns = apply_inverse(epochs['signal'].average(), inv)\n\n# Take the root-mean square along the time dimension and plot the result.\ns_rms = np.sqrt((s ** 2).mean())\ntitle = 'MNE-dSPM inverse (RMS)'\nbrain = s_rms.plot('sample', subjects_dir=subjects_dir, hemi='both', figure=1,\n size=600, time_label=title, title=title)\n\n# Indicate the true locations of the source activity on the plot.\nbrain.add_foci(vertices[0][0], coords_as_verts=True, hemi='lh')\nbrain.add_foci(vertices[1][0], coords_as_verts=True, hemi='rh')\n\n# Rotate the view and add a title.\nbrain.show_view(azimuth=0, elevation=0, distance=550, focalpoint=(0, 0, 0))\n\n# %%\n# We will now compute the cortical power map at 10 Hz. using a DICS beamformer.\n# A beamformer will construct for each vertex a spatial filter that aims to\n# pass activity originating from the vertex, while dampening activity from\n# other sources as much as possible.\n#\n# The :func:`mne.beamformer.make_dics` function has many switches that offer\n# precise control\n# over the way the filter weights are computed. Currently, there is no clear\n# consensus regarding the best approach. This is why we will demonstrate two\n# approaches here:\n#\n# 1. The approach as described in :footcite:`vanVlietEtAl2018`, which first\n# normalizes the forward solution and computes a vector beamformer.\n# 2. 
The scalar beamforming approach based on\n#    :footcite:`SekiharaNagarajan2008`, which uses weight normalization\n#    instead of normalizing the forward solution.\n\n# Estimate the cross-spectral density (CSD) matrix on the trial containing the\n# signal.\ncsd_signal = csd_morlet(epochs['signal'], frequencies=[10])\n\n# Compute the spatial filters for each vertex, using two approaches.\nfilters_approach1 = make_dics(\n    info, fwd, csd_signal, reg=0.05, pick_ori='max-power', depth=1.,\n    inversion='single', weight_norm=None, real_filter=True)\nprint(filters_approach1)\n\nfilters_approach2 = make_dics(\n    info, fwd, csd_signal, reg=0.05, pick_ori='max-power', depth=None,\n    inversion='matrix', weight_norm='unit-noise-gain', real_filter=True)\nprint(filters_approach2)\n\n# You can save these to disk with:\n# filters_approach1.save('filters_1-dics.h5')\n\n# Compute the DICS power map by applying the spatial filters to the CSD matrix.\npower_approach1, f = apply_dics_csd(csd_signal, filters_approach1)\npower_approach2, f = apply_dics_csd(csd_signal, filters_approach2)\n\n# %%\n# Plot the DICS power maps for both approaches, starting with the first:\n\n\ndef plot_approach(power, n):\n    \"\"\"Plot the results on a brain.\"\"\"\n    title = 'DICS power map, approach %d' % n\n    brain = power.plot(\n        'sample', subjects_dir=subjects_dir, hemi='both',\n        size=600, time_label=title, title=title)\n    # Indicate the true locations of the source activity on the plot.\n    brain.add_foci(vertices[0][0], coords_as_verts=True, hemi='lh', color='b')\n    brain.add_foci(vertices[1][0], coords_as_verts=True, hemi='rh', color='b')\n    # Rotate the view and add a title.\n    brain.show_view(azimuth=0, elevation=0, distance=550, focalpoint=(0, 0, 0))\n    return brain\n\n\nbrain1 = plot_approach(power_approach1, 1)\n\n# %%\n# Now the second:\n\nbrain2 = plot_approach(power_approach2, 2)\n\n# %%\n# Excellent! All methods found our two simulated sources. Of course, with a\n# signal-to-noise ratio (SNR) of 1, it isn't very hard to find them. You can\n# try playing with the SNR and see how the MNE-dSPM and DICS approaches hold up\n# in the presence of increasing noise. In the presence of more noise, you may\n# need to increase the regularization parameter of the DICS beamformer.\n#\n# References\n# ----------\n# .. footbibliography::\n",
"\"\"\"\n============================================================\nVisualize source leakage among labels using a circular graph\n============================================================\n\nThis example computes all-to-all pairwise leakage among 68 regions in\nsource space based on MNE inverse solutions and a FreeSurfer cortical\nparcellation. Label-to-label leakage is estimated as the correlation among the\nlabels' point-spread functions (PSFs). It is visualized using a circular graph\nwhich is ordered based on the locations of the regions in the axial plane.\n\"\"\"\n# Authors: Olaf Hauk <[email protected]>\n# Martin Luessi <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)\n#\n# License: BSD-3-Clause\n\n# %%\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne.datasets import sample\nfrom mne.minimum_norm import (read_inverse_operator,\n make_inverse_resolution_matrix,\n get_point_spread)\n\nfrom mne.viz import circular_layout\nfrom mne_connectivity.viz import plot_connectivity_circle\n\n\nprint(__doc__)\n\n# %%\n# Load forward solution and inverse operator\n# ------------------------------------------\n#\n# We need a matching forward solution and inverse operator to compute\n# resolution matrices for different methods.\n\ndata_path = sample.data_path()\nsubjects_dir = data_path + '/subjects'\nfname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'\nfname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-fixed-inv.fif'\nforward = mne.read_forward_solution(fname_fwd)\n# Convert forward solution to fixed source orientations\nmne.convert_forward_solution(\n forward, surf_ori=True, force_fixed=True, copy=False)\ninverse_operator = read_inverse_operator(fname_inv)\n\n# Compute resolution matrices for MNE\nrm_mne = make_inverse_resolution_matrix(forward, inverse_operator,\n method='MNE', lambda2=1. 
/ 3.**2)\nsrc = inverse_operator['src']\ndel forward, inverse_operator # save memory\n\n# %%\n# Read and organise labels for cortical parcellation\n# --------------------------------------------------\n#\n# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi\nlabels = mne.read_labels_from_annot('sample', parc='aparc',\n subjects_dir=subjects_dir)\nn_labels = len(labels)\nlabel_colors = [label.color for label in labels]\n# First, we reorder the labels based on their location in the left hemi\nlabel_names = [label.name for label in labels]\nlh_labels = [name for name in label_names if name.endswith('lh')]\n\n# Get the y-location of the label\nlabel_ypos = list()\nfor name in lh_labels:\n idx = label_names.index(name)\n ypos = np.mean(labels[idx].pos[:, 1])\n label_ypos.append(ypos)\n\n# Reorder the labels based on their location\nlh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))]\n\n# For the right hemi\nrh_labels = [label[:-2] + 'rh' for label in lh_labels]\n\n# %%\n# Compute point-spread function summaries (PCA) for all labels\n# ------------------------------------------------------------\n#\n# We summarise the PSFs per label by their first five principal components, and\n# use the first component to evaluate label-to-label leakage below.\n\n# Compute first PCA component across PSFs within labels.\n# Note the differences in explained variance, probably due to different\n# spatial extents of labels.\nn_comp = 5\nstcs_psf_mne, pca_vars_mne = get_point_spread(\n rm_mne, src, labels, mode='pca', n_comp=n_comp, norm=None,\n return_pca_vars=True)\nn_verts = rm_mne.shape[0]\ndel rm_mne\n\n# %%\n# We can show the explained variances of principal components per label. Note\n# how they differ across labels, most likely due to their varying spatial\n# extent.\n\nwith np.printoptions(precision=1):\n for [name, var] in zip(label_names, pca_vars_mne):\n print(f'{name}: {var.sum():.1f}% {var}')\n\n# %%\n# The output shows the summed variance explained by the first five principal\n# components as well as the explained variances of the individual components.\n#\n# Evaluate leakage based on label-to-label PSF correlations\n# ---------------------------------------------------------\n#\n# Note that correlations ignore the overall amplitude of PSFs, i.e. they do\n# not show which region will potentially be the bigger \"leaker\".\n\n# get PSFs from Source Estimate objects into matrix\npsfs_mat = np.zeros([n_labels, n_verts])\n# Leakage matrix for MNE, get first principal component per label\nfor [i, s] in enumerate(stcs_psf_mne):\n psfs_mat[i, :] = s.data[:, 0]\n# Compute label-to-label leakage as Pearson correlation of PSFs\n# Sign of correlation is arbitrary, so take absolute values\nleakage_mne = np.abs(np.corrcoef(psfs_mat))\n\n# Save the plot order and create a circular layout\nnode_order = lh_labels[::-1] + rh_labels # mirror label order across hemis\nnode_angles = circular_layout(label_names, node_order, start_pos=90,\n group_boundaries=[0, len(label_names) / 2])\n# Plot the graph using node colors from the FreeSurfer parcellation. 
We only\n# show the 200 strongest connections.\nfig = plt.figure(num=None, figsize=(8, 8), facecolor='black')\nplot_connectivity_circle(leakage_mne, label_names, n_lines=200,\n node_angles=node_angles, node_colors=label_colors,\n title='MNE Leakage', fig=fig)\n\n# %%\n# Most leakage occurs for neighbouring regions, but also for deeper regions\n# across hemispheres.\n#\n# Save the figure (optional)\n# --------------------------\n#\n# Matplotlib controls figure facecolor separately for interactive display\n# versus for saved figures. Thus when saving you must specify ``facecolor``,\n# else your labels, title, etc will not be visible::\n#\n# >>> fname_fig = data_path + '/MEG/sample/plot_label_leakage.png'\n# >>> fig.savefig(fname_fig, facecolor='black')\n#\n# Plot PSFs for individual labels\n# -------------------------------\n#\n# Let us confirm for left and right lateral occipital lobes that there is\n# indeed no leakage between them, as indicated by the correlation graph.\n# We can plot the summary PSFs for both labels to examine the spatial extent of\n# their leakage.\n\n# left and right lateral occipital\nidx = [22, 23]\nstc_lh = stcs_psf_mne[idx[0]]\nstc_rh = stcs_psf_mne[idx[1]]\n\n# Maximum for scaling across plots\nmax_val = np.max([stc_lh.data, stc_rh.data])\n\n# %%\n# Point-spread function for the lateral occipital label in the left hemisphere\n\nbrain_lh = stc_lh.plot(subjects_dir=subjects_dir, subject='sample',\n hemi='both', views='caudal',\n clim=dict(kind='value',\n pos_lims=(0, max_val / 2., max_val)))\nbrain_lh.add_text(0.1, 0.9, label_names[idx[0]], 'title', font_size=16)\n\n# %%\n# and in the right hemisphere.\n\nbrain_rh = stc_rh.plot(subjects_dir=subjects_dir, subject='sample',\n hemi='both', views='caudal',\n clim=dict(kind='value',\n pos_lims=(0, max_val / 2., max_val)))\nbrain_rh.add_text(0.1, 0.9, label_names[idx[1]], 'title', font_size=16)\n\n# %%\n# Both summary PSFs are confined to their respective hemispheres, indicating\n# that there is indeed low leakage between these two regions.\n",
"\"\"\"\n.. _tut-freesurfer-mne:\n\n=================================\nHow MNE uses FreeSurfer's outputs\n=================================\n\nThis tutorial explains how MRI coordinate frames are handled in MNE-Python,\nand how MNE-Python integrates with FreeSurfer for handling MRI data and\nsource space data in general.\n\nAs usual we'll start by importing the necessary packages; for this tutorial\nthat includes :mod:`nibabel` to handle loading the MRI images (MNE-Python also\nuses :mod:`nibabel` under the hood). We'll also use a special :mod:`Matplotlib\n<matplotlib.patheffects>` function for adding outlines to text, so that text is\nreadable on top of an MRI image.\n\"\"\"\n\n# %%\n\nimport os\n\nimport numpy as np\nimport nibabel\nimport matplotlib.pyplot as plt\nimport matplotlib.patheffects as path_effects\n\nimport mne\nfrom mne.transforms import apply_trans\nfrom mne.io.constants import FIFF\n\n# %%\n# MRI coordinate frames\n# =====================\n#\n# Let's start out by looking at the ``sample`` subject MRI. Following standard\n# FreeSurfer convention, we look at :file:`T1.mgz`, which gets created from the\n# original MRI :file:`sample/mri/orig/001.mgz` when you run the FreeSurfer\n# command `recon-all <https://surfer.nmr.mgh.harvard.edu/fswiki/recon-all>`_.\n# Here we use :mod:`nibabel` to load the T1 image, and the resulting object's\n# :meth:`~nibabel.spatialimages.SpatialImage.orthoview` method to view it.\n\ndata_path = mne.datasets.sample.data_path()\nsubjects_dir = os.path.join(data_path, 'subjects')\nsubject = 'sample'\nt1_fname = os.path.join(subjects_dir, subject, 'mri', 'T1.mgz')\nt1 = nibabel.load(t1_fname)\nt1.orthoview()\n\n# %%\n# Notice that the axes in the\n# :meth:`~nibabel.spatialimages.SpatialImage.orthoview` figure are labeled\n# L-R, S-I, and P-A. These reflect the standard RAS (right-anterior-superior)\n# coordinate system that is widely used in MRI imaging. If you are unfamiliar\n# with RAS coordinates, see the excellent nibabel tutorial\n# :doc:`nibabel:coordinate_systems`.\n#\n# Nibabel already takes care of some coordinate frame transformations under the\n# hood, so let's do it manually so we understand what is happening. First let's\n# get our data as a 3D array and note that it's already a standard size:\n\ndata = np.asarray(t1.dataobj)\nprint(data.shape)\n\n# %%\n# These data are voxel intensity values. Here they are unsigned integers in the\n# range 0-255, though in general they can be floating point values. A value\n# ``data[i, j, k]`` at a given index triplet ``(i, j, k)`` corresponds to some\n# real-world physical location ``(x, y, z)`` in space. To get its physical\n# location, first we have to choose what coordinate frame we're going to use.\n#\n# For example, we could choose a geographical coordinate\n# frame, with origin is at the center of the earth, Z axis through the north\n# pole, X axis through the prime meridian (zero degrees longitude), and Y axis\n# orthogonal to these forming a right-handed coordinate system. This would not\n# be a very useful choice for defining the physical locations of the voxels\n# during the MRI acquisition for analysis, but you could nonetheless figure out\n# the transformation that related the ``(i, j, k)`` to this coordinate frame.\n#\n# Instead, each scanner defines a more practical, native coordinate system that\n# it uses during acquisition, usually related to the physical orientation of\n# the scanner itself and/or the subject within it. 
During acquisition the\n# relationship between the voxel indices ``(i, j, k)`` and the physical\n# location ``(x, y, z)`` in the *scanner's native coordinate frame* is saved in\n# the image's *affine transformation*.\n#\n# .. sidebar:: Under the hood\n#\n# ``mne.transforms.apply_trans`` effectively does a matrix multiplication\n# (i.e., :func:`numpy.dot`), with a little extra work to handle the shape\n# mismatch (the affine has shape ``(4, 4)`` because it includes a\n# *translation*, which is applied separately).\n#\n# We can use :mod:`nibabel` to examine this transformation, keeping in mind\n# that it processes everything in units of millimeters, unlike MNE where things\n# are always in SI units (meters).\n#\n# This allows us to take an arbitrary voxel or slice of data and know where it\n# is in the scanner's native physical space ``(x, y, z)`` (in mm) by applying\n# the affine transformation to the voxel coordinates.\n\nprint(t1.affine)\nvox = np.array([122, 119, 102])\nxyz_ras = apply_trans(t1.affine, vox)\nprint('Our voxel has real-world coordinates {}, {}, {} (mm)'\n .format(*np.round(xyz_ras, 3)))\n\n# %%\n# If you have a point ``(x, y, z)`` in scanner-native RAS space and you want\n# the corresponding voxel number, you can get it using the inverse of the\n# affine. This involves some rounding, so it's possible to end up off by one\n# voxel if you're not careful:\n\nras_coords_mm = np.array([1, -17, -18])\ninv_affine = np.linalg.inv(t1.affine)\ni_, j_, k_ = np.round(apply_trans(inv_affine, ras_coords_mm)).astype(int)\nprint('Our real-world coordinates correspond to voxel ({}, {}, {})'\n .format(i_, j_, k_))\n\n# %%\n# Let's write a short function to visualize where our voxel lies in an\n# image, and annotate it in RAS space (rounded to the nearest millimeter):\n\n\ndef imshow_mri(data, img, vox, xyz, suptitle):\n \"\"\"Show an MRI slice with a voxel annotated.\"\"\"\n i, j, k = vox\n fig, ax = plt.subplots(1, figsize=(6, 6))\n codes = nibabel.orientations.aff2axcodes(img.affine)\n # Figure out the title based on the code of this axis\n ori_slice = dict(P='Coronal', A='Coronal',\n I='Axial', S='Axial',\n L='Sagittal', R='Saggital')\n ori_names = dict(P='posterior', A='anterior',\n I='inferior', S='superior',\n L='left', R='right')\n title = ori_slice[codes[0]]\n ax.imshow(data[i], vmin=10, vmax=120, cmap='gray', origin='lower')\n ax.axvline(k, color='y')\n ax.axhline(j, color='y')\n for kind, coords in xyz.items():\n annotation = ('{}: {}, {}, {} mm'\n .format(kind, *np.round(coords).astype(int)))\n text = ax.text(k, j, annotation, va='baseline', ha='right',\n color=(1, 1, 0.7))\n text.set_path_effects([\n path_effects.Stroke(linewidth=2, foreground='black'),\n path_effects.Normal()])\n # reorient view so that RAS is always rightward and upward\n x_order = -1 if codes[2] in 'LIP' else 1\n y_order = -1 if codes[1] in 'LIP' else 1\n ax.set(xlim=[0, data.shape[2] - 1][::x_order],\n ylim=[0, data.shape[1] - 1][::y_order],\n xlabel=f'k ({ori_names[codes[2]]}+)',\n ylabel=f'j ({ori_names[codes[1]]}+)',\n title=f'{title} view: i={i} ({ori_names[codes[0]]}+)')\n fig.suptitle(suptitle)\n fig.subplots_adjust(0.1, 0.1, 0.95, 0.85)\n return fig\n\n\nimshow_mri(data, t1, vox, {'Scanner RAS': xyz_ras}, 'MRI slice')\n\n# %%\n# Notice that the axis scales (``i``, ``j``, and ``k``) are still in voxels\n# (ranging from 0-255); it's only the annotation text that we've translated\n# into real-world RAS in millimeters.\n#\n#\n# \"MRI coordinates\" in MNE-Python: FreeSurfer surface RAS\n# 
-------------------------------------------------------\n#\n# While :mod:`nibabel` uses **scanner RAS** ``(x, y, z)`` coordinates,\n# FreeSurfer uses a slightly different coordinate frame: **MRI surface RAS**.\n# The transform from voxels to the FreeSurfer MRI surface RAS coordinate frame\n# is known in the `FreeSurfer documentation\n# <https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems>`_ as ``Torig``,\n# and in nibabel as :meth:`vox2ras_tkr\n# <nibabel.freesurfer.mghformat.MGHHeader.get_vox2ras_tkr>`. This\n# transformation sets the center of its coordinate frame in the middle of the\n# conformed volume dimensions (``N / 2.``) with the axes oriented along the\n# axes of the volume itself. For more information, see\n# :ref:`coordinate_systems`.\n#\n# .. note:: In general, you should assume that the MRI coordinate system for\n# a given subject is specific to that subject, i.e., it is not the\n# same coordinate MRI coordinate system that is used for any other\n# FreeSurfer subject. Even though during processing FreeSurfer will\n# align each subject's MRI to ``fsaverage`` to do reconstruction,\n# all data (surfaces, MRIs, etc.) get stored in the coordinate frame\n# specific to that subject. This is why it's important for group\n# analyses to transform data to a common coordinate frame for example\n# by :ref:`surface <ex-morph-surface>` or\n# :ref:`volumetric <ex-morph-volume>` morphing, or even by just\n# applying :ref:`mni-affine-transformation` to points.\n#\n# Since MNE-Python uses FreeSurfer extensively for surface computations (e.g.,\n# white matter, inner/outer skull meshes), internally MNE-Python uses the\n# Freeurfer surface RAS coordinate system (not the :mod:`nibabel` scanner RAS\n# system) for as many computations as possible, such as all source space\n# and BEM mesh vertex definitions.\n#\n# Whenever you see \"MRI coordinates\" or \"MRI coords\" in MNE-Python's\n# documentation, you should assume that we are talking about the\n# \"FreeSurfer MRI surface RAS\" coordinate frame!\n#\n# We can do similar computations as before to convert the given voxel indices\n# into FreeSurfer MRI coordinates (i.e., what we call \"MRI coordinates\" or\n# \"surface RAS\" everywhere else in MNE), just like we did above to convert\n# voxel indices to *scanner* RAS:\n\nTorig = t1.header.get_vox2ras_tkr()\nprint(t1.affine)\nprint(Torig)\nxyz_mri = apply_trans(Torig, vox)\nimshow_mri(data, t1, vox, dict(MRI=xyz_mri), 'MRI slice')\n\n# %%\n# Knowing these relationships and being mindful about transformations, we\n# can get from a point in any given space to any other space. 
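For instance, composing\n# ``Torig`` with the inverse of the scanner affine should map scanner RAS\n# directly to surface RAS; we can quickly verify this for our voxel:\n\n# chain the two affines we already have: scanner RAS -> voxels -> surface RAS\nras_to_mri = Torig @ np.linalg.inv(t1.affine)\nprint(np.allclose(apply_trans(ras_to_mri, xyz_ras), xyz_mri))  # True\n\n# %%\n# 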
Let's start out\n# by plotting the Nasion on a sagittal MRI slice:\n\nfiducials = mne.coreg.get_mni_fiducials(subject, subjects_dir=subjects_dir)\nnasion_mri = [d for d in fiducials if d['ident'] == FIFF.FIFFV_POINT_NASION][0]\nprint(nasion_mri)  # note it's in FreeSurfer MRI coords\n\n# %%\n# When we print the nasion, it displays as a ``DigPoint`` and shows its\n# coordinates in millimeters, but beware that the underlying data is\n# :ref:`actually stored in meters <units>`,\n# so before transforming and plotting we'll convert to millimeters:\n\nnasion_mri = nasion_mri['r'] * 1000  # meters → millimeters\nnasion_vox = np.round(\n    apply_trans(np.linalg.inv(Torig), nasion_mri)).astype(int)\nimshow_mri(data, t1, nasion_vox, dict(MRI=nasion_mri),\n           'Nasion estimated from MRI transform')\n\n# %%\n# We can also take the digitization point from the MEG data, which is in the\n# \"head\" coordinate frame.\n#\n# Let's look at the nasion in the head coordinate frame:\n\ninfo = mne.io.read_info(\n    os.path.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif'))\nnasion_head = [d for d in info['dig'] if\n               d['kind'] == FIFF.FIFFV_POINT_CARDINAL and\n               d['ident'] == FIFF.FIFFV_POINT_NASION][0]\nprint(nasion_head)  # note it's in \"head\" coordinates\n\n# %%\n# .. sidebar:: Head coordinate frame\n#\n#     The head coordinate frame in MNE is the \"Neuromag\" head coordinate\n#     frame. The origin is given by the intersection between a line connecting\n#     the LPA and RPA and the line orthogonal to it that runs through the\n#     nasion. It is also in RAS orientation, meaning that +X runs through\n#     the RPA, +Y goes through the nasion, and +Z is orthogonal to these\n#     pointing upward. See :ref:`coordinate_systems` for more information.\n#\n# Notice that in the \"head\" coordinate frame the nasion has values of 0 for the\n# ``x`` and ``z`` directions (which makes sense given that the nasion is used\n# to define the ``y`` axis in that system).\n# To convert from head coordinate frame to voxels, we first apply the head →\n# MRI (surface RAS) transform\n# from a :file:`trans` file (typically created with the MNE-Python\n# coregistration GUI), then convert meters → millimeters, and finally apply the\n# inverse of ``Torig`` to get to voxels.\n#\n# Under the hood, functions like :func:`mne.setup_source_space`,\n# :func:`mne.setup_volume_source_space`, and :func:`mne.compute_source_morph`\n# make extensive use of these coordinate frames.\n\ntrans = mne.read_trans(\n    os.path.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif'))\n\n# first we transform from head to MRI, and *then* convert to millimeters\nnasion_dig_mri = apply_trans(trans, nasion_head['r']) * 1000\n\n# ...then we can use Torig to convert MRI to voxels:\nnasion_dig_vox = np.round(\n    apply_trans(np.linalg.inv(Torig), nasion_dig_mri)).astype(int)\nimshow_mri(data, t1, nasion_dig_vox, dict(MRI=nasion_dig_mri),\n           'Nasion transformed from digitization')\n\n# %%\n# Using FreeSurfer's surface reconstructions\n# ==========================================\n# An important part of what FreeSurfer does is provide cortical surface\n# reconstructions. For example, let's load and view the ``white`` surface\n# of the brain. 
This is a 3D mesh defined by a set of vertices (conventionally\n# called ``rr``) with shape ``(n_vertices, 3)`` and a set of triangles\n# (``tris``) with shape ``(n_tris, 3)`` defining which vertices in ``rr`` form\n# each triangular facet of the mesh.\n\nfname = os.path.join(subjects_dir, subject, 'surf', 'rh.white')\nrr_mm, tris = mne.read_surface(fname)\nprint(f'rr_mm.shape == {rr_mm.shape}')\nprint(f'tris.shape == {tris.shape}')\nprint(f'rr_mm.max() = {rr_mm.max()}') # just to show that we are in mm\n\n# %%\n# Let's actually plot it:\n\nrenderer = mne.viz.backends.renderer.create_3d_figure(\n size=(600, 600), bgcolor='w', scene=False)\ngray = (0.5, 0.5, 0.5)\nrenderer.mesh(*rr_mm.T, triangles=tris, color=gray)\nview_kwargs = dict(elevation=90, azimuth=0)\nmne.viz.set_3d_view(\n figure=renderer.figure, distance=350, focalpoint=(0., 0., 40.),\n **view_kwargs)\nrenderer.show()\n\n# %%\n# We can also plot the mesh on top of an MRI slice. The mesh surfaces are\n# defined in millimeters in the MRI (FreeSurfer surface RAS) coordinate frame,\n# so we can convert them to voxels by applying the inverse of the ``Torig``\n# transform:\n\nrr_vox = apply_trans(np.linalg.inv(Torig), rr_mm)\nfig = imshow_mri(data, t1, vox, {'Scanner RAS': xyz_ras}, 'MRI slice')\n# Based on how imshow_mri works, the \"X\" here is the last dim of the MRI vol,\n# the \"Y\" is the middle dim, and the \"Z\" is the first dim, so now that our\n# points are in the correct coordinate frame, we need to ask matplotlib to\n# do a tricontour slice like:\nfig.axes[0].tricontour(rr_vox[:, 2], rr_vox[:, 1], tris, rr_vox[:, 0],\n levels=[vox[0]], colors='r', linewidths=1.0,\n zorder=1)\n\n# %%\n# This is the method used by :func:`mne.viz.plot_bem` to show the BEM surfaces.\n#\n# Cortical alignment (spherical)\n# ------------------------------\n# A critical function provided by FreeSurfer is spherical surface alignment\n# of cortical surfaces, maximizing sulcal-gyral alignment. FreeSurfer first\n# expands the cortical surface to a sphere, then aligns it optimally with\n# fsaverage. Because the vertex ordering is preserved when expanding to a\n# sphere, a given vertex in the source (sample) mesh can be mapped easily\n# to the same location in the destination (fsaverage) mesh, and vice-versa.\n\nrenderer_kwargs = dict(bgcolor='w', smooth_shading=False)\nrenderer = mne.viz.backends.renderer.create_3d_figure(\n size=(800, 400), scene=False, **renderer_kwargs)\ncurvs = [\n (mne.surface.read_curvature(os.path.join(\n subjects_dir, subj, 'surf', 'rh.curv'),\n binary=False) > 0).astype(float)\n for subj in ('sample', 'fsaverage') for _ in range(2)]\nfnames = [os.path.join(subjects_dir, subj, 'surf', surf)\n for subj in ('sample', 'fsaverage')\n for surf in ('rh.white', 'rh.sphere')]\ny_shifts = [-450, -150, 450, 150]\nz_shifts = [-40, 0, -30, 0]\nfor name, y_shift, z_shift, curv in zip(fnames, y_shifts, z_shifts, curvs):\n this_rr, this_tri = mne.read_surface(name)\n this_rr += [0, y_shift, z_shift]\n renderer.mesh(*this_rr.T, triangles=this_tri, color=None, scalars=curv,\n colormap='copper_r', vmin=-0.2, vmax=1.2)\nzero = [0., 0., 0.]\nwidth = 50.\ny = np.sort(y_shifts)\ny = (y[1:] + y[:-1]) / 2. 
- width / 2.\nrenderer.quiver3d(zero, y, zero,\n zero, [1] * 3, zero, 'k', width, 'arrow')\nview_kwargs['focalpoint'] = (0., 0., 0.)\nmne.viz.set_3d_view(figure=renderer.figure, distance=1000, **view_kwargs)\nrenderer.show()\n\n# %%\n# Let's look a bit more closely at the spherical alignment by overlaying the\n# two spherical meshes as wireframes and zooming way in (the purple points are\n# separated by about 1 mm):\n\ncyan = '#66CCEE'\npurple = '#AA3377'\nrenderer = mne.viz.backends.renderer.create_3d_figure(\n size=(800, 800), scene=False, **renderer_kwargs)\nfnames = [os.path.join(subjects_dir, subj, 'surf', 'rh.sphere')\n for subj in ('sample', 'fsaverage')]\ncolors = [cyan, purple]\nfor name, color in zip(fnames, colors):\n this_rr, this_tri = mne.read_surface(name)\n renderer.mesh(*this_rr.T, triangles=this_tri, color=color,\n representation='wireframe')\nmne.viz.set_3d_view(figure=renderer.figure, distance=20, **view_kwargs)\nrenderer.show()\n\n# %%\n# You can see that the fsaverage (purple) mesh is uniformly spaced, and the\n# mesh for subject \"sample\" (in cyan) has been deformed along the spherical\n# surface by\n# FreeSurfer. This deformation is designed to optimize the sulcal-gyral\n# alignment.\n#\n# Surface decimation\n# ------------------\n# These surfaces have a lot of vertices, and in general we only need to use\n# a subset of these vertices for creating source spaces. A uniform sampling can\n# easily be achieved by subsampling in the spherical space. To do this, we\n# use a recursively subdivided icosahedron or octahedron. For example, let's\n# load a standard oct-6 source space, and at the same zoom level as before\n# visualize how it subsampled the dense mesh:\n\nsrc = mne.read_source_spaces(os.path.join(subjects_dir, 'sample', 'bem',\n 'sample-oct-6-src.fif'))\nprint(src)\n\n# sphinx_gallery_thumbnail_number = 10\nblue = '#4477AA'\nrenderer = mne.viz.backends.renderer.create_3d_figure(\n size=(800, 800), scene=False, **renderer_kwargs)\nrr_sph, _ = mne.read_surface(fnames[0])\nfor tris, color in [(src[1]['tris'], cyan), (src[1]['use_tris'], blue)]:\n renderer.mesh(*rr_sph.T, triangles=tris, color=color,\n representation='wireframe')\nmne.viz.set_3d_view(figure=renderer.figure, distance=20, **view_kwargs)\nrenderer.show()\n\n# %%\n# We can also then look at how these two meshes compare by plotting the\n# original, high-density mesh as well as our decimated mesh white surfaces.\n\nrenderer = mne.viz.backends.renderer.create_3d_figure(\n size=(800, 400), scene=False, **renderer_kwargs)\ny_shifts = [-125, 125]\ntris = [src[1]['tris'], src[1]['use_tris']]\nfor y_shift, tris in zip(y_shifts, tris):\n this_rr = src[1]['rr'] * 1000. + [0, y_shift, -40]\n renderer.mesh(*this_rr.T, triangles=tris, color=None, scalars=curvs[0],\n colormap='copper_r', vmin=-0.2, vmax=1.2)\nrenderer.quiver3d([0], [-width / 2.], [0], [0], [1], [0], 'k', width, 'arrow')\nmne.viz.set_3d_view(figure=renderer.figure, distance=400, **view_kwargs)\nrenderer.show()\n\n\n# %%\n# .. warning::\n# Some source space vertices can be removed during forward computation.\n# See :ref:`tut-forward` for more information.\n#\n# .. 
_mni-affine-transformation:\n#\n# FreeSurfer's MNI affine transformation\n# --------------------------------------\n# In addition to surface-based approaches, FreeSurfer also provides a simple\n# affine coregistration of each subject's data to the ``fsaverage`` subject.\n# Let's pick a point for ``sample`` and plot it on the brain:\n\nbrain = mne.viz.Brain('sample', 'lh', 'white', subjects_dir=subjects_dir,\n background='w')\nxyz = np.array([[-55, -10, 35]])\nbrain.add_foci(xyz, hemi='lh', color='k')\nbrain.show_view('lat')\n\n# %%\n# We can take this point and transform it to MNI space:\n\nmri_mni_trans = mne.read_talxfm(subject, subjects_dir)\nprint(mri_mni_trans)\nxyz_mni = apply_trans(mri_mni_trans, xyz / 1000.) * 1000.\nprint(np.round(xyz_mni, 1))\n\n# %%\n# And because ``fsaverage`` is special in that it's already in MNI space\n# (its MRI-to-MNI transform is identity), it should land in the equivalent\n# anatomical location:\n\nbrain = mne.viz.Brain('fsaverage', 'lh', 'white', subjects_dir=subjects_dir,\n background='w')\nbrain.add_foci(xyz_mni, hemi='lh', color='k')\nbrain.show_view('lat')\n",
"\"\"\"\n.. _tut-fnirs-processing:\n\nPreprocessing functional near-infrared spectroscopy (fNIRS) data\n================================================================\n\nThis tutorial covers how to convert functional near-infrared spectroscopy\n(fNIRS) data from raw measurements to relative oxyhaemoglobin (HbO) and\ndeoxyhaemoglobin (HbR) concentration, view the average waveform, and\ntopographic representation of the response.\n\nHere we will work with the :ref:`fNIRS motor data <fnirs-motor-dataset>`.\n\"\"\"\n\n# %%\n\nimport os.path as op\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import compress\n\nimport mne\n\n\nfnirs_data_folder = mne.datasets.fnirs_motor.data_path()\nfnirs_cw_amplitude_dir = op.join(fnirs_data_folder, 'Participant-1')\nraw_intensity = mne.io.read_raw_nirx(fnirs_cw_amplitude_dir, verbose=True)\nraw_intensity.load_data()\n\n\n# %%\n# Providing more meaningful annotation information\n# ------------------------------------------------\n#\n# First, we attribute more meaningful names to the trigger codes which are\n# stored as annotations. Second, we include information about the duration of\n# each stimulus, which was 5 seconds for all conditions in this experiment.\n# Third, we remove the trigger code 15, which signaled the start and end\n# of the experiment and is not relevant to our analysis.\n\nraw_intensity.annotations.set_durations(5)\nraw_intensity.annotations.rename({'1.0': 'Control',\n '2.0': 'Tapping/Left',\n '3.0': 'Tapping/Right'})\nunwanted = np.nonzero(raw_intensity.annotations.description == '15.0')\nraw_intensity.annotations.delete(unwanted)\n\n\n# %%\n# Viewing location of sensors over brain surface\n# ----------------------------------------------\n#\n# Here we validate that the location of sources-detector pairs and channels\n# are in the expected locations. Source-detector pairs are shown as lines\n# between the optodes, channels (the mid point of source-detector pairs) are\n# optionally shown as orange dots. 
Source are optionally shown as red dots and\n# detectors as black.\n\nsubjects_dir = op.join(mne.datasets.sample.data_path(), 'subjects')\n\nbrain = mne.viz.Brain(\n 'fsaverage', subjects_dir=subjects_dir, background='w', cortex='0.5')\nbrain.add_sensors(\n raw_intensity.info, trans='fsaverage',\n fnirs=['channels', 'pairs', 'sources', 'detectors'])\nbrain.show_view(azimuth=20, elevation=60, distance=400)\n\n# %%\n# Selecting channels appropriate for detecting neural responses\n# -------------------------------------------------------------\n#\n# First we remove channels that are too close together (short channels) to\n# detect a neural response (less than 1 cm distance between optodes).\n# These short channels can be seen in the figure above.\n# To achieve this we pick all the channels that are not considered to be short.\n\npicks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True)\ndists = mne.preprocessing.nirs.source_detector_distances(\n raw_intensity.info, picks=picks)\nraw_intensity.pick(picks[dists > 0.01])\nraw_intensity.plot(n_channels=len(raw_intensity.ch_names),\n duration=500, show_scrollbars=False)\n\n\n# %%\n# Converting from raw intensity to optical density\n# ------------------------------------------------\n#\n# The raw intensity values are then converted to optical density.\n\nraw_od = mne.preprocessing.nirs.optical_density(raw_intensity)\nraw_od.plot(n_channels=len(raw_od.ch_names),\n duration=500, show_scrollbars=False)\n\n\n# %%\n# Evaluating the quality of the data\n# ----------------------------------\n#\n# At this stage we can quantify the quality of the coupling\n# between the scalp and the optodes using the scalp coupling index. This\n# method looks for the presence of a prominent synchronous signal in the\n# frequency range of cardiac signals across both photodetected signals.\n#\n# In this example the data is clean and the coupling is good for all\n# channels, so we will not mark any channels as bad based on the scalp\n# coupling index.\n\nsci = mne.preprocessing.nirs.scalp_coupling_index(raw_od)\nfig, ax = plt.subplots()\nax.hist(sci)\nax.set(xlabel='Scalp Coupling Index', ylabel='Count', xlim=[0, 1])\n\n\n# %%\n# In this example we will mark all channels with a SCI less than 0.5 as bad\n# (this dataset is quite clean, so no channels are marked as bad).\n\nraw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5))\n\n\n# %%\n# At this stage it is appropriate to inspect your data\n# (for instructions on how to use the interactive data visualisation tool\n# see :ref:`tut-visualize-raw`)\n# to ensure that channels with poor scalp coupling have been removed.\n# If your data contains lots of artifacts you may decide to apply\n# artifact reduction techniques as described in :ref:`ex-fnirs-artifacts`.\n\n\n# %%\n# Converting from optical density to haemoglobin\n# ----------------------------------------------\n#\n# Next we convert the optical density data to haemoglobin concentration using\n# the modified Beer-Lambert law.\n\nraw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)\nraw_haemo.plot(n_channels=len(raw_haemo.ch_names),\n duration=500, show_scrollbars=False)\n\n\n# %%\n# Removing heart rate from signal\n# -------------------------------\n#\n# The haemodynamic response has frequency content predominantly below 0.5 Hz.\n# An increase in activity around 1 Hz can be seen in the data that is due to\n# the person's heart beat and is unwanted. So we use a low pass filter to\n# remove this. 
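The 0.7 Hz low-pass cutoff used\n# below sits between the sub-0.5 Hz haemodynamic band and the ~1 Hz\n# cardiac peak. 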
A high pass filter is also included to remove slow drifts\n# in the data.\n\nfig = raw_haemo.plot_psd(average=True)\nfig.suptitle('Before filtering', weight='bold', size='x-large')\nfig.subplots_adjust(top=0.88)\nraw_haemo = raw_haemo.filter(0.05, 0.7, h_trans_bandwidth=0.2,\n l_trans_bandwidth=0.02)\nfig = raw_haemo.plot_psd(average=True)\nfig.suptitle('After filtering', weight='bold', size='x-large')\nfig.subplots_adjust(top=0.88)\n\n# %%\n# Extract epochs\n# --------------\n#\n# Now that the signal has been converted to relative haemoglobin concentration,\n# and the unwanted heart rate component has been removed, we can extract epochs\n# related to each of the experimental conditions.\n#\n# First we extract the events of interest and visualise them to ensure they are\n# correct.\n\nevents, event_dict = mne.events_from_annotations(raw_haemo)\nfig = mne.viz.plot_events(events, event_id=event_dict,\n sfreq=raw_haemo.info['sfreq'])\nfig.subplots_adjust(right=0.7) # make room for the legend\n\n\n# %%\n# Next we define the range of our epochs, the rejection criteria,\n# baseline correction, and extract the epochs. We visualise the log of which\n# epochs were dropped.\n\nreject_criteria = dict(hbo=80e-6)\ntmin, tmax = -5, 15\n\nepochs = mne.Epochs(raw_haemo, events, event_id=event_dict,\n tmin=tmin, tmax=tmax,\n reject=reject_criteria, reject_by_annotation=True,\n proj=True, baseline=(None, 0), preload=True,\n detrend=None, verbose=True)\nepochs.plot_drop_log()\n\n\n# %%\n# View consistency of responses across trials\n# -------------------------------------------\n#\n# Now we can view the haemodynamic response for our tapping condition.\n# We visualise the response for both the oxy- and deoxyhaemoglobin, and\n# observe the expected peak in HbO at around 6 seconds consistently across\n# trials, and the consistent dip in HbR that is slightly delayed relative to\n# the HbO peak.\n\nepochs['Tapping'].plot_image(combine='mean', vmin=-30, vmax=30,\n ts_args=dict(ylim=dict(hbo=[-15, 15],\n hbr=[-15, 15])))\n\n\n# %%\n# We can also view the epoched data for the control condition and observe\n# that it does not show the expected morphology.\n\nepochs['Control'].plot_image(combine='mean', vmin=-30, vmax=30,\n ts_args=dict(ylim=dict(hbo=[-15, 15],\n hbr=[-15, 15])))\n\n\n# %%\n# View consistency of responses across channels\n# ---------------------------------------------\n#\n# Similarly we can view how consistent the response is across the optode\n# pairs that we selected. 
All the channels in this data are located over the\n# motor cortex, and all channels show a similar pattern in the data.\n\nfig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6))\nclims = dict(hbo=[-20, 20], hbr=[-20, 20])\nepochs['Control'].average().plot_image(axes=axes[:, 0], clim=clims)\nepochs['Tapping'].average().plot_image(axes=axes[:, 1], clim=clims)\nfor column, condition in enumerate(['Control', 'Tapping']):\n for ax in axes[:, column]:\n ax.set_title('{}: {}'.format(condition, ax.get_title()))\n\n\n# %%\n# Plot standard fNIRS response image\n# ----------------------------------\n#\n# Next we generate the most common visualisation of fNIRS data: plotting\n# both the HbO and HbR on the same figure to illustrate the relation between\n# the two signals.\n\nevoked_dict = {'Tapping/HbO': epochs['Tapping'].average(picks='hbo'),\n 'Tapping/HbR': epochs['Tapping'].average(picks='hbr'),\n 'Control/HbO': epochs['Control'].average(picks='hbo'),\n 'Control/HbR': epochs['Control'].average(picks='hbr')}\n\n# Rename channels until the encoding of frequency in ch_name is fixed\nfor condition in evoked_dict:\n evoked_dict[condition].rename_channels(lambda x: x[:-4])\n\ncolor_dict = dict(HbO='#AA3377', HbR='b')\nstyles_dict = dict(Control=dict(linestyle='dashed'))\n\nmne.viz.plot_compare_evokeds(evoked_dict, combine=\"mean\", ci=0.95,\n colors=color_dict, styles=styles_dict)\n\n\n# %%\n# View topographic representation of activity\n# -------------------------------------------\n#\n# Next we view how the topographic activity changes throughout the response.\n\ntimes = np.arange(-3.5, 13.2, 3.0)\ntopomap_args = dict(extrapolate='local')\nepochs['Tapping'].average(picks='hbo').plot_joint(\n times=times, topomap_args=topomap_args)\n\n\n# %%\n# Compare tapping of left and right hands\n# ---------------------------------------\n#\n# Finally we generate topo maps for the left and right conditions to view\n# the location of activity. 
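The maps are sampled in one-second\n# steps across the 4-10 second window that brackets the ~6 second HbO\n# peak seen above. 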
First we visualise the HbO activity.\n\ntimes = np.arange(4.0, 11.0, 1.0)\nepochs['Tapping/Left'].average(picks='hbo').plot_topomap(\n times=times, **topomap_args)\nepochs['Tapping/Right'].average(picks='hbo').plot_topomap(\n times=times, **topomap_args)\n\n# %%\n# And we also view the HbR activity for the two conditions.\n\nepochs['Tapping/Left'].average(picks='hbr').plot_topomap(\n times=times, **topomap_args)\nepochs['Tapping/Right'].average(picks='hbr').plot_topomap(\n times=times, **topomap_args)\n\n# %%\n# And we can plot the comparison at a single time point for two conditions.\n\nfig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5),\n gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1]))\nvmin, vmax, ts = -8, 8, 9.0\n\nevoked_left = epochs['Tapping/Left'].average()\nevoked_right = epochs['Tapping/Right'].average()\n\nevoked_left.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 0],\n vmin=vmin, vmax=vmax, colorbar=False,\n **topomap_args)\nevoked_left.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 0],\n vmin=vmin, vmax=vmax, colorbar=False,\n **topomap_args)\nevoked_right.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 1],\n vmin=vmin, vmax=vmax, colorbar=False,\n **topomap_args)\nevoked_right.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 1],\n vmin=vmin, vmax=vmax, colorbar=False,\n **topomap_args)\n\nevoked_diff = mne.combine_evoked([evoked_left, evoked_right], weights=[1, -1])\n\nevoked_diff.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 2:],\n vmin=vmin, vmax=vmax, colorbar=True,\n **topomap_args)\nevoked_diff.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 2:],\n vmin=vmin, vmax=vmax, colorbar=True,\n **topomap_args)\n\nfor column, condition in enumerate(\n ['Tapping Left', 'Tapping Right', 'Left-Right']):\n for row, chroma in enumerate(['HbO', 'HbR']):\n axes[row, column].set_title('{}: {}'.format(chroma, condition))\nfig.tight_layout()\n\n# %%\n# Lastly, we can also look at the individual waveforms to see what is\n# driving the topographic plot above.\n\nfig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4))\nmne.viz.plot_evoked_topo(epochs['Left'].average(picks='hbo'), color='b',\n axes=axes, legend=False)\nmne.viz.plot_evoked_topo(epochs['Right'].average(picks='hbo'), color='r',\n axes=axes, legend=False)\n\n# Tidy the legend.\nleg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1]\nleg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0])\nfig.legend(leg_lines, ['Left', 'Right'], loc='lower right')\n",
"# Author: Denis Engemann <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Jean-Remi King <[email protected]>\n#\n# License: BSD-3-Clause\n\nfrom collections import Counter\n\nimport numpy as np\n\nfrom .mixin import TransformerMixin, EstimatorMixin\nfrom .base import _set_cv\nfrom ..io.pick import _picks_to_idx\nfrom ..parallel import parallel_func\nfrom ..utils import logger, verbose\nfrom .. import pick_types, pick_info\n\n\nclass EMS(TransformerMixin, EstimatorMixin):\n \"\"\"Transformer to compute event-matched spatial filters.\n\n This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire\n time course. No time\n window needs to be specified. The result is a spatial filter at each\n time point and a corresponding time course. Intuitively, the result\n gives the similarity between the filter at each time point and the\n data vector (sensors) at that time point.\n\n .. note:: EMS only works for binary classification.\n\n Attributes\n ----------\n filters_ : ndarray, shape (n_channels, n_times)\n The set of spatial filters.\n classes_ : ndarray, shape (n_classes,)\n The target classes.\n\n References\n ----------\n .. footbibliography::\n \"\"\"\n\n def __repr__(self): # noqa: D105\n if hasattr(self, 'filters_'):\n return '<EMS: fitted with %i filters on %i classes.>' % (\n len(self.filters_), len(self.classes_))\n else:\n return '<EMS: not fitted.>'\n\n def fit(self, X, y):\n \"\"\"Fit the spatial filters.\n\n .. note : EMS is fitted on data normalized by channel type before the\n fitting of the spatial filters.\n\n Parameters\n ----------\n X : array, shape (n_epochs, n_channels, n_times)\n The training data.\n y : array of int, shape (n_epochs)\n The target classes.\n\n Returns\n -------\n self : instance of EMS\n Returns self.\n \"\"\"\n classes = np.unique(y)\n if len(classes) != 2:\n raise ValueError('EMS only works for binary classification.')\n self.classes_ = classes\n filters = X[y == classes[0]].mean(0) - X[y == classes[1]].mean(0)\n filters /= np.linalg.norm(filters, axis=0)[None, :]\n self.filters_ = filters\n return self\n\n def transform(self, X):\n \"\"\"Transform the data by the spatial filters.\n\n Parameters\n ----------\n X : array, shape (n_epochs, n_channels, n_times)\n The input data.\n\n Returns\n -------\n X : array, shape (n_epochs, n_times)\n The input data transformed by the spatial filters.\n \"\"\"\n Xt = np.sum(X * self.filters_, axis=1)\n return Xt\n\n\n@verbose\ndef compute_ems(epochs, conditions=None, picks=None, n_jobs=1, cv=None,\n verbose=None):\n \"\"\"Compute event-matched spatial filter on epochs.\n\n This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire\n time course. No time\n window needs to be specified. The result is a spatial filter at each\n time point and a corresponding time course. Intuitively, the result\n gives the similarity between the filter at each time point and the\n data vector (sensors) at that time point.\n\n .. note : EMS only works for binary classification.\n\n .. note : The present function applies a leave-one-out cross-validation,\n following Schurger et al's paper. However, we recommend using\n a stratified k-fold cross-validation. Indeed, leave-one-out tends\n to overfit and cannot be used to estimate the variance of the\n prediction within a given fold.\n\n .. 
note : Because of the leave-one-out, this function needs an equal\n number of epochs in each of the two conditions.\n\n Parameters\n ----------\n epochs : instance of mne.Epochs\n The epochs.\n conditions : list of str | None, default None\n If a list of strings, strings must match the epochs.event_id's key as\n well as the number of conditions supported by the objective_function.\n If None keys in epochs.event_id are used.\n %(picks_good_data)s\n %(n_jobs)s\n cv : cross-validation object | str | None, default LeaveOneOut\n The cross-validation scheme.\n %(verbose)s\n\n Returns\n -------\n surrogate_trials : ndarray, shape (n_trials // 2, n_times)\n The trial surrogates.\n mean_spatial_filter : ndarray, shape (n_channels, n_times)\n The set of spatial filters.\n conditions : ndarray, shape (n_classes,)\n The conditions used. Values correspond to original event ids.\n\n References\n ----------\n .. footbibliography::\n \"\"\"\n logger.info('...computing surrogate time series. This can take some time')\n\n # Default to leave-one-out cv\n cv = 'LeaveOneOut' if cv is None else cv\n picks = _picks_to_idx(epochs.info, picks)\n\n if not len(set(Counter(epochs.events[:, 2]).values())) == 1:\n raise ValueError('The same number of epochs is required by '\n 'this function. Please consider '\n '`epochs.equalize_event_counts`')\n\n if conditions is None:\n conditions = epochs.event_id.keys()\n epochs = epochs.copy()\n else:\n epochs = epochs[conditions]\n\n epochs.drop_bad()\n\n if len(conditions) != 2:\n raise ValueError('Currently this function expects exactly 2 '\n 'conditions but you gave me %i' %\n len(conditions))\n\n ev = epochs.events[:, 2]\n # Special care to avoid path dependent mappings and orders\n conditions = list(sorted(conditions))\n cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]\n\n info = pick_info(epochs.info, picks)\n data = epochs.get_data(picks=picks)\n\n # Scale (z-score) the data by channel type\n # XXX the z-scoring is applied outside the CV, which is not standard.\n for ch_type in ['mag', 'grad', 'eeg']:\n if ch_type in epochs:\n # FIXME should be applied to all sort of data channels\n if ch_type == 'eeg':\n this_picks = pick_types(info, meg=False, eeg=True)\n else:\n this_picks = pick_types(info, meg=ch_type, eeg=False)\n data[:, this_picks] /= np.std(data[:, this_picks])\n\n # Setup cross-validation. Need to use _set_cv to deal with sklearn\n # deprecation of cv objects.\n y = epochs.events[:, 2]\n _, cv_splits = _set_cv(cv, 'classifier', X=y, y=y)\n\n parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs)\n # FIXME this parallelization should be removed.\n # 1) it's numpy computation so it's already efficient,\n # 2) it duplicates the data in RAM,\n # 3) the computation is already super fast.\n out = parallel(p_func(_ems_diff, data, cond_idx, train, test)\n for train, test in cv_splits)\n\n surrogate_trials, spatial_filter = zip(*out)\n surrogate_trials = np.array(surrogate_trials)\n spatial_filter = np.mean(spatial_filter, axis=0)\n\n return surrogate_trials, spatial_filter, epochs.events[:, 2]\n\n\ndef _ems_diff(data0, data1):\n \"\"\"Compute the default diff objective function.\"\"\"\n return np.mean(data0, axis=0) - np.mean(data1, axis=0)\n\n\ndef _run_ems(objective_function, data, cond_idx, train, test):\n \"\"\"Run EMS.\"\"\"\n d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))\n d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]\n # compute surrogates\n return np.sum(data[test[0]] * d, axis=0), d\n",
"# -*- coding: utf-8 -*-\n\"\"\"Test reading of NEDF format.\"\"\"\n# Author: Tristan Stenner <[email protected]>\n#\n# License: BSD-3-Clause\n\nimport os.path as op\n\nimport pytest\nfrom numpy.testing import assert_allclose, assert_array_equal\n\nfrom mne import find_events\nfrom mne.io.constants import FIFF\nfrom mne.io.nedf import read_raw_nedf, _parse_nedf_header\nfrom mne.datasets import testing\nfrom mne.io.tests.test_raw import _test_raw_reader\n\neeg_path = testing.data_path(download=False, verbose=True)\neegfile = op.join(eeg_path, 'nedf', 'testdata.nedf')\n\nstimhdr = b\"\"\"\n<nedf>\n <NEDFversion>1.3</NEDFversion>\n <NumberOfChannelsOfAccelerometer>%d</NumberOfChannelsOfAccelerometer>\n <EEGSettings>\n <TotalNumberOfChannels>4</TotalNumberOfChannels>\n <EEGSamplingRate>500</EEGSamplingRate>\n <EEGMontage><C>A</C><C>B</C><C>C</C><C>D</C></EEGMontage>\n <NumberOfRecordsOfEEG>11</NumberOfRecordsOfEEG>\n </EEGSettings>\n <STIMSettings/>\n</nedf>\\x00\"\"\"\n\n\[email protected]('nacc', (0, 3))\ndef test_nedf_header_parser(nacc):\n \"\"\"Test NEDF header parsing and dtype extraction.\"\"\"\n with pytest.warns(RuntimeWarning, match='stim channels.*ignored'):\n info, dt, dt_last, n_samples, n_full = _parse_nedf_header(\n stimhdr % nacc)\n assert n_samples == 11\n assert n_full == 2\n nchan = 4\n assert info['nchan'] == nchan\n assert dt.itemsize == 200 + nacc * 2\n if nacc:\n assert dt.names[0] == 'acc'\n assert dt['acc'].shape == (nacc,)\n\n assert dt['data'].shape == (5,) # blocks of 5 EEG samples each\n assert dt_last['data'].shape == (1,) # plus one last extra one\n\n eegsampledt = dt['data'].subdtype[0]\n assert eegsampledt.names == ('eeg', 'stim', 'trig')\n assert eegsampledt['eeg'].shape == (nchan, 3)\n assert eegsampledt['stim'].shape == (2, nchan, 3)\n\n\ndef test_invalid_headers():\n \"\"\"Test that invalid headers raise exceptions.\"\"\"\n tpl = b\"\"\"<nedf>\n <NEDFversion>1.3</NEDFversion>\n <EEGSettings>\n %s\n <EEGMontage><C>A</C><C>B</C><C>C</C><C>D</C></EEGMontage>\n </EEGSettings>\n </nedf>\\x00\"\"\"\n nchan = b'<TotalNumberOfChannels>4</TotalNumberOfChannels>'\n sr = b'<EEGSamplingRate>500</EEGSamplingRate>'\n hdr = {\n 'null':\n b'No null terminator',\n 'Unknown additional':\n (b'<a><NEDFversion>1.3</NEDFversion>' +\n b'<AdditionalChannelStatus>???</AdditionalChannelStatus></a>\\x00'), # noqa: E501\n 'No EEG channels found':\n b'<a><NEDFversion>1.3</NEDFversion></a>\\x00',\n 'TotalNumberOfChannels not found':\n tpl % b'No nchan.',\n '!= channel count':\n tpl % (sr + b'<TotalNumberOfChannels>52</TotalNumberOfChannels>'),\n 'EEGSamplingRate not found':\n tpl % nchan,\n 'NumberOfRecordsOfEEG not found':\n tpl % (sr + nchan),\n }\n for match, invalid_hdr in hdr.items():\n with pytest.raises(RuntimeError, match=match):\n _parse_nedf_header(invalid_hdr)\n\n sus_hdrs = {\n 'unsupported': b'<a><NEDFversion>25</NEDFversion></a>\\x00',\n 'tested': (\n b'<a><NEDFversion>1.3</NEDFversion><stepDetails>' +\n b'<DeviceClass>STARSTIM</DeviceClass></stepDetails></a>\\x00'),\n }\n for match, sus_hdr in sus_hdrs.items():\n with pytest.warns(RuntimeWarning, match=match):\n with pytest.raises(RuntimeError, match='No EEG channels found'):\n _parse_nedf_header(sus_hdr)\n\n\[email protected]_testing_data\ndef test_nedf_data():\n \"\"\"Test reading raw NEDF files.\"\"\"\n raw = read_raw_nedf(eegfile)\n nsamples = len(raw)\n assert nsamples == 32538\n\n events = find_events(raw, shortest_event=1)\n assert len(events) == 4\n assert_array_equal(events[:, 2], [1, 1, 1, 1])\n onsets = 
events[:, 0] / raw.info['sfreq']\n assert raw.info['sfreq'] == 500\n\n data_end = raw.get_data('Fp1', nsamples - 100, nsamples).mean()\n assert_allclose(data_end, .0176, atol=.01)\n assert_allclose(raw.get_data('Fpz', 0, 100).mean(), .0185, atol=.01)\n\n assert_allclose(onsets, [22.384, 38.238, 49.496, 63.15])\n assert raw.info['meas_date'].year == 2019\n assert raw.ch_names[2] == 'AF7'\n\n for ch in raw.info['chs'][:-1]:\n assert ch['kind'] == FIFF.FIFFV_EEG_CH\n assert ch['unit'] == FIFF.FIFF_UNIT_V\n assert raw.info['chs'][-1]['kind'] == FIFF.FIFFV_STIM_CH\n assert raw.info['chs'][-1]['unit'] == FIFF.FIFF_UNIT_V\n\n # full tests\n _test_raw_reader(read_raw_nedf, filename=eegfile)\n",
"# Author: Jean-Remi King <[email protected]>\n#\n# License: BSD-3-Clause\n\nimport numpy as np\n\nfrom .mixin import TransformerMixin\nfrom .base import BaseEstimator, _check_estimator\nfrom ..fixes import _get_check_scoring\nfrom ..parallel import parallel_func\nfrom ..utils import (_validate_type, array_split_idx, ProgressBar,\n verbose, fill_doc)\n\n\n@fill_doc\nclass SlidingEstimator(BaseEstimator, TransformerMixin):\n \"\"\"Search Light.\n\n Fit, predict and score a series of models to each subset of the dataset\n along the last dimension. Each entry in the last dimension is referred\n to as a task.\n\n Parameters\n ----------\n %(base_estimator)s\n %(scoring)s\n %(n_jobs)s\n %(verbose)s\n\n Attributes\n ----------\n estimators_ : array-like, shape (n_tasks,)\n List of fitted scikit-learn estimators (one per task).\n \"\"\"\n\n def __init__(self, base_estimator, scoring=None, n_jobs=1,\n verbose=None): # noqa: D102\n _check_estimator(base_estimator)\n self._estimator_type = getattr(base_estimator, \"_estimator_type\", None)\n self.base_estimator = base_estimator\n self.n_jobs = n_jobs\n self.scoring = scoring\n self.verbose = verbose\n\n _validate_type(self.n_jobs, 'int', 'n_jobs')\n\n def __repr__(self): # noqa: D105\n repr_str = '<' + super(SlidingEstimator, self).__repr__()\n if hasattr(self, 'estimators_'):\n repr_str = repr_str[:-1]\n repr_str += ', fitted with %i estimators' % len(self.estimators_)\n return repr_str + '>'\n\n @verbose # to use class value\n def fit(self, X, y, **fit_params):\n \"\"\"Fit a series of independent estimators to the dataset.\n\n Parameters\n ----------\n X : array, shape (n_samples, nd_features, n_tasks)\n The training input samples. For each data slice, a clone estimator\n is fitted independently. The feature dimension can be\n multidimensional e.g.\n X.shape = (n_samples, n_features_1, n_features_2, n_tasks).\n y : array, shape (n_samples,) | (n_samples, n_targets)\n The target values.\n **fit_params : dict of string -> object\n Parameters to pass to the fit method of the estimator.\n\n Returns\n -------\n self : object\n Return self.\n \"\"\"\n self._check_Xy(X, y)\n self.estimators_ = list()\n self.fit_params = fit_params\n # For fitting, the parallelization is across estimators.\n parallel, p_func, n_jobs = parallel_func(_sl_fit, self.n_jobs,\n verbose=False)\n n_jobs = min(n_jobs, X.shape[-1])\n mesg = 'Fitting %s' % (self.__class__.__name__,)\n with ProgressBar(X.shape[-1], mesg=mesg) as pb:\n estimators = parallel(\n p_func(self.base_estimator, split, y, pb.subset(pb_idx),\n **fit_params)\n for pb_idx, split in array_split_idx(X, n_jobs, axis=-1))\n\n # Each parallel job can have a different number of training estimators\n # We can't directly concatenate them because of sklearn's Bagging API\n # (see scikit-learn #9720)\n self.estimators_ = np.empty(X.shape[-1], dtype=object)\n idx = 0\n for job_estimators in estimators:\n for est in job_estimators:\n self.estimators_[idx] = est\n idx += 1\n return self\n\n def fit_transform(self, X, y, **fit_params):\n \"\"\"Fit and transform a series of independent estimators to the dataset.\n\n Parameters\n ----------\n X : array, shape (n_samples, nd_features, n_tasks)\n The training input samples. For each task, a clone estimator\n is fitted independently. 
The feature dimension can be\n multidimensional, e.g.::\n\n X.shape = (n_samples, n_features_1, n_features_2, n_estimators)\n y : array, shape (n_samples,) | (n_samples, n_targets)\n The target values.\n **fit_params : dict of string -> object\n Parameters to pass to the fit method of the estimator.\n\n Returns\n -------\n y_pred : array, shape (n_samples, n_tasks) | (n_samples, n_tasks, n_targets)\n The predicted values for each estimator.\n \"\"\" # noqa: E501\n return self.fit(X, y, **fit_params).transform(X)\n\n @verbose # to use the class value\n def _transform(self, X, method):\n \"\"\"Aux. function to make parallel predictions/transformation.\"\"\"\n self._check_Xy(X)\n method = _check_method(self.base_estimator, method)\n if X.shape[-1] != len(self.estimators_):\n raise ValueError('The number of estimators does not match '\n 'X.shape[-1]')\n # For predictions/transforms the parallelization is across the data and\n # not across the estimators to avoid memory load.\n mesg = 'Transforming %s' % (self.__class__.__name__,)\n parallel, p_func, n_jobs = parallel_func(\n _sl_transform, self.n_jobs, verbose=False)\n n_jobs = min(n_jobs, X.shape[-1])\n X_splits = np.array_split(X, n_jobs, axis=-1)\n idx, est_splits = zip(*array_split_idx(self.estimators_, n_jobs))\n with ProgressBar(X.shape[-1], mesg=mesg) as pb:\n y_pred = parallel(p_func(est, x, method, pb.subset(pb_idx))\n for pb_idx, est, x in zip(\n idx, est_splits, X_splits))\n\n y_pred = np.concatenate(y_pred, axis=1)\n return y_pred\n\n def transform(self, X):\n \"\"\"Transform each data slice/task with a series of independent estimators.\n\n The number of tasks in X should match the number of tasks/estimators\n given at fit time.\n\n Parameters\n ----------\n X : array, shape (n_samples, nd_features, n_tasks)\n The input samples. For each data slice/task, the corresponding\n estimator makes a transformation of the data, e.g.\n ``[estimators[ii].transform(X[..., ii]) for ii in range(n_estimators)]``.\n The feature dimension can be multidimensional e.g.\n X.shape = (n_samples, n_features_1, n_features_2, n_tasks).\n\n Returns\n -------\n Xt : array, shape (n_samples, n_estimators)\n The transformed values generated by each estimator.\n \"\"\" # noqa: E501\n return self._transform(X, 'transform')\n\n def predict(self, X):\n \"\"\"Predict each data slice/task with a series of independent estimators.\n\n The number of tasks in X should match the number of tasks/estimators\n given at fit time.\n\n Parameters\n ----------\n X : array, shape (n_samples, nd_features, n_tasks)\n The input samples. For each data slice, the corresponding estimator\n makes the sample predictions, e.g.:\n ``[estimators[ii].predict(X[..., ii]) for ii in range(n_estimators)]``.\n The feature dimension can be multidimensional e.g.\n X.shape = (n_samples, n_features_1, n_features_2, n_tasks).\n\n Returns\n -------\n y_pred : array, shape (n_samples, n_estimators) | (n_samples, n_tasks, n_targets)\n Predicted values for each estimator/data slice.\n \"\"\" # noqa: E501\n return self._transform(X, 'predict')\n\n def predict_proba(self, X):\n \"\"\"Predict each data slice with a series of independent estimators.\n\n The number of tasks in X should match the number of tasks/estimators\n given at fit time.\n\n Parameters\n ----------\n X : array, shape (n_samples, nd_features, n_tasks)\n The input samples. 
For each data slice, the corresponding estimator\n makes the sample probabilistic predictions, e.g.:\n ``[estimators[ii].predict_proba(X[..., ii]) for ii in range(n_estimators)]``.\n The feature dimension can be multidimensional e.g.\n X.shape = (n_samples, n_features_1, n_features_2, n_tasks).\n\n Returns\n -------\n y_pred : array, shape (n_samples, n_tasks, n_classes)\n Predicted probabilities for each estimator/data slice/task.\n \"\"\" # noqa: E501\n return self._transform(X, 'predict_proba')\n\n def decision_function(self, X):\n \"\"\"Estimate distances of each data slice to the hyperplanes.\n\n Parameters\n ----------\n X : array, shape (n_samples, nd_features, n_tasks)\n The input samples. For each data slice, the corresponding estimator\n outputs the distance to the hyperplane, e.g.:\n ``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``.\n The feature dimension can be multidimensional e.g.\n X.shape = (n_samples, n_features_1, n_features_2, n_estimators).\n\n Returns\n -------\n y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2)\n Predicted distances for each estimator/data slice.\n\n Notes\n -----\n This requires base_estimator to have a ``decision_function`` method.\n \"\"\" # noqa: E501\n return self._transform(X, 'decision_function')\n\n def _check_Xy(self, X, y=None):\n \"\"\"Aux. function to check input data.\"\"\"\n if y is not None:\n if len(X) != len(y) or len(y) < 1:\n raise ValueError('X and y must have the same length.')\n if X.ndim < 3:\n raise ValueError('X must have at least 3 dimensions.')\n\n def score(self, X, y):\n \"\"\"Score each estimator on each task.\n\n The number of tasks in X should match the number of tasks/estimators\n given at fit time, i.e. we need\n ``X.shape[-1] == len(self.estimators_)``.\n\n Parameters\n ----------\n X : array, shape (n_samples, nd_features, n_tasks)\n The input samples. For each data slice, the corresponding estimator\n scores the prediction, e.g.:\n ``[estimators[ii].score(X[..., ii], y) for ii in range(n_estimators)]``.\n The feature dimension can be multidimensional e.g.\n X.shape = (n_samples, n_features_1, n_features_2, n_tasks).\n y : array, shape (n_samples,) | (n_samples, n_targets)\n The target values.\n\n Returns\n -------\n score : array, shape (n_samples, n_estimators)\n Score for each estimator/task.\n \"\"\" # noqa: E501\n check_scoring = _get_check_scoring()\n\n self._check_Xy(X)\n if X.shape[-1] != len(self.estimators_):\n raise ValueError('The number of estimators does not match '\n 'X.shape[-1]')\n\n scoring = check_scoring(self.base_estimator, self.scoring)\n y = _fix_auc(scoring, y)\n\n # For predictions/transforms the parallelization is across the data and\n # not across the estimators to avoid memory load.\n parallel, p_func, n_jobs = parallel_func(_sl_score, self.n_jobs)\n n_jobs = min(n_jobs, X.shape[-1])\n X_splits = np.array_split(X, n_jobs, axis=-1)\n est_splits = np.array_split(self.estimators_, n_jobs)\n score = parallel(p_func(est, scoring, x, y)\n for (est, x) in zip(est_splits, X_splits))\n\n score = np.concatenate(score, axis=0)\n return score\n\n @property\n def classes_(self):\n if not hasattr(self.estimators_[0], 'classes_'):\n raise AttributeError('classes_ attribute available only if '\n 'base_estimator has it, and estimator %s does'\n ' not' % (self.estimators_[0],))\n return self.estimators_[0].classes_\n\n\n@fill_doc\ndef _sl_fit(estimator, X, y, pb, **fit_params):\n \"\"\"Aux. 
function to fit SlidingEstimator in parallel.\n\n    Fit a clone estimator to each slice of data.\n\n    Parameters\n    ----------\n    %(base_estimator)s\n    X : array, shape (n_samples, nd_features, n_estimators)\n        The target data. The feature dimension can be multidimensional e.g.\n        X.shape = (n_samples, n_features_1, n_features_2, n_estimators)\n    y : array, shape (n_samples,)\n        The target values.\n    fit_params : dict | None\n        Parameters to pass to the fit method of the estimator.\n\n    Returns\n    -------\n    estimators_ : list of estimators\n        The fitted estimators.\n    \"\"\"\n    from sklearn.base import clone\n    estimators_ = list()\n    for ii in range(X.shape[-1]):\n        est = clone(estimator)\n        est.fit(X[..., ii], y, **fit_params)\n        estimators_.append(est)\n        pb.update(ii + 1)\n    return estimators_\n\n\ndef _sl_transform(estimators, X, method, pb):\n    \"\"\"Aux. function to transform SlidingEstimator in parallel.\n\n    Applies transform/predict/decision_function etc. for each slice of data.\n\n    Parameters\n    ----------\n    estimators : list of estimators\n        The fitted estimators.\n    X : array, shape (n_samples, nd_features, n_estimators)\n        The target data. The feature dimension can be multidimensional e.g.\n        X.shape = (n_samples, n_features_1, n_features_2, n_estimators)\n    method : str\n        The estimator method to use (e.g. 'predict', 'transform').\n\n    Returns\n    -------\n    y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2)\n        The transformations for each slice of data.\n    \"\"\"  # noqa: E501\n    for ii, est in enumerate(estimators):\n        transform = getattr(est, method)\n        _y_pred = transform(X[..., ii])\n        # Initialize array of predictions on the first transform iteration\n        if ii == 0:\n            y_pred = _sl_init_pred(_y_pred, X)\n        y_pred[:, ii, ...] = _y_pred\n        pb.update(ii + 1)\n    return y_pred\n\n\ndef _sl_init_pred(y_pred, X):\n    \"\"\"Aux. function to SlidingEstimator to initialize y_pred.\"\"\"\n    n_sample, n_tasks = X.shape[0], X.shape[-1]\n    y_pred = np.zeros((n_sample, n_tasks) + y_pred.shape[1:], y_pred.dtype)\n    return y_pred\n\n\ndef _sl_score(estimators, scoring, X, y):\n    \"\"\"Aux. function to score SlidingEstimator in parallel.\n\n    Predict and score each slice of data.\n\n    Parameters\n    ----------\n    estimators : list, shape (n_tasks,)\n        The fitted estimators.\n    X : array, shape (n_samples, nd_features, n_tasks)\n        The target data. The feature dimension can be multidimensional e.g.\n        X.shape = (n_samples, n_features_1, n_features_2, n_tasks)\n    scoring : callable, str or None\n        If scoring is None (default), the predictions are internally\n        generated by estimator.score(). Else, we must first get the\n        predictions to pass them to ad-hoc scorer.\n    y : array, shape (n_samples,) | (n_samples, n_targets)\n        The target values.\n\n    Returns\n    -------\n    score : array, shape (n_tasks,)\n        The score for each task / slice of data.\n    \"\"\"\n    n_tasks = X.shape[-1]\n    score = np.zeros(n_tasks)\n    for ii, est in enumerate(estimators):\n        score[ii] = scoring(est, X[..., ii], y)\n    return score\n\n\ndef _check_method(estimator, method):\n    \"\"\"Check that an estimator has the method attribute.\n\n    If method == 'transform' and estimator does not have 'transform', use\n    'predict' instead.\n    \"\"\"\n    if method == 'transform' and not hasattr(estimator, 'transform'):\n        method = 'predict'\n    if not hasattr(estimator, method):\n        raise ValueError('base_estimator does not have `%s` method.' 
% method)\n return method\n\n\n@fill_doc\nclass GeneralizingEstimator(SlidingEstimator):\n \"\"\"Generalization Light.\n\n Fit a search-light along the last dimension and use them to apply a\n systematic cross-tasks generalization.\n\n Parameters\n ----------\n %(base_estimator)s\n %(scoring)s\n %(n_jobs)s\n %(verbose)s\n \"\"\"\n\n def __repr__(self): # noqa: D105\n repr_str = super(GeneralizingEstimator, self).__repr__()\n if hasattr(self, 'estimators_'):\n repr_str = repr_str[:-1]\n repr_str += ', fitted with %i estimators>' % len(self.estimators_)\n return repr_str\n\n @verbose # use class value\n def _transform(self, X, method):\n \"\"\"Aux. function to make parallel predictions/transformation.\"\"\"\n self._check_Xy(X)\n method = _check_method(self.base_estimator, method)\n mesg = 'Transforming %s' % (self.__class__.__name__,)\n parallel, p_func, n_jobs = parallel_func(\n _gl_transform, self.n_jobs, verbose=False)\n n_jobs = min(n_jobs, X.shape[-1])\n with ProgressBar(X.shape[-1] * len(self.estimators_), mesg=mesg) as pb:\n y_pred = parallel(\n p_func(self.estimators_, x_split, method, pb.subset(pb_idx))\n for pb_idx, x_split in array_split_idx(\n X, n_jobs, axis=-1, n_per_split=len(self.estimators_)))\n\n y_pred = np.concatenate(y_pred, axis=2)\n return y_pred\n\n def transform(self, X):\n \"\"\"Transform each data slice with all possible estimators.\n\n Parameters\n ----------\n X : array, shape (n_samples, nd_features, n_slices)\n The input samples. For estimator the corresponding data slice is\n used to make a transformation. The feature dimension can be\n multidimensional e.g.\n X.shape = (n_samples, n_features_1, n_features_2, n_estimators).\n\n Returns\n -------\n Xt : array, shape (n_samples, n_estimators, n_slices)\n The transformed values generated by each estimator.\n \"\"\"\n return self._transform(X, 'transform')\n\n def predict(self, X):\n \"\"\"Predict each data slice with all possible estimators.\n\n Parameters\n ----------\n X : array, shape (n_samples, nd_features, n_slices)\n The training input samples. For each data slice, a fitted estimator\n predicts each slice of the data independently. The feature\n dimension can be multidimensional e.g.\n X.shape = (n_samples, n_features_1, n_features_2, n_estimators).\n\n Returns\n -------\n y_pred : array, shape (n_samples, n_estimators, n_slices) | (n_samples, n_estimators, n_slices, n_targets)\n The predicted values for each estimator.\n \"\"\" # noqa: E501\n return self._transform(X, 'predict')\n\n def predict_proba(self, X):\n \"\"\"Estimate probabilistic estimates of each data slice with all possible estimators.\n\n Parameters\n ----------\n X : array, shape (n_samples, nd_features, n_slices)\n The training input samples. For each data slice, a fitted estimator\n predicts a slice of the data. The feature dimension can be\n multidimensional e.g.\n ``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.\n\n Returns\n -------\n y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes)\n The predicted values for each estimator.\n\n Notes\n -----\n This requires ``base_estimator`` to have a ``predict_proba`` method.\n \"\"\" # noqa: E501\n return self._transform(X, 'predict_proba')\n\n def decision_function(self, X):\n \"\"\"Estimate distances of each data slice to all hyperplanes.\n\n Parameters\n ----------\n X : array, shape (n_samples, nd_features, n_slices)\n The training input samples. 
Each estimator outputs the distance to\n its hyperplane, e.g.:\n ``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``.\n The feature dimension can be multidimensional e.g.\n ``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.\n\n Returns\n -------\n y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes * (n_classes-1) // 2)\n The predicted values for each estimator.\n\n Notes\n -----\n This requires ``base_estimator`` to have a ``decision_function``\n method.\n \"\"\" # noqa: E501\n return self._transform(X, 'decision_function')\n\n @verbose # to use class value\n def score(self, X, y):\n \"\"\"Score each of the estimators on the tested dimensions.\n\n Parameters\n ----------\n X : array, shape (n_samples, nd_features, n_slices)\n The input samples. For each data slice, the corresponding estimator\n scores the prediction, e.g.:\n ``[estimators[ii].score(X[..., ii], y) for ii in range(n_slices)]``.\n The feature dimension can be multidimensional e.g.\n ``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.\n y : array, shape (n_samples,) | (n_samples, n_targets)\n The target values.\n\n Returns\n -------\n score : array, shape (n_samples, n_estimators, n_slices)\n Score for each estimator / data slice couple.\n \"\"\" # noqa: E501\n check_scoring = _get_check_scoring()\n self._check_Xy(X)\n # For predictions/transforms the parallelization is across the data and\n # not across the estimators to avoid memory load.\n mesg = 'Scoring %s' % (self.__class__.__name__,)\n parallel, p_func, n_jobs = parallel_func(_gl_score, self.n_jobs,\n verbose=False)\n n_jobs = min(n_jobs, X.shape[-1])\n scoring = check_scoring(self.base_estimator, self.scoring)\n y = _fix_auc(scoring, y)\n with ProgressBar(X.shape[-1] * len(self.estimators_), mesg=mesg) as pb:\n score = parallel(p_func(self.estimators_, scoring, x, y,\n pb.subset(pb_idx))\n for pb_idx, x in array_split_idx(\n X, n_jobs, axis=-1,\n n_per_split=len(self.estimators_)))\n\n score = np.concatenate(score, axis=1)\n return score\n\n\ndef _gl_transform(estimators, X, method, pb):\n \"\"\"Transform the dataset.\n\n This will apply each estimator to all slices of the data.\n\n Parameters\n ----------\n X : array, shape (n_samples, nd_features, n_slices)\n The training input samples. For each data slice, a clone estimator\n is fitted independently. The feature dimension can be multidimensional\n e.g. X.shape = (n_samples, n_features_1, n_features_2, n_estimators)\n\n Returns\n -------\n Xt : array, shape (n_samples, n_slices)\n The transformed values generated by each estimator.\n \"\"\"\n n_sample, n_iter = X.shape[0], X.shape[-1]\n for ii, est in enumerate(estimators):\n # stack generalized data for faster prediction\n X_stack = X.transpose(np.r_[0, X.ndim - 1, range(1, X.ndim - 1)])\n X_stack = X_stack.reshape(np.r_[n_sample * n_iter, X_stack.shape[2:]])\n transform = getattr(est, method)\n _y_pred = transform(X_stack)\n # unstack generalizations\n if _y_pred.ndim == 2:\n _y_pred = np.reshape(_y_pred, [n_sample, n_iter, _y_pred.shape[1]])\n else:\n shape = np.r_[n_sample, n_iter, _y_pred.shape[1:]].astype(int)\n _y_pred = np.reshape(_y_pred, shape)\n # Initialize array of predictions on the first transform iteration\n if ii == 0:\n y_pred = _gl_init_pred(_y_pred, X, len(estimators))\n y_pred[:, ii, ...] = _y_pred\n pb.update((ii + 1) * n_iter)\n return y_pred\n\n\ndef _gl_init_pred(y_pred, X, n_train):\n \"\"\"Aux. 
function to GeneralizingEstimator to initialize y_pred.\"\"\"\n n_sample, n_iter = X.shape[0], X.shape[-1]\n if y_pred.ndim == 3:\n y_pred = np.zeros((n_sample, n_train, n_iter, y_pred.shape[-1]),\n y_pred.dtype)\n else:\n y_pred = np.zeros((n_sample, n_train, n_iter), y_pred.dtype)\n return y_pred\n\n\ndef _gl_score(estimators, scoring, X, y, pb):\n \"\"\"Score GeneralizingEstimator in parallel.\n\n Predict and score each slice of data.\n\n Parameters\n ----------\n estimators : list of estimators\n The fitted estimators.\n scoring : callable, string or None\n If scoring is None (default), the predictions are internally\n generated by estimator.score(). Else, we must first get the\n predictions to pass them to ad-hoc scorer.\n X : array, shape (n_samples, nd_features, n_slices)\n The target data. The feature dimension can be multidimensional e.g.\n X.shape = (n_samples, n_features_1, n_features_2, n_estimators)\n y : array, shape (n_samples,) | (n_samples, n_targets)\n The target values.\n\n Returns\n -------\n score : array, shape (n_estimators, n_slices)\n The score for each slice of data.\n \"\"\"\n # FIXME: The level parallelization may be a bit high, and might be memory\n # consuming. Perhaps need to lower it down to the loop across X slices.\n score_shape = [len(estimators), X.shape[-1]]\n for jj in range(X.shape[-1]):\n for ii, est in enumerate(estimators):\n _score = scoring(est, X[..., jj], y)\n # Initialize array of predictions on the first score iteration\n if (ii == 0) and (jj == 0):\n dtype = type(_score)\n score = np.zeros(score_shape, dtype)\n score[ii, jj, ...] = _score\n pb.update(jj * len(estimators) + ii + 1)\n return score\n\n\ndef _fix_auc(scoring, y):\n from sklearn.preprocessing import LabelEncoder\n # This fixes sklearn's inability to compute roc_auc when y not in [0, 1]\n # scikit-learn/scikit-learn#6874\n if scoring is not None:\n score_func = getattr(scoring, '_score_func', None)\n kwargs = getattr(scoring, '_kwargs', {})\n if (getattr(score_func, '__name__', '') == 'roc_auc_score' and\n kwargs.get('multi_class', 'raise') == 'raise'):\n if np.ndim(y) != 1 or len(set(y)) != 2:\n raise ValueError('roc_auc scoring can only be computed for '\n 'two-class problems.')\n y = LabelEncoder().fit_transform(y)\n return y\n",
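A minimal usage sketch of the two classes defined above, assuming MNE-Python and scikit-learn are installed; the random data, shapes, and scorer choice below are illustrative, not from the source:

    import numpy as np
    from sklearn.linear_model import LogisticRegression
    from mne.decoding import SlidingEstimator, GeneralizingEstimator

    rng = np.random.RandomState(0)
    X = rng.randn(100, 5, 30)   # (n_samples, n_features, n_tasks), e.g. 30 time points
    y = rng.randint(0, 2, 100)  # binary labels

    # one clone of the base estimator is fitted per task/slice
    slide = SlidingEstimator(LogisticRegression(), scoring='roc_auc', n_jobs=1)
    slide.fit(X, y)
    print(slide.score(X, y).shape)  # (30,): one score per slice

    # the generalizing variant evaluates every fitted estimator on every slice
    gen = GeneralizingEstimator(LogisticRegression(), scoring='roc_auc', n_jobs=1)
    gen.fit(X, y)
    print(gen.score(X, y).shape)    # (30, 30): train slice x test slice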
"# -*- coding: utf-8 -*-\n\"\"\"\n.. _tut-sleep-stage-classif:\n\nSleep stage classification from polysomnography (PSG) data\n==========================================================\n\n.. note:: This code is taken from the analysis code used in\n :footcite:`ChambonEtAl2018`. If you reuse this code please consider\n citing this work.\n\nThis tutorial explains how to perform a toy polysomnography analysis that\nanswers the following question:\n\n.. important:: Given two subjects from the Sleep Physionet dataset\n :footcite:`KempEtAl2000,GoldbergerEtAl2000`, namely\n *Alice* and *Bob*, how well can we predict the sleep stages of\n *Bob* from *Alice's* data?\n\nThis problem is tackled as supervised multiclass classification task. The aim\nis to predict the sleep stage from 5 possible stages for each chunk of 30\nseconds of data.\n\n.. _Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html\n.. _FunctionTransformer: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.FunctionTransformer.html\n.. _physionet_labels: https://physionet.org/physiobank/database/sleep-edfx/#sleep-cassette-study-and-data\n\"\"\" # noqa: E501\n\n# Authors: Alexandre Gramfort <[email protected]>\n# Stanislas Chambon <[email protected]>\n# Joan Massich <[email protected]>\n#\n# License: BSD-3-Clause\n\n# %%\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne.datasets.sleep_physionet.age import fetch_data\nfrom mne.time_frequency import psd_welch\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import FunctionTransformer\n\n##############################################################################\n# Load the data\n# -------------\n#\n# Here we download the data from two subjects and the end goal is to obtain\n# :term:`epochs` and its associated ground truth.\n#\n# MNE-Python provides us with\n# :func:`mne.datasets.sleep_physionet.age.fetch_data` to conveniently download\n# data from the Sleep Physionet dataset\n# :footcite:`KempEtAl2000,GoldbergerEtAl2000`.\n# Given a list of subjects and records, the fetcher downloads the data and\n# provides us for each subject, a pair of files:\n#\n# * ``-PSG.edf`` containing the polysomnography. 
The :term:`raw` data from the\n# EEG helmet,\n# * ``-Hypnogram.edf`` containing the :term:`annotations` recorded by an\n# expert.\n#\n# Combining these two in a :class:`mne.io.Raw` object, we can then extract\n# :term:`events` based on the descriptions of the annotations to obtain the\n# :term:`epochs`.\n#\n# Read the PSG data and Hypnograms to create a raw object\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nALICE, BOB = 0, 1\n\n[alice_files, bob_files] = fetch_data(subjects=[ALICE, BOB], recording=[1])\n\nraw_train = mne.io.read_raw_edf(alice_files[0], stim_channel='marker',\n misc=['rectal'])\nannot_train = mne.read_annotations(alice_files[1])\n\nraw_train.set_annotations(annot_train, emit_warning=False)\n\n# plot some data\n# scalings were chosen manually to allow for simultaneous visualization of\n# different channel types in this specific dataset\nraw_train.plot(start=60, duration=60,\n scalings=dict(eeg=1e-4, resp=1e3, eog=1e-4, emg=1e-7,\n misc=1e-1))\n\n##############################################################################\n# Extract 30s events from annotations\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# The Sleep Physionet dataset is annotated using\n# `8 labels <physionet_labels_>`_:\n# Wake (W), Stage 1, Stage 2, Stage 3, Stage 4 corresponding to the range from\n# light sleep to deep sleep, REM sleep (R) where REM is the abbreviation for\n# Rapid Eye Movement sleep, movement (M), and Stage (?) for any non-scored\n# segment.\n#\n# We will work only with 5 stages: Wake (W), Stage 1, Stage 2, Stage 3/4, and\n# REM sleep (R). To do so, we use the ``event_id`` parameter in\n# :func:`mne.events_from_annotations` to select which events we are\n# interested in, and we associate an event identifier with each of them.\n#\n# Moreover, the recordings contain long awake (W) regions before and after each\n# night. To limit the impact of class imbalance, we trim each recording by only\n# keeping 30 minutes of wake time before the first occurrence and 30 minutes\n# after the last occurrence of sleep stages.\n\nannotation_desc_2_event_id = {'Sleep stage W': 1,\n 'Sleep stage 1': 2,\n 'Sleep stage 2': 3,\n 'Sleep stage 3': 4,\n 'Sleep stage 4': 4,\n 'Sleep stage R': 5}\n\n# keep last 30-min wake events before sleep and first 30-min wake events after\n# sleep and redefine annotations on raw data\nannot_train.crop(annot_train[1]['onset'] - 30 * 60,\n annot_train[-2]['onset'] + 30 * 60)\nraw_train.set_annotations(annot_train, emit_warning=False)\n\nevents_train, _ = mne.events_from_annotations(\n raw_train, event_id=annotation_desc_2_event_id, chunk_duration=30.)\n\n# create a new event_id that unifies stages 3 and 4\nevent_id = {'Sleep stage W': 1,\n 'Sleep stage 1': 2,\n 'Sleep stage 2': 3,\n 'Sleep stage 3/4': 4,\n 'Sleep stage R': 5}\n\n# plot events\nfig = mne.viz.plot_events(events_train, event_id=event_id,\n sfreq=raw_train.info['sfreq'],\n first_samp=events_train[0, 0])\n\n# keep the color-code for further plotting\nstage_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n##############################################################################\n# Create Epochs from the data based on the events found in the annotations\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ntmax = 30. - 1. 
/ raw_train.info['sfreq'] # tmax is included\n\nepochs_train = mne.Epochs(raw=raw_train, events=events_train,\n event_id=event_id, tmin=0., tmax=tmax, baseline=None)\n\nprint(epochs_train)\n\n##############################################################################\n# Applying the same steps to the test data from Bob\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nraw_test = mne.io.read_raw_edf(bob_files[0], stim_channel='marker',\n misc=['rectal'])\nannot_test = mne.read_annotations(bob_files[1])\nannot_test.crop(annot_test[1]['onset'] - 30 * 60,\n annot_test[-2]['onset'] + 30 * 60)\nraw_test.set_annotations(annot_test, emit_warning=False)\nevents_test, _ = mne.events_from_annotations(\n raw_test, event_id=annotation_desc_2_event_id, chunk_duration=30.)\nepochs_test = mne.Epochs(raw=raw_test, events=events_test, event_id=event_id,\n tmin=0., tmax=tmax, baseline=None)\n\nprint(epochs_test)\n\n\n##############################################################################\n# Feature Engineering\n# -------------------\n#\n# Observing the power spectral density (PSD) plot of the :term:`epochs` grouped\n# by sleep stage, we can see that different sleep stages have different\n# signatures. These signatures remain similar between Alice and Bob's data.\n#\n# In the rest of this section we will create EEG features based on relative power\n# in specific frequency bands to capture this difference between the sleep\n# stages in our data.\n\n# visualize Alice vs. Bob PSD by sleep stage.\nfig, (ax1, ax2) = plt.subplots(ncols=2)\n\n# iterate over the subjects\nstages = sorted(event_id.keys())\nfor ax, title, epochs in zip([ax1, ax2],\n ['Alice', 'Bob'],\n [epochs_train, epochs_test]):\n\n for stage, color in zip(stages, stage_colors):\n epochs[stage].plot_psd(area_mode=None, color=color, ax=ax,\n fmin=0.1, fmax=20., show=False,\n average=True, spatial_colors=False)\n ax.set(title=title, xlabel='Frequency (Hz)')\nax2.set(ylabel='µV^2/Hz (dB)')\nax2.legend(ax2.lines[2::3], stages)\nplt.show()\n\n##############################################################################\n# Design a scikit-learn transformer from a Python function\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# We will now create a function to extract EEG features based on relative power\n# in specific frequency bands to be able to predict sleep stages from EEG\n# signals.\n\n\ndef eeg_power_band(epochs):\n \"\"\"EEG relative power band feature extraction.\n\n This function takes an ``mne.Epochs`` object and creates EEG features based\n on relative power in specific frequency bands that are compatible with\n scikit-learn.\n\n Parameters\n ----------\n epochs : Epochs\n The data.\n\n Returns\n -------\n X : numpy array of shape [n_samples, 5 * n_channels]\n Transformed data.\n \"\"\"\n # specific frequency bands\n FREQ_BANDS = {\"delta\": [0.5, 4.5],\n \"theta\": [4.5, 8.5],\n \"alpha\": [8.5, 11.5],\n \"sigma\": [11.5, 15.5],\n \"beta\": [15.5, 30]}\n\n psds, freqs = psd_welch(epochs, picks='eeg', fmin=0.5, fmax=30.)\n # Normalize the PSDs\n psds /= np.sum(psds, axis=-1, keepdims=True)\n\n X = []\n for fmin, fmax in FREQ_BANDS.values():\n psds_band = psds[:, :, (freqs >= fmin) & (freqs < fmax)].mean(axis=-1)\n X.append(psds_band.reshape(len(psds), -1))\n\n return np.concatenate(X, axis=1)\n\n\n##############################################################################\n# Multiclass classification workflow using scikit-learn\n# -----------------------------------------------------\n#\n# To answer the question of how 
well can we predict the sleep stages of Bob\n# from Alice's data and avoid as much boilerplate code as possible, we will\n# take advantage of two key features of scikit-learn: `Pipeline`_ and\n# `FunctionTransformer`_.\n#\n# A scikit-learn pipeline composes an estimator as a sequence of transforms\n# and a final estimator, while the FunctionTransformer converts a Python\n# function into an estimator-compatible object. In this manner we can create\n# a scikit-learn estimator that takes :class:`mne.Epochs` thanks to the\n# ``eeg_power_band`` function we just created.\n\npipe = make_pipeline(FunctionTransformer(eeg_power_band, validate=False),\n RandomForestClassifier(n_estimators=100, random_state=42))\n\n# Train\ny_train = epochs_train.events[:, 2]\npipe.fit(epochs_train, y_train)\n\n# Test\ny_pred = pipe.predict(epochs_test)\n\n# Assess the results\ny_test = epochs_test.events[:, 2]\nacc = accuracy_score(y_test, y_pred)\n\nprint(\"Accuracy score: {}\".format(acc))\n\n##############################################################################\n# In short, yes. We can predict Bob's sleep stages based on Alice's data.\n#\n# Further analysis of the data\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# We can check the confusion matrix or the classification report.\n\nprint(confusion_matrix(y_test, y_pred))\n\n##############################################################################\n#\n\nprint(classification_report(y_test, y_pred, target_names=event_id.keys()))\n\n##############################################################################\n# Exercise\n# --------\n#\n# Fetch 50 subjects from the Physionet database and run a 5-fold\n# cross-validation, leaving 10 subjects out in the test set each time.\n#\n# References\n# ----------\n# .. footbibliography::\n"
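The relative-power feature idea in ``eeg_power_band`` can be reproduced on a plain NumPy array; a minimal sketch, assuming SciPy is available (the signal, sampling rate, and band edges below are illustrative, not from the source):

    import numpy as np
    from scipy.signal import welch

    rng = np.random.RandomState(42)
    sig = rng.randn(2, 3000)                # 2 channels, 30 s at 100 Hz
    freqs, psd = welch(sig, fs=100.)        # PSD per channel via Welch's method
    psd /= psd.sum(axis=-1, keepdims=True)  # normalize to relative power, as above
    delta = psd[:, (freqs >= 0.5) & (freqs < 4.5)].mean(axis=-1)
    print(delta)                            # one delta-band feature per channel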
] |
[
[
"numpy.arange",
"scipy.interpolate.interp1d",
"numpy.testing.assert_allclose",
"numpy.corrcoef",
"numpy.random.RandomState"
],
[
"scipy.signal.unit_impulse",
"scipy.signal.coherence",
"numpy.arange",
"scipy.signal.welch",
"matplotlib.pyplot.subplots",
"numpy.log10",
"numpy.random.RandomState",
"numpy.vstack"
],
[
"numpy.printoptions",
"numpy.max",
"numpy.mean",
"numpy.corrcoef",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"matplotlib.patheffects.Normal",
"numpy.asarray",
"numpy.linalg.inv",
"matplotlib.pyplot.subplots",
"numpy.sort",
"numpy.round",
"matplotlib.patheffects.Stroke",
"numpy.array"
],
[
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.nonzero"
],
[
"numpy.unique",
"numpy.linalg.norm",
"numpy.intersect1d",
"numpy.std",
"numpy.mean",
"numpy.array",
"numpy.where",
"numpy.sum"
],
[
"numpy.testing.assert_array_equal",
"numpy.testing.assert_allclose"
],
[
"numpy.reshape",
"numpy.concatenate",
"sklearn.base.clone",
"numpy.ndim",
"sklearn.preprocessing.LabelEncoder",
"numpy.array_split",
"numpy.zeros",
"numpy.empty"
],
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.preprocessing.FunctionTransformer",
"matplotlib.pyplot.subplots",
"sklearn.metrics.confusion_matrix",
"numpy.concatenate",
"matplotlib.pyplot.show",
"numpy.sum",
"sklearn.metrics.accuracy_score"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
YangChenye/Python-Code
|
[
"66a7edff84143ed6cc9518717c555399683e334c",
"66a7edff84143ed6cc9518717c555399683e334c"
] |
[
"Data-Processing/for_txt/write_txt.py",
"Algorithm/Least-Squares/Least squares.py"
] |
[
"# 将列表写入txt\nimport numpy as np\na = [1, 2, 3]\n\nnp.savetxt('11.txt', a, fmt='%i', delimiter=',')\n ",
"import pandas as pd\n\nsales=pd.read_csv('train_data.csv',sep='\\s*,\\s*',engine='python') #读取CSV\nX=sales['X'].values #存csv的第一列\nY=sales['Y'].values #存csv的第二列\n\n#初始化赋值\ns1 = 0\ns2 = 0\ns3 = 0\ns4 = 0\nn = 4 ####你需要根据的数据量进行修改\n\n#循环累加\nfor i in range(n):\n\ts1 = s1 + X[i]*Y[i]\n\ts2 = s2 + X[i]\n\ts3 = s3 + Y[i]\n\ts4 = s4 + X[i]*X[i]\n\t\n#计算斜率和截距\nb = (s2*s3-n*s1)/(s2*s2-s4*n)\na = (s3 - b*s2)/n\nprint(\"Coeff: {} Intercept: {}\".format(b, a))\n\n"
] |
[
[
"numpy.savetxt"
],
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
itamaker/delta
|
[
"0a0eae8f860c28131b4543ecaa933e3dfbe3efde"
] |
[
"delta/utils/solver/raw_solver.py"
] |
[
"# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Solver for raw tensorflow model.\"\"\"\n\nimport re\nimport math\nimport numpy as np\nimport tensorflow as tf\nfrom absl import logging\n\nfrom delta.utils.solver.base_solver import Solver\n\nfrom delta import utils\nfrom delta.utils.register import registers\nfrom delta.utils.solver.utils.solver_utils import get_checkpoint_dir\nfrom delta.utils.solver.utils.solver_utils import get_ckpt_state\nfrom delta.utils.solver.utils.solver_utils import get_session_conf\nfrom delta.utils.solver.utils.solver_utils import to_saved_model\nfrom delta.utils.solver.utils.solver_utils import run_metrics\nfrom delta.utils.solver.utils.hooks import DatasetInitializerHook\n\n# pylint: disable=too-many-instance-attributes, not-context-manager, bad-continuation\n\n\[email protected]\nclass RawSolver(Solver):\n \"\"\"Solver for raw tensorflow model.\"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.session_conf, self.smax_to_keep, \\\n self.batch_size, self.num_epochs, \\\n self.save_checkpoint_steps, \\\n self.resume_model_path, self.print_every = self.set_experimental_environment()\n self.first_eval = True\n self.do_eval = False\n self.is_multi_output = False\n self.output_num = 1\n self.infer_no_label = self.config['data'][utils.INFER].get(\n 'infer_no_label', False)\n\n def process_config(self, config):\n \"\"\"Process the configs.\"\"\"\n return config\n\n def input_fn(self, mode):\n \"\"\"Get the input function for training, evaluation and inference.\"\"\"\n super().input_fn(mode)\n return self.task.input_fn()()\n\n def export_input(self, mode):\n \"\"\"Get the input function for model export.\"\"\"\n super().input_fn(mode)\n return self.task.export_inputs()\n\n def set_experimental_environment(self):\n \"\"\"Set the experimental environment.\"\"\"\n # Set configuration\n session_conf = get_session_conf(self.config)\n\n task_config = self.config[\"data\"][\"task\"]\n batch_size = task_config['batch_size']\n num_epochs = task_config['epochs']\n\n saver_conf = self.config['solver']['saver']\n smax_to_keep = saver_conf['max_to_keep']\n save_checkpoint_steps = saver_conf['save_checkpoint_steps']\n resume_model_path = saver_conf.get('resume_model_path', None)\n print_every = saver_conf['print_every']\n\n return session_conf, smax_to_keep, batch_size, num_epochs, \\\n save_checkpoint_steps, \\\n resume_model_path, print_every\n\n def get_scaffold(self, mode, global_step=None):\n \"\"\"Get training scaffold.\"\"\"\n\n init_op = tf.global_variables_initializer()\n local_init_op = tf.tables_initializer()\n saver = self.get_saver(global_step)\n scaffold = tf.train.Scaffold(\n saver=saver, init_op=init_op, local_init_op=local_init_op)\n return scaffold\n\n def get_generated_model_path(self):\n \"\"\"Get the path of the checkpoint which is most recently 
generated during the training process.\"\"\"\n ckpt = get_ckpt_state(self.config)\n if ckpt is None:\n return None\n model_path = ckpt.model_checkpoint_path # pylint: disable=no-member\n return model_path\n\n def get_model_path(self, mode):\n \"\"\"Get the path of the checkpoint of the model.\"\"\"\n model_path = \"\"\n if \"{}_model_path\".format(mode) in self.config[\"solver\"][\"saver\"]:\n model_path = self.config[\"solver\"][\"saver\"][\"{}_model_path\".format(mode)]\n if model_path == \"\":\n model_path = self.get_generated_model_path()\n return model_path\n\n def build(self, mode: str):\n \"\"\"Build the model for training, eval and infer.\"\"\"\n inputs = self.input_fn(mode)\n logging.info(\"build input data done...\")\n\n model = self.model_fn()\n training = mode == utils.TRAIN\n model.logits = model(inputs[\"input_x_dict\"], training=training)\n model.input_x_len = inputs[\"input_x_len\"]\n model.iterator = inputs[\"iterator\"]\n model.input_x_dict = inputs[\"input_x_dict\"]\n model.input_x_len = inputs[\"input_x_len\"]\n model.temp_init_feed_dict = inputs[\"init_feed_dict\"]\n model.loss_fn = self.get_loss_fn()\n if mode != utils.INFER or not self.infer_no_label:\n input_y = inputs[\"input_y_dict\"][\"input_y\"]\n if isinstance(model.loss_fn, list):\n model.loss = []\n for i, one_loss_fn in enumerate(model.loss_fn):\n one_loss = one_loss_fn(\n labels=input_y[i],\n logits=model.logits[i],\n input_length=model.input_x_len,\n model=model,\n name=\"loss_{}\".format(i))\n model.loss.append(one_loss)\n model.loss_op = tf.add_n(model.loss, name=\"loss_sum\")\n else:\n model.loss = model.loss_fn(\n labels=input_y,\n logits=model.logits,\n input_length=model.input_x_len,\n model=model,\n name=\"loss\")\n model.loss_op = model.loss\n logging.info(\"model.loss done\")\n model.input_y = input_y\n\n # output related\n self.build_output(model)\n return model\n\n def build_export_model(self):\n \"\"\"Build the model for export.\"\"\"\n mode = utils.INFER\n export_inputs = self.export_input(mode)\n\n model = self.model_fn()\n training = mode == utils.TRAIN\n model.logits = model(export_inputs[\"model_inputs\"], training=training)\n model.model_inputs = export_inputs[\"model_inputs\"]\n model.export_inputs = export_inputs[\"export_inputs\"]\n model.input_x_len = export_inputs[\"model_inputs\"][\"input_x_len\"]\n\n # output related\n self.build_export_output(model)\n return model\n\n def build_output(self, model): # pylint: disable=no-self-use\n \"\"\"\n Build the output of the model.\n `score` and `input_y` are for loss calculation.\n `preds` and `y_ground_truth` are for metric calculation.\n \"\"\"\n raise NotImplementedError\n\n def build_export_output(self, model): # pylint: disable=no-self-use\n \"\"\"\n Build the output of the model for export.\n `score` and `input_y` are for loss calculation.\n `preds` and `y_ground_truth` are for metric calculation.\n \"\"\"\n raise NotImplementedError\n\n def eval(self):\n \"\"\"Evaluate the model.\"\"\"\n mode = utils.EVAL\n graph = tf.Graph()\n with graph.as_default():\n self.eval_or_infer_once(mode)\n\n def infer(self, **kwargs): # pylint: disable=unused-argument, arguments-differ\n \"\"\"Make an inference.\"\"\"\n mode = utils.INFER\n graph = tf.Graph()\n with graph.as_default():\n self.eval_or_infer_once(mode)\n\n def postproc_fn(self):\n \"\"\"Post-process function, called after inference.\"\"\"\n postproc = self.config['solver']['postproc']\n if isinstance(postproc, list):\n postproc_fn = []\n for one_postproc in postproc:\n 
postproc_fn.append(registers.postprocess[one_postproc[\"name\"]](\n self.config))\n else:\n postproc_fn = registers.postprocess[postproc[\"name\"]](self.config)\n return postproc_fn\n\n def eval_or_infer_once(self, mode):\n \"\"\"Do evaluation or inference once.\"\"\"\n model = self.build(mode)\n model.sess = tf.Session(config=self.session_conf)\n model.saver = tf.train.Saver()\n self.eval_or_infer_core(model, mode)\n model.sess.close()\n\n def eval_or_infer_core(self, model, mode): # pylint: disable=too-many-locals, too-many-branches, too-many-statements\n \"\"\"The core part of evaluation and inference.\"\"\"\n\n self.do_eval = bool(mode == utils.EVAL or not self.infer_no_label)\n self.is_multi_output = bool(isinstance(model.preds, (tuple, list)))\n if self.is_multi_output:\n self.output_num = len(model.preds)\n model_path = self.get_model_path(mode)\n if model_path is None:\n logging.warning(\"model_path is None!\")\n return\n\n with model.sess.graph.as_default():\n model.saver.restore(model.sess, save_path=model_path)\n if self.first_eval:\n model.sess.run(tf.tables_initializer())\n self.first_eval = False\n model.sess.run(\n model.iterator.initializer, feed_dict=model.temp_init_feed_dict)\n\n # Evaluation loop.\n data_size = self.config[\"data\"]['{}_data_size'.format(mode)]\n num_batch_every_epoch = int(math.ceil(data_size / self.batch_size))\n\n all_fetch_vals = []\n\n logging.info(\"Total eval data size: {}, \"\n \"batch num per epoch: {}\".format(data_size,\n num_batch_every_epoch))\n\n for i in range(num_batch_every_epoch):\n if self.do_eval:\n if self.is_multi_output:\n fetch_ops = model.loss + list(model.logits) + list(\n model.preds) + list(model.y_ground_truth)\n else:\n fetch_ops = [\n model.loss, model.logits, model.preds, model.y_ground_truth\n ]\n else:\n fetch_ops = [model.logits, model.preds]\n logging.debug(\"fetch_ops: {}\".format(fetch_ops))\n fetch_vals = model.sess.run(fetch_ops)\n\n end_id = (i + 1) * self.batch_size\n\n if data_size < end_id:\n logging.debug(\"data_size: {}, end_id: {}\".format(data_size, end_id))\n act_end_id = self.batch_size - end_id + data_size\n new_fetch_vals = []\n for fetch_val in fetch_vals:\n if np.isscalar(fetch_val):\n new_fetch_vals.append(fetch_val)\n else:\n new_fetch_vals.append(fetch_val[:act_end_id])\n else:\n new_fetch_vals = fetch_vals\n\n all_fetch_vals.append(new_fetch_vals)\n\n if i % self.print_every == 0 or i == num_batch_every_epoch - 1:\n logging.info(\"Evaluation \"\n \"progress: [ {:.2%} ]\".format(\n i / (num_batch_every_epoch - 1)))\n\n all_fetch_nps = []\n for one_fetch_vals in zip(*all_fetch_vals):\n if len(np.shape(one_fetch_vals[0])) <= 0: # pylint: disable=len-as-condition\n one_fetch_np = one_fetch_vals\n else:\n one_fetch_np = np.concatenate(one_fetch_vals, axis=0)\n all_fetch_nps.append(one_fetch_np)\n\n # reshape for multi-output\n if self.is_multi_output:\n logging.debug(\"all_fetch_nps before reshape: {}\".format(\n len(all_fetch_nps)))\n new_all_fetch_nps = []\n sub_fetch_nps = []\n for one_fetch_np in all_fetch_nps:\n sub_fetch_nps.append(one_fetch_np)\n if len(sub_fetch_nps) == self.output_num:\n new_all_fetch_nps.append(sub_fetch_nps)\n sub_fetch_nps = []\n\n logging.debug(\"new_all_fetch_nps after reshape: {}\".format(\n len(new_all_fetch_nps)))\n else:\n new_all_fetch_nps = all_fetch_nps\n\n if self.do_eval:\n _, _, preds_val, y_ground_truth_val = new_all_fetch_nps\n run_metrics(self.config, preds_val, y_ground_truth_val, mode)\n\n if mode == utils.INFER:\n if self.do_eval:\n _, logits_val, preds_val, _ = 
new_all_fetch_nps\n else:\n logits_val, preds_val = new_all_fetch_nps\n\n postproc_fn = self.postproc_fn()\n logging.info(postproc_fn)\n if isinstance(postproc_fn, list):\n for i, one_postproc_fn in enumerate(postproc_fn):\n predictions = {\n \"logits\": logits_val[i],\n \"preds\": preds_val[i],\n \"output_index\": i\n }\n one_postproc_fn(predictions, log_verbose=False)\n else:\n predictions = {\n \"logits\": logits_val,\n \"preds\": preds_val,\n \"output_index\": None\n }\n postproc_fn(predictions, log_verbose=False)\n\n def export_model(self):\n \"\"\"Export a model to tensorflow SavedModel.\"\"\"\n mode = utils.INFER\n graph = tf.Graph()\n with graph.as_default():\n infer_model = self.build_export_model()\n infer_model.sess = tf.Session(config=self.session_conf)\n infer_model.saver = tf.train.Saver()\n\n model_path = self.get_model_path(mode)\n infer_model.saver.restore(infer_model.sess, save_path=model_path)\n\n to_saved_model(self.config, infer_model.sess, infer_model.export_inputs,\n infer_model.output_dict)\n\n def train(self): # pylint: disable=too-many-locals\n \"\"\"Train the model.\"\"\"\n mode = utils.TRAIN\n train_model = self.build(mode)\n\n multitask = self.config['solver']['optimizer']['multitask']\n\n # Supervisor\n with tf.name_scope(\"train\"):\n global_step = tf.train.get_or_create_global_step()\n train_op = self.get_train_op(train_model.loss_op, multitask, global_step)\n\n checkpoint_dir = get_checkpoint_dir(self.config)\n\n # scaffold\n scaffold = self.get_scaffold(mode, global_step)\n\n ds_init_hook = DatasetInitializerHook(train_model.iterator,\n train_model.temp_init_feed_dict)\n with tf.train.MonitoredTrainingSession(\n checkpoint_dir=checkpoint_dir,\n scaffold=scaffold,\n hooks=[ds_init_hook],\n save_checkpoint_steps=self.save_checkpoint_steps,\n config=self.session_conf) as sess:\n # Training loop. 
For each batch...\n data_size = self.config['data']['train_data_size']\n num_epochs = self.config[\"data\"][\"task\"]['epochs']\n num_batch = int(math.ceil(data_size * num_epochs / self.batch_size))\n num_batch_per_epoch = int(data_size / self.batch_size)\n logging.info(\n \"num_batch: {}, num_batch_per_epoch: {}, num_epochs: {}\".format(\n num_batch, num_batch_per_epoch, num_epochs))\n for i in range(num_batch):\n _, _, out_loss = sess.run([train_op, global_step, train_model.loss_op])\n if i % self.print_every == 0 or i == num_batch - 1:\n logging.info(\"Training for epoch {}: [ {:.2%} ] loss is {:g}\".format(\n int(i / num_batch_per_epoch),\n (i % num_batch_per_epoch) / num_batch_per_epoch, out_loss))\n\n def train_and_eval(self): # pylint: disable=too-many-locals\n \"\"\"Train and evaluate the model.\"\"\"\n # train related\n g_train = tf.Graph()\n with g_train.as_default():\n logging.info(\"Compiling train model ...\")\n train_model = self.build(utils.TRAIN)\n # eval related\n g_eval = tf.Graph()\n with g_eval.as_default():\n logging.info(\"Compiling eval model ...\")\n eval_model = self.build(utils.EVAL)\n eval_model.sess = tf.Session(config=self.session_conf, graph=g_eval)\n eval_model.saver = tf.train.Saver()\n\n # start train\n with g_train.as_default():\n multitask = self.config['solver']['optimizer']['multitask']\n\n # Supervisor\n with tf.name_scope(\"train\"):\n global_step = tf.train.get_or_create_global_step()\n\n train_op = self.get_train_op(train_model.loss_op, multitask,\n global_step)\n\n checkpoint_dir = get_checkpoint_dir(self.config)\n\n # scaffold\n scaffold = self.get_scaffold(utils.TRAIN, global_step)\n\n ds_init_hook = DatasetInitializerHook(train_model.iterator,\n train_model.temp_init_feed_dict)\n with tf.train.MonitoredTrainingSession(\n checkpoint_dir=checkpoint_dir,\n scaffold=scaffold,\n hooks=[ds_init_hook],\n save_checkpoint_steps=self.save_checkpoint_steps,\n config=self.session_conf) as sess:\n # Training loop. For each batch...\n train_data_size = self.config['data']['train_data_size']\n num_batch = math.ceil(train_data_size * self.num_epochs /\n self.batch_size)\n num_batch_per_epoch = math.ceil(train_data_size / self.batch_size)\n logging.info(\"Total data size: {}, batch num: {}, \"\n \"batch num per epoch: {}\".format(train_data_size,\n num_batch,\n num_batch_per_epoch))\n for i in range(0, num_batch):\n\n if i % self.save_checkpoint_steps == 0 and i != 0:\n self.eval_or_infer_core(eval_model, utils.EVAL)\n _, _, out_loss = sess.run(\n [train_op, global_step, train_model.loss_op])\n if i % self.print_every == 0 or i == num_batch - 1 or (\n i +\n 1) % num_batch_per_epoch == 0 or i % num_batch_per_epoch == 0:\n logging.info(\n \"Training for epoch {}: [ {:.2%} ] loss is {:g}\".format(\n int(i / num_batch_per_epoch),\n (i % num_batch_per_epoch) / num_batch_per_epoch,\n out_loss))\n eval_model.sess.close()\n"
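The epoch and progress bookkeeping in ``train`` and ``train_and_eval`` above is plain arithmetic over batch counts (note that ``train`` floors the per-epoch count while ``train_and_eval`` uses ceil); a standalone illustration with illustrative sizes, not from the source:

    import math

    data_size, num_epochs, batch_size, print_every = 1000, 2, 32, 20
    num_batch = math.ceil(data_size * num_epochs / batch_size)  # 63 total steps
    num_batch_per_epoch = math.ceil(data_size / batch_size)     # 32 steps per epoch
    for i in range(num_batch):
        if i % print_every == 0 or i == num_batch - 1:
            epoch = i // num_batch_per_epoch
            progress = (i % num_batch_per_epoch) / num_batch_per_epoch
            print("epoch {} [ {:.2%} ]".format(epoch, progress))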
] |
[
[
"tensorflow.Graph",
"tensorflow.train.Scaffold",
"tensorflow.train.get_or_create_global_step",
"numpy.concatenate",
"tensorflow.global_variables_initializer",
"numpy.shape",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.add_n",
"tensorflow.train.Saver",
"numpy.isscalar",
"tensorflow.train.MonitoredTrainingSession",
"tensorflow.tables_initializer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
NK-CS-ZZL/Neural_Network
|
[
"4de6c4b94e87e2e154e94aca26b7d03c40124e05"
] |
[
"BasicMultiLayerPerceptron/mlpModel.py"
] |
[
"import numpy as np\nimport math\n\ndef sigmoid(z):\n\treturn np.array([1/(np.exp(-x)+1) for x in z])\n\n\nclass MLPModel:\n\t# w^(l)_(j,k) = weight[l][j][k]\n\t# b^(l)_(k) = biases[l][k]\n\t# z^(l)_(j) = ∑(k)w^(l)_(j,k)*a^(l-1)_(k) + b^(l)_(j)\n\t# = ∑(k)weight[l][j][k]*record[l-1][k] + biases[j]\n\t# a^(l)_(k) = record[l][k]\n\tdef __init__(self, layersInfo, learnRate):\n\t\tself.weights = [[]]\n\t\tself.biases = [[]]\n\t\tself.learnRate = learnRate\n\t\tfor i in range(len(layersInfo) - 1):\n\t\t\tweight = np.random.randn(layersInfo[i+1], layersInfo[i])\n\t\t\t# weight = np.ones((layersInfo[i + 1], layersInfo[i]))\n\t\t\tself.weights.append(weight)\n\t\t\tbias = np.random.randn(layersInfo[i+1])\n\t\t\t# bias = np.ones(layersInfo[i + 1])\n\t\t\tself.biases.append(bias)\n\t\tself.record = []\n\t\n\t\n\tdef feedForward(self, input):\n\t\tself.record = []\n\t\toutput = np.array(input)\n\t\tself.record.append(output)\n\t\tfor l in range(1, len(self.weights)):\n\t\t\tw = self.weights[l]\n\t\t\tb = self.biases[l]\n\t\t\toutput = sigmoid(w.dot(output) + b)\n\t\t\tself.record.append(output)\n\t\treturn output\n\t\n\tdef backPropagate(self, output, trueval):\n\t\tdeltas = []\n\t\tfor j in range(len(self.record[-1])):\n\t\t# \tsize = output.shape[0]\n\t\t# \tdvt = 0\n\t\t# \tfor n in range(size):\n\t\t# \t\tdvt += (output)\n\t\t# \tdvt /= size\n\t\t\tdelta = (output[j] - trueval[j]) * output[j] * (1 - output[j])\n\t\t\tdeltas.append(delta)\n\t\t\tself.biases[j] -= self.learnRate * delta\n\t\t\tk = 0\n\t\t\tfor k in range(len(self.record[-2])):\n\t\t\t\tself.weights[-1][j][k] -= self.learnRate * delta * self.record[-2][k]\n\t\t\n\t\tl = len(self.record) - 2\n\t\t\n\t\twhile l > 0:\n\t\t\tnewdeltas = []\n\t\t\tj = 0\n\t\t\tfor j in range(len(self.record[l])):\n\t\t\t\tnewdelta = 0\n\t\t\t\tk = 0\n\t\t\t\tfor k in range(len(self.record[l+1])):\n\t\t\t\t\tnewdelta += self.weights[l+1][k][j] * deltas[k] * \\\n\t\t\t\t\t self.record[l][j] * (1 - self.record[l][j])\n\t\t\t\tnewdeltas.append(newdelta)\n\t\t\t\tk = 0\n\t\t\t\tfor k in range(len(self.record[l-1])):\n\t\t\t\t\tself.weights[l][j][k] -= self.learnRate * newdelta * self.record[l-1][k]\n\t\t\t\tself.biases[l][j] -= self.learnRate * newdelta\n\t\t\tdeltas = newdeltas\n\t\t\tl-=1\n\tdef train(self, data, trueval):\n\t\tfor i in range(len(trueval)):\n\t\t\toutput = self.feedForward(data[i])\n\t\t\tself.backPropagate(output, trueval[i])\n\t\t\tprint(i)\n\n\t\nif __name__ == \"__main__\":\n\tmodel = MLPModel([2,64,1], 0.5)\n\tmetadata = [[0, 0], [1, 0],[0, 1],[1,1]]\n\tmetatrue = [[0], [1], [1], [0]]\n\tdata = []\n\ttrue = []\n\tprint(model.feedForward([0,0]))\n\tfor i in range(150000):\n\t\tdata.append(metadata[i%4])\n\t\ttrue.append(metatrue[i%4])\n\tmodel.train(data, true)\n\tprint(model.feedForward([0,0]))\n\tprint(model.feedForward([0,1]))\n\tprint(model.feedForward([1,0]))\n\tprint(model.feedForward([1,1]))\n\t\n"
] |
[
[
"numpy.exp",
"numpy.array",
"numpy.random.randn"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Janik135/AttentionDeepMIL
|
[
"03609a56a6527b9cab666d07037b0513be682577"
] |
[
"mains/main_featureimportance.py"
] |
[
"import numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nimport os\nimport torch\nfrom tqdm import tqdm\nfrom sklearn.metrics import balanced_accuracy_score\nfrom uv_dataloader import LeafDataset as DatasetGerste\nimport uuid\nimport argparse\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib import rc\nfrom torch.utils.tensorboard import SummaryWriter\nimport spectral\n\nmpl.rcParams['savefig.pad_inches'] = 0\n\nparser = argparse.ArgumentParser(description='Crazy Stuff')\nparser.add_argument('-m', '--mode', default=\"train\", type=str,\n help='What do you want to do? Select either train, test, full_image_test, attention')\nparser.add_argument('--lr', default=0.01, type=float,\n help='')\nparser.add_argument('-e', '--num_epochs', default=1000, type=int,\n help='')\nparser.add_argument('--lr_scheduler_steps', default=1000, type=int,\n help='')\nparser.add_argument('--lr_scheduler_laststep', default=1000000, type=int,\n help='epoch of last lr decay')\n\nparser.add_argument('--test_epoch', default=10, type=int,\n help='')\n\nparser.add_argument('--n_splits', default=5, type=int,\n help='')\nparser.add_argument('--split', default=0, type=int,\n help='')\nparser.add_argument('-ds', '--dataset',\n default=\"gerste\", type=str,\n help='')\nparser.add_argument('-dp', '--dataset_path', default=\"/Users/janik/Downloads/UV_Gerste/\", type=str,\n help='')\nparser.add_argument('--device', default=\"cuda\", type=str, help='')\nparser.add_argument('--save_dir', default=\"results_cv_new\", type=str, help='')\nparser.add_argument(\"--port\", default=57865)\n\n\n\ndef balanced_accuracy(target, pred):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n res = balanced_accuracy_score(target, pred)\n return res\n\n\ndef getPredAndTarget(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n return pred.view(-1).detach().cpu().numpy().tolist(), target.view(-1).detach().cpu().numpy().tolist()\n\n\ndef getDataset(mode, param_class, test_size=None):\n if isinstance(param_class, dict):\n _param_class = Dict(param_class)\n else:\n _param_class = param_class\n\n if test_size is None:\n test_size = _param_class.test_size\n\n hyperparams = {\n 'genotype': [\"WT\"],\n 'inoculated': [0, 1], # [0, 1],\n 'dai': [\"5\"],\n 'signature_pre_clip': 0,\n 'signature_post_clip': 250,\n 'test_size': 0.5,\n 'max_num_balanced_inoculated': 5000,\n 'num_classes': 2,\n 'num_heads': 2,\n 'hidden_layer_size': 64,\n 'savgol_filter_params': [7, 3]\n }\n if args.dataset == 'gerste':\n dataset_ = DatasetGerste(data_path=args.dataset_path,\n genotype=hyperparams['genotype'], inoculated=hyperparams['inoculated'],\n dai=hyperparams['dai'],\n test_size=hyperparams['test_size'],\n signature_pre_clip=hyperparams['signature_pre_clip'],\n signature_post_clip=hyperparams['signature_post_clip'],\n max_num_balanced_inoculated=hyperparams['max_num_balanced_inoculated'],\n num_samples_file=5,\n mode='train',\n n_splits=args.n_splits,\n split=args.split,\n superpixel=True)\n elif args.dataset == 'ruebe':\n dataset_ = DatasetRuebe(data_path=args.dataset_path,\n dai=_param_class.dai,\n test_size=_param_class.test_size,\n signature_pre_clip=_param_class.signature_pre_clip,\n signature_post_clip=_param_class.signature_post_clip,\n 
max_num_balanced_leafs=_param_class.max_num_balanced_leafs,\n num_samples_file=_param_class.num_samples_file,\n mode=mode,\n n_splits=args.n_splits,\n split=args.split,\n superpixel=_param_class.superpixel)\n else:\n raise ValueError('dataset unknown')\n\n return dataset_\n\n\ndef train_gradientboosting():\n global proctitle\n from uv_dataset.gradientboosting import gradientboosting\n import pickle\n setproctitle(proctitle + args.mode + \" GB\")\n param_class = get_param_class(args.data)\n run_id = args.data + '_' + str(uuid.uuid1())\n\n save_dir = \"./uv_dataset/data/{}/gb_superpixel/cv_{}/\".format(args.dataset, args.n_splits)\n data_file_name = os.path.join(save_dir, \"data_{}.p\".format(args.data))\n\n dataset_train = getDataset('train', param_class, test_size=0)\n\n print(\"Number of samples\", len(dataset_train))\n wavelength = dataset_train.wavelength\n if not os.path.isfile(data_file_name):\n print(\"Pre-processing data\")\n dataloader = DataLoader(dataset_train, batch_size=16, shuffle=False, num_workers=0) # len(dataset_train)\n\n \"\"\"\n it = iter(dataloader)\n X_train, y_train = next(it)\n \n it = iter(dataloader_test)\n X_test, y_test = next(it)\n \n print(len(X_train), len(y_train))\n print(len(X_test), len(y_test))\n \"\"\"\n X_, y_ = [], []\n for features, labels in tqdm(dataloader):\n X_ += features.tolist()\n labels = labels[2]\n y_ += labels.tolist()\n\n X_, y_ = np.array(X_), np.array(y_)\n\n os.makedirs(save_dir, exist_ok=True)\n\n if not os.path.isfile(data_file_name):\n pickle.dump({'X_': X_, 'y_': y_},\n open(data_file_name, \"wb\"))\n else:\n print(\"Loading pre-processed data\")\n data_dict = pickle.load(open(data_file_name, \"rb\"))\n X_, y_ = data_dict['X_'], data_dict['y_']\n\n save_dir = save_dir.replace('data/', 'results_gb_superpixel/{}/'.format(args.data))\n os.makedirs(save_dir, exist_ok=True)\n gradientboosting(X_, y_, param_class, save_dir, args.n_splits)\n\n\ndef test_gradientboosting():\n global proctitle\n from uv_dataset.gradientboosting import gradientboosting\n import pickle\n setproctitle(proctitle + args.mode + \" GB\")\n\n # path = 'uv_dataset/results/2786fc22-515a-11ea-b1c3-1c1b0d97d8cf/best_model.pth.tar'\n path = 'uv_dataset/{}'.format(args.save_dir) # /{}/best_model.pth.tar'\n # models_to_test_all = [x[0] for x in os.walk(path)][1:]\n results = dict()\n used_dict_classes_keys = [x for x in dict_classes_keys if \"_260\" in x]\n for class_key in sorted(used_dict_classes_keys):\n # models_to_test = [x for x in models_to_test_all if class_key in x]\n\n param_class = get_param_class(class_key)\n save_dir = \"./uv_dataset/data/{}/gb{}/cv_{}/\".format(args.dataset, args.save_dir, args.n_splits)\n data_file_name = os.path.join(save_dir, \"data_{}.p\".format(class_key))\n if os.path.isfile(data_file_name):\n print(\"Loading pre-processed data\")\n data_dict = pickle.load(open(data_file_name, \"rb\"))\n X_, y_ = data_dict['X_'], data_dict['y_']\n\n save_dir = save_dir.replace('data/', 'results_gb{}/{}/'.format(args.save_dir, class_key))\n print(\"--\" * 42)\n print(\"Testing\", class_key)\n results[class_key] = gradientboosting(X_, y_, param_class, save_dir, args.n_splits, mode=\"test\")\n\n for k in sorted(list(results.keys())):\n print(k, results[k]['mean'], \"%, +-\", results[k]['std'])\n\n\ndef train_attention():\n global proctitle\n\n param_class = get_param_class(args.data)\n run_id = args.data + '_' + '{}of{}cv'.format(args.split, args.n_splits) + '_' + str(uuid.uuid1())\n dataset_train = getDataset('train', param_class)\n dataset_test = 
getDataset('test', param_class)\n\n print(\"Number of samples train\", len(dataset_train))\n print(\"Number of samples test\", len(dataset_test))\n dataloader = DataLoader(dataset_train, batch_size=param_class.batch_size, shuffle=True, num_workers=0)\n dataloader_test = DataLoader(dataset_test, batch_size=param_class.batch_size, shuffle=False, num_workers=0,\n drop_last=False)\n\n hyperparams = dataset_train.hyperparams\n print(\"Number of batches train\", len(dataloader))\n print(\"Number of batches test\", len(dataloader_test))\n # Original class counts train: 67578 264112\n # Original class counts test: 68093 263597\n hyperparams['num_classes'] = param_class.num_classes\n hyperparams['hidden_layer_size'] = param_class.hidden_layer_size\n hyperparams['num_heads'] = param_class.num_heads\n hyperparams['lr'] = args.lr\n hyperparams['num_epochs'] = args.num_epochs\n hyperparams['lr_scheduler_steps'] = args.lr_scheduler_steps\n hyperparams['lr_scheduler_laststep'] = args.lr_scheduler_laststep\n # input(\"{} press key\".format(args.data))\n # exit()\n\n device = args.device\n\n model = SANNetwork(input_size=dataset_train.input_size,\n num_classes=hyperparams['num_classes'],\n hidden_layer_size=hyperparams['hidden_layer_size'],\n dropout=0.02,\n num_heads=hyperparams['num_heads'],\n device=device)\n\n # path = 'uv_dataset/results/0_first_good_resultWT_2786fc22-515a-11ea-b1c3-1c1b0d97d8cf/best_model.pth.tar'\n # checkpoint = torch.load(path)\n # model.load_state_dict(checkpoint['state_dict'])\n\n num_epochs = hyperparams['num_epochs']\n optimizer = torch.optim.Adam(model.parameters(), lr=hyperparams['lr'])\n # optimizer = torch.optim.SGD(model.parameters(), lr=hyperparams['lr'], momentum=0.9)\n\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, hyperparams['lr_scheduler_steps'], gamma=0.5, last_epoch=-1)\n num_params = sum(p.numel() for p in model.parameters())\n print(\"Number of parameters {}\".format(num_params))\n print(\"Starting training for {} epochs\".format(num_epochs))\n # os.makedirs('./uv_dataset/results', exist_ok=True)\n save_dir = \"./uv_dataset/{}/\".format(args.save_dir)\n writer = SummaryWriter(log_dir=save_dir + run_id, comment=\"_\" + \"_id_{}\".format(run_id))\n\n model.to(device)\n\n balanced_loss_weight = torch.tensor([1.] 
* hyperparams['num_classes'],\n device=device) # torch.tensor([0.75, 0.25], device=device)\n crit = torch.nn.CrossEntropyLoss(weight=balanced_loss_weight)\n best_acc = 0\n for epoch in tqdm(range(num_epochs)):\n setproctitle(proctitle + args.mode + \" {}|{}-{}cv|e {} of {}\".format(args.data,\n hyperparams[\"split\"],\n hyperparams[\"n_splits\"],\n epoch + 1,\n num_epochs))\n losses_per_batch = []\n correct = 0\n total = 0\n for i, (features, labels) in enumerate(dataloader):\n labels = labels[2]\n\n features = features.float().to(device)\n labels = labels.long().to(device)\n\n model.train()\n outputs = model.forward(features)\n outputs = outputs.view(labels.shape[0], -1)\n loss = crit(outputs, labels)\n optimizer.zero_grad()\n _, predicted = torch.max(outputs.data, 1)\n correct += (predicted == labels).sum().item()\n total += labels.size(0)\n loss.backward()\n optimizer.step()\n losses_per_batch.append(float(loss))\n mean_loss = np.mean(losses_per_batch)\n\n writer.add_scalar('Loss/train', mean_loss, epoch)\n writer.add_scalar('Accuracy/train', 100 * correct / total, epoch)\n print(\"Epoch {}, mean loss per batch {}, train acc {}\".format(epoch, mean_loss, 100 * correct / total))\n\n if (epoch + 1) % args.test_epoch == 0 or epoch + 1 == num_epochs:\n correct = 0\n total = 0\n model.eval()\n losses_per_batch = []\n with torch.no_grad():\n for i, (features, labels) in enumerate(dataloader_test):\n labels = labels[2]\n features = features.float().to(device)\n labels = labels.long().to(device)\n outputs = model.forward(features)\n loss = crit(outputs, labels)\n losses_per_batch.append(float(loss))\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n # batch_pred, batch_target = getPredAndTarget(outputs, labels)\n # correct += balanced_accuracy(batch_target, batch_pred) * labels.size(0) # mean\n # correct += (predicted == labels).sum().item()\n mean_loss = np.mean(losses_per_batch)\n writer.add_scalar('Loss/test', mean_loss, epoch)\n\n save_checkpoint(save_dir, {\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'hyper_params': hyperparams,\n 'eval_acc': 100 * correct / total,\n }, epoch + 1, best=(correct / total) >= best_acc, run_id=run_id)\n print('Accuracy of the network on the test samples: %f %%' % (\n 100 * correct / total))\n writer.add_scalar('Accuracy/test', 100 * correct / total, epoch)\n\n if (correct / total) >= best_acc:\n best_acc = (correct / total)\n model.train()\n if epoch < args.lr_scheduler_laststep:\n scheduler.step()\n\n\ndef test_model_accuracy():\n # path = 'uv_dataset/results/2786fc22-515a-11ea-b1c3-1c1b0d97d8cf/best_model.pth.tar'\n path = 'uv_dataset/{}'.format(args.save_dir) # /{}/best_model.pth.tar'\n checkpoint_template = 'best_model.pth.tar'\n models_to_test_all = [x[0] for x in os.walk(path)][1:]\n models_to_test_all = [x for x in models_to_test_all if \"_260_\" in x]\n print(models_to_test_all)\n # 1 / 0\n device = args.device\n results = dict()\n used_dict_classes_keys = [x for x in dict_classes_keys if \"_260\" in x]\n for class_key in used_dict_classes_keys:\n models_to_test = [x for x in models_to_test_all if class_key in x]\n print(models_to_test)\n # if len(models_to_test) != 5:\n # continue\n cv_acc = [0] * len(models_to_test)\n for checkpoint_dir in models_to_test:\n checkpoint_path = os.path.join(checkpoint_dir, checkpoint_template)\n print(checkpoint_path)\n checkpoint = torch.load(checkpoint_path)\n hyperparams = 
checkpoint['hyper_params']\n # print(\"Model with params:\", hyperparams)\n param_class = get_param_class(class_key)\n epoch = checkpoint['epoch']\n eval_acc = checkpoint['eval_acc']\n # print(\"\\n\")\n # print(class_key, epoch, eval_acc)\n # print(\"\\n\")\n cv_acc[int(hyperparams['split'])] = eval_acc\n continue\n \"\"\"\n dataset_test = LeafDataset(data_path=args.dataset_path,\n genotype=param_class.genotype, inoculated=param_class.inoculated,\n dai=param_class.dai,\n test_size=param_class.test_size,\n signature_pre_clip=param_class.signature_pre_clip,\n signature_post_clip=param_class.signature_post_clip,\n max_num_balanced_inoculated=param_class.max_num_balanced_inoculated,\n num_samples_file=param_class.num_samples_file,\n mode=\"test\",\n n_splits=hyperparams['n_splits'],\n split=hyperparams['split'],\n superpixel=param_class.superpixel)\n dataloader_test = DataLoader(dataset_test, batch_size=128, shuffle=False, num_workers=0)\n print(\"Number of batches test\", len(dataloader_test))\n\n model = SANNetwork(input_size=dataset_test.input_size,\n num_classes=hyperparams['num_classes'],\n hidden_layer_size=hyperparams['hidden_layer_size'],\n dropout=0.02,\n num_heads=hyperparams['num_heads'],\n device=device)\n\n epoch = checkpoint['epoch']\n eval_acc = checkpoint['eval_acc']\n print(\"Loaded model with Acc of {} trained for {} epochs\".format(eval_acc, epoch))\n model.load_state_dict(checkpoint['state_dict'])\n model.to(device)\n model.eval()\n\n correct = 0\n total = 0\n model.eval()\n with torch.no_grad():\n for i, (features, labels) in enumerate(tqdm(dataloader_test)):\n labels = labels[2]\n features = features.float().to(device)\n labels = labels.long().to(device)\n outputs = model.forward(features)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n batch_pred, batch_target = getPredAndTarget(outputs, labels)\n correct += balanced_accuracy(batch_target, batch_pred) * labels.size(0) # mean\n # correct += (predicted == labels).sum().item()\n\n print('Accuracy of the network on the test samples: %d %%' % (\n 100 * correct / total))\n \"\"\"\n results[class_key] = {'all': cv_acc,\n 'mean': round(np.mean(cv_acc).item(), 2),\n 'std': round(np.std(cv_acc).item(), 2)}\n\n for k in sorted(list(results.keys())):\n print(k, results[k]['mean'], \"+-\", results[k]['std'])\n # print(results)\n\n\ndef test_attention_acc_on_full_image():\n mpl.rcParams['figure.figsize'] = (20.0, 8.0)\n\n models_to_test = [\"run_dgx2/P22_3_dcf23826-54be-11ea-9a44-0242ac150002\",\n \"run_dgx2/WT_3_dcf9eddc-54be-11ea-a0ce-0242ac150002\"]\n path = 'uv_dataset/results/{}/best_model.pth.tar'\n\n checkpoint = torch.load(path.format(models_to_test[0]))\n if 'hyper_params' in list(checkpoint.keys()):\n hyperparams = checkpoint['hyper_params']\n else:\n hyperparams = {\n 'genotype': [\"WT\"],\n 'inoculated': [0], # [0, 1],\n 'dai': [\"5\"],\n 'signature_pre_clip': 0,\n 'signature_post_clip': 250,\n 'test_size': 0.5,\n 'max_num_balanced_inoculated': 5000,\n 'num_classes': 2,\n 'num_heads': 2,\n 'hidden_layer_size': 64,\n 'savgol_filter_params': [7, 3]\n }\n print(hyperparams)\n # exit()\n if args.dataset == 'gerste':\n dataset = DatasetGerste(genotype=hyperparams['genotype'],\n inoculated=[0, 1],\n dai=hyperparams['dai'],\n test_size=hyperparams['test_size'],\n signature_pre_clip=hyperparams['signature_pre_clip'],\n signature_post_clip=hyperparams['signature_post_clip'],\n max_num_balanced_inoculated=hyperparams['max_num_balanced_inoculated'], mode='test')\n elif args.dataset == 'ruebe':\n dataset = 
DatasetRuebe(leaf_type=hyperparams['leaf_type'],\n dai=hyperparams['dai'],\n test_size=hyperparams['test_size'],\n signature_pre_clip=hyperparams['signature_pre_clip'],\n signature_post_clip=hyperparams['signature_post_clip'],\n max_num_balanced_leafs=hyperparams['max_num_balanced_leafs'], mode='test')\n else:\n raise ValueError('unknown dataset')\n\n device = \"cuda\"\n model = SANNetwork(input_size=dataset.input_size,\n num_classes=hyperparams['num_classes'],\n hidden_layer_size=hyperparams['hidden_layer_size'],\n dropout=0.02,\n num_heads=hyperparams['num_heads'],\n device=\"cuda\")\n\n epoch = checkpoint['epoch']\n eval_acc = \"Something\" # checkpoint['eval_acc']\n print(\"Loaded model with Acc of {} trained for {} epochs\".format(eval_acc, epoch))\n model.load_state_dict(checkpoint['state_dict'])\n model.to(device)\n model.eval()\n\n def forward(x):\n x = torch.tensor(x, dtype=torch.float32)\n x = x.to(device)\n return model.forward(x)\n\n dataset.test_full_image(forward, num_images_per_class=10)\n\n\ndef _create_fig(hyperparams, title=\"\", figsize=(10, 5)):\n fig = plt.figure(figsize=figsize)\n # plt.title(str(hyperparams['genotype'][0]) + \" \" + title, fontdict={\"fontsize\": 24})\n ax = plt.gca()\n ax.tick_params(axis='both', which='major', labelsize=20)\n ax.yaxis.set_major_locator(MaxNLocator(prune='both'))\n ax.xaxis.set_major_locator(MaxNLocator(prune='both', nbins=26))\n return fig, ax\n\n\ndef test_attention_weights():\n path_template = 'uv_dataset/{}/best_model.pth.tar'\n if args.dataset == 'gerste':\n result_dir = 'cv_superpixel_2run'\n labels = [0, 1]\n label_dict_name = 'genotype'\n\n elif args.dataset == 'ruebe':\n result_dir = 'results_ruebe/cv_superpixel'\n labels = [0, 1, 2]\n label_dict_name = 'leaftype'\n\n else:\n raise ValueError('not implemented')\n for checkpoint_dirs_cv in superpixel_trained_models:\n # attn_cv_mean = []\n # attn_cv_std = []\n\n checkpoint_dirs = [\"{}/\".format(result_dir) + x for x in checkpoint_dirs_cv]\n i_label = \"both\"\n #for i_label in labels:\n attn_cv = []\n for checkpoint_dir in checkpoint_dirs:\n path = path_template.format(checkpoint_dir)\n checkpoint = torch.load(path)\n if 'hyper_params' in list(checkpoint.keys()):\n hyperparams = checkpoint['hyper_params']\n if args.dataset == 'ruebe' and 'max_num_balanced_inoculated' in hyperparams.keys():\n hyperparams['max_num_balanced_leafs'] = hyperparams['max_num_balanced_inoculated']\n else:\n raise ValueError(\"Should not happen\")\n\n dataset = getDataset('test', hyperparams)\n\n if hyperparams[label_dict_name] is None:\n hyperparams[label_dict_name] = 'all'\n\n device = \"cuda\"\n model = SANNetwork(input_size=dataset.input_size,\n num_classes=hyperparams['num_classes'],\n hidden_layer_size=hyperparams['hidden_layer_size'],\n dropout=0.02,\n num_heads=hyperparams['num_heads'],\n device=\"cuda\")\n\n epoch = checkpoint['epoch']\n eval_acc = checkpoint['eval_acc']\n print(\"Loaded model with Acc of {} trained for {} epochs\".format(eval_acc, epoch))\n model.load_state_dict(checkpoint['state_dict'])\n model.to(device)\n model.eval()\n\n dataloader = DataLoader(dataset, batch_size=len(dataset), shuffle=False, num_workers=0)\n\n wavelength = dataset.wavelength\n plt.close()\n plt.clf()\n\n palette = sns.color_palette(palette='Set2', n_colors=None, desat=None)\n\n threshold = 0.05\n\n # for i, (features, _) in enumerate(dataloader):\n it = iter(dataloader)\n features, labels = next(it)\n features = features.float().to(device)\n labels = labels[2]\n labels = labels.long().to(device)\n\n 
#features = features[labels == i_label]\n #labels = labels[labels == i_label]\n\n attn = model.get_attention(features)\n\n outputs = model.forward(features)\n outputs = outputs.view(labels.shape[0], -1)\n\n _, predicted = torch.max(outputs.data, 1)\n\n print(hyperparams)\n print(\"Running \" + str(hyperparams[label_dict_name]))\n\n attn_mean = torch.mean(attn, dim=(0,)).detach().cpu().numpy()\n attn_std = torch.std(attn, dim=(0,)).detach().cpu().numpy()\n attn = attn[predicted == labels]\n print(\"Test num correct\", len(attn), \"/\", len(features))\n attn_cv += attn.detach().cpu().numpy().tolist()\n # attn_cv_mean.append(attn_mean)\n # attn_cv_std.append(attn_std)\n\n attn_cv = np.array(attn_cv)\n attn_mean = np.mean(attn_cv, axis=(0,))\n attn_std = np.std(attn_cv, axis=(0,))\n fig, ax = _create_fig(hyperparams, \"\", figsize=(5, 5))\n\n plt.xlabel(\"Wavelength [nm]\", fontsize=22)\n plt.ylabel(\"Feature Importance [0-1]\", fontsize=22)\n\n xticks = np.array(wavelength)[attn_mean > threshold]\n y_data = attn_mean[attn_mean > threshold]\n y_err = attn_std[attn_mean > threshold]\n #print(y_err)\n y_err = np.array([[np.minimum(y_data[i], err), err] for i, err in enumerate(y_err)]).T\n #print(y_err)\n # input(\"press key\")\n ax.bar(np.arange(len(y_data)), y_data,\n yerr=y_err, align=\"center\", width=.8,\n ecolor=palette[7],\n capsize=4., )\n # ax.set_xticks(xticks)\n plt.xticks(np.arange(len(y_data)), xticks, rotation='vertical') #\n plt.ylim(top=0.35)\n save_dir = \"uv_dataset/{}/plots_\".format(args.dataset) + result_dir + \"/{}\".format(\n str(hyperparams[label_dict_name]))\n os.makedirs(save_dir, exist_ok=True)\n print(save_dir)\n fig.savefig(save_dir + \"/{}_feature_importance.png\".format(i_label),\n bbox_inches='tight', dpi=300)\n #plt.show()\n plt.clf()\n plt.close()\n exit()\n\n\ndef test_attention_weights_image():\n path_template = 'uv_dataset/results/{}/best_model.pth.tar'\n checkpoint_dirs = [\"cv_superpixel_2run/WT_superpixel_dai5_260_5789cf2c-6108-11ea-8cea-0242ac150002\"]\n\n for checkpoint_dir in checkpoint_dirs:\n path = path_template.format(checkpoint_dir)\n checkpoint = torch.load(path)\n if 'hyper_params' in list(checkpoint.keys()):\n hyperparams = checkpoint['hyper_params']\n # print(hyperparams)\n # continue\n if hyperparams['n_splits'] not in list(hyperparams.keys()):\n hyperparams['n_splits'] = args.n_splits\n if hyperparams['split'] not in list(hyperparams.keys()):\n hyperparams['split'] = args.split\n\n else:\n raise ValueError(\"Should not happen\")\n dataset = getDataset('test', hyperparams)\n\n device = \"cuda\"\n model = SANNetwork(input_size=dataset.input_size,\n num_classes=hyperparams['num_classes'],\n hidden_layer_size=hyperparams['hidden_layer_size'],\n dropout=0.02,\n num_heads=hyperparams['num_heads'],\n device=\"cuda\")\n\n epoch = checkpoint['epoch']\n eval_acc = checkpoint['eval_acc']\n print(\"Loaded model with Acc of {} trained for {} epochs\".format(eval_acc, epoch))\n model.load_state_dict(checkpoint['state_dict'])\n model.to(device)\n model.eval()\n\n wavelength = dataset.wavelength\n\n def forward(x):\n x = torch.tensor(x, dtype=torch.float32)\n x = x.to(device)\n return model.forward(x)\n\n num_images_per_class = 2\n res_samples = dataset.test_full_image(forward, num_images_per_class=num_images_per_class)\n print(\"Number Images\", len(res_samples))\n cnt_selected_inoculated = [0, 0]\n for res_sample in res_samples:\n i_label = res_sample['label']\n save_dir = \"uv_dataset/plots_cv/{}_{}/{}_{}\".format(hyperparams['n_splits'], 
hyperparams['split'],\n str(hyperparams['genotype'][0]),\n cnt_selected_inoculated[i_label])\n cnt_selected_inoculated[i_label] += 1\n plt_title = \"control\" if res_sample['label'] == 0 else \"inoculated\"\n\n res_sample_mask = res_sample['mask'].astype(float)\n palette = sns.color_palette(palette='Set2', n_colors=None, desat=None)\n\n print(\"Saving to \", save_dir)\n os.makedirs(save_dir, exist_ok=True)\n\n view = spectral.imshow(res_sample['img'],\n classes=res_sample['pred'])\n\n hs_img_view = view.data_rgb\n plt.clf()\n plt.close()\n\n f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharey=True, sharex=True)\n ax1.imshow(hs_img_view)\n\n ax2.imshow(hs_img_view)\n ax2.imshow(res_sample_mask, alpha=1., vmin=0, vmax=2. if res_sample['label'] == 0 else 1.)\n\n ax3.imshow(hs_img_view)\n ax3.imshow(res_sample['pred'], alpha=1.)\n ax1.axis('off')\n ax2.axis('off')\n ax3.axis('off')\n ax1.set_aspect('equal')\n ax1.set_xticklabels([])\n ax1.set_yticklabels([])\n ax2.set_aspect('equal')\n ax2.set_xticklabels([])\n ax2.set_yticklabels([])\n ax3.set_aspect('equal')\n ax3.set_xticklabels([])\n ax3.set_yticklabels([])\n # plt.show()\n plt.subplots_adjust(top=0.25, wspace=0, hspace=0.2)\n\n plt.savefig(save_dir + \"/{}_sample_viz.png\".format(i_label),\n bbox_inches='tight', dpi=300)\n plt.clf()\n plt.close()\n\n features = res_sample['img'][res_sample['mask'] == 1]\n assert features.shape[0] == np.sum(res_sample['mask'])\n\n features = np.array([dataset.normalize(f) for f in features])\n features = torch.tensor(features, dtype=torch.float32)\n features = features.to(device)\n attn = model.get_attention(features)\n\n attn_mean = torch.mean(attn, dim=(0,)).detach().cpu().numpy()\n attn_std = torch.std(attn, dim=(0,)).detach().cpu().numpy()\n\n fig, ax = _create_fig(hyperparams, plt_title, figsize=(10, 5))\n\n plt.xlabel(\"Wavelength [nm]\", fontsize=22)\n plt.ylabel(\"Feature Importance [0-1]\", fontsize=22)\n\n # ax.plot(wavelength, attn_mean, linewidth=4)\n attn_mean_plt = attn_mean.copy()\n attn_mean_plt_inv = attn_mean.copy()\n attn_std_plt = attn_std.copy()\n attn_mean_plt[attn_mean <= 0.05] = 0.\n attn_mean_plt_inv[attn_mean > 0.05] = 0.\n\n attn_std_plt[attn_mean <= 0.05] = None\n\n ax.bar(wavelength, attn_mean_plt, align=\"center\",\n width=3.,\n ecolor=palette[7],\n capsize=4.)\n ax.bar(wavelength, attn_mean_plt_inv, align=\"center\", width=1.)\n\n xticks = np.array(wavelength)[attn_mean > 0.05]\n ax.set_xticks(xticks)\n ax.set_xticklabels(xticks, rotation='vertical')\n\n fig.savefig(save_dir + \"/{}_mean.png\".format(i_label),\n bbox_inches='tight', dpi=300)\n plt.clf()\n plt.close()\n\n fig, ax = _create_fig(hyperparams, plt_title, figsize=(5, 5))\n\n plt.xlabel(\"Wavelength [nm]\", fontsize=22)\n plt.ylabel(\"Feature Importance [0-1]\", fontsize=22)\n\n xticks = np.array(wavelength)[attn_mean > 0.05]\n y_data = attn_mean[attn_mean > 0.05]\n y_err = attn_std[attn_mean > 0.05]\n ax.bar(np.arange(len(y_data)), y_data,\n yerr=y_err, align=\"center\", width=.8,\n ecolor=palette[7],\n capsize=4.)\n # ax.set_xticks(xticks)\n plt.xticks(np.arange(len(y_data)), xticks, rotation='vertical')\n fig.savefig(save_dir + \"/{}_feature_importance.png\".format(i_label),\n bbox_inches='tight', dpi=300)\n plt.clf()\n plt.close()\n\n fig, ax = _create_fig(hyperparams, plt_title)\n\n plt.xlabel(\"Wavelength [nm]\", fontsize=22)\n plt.ylabel(\"Feature Importance [0-1]\", fontsize=22)\n\n ax.bar(wavelength, attn_std, width=2.)\n\n # xticks = np.array(wavelength)[attn_mean > 0.05]\n # ax.set_xticks(xticks)\n # 
ax.set_xticklabels(xticks, rotation='vertical')\n\n # ax.set_yticks(xticks)\n\n fig.savefig(save_dir + \"/{}_std.png\".format(i_label),\n bbox_inches='tight', dpi=300)\n plt.clf()\n plt.close()\n\n feature = features[0:100]\n attn_times_sample = model.forward_attention(feature, return_softmax=False)\n attn_times_sample = torch.mean(attn_times_sample, dim=(0,)).detach().cpu().numpy()\n\n attn = model.get_attention(features)\n attn_mean = torch.mean(attn, dim=(0,)).detach().cpu().numpy()\n\n feature = torch.mean(feature, dim=(0,)).detach().cpu().numpy()\n\n fig, ax = _create_fig(hyperparams, plt_title)\n\n plt.xlabel(\"Wavelength [nm]\", fontsize=22)\n plt.ylabel(\"Feature Importance [0-1]\", fontsize=22)\n\n attn_mean_plt = attn.copy()\n attn_mean_plt_inv = attn.copy()\n attn_mean_plt[attn <= 0.05] = 0.\n attn_mean_plt_inv[attn > 0.05] = 0.\n\n ax.bar(wavelength, attn_mean_plt, align=\"center\",\n width=3.,\n ecolor=palette[7],\n capsize=4.)\n ax.bar(wavelength, attn_mean_plt_inv, align=\"center\", width=1.)\n\n xticks = np.array(wavelength)[attn > 0.05]\n ax.set_xticks(xticks)\n ax.set_xticklabels(xticks, rotation='vertical')\n\n fig.savefig(save_dir + \"/{}_sample_attn.png\".format(i_label),\n bbox_inches='tight', dpi=300)\n plt.clf()\n plt.close()\n\n fig, ax = _create_fig(hyperparams, plt_title)\n\n plt.xlabel(\"Wavelength [nm]\", fontsize=22)\n # plt.ylabel(\"Feature Importance [0-1]\", fontsize=22)\n\n ax.plot(wavelength, feature, linewidth=2, alpha=0.6)\n fig.savefig(save_dir + \"/{}_sample.png\".format(i_label),\n bbox_inches='tight', dpi=300)\n plt.clf()\n plt.close()\n\n fig, ax = _create_fig(hyperparams, plt_title)\n ax.plot(wavelength, attn_times_sample, linewidth=2, alpha=0.6)\n fig.savefig(save_dir + \"/{}_sample_after_attn.png\".format(i_label),\n bbox_inches='tight', dpi=300)\n plt.clf()\n plt.close()\n\n\ndef save_checkpoint(save_dir, state, epoch, best, run_id, filename='checkpoint.pth.tar'):\n save_path_checkpoint = os.path.join(os.path.join(save_dir, run_id), filename)\n\n os.makedirs(os.path.dirname(save_path_checkpoint), exist_ok=True)\n if epoch % 10 == 0:\n torch.save(state, save_path_checkpoint)\n if best:\n torch.save(state, save_path_checkpoint.replace('checkpoint.pth.tar', 'best_model.pth.tar'))\n\n\nif __name__ == '__main__':\n torch.manual_seed(0)\n np.random.seed(0)\n\n args = parser.parse_args()\n\n if args.dataset == 'gerste':\n from uv_dataset.hyperparams.hyperparams import get_param_class, dict_classes_keys, superpixel_trained_models\n elif args.dataset == 'ruebe':\n from uv_dataset.hyperparams.hyperparams_ruebe import get_param_class, dict_classes_keys, \\\n superpixel_trained_models\n\n if args.device == \"cpu\":\n torch.set_num_threads(2)\n\n global proctitle\n proctitle = \"PS43 \"\n setproctitle(proctitle + args.mode + \" | warming up\")\n\n if args.mode == 'train':\n train_attention()\n elif args.mode == 'test':\n test_model_accuracy()\n elif args.mode == 'train_gb':\n train_gradientboosting()\n elif args.mode == 'test_gb':\n test_gradientboosting()\n elif args.mode == 'full_image_test':\n test_attention_acc_on_full_image()\n elif args.mode == 'attention':\n test_attention_weights()\n elif args.mode == 'attention_image':\n test_attention_weights_image()\n else:\n print(\"Nothing to do here\")\n exit()\n"
] |
[
[
"torch.mean",
"numpy.minimum",
"torch.max",
"torch.load",
"torch.utils.data.DataLoader",
"torch.no_grad",
"numpy.mean",
"torch.set_num_threads",
"torch.save",
"matplotlib.pyplot.gca",
"torch.nn.CrossEntropyLoss",
"torch.tensor",
"numpy.std",
"torch.std",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.figure",
"torch.optim.lr_scheduler.StepLR",
"matplotlib.pyplot.ylim",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"numpy.random.seed",
"sklearn.metrics.balanced_accuracy_score",
"torch.manual_seed",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.clf",
"matplotlib.ticker.MaxNLocator",
"matplotlib.pyplot.xlabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
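The training script in the row above pairs a class-weighted torch.nn.CrossEntropyLoss with a keep-the-best checkpointing rule (best=(correct / total) >= best_acc). A minimal sketch of that pattern follows; the inverse-frequency weight formula and the class counts are illustrative assumptions, since the weight expression is only partially visible in the dump above:

import torch

num_classes = 2
class_counts = torch.tensor([750.0, 250.0])  # hypothetical per-class sample counts
# Inverse-frequency weighting: rarer classes get larger loss weights.
weights = class_counts.sum() / (num_classes * class_counts)
crit = torch.nn.CrossEntropyLoss(weight=weights)

best_acc = 0.0

def maybe_save(model, optimizer, acc, path="best_model.pth.tar"):
    # Save only when eval accuracy matches or beats the best seen so far,
    # mirroring the best=(correct / total) >= best_acc check in the script.
    global best_acc
    if acc >= best_acc:
        best_acc = acc
        torch.save({"state_dict": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "eval_acc": acc}, path)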
basanto/HistomicsTK
|
[
"f3dbd93a7f31c7825574f9ccf0b86e09e9fee360",
"f3dbd93a7f31c7825574f9ccf0b86e09e9fee360",
"f3dbd93a7f31c7825574f9ccf0b86e09e9fee360",
"f3dbd93a7f31c7825574f9ccf0b86e09e9fee360"
] |
[
"histomicstk/deeplab/evaluation/panoptic_quality.py",
"histomicstk/segmentation/nuclear/gvf_tracking.py",
"histomicstk/preprocessing/color_normalization/deconvolution_based_normalization.py",
"histomicstk/preprocessing/color_conversion/lab_to_rgb.py"
] |
[
"# Lint as: python2, python3\n# Copyright 2019 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implementation of the Panoptic Quality metric.\n\nPanoptic Quality is an instance-based metric for evaluating the task of\nimage parsing, aka panoptic segmentation.\n\nPlease see the paper for details:\n\"Panoptic Segmentation\", Alexander Kirillov, Kaiming He, Ross Girshick,\nCarsten Rother and Piotr Dollar. arXiv:1801.00868, 2018.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport numpy as np\nimport prettytable\nimport six\n\nfrom deeplab.evaluation import base_metric\n\n\ndef _ids_to_counts(id_array):\n \"\"\"Given a numpy array, a mapping from each unique entry to its count.\"\"\"\n ids, counts = np.unique(id_array, return_counts=True)\n return dict(six.moves.zip(ids, counts))\n\n\nclass PanopticQuality(base_metric.SegmentationMetric):\n \"\"\"Metric class for Panoptic Quality.\n\n \"Panoptic Segmentation\" by Alexander Kirillov, Kaiming He, Ross Girshick,\n Carsten Rother, Piotr Dollar.\n https://arxiv.org/abs/1801.00868\n \"\"\"\n\n def compare_and_accumulate(\n self, groundtruth_category_array, groundtruth_instance_array,\n predicted_category_array, predicted_instance_array):\n \"\"\"See base class.\"\"\"\n # First, combine the category and instance labels so that every unique\n # value for (category, instance) is assigned a unique integer label.\n pred_segment_id = self._naively_combine_labels(predicted_category_array,\n predicted_instance_array)\n gt_segment_id = self._naively_combine_labels(groundtruth_category_array,\n groundtruth_instance_array)\n\n # Pre-calculate areas for all groundtruth and predicted segments.\n gt_segment_areas = _ids_to_counts(gt_segment_id)\n pred_segment_areas = _ids_to_counts(pred_segment_id)\n\n # We assume there is only one void segment and it has instance id = 0.\n void_segment_id = self.ignored_label * self.max_instances_per_category\n\n # There may be other ignored groundtruth segments with instance id > 0, find\n # those ids using the unique segment ids extracted with the area computation\n # above.\n ignored_segment_ids = {\n gt_segment_id for gt_segment_id in six.iterkeys(gt_segment_areas)\n if (gt_segment_id //\n self.max_instances_per_category) == self.ignored_label\n }\n\n # Next, combine the groundtruth and predicted labels. 
Dividing up the pixels\n # based on which groundtruth segment and which predicted segment they belong\n # to, this will assign a different 32-bit integer label to each choice\n # of (groundtruth segment, predicted segment), encoded as\n # gt_segment_id * offset + pred_segment_id.\n intersection_id_array = (\n gt_segment_id.astype(np.uint32) * self.offset +\n pred_segment_id.astype(np.uint32))\n\n # For every combination of (groundtruth segment, predicted segment) with a\n # non-empty intersection, this counts the number of pixels in that\n # intersection.\n intersection_areas = _ids_to_counts(intersection_id_array)\n\n # Helper function that computes the area of the overlap between a predicted\n # segment and the ground-truth void/ignored segment.\n def prediction_void_overlap(pred_segment_id):\n void_intersection_id = void_segment_id * self.offset + pred_segment_id\n return intersection_areas.get(void_intersection_id, 0)\n\n # Compute overall ignored overlap.\n def prediction_ignored_overlap(pred_segment_id):\n total_ignored_overlap = 0\n for ignored_segment_id in ignored_segment_ids:\n intersection_id = ignored_segment_id * self.offset + pred_segment_id\n total_ignored_overlap += intersection_areas.get(intersection_id, 0)\n return total_ignored_overlap\n\n # Sets that are populated with which segments groundtruth/predicted segments\n # have been matched with overlapping predicted/groundtruth segments\n # respectively.\n gt_matched = set()\n pred_matched = set()\n\n # Calculate IoU per pair of intersecting segments of the same category.\n for intersection_id, intersection_area in six.iteritems(intersection_areas):\n gt_segment_id = intersection_id // self.offset\n pred_segment_id = intersection_id % self.offset\n\n gt_category = gt_segment_id // self.max_instances_per_category\n pred_category = pred_segment_id // self.max_instances_per_category\n if gt_category != pred_category:\n continue\n\n # Union between the groundtruth and predicted segments being compared does\n # not include the portion of the predicted segment that consists of\n # groundtruth \"void\" pixels.\n union = (\n gt_segment_areas[gt_segment_id] +\n pred_segment_areas[pred_segment_id] - intersection_area -\n prediction_void_overlap(pred_segment_id))\n iou = intersection_area / union\n if iou > 0.5:\n self.tp_per_class[gt_category] += 1\n self.iou_per_class[gt_category] += iou\n gt_matched.add(gt_segment_id)\n pred_matched.add(pred_segment_id)\n\n # Count false negatives for each category.\n for gt_segment_id in six.iterkeys(gt_segment_areas):\n if gt_segment_id in gt_matched:\n continue\n category = gt_segment_id // self.max_instances_per_category\n # Failing to detect a void segment is not a false negative.\n if category == self.ignored_label:\n continue\n self.fn_per_class[category] += 1\n\n # Count false positives for each category.\n for pred_segment_id in six.iterkeys(pred_segment_areas):\n if pred_segment_id in pred_matched:\n continue\n # A false positive is not penalized if is mostly ignored in the\n # groundtruth.\n if (prediction_ignored_overlap(pred_segment_id) /\n pred_segment_areas[pred_segment_id]) > 0.5:\n continue\n category = pred_segment_id // self.max_instances_per_category\n self.fp_per_class[category] += 1\n\n return self.result()\n\n def _valid_categories(self):\n \"\"\"Categories with a \"valid\" value for the metric, have > 0 instances.\n\n We will ignore the `ignore_label` class and other classes which have\n `tp + fn + fp = 0`.\n\n Returns:\n Boolean array of shape `[num_categories]`.\n \"\"\"\n 
valid_categories = np.not_equal(\n self.tp_per_class + self.fn_per_class + self.fp_per_class, 0)\n if self.ignored_label >= 0 and self.ignored_label < self.num_categories:\n valid_categories[self.ignored_label] = False\n return valid_categories\n\n def detailed_results(self, is_thing=None):\n \"\"\"See base class.\"\"\"\n valid_categories = self._valid_categories()\n\n # If known, break down which categories are valid _and_ things/stuff.\n category_sets = collections.OrderedDict()\n category_sets['All'] = valid_categories\n if is_thing is not None:\n category_sets['Things'] = np.logical_and(valid_categories, is_thing)\n category_sets['Stuff'] = np.logical_and(valid_categories,\n np.logical_not(is_thing))\n\n # Compute individual per-class metrics that constitute factors of PQ.\n sq = base_metric.realdiv_maybe_zero(self.iou_per_class, self.tp_per_class)\n rq = base_metric.realdiv_maybe_zero(\n self.tp_per_class,\n self.tp_per_class + 0.5 * self.fn_per_class + 0.5 * self.fp_per_class)\n pq = np.multiply(sq, rq)\n\n # Assemble detailed results dictionary.\n results = {}\n for category_set_name, in_category_set in six.iteritems(category_sets):\n if np.any(in_category_set):\n results[category_set_name] = {\n 'pq': np.mean(pq[in_category_set]),\n 'sq': np.mean(sq[in_category_set]),\n 'rq': np.mean(rq[in_category_set]),\n # The number of categories in this subset.\n 'n': np.sum(in_category_set.astype(np.int32)),\n }\n else:\n results[category_set_name] = {'pq': 0, 'sq': 0, 'rq': 0, 'n': 0}\n\n return results\n\n def result_per_category(self):\n \"\"\"See base class.\"\"\"\n sq = base_metric.realdiv_maybe_zero(self.iou_per_class, self.tp_per_class)\n rq = base_metric.realdiv_maybe_zero(\n self.tp_per_class,\n self.tp_per_class + 0.5 * self.fn_per_class + 0.5 * self.fp_per_class)\n return np.multiply(sq, rq)\n\n def print_detailed_results(self, is_thing=None, print_digits=3):\n \"\"\"See base class.\"\"\"\n results = self.detailed_results(is_thing=is_thing)\n\n tab = prettytable.PrettyTable()\n\n tab.add_column('', [], align='l')\n for fieldname in ['PQ', 'SQ', 'RQ', 'N']:\n tab.add_column(fieldname, [], align='r')\n\n for category_set, subset_results in six.iteritems(results):\n data_cols = [\n round(subset_results[col_key], print_digits) * 100\n for col_key in ['pq', 'sq', 'rq']\n ]\n data_cols += [subset_results['n']]\n tab.add_row([category_set] + data_cols)\n\n print(tab)\n\n def result(self):\n \"\"\"See base class.\"\"\"\n pq_per_class = self.result_per_category()\n valid_categories = self._valid_categories()\n if not np.any(valid_categories):\n return 0.\n return np.mean(pq_per_class[valid_categories])\n\n def merge(self, other_instance):\n \"\"\"See base class.\"\"\"\n self.iou_per_class += other_instance.iou_per_class\n self.tp_per_class += other_instance.tp_per_class\n self.fn_per_class += other_instance.fn_per_class\n self.fp_per_class += other_instance.fp_per_class\n\n def reset(self):\n \"\"\"See base class.\"\"\"\n self.iou_per_class = np.zeros(self.num_categories, dtype=np.float64)\n self.tp_per_class = np.zeros(self.num_categories, dtype=np.float64)\n self.fn_per_class = np.zeros(self.num_categories, dtype=np.float64)\n self.fp_per_class = np.zeros(self.num_categories, dtype=np.float64)\n",
"from histomicstk.utils import gradient_diffusion\nimport numpy as np\nimport skimage.morphology as mp\nfrom skimage import measure as ms\n\n\ndef gvf_tracking(I, Mask, K=1000, Diffusions=10, Mu=5, Lambda=5, Iterations=10,\n dT=0.05):\n \"\"\"\n Performs gradient-field tracking to segment smoothed images of cell nuclei.\n\n Takes as input a smoothed intensity or Laplacian-of-Gaussian filtered image\n and a foreground mask, and groups pixels by tracking them to mutual\n gradient sinks. Typically requires merging of sinks (seeds) as a post\n processing steps.\n\n Parameters\n ----------\n I : array_like\n Smoothed intensity or log-filtered response where nuclei regions have\n larger intensity values than background.\n Mask : array_like\n Binary mask where foreground objects have value 1, and background\n objects have value 0. Used to restrict influence of background vectors\n on diffusion process and to reduce tracking computations.\n K : float\n Number of steps to check for tracking cycle. Default value = 1000.\n Mu : float\n Weight parmeter from Navier-Stokes diffusion - weights divergence and\n Laplacian terms. Default value = 5.\n Lambda : float\n Weight parameter from Navier-Stokes diffusion - used to weight\n divergence. Default value = 5.\n Iterations : float\n Number of time-steps to use in Navier-Stokes diffusion. Default value =\n 10.\n dT : float\n Timestep to be used in Navier-Stokes diffusion. Default value = 0.05.\n\n Returns\n -------\n Segmentation : array_like\n Label image where positive values correspond to foreground pixels that\n share mutual sinks.\n Sinks : array_like\n N x 2 array containing the (x,y) locations of the tracking sinks. Each\n row is an (x,y) pair - in that order.\n\n See Also\n --------\n histomicstk.utils.gradient_diffusion,\n histomicstk.segmentation.label.shuffle\n\n References\n ----------\n .. [#] G. 
Li et al \"3D cell nuclei segmentation based on gradient flow\n tracking\" in BMC Cell Biology,vol.40,no.8, 2007.\n\n \"\"\"\n\n # get image shape\n M = I.shape[0]\n N = I.shape[1]\n\n # calculate gradient\n dy, dx = np.gradient(I)\n\n # diffusion iterations\n if Diffusions > 0:\n dx, dy = gradient_diffusion(dx, dy, Mask, Mu, Lambda, Diffusions,\n dT)\n\n # normalize to unit magnitude\n Mag = ((dx**2 + dy**2)**0.5 + np.finfo(float).eps)\n dy = dy / Mag\n dx = dx / Mag\n\n # define mask to track pixels that are mapped to a sink\n Mapped = np.zeros(I.shape)\n\n # define label image\n Segmentation = np.zeros(I.shape)\n\n # initialize lists of sinks\n Sinks = []\n\n # define coordinates for foreground pixels (Mask == 1)\n i, j = np.nonzero(Mask)\n\n # track pixels\n for index, (x, y) in enumerate(zip(j, i)):\n\n # initialize angle, trajectory length, novel flag, and allocation count\n phi = 0\n points = 1\n novel = 1\n alloc = 1\n\n # initialize trajectory\n Trajectory = np.zeros((K, 2))\n Trajectory[0, 0] = x\n Trajectory[0, 1] = y\n\n # track while angle defined by successive steps is < np.pi / 2\n while(phi < np.pi / 2):\n\n # calculate step\n xStep = round_float(dx[Trajectory[points-1, 1],\n Trajectory[points-1, 0]])\n yStep = round_float(dy[Trajectory[points-1, 1],\n Trajectory[points-1, 0]])\n\n # check image edge\n if ((Trajectory[points-1, 0] + xStep < 0) or\n (Trajectory[points-1, 0] + xStep > N-1) or\n (Trajectory[points-1, 1] + yStep < 0) or\n (Trajectory[points-1, 1] + yStep > M-1)):\n break\n\n # add new point to trajectory list\n if points < K: # buffer is not overrun\n Trajectory[points, 0] = Trajectory[points-1, 0] + xStep\n Trajectory[points, 1] = Trajectory[points-1, 1] + yStep\n\n else: # buffer overrun\n\n # check for cycle\n cycle = detect_cycle(Trajectory, points)\n\n if cycle == points: # no cycle, simple overflow. 
grow buffer.\n\n # copy and reallocate\n temp = Trajectory\n Trajectory = np.zeros((K*alloc, 2))\n Trajectory[K*(alloc-1):K*alloc, ] = temp\n alloc += 1\n\n # add new point\n Trajectory[points, 0] = Trajectory[points-1, 0] + xStep\n Trajectory[points, 1] = Trajectory[points-1, 1] + yStep\n\n else: # overflow due to cycle, terminate tracking\n points = cycle\n\n # check mapping\n if Mapped[Trajectory[points, 1], Trajectory[points, 0]] == 1:\n novel = 0\n phi = np.pi\n elif Mask[Trajectory[points, 1], Trajectory[points, 0]] == 0:\n phi = np.pi\n else:\n phi = np.arccos(dy[Trajectory[points-1, 1],\n Trajectory[points-1, 0]] *\n dy[Trajectory[points, 1],\n Trajectory[points, 0]] +\n dx[Trajectory[points-1, 1],\n Trajectory[points-1, 0]] *\n dx[Trajectory[points, 1],\n Trajectory[points, 0]])\n\n # increment trajectory length counter\n points += 1\n\n # determine if sink is novel\n if novel == 1:\n\n # record sinks\n Sinks.append(Trajectory[points-1, ])\n\n # add trajectory to label image with new sink value, add mapping\n for j in range(points):\n Segmentation[Trajectory[j, 1], Trajectory[j, 0]] = len(Sinks)\n Mapped[Trajectory[j, 1], Trajectory[j, 0]] = 1\n\n else:\n\n # add trajectory to label image with sink value of final point\n for j in range(points):\n Segmentation[Trajectory[j, 1], Trajectory[j, 0]] = \\\n Segmentation[Trajectory[points-1, 1],\n Trajectory[points-1, 0]]\n\n # convert Sinks to numpy array\n Sinks = np.asarray(Sinks)\n\n return Segmentation, Sinks\n\n\ndef merge_sinks(Label, Sinks, Radius=5):\n \"\"\"\n Merges attraction basins obtained from gradient flow tracking using\n sink locations.\n\n Parameters\n ----------\n Segmentation : array_like\n Label image where positive values correspond to foreground pixels that\n share mutual sinks.\n Sinks : array_like\n N x 2 array containing the (x,y) locations of the tracking sinks. Each\n row is an (x,y) pair - in that order.\n Radius : float\n Radius used to merge sinks. 
Sinks closer than this radius to one\n another will have their regions of attraction merged.\n Default value = 5.\n\n Returns\n -------\n Merged : array_like\n Label image where attraction regions are merged.\n\n \"\"\"\n\n # build seed image\n SeedImage = np.zeros(Label.shape)\n for i in range(Sinks.shape[0]):\n SeedImage[Sinks[i, 1], Sinks[i, 0]] = i+1\n\n # dilate sink image\n Dilated = mp.binary_dilation(SeedImage, mp.disk(Radius))\n\n # generate new labels for merged seeds, define memberships\n Labels = ms.label(Dilated)\n New = Labels[Sinks[:, 1].astype(np.int), Sinks[:, 0].astype(np.int)]\n\n # get unique list of seed clusters\n Unique = np.arange(1, New.max()+1)\n\n # generate new seed list\n Merged = np.zeros(Label.shape)\n\n # get pixel list for each sink object\n Props = ms.regionprops(Label.astype(np.int))\n\n # fill in new values\n for i in Unique:\n Indices = np.nonzero(New == i)[0]\n for j in Indices:\n Coords = Props[j].coords\n Merged[Coords[:, 0], Coords[:, 1]] = i\n\n return Merged\n\n\ndef detect_cycle(Trajectory, points):\n\n # initialize trajectory length\n length = 0\n\n # identify trajectory bounding box\n xMin = np.min(Trajectory[0:points, 0])\n xMax = np.max(Trajectory[0:points, 0])\n xRange = xMax - xMin + 1\n yMin = np.min(Trajectory[0:points, 1])\n yMax = np.max(Trajectory[0:points, 1])\n yRange = yMax - yMin + 1\n\n # fill in trajectory map\n Map = np.zeros((yRange, xRange))\n for i in range(points):\n if Map[Trajectory[i, 1]-yMin, Trajectory[i, 0]-xMin] == 1:\n break\n else:\n Map[Trajectory[i, 1]-yMin, Trajectory[i, 0]-xMin] = 1\n length += 1\n\n return length\n\n\ndef round_float(x):\n if x >= 0.0:\n t = np.ceil(x)\n if t - x > 0.5:\n t -= 1.0\n return t\n else:\n t = np.ceil(-x)\n if t + x > 0.5:\n t -= 1.0\n return -t\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 18 02:31:32 2019.\n\n@author: mtageld\n\"\"\"\nimport numpy as np\nfrom histomicstk.preprocessing.color_deconvolution.stain_color_map import (\n stain_color_map)\nfrom histomicstk.preprocessing.color_deconvolution.color_deconvolution import (\n stain_unmixing_routine, color_deconvolution_routine)\nfrom histomicstk.preprocessing.color_deconvolution.color_convolution import (\n color_convolution)\nfrom histomicstk.preprocessing.color_deconvolution import (\n complement_stain_matrix)\n\n\ndef deconvolution_based_normalization(\n im_src, W_source=None, W_target=None, im_target=None,\n stains=None, mask_out=None, stain_unmixing_routine_params=None):\n \"\"\"Perform color normalization using color deconvolution to transform the.\n\n ... color characteristics of an image to a desired standard.\n After the image is deconvolved into its component stains (eg, H&E), it is\n convolved with a stain column vectors matrix from the target image from\n which the color characteristics need to be transferred.\n\n Parameters\n ------------\n im_src : array_like\n An RGB image (m x n x 3) to color normalize\n\n W_source : np array, default is None\n A 3x3 matrix of source stain column vectors. Only provide this\n if you know the stains matrix in advance (unlikely) and would\n like to perform supervised deconvolution. If this is not provided,\n stain_unmixing_routine() is used to estimate W_source.\n\n W_target : np array, default is None\n A 3x3 matrix of target stain column vectors. If not provided,\n and im_target is also not provided, the default behavior is to use\n histomicstk.preprocessing.color_deconvolution.stain_color_map\n to provide an idealized target matrix.\n\n im_target : array_like, default is None\n An RGB image (m x n x 3) that has good color properties that ought to\n be transferred to im_src. If you provide this parameter, im_target\n will be used to extract W_target and the W_target parameter will\n be ignored.\n\n stains : list, optional\n List of stain names (order is important). Default is H&E. This is\n particularly relevant in macenco where the order of stains is not\n preserved during stain unmixing, so this method uses\n histomicstk.preprocessing.color_deconvolution.find_stain_index\n to reorder the stains matrix to the order provided by this parameter\n\n mask_out : array_like, default is None\n if not None, should be (m x n) boolean numpy array.\n This parameter ensures exclusion of non-masked areas from calculations\n and normalization. This is relevant because elements like blood,\n sharpie marker, white space, etc may throw off the normalization.\n\n stain_unmixing_routine_params : dict, default is empty dict\n k,v for stain_unmixing_routine().\n\n Returns\n --------\n array_like\n Color Normalized RGB image (m x n x 3)\n\n\n See Also\n --------\n histomicstk.preprocessing.color_deconvolution.color_deconvolution_routine\n histomicstk.preprocessing.color_convolution.color_convolution\n\n References\n ----------\n .. [#] Van Eycke, Y. R., Allard, J., Salmon, I., Debeir, O., &\n Decaestecker, C. (2017). Image processing in digital pathology: an\n opportunity to solve inter-batch variability of immunohistochemical\n staining. Scientific Reports, 7.\n .. [#] Macenko, M., Niethammer, M., Marron, J. S., Borland, D.,\n Woosley, J. T., Guan, X., ... & Thomas, N. E. (2009, June).\n A method for normalizing histology slides for quantitative analysis.\n In Biomedical Imaging: From Nano to Macro, 2009. 
ISBI'09.\n IEEE International Symposium on (pp. 1107-1110). IEEE.\n .. [#] Xu, J., Xiang, L., Wang, G., Ganesan, S., Feldman, M., Shih, N. N.,\n ...& Madabhushi, A. (2015). Sparse Non-negative Matrix Factorization\n (SNMF) based color unmixing for breast histopathological image\n analysis. Computerized Medical Imaging and Graphics, 46, 20-29.\n\n \"\"\"\n stains = ['hematoxylin', 'eosin'] if stains is None else stains\n stain_unmixing_routine_params = (\n {} if stain_unmixing_routine_params is None else\n stain_unmixing_routine_params)\n for k in ['W_source', 'mask_out']:\n assert k not in stain_unmixing_routine_params.keys(), \\\n \"%s must be provided as a separate parameter.\" % k\n\n # find stains matrix from source image\n stain_unmixing_routine_params['stains'] = stains\n _, StainsFloat, _ = color_deconvolution_routine(\n im_src, W_source=W_source, mask_out=mask_out,\n **stain_unmixing_routine_params)\n\n # Get W_target\n\n if all(j is None for j in [W_target, im_target]):\n # Normalize to 'ideal' stain matrix if none is provided\n W_target = np.array(\n [stain_color_map[stains[0]], stain_color_map[stains[1]]]).T\n W_target = complement_stain_matrix(W_target)\n\n elif im_target is not None:\n # Get W_target from target image\n W_target = stain_unmixing_routine(\n im_target, **stain_unmixing_routine_params)\n\n # Convolve source image StainsFloat with W_target\n im_src_normalized = color_convolution(StainsFloat, W_target)\n\n # return masked values using unnormalized image\n if mask_out is not None:\n keep_mask = np.not_equal(mask_out, True)\n for i in range(3):\n original = im_src[:, :, i].copy()\n new = im_src_normalized[:, :, i].copy()\n original[keep_mask] = 0\n new[mask_out] = 0\n im_src_normalized[:, :, i] = new + original\n\n return im_src_normalized\n",
"import numpy as np\n\nfrom .rgb_to_lab import _rgb2lms, _lms2lab\n\n\n# Define conversion matrices\n_lms2rgb = np.linalg.inv(_rgb2lms)\n_lab2lms = np.linalg.inv(_lms2lab)\n\n\ndef lab_to_rgb(im_lab):\n \"\"\"Transforms an image from LAB to RGB color space\n\n Parameters\n ----------\n im_lab : array_like\n An image in LAB color space\n\n Returns\n -------\n im_rgb : array_like\n The RGB representation of the input image 'im_lab'.\n\n See Also\n --------\n histomicstk.preprocessing.color_conversion.rgb_to_lab,\n histomicstk.preprocessing.color_normalization.reinhard\n\n References\n ----------\n .. [#] D. Ruderman, T. Cronin, and C. Chiao, \"Statistics of cone\n responses to natural images: implications for visual coding,\"\n J. Opt. Soc. Am. A 15, 2036-2045 (1998).\n\n \"\"\"\n\n # get input image dimensions\n m = im_lab.shape[0]\n n = im_lab.shape[1]\n\n # calculate im_lms values from LAB\n im_lab = np.reshape(im_lab, (m * n, 3))\n im_lms = np.dot(_lab2lms, np.transpose(im_lab))\n\n # calculate RGB values from im_lms\n im_lms = np.exp(im_lms)\n im_lms[im_lms == np.spacing(1)] = 0\n\n im_rgb = np.dot(_lms2rgb, im_lms)\n\n # reshape to 3-channel image\n im_rgb = np.reshape(im_rgb.transpose(), (m, n, 3))\n\n return im_rgb\n"
] |
[
[
"numpy.logical_not",
"numpy.multiply",
"numpy.unique",
"numpy.mean",
"numpy.any",
"numpy.not_equal",
"numpy.logical_and",
"numpy.zeros"
],
[
"numpy.nonzero",
"numpy.min",
"numpy.asarray",
"numpy.gradient",
"numpy.arccos",
"numpy.finfo",
"numpy.max",
"numpy.ceil",
"numpy.zeros"
],
[
"numpy.not_equal",
"numpy.array"
],
[
"numpy.dot",
"numpy.spacing",
"numpy.linalg.inv",
"numpy.reshape",
"numpy.transpose",
"numpy.exp"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
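panoptic_quality.py above factors Panoptic Quality per category into segmentation quality (mean IoU over matched segments) and recognition quality (an F1-style count ratio): PQ = SQ * RQ with SQ = sum(IoU)/TP and RQ = TP / (TP + FN/2 + FP/2). A minimal numpy sketch of that factorization, with hypothetical per-category counts and a safe division standing in for base_metric.realdiv_maybe_zero:

import numpy as np

def pq_per_class(iou_sum, tp, fn, fp):
    # SQ: mean IoU over true-positive matches; zero where a class has no TP.
    sq = np.divide(iou_sum, tp, out=np.zeros_like(iou_sum), where=tp > 0)
    # RQ: F1-style recognition quality over TP/FN/FP counts.
    denom = tp + 0.5 * fn + 0.5 * fp
    rq = np.divide(tp, denom, out=np.zeros_like(tp), where=denom > 0)
    return sq * rq

# Hypothetical counts for three categories:
iou_sum = np.array([7.5, 0.0, 3.2])
tp = np.array([10.0, 0.0, 4.0])
fn = np.array([2.0, 1.0, 0.0])
fp = np.array([1.0, 0.0, 2.0])
print(pq_per_class(iou_sum, tp, fn, fp))

Because a match requires IoU > 0.5, each groundtruth segment can match at most one prediction, which is what makes the per-class TP/FN/FP bookkeeping in the class above well defined.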
force-h2020/force-bdss-plugin-enthought-example
|
[
"c56d47521233e369d42fe82282ed8e113a3747f7"
] |
[
"eggbox_potential_sampler/scripts/plot_sigma.py"
] |
[
"# (C) Copyright 2010-2020 Enthought, Inc., Austin, TX\n# All rights reserved.\n\nfrom unittest import mock\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom force_bdss.api import BaseDataSourceFactory, DataValue\nfrom eggbox_potential_sampler.eggbox_pes_data_source.data_source import (\n EggboxPESDataSource)\nfrom eggbox_potential_sampler.eggbox_pes_data_source.model import (\n EggboxPESDataSourceModel)\n\n\ndef plot_potentials(sigmas=[0, 0.001, 0.01, 0.1, 1]):\n \"\"\" Plot and optimise 2D potentials over a range of sigma values. \"\"\"\n factory = mock.Mock(spec=BaseDataSourceFactory)\n for sigma in sigmas:\n ds = EggboxPESDataSource(factory)\n model = EggboxPESDataSourceModel(factory,\n num_cells=10,\n dimension=2,\n sigma_star=sigma)\n print(sigma)\n sample(1000, ds, model)\n plot_surface_to_file(ds, model)\n\n\ndef sample(n, ds, model):\n \"\"\" Sample the potential of the given data source n times. \"\"\"\n i = 0\n while i < n:\n random = np.random.rand(2)\n mock_params = [DataValue(value=random[0], type=\"float\"),\n DataValue(value=random[1], type=\"float\")]\n ds.run(model, mock_params)\n i += 1\n\n\ndef plot_surface_to_file(data_source, model):\n \"\"\" Plot the 2D surface to file. \"\"\"\n x = np.linspace(0, 1, 256)\n xx, yy = np.meshgrid(x, x)\n\n z = np.zeros((len(x), len(x)))\n evaluate_potential = EggboxPESDataSource.evaluate_potential\n for ind, _ in enumerate(xx):\n for jnd, _ in enumerate(yy):\n trial = np.asarray([xx[ind][jnd], yy[ind][jnd]])\n z[ind, jnd] = evaluate_potential(trial, model)\n\n trials = np.asarray(model.trials)\n results = np.asarray(model.results)\n\n fig = plt.figure(figsize=(4, 4))\n ax1 = fig.add_subplot(111, aspect='equal')\n im = ax1.pcolormesh(xx, yy, z)\n plt.colorbar(im, label='$V(x,y)$ (arb.)', ticks=[])\n ax1.set_title('$\\\\sigma^* = {}$'.format(model.sigma_star))\n ax1.set_xlabel('$x$')\n ax1.set_ylabel('$y$')\n ax1.set_yticklabels([])\n ax1.set_xticklabels([])\n ax1.plot([trials[:, 0], results[:, 0]], [trials[:, 1], results[:, 1]],\n lw=0.2, c='k', alpha=0.5)\n plt.tight_layout()\n plt.savefig('pot_2d_{}.png'.format(model.sigma_star))\n\n\nif __name__ == '__main__':\n plot_potentials()\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"numpy.linspace",
"numpy.asarray",
"matplotlib.pyplot.colorbar",
"numpy.random.rand",
"numpy.meshgrid",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
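plot_sigma.py above evaluates the potential on a 256x256 meshgrid point by point and renders it with pcolormesh. A minimal sketch of the same grid-and-plot pattern, substituting a hypothetical vectorized egg-box surface for the script's EggboxPESDataSource.evaluate_potential calls:

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 1, 256)
xx, yy = np.meshgrid(x, x)
# Stand-in potential; the real script evaluates the data source pointwise.
z = np.sin(10 * np.pi * xx) * np.sin(10 * np.pi * yy)

fig, ax = plt.subplots(figsize=(4, 4))
im = ax.pcolormesh(xx, yy, z)
fig.colorbar(im, label="$V(x,y)$ (arb.)", ticks=[])
fig.savefig("pot_2d_demo.png")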
LostCow/KLUE
|
[
"73b1b0526cf6b1b6f5ef535b9527d8abe6ca1a77"
] |
[
"re/dataset.py"
] |
[
"# coding=utf-8\n\nimport json\nimport logging\nimport os\nfrom typing import List, Tuple\n\nimport torch\nimport transformers\nfrom torch.utils.data import DataLoader, Dataset, TensorDataset\nfrom utils import InputExample, InputFeatures\nimport re\n\nlogger = logging.getLogger(__name__)\n\n\nclass KlueReProcessor:\n def __init__(self, args, tokenizer) -> None:\n\n self.hparams = args\n self.tokenizer = tokenizer\n self.emap = {\n \"PER\": \"인물\",\n \"ORG\": \"기관\",\n \"LOC\": \"지명\",\n \"POH\": \"명사\",\n \"DAT\": \"날짜\",\n \"NOH\": \"수량\",\n }\n\n # special tokens to mark the subject/object entity boundaries\n self.subject_start_marker = \"[subj]\"\n self.subject_end_marker = \"[/subj]\"\n self.object_start_marker = \"[obj]\"\n self.object_end_marker = \"[/obj]\"\n self.si_start_marker = \"[si]\"\n self.si_end_marker = \"[/si]\"\n self.ctx_start_marker = \"[ctx]\"\n self.ctx_end_marker = \"[/ctx]\"\n\n self.tokenizer.add_special_tokens(\n {\n \"additional_special_tokens\": [\n self.subject_start_marker,\n self.subject_end_marker,\n self.object_start_marker,\n self.object_end_marker,\n self.si_start_marker,\n self.si_end_marker,\n self.ctx_start_marker,\n self.ctx_end_marker,\n ]\n }\n )\n\n # Load relation class\n relation_class_file_path = os.path.join(\n self.hparams.data_dir, self.hparams.relation_filename\n )\n\n with open(relation_class_file_path, \"r\", encoding=\"utf-8\") as f:\n self.relation_class = json.load(f)[\"relations\"]\n\n def get_test_dataset(self, data_dir: str, file_name: str = None) -> Dataset:\n file_path = os.path.join(data_dir, file_name)\n\n assert os.path.exists(\n file_path\n ), \"KlueReProcessor tries to open test file, but test dataset doesn't exists.\"\n\n logger.info(f\"Loading from {file_path}\")\n return self._create_dataset(file_path)\n\n def get_labels(self):\n return self.relation_class\n\n def _create_examples(self, file_path: str) -> List[InputExample]:\n examples = []\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n data_lst = json.load(f)\n\n for data in data_lst:\n guid = data[\"guid\"]\n text = data[\"sentence\"]\n subject_entity = data[\"subject_entity\"]\n object_entity = data[\"object_entity\"]\n label = data[\"label\"]\n\n text = self._mark_entity_spans(\n text=text,\n subject_range=(\n int(subject_entity[\"start_idx\"]),\n int(subject_entity[\"end_idx\"]),\n ),\n object_range=(\n int(object_entity[\"start_idx\"]),\n int(object_entity[\"end_idx\"]),\n ),\n sub_type=subject_entity[\"type\"],\n obj_type=object_entity[\"type\"],\n )\n examples.append(InputExample(guid=guid, text_a=text, label=label))\n\n return examples\n\n def _mark_entity_spans(\n self,\n text: str,\n subject_range: Tuple[int, int],\n object_range: Tuple[int, int],\n sub_type,\n obj_type,\n ) -> str:\n \"\"\"\n Add entity markers to the text to identify the subject/object entities.\n\n Args:\n text: Original sentence\n subject_range: Pair of start and end indices of subject entity\n object_range: Pair of start and end indices of object entity\n\n Returns:\n A string of text with subject/object entity markers\n \"\"\"\n sub_type = \" \" + self.emap[sub_type] + \" \"\n obj_type = \" \" + self.emap[obj_type] + \" \"\n if subject_range < object_range:\n segments = [\n #####\n self.si_start_marker,\n sub_type,\n obj_type,\n self.si_end_marker,\n # \"[SEP]\",\n #####\n self.ctx_start_marker,\n text[: subject_range[0]],\n self.subject_start_marker,\n text[subject_range[0] : subject_range[1] + 1],\n self.subject_end_marker,\n text[subject_range[1] + 1 : object_range[0]],\n 
self.object_start_marker,\n text[object_range[0] : object_range[1] + 1],\n self.object_end_marker,\n text[object_range[1] + 1 :],\n self.ctx_end_marker,\n ]\n elif subject_range > object_range:\n segments = [\n #####\n self.si_start_marker,\n sub_type,\n obj_type,\n self.si_end_marker,\n # \"[SEP]\",\n #####\n self.ctx_start_marker,\n text[: object_range[0]],\n self.object_start_marker,\n text[object_range[0] : object_range[1] + 1],\n self.object_end_marker,\n text[object_range[1] + 1 : subject_range[0]],\n self.subject_start_marker,\n text[subject_range[0] : subject_range[1] + 1],\n self.subject_end_marker,\n text[subject_range[1] + 1 :],\n self.ctx_end_marker,\n ]\n else:\n raise ValueError(\"Entity boundaries overlap.\")\n # \".\", \",\", \"!\", \"?\", \";\", \":\"\n marked_text = \"\".join(segments)\n marked_text = re.sub(r\"[^ a-zA-Z0-9가-힣<>\\]\\[/.,!?;:()%]\", \" \", marked_text)\n # marked_text = marked_text.replace(self.subject_start_marker, \"@\")\n # marked_text = marked_text.replace(self.subject_end_marker, \"#\")\n # marked_text = marked_text.replace(self.object_start_marker, \"$\")\n # marked_text = marked_text.replace(self.object_end_marker, \"%\")\n # marked_text = marked_text.replace(self.si_start_marker, \"#\")\n # marked_text = marked_text.replace(self.si_end_marker, \"$\")\n\n return marked_text\n\n # copied from klue_baseline.data.utils.convert_examples_to_features\n def _convert_features(self, examples: List[InputExample]) -> List[InputFeatures]:\n max_length = self.hparams.max_seq_length\n if max_length is None:\n max_length = self.tokenizer.max_len\n\n label_map = {label: i for i, label in enumerate(self.get_labels())}\n labels = [label_map[example.label] for example in examples]\n\n def check_tokenizer_type():\n \"\"\"\n Check tokenizer type.\n In KLUE paper, we only support wordpiece (BERT, KLUE-RoBERTa, ELECTRA) & sentencepiece (XLM-R).\n Will give warning if you use other tokenization. (e.g. bbpe)\n \"\"\"\n if isinstance(self.tokenizer, transformers.XLMRobertaTokenizer):\n logger.info(\n f\"Using {type(self.tokenizer).__name__} for fixing tokenization result\"\n )\n return \"xlm-sp\" # Sentencepiece\n elif isinstance(self.tokenizer, transformers.BertTokenizer) or isinstance(\n self.tokenizer, transformers.BertTokenizerFast\n ):\n logger.info(\n f\"Using {type(self.tokenizer).__name__} for fixing tokenization result\"\n )\n return (\n \"bert-wp\" # Wordpiece (including BertTokenizer & ElectraTokenizer)\n )\n else:\n logger.warn(\n f\"your tokenizer : {type(self.tokenizer).__name__}, If you are using other tokenizer (e.g. 
bbpe), you should change code in `fix_tokenization_error()`\"\n )\n return \"other\"\n\n def fix_tokenization_error(text, tokenizer_type):\n tokens = self.tokenizer.tokenize(text)\n # subject\n # subject_end_marker = \"#\"\n # object_end_marker = \"%\"\n # 스페셜 토큰으로 (ex: <subj>) 사용할 경우 self.subject_end_marker 로 사용\n\n if (\n text[text.find(self.subject_end_marker) + len(self.subject_end_marker)]\n != \" \"\n ):\n space_idx = tokens.index(self.subject_end_marker) + 1\n if tokenizer_type == \"xlm-sp\":\n if tokens[space_idx] == \"▁\":\n tokens.pop(space_idx)\n elif tokens[space_idx].startswith(\"▁\"):\n tokens[space_idx] = tokens[space_idx][1:]\n elif tokenizer_type == \"bert-wp\":\n if (\n not tokens[space_idx].startswith(\"##\")\n and \"가\" <= tokens[space_idx][0] <= \"힣\"\n ):\n tokens[space_idx] = \"##\" + tokens[space_idx]\n\n # object\n if (\n text[text.find(self.object_end_marker) + len(self.object_end_marker)]\n != \" \"\n ):\n space_idx = tokens.index(self.object_end_marker) + 1\n if tokenizer_type == \"xlm-sp\":\n if tokens[space_idx] == \"▁\":\n tokens.pop(space_idx)\n elif tokens[space_idx].startswith(\"▁\"):\n tokens[space_idx] = tokens[space_idx][1:]\n elif tokenizer_type == \"bert-wp\":\n if (\n not tokens[space_idx].startswith(\"##\")\n and \"가\" <= tokens[space_idx][0] <= \"힣\"\n ):\n tokens[space_idx] = \"##\" + tokens[space_idx]\n\n return tokens\n\n tokenizer_type = check_tokenizer_type()\n tokenized_examples = [\n fix_tokenization_error(example.text_a, tokenizer_type)\n for example in examples\n ]\n batch_encoding = self.tokenizer.batch_encode_plus(\n [\n (self.tokenizer.convert_tokens_to_ids(tokens), None)\n for tokens in tokenized_examples\n ],\n max_length=max_length,\n # padding=\"max_length\",\n padding=True,\n truncation=True,\n )\n ####\n special_token_ids = self.tokenizer.additional_special_tokens_ids\n subj_start_token = special_token_ids[0]\n subj_end_token = special_token_ids[1]\n obj_start_token = special_token_ids[2]\n obj_end_token = special_token_ids[3]\n si_start_token = special_token_ids[4]\n si_end_token = special_token_ids[5]\n ctx_start_token = special_token_ids[6]\n ctx_end_token = special_token_ids[7]\n\n subject_entity_lst = []\n object_entity_lst = []\n si_mask_lst = []\n n_tts_lst = []\n ctx_mask_lst = []\n for i in range(len(examples)):\n token_len = len(batch_encoding[\"input_ids\"][i])\n subj_mask = [0] * token_len\n obj_mask = [0] * token_len\n si_mask = [0] * token_len\n n_tts = [0] * token_len\n ctx_mask = [0] * token_len\n\n sub_start_idx = batch_encoding[\"input_ids\"][i].index(subj_start_token)\n sub_end_idx = batch_encoding[\"input_ids\"][i].index(subj_end_token)\n obj_start_idx = batch_encoding[\"input_ids\"][i].index(obj_start_token)\n obj_end_idx = batch_encoding[\"input_ids\"][i].index(obj_end_token)\n si_start_idx = batch_encoding[\"input_ids\"][i].index(si_start_token)\n si_end_idx = batch_encoding[\"input_ids\"][i].index(si_end_token)\n ctx_start_idx = batch_encoding[\"input_ids\"][i].index(ctx_start_token)\n ctx_end_idx = batch_encoding[\"input_ids\"][i].index(ctx_end_token)\n #\n # sep_id = batch_encoding[\"input_ids\"][i].index(self.tokenizer.sep_token_id)\n # pad_id = batch_encoding[\"input_ids\"][i].index(self.tokenizer.pad_token_id)\n #\n\n for idx in range(sub_start_idx, sub_end_idx + 1):\n subj_mask[idx] = 1\n\n for idx in range(obj_start_idx, obj_end_idx + 1):\n obj_mask[idx] = 1\n\n for idx in range(si_start_idx, si_end_idx + 1):\n si_mask[idx] = 1\n\n for idx in range(ctx_start_idx, ctx_end_idx):\n ctx_mask[idx] = 1\n\n 
# for idx in range(sep_id + 1, pad_id):\n # n_tts[idx] = 1\n\n subject_entity_lst.append(subj_mask)\n object_entity_lst.append(obj_mask)\n si_mask_lst.append(si_mask)\n ctx_mask_lst.append(ctx_mask)\n # n_tts_lst.append(n_tts)\n batch_encoding[\"e1_mask\"] = subject_entity_lst\n batch_encoding[\"e2_mask\"] = object_entity_lst\n batch_encoding[\"si_mask\"] = si_mask_lst\n batch_encoding[\"ctx_mask\"] = ctx_mask_lst\n # batch_encoding[\"token_type_ids\"] = n_tts_lst\n ####\n features = []\n for i in range(len(examples)):\n inputs = {k: batch_encoding[k][i] for k in batch_encoding}\n\n feature = InputFeatures(**inputs, label=labels[i])\n features.append(feature)\n\n for i in range(5):\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (examples[i].guid))\n logger.info(\"origin example: %s\" % examples[i])\n logger.info(\n \"origin tokens: %s\" % self.tokenizer.tokenize(examples[i].text_a)\n )\n logger.info(\"fixed tokens: %s\" % tokenized_examples[i])\n logger.info(\"features: %s\" % features[i])\n\n return features\n\n def _create_dataset(self, file_path: str) -> Dataset:\n examples = self._create_examples(file_path)\n features = self._convert_features(examples)\n\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor(\n [f.attention_mask for f in features], dtype=torch.long\n )\n # Some model does not make use of token type ids (e.g. RoBERTa)\n all_token_type_ids = torch.tensor(\n [0 if f.token_type_ids is None else f.token_type_ids for f in features],\n dtype=torch.long,\n )\n ###\n all_e1_mask = torch.tensor(\n [0 if f.e1_mask is None else f.e1_mask for f in features],\n dtype=torch.long,\n )\n all_e2_mask = torch.tensor(\n [0 if f.e2_mask is None else f.e2_mask for f in features],\n dtype=torch.long,\n )\n ###\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n\n return TensorDataset(\n all_input_ids,\n all_attention_mask,\n all_token_type_ids,\n all_e1_mask,\n all_e2_mask,\n all_labels,\n )\n\n\nclass KlueReDataLoader:\n def __init__(self, args, tokenizer):\n self.hparams = args\n self.processor = KlueReProcessor(args, tokenizer)\n\n def get_dataloader(\n self, batch_size: int, shuffle: bool = False, num_workers: int = 0\n ):\n return DataLoader(\n self.processor.get_test_dataset(\n self.hparams.data_dir, self.hparams.test_filename\n ),\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n )\n"
] |
[
[
"torch.utils.data.TensorDataset",
"torch.tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
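KlueReProcessor._mark_entity_spans above wraps the subject and object spans in [subj]...[/subj] and [obj]...[/obj] markers inside a [ctx]...[/ctx] envelope, treating the start/end indices as inclusive character ranges. A minimal sketch of the subject-before-object branch (the [si] entity-type prefix the processor also prepends is omitted here, and the example sentence and indices are made up):

def mark_entity_spans(text, subj, obj):
    # subj and obj are inclusive (start, end) character ranges, as in the KLUE data.
    (ss, se), (os_, oe) = subj, obj
    return "".join([
        "[ctx]", text[:ss],
        "[subj]", text[ss:se + 1], "[/subj]",
        text[se + 1:os_],
        "[obj]", text[os_:oe + 1], "[/obj]",
        text[oe + 1:], "[/ctx]",
    ])

print(mark_entity_spans("Ada Lovelace worked with Charles Babbage.", (0, 11), (25, 39)))
# [ctx][subj]Ada Lovelace[/subj] worked with [obj]Charles Babbage[/obj].[/ctx]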
JRhodes95/big-data-cw
|
[
"874d517ebe4efd0704592df1d2815f102f3012d7"
] |
[
"plots.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\nimport pandas as pd\n\n\ndef plot_grid_search(file_name, title):\n \"\"\"Function to plot the results of the grid search optimisation process.\"\"\"\n\n data = pd.read_csv(file_name) # Read the data to a pandas DataFrame\n data_acc = data.pivot(index=\"no feats\", columns=\"splits\", values=\"Accuracy\")\n data_prec = data.pivot(index=\"no feats\", columns=\"splits\", values=\"Precision\")\n data_rec = data.pivot(index=\"no feats\", columns=\"splits\", values=\"Recall\")\n data_f1 = data.pivot(index=\"no feats\", columns=\"splits\", values=\"F1\")\n\n X_acc = data_acc.columns.values\n Y_acc = data_acc.index.values\n Z_acc = data_acc.values\n x_acc,y_acc = np.meshgrid(X_acc,Y_acc)\n\n plt.figure()\n plt.suptitle(title, fontsize=20)\n plt.subplot(221)\n CS = plt.contour(x_acc,y_acc,Z_acc, cmap=cm.RdYlGn)\n plt.clabel(CS, inline=1, fontsize=10)\n plt.title('Accuracy')\n plt.xlabel('Fraction of dataset used for training.')\n plt.ylabel('Maximum number of features')\n\n X_prec = data_prec.columns.values\n Y_prec = data_prec.index.values\n Z_prec = data_prec.values\n x_prec,y_prec = np.meshgrid(X_prec,Y_prec)\n\n plt.subplot(222)\n CS = plt.contour(x_prec,y_prec,Z_prec, cmap=cm.RdYlGn)\n plt.clabel(CS, inline=1, fontsize=10)\n plt.title('Precision')\n plt.xlabel('Fraction of dataset used for training.')\n plt.ylabel('Maximum number of features')\n\n X_rec = data_rec.columns.values\n Y_rec = data_rec.index.values\n Z_rec = data_rec.values\n x_rec,y_rec = np.meshgrid(X_rec,Y_rec)\n\n plt.subplot(223)\n CS = plt.contour(x_rec,y_rec,Z_rec, cmap=cm.RdYlGn)\n plt.clabel(CS, inline=1, fontsize=10)\n plt.title('Recall')\n plt.xlabel('Fraction of dataset used for training.')\n plt.ylabel('Maximum number of features')\n\n X_f1 = data_f1.columns.values\n Y_f1 = data_f1.index.values\n Z_f1 = data_f1.values\n x_f1,y_f1 = np.meshgrid(X_f1,Y_f1)\n\n plt.subplot(224)\n CS = plt.contour(x_f1,y_f1,Z_f1, cmap=cm.RdYlGn)\n plt.clabel(CS, inline=1, fontsize=10)\n plt.title('F1 Measure')\n plt.xlabel('Fraction of dataset used for training.')\n plt.ylabel('Maximum number of features')\n\n plt.show()\n"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.clabel",
"numpy.meshgrid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ZongSingHuang/random-key
|
[
"24daf307e0dfc719c9f92d1b53394bcab2880707"
] |
[
"random key.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 24 14:27:25 2021\r\n\r\n@author: zongsing.huang\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n# dim size\r\nD = 6\r\n\r\n# method 1\r\n\r\nRK1 = np.random.uniform(low=0, high=D, size=[D])\r\n\r\n# method2\r\nRK2 = [ np.random.uniform(low=i, high=i+1) for i in range(D)]\r\nnp.random.shuffle(RK2)"
] |
[
[
"numpy.random.uniform",
"numpy.random.shuffle"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nuswgg/QUANTAXIS
|
[
"8c88985538dde0903510d3368520b6624b03a396"
] |
[
"QUANTAXIS/QASU/save_binance.py"
] |
[
"# coding: utf-8\n# Author: Will\n# Contributor: 阿财(Rgveda@github)([email protected])\n# Created date: 2018-06-08\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2016-2018 yutiansut/QUANTAXIS\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport datetime\nimport time\nfrom dateutil.tz import tzutc\nfrom dateutil.relativedelta import relativedelta\nimport pandas as pd\n\nfrom QUANTAXIS.QAUtil import (\n DATABASE,\n QASETTING,\n QA_util_log_info,\n QA_util_log_expection,\n QA_util_to_json_from_pandas\n)\nfrom QUANTAXIS.QAUtil.QADate_Adv import (\n QA_util_timestamp_to_str,\n QA_util_datetime_to_Unix_timestamp,\n QA_util_print_timestamp\n)\nfrom QUANTAXIS.QAFetch.QAbinance import (\n QA_fetch_binance_symbols,\n QA_fetch_binance_kline,\n QA_fetch_binance_kline_min,\n Binance2QA_FREQUENCY_DICT\n)\nfrom QUANTAXIS.QAUtil.QAcrypto import (\n QA_util_save_raw_symbols,\n QA_util_find_missing_kline\n)\nfrom QUANTAXIS.QAFetch.QAQuery import (QA_fetch_cryptocurrency_list)\n\nimport pymongo\n\n# binance的历史数据只是从2017年7月开始有,以前的貌似都没有保留 . author:Will\nBINANCE_MIN_DATE = datetime.datetime(2017, 7, 1, tzinfo=tzutc())\nBinance_EXCHANGE = 'BINANCE'\nBinance_SYMBOL = 'BINANCE.{}'\n\n\ndef QA_SU_save_binance(frequency):\n \"\"\"\n Save binance kline \"smart\"\n \"\"\"\n if (frequency not in [\"1d\", \"1day\", \"day\"]):\n return QA_SU_save_binance_min(frequency)\n else:\n return QA_SU_save_binance_day(frequency)\n\n\ndef QA_SU_save_binance_day(\n frequency='1d', \n ui_log=None, \n ui_progress=None):\n \"\"\"\n Save binance day kline\n \"\"\"\n symbol_template = Binance_SYMBOL\n symbol_list = QA_fetch_cryptocurrency_list(Binance_EXCHANGE)\n col = DATABASE.cryptocurrency_day\n col.create_index(\n [\n (\"symbol\",\n pymongo.ASCENDING),\n (\"date_stamp\",\n pymongo.ASCENDING)\n ],\n unique=True\n )\n\n end = datetime.datetime.now(tzutc())\n\n QA_util_log_info(\n 'Starting DOWNLOAD PROGRESS of day Klines from {:s}... 
'.format(Binance_EXCHANGE),\n ui_log=ui_log,\n ui_progress=ui_progress\n )\n for index in range(len(symbol_list)):\n symbol_info = symbol_list.iloc[index]\n # 上架仅处理交易对\n QA_util_log_info(\n 'The \"{}\" #{} of total in {}'.format(\n symbol_template.format(symbol_info['symbol']),\n index,\n len(symbol_list)\n ),\n ui_log=ui_log,\n ui_progress=ui_progress\n )\n QA_util_log_info(\n 'DOWNLOAD PROGRESS {} '\n .format(str(float(index / len(symbol_list) * 100))[0:4] + '%'),\n ui_log=ui_log,\n ui_progress=ui_progress\n )\n query_id = {\n \"symbol\": symbol_template.format(symbol_info['symbol']),\n }\n ref = col.find(query_id).sort('date_stamp', -1)\n\n if (col.count_documents(query_id) > 0):\n start_stamp = ref.next()['date_stamp']\n start_time = datetime.datetime.fromtimestamp(start_stamp)\n QA_util_log_info(\n 'UPDATE_SYMBOL \"{}\" Trying updating \"{}\" from {} to {}'.format(\n symbol_template.format(symbol_info['symbol']),\n Binance2QA_FREQUENCY_DICT[frequency],\n QA_util_timestamp_to_str(start_time),\n QA_util_timestamp_to_str(end)\n ),\n ui_log=ui_log,\n ui_progress=ui_progress\n )\n # 查询到 Kline 缺漏,点抓取模式,按缺失的时间段精确请求K线数据\n missing_data_list = QA_util_find_missing_kline(\n symbol_template.format(symbol_info['symbol']),\n Binance2QA_FREQUENCY_DICT[frequency],\n )[::-1]\n else:\n start_time = BINANCE_MIN_DATE\n QA_util_log_info(\n 'NEW_SYMBOL \"{}\" Trying downloading \"{}\" from {} to {}'.format(\n symbol_template.format(symbol_info['symbol']),\n Binance2QA_FREQUENCY_DICT[frequency],\n QA_util_timestamp_to_str(start_time),\n QA_util_timestamp_to_str(end)\n ),\n ui_log=ui_log,\n ui_progress=ui_progress\n )\n \n miss_kline = pd.DataFrame(\n [\n [\n int(QA_util_datetime_to_Unix_timestamp(start_time)),\n int(QA_util_datetime_to_Unix_timestamp(end)),\n '{} to {}'.format(start_time,\n end)\n ]\n ],\n columns=['expected',\n 'between',\n 'missing']\n )\n missing_data_list = miss_kline.values\n\n if len(missing_data_list) > 0:\n # 查询确定中断的K线数据起止时间,缺分时数据,补分时数据\n expected = 0\n between = 1\n missing = 2\n reqParams = {}\n for i in range(len(missing_data_list)):\n reqParams['from'] = int(missing_data_list[i][expected])\n reqParams['to'] = int(missing_data_list[i][between])\n if (reqParams['from'] >\n (QA_util_datetime_to_Unix_timestamp() + 120)):\n # 出现“未来”时间,一般是默认时区设置错误造成的\n QA_util_log_info(\n 'A unexpected \\'Future\\' timestamp got, Please check self.missing_data_list_func param \\'tzlocalize\\' set. 
More info: {:s}@{:s} at {:s} but current time is {}'\n .format(\n symbol_template.format(symbol_info['symbol']),\n frequency,\n QA_util_print_timestamp(reqParams['from']),\n QA_util_print_timestamp(\n QA_util_datetime_to_Unix_timestamp()\n )\n )\n )\n # 跳到下一个时间段\n continue\n\n QA_util_log_info(\n 'Fetch \"{:s}\" slices \"{:s}\" kline:{:s} to {:s}'.format(\n symbol_template.format(symbol_info['symbol']),\n Binance2QA_FREQUENCY_DICT[frequency],\n QA_util_timestamp_to_str(\n missing_data_list[i][expected]\n )[2:16],\n QA_util_timestamp_to_str(\n missing_data_list[i][between]\n )[2:16]\n )\n )\n data = QA_fetch_binance_kline(\n symbol_info['symbol'],\n time.mktime(start_time.utctimetuple()),\n time.mktime(end.utctimetuple()),\n frequency,\n callback_func=QA_SU_save_data_binance_callback\n )\n\n if data is None:\n QA_util_log_info(\n 'SYMBOL \"{}\" from {} to {} has no data'.format(\n symbol_template.format(symbol_info['symbol']),\n QA_util_timestamp_to_str(start_time),\n QA_util_timestamp_to_str(end)\n ),\n ui_log=ui_log,\n ui_progress=ui_progress\n )\n continue\n QA_util_log_info(\n 'DOWNLOAD PROGRESS of day Klines from {:s} accomplished.'.format(Binance_EXCHANGE),\n ui_log=ui_log,\n ui_progress=ui_progress\n )\n\n\ndef QA_SU_save_binance_min(\n frequency='1m', \n ui_log=None, \n ui_progress=None):\n \"\"\"\n Save binance min kline\n \"\"\"\n symbol_template = Binance_SYMBOL\n symbol_list = QA_fetch_cryptocurrency_list(Binance_EXCHANGE)\n col = DATABASE.cryptocurrency_min\n col.create_index(\n [\n (\"symbol\",\n pymongo.ASCENDING),\n ('time_stamp',\n pymongo.ASCENDING),\n ('date_stamp',\n pymongo.ASCENDING)\n ]\n )\n col.create_index(\n [\n (\"symbol\",\n pymongo.ASCENDING),\n (\"type\",\n pymongo.ASCENDING),\n ('time_stamp',\n pymongo.ASCENDING)\n ],\n unique=True\n )\n\n end = datetime.datetime.now(tzutc())\n\n QA_util_log_info(\n 'Starting DOWNLOAD PROGRESS of min Klines from {:s}... 
'.format(Binance_EXCHANGE),\n ui_log=ui_log,\n ui_progress=ui_progress\n )\n for index in range(len(symbol_list)):\n symbol_info = symbol_list.iloc[index]\n # 上架仅处理交易对\n QA_util_log_info(\n 'The \"{}\" #{} of total in {}'.format(\n symbol_template.format(symbol_info['symbol']),\n index,\n len(symbol_list)\n ),\n ui_log=ui_log,\n ui_progress=ui_progress\n )\n QA_util_log_info(\n 'DOWNLOAD PROGRESS {} '\n .format(str(float(index / len(symbol_list) * 100))[0:4] + '%'),\n ui_log=ui_log,\n ui_progress=ui_progress\n )\n query_id = {\n \"symbol\": symbol_template.format(symbol_info['symbol']),\n 'type': Binance2QA_FREQUENCY_DICT[frequency]\n }\n ref = col.find(query_id).sort('time_stamp', -1)\n\n if (col.count_documents(query_id) > 0):\n start_stamp = ref.next()['time_stamp']\n start_time = datetime.datetime.fromtimestamp(start_stamp)\n QA_util_log_info(\n 'UPDATE_SYMBOL \"{}\" Trying updating \"{}\" from {} to {}'.format(\n symbol_template.format(symbol_info['symbol']),\n Binance2QA_FREQUENCY_DICT[frequency],\n QA_util_timestamp_to_str(start_time),\n QA_util_timestamp_to_str(end)\n ),\n ui_log=ui_log,\n ui_progress=ui_progress\n )\n\n # 查询到 Kline 缺漏,点抓取模式,按缺失的时间段精确请求K线数据\n missing_data_list = QA_util_find_missing_kline(\n symbol_template.format(symbol_info['symbol']),\n Binance2QA_FREQUENCY_DICT[frequency],\n )[::-1]\n else:\n start_time = BINANCE_MIN_DATE\n QA_util_log_info(\n 'NEW_SYMBOL \"{}\" Trying downloading \"{}\" from {} to {}'.format(\n symbol_template.format(symbol_info['symbol']),\n Binance2QA_FREQUENCY_DICT[frequency],\n QA_util_timestamp_to_str(start_time),\n QA_util_timestamp_to_str(end)\n ),\n ui_log=ui_log,\n ui_progress=ui_progress\n )\n miss_kline = pd.DataFrame(\n [\n [\n int(QA_util_datetime_to_Unix_timestamp(start_time)),\n int(QA_util_datetime_to_Unix_timestamp(end)),\n '{} to {}'.format(start_time,\n end)\n ]\n ],\n columns=['expected',\n 'between',\n 'missing']\n )\n missing_data_list = miss_kline.values\n\n if len(missing_data_list) > 0:\n # 查询确定中断的K线数据起止时间,缺分时数据,补分时数据\n expected = 0\n between = 1\n missing = 2\n reqParams = {}\n for i in range(len(missing_data_list)):\n reqParams['from'] = int(missing_data_list[i][expected])\n reqParams['to'] = int(missing_data_list[i][between])\n if (reqParams['from'] >\n (QA_util_datetime_to_Unix_timestamp() + 120)):\n # 出现“未来”时间,一般是默认时区设置错误造成的\n QA_util_log_info(\n 'A unexpected \\'Future\\' timestamp got, Please check self.missing_data_list_func param \\'tzlocalize\\' set. 
More info: {:s}@{:s} at {:s} but current time is {}'\n .format(\n symbol_template.format(symbol_info['symbol']),\n frequency,\n QA_util_print_timestamp(reqParams['from']),\n QA_util_print_timestamp(\n QA_util_datetime_to_Unix_timestamp()\n )\n )\n )\n # 跳到下一个时间段\n continue\n\n QA_util_log_info(\n 'Fetch \"{:s}\" slices \"{:s}\" kline:{:s} to {:s}'.format(\n symbol_template.format(symbol_info['symbol']),\n Binance2QA_FREQUENCY_DICT[frequency],\n QA_util_timestamp_to_str(\n missing_data_list[i][expected]\n )[2:16],\n QA_util_timestamp_to_str(\n missing_data_list[i][between]\n )[2:16]\n )\n )\n data = QA_fetch_binance_kline_min(\n symbol_info['symbol'],\n start_time=reqParams['from'],\n end_time=reqParams['to'],\n frequency=frequency,\n callback_func=QA_SU_save_data_binance_callback\n )\n\n if data is None:\n QA_util_log_info(\n 'SYMBOL \"{}\" from {} to {} has no MORE data'.format(\n symbol_template.format(symbol_info['symbol']),\n QA_util_timestamp_to_str(start_time),\n QA_util_timestamp_to_str(end)\n )\n )\n continue\n QA_util_log_info(\n 'DOWNLOAD PROGRESS of min Klines from {:s} accomplished.'.format(Binance_EXCHANGE),\n ui_log=ui_log,\n ui_progress=ui_progress\n )\n\n\ndef QA_SU_save_binance_1min():\n QA_SU_save_binance('1m')\n\n\ndef QA_SU_save_binance_1day():\n QA_SU_save_binance(\"1d\")\n\n\ndef QA_SU_save_binance_1hour():\n QA_SU_save_binance(\"1h\")\n\n\ndef QA_SU_save_binance_symbol(\n market=Binance_EXCHANGE,\n client=DATABASE,\n):\n \"\"\"\n 保存币安交易对信息\n \"\"\"\n market = market.upper()\n QA_util_log_info('Downloading {:s} symbol list...'.format(market))\n\n # 保存 Binance API 原始 Symbol 数据备查阅,自动交易用得着\n raw_symbol_lists = QA_util_save_raw_symbols(\n QA_fetch_binance_symbols,\n market\n )\n if (len(raw_symbol_lists) > 0):\n # 保存到 QUANTAXIS.crypto_asset_list 数字资产列表,为了跨市场统一查询做数据汇总\n symbol_lists = pd.DataFrame(raw_symbol_lists)\n\n # market,symbol为 mongodb 索引字段,保存之前必须要检查存在\n symbol_lists['market'] = market\n symbol_lists['category'] = 1\n symbol_lists.rename(\n {\n 'baseAssetPrecision': 'price_precision',\n 'baseAsset': 'base_currency',\n 'quoteAsset': 'quote_currency',\n 'status': 'state',\n },\n axis=1,\n inplace=True\n )\n symbol_lists['name'] = symbol_lists.apply(\n lambda x: '{:s}/{:s}'.\n format(x['base_currency'].upper(),\n x['quote_currency'].upper()),\n axis=1\n )\n symbol_lists['desc'] = symbol_lists['name']\n\n # 移除非共性字段,这些字段只有 broker 才关心,做对应交易所 broker 接口的时候在交易所 raw_symbol_lists\n # 数据中读取。\n symbol_lists.drop(\n [\n 'baseCommissionPrecision',\n 'quotePrecision',\n 'filters',\n 'icebergAllowed',\n 'isMarginTradingAllowed',\n 'isSpotTradingAllowed',\n 'ocoAllowed',\n 'orderTypes',\n 'quoteCommissionPrecision',\n 'quoteOrderQtyMarketAllowed',\n ],\n axis=1,\n inplace=True\n )\n if ('_id' in symbol_lists.columns.values):\n symbol_lists.drop(\n [\n '_id',\n ],\n axis=1,\n inplace=True\n )\n # 删除不交易的交易对\n symbol_lists = symbol_lists[symbol_lists['state'].isin(['TRADING'])]\n symbol_lists['created_at'] = int(\n time.mktime(datetime.datetime.now().utctimetuple())\n )\n symbol_lists['updated_at'] = int(\n time.mktime(datetime.datetime.now().utctimetuple())\n )\n\n coll_cryptocurrency_list = client.cryptocurrency_list\n coll_cryptocurrency_list.create_index(\n [('market',\n pymongo.ASCENDING),\n ('symbol',\n pymongo.ASCENDING)],\n unique=True\n )\n try:\n query_id = {'market': market}\n if (coll_cryptocurrency_list.count_documents(query_id) > 0):\n # 删掉重复数据\n query_id = {\n 'market': market,\n 'symbol': {\n '$in': symbol_lists['symbol'].tolist()\n }\n }\n 
coll_cryptocurrency_list.delete_many(query_id)\n coll_cryptocurrency_list.insert_many(\n QA_util_to_json_from_pandas(symbol_lists)\n )\n return symbol_lists\n except:\n QA_util_log_expection(\n 'QA_SU_save_binance_symbol(): Insert_many(symbol) to \"cryptocurrency_list\" got Exception with {} klines'\n .format(len(symbol_lists))\n )\n pass\n return []\n\n\ndef QA_SU_save_data_binance_callback(data, freq):\n \"\"\"\n 异步获取数据回调用的 MongoDB 存储函数\n \"\"\"\n symbol_template = Binance_SYMBOL\n QA_util_log_info(\n 'SYMBOL \"{}\" Recived \"{}\" from {} to {} in total {} klines'.format(\n data.iloc[0].symbol,\n freq,\n time.strftime(\n '%Y-%m-%d %H:%M:%S',\n time.localtime(data.iloc[0].time_stamp)\n )[2:16],\n time.strftime(\n '%Y-%m-%d %H:%M:%S',\n time.localtime(data.iloc[-1].time_stamp)\n )[2:16],\n len(data)\n )\n )\n if (freq not in ['1day', '86400', 'day', '1d']):\n col = DATABASE.cryptocurrency_min\n col.create_index(\n [\n (\"symbol\",\n pymongo.ASCENDING),\n ('time_stamp',\n pymongo.ASCENDING),\n ('date_stamp',\n pymongo.ASCENDING)\n ]\n )\n col.create_index(\n [\n (\"symbol\",\n pymongo.ASCENDING),\n (\"type\",\n pymongo.ASCENDING),\n ('time_stamp',\n pymongo.ASCENDING)\n ],\n unique=True\n )\n\n # 查询是否新 tick\n query_id = {\n \"symbol\": data.iloc[0].symbol,\n 'type': data.iloc[0].type,\n 'time_stamp': {\n '$in': data['time_stamp'].tolist()\n }\n }\n refcount = col.count_documents(query_id)\n else:\n col = DATABASE.cryptocurrency_day\n col.create_index(\n [\n (\"symbol\",\n pymongo.ASCENDING),\n (\"date_stamp\",\n pymongo.ASCENDING)\n ],\n unique=True\n )\n\n # 查询是否新 tick\n query_id = {\n \"symbol\": data.iloc[0].symbol,\n 'date_stamp': {\n '$in': data['date_stamp'].tolist()\n }\n }\n refcount = col.count_documents(query_id)\n\n # 删除多余列\n if ('_id' in data.columns.values):\n data.drop(\n [\n '_id',\n ],\n axis=1,\n inplace=True\n )\n if refcount > 0:\n if (len(data) > 1):\n # 删掉重复数据\n col.delete_many(query_id)\n data = QA_util_to_json_from_pandas(data)\n col.insert_many(data)\n else:\n # 持续接收行情,更新记录\n data.drop('created_at', axis=1, inplace=True)\n data = QA_util_to_json_from_pandas(data)\n col.replace_one(query_id, data[0])\n else:\n # 新 tick,插入记录\n data = QA_util_to_json_from_pandas(data)\n col.insert_many(data)\n\n\nif __name__ == '__main__':\n #QA_SU_save_binance_symbol()\n QA_SU_save_binance_1day()\n #QA_SU_save_binance_1hour()\n #QA_SU_save_binance_1min()\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
EasternEdgeRobotics/2019
|
[
"19f833262e7bdd026fffc0ac894327369d6bb66f"
] |
[
"topsides/benthicSpecies4.py"
] |
[
"import numpy as np\nimport cv2 as cv\nimport cmath\n\n#cap = cv.VideoCapture(0)\ncap = cv.VideoCapture('udpsrc port=5004 ! application/x-rtp,encoding-name=H264,payload=96 ! rtph264depay ! avdec_h264 ! videoconvert ! appsink', cv.CAP_GSTREAMER)\n \n\nPI = 3.14159\n\nwhile(1):\n # read the video capture frame\n _, frame = cap.read()\n #cv.imshow('frame',frame)\n #break\n # blur for better edge finding\n blur = cv.GaussianBlur(frame,(5,5),0)\n frameGray = cv.cvtColor(blur, cv.COLOR_BGR2GRAY)\n # create threshold for edge finding\n ret, thresh = cv.threshold(frameGray, 120, 255, cv.THRESH_BINARY)\n contours, _ = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)\n count = 0\n tri = 0\n sqr = 0\n rect = 0 \n circ = 0\n for contour in contours:\n area = cv.contourArea(contour)\n if area > 1000 and area < 30000:\n M = cv.moments(contour)\n cX = int(M[\"m10\"]/M[\"m00\"])\n cY = int(M[\"m01\"]/M[\"m00\"])\n if(frame[cY,cX][0] < 50 and frame[cY,cX][1] < 50 and frame[cY,cX][2] < 50):\n cv.circle(frame, (cX,cY), 7, (255,255,0), -1)\n #cv.drawContours(frame, contour, -1, (0,255,0), 3)\n count += 1\n \n (x,y), (MA, ma), angle = cv.fitEllipse(contour)\n areaEllipse = PI/4 * MA * ma\n if(abs(areaEllipse - area) < 100):\n #is circle\n circ += 1\n cv.drawContours(frame, contour, -1, (0,255,255), 3)\n else:\n ((x,y), (w,h), rot) = cv.minAreaRect(contour)\n if(float(w) > 0.0 and float(h) > 0.0):\n ratio = w / float(h)\n #font = cv.FONT_HERSHEY_COMPLEX_SMALL\n #cv.putText(frame, str(ratio), (cX, cY - 40), font, 2, (0, 0, 255), 2, cv.LINE_AA)\n \n if ratio <= 0.6 or ratio >= 2.8:\n #is rect\n cv.drawContours(frame, contour, -1, (0,255,0), 3)\n rect += 1\n else:\n #peri = cv.arcLength(contour, True)\n #approx = cv.approxPolyDP(contour, 0.04 * peri, True)\n #if len(approx) == 3:\n \n \n areaAdj = 1400\n #font = cv.FONT_HERSHEY_COMPLEX_SMALL\n #cv.putText(frame, str(int(area)), (cX, cY - 40), font, 2, (0, 0, 255), 2, cv.LINE_AA)\n #cv.putText(frame, str(int(w*h/2)), (cX, cY - 60), font, 2, (0, 0, 255), 2, cv.LINE_AA)\n if(w*h/2 > area - areaAdj and w*h/2 < area + areaAdj):\n #is triangle\n \n cv.drawContours(frame, contour, -1, (255,0,0), 3)\n tri += 1\n else:\n #is square\n sqr += 1\n cv.drawContours(frame, contour, -1, (0,0,255), 3)\n\n cv.circle(frame, (70, 300), 20, (0,0,255), -1)\n pts = np.array([[70, 330], [50, 360], [90, 360]], np.int32)\n pts = pts.reshape((-1,1,2))\n cv.fillPoly(frame, [pts], (0, 0, 255))\n cv.rectangle(frame, (50, 381), (90, 389), (0,0,255), -1)\n cv.rectangle(frame, (50, 410), (90, 450), (0,0,255), -1)\n font = cv.FONT_HERSHEY_COMPLEX_SMALL\n cv.putText(frame, str(circ), (10, 310), font, 2, (0, 0, 255), 2, cv.LINE_AA)\n cv.putText(frame, str(tri), (10, 355), font, 2, (0, 0, 255), 2, cv.LINE_AA)\n cv.putText(frame, str(rect), (10, 400), font, 2, (0, 0, 255), 2, cv.LINE_AA)\n cv.putText(frame, str(sqr), (10, 445), font, 2, (0, 0, 255), 2, cv.LINE_AA)\n\n cv.imshow('frame',frame)\n #cv.imshow('thresh', thresh)\n\n k = cv.waitKey(5) & 0xFF\n if k == 27:\n break\n\ncv.destroyAllWindows()\ncap.release()\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mrtichou/JumpFloodingAlgorythm
|
[
"d7bd56e74aa527cf27a14efe986f98fed4c18d2b"
] |
[
"jumpflood/jumpflood.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 13 10:50:55 2021\n\n@author: marti\n\"\"\"\nif __name__ == '__main__':\n import os, sys\n depth = 1\n root_folder = os.path.realpath(os.path.join(__file__,'../' * (1+depth)))\n os.chdir(root_folder)\n sys.path.append(root_folder)\n \nfrom jumpflood.helpers import (init_maps, \n step_sequence, \n spread, \n inf_mask, \n borders, \n voronoi,\n image_to_boolean)\n\nclass JumpFlood:\n def __init__(self,dimensions,seeds_ancestor,metric):\n \n self.dimensions = dimensions\n self.seeds_ancestor = seeds_ancestor\n self.metric = metric\n \n # initial seed coordinates\n self.seeds_coordinates = seeds_ancestor.astype(int)\n \n # instanciate maps used to store most promising seeds\n self.distance_map, \\\n self.seed_map, \\\n self.ancestor_map = init_maps(dimensions)\n \n \n @property\n def height(self):\n return self.dimensions[0]\n \n @property\n def width(self):\n return self.dimensions[1]\n \n @property\n def distance_field(self):\n return self.distance_map\n \n @property\n def voronoi_diagram(self):\n return voronoi(self.ancestor_map)\n \n def sequence(self, variant):\n return step_sequence(self.dimensions, variant)\n \n def message(self,verbose,index,end,stepsize):\n if verbose:\n print((f'Iteration: {index:2d}/{end:2d} \\t'\n f'Step size: {stepsize:5d} \\t '\n f'Active seeds: {self.seeds_coordinates.shape[1]:10d}'))\n \n def spread_seeds(self,stepsize):\n return spread(self.seeds_coordinates,\n self.seeds_ancestor,\n stepsize,\n self.metric)\n \n def pin_on_map(self, seeds_candidates, candidates_ancestors):\n # calculate new distances from each seed to its ancestor\n distances = self.metric.distance(seeds_candidates, candidates_ancestors)\n \n for k, d in enumerate(distances):\n i,j = seeds_candidates[:,k]\n \n # skip seed if out of bounds\n if not(0 <= i < self.height and 0 <= j < self.width):\n continue\n \n # replace current seed on the map if better than already existing one\n if d < self.distance_map[i,j]:\n self.ancestor_map[i,j,:] = candidates_ancestors[:,k]\n self.seed_map[i,j,:] = seeds_candidates[:,k]\n self.distance_map[i,j] = d\n \n def list_from_map(self):\n # mask map where no seed has arrived yet\n seed_mask = inf_mask(self.distance_map)\n \n # extract all remaining seed from the maps\n self.seeds_coordinates = self.seed_map[seed_mask].T\n self.seeds_ancestor = self.ancestor_map[seed_mask].T\n \n def iterate(self,stepsize):\n # 1. spread seeds in all directions by a distance equal to the stepsize\n seeds_candidates, candidates_ancestors = self.spread_seeds(stepsize)\n \n # 2. pin all seeds on a 2D-map and keep only best candidates\n self.pin_on_map(seeds_candidates, candidates_ancestors)\n \n # 3. 
lookup all remaining seeds on the map\n self.list_from_map()\n \n def flood(self, variant = 'JFA', verbose = False):\n sequence = self.sequence(variant)\n for index, stepsize in enumerate(sequence, start=1):\n self.message(verbose, index, len(sequence), stepsize)\n self.iterate(stepsize)\n\n\nclass SignedJumpFlood(JumpFlood):\n def __init__(self, bool_array, metric):\n self.bool_array = bool_array\n super().__init__(bool_array.shape,borders(bool_array),metric)\n \n @classmethod\n def from_image(cls, input_image, metric):\n \"\"\"Instantiate SignedJumpFlood from PIL bw image.\"\"\"\n bool_array = image_to_boolean(input_image)\n return cls(bool_array, metric)\n \n @property\n def signed_distance_field(self):\n sign = (1 - 2 * self.bool_array)\n return sign * self.distance_field\n\n\n\ndef _test(input_path= './jumpflood/test/water.jpg', metric_name= 'r',variant= '1+JFA', verbose= True):\n from numpy import nanmax, nanmin, tanh, save\n from PIL import Image\n from tools.metricpresets import metric_from_preset\n from matplotlib import pyplot as plt\n\n \n ## Input loading\n \n # load input image as PIL Image\n input_image = Image.open(input_path)\n # boolean mask derived from the input image (used by the metric preset and for plotting)\n bool_image = image_to_boolean(input_image)\n \n ## Setup available metrics\n metric = metric_from_preset(metric_name, bool_image)\n \n ## Setup initial state\n jf = SignedJumpFlood.from_image(input_image, metric)\n jf.flood(variant = variant, verbose = verbose)\n \n ## Plotting\n \n plt.figure(figsize=(20,10))\n plt.imshow(bool_image)\n # plt.plot(coast_lines[:,0],coast_lines[:,1],'xg',markersize=1)\n \n plt.figure(figsize=(20,10))\n plt.imshow(jf.distance_map/nanmax(jf.distance_map),extent=[-180,180,-90,90])\n \n \n beautyful_signed = -jf.signed_distance_field\n beautyful_signed[beautyful_signed< 0] /= -nanmin(beautyful_signed)\n beautyful_signed[beautyful_signed>=0] /= nanmax(beautyful_signed)\n beautyful_signed = tanh(beautyful_signed * 1.5)\n \n plt.figure(figsize=(20,10))\n plt.imshow(beautyful_signed,extent=[-180,180,-90,90],\n cmap='twilight')\n \n plt.figure(figsize=(20,10))\n plt.imshow(jf.voronoi_diagram,extent=[-180,180,-90,90],cmap='hsv',\n interpolation = 'nearest')\n \n save('./jumpflood/test/signed_tst.npy',jf.signed_distance_field)\n save('./jumpflood/test/voronoi_tst.npy',jf.voronoi_diagram)\n\nif __name__ == '__main__':\n _test('./jumpflood/test/water.jpg')\n\n\nif __name__ == '__main__':\n sys.path.remove(root_folder)"
] |
[
[
"numpy.nanmax",
"matplotlib.pyplot.imshow",
"numpy.nanmin",
"numpy.save",
"numpy.tanh",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jinfagang/mmdetection
|
[
"707c90ff1621c509eecba21ee7a9b97955003873"
] |
[
"mmdet/models/necks/rssh_fpn.py"
] |
[
"# Copyright (C) 2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\nimport torch\nimport torch.nn as nn\nfrom mmdet.core import auto_fp16\n\nfrom .fpn import FPN\nfrom ..registry import NECKS\nfrom ..utils import ConvModule\n\n\nclass RSSH(nn.Module):\n def __init__(self, in_channels, conv_cfg, norm_cfg, activation):\n super(RSSH, self).__init__()\n self.in_channels = in_channels\n self.activation = activation\n\n self.conv1 = ConvModule(\n in_channels,\n in_channels // 2,\n 1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n activation=self.activation,\n inplace=False)\n\n self.conv2 = ConvModule(\n in_channels // 2,\n in_channels // 4,\n 1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n activation=self.activation,\n inplace=False)\n\n self.conv3 = ConvModule(\n in_channels // 4,\n in_channels // 4,\n 1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n activation=self.activation,\n inplace=False)\n\n def forward(self, inputs):\n x1 = self.conv1(inputs)\n x2 = self.conv2(x1)\n x3 = self.conv3(x2)\n\n return torch.cat((x1, x2, x3), axis=1)\n\n\[email protected]_module\nclass RSSH_FPN(FPN):\n\n def __init__(self,\n in_channels,\n out_channels,\n num_outs,\n start_level=0,\n end_level=-1,\n add_extra_convs=False,\n extra_convs_on_inputs=True,\n relu_before_extra_convs=False,\n no_norm_on_lateral=False,\n conv_cfg=None,\n norm_cfg=None,\n activation=None):\n super().__init__(in_channels,\n out_channels,\n num_outs,\n start_level,\n end_level,\n add_extra_convs,\n extra_convs_on_inputs,\n relu_before_extra_convs,\n no_norm_on_lateral,\n conv_cfg,\n norm_cfg,\n activation)\n\n self.context_modules = \\\n nn.ModuleList(\n [RSSH(out_channels, conv_cfg, norm_cfg, activation) for _ in self.fpn_convs])\n\n @auto_fp16()\n def forward(self, inputs):\n outs = super().forward(inputs)\n outs = [self.context_modules[i](out) for i, out in enumerate(outs)]\n return tuple(outs)\n"
] |
[
[
"torch.cat"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bartnikm/tardis-bartnikm
|
[
"2b0f3110fefd6740349ca7b33fe72bf025c88452"
] |
[
"tardis/montecarlo/montecarlo_numba/vpacket.py"
] |
[
"import math\n\nimport numpy as np\nfrom numba import float64, int64\nfrom numba import njit\nfrom numba.experimental import jitclass\n\nfrom tardis.montecarlo.montecarlo_numba import njit_dict_no_parallel\nfrom tardis.montecarlo import (\n montecarlo_configuration as montecarlo_configuration,\n)\n\nfrom tardis.montecarlo.montecarlo_numba.r_packet import (\n PacketStatus,\n)\nfrom tardis.montecarlo.montecarlo_numba.r_packet_transport import (\n move_packet_across_shell_boundary,\n)\n\nfrom tardis.montecarlo.montecarlo_numba.calculate_distances import (\n calculate_distance_boundary,\n calculate_distance_line,\n)\n\nfrom tardis.montecarlo.montecarlo_numba.frame_transformations import (\n get_doppler_factor,\n angle_aberration_LF_to_CMF,\n angle_aberration_CMF_to_LF,\n)\n\nfrom tardis.montecarlo.montecarlo_numba.numba_config import SIGMA_THOMSON\n\nvpacket_spec = [\n (\"r\", float64),\n (\"mu\", float64),\n (\"nu\", float64),\n (\"energy\", float64),\n (\"next_line_id\", int64),\n (\"current_shell_id\", int64),\n (\"status\", int64),\n (\"index\", int64),\n]\n\n\n@jitclass(vpacket_spec)\nclass VPacket(object):\n def __init__(\n self,\n r,\n mu,\n nu,\n energy,\n current_shell_id,\n next_line_id,\n index=0,\n ):\n self.r = r\n self.mu = mu\n self.nu = nu\n self.energy = energy\n self.current_shell_id = current_shell_id\n self.next_line_id = next_line_id\n self.status = PacketStatus.IN_PROCESS\n self.index = index\n\n\n@njit(**njit_dict_no_parallel)\ndef trace_vpacket_within_shell(v_packet, numba_model, numba_plasma):\n \"\"\"\n Trace VPacket within one shell (relatively simple operation)\n \"\"\"\n r_inner = numba_model.r_inner[v_packet.current_shell_id]\n r_outer = numba_model.r_outer[v_packet.current_shell_id]\n\n distance_boundary, delta_shell = calculate_distance_boundary(\n v_packet.r, v_packet.mu, r_inner, r_outer\n )\n # defining start for line interaction\n start_line_id = v_packet.next_line_id\n\n # e scattering initialization\n\n cur_electron_density = numba_plasma.electron_density[\n v_packet.current_shell_id\n ]\n chi_e = cur_electron_density * SIGMA_THOMSON\n\n # Calculating doppler factor\n doppler_factor = get_doppler_factor(\n v_packet.r, v_packet.mu, numba_model.time_explosion\n )\n comov_nu = v_packet.nu * doppler_factor\n\n chi_continuum = chi_e\n\n tau_continuum = chi_continuum * distance_boundary\n tau_trace_combined = tau_continuum\n\n cur_line_id = start_line_id\n\n for cur_line_id in range(start_line_id, len(numba_plasma.line_list_nu)):\n # if tau_trace_combined > 10: ### FIXME ?????\n # break\n\n nu_line = numba_plasma.line_list_nu[cur_line_id]\n # TODO: Check if this is what the C code does\n\n tau_trace_line = numba_plasma.tau_sobolev[\n cur_line_id, v_packet.current_shell_id\n ]\n\n is_last_line = cur_line_id == len(numba_plasma.line_list_nu) - 1\n\n distance_trace_line = calculate_distance_line(\n v_packet,\n comov_nu,\n is_last_line,\n nu_line,\n numba_model.time_explosion,\n )\n\n if distance_boundary <= distance_trace_line:\n break\n\n tau_trace_combined += tau_trace_line\n\n else:\n if cur_line_id == (len(numba_plasma.line_list_nu) - 1):\n cur_line_id += 1\n v_packet.next_line_id = cur_line_id\n\n return tau_trace_combined, distance_boundary, delta_shell\n\n\n@njit(**njit_dict_no_parallel)\ndef trace_vpacket(v_packet, numba_model, numba_plasma):\n \"\"\"\n Trace single vpacket.\n Parameters\n ----------\n v_packet\n numba_model\n numba_plasma\n\n Returns\n -------\n\n \"\"\"\n\n tau_trace_combined = 0.0\n while True:\n (\n tau_trace_combined_shell,\n 
distance_boundary,\n delta_shell,\n ) = trace_vpacket_within_shell(v_packet, numba_model, numba_plasma)\n tau_trace_combined += tau_trace_combined_shell\n\n move_packet_across_shell_boundary(\n v_packet, delta_shell, len(numba_model.r_inner)\n )\n\n if tau_trace_combined > montecarlo_configuration.tau_russian:\n event_random = np.random.random()\n if event_random > montecarlo_configuration.survival_probability:\n v_packet.energy = 0.0\n v_packet.status = PacketStatus.EMITTED\n else:\n v_packet.energy = (\n v_packet.energy\n / montecarlo_configuration.survival_probability\n * math.exp(-tau_trace_combined)\n )\n tau_trace_combined = 0.0\n\n # Moving the v_packet\n new_r = math.sqrt(\n v_packet.r * v_packet.r\n + distance_boundary * distance_boundary\n + 2.0 * v_packet.r * distance_boundary * v_packet.mu\n )\n v_packet.mu = (v_packet.mu * v_packet.r + distance_boundary) / new_r\n v_packet.r = new_r\n\n if v_packet.status == PacketStatus.EMITTED:\n break\n return tau_trace_combined\n\n\n@njit(**njit_dict_no_parallel)\ndef trace_vpacket_volley(\n r_packet, vpacket_collection, numba_model, numba_plasma\n):\n \"\"\"\n Shoot a volley of vpackets (the vpacket collection specifies how many)\n from the current position of the rpacket.\n\n Parameters\n ----------\n r_packet : [type]\n [description]\n vpacket_collection : [type]\n [description]\n numba_model : [type]\n [description]\n numba_plasma : [type]\n [description]\n \"\"\"\n\n if (r_packet.nu < vpacket_collection.v_packet_spawn_start_frequency) or (\n r_packet.nu > vpacket_collection.v_packet_spawn_end_frequency\n ):\n\n return\n\n no_of_vpackets = vpacket_collection.number_of_vpackets\n if no_of_vpackets == 0:\n return\n\n ### TODO theoretical check for r_packet nu within vpackets bins - is done somewhere else I think\n if r_packet.r > numba_model.r_inner[0]: # not on inner_boundary\n r_inner_over_r = numba_model.r_inner[0] / r_packet.r\n mu_min = -math.sqrt(1 - r_inner_over_r * r_inner_over_r)\n v_packet_on_inner_boundary = False\n if montecarlo_configuration.full_relativity:\n mu_min = angle_aberration_LF_to_CMF(\n r_packet, numba_model.time_explosion, mu_min\n )\n else:\n v_packet_on_inner_boundary = True\n mu_min = 0.0\n\n mu_bin = (1.0 - mu_min) / no_of_vpackets\n r_packet_doppler_factor = get_doppler_factor(\n r_packet.r, r_packet.mu, numba_model.time_explosion\n )\n for i in range(no_of_vpackets):\n v_packet_mu = mu_min + i * mu_bin + np.random.random() * mu_bin\n\n if v_packet_on_inner_boundary: # The weights are described in K&S 2014\n weight = 2 * v_packet_mu / no_of_vpackets\n else:\n weight = (1 - mu_min) / (2 * no_of_vpackets)\n\n # C code: next line, angle_aberration_CMF_to_LF( & virt_packet, storage);\n if montecarlo_configuration.full_relativity:\n v_packet_mu = angle_aberration_CMF_to_LF(\n r_packet, numba_model.time_explosion, v_packet_mu\n )\n v_packet_doppler_factor = get_doppler_factor(\n r_packet.r, v_packet_mu, numba_model.time_explosion\n )\n\n # transform between r_packet mu and v_packet_mu\n\n doppler_factor_ratio = r_packet_doppler_factor / v_packet_doppler_factor\n\n v_packet_nu = r_packet.nu * doppler_factor_ratio\n v_packet_energy = r_packet.energy * weight * doppler_factor_ratio\n\n # TODO: Make sure we have a new continuum object for each vpacket\n # comov_nu = v_packet_nu * v_packet_doppler_factor\n # continuum.calculate(comov_nu, r_packet.current_shell_id)\n\n v_packet = VPacket(\n r_packet.r,\n v_packet_mu,\n v_packet_nu,\n v_packet_energy,\n r_packet.current_shell_id,\n r_packet.next_line_id,\n i,\n )\n\n 
tau_vpacket = trace_vpacket(v_packet, numba_model, numba_plasma)\n\n v_packet.energy *= math.exp(-tau_vpacket)\n\n vpacket_collection.set_properties(\n v_packet.nu,\n v_packet.energy,\n v_packet_mu,\n r_packet.r,\n r_packet.last_interaction_in_nu,\n r_packet.last_interaction_type,\n r_packet.last_line_interaction_in_id,\n r_packet.last_line_interaction_out_id,\n )\n"
] |
[
[
"numpy.random.random"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
luk036/corr-solver
|
[
"efc48c6d85183ad1a3dfa6b319f5bd532c10f089"
] |
[
"src/corr_solver/mle_corr_oracle.py"
] |
[
"# -*- coding: utf-8 -*-\n# import cvxpy as cvx\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\nfrom lmi_solver.lmi0_oracle import lmi0_oracle\nfrom lmi_solver.lmi_oracle import lmi_oracle\n\nArr = Union[np.ndarray]\nCut = Tuple[Arr, float]\n\n\nclass mle_oracle:\n def __init__(self, Sig: Arr, Y: Arr):\n \"\"\"Maximum likelyhood estimation:\n\n min log det Ω(p) + Tr( Ω(p)^{-1} Y )\n s.t. 2Y ⪰ Ω(p) ⪰ 0,\n\n Arguments:\n Sig (Arr): Covariance matrix\n Y (Arr): Biased covariance matrix\n \"\"\"\n self.Y = Y\n self.Sig = Sig\n self.lmi0 = lmi0_oracle(Sig)\n self.lmi = lmi_oracle(Sig, 2 * Y)\n # self.lmi2 = lmi2_oracle(Sig, 2*Y)\n\n def __call__(self, x: Arr, t: float) -> Tuple[Cut, Optional[float]]:\n \"\"\"[summary]\n\n Arguments:\n x (Arr): coefficients of basis functions\n t (float): the best-so-far optimal value\n\n Returns:\n Tuple[Cut, float]: [description]\n \"\"\"\n if cut := self.lmi(x):\n return cut, None\n\n if cut := self.lmi0(x):\n return cut, None\n\n R = self.lmi0.Q.sqrt()\n invR = np.linalg.inv(R)\n S = invR @ invR.T\n SY = S @ self.Y\n diag = np.diag(R)\n f1 = 2 * np.sum(np.log(diag)) + np.trace(SY)\n\n n = len(x)\n m = len(self.Y)\n g = np.zeros(n)\n for i in range(n):\n SFsi = S @ self.Sig[i]\n # g[i] = sum(S[k] @ self.Sig[k] for k in range(m))\n g[i] = np.trace(SFsi)\n g[i] -= sum(SFsi[k, :] @ SY[:, k] for k in range(m))\n\n f = f1 - t\n if (f := f1 - t) >= 0:\n return (g, f), None\n return (g, 0.0), f1\n"
] |
[
[
"numpy.diag",
"numpy.log",
"numpy.linalg.inv",
"numpy.zeros",
"numpy.trace"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ryoich/python_zero
|
[
"fe4a5fd8b11c8c059d82b797cd1668f96d54e541",
"fe4a5fd8b11c8c059d82b797cd1668f96d54e541"
] |
[
"samples/popmap/popmap.py",
"samples/dcgan/dcgan.py"
] |
[
"import matplotlib.cm as cm\nimport matplotlib.pyplot as plt\n\nd_pop = {}\n\nwith open(\"population.csv\") as f:\n for line in f:\n code, _, pop = line.split(\",\")\n d_pop[int(code)] = int(pop)\n\ndata = []\nwith open(\"position.csv\") as f:\n for line in f:\n a = line.strip().split(\",\")\n if len(a) < 4:\n continue\n code, _, y, x = a\n code = int(code)\n x, y = float(x), float(y)\n if code in d_pop:\n print(x,y,d_pop[code])\n data.append((x, y, d_pop[code]))\n\ndata = sorted(data, key=lambda x: x[2])\n\nnx, ny, nn = [], [], []\nfor x, y, n in data:\n nx.append(x)\n ny.append(y)\n nn.append(n ** 0.5 * 0.3)\nplt.figure(figsize=(15, 15), dpi=50)\nplt.scatter(nx, ny, c=nn, s=nn, cmap=cm.seismic)\nplt.savefig(\"test.png\")\n",
"\"\"\"\nThe original file was released under the Apache 2.0 License\nhttps://www.tensorflow.org/tutorials/generative/dcgan\n\nModified by H. Watanabe (2020)\n\"\"\"\n\nimport time\n\nfrom IPython import display\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\n\ndef load_dataset(filename, batch_size=32):\n train_images = np.load(filename)\n train_images = train_images.reshape(\n train_images.shape[0], 28, 28, 1).astype('float32')\n train_images = (train_images - 127.5) / 127.5\n buffer_size = train_images.shape[0]\n dataset = tf.data.Dataset.from_tensor_slices(\n train_images).shuffle(buffer_size).batch(batch_size)\n return dataset\n\n\ndef make_generator_model():\n model = tf.keras.Sequential()\n model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n\n model.add(layers.Reshape((7, 7, 256)))\n # Note: None is the batch size\n assert model.output_shape == (None, 7, 7, 256)\n\n model.add(layers.Conv2DTranspose(\n 128, (5, 5), strides=(1, 1), padding='same', use_bias=False))\n assert model.output_shape == (None, 7, 7, 128)\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n\n model.add(layers.Conv2DTranspose(\n 64, (5, 5), strides=(2, 2), padding='same', use_bias=False))\n assert model.output_shape == (None, 14, 14, 64)\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n\n model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2),\n padding='same', use_bias=False, activation='tanh'))\n assert model.output_shape == (None, 28, 28, 1)\n\n return model\n\n\ndef make_discriminator_model():\n model = tf.keras.Sequential()\n model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',\n input_shape=[28, 28, 1]))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n\n model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n\n model.add(layers.Flatten())\n model.add(layers.Dense(1))\n\n return model\n\n\ndef discriminator_loss(real_output, fake_output):\n real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n total_loss = real_loss + fake_loss\n return total_loss\n\n\ndef generator_loss(fake_output):\n return cross_entropy(tf.ones_like(fake_output), fake_output)\n\n\[email protected]\ndef train_step(generator_optimizer, discriminator_optimizer, images, batch_size, noise_dim):\n noise = tf.random.normal([batch_size, noise_dim])\n\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n generated_images = generator(noise, training=True)\n\n real_output = discriminator(images, training=True)\n fake_output = discriminator(generated_images, training=True)\n\n gen_loss = generator_loss(fake_output)\n disc_loss = discriminator_loss(real_output, fake_output)\n\n gradients_of_generator = gen_tape.gradient(\n gen_loss, generator.trainable_variables)\n gradients_of_discriminator = disc_tape.gradient(\n disc_loss, discriminator.trainable_variables)\n\n generator_optimizer.apply_gradients(\n zip(gradients_of_generator, generator.trainable_variables))\n discriminator_optimizer.apply_gradients(\n zip(gradients_of_discriminator, discriminator.trainable_variables))\n\n\ndef train(dataset, epochs=100, batch_size=32):\n noise_dim = 100\n generator_optimizer = tf.keras.optimizers.Adam(1e-4)\n discriminator_optimizer = 
tf.keras.optimizers.Adam(1e-4)\n seed = tf.random.normal([16, noise_dim])\n for epoch in range(epochs):\n start = time.time()\n\n for image_batch in dataset:\n train_step(generator_optimizer, discriminator_optimizer,\n image_batch, batch_size, noise_dim)\n\n generate_and_save_images(generator, epoch + 1, seed)\n print('Time for epoch {} is {} sec'.format(\n epoch + 1, time.time() - start))\n\n\ndef generate_and_save_images(model, epoch, test_input):\n predictions = model(test_input, training=False)\n\n for i in range(predictions.shape[0]):\n plt.subplot(4, 4, i + 1)\n plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')\n plt.axis('off')\n\n filename = 'image_at_epoch_{:04d}.png'.format(epoch)\n plt.savefig(filename)\n plt.show()\n\n\ndef run(filename):\n dataset = load_dataset(filename)\n train(dataset)\n\n\ngenerator = make_generator_model()\ndiscriminator = make_discriminator_model()\ncross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n"
] |
[
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.imshow",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.losses.BinaryCrossentropy",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"numpy.load",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.savefig",
"tensorflow.zeros_like",
"tensorflow.keras.layers.Reshape",
"matplotlib.pyplot.show",
"tensorflow.GradientTape",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.ones_like",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dropout",
"tensorflow.random.normal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bmurdata/sktime
|
[
"fb079e76e3f3dfbb849fb805e53f09adec6cdf79",
"fb079e76e3f3dfbb849fb805e53f09adec6cdf79",
"fb079e76e3f3dfbb849fb805e53f09adec6cdf79"
] |
[
"sktime/forecasting/compose/_pipeline.py",
"sktime/transformers/panel/tests/test_hog1d_transformer.py",
"sktime/contrib/basic_benchmarking.py"
] |
[
"#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\n__author__ = [\"Markus Löning\"]\n__all__ = [\"TransformedTargetForecaster\"]\n\nfrom sklearn.base import clone\n\nfrom sktime.base import _HeterogenousMetaEstimator\nfrom sktime.forecasting.base._base import BaseForecaster\nfrom sktime.forecasting.base._base import DEFAULT_ALPHA\nfrom sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin\nfrom sktime.forecasting.base._sktime import _SktimeForecaster\nfrom sktime.transformers.base import _SeriesToSeriesTransformer\nfrom sktime.utils.validation.forecasting import check_y\nfrom sktime.utils.validation.series import check_series\n\n\nclass TransformedTargetForecaster(\n _OptionalForecastingHorizonMixin,\n _SktimeForecaster,\n _HeterogenousMetaEstimator,\n _SeriesToSeriesTransformer,\n):\n \"\"\"Meta-estimator for forecasting transformed time series.\"\"\"\n\n _required_parameters = [\"steps\"]\n _tags = {\"univariate-only\": True}\n\n def __init__(self, steps):\n self.steps = steps\n self.steps_ = None\n super(TransformedTargetForecaster, self).__init__()\n\n def _check_steps(self):\n names, estimators = zip(*self.steps)\n\n # validate names\n self._check_names(names)\n\n # validate estimators\n transformers = estimators[:-1]\n forecaster = estimators[-1]\n\n valid_transformer_type = _SeriesToSeriesTransformer\n for transformer in transformers:\n if not isinstance(transformer, valid_transformer_type):\n raise TypeError(\n f\"All intermediate steps should be \"\n f\"instances of {valid_transformer_type}, \"\n f\"but transformer: {transformer} is not.\"\n )\n\n valid_forecaster_type = BaseForecaster\n if not isinstance(forecaster, valid_forecaster_type):\n raise TypeError(\n f\"Last step of {self.__class__.__name__} must be of type: \"\n f\"{valid_forecaster_type}, \"\n f\"but forecaster: {forecaster} is not.\"\n )\n\n # Shallow copy\n return list(self.steps)\n\n def _iter_transformers(self, reverse=False):\n\n # exclude final forecaster\n steps = self.steps_[:-1]\n\n if reverse:\n steps = reversed(steps)\n\n for idx, (name, transformer) in enumerate(steps):\n yield idx, name, transformer\n\n def __len__(self):\n \"\"\"\n Returns the length of the Pipeline\n \"\"\"\n return len(self.steps)\n\n @property\n def named_steps(self):\n \"\"\"Map the steps to a dictionary\"\"\"\n return dict(self.steps)\n\n def fit(self, y, X=None, fh=None):\n \"\"\"Fit to training data.\n\n Parameters\n ----------\n y : pd.Series\n Target time series to which to fit the forecaster.\n fh : int, list or np.array, optional (default=None)\n The forecasters horizon with the steps ahead to to predict.\n X : pd.DataFrame, optional (default=None)\n Exogenous variables are ignored\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self.steps_ = self._check_steps()\n self._set_y_X(y, X)\n self._set_fh(fh)\n\n # transform\n yt = check_y(y)\n for step_idx, name, transformer in self._iter_transformers():\n t = clone(transformer)\n yt = t.fit_transform(yt)\n self.steps_[step_idx] = (name, t)\n\n # fit forecaster\n name, forecaster = self.steps[-1]\n f = clone(forecaster)\n f.fit(yt, X, fh)\n self.steps_[-1] = (name, f)\n\n self._is_fitted = True\n return self\n\n def _predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):\n if return_pred_int:\n raise NotImplementedError()\n\n forecaster = self.steps_[-1][1]\n y_pred = forecaster.predict(fh, X, return_pred_int=return_pred_int, alpha=alpha)\n\n for 
_, _, transformer in self._iter_transformers(reverse=True):\n y_pred = transformer.inverse_transform(y_pred)\n\n return y_pred\n\n def update(self, y, X=None, update_params=False):\n \"\"\"Update fitted parameters\n\n Parameters\n ----------\n y : pd.Series\n X : pd.DataFrame\n update_params : bool, optional (default=False)\n\n Returns\n -------\n self : an instance of self\n \"\"\"\n self.check_is_fitted()\n self._update_y_X(y, X)\n\n for step_idx, name, transformer in self._iter_transformers():\n if hasattr(transformer, \"update\"):\n transformer.update(y, update_params=update_params)\n self.steps_[step_idx] = (name, transformer)\n\n name, forecaster = self.steps_[-1]\n forecaster.update(y, update_params=update_params)\n self.steps_[-1] = (name, forecaster)\n return self\n\n def transform(self, Z, X=None):\n self.check_is_fitted()\n zt = check_series(Z, enforce_univariate=True)\n for _, _, transformer in self._iter_transformers():\n zt = transformer.transform(zt, X)\n return zt\n\n def inverse_transform(self, Z, X=None):\n self.check_is_fitted()\n zt = check_series(Z, enforce_univariate=True)\n for _, _, transformer in self._iter_transformers(reverse=True):\n zt = transformer.inverse_transform(zt, X)\n return zt\n\n def get_params(self, deep=True):\n \"\"\"Get parameters for this estimator.\n Parameters\n ----------\n deep : boolean, optional\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n Returns\n -------\n params : mapping of string to any\n Parameter names mapped to their values.\n \"\"\"\n return self._get_params(\"steps\", deep=deep)\n\n def set_params(self, **kwargs):\n \"\"\"Set the parameters of this estimator.\n Valid parameter keys can be listed with ``get_params()``.\n Returns\n -------\n self\n \"\"\"\n self._set_params(\"steps\", **kwargs)\n return self\n",
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport numbers\nfrom sktime.transformers.panel.hog1d import HOG1DTransformer\nfrom sktime.utils._testing.panel import _make_nested_from_array\n\n\n# Check that exception is raised for bad num intervals.\n# input types - string, float, negative int, negative float, empty dict\n# and an int that is larger than the time series length.\n# correct input is meant to be a positive integer of 1 or more.\[email protected](\"bad_num_intervals\", [\"str\", 1.2, -1.2, -1, {}, 11, 0])\ndef test_bad_num_intervals(bad_num_intervals):\n X = _make_nested_from_array(np.ones(10), n_instances=10, n_columns=1)\n\n if not isinstance(bad_num_intervals, int):\n with pytest.raises(TypeError):\n HOG1DTransformer(num_intervals=bad_num_intervals).fit(X).transform(X)\n else:\n with pytest.raises(ValueError):\n HOG1DTransformer(num_intervals=bad_num_intervals).fit(X).transform(X)\n\n\n# Check that exception is raised for bad num bins.\n# input types - string, float, negative float,\n# negative int, empty dict and zero.\n# correct input is meant to be a positive integer of 1 or more.\[email protected](\"bad_num_bins\", [\"str\", 1.2, -1.2, -1, {}, 0])\ndef test_bad_num_bins(bad_num_bins):\n X = _make_nested_from_array(np.ones(10), n_instances=10, n_columns=1)\n\n if not isinstance(bad_num_bins, int):\n with pytest.raises(TypeError):\n HOG1DTransformer(num_bins=bad_num_bins).fit(X).transform(X)\n else:\n with pytest.raises(ValueError):\n HOG1DTransformer(num_bins=bad_num_bins).fit(X).transform(X)\n\n\n# Check that exception is raised for bad scaling factor.\n# input types - string, float, negative float, negative int,\n# empty dict and zero.\n# correct input is meant to be any number (so the floats and\n# ints shouldn't raise an error).\[email protected](\"bad_scaling_factor\", [\"str\", 1.2, -1.2, -1, {}, 0])\ndef test_bad_scaling_factor(bad_scaling_factor):\n\n X = _make_nested_from_array(np.ones(10), n_instances=10, n_columns=1)\n\n if not isinstance(bad_scaling_factor, numbers.Number):\n with pytest.raises(TypeError):\n HOG1DTransformer(scaling_factor=bad_scaling_factor).fit(X).transform(X)\n else:\n HOG1DTransformer(scaling_factor=bad_scaling_factor).fit(X).transform(X)\n\n\n# Check the transformer has changed the data correctly.\ndef test_output_of_transformer():\n\n X = _make_nested_from_array(\n np.array([4, 6, 10, 12, 8, 6, 5, 5]), n_instances=1, n_columns=1\n )\n\n h = HOG1DTransformer().fit(X)\n res = h.transform(X)\n orig = convert_list_to_dataframe([[0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0]])\n orig.columns = X.columns\n assert check_if_dataframes_are_equal(res, orig)\n\n X = _make_nested_from_array(\n np.array([-5, 2.5, 1, 3, 10, -1.5, 6, 12, -3, 0.2]), n_instances=1, n_columns=1\n )\n h = h.fit(X)\n res = h.transform(X)\n orig = convert_list_to_dataframe([[0, 0, 0, 0, 4, 1, 0, 0, 0, 0, 2, 0, 2, 1, 0, 0]])\n orig.columns = X.columns\n assert check_if_dataframes_are_equal(res, orig)\n\n\n# the time series length should always be num_bins*num_intervals\n# (num_intervals is 2 by default)\[email protected](\"num_bins,corr_series_length\", [(4, 8), (8, 16), (12, 24)])\ndef test_output_dimensions(num_bins, corr_series_length):\n\n X = _make_nested_from_array(np.ones(13), n_instances=10, n_columns=1)\n\n h = HOG1DTransformer(num_bins=num_bins).fit(X)\n res = h.transform(X)\n\n # get the dimension of the generated dataframe.\n act_time_series_length = res.iloc[0, 0].shape[0]\n num_rows = res.shape[0]\n num_cols = 
res.shape[1]\n\n assert act_time_series_length == corr_series_length\n assert num_rows == 10\n assert num_cols == 1\n\n\n# This is to check that HOG1D produces the same result along each dimension\ndef test_hog1d_performs_correcly_along_each_dim():\n\n X = _make_nested_from_array(\n np.array([4, 6, 10, 12, 8, 6, 5, 5]), n_instances=1, n_columns=2\n )\n\n h = HOG1DTransformer().fit(X)\n res = h.transform(X)\n orig = convert_list_to_dataframe(\n [\n [0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0],\n [0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0],\n ]\n )\n orig.columns = X.columns\n assert check_if_dataframes_are_equal(res, orig)\n\n\n# Helper function to convert a Python list to a Pandas dataframe.\ndef convert_list_to_dataframe(list_to_convert):\n # Convert this into a panda's data frame\n df = pd.DataFrame()\n for i in range(len(list_to_convert)):\n inst = list_to_convert[i]\n data = []\n data.append(pd.Series(inst))\n df[i] = data\n\n return df\n\n\ndef check_if_dataframes_are_equal(df1, df2):\n \"\"\"\n for some reason, this is how you check that two dataframes are equal.\n \"\"\"\n from pandas.testing import assert_frame_equal\n\n try:\n assert_frame_equal(df1, df2)\n return True\n except AssertionError:\n return False\n",
"# -*- coding: utf-8 -*-\nimport os\n\nos.environ[\"MKL_NUM_THREADS\"] = \"1\" # must be done before numpy import!!\nos.environ[\"NUMEXPR_NUM_THREADS\"] = \"1\" # must be done before numpy import!!\nos.environ[\"OMP_NUM_THREADS\"] = \"1\" # must be done before numpy import!!\n\nimport numpy as np\nfrom sklearn.preprocessing import FunctionTransformer\nfrom sklearn.tree import DecisionTreeClassifier\nfrom statsmodels.tsa.stattools import acf\n\nfrom sktime.transformers.panel.compose import make_row_transformer\nfrom sktime.transformers.panel.segment import RandomIntervalSegmenter\n\nfrom sktime.transformers.panel.reduce import Tabularizer\nfrom sklearn.pipeline import Pipeline\nfrom sktime.series_as_features.compose import FeatureUnion\nfrom sktime.classification.compose import TimeSeriesForestClassifier\nfrom sktime.utils.time_series import time_series_slope\nimport sktime.classification.interval_based._tsf as ib\nimport sktime.classification.frequency_based._rise as fb\nimport sktime.classification.dictionary_based._boss as db\nimport sktime.classification.distance_based._time_series_neighbors as dist\nimport sktime.contrib.experiments as exp\n\n# method 1\n\n\nbenchmark_datasets = [\n \"ACSF1\",\n \"Adiac\",\n \"ArrowHead\",\n \"Beef\",\n \"BeetleFly\",\n \"BirdChicken\",\n \"BME\",\n \"Car\",\n \"CBF\",\n \"ChlorineConcentration\",\n \"CinCECGTorso\",\n \"Coffee\",\n \"Computers\",\n \"CricketX\",\n \"CricketY\",\n \"CricketZ\",\n \"DiatomSizeReduction\",\n \"DistalPhalanxOutlineCorrect\",\n \"DistalPhalanxOutlineAgeGroup\",\n \"DistalPhalanxTW\",\n \"Earthquakes\",\n \"ECG200\",\n \"ECG5000\",\n \"ECGFiveDays\",\n \"EOGHorizontalSignal\",\n \"EOGVerticalSignal\",\n \"EthanolLevel\",\n \"FaceAll\",\n \"FaceFour\",\n \"FacesUCR\",\n \"FiftyWords\",\n \"Fish\",\n \"FreezerRegularTrain\",\n \"FreezerSmallTrain\",\n \"Ham\",\n \"Haptics\",\n \"Herring\",\n \"InlineSkate\",\n \"InsectEPGRegularTrain\",\n \"InsectEPGSmallTrain\",\n \"InsectWingbeatSound\",\n \"ItalyPowerDemand\",\n \"LargeKitchenAppliances\",\n \"Lightning2\",\n \"Lightning7\",\n \"Mallat\",\n \"Meat\",\n \"MedicalImages\",\n \"MiddlePhalanxOutlineCorrect\",\n \"MiddlePhalanxOutlineAgeGroup\",\n \"MiddlePhalanxTW\",\n \"MixedShapesRegularTrain\",\n \"MixedShapesSmallTrain\",\n \"MoteStrain\",\n \"OliveOil\",\n \"OSULeaf\",\n \"PhalangesOutlinesCorrect\",\n \"Phoneme\",\n \"PigAirwayPressure\",\n \"PigArtPressure\",\n \"PigCVP\",\n \"Plane\",\n \"PowerCons\",\n \"ProximalPhalanxOutlineCorrect\",\n \"ProximalPhalanxOutlineAgeGroup\",\n \"ProximalPhalanxTW\",\n \"RefrigerationDevices\",\n \"Rock\",\n \"ScreenType\",\n \"SemgHandGenderCh2\",\n \"SemgHandMovementCh2\",\n \"SemgHandSubjectCh2\",\n \"ShapeletSim\",\n \"SmallKitchenAppliances\",\n \"SmoothSubspace\",\n \"SonyAIBORobotSurface1\",\n \"SonyAIBORobotSurface2\",\n \"Strawberry\",\n \"SwedishLeaf\",\n \"Symbols\",\n \"SyntheticControl\",\n \"ToeSegmentation1\",\n \"ToeSegmentation2\",\n \"Trace\",\n \"TwoLeadECG\",\n \"TwoPatterns\",\n \"UMD\",\n \"UWaveGestureLibraryX\",\n \"UWaveGestureLibraryY\",\n \"UWaveGestureLibraryZ\",\n \"Wafer\",\n \"Wine\",\n \"WordSynonyms\",\n \"Worms\",\n \"WormsTwoClass\",\n \"Yoga\",\n]\n\ndata_dir = \"Z:/ArchiveData/Univariate_ts/\"\nresults_dir = \"Z:/Benchmarking/\"\n\n\ndef acf_coefs(x, maxlag=100):\n x = np.asarray(x).ravel()\n nlags = np.minimum(len(x) - 1, maxlag)\n return acf(x, nlags=nlags).ravel()\n\n\ndef powerspectrum(x, **kwargs):\n x = np.asarray(x).ravel()\n fft = np.fft.fft(x)\n ps = fft.real * fft.real + fft.imag * 
fft.imag\n return ps[: ps.shape[0] // 2].ravel()\n\n\ndef tsf_benchmarking():\n for i in range(0, len(benchmark_datasets)):\n dataset = benchmark_datasets[i]\n print(str(i) + \" problem = \" + dataset)\n tsf = ib.TimeSeriesForest(n_estimators=100)\n exp.run_experiment(\n overwrite=False,\n problem_path=data_dir,\n results_path=results_dir,\n cls_name=\"PythonTSF\",\n classifier=tsf,\n dataset=dataset,\n train_file=False,\n )\n steps = [\n (\"segment\", RandomIntervalSegmenter(n_intervals=\"sqrt\")),\n (\n \"transform\",\n FeatureUnion(\n [\n (\n \"mean\",\n make_row_transformer(\n FunctionTransformer(func=np.mean, validate=False)\n ),\n ),\n (\n \"std\",\n make_row_transformer(\n FunctionTransformer(func=np.std, validate=False)\n ),\n ),\n (\n \"slope\",\n make_row_transformer(\n FunctionTransformer(\n func=time_series_slope, validate=False\n )\n ),\n ),\n ]\n ),\n ),\n (\"clf\", DecisionTreeClassifier()),\n ]\n base_estimator = Pipeline(steps)\n tsf = TimeSeriesForestClassifier(estimator=base_estimator, n_estimators=100)\n exp.run_experiment(\n overwrite=False,\n problem_path=data_dir,\n results_path=results_dir,\n cls_name=\"PythonTSFComposite\",\n classifier=tsf,\n dataset=dataset,\n train_file=False,\n )\n\n\ndef rise_benchmarking():\n for i in range(0, len(benchmark_datasets)):\n dataset = benchmark_datasets[i]\n print(str(i) + \" problem = \" + dataset)\n rise = fb.RandomIntervalSpectralForest(n_estimators=100)\n exp.run_experiment(\n overwrite=True,\n problem_path=data_dir,\n results_path=results_dir,\n cls_name=\"PythonRISE\",\n classifier=rise,\n dataset=dataset,\n train_file=False,\n )\n steps = [\n (\"segment\", RandomIntervalSegmenter(n_intervals=1, min_length=5)),\n (\n \"transform\",\n FeatureUnion(\n [\n (\n \"acf\",\n make_row_transformer(\n FunctionTransformer(func=acf_coefs, validate=False)\n ),\n ),\n (\n \"ps\",\n make_row_transformer(\n FunctionTransformer(func=powerspectrum, validate=False)\n ),\n ),\n ]\n ),\n ),\n (\"tabularise\", Tabularizer()),\n (\"clf\", DecisionTreeClassifier()),\n ]\n base_estimator = Pipeline(steps)\n rise = TimeSeriesForestClassifier(estimator=base_estimator, n_estimators=100)\n exp.run_experiment(\n overwrite=True,\n problem_path=data_dir,\n results_path=results_dir,\n cls_name=\"PythonRISEComposite\",\n classifier=rise,\n dataset=dataset,\n train_file=False,\n )\n\n\ndef boss_benchmarking():\n for i in range(0, int(len(benchmark_datasets))):\n dataset = benchmark_datasets[i]\n print(\n str(i) + \" problem = \" + dataset + \" writing to \" + results_dir + \"/BOSS/\"\n )\n boss = db.BOSSEnsemble()\n exp.run_experiment(\n overwrite=False,\n problem_path=data_dir,\n results_path=results_dir + \"/BOSS/\",\n cls_name=\"PythonBOSS\",\n classifier=boss,\n dataset=dataset,\n train_file=False,\n )\n\n\ndistance_test = [\n \"Chinatown\",\n \"ItalyPowerDemand\",\n]\n\n\ndef elastic_distance_benchmarking():\n for i in range(0, int(len(distance_test))):\n dataset = distance_test[i]\n print(str(i) + \" problem = \" + dataset + \" writing to \" + results_dir + \"/DTW/\")\n dtw = dist.KNeighborsTimeSeriesClassifier(metric=\"dtw\")\n exp.run_experiment(\n overwrite=False,\n problem_path=data_dir,\n results_path=results_dir + \"/DTW/\",\n cls_name=\"PythonDTW\",\n classifier=dtw,\n dataset=dataset,\n train_file=False,\n )\n twe = dist.KNeighborsTimeSeriesClassifier(metric=\"dtw\")\n exp.run_experiment(\n overwrite=False,\n problem_path=data_dir,\n results_path=results_dir + \"/DTW/\",\n cls_name=\"PythonTWE\",\n classifier=twe,\n dataset=dataset,\n 
train_file=False,\n )\n\n\nif __name__ == \"__main__\":\n # tsf_benchmarking()\n # rise_benchmarking()\n # boss_benchmarking()\n elastic_distance_benchmarking()\n"
] |
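The HOG1D test file in the cell above checks dataframe equality by catching the `AssertionError` raised by `assert_frame_equal`. A minimal sketch of that pattern on toy frames (the frames below are illustrative, not taken from the test):

```python
import pandas as pd
from pandas.testing import assert_frame_equal


def frames_equal(df1, df2):
    # assert_frame_equal raises AssertionError on any mismatch,
    # so "equal" means "no exception was raised"
    try:
        assert_frame_equal(df1, df2)
        return True
    except AssertionError:
        return False


a = pd.DataFrame({"x": [1, 2, 3]})
b = pd.DataFrame({"x": [1, 2, 3]})
c = pd.DataFrame({"x": [1, 2, 4]})
print(frames_equal(a, b))  # True
print(frames_equal(a, c))  # False
```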
[
[
"sklearn.base.clone"
],
[
"pandas.Series",
"pandas.DataFrame",
"numpy.ones",
"pandas.testing.assert_frame_equal",
"numpy.array"
],
[
"numpy.fft.fft",
"sklearn.preprocessing.FunctionTransformer",
"numpy.asarray",
"sklearn.pipeline.Pipeline",
"sklearn.tree.DecisionTreeClassifier"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
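The benchmarking script in the row above derives RISE-style features with `acf_coefs` and `powerspectrum` before handing them to a decision tree. A standalone sketch of those two transforms on a toy signal (the sine-wave input and the lag cap of 10 are assumptions for illustration):

```python
import numpy as np
from statsmodels.tsa.stattools import acf


def acf_coefs(x, maxlag=100):
    # autocorrelation up to min(len(x) - 1, maxlag) lags
    x = np.asarray(x).ravel()
    nlags = min(len(x) - 1, maxlag)
    return acf(x, nlags=nlags).ravel()


def powerspectrum(x):
    # squared FFT magnitude; keep the first half for a real-valued input
    x = np.asarray(x).ravel()
    fft = np.fft.fft(x)
    ps = fft.real * fft.real + fft.imag * fft.imag
    return ps[: ps.shape[0] // 2].ravel()


x = np.sin(np.linspace(0, 8 * np.pi, 64))  # toy periodic signal
print(acf_coefs(x, maxlag=10).shape)       # (11,) -- lag 0 is included
print(powerspectrum(x).shape)              # (32,)
```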
BywinTec/cogdl
|
[
"3c0abcfe364a69061c84c8170d4f5e6a17a4668d",
"3c0abcfe364a69061c84c8170d4f5e6a17a4668d",
"3c0abcfe364a69061c84c8170d4f5e6a17a4668d",
"3c0abcfe364a69061c84c8170d4f5e6a17a4668d"
] |
[
"cogdl/models/nn/diffpool.py",
"cogdl/tasks/attributed_graph_clustering.py",
"cogdl/models/nn/gin.py",
"cogdl/models/nn/pprgo.py"
] |
[
"import numpy as np\nimport random\n\nfrom scipy.linalg import block_diag\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .. import BaseModel, register_model\nfrom .gin import split_dataset_general\nfrom .graphsage import GraphSAGELayer\n\n\nclass EntropyLoss(nn.Module):\n # Return Scalar\n def forward(self, adj, anext, s_l):\n # entropy.mean(-1).mean(-1): 1/n in node and batch\n # entropy = (torch.distributions.Categorical(\n # probs=s_l).entropy()).sum(-1).mean(-1)\n entropy = (torch.distributions.Categorical(probs=s_l).entropy()).mean()\n assert not torch.isnan(entropy)\n return entropy\n\n\nclass LinkPredLoss(nn.Module):\n def forward(self, adj, anext, s_l):\n link_pred_loss = (adj - s_l.matmul(s_l.transpose(-1, -2))).norm(dim=(1, 2))\n link_pred_loss = link_pred_loss / (adj.size(1) * adj.size(2))\n return link_pred_loss.mean()\n\n\nclass GraphSAGE(nn.Module):\n r\"\"\"GraphSAGE from `\"Inductive Representation Learning on Large Graphs\" <https://arxiv.org/pdf/1706.02216.pdf>`__.\n\n ..math::\n h^{i+1}_{\\mathcal{N}(v)}=AGGREGATE_{k}(h_{u}^{k})\n h^{k+1}_{v} = \\sigma(\\mathbf{W}^{k}·CONCAT(h_{v}^{k}, h_{\\mathcal{N}(v)}))\n\n Args:\n in_feats (int) : Size of each input sample.\n hidden_dim (int) : Size of hidden layer dimension.\n out_feats (int) : Size of each output sample.\n num_layers (int) : Number of GraphSAGE Layers.\n dropout (float, optional) : Size of dropout, default: ``0.5``.\n normalize (bool, optional) : Normalze features after each layer if True, default: ``True``.\n \"\"\"\n\n def __init__(\n self, in_feats, hidden_dim, out_feats, num_layers, dropout=0.5, normalize=False, concat=False, use_bn=False\n ):\n super(GraphSAGE, self).__init__()\n self.convlist = nn.ModuleList()\n self.bn_list = nn.ModuleList()\n self.num_layers = num_layers\n self.dropout = dropout\n self.use_bn = use_bn\n aggr = \"concat\" if concat else \"mean\"\n if num_layers == 1:\n self.convlist.append(GraphSAGELayer(in_feats, out_feats, normalize, aggr))\n else:\n self.convlist.append(GraphSAGELayer(in_feats, hidden_dim, normalize, aggr))\n if use_bn:\n self.bn_list.append(nn.BatchNorm1d(hidden_dim))\n for _ in range(num_layers - 2):\n self.convlist.append(GraphSAGELayer(hidden_dim, hidden_dim, normalize, aggr))\n if use_bn:\n self.bn_list.append(nn.BatchNorm1d(hidden_dim))\n self.convlist.append(GraphSAGELayer(hidden_dim, out_feats, normalize, aggr))\n\n def forward(self, graph, x):\n h = x\n for i in range(self.num_layers - 1):\n h = F.dropout(h, p=self.dropout, training=self.training)\n h = self.convlist[i](graph, h)\n if self.use_bn:\n h = self.bn_list[i](h)\n return self.convlist[self.num_layers - 1](graph, h)\n\n\nclass BatchedGraphSAGE(nn.Module):\n r\"\"\"GraphSAGE with mini-batch\n\n Args:\n in_feats (int) : Size of each input sample.\n out_feats (int) : Size of each output sample.\n use_bn (bool) : Apply batch normalization if True, default: ``True``.\n self_loop (bool) : Add self loop if True, default: ``True``.\n \"\"\"\n\n def __init__(self, in_feats, out_feats, use_bn=True, self_loop=True):\n super(BatchedGraphSAGE, self).__init__()\n self.self_loop = self_loop\n self.use_bn = use_bn\n self.weight = nn.Linear(in_feats, out_feats, bias=True)\n\n nn.init.xavier_uniform_(self.weight.weight.data, gain=nn.init.calculate_gain(\"relu\"))\n\n def forward(self, x, adj):\n device = x.device\n if self.self_loop:\n adj = adj + torch.eye(x.shape[1]).to(device)\n adj = adj / adj.sum(dim=1, keepdim=True)\n h = torch.matmul(adj, x)\n h = self.weight(h)\n h = F.normalize(h, 
dim=2, p=2)\n h = F.relu(h)\n # TODO: shape = [a, 0, b]\n # if self.use_bn and h.shape[1] > 0:\n # self.bn = nn.BatchNorm1d(h.shape[1]).to(device)\n # h = self.bn(h)\n return h\n\n\nclass BatchedDiffPoolLayer(nn.Module):\n r\"\"\"DIFFPOOL from paper `\"Hierarchical Graph Representation Learning\n with Differentiable Pooling\" <https://arxiv.org/pdf/1806.08804.pdf>`__.\n\n .. math::\n X^{(l+1)} = S^{l)}^T Z^{(l)}\n A^{(l+1)} = S^{(l)}^T A^{(l)} S^{(l)}\n Z^{(l)} = GNN_{l, embed}(A^{(l)}, X^{(l)})\n S^{(l)} = softmax(GNN_{l,pool}(A^{(l)}, X^{(l)}))\n\n Parameters\n ----------\n in_feats : int\n Size of each input sample.\n out_feats : int\n Size of each output sample.\n assign_dim : int\n Size of next adjacency matrix.\n batch_size : int\n Size of each mini-batch.\n dropout : float, optional\n Size of dropout, default: ``0.5``.\n link_pred_loss : bool, optional\n Use link prediction loss if True, default: ``True``.\n \"\"\"\n\n def __init__(\n self, in_feats, out_feats, assign_dim, batch_size, dropout=0.5, link_pred_loss=True, entropy_loss=True\n ):\n super(BatchedDiffPoolLayer, self).__init__()\n self.assign_dim = assign_dim\n self.dropout = dropout\n self.use_link_pred = link_pred_loss\n self.batch_size = batch_size\n self.embd_gnn = GraphSAGELayer(in_feats, out_feats, normalize=False)\n self.pool_gnn = GraphSAGELayer(in_feats, assign_dim, normalize=False)\n\n self.loss_dict = dict()\n\n def forward(self, graph, x, batch):\n embed = self.embd_gnn(graph, x)\n pooled = F.softmax(self.pool_gnn(graph, x), dim=-1)\n device = x.device\n masked_tensor = []\n value_set, value_counts = torch.unique(batch, return_counts=True)\n batch_size = len(value_set)\n for i in value_counts:\n masked = torch.ones((i, int(pooled.size()[1] / batch_size)))\n masked_tensor.append(masked)\n masked = torch.FloatTensor(block_diag(*masked_tensor)).to(device)\n\n result = torch.nn.functional.softmax(masked * pooled, dim=-1)\n result = result * masked\n result = result / (result.sum(dim=-1, keepdim=True) + 1e-13)\n # result = masked_softmax(pooled, masked, memory_efficient=False)\n\n h = torch.matmul(result.t(), embed)\n adj = torch.sparse_coo_tensor(graph.edge_index, graph.edge_weight)\n adj_new = torch.sparse.mm(adj, result)\n adj_new = torch.mm(result.t(), adj_new)\n\n if self.use_link_pred:\n adj_loss = torch.norm((adj.to_dense() - torch.mm(result, result.t()))) / np.power((len(batch)), 2)\n self.loss_dict[\"adj_loss\"] = adj_loss\n entropy_loss = (torch.distributions.Categorical(probs=pooled).entropy()).mean()\n assert not torch.isnan(entropy_loss)\n self.loss_dict[\"entropy_loss\"] = entropy_loss\n return adj_new, h\n\n def get_loss(self):\n loss_n = 0\n for _, value in self.loss_dict.items():\n loss_n += value\n return loss_n\n\n\nclass BatchedDiffPool(nn.Module):\n r\"\"\"DIFFPOOL layer with batch forward\n\n Parameters\n ----------\n in_feats : int\n Size of each input sample.\n next_size : int\n Size of next adjacency matrix.\n emb_size : int\n Dimension of next node feature matrix.\n use_bn : bool, optional\n Apply batch normalization if True, default: ``True``.\n self_loop : bool, optional\n Add self loop if True, default: ``True``.\n use_link_loss : bool, optional\n Use link prediction loss if True, default: ``True``.\n use_entropy : bool, optioinal\n Use entropy prediction loss if True, default: ``True``.\n \"\"\"\n\n def __init__(\n self, in_feats, next_size, emb_size, use_bn=True, self_loop=True, use_link_loss=False, use_entropy=True\n ):\n super(BatchedDiffPool, self).__init__()\n self.use_link_loss = 
use_link_loss\n self.use_bn = use_bn\n self.feat_trans = BatchedGraphSAGE(in_feats, emb_size)\n self.assign_trans = BatchedGraphSAGE(in_feats, next_size)\n\n self.link_loss = LinkPredLoss()\n self.entropy = EntropyLoss()\n\n self.loss_module = nn.ModuleList()\n if use_link_loss:\n self.loss_module.append(LinkPredLoss())\n if use_entropy:\n self.loss_module.append(EntropyLoss())\n self.loss = {}\n\n def forward(self, x, adj):\n h = self.feat_trans(x, adj)\n next_l = F.softmax(self.assign_trans(x, adj), dim=-1)\n\n h = torch.matmul(next_l.transpose(-1, -2), h)\n next = torch.matmul(next_l.transpose(-1, -2), torch.matmul(adj, next_l))\n\n for layer in self.loss_module:\n self.loss[str(type(layer).__name__)] = layer(adj, next, next_l)\n\n return h, next\n\n def get_loss(self):\n value = 0\n for _, v in self.loss.items():\n value += v\n return value\n\n\ndef toBatchedGraph(batch_adj, batch_feat, node_per_pool_graph):\n adj_list = [\n batch_adj[i : i + node_per_pool_graph, i : i + node_per_pool_graph]\n for i in range(0, batch_adj.size()[0], node_per_pool_graph)\n ]\n feat_list = [batch_feat[i : i + node_per_pool_graph, :] for i in range(0, batch_adj.size()[0], node_per_pool_graph)]\n adj_list = list(map(lambda x: torch.unsqueeze(x, 0), adj_list))\n feat_list = list(map(lambda x: torch.unsqueeze(x, 0), feat_list))\n adj = torch.cat(adj_list, dim=0)\n feat = torch.cat(feat_list, dim=0)\n return adj, feat\n\n\n@register_model(\"diffpool\")\nclass DiffPool(BaseModel):\n r\"\"\"DIFFPOOL from paper `Hierarchical Graph Representation Learning\n with Differentiable Pooling <https://arxiv.org/pdf/1806.08804.pdf>`__.\n\n Parameters\n ----------\n in_feats : int\n Size of each input sample.\n hidden_dim : int\n Size of hidden layer dimension of GNN.\n embed_dim : int\n Size of embeded node feature, output size of GNN.\n num_classes : int\n Number of target classes.\n num_layers : int\n Number of GNN layers.\n num_pool_layers : int\n Number of pooling.\n assign_dim : int\n Embedding size after the first pooling.\n pooling_ratio : float\n Size of each poolling ratio.\n batch_size : int\n Size of each mini-batch.\n dropout : float, optional\n Size of dropout, default: `0.5`.\n no_link_pred : bool, optional\n If True, use link prediction loss, default: `True`.\n \"\"\"\n\n @staticmethod\n def add_args(parser):\n parser.add_argument(\"--num-layers\", type=int, default=2)\n parser.add_argument(\"--num-pooling-layers\", type=int, default=1)\n parser.add_argument(\"--no-link-pred\", dest=\"no_link_pred\", action=\"store_true\")\n parser.add_argument(\"--pooling-ratio\", type=float, default=0.15)\n parser.add_argument(\"--embedding-dim\", type=int, default=64)\n parser.add_argument(\"--hidden-size\", type=int, default=64)\n parser.add_argument(\"--dropout\", type=float, default=0.1)\n parser.add_argument(\"--batch-size\", type=int, default=20)\n parser.add_argument(\"--train-ratio\", type=float, default=0.7)\n parser.add_argument(\"--test-ratio\", type=float, default=0.1)\n parser.add_argument(\"--lr\", type=float, default=0.001)\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(\n args.num_features,\n args.hidden_size,\n args.embedding_dim,\n args.num_classes,\n args.num_layers,\n args.num_pooling_layers,\n int(args.max_graph_size * args.pooling_ratio) * args.batch_size,\n args.pooling_ratio,\n args.batch_size,\n args.dropout,\n args.no_link_pred,\n )\n\n @classmethod\n def split_dataset(cls, dataset, args):\n return split_dataset_general(dataset, args)\n\n def __init__(\n self,\n in_feats,\n 
hidden_dim,\n embed_dim,\n num_classes,\n num_layers,\n num_pool_layers,\n assign_dim,\n pooling_ratio,\n batch_size,\n dropout=0.5,\n no_link_pred=True,\n concat=False,\n use_bn=False,\n ):\n super(DiffPool, self).__init__()\n self.assign_dim = assign_dim\n self.assign_dim_list = [assign_dim]\n self.use_bn = use_bn\n self.dropout = dropout\n self.use_link_loss = not no_link_pred\n # assert num_layers > 3, \"layers > 3\"\n self.diffpool_layers = nn.ModuleList()\n self.before_pooling = GraphSAGE(\n in_feats, hidden_dim, embed_dim, num_layers=num_layers, dropout=dropout, use_bn=self.use_bn\n )\n self.init_diffpool = BatchedDiffPoolLayer(\n embed_dim, hidden_dim, assign_dim, batch_size, dropout, self.use_link_loss\n )\n\n pooled_emb_dim = embed_dim\n self.after_pool = nn.ModuleList()\n after_per_pool = nn.ModuleList()\n for _ in range(num_layers - 1):\n after_per_pool.append(BatchedGraphSAGE(hidden_dim, hidden_dim))\n after_per_pool.append(BatchedGraphSAGE(hidden_dim, pooled_emb_dim))\n self.after_pool.append(after_per_pool)\n\n for _ in range(num_pool_layers - 1):\n self.assign_dim = int(self.assign_dim // batch_size * pooling_ratio) * batch_size\n self.diffpool_layers.append(\n BatchedDiffPool(\n pooled_emb_dim, self.assign_dim, hidden_dim, use_bn=self.use_bn, use_link_loss=self.use_link_loss\n )\n )\n\n for _ in range(num_layers - 1):\n after_per_pool.append(BatchedGraphSAGE(hidden_dim, hidden_dim))\n after_per_pool.append(BatchedGraphSAGE(hidden_dim, pooled_emb_dim))\n self.after_pool.append(after_per_pool)\n\n self.assign_dim_list.append(self.assign_dim)\n\n if concat:\n out_dim = pooled_emb_dim * (num_pool_layers + 1)\n else:\n out_dim = pooled_emb_dim\n self.fc = nn.Linear(out_dim, num_classes)\n\n def reset_parameters(self):\n for i in self.modules():\n if isinstance(i, nn.Linear):\n nn.init.xavier_uniform_(i.weight.data, gain=nn.init.calculate_gain(\"relu\"))\n if i.bias is not None:\n nn.init.constant_(i.bias.data, 0.0)\n\n def after_pooling_forward(self, gnn_layers, adj, x, concat=False):\n readouts = []\n h = x\n for layer in gnn_layers:\n h = layer(h, adj)\n readouts.append(h)\n # readout = torch.cat(readouts, dim=1)\n return h\n\n def forward(self, batch):\n readouts_all = []\n\n init_emb = self.before_pooling(batch, batch.x)\n adj, h = self.init_diffpool(batch, init_emb, batch.batch)\n value_set, value_counts = torch.unique(batch.batch, return_counts=True)\n batch_size = len(value_set)\n adj, h = toBatchedGraph(adj, h, adj.size(0) // batch_size)\n h = self.after_pooling_forward(self.after_pool[0], adj, h)\n readout = torch.sum(h, dim=1)\n readouts_all.append(readout)\n\n for i, diff_layer in enumerate(self.diffpool_layers):\n h, adj = diff_layer(h, adj)\n h = self.after_pooling_forward(self.after_pool[i + 1], adj, h)\n readout = torch.sum(h, dim=1)\n readouts_all.append(readout)\n pred = self.fc(readout)\n return pred\n\n def graph_classificatoin_loss(self, batch):\n pred = self.forward(batch)\n pred = F.log_softmax(pred, dim=-1)\n loss_n = F.nll_loss(pred, batch.y)\n loss_n += self.init_diffpool.get_loss()\n for layer in self.diffpool_layers:\n loss_n += layer.get_loss()\n return loss_n\n",
"import argparse\nfrom typing import Dict\nimport numpy as np\nimport networkx as nx\n\nfrom sklearn.cluster import KMeans, SpectralClustering\nfrom sklearn.metrics.cluster import normalized_mutual_info_score\nfrom sklearn.metrics import f1_score\nfrom scipy.optimize import linear_sum_assignment\n\nimport torch\nimport torch.nn.functional as F\n\nfrom cogdl.datasets import build_dataset\nfrom cogdl.models import build_model\nfrom . import BaseTask, register_task\n\n\n@register_task(\"attributed_graph_clustering\")\nclass AttributedGraphClustering(BaseTask):\n \"\"\"Attributed graph clustring task.\"\"\"\n\n @staticmethod\n def add_args(parser: argparse.ArgumentParser):\n \"\"\"Add task-specific arguments to the parser.\"\"\"\n # fmt: off\n # parser.add_argument(\"--num-features\", type=int)\n parser.add_argument(\"--num-clusters\", type=int, default=7)\n parser.add_argument(\"--cluster-method\", type=str, default=\"kmeans\")\n parser.add_argument(\"--hidden-size\", type=int, default=128)\n parser.add_argument(\"--model-type\", type=str, default=\"content\")\n parser.add_argument(\"--evaluate\", type=str, default=\"full\")\n parser.add_argument('--enhance', type=str, default=None, help='use prone or prone++ to enhance embedding')\n # fmt: on\n\n def __init__(\n self,\n args,\n dataset=None,\n _=None,\n ):\n super(AttributedGraphClustering, self).__init__(args)\n\n self.args = args\n self.model_name = args.model\n self.device = \"cpu\" if not torch.cuda.is_available() or args.cpu else args.device_id[0]\n if dataset is None:\n dataset = build_dataset(args)\n self.dataset = dataset\n self.data = dataset[0]\n self.num_nodes = self.data.y.shape[0]\n args.num_clusters = torch.max(self.data.y) + 1\n\n if args.model == \"prone\":\n self.hidden_size = args.hidden_size = args.num_features = 13\n else:\n self.hidden_size = args.hidden_size = args.hidden_size\n args.num_features = dataset.num_features\n self.model = build_model(args)\n self.num_clusters = args.num_clusters\n if args.cluster_method not in [\"kmeans\", \"spectral\"]:\n raise Exception(\"cluster method must be kmeans or spectral\")\n if args.model_type not in [\"content\", \"spectral\", \"both\"]:\n raise Exception(\"model type must be content, spectral or both\")\n self.cluster_method = args.cluster_method\n if args.evaluate not in [\"full\", \"NMI\"]:\n raise Exception(\"evaluation must be full or NMI\")\n self.model_type = args.model_type\n self.evaluate = args.evaluate\n self.is_weighted = self.data.edge_attr is not None\n self.enhance = args.enhance\n\n def train(self) -> Dict[str, float]:\n if self.model_type == \"content\":\n features_matrix = self.data.x\n elif self.model_type == \"spectral\":\n G = nx.Graph()\n if self.is_weighted:\n edges, weight = (\n self.data.edge_index.t().tolist(),\n self.data.edge_attr.tolist(),\n )\n G.add_weighted_edges_from([(edges[i][0], edges[i][1], weight[i][0]) for i in range(len(edges))])\n else:\n G.add_edges_from(self.data.edge_index.t().tolist())\n embeddings = self.model.train(G)\n if self.enhance is not None:\n embeddings = self.enhance_emb(G, embeddings)\n # Map node2id\n features_matrix = np.zeros((self.num_nodes, self.hidden_size))\n for vid, node in enumerate(G.nodes()):\n features_matrix[node] = embeddings[vid]\n features_matrix = torch.tensor(features_matrix)\n features_matrix = F.normalize(features_matrix, p=2, dim=1)\n else:\n trainer = self.model.get_trainer(AttributedGraphClustering, self.args)(self.args)\n self.model = trainer.fit(self.model, self.data)\n features_matrix = 
self.model.get_features(self.data)\n\n features_matrix = features_matrix.cpu().numpy()\n print(\"Clustering...\")\n if self.cluster_method == \"kmeans\":\n kmeans = KMeans(n_clusters=self.num_clusters, random_state=0).fit(features_matrix)\n clusters = kmeans.labels_\n else:\n clustering = SpectralClustering(\n n_clusters=self.num_clusters, assign_labels=\"discretize\", random_state=0\n ).fit(features_matrix)\n clusters = clustering.labels_\n if self.evaluate == \"full\":\n return self.__evaluate(clusters, True)\n else:\n return self.__evaluate(clusters, False)\n\n def __evaluate(self, clusters, full=True) -> Dict[str, float]:\n print(\"Evaluating...\")\n truth = self.data.y.cpu().numpy()\n if full:\n mat = np.zeros([self.num_clusters, self.num_clusters])\n for i in range(self.num_nodes):\n mat[clusters[i]][truth[i]] -= 1\n _, row_idx = linear_sum_assignment(mat)\n acc = - mat[_, row_idx].sum() / self.num_nodes\n for i in range(self.num_nodes):\n clusters[i] = row_idx[clusters[i]]\n macro_f1 = f1_score(truth, clusters, average=\"macro\")\n return dict(Accuracy=acc, NMI=normalized_mutual_info_score(clusters, truth), Macro_F1=macro_f1)\n else:\n return dict(NMI=normalized_mutual_info_score(clusters, truth))\n",
"import random\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .. import BaseModel, register_model\nfrom .mlp import MLP\nfrom cogdl.data import DataLoader\nfrom cogdl.utils import spmm\n\n\ndef split_dataset_general(dataset, args):\n droplast = args.model == \"diffpool\"\n\n train_size = int(len(dataset) * args.train_ratio)\n test_size = int(len(dataset) * args.test_ratio)\n index = list(range(len(dataset)))\n random.shuffle(index)\n\n train_index = index[:train_size]\n test_index = index[-test_size:]\n\n bs = args.batch_size\n train_loader = DataLoader([dataset[i] for i in train_index], batch_size=bs, drop_last=droplast)\n test_loader = DataLoader([dataset[i] for i in test_index], batch_size=bs, drop_last=droplast)\n if args.train_ratio + args.test_ratio < 1:\n val_index = index[train_size:-test_size]\n valid_loader = DataLoader([dataset[i] for i in val_index], batch_size=bs, drop_last=droplast)\n else:\n valid_loader = test_loader\n return train_loader, valid_loader, test_loader\n\n\nclass GINLayer(nn.Module):\n r\"\"\"Graph Isomorphism Network layer from paper `\"How Powerful are Graph\n Neural Networks?\" <https://arxiv.org/pdf/1810.00826.pdf>`__.\n\n .. math::\n h_i^{(l+1)} = f_\\Theta \\left((1 + \\epsilon) h_i^{l} +\n \\mathrm{sum}\\left(\\left\\{h_j^{l}, j\\in\\mathcal{N}(i)\n \\right\\}\\right)\\right)\n\n Parameters\n ----------\n apply_func : callable layer function)\n layer or function applied to update node feature\n eps : float32, optional\n Initial `\\epsilon` value.\n train_eps : bool, optional\n If True, `\\epsilon` will be a learnable parameter.\n \"\"\"\n\n def __init__(self, apply_func=None, eps=0, train_eps=True):\n super(GINLayer, self).__init__()\n if train_eps:\n self.eps = torch.nn.Parameter(torch.FloatTensor([eps]))\n else:\n self.register_buffer(\"eps\", torch.FloatTensor([eps]))\n self.apply_func = apply_func\n\n def forward(self, graph, x):\n # edge_index, _ = remove_self_loops()\n # edge_weight = torch.ones(edge_index.shape[1]).to(x.device) if edge_weight is None else edge_weight\n # adj = torch.sparse_coo_tensor(edge_index, edge_weight, (x.shape[0], x.shape[0]))\n # adj = adj.to(x.device)\n # out = (1 + self.eps) * x + torch.spmm(adj, x)\n out = (1 + self.eps) * x + spmm(graph, x)\n if self.apply_func is not None:\n out = self.apply_func(out)\n return out\n\n\n@register_model(\"gin\")\nclass GIN(BaseModel):\n r\"\"\"Graph Isomorphism Network from paper `\"How Powerful are Graph\n Neural Networks?\" <https://arxiv.org/pdf/1810.00826.pdf>`__.\n\n Args:\n num_layers : int\n Number of GIN layers\n in_feats : int\n Size of each input sample\n out_feats : int\n Size of each output sample\n hidden_dim : int\n Size of each hidden layer dimension\n num_mlp_layers : int\n Number of MLP layers\n eps : float32, optional\n Initial `\\epsilon` value, default: ``0``\n pooling : str, optional\n Aggregator type to use, default: ``sum``\n train_eps : bool, optional\n If True, `\\epsilon` will be a learnable parameter, default: ``True``\n \"\"\"\n\n @staticmethod\n def add_args(parser):\n parser.add_argument(\"--epsilon\", type=float, default=0.0)\n parser.add_argument(\"--hidden-size\", type=int, default=32)\n parser.add_argument(\"--num-layers\", type=int, default=3)\n parser.add_argument(\"--num-mlp-layers\", type=int, default=2)\n parser.add_argument(\"--dropout\", type=float, default=0.5)\n parser.add_argument(\"--train-epsilon\", dest=\"train_epsilon\", action=\"store_false\")\n parser.add_argument(\"--pooling\", type=str, 
default=\"sum\")\n parser.add_argument(\"--batch-size\", type=int, default=128)\n parser.add_argument(\"--lr\", type=float, default=0.001)\n parser.add_argument(\"--train-ratio\", type=float, default=0.7)\n parser.add_argument(\"--test-ratio\", type=float, default=0.1)\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(\n args.num_layers,\n args.num_features,\n args.num_classes,\n args.hidden_size,\n args.num_mlp_layers,\n args.epsilon,\n args.pooling,\n args.train_epsilon,\n args.dropout,\n )\n\n @classmethod\n def split_dataset(cls, dataset, args):\n return split_dataset_general(dataset, args)\n\n def __init__(\n self,\n num_layers,\n in_feats,\n out_feats,\n hidden_dim,\n num_mlp_layers,\n eps=0,\n pooling=\"sum\",\n train_eps=False,\n dropout=0.5,\n ):\n super(GIN, self).__init__()\n self.gin_layers = nn.ModuleList()\n self.batch_norm = nn.ModuleList()\n self.num_layers = num_layers\n for i in range(num_layers - 1):\n if i == 0:\n mlp = MLP(in_feats, hidden_dim, hidden_dim, num_mlp_layers, norm=\"batchnorm\")\n else:\n mlp = MLP(hidden_dim, hidden_dim, hidden_dim, num_mlp_layers, norm=\"batchnorm\")\n self.gin_layers.append(GINLayer(mlp, eps, train_eps))\n self.batch_norm.append(nn.BatchNorm1d(hidden_dim))\n\n self.linear_prediction = nn.ModuleList()\n for i in range(self.num_layers):\n if i == 0:\n self.linear_prediction.append(nn.Linear(in_feats, out_feats))\n else:\n self.linear_prediction.append(nn.Linear(hidden_dim, out_feats))\n self.dropout = nn.Dropout(dropout)\n self.criterion = torch.nn.CrossEntropyLoss()\n\n def forward(self, batch):\n h = batch.x\n device = h.device\n batchsize = int(torch.max(batch.batch)) + 1\n\n layer_rep = [h]\n for i in range(self.num_layers - 1):\n h = self.gin_layers[i](batch, h)\n h = self.batch_norm[i](h)\n h = F.relu(h)\n layer_rep.append(h)\n\n final_score = 0\n\n for i in range(self.num_layers):\n # pooled = self.pooling(layer_rep[i], batch, dim=0)\n # pooled = scatter_add(layer_rep[i], batch.batch, dim=0)\n hsize = layer_rep[i].shape[1]\n output = torch.zeros(batchsize, layer_rep[i].shape[1]).to(device)\n pooled = output.scatter_add_(dim=0, index=batch.batch.view(-1, 1).repeat(1, hsize), src=layer_rep[i])\n final_score += self.dropout(self.linear_prediction[i](pooled))\n return final_score\n",
"import math\n\nfrom typing import Any\nimport torch\nimport torch.nn as nn\n\nfrom .. import BaseModel, register_model\nfrom cogdl.utils import get_activation, spmm\nfrom cogdl.trainers.ppr_trainer import PPRGoTrainer\n\n\nclass LinearLayer(nn.Module):\n def __init__(self, in_features, out_features, bias=True):\n super(LinearLayer, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = nn.Parameter(torch.Tensor(out_features, in_features))\n if bias:\n self.bias = nn.Parameter(torch.Tensor(out_features))\n else:\n self.register_parameter(\"bias\", None)\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.kaiming_uniform_(self.weight, mode=\"fan_out\", a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / nn.math.sqrt(fan_in)\n nn.init.uniform_(self.bias, -bound, bound)\n\n def forward(self, input):\n return torch.nn.functional.linear(input, self.weight, self.bias)\n\n\nclass PPRGoMLP(nn.Module):\n def __init__(self, in_feats, hidden_size, out_feats, num_layers, dropout, activation=\"relu\"):\n super(PPRGoMLP, self).__init__()\n self.dropout = dropout\n self.nlayers = num_layers\n shapes = [hidden_size] * (num_layers - 1) + [out_feats]\n self.layers = nn.ModuleList()\n self.layers.append(LinearLayer(in_feats, hidden_size, bias=False))\n for i in range(num_layers - 1):\n self.layers.append(nn.Linear(shapes[i], shapes[i + 1], bias=False))\n self.activation = get_activation(activation)\n\n def forward(self, x):\n h = x\n for i, layer in enumerate(self.layers):\n h = nn.functional.dropout(h, p=self.dropout, training=self.training)\n h = layer(h)\n if i != self.nlayers - 1:\n h = self.activation(h)\n return h\n\n\n@register_model(\"pprgo\")\nclass PPRGo(BaseModel):\n @staticmethod\n def add_args(parser):\n parser.add_argument(\"--hidden-size\", type=int, default=32)\n parser.add_argument(\"--num-layers\", type=int, default=2)\n parser.add_argument(\"--dropout\", type=float, default=0.1)\n parser.add_argument(\"--activation\", type=str, default=\"relu\")\n parser.add_argument(\"--nprop-inference\", type=int, default=2)\n\n parser.add_argument(\"--alpha\", type=float, default=0.5)\n parser.add_argument(\"--k\", type=int, default=32)\n parser.add_argument(\"--norm\", type=str, default=\"sym\")\n parser.add_argument(\"--eps\", type=float, default=1e-4)\n\n parser.add_argument(\"--eval-step\", type=int, default=4)\n parser.add_argument(\"--batch-size\", type=int, default=512)\n parser.add_argument(\"--test-batch-size\", type=int, default=10000)\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(\n in_feats=args.num_features,\n hidden_size=args.hidden_size,\n out_feats=args.num_classes,\n num_layers=args.num_layers,\n alpha=args.alpha,\n dropout=args.dropout,\n activation=args.activation,\n nprop=args.nprop_inference,\n )\n\n def __init__(self, in_feats, hidden_size, out_feats, num_layers, alpha, dropout, activation=\"relu\", nprop=2):\n super(PPRGo, self).__init__()\n self.alpha = alpha\n self.nprop = nprop\n self.fc = PPRGoMLP(in_feats, hidden_size, out_feats, num_layers, dropout, activation)\n\n def forward(self, x, targets, ppr_scores):\n h = self.fc(x)\n h = ppr_scores.unsqueeze(1) * h\n batch_size = targets[-1] + 1\n out = torch.zeros(batch_size, h.shape[1]).to(x.device).to(x.dtype)\n out = out.scatter_add_(dim=0, index=targets[:, None].repeat(1, h.shape[1]), src=h)\n return out\n\n def node_classification_loss(self, x, targets, ppr_scores, y):\n 
pred = self.forward(x, targets, ppr_scores)\n loss = self.loss_fn(pred, y)\n return loss\n\n def predict(self, graph, batch_size, norm):\n device = next(self.fc.parameters()).device\n x = graph.x\n num_nodes = x.shape[0]\n pred_logits = []\n with torch.no_grad():\n for i in range(0, num_nodes, batch_size):\n batch_x = x[i : i + batch_size].to(device)\n batch_logits = self.fc(batch_x)\n pred_logits.append(batch_logits.cpu())\n pred_logits = torch.cat(pred_logits, dim=0)\n\n with graph.local_graph():\n if norm == \"sym\":\n graph.sym_norm()\n elif norm == \"row\":\n graph.row_norm()\n else:\n raise NotImplementedError\n edge_weight = graph.edge_weight * (1 - self.alpha)\n\n graph.edge_weight = edge_weight\n predictions = pred_logits\n for _ in range(self.nprop):\n predictions = spmm(graph, predictions) + self.alpha * pred_logits\n return predictions\n\n @staticmethod\n def get_trainer(taskType: Any, args: Any):\n return PPRGoTrainer\n"
] |
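The `BatchedDiffPoolLayer` in the code above coarsens a graph with a soft cluster assignment matrix S: node features become S^T Z and the adjacency becomes S^T A S. A minimal dense sketch of that pooling step (random tensors stand in for the GNN outputs; this is not cogdl's API):

```python
import torch
import torch.nn.functional as F

n, d, k = 6, 4, 2  # nodes, feature dim, clusters
torch.manual_seed(0)
z = torch.randn(n, d)                     # node embeddings (GNN_embed output)
s = F.softmax(torch.randn(n, k), dim=-1)  # soft assignments (GNN_pool output)
adj = (torch.rand(n, n) > 0.5).float()    # toy dense adjacency

h_pooled = s.t() @ z          # X' = S^T Z  -> (k, d)
adj_pooled = s.t() @ adj @ s  # A' = S^T A S -> (k, k)
print(h_pooled.shape, adj_pooled.shape)
```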
[
[
"torch.nn.functional.softmax",
"torch.nn.functional.nll_loss",
"torch.cat",
"torch.nn.functional.dropout",
"torch.sum",
"torch.sparse.mm",
"torch.sparse_coo_tensor",
"torch.unique",
"torch.nn.init.calculate_gain",
"torch.eye",
"torch.nn.functional.relu",
"torch.nn.BatchNorm1d",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.unsqueeze",
"torch.nn.Linear",
"torch.nn.functional.normalize",
"torch.nn.functional.log_softmax",
"torch.isnan",
"scipy.linalg.block_diag",
"torch.matmul",
"torch.distributions.Categorical"
],
[
"torch.nn.functional.normalize",
"torch.max",
"sklearn.cluster.KMeans",
"torch.tensor",
"sklearn.cluster.SpectralClustering",
"torch.cuda.is_available",
"sklearn.metrics.f1_score",
"scipy.optimize.linear_sum_assignment",
"numpy.zeros",
"sklearn.metrics.cluster.normalized_mutual_info_score"
],
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.zeros",
"torch.nn.ModuleList",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.FloatTensor"
],
[
"torch.nn.init.uniform_",
"torch.Tensor",
"torch.cat",
"torch.nn.functional.dropout",
"torch.zeros",
"torch.nn.ModuleList",
"torch.nn.Linear",
"torch.no_grad",
"torch.nn.math.sqrt",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.nn.functional.linear"
]
] |
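Both `GIN.forward` and `PPRGo.forward` in the cogdl files above build a per-graph (or per-target) readout with `scatter_add_`, summing node rows into the slot named by an index vector. The idiom in isolation, with toy sizes:

```python
import torch

h = torch.arange(12, dtype=torch.float32).view(6, 2)  # 6 nodes, 2 features
batch = torch.tensor([0, 0, 0, 1, 1, 1])              # node -> graph id
num_graphs = int(batch.max()) + 1

pooled = torch.zeros(num_graphs, h.shape[1])
# expand the graph ids to h's shape, then sum each row into its graph's slot
pooled = pooled.scatter_add_(0, batch.view(-1, 1).repeat(1, h.shape[1]), h)
print(pooled)  # row 0 = sum of nodes 0..2, row 1 = sum of nodes 3..5
```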
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.4",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
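`AttributedGraphClustering.__evaluate` in the row above scores a clustering by matching predicted clusters to ground-truth labels with `linear_sum_assignment` on a negated count matrix, then reading accuracy off the matched cells. The same recipe on toy labels:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

clusters = np.array([0, 0, 1, 1, 2, 2])  # predicted cluster ids
truth = np.array([1, 1, 0, 0, 2, 2])     # ground-truth labels

k = max(clusters.max(), truth.max()) + 1
cost = np.zeros((k, k))
for c, t in zip(clusters, truth):
    cost[c, t] -= 1  # negated counts: the assignment minimizes cost

row, col = linear_sum_assignment(cost)
acc = -cost[row, col].sum() / len(truth)
print(acc)  # 1.0 -- a perfect clustering up to label permutation
```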
ashkan-software/captum
|
[
"b8b7d4b10a9646e4da827635d6947499fbde3326"
] |
[
"tests/attr/layer/test_grad_cam.py"
] |
[
"#!/usr/bin/env python3\n\nimport unittest\nfrom typing import List, Tuple, Union, Any\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import Module\nfrom captum.attr._core.layer.grad_cam import LayerGradCam\n\nfrom ..helpers.basic_models import BasicModel_MultiLayer, BasicModel_ConvNet_One_Conv\nfrom ..helpers.utils import assertTensorTuplesAlmostEqual, BaseTest\n\n\nclass Test(BaseTest):\n def test_simple_input_non_conv(self) -> None:\n net = BasicModel_MultiLayer()\n inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)\n self._grad_cam_test_assert(net, net.linear0, inp, [400.0])\n\n def test_simple_multi_input_non_conv(self) -> None:\n net = BasicModel_MultiLayer(multi_input_module=True)\n inp = torch.tensor([[0.0, 6.0, 0.0]], requires_grad=True)\n self._grad_cam_test_assert(net, net.relu, inp, ([21.0], [21.0]))\n\n def test_simple_input_conv(self) -> None:\n net = BasicModel_ConvNet_One_Conv()\n inp = torch.arange(16).view(1, 1, 4, 4).float()\n self._grad_cam_test_assert(net, net.conv1, inp, [[11.25, 13.5], [20.25, 22.5]])\n\n def test_simple_input_conv_no_grad(self) -> None:\n net = BasicModel_ConvNet_One_Conv()\n\n # this way we deactivate require_grad. Some models explicitly\n # do that before interpreting the model.\n for param in net.parameters():\n param.requires_grad = False\n\n inp = torch.arange(16).view(1, 1, 4, 4).float()\n self._grad_cam_test_assert(net, net.conv1, inp, [[11.25, 13.5], [20.25, 22.5]])\n\n def test_simple_input_conv_relu(self) -> None:\n net = BasicModel_ConvNet_One_Conv()\n inp = torch.arange(16).view(1, 1, 4, 4).float()\n self._grad_cam_test_assert(net, net.relu1, inp, [[0.0, 4.0], [28.0, 32.5]])\n\n def test_simple_input_conv_without_final_relu(self) -> None:\n net = BasicModel_ConvNet_One_Conv()\n inp = torch.arange(16).view(1, 1, 4, 4).float()\n inp.requires_grad_()\n # Adding negative value to test final relu is not applied by default\n inp[0, 0, 1, 1] = -4.0\n self._grad_cam_test_assert(\n net, net.conv1, inp, (0.5625 * inp,), attribute_to_layer_input=True\n )\n\n def test_simple_input_conv_fc_with_final_relu(self) -> None:\n net = BasicModel_ConvNet_One_Conv()\n inp = torch.arange(16).view(1, 1, 4, 4).float()\n inp.requires_grad_()\n # Adding negative value to test final relu is applied\n inp[0, 0, 1, 1] = -4.0\n exp = 0.5625 * inp\n exp[0, 0, 1, 1] = 0.0\n self._grad_cam_test_assert(\n net,\n net.conv1,\n inp,\n (exp,),\n attribute_to_layer_input=True,\n relu_attributions=True,\n )\n\n def test_simple_multi_input_conv(self) -> None:\n net = BasicModel_ConvNet_One_Conv()\n inp = torch.arange(16).view(1, 1, 4, 4).float()\n inp2 = torch.ones((1, 1, 4, 4))\n self._grad_cam_test_assert(\n net, net.conv1, (inp, inp2), [[14.5, 19.0], [32.5, 37.0]]\n )\n\n def _grad_cam_test_assert(\n self,\n model: Module,\n target_layer: Module,\n test_input: Union[Tensor, Tuple[Tensor, ...]],\n expected_activation: Union[\n List[float], Tuple[List[float], ...], List[List[float]], Tuple[Tensor, ...]\n ],\n additional_input: Any = None,\n attribute_to_layer_input: bool = False,\n relu_attributions: bool = False,\n ):\n layer_gc = LayerGradCam(model, target_layer)\n attributions = layer_gc.attribute(\n test_input,\n target=0,\n additional_forward_args=additional_input,\n attribute_to_layer_input=attribute_to_layer_input,\n relu_attributions=relu_attributions,\n )\n assertTensorTuplesAlmostEqual(\n self, attributions, expected_activation, delta=0.01\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"torch.ones",
"torch.arange",
"torch.tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
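For reference, the `LayerGradCam` API exercised by the tests in the row above is used like this in application code. A minimal sketch: the `TinyConvNet` model is made up for illustration, while `LayerGradCam` and the `attribute` arguments mirror the test file:

```python
import torch
import torch.nn as nn
from captum.attr import LayerGradCam


class TinyConvNet(nn.Module):  # hypothetical model, for illustration only
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 2, kernel_size=3)
        self.fc = nn.Linear(2 * 2 * 2, 2)

    def forward(self, x):
        x = torch.relu(self.conv1(x))           # (N, 2, 2, 2) for a 4x4 input
        return self.fc(x.flatten(start_dim=1))


net = TinyConvNet()
inp = torch.arange(16, dtype=torch.float32).view(1, 1, 4, 4)
layer_gc = LayerGradCam(net, net.conv1)
attr = layer_gc.attribute(inp, target=0, relu_attributions=True)
print(attr.shape)  # (1, 1, 2, 2): one GradCAM map over conv1's spatial grid
```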
davide97l/DI-engine
|
[
"268d77db3cb54401b2cfc83e2bc3ec87c31e7b83",
"268d77db3cb54401b2cfc83e2bc3ec87c31e7b83"
] |
[
"ding/framework/wrapper/step_timer.py",
"ding/policy/sql.py"
] |
[
"from collections import deque, defaultdict\nfrom functools import wraps\nfrom types import GeneratorType\nfrom typing import Callable\nimport numpy as np\nimport time\nfrom ditk import logging\n\n\nclass StepTimer:\n\n def __init__(self, print_per_step: int = 1, smooth_window: int = 10) -> None:\n self.print_per_step = print_per_step\n self.records = defaultdict(lambda: deque(maxlen=print_per_step * smooth_window))\n\n def __call__(self, fn: Callable) -> Callable:\n step_name = getattr(fn, \"__name__\", type(fn).__name__)\n\n @wraps(fn)\n def executor(ctx):\n start_time = time.time()\n time_cost = 0\n g = fn(ctx)\n if isinstance(g, GeneratorType):\n try:\n next(g)\n except StopIteration:\n pass\n time_cost = time.time() - start_time\n yield\n start_time = time.time()\n try:\n next(g)\n except StopIteration:\n pass\n time_cost += time.time() - start_time\n else:\n time_cost = time.time() - start_time\n self.records[step_name].append(time_cost * 1000)\n if ctx.total_step % self.print_per_step == 0:\n logging.info(\n \"[Step Timer] {}: Cost: {:.2f}ms, Mean: {:.2f}ms\".format(\n step_name, time_cost * 1000, np.mean(self.records[step_name])\n )\n )\n\n return executor\n",
"from typing import List, Dict, Any, Tuple, Union, Optional\nfrom collections import namedtuple, deque\nimport copy\nimport torch\nfrom torch.distributions import Categorical\nfrom ditk import logging\nfrom easydict import EasyDict\nfrom ding.torch_utils import Adam, to_device\nfrom ding.utils.data import default_collate, default_decollate\nfrom ding.rl_utils import q_nstep_td_data, q_nstep_sql_td_error, get_nstep_return_data, get_train_sample\nfrom ding.model import model_wrap\nfrom ding.utils import POLICY_REGISTRY\nfrom .base_policy import Policy\nfrom .common_utils import default_preprocess_learn\n\n\n@POLICY_REGISTRY.register('sql')\nclass SQLPolicy(Policy):\n r\"\"\"\n Overview:\n Policy class of SQL algorithm.\n \"\"\"\n\n config = dict(\n # (str) RL policy register name (refer to function \"POLICY_REGISTRY\").\n type='sql',\n # (bool) Whether to use cuda for network.\n cuda=False,\n # (bool) Whether the RL algorithm is on-policy or off-policy.\n on_policy=False,\n # (bool) Whether use priority(priority sample, IS weight, update priority)\n priority=False,\n # (float) Reward's future discount factor, aka. gamma.\n discount_factor=0.97,\n # (int) N-step reward for target q_value estimation\n nstep=1,\n learn=dict(\n # (bool) Whether to use multi gpu\n multi_gpu=False,\n # How many updates(iterations) to train after collector's one collection.\n # Bigger \"update_per_collect\" means bigger off-policy.\n # collect data -> update policy-> collect data -> ...\n update_per_collect=3, # after the batch data come into the learner, train with the data for 3 times\n batch_size=64,\n learning_rate=0.001,\n # ==============================================================\n # The following configs are algorithm-specific\n # ==============================================================\n # (int) Frequence of target network update.\n target_update_freq=100,\n # (bool) Whether ignore done(usually for max step termination env)\n ignore_done=False,\n alpha=0.1,\n ),\n # collect_mode config\n collect=dict(\n # (int) Only one of [n_sample, n_episode] shoule be set\n # n_sample=8, # collect 8 samples and put them in collector\n # (int) Cut trajectories into pieces with length \"unroll_len\".\n unroll_len=1,\n ),\n eval=dict(),\n # other config\n other=dict(\n # Epsilon greedy with decay.\n eps=dict(\n # (str) Decay type. Support ['exp', 'linear'].\n type='exp',\n start=0.95,\n end=0.1,\n # (int) Decay length(env step)\n decay=10000,\n ),\n replay_buffer=dict(replay_buffer_size=10000, )\n ),\n )\n\n def _init_learn(self) -> None:\n r\"\"\"\n Overview:\n Learn mode init method. 
Called by ``self.__init__``.\n Init the optimizer, algorithm config, main and target models.\n \"\"\"\n self._priority = self._cfg.priority\n # Optimizer\n self._optimizer = Adam(self._model.parameters(), lr=self._cfg.learn.learning_rate)\n self._gamma = self._cfg.discount_factor\n self._nstep = self._cfg.nstep\n self._alpha = self._cfg.learn.alpha\n # use wrapper instead of plugin\n self._target_model = copy.deepcopy(self._model)\n self._target_model = model_wrap(\n self._target_model,\n wrapper_name='target',\n update_type='assign',\n update_kwargs={'freq': self._cfg.learn.target_update_freq}\n )\n self._learn_model = model_wrap(self._model, wrapper_name='argmax_sample')\n self._learn_model.reset()\n self._target_model.reset()\n\n def _forward_learn(self, data: dict) -> Dict[str, Any]:\n r\"\"\"\n Overview:\n Forward and backward function of learn mode.\n Arguments:\n - data (:obj:`dict`): Dict type data, including at least ['obs', 'action', 'reward', 'next_obs']\n Returns:\n - info_dict (:obj:`Dict[str, Any]`): Including current lr and loss.\n \"\"\"\n data = default_preprocess_learn(\n data, use_priority=self._priority, ignore_done=self._cfg.learn.ignore_done, use_nstep=True\n )\n if self._cuda:\n data = to_device(data, self._device)\n # ====================\n # Q-learning forward\n # ====================\n self._learn_model.train()\n self._target_model.train()\n # Current q value (main model)\n q_value = self._learn_model.forward(data['obs'])['logit']\n with torch.no_grad():\n # Target q value\n target_q_value = self._target_model.forward(data['next_obs'])['logit']\n # Max q value action (main model)\n target_q_action = self._learn_model.forward(data['next_obs'])['action']\n\n data_n = q_nstep_td_data(\n q_value, target_q_value, data['action'], target_q_action, data['reward'], data['done'], data['weight']\n )\n value_gamma = data.get('value_gamma')\n loss, td_error_per_sample, record_target_v = q_nstep_sql_td_error(\n data_n, self._gamma, self._cfg.learn.alpha, nstep=self._nstep, value_gamma=value_gamma\n )\n record_target_v = record_target_v.mean()\n # ====================\n # Q-learning update\n # ====================\n self._optimizer.zero_grad()\n loss.backward()\n if self._cfg.learn.multi_gpu:\n self.sync_gradients(self._learn_model)\n self._optimizer.step()\n\n # =============\n # after update\n # =============\n self._target_model.update(self._learn_model.state_dict())\n return {\n 'cur_lr': self._optimizer.defaults['lr'],\n 'total_loss': loss.item(),\n 'priority': td_error_per_sample.abs().tolist(),\n 'record_value_function': record_target_v\n # Only discrete action satisfying len(data['action'])==1 can return this and draw histogram on tensorboard.\n # '[histogram]action_distribution': data['action'],\n }\n\n def _state_dict_learn(self) -> Dict[str, Any]:\n return {\n 'model': self._learn_model.state_dict(),\n 'target_model': self._target_model.state_dict(),\n 'optimizer': self._optimizer.state_dict(),\n }\n\n def _load_state_dict_learn(self, state_dict: Dict[str, Any]) -> None:\n self._learn_model.load_state_dict(state_dict['model'])\n self._target_model.load_state_dict(state_dict['target_model'])\n self._optimizer.load_state_dict(state_dict['optimizer'])\n\n def _init_collect(self) -> None:\n r\"\"\"\n Overview:\n Collect mode init method. 
Called by ``self.__init__``.\n Init traj and unroll length, collect model.\n Enable the eps_greedy_sample\n \"\"\"\n self._unroll_len = self._cfg.collect.unroll_len\n self._gamma = self._cfg.discount_factor # necessary for parallel\n self._nstep = self._cfg.nstep # necessary for parallel\n self._collect_model = model_wrap(self._model, wrapper_name='eps_greedy_multinomial_sample')\n self._collect_model.reset()\n\n def _forward_collect(self, data: Dict[int, Any], eps: float) -> Dict[int, Any]:\n r\"\"\"\n Overview:\n Forward function for collect mode with eps_greedy\n Arguments:\n - data (:obj:`Dict[str, Any]`): Dict type data, stacked env data for predicting policy_output(action), \\\n values are torch.Tensor or np.ndarray or dict/list combinations, keys are env_id indicated by integer.\n - eps (:obj:`float`): epsilon value for exploration, which is decayed by collected env step.\n Returns:\n - output (:obj:`Dict[int, Any]`): Dict type data, including at least inferred action according to input obs.\n ReturnsKeys\n - necessary: ``action``\n \"\"\"\n data_id = list(data.keys())\n data = default_collate(list(data.values()))\n if self._cuda:\n data = to_device(data, self._device)\n self._collect_model.eval()\n with torch.no_grad():\n output = self._collect_model.forward(data, eps=eps, alpha=self._cfg.learn.alpha)\n if self._cuda:\n output = to_device(output, 'cpu')\n output = default_decollate(output)\n return {i: d for i, d in zip(data_id, output)}\n\n def _get_train_sample(self, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n \"\"\"\n Overview:\n For a given trajectory(transitions, a list of transition) data, process it into a list of sample that \\\n can be used for training directly. A train sample can be a processed transition(DQN with nstep TD) \\\n or some continuous transitions(DRQN).\n Arguments:\n - data (:obj:`List[Dict[str, Any]`): The trajectory data(a list of transition), each element is the same \\\n format as the return value of ``self._process_transition`` method.\n Returns:\n - samples (:obj:`dict`): The list of training samples.\n\n .. note::\n We will vectorize ``process_transition`` and ``get_train_sample`` method in the following release version. \\\n And the user can customize the this data processing procecure by overriding this two methods and collector \\\n itself.\n \"\"\"\n data = get_nstep_return_data(data, self._nstep, gamma=self._gamma)\n return get_train_sample(data, self._unroll_len)\n\n def _process_transition(self, obs: Any, model_output: dict, timestep: namedtuple) -> dict:\n r\"\"\"\n Overview:\n Generate dict type transition data from inputs.\n Arguments:\n - obs (:obj:`Any`): Env observation\n - model_output (:obj:`dict`): Output of collect model, including at least ['action']\n - timestep (:obj:`namedtuple`): Output after env step, including at least ['obs', 'reward', 'done'] \\\n (here 'obs' indicates obs after env step).\n Returns:\n - transition (:obj:`dict`): Dict type transition data.\n \"\"\"\n transition = {\n 'obs': obs,\n 'next_obs': timestep.obs,\n 'action': model_output['action'],\n 'reward': timestep.reward,\n 'done': timestep.done,\n }\n return transition\n\n def _init_eval(self) -> None:\n r\"\"\"\n Overview:\n Evaluate mode init method. 
Called by ``self.__init__``.\n Init eval model with argmax strategy.\n \"\"\"\n self._eval_model = model_wrap(self._model, wrapper_name='argmax_sample')\n self._eval_model.reset()\n\n def _forward_eval(self, data: dict) -> dict:\n r\"\"\"\n Overview:\n Forward function of eval mode, similar to ``self._forward_collect``.\n Arguments:\n - data (:obj:`Dict[str, Any]`): Dict type data, stacked env data for predicting policy_output(action), \\\n values are torch.Tensor or np.ndarray or dict/list combinations, keys are env_id indicated by integer.\n Returns:\n - output (:obj:`Dict[int, Any]`): The dict of predicting action for the interaction with env.\n ReturnsKeys\n - necessary: ``action``\n \"\"\"\n data_id = list(data.keys())\n data = default_collate(list(data.values()))\n if self._cuda:\n data = to_device(data, self._device)\n self._eval_model.eval()\n with torch.no_grad():\n output = self._eval_model.forward(data)\n if self._cuda:\n output = to_device(output, 'cpu')\n output = default_decollate(output)\n return {i: d for i, d in zip(data_id, output)}\n\n def default_model(self) -> Tuple[str, List[str]]:\n \"\"\"\n Overview:\n Return this algorithm default model setting for demonstration.\n Returns:\n - model_info (:obj:`Tuple[str, List[str]]`): model name and mode import_names\n\n .. note::\n The user can define and use customized network model but must obey the same inferface definition indicated \\\n by import_names path. For DQN, ``ding.model.template.q_learning.DQN``\n \"\"\"\n return 'dqn', ['ding.model.template.q_learning']\n\n def _monitor_vars_learn(self) -> List[str]:\n return super()._monitor_vars_learn() + ['record_value_function']\n"
] |
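`StepTimer` in the code above decorates pipeline steps and logs a smoothed per-step cost from a bounded deque. A stripped-down version of that timing pattern for plain functions (none of the generator handling, and the window size of 10 is an arbitrary choice):

```python
import time
from collections import defaultdict, deque
from functools import wraps

import numpy as np

records = defaultdict(lambda: deque(maxlen=10))  # rolling window per step name


def timed(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        start = time.time()
        out = fn(*args, **kwargs)
        records[fn.__name__].append((time.time() - start) * 1000)
        print("{}: mean {:.2f}ms".format(fn.__name__, np.mean(records[fn.__name__])))
        return out
    return wrapper


@timed
def train_step():
    time.sleep(0.01)  # stand-in for real work


for _ in range(3):
    train_step()
```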
[
[
"numpy.mean"
],
[
"torch.no_grad"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
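The `other.eps` block in the SQL policy config above parameterizes an epsilon-greedy schedule with `type='exp'`, `start`, `end`, and `decay`. One common exponential form consistent with those fields; the exact formula below is an assumption, not taken from the file:

```python
import math


def eps_exp(step, start=0.95, end=0.1, decay=10000):
    # assumed schedule: decays from `start` toward `end`
    # with a time constant of `decay` env steps
    return end + (start - end) * math.exp(-step / decay)


for step in (0, 10000, 50000):
    print(step, round(eps_exp(step), 3))  # 0.95 -> ~0.413 -> ~0.106
```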
qilutong/pyutil
|
[
"e1119d2b8251e14acde65f5e0134366bc6209329"
] |
[
"pyutil/classes/dataset.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n@FileName : dataset.py\n@Description : 数据集类,仿照Tensorflow功能编写\n@Author : 齐鲁桐\n@Email : [email protected]\n@Time : 2019-05-28 15:11\n@Modify : None\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\n\nimport fairy.tail as ft\n\nfrom .ringlist import RingList\n\n\nclass DataSet(object):\n \"\"\"\n DataSet类,可用来对数据集进行处理和迭代\n\n Attributes:\n data_num: 数据个数,共输入了几种数据(x:1种,(x,y):2种)\n data_type: 输入数据的类型\n ring: 循环列表类\n counter: 循环计数器,表示已经循环了几次\n batch_list: batch数据缓存区\n batch_size: batch大小,默认为1\n epochs: 数据循环次数,默认为1\n \"\"\"\n\n def __init__(self, data, batch_size=1, epochs=1, shuffle=False):\n \"\"\"\n 初始化\n :param data: 数组 x_data\n 元组 (x_data, y_data,...)\n 字典 {\"x\":x_data, \"y\":y_data,...}\n \"\"\"\n if isinstance(data, tuple):\n self.data_num = len(data)\n else:\n self.data_num = 1\n\n if isinstance(data, dict):\n self.data_type = \"dict\"\n else:\n self.data_type = None\n\n self.ring = RingList(ft.data_zip(data))\n\n self.counter = self.ring.get_counter()\n\n self.batch_list = []\n\n self.batch_size = batch_size\n self.epochs = epochs\n self.shuffle_flag = shuffle\n\n def shuffle(self, flag=True):\n \"\"\"\n 对数据进行shuffle处理\n :return:self\n \"\"\"\n if flag:\n self.shuffle_flag = True\n np.random.shuffle(self.ring.data)\n else:\n self.shuffle_flag = False\n return self\n\n def map(self, func):\n \"\"\"\n 对数据进行map处理\n :param func:处理函数,输入输出数据格式应该一致,如:add1(data)->data+1\n :return:self\n \"\"\"\n \"\"\"\"\"\"\n self.ring.data = [i for i in map(func, self.ring.data)]\n return self\n\n def batch(self, batch_size=1):\n \"\"\"\n 设置 batch_size 大小\n :param batch_size:批次大小,默认为1\n :return:self\n \"\"\"\n self.batch_size = batch_size\n return self\n\n def repeat(self, epochs=None):\n \"\"\"\n 设置数据重复次数,默认None无限循环\n :param epochs:重复次数\n :return: self\n \"\"\"\n if epochs is None:\n self.epochs = -1\n else:\n self.epochs = epochs\n return self\n\n def _get_mini_batch(self):\n \"\"\"\n 获取一个 mini batch,暂存到self.batch_list\n \"\"\"\n # 暂存batch_size和counter\n batch_size = self.batch_size\n counter = self.counter\n\n if self.epochs == self.counter:\n raise StopIteration(\"Data has been iteratively completed\")\n\n # 清空list\n self.batch_list.clear()\n # 最后一次epoch并且剩余不足batch_size时,将batch_size的值设为剩余元素数量\n if ((self.epochs - self.counter) == 1) and (\n (self.ring.length - self.ring.index) // self.batch_size == 0):\n batch_size = (self.ring.length - self.ring.index) % self.batch_size\n\n for _ in range(batch_size):\n # 每个epoch混洗一次\n if (counter != self.counter) and self.shuffle_flag:\n np.random.shuffle(self.ring.data)\n counter = self.counter\n\n data_np = self.ring.next()\n self.counter = self.ring.get_counter() # 更新self.counter\n\n self.batch_list.append(data_np)\n\n def get_next(self):\n \"\"\"\n 获取一个 mini batch\n :return: numpy数组或形似(x,y)的元组\n \"\"\"\n # 判断输入的数据种类,返回numpy数组或形似(x,y)的元组\n if self.data_type == \"dict\":\n return self._get_next_dict()\n elif self.data_num == 1:\n return self._get_next_x()\n else:\n return self._get_next_mul()\n\n def _get_next_x(self):\n \"\"\"\n 在输入数据只有x时,获取一个 mini batch\n :return: numpy数组\n \"\"\"\n self._get_mini_batch()\n\n return np.array(self.batch_list)\n\n def _get_next_mul(self):\n \"\"\"\n 在输入数据只有多个时,获取一个 mini batch\n :return: 形似(x,y)的元组\n \"\"\"\n self._get_mini_batch()\n\n data = ft.data_zip(tuple(self.batch_list))\n data = map(lambda x: np.array(x), data) # 修改为numpy数组\n\n return tuple(data)\n\n def _get_next_dict(self):\n \"\"\"\n 在输入数据只有x时,获取一个 mini batch\n :return: numpy数组\n \"\"\"\n 
self._get_mini_batch()\n\n data = ft.merge_key(self.batch_list)\n print(\"data is :\", data)\n for key in data.keys():\n data[key] = np.array(data[key]) # 修改为numpy数组\n\n return data\n\n def make_iterator(self):\n \"\"\"\n 生成迭代器,迭代设置好的循环\n :return: 生成器\n \"\"\"\n if self.epochs == self.counter:\n raise StopIteration(\"Data has been iteratively completed\")\n\n if self.data_type == \"dict\":\n while 1:\n yield self._get_next_dict()\n elif self.data_num == 1:\n while 1:\n yield self._get_next_x()\n else:\n while 1:\n yield self._get_next_mul()\n\n\nclass DataIter(object):\n def __init__(self):\n pass\n\n def get_next(self):\n pass\n"
] |
[
[
"numpy.array",
"numpy.random.shuffle"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
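`DataSet` in the row above layers shuffle/batch/repeat over a ring list. The same epoch-shuffled mini-batching can be sketched with plain numpy, avoiding the repo-specific `fairy.tail` and `RingList` helpers:

```python
import numpy as np


def iterate(x, batch_size=3, epochs=2, shuffle=True, seed=0):
    rng = np.random.default_rng(seed)
    idx = np.arange(len(x))
    for _ in range(epochs):
        if shuffle:
            rng.shuffle(idx)  # reshuffle once per epoch
        for i in range(0, len(x), batch_size):
            yield x[idx[i : i + batch_size]]  # the last batch may be short


x = np.arange(10)
for batch in iterate(x):
    print(batch)
```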
tomassosorio/audio
|
[
"0f8fa5f82af47543a68f1d3fb8921f8f9b6b15f8"
] |
[
"torchaudio/datasets/utils.py"
] |
[
"import csv\nimport errno\nimport hashlib\nimport logging\nimport os\nimport sys\nimport tarfile\nimport threading\nimport zipfile\nfrom queue import Queue\n\nimport torch\nimport urllib\nfrom torch.utils.data import Dataset\nfrom torch.utils.model_zoo import tqdm\n\n\ndef unicode_csv_reader(unicode_csv_data, **kwargs):\n r\"\"\"Since the standard csv library does not handle unicode in Python 2, we need a wrapper.\n Borrowed and slightly modified from the Python docs:\n https://docs.python.org/2/library/csv.html#csv-examples\n Arguments:\n unicode_csv_data: unicode csv data (see example below)\n Examples:\n >>> from torchaudio.datasets.utils import unicode_csv_reader\n >>> import io\n >>> with io.open(data_path, encoding=\"utf8\") as f:\n >>> reader = unicode_csv_reader(f)\n \"\"\"\n\n # Fix field larger than field limit error\n maxInt = sys.maxsize\n while True:\n # decrease the maxInt value by factor 10\n # as long as the OverflowError occurs.\n try:\n csv.field_size_limit(maxInt)\n break\n except OverflowError:\n maxInt = int(maxInt / 10)\n csv.field_size_limit(maxInt)\n\n for line in csv.reader(unicode_csv_data, **kwargs):\n yield line\n\n\ndef makedir_exist_ok(dirpath):\n \"\"\"\n Python2 support for os.makedirs(.., exist_ok=True)\n \"\"\"\n try:\n os.makedirs(dirpath)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n\n\ndef stream_url(url, start_byte=None, block_size=32 * 1024, progress_bar=True):\n \"\"\"Stream url by chunk\n\n Args:\n url (str): Url.\n start_byte (Optional[int]): Start streaming at that point.\n block_size (int): Size of chunks to stream.\n progress_bar (bool): Display a progress bar.\n \"\"\"\n\n # If we already have the whole file, there is no need to download it again\n req = urllib.request.Request(url, method=\"HEAD\")\n url_size = int(urllib.request.urlopen(req).info().get(\"Content-Length\", -1))\n if url_size == start_byte:\n return\n\n req = urllib.request.Request(url)\n if start_byte:\n req.headers[\"Range\"] = \"bytes={}-\".format(start_byte)\n\n with urllib.request.urlopen(req) as upointer, tqdm(\n unit=\"B\",\n unit_scale=True,\n unit_divisor=1024,\n total=url_size,\n disable=not progress_bar,\n ) as pbar:\n\n num_bytes = 0\n while True:\n chunk = upointer.read(block_size)\n if not chunk:\n break\n yield chunk\n num_bytes += len(chunk)\n pbar.update(len(chunk))\n\n\ndef download_url(\n url,\n download_folder,\n filename=None,\n hash_value=None,\n hash_type=\"sha256\",\n progress_bar=True,\n resume=False,\n):\n \"\"\"Download file to disk.\n\n Args:\n url (str): Url.\n download_folder (str): Folder to download file.\n filename (str): Name of downloaded file. If None, it is inferred from the url.\n hash_value (str): Hash for url.\n hash_type (str): Hash type, among \"sha256\" and \"md5\".\n progress_bar (bool): Display a progress bar.\n resume (bool): Enable resuming download.\n \"\"\"\n\n req = urllib.request.Request(url, method=\"HEAD\")\n req_info = urllib.request.urlopen(req).info()\n\n # Detect filename\n filename = filename or req_info.get_filename() or os.path.basename(url)\n filepath = os.path.join(download_folder, filename)\n\n if resume and os.path.exists(filepath):\n mode = \"ab\"\n local_size = os.path.getsize(filepath)\n elif not resume and os.path.exists(filepath):\n raise RuntimeError(\n \"{} already exists. 
Delete the file manually and retry.\".format(filepath)\n        )\n    else:\n        mode = \"wb\"\n        local_size = None\n\n    if hash_value and local_size == int(req_info.get(\"Content-Length\", -1)):\n        with open(filepath, \"rb\") as file_obj:\n            if validate_file(file_obj, hash_value, hash_type):\n                return\n        raise RuntimeError(\n            \"The hash of {} does not match. Delete the file manually and retry.\".format(\n                filepath\n            )\n        )\n\n    with open(filepath, mode) as fpointer:\n        for chunk in stream_url(url, start_byte=local_size, progress_bar=progress_bar):\n            fpointer.write(chunk)\n\n    with open(filepath, \"rb\") as file_obj:\n        if hash_value and not validate_file(file_obj, hash_value, hash_type):\n            raise RuntimeError(\n                \"The hash of {} does not match. Delete the file manually and retry.\".format(\n                    filepath\n                )\n            )\n\n\ndef validate_file(file_obj, hash_value, hash_type=\"sha256\"):\n    \"\"\"Validate a given file object with its hash.\n\n    Args:\n        file_obj: File object to read from.\n        hash_value (str): Hash for url.\n        hash_type (str): Hash type, among \"sha256\" and \"md5\".\n    \"\"\"\n\n    if hash_type == \"sha256\":\n        hash_func = hashlib.sha256()\n    elif hash_type == \"md5\":\n        hash_func = hashlib.md5()\n    else:\n        raise ValueError\n\n    while True:\n        # Read by chunk to avoid filling memory\n        chunk = file_obj.read(1024 ** 2)\n        if not chunk:\n            break\n        hash_func.update(chunk)\n\n    return hash_func.hexdigest() == hash_value\n\n\ndef extract_archive(from_path, to_path=None, overwrite=False):\n    \"\"\"Extract archive.\n    Arguments:\n        from_path: the path of the archive.\n        to_path: the root path of the extracted files (directory of from_path)\n        overwrite: overwrite existing files (False)\n    Returns:\n        List of paths to extracted files even if not overwritten.\n    Examples:\n        >>> url = 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz'\n        >>> from_path = './validation.tar.gz'\n        >>> to_path = './'\n        >>> torchaudio.datasets.utils.download_from_url(url, from_path)\n        >>> torchaudio.datasets.utils.extract_archive(from_path, to_path)\n    \"\"\"\n\n    if to_path is None:\n        to_path = os.path.dirname(from_path)\n\n    try:\n        with tarfile.open(from_path, \"r\") as tar:\n            logging.info(\"Opened tar file {}.\".format(from_path))\n            files = []\n            for file_ in tar:\n                file_path = os.path.join(to_path, file_.name)\n                if file_.isfile():\n                    files.append(file_path)\n                    if os.path.exists(file_path):\n                        logging.info(\"{} already extracted.\".format(file_path))\n                        if not overwrite:\n                            continue\n                tar.extract(file_, to_path)\n            return files\n    except tarfile.ReadError:\n        pass\n\n    try:\n        with zipfile.ZipFile(from_path, \"r\") as zfile:\n            logging.info(\"Opened zip file {}.\".format(from_path))\n            files = zfile.namelist()\n            for file_ in files:\n                file_path = os.path.join(to_path, file_)\n                if os.path.exists(file_path):\n                    logging.info(\"{} already extracted.\".format(file_path))\n                    if not overwrite:\n                        continue\n                zfile.extract(file_, to_path)\n            return files\n    except zipfile.BadZipFile:\n        pass\n\n    raise NotImplementedError(\"We currently only support tar.gz, tgz, and zip archives.\")\n\n\ndef walk_files(root, suffix, prefix=False, remove_suffix=False):\n    \"\"\"List recursively all files ending with a suffix at a given root\n    Args:\n        root (str): Path to directory whose folders need to be listed\n        suffix (str or tuple): Suffix of the files to match, e.g.
'.png' or ('.jpg', '.png').\n It uses the Python \"str.endswith\" method and is passed directly\n prefix (bool, optional): If true, prepends the full path to each result, otherwise\n only returns the name of the files found (Default: ``False``)\n remove_suffix (bool, optional): If true, removes the suffix to each result defined in suffix,\n otherwise will return the result as found (Default: ``False``).\n \"\"\"\n\n root = os.path.expanduser(root)\n\n for dirpath, _, files in os.walk(root):\n for f in files:\n if f.endswith(suffix):\n\n if remove_suffix:\n f = f[: -len(suffix)]\n\n if prefix:\n f = os.path.join(dirpath, f)\n\n yield f\n\n\nclass _DiskCache(Dataset):\n \"\"\"\n Wrap a dataset so that, whenever a new item is returned, it is saved to disk.\n \"\"\"\n\n def __init__(self, dataset, location=\".cached\"):\n self.dataset = dataset\n self.location = location\n\n self._id = id(self)\n self._cache = [None] * len(dataset)\n\n def __getitem__(self, n):\n if self._cache[n]:\n f = self._cache[n]\n return torch.load(f)\n\n f = str(self._id) + \"-\" + str(n)\n f = os.path.join(self.location, f)\n item = self.dataset[n]\n\n self._cache[n] = f\n makedir_exist_ok(self.location)\n torch.save(item, f)\n\n return item\n\n def __len__(self):\n return len(self.dataset)\n\n\ndef diskcache_iterator(dataset, location=\".cached\"):\n return _DiskCache(dataset, location)\n\n\nclass _ThreadedIterator(threading.Thread):\n \"\"\"\n Prefetch the next queue_length items from iterator in a background thread.\n\n Example:\n >> for i in bg_iterator(range(10)):\n >> print(i)\n \"\"\"\n\n class _End:\n pass\n\n def __init__(self, generator, maxsize):\n threading.Thread.__init__(self)\n self.queue = Queue(maxsize)\n self.generator = generator\n self.daemon = True\n self.start()\n\n def run(self):\n for item in self.generator:\n self.queue.put(item)\n self.queue.put(self._End)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n next_item = self.queue.get()\n if next_item == self._End:\n raise StopIteration\n return next_item\n\n # Required for Python 2.7 compatibility\n def next(self):\n return self.__next__()\n\n\ndef bg_iterator(iterable, maxsize):\n return _ThreadedIterator(iterable, maxsize=maxsize)\n"
] |
[
[
"torch.load",
"torch.utils.model_zoo.tqdm",
"torch.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
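
For context, here is a minimal usage sketch of the download helpers defined in this row's `torchaudio/datasets/utils.py`. The URL, folder, and suffix below are hypothetical placeholders, not values taken from the dataset:

```python
import os
from torchaudio.datasets.utils import (
    bg_iterator, download_url, extract_archive, walk_files)

# Hypothetical archive location; any tar.gz/zip URL would do.
url = "https://example.com/dataset.tar.gz"
os.makedirs("data", exist_ok=True)

# Streams in 32 KiB chunks with a tqdm bar; resume=True appends to a partial file.
download_url(url, "data", resume=True)
extract_archive(os.path.join("data", "dataset.tar.gz"))

# Walk the extracted tree for .wav files, prefetching names in a background thread.
for path in bg_iterator(walk_files("data", suffix=".wav", prefix=True), maxsize=8):
    print(path)
```
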
edmundsj/sugarplot
|
[
"4e25eebfc6045482b0a3add978a1047d8fc696db"
] |
[
"sugarplot/source/plotters.py"
] |
[
"\"\"\"\nContains plotters for various types of datasets which require special plotting requirements.\n\"\"\"\nfrom matplotlib.figure import Figure\nimport sys, pathlib\nfrom sugarplot import normalize_pandas, prettifyPlot, ureg, plt, weibull, fit_weibull, \\\n fit_lia, fit_impedance, cmap\nfrom sciparse import to_standard_quantity, title_to_quantity, column_from_unit, cname_from_unit\nfrom scipy.optimize import curve_fit\nimport pandas as pd\nimport numpy as np\nfrom warnings import warn\n\ndef default_plotter(data, y_data=None, fig=None, ax=None,\n theory_func=None, theory_args=(), theory_kw={},\n theory_data=None, theory_y_data=None, fit_func=None,\n line_kw={}, subplot_kw={}, plot_type='line',\n theory_name='Theory', **kwargs):\n \"\"\"\n Default plotter which handles plotting pandas DataFrames, numpy arrays, and regular ol data.\n\n :param data: pandas DataFrame or array-like xdata\n :param y_data: array-like ydata\n :param theory_func: Function to plot along with xdata\n :param theory_kw: Keyword arguments to pass into theory_func\n :param theory_data: Theoretical data with same x/y axes as data\n :param line_kw: Keyword arguments to pass into ax.plot() function\n :param subplot_kw: Keyword arguments to pass into fig.subplots() function\n :param plot_type: Type of plot to generate. Current options are \"line\" and \"scatter\". Default is \"line\".\n \"\"\"\n if isinstance(data, pd.DataFrame):\n return _default_plot_pandas(data, fig=fig, ax=ax,\n theory_func=theory_func, theory_kw=theory_kw,\n theory_args=theory_args,\n theory_name=theory_name,\n theory_data=theory_data,\n subplot_kw=subplot_kw, line_kw=line_kw,\n plot_type=plot_type,\n fit_func=fit_func)\n elif isinstance(data, np.ndarray):\n return _default_plot_numpy(data, y_data, fig=fig, ax=ax,\n theory_func=theory_func, theory_kw=theory_kw,\n theory_args=theory_args,\n theory_name=theory_name,\n theory_x_data=theory_data,\n theory_y_data=theory_y_data,\n subplot_kw=subplot_kw, line_kw=line_kw,\n plot_type=plot_type,\n fit_func=fit_func)\n else:\n raise ValueError(f'Plot not implemented for type {type(data)}. Only pandas.DataFrame is supported')\n\ndef _default_plot_pandas(data, theory_data=None,\n subplot_kw={}, **kwargs):\n \"\"\"\n Plots a pandas DataFrame, assuming the xdata is located in the first column and the ydata is located in the second column. 
Not to be called directly.\n\n    :param data: DataFrame to be plotted.\n    :param fig: Figure to plot the data to\n    :param ax: axes to plot the data to\n    :param theory_func: Function to plot along with xdata, of the form theory_func(xdata, theory_kw)\n    :param theory_kw: Keyword arguments to be passed into theory_func\n    :param subplot_kw: Keyword arguments to be passed into fig.subplots()\n    \"\"\"\n    if 'xlabel' not in subplot_kw.keys():\n        subplot_kw = dict(subplot_kw, xlabel=data.columns[0])\n    if 'ylabel' not in subplot_kw.keys():\n        subplot_kw = dict(subplot_kw, ylabel=data.columns[1])\n\n    if isinstance(theory_data, pd.DataFrame):\n        theory_x_data = theory_data.iloc[:,0].values\n        theory_y_data = theory_data.iloc[:,1].values\n    else:\n        theory_x_data = None\n        theory_y_data = None\n\n    x_data = data.iloc[:, 0].values\n    y_data = data.iloc[:, 1].values\n\n    fig, ax = _default_plot_numpy(\n        x_data, y_data,\n        theory_x_data=theory_x_data,\n        theory_y_data=theory_y_data,\n        subplot_kw=subplot_kw,\n        **kwargs)\n\n    return fig, ax\n\ndef _default_plot_numpy(x_data, y_data, fig=None, ax=None,\n        theory_func=None, theory_args=(), theory_kw={},\n        theory_x_data=None, theory_y_data=None,\n        subplot_kw={}, line_kw={}, theory_name='Theory',\n        fit_func=None,\n        plot_type='line'):\n\n    if fig is None:\n        fig = Figure()\n    if ax is None:\n        if len(fig.axes) == 0:\n            ax = fig.subplots(subplot_kw=subplot_kw)\n        elif len(fig.axes) >= 1:\n            ax = fig.axes[0]\n            if 'xlabel' in subplot_kw:\n                if subplot_kw['xlabel'] == ax.get_xlabel() and \\\n                        subplot_kw['ylabel'] != ax.get_ylabel():\n                    ax = ax.twinx()\n                    twinned=True\n    if 'xlabel' in subplot_kw:\n        ax.set_xlabel(subplot_kw['xlabel'])\n    if 'ylabel' in subplot_kw:\n        ax.set_ylabel(subplot_kw['ylabel'])\n    if 'xscale' in subplot_kw:\n        ax.set_xscale(subplot_kw['xscale'])\n    if 'yscale' in subplot_kw:\n        ax.set_yscale(subplot_kw['yscale'])\n    line_kw = dict(color=cmap(1), **line_kw)\n\n\n    if plot_type == 'line':\n        ax.plot(x_data, y_data, **line_kw)\n    elif plot_type == 'scatter':\n        ax.scatter(x_data, y_data, **line_kw)\n    else:\n        raise ValueError(f'Plot type {plot_type} is unavailable. Only \"line\" and \"scatter\" are implemented')\n\n    if fit_func is not None:\n        theory_args, pcov = curve_fit(fit_func, x_data, y_data)\n        theory_func = fit_func\n        print(f'Fit params: {theory_args}')\n\n    if theory_func:\n        ax.plot(x_data, theory_func(x_data, *theory_args, **theory_kw),\n            linestyle='dashed', **line_kw)\n        if plot_type == 'line':\n            ax.legend(['Measured', theory_name])\n        else:\n            ax.legend([theory_name, 'Measured'])\n\n    if theory_x_data is not None and theory_y_data is not None:\n        ax.plot(theory_x_data, theory_y_data,\n            linestyle='dashed', **line_kw)\n        if plot_type == 'line':\n            ax.legend(['Measured', theory_name])\n        else:\n            ax.legend([theory_name, 'Measured'])\n    if 'xlim' not in subplot_kw:\n        xlim_lower = min(x_data) - abs(min(x_data))*0.1\n        xlim_higher = max(x_data) + abs(max(x_data))*0.1\n        ax.set_xlim(xlim_lower, xlim_higher)\n\n    prettifyPlot(fig=fig)\n    return fig, ax\n\ndef reflectance_plotter(\n        photocurrent, reference_photocurrent, R_ref,\n        subplot_kw={}, **kwargs):\n    \"\"\"\n    Plotter which takes a photocurrent, normalizes it to a reference photocurrent, and multiplies that by the reference's known or theoretical reflectance.\n\n    :param photocurrent: Pandas DataFrame of measured photocurrent vs.
wavelength (or frequency)\n    :param reference_photocurrent: Pandas DataFrame of measured photocurrent reflecting from a reference surface with a known reflectance\n    :param R_ref: Pandas DataFrame of known reflectance of surface (theoretical or measured)\n    :param fig: Optional figure to plot to. If empty, creates a figure.\n    :param ax: Optional axes to plot to. If empty, creates a new axes\n    :param theory_func: Theoretical reflectance function to plot alongside the measured reflectance\n    :param theory_kw: Keyword arguments for theoretical plotting function\n    :param subplot_kw: Keyword arguments to pass into the .subplots() function during Axes creation.\n    :param line_kw: Keyword arguments to pass into the .plot() function during Line2D creation.\n    \"\"\"\n    subplot_kw = dict({'ylabel': 'R', 'xlabel': photocurrent.columns[0]},\n        **subplot_kw)\n\n    R_norm = normalize_pandas(photocurrent, reference_photocurrent, np.divide, new_name='R')\n    R_actual = normalize_pandas(R_norm, R_ref, np.multiply, new_name='R')\n    fig, ax = default_plotter(R_actual,\n        subplot_kw=subplot_kw, **kwargs)\n    return fig, ax\n\ndef power_spectrum_plot(power_spectrum, **kwargs):\n    \"\"\"\n    Plots a given power spectrum.\n\n    :param power_spectrum: Power spectrum pandas DataFrame with Frequency in the first column and power in the second column\n    :returns fig, ax: Figure, axes pair for power spectrum plot\n\n    \"\"\"\n    if isinstance(power_spectrum, pd.DataFrame):\n        return _power_spectrum_plot_pandas(power_spectrum, **kwargs)\n    else:\n        raise NotImplementedError(\"Power spectrum plot not implemented\" +\n            f\" for type {type(power_spectrum)}\")\n\ndef _power_spectrum_plot_pandas(power_spectrum, subplot_kw={},\n        theory_data=None, **kwargs):\n    \"\"\"\n    Implementation of powerSpectrumPlot for a pandas DataFrame. Plots a given power spectrum with units in the form Unit Name (unit type), i.e.
Photocurrent (mA).\n\n :param power_spectrum: The power spectrum to be plotted, with frequency bins on one column and power in the second column\n :param fig: (optional) Figure to plot the data to\n :param ax: (optional) axes to plot the data to\n :param line_kw: Keyword arguments to pass into ax.plot()\n :param subplot_kw: Keyword arguments to pass into fig.subplots()\n :param theory_func: Theoretical PSD function\n :param theory_kw: Keyword arguments to pass into theory_func\n \"\"\"\n\n frequency_label = power_spectrum.columns.values[0]\n power_label = power_spectrum.columns.values[1]\n power_quantity = title_to_quantity(power_label)\n standard_quantity = to_standard_quantity(power_quantity)\n if '/ hertz' in str(power_quantity):\n is_psd = True\n standard_quantity = to_standard_quantity(power_quantity*ureg.Hz)\n else:\n is_psd = False\n standard_quantity = to_standard_quantity(power_quantity)\n base_units = np.sqrt(standard_quantity).units\n\n ylabel = 'Power (dB{:~}'.format(base_units)\n if is_psd:\n ylabel += '/Hz'\n ylabel += ')'\n\n subplot_kw = dict(\n subplot_kw,\n xlabel=power_spectrum.columns[0],\n ylabel=ylabel)\n\n x_data = power_spectrum[frequency_label].values\n y_data = 10*np.log10(standard_quantity.magnitude * \\\n power_spectrum[power_label].values)\n\n if isinstance(theory_data, pd.DataFrame):\n theory_x_data = theory_data.iloc[:,0].values\n theory_y_data = theory_data.iloc[:,1].values\n else:\n theory_x_data = None\n theory_y_data = None\n\n fig, ax = _default_plot_numpy(x_data, y_data,\n theory_x_data=theory_x_data, theory_y_data=theory_y_data,\n subplot_kw=subplot_kw, **kwargs)\n return fig, ax\n\ndef plot_weibull(data, subplot_kw={}, theory_name='Fit',\n plot_type='scatter', **kwargs):\n \"\"\"\n Plots a dataset to the best-fit Weibull distribution\n\n :param data: 1-D array-like data to be plotted\n\n \"\"\"\n if isinstance(data, pd.DataFrame):\n subplot_kw = dict(subplot_kw, xlabel=data.columns[-1])\n x_data = np.array(data.iloc[:,-1])\n else:\n x_data = data\n x_data = np.sort(x_data)\n\n subplot_kw = dict(subplot_kw, xscale='log', yscale='log',\n ylabel='-ln(1-F)')\n fit_params, pcov, cdf = fit_weibull(data)\n weibull_kw = {'beta': fit_params[0], 'x0': fit_params[1]}\n\n def theory_func(x, **kwargs):\n return -np.log(1 - weibull(x, **kwargs))\n\n y_data = -np.log(1 - cdf[1])\n\n fig, ax = _default_plot_numpy(x_data, y_data,\n theory_func=theory_func,\n theory_kw=weibull_kw, subplot_kw=subplot_kw,\n theory_name=theory_name, plot_type=plot_type,\n **kwargs)\n return fig, ax\n\ndef plot_lia(data, n_points=101, fit=True, **kwargs):\n \"\"\"\n Plots a cosine-fitted curve to the phase delay used in a lock-in amplifier - used to find the exact offset phase given a known sequence of synchronization pulses.\n\n :param data: Input data as a pandas DataFrame which is compatible with liapy.\n :returns fig, ax: Figure and axes generated by this function\n \"\"\"\n fitted_data, params = fit_lia(data, n_points=n_points, fit=fit)\n if fit:\n def theory_func(x, a=1, phase_delay=np.pi):\n return a * np.cos(x - phase_delay)\n theory_kw = {'a': params[0], 'phase_delay': params[1]}\n else:\n theory_func = None\n theory_kw={}\n return default_plotter(\n fitted_data,\n theory_func=theory_func, theory_kw=theory_kw,\n plot_type='scatter', **kwargs)\n\ndef plot_fit(data, fit_func, **kwargs):\n xdata = data.iloc[:,0]\n ydata = data.iloc[:,1]\n params, pcov = curve_fit(fit_func, xdata, ydata)\n print(f\"Fit params: {params}\")\n\n def theory_func(x):\n return fit_func(x, *params)\n\n 
return default_plotter(\n data,\n theory_func=theory_func,\n plot_type='scatter', **kwargs)\n\ndef plot_impedance(data, fit=True,\n model='rc', model_config='series', **kwargs):\n \"\"\"\n Plots the magnitude / phase of a set of impedance data.\n\n :param fit: Whether to attempt to fit an impedance model to the data\n :param model: Real/reactive model - options are \"rc\"\n :param model_config: Model configuration, either \"series\" or \"parallel\"\n \"\"\"\n# The challenge here is that we need to plot two things with the same x axis and different y axes: the magnitude and phase data. \n if data.shape[1] == 2:\n data_complex = True\n z_data = column_from_unit(data, ureg.ohm).to(ureg.ohm).m\n phase_data = np.angle(z_data)\n magnitude_data = np.abs(z_data)\n data_to_plot = pd.DataFrame({\n 'Frequency (Hz)': data.iloc[:,0],\n '|Z| (ohm)': magnitude_data,\n 'Phase (deg)': phase_data * 180/np.pi\n })\n else:\n data_complex = False\n phase_data = column_from_unit(data, ureg.rad).to(ureg.deg).m\n phase_name = cname_from_unit(data, ureg.rad)\n data_to_plot = data.rename(columns={phase_name: 'Phase (deg)'})\n data_to_plot['Phase (deg)'] = phase_data\n\n if fit:\n _, _, impedance_func = fit_impedance(data)\n # This needs to return a functional form for the impedance vs. frequency in addition to the relevant parameters, as it's not clear\n freq_data = column_from_unit(data, ureg.Hz)\n min_log = np.log10(freq_data.min().to(ureg.Hz).m)\n max_log = np.log10(freq_data.max().to(ureg.Hz).m)\n freq_theory_data = np.logspace(min_log, max_log, 100)\n impedance_theory_data = impedance_func(freq_theory_data)\n mag_theory_data = abs(impedance_theory_data)\n phase_theory_data = np.angle(impedance_theory_data)*180/np.pi\n theory_data = pd.DataFrame({\n 'Frequency (Hz)': freq_theory_data,\n 'Z (ohm)': mag_theory_data,\n 'Phase (deg)': phase_theory_data\n })\n\n else:\n theory_data = None\n\n theory_to_plot = None\n subplot_kw = {'xscale': 'log', 'yscale': 'log'}\n\n if theory_data is not None:\n theory_to_plot = theory_data.iloc[:,[0,1]]\n\n kwargs.update(subplot_kw=subplot_kw, theory_data=theory_to_plot,\n theory_name='|Z| (Fit)', plot_type='scatter')\n fig, ax = default_plotter(data_to_plot.iloc[:,[0,1]], **kwargs)\n\n if theory_data is not None:\n theory_to_plot = theory_data.iloc[:,[0,-1]]\n kwargs.update(subplot_kw=subplot_kw, fig=fig, ax=ax, theory_data=theory_to_plot,\n theory_name='Phase (Fit)', plot_type='scatter')\n fig, ax = default_plotter(data_to_plot.iloc[:,[0,-1]], **kwargs)\n\n prettifyPlot(fig=fig, ax=ax)\n return fig, ax\n\n\ndef show_figure(fig):\n \"\"\"\n create a dummy figure and use its manager to display \"fig\"\n \"\"\"\n\n dummy = plt.figure()\n new_manager = dummy.canvas.manager\n new_manager.canvas.figure = fig\n fig.set_canvas(new_manager.canvas)\n plt.show()\n"
] |
[
[
"numpy.log",
"numpy.sqrt",
"numpy.abs",
"matplotlib.figure.Figure",
"numpy.logspace",
"numpy.cos",
"numpy.sort",
"pandas.DataFrame",
"numpy.log10",
"numpy.angle",
"numpy.array",
"scipy.optimize.curve_fit"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
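
For context, a minimal sketch of the `fit_func` path through `default_plotter` above, on synthetic data. It assumes the module is importable as `sugarplot.source.plotters` (its path in this row); the column names and model are arbitrary:

```python
import numpy as np
import pandas as pd
from sugarplot.source.plotters import default_plotter

x = np.linspace(0, 10, 50)
data = pd.DataFrame({'Time (s)': x, 'Voltage (V)': 2.0 * x + 1.0})

def line(x, m, b):
    # Model handed to scipy's curve_fit inside _default_plot_numpy
    return m * x + b

fig, ax = default_plotter(data, fit_func=line, plot_type='scatter')
fig.savefig('fit.png')  # recent matplotlib attaches an Agg canvas on demand
```
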
piercus/triplet-reid
|
[
"0e82c86f4a8440a44850be0d64544f14034e88ad",
"0e82c86f4a8440a44850be0d64544f14034e88ad"
] |
[
"heads/fc1024_normalize.py",
"heads/fc1024.py"
] |
[
"import tensorflow as tf\nimport tf_slim as slim\n\ndef head(endpoints, embedding_dim, is_training):\n endpoints['head_output'] = slim.fully_connected(\n endpoints['model_output'], 1024, normalizer_fn=slim.batch_norm,\n normalizer_params={\n 'decay': 0.9,\n 'epsilon': 1e-5,\n 'scale': True,\n 'is_training': is_training,\n 'updates_collections': tf.compat.v1.GraphKeys.UPDATE_OPS,\n })\n\n endpoints['emb_raw'] = slim.fully_connected(\n endpoints['head_output'], embedding_dim, activation_fn=None,\n weights_initializer=tf.compat.v1.orthogonal_initializer(), scope='emb')\n endpoints['emb'] = tf.nn.l2_normalize(endpoints['emb_raw'], -1, name=\"out_emb\")\n\n return endpoints\n",
"import tensorflow as tf\nimport tf_slim as slim\n\ndef head(endpoints, embedding_dim, is_training):\n endpoints['head_output'] = slim.fully_connected(\n endpoints['model_output'], 1024, normalizer_fn=slim.batch_norm,\n normalizer_params={\n 'decay': 0.9,\n 'epsilon': 1e-5,\n 'scale': True,\n 'is_training': is_training,\n 'updates_collections': tf.compat.v1.GraphKeys.UPDATE_OPS,\n })\n\n endpoints['emb_raw'] = slim.fully_connected(\n endpoints['head_output'], embedding_dim, activation_fn=None,\n weights_initializer=tf.compat.v1.orthogonal_initializer(), scope='emb')\n endpoints['emb'] = tf.identity(endpoints['emb_raw'], name=\"out_emb\")\n \n return endpoints\n"
] |
[
[
"tensorflow.nn.l2_normalize",
"tensorflow.compat.v1.orthogonal_initializer"
],
[
"tensorflow.identity",
"tensorflow.compat.v1.orthogonal_initializer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
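
For context, a sketch contrasting the two heads above: `fc1024_normalize` L2-normalizes the embedding, while `fc1024` returns it raw via `tf.identity`. The `tf_slim` layers are graph-mode, so TF1-compat execution is used; the 2048-dim input and the repo-root import are assumptions:

```python
import numpy as np
import tensorflow as tf
from heads.fc1024_normalize import head  # assumes running from the repo root

tf.compat.v1.disable_eager_execution()
feats = tf.compat.v1.placeholder(tf.float32, [None, 2048])
endpoints = head({'model_output': feats}, embedding_dim=128, is_training=False)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    emb = sess.run(endpoints['emb'],
                   {feats: np.random.rand(4, 2048).astype(np.float32)})
    print(np.linalg.norm(emb, axis=1))  # ~1.0 everywhere for the normalized head
```
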
dylanirion/wildbook-ia
|
[
"3b7c30a6e123d87999950bfbb5035c4d9c1a6f5d",
"ac433d4f2a47b1d905c421a36c497f787003afc3"
] |
[
"wbia/unstable/_scriptvsone_grave.py",
"wbia/plottool/__MPL_INIT__.py"
] |
[
"# -*- coding: utf-8 -*-\n# flake8: noqa\nimport logging\nimport utool as ut\n\n(print, rrr, profile) = ut.inject2(__name__)\nlogger = logging.getLogger('wbia')\n\n\ndef load_multiclass_scores(self):\n # convert simple scores to multiclass scores\n import vtool as vt\n\n self.multiclass_scores = {}\n for key in self.samples.simple_scores.keys():\n scores = self.samples.simple_scores[key].values\n # Hack scores into the range 0 to 1\n normer = vt.ScoreNormalizer(adjust=8, monotonize=True)\n normer.fit(scores, y=self.samples.is_same())\n normed_scores = normer.normalize_scores(scores)\n # Create a dimension for each class\n # but only populate two of the dimensions\n class_idxs = ut.take(self.samples.text_to_class, ['nomatch', 'match'])\n pred = np.zeros((len(scores), len(self.samples.class_names)))\n pred[:, class_idxs[0]] = 1 - normed_scores\n pred[:, class_idxs[1]] = normed_scores\n self.multiclass_scores[key] = pred\n\n\ndef photobombing_subset():\n \"\"\"\n CommandLine:\n python -m wbia.scripts.script_vsone photobombing_subset\n \"\"\"\n import wbia\n\n # pair_sample = ut.odict([\n # ('top_gt', 4), ('mid_gt', 2), ('bot_gt', 2), ('rand_gt', 2),\n # ('top_gf', 3), ('mid_gf', 2), ('bot_gf', 1), ('rand_gf', 2),\n # ])\n qreq_ = wbia.testdata_qreq_(\n defaultdb='PZ_Master1',\n a=':mingt=2,species=primary',\n # t='default:K=4,Knorm=1,score_method=csum,prescore_method=csum',\n t='default:K=4,Knorm=1,score_method=csum,prescore_method=csum,QRH=True',\n )\n ibs = qreq_.ibs\n # cm_list = qreq_.execute()\n # infr = wbia.AnnotInference.from_qreq_(qreq_, cm_list, autoinit=True)\n # aid_pairs_ = infr._cm_training_pairs(rng=np.random.RandomState(42),\n # **pair_sample)\n\n # # ut.dict_hist(ut.flatten(am_tags))\n # am_rowids = ibs._get_all_annotmatch_rowids()\n # am_tags = ibs.get_annotmatch_case_tags(am_rowids)\n # am_flags = ut.filterflags_general_tags(am_tags, has_any=['photobomb'])\n # am_rowids_ = ut.compress(am_rowids, am_flags)\n # aids1 = ibs.get_annotmatch_aid1(am_rowids_)\n # aids2 = ibs.get_annotmatch_aid2(am_rowids_)\n # pb_aids_pairs = list(zip(aids1, aids2))\n\n # # aids = unique_pb_aids = ut.unique(ut.flatten(pb_aids_pairs))\n # # ut.compress(unique_pb_aids, ibs.is_aid_unknown(unique_pb_aids))\n\n # assert len(pb_aids_pairs) > 0\n\n # # Keep only a random subset\n # subset_idxs = list(range(len(aid_pairs_)))\n # rng = np.random.RandomState(3104855634)\n # num_max = len(pb_aids_pairs)\n # if num_max < len(subset_idxs):\n # subset_idxs = rng.choice(subset_idxs, size=num_max, replace=False)\n # subset_idxs = sorted(subset_idxs)\n # aid_pairs_ = ut.take(aid_pairs_, subset_idxs)\n\n # aid_pairs_ += pb_aids_pairs\n # unique_aids = ut.unique(ut.flatten(aid_pairs_))\n\n # a1 = ibs.filter_annots_general(unique_aids, is_known=True, verbose=True, min_pername=2, has_none=['photobomb'])\n # a2 = ibs.filter_annots_general(unique_aids, has_any=['photobomb'], verbose=True, is_known=True)\n # a = sorted(set(a1 + a2))\n # ibs.print_annot_stats(a)\n # len(a)\n\n a = [\n 8,\n 27,\n 30,\n 86,\n 87,\n 90,\n 92,\n 94,\n 99,\n 103,\n 104,\n 106,\n 111,\n 217,\n 218,\n 242,\n 298,\n 424,\n 425,\n 456,\n 464,\n 465,\n 472,\n 482,\n 529,\n 559,\n 574,\n 585,\n 588,\n 592,\n 598,\n 599,\n 601,\n 617,\n 630,\n 645,\n 661,\n 664,\n 667,\n 694,\n 723,\n 724,\n 759,\n 768,\n 843,\n 846,\n 861,\n 862,\n 866,\n 933,\n 934,\n 980,\n 987,\n 1000,\n 1003,\n 1005,\n 1011,\n 1017,\n 1020,\n 1027,\n 1059,\n 1074,\n 1076,\n 1080,\n 1095,\n 1096,\n 1107,\n 1108,\n 1192,\n 1203,\n 1206,\n 1208,\n 1220,\n 1222,\n 1223,\n 1224,\n 
1256,\n 1278,\n 1293,\n 1294,\n 1295,\n 1296,\n 1454,\n 1456,\n 1474,\n 1484,\n 1498,\n 1520,\n 1521,\n 1548,\n 1563,\n 1576,\n 1593,\n 1669,\n 1675,\n 1680,\n 1699,\n 1748,\n 1751,\n 1811,\n 1813,\n 1821,\n 1839,\n 1927,\n 1934,\n 1938,\n 1952,\n 1992,\n 2003,\n 2038,\n 2054,\n 2066,\n 2080,\n 2103,\n 2111,\n 2170,\n 2171,\n 2175,\n 2192,\n 2216,\n 2227,\n 2240,\n 2250,\n 2253,\n 2266,\n 2272,\n 2288,\n 2292,\n 2314,\n 2329,\n 2341,\n 2344,\n 2378,\n 2397,\n 2417,\n 2429,\n 2444,\n 2451,\n 2507,\n 2551,\n 2552,\n 2553,\n 2581,\n 2628,\n 2640,\n 2642,\n 2646,\n 2654,\n 2667,\n 2686,\n 2733,\n 2743,\n 2750,\n 2759,\n 2803,\n 2927,\n 3008,\n 3054,\n 3077,\n 3082,\n 3185,\n 3205,\n 3284,\n 3306,\n 3334,\n 3370,\n 3386,\n 3390,\n 3393,\n 3401,\n 3448,\n 3508,\n 3542,\n 3597,\n 3614,\n 3680,\n 3684,\n 3695,\n 3707,\n 3727,\n 3758,\n 3765,\n 3790,\n 3812,\n 3813,\n 3818,\n 3858,\n 3860,\n 3874,\n 3875,\n 3887,\n 3892,\n 3915,\n 3918,\n 3924,\n 3927,\n 3929,\n 3933,\n 3941,\n 3952,\n 3955,\n 3956,\n 3959,\n 4004,\n 4059,\n 4073,\n 4076,\n 4089,\n 4094,\n 4124,\n 4126,\n 4128,\n 4182,\n 4189,\n 4217,\n 4222,\n 4229,\n 4257,\n 4266,\n 4268,\n 4288,\n 4289,\n 4296,\n 4306,\n 4339,\n 4353,\n 4376,\n 4403,\n 4428,\n 4455,\n 4487,\n 4494,\n 4515,\n 4517,\n 4524,\n 4541,\n 4544,\n 4556,\n 4580,\n 4585,\n 4597,\n 4604,\n 4629,\n 4639,\n 4668,\n 4671,\n 4672,\n 4675,\n 4686,\n 4688,\n 4693,\n 4716,\n 4730,\n 4731,\n 4749,\n 4772,\n 4803,\n 4820,\n 4823,\n 4832,\n 4833,\n 4836,\n 4900,\n 4902,\n 4909,\n 4924,\n 4936,\n 4938,\n 4939,\n 4944,\n 5004,\n 5006,\n 5034,\n 5043,\n 5044,\n 5055,\n 5064,\n 5072,\n 5115,\n 5131,\n 5150,\n 5159,\n 5165,\n 5167,\n 5168,\n 5174,\n 5218,\n 5235,\n 5245,\n 5249,\n 5309,\n 5319,\n 5334,\n 5339,\n 5344,\n 5347,\n 5378,\n 5379,\n 5384,\n 5430,\n 5447,\n 5466,\n 5509,\n 5546,\n 5587,\n 5588,\n 5621,\n 5640,\n 5663,\n 5676,\n 5682,\n 5685,\n 5687,\n 5690,\n 5707,\n 5717,\n 5726,\n 5732,\n 5733,\n 5791,\n 5830,\n 5863,\n 5864,\n 5869,\n 5870,\n 5877,\n 5879,\n 5905,\n 5950,\n 6008,\n 6110,\n 6134,\n 6160,\n 6167,\n 6234,\n 6238,\n 6265,\n 6344,\n 6345,\n 6367,\n 6384,\n 6386,\n 6437,\n 6495,\n 6533,\n 6538,\n 6569,\n 6587,\n 6626,\n 6634,\n 6643,\n 6659,\n 6661,\n 6689,\n 6714,\n 6725,\n 6739,\n 6754,\n 6757,\n 6759,\n 6763,\n 6781,\n 6830,\n 6841,\n 6843,\n 6893,\n 6897,\n 6913,\n 6930,\n 6932,\n 6936,\n 6944,\n 6976,\n 7003,\n 7022,\n 7037,\n 7052,\n 7058,\n 7074,\n 7103,\n 7107,\n 7108,\n 7113,\n 7143,\n 7183,\n 7185,\n 7187,\n 7198,\n 7200,\n 7202,\n 7207,\n 7222,\n 7275,\n 7285,\n 7388,\n 7413,\n 7421,\n 7425,\n 7429,\n 7445,\n 7487,\n 7507,\n 7508,\n 7528,\n 7615,\n 7655,\n 7696,\n 7762,\n 7786,\n 7787,\n 7796,\n 7797,\n 7801,\n 7807,\n 7808,\n 7809,\n 7826,\n 7834,\n 7835,\n 7852,\n 7861,\n 7874,\n 7881,\n 7901,\n 7902,\n 7905,\n 7913,\n 7918,\n 7941,\n 7945,\n 7990,\n 7999,\n 8007,\n 8009,\n 8017,\n 8018,\n 8019,\n 8034,\n 8041,\n 8057,\n 8058,\n 8079,\n 8080,\n 8086,\n 8089,\n 8092,\n 8094,\n 8100,\n 8105,\n 8109,\n 8147,\n 8149,\n 8153,\n 8221,\n 8264,\n 8302,\n 8303,\n 8331,\n 8366,\n 8367,\n 8370,\n 8376,\n 8474,\n 8501,\n 8504,\n 8506,\n 8507,\n 8514,\n 8531,\n 8532,\n 8534,\n 8538,\n 8563,\n 8564,\n 8587,\n 8604,\n 8608,\n 8751,\n 8771,\n 8792,\n 9175,\n 9204,\n 9589,\n 9726,\n 9841,\n 10674,\n 12122,\n 12305,\n 12796,\n 12944,\n 12947,\n 12963,\n 12966,\n 13098,\n 13099,\n 13101,\n 13103,\n 13109,\n 13147,\n 13157,\n 13168,\n 13194,\n 13236,\n 13253,\n 13255,\n 13410,\n 13450,\n 13474,\n 13477,\n 13481,\n 13508,\n 13630,\n 13670,\n 13727,\n 13741,\n 13819,\n 
13820,\n 13908,\n 13912,\n 13968,\n 13979,\n 14007,\n 14009,\n 14010,\n 14019,\n 14066,\n 14067,\n 14072,\n 14074,\n 14148,\n 14153,\n 14224,\n 14230,\n 14237,\n 14239,\n 14241,\n 14274,\n 14277,\n 14290,\n 14293,\n 14308,\n 14309,\n 14313,\n 14319,\n 14668,\n 14670,\n 14776,\n 14918,\n 14920,\n 14924,\n 15135,\n 15157,\n 15318,\n 15319,\n 15490,\n 15518,\n 15531,\n 15777,\n 15903,\n 15913,\n 16004,\n 16012,\n 16013,\n 16014,\n 16020,\n 16215,\n 16221,\n 16235,\n 16240,\n 16259,\n 16273,\n 16279,\n 16284,\n 16289,\n 16316,\n 16322,\n 16329,\n 16336,\n 16364,\n 16389,\n 16706,\n 16897,\n 16898,\n 16903,\n 16949,\n 17094,\n 17101,\n 17137,\n 17200,\n 17222,\n 17290,\n 17327,\n 17336,\n ]\n\n from wbia.dbio import export_subset\n\n export_subset.export_annots(ibs, a, 'PZ_PB_RF_TRAIN')\n\n # closed_aids = ibs.annots(unique_aids).get_name_image_closure()\n\n # annots = ibs.annots(unique_aids)\n # closed_gt_aids = ut.unique(ut.flatten(ibs.get_annot_groundtruth(unique_aids)))\n # closed_gt_aids = ut.unique(ut.flatten(ibs.get_annot_groundtruth(unique_aids)))\n # closed_img_aids = ut.unique(ut.flatten(ibs.get_annot_otherimage_aids(unique_aids)))\n\n # ibs.print_annot_stats(unique_aids)\n # all_annots = ibs.annots()\n\n\ndef bigcache_vsone(qreq_, hyper_params):\n \"\"\"\n Cached output of one-vs-one matches\n\n >>> from wbia.scripts.script_vsone import * # NOQA\n >>> self = OneVsOneProblem()\n >>> qreq_ = self.qreq_\n >>> hyper_params = self.hyper_params\n \"\"\"\n import vtool as vt\n import wbia\n\n # Get a set of training pairs\n ibs = qreq_.ibs\n cm_list = qreq_.execute()\n infr = wbia.AnnotInference.from_qreq_(qreq_, cm_list, autoinit=True)\n\n # Per query choose a set of correct, incorrect, and random training pairs\n aid_pairs_ = infr._cm_training_pairs(\n rng=np.random.RandomState(42), **hyper_params.pair_sample\n )\n\n aid_pairs_ = vt.unique_rows(np.array(aid_pairs_), directed=False).tolist()\n\n pb_aid_pairs_ = photobomb_samples(ibs)\n\n # TODO: try to add in more non-comparable samples\n aid_pairs_ = pb_aid_pairs_ + aid_pairs_\n aid_pairs_ = vt.unique_rows(np.array(aid_pairs_))\n\n # ======================================\n # Compute one-vs-one scores and local_measures\n # ======================================\n\n # Prepare lazy attributes for annotations\n qreq_ = infr.qreq_\n ibs = qreq_.ibs\n qconfig2_ = qreq_.extern_query_config2\n dconfig2_ = qreq_.extern_data_config2\n qannot_cfg = ibs.depc.stacked_config(None, 'featweight', qconfig2_)\n dannot_cfg = ibs.depc.stacked_config(None, 'featweight', dconfig2_)\n\n # Remove any pairs missing features\n if dannot_cfg == qannot_cfg:\n unique_annots = ibs.annots(np.unique(np.array(aid_pairs_)), config=dannot_cfg)\n bad_aids = unique_annots.compress(~np.array(unique_annots.num_feats) > 0).aids\n bad_aids = set(bad_aids)\n else:\n annots1_ = ibs.annots(ut.unique(ut.take_column(aid_pairs_, 0)), config=qannot_cfg)\n annots2_ = ibs.annots(ut.unique(ut.take_column(aid_pairs_, 1)), config=dannot_cfg)\n bad_aids1 = annots1_.compress(~np.array(annots1_.num_feats) > 0).aids\n bad_aids2 = annots2_.compress(~np.array(annots2_.num_feats) > 0).aids\n bad_aids = set(bad_aids1 + bad_aids2)\n subset_idxs = np.where(\n [not (a1 in bad_aids or a2 in bad_aids) for a1, a2 in aid_pairs_]\n )[0]\n # Keep only a random subset\n if hyper_params.subsample:\n rng = np.random.RandomState(3104855634)\n num_max = hyper_params.subsample\n if num_max < len(subset_idxs):\n subset_idxs = rng.choice(subset_idxs, size=num_max, replace=False)\n subset_idxs = 
sorted(subset_idxs)\n\n # Take the current selection\n aid_pairs = ut.take(aid_pairs_, subset_idxs)\n\n if True:\n # NEW WAY\n config = hyper_params.vsone_assign\n # TODO: ensure annot probs like chips and features can be appropriately\n # set via qreq_ config or whatever\n matches = infr.exec_vsone_subset(aid_pairs, config=config)\n else:\n query_aids = ut.take_column(aid_pairs, 0)\n data_aids = ut.take_column(aid_pairs, 1)\n # OLD WAY\n # Determine a unique set of annots per config\n configured_aids = ut.ddict(set)\n configured_aids[qannot_cfg].update(query_aids)\n configured_aids[dannot_cfg].update(data_aids)\n\n # Make efficient annot-object representation\n configured_obj_annots = {}\n for config, aids in configured_aids.items():\n annots = ibs.annots(sorted(list(aids)), config=config)\n configured_obj_annots[config] = annots\n\n annots1 = configured_obj_annots[qannot_cfg].loc(query_aids)\n annots2 = configured_obj_annots[dannot_cfg].loc(data_aids)\n\n # Get hash based on visual annotation appearence of each pair\n # as well as algorithm configurations used to compute those properties\n qvuuids = annots1.visual_uuids\n dvuuids = annots2.visual_uuids\n qcfgstr = annots1._config.get_cfgstr()\n dcfgstr = annots2._config.get_cfgstr()\n annots_cfgstr = ut.hashstr27(qcfgstr) + ut.hashstr27(dcfgstr)\n vsone_uuids = [\n ut.combine_uuids(uuids, salt=annots_cfgstr)\n for uuids in ut.ProgIter(\n zip(qvuuids, dvuuids), length=len(qvuuids), label='hashing ids'\n )\n ]\n\n # Combine into a big cache for the entire 1-v-1 matching run\n big_uuid = ut.hashstr_arr27(vsone_uuids, '', pathsafe=True)\n cacher = ut.Cacher('vsone_v7', cfgstr=str(big_uuid), appname='vsone_rf_train')\n\n cached_data = cacher.tryload()\n if cached_data is not None:\n # Caching doesn't work 100% for PairwiseMatch object, so we need to do\n # some postprocessing\n configured_lazy_annots = ut.ddict(dict)\n for config, annots in configured_obj_annots.items():\n annot_dict = configured_lazy_annots[config]\n for _annot in ut.ProgIter(annots.scalars(), label='make lazy dict'):\n annot_dict[_annot.aid] = _annot._make_lazy_dict()\n\n # Extract pairs of annot objects (with shared caches)\n lazy_annots1 = ut.take(configured_lazy_annots[qannot_cfg], query_aids)\n lazy_annots2 = ut.take(configured_lazy_annots[dannot_cfg], data_aids)\n\n # Create a set of PairwiseMatches with the correct annot properties\n matches = [\n vt.PairwiseMatch(annot1, annot2)\n for annot1, annot2 in zip(lazy_annots1, lazy_annots2)\n ]\n\n # Updating a new matches dictionary ensure the annot1/annot2 properties\n # are set correctly\n for key, cached_matches in list(cached_data.items()):\n fixed_matches = [match.copy() for match in matches]\n for fixed, internal in zip(fixed_matches, cached_matches):\n dict_ = internal.__dict__\n ut.delete_dict_keys(dict_, ['annot1', 'annot2'])\n fixed.__dict__.update(dict_)\n cached_data[key] = fixed_matches\n else:\n cached_data = vsone_(\n qreq_,\n query_aids,\n data_aids,\n qannot_cfg,\n dannot_cfg,\n configured_obj_annots,\n hyper_params,\n )\n cacher.save(cached_data)\n # key_ = 'SV_LNBNN'\n key_ = 'RAT_SV'\n # for key in list(cached_data.keys()):\n # if key != 'SV_LNBNN':\n # del cached_data[key]\n matches = cached_data[key_]\n return matches, infr\n\n\ndef bigcache_vsone(qreq_, hyper_params):\n \"\"\"\n Cached output of one-vs-one matches\n\n >>> from wbia.scripts.script_vsone import * # NOQA\n >>> self = OneVsOneProblem()\n >>> qreq_ = self.qreq_\n >>> hyper_params = self.hyper_params\n \"\"\"\n import vtool as vt\n import 
wbia\n\n # Get a set of training pairs\n ibs = qreq_.ibs\n cm_list = qreq_.execute()\n infr = wbia.AnnotInference.from_qreq_(qreq_, cm_list, autoinit=True)\n\n # Per query choose a set of correct, incorrect, and random training pairs\n aid_pairs_ = infr._cm_training_pairs(\n rng=np.random.RandomState(42), **hyper_params.pair_sample\n )\n\n aid_pairs_ = vt.unique_rows(np.array(aid_pairs_), directed=False).tolist()\n\n pb_aid_pairs_ = photobomb_samples(ibs)\n\n # TODO: try to add in more non-comparable samples\n aid_pairs_ = pb_aid_pairs_ + aid_pairs_\n aid_pairs_ = vt.unique_rows(np.array(aid_pairs_))\n\n # ======================================\n # Compute one-vs-one scores and local_measures\n # ======================================\n\n # Prepare lazy attributes for annotations\n qreq_ = infr.qreq_\n ibs = qreq_.ibs\n qconfig2_ = qreq_.extern_query_config2\n dconfig2_ = qreq_.extern_data_config2\n qannot_cfg = ibs.depc.stacked_config(None, 'featweight', qconfig2_)\n dannot_cfg = ibs.depc.stacked_config(None, 'featweight', dconfig2_)\n\n # Remove any pairs missing features\n if dannot_cfg == qannot_cfg:\n unique_annots = ibs.annots(np.unique(np.array(aid_pairs_)), config=dannot_cfg)\n bad_aids = unique_annots.compress(~np.array(unique_annots.num_feats) > 0).aids\n bad_aids = set(bad_aids)\n else:\n annots1_ = ibs.annots(ut.unique(ut.take_column(aid_pairs_, 0)), config=qannot_cfg)\n annots2_ = ibs.annots(ut.unique(ut.take_column(aid_pairs_, 1)), config=dannot_cfg)\n bad_aids1 = annots1_.compress(~np.array(annots1_.num_feats) > 0).aids\n bad_aids2 = annots2_.compress(~np.array(annots2_.num_feats) > 0).aids\n bad_aids = set(bad_aids1 + bad_aids2)\n subset_idxs = np.where(\n [not (a1 in bad_aids or a2 in bad_aids) for a1, a2 in aid_pairs_]\n )[0]\n # Keep only a random subset\n if hyper_params.subsample:\n rng = np.random.RandomState(3104855634)\n num_max = hyper_params.subsample\n if num_max < len(subset_idxs):\n subset_idxs = rng.choice(subset_idxs, size=num_max, replace=False)\n subset_idxs = sorted(subset_idxs)\n\n # Take the current selection\n aid_pairs = ut.take(aid_pairs_, subset_idxs)\n\n if True:\n # NEW WAY\n config = hyper_params.vsone_assign\n # TODO: ensure annot probs like chips and features can be appropriately\n # set via qreq_ config or whatever\n matches = infr.exec_vsone_subset(aid_pairs, config=config)\n else:\n query_aids = ut.take_column(aid_pairs, 0)\n data_aids = ut.take_column(aid_pairs, 1)\n # OLD WAY\n # Determine a unique set of annots per config\n configured_aids = ut.ddict(set)\n configured_aids[qannot_cfg].update(query_aids)\n configured_aids[dannot_cfg].update(data_aids)\n\n # Make efficient annot-object representation\n configured_obj_annots = {}\n for config, aids in configured_aids.items():\n annots = ibs.annots(sorted(list(aids)), config=config)\n configured_obj_annots[config] = annots\n\n annots1 = configured_obj_annots[qannot_cfg].loc(query_aids)\n annots2 = configured_obj_annots[dannot_cfg].loc(data_aids)\n\n # Get hash based on visual annotation appearence of each pair\n # as well as algorithm configurations used to compute those properties\n qvuuids = annots1.visual_uuids\n dvuuids = annots2.visual_uuids\n qcfgstr = annots1._config.get_cfgstr()\n dcfgstr = annots2._config.get_cfgstr()\n annots_cfgstr = ut.hashstr27(qcfgstr) + ut.hashstr27(dcfgstr)\n vsone_uuids = [\n ut.combine_uuids(uuids, salt=annots_cfgstr)\n for uuids in ut.ProgIter(\n zip(qvuuids, dvuuids), length=len(qvuuids), label='hashing ids'\n )\n ]\n\n # Combine into a big cache for the 
entire 1-v-1 matching run\n big_uuid = ut.hashstr_arr27(vsone_uuids, '', pathsafe=True)\n cacher = ut.Cacher('vsone_v7', cfgstr=str(big_uuid), appname='vsone_rf_train')\n\n cached_data = cacher.tryload()\n if cached_data is not None:\n # Caching doesn't work 100% for PairwiseMatch object, so we need to do\n # some postprocessing\n configured_lazy_annots = ut.ddict(dict)\n for config, annots in configured_obj_annots.items():\n annot_dict = configured_lazy_annots[config]\n for _annot in ut.ProgIter(annots.scalars(), label='make lazy dict'):\n annot_dict[_annot.aid] = _annot._make_lazy_dict()\n\n # Extract pairs of annot objects (with shared caches)\n lazy_annots1 = ut.take(configured_lazy_annots[qannot_cfg], query_aids)\n lazy_annots2 = ut.take(configured_lazy_annots[dannot_cfg], data_aids)\n\n # Create a set of PairwiseMatches with the correct annot properties\n matches = [\n vt.PairwiseMatch(annot1, annot2)\n for annot1, annot2 in zip(lazy_annots1, lazy_annots2)\n ]\n\n # Updating a new matches dictionary ensure the annot1/annot2 properties\n # are set correctly\n for key, cached_matches in list(cached_data.items()):\n fixed_matches = [match.copy() for match in matches]\n for fixed, internal in zip(fixed_matches, cached_matches):\n dict_ = internal.__dict__\n ut.delete_dict_keys(dict_, ['annot1', 'annot2'])\n fixed.__dict__.update(dict_)\n cached_data[key] = fixed_matches\n else:\n cached_data = vsone_(\n qreq_,\n query_aids,\n data_aids,\n qannot_cfg,\n dannot_cfg,\n configured_obj_annots,\n hyper_params,\n )\n cacher.save(cached_data)\n # key_ = 'SV_LNBNN'\n key_ = 'RAT_SV'\n # for key in list(cached_data.keys()):\n # if key != 'SV_LNBNN':\n # del cached_data[key]\n matches = cached_data[key_]\n return matches, infr\n\n\ndef vsone_(\n qreq_,\n query_aids,\n data_aids,\n qannot_cfg,\n dannot_cfg,\n configured_obj_annots,\n hyper_params,\n):\n # Do vectorized preload before constructing lazy dicts\n # Then make sure the lazy dicts point to this subset\n unique_obj_annots = list(configured_obj_annots.values())\n for annots in ut.ProgIter(unique_obj_annots, 'vectorized preload'):\n annots.set_caching(True)\n annots.chip_size\n annots.vecs\n annots.kpts\n annots.yaw\n annots.qual\n annots.gps\n annots.time\n if qreq_.qparams.featweight_enabled:\n annots.fgweights\n # annots._internal_attrs.clear()\n\n # Make convinient lazy dict representations (after loading pre info)\n configured_lazy_annots = ut.ddict(dict)\n for config, annots in configured_obj_annots.items():\n annot_dict = configured_lazy_annots[config]\n for _annot in ut.ProgIter(annots.scalars(), label='make lazy dict'):\n annot = _annot._make_lazy_dict()\n annot_dict[_annot.aid] = annot\n\n unique_lazy_annots = ut.flatten([x.values() for x in configured_lazy_annots.values()])\n\n flann_params = {'algorithm': 'kdtree', 'trees': 4}\n for annot in ut.ProgIter(unique_lazy_annots, label='lazy flann'):\n vt.matching.ensure_metadata_flann(annot, flann_params)\n vt.matching.ensure_metadata_normxy(annot)\n\n for annot in ut.ProgIter(unique_lazy_annots, 'preload kpts'):\n annot['kpts']\n for annot in ut.ProgIter(unique_lazy_annots, 'preload normxy'):\n annot['norm_xys']\n for annot in ut.ProgIter(unique_lazy_annots, 'preload vecs'):\n annot['vecs']\n\n # Extract pairs of annot objects (with shared caches)\n lazy_annots1 = ut.take(configured_lazy_annots[qannot_cfg], query_aids)\n lazy_annots2 = ut.take(configured_lazy_annots[dannot_cfg], data_aids)\n\n # TODO: param search over grid\n # 'use_sv': [0, 1],\n # 'use_fg': [0, 1],\n # 
'use_ratio_test': [0, 1],\n matches_RAT = [\n vt.PairwiseMatch(annot1, annot2)\n for annot1, annot2 in zip(lazy_annots1, lazy_annots2)\n ]\n\n # Construct global measurements\n global_keys = ['yaw', 'qual', 'gps', 'time']\n for match in ut.ProgIter(matches_RAT, label='setup globals'):\n match.add_global_measures(global_keys)\n\n # Preload flann for only specific annots\n for match in ut.ProgIter(matches_RAT, label='preload FLANN'):\n match.annot1['flann']\n\n cfgdict = hyper_params.vsone_assign\n # Find one-vs-one matches\n # cfgdict = {'checks': 20, 'symmetric': False}\n for match in ut.ProgIter(matches_RAT, label='assign vsone'):\n match.assign(cfgdict=cfgdict)\n\n # gridsearch_ratio_thresh()\n # vt.matching.gridsearch_match_operation(matches_RAT, 'apply_ratio_test', {\n # 'ratio_thresh': np.linspace(.6, .7, 50)\n # })\n for match in ut.ProgIter(matches_RAT, label='apply ratio thresh'):\n match.apply_ratio_test({'ratio_thresh': 0.638}, inplace=True)\n\n # TODO gridsearch over sv params\n # vt.matching.gridsearch_match_operation(matches_RAT, 'apply_sver', {\n # 'xy_thresh': np.linspace(0, 1, 3)\n # })\n matches_RAT_SV = [\n match.apply_sver(inplace=True) for match in ut.ProgIter(matches_RAT, label='sver')\n ]\n\n # Add keypoint spatial information to local features\n for match in matches_RAT_SV:\n match.add_local_measures()\n # key_ = 'norm_xys'\n # norm_xy1 = match.annot1[key_].take(match.fm.T[0], axis=1)\n # norm_xy2 = match.annot2[key_].take(match.fm.T[1], axis=1)\n # match.local_measures['norm_x1'] = norm_xy1[0]\n # match.local_measures['norm_y1'] = norm_xy1[1]\n # match.local_measures['norm_x2'] = norm_xy2[0]\n # match.local_measures['norm_y2'] = norm_xy2[1]\n\n # match.local_measures['scale1'] = vt.get_scales(\n # match.annot1['kpts'].take(match.fm.T[0], axis=0))\n # match.local_measures['scale2'] = vt.get_scales(\n # match.annot2['kpts'].take(match.fm.T[1], axis=0))\n\n # Create another version where we find global normalizers for the data\n # qreq_.load_indexer()\n # matches_SV_LNBNN = batch_apply_lnbnn(matches_RAT_SV, qreq_, inplace=True)\n\n # if 'weight' in cfgdict:\n # for match in matches_SV_LNBNN[::-1]:\n # lnbnn_dist = match.local_measures['lnbnn']\n # ndist = match.local_measures['lnbnn_norm_dist']\n # weights = match.local_measures[cfgdict['weight']]\n # match.local_measures['weighted_lnbnn'] = weights * lnbnn_dist\n # match.local_measures['weighted_lnbnn_norm_dist'] = weights * ndist\n # match.fs = match.local_measures['weighted_lnbnn']\n\n cached_data = {\n # 'RAT': matches_RAT,\n 'RAT_SV': matches_RAT_SV,\n # 'SV_LNBNN': matches_SV_LNBNN,\n }\n return cached_data\n\n from sklearn.metrics.classification import coo_matrix\n\n def quick_cm(y_true, y_pred, labels, sample_weight):\n n_labels = len(labels)\n C = coo_matrix(\n (sample_weight, (y_true, y_pred)), shape=(n_labels, n_labels)\n ).toarray()\n return C\n\n def quick_mcc(C):\n \"\"\" assumes y_true and y_pred are in index/encoded format \"\"\"\n t_sum = C.sum(axis=1)\n p_sum = C.sum(axis=0)\n n_correct = np.diag(C).sum()\n n_samples = p_sum.sum()\n cov_ytyp = n_correct * n_samples - np.dot(t_sum, p_sum)\n cov_ypyp = n_samples ** 2 - np.dot(p_sum, p_sum)\n cov_ytyt = n_samples ** 2 - np.dot(t_sum, t_sum)\n mcc = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)\n return mcc\n\n def mcc_hack():\n sample_weight = np.ones(len(self.samples), dtype=np.int)\n task_mccs = ut.ddict(dict)\n # Determine threshold levels per score type\n score_to_order = {}\n for scoretype in score_dict.keys():\n y_score = score_dict[scoretype].values\n 
sortx = np.argsort(y_score, kind='mergesort')[::-1]\n y_score = y_score[sortx]\n distinct_value_indices = np.where(np.diff(y_score))[0]\n threshold_idxs = np.r_[distinct_value_indices, y_score.size - 1]\n thresh = y_score[threshold_idxs]\n score_to_order[scoretype] = (sortx, y_score, thresh)\n\n classes_ = np.array([0, 1], dtype=np.int)\n for task in task_list:\n labels = self.samples.subtasks[task]\n for sublabels in labels.gen_one_vs_rest_labels():\n for scoretype in score_dict.keys():\n sortx, y_score, thresh = score_to_order[scoretype]\n y_true = sublabels.y_enc[sortx]\n mcc = -np.inf\n for t in thresh:\n y_pred = (y_score > t).astype(np.int)\n C1 = quick_cm(y_true, y_pred, classes_, sample_weight)\n mcc1 = quick_mcc(C1)\n if mcc1 < 0:\n C2 = quick_cm(y_true, 1 - y_pred, classes_, sample_weight)\n mcc1 = quick_mcc(C2)\n mcc = max(mcc1, mcc)\n # logger.info('mcc = %r' % (mcc,))\n task_mccs[sublabels.task_name][scoretype] = mcc\n return task_mccs\n\n if 0:\n with ut.Timer('mcc'):\n task_mccs = mcc_hack()\n logger.info('\\nMCC of simple scoring measures:')\n df = pd.DataFrame.from_dict(task_mccs, orient='index')\n from utool.experimental.pandas_highlight import to_string_monkey\n\n logger.info(to_string_monkey(df, highlight_cols=np.arange(len(df.columns))))\n\n # _all_dfs.append(df_rf)\n # df_all = pd.concat(_all_dfs, axis=1)\n\n # # Add in the simple scores\n # from utool.experimental.pandas_highlight import to_string_monkey\n # logger.info(to_string_monkey(df_all, highlight_cols=np.arange(len(df_all.columns))))\n\n # best_name = df_all.columns[df_all.values.argmax()]\n # pt.show_if_requested()\n # import utool\n # utool.embed()\n # logger.info('rat_sver_rf_auc = %r' % (rat_sver_rf_auc,))\n # columns = ['Method', 'AUC']\n # data = [\n # ['1vM-LNBNN', vsmany_lnbnn_auc],\n # ['1v1-LNBNN', vsone_sver_lnbnn_auc],\n # ['1v1-RAT', rat_auc],\n # ['1v1-RAT+SVER', rat_sver_auc],\n # ['1v1-RAT+SVER+RF', rat_sver_rf_auc],\n # ]\n # table = pd.DataFrame(data, columns=columns)\n # error = 1 - table['AUC']\n # orig = 1 - vsmany_lnbnn_auc\n # import tabulate\n # table = table.assign(percent_error_decrease=(orig - error) / orig * 100)\n # col_to_nice = {\n # 'percent_error_decrease': '% error decrease',\n # }\n # header = [col_to_nice.get(c, c) for c in table.columns]\n # logger.info(tabulate.tabulate(table.values, header, tablefmt='orgtbl'))\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nNotes:\n To use various backends certian packages are required\n\n PyQt\n ...\n\n Tk\n pip install\n sudo apt-get install tk\n sudo apt-get install tk-dev\n\n Wx\n pip install wxPython\n\n GTK\n pip install PyGTK\n pip install pygobject\n pip install pygobject\n\n Cairo\n pip install pycairo\n pip install py2cairo\n pip install cairocffi\n sudo apt-get install libcairo2-dev\n\n\nCommandLine:\n python -m wbia.plottool.draw_func2 --exec-imshow --show --mplbe=GTKAgg\n python -m wbia.plottool.draw_func2 --exec-imshow --show --mplbe=TkAgg\n python -m wbia.plottool.draw_func2 --exec-imshow --show --mplbe=WxAgg\n python -m wbia.plottool.draw_func2 --exec-imshow --show --mplbe=WebAgg\n python -m wbia.plottool.draw_func2 --exec-imshow --show --mplbe=gdk\n python -m wbia.plottool.draw_func2 --exec-imshow --show --mplbe=cairo\n\n\"\"\"\nimport sys\nimport os\nimport utool as ut\n\nut.noinject(__name__, '[plottool.__MPL_INIT__]')\n\ntry:\n import builtins\n\n profile = getattr(builtins, 'profile')\nexcept AttributeError:\n\n def profile(func):\n return func\n\n\n__IS_INITIALIZED__ = False\n__WHO_INITIALIZED__ = None\n\n\nVERBOSE_MPLINIT = ut.get_argflag(('--verb-mpl', '--verbose'))\nTARGET_BACKEND = ut.get_argval(\n ('--mpl-backend', '--mplbe'), type_=str, default=os.environ.get('MPL_BACKEND', None)\n)\nFALLBACK_BACKEND = ut.get_argval(\n ('--mpl-fallback-backend', '--mplfbbe'), type_=str, default='agg'\n)\n\n\ndef print_all_backends():\n import matplotlib.rcsetup as rcsetup\n\n print(rcsetup.all_backends)\n valid_backends = [\n 'GTK',\n 'GTKAgg',\n 'GTKCairo',\n 'MacOSX',\n 'Qt4Agg',\n 'Qt5Agg',\n 'TkAgg',\n 'WX',\n 'WXAgg',\n 'CocoaAgg',\n 'GTK3Cairo',\n 'GTK3Agg',\n 'WebAgg',\n 'nbAgg',\n 'agg',\n 'cairo',\n 'emf',\n 'gdk',\n 'pdf',\n 'pgf',\n 'ps',\n 'svg',\n 'template',\n ]\n del valid_backends\n\n\ndef get_pyqt():\n have_guitool = ut.check_module_installed('guitool')\n try:\n if have_guitool:\n from wbia.guitool import __PYQT__ as PyQt # NOQA\n\n pyqt_version = PyQt._internal.GUITOOL_PYQT_VERSION\n else:\n try:\n import PyQt5 as PyQt\n\n pyqt_version = 5\n except ImportError:\n import PyQt4 as PyQt\n\n pyqt_version = 4\n except ImportError:\n PyQt = None\n pyqt_version = None\n return PyQt, pyqt_version\n\n\ndef get_target_backend():\n if (\n not sys.platform.startswith('win32')\n and not sys.platform.startswith('darwin')\n and os.environ.get('DISPLAY', None) is None\n ):\n # Write to files if we cannot display\n # target_backend = 'PDF'\n target_backend = FALLBACK_BACKEND\n else:\n target_backend = TARGET_BACKEND\n if target_backend is None:\n PyQt, pyqt_version = get_pyqt()\n if pyqt_version is None:\n print('[!plotttool] WARNING backend fallback to %s' % (FALLBACK_BACKEND,))\n target_backend = FALLBACK_BACKEND\n elif pyqt_version == 4:\n target_backend = 'Qt4Agg'\n elif pyqt_version == 5:\n target_backend = 'Qt5Agg'\n else:\n raise ValueError('Unknown pyqt version %r' % (pyqt_version,))\n return target_backend\n\n\ndef _init_mpl_rcparams():\n import matplotlib as mpl\n from matplotlib import style\n\n # http://matplotlib.org/users/style_sheets.html\n nogg = ut.get_argflag('--nogg')\n if not nogg:\n style.use('ggplot')\n # style.use(['ggplot'])\n # print('style.available = %r' % (style.available,))\n # style.use(['bmh'])\n # style.use(['classic'])\n # import utool\n # utool.embed()\n # style.use(['ggplot', 'dark_background'])\n if ut.get_argflag('--notoolbar'):\n toolbar = 'None'\n else:\n toolbar = 'toolbar2'\n mpl.rcParams['toolbar'] = toolbar\n # 
mpl.rc('text', usetex=False)\n\n if ut.get_argflag('--usetex'):\n # mpl.rc('text', usetex=True)\n mpl.rcParams['text.usetex'] = True\n # matplotlib.rcParams['text.latex.preamble']=[r\"\\usepackage{amsmath}\"]\n mpl.rcParams['text.latex.unicode'] = True\n mpl_keypress_shortcuts = [\n key for key in mpl.rcParams.keys() if key.find('keymap') == 0\n ]\n for key in mpl_keypress_shortcuts:\n mpl.rcParams[key] = ''\n\n CUSTOM_GGPLOT = 1\n if CUSTOM_GGPLOT and not nogg:\n ggplot_style = style.library['ggplot'] # NOQA\n # print('ggplot_style = %r' % (ggplot_style,))\n custom_gg = {\n 'axes.axisbelow': True,\n # 'axes.edgecolor': 'white',\n 'axes.facecolor': '#E5E5E5',\n 'axes.edgecolor': 'none',\n # 'axes.facecolor': 'white',\n 'axes.grid': True,\n 'axes.labelcolor': '#555555',\n 'axes.labelsize': 'large',\n 'axes.linewidth': 1.0,\n 'axes.titlesize': 'x-large',\n 'figure.edgecolor': '0.50',\n 'figure.facecolor': 'white',\n 'font.size': 10.0,\n 'grid.color': 'white',\n 'grid.linestyle': '-',\n 'patch.antialiased': True,\n 'patch.edgecolor': '#EEEEEE',\n 'patch.facecolor': '#348ABD',\n 'patch.linewidth': 0.5,\n 'xtick.color': '#555555',\n 'xtick.direction': 'out',\n 'ytick.color': '#555555',\n 'ytick.direction': 'out',\n 'axes.prop_cycle': mpl.cycler(\n 'color',\n [\n '#E24A33',\n '#348ABD',\n '#988ED5',\n '#777777',\n '#FBC15E',\n '#8EBA42',\n '#FFB5B8',\n ],\n ),\n }\n mpl.rcParams.update(custom_gg)\n\n NICE_DARK_BG = False\n if NICE_DARK_BG:\n dark_style = {\n 'axes.edgecolor': 'white',\n 'axes.facecolor': 'black',\n 'axes.labelcolor': 'white',\n 'figure.edgecolor': 'black',\n 'figure.facecolor': 'black',\n 'grid.color': 'white',\n 'lines.color': 'white',\n 'patch.edgecolor': 'white',\n 'savefig.edgecolor': 'black',\n 'savefig.facecolor': 'black',\n 'text.color': 'white',\n 'xtick.color': 'white',\n 'ytick.color': 'white',\n }\n mpl.rcParams.update(dark_style)\n mpl.rcParams['figure.subplot.top'] = 0.8\n # mpl.rcParams['text'].usetex = False\n # for key in mpl_keypress_shortcuts:\n # print('%s = %s' % (key, mpl.rcParams[key]))\n # Disable mpl shortcuts\n # mpl.rcParams['toolbar'] = 'None'\n # mpl.rcParams['interactive'] = True\n\n # import matplotlib.pyplot as plt\n # plt.xkcd()\n\n\ndef _mpl_set_backend(target_backend):\n import matplotlib as mpl\n\n if ut.get_argflag('--leave-mpl-backend-alone'):\n print('[pt] LEAVE THE BACKEND ALONE !!! was specified')\n print('[pt] not changing mpl backend')\n else:\n # mpl.use(target_backend, force=True)\n mpl.use(target_backend, force=False)\n current_backend = mpl.get_backend()\n if not ut.QUIET and ut.VERBOSE:\n print('[pt] current backend is: %r' % current_backend)\n\n\ndef _init_mpl_mainprocess(verbose=VERBOSE_MPLINIT):\n global __IS_INITIALIZED__\n global __WHO_INITIALIZED__\n import matplotlib as mpl\n\n # mpl.interactive(True)\n current_backend = mpl.get_backend()\n target_backend = get_target_backend()\n if __IS_INITIALIZED__ is True:\n if verbose:\n print(\n '[!plottool] matplotlib has already been initialized. 
backend=%r'\n % current_backend\n )\n print('[!plottool] Initially initialized by %r' % __WHO_INITIALIZED__)\n print(\n '[!plottool] Trying to be init by %r'\n % (ut.get_caller_name(N=range(0, 5)))\n )\n return False\n __IS_INITIALIZED__ = True\n\n if verbose:\n print('[plottool] matplotlib initialized by %r' % __WHO_INITIALIZED__)\n __WHO_INITIALIZED__ = ut.get_caller_name(N=range(0, 5))\n if verbose:\n print('--- INIT MPL---')\n print('[pt] current backend is: %r' % current_backend)\n print('[pt] mpl.use(%r)' % target_backend)\n if current_backend != target_backend:\n _mpl_set_backend(target_backend)\n _init_mpl_rcparams()\n\n\n@profile\ndef init_matplotlib(verbose=VERBOSE_MPLINIT):\n if ut.in_main_process():\n PyQt, pyqt_version = get_pyqt()\n return _init_mpl_mainprocess(verbose=verbose)\n"
] |
[
[
"sklearn.metrics.classification.coo_matrix"
],
[
"matplotlib.rcParams.keys",
"matplotlib.style.use",
"matplotlib.use",
"matplotlib.get_backend",
"matplotlib.rcParams.update",
"matplotlib.cycler"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
madphysicist/qutip
|
[
"297776737cf91df0468022ba2b1a3090af0d6549"
] |
[
"qutip/mcsolve.py"
] |
[
"# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\n__all__ = ['mcsolve']\n\nimport os\nimport numpy as np\nfrom numpy.random import RandomState, randint\nimport scipy.sparse as sp\nfrom scipy.integrate import ode\nfrom scipy.integrate._ode import zvode\n\nfrom types import FunctionType, BuiltinFunctionType\nfrom functools import partial\nfrom qutip.fastsparse import csr2fast\nfrom qutip.qobj import Qobj\nfrom qutip.qobjevo import QobjEvo\nfrom qutip.parallel import parfor, parallel_map, serial_map\nfrom qutip.cy.mcsolve import CyMcOde, CyMcOdeDiag\nfrom qutip.cy.spconvert import dense1D_to_fastcsr_ket\nfrom qutip.sesolve import sesolve\nfrom qutip.solver import (Options, Result, ExpectOps,\n solver_safe, SolverSystem)\nfrom qutip.settings import debug\nfrom qutip.ui.progressbar import TextProgressBar, BaseProgressBar\nimport qutip.settings\n\nif debug:\n import inspect\n\n#\n# Internal, global variables for storing references to dynamically loaded\n# cython functions\n\n\n# Todo: use real warning\ndef warn(text):\n print(text)\n\n\nclass qutip_zvode(zvode):\n def step(self, *args):\n itask = self.call_args[2]\n self.rwork[0] = args[4]\n self.call_args[2] = 5\n r = self.run(*args)\n self.call_args[2] = itask\n return r\n\n\ndef mcsolve(H, psi0, tlist, c_ops=[], e_ops=[], ntraj=0,\n args={}, options=None, progress_bar=True,\n map_func=parallel_map, map_kwargs={}, _safe_mode=True):\n r\"\"\"Monte Carlo evolution of a state vector :math:`|\\psi \\rangle` for a\n given Hamiltonian and sets of collapse operators, and possibly, operators\n for calculating expectation values. Options for the underlying ODE solver\n are given by the Options class.\n\n mcsolve supports time-dependent Hamiltonians and collapse operators using\n either Python functions of strings to represent time-dependent\n coefficients. 
Note that the system Hamiltonian MUST have at least one\n    constant term.\n\n    As an example of a time-dependent problem, consider a Hamiltonian with two\n    terms ``H0`` and ``H1``, where ``H1`` is time-dependent with coefficient\n    ``sin(w*t)``, and collapse operators ``C0`` and ``C1``, where ``C1`` is\n    time-dependent with coefficient ``exp(-a*t)``. Here, w and a are constant\n    arguments with values ``W`` and ``A``.\n\n    Using the Python function time-dependent format requires two Python\n    functions, one for each collapse coefficient. Therefore, this problem could\n    be expressed as::\n\n        def H1_coeff(t,args):\n            return sin(args['w']*t)\n\n        def C1_coeff(t,args):\n            return exp(-args['a']*t)\n\n        H = [H0, [H1, H1_coeff]]\n\n        c_ops = [C0, [C1, C1_coeff]]\n\n        args={'a': A, 'w': W}\n\n    or in String (Cython) format we could write::\n\n        H = [H0, [H1, 'sin(w*t)']]\n\n        c_ops = [C0, [C1, 'exp(-a*t)']]\n\n        args={'a': A, 'w': W}\n\n    Constant terms are preferably placed first in the Hamiltonian and collapse\n    operator lists.\n\n    Parameters\n    ----------\n    H : :class:`qutip.Qobj`, ``list``\n        System Hamiltonian.\n\n    psi0 : :class:`qutip.Qobj`\n        Initial state vector.\n\n    tlist : array_like\n        Times at which results are recorded.\n\n    ntraj : int\n        Number of trajectories to run.\n\n    c_ops : :class:`qutip.Qobj`, ``list``\n        single collapse operator or a ``list`` of collapse operators.\n\n    e_ops : :class:`qutip.Qobj`, ``list``\n        single operator as Qobj or ``list`` or equivalent of Qobj operators\n        for calculating expectation values.\n\n    args : dict\n        Arguments for time-dependent Hamiltonian and collapse operator terms.\n\n    options : Options\n        Instance of ODE solver options.\n\n    progress_bar: BaseProgressBar\n        Optional instance of BaseProgressBar, or a subclass thereof, for\n        showing the progress of the simulation. Set to None to disable the\n        progress bar.\n\n    map_func: function\n        A map function for managing the calls to the single-trajectory solver.\n\n    map_kwargs: dictionary\n        Optional keyword arguments to the map_func function.\n\n    Returns\n    -------\n    results : :class:`qutip.solver.Result`\n        Object storing all results from the simulation.\n\n    .. note::\n\n        It is possible to reuse the random number seeds from a previous run\n        of the mcsolver by passing the output Result object seeds via the\n        Options class, i.e. 
Options(seeds=prev_result.seeds).\n \"\"\"\n if isinstance(c_ops, (Qobj, QobjEvo)):\n c_ops = [c_ops]\n\n if options is None:\n options = Options()\n\n if options.rhs_reuse and not isinstance(H, SolverSystem):\n # TODO: deprecate when going to class based solver.\n if \"mcsolve\" in solver_safe:\n # print(\" \")\n H = solver_safe[\"mcsolve\"]\n else:\n pass\n # raise Exception(\"Could not find the Hamiltonian to reuse.\")\n\n if not ntraj:\n ntraj = options.ntraj\n\n if len(c_ops) == 0 and not options.rhs_reuse:\n warn(\"No c_ops, using sesolve\")\n return sesolve(H, psi0, tlist, e_ops=e_ops, args=args,\n options=options, progress_bar=progress_bar,\n _safe_mode=_safe_mode)\n\n try:\n num_traj = int(ntraj)\n except TypeError:\n num_traj = max(ntraj)\n\n # set the physics\n if not psi0.isket:\n raise Exception(\"Initial state must be a state vector.\")\n\n # load monte carlo class\n mc = _MC(options)\n\n if isinstance(H, SolverSystem):\n mc.ss = H\n else:\n mc.make_system(H, c_ops, tlist, args, options)\n\n mc.reset(tlist[0], psi0)\n\n mc.set_e_ops(e_ops)\n\n if options.seeds is not None:\n mc.seed(num_traj, options.seeds)\n\n if _safe_mode:\n mc.run_test()\n\n # Run the simulation\n mc.run(num_traj=num_traj, tlist=tlist,\n progress_bar=progress_bar,\n map_func=map_func, map_kwargs=map_kwargs)\n\n return mc.get_result(ntraj)\n\n\n# -----------------------------------------------------------------------------\n# MONTE CARLO CLASS\n# -----------------------------------------------------------------------------\nclass _MC():\n \"\"\"\n Private class for solving Monte Carlo evolution from mcsolve\n \"\"\"\n def __init__(self, options=None):\n if options is None:\n options = Options()\n self.options = options\n self.ss = None\n self.tlist = None\n self.e_ops = None\n self.ran = False\n self.psi0 = None\n self.seeds = []\n self.t = 0.\n self.num_traj = 0\n self.args_col = None\n\n self._psi_out = []\n self._expect_out = []\n self._collapse = []\n self._ss_out = []\n\n def reset(self, t=0., psi0=None):\n if psi0 is not None:\n self.psi0 = psi0\n if self.psi0 is not None:\n self.initial_vector = self.psi0.full().ravel(\"F\")\n if self.ss is not None and self.ss.type == \"Diagonal\":\n self.initial_vector = np.dot(self.ss.Ud, self.initial_vector)\n\n self.t = t\n self.ran = False\n self._psi_out = []\n self._expect_out = []\n self._collapse = []\n self._ss_out = []\n\n def seed(self, ntraj, seeds=[]):\n # setup seeds array\n np.random.seed()\n try:\n seed = int(seeds)\n np.random.seed(seed)\n seeds = []\n except TypeError:\n pass\n\n if len(seeds) < ntraj:\n self.seeds = seeds + list(randint(0, 2**32,\n size=ntraj-len(seeds),\n dtype=np.uint32))\n else:\n self.seeds = seeds[:ntraj]\n\n def make_system(self, H, c_ops, tlist=None, args={}, options=None):\n if options is None:\n options = self.options\n else:\n self.options = options\n var = _collapse_args(args)\n\n ss = SolverSystem()\n ss.td_c_ops = []\n ss.td_n_ops = []\n ss.args = args\n ss.col_args = var\n for c in c_ops:\n cevo = QobjEvo(c, args, tlist=tlist)\n cdc = cevo._cdc()\n cevo.compile()\n cdc.compile()\n ss.td_c_ops.append(cevo)\n ss.td_n_ops.append(cdc)\n\n try:\n H_td = QobjEvo(H, args, tlist=tlist)\n H_td *= -1j\n for c in ss.td_n_ops:\n H_td += -0.5 * c\n if options.rhs_with_state:\n H_td._check_old_with_state()\n H_td.compile()\n ss.H_td = H_td\n ss.makefunc = _qobjevo_set\n ss.set_args = _qobjevo_args\n ss.type = \"QobjEvo\"\n\n except Exception:\n ss.h_func = H\n ss.Hc_td = -0.5 * sum(ss.td_n_ops)\n ss.Hc_td.compile()\n 
ss.with_state = options.rhs_with_state\n ss.makefunc = _func_set\n ss.set_args = _func_args\n ss.type = \"callback\"\n\n solver_safe[\"mcsolve\"] = ss\n self.ss = ss\n self.reset()\n\n def set_e_ops(self, e_ops=[]):\n if e_ops:\n self.e_ops = ExpectOps(e_ops)\n else:\n self.e_ops = ExpectOps([])\n\n ss = self.ss\n if ss is not None and ss.type == \"Diagonal\" and not self.e_ops.isfunc:\n e_ops = [\n Qobj(ss.Ud @ e.full() @ ss.U, dims=e.dims)\n for e in self.e_ops.e_ops\n ]\n self.e_ops = ExpectOps(e_ops)\n\n if not self.e_ops:\n self.options.store_states = True\n\n def run_test(self):\n try:\n for c_op in self.ss.td_c_ops:\n c_op.mul_vec(0, self.psi0)\n except Exception as e:\n raise Exception(\"c_ops are not consistant with psi0\") from e\n\n if self.ss.type == \"QobjEvo\":\n try:\n self.ss.H_td.mul_vec(0., self.psi0)\n except Exception as e:\n raise Exception(\"Error calculating H\") from e\n else:\n try:\n rhs, ode_args = self.ss.makefunc(self.ss)\n rhs(0, self.psi0.full().ravel(), ode_args)\n except Exception as e:\n raise Exception(\"Error calculating H\") from e\n\n def run(self, num_traj=0, psi0=None, tlist=None,\n args={}, e_ops=None, options=None,\n progress_bar=True,\n map_func=parallel_map, map_kwargs={}):\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # 4 situation for run:\n # - first run\n # - change parameters\n # - add trajectories\n # (self.add_traj) Not Implemented\n # - continue from the last time and states\n # (self.continue_runs) Not Implemented\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n options = options if options is not None else self.options\n\n if self.ran and tlist[0] == self.t:\n # psi0 is ignored since we restart from a\n # different states for each trajectories\n self.continue_runs(num_traj, tlist, args, e_ops, options,\n progress_bar, map_func, map_kwargs)\n return\n\n if args and args != self.ss.args:\n self.ss.set_args(self.ss, args)\n self.reset()\n\n if e_ops and e_ops != self.e_ops:\n self.set_e_ops(e_ops)\n self.reset()\n\n if psi0 is not None and psi0 != self.psi0:\n self.psi0 = psi0\n self.reset()\n\n tlist = np.array(tlist)\n if tlist is not None and np.all(tlist != self.tlist):\n self.tlist = tlist\n self.reset()\n\n if self.ran:\n if options.store_states and self._psi_out[0].shape[0] == 1:\n self.reset()\n else:\n # if not reset here, add trajectories\n self.add_traj(num_traj, progress_bar, map_func, map_kwargs)\n return\n\n if not num_traj:\n num_traj = options.ntraj\n\n if options.num_cpus == 1 or num_traj == 1:\n map_func = serial_map\n\n if len(self.seeds) != num_traj:\n self.seed(num_traj, self.seeds)\n\n if not progress_bar:\n progress_bar = BaseProgressBar()\n elif progress_bar is True:\n progress_bar = TextProgressBar()\n\n # set arguments for input to monte carlo\n map_kwargs_ = {'progress_bar': progress_bar,\n 'num_cpus': options.num_cpus}\n map_kwargs_.update(map_kwargs)\n map_kwargs = map_kwargs_\n\n if self.e_ops is None:\n self.set_e_ops()\n\n if self.ss.type == \"Diagonal\":\n results = map_func(self._single_traj_diag, list(range(num_traj)),\n **map_kwargs)\n else:\n results = map_func(self._single_traj, list(range(num_traj)),\n **map_kwargs)\n\n self.t = self.tlist[-1]\n self.num_traj = num_traj\n self.ran = True\n\n for result in results:\n state_out, ss_out, expect, collapse = result\n self._psi_out.append(state_out)\n self._ss_out.append(ss_out)\n self._expect_out.append(expect)\n self._collapse.append(collapse)\n self._psi_out = np.stack(self._psi_out)\n self._ss_out 
= np.stack(self._ss_out)\n\n def add_traj(self, num_traj,\n progress_bar=True,\n map_func=parallel_map, map_kwargs={}):\n raise NotImplementedError\n\n def continue_runs(self, num_traj, tlist, args={}, e_ops=[], options=None,\n progress_bar=True,\n map_func=parallel_map, map_kwargs={}):\n raise NotImplementedError\n\n # --------------------------------------------------------------------------\n # results functions\n # --------------------------------------------------------------------------\n @property\n def states(self):\n dims = self.psi0.dims[0]\n len_ = self._psi_out.shape[2]\n if self._psi_out.shape[1] == 1:\n dm_t = np.zeros((len_, len_), dtype=complex)\n for i in range(self.num_traj):\n vec = self._psi_out[i, 0]\n dm_t += np.outer(vec, vec.conj())\n return Qobj(dm_t/self.num_traj, dims=[dims, dims])\n else:\n states = np.empty((len(self.tlist)), dtype=object)\n for j in range(len(self.tlist)):\n dm_t = np.zeros((len_, len_), dtype=complex)\n for i in range(self.num_traj):\n vec = self._psi_out[i, j]\n dm_t += np.outer(vec, vec.conj())\n states[j] = Qobj(dm_t/self.num_traj, dims=[dims, dims])\n return states\n\n @property\n def final_state(self):\n dims = self.psi0.dims[0]\n len_ = self._psi_out.shape[2]\n dm_t = np.zeros((len_, len_), dtype=complex)\n for i in range(self.num_traj):\n vec = self._psi_out[i, -1]\n dm_t += np.outer(vec, vec.conj())\n return Qobj(dm_t/self.num_traj, dims=[dims, dims])\n\n @property\n def runs_final_states(self):\n dims = self.psi0.dims[0]\n psis = np.empty((self.num_traj), dtype=object)\n for i in range(self.num_traj):\n psis[i] = Qobj(dense1D_to_fastcsr_ket(self._psi_out[i, -1]),\n dims=dims, fast='mc')\n return psis\n\n @property\n def expect(self):\n return self.expect_traj_avg()\n\n @property\n def runs_expect(self):\n return [expt.finish() for expt in self._expect_out]\n\n def expect_traj_avg(self, ntraj=0):\n if not ntraj:\n ntraj = len(self._expect_out)\n expect = np.stack([expt.raw_out for expt in self._expect_out[:ntraj]])\n expect = np.mean(expect, axis=0)\n\n result = []\n for ii in range(self.e_ops.e_num):\n if self.e_ops.e_ops_isherm[ii]:\n result.append(np.real(expect[ii, :]))\n else:\n result.append(expect[ii, :])\n\n if self.e_ops.e_ops_dict:\n result = {e: result[n]\n for n, e in enumerate(self.e_ops.e_ops_dict.keys())}\n return result\n\n @property\n def steady_state(self):\n if self._ss_out is not None:\n dims = self.psi0.dims[0]\n len_ = self.psi0.shape[0]\n return Qobj(np.mean(self._ss_out, axis=0),\n dims=[dims, dims], shape=(len_, len_))\n # TO-DO rebuild steady_state from _psi_out if needed\n # elif self._psi_out is not None:\n # return sum(self.state_average) / self.num_traj\n else:\n return None\n\n @property\n def runs_states(self):\n dims = self.psi0.dims\n psis = np.empty((self.num_traj, len(self.tlist)), dtype=object)\n for i in range(self.num_traj):\n for j in range(len(self.tlist)):\n psis[i, j] = Qobj(dense1D_to_fastcsr_ket(self._psi_out[i, j]),\n dims=dims, fast='mc')\n return psis\n\n @property\n def collapse(self):\n return self._collapse\n\n @property\n def collapse_times(self):\n out = []\n for col_ in self._collapse:\n col = list(zip(*col_))\n col = ([] if len(col) == 0 else col[0])\n out.append(np.array(col))\n return out\n return [np.array(list(zip(*col_))[0]) for col_ in self._collapse]\n\n @property\n def collapse_which(self):\n out = []\n for col_ in self._collapse:\n col = list(zip(*col_))\n col = ([] if len(col) == 0 else col[1])\n out.append(np.array(col))\n return out\n return 
[np.array(list(zip(*col_))[1]) for col_ in self._collapse]\n\n def get_result(self, ntraj=[]):\n # Store results in the Result object\n if not ntraj:\n ntraj = [self.num_traj]\n elif not isinstance(ntraj, list):\n ntraj = [ntraj]\n\n output = Result()\n output.solver = 'mcsolve'\n output.seeds = self.seeds\n\n options = self.options\n output.options = options\n\n if options.steady_state_average:\n output.states = self.steady_state\n elif options.average_states and options.store_states:\n output.states = self.states\n elif options.store_states:\n output.states = self.runs_states\n\n if options.store_final_state:\n if options.average_states:\n output.final_state = self.final_state\n else:\n output.final_state = self.runs_final_states\n\n if options.average_expect:\n output.expect = [self.expect_traj_avg(n) for n in ntraj]\n if len(output.expect) == 1:\n output.expect = output.expect[0]\n else:\n output.expect = self.runs_expect\n\n # simulation parameters\n output.times = self.tlist\n output.num_expect = self.e_ops.e_num\n output.num_collapse = len(self.ss.td_c_ops)\n output.ntraj = self.num_traj\n output.col_times = self.collapse_times\n output.col_which = self.collapse_which\n\n return output\n\n # --------------------------------------------------------------------------\n # single-trajectory for monte carlo\n # --------------------------------------------------------------------------\n def _single_traj(self, nt):\n \"\"\"\n Monte Carlo algorithm returning state-vector or expectation values\n at times tlist for a single trajectory.\n \"\"\"\n # SEED AND RNG AND GENERATE\n prng = RandomState(self.seeds[nt])\n opt = self.options\n\n # set initial conditions\n ss = self.ss\n tlist = self.tlist\n e_ops = self.e_ops.copy()\n opt = self.options\n rhs, ode_args = self.ss.makefunc(ss)\n ODE = self._build_integration_func(rhs, ode_args, opt)\n ODE.set_initial_value(self.initial_vector, tlist[0])\n e_ops.init(tlist)\n\n cymc = CyMcOde(ss, opt)\n states_out, ss_out, collapses = cymc.run_ode(ODE, tlist, e_ops, prng)\n\n # Run at end of mc_alg function\n # -----------------------------\n if opt.steady_state_average:\n ss_out /= float(len(tlist))\n\n return (states_out, ss_out, e_ops, collapses)\n\n def _build_integration_func(self, rhs, ode_args, opt):\n \"\"\"\n Create the integration function while fixing the parameters\n \"\"\"\n ODE = ode(rhs)\n if ode_args:\n ODE.set_f_params(ode_args)\n # initialize ODE solver for RHS\n ODE.set_integrator('zvode', method=\"adams\")\n ODE._integrator = qutip_zvode(\n method=opt.method, order=opt.order, atol=opt.atol,\n rtol=opt.rtol, nsteps=opt.nsteps, first_step=opt.first_step,\n min_step=opt.min_step, max_step=opt.max_step)\n return ODE\n\n # --------------------------------------------------------------------------\n # In development diagonalize the Hamiltonian before solving\n # Same seeds give same evolution\n # 3~5 time faster\n # constant system only.\n # --------------------------------------------------------------------------\n def make_diag_system(self, H, c_ops):\n ss = SolverSystem()\n ss.td_c_ops = []\n ss.td_n_ops = []\n\n H_ = H.copy()\n H_ *= -1j\n for c in c_ops:\n H_ += -0.5 * c.dag() * c\n\n w, v = np.linalg.eig(H_.full())\n arg = np.argsort(np.abs(w))\n eig = w[arg]\n U = v.T[arg].T\n Ud = U.T.conj()\n\n for c in c_ops:\n c_diag = Qobj(Ud @ c.full() @ U, dims=c.dims)\n cevo = QobjEvo(c_diag)\n cdc = cevo._cdc()\n cevo.compile()\n cdc.compile()\n ss.td_c_ops.append(cevo)\n ss.td_n_ops.append(cdc)\n\n ss.H_diag = eig\n ss.Ud = Ud\n ss.U = U\n 
ss.args = {}\n        ss.type = \"Diagonal\"\n        solver_safe[\"mcsolve\"] = ss\n\n        if self.e_ops and not self.e_ops.isfunc:\n            e_ops = [\n                Qobj(Ud @ e.full() @ U, dims=e.dims)\n                for e in self.e_ops.e_ops\n            ]\n            self.e_ops = ExpectOps(e_ops)\n        self.ss = ss\n        self.reset()\n\n    def _single_traj_diag(self, nt):\n        \"\"\"\n        Monte Carlo algorithm returning state-vector or expectation values\n        at times tlist for a single trajectory.\n        \"\"\"\n        # SEED AND RNG AND GENERATE\n        prng = RandomState(self.seeds[nt])\n        opt = self.options\n\n        ss = self.ss\n        tlist = self.tlist\n        e_ops = self.e_ops.copy()\n        opt = self.options\n        e_ops.init(tlist)\n\n        cymc = CyMcOdeDiag(ss, opt)\n        states_out, ss_out, collapses =\\\n            cymc.run_ode(self.initial_vector, tlist, e_ops, prng)\n\n        if opt.steady_state_average:\n            ss_out = ss.U @ ss_out @ ss.Ud\n        states_out = np.inner(ss.U, states_out).T\n        if opt.steady_state_average:\n            ss_out /= float(len(tlist))\n        return (states_out, ss_out, e_ops, collapses)\n\n\n# -----------------------------------------------------------------------------\n# CODES FOR PYTHON FUNCTION BASED TIME-DEPENDENT RHS\n# -----------------------------------------------------------------------------\ndef _qobjevo_set(ss, psi0=None, args={}, opt=None):\n    if args:\n        # propagate the new arguments through the solver system\n        ss.set_args(ss, args)\n    rhs = ss.H_td.compiled_qobjevo.mul_vec\n    return rhs, ()\n\n\ndef _qobjevo_args(ss, args):\n    var = _collapse_args(args)\n    ss.col_args = var\n    ss.args = args\n    # update the coefficient arguments of the compiled QobjEvo objects\n    ss.H_td.arguments(args)\n    for c in ss.td_c_ops:\n        c.arguments(args)\n    for c in ss.td_n_ops:\n        c.arguments(args)\n\n\ndef _func_set(ss, psi0=None, args={}, opt=None):\n    if args:\n        # propagate the new arguments through the solver system\n        ss.set_args(ss, args)\n    else:\n        args = ss.args\n    # a state-aware h_func(t, psi, args) needs the with-state RHS\n    if ss.with_state:\n        rhs = _funcrhs_with_state\n    else:\n        rhs = _funcrhs\n    return rhs, (ss.h_func, ss.Hc_td, args)\n\n\ndef _func_args(ss, args):\n    var = _collapse_args(args)\n    ss.col_args = var\n    ss.args = args\n    # update the coefficient arguments of the compiled collapse operators\n    for c in ss.td_c_ops:\n        c.arguments(args)\n    for c in ss.td_n_ops:\n        c.arguments(args)\n\n\n# RHS of ODE for python function Hamiltonian\ndef _funcrhs(t, psi, h_func, Hc_td, args):\n    h_func_data = -1.0j * h_func(t, args).data\n    h_func_term = h_func_data * psi\n    return h_func_term + Hc_td.mul_vec(t, psi)\n\n\ndef _funcrhs_with_state(t, psi, h_func, Hc_td, args):\n    h_func_data = - 1.0j * h_func(t, psi, args).data\n    h_func_term = h_func_data * psi\n    return h_func_term + Hc_td.mul_vec(t, psi)\n\n\ndef _mc_dm_avg(psi_list):\n    \"\"\"\n    Private function that averages density matrices in parallel\n    over all trajectories for a single time using parfor.\n    \"\"\"\n    ln = len(psi_list)\n    dims = psi_list[0].dims\n    shape = psi_list[0].shape\n    out_data = sum([psi.data for psi in psi_list]) / ln\n    return Qobj(out_data, dims=dims, shape=shape, fast='mc-dm')\n\n\ndef _collapse_args(args):\n    for key in args:\n        if key == \"collapse\":\n            if not isinstance(args[key], list):\n                args[key] = []\n            return key\n    return \"\"\n"
] |
[
[
"numpy.dot",
"scipy.integrate.ode",
"numpy.abs",
"numpy.random.seed",
"numpy.inner",
"numpy.random.RandomState",
"numpy.stack",
"numpy.all",
"numpy.real",
"numpy.mean",
"numpy.array",
"numpy.zeros",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
KyrieHee/LeNet-tensorflow
|
[
"7e9347b646f2892d836e164e8ab901dfe021146b"
] |
[
"train.py"
] |
[
"import tensorflow.examples.tutorials.mnist.input_data as input_data\nimport tensorflow as tf\nimport config as cfg\nimport os\nimport lenet\nfrom lenet import Lenet\n\n\ndef main():\n mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n sess = tf.Session()\n batch_size = cfg.BATCH_SIZE\n parameter_path = cfg.PARAMETER_FILE\n lenet = Lenet()\n max_iter = cfg.MAX_ITER\n\n\n saver = tf.train.Saver()\n if os.path.exists(parameter_path):\n saver.restore(parameter_path)\n else:\n sess.run(tf.initialize_all_variables())\n\n for i in range(max_iter):\n batch = mnist.train.next_batch(50)\n if i % 100 == 0:\n train_accuracy = sess.run(lenet.train_accuracy,feed_dict={\n lenet.raw_input_image: batch[0],lenet.raw_input_label: batch[1]\n })\n print(\"step %d, training accuracy %g\" % (i, train_accuracy))\n sess.run(lenet.train_op,feed_dict={lenet.raw_input_image: batch[0],lenet.raw_input_label: batch[1]})\n save_path = saver.save(sess, parameter_path)\n\nif __name__ == '__main__':\n main()\n\n"
] |
[
[
"tensorflow.train.Saver",
"tensorflow.initialize_all_variables",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.Session"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
ratnania/mhd
|
[
"89f383695075aa26fc805ffbf6ba458f0cdd6f4e",
"89f383695075aa26fc805ffbf6ba458f0cdd6f4e"
] |
[
"pic/bsplines.py",
"02_HybridModelling/01_Old_versions/bsplines.py"
] |
[
"from pyccel.decorators import types\n\n#==============================================================================\n@types('double[:]','int','double')\ndef find_span( knots, degree, x ):\n \"\"\"\n Determine the knot span index at location x, given the\n B-Splines' knot sequence and polynomial degree. See\n Algorithm A2.1 in [1].\n\n For a degree p, the knot span index i identifies the\n indices [i-p:i] of all p+1 non-zero basis functions at a\n given location x.\n\n Parameters\n ----------\n knots : array_like\n Knots sequence.\n\n degree : int\n Polynomial degree of B-splines.\n\n x : float\n Location of interest.\n\n Returns\n -------\n span : int\n Knot span index.\n\n \"\"\"\n # Knot index at left/right boundary\n low = degree\n high = 0\n high = len(knots)-1-degree\n\n # Check if point is exactly on left/right boundary, or outside domain\n if x <= knots[low ]: returnVal = low\n elif x >= knots[high]: returnVal = high-1\n else:\n # Perform binary search\n span = (low+high)//2\n while x < knots[span] or x >= knots[span+1]:\n if x < knots[span]:\n high = span\n else:\n low = span\n span = (low+high)//2\n returnVal = span\n\n return returnVal\n\n#==============================================================================\n@types('double[:]','int','double','int','double[:]')\ndef basis_funs( knots, degree, x, span, values ):\n \"\"\"\n Compute the non-vanishing B-splines at location x,\n given the knot sequence, polynomial degree and knot\n span. See Algorithm A2.2 in [1].\n\n Parameters\n ----------\n knots : array_like\n Knots sequence.\n\n degree : int\n Polynomial degree of B-splines.\n\n x : float\n Evaluation point.\n\n span : int\n Knot span index.\n\n Results\n -------\n values : numpy.ndarray\n Values of p+1 non-vanishing B-Splines at location x.\n\n Notes\n -----\n The original Algorithm A2.2 in The NURBS Book [1] is here\n slightly improved by using 'left' and 'right' temporary\n arrays that are one element shorter.\n\n \"\"\"\n from numpy import empty\n left = empty( degree , dtype=float )\n right = empty( degree , dtype=float )\n\n values[0] = 1.0\n for j in range(0,degree):\n left [j] = x - knots[span-j]\n right[j] = knots[span+1+j] - x\n saved = 0.0\n for r in range(0,j+1):\n temp = values[r] / (right[r] + left[j-r])\n values[r] = saved + right[r] * temp\n saved = left[j-r] * temp\n values[j+1] = saved\n",
"# coding: utf-8\n#\n# Copyright 2018 Yaman Güçlü\n\n\"\"\"\nBasic module that provides the means for evaluating the B-Splines basis\nfunctions and their derivatives. In order to simplify automatic Fortran code\ngeneration with Pyccel, no object-oriented features are employed.\n\nReferences\n----------\n[1] L. Piegl and W. Tiller. The NURBS Book, 2nd ed.,\n Springer-Verlag Berlin Heidelberg GmbH, 1997.\n\n[2] SELALIB, Semi-Lagrangian Library. http://selalib.gforge.inria.fr\n\n\"\"\"\nimport numpy as np\n\n__all__ = ['find_span',\n 'basis_funs',\n 'basis_funs_1st_der',\n 'basis_funs_all_ders',\n 'collocation_matrix',\n 'breakpoints',\n 'greville',\n 'elements_spans',\n 'make_knots',\n 'quadrature_grid',\n 'basis_ders_on_quad_grid']\n\n#==============================================================================\ndef find_span( knots, degree, x ):\n \"\"\"\n Determine the knot span index at location x, given the B-Splines' knot\n sequence and polynomial degree. See Algorithm A2.1 in [1].\n\n For a degree p, the knot span index i identifies the indices [i-p:i] of all\n p+1 non-zero basis functions at a given location x.\n\n Parameters\n ----------\n knots : array_like\n Knots sequence.\n\n degree : int\n Polynomial degree of B-splines.\n\n x : float\n Location of interest.\n\n Returns\n -------\n span : int\n Knot span index.\n\n \"\"\"\n # Knot index at left/right boundary\n low = degree\n high = len(knots)-1-degree\n\n # Check if point is exactly on left/right boundary, or outside domain\n if x <= knots[low ]: return low\n if x >= knots[high]: return high-1\n\n # Perform binary search\n span = (low+high)//2\n while x < knots[span] or x >= knots[span+1]:\n if x < knots[span]:\n high = span\n else:\n low = span\n span = (low+high)//2\n\n return span\n\n#==============================================================================\ndef basis_funs( knots, degree, x, span ):\n \"\"\"\n Compute the non-vanishing B-splines at location x, given the knot sequence,\n polynomial degree and knot span. 
See Algorithm A2.2 in [1].\n\n Parameters\n ----------\n knots : array_like\n Knots sequence.\n\n degree : int\n Polynomial degree of B-splines.\n\n x : float\n Evaluation point.\n\n span : int\n Knot span index.\n\n Results\n -------\n values : numpy.ndarray\n Values of p+1 non-vanishing B-Splines at location x.\n\n Notes\n -----\n The original Algorithm A2.2 in The NURBS Book [1] is here slightly improved\n by using 'left' and 'right' temporary arrays that are one element shorter.\n\n \"\"\"\n left = np.empty( degree , dtype=float )\n right = np.empty( degree , dtype=float )\n values = np.empty( degree+1, dtype=float )\n\n values[0] = 1.0\n for j in range(0,degree):\n left [j] = x - knots[span-j]\n right[j] = knots[span+1+j] - x\n saved = 0.0\n for r in range(0,j+1):\n temp = values[r] / (right[r] + left[j-r])\n values[r] = saved + right[r] * temp\n saved = left[j-r] * temp\n values[j+1] = saved\n\n return values\n\n#==============================================================================\ndef basis_funs_1st_der( knots, degree, x, span ):\n \"\"\"\n Compute the first derivative of the non-vanishing B-splines at location x,\n given the knot sequence, polynomial degree and knot span.\n\n See function 's_bsplines_non_uniform__eval_deriv' in Selalib's source file\n 'src/splines/sll_m_bsplines_non_uniform.F90'.\n\n Parameters\n ----------\n knots : array_like\n Knots sequence.\n\n degree : int\n Polynomial degree of B-splines.\n\n x : float\n Evaluation point.\n\n span : int\n Knot span index.\n\n Results\n -------\n ders : numpy.ndarray\n Derivatives of p+1 non-vanishing B-Splines at location x.\n\n \"\"\"\n # Compute nonzero basis functions and knot differences for splines\n # up to degree deg-1\n values = basis_funs( knots, degree-1, x, span )\n\n # Compute derivatives at x using formula based on difference of splines of\n # degree deg-1\n # -------\n # j = 0\n ders = np.empty( degree+1, dtype=float )\n saved = degree * values[0] / (knots[span+1]-knots[span+1-degree])\n ders[0] = -saved\n # j = 1,...,degree-1\n for j in range(1,degree):\n temp = saved\n saved = degree * values[j] / (knots[span+j+1]-knots[span+j+1-degree])\n ders[j] = temp - saved\n # j = degree\n ders[degree] = saved\n\n return ders\n\n#==============================================================================\ndef basis_funs_all_ders( knots, degree, x, span, n ):\n \"\"\"\n Evaluate value and n derivatives at x of all basis functions with\n support in interval [x_{span-1}, x_{span}].\n\n ders[i,j] = (d/dx)^i B_k(x) with k=(span-degree+j),\n for 0 <= i <= n and 0 <= j <= degree+1.\n\n Parameters\n ----------\n knots : array_like\n Knots sequence.\n\n degree : int\n Polynomial degree of B-splines.\n\n x : float\n Evaluation point.\n\n span : int\n Knot span index.\n\n n : int\n Max derivative of interest.\n\n Results\n -------\n ders : numpy.ndarray (n+1,degree+1)\n 2D array of n+1 (from 0-th to n-th) derivatives at x of all (degree+1)\n non-vanishing basis functions in given span.\n\n Notes\n -----\n The original Algorithm A2.3 in The NURBS Book [1] is here improved:\n - 'left' and 'right' arrays are 1 element shorter;\n - inverse of knot differences are saved to avoid unnecessary divisions;\n - innermost loops are replaced with vector operations on slices.\n\n \"\"\"\n left = np.empty( degree )\n right = np.empty( degree )\n ndu = np.empty( (degree+1, degree+1) )\n a = np.empty( ( 2, degree+1) )\n ders = np.zeros( ( n+1, degree+1) ) # output array\n\n # Number of derivatives that need to be effectively 
computed\n # Derivatives higher than degree are = 0.\n ne = min( n, degree )\n\n # Compute nonzero basis functions and knot differences for splines\n # up to degree, which are needed to compute derivatives.\n # Store values in 2D temporary array 'ndu' (square matrix).\n ndu[0,0] = 1.0\n for j in range(0,degree):\n left [j] = x - knots[span-j]\n right[j] = knots[span+1+j] - x\n saved = 0.0\n for r in range(0,j+1):\n # compute inverse of knot differences and save them into lower triangular part of ndu\n ndu[j+1,r] = 1.0 / (right[r] + left[j-r])\n # compute basis functions and save them into upper triangular part of ndu\n temp = ndu[r,j] * ndu[j+1,r]\n ndu[r,j+1] = saved + right[r] * temp\n saved = left[j-r] * temp\n ndu[j+1,j+1] = saved\n\n # Compute derivatives in 2D output array 'ders'\n ders[0,:] = ndu[:,degree]\n for r in range(0,degree+1):\n s1 = 0\n s2 = 1\n a[0,0] = 1.0\n for k in range(1,ne+1):\n d = 0.0\n rk = r-k\n pk = degree-k\n if r >= k:\n a[s2,0] = a[s1,0] * ndu[pk+1,rk]\n d = a[s2,0] * ndu[rk,pk]\n j1 = 1 if (rk > -1 ) else -rk\n j2 = k-1 if (r-1 <= pk) else degree-r\n a[s2,j1:j2+1] = (a[s1,j1:j2+1] - a[s1,j1-1:j2]) * ndu[pk+1,rk+j1:rk+j2+1]\n d += np.dot( a[s2,j1:j2+1], ndu[rk+j1:rk+j2+1,pk] )\n if r <= pk:\n a[s2,k] = - a[s1,k-1] * ndu[pk+1,r]\n d += a[s2,k] * ndu[r,pk]\n ders[k,r] = d\n j = s1\n s1 = s2\n s2 = j\n\n # Multiply derivatives by correct factors\n r = degree\n for k in range(1,ne+1):\n ders[k,:] = ders[k,:] * r\n r = r * (degree-k)\n\n return ders\n\n#==============================================================================\ndef collocation_matrix( knots, degree, xgrid, periodic, normalize=False ):\n \"\"\"\n Compute the collocation matrix $C_ij = B_j(x_i)$, which contains the\n values of each B-spline basis function $B_j$ at all locations $x_i$.\n\n Parameters\n ----------\n knots : 1D array_like\n Knots sequence.\n\n degree : int\n Polynomial degree of B-splines.\n\n xgrid : 1D array_like\n Evaluation points.\n\n periodic : bool\n True if domain is periodic, False otherwise.\n\n Returns\n -------\n mat : 2D numpy.ndarray\n Collocation matrix: values of all basis functions on each point in xgrid.\n\n \"\"\"\n \n el_b = breakpoints(knots, degree)\n ne = len(el_b) - 1 \n \n if normalize == True:\n x_norm = np.zeros((ne, degree + 1))\n \n for ie in range(ne):\n \n loc = ie + np.arange(degree + 1)\n x_norm[ie] = (degree + 1)/(knots[loc + degree + 1] - knots[loc])\n \n else:\n x_norm = np.ones((ne, degree + 1))\n \n \n # Number of basis functions (in periodic case remove degree repeated elements)\n nb = len(knots)-degree-1\n if periodic:\n nb -= degree\n\n # Number of evaluation points\n nx = len(xgrid)\n\n # Collocation matrix as 2D Numpy array (dense storage)\n mat = np.zeros( (nx,nb) )\n\n # Indexing of basis functions (periodic or not) for a given span\n if periodic:\n js = lambda span: [(span-degree+s) % nb for s in range( degree+1 )]\n else:\n js = lambda span: slice( span-degree, span+1 )\n\n # Fill in non-zero matrix values\n for i,x in enumerate( xgrid ):\n span = find_span( knots, degree, x )\n basis = basis_funs( knots, degree, x, span )*x_norm[span - degree]\n mat[i,js(span)] = basis\n\n return mat\n\n#==============================================================================\ndef breakpoints( knots, degree ):\n \"\"\"\n Determine breakpoints' coordinates.\n\n Parameters\n ----------\n knots : 1D array_like\n Knots sequence.\n\n degree : int\n Polynomial degree of B-splines.\n\n Returns\n -------\n breaks : numpy.ndarray (1D)\n Abscissas of all 
breakpoints.\n\n \"\"\"\n return np.unique( knots[degree:-degree] )\n\n#==============================================================================\ndef greville( knots, degree, periodic ):\n \"\"\"\n Compute coordinates of all Greville points.\n\n Parameters\n ----------\n knots : 1D array_like\n Knots sequence.\n\n degree : int\n Polynomial degree of B-splines.\n\n periodic : bool\n True if domain is periodic, False otherwise.\n\n Returns\n -------\n xg : numpy.ndarray (1D)\n Abscissas of all Greville points.\n\n \"\"\"\n T = knots\n p = degree\n s = 1+p//2 if periodic else 1\n n = len(T)-2*p-1 if periodic else len(T)-p-1\n\n # Compute greville abscissas as average of p consecutive knot values\n xg = np.around( [sum(T[i:i+p])/p for i in range(s,s+n)], decimals=15 )\n\n # If needed apply periodic boundary conditions\n if periodic:\n a = T[ p]\n b = T[-p]\n xg = np.around( (xg-a)%(b-a)+a, decimals=15 )\n\n return xg\n\n#===============================================================================\ndef elements_spans( knots, degree ):\n \"\"\"\n Compute the index of the last non-vanishing spline on each grid element\n (cell). The length of the returned array is the number of cells.\n\n Parameters\n ----------\n knots : 1D array_like\n Knots sequence.\n\n degree : int\n Polynomial degree of B-splines.\n\n Returns\n -------\n spans : numpy.ndarray (1D)\n Index of last non-vanishing spline on each grid element.\n\n Examples\n --------\n >>> import numpy as np\n >>> from psydac.core.bsplines import make_knots, elements_spans\n\n >>> p = 3 ; n = 8\n >>> grid = np.arange( n-p+1 )\n >>> knots = make_knots( breaks=grid, degree=p, periodic=False )\n >>> spans = elements_spans( knots=knots, degree=p )\n >>> spans\n array([3, 4, 5, 6, 7])\n\n Notes\n -----\n 1) Numbering of basis functions starts from 0, not 1;\n 2) This function could be written in two lines:\n\n breaks = breakpoints( knots, degree )\n spans = np.searchsorted( knots, breaks[:-1], side='right' ) - 1\n\n \"\"\"\n breaks = breakpoints( knots, degree )\n nk = len(knots)\n ne = len(breaks)-1\n spans = np.zeros( ne, dtype=int )\n\n ie = 0\n for ik in range( degree, nk-degree ):\n if knots[ik] != knots[ik+1]:\n spans[ie] = ik\n ie += 1\n if ie == ne:\n break\n\n return spans\n\n#===============================================================================\ndef make_knots( breaks, degree, periodic ):\n \"\"\"\n Create spline knots from breakpoints, with appropriate boundary conditions.\n Let p be spline degree. If domain is periodic, knot sequence is extended\n by periodicity so that first p basis functions are identical to last p.\n Otherwise, knot sequence is clamped (i.e. 
endpoints are repeated p times).\n\n Parameters\n ----------\n breaks : array_like\n Coordinates of breakpoints (= cell edges); given in increasing order and\n with no duplicates.\n\n degree : int\n Spline degree (= polynomial degree within each interval).\n\n periodic : bool\n True if domain is periodic, False otherwise.\n\n Result\n ------\n T : numpy.ndarray (1D)\n Coordinates of spline knots.\n\n \"\"\"\n # Type checking\n assert isinstance( degree , int )\n assert isinstance( periodic, bool )\n\n # Consistency checks\n assert len(breaks) > 1\n assert all( np.diff(breaks) > 0 )\n assert degree > 0\n if periodic:\n assert len(breaks) > degree\n\n p = degree\n T = np.zeros( len(breaks)+2*p )\n T[p:-p] = breaks\n\n if periodic:\n period = breaks[-1]-breaks[0]\n T[0:p] = [xi-period for xi in breaks[-p-1:-1 ]]\n T[-p:] = [xi+period for xi in breaks[ 1:p+1]]\n else:\n T[0:p] = breaks[ 0]\n T[-p:] = breaks[-1]\n\n return T\n\n#==============================================================================\ndef quadrature_grid( breaks, quad_rule_x, quad_rule_w ):\n \"\"\"\n Compute the quadrature points and weights for performing integrals over\n each element (interval) of the 1D domain, given a certain Gaussian\n quadrature rule.\n\n An n-point Gaussian quadrature rule for the canonical interval $[-1,+1]$\n and trivial weighting function $\\omega(x)=1$ is defined by the n abscissas\n $x_i$ and n weights $w_i$ that satisfy the following identity for\n polynomial functions $f(x)$ of degree $2n-1$ or less:\n\n $\\int_{-1}^{+1} f(x) dx = \\sum_{i=0}^{n-1} w_i f(x_i)$.\n\n Parameters\n ----------\n breaks : 1D array_like\n Coordinates of spline breakpoints.\n\n quad_rule_x : 1D array_like\n Coordinates of quadrature points on canonical interval [-1,1].\n\n quad_rule_w : 1D array_like\n Weights assigned to quadrature points on canonical interval [-1,1].\n\n Returns\n -------\n quad_x : 2D numpy.ndarray\n Abscissas of quadrature points on each element (interval) of the 1D\n domain. See notes below.\n\n quad_w : 2D numpy.ndarray\n Weights assigned to the quadrature points on each element (interval)\n of the 1D domain. See notes below.\n\n Notes\n -----\n Contents of 2D output arrays 'quad_x' and 'quad_w' are accessed with two\n indices (ie,iq) where:\n . ie is the global element index;\n . 
iq is the local index of a quadrature point within the element.\n\n \"\"\"\n # Check that input arrays have correct size\n assert len(breaks) >= 2\n assert len(quad_rule_x) == len(quad_rule_w)\n\n # Check that provided quadrature rule is defined on interval [-1,1]\n assert min(quad_rule_x) >= -1\n assert max(quad_rule_x) <= +1\n\n quad_rule_x = np.asarray( quad_rule_x )\n quad_rule_w = np.asarray( quad_rule_w )\n\n ne = len(breaks)-1\n nq = len(quad_rule_x)\n quad_x = np.zeros( (ne,nq) )\n quad_w = np.zeros( (ne,nq) )\n\n # Compute location and weight of quadrature points from basic rule\n for ie,(a,b) in enumerate(zip(breaks[:-1],breaks[1:])):\n c0 = 0.5*(a+b)\n c1 = 0.5*(b-a)\n quad_x[ie,:] = c1*quad_rule_x[:] + c0\n quad_w[ie,:] = c1*quad_rule_w[:]\n\n return quad_x, quad_w\n\n#==============================================================================\ndef basis_ders_on_quad_grid( knots, degree, quad_grid, nders, normalize=False ):\n \"\"\"\n Evaluate B-Splines and their derivatives on the quadrature grid.\n\n Parameters\n ----------\n knots : array_like\n Knots sequence.\n\n degree : int\n Polynomial degree of B-splines.\n\n quad_grid: 2D numpy.ndarray (ne,nq)\n Coordinates of quadrature points of each element in 1D domain,\n given by quadrature_grid() function.\n\n nders : int\n Maximum derivative of interest.\n\n Returns\n -------\n basis: 4D numpy.ndarray\n Values of B-Splines and their derivatives at quadrature points in\n each element of 1D domain. Indices are\n . ie: global element (0 <= ie < ne )\n . il: local basis function (0 <= il <= degree)\n . id: derivative (0 <= id <= nders )\n . iq: local quadrature point (0 <= iq < nq )\n\n \"\"\"\n # TODO: add example to docstring\n # TODO: check if it is safe to compute span only once for each element\n\n ne,nq = quad_grid.shape\n basis = np.zeros( (ne,degree+1,nders+1,nq) )\n \n if normalize == True:\n x = np.zeros((ne, degree + 1))\n \n for ie in range(ne):\n \n loc = ie + np.arange(degree + 1)\n x[ie] = (degree + 1)/(knots[loc + degree + 1] - knots[loc])\n \n else:\n x = np.ones((ne, degree + 1))\n\n for ie in range(ne):\n xx = quad_grid[ie,:]\n \n \n for iq,xq in enumerate(xx):\n span = find_span( knots, degree, xq )\n ders = basis_funs_all_ders( knots, degree, xq, span, nders )*x[ie]\n basis[ie,:,:,iq] = ders.transpose()\n\n return basis\n\n#==============================================================================\ndef scaling_vector(p, n, T):\n \"\"\"Returns the scaling array for M-splines.\n It is an array whose elements are (p+1)/(T[i+p+1]-T[i])\n\n\n \"\"\"\n\n x = np.zeros(n)\n for i in range(0, n):\n x[i] = (p+1)/(T[i+p+1]-T[i])\n return x\n"
] |
[
[
"numpy.empty"
],
[
"numpy.dot",
"numpy.unique",
"numpy.asarray",
"numpy.around",
"numpy.arange",
"numpy.ones",
"numpy.diff",
"numpy.zeros",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
juwangvsu/AugmentedAutoencoder_new
|
[
"f29d657f369332a6284cf39e615af24758add624",
"f29d657f369332a6284cf39e615af24758add624"
] |
[
"auto_pose/meshrenderer/scenerenderer.py",
"auto_pose/ae/queue.py"
] |
[
"# -*- coding: utf-8 -*-\nimport os\nimport glob\n\nimport math\nimport numpy as np\nfrom .write_xml import *\nimport auto_pose.meshrenderer.meshrenderer as mr\nimport auto_pose.meshrenderer.meshrenderer_phong as mr_phong\nimport cv2\n\nfrom .pysixd import view_sampler\nfrom .pysixd import transform\n\nclass SceneRenderer(object):\n\n def __init__(self,\n models_cad_files,\n vertex_tmp_store_folder,\n vertex_scale,\n width,\n height,\n K,\n augmenters,\n vocdevkit_path,\n min_num_objects_per_scene,\n max_num_objects_per_scene,\n near_plane=10,\n far_plane=2000,\n min_n_views=1000,\n radius=650,\n obj_ids=None,\n model_type='reconst'):\n\n self._models_cad_files = models_cad_files\n self._width = width\n self._height = height\n self._radius = radius\n self._K = K\n self._augmenters = augmenters\n self._min_num_objects_per_scene = min_num_objects_per_scene\n self._max_num_objects_per_scene = max_num_objects_per_scene\n self._near_plane = near_plane\n self._far_plane = far_plane\n self.obj_ids = np.array(obj_ids)\n\n\n # pascal_imgs_path = os.path.join(vocdevkit_path, 'VOC2012/JPEGImages')\n self._voc_imgs = glob.glob( os.path.join(vocdevkit_path , '*.jpg' )) + glob.glob( os.path.join(vocdevkit_path, '*.png') )\n print (len(self._voc_imgs))\n if model_type == 'reconst':\n self._renderer = mr_phong.Renderer(\n self._models_cad_files,\n 1,\n vertex_tmp_store_folder=vertex_tmp_store_folder,\n vertex_scale=vertex_scale\n )\n elif model_type == 'cad':\n self._renderer = mr.Renderer(\n self._models_cad_files,\n 1,\n vertex_tmp_store_folder=vertex_tmp_store_folder,\n vertex_scale=vertex_scale\n )\n else:\n print ('unknown model_type, ', model_type)\n exit()\n\n azimuth_range = (0, 2 * math.pi)\n elev_range = (-0.5 * math.pi, 0.5 * math.pi)\n self.all_views, _ = view_sampler.sample_views(min_n_views, radius, azimuth_range, elev_range)\n\n\n def render(self):\n if self._min_num_objects_per_scene == self._max_num_objects_per_scene:\n N = self._min_num_objects_per_scene\n else:\n N = np.random.randint(\n self._min_num_objects_per_scene,\n self._max_num_objects_per_scene\n )\n views = np.random.choice(self.all_views, N)\n obj_is = np.random.choice(len(self._models_cad_files), N)\n\n\n ts = []\n ts_norm = []\n Rs = []\n\n for v in views:\n success = False\n while not success:\n\n tz = np.random.triangular(self._radius-self._radius/3,self._radius,self._radius+self._radius/3)\n\n tx = np.random.uniform(-0.35 * tz * self._width / self._K[0,0], 0.35 * tz * self._width / self._K[0,0])\n ty = np.random.uniform(-0.35 * tz * self._height / self._K[1,1], 0.35 * tz * self._height / self._K[1,1])\n\n t = np.array([tx, ty, tz])\n R = transform.random_rotation_matrix()[:3,:3]\n t_norm = t/np.linalg.norm(t)\n\n if len(ts_norm) > 0 and np.any(np.dot(np.array(ts_norm),t_norm.reshape(3,1)) > 0.99):\n success = False\n print ('fail')\n else:\n ts_norm.append(t_norm)\n ts.append( t )\n Rs.append( R )\n success = True\n\n\n bgr, depth, bbs = self._renderer.render_many(\n obj_is,\n self._width,\n self._height,\n self._K.copy(),\n Rs,\n ts,\n self._near_plane,\n self._far_plane,\n random_light=True\n )\n\n\n rand_voc = cv2.imread( self._voc_imgs[np.random.randint( len(self._voc_imgs) )] )\n rand_voc = cv2.resize(rand_voc, (self._width, self._height))\n rand_voc = rand_voc.astype(np.float32) / 255.\n # print bgr.max()\n bgr = bgr.astype(np.float32) / 255.\n\n depth_three_chan = np.dstack((depth,)*3)\n bgr = rand_voc*(depth_three_chan==0.0).astype(np.uint8) + bgr*(depth_three_chan>0).astype(np.uint8)\n\n obj_info = []\n for (x, 
y, w, h), obj_id in zip(bbs, self.obj_ids[np.array(obj_is)]):\n xmin = np.minimum(x, x+w)\n xmax = np.maximum(x, x+w)\n ymin = np.minimum(y, y+h)\n ymax = np.maximum(y, y+h)\n obj_info.append({'id': obj_id, 'bb': [int(xmin), int(ymin), int(xmax), int(ymax)]})\n\n bgr = (bgr*255.0).astype(np.uint8)\n\n if self._augmenters != None:\n bgr = self._augmenters.augment_image(bgr)\n\n return bgr, obj_info\n",
"# -*- coding: utf-8 -*-\n\nimport threading\n\nimport tensorflow as tf\n\nfrom .utils import lazy_property\nimport time\n\nclass Queue(object):\n\n def __init__(self, dataset, num_threads, queue_size, batch_size):\n self._dataset = dataset\n self._num_threads = num_threads\n self._queue_size = queue_size\n self._batch_size = batch_size\n\n datatypes = 2*['float32']\n shapes = 2*[self._dataset.shape]\n\n batch_shape = [None]+list(self._dataset.shape)\n \n self._placeholders = 2*[\n tf.placeholder(dtype=tf.float32, shape=batch_shape),\n tf.placeholder(dtype=tf.float32, shape=batch_shape) \n ]\n\n self._queue = tf.FIFOQueue(self._queue_size, datatypes, shapes=shapes)\n self.x, self.y = self._queue.dequeue_up_to(self._batch_size)\n self.enqueue_op = self._queue.enqueue_many(self._placeholders)\n\n self._coordinator = tf.train.Coordinator()\n\n self._threads = []\n\n\n def start(self, session):\n assert len(self._threads) == 0\n tf.train.start_queue_runners(session, self._coordinator)\n for _ in range(self._num_threads):\n thread = threading.Thread(\n target=Queue.__run__, \n args=(self, session)\n )\n thread.deamon = True\n thread.start()\n self._threads.append(thread)\n\n\n def stop(self, session):\n self._coordinator.request_stop()\n session.run(self._queue.close(cancel_pending_enqueues=True))\n self._coordinator.join(self._threads)\n self._threads[:] = []\n\n\n def __run__(self, session):\n while not self._coordinator.should_stop(): \n # a= time.time()\n # print 'batching...'\n batch = self._dataset.batch(self._batch_size)\n # print 'batch creation time ', time.time()-a\n \n feed_dict = { k:v for k,v in zip( self._placeholders, batch ) }\n try:\n session.run(self.enqueue_op, feed_dict)\n # print 'enqueued something'\n except tf.errors.CancelledError as e:\n print('worker was cancelled')\n pass\n "
] |
[
[
"numpy.minimum",
"numpy.maximum",
"numpy.random.choice",
"numpy.linalg.norm",
"numpy.dstack",
"numpy.random.triangular",
"numpy.random.uniform",
"numpy.array",
"numpy.random.randint"
],
[
"tensorflow.train.start_queue_runners",
"tensorflow.train.Coordinator",
"tensorflow.placeholder",
"tensorflow.FIFOQueue"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
yujialuo/erdos
|
[
"7a631b55895f1a473b0f4d38a0d6053851e65b5d",
"7a631b55895f1a473b0f4d38a0d6053851e65b5d"
] |
[
"tests/subgraph_test.py",
"examples/pylot/segmented_video_operator.py"
] |
[
"from __future__ import print_function\n\nimport time\nfrom absl import app\nfrom absl import flags\nfrom multiprocessing import Process\nimport numpy as np\n\ntry:\n from std_msgs.msg import Int64\nexcept ModuleNotFoundError:\n # ROS not installed\n Int64 = int\n\nfrom erdos.data_stream import DataStream\nimport erdos.graph\nfrom erdos.graph import Graph\nfrom erdos.op import Op\nfrom erdos.message import Message\nfrom erdos.timestamp import Timestamp\nfrom erdos.utils import frequency\n\nfrom sum_squares_test import SquareOp, SumOp\n\nFLAGS = flags.FLAGS\n\n\nclass IntegerOp(Op):\n \"\"\"Operator which publishes an integer every second\"\"\"\n\n def __init__(self, name, number, stream_name):\n super(IntegerOp, self).__init__(name)\n self.number = np.int64(number)\n self.stream_name = stream_name\n\n @staticmethod\n def setup_streams(input_streams, stream_name=\"integer_out\"):\n return [DataStream(data_type=Int64, name=stream_name)]\n\n @frequency(1)\n def publish_random_number(self):\n output_msg = Message(self.number, Timestamp(coordinates=[0]))\n self.get_output_stream(self.stream_name).send(output_msg)\n print(\"%s sent %d\" % (self.name, self.number))\n\n def execute(self):\n self.publish_random_number()\n self.spin()\n\n\nclass SumSquaresGraph(Graph):\n def construct(self, input_ops):\n square_op = self.add(SquareOp, \"square\")\n sum_op = self.add(SumOp, name=\"sum\")\n self.connect(input_ops, [square_op])\n self.connect([square_op], [sum_op])\n return [sum_op]\n\n\ndef run_graph(argv):\n \"\"\"Sums the squares of 2 numbers. \"\"\"\n\n graph = erdos.graph.get_current_graph()\n sub_graph = graph.add(SumSquaresGraph, name=\"sum_squares\")\n\n # Add operators\n int1 = graph.add(\n IntegerOp,\n name=\"int1\",\n init_args={\n \"number\": 1,\n \"stream_name\": \"int1_out\"\n },\n setup_args={\"stream_name\": \"int1_out\"})\n int2 = graph.add(\n IntegerOp,\n name=\"int2\",\n init_args={\n \"number\": 2,\n \"stream_name\": \"int2_out\"\n },\n setup_args={\"stream_name\": \"int2_out\"})\n square_op = graph.add(SquareOp, name=\"default_square\")\n\n # Connect operators\n graph.connect([int1, int2], [sub_graph])\n graph.connect([sub_graph], [square_op])\n\n # Execute graph\n graph.execute(FLAGS.framework)\n\n\ndef main(argv):\n proc = Process(target=run_graph)\n proc.start()\n time.sleep(10)\n proc.terminate()\n\n\nif __name__ == \"__main__\":\n app.run(main)\n",
"import cv2\nimport numpy as np\nimport PIL.Image as Image\n\nfrom carla.image_converter import labels_to_cityscapes_palette\n\nfrom erdos.op import Op\nfrom erdos.utils import setup_logging\n\n\nclass SegmentedVideoOperator(Op):\n def __init__(self, name, log_file_name=None):\n super(SegmentedVideoOperator, self).__init__(name)\n self._logger = setup_logging(self.name, log_file_name)\n\n @staticmethod\n def setup_streams(input_streams, filter_name):\n input_streams.filter_name(filter_name)\\\n .add_callback(SegmentedVideoOperator.display_frame)\n return []\n\n def display_frame(self, msg):\n frame_array = labels_to_cityscapes_palette(msg.data)\n img = Image.fromarray(np.uint8(frame_array)).convert('RGB')\n open_cv_image = np.array(img)\n cv2.imshow(self.name, open_cv_image)\n cv2.waitKey(1)\n\n def execute(self):\n self.spin()\n"
] |
[
[
"numpy.int64"
],
[
"numpy.uint8",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AndresJejen/pruebaQuantil
|
[
"780ec3426f2539631c8d9457a5de192622de35a0"
] |
[
"data/cargar_data.py"
] |
[
"import json\nimport requests\nimport pandas as pd\nimport pickle\n#from utiles import *\n\nimport numpy as np\n\n\"\"\"Setting the headers to send and accept json responses\n\"\"\"\nheader = {'Content-Type': 'application/json', \\\n 'Accept': 'application/json'}\n\n# creamos un dataset de pruebas\ndf = pd.read_csv('time_series.csv', parse_dates=[0], header=None,index_col=0, names=['fecha','unidades'])\n\n\nprint(df.shape)\n\nprint('JSON para enviar en POST', df)\n\n\"\"\"POST <url>/predict\n\"\"\"\n\nfor i in df.index:\n\n data = {\n \"fecha\": str(i),\n \"demanda\": int(df.loc[i][0])\n }\n print(data)\n resp = requests.post(\"http://localhost:8001/api/newday\", \\\n data = json.dumps(data),\\\n headers= header)\n \n print('status',resp.status_code)\n print('Respuesta de Servidor')\n print(resp.json())"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
xuyu0010/ARID_UG2_2.1
|
[
"bd487c5d6c7d5aa1be775536a1aa7ea4cf59662d"
] |
[
"train_model.py"
] |
[
"import os\nimport logging\n\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\n\nfrom data import iterator_factory as iter_fac\nfrom train import metric\nfrom train.model import model\nfrom train.lr_scheduler import MultiFactorScheduler\n\ndef train_model(sym_net, model_prefix, dataset, input_conf, clip_length=16, train_frame_interval=2,\n\t\t\t\tresume_epoch=-1, batch_size=4, save_frequency=1, lr_base=0.01, lr_factor=0.1, lr_steps=[400000, 800000],\n\t\t\t\tend_epoch=1000, distributed=False, fine_tune=False, **kwargs):\n\n\tassert torch.cuda.is_available(), \"Currently, we only support CUDA version\"\n\n\t# data iterator\n\titer_seed = torch.initial_seed() + 100 + max(0, resume_epoch) * 100\n\ttrain_iter = iter_fac.creat(name=dataset, batch_size=batch_size, clip_length=clip_length, train_interval=train_frame_interval,\n\t\t\t\t\t\t\t\t\t\tmean=input_conf['mean'], std=input_conf['std'], seed=iter_seed)\n\t# wapper (dynamic model)\n\tnet = model(net=sym_net, criterion=nn.CrossEntropyLoss().cuda(),model_prefix=model_prefix, step_callback_freq=50,\n\t\t\t\tsave_checkpoint_freq=save_frequency, opt_batch_size=batch_size, )\n\tnet.net.cuda()\n\n\t# config optimization\n\tparam_base_layers = []\n\tparam_new_layers = []\n\tname_base_layers = []\n\tfor name, param in net.net.named_parameters():\n\t\tif fine_tune:\n\t\t\tif ('classifier' in name) or ('fc' in name):\n\t\t\t\tparam_new_layers.append(param)\n\t\t\telse:\n\t\t\t\tparam_base_layers.append(param)\n\t\t\t\tname_base_layers.append(name)\n\t\telse:\n\t\t\tparam_new_layers.append(param)\n\n\tif name_base_layers:\n\t\tout = \"[\\'\" + '\\', \\''.join(name_base_layers) + \"\\']\"\n\t\tlogging.info(\"Optimizer:: >> recuding the learning rate of {} params: {}\".format(len(name_base_layers),\n\t\t\t\t\t out if len(out) < 300 else out[0:150] + \" ... \" + out[-150:]))\n\n\tnet.net = torch.nn.DataParallel(net.net).cuda()\n\n\toptimizer = torch.optim.SGD([{'params': param_base_layers, 'lr_mult': 0.2}, {'params': param_new_layers, 'lr_mult': 1.0}],\n\t\t\t\t\t\t\t\tlr=lr_base, momentum=0.9, weight_decay=0.0001, nesterov=True)\n\n\t# load params from pretrained 3d network\n\tif resume_epoch > 0:\n\t\tlogging.info(\"Initializer:: resuming model from previous training\")\n\n\t# resume training: model and optimizer\n\tif resume_epoch < 0:\n\t\tepoch_start = 0\n\t\tstep_counter = 0\n\telse:\n\t\tnet.load_checkpoint(epoch=resume_epoch, optimizer=optimizer)\n\t\tepoch_start = resume_epoch\n\t\tstep_counter = epoch_start * train_iter.__len__()\n\n\t# set learning rate scheduler\n\tnum_worker = dist.get_world_size() if torch.distributed.is_initialized() else 1\n\tlr_scheduler = MultiFactorScheduler(base_lr=lr_base, steps=[int(x/(batch_size*num_worker)) for x in lr_steps],\n\t\t\t\t\t\t\t\t\t\tfactor=lr_factor, step_counter=step_counter)\n\t# define evaluation metric\n\tmetrics = metric.MetricList(metric.Loss(name=\"loss-ce\"), metric.Accuracy(name=\"top1\", topk=1), metric.Accuracy(name=\"top5\", topk=5),)\n\n\tnet.fit(train_iter=train_iter, optimizer=optimizer, lr_scheduler=lr_scheduler, metrics=metrics, epoch_start=epoch_start, epoch_end=end_epoch,)\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.initial_seed",
"torch.distributed.is_initialized",
"torch.nn.DataParallel",
"torch.cuda.is_available",
"torch.optim.SGD",
"torch.distributed.get_world_size"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
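`train_model.py` above routes pretrained backbone parameters and freshly initialized `fc`/`classifier` parameters into two `torch.optim.SGD` parameter groups. A runnable sketch of that fine-tuning split on a toy model; the entry attaches an `lr_mult` key to each group (consumed elsewhere in its training loop), whereas this sketch folds the multiplier directly into each group's `lr`, which SGD honors natively.

```python
import torch
import torch.nn as nn

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.features = nn.Linear(8, 16)  # stand-in for the pretrained backbone
        self.fc = nn.Linear(16, 4)        # stand-in for the new classifier head

    def forward(self, x):
        return self.fc(torch.relu(self.features(x)))

net = TinyNet()
param_base_layers, param_new_layers = [], []
for name, param in net.named_parameters():
    if ('classifier' in name) or ('fc' in name):
        param_new_layers.append(param)
    else:
        param_base_layers.append(param)

# Base layers train at 0.2x the base learning rate, the new head at 1.0x.
optimizer = torch.optim.SGD(
    [{'params': param_base_layers, 'lr': 0.01 * 0.2},
     {'params': param_new_layers, 'lr': 0.01}],
    lr=0.01, momentum=0.9, weight_decay=0.0001, nesterov=True)

net(torch.randn(2, 8)).sum().backward()
optimizer.step()
```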
MaxwellFX/Advanced_Lane_Finding
|
[
"55dde1716d662323438152000eb88812843919bb"
] |
[
"PracticeCodes/HLS.py"
] |
[
"import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n\n# Read in an image, you can also try test1.jpg or test4.jpg\nimage = mpimg.imread('test6.jpg') \n\n# Define a function that thresholds the S-channel of HLS\n# Use exclusive lower bound (>) and inclusive upper (<=)\ndef hls_select(img, thresh=(0, 255)):\n # 1) Convert to HLS color space\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n s_channel = hls[:,:,2]\n # 2) Apply a threshold to the S channel\n binary_output = np.zeros_like(s_channel)\n binary_output[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 1\n # 3) Return a binary image of threshold result\n return binary_output\n \nhls_binary = hls_select(image, thresh=(0, 255))\n\n# Plot the result\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))\nf.tight_layout()\nax1.imshow(image)\nax1.set_title('Original Image', fontsize=50)\nax2.imshow(hls_binary, cmap='gray')\nax2.set_title('Thresholded S', fontsize=50)\nplt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)"
] |
[
[
"matplotlib.image.imread",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
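`HLS.py` above converts RGB to HLS with `cv2.cvtColor` and thresholds the saturation channel; note that the call `hls_select(image, thresh=(0, 255))` keeps every pixel with nonzero saturation, so a tighter lower bound is what actually isolates strongly saturated features. A self-contained sketch with a synthetic image in place of `test6.jpg` and an illustrative threshold:

```python
import numpy as np
import cv2

# Synthetic 4x4 RGB image: two saturated red rows, two gray rows.
img = np.zeros((4, 4, 3), dtype=np.uint8)
img[:2] = (255, 0, 0)      # pure red -> saturation near 255
img[2:] = (128, 128, 128)  # gray -> saturation 0

hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:, :, 2]   # HLS channel order: H, L, S

binary = np.zeros_like(s_channel)
binary[(s_channel > 90) & (s_channel <= 255)] = 1  # illustrative bounds
print(binary)  # 1s in the red rows, 0s in the gray rows
```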
gavinuhma/tf-encrypted
|
[
"48c9dc7419163425e736ad05bb19980d134fc851"
] |
[
"tests/test_tanh.py"
] |
[
"# pylint: disable=missing-docstring\nimport unittest\n\nimport numpy as np\nimport tensorflow as tf\nimport tf_encrypted as tfe\nfrom tf_encrypted.layers.activation import Tanh\n\n\nclass TestTanh(unittest.TestCase):\n def setUp(self):\n tf.reset_default_graph()\n\n def test_forward(self):\n input_shape = [4]\n input_tanh = np.array([-1.0, -0.5, 0.5, 3.0]).astype(np.float32)\n\n # tanh pond\n with tfe.protocol.Pond() as prot:\n\n tanh_input = prot.define_private_variable(input_tanh)\n tanh_layer = Tanh(input_shape)\n\n tanh_out_pond = tanh_layer.forward(tanh_input)\n\n with tfe.Session() as sess:\n\n sess.run(tf.global_variables_initializer())\n # outputs\n out_pond = sess.run(tanh_out_pond.reveal())\n\n # reset graph\n tf.reset_default_graph()\n\n with tf.Session() as sess:\n x = tf.Variable(input_tanh, dtype=tf.float32)\n\n tanh_out_tf = tf.nn.tanh(x)\n\n sess.run(tf.global_variables_initializer())\n\n out_tensorflow = sess.run(tanh_out_tf)\n\n assert np.isclose(out_pond, out_tensorflow, atol=0.2).all()\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"tensorflow.Variable",
"tensorflow.nn.tanh",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"numpy.array",
"numpy.isclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
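The test above only requires tf-encrypted's Pond `Tanh` to track plain TensorFlow within `atol=0.2`, which reflects that the protocol evaluates an approximation of tanh rather than the exact function. The plain-TF half of that comparison is reproduced below so it runs without tf-encrypted (TF 1.x graph-mode API, matching the entry's pinned version); the `approx` array is a hypothetical stand-in for the revealed Pond output.

```python
import numpy as np
import tensorflow as tf  # TF 1.x

tf.reset_default_graph()
input_tanh = np.array([-1.0, -0.5, 0.5, 3.0]).astype(np.float32)

with tf.Session() as sess:
    x = tf.Variable(input_tanh, dtype=tf.float32)
    tanh_out = tf.nn.tanh(x)
    sess.run(tf.global_variables_initializer())
    reference = sess.run(tanh_out)

approx = reference + 0.05  # hypothetical stand-in for the Pond layer's output
assert np.isclose(approx, reference, atol=0.2).all()
```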
ackim-fsi/AI-IDS
|
[
"36f65a038da7a09567979cbbfdbd43f94a223856",
"36f65a038da7a09567979cbbfdbd43f94a223856"
] |
[
"inv_app/inv_app_crd_parse.py",
"inv_sql/inv_sql_parse.py"
] |
[
"# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nimport gc\nimport multiprocessing\nimport pandas\nimport datetime\nimport sys\nfrom pandas.errors import EmptyDataError\n\nfrom functools import partial\n\n\n\ndef convert_content(content_string, x_dim, pad_before=True):\n\n int_list = list(map(np.uint8, str(content_string).encode('utf-8')))[:x_dim]\n if len(int_list) < x_dim:\n if pad_before:\n int_list = [np.uint8(0)] * (x_dim - len(int_list)) + int_list # Pad Before\n else:\n int_list = int_list + [np.uint8(0)] * (x_dim - len(int_list)) # Pad After\n\n return int_list\n\n\ndef convert_data(start_index, filename, npy_dir, batch_size, x_dim, pad_before=True, augmentation=1):\n\n try:\n dataframe = pandas.read_csv(filename,\n header=0,\n usecols=[\"src_content\", \"label\"],\n skiprows=list(range(1, start_index)),\n nrows=batch_size,\n engine='python')\n labels = dataframe[\"label\"].values.astype(np.uint8)\n except ValueError:\n dataframe = pandas.read_csv(filename,\n header=0,\n usecols=[\"src_content\"],\n skiprows=list(range(1, start_index)),\n nrows=batch_size,\n engine='python')\n labels = np.array([np.uint8(0)] * dataframe.shape[0])\n\n labels = labels.reshape((labels.shape[0], 1))\n src_content = list(convert_content(x, x_dim=x_dim, pad_before=pad_before)\n for x in dataframe[\"src_content\"].values)\n\n src_content_aug = src_content\n labels_aug = np.concatenate(tuple([labels] * augmentation))\n\n for i in range(1, augmentation):\n if pad_before:\n src_content_aug = src_content_aug + list(\n [np.uint8(0)]*i + content[:-i] for content in src_content\n )\n else:\n src_content_aug = src_content_aug + list(\n content[:-i] + [np.uint8(0)] * i for content in src_content\n )\n\n src_content_aug = np.array(src_content_aug)\n file_no = int(start_index / batch_size)\n if pad_before:\n pad_string = '_prepad'\n else:\n pad_string = '_postpad'\n\n basename = os.path.basename(filename)\n file_extension_index = basename.rfind('.')\n save_basename = basename[:file_extension_index] + pad_string + '_' + str(file_no) + '.npy'\n save_filename = os.path.join(npy_dir, save_basename)\n np.save(save_filename, np.concatenate((src_content_aug, labels_aug), axis=1))\n gc.collect()\n\n return\n\n\ndef convert_file_list(datafile_list, npy_dir, x_dim=1000, pad_before=True, augmentation=1):\n\n processors = int(multiprocessing.cpu_count() / 1.5)\n line_per_processor = int(1048576 / augmentation) # pow(2, 20)\n\n for filepath in datafile_list:\n if pad_before:\n pad_string = '_prepad'\n else:\n pad_string = '_postpad'\n\n filename = os.path.basename(filepath)\n file_extension_index = filename.rfind('.')\n npy_filename = filename[:file_extension_index] + pad_string + \"_0.npy\"\n\n if npy_filename in os.listdir(npy_dir): # Check already parsed npy existence\n continue\n\n try:\n df_temp = pandas.read_csv(filepath, header=0, engine='python')\n except EmptyDataError:\n continue\n\n row_count = df_temp.shape[0]\n del(df_temp)\n gc.collect()\n\n pool = multiprocessing.Pool(processes=processors)\n\n split_size = int(np.ceil(row_count / line_per_processor))\n index_list = list(range(0, split_size*line_per_processor, line_per_processor))\n\n pool.map(partial(convert_data,\n filename=filepath,\n npy_dir=npy_dir,\n batch_size=line_per_processor,\n x_dim=x_dim,\n pad_before=pad_before,\n augmentation=augmentation\n ),\n index_list)\n\n pool.close()\n pool.join()\n gc.collect()\n\n\nif __name__ == \"__main__\":\n\n yesterday = datetime.datetime.today() + datetime.timedelta(days=-1)\n day_before_yesterday = 
datetime.datetime.today() + datetime.timedelta(days=-2)\n yesterday_string = yesterday.strftime(\"%Y%m%d\")\n day_before_yesterday_string = day_before_yesterday.strftime(\"%Y%m%d\")\n data_dir = \"./data/\"\n npy_dir = \"./npy/\"\n\n payload_file_list = list(os.path.join(data_dir, f)\n for f in os.listdir(data_dir)\n if \"payload\" in f and day_before_yesterday_string in f)\n\n app_file_list = list(os.path.join(data_dir, f)\n for f in os.listdir(data_dir)\n if \"app\" in f and yesterday_string not in f)\n\n label_file_list = list(os.path.join(data_dir, f)\n for f in os.listdir(data_dir)\n if \"INV-APP\" in f and yesterday_string not in f)\n\n convert_file_list(payload_file_list, npy_dir, x_dim=1000, pad_before=True, augmentation=1)\n convert_file_list(app_file_list, npy_dir, x_dim=1000, pad_before=True, augmentation=20)\n convert_file_list(label_file_list, npy_dir, x_dim=1000, pad_before=True, augmentation=20)\n\n",
"# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nimport gc\nimport multiprocessing\nimport pandas\nimport datetime\nimport sys\nfrom pandas.errors import EmptyDataError\n\nfrom functools import partial\n\n\ndef convert_content(content_string, x_dim, pad_before=True):\n\n\tint_list = list(map(np.uint8, str(content_string).encode('utf-8')))[:x_dim]\n\tif len(int_list) < x_dim:\n\t\tif pad_before:\n\t\t\tint_list = [np.uint8(0)] * (x_dim - len(int_list)) + int_list # Pad Before\n\t\telse:\n\t\t\tint_list = int_list + [np.uint8(0)] * (x_dim - len(int_list)) # Pad After\n\n\treturn int_list\n\ndef convert_data(start_index, filename, npy_dir, batch_size, x_dim, pad_before=True, augmentation=1):\n\n\ttry:\n\t\tdataframe = pandas.read_csv(filename,\n\t\t\t\t\theader=0,\n\t\t\t\t\tusecols=[\"src_content\", \"label\"],\n\t\t\t\t\tskiprows=list(range(1, start_index)),\n\t\t\t\t\tnrows=batch_size,\n\t\t\t\t\tengine='python')\n\t\tlabels = dataframe[\"label\"].values.astype(np.uint8)\n\texcept ValueError:\n\t\tdataframe = pandas.read_csv(filename,\n\t\t\t\t\theader=0,\n\t\t\t\t\tusecols=[\"src_content\"],\n\t\t\t\t\tskiprows=list(range(1, start_index)),\n\t\t\t\t\tnrows=batch_size,\n\t\t\t\t\tengine='python')\n\t\tlabels = np.array([np.uint8(0)] * dataframe.shape[0])\n\n\tlabels = labels.reshape((labels.shape[0], 1))\n\tsrc_content = list(convert_content(x, x_dim=x_dim, pad_before=pad_before)\n\t\t\t for x in dataframe[\"src_content\"].values)\n\n\tsrc_content_aug = src_content\n\tlabels_aug = np.concatenate(tuple([labels] * augmentation))\n\n\tfor i in range(1, augmentation):\n\t\tif pad_before:\n\t\t\tsrc_content_aug = src_content_aug + list(\n\t\t\t\t[np.uint8(0)]*i + content[:-i] for content in src_content\n\t\t\t)\n\t\telse:\n\t\t\tsrc_content_aug = src_content_aug + list(\n\t\t\t\tcontent[:-i] + [np.uint8(0)] * i for content in src_content\n\t\t\t)\n\n\tsrc_content_aug = np.array(src_content_aug)\n\tfile_no = int(start_index / batch_size)\n\tif pad_before:\n\t\tpad_string = '_prepad'\n\telse:\n\t\tpad_string = '_postpad'\n\n\tbasename = os.path.basename(filename)\n\tfile_extension_index = basename.rfind('.')\n\tsave_basename = basename[:file_extension_index] + pad_string + '_' + str(file_no) + '.npy'\n\tsave_filename = os.path.join(npy_dir, save_basename)\n\tnp.save(save_filename, np.concatenate((src_content_aug, labels_aug), axis=1))\n\tgc.collect()\n\n\treturn\n\n\ndef convert_file_list(datafile_list, npy_dir, x_dim=1000, pad_before=True, augmentation=1):\n\n\tprocessors = int(multiprocessing.cpu_count() / 1.5)\n\tline_per_processor = int(1048576 / augmentation) # pow(2, 20)\n\n\tfor filepath in datafile_list:\n\t\tif pad_before:\n\t\t\tpad_string = '_prepad'\n\t\telse:\n\t\t\tpad_string = '_postpad'\n\n\t\tfilename = os.path.basename(filepath)\n\t\tfile_extension_index = filename.rfind('.')\n\t\tnpy_filename = filename[:file_extension_index] + pad_string + '_0.npy'\n\n\t\tif npy_filename in os.listdir(npy_dir):\n\t\t\tcontinue\n\n\t\ttry:\n\t\t\tdf_temp = pandas.read_csv(filepath, header=0, engine='python')\n\t\texcept EmptyDataError:\n\t\t\tcontinue\n\n\t\trow_count = df_temp.shape[0]\n\t\tdel(df_temp)\n\t\tgc.collect()\n\n\t\tpool = multiprocessing.Pool(processes=processors)\n\n\t\tsplit_size = int(np.ceil(row_count / line_per_processor))\n\t\tindex_list = list(range(0, split_size*line_per_processor, line_per_processor))\n\n\t\tpool.map(partial(convert_data,\n\t\t\t\t filename=filepath,\n\t\t\t\t batch_size=line_per_processor,\n\t\t\t\t x_dim=1000,\n\t\t\t\t 
pad_before=pad_before,\n\t\t\t\t augmentation=augmentation\n\t\t\t\t ),\n\t\t\t index_list)\n\n\t\tpool.close()\n\t\tpool.join()\n\t\tgc.collect()\n\n\nif __name__ == \"__main__\":\n\tyesterday = datetime.datetime.today() + datetime.timedelta(days=-1)\n\tday_before_yesterday = datetime.datetime.today() + datetime.timedelta(days=-2)\n\tyesterday_string = yesterday.strftime(\"%Y%m%d\")\n\tday_before_yesterday_string = day_before_yesterday.strftime(\"%Y%m%d\")\n\tdata_dir = \"./data/\"\n\tnpy_dir = \"./npy/\"\n\t\n\tpayload_file_list = list(os.path.join(data_dir, f)\n\t\t\t\tfor f in os.listdir(data_dir)\n\t\t\t\tif \"payload\" in f and day_before_yesterday_string in f)\n\n\tsql_file_list = list(os.path.join(data_dir, f)\n\t\t\t\tfor f in os.listdir(data_dir)\n\t\t\t\tif \"sql\" in f and yesterday_string not in f)\n\n\tlabel_file_list = list(os.path.join(data_dir, f)\n\t\t\t\tfor f in os.listdir(data_dir)\n\t\t\t\tif \"inv-sql\" in f and yesterday_string not in f)\n\n\tconvert_file_list(payload_file_list, npy_dir, x_dim=1000, pad_before=True, augmentation=1)\n\tconvert_file_list(sql_file_list, npy_dir, x_dim=1000, pad_before=True, augmentation=20)\n\tconvert_file_list(label_file_list, npy_dir, x_dim=1000, pad_before=True, augmentation=20)\n"
] |
[
[
"pandas.read_csv",
"numpy.uint8",
"numpy.concatenate",
"numpy.ceil",
"numpy.array"
],
[
"pandas.read_csv",
"numpy.uint8",
"numpy.concatenate",
"numpy.ceil",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
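Both scripts in the entry above share the same byte-level preprocessing: `convert_content` encodes a string to UTF-8, truncates it to `x_dim` bytes, and zero-pads either before or after; the augmentation loop then shifts those byte rows. The padding step in isolation, runnable as-is:

```python
import numpy as np

def convert_content(content_string, x_dim, pad_before=True):
    # Encode to UTF-8 bytes, truncate to x_dim, zero-pad the remainder.
    int_list = list(map(np.uint8, str(content_string).encode('utf-8')))[:x_dim]
    if len(int_list) < x_dim:
        pad = [np.uint8(0)] * (x_dim - len(int_list))
        int_list = pad + int_list if pad_before else int_list + pad
    return int_list

print(convert_content("GET /", 8))                    # three zeros, then b'GET /'
print(convert_content("GET /", 8, pad_before=False))  # b'GET /', then three zeros
```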
Tikquuss/speech2speech
|
[
"0ab5879afa6b6593b62fffb0af2851584cd280ba"
] |
[
"NMT/XLM/src/data/loader.py"
] |
[
"# Copyright (c) 2019-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nfrom logging import getLogger\nimport os\nimport numpy as np\nimport torch\n\nfrom .dataset import StreamDataset, Dataset, ParallelDataset\nfrom .dictionary import BOS_WORD, EOS_WORD, PAD_WORD, UNK_WORD, MASK_WORD\n\n# our\nimport copy\n\n\nlogger = getLogger()\n\n\ndef process_binarized(data, params):\n \"\"\"\n Process a binarized dataset and log main statistics.\n \"\"\"\n dico = data['dico']\n assert ((data['sentences'].dtype == np.uint16) and (len(dico) < 1 << 16) or\n (data['sentences'].dtype == np.int32) and (1 << 16 <= len(dico) < 1 << 31))\n logger.info(\"%i words (%i unique) in %i sentences. %i unknown words (%i unique) covering %.2f%% of the data.\" % (\n len(data['sentences']) - len(data['positions']),\n len(dico), len(data['positions']),\n sum(data['unk_words'].values()), len(data['unk_words']),\n 100. * sum(data['unk_words'].values()) / (len(data['sentences']) - len(data['positions']))\n ))\n if params.max_vocab != -1:\n assert params.max_vocab > 0\n logger.info(\"Selecting %i most frequent words ...\" % params.max_vocab)\n dico.max_vocab(params.max_vocab)\n data['sentences'][data['sentences'] >= params.max_vocab] = dico.index(UNK_WORD)\n unk_count = (data['sentences'] == dico.index(UNK_WORD)).sum()\n logger.info(\"Now %i unknown words covering %.2f%% of the data.\"\n % (unk_count, 100. * unk_count / (len(data['sentences']) - len(data['positions']))))\n if params.min_count > 0:\n logger.info(\"Selecting words with >= %i occurrences ...\" % params.min_count)\n dico.min_count(params.min_count)\n data['sentences'][data['sentences'] >= len(dico)] = dico.index(UNK_WORD)\n unk_count = (data['sentences'] == dico.index(UNK_WORD)).sum()\n logger.info(\"Now %i unknown words covering %.2f%% of the data.\"\n % (unk_count, 100. * unk_count / (len(data['sentences']) - len(data['positions']))))\n if (data['sentences'].dtype == np.int32) and (len(dico) < 1 << 16):\n logger.info(\"Less than 65536 words. 
Moving data from int32 to uint16 ...\")\n data['sentences'] = data['sentences'].astype(np.uint16)\n return data\n\n\ndef load_binarized(path, params):\n \"\"\"\n Load a binarized dataset.\n \"\"\"\n assert path.endswith('.pth')\n if params.debug_train:\n path = path.replace('train', 'valid')\n if getattr(params, 'multi_gpu', False):\n split_path = '%s.%i.pth' % (path[:-4], params.local_rank)\n if os.path.isfile(split_path):\n assert params.split_data is False\n path = split_path\n assert os.path.isfile(path), path\n logger.info(\"Loading data from %s ...\" % path)\n data = torch.load(path)\n data = process_binarized(data, params)\n return data\n\n\ndef set_dico_parameters(params, data, dico):\n \"\"\"\n Update dictionary parameters.\n \"\"\"\n if 'dico' in data:\n assert data['dico'] == dico\n else:\n data['dico'] = dico\n\n n_words = len(dico)\n bos_index = dico.index(BOS_WORD)\n eos_index = dico.index(EOS_WORD)\n pad_index = dico.index(PAD_WORD)\n unk_index = dico.index(UNK_WORD)\n mask_index = dico.index(MASK_WORD)\n if hasattr(params, 'bos_index'):\n assert params.n_words == n_words\n assert params.bos_index == bos_index\n assert params.eos_index == eos_index\n assert params.pad_index == pad_index\n assert params.unk_index == unk_index\n assert params.mask_index == mask_index\n else:\n params.n_words = n_words\n params.bos_index = bos_index\n params.eos_index = eos_index\n params.pad_index = pad_index\n params.unk_index = unk_index\n params.mask_index = mask_index\n\n\ndef load_mono_data(params, data):\n \"\"\"\n Load monolingual data.\n \"\"\"\n\n data['mono'] = {}\n data['mono_stream'] = {}\n\n for lang in params.mono_dataset.keys():\n\n logger.info('============ Monolingual data (%s)' % lang)\n\n assert lang in params.langs and lang not in data['mono']\n data['mono'][lang] = {}\n data['mono_stream'][lang] = {}\n\n for splt in ['train', 'valid', 'test']:\n\n # no need to load training data for evaluation\n if splt == 'train' and params.eval_only:\n continue\n\n # load data / update dictionary parameters / update data\n mono_data = load_binarized(params.mono_dataset[lang][splt], params)\n set_dico_parameters(params, data, mono_data['dico'])\n\n # create stream dataset\n bs = params.batch_size if splt == 'train' else 1\n data['mono_stream'][lang][splt] = StreamDataset(mono_data['sentences'], mono_data['positions'], bs, params)\n \n # our \n n_sentences = data['mono_stream'][lang][splt].n_sentences\n if 0 < params.n_samples[splt] < n_sentences :\n # todo : shuffle the dataset before selecting\n a, b, n_samples = 0, 0, 0\n while n_samples < params.n_samples[splt] :\n b = b + 1\n enquete = copy.copy(data['mono_stream'][lang][splt])\n enquete.select_data(a = a, b = b, log = False)\n n_samples = len(enquete)\n \n data['mono_stream'][lang][splt].select_data(a = a, b = b)\n \n # if there are several processes on the same machine, we can split the dataset\n if splt == 'train' and params.split_data and 1 < params.n_gpu_per_node <= data['mono_stream'][lang][splt].n_batches:\n n_batches = data['mono_stream'][lang][splt].n_batches // params.n_gpu_per_node\n a = n_batches * params.local_rank\n b = n_batches * params.local_rank + n_batches\n data['mono_stream'][lang][splt].select_data(a, b)\n\n # for denoising auto-encoding and online back-translation, we need a non-stream (batched) dataset\n if lang in params.ae_steps or lang in params.bt_src_langs:\n\n # create batched dataset\n dataset = Dataset(mono_data['sentences'], mono_data['positions'], params)\n \n # remove empty and too long sentences\n 
if splt == 'train':\n dataset.remove_empty_sentences()\n \n # our\n if params.remove_long_sentences[splt] :\n dataset.remove_long_sentences(params.max_len)\n\n # our\n if 0 < params.n_samples[splt] < len(dataset.pos):\n # todo : shuffle the dataset before selecting\n dataset.select_data(a = 0, b = params.n_samples[splt])\n\n # if there are several processes on the same machine, we can split the dataset\n if splt == 'train' and params.n_gpu_per_node > 1 and params.split_data:\n n_sent = len(dataset) // params.n_gpu_per_node\n a = n_sent * params.local_rank\n b = n_sent * params.local_rank + n_sent\n dataset.select_data(a, b)\n\n data['mono'][lang][splt] = dataset\n\n logger.info(\"\")\n\n logger.info(\"\")\n\n\ndef load_para_data(params, data):\n \"\"\"\n Load parallel data.\n \"\"\"\n data['para'] = {}\n\n required_para_train = set(params.clm_steps + params.mlm_steps + params.pc_steps + params.mt_steps)\n\n for src, tgt in params.para_dataset.keys():\n\n logger.info('============ Parallel data (%s-%s)' % (src, tgt))\n\n assert (src, tgt) not in data['para']\n data['para'][(src, tgt)] = {}\n\n for splt in ['train', 'valid', 'test']:\n\n # no need to load training data for evaluation\n if splt == 'train' and params.eval_only:\n continue\n\n # for back-translation, we can't load training data\n if splt == 'train' and (src, tgt) not in required_para_train and (tgt, src) not in required_para_train:\n continue\n\n # load binarized datasets\n src_path, tgt_path = params.para_dataset[(src, tgt)][splt]\n src_data = load_binarized(src_path, params)\n tgt_data = load_binarized(tgt_path, params)\n\n # update dictionary parameters\n set_dico_parameters(params, data, src_data['dico'])\n set_dico_parameters(params, data, tgt_data['dico'])\n\n # create ParallelDataset\n dataset = ParallelDataset(\n src_data['sentences'], src_data['positions'],\n tgt_data['sentences'], tgt_data['positions'],\n params\n )\n\n # remove empty and too long sentences\n if splt == 'train':\n dataset.remove_empty_sentences()\n \n # our\n if params.remove_long_sentences[splt] :\n dataset.remove_long_sentences(params.max_len)\n \n # our\n if 0 < params.n_samples[splt] < len(dataset.pos1):\n # todo : shuffle the dataset before selecting\n dataset.select_data(a = 0, b = params.n_samples[splt])\n\n # for validation and test set, enumerate sentence per sentence\n if splt != 'train':\n dataset.tokens_per_batch = -1\n\n # if there are several processes on the same machine, we can split the dataset\n if splt == 'train' and params.n_gpu_per_node > 1 and params.split_data:\n n_sent = len(dataset) // params.n_gpu_per_node\n a = n_sent * params.local_rank\n b = n_sent * params.local_rank + n_sent\n dataset.select_data(a, b)\n\n data['para'][(src, tgt)][splt] = dataset\n logger.info(\"\")\n\n logger.info(\"\")\n\n\ndef check_data_params(params, check_only_objectifs = False):\n \"\"\"\n Check datasets parameters.\n \"\"\"\n # data path\n if not check_only_objectifs :\n assert os.path.isdir(params.data_path), params.data_path\n\n # check languages\n params.langs = params.lgs.split('-') if params.lgs != 'debug' else ['en']\n assert len(params.langs) == len(set(params.langs)) >= 1\n # assert sorted(params.langs) == params.langs\n params.id2lang = {k: v for k, v in enumerate(sorted(params.langs))}\n params.lang2id = {k: v for v, k in params.id2lang.items()}\n params.n_langs = len(params.langs)\n\n # CLM steps\n clm_steps = [s.split('-') for s in params.clm_steps.split(',') if len(s) > 0]\n params.clm_steps = [(s[0], None) if len(s) == 1 else 
tuple(s) for s in clm_steps]\n assert all([(l1 in params.langs) and (l2 in params.langs or l2 is None) for l1, l2 in params.clm_steps])\n assert len(params.clm_steps) == len(set(params.clm_steps))\n\n # MLM / TLM steps\n mlm_steps = [s.split('-') for s in params.mlm_steps.split(',') if len(s) > 0]\n params.mlm_steps = [(s[0], None) if len(s) == 1 else tuple(s) for s in mlm_steps]\n assert all([(l1 in params.langs) and (l2 in params.langs or l2 is None) for l1, l2 in params.mlm_steps])\n assert len(params.mlm_steps) == len(set(params.mlm_steps))\n\n # parallel classification steps\n params.pc_steps = [tuple(s.split('-')) for s in params.pc_steps.split(',') if len(s) > 0]\n assert all([len(x) == 2 for x in params.pc_steps])\n assert all([l1 in params.langs and l2 in params.langs for l1, l2 in params.pc_steps])\n assert all([l1 != l2 for l1, l2 in params.pc_steps])\n assert len(params.pc_steps) == len(set(params.pc_steps))\n\n # machine translation steps\n params.mt_steps = [tuple(s.split('-')) for s in params.mt_steps.split(',') if len(s) > 0]\n assert all([len(x) == 2 for x in params.mt_steps])\n assert all([l1 in params.langs and l2 in params.langs for l1, l2 in params.mt_steps])\n assert all([l1 != l2 for l1, l2 in params.mt_steps])\n assert len(params.mt_steps) == len(set(params.mt_steps))\n assert len(params.mt_steps) == 0 or not params.encoder_only\n\n # denoising auto-encoder steps\n params.ae_steps = [s for s in params.ae_steps.split(',') if len(s) > 0]\n assert all([lang in params.langs for lang in params.ae_steps])\n assert len(params.ae_steps) == len(set(params.ae_steps))\n assert len(params.ae_steps) == 0 or not params.encoder_only\n\n # back-translation steps\n params.bt_steps = [tuple(s.split('-')) for s in params.bt_steps.split(',') if len(s) > 0]\n assert all([len(x) == 3 for x in params.bt_steps])\n assert all([l1 in params.langs and l2 in params.langs and l3 in params.langs for l1, l2, l3 in params.bt_steps])\n assert all([l1 == l3 and l1 != l2 for l1, l2, l3 in params.bt_steps])\n assert len(params.bt_steps) == len(set(params.bt_steps))\n assert len(params.bt_steps) == 0 or not params.encoder_only\n params.bt_src_langs = [l1 for l1, _, _ in params.bt_steps]\n\n if not check_only_objectifs :\n # check monolingual datasets\n required_mono = set([l1 for l1, l2 in (params.mlm_steps + params.clm_steps) if l2 is None] + params.ae_steps + params.bt_src_langs)\n params.mono_dataset = {\n lang: {\n splt: os.path.join(params.data_path, '%s.%s.pth' % (splt, lang))\n for splt in ['train', 'valid', 'test']\n } for lang in params.langs if lang in required_mono\n }\n for paths in params.mono_dataset.values():\n for p in paths.values():\n if not os.path.isfile(p):\n logger.error(f\"{p} not found\")\n assert all([all([os.path.isfile(p) for p in paths.values()]) for paths in params.mono_dataset.values()])\n\n # check parallel datasets\n required_para_train = set(params.clm_steps + params.mlm_steps + params.pc_steps + params.mt_steps)\n required_para = required_para_train | set([(l2, l3) for _, l2, l3 in params.bt_steps])\n params.para_dataset = {\n (src, tgt): {\n splt: (os.path.join(params.data_path, '%s.%s-%s.%s.pth' % (splt, src, tgt, src)),\n os.path.join(params.data_path, '%s.%s-%s.%s.pth' % (splt, src, tgt, tgt)))\n for splt in ['train', 'valid', 'test']\n if splt != 'train' or (src, tgt) in required_para_train or (tgt, src) in required_para_train\n } for src in params.langs for tgt in params.langs\n if src < tgt and ((src, tgt) in required_para or (tgt, src) in required_para)\n }\n 
for paths in params.para_dataset.values():\n for p1, p2 in paths.values():\n if not os.path.isfile(p1):\n logger.error(f\"{p1} not found\")\n if not os.path.isfile(p2):\n logger.error(f\"{p2} not found\")\n assert all([all([os.path.isfile(p1) and os.path.isfile(p2) for p1, p2 in paths.values()]) for paths in params.para_dataset.values()])\n\n # check that we can evaluate on BLEU\n assert params.eval_bleu is False or len(params.mt_steps + params.bt_steps) > 0\n\n\ndef load_data(params):\n \"\"\"\n Load data.\n The returned (for each task in meta_learning) dictionary contains:\n - dico (dictionary)\n - vocab (FloatTensor)\n - train / valid / test (monolingual and parallel datasets)\n \"\"\"\n data = {}\n for lgs, valeur in params.meta_params.items():\n valeur.n_gpu_per_node = params.n_gpu_per_node\n \n logger.info(\"============ langs: %s\" % \", \".join(valeur.langs))\n \n data[lgs] = {}\n \n # monolingual datasets\n load_mono_data(params = valeur, data = data[lgs])\n \n # parallel datasets\n valeur.n_gpu_per_node = params.n_gpu_per_node\n load_para_data(params = valeur, data = data[lgs])\n \n # monolingual data summary\n logger.info('============ Data summary')\n for lang, v in data[lgs]['mono_stream'].items():\n for data_set in v.keys():\n logger.info('{: <18} - {: >5} - {: >12}:{: >10}'.format('Monolingual data', data_set, lang, len(v[data_set])))\n\n # parallel data summary\n for (src, tgt), v in data[lgs]['para'].items():\n for data_set in v.keys():\n logger.info('{: <18} - {: >5} - {: >12}:{: >10}'.format('Parallel data', data_set, '%s-%s' % (src, tgt), len(v[data_set])))\n\n logger.info(\"\")\n \n # if only one language (so only one meta-task), no metalearning\n if not params.meta_learning :\n key = list(data.keys())[0]\n data = data[key]\n data['key'] = key\n else :\n # todo: the meta_dico must be shared by all the corpora\n \"\"\"\n But we think that if all the task data are based on the same vocabulary, all these parameters will be the same, \n and therefore no problem if we choose one at random.\n \"\"\"\n for key in data.keys() :\n try :\n data['dico'] = data[key]['dico']\n data['key'] = key\n break\n except :\n pass\n return data\n"
] |
[
[
"torch.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
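`process_binarized` in the loader above stores token ids as `uint16` only while the dictionary fits in 16 bits, asserting that invariant on load and downcasting from `int32` once vocabulary pruning makes it possible. The rule in isolation:

```python
import numpy as np

# Token ids fit in uint16 only when the vocabulary has < 1 << 16 entries.
def sentence_dtype(vocab_size):
    return np.uint16 if vocab_size < 1 << 16 else np.int32

sentences = np.array([5, 17, 42], dtype=np.int32)
if sentence_dtype(70) == np.uint16:
    # "Less than 65536 words. Moving data from int32 to uint16 ..."
    sentences = sentences.astype(np.uint16)
print(sentences.dtype)  # uint16
```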
codacy-badger/flopy
|
[
"de874b02661f59ef4e99f18272883a13a4d55f16",
"de874b02661f59ef4e99f18272883a13a4d55f16"
] |
[
"flopy/utils/formattedfile.py",
"flopy/modflow/mfhob.py"
] |
[
"\"\"\"\nModule to read MODFLOW formatted output files. The module contains one\nimportant classes that can be accessed by the user.\n\n* FormattedHeadFile (Formatted head file. Can also be used for drawdown)\n\n\"\"\"\n\nimport numpy as np\nfrom ..utils.datafile import Header, LayerFile\n\n\ndef is_int(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n\ndef is_float(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\nclass FormattedHeader(Header):\n \"\"\"\n The TextHeader class is a class to read in headers from MODFLOW\n formatted files.\n\n Parameters\n ----------\n text_ident is the text string in the header that identifies the type\n of data (eg. 'head') precision is the precision of the floating point\n data in the file\n \"\"\"\n\n def __init__(self, text_ident, precision='single'):\n Header.__init__(self, text_ident, precision)\n self.format_string = ''\n self.text_ident = text_ident\n\n def read_header(self, text_file):\n \"\"\"\n Read header information from a formatted file\n\n Parameters\n ----------\n text_file is an open file object currently at the beginning of\n the header\n\n Returns\n ----------\n out : numpy array of header information\n also stores the header's format string as self.format_string\n\n \"\"\"\n\n header_text = text_file.readline().decode('ascii')\n arrheader = header_text.split()\n\n # Verify header exists and is in the expected format\n if len(arrheader) >= 5 and arrheader[\n 4].upper() != self.text_ident.upper():\n raise Exception(\n 'Expected header not found. Make sure the file being processed includes headers ' +\n '(LABEL output control option): ' + header_text)\n if len(arrheader) != 9 or not is_int(arrheader[0]) or not is_int(\n arrheader[1]) or not is_float(arrheader[2]) \\\n or not is_float(arrheader[3]) or not is_int(\n arrheader[5]) or not is_int(arrheader[6]) or not is_int(\n arrheader[7]):\n raise Exception(\n 'Unexpected format for FHDTextHeader: ' + header_text)\n\n headerinfo = np.empty([8], dtype=self.dtype)\n headerinfo['kstp'] = int(arrheader[0])\n headerinfo['kper'] = int(arrheader[1])\n headerinfo['pertim'] = float(arrheader[2])\n headerinfo['totim'] = float(arrheader[3])\n headerinfo['text'] = arrheader[4]\n headerinfo['ncol'] = int(arrheader[5])\n headerinfo['nrow'] = int(arrheader[6])\n headerinfo['ilay'] = int(arrheader[7])\n\n self.format_string = arrheader[8]\n\n return headerinfo\n\n\nclass FormattedLayerFile(LayerFile):\n \"\"\"\n The FormattedLayerFile class is the super class from which specific derived\n classes are formed. 
This class should not be instantiated directly\n\n \"\"\"\n\n def __init__(self, filename, precision, verbose, kwargs):\n super(FormattedLayerFile, self).__init__(filename, precision, verbose,\n kwargs)\n return\n\n def _build_index(self):\n \"\"\"\n Build the recordarray and iposarray, which maps the header information\n to the position in the formatted file.\n \"\"\"\n self.kstpkper # array of time step/stress periods with data available\n self.recordarray # array of data headers\n self.iposarray # array of seek positions for each record\n self.nlay # Number of model layers\n\n # Get total file size\n self.file.seek(0, 2)\n self.totalbytes = self.file.tell()\n self.file.seek(0, 0)\n\n # Process first header\n self.header = self._get_text_header()\n header_info = self.header.read_header(self.file)[0]\n\n self.nrow = header_info['nrow']\n self.ncol = header_info['ncol']\n\n ipos = self.file.tell()\n self._store_record(header_info, ipos)\n\n # Process enough data to calculate seek distance between headers\n self._col_data_size = self._get_data_size(header_info)\n self._data_size = self._col_data_size * self.nrow\n\n # While more data in file\n while ipos + self._data_size < self.totalbytes:\n # Seek and get next header\n self.file.seek(ipos + self._data_size)\n header_info = self.header.read_header(self.file)[0]\n ipos = self.file.tell()\n self._store_record(header_info, ipos)\n\n # self.recordarray contains a recordarray of all the headers.\n self.recordarray = np.array(self.recordarray, self.header.get_dtype())\n self.iposarray = np.array(self.iposarray)\n self.nlay = np.max(self.recordarray['ilay'])\n return\n\n def _store_record(self, header, ipos):\n \"\"\"\n Store file header information in various formats for quick retrieval\n\n \"\"\"\n self.recordarray.append(header)\n self.iposarray.append(ipos) # store the position right after header2\n totim = header['totim']\n if totim > 0 and totim not in self.times:\n self.times.append(totim)\n kstpkper = (header['kstp'], header['kper'])\n if kstpkper not in self.kstpkper:\n self.kstpkper.append(kstpkper)\n\n def _get_text_header(self):\n \"\"\"\n Return a text header object containing header formatting information\n\n \"\"\"\n raise Exception(\n 'Abstract method _get_text_header called in FormattedLayerFile. ' +\n 'This method needs to be overridden.')\n\n def _read_data(self, shp):\n \"\"\"\n Read 2-D data from file\n\n \"\"\"\n\n nrow, ncol = shp\n current_row = 0\n current_col = 0\n result = np.empty((nrow, ncol), self.realtype)\n # Loop until all data retrieved or eof\n while (\n current_row < nrow or current_col < ncol) and self.file.tell() != self.totalbytes:\n line = self.file.readline()\n\n # Read data into 2-D array\n arrline = line.split()\n for val in arrline:\n if not is_float(val):\n raise Exception(\n 'Invalid data encountered while reading data file.' 
+\n ' Unable to convert data to float.')\n result[current_row, current_col] = float(val)\n current_col += 1\n if current_col >= ncol:\n current_row += 1\n if current_row < nrow:\n current_col = 0\n\n if current_row < nrow - 1 or current_col < ncol - 1:\n raise Exception('Unexpected end of file while reading data.')\n\n return result\n\n def _read_val(self, i):\n \"\"\"\n Read ith data value from file\n\n \"\"\"\n current_col = 0\n result = None\n # Loop until data retrieved or eof\n while (\n current_col < self.ncol - 1 or self.file.tell() == self.totalbytes) and current_col <= i:\n line = self.file.readline()\n arrline = line.split()\n for val in arrline:\n if not is_float(val):\n raise Exception(\n 'Invalid data encountered while reading data file.' +\n ' Unable to convert data to float.')\n result = float(val)\n current_col = current_col + 1\n if current_col > i:\n break\n\n if (current_col < self.ncol - 1) and (current_col < i):\n raise Exception('Unexpected end of file while reading data.')\n\n return result\n\n def get_ts(self, idx):\n \"\"\"\n Get a time series from the formatted file.\n\n Parameters\n ----------\n idx : tuple of ints, or a list of a tuple of ints\n idx can be (layer, row, column) or it can be a list in the form\n [(layer, row, column), (layer, row, column), ...]. The layer,\n row, and column values must be zero based.\n\n Returns\n ----------\n out : numpy array\n Array has size (ntimes, ncells + 1). The first column in the\n data array will contain time (totim).\n\n See Also\n --------\n\n Notes\n -----\n\n The layer, row, and column values must be zero-based, and must be\n within the following ranges: 0 <= k < nlay; 0 <= i < nrow; 0 <= j < ncol\n\n Examples\n --------\n\n \"\"\"\n kijlist = self._build_kijlist(idx)\n nstation = self._get_nstation(idx, kijlist)\n\n # Initialize result array and put times in first column\n result = self._init_result(nstation)\n\n istat = 1\n for k, i, j in kijlist:\n ioffset_col = (i * self._col_data_size)\n for irec, header in enumerate(self.recordarray):\n # change ilay from header to zero-based\n ilay = header['ilay'] - 1\n if ilay != k:\n continue\n ipos = self.iposarray[irec]\n\n # Calculate offset necessary to reach intended column\n self.file.seek(ipos + ioffset_col, 0)\n\n # Find the time index and then put value into result in the\n # correct location.\n itim = np.where(result[:, 0] == header['totim'])[0]\n result[itim, istat] = self._read_val(j)\n istat += 1\n return result\n\n def close(self):\n \"\"\"\n Close the file handle.\n\n \"\"\"\n self.file.close()\n return\n\n\nclass FormattedHeadFile(FormattedLayerFile):\n \"\"\"\n FormattedHeadFile Class.\n\n Parameters\n ----------\n filename : string\n Name of the formatted head file\n text : string\n Name of the text string in the formatted head file. Default is 'head'\n precision : string\n 'single' or 'double'. Default is 'single'.\n verbose : bool\n Write information to the screen. 
Default is False.\n\n Attributes\n ----------\n\n Methods\n -------\n\n See Also\n --------\n\n Notes\n -----\n The FormattedHeadFile class provides simple ways to retrieve 2d and 3d\n head arrays from a MODFLOW formatted head file and time series\n arrays for one or more cells.\n\n The FormattedHeadFile class is built on a record array consisting of\n headers, which are record arrays of the modflow header information\n (kstp, kper, pertim, totim, text, nrow, ncol, ilay)\n and long integers, which are pointers to first bytes of data for\n the corresponding data array.\n\n FormattedHeadFile can only read formatted head files containing headers.\n Use the LABEL option in the output control file to generate head files\n with headers.\n\n Examples\n --------\n\n >>> import flopy.utils.formattedfile as ff\n >>> hdobj = ff.FormattedHeadFile('model.fhd', precision='single')\n >>> hdobj.list_records()\n >>> rec = hdobj.get_data(kstpkper=(1, 50))\n >>> rec2 = hdobj.get_data(totim=100.)\n\n\n \"\"\"\n\n def __init__(self, filename, text='head', precision='single',\n verbose=False, **kwargs):\n self.text = text\n super(FormattedHeadFile, self).__init__(filename, precision, verbose,\n kwargs)\n return\n\n def _get_text_header(self):\n \"\"\"\n Return a text header object containing header formatting information\n\n \"\"\"\n return FormattedHeader(self.text, self.precision)\n\n def _get_data_size(self, header):\n \"\"\"\n Calculate the size of the data set in terms of a seek distance\n\n \"\"\"\n start_pos = self.file.tell()\n data_count = 0\n # Loop through data until at end of column\n while data_count < header['ncol']:\n column_data = self.file.readline()\n arr_column_data = column_data.split()\n data_count += len(arr_column_data)\n\n if data_count != header['ncol']:\n e = 'Unexpected data formatting in head file. Expected ' + \\\n '{:d} columns, '.format(header['ncol']) + \\\n 'but found {:d}.'.format(data_count)\n raise Exception(e)\n\n # Calculate seek distance based on data size\n stop_pos = self.file.tell()\n data_seek_distance = stop_pos - start_pos\n\n # Return to last file position\n self.file.seek(start_pos)\n\n return data_seek_distance\n",
"import sys\nimport collections\nimport numpy as np\nfrom ..pakbase import Package\nfrom ..utils.recarray_utils import create_empty_recarray\n\n\nclass ModflowHob(Package):\n \"\"\"\n Head Observation package class\n\n Parameters\n ----------\n iuhobsv : int\n unit number where output is saved. If iuhobsv is None, a unit number\n will be assigned (default is None).\n hobdry : float\n Value of the simulated equivalent written to the observation output\n file when the observation is omitted because a cell is dry\n (default is 0).\n tomulth : float\n Time step multiplier for head observations. The product of tomulth and\n toffset must produce a time value in units consistent with other model\n input. tomulth can be dimensionless or can be used to convert the units\n of toffset to the time unit used in the simulation (default is 1).\n obs_data : HeadObservation or list of HeadObservation instances\n A single HeadObservation instance or a list of HeadObservation\n instances containing all of the data for each observation. If obs_data\n is None a default HeadObservation with an observation in layer, row,\n column (0, 0, 0) and a head value of 0 at totim 0 will be created\n (default is None).\n hobname : str\n Name of head observation output file. If iuhobsv is greater than 0,\n and hobname is None, the model basename with a '.hob.out' extension\n will be used (default is None).\n extension : string\n Filename extension (default is hob)\n no_print : boolean\n When True or 1, a list of head observations will not be\n written to the Listing File (default is False)\n options : list of strings\n Package options (default is None).\n unitnumber : int\n File unit number (default is None)\n filenames : str or list of str\n Filenames to use for the package and the output files. If filenames\n is None the package name will be created using the model name and\n package extension and the hob output name will be created using the\n model name and .hob.out extension (for example, modflowtest.hob.out),\n if iuhobsv is a number greater than zero. If a single string is passed\n the package will be set to the string and hob output name will be\n created using the model name and .hob.out extension, if iuhobsv is a\n number greater than zero. To define the names for all package files\n (input and output) the length of the list of strings should be 2.\n Default is None.\n\n Attributes\n ----------\n\n Methods\n -------\n\n See Also\n --------\n\n Notes\n\n Examples\n --------\n\n >>> import flopy\n >>> model = flopy.modflow.Modflow()\n >>> dis = flopy.modflow.ModflowDis(model, nlay=1, nrow=11, ncol=11, nper=2,\n ... perlen=[1,1])\n >>> tsd = [[1.,54.4], [2., 55.2]]\n >>> obsdata = flopy.modflow.HeadObservation(model, layer=0, row=5,\n ... column=5, time_series_data=tsd)\n >>> hob = flopy.modflow.ModflowHob(model, iuhobsv=51, hobdry=-9999.,\n ... 
obs_data=obsdata)\n\n\n \"\"\"\n\n def __init__(self, model, iuhobsv=None, hobdry=0, tomulth=1.0,\n obs_data=None, hobname=None, extension='hob',\n no_print=False, options=None,\n unitnumber=None, filenames=None):\n \"\"\"\n Package constructor\n \"\"\"\n # set default unit number of one is not specified\n if unitnumber is None:\n unitnumber = ModflowHob.defaultunit()\n\n # set filenames\n if filenames is None:\n filenames = [None, None]\n elif isinstance(filenames, str):\n filenames = [filenames, None]\n elif isinstance(filenames, list):\n if len(filenames) < 2:\n filenames.append(None)\n\n # set filenames[1] to hobname if filenames[1] is not None\n if filenames[1] is None:\n if hobname is not None:\n filenames[1] = hobname\n\n if iuhobsv is not None:\n fname = filenames[1]\n model.add_output_file(iuhobsv, fname=fname,\n extension='hob.out', binflag=False,\n package=ModflowHob.ftype())\n else:\n iuhobsv = 0\n\n # Fill namefile items\n name = [ModflowHob.ftype()]\n units = [unitnumber]\n extra = ['']\n\n # set package name\n fname = [filenames[0]]\n\n # Call ancestor's init to set self.parent,\n # extension, name and unit number\n Package.__init__(self, model, extension=extension, name=name,\n unit_number=units, extra=extra, filenames=fname)\n\n self.url = 'hob.htm'\n self.heading = '# {} package for '.format(self.name[0]) + \\\n ' {}, '.format(model.version_types[model.version]) + \\\n 'generated by Flopy.'\n\n self.iuhobsv = iuhobsv\n self.hobdry = hobdry\n self.tomulth = tomulth\n\n # create default\n if obs_data is None:\n obs_data = HeadObservation(model)\n\n # make sure obs_data is a list\n if isinstance(obs_data, HeadObservation):\n obs_data = [obs_data]\n\n # set self.obs_data\n self.obs_data = obs_data\n\n self.no_print = no_print\n self.np = 0\n if options is None:\n options = []\n if self.no_print:\n options.append('NOPRINT')\n self.options = options\n\n # add checks for input compliance (obsnam length, etc.)\n self.parent.add_package(self)\n\n def _set_dimensions(self):\n \"\"\"\n Set the length of the obs_data list\n\n Returns\n -------\n None\n\n \"\"\"\n # make sure each entry of obs_data list is a HeadObservation instance\n # and calculate nh, mobs, and maxm\n msg = ''\n self.nh = 0\n self.mobs = 0\n self.maxm = 0\n for idx, obs in enumerate(self.obs_data):\n if not isinstance(obs, HeadObservation):\n msg += 'ModflowHob: obs_data entry {} '.format(idx) + \\\n 'is not a HeadObservation instance.\\n'\n continue\n self.nh += obs.nobs\n if obs.multilayer:\n self.mobs += obs.nobs\n self.maxm = max(self.maxm, obs.maxm)\n if msg != '':\n raise ValueError(msg)\n return\n\n def write_file(self):\n \"\"\"\n Write the package file\n\n Returns\n -------\n None\n\n \"\"\"\n # determine the dimensions of HOB data\n self._set_dimensions()\n\n # open file for writing\n f = open(self.fn_path, 'w')\n\n # write dataset 0\n f.write('{}\\n'.format(self.heading))\n\n # write dataset 1\n f.write('{:10d}'.format(self.nh))\n f.write('{:10d}'.format(self.mobs))\n f.write('{:10d}'.format(self.maxm))\n f.write('{:10d}'.format(self.iuhobsv))\n f.write('{:10.4g}'.format(self.hobdry))\n if self.no_print or 'NOPRINT' in self.options:\n f.write('{: >10}'.format('NOPRINT'))\n f.write('\\n')\n\n # write dataset 2\n f.write('{:10.4g}\\n'.format(self.tomulth))\n\n # write datasets 3-6\n for idx, obs in enumerate(self.obs_data):\n # dataset 3\n obsname = obs.obsname\n if isinstance(obsname, bytes):\n obsname = obsname.decode('utf-8')\n line = '{:12s} '.format(obsname)\n layer = obs.layer\n if layer >= 0:\n 
layer += 1\n line += '{:10d} '.format(layer)\n line += '{:10d} '.format(obs.row + 1)\n line += '{:10d} '.format(obs.column + 1)\n irefsp = obs.irefsp\n if irefsp >= 0:\n irefsp += 1\n line += '{:10d} '.format(irefsp)\n if obs.nobs == 1:\n toffset = obs.time_series_data[0]['toffset']\n hobs = obs.time_series_data[0]['hobs']\n else:\n toffset = 0.\n hobs = 0.\n line += '{:20} '.format(toffset)\n line += '{:10.4f} '.format(obs.roff)\n line += '{:10.4f} '.format(obs.coff)\n line += '{:10.4f} '.format(hobs)\n line += ' # DATASET 3 - Observation {}'.format(idx + 1)\n f.write('{}\\n'.format(line))\n\n # dataset 4\n if len(obs.mlay.keys()) > 1:\n line = ''\n for key, value in iter(obs.mlay.items()):\n line += '{:5d}{:10.4f}'.format(key + 1, value)\n line += ' # DATASET 4 - Observation {}'.format(idx + 1)\n f.write('{}\\n'.format(line))\n\n # dataset 5\n if irefsp < 0:\n line = '{:10d}'.format(obs.itt)\n line += 103 * ' '\n line += ' # DATASET 5 - Observation {}'.format(idx + 1)\n f.write('{}\\n'.format(line))\n\n # dataset 6:\n if obs.nobs > 1:\n for jdx, t in enumerate(obs.time_series_data):\n obsname = t['obsname']\n if isinstance(obsname, bytes):\n obsname = obsname.decode('utf-8')\n line = '{:12s} '.format(obsname)\n line += '{:10d} '.format(t['irefsp'] + 1)\n line += '{:20} '.format(t['toffset'])\n line += '{:10.4f} '.format(t['hobs'])\n line += 55 * ' '\n line += ' # DATASET 6 - ' + \\\n 'Observation {}.{}'.format(idx + 1, jdx + 1)\n f.write('{}\\n'.format(line))\n\n # close the hob package file\n f.close()\n\n return\n\n @staticmethod\n def load(f, model, ext_unit_dict=None, check=True):\n \"\"\"\n Load an existing package.\n\n Parameters\n ----------\n f : filename or file handle\n File to load.\n model : model object\n The model object (of type :class:`flopy.modflow.mf.Modflow`) to\n which this package will be added.\n ext_unit_dict : dictionary, optional\n If the arrays in the file are specified using EXTERNAL,\n or older style array control records, then `f` should be a file\n handle. In this case ext_unit_dict is required, which can be\n constructed using the function\n :class:`flopy.utils.mfreadnam.parsenamefile`.\n check : boolean\n Check package data for common errors. 
(default True)\n\n Returns\n -------\n hob : ModflowHob package object\n ModflowHob package object.\n\n Examples\n --------\n\n >>> import flopy\n >>> m = flopy.modflow.Modflow()\n >>> hobs = flopy.modflow.ModflowHob.load('test.hob', m)\n\n \"\"\"\n\n if model.verbose:\n sys.stdout.write('loading hob package file...\\n')\n\n openfile = not hasattr(f, 'read')\n if openfile:\n filename = f\n f = open(filename, 'r')\n\n # dataset 0 -- header\n while True:\n line = f.readline()\n if line[0] != '#':\n break\n\n # read dataset 1\n t = line.strip().split()\n nh = int(t[0])\n iuhobsv = None\n hobdry = 0\n if len(t) > 3:\n iuhobsv = int(t[3])\n hobdry = float(t[4])\n\n # read dataset 2\n line = f.readline()\n t = line.strip().split()\n tomulth = float(t[0])\n\n # read observation data\n obs_data = []\n\n # read datasets 3-6\n nobs = 0\n while True:\n # read dataset 3\n line = f.readline()\n t = line.strip().split()\n obsnam = t[0]\n layer = int(t[1])\n row = int(t[2]) - 1\n col = int(t[3]) - 1\n irefsp0 = int(t[4])\n toffset = float(t[5])\n roff = float(t[6])\n coff = float(t[7])\n hob = float(t[8])\n\n # read dataset 4 if multilayer obs\n if layer > 0:\n layer -= 1\n mlay = {layer: 1.}\n else:\n line = f.readline()\n t = line.strip().split()\n mlay = collections.OrderedDict()\n if len(t) >= abs(layer) * 2:\n for j in range(0, abs(layer) * 2, 2):\n k = int(t[j]) - 1\n # catch case where the same layer is specified\n # more than once. In this case add previous\n # value to the current value\n keys = list(mlay.keys())\n v = 0.\n if k in keys:\n v = mlay[k]\n mlay[k] = float(t[j + 1]) + v\n else:\n for j in range(abs(layer)):\n k = int(t[0]) - 1\n keys = list(mlay.keys())\n v = 0.\n if k in keys:\n v = mlay[k]\n mlay[k] = float(t[1]) + v\n\n if j != abs(layer) - 1:\n line = f.readline()\n t = line.strip().split()\n # reset layer\n layer = -len(list(mlay.keys()))\n\n # read datasets 5 & 6. Index loop variable\n if irefsp0 > 0:\n itt = 1\n irefsp0 -= 1\n totim = model.dis.get_totim_from_kper_toffset(\n irefsp0,\n toffset * tomulth)\n names = [obsnam]\n tsd = [totim, hob]\n nobs += 1\n else:\n names = []\n tsd = []\n # read data set 5\n line = f.readline()\n t = line.strip().split()\n itt = int(t[0])\n # dataset 6\n for j in range(abs(irefsp0)):\n line = f.readline()\n t = line.strip().split()\n names.append(t[0])\n irefsp = int(t[1]) - 1\n toffset = float(t[2])\n totim = model.dis.get_totim_from_kper_toffset(\n irefsp,\n toffset * tomulth)\n hob = float(t[3])\n tsd.append([totim, hob])\n nobs += 1\n\n obs_data.append(HeadObservation(model, tomulth=tomulth,\n layer=layer, row=row, column=col,\n roff=roff, coff=coff,\n obsname=obsnam,\n mlay=mlay, itt=itt,\n time_series_data=tsd,\n names=names))\n if nobs == nh:\n break\n\n if openfile:\n f.close()\n\n # set package unit number\n unitnumber = None\n filenames = [None, None]\n if ext_unit_dict is not None:\n unitnumber, filenames[0] = \\\n model.get_ext_dict_attr(ext_unit_dict,\n filetype=ModflowHob.ftype())\n if iuhobsv is not None:\n if iuhobsv > 0:\n iu, filenames[1] = \\\n model.get_ext_dict_attr(ext_unit_dict, unit=iuhobsv)\n model.add_pop_key_list(iuhobsv)\n\n # create hob object instance\n hob = ModflowHob(model, iuhobsv=iuhobsv, hobdry=hobdry,\n tomulth=tomulth, obs_data=obs_data,\n unitnumber=unitnumber, filenames=filenames)\n\n return hob\n\n @staticmethod\n def ftype():\n return 'HOB'\n\n @staticmethod\n def defaultunit():\n return 39\n\n\nclass HeadObservation(object):\n \"\"\"\n Create single HeadObservation instance from a time series array. 
A list of\n HeadObservation instances is passed to the ModflowHob package.\n\n Parameters\n ----------\n tomulth : float\n Time-offset multiplier for head observations. Default is 1.\n obsname : string\n Observation name. Default is 'HOBS'\n layer : int\n The zero-based layer index of the cell in which the head observation\n is located. If layer is less than zero, hydraulic heads from multiple\n layers are combined to calculate a simulated value. The number of\n layers equals the absolute value of layer, or abs(layer). Default is 0.\n row : int\n The zero-based row index for the observation. Default is 0.\n column : int\n The zero-based column index of the observation. Default is 0.\n irefsp : int\n The zero-based stress period to which the observation time is\n referenced.\n roff : float\n Fractional offset from center of cell in Y direction (between rows).\n Default is 0.\n coff : float\n Fractional offset from center of cell in X direction (between columns).\n Default is 0.\n itt : int\n Flag that identifies whether head or head changes are used as\n observations. itt = 1 specified for heads and itt = 2 specified\n if initial value is head and subsequent changes in head. Only\n specified if irefsp is < 0. Default is 1.\n mlay : dictionary of length (abs(irefsp))\n Key represents zero-based layer numbers for multilayer observations and\n value represents the fractional value for each layer of multilayer\n observations. If mlay is None, a default mlay of {0: 1.} will be\n used (default is None).\n time_series_data : list or numpy array\n Two-dimensional list or numpy array containing the simulation time of\n the observation and the observed head [[totim, hob]]. If\n time_series_data is None, a default observation of 0. at\n totim 0. will be created (default is None).\n names : list\n List of specified observation names. If names is None, observation\n names will be automatically generated from obsname and the order\n of the timeseries data (default is None).\n\n Returns\n -------\n obs : HeadObservation\n HeadObservation object.\n\n Examples\n --------\n\n >>> import flopy\n >>> model = flopy.modflow.Modflow()\n >>> dis = flopy.modflow.ModflowDis(model, nlay=1, nrow=11, ncol=11, nper=2,\n ... perlen=[1,1])\n >>> tsd = [[1.,54.4], [2., 55.2]]\n >>> obsdata = flopy.modflow.HeadObservation(model, layer=0, row=5,\n ... 
column=5, time_series_data=tsd)\n\n \"\"\"\n\n def __init__(self, model, tomulth=1., obsname='HOBS',\n layer=0, row=0, column=0, irefsp=None,\n roff=0., coff=0., itt=1, mlay=None,\n time_series_data=None, names=None):\n \"\"\"\n Object constructor\n \"\"\"\n\n if mlay is None:\n mlay = {0: 1.}\n if time_series_data is None:\n time_series_data = [[0., 0.]]\n if irefsp is None:\n if len(time_series_data) == 1:\n irefsp = 1\n else:\n irefsp = -1 * len(time_series_data)\n\n # set class attributes\n self.obsname = obsname\n self.layer = layer\n self.row = row\n self.column = column\n self.irefsp = irefsp\n self.roff = roff\n self.coff = coff\n self.itt = itt\n self.mlay = mlay\n self.maxm = 0\n\n # check if multilayer observation\n self.multilayer = False\n if len(self.mlay.keys()) > 1:\n self.maxm = len(self.mlay.keys())\n self.multilayer = True\n tot = 0.\n for key, value in self.mlay.items():\n tot += value\n if not (np.isclose(tot, 1.0, rtol=0)):\n msg = ('sum of dataset 4 proportions must equal 1.0 - ' + \\\n 'sum of dataset 4 proportions = {tot} for ' + \\\n 'observation name {obsname}.').format(\n tot=tot,\n obsname=self.obsname)\n raise ValueError(msg)\n\n # convert passed time_series_data to a numpy array\n if isinstance(time_series_data, list):\n time_series_data = np.array(time_series_data, dtype=np.float)\n\n # if a single observation is passed as a list reshape to a\n # two-dimensional numpy array\n if len(time_series_data.shape) == 1:\n time_series_data = np.reshape(time_series_data, (1, 2))\n\n # find indices of time series data that are valid\n tmax = model.dis.get_final_totim()\n keep_idx = time_series_data[:, 0] <= tmax\n time_series_data = time_series_data[keep_idx, :]\n\n # set the number of observations in this time series\n shape = time_series_data.shape\n self.nobs = shape[0]\n\n # construct names if not passed\n if names is None:\n if self.nobs == 1:\n names = [obsname]\n else:\n names = []\n for idx in range(self.nobs):\n names.append('{}.{}'.format(obsname, idx + 1))\n # make sure the length of names is greater than or equal to nobs\n else:\n if isinstance(names, str):\n names = [names]\n elif not isinstance(names, list):\n msg = 'HeadObservation names must be a ' + \\\n 'string or a list of strings'\n raise ValueError(msg)\n if len(names) < self.nobs:\n msg = 'a name must be specified for every valid ' + \\\n 'observation - {} '.format(len(names)) + \\\n 'names were passed but at least ' + \\\n '{} names are required.'.format(self.nobs)\n raise ValueError(msg)\n\n # create time_series_data\n self.time_series_data = self._get_empty(ncells=shape[0])\n for idx in range(self.nobs):\n t = time_series_data[idx, 0]\n kstp, kper, toffset = model.dis.get_kstp_kper_toffset(t)\n self.time_series_data[idx]['totim'] = t\n self.time_series_data[idx]['irefsp'] = kper\n self.time_series_data[idx]['toffset'] = toffset / tomulth\n self.time_series_data[idx]['hobs'] = time_series_data[idx, 1]\n self.time_series_data[idx]['obsname'] = names[idx]\n\n if self.nobs > 1:\n self.irefsp = -self.nobs\n else:\n self.irefsp = self.time_series_data[0]['irefsp']\n\n def _get_empty(self, ncells=0):\n \"\"\"\n Get an empty time_series_data recarray for a HeadObservation\n\n Parameters\n ----------\n ncells : int\n number of time entries in a HeadObservation\n\n Returns\n -------\n d : np.recarray\n\n \"\"\"\n # get an empty recarray that corresponds to dtype\n dtype = self._get_dtype()\n d = create_empty_recarray(ncells, dtype, default_value=-1.0E+10)\n d['obsname'] = ''\n return d\n\n def 
_get_dtype(self):\n \"\"\"\n Get the dtype for HeadObservation time_series_data\n\n Returns\n -------\n dtype : np.dtype\n\n \"\"\"\n # get the default HOB dtype\n dtype = np.dtype([(\"totim\", np.float32), (\"irefsp\", int),\n (\"toffset\", np.float32),\n (\"hobs\", np.float32), (\"obsname\", '|S12')])\n return dtype\n"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.where",
"numpy.empty"
],
[
"numpy.reshape",
"numpy.array",
"numpy.dtype",
"numpy.isclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DreamBlack/APCNet
|
[
"d76bc9e46c3b631035c5c67e2367b6fb80621333",
"d76bc9e46c3b631035c5c67e2367b6fb80621333",
"d76bc9e46c3b631035c5c67e2367b6fb80621333",
"d76bc9e46c3b631035c5c67e2367b6fb80621333"
] |
[
"finetune_pf_four.py",
"pix3d_data_gen.py",
"four_test_fig.py",
"finetune_my_three.py"
] |
[
"import os\nimport sys\nimport argparse\nimport random\nimport torch\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport utils\nfrom utils import PointLoss\nfrom utils import distance_squre\nimport data_utils as d_utils\nfrom model_PFNet import _netlocalD, _netG\nfrom tensorboardX import SummaryWriter\nfrom pix3d.pix3d_dataset import Pix3DMultiDataset\n\n\nparser = argparse.ArgumentParser()\nexpdir = '/home/dream/study/codes/PCCompletion/PFNet/PF-Net-Point-Fractal-Network/exp/four/pf-net/Lamp'\nparser.add_argument('--dataroot', default='dataset/train', help='path to dataset')\nparser.add_argument('--workers', type=int, default=16, help='number of data loading workers')\nparser.add_argument('--batchSize', type=int, default=16, help='input batch size')\nparser.add_argument('--pnum', type=int, default=1024, help='the point number of a sample')\nparser.add_argument('--crop_point_num', type=int, default=256, help='0 means do not use else use with this weight')\nparser.add_argument('--nc', type=int, default=3)\nparser.add_argument('--niter', type=int, default=131, help='number of epochs to train for')\nparser.add_argument('--class_choice', default='Lamp', help='random|center|random_center')\nparser.add_argument('--weight_decay', type=float, default=0.001)\nparser.add_argument('--learning_rate', default=0.00004, type=float, help='learning rate in training')\nparser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')\nparser.add_argument('--cuda', type=bool, default=False, help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=2, help='number of GPUs to use')\nparser.add_argument('--D_choose', type=int, default=0, help='0 not use D-net,1 use D-net')\nparser.add_argument('--netG', default='', help=\"path to netG (to continue training)\")\nparser.add_argument('--netD', default='', help=\"path to netD (to continue training)\")\nparser.add_argument('--expdir', default='', help=\"path to netD (to continue training)\")\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('--drop', type=float, default=0.2)\nparser.add_argument('--num_scales', type=int, default=3, help='number of scales')\nparser.add_argument('--point_scales_list', type=list, default=[1024, 512, 256], help='number of points in each scales')\nparser.add_argument('--each_scales_size', type=int, default=1, help='each scales size')\nparser.add_argument('--wtl2', type=float, default=0.95, help='0 means do not use else use with this weight')\nparser.add_argument('--cropmethod', default='random_center', help='random|center|random_center')\nopt = parser.parse_args()\nprint(opt)\n# python finetune_pf_four.py --folding_decoder=0 --expdir=/home/dream/study/codes/PCCompletion/best_four_exp/pfnet/with_image/pix3d/chair --workers=16 --batchSize=16 --D_choose=0 --Rep_choose=1 --class_choice=Chair --attention_encoder=1 --step_size=40 --gamma=0.2 --alpha=0.3 --loss_emd=1 --pointnetplus_encoder=1 --learning_rate=0.00004 --niter=151 --netG=/home/dream/study/codes/PCCompletion/best_four_exp/chair/checkpoint/point_netG150.pth\nblue = lambda x: '\\033[94m' + x + '\\033[0m'\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nUSE_CUDA = True\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\npoint_netG = _netG(opt.num_scales, opt.each_scales_size, opt.point_scales_list, opt.crop_point_num)\npoint_netD = 
_netlocalD(opt.crop_point_num)\ncudnn.benchmark = True\nresume_epoch = 0\n\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv2d\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"Conv1d\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm2d\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n elif classname.find(\"BatchNorm1d\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\n\nif USE_CUDA:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n point_netG = torch.nn.DataParallel(point_netG)\n point_netD = torch.nn.DataParallel(point_netD)\n point_netG.to(device)\n point_netG.apply(weights_init_normal)\n point_netD.to(device)\n point_netD.apply(weights_init_normal)\nif opt.netG != '':\n point_netG.load_state_dict(torch.load(opt.netG, map_location=lambda storage, location: storage)['state_dict'])\n resume_epoch = torch.load(opt.netG)['epoch']\nif opt.netD != '':\n point_netD.load_state_dict(torch.load(opt.netD, map_location=lambda storage, location: storage)['state_dict'])\n resume_epoch = torch.load(opt.netD)['epoch']\n\nif opt.manualSeed is None:\n opt.manualSeed = random.randint(1, 10000)\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\nif opt.cuda:\n torch.cuda.manual_seed_all(opt.manualSeed)\n\ntransforms = transforms.Compose(\n [\n d_utils.PointcloudToTensor(),\n ]\n)\ndset = Pix3DMultiDataset( class_choice=opt.class_choice)\nassert dset\ndataloader = torch.utils.data.DataLoader(dset, batch_size=opt.batchSize, shuffle=True, num_workers=opt.workers,\n drop_last=True)\n\n# dset = ModelNet40Loader.ModelNet40Cls(opt.pnum, train=True, transforms=transforms, download = False)\n# assert dset\n# dataloader = torch.utils.data.DataLoader(dset, batch_size=opt.batchSize,\n# shuffle=True,num_workers = int(opt.workers))\n#\n#\n# test_dset = ModelNet40Loader.ModelNet40Cls(opt.pnum, train=False, transforms=transforms, download = False)\n# test_dataloader = torch.utils.data.DataLoader(test_dset, batch_size=opt.batchSize,\n# shuffle=True,num_workers = int(opt.workers))\n\n# pointcls_net.apply(weights_init)\n\n\ncriterion = torch.nn.BCEWithLogitsLoss().to(device)\ncriterion_PointLoss = PointLoss().to(device)\n\n# setup optimizer\noptimizerD = torch.optim.Adam(point_netD.parameters(), lr=0.0001, betas=(0.9, 0.999), eps=1e-05,\n weight_decay=opt.weight_decay)\noptimizerG = torch.optim.Adam(point_netG.parameters(), lr=0.0001, betas=(0.9, 0.999), eps=1e-05,\n weight_decay=opt.weight_decay)\nschedulerD = torch.optim.lr_scheduler.StepLR(optimizerD, step_size=40, gamma=0.2)\nschedulerG = torch.optim.lr_scheduler.StepLR(optimizerG, step_size=30, gamma=0.2)\n\nreal_label = 1\nfake_label = 0\n\ncrop_point_num = int(opt.crop_point_num)\ninput_cropped1 = torch.FloatTensor(opt.batchSize, opt.pnum, 3)\nlabel = torch.FloatTensor(opt.batchSize)\n\nnum_batch = len(dset) / opt.batchSize\n###########################\n# G-NET and T-NET\n##########################\nif opt.D_choose == 1:\n for epoch in range(resume_epoch, opt.niter):\n if epoch < 30:\n alpha1 = 0.01\n alpha2 = 0.02\n elif epoch < 80:\n alpha1 = 0.05\n alpha2 = 0.1\n else:\n alpha1 = 0.1\n alpha2 = 0.2\n\n for i, data in enumerate(dataloader, 0):\n incomplete, gt, image,pcl_2025,filename= data\n\n batch_size = incomplete.size()[0]\n\n incomplete = incomplete.to(device)\n\n 
gt = gt.to(device)\n image = image.to(device)\n\n incomplete = Variable(incomplete, requires_grad=True).cuda()\n image = Variable(image.float(), requires_grad=True).cuda()\n image = torch.squeeze(image, 1)\n label.resize_([batch_size, 1]).fill_(real_label)\n label = label.to(device)\n\n ############################\n # (1) data prepare\n ###########################\n real_center = Variable(gt, requires_grad=True)\n real_center = torch.squeeze(real_center, 1)\n real_center_key1_idx = utils.farthest_point_sample(real_center, 64, RAN=False)\n real_center_key1 = utils.index_points(real_center, real_center_key1_idx)\n real_center_key1 = Variable(real_center_key1, requires_grad=True)\n\n real_center_key2_idx = utils.farthest_point_sample(real_center, 128, RAN=True)\n real_center_key2 = utils.index_points(real_center, real_center_key2_idx)\n real_center_key2 = Variable(real_center_key2, requires_grad=True)\n\n input_cropped1 = torch.squeeze(incomplete, 1)\n input_cropped2_idx = utils.farthest_point_sample(input_cropped1, opt.point_scales_list[1], RAN=True)\n input_cropped2 = utils.index_points(input_cropped1, input_cropped2_idx)\n input_cropped3_idx = utils.farthest_point_sample(input_cropped1, opt.point_scales_list[2], RAN=False)\n input_cropped3 = utils.index_points(input_cropped1, input_cropped3_idx)\n input_cropped1 = Variable(input_cropped1, requires_grad=True)\n input_cropped2 = Variable(input_cropped2, requires_grad=True)\n input_cropped3 = Variable(input_cropped3, requires_grad=True)\n input_cropped2 = input_cropped2.to(device)\n input_cropped3 = input_cropped3.to(device)\n input_cropped = [input_cropped1, input_cropped2, input_cropped3]\n point_netG = point_netG.train()\n point_netD = point_netD.train()\n ############################\n # (2) Update D network\n ###########################\n point_netD.zero_grad()\n real_center = torch.unsqueeze(real_center, 1)\n output = point_netD(real_center)\n errD_real = criterion(output, label)\n errD_real.backward()\n fake_center1, fake_center2, fake = point_netG(input_cropped, image)\n fake = torch.unsqueeze(fake, 1)\n label.data.fill_(fake_label)\n output = point_netD(fake.detach())\n errD_fake = criterion(output, label)\n errD_fake.backward()\n errD = errD_real + errD_fake\n optimizerD.step()\n ############################\n # (3) Update G network: maximize log(D(G(z)))\n ###########################\n point_netG.zero_grad()\n label.data.fill_(real_label)\n output = point_netD(fake)\n errG_D = criterion(output, label)\n errG_l2 = 0\n CD_LOSS = criterion_PointLoss(torch.squeeze(fake, 1), torch.squeeze(real_center, 1))\n\n errG_l2 = criterion_PointLoss(torch.squeeze(fake, 1), torch.squeeze(real_center, 1)) \\\n + alpha1 * criterion_PointLoss(fake_center1, real_center_key1) \\\n + alpha2 * criterion_PointLoss(fake_center2, real_center_key2)\n\n errG = (1 - opt.wtl2) * errG_D + opt.wtl2 * errG_l2\n errG.backward()\n optimizerG.step()\n writer = SummaryWriter(log_dir=os.path.join(opt.expdir, 'tensorboard'))\n writer.add_scalar('cd_missing', CD_LOSS, num_batch * epoch + i)\n writer.add_scalar('D_Loss', errD.data, num_batch * epoch + i)\n writer.add_scalar('GD_Loss', errG_D.data, num_batch * epoch + i)\n writer.add_scalar('GwithD_sum_loss', errG, num_batch * epoch + i)\n writer.add_scalar('GwithD_l2', errG_l2, num_batch * epoch + i)\n complete_pc = torch.cat([torch.squeeze(fake, 1), incomplete], dim=1)\n complete_gt = torch.cat([gt, incomplete], dim=1)\n CD_LOSS_ALL = criterion_PointLoss(torch.squeeze(complete_pc, 1).cuda(),\n torch.squeeze(complete_gt, 
1).cuda())\n CD_LOSS_ALL = CD_LOSS_ALL.data.cpu()\n writer.add_scalar('cd_all', CD_LOSS_ALL, num_batch * epoch + i)\n writer.add_scalar('lr', schedulerG.get_lr()[0], num_batch * epoch + i)\n writer.close()\n print(\n '[%d/%d][%d/%d] [missing_cd/all_cd/d_loss/GD_loss/GwithD_l2/GwithD_sum_loss]: %.4f / %.4f / %.4f / %.4f / %.4f / %.4f '\n % (epoch, opt.niter, i, len(dataloader),\n CD_LOSS, CD_LOSS_ALL, errD.data, errG_D.data, errG_l2, errG))\n\n schedulerD.step()\n schedulerG.step()\n if epoch % 10 == 0:\n torch.save({'epoch': epoch + 1,\n 'state_dict': point_netG.state_dict()},\n opt.expdir + '/checkpoint/point_netG' + str(epoch) + '.pth')\n torch.save({'epoch': epoch + 1,\n 'state_dict': point_netD.state_dict()},\n opt.expdir + '/checkpoint/point_netD' + str(epoch) + '.pth')\n\n\n\n\n#\n#############################\n## ONLY G-NET\n############################\nelse:\n for epoch in range(resume_epoch, opt.niter):\n if epoch < 30:\n alpha1 = 0.01\n alpha2 = 0.02\n elif epoch < 80:\n alpha1 = 0.05\n alpha2 = 0.1\n else:\n alpha1 = 0.1\n alpha2 = 0.2\n if epoch == 0:\n torch.save({'epoch': epoch + 1,\n 'state_dict': point_netG.state_dict()},\n opt.expdir + '/checkpoint/point_netG' + str(epoch) + '.pth')\n torch.save({'epoch': epoch + 1,\n 'state_dict': point_netD.state_dict()},\n opt.expdir + '/checkpoint/point_netD' + str(epoch) + '.pth')\n for i, data in enumerate(dataloader, 0):\n incomplete, gt, image,pcl_2025,filename = data\n\n batch_size = incomplete.size()[0]\n\n incomplete = incomplete.to(device)\n\n gt = gt.to(device)\n image = image.to(device)\n\n incomplete = Variable(incomplete, requires_grad=True).cuda()\n image = Variable(image.float(), requires_grad=True).cuda()\n image = torch.squeeze(image, 1)\n label.resize_([batch_size, 1]).fill_(real_label)\n label = label.to(device)\n\n ############################\n # (1) data prepare\n ###########################\n real_center = Variable(gt, requires_grad=True)\n real_center = torch.squeeze(real_center, 1)\n real_center_key1_idx = utils.farthest_point_sample(real_center, 64, RAN=False)\n real_center_key1 = utils.index_points(real_center, real_center_key1_idx)\n real_center_key1 = Variable(real_center_key1, requires_grad=True)\n\n real_center_key2_idx = utils.farthest_point_sample(real_center, 128, RAN=True)\n real_center_key2 = utils.index_points(real_center, real_center_key2_idx)\n real_center_key2 = Variable(real_center_key2, requires_grad=True)\n\n input_cropped1 = torch.squeeze(incomplete, 1)\n input_cropped2_idx = utils.farthest_point_sample(input_cropped1, opt.point_scales_list[1], RAN=True)\n input_cropped2 = utils.index_points(input_cropped1, input_cropped2_idx)\n input_cropped3_idx = utils.farthest_point_sample(input_cropped1, opt.point_scales_list[2], RAN=False)\n input_cropped3 = utils.index_points(input_cropped1, input_cropped3_idx)\n input_cropped1 = Variable(input_cropped1, requires_grad=True)\n input_cropped2 = Variable(input_cropped2, requires_grad=True)\n input_cropped3 = Variable(input_cropped3, requires_grad=True)\n input_cropped2 = input_cropped2.to(device)\n input_cropped3 = input_cropped3.to(device)\n input_cropped = [input_cropped1, input_cropped2, input_cropped3]\n point_netG = point_netG.train()\n point_netG.zero_grad()\n fake_center1, fake_center2, fake = point_netG(input_cropped, image)\n fake = torch.unsqueeze(fake, 1)\n ############################\n # (3) Update G network: maximize log(D(G(z)))\n ###########################\n\n CD_LOSS = criterion_PointLoss(torch.squeeze(fake, 1), 
torch.squeeze(real_center, 1))\n\n errG_l2 = criterion_PointLoss(torch.squeeze(fake, 1), torch.squeeze(real_center, 1)) \\\n + alpha1 * criterion_PointLoss(fake_center1, real_center_key1) \\\n + alpha2 * criterion_PointLoss(fake_center2, real_center_key2)\n\n errG_l2.backward()\n optimizerG.step()\n writer = SummaryWriter(log_dir=os.path.join(opt.expdir, 'tensorboard'))\n writer.add_scalar('cd_missing', CD_LOSS, num_batch * epoch + i)\n writer.add_scalar('GwithD_l2', errG_l2, num_batch * epoch + i)\n complete_pc = torch.cat([torch.squeeze(fake, 1), incomplete], dim=1)\n complete_gt = torch.cat([gt, incomplete], dim=1)\n CD_LOSS_ALL = criterion_PointLoss(torch.squeeze(complete_pc, 1).cuda(),\n torch.squeeze(complete_gt, 1).cuda())\n CD_LOSS_ALL = CD_LOSS_ALL.data.cpu()\n writer.add_scalar('cd_all', CD_LOSS_ALL, num_batch * epoch + i)\n writer.add_scalar('lr', schedulerG.get_lr()[0], num_batch * epoch + i)\n writer.close()\n print(\n '[%d/%d][%d/%d] [missing_cd/all_cd]: %.4f / %.4f '\n % (epoch, opt.niter, i, len(dataloader),\n CD_LOSS, CD_LOSS_ALL))\n\n schedulerG.step()\n if epoch % 10 == 0:\n torch.save({'epoch': epoch + 1,\n 'state_dict': point_netG.state_dict()},\n opt.expdir + '/checkpoint/point_netG' + str(epoch) + '.pth')\n torch.save({'epoch': epoch + 1,\n 'state_dict': point_netD.state_dict()},\n opt.expdir + '/checkpoint/point_netD' + str(epoch) + '.pth')\n\n\n\n\n",
"import torch.utils.data as data\nimport os\nimport os.path\nimport torch\nimport numpy as np\nimport cv2\nimport json\nfrom DatasetGeneration import dividePointCloud\nimport matplotlib\ndata_dir_imgs = '/home/dream/study/codes/PCCompletion/datasets/pix3d/pix3d'\ndata_out_father='/home/dream/study/codes/PCCompletion/datasets/my_pix3d/three/chair'\nHEIGHT = 128\nWIDTH = 128\nPAD = 35\nchoice = [torch.Tensor([1, 0, 0]), torch.Tensor([0, 0, 1]), torch.Tensor([1, 0, 1]), torch.Tensor([-1, 0, 0]),\n torch.Tensor([-1, 1, 0])]\n\ndef get_pix3d_models(cat):\n with open(os.path.join(data_dir_imgs, 'pix3d.json'), 'r') as f:\n models_dict = json.load(f)\n models = []\n\n cats = [cat] # 'sofa'['chair', 'table']\n\n # Check for truncation and occlusion before adding a model to the evaluation list\n for d in models_dict:\n if d['category'] in cats:\n if not d['truncated'] and not d['occluded'] and not d['slightly_occluded']:\n models.append(d)\n\n print('Total models = {}\\n'.format(len(models)))\n return models\n\n\ndef pc_normalize(pc):\n \"\"\" pc: NxC, return NxC \"\"\"\n l = pc.shape[0]\n centroid = np.mean(pc, axis=0)\n pc = pc - centroid\n m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))\n pc = pc / m\n return pc\ncatname_lower={'Chair':\"chair\",\"Table\":'table'}\ndef generateFour(class_choice):\n models = get_pix3d_models(catname_lower[class_choice])\n cnt=0\n for ind in range(len(models)):\n last_model_path, file = os.path.split(os.path.join(data_dir_imgs, models[ind]['model']))\n model_path = os.path.splitext(os.path.join(data_dir_imgs, models[ind]['model']))[0]\n _dict = models[ind]\n img_path = os.path.join(data_dir_imgs, _dict['img'])\n mask_path = os.path.join(data_dir_imgs, _dict['mask'])\n bbox = _dict['bbox'] # [width_from, height_from, width_to, height_to]\n\n pcl_path_1K = model_path + \".pts\" # point cloud path\n last_model_path = last_model_path.split('.')[0] + \"/model.pts\" # point cloud path\n\n gt_filename = os.path.join(data_out_father, 'pc256', str(ind) + \".pts\")\n incomplete_filename = os.path.join(data_out_father, 'pc1024', str(ind) + \".pts\")\n pc2025_filename = os.path.join(data_out_father, 'pc2025', str(ind) + \".pts\")\n image_filename = os.path.join(data_out_father, 'image_clean', str(ind) + \".png\")\n if os.path.exists(gt_filename) and os.path.exists(incomplete_filename) and os.path.exists(pc2025_filename) and os.path.exists(image_filename) :\n cnt = cnt + 1\n if cnt % 100 == 0:\n print(\"Succeed generate gt and incomplete for %s [%d/%d]\" % (class_choice, cnt, len(models)))\n continue\n\n\n ip_image = cv2.imread(img_path)\n ip_image = cv2.cvtColor(ip_image, cv2.COLOR_BGR2RGB)\n mask_image = cv2.imread(mask_path) != 0\n ip_image = ip_image * mask_image\n ip_image = ip_image[bbox[1]:bbox[3], bbox[0]:bbox[2], :]\n\n current_size = ip_image.shape[:2] # current_size is in (height, width) format\n ratio = float(HEIGHT - PAD) / max(current_size)\n new_size = tuple([int(x * ratio) for x in current_size])\n ip_image = cv2.resize(ip_image,\n (new_size[1], new_size[0])) # new_size should be in (width, height) format\n delta_w = WIDTH - new_size[1]\n delta_h = HEIGHT - new_size[0]\n top, bottom = delta_h // 2, delta_h - (delta_h // 2)\n left, right = delta_w // 2, delta_w - (delta_w // 2)\n color = [0, 0, 0]\n ip_image = cv2.copyMakeBorder(ip_image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)\n\n xangle = np.pi / 180. * -90\n yangle = np.pi / 180. 
* -90\n #pcl_2025 = rotate(rotate(np.loadtxt(pcl_path_1K).astype(np.float32), xangle, yangle), xangle)\n pcl_2025 = np.loadtxt(pcl_path_1K).astype(np.float32)\n pcl_2025 = pc_normalize(pcl_2025)\n pcl_2025 = torch.from_numpy(pcl_2025)\n\n pcl_256 = torch.FloatTensor(256, 3)\n start = 0\n num_each_group = [50, 50, 50, 50, 56]\n left_pc_num = 1280\n pcl_1024 = pcl_2025.float()\n for i in range(len(choice)):\n left_pc_num = left_pc_num - num_each_group[i]\n part_gt, pcl_1024 = dividePointCloud(pcl_1024, choice[i], num_each_group[i], left_pc_num)\n for index in range(start, start + num_each_group[i]):\n pcl_256.data[index] = part_gt[index - start]\n start = start + num_each_group[i]\n\n # To Pytorch\n incomplete = pcl_1024\n gt = pcl_256\n #print(model_path)\n gt_filename = os.path.join(data_out_father, 'pc256', str(ind)+\".pts\")\n incomplete_filename = os.path.join(data_out_father, 'pc1024', str(ind)+\".pts\" )\n pc2025_filename = os.path.join(data_out_father, 'pc2025', str(ind) + \".pts\")\n image_filename= os.path.join(data_out_father, 'image_clean', str(ind) + \".png\")\n\n np.savetxt(gt_filename, gt.numpy())\n np.savetxt(incomplete_filename, incomplete.numpy())\n np.savetxt(pc2025_filename, pcl_2025.numpy())\n matplotlib.image.imsave(image_filename,ip_image)\n cnt = cnt + 1\n if cnt % 100 == 0:\n print(\"Succeed generate gt and incomplete for %s [%d/%d]\" % (class_choice, cnt, len(models)))\n\ndef generateThree(class_choice):\n models = get_pix3d_models(catname_lower[class_choice])\n cnt = 0\n for ind in range(len(models)):\n\n last_model_path, file = os.path.split(os.path.join(data_dir_imgs, models[ind]['model']))\n model_path = os.path.splitext(os.path.join(data_dir_imgs, models[ind]['model']))[0]\n _dict = models[ind]\n img_path = os.path.join(data_dir_imgs, _dict['img'])\n mask_path = os.path.join(data_dir_imgs, _dict['mask'])\n bbox = _dict['bbox'] # [width_from, height_from, width_to, height_to]\n\n pcl_path_1K = model_path + \".pts\" # point cloud path\n\n\n pc2025_filename = os.path.join(data_out_father, 'pc2025', str(ind) + \".pts\")\n image_filename = os.path.join(data_out_father, 'image_clean', str(ind) + \".png\")\n ip_image = cv2.imread(img_path)\n ip_image = cv2.cvtColor(ip_image, cv2.COLOR_BGR2RGB)\n mask_image = cv2.imread(mask_path) != 0\n ip_image = ip_image * mask_image\n ip_image = ip_image[bbox[1]:bbox[3], bbox[0]:bbox[2], :]\n\n current_size = ip_image.shape[:2] # current_size is in (height, width) format\n ratio = float(HEIGHT - PAD) / max(current_size)\n new_size = tuple([int(x * ratio) for x in current_size])\n ip_image = cv2.resize(ip_image,\n (new_size[1], new_size[0])) # new_size should be in (width, height) format\n delta_w = WIDTH - new_size[1]\n delta_h = HEIGHT - new_size[0]\n top, bottom = delta_h // 2, delta_h - (delta_h // 2)\n left, right = delta_w // 2, delta_w - (delta_w // 2)\n color = [0, 0, 0]\n ip_image = cv2.copyMakeBorder(ip_image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)\n matplotlib.image.imsave(image_filename, ip_image)\n\n pcl_2025 = np.loadtxt(pcl_path_1K).astype(np.float32)\n pcl_2025 = pc_normalize(pcl_2025)\n pcl_2025 = torch.from_numpy(pcl_2025)\n np.savetxt(pc2025_filename, pcl_2025.numpy())\n for center in range(5):\n gt_filename = os.path.join(data_out_father, 'pc256', str(ind*5+center) + \".pts\")\n incomplete_filename = os.path.join(data_out_father, 'pc1024', str(ind*5+center) + \".pts\")\n pcl_256, pcl_1024 = dividePointCloud(pcl_2025.float(), choice[center])\n\n np.savetxt(gt_filename, pcl_256.numpy())\n 
np.savetxt(incomplete_filename, pcl_1024.numpy())\n cnt = cnt + 1\n if cnt % 100 == 0:\n print(\"Succeed generate gt and incomplete for %s [%d/%d]\" % (class_choice, cnt, 5 * len(models)))\n\n\n\n\n\nimport matplotlib.pyplot as plt\nif __name__ == '__main__':\n data_out_father = '/home/dream/study/codes/PCCompletion/datasets/my_pix3d/three/table'\n generateThree(\"Table\")\n data_out_father = '/home/dream/study/codes/PCCompletion/datasets/my_pix3d/three/chair'\n generateThree(\"Chair\")\n # d = PartDataset( root='./dataset/shapenetcore_partanno_segmentation_benchmark_v0/',classification=False, class_choice=None, npoints=4096, split='test')\n # print(len(dset))\n # incomplete,gt, image, filename= dset[1000]\n #\n # print(incomplete.size())\n # print(gt.size())\n # print(image.size())\n# print(incomplete)\n# ps = ps.numpy()\n #np.savetxt('ps'+'.xyz', incomplete, fmt = \"%f %f %f\")\n",
"import sys\nsys.path.append('/home/dream/study/codes/PCCompletion/PFNet/PF-Net-Point-Fractal-Network')\nimport argparse\nimport random\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data\nfrom torch.autograd import Variable\nfrom MyDataset_former import MyDataset\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport utils\nimport os\nfrom PIL import Image\nfrom completion_net import myNet\nfrom test_fig import get_img_by_modelname\nfrom model_PFNet import _netG\nfrom pf_net_three import _netG as vanilia_netG\nparser = argparse.ArgumentParser() # create an argumentparser object\nparser.add_argument('--class_choice', default='Car', help=\"which class choose to train\")\nparser.add_argument('--attention_encoder', type = int, default = 1, help='enables cuda')\nparser.add_argument('--folding_decoder', type = int, default = 0, help='enables cuda')\nparser.add_argument('--pointnetplus_encoder', type = int, default = 1, help='enables cuda')\nparser.add_argument('--cuda', type = bool, default = True, help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=2, help='number of GPUs to use')\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('--four_data', type = int, default = 1, help='enables cuda')\nparser.add_argument('--index', type = int, default = 150, help='enables cuda')\nparser.add_argument('--usedataset', type = bool, default = True, help='enables cuda')\nparser.add_argument('--test', type = bool, default = True, help='enables cuda')\nparser.add_argument('--needSave', type = bool, default = False, help='enables cuda')\n\nopt = parser.parse_args()\nprint(opt)\n\n#good lamp\n# 244 -134 -138\n# 234 -99 99\n# 184 -127 148\n# 174 -87 84\n# 144 -155 132\n\nelev = 78 # a=elev\nazim = -61 # b=azim\n# good airplane\ncatname_lower={'Car':\"car\",'Lamp':\"lamp\",'Chair':\"chair\",\"Table\":'table',\"Airplane\":'airplane'}\nmy_net_path_home='/home/dream/study/codes/PCCompletion/best_four_exp'\nnetpaths=[\nos.path.join(my_net_path_home,catname_lower[opt.class_choice],\"checkpoint\",\"point_netG130.pth\")]\n\npf_net_path_vanilia=os.path.join(\"/home/dream/study/codes/PCCompletion/best_four_exp/pfnet/\",\"vanilia\",catname_lower[opt.class_choice],\"checkpoint\",\"point_netG130.pth\")\npf_net_path_image=os.path.join(\"/home/dream/study/codes/PCCompletion/best_four_exp/pfnet/\",\"with_image\",catname_lower[opt.class_choice],\"checkpoint\",\"point_netG130.pth\")\npfnet_path=[pf_net_path_vanilia,pf_net_path_image]\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\ncudnn.benchmark = True # promote\nMLP_dimsG = (3, 64, 64, 64, 128, 1024)\nFC_dimsG = (1024, 1024, 512)\n\nif opt.class_choice=='Lamp' or opt.class_choice=='Car' :\n if opt.pointnetplus_encoder==0 and opt.folding_decoder==0:# 第四章,在所有car lamp数据集上的folding实验point都用小的\n MLP_dimsG = (3, 64, 64, 64, 128, 512)\n FC_dimsG = (512, 512, 512)\n\ngrid_dims = (16, 16) # defaul 45\nFolding1_dims = (514, 512, 512, 3) # for foldingnet\nFolding2_dims = (515, 512, 512, 3) # for foldingnet\nWeight1_dims = (16 * 16 + 512, 512, 512, 128) # for weight matrix estimation 45x45+512 = 2537\nWeight3_dims = (512 + 128, 1024, 1024, 256)\nknn = 48\nsigma = 0.008\n\nif opt.manualSeed is None:\n opt.manualSeed = random.randint(1, 10000)\nprint(\"Random Seed: \", 
opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\ntorch.cuda.manual_seed_all(opt.manualSeed)\n\n\nimage_home='/home/dream/study/codes/densepcr/datasets/rendering'\npc_home='/home/dream/study/codes/PCCompletion/datasets/dataFromPFNet/shapenet_part/shapenet_part/datagenerate'\ncatname_id={'Car':\"02958343\",'Lamp':\"03636649\",'Chair':\"03001627\",\"Table\":'04379243',\"Airplane\":'02691156'}\n\ndef get_result_for_image_pfNet(incomplete,image,netpath):\n point_netG = _netG(3, 1, [1024, 512, 256], 256)\n point_netG = torch.nn.DataParallel(point_netG)\n point_netG.to(device)\n point_netG.load_state_dict(torch.load(netpath, map_location=lambda storage, location: storage)['state_dict'],\n strict=False)\n point_netG.eval()\n\n ############################\n # (1) data prepare\n ###########################\n input_cropped1 = torch.squeeze(incomplete, 1)\n input_cropped2_idx = utils.farthest_point_sample(input_cropped1, 512, RAN=True)\n input_cropped2 = utils.index_points(input_cropped1, input_cropped2_idx)\n input_cropped3_idx = utils.farthest_point_sample(input_cropped1, 256, RAN=False)\n input_cropped3 = utils.index_points(input_cropped1, input_cropped3_idx)\n input_cropped1 = Variable(input_cropped1, requires_grad=False)\n input_cropped2 = Variable(input_cropped2, requires_grad=False)\n input_cropped3 = Variable(input_cropped3, requires_grad=False)\n input_cropped2 = input_cropped2.to(device)\n input_cropped3 = input_cropped3.to(device)\n input_cropped = [input_cropped1, input_cropped2, input_cropped3]\n fake_center1, fake_center2, fake = point_netG(input_cropped,image)\n return fake.cuda().data.cpu().squeeze(0).numpy()\n\ndef get_result_for_myNet(incomplete,image,netpath):\n net = myNet(3, 128, 128, MLP_dimsG, FC_dimsG, grid_dims, Folding1_dims, Folding2_dims, Weight1_dims,\n Weight3_dims, folding=opt.folding_decoder, attention=opt.attention_encoder,\n pointnetplus=opt.pointnetplus_encoder)\n net = torch.nn.DataParallel(net)\n net.to(device)\n net.load_state_dict(torch.load(netpath, map_location=lambda storage, location: storage)['state_dict'],\n strict=False)\n net.eval()\n fake = net(incomplete, image)\n return fake.cuda().data.cpu().squeeze(0).numpy()\n\ndef get_result_for_pfNet(incomplete,netpath=\"\"):\n point_netG = vanilia_netG(3, 1, [1024, 512, 256], 256)\n point_netG = torch.nn.DataParallel(point_netG)\n point_netG.to(device)\n point_netG.load_state_dict(torch.load(netpath, map_location=lambda storage, location: storage)['state_dict'],\n strict=False)\n point_netG.eval()\n\n ############################\n # (1) data prepare\n ###########################\n input_cropped1 = torch.squeeze(incomplete, 1)\n input_cropped2_idx = utils.farthest_point_sample(input_cropped1, 512, RAN=True)\n input_cropped2 = utils.index_points(input_cropped1, input_cropped2_idx)\n input_cropped3_idx = utils.farthest_point_sample(input_cropped1, 256, RAN=False)\n input_cropped3 = utils.index_points(input_cropped1, input_cropped3_idx)\n input_cropped1 = Variable(input_cropped1, requires_grad=False)\n input_cropped2 = Variable(input_cropped2, requires_grad=False)\n input_cropped3 = Variable(input_cropped3, requires_grad=False)\n input_cropped2 = input_cropped2.to(device)\n input_cropped3 = input_cropped3.to(device)\n input_cropped = [input_cropped1, input_cropped2, input_cropped3]\n fake_center1, fake_center2, fake = point_netG(input_cropped)\n return fake.cuda().data.cpu().squeeze(0).numpy()\n\ndef compare_my_and_pf(cat):\n incomplete, gt, image, filename, view_id, center_id = 
test_dset.__getitem__(opt.index)\n print(\"view_id:\" + str(view_id))\n print(\"center_id:\" + str(center_id))\n print(\"filename:\" + str(filename))\n\n image_path = os.path.join(image_home, catname_id[cat], filename, \"rendering\", view_id + \".png\")\n imgpng = get_img_by_modelname(image_path)\n\n incomplete = torch.unsqueeze(incomplete, 0)\n image = torch.unsqueeze(image, 0)\n\n incomplete = incomplete.to(device)\n image = image.to(device)\n\n incomplete = Variable(incomplete, requires_grad=False)\n image = Variable(image.float(), requires_grad=False)\n\n pf_fakes = []\n pf_fakes.append(get_result_for_image_pfNet(incomplete,image,pfnet_path[1]))\n pf_fakes.append(get_result_for_pfNet(incomplete, pfnet_path[0]))\n\n\n my_fake=get_result_for_myNet(incomplete, image,netpaths[0])\n\n incomplete=incomplete.cuda().data.squeeze(0).cpu().numpy()\n fig = plt.figure(figsize=(26, 6), facecolor='w') #\n plt.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)\n\n if opt.test:\n ax = fig.add_subplot(151)\n fig.set_facecolor('none')\n ax.imshow(imgpng)\n #plt.title(\"input image\")\n plt.axis(\"off\")\n\n\n #plt.title(\"GT\")\n\n ax1 = fig.add_subplot(152, projection='3d')\n ax1.scatter(incomplete[:, 0], incomplete[:, 1], incomplete[:, 2], color=incompleteColor, s=pointSize)\n ax1.scatter(pf_fakes[0][:, 0], pf_fakes[0][:, 1], pf_fakes[0][:, 2], color=recColor, s=pointSize)\n ax1.set_axis_off()\n #plt.title(\"PF-Net-vanilia\")\n\n ax2 = fig.add_subplot(153, projection='3d')\n ax2.scatter(incomplete[:, 0], incomplete[:, 1], incomplete[:, 2], color=incompleteColor, s=pointSize)\n ax2.scatter(pf_fakes[1][:, 0], pf_fakes[1][:, 1], pf_fakes[1][:, 2], color=recColor, s=pointSize)\n ax2.set_axis_off()\n #plt.title(\"PF-Net-image\")\n\n ax3 = fig.add_subplot(154, projection='3d')\n ax3.scatter(incomplete[:, 0], incomplete[:, 1], incomplete[:, 2], color=incompleteColor, s=pointSize)\n ax3.scatter(my_fake[:, 0], my_fake[:, 1], my_fake[:, 2], color=recColor, s=pointSize)\n ax3.set_axis_off()\n #plt.title(\"My\")\n\n ax0 = fig.add_subplot(155, projection='3d')\n ax0.scatter(incomplete[:, 0], incomplete[:, 1], incomplete[:, 2], color=incompleteColor, s=pointSize)\n ax0.scatter(gt[:, 0], gt[:, 1], gt[:, 2], color=recColor, s=pointSize)\n ax0.set_axis_off()\n\n # ax4 = fig.add_subplot(166, projection='3d')\n # ax4.scatter(incomplete[:, 0], incomplete[:, 1], incomplete[:, 2], color=incompleteColor, s=pointSize)\n # ax4.scatter(my_fakes[2][:, 0], my_fakes[2][:, 1], my_fakes[2][:, 2], color=recColor, s=pointSize)\n # ax4.set_axis_off()\n # plt.title(\"My2\")\n\n\n ax0.view_init(elev, azim)\n ax1.view_init(elev, azim)\n ax2.view_init(elev, azim)\n ax3.view_init(elev, azim)\n fig.set_tight_layout(True)\n if opt.needSave:\n output_filename = os.path.join(\"/home/dream/study/four_result_images\",\n cat +str(opt.index)+ \"_\" + filename + \"_\" + str(center_id) + \"_\" + view_id + \".png\")\n plt.savefig(output_filename)\n plt.show()\n im = Image.open(output_filename)\n x = 100\n y = 50\n h = 500\n w = 2600\n region = im.crop((x, y, x + w, y + h))\n region.save(output_filename)\n else:\n plt.show()\n else:\n ax = fig.add_subplot(151)\n fig.set_facecolor('none')\n ax.imshow(imgpng)\n plt.axis(\"off\")\n\n ax0 = fig.add_subplot(152, projection='3d')\n ax0.scatter(incomplete[:, 0], incomplete[:, 1], incomplete[:, 2], color=incompleteColor, s=pointSize)\n ax0.scatter(gt[:, 0], gt[:, 1], gt[:, 2], color=recColor, s=pointSize)\n ax0.set_axis_off()\n\n ax1 = fig.add_subplot(153, projection='3d')\n 
ax1.scatter(incomplete[:, 0], incomplete[:, 1], incomplete[:, 2], color=incompleteColor, s=pointSize)\n ax1.scatter(pf_fakes[0][:, 0], pf_fakes[0][:, 1], pf_fakes[0][:, 2], color=recColor, s=pointSize)\n ax1.set_axis_off()\n\n ax2 = fig.add_subplot(154, projection='3d')\n ax2.scatter(incomplete[:, 0], incomplete[:, 1], incomplete[:, 2], color=incompleteColor, s=pointSize)\n ax2.scatter(pf_fakes[1][:, 0], pf_fakes[1][:, 1], pf_fakes[1][:, 2], color=recColor, s=pointSize)\n ax2.set_axis_off()\n\n ax3 = fig.add_subplot(155, projection='3d')\n ax3.scatter(incomplete[:, 0], incomplete[:, 1], incomplete[:, 2], color=incompleteColor, s=pointSize)\n ax3.scatter(my_fake[:, 0], my_fake[:, 1], my_fake[:, 2], color=recColor, s=pointSize)\n ax3.set_axis_off()\n\n ax0.view_init(elev, azim)\n ax1.view_init(elev, azim)\n ax2.view_init(elev, azim)\n\n fig.set_tight_layout(True)\n if opt.needSave:\n output_filename = os.path.join(\"/home/dream/study/four_result_images\",\n cat + \"_\" + filename + \"_\" + str(center_id) + \"_\" + view_id + \".png\")\n plt.savefig(output_filename)\n plt.show()\n im = Image.open(output_filename)\n x = 100\n y = 50\n h = 500\n w = 2300\n region = im.crop((x, y, x + w, y + h))\n region.save(output_filename)\n else:\n plt.show()\n\n\n\nincompleteColor='lightgray'\nrecColor='royalblue'\npointSize=55\ndef draw_point_cloud(incomplete,rec_missing, elev=0,azim=0,output_filename=None):\n \"\"\" points is a Nx3 numpy array \"\"\"\n fig = plt.figure(figsize=(24, 12), facecolor='w') #\n plt.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.9)\n\n\n ax0 = fig.add_subplot(111, projection='3d')\n ax0.scatter(incomplete[:, 0], incomplete[:, 1], incomplete[:, 2], color=incompleteColor, s=pointSize)\n ax0.scatter(rec_missing[:, 0], rec_missing[:, 1], rec_missing[:, 2], color=recColor, s=pointSize)\n ax0.set_axis_off()\n plt.title(\"GT\")\n\n\n plt.show()\n\ntest_dset = MyDataset(classification=True,three=opt.folding_decoder,\n class_choice=opt.class_choice, split='test',four_data=opt.four_data)\nassert test_dset\n# good:10749,2269,5320\n# bad:9958,9960,4305\nif __name__ == '__main__':\n\n no=True\n if no:\n missings = []\n #incomplete, gt, image, filename = test_dset.__getitem__(opt.index)\n gt=torch.rand((4,6))\n missings.append(gt)\n compare_my_and_pf(opt.class_choice) # (cat,obj_id,imageview,pccenter):\n\n else:\n # dir_path = '/home/dream/study/codes/PCCompletion/datasets/dataFromPFNet/shapenet_part/shapenet_part/datagenerateForFour/03636649'\n dir_path='/home/dream/study/codes/PCCompletion/datasets/dataFromPFNet/shapenet_part/shapenet_part/datagenerateForFour/04379243'\n filename1 = '/gt4ceba450382724f7861fea89ab9e083a.pts'\n filename2 = '/incomplete4ceba450382724f7861fea89ab9e083a.pts'\n # complete = np.loadtxt(\"/home/dream/study/codes/PCCompletion/新建文件夹chair/test/4incomplete1b81441b7e597235d61420a53a0cb96d.pts\").astype(np.float32)\n # incomplete = np.loadtxt(\"/home/dream/study/codes/PCCompletion/新建文件夹chair/test/4gt1b81441b7e597235d61420a53a0cb96d.pts\").astype(np.float32)\n # incompletechair=np.loadtxt(\"/home/dream/study/0incomplete1a00aa6b75362cc5b324368d54a7416f.pts\")\n complete = '/home/dream/study/codes/PCCompletion/datasets/dataFromPFNet/shapenet_part/shapenet_part/shapenetcore_partanno_segmentation_benchmark_v0/04379243/points/1a00aa6b75362cc5b324368d54a7416f.pts'\n gt = np.loadtxt(dir_path + filename1)\n incomplete = np.loadtxt(dir_path + filename2)\n draw_point_cloud(gt, incomplete)\n",
"import sys\nsys.path.append('/home/dream/study/codes/PCCompletion/PFNet/PF-Net-Point-Fractal-Network')\nimport argparse\nimport random\nimport torch\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nfrom completion_net import myNet\nfrom myutils import PointLoss,EMDLoss,RepulsionLoss\nfrom tensorboardX import SummaryWriter\nfrom my_discriminator import myDiscriminator\nfrom pix3d.pix3d_dataset import Pix3DSingleDataset\nimport time\nimport os\n#python finetune_my_three.py --tensorboard=/home/dream/study/codes/PCCompletion/best_three_exp/pix3d/table/tensorboard --checkpoint=/home/dream/study/codes/PCCompletion/best_three_exp/pix3d/table/checkpoint --class_choice=Table --netG=/home/dream/study/codes/PCCompletion/best_three_exp/pix3d/table/checkpoint/point_netGshapenet.pth --Rep_choose=1 --alpha=0.3\nparser = argparse.ArgumentParser() # create an argumentparser object\nparser.add_argument('--workers', type=int,default=16, help='number of data loading workers')\nparser.add_argument('--batchSize', type=int, default=16, help='input batch size')\nparser.add_argument('--class_choice', default='Lamp', help=\"which class choose to train\")\nparser.add_argument('--niter', type=int, default=201, help='number of epochs to train for')\nparser.add_argument('--weight_decay', type=float, default=0.001)\nparser.add_argument('--learning_rate', default=0.0002, type=float, help='learning rate in training')\nparser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')\nparser.add_argument('--cuda', type = bool, default = True, help='enables cuda')\nparser.add_argument('--attention_encoder', type = int, default = 1, help='enables cuda')\nparser.add_argument('--folding_decoder', type = int, default = 1, help='enables cuda')\nparser.add_argument('--pointnetplus_encoder', type = int, default = 0, help='enables cuda')\nparser.add_argument('--four_data', type = int, default = 0, help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')\nparser.add_argument('--D_choose',type=int, default=0, help='0 not use D-net,1 use D-net')\nparser.add_argument('--Rep_choose',type=int, default=1, help='0 not use Rep Loss,1 use Rep Loss')\nparser.add_argument('--loss_emd',type=int, default=0, help='0 use cd Loss,1 use emd Loss')\nparser.add_argument('--netG', default='', help=\"path to netG (to continue training)\")\nparser.add_argument('--netD', default='', help=\"path to netD (to continue training)\")\nparser.add_argument('--checkpointDir', default='', help=\"path to netG (to continue training)\")\nparser.add_argument('--tensorboardDir', default='', help=\"path to netD (to continue training)\")\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('--drop',type=float,default=0, help='use when charpter 4 in fc decoder')\nparser.add_argument('--dropout_feature',type=float,default=0, help='use when charpter 4 folding decoder')\nparser.add_argument('--alpha',type=float,default=0.3, help='rep loss weight')\nparser.add_argument('--radius',type=float,default=0.07, help='radius of rep loss')\nparser.add_argument('--num_scales',type=int,default=3,help='number of scales')\nparser.add_argument('--step_size',type=int,default=40,help='LR step size')\nparser.add_argument('--gamma',type=float,default=0.2,help='LR gamma')\nparser.add_argument('--point_scales_list',type=list,default=[2048,1024,512],help='number of 
points in each scales')\nparser.add_argument('--each_scales_size',type=int,default=1,help='each scales size')\nparser.add_argument('--wtl2',type=float,default=0.95,help='weight for loss 0 means do not use else use with this weight')\nopt = parser.parse_args()\nprint(opt)\nif opt.pointnetplus_encoder==1:\n torch.backends.cudnn.enabled = False\n\nroot_path='/home/dream/study/codes/PCCompletion/PFNet/PF-Net-Point-Fractal-Network/exp/best_three/02958343'\n\nprint('Writing command-line arguments to a file')\nf_all=open(os.path.join(opt.checkpointDir,'train_param.txt'), 'w')\nf_all.write(\"\\n\"+\"workers\"+\" \"+str(opt.workers))\nf_all.write(\"\\n\"+\"batchSize\"+\" \"+str(opt.batchSize))\nf_all.write(\"\\n\"+\"attention_encoder\"+\" \"+str(opt.attention_encoder))\nf_all.write(\"\\n\"+\"folding_decoder\"+\" \"+str(opt.folding_decoder))\nf_all.write(\"\\n\"+\"pointnetplus_encoder\"+\" \"+str(opt.pointnetplus_encoder))\nf_all.write(\"\\n\"+\"class_choice\"+\" \"+str(opt.class_choice))\nf_all.write(\"\\n\"+\"D_choose\"+\" \"+str(opt.D_choose))\nf_all.write(\"\\n\"+\"Rep_choose\"+\" \"+str(opt.Rep_choose))\nf_all.write(\"\\n\"+\"alpha\"+\" \"+str(opt.alpha))\nf_all.write(\"\\n\"+\"step_size\"+\" \"+str(opt.step_size))\nf_all.write(\"\\n\"+\"gamma\"+\" \"+str(opt.gamma))\nf_all.write(\"\\n\"+\"drop\"+\" \"+str(opt.drop))\nf_all.write(\"\\n\"+\"dropout_feature\"+\" \"+str(opt.dropout_feature))\nf_all.write(\"\\n\"+\"learning_rate\"+\" \"+str(opt.learning_rate))\nf_all.write(\"\\n\"+\"four_data\"+\" \"+str(opt.four_data))\nf_all.write(\"\\n\"+\"loss_emd\"+\" \"+str(opt.loss_emd))\n\n\ncontinueLast=False\nresume_epoch=0\nweight_decay=0.001\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\ncudnn.benchmark = True # promote\nUSE_CUDA=opt.cuda\n\n# weight init\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv2d\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"Conv1d\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm2d\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n elif classname.find(\"BatchNorm1d\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\nMLP_dimsG = (3, 64, 64, 64, 128, 1024)# all Chapter 3 experiments use the larger point dimensions\nFC_dimsG = (1024, 1024, 512)\nif opt.class_choice=='Lamp' or opt.class_choice=='Car' :\n if opt.pointnetplus_encoder==0 and opt.folding_decoder==0:# Chapter 4: folding experiments on the Car/Lamp datasets use the smaller point dimensions\n MLP_dimsG = (3, 64, 64, 64, 128, 512)\n FC_dimsG = (512, 512, 512)\n# MLP_dimsG = (3, 64, 64, 64, 128, 512)\n# FC_dimsG = (512, 512, 512)\nf_all.write(\"\\n\"+\"MLP_dimsG\"+\" \"+str(MLP_dimsG))\nf_all.write(\"\\n\"+\"FC_dimsG\"+\" \"+str(FC_dimsG))\n\nf_all.close()\nMLP_dimsD = (3, 64, 64, 64, 128, 1024)\nFC_dimsD = (1024, 1024, 512)\ngrid_dims = (16, 16) # default 45\nFolding1_dims = (514, 512, 512, 3) # for foldingnet\nFolding2_dims = (515, 512, 512, 3) # for foldingnet\nWeight1_dims = (16 * 16 + 512, 512, 512, 128) # for weight matrix estimation 45x45+512 = 2537\nWeight3_dims = (512 + 128, 1024, 1024, 256)\nknn = 48\nsigma = 0.008\nmyNet = myNet(3, 128, 128, MLP_dimsG, FC_dimsG, grid_dims, Folding1_dims, Folding2_dims, Weight1_dims, Weight3_dims,dropout=opt.drop,folding=opt.folding_decoder,dropout_feature=opt.dropout_feature,attention=opt.attention_encoder,pointnetplus=opt.pointnetplus_encoder)\nmyNetD=myDiscriminator(256,MLP_dimsD,FC_dimsD)\n\n\nprint(\"Let's use\", 
torch.cuda.device_count(), \"GPUs!\")\nmyNet = torch.nn.DataParallel(myNet)\nmyNet.to(device)\nmyNet.apply(weights_init_normal)\nmyNetD = torch.nn.DataParallel(myNetD)\nmyNetD.to(device)\nmyNetD.apply(weights_init_normal)\n\nif opt.netG != '' :\n myNet.load_state_dict(torch.load(opt.netG, map_location=lambda storage, location: storage)['state_dict'],strict=False)\n resume_epoch = torch.load(opt.netG)['epoch']\nif opt.netD != '' :\n myNetD.load_state_dict(torch.load(opt.netD,map_location=lambda storage, location: storage)['state_dict'],strict=False)\n resume_epoch = torch.load(opt.netD)['epoch']\n\nif opt.manualSeed is None:\n opt.manualSeed = random.randint(1, 10000)\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\nif opt.cuda:\n torch.cuda.manual_seed_all(opt.manualSeed)\n\n# load data\ndset = Pix3DSingleDataset(class_choice=opt.class_choice)\nassert dset\ndataloader = torch.utils.data.DataLoader(dset, batch_size=opt.batchSize,shuffle=True,num_workers = opt.workers,drop_last=True)\n\ncriterion = torch.nn.BCEWithLogitsLoss().to(device)\ncriterion_PointLoss = PointLoss().to(device)\nif opt.loss_emd==1:\n criterion_PointLoss = EMDLoss().to(device)\n print(\"Emd loss is used.\")\ncriterion_RepLoss=RepulsionLoss(alpha=opt.alpha,radius=opt.radius).to(device)\n\n# setup optimizer\noptimizerG = torch.optim.Adam(myNet.parameters(), lr=opt.learning_rate, betas=(0.9, 0.999), eps=1e-05, weight_decay=opt.weight_decay)\nschedulerG = torch.optim.lr_scheduler.StepLR(optimizerG, step_size=opt.step_size, gamma=opt.gamma)\noptimizerD = torch.optim.Adam(myNetD.parameters(), lr=opt.learning_rate*0.5,betas=(0.9, 0.999),eps=1e-05,weight_decay=opt.weight_decay)\nschedulerD = torch.optim.lr_scheduler.StepLR(optimizerD, step_size=opt.step_size, gamma=opt.gamma)\n\nreal_label = 1\nfake_label = 0\nresume_epoch=0\nnum_batch = len(dset) / opt.batchSize\nlabel = torch.FloatTensor(opt.batchSize).cuda()\nif __name__=='__main__':\n ###########################\n # G-NET and T-NET\n ##########################\n if opt.D_choose == 1:\n\n for epoch in range(resume_epoch, opt.niter):\n for i, data in enumerate(dataloader, 0):\n\n incomplete, gt, image, pcl_2025, image_path = data\n\n batch_size = incomplete.size()[0]\n\n incomplete = incomplete.to(device)\n\n gt = gt.to(device)\n image = image.to(device)\n\n incomplete = Variable(incomplete, requires_grad=True).cuda()\n image = Variable(image.float(), requires_grad=True).cuda()\n image = torch.squeeze(image, 1)\n label.resize_([batch_size, 1]).fill_(real_label)\n label = label.to(device)\n\n myNet = myNet.train()\n myNetD=myNetD.train()\n #########################\n # update d\n ############################\n myNetD.zero_grad()\n output=myNetD(torch.squeeze(gt, 1)) # input B*N*3\n errD_real = criterion(output, label)\n errD_real.backward()\n\n fake = myNet(incomplete, image)\n fake = torch.squeeze(fake, 1).cuda()\n label.data.fill_(fake_label)\n output = myNetD(fake.detach())\n errD_fake = criterion(output, label)\n errD_fake.backward()\n errD = errD_real + errD_fake\n optimizerD.step()\n #########################\n # update g\n ############################\n myNet.zero_grad()\n label.data.fill_(real_label)\n output = myNetD(torch.squeeze(fake, 1).cuda())\n errG_D = criterion(output, label.cuda())\n errG_l2=0\n CD_LOSS = criterion_PointLoss(torch.squeeze(fake, 1).cuda(), torch.squeeze(gt, 1).cuda())\n RepLoss=0\n errG_l2 = CD_LOSS\n if opt.Rep_choose==1:\n RepLoss = criterion_RepLoss(torch.squeeze(fake, 1).cuda())\n 
errG_l2+=RepLoss\n\n errG = (1 - opt.wtl2) * errG_D + opt.wtl2 * errG_l2\n errG.backward()\n optimizerG.step()\n writer = SummaryWriter(log_dir=opt.tensorboardDir)\n writer.add_scalar('cd_missing', CD_LOSS, num_batch * epoch + i)\n writer.add_scalar('repulsion', RepLoss, num_batch * epoch + i)\n writer.add_scalar('D_Loss', errD.data, num_batch * epoch + i)\n writer.add_scalar('GD_Loss', errG_D.data, num_batch * epoch + i)\n writer.add_scalar('GwithD_sum_loss', errG, num_batch * epoch + i)\n writer.add_scalar('GwithD_l2', errG_l2, num_batch * epoch + i)\n complete_pc = torch.cat([fake, incomplete], dim=1)\n complete_gt = torch.cat([gt, incomplete], dim=1)\n CD_LOSS_ALL = criterion_PointLoss(torch.squeeze(complete_pc, 1).cuda(), torch.squeeze(complete_gt, 1).cuda())\n CD_LOSS_ALL=CD_LOSS_ALL.data.cpu()\n writer.add_scalar('cd_all', CD_LOSS_ALL, num_batch * epoch + i)\n writer.add_scalar('lr', schedulerG.get_lr()[0], num_batch * epoch + i)\n writer.close()\n print('[%d/%d][%d/%d] [missing_cd/all_cd/rep/d_loss/GD_loss/GwithD_l2/GwithD_sum_loss]: %.4f / %.4f / %.4f / %.4f / %.4f / %.4f / %.4f '\n % (epoch, opt.niter, i, len(dataloader),\n CD_LOSS, CD_LOSS_ALL, RepLoss, errD.data,errG_D.data,errG_l2,errG))\n\n schedulerD.step()\n schedulerG.step()\n if epoch % 10 == 0:\n torch.save({'epoch': epoch + 1,\n 'state_dict': myNet.state_dict()},\n opt.checkpointDir+'/Trained_Model/point_netG' + str(epoch) + '.pth')\n torch.save({'epoch': epoch + 1,\n 'state_dict': myNetD.state_dict()},\n opt.checkpointDir+'/Trained_Model/point_netD' + str(epoch) + '.pth')\n\n else:\n\n # only g-net\n for epoch in range(resume_epoch, opt.niter):\n for i, data in enumerate(dataloader, 0):\n incomplete, gt, image, pcl_2025, image_path = data\n batch_size = incomplete.size()[0]\n incomplete = incomplete.to(device)\n gt = gt.to(device)\n image = image.to(device)\n incomplete = Variable(incomplete, requires_grad=True).cuda()\n image = Variable(image.float(), requires_grad=True).cuda()\n image = torch.squeeze(image, 1)\n\n myNet = myNet.train()\n myNet.zero_grad()\n fake = myNet(incomplete, image)\n ############################\n # (3) Update G network: maximize log(D(G(z)))\n ###########################\n\n CD_LOSS = criterion_PointLoss(torch.squeeze(fake, 1).cuda(), torch.squeeze(gt, 1).cuda())\n RepLoss = 0\n errG = CD_LOSS\n if opt.Rep_choose == 1:\n RepLoss = criterion_RepLoss(torch.squeeze(fake, 1))\n errG =errG+ RepLoss\n errG.backward()\n\n optimizerG.step()\n\n writer = SummaryWriter(log_dir=opt.tensorboardDir)\n writer.add_scalar('cd_missing', CD_LOSS, num_batch * epoch + i)\n writer.add_scalar('repulsion', RepLoss, num_batch * epoch + i)\n writer.add_scalar('GnoD_SumLoss', errG, num_batch * epoch + i)\n complete_pc = torch.cat([fake, incomplete], dim=1)\n complete_gt = torch.cat([gt, incomplete], dim=1)\n CD_LOSS_ALL = criterion_PointLoss(torch.squeeze(complete_pc, 1), torch.squeeze(complete_gt, 1))\n writer.add_scalar('cd_all', CD_LOSS_ALL, num_batch * epoch + i)\n writer.add_scalar('lr', schedulerG.get_lr()[0], num_batch * epoch + i)\n writer.close()\n print('[%d/%d][%d/%d] [missing_cd/all_cd/rep/all_loss]: %.4f / %.4f / %.4f / %.4f '\n % (epoch, opt.niter, i, len(dataloader),\n CD_LOSS, CD_LOSS_ALL,RepLoss, errG))\n\n schedulerG.step()\n\n if epoch % 5 == 0:\n torch.save({'epoch': epoch + 1,\n 'state_dict': myNet.state_dict()},\n opt.checkpointDir+'/point_netG' + str(epoch) + '.pth')"
] |
[
[
"torch.load",
"torch.cat",
"torch.manual_seed",
"torch.cuda.device_count",
"torch.nn.init.constant_",
"torch.utils.data.DataLoader",
"torch.unsqueeze",
"torch.autograd.Variable",
"torch.nn.BCEWithLogitsLoss",
"torch.FloatTensor",
"torch.nn.init.normal_",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.squeeze",
"torch.optim.lr_scheduler.StepLR"
],
[
"matplotlib.image.imsave",
"torch.Tensor",
"torch.from_numpy",
"torch.FloatTensor",
"numpy.mean",
"numpy.sum",
"numpy.loadtxt"
],
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"torch.autograd.Variable",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
],
[
"torch.load",
"torch.nn.init.constant_",
"torch.manual_seed",
"torch.cuda.device_count",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.autograd.Variable",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.init.normal_",
"torch.FloatTensor",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.squeeze",
"torch.optim.lr_scheduler.StepLR"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stefanDeveloper/bomberman
|
[
"20a424cca97cf8df6fb7c2ff4b41ce834031077d"
] |
[
"replay.py"
] |
[
"import pickle\nfrom time import sleep\n\nimport numpy as np\n\nimport settings as s\nfrom agents import Agent\nfrom environment import GenericWorld, WorldArgs\nfrom fallbacks import pygame\nfrom items import Coin\n\n\nclass ReplayWorld(GenericWorld):\n def __init__(self, args: WorldArgs):\n super().__init__(args)\n\n replay_file = args.replay\n self.logger.info(f'Loading replay file \"{replay_file}\"')\n self.replay_file = replay_file\n with open(replay_file, 'rb') as f:\n self.replay = pickle.load(f)\n if not 'n_steps' in self.replay:\n self.replay['n_steps'] = s.MAX_STEPS\n\n pygame.display.set_caption(f'{replay_file}')\n\n # Recreate the agents\n self.agents = [ReplayAgent(name, self.colors.pop())\n for (name, s, b, xy) in self.replay['agents']]\n self.new_round()\n\n def new_round(self):\n self.logger.info('STARTING REPLAY')\n\n # Bookkeeping\n self.step = 0\n self.bombs = []\n self.explosions = []\n self.running = True\n self.frame = 0\n\n # Game world and objects\n self.arena = np.array(self.replay['arena'])\n self.coins = []\n for xy in self.replay['coins']:\n if self.arena[xy] == 0:\n self.coins.append(Coin(xy, True))\n else:\n self.coins.append(Coin(xy, False))\n self.active_agents = [a for a in self.agents]\n for i, agent in enumerate(self.agents):\n agent.start_round()\n agent.x, agent.y = self.replay['agents'][i][-1]\n agent.total_score = 0\n\n def poll_and_run_agents(self):\n # Perform recorded agent actions\n perm = self.replay['permutations'][self.step - 1]\n for i in perm:\n a = self.active_agents[i]\n self.logger.debug(f'Repeating action from agent <{a.name}>')\n action = self.replay['actions'][a.name][self.step - 1]\n self.logger.info(f'Agent <{a.name}> chose action {action}.')\n self.perform_agent_action(a, action)\n\n def time_to_stop(self):\n time_to_stop = super().time_to_stop()\n if self.step == self.replay['n_steps']:\n self.logger.info('Replay ends here, wrap up round')\n time_to_stop = True\n return time_to_stop\n\n def end_round(self):\n super().end_round()\n if self.running:\n self.running = False\n # Wait in case there is still a game step running\n sleep(self.args.update_interval)\n else:\n self.logger.warning('End-of-round requested while no round was running')\n\n self.logger.debug('Setting ready_for_restart_flag')\n self.ready_for_restart_flag.set()\n\n\nclass ReplayAgent(Agent):\n \"\"\"\n Agents class firing off a predefined sequence of actions.\n \"\"\"\n\n def __init__(self, name, color):\n \"\"\"Recreate the agent as it was at the beginning of the original game.\"\"\"\n super().__init__(color, name, None, False, None)\n\n def setup(self):\n pass\n\n def act(self, game_state):\n pass\n\n def wait_for_act(self):\n return 0, self.actions.popleft()\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shalijiang/bo
|
[
"af13f0a38b579ab504f49a01f1ced13532a3ad49",
"af13f0a38b579ab504f49a01f1ced13532a3ad49",
"af13f0a38b579ab504f49a01f1ced13532a3ad49"
] |
[
"botorch/acquisition/monte_carlo.py",
"enbo/util/test_rollout.py",
"enbo/util/glasses.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nr\"\"\"\nBatch acquisition functions using the reparameterization trick in combination\nwith (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and\n[Wilson2017reparam]_\n\n.. [Rezende2014reparam]\n D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and\n approximate inference in deep generative models. ICML 2014.\n\n.. [Wilson2017reparam]\n J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.\n The reparameterization trick for acquisition functions. ArXiv 2017.\n\"\"\"\n\nimport math\nfrom abc import ABC, abstractmethod\nfrom typing import Optional, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom ..exceptions.errors import UnsupportedError\nfrom ..models.model import Model\nfrom ..sampling.samplers import MCSampler, SobolQMCNormalSampler\nfrom ..utils.transforms import (\n concatenate_pending_points,\n match_batch_shape,\n t_batch_mode_transform,\n)\nfrom .acquisition import AcquisitionFunction\nfrom .objective import IdentityMCObjective, MCAcquisitionObjective\nfrom .utils import prune_inferior_points\n\n\nclass MCAcquisitionFunction(AcquisitionFunction, ABC):\n r\"\"\"Abstract base class for Monte-Carlo based batch acquisition functions.\"\"\"\n\n def __init__(\n self,\n model: Model,\n sampler: Optional[MCSampler] = None,\n objective: Optional[MCAcquisitionObjective] = None,\n X_pending: Optional[Tensor] = None,\n ) -> None:\n r\"\"\"Constructor for the MCAcquisitionFunction base class.\n\n Args:\n model: A fitted model.\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated.\n \"\"\"\n super().__init__(model=model)\n if sampler is None:\n sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)\n self.add_module(\"sampler\", sampler)\n if objective is None:\n if model.num_outputs != 1:\n raise UnsupportedError(\n \"Must specify an objective when using a multi-output model.\"\n )\n objective = IdentityMCObjective()\n elif not isinstance(objective, MCAcquisitionObjective):\n raise UnsupportedError(\n \"Only objectives of type MCAcquisitionObjective are supported for \"\n \"MC acquisition functions.\"\n )\n self.add_module(\"objective\", objective)\n self.set_X_pending(X_pending)\n\n @abstractmethod\n def forward(self, X: Tensor) -> Tensor:\n r\"\"\"Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim\n design points each, and returns a one-dimensional Tensor with\n `(b)` elements. 
Should utilize the result of set_X_pending as needed\n to account for pending function evaluations.\n \"\"\"\n pass # pragma: no cover\n\n\nclass qExpectedImprovement(MCAcquisitionFunction):\n r\"\"\"MC-based batch Expected Improvement.\n\n This computes qEI by\n (1) sampling the joint posterior over q points\n (2) evaluating the improvement over the current best for each sample\n (3) maximizing over q\n (4) averaging over the samples\n\n `qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`\n\n Example:\n >>> model = SingleTaskGP(train_X, train_Y)\n >>> best_f = train_Y.max()[0]\n >>> sampler = SobolQMCNormalSampler(1000)\n >>> qEI = qExpectedImprovement(model, best_f, sampler)\n >>> qei = qEI(test_X)\n \"\"\"\n\n def __init__(\n self,\n model: Model,\n best_f: Union[float, Tensor],\n sampler: Optional[MCSampler] = None,\n objective: Optional[MCAcquisitionObjective] = None,\n X_pending: Optional[Tensor] = None,\n ) -> None:\n r\"\"\"q-Expected Improvement.\n\n Args:\n model: A fitted model.\n best_f: The best objective value observed so far (assumed noiseless).\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated. Concatenated into X upon\n forward call. Copied and set to have no gradient.\n \"\"\"\n super().__init__(\n model=model, sampler=sampler, objective=objective, X_pending=X_pending\n )\n if not torch.is_tensor(best_f):\n best_f = torch.tensor(float(best_f))\n self.register_buffer(\"best_f\", best_f)\n\n @concatenate_pending_points\n @t_batch_mode_transform()\n def forward(self, X: Tensor) -> Tensor:\n r\"\"\"Evaluate qExpectedImprovement on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Expected Improvement values at the given\n design points `X`.\n \"\"\"\n posterior = self.model.posterior(X)\n samples = self.sampler(posterior)\n obj = self.objective(samples)\n obj = (obj - self.best_f.unsqueeze(-1)).clamp_min(0)\n q_ei = obj.max(dim=-1)[0].mean(dim=0)\n return q_ei\n\n\nclass qNoisyExpectedImprovement(MCAcquisitionFunction):\n r\"\"\"MC-based batch Noisy Expected Improvement.\n\n This function does not assume a `best_f` is known (which would require\n noiseless observations). Instead, it uses samples from the joint posterior\n over the `q` test points and previously observed points. The improvement\n over previously observed points is computed for each sample and averaged.\n\n `qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where\n `(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`\n\n Example:\n >>> model = SingleTaskGP(train_X, train_Y)\n >>> sampler = SobolQMCNormalSampler(1000)\n >>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)\n >>> qnei = qNEI(test_X)\n \"\"\"\n\n def __init__(\n self,\n model: Model,\n X_baseline: Tensor,\n sampler: Optional[MCSampler] = None,\n objective: Optional[MCAcquisitionObjective] = None,\n X_pending: Optional[Tensor] = None,\n prune_baseline: bool = False,\n ) -> None:\n r\"\"\"q-Noisy Expected Improvement.\n\n Args:\n model: A fitted model.\n X_baseline: A `r x d`-dim Tensor of `r` design points that have\n already been observed. 
These points are considered as the\n potential best design point.\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`.\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated. Concatenated into X upon\n forward call. Copied and set to have no gradient.\n prune_baseline: If True, remove points in `X_baseline` that are\n highly unlikely to be the best point. This can significantly\n improve performance and is generally recommended. In order to\n customize pruning parameters, instead manually call\n `botorch.acquisition.utils.prune_inferior_points` on `X_baseline`\n before instantiating the acquisition function.\n \"\"\"\n super().__init__(\n model=model, sampler=sampler, objective=objective, X_pending=X_pending\n )\n if prune_baseline:\n X_baseline = prune_inferior_points(\n model=model, X=X_baseline, objective=objective\n )\n self.register_buffer(\"X_baseline\", X_baseline)\n\n @concatenate_pending_points\n @t_batch_mode_transform()\n def forward(self, X: Tensor) -> Tensor:\n r\"\"\"Evaluate qNoisyExpectedImprovement on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Noisy Expected Improvement values at the given\n design points `X`.\n \"\"\"\n q = X.shape[-2]\n X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)\n # TODO (T41248036): Implement more efficient way to compute posterior\n # over both training and test points in GPyTorch\n posterior = self.model.posterior(X_full)\n samples = self.sampler(posterior)\n obj = self.objective(samples)\n diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0]\n return diffs.clamp_min(0).mean(dim=0)\n\n\nclass qProbabilityOfImprovement(MCAcquisitionFunction):\n r\"\"\"MC-based batch Probability of Improvement.\n\n Estimates the probability of improvement over the current best observed\n value by sampling from the joint posterior distribution of the q-batch.\n MC-based estimates of a probability involves taking expectation of an\n indicator function; to support auto-differntiation, the indicator is\n replaced with a sigmoid function with temperature parameter `tau`.\n\n `qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`\n\n Example:\n >>> model = SingleTaskGP(train_X, train_Y)\n >>> best_f = train_Y.max()[0]\n >>> sampler = SobolQMCNormalSampler(1000)\n >>> qPI = qProbabilityOfImprovement(model, best_f, sampler)\n >>> qpi = qPI(test_X)\n \"\"\"\n\n def __init__(\n self,\n model: Model,\n best_f: Union[float, Tensor],\n sampler: Optional[MCSampler] = None,\n objective: Optional[MCAcquisitionObjective] = None,\n X_pending: Optional[Tensor] = None,\n tau: float = 1e-3,\n ) -> None:\n r\"\"\"q-Probability of Improvement.\n\n Args:\n model: A fitted model.\n best_f: The best objective value observed so far (assumed noiseless).\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. 
Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated. Concatenated into X upon\n forward call. Copied and set to have no gradient.\n tau: The temperature parameter used in the sigmoid approximation\n of the step function. Smaller values yield more accurate\n approximations of the function, but result in gradients\n estimates with higher variance.\n \"\"\"\n super().__init__(\n model=model, sampler=sampler, objective=objective, X_pending=X_pending\n )\n if not torch.is_tensor(best_f):\n best_f = torch.tensor(float(best_f))\n self.register_buffer(\"best_f\", best_f)\n if not torch.is_tensor(tau):\n tau = torch.tensor(float(tau))\n self.register_buffer(\"tau\", tau)\n\n @concatenate_pending_points\n @t_batch_mode_transform()\n def forward(self, X: Tensor) -> Tensor:\n r\"\"\"Evaluate qProbabilityOfImprovement on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Probability of Improvement values at the given\n design points `X`.\n \"\"\"\n posterior = self.model.posterior(X)\n samples = self.sampler(posterior)\n obj = self.objective(samples)\n max_obj = obj.max(dim=-1)[0]\n val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0)\n return val\n\n\nclass qSimpleRegret(MCAcquisitionFunction):\n r\"\"\"MC-based batch Simple Regret.\n\n Samples from the joint posterior over the q-batch and computes the simple\n regret.\n\n `qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`\n\n Example:\n >>> model = SingleTaskGP(train_X, train_Y)\n >>> sampler = SobolQMCNormalSampler(1000)\n >>> qSR = qSimpleRegret(model, sampler)\n >>> qsr = qSR(test_X)\n \"\"\"\n\n @concatenate_pending_points\n @t_batch_mode_transform()\n def forward(self, X: Tensor) -> Tensor:\n r\"\"\"Evaluate qSimpleRegret on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Simple Regret values at the given design\n points `X`.\n \"\"\"\n posterior = self.model.posterior(X)\n samples = self.sampler(posterior)\n obj = self.objective(samples)\n val = obj.max(dim=-1)[0].mean(dim=0)\n return val\n\n\nclass qUpperConfidenceBound(MCAcquisitionFunction):\n r\"\"\"MC-based batch Upper Confidence Bound.\n\n Uses a reparameterization to extend UCB to qUCB for q > 1 (See Appendix A\n of [Wilson2017reparam].)\n\n `qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`\n and `f(X)` has distribution `N(mu, Sigma)`.\n\n Example:\n >>> model = SingleTaskGP(train_X, train_Y)\n >>> sampler = SobolQMCNormalSampler(1000)\n >>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)\n >>> qucb = qUCB(test_X)\n \"\"\"\n\n def __init__(\n self,\n model: Model,\n beta: float,\n sampler: Optional[MCSampler] = None,\n objective: Optional[MCAcquisitionObjective] = None,\n X_pending: Optional[Tensor] = None,\n ) -> None:\n r\"\"\"q-Upper Confidence Bound.\n\n Args:\n model: A fitted model.\n beta: Controls tradeoff between mean and standard deviation in UCB.\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. 
Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated. Concatenated into X upon\n forward call. Copied and set to have no gradient.\n \"\"\"\n super().__init__(\n model=model, sampler=sampler, objective=objective, X_pending=X_pending\n )\n self.beta_prime = math.sqrt(beta * math.pi / 2)\n\n @concatenate_pending_points\n @t_batch_mode_transform()\n def forward(self, X: Tensor) -> Tensor:\n r\"\"\"Evaluate qUpperConfidenceBound on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Upper Confidence Bound values at the given\n design points `X`.\n \"\"\"\n posterior = self.model.posterior(X)\n samples = self.sampler(posterior)\n obj = self.objective(samples)\n mean = obj.mean(dim=0)\n ucb_samples = mean + self.beta_prime * (obj - mean).abs()\n return ucb_samples.max(dim=-1)[0].mean(dim=0)\n",
"import torch\nfrom torch import Tensor\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom botorch.acquisition.analytic import ExpectedImprovement\nfrom botorch.models import SingleTaskGP\nfrom rollout import rollout, rollout_quad\nimport warnings\nimport time\nimport pickle\n\nwarnings.filterwarnings(\"ignore\")\n\nbound = 1.0\nn = 50\nx = torch.linspace(-bound, bound, n).view(-1, 1)\ntrain_idx = [np.round(n / 3), np.round(n * 2 / 3)]\ntrain_idx = [0, n - 1]\ntrain_x = x[train_idx]\n\nmodel = SingleTaskGP(train_x, Tensor([0, 0]))\n\nmodel.covar_module.base_kernel.lengthscale = 0.4\nmodel.covar_module.outputscale = 1.0\nmodel.likelihood.noise = 0.0001\nmodel.eval()\ny_prior = model(x)\ntorch.manual_seed(0)\ny = y_prior.sample()\n\ntrain_y = y[train_idx]\ny_best = torch.max(train_y).item()\nprint(train_x)\nprint(train_y)\nmodel.set_train_data(train_x, train_y, strict=False)\nmodel.eval()\ny_post = model(x)\n\nf, ax = plt.subplots(2, 1, figsize=(6, 12))\nwith torch.no_grad():\n # Initialize plot\n\n # Get upper and lower confidence bounds\n lower, upper = y_post.confidence_region()\n # Plot training data as black stars\n ax[0].plot(x.squeeze().numpy(), y.numpy(), \"r\")\n ax[0].plot(train_x.squeeze().numpy(), train_y.numpy(), \"k*\")\n # Plot predictive means as blue line\n ax[0].plot(x.squeeze().numpy(), y_post.mean.detach().numpy(), \"b\")\n # Shade between the lower and upper confidence bounds\n ax[0].fill_between(x.squeeze().numpy(), lower.numpy(), upper.numpy(), alpha=0.5)\n ax[0].set_ylim([-3, 3])\n ax[0].legend([\"True func\", \"Observed Data\", \"Mean\", \"Confidence\"])\n\n## compute EI\nexpected_improvement = ExpectedImprovement(model, best_f=y_best)\n\nwith torch.no_grad():\n ei_values = expected_improvement(x.unsqueeze(1))\n ax[1].plot(x.squeeze().numpy(), ei_values.numpy(), label=\"EI\")\n\n## compute two-step EI\ntwo_step_ei = np.zeros((3, n))\ntimes = np.zeros((3, n))\nnum_y_samples = 100\nsamples, weights = np.polynomial.hermite.hermgauss(num_y_samples)\nfor i in range(n):\n print(\"point\", i)\n this_x = x[i]\n\n # start = time.time()\n # two_step_ei[0, i] = rollout(this_x, model,\n # best_f=y_best,\n # bounds=Tensor([-bound, bound]).view(-1, 1),\n # horizon=2,\n # quadrature='qmc',\n # num_y_samples=num_y_samples,\n # )\n # end = time.time()\n # times[0, i] = end - start\n # print('qmc', end - start)\n\n start = time.time()\n two_step_ei[1, i] = rollout(\n this_x,\n model,\n best_f=y_best,\n bounds=Tensor([-bound, bound]).view(-1, 1),\n horizon=2,\n x_grid=x,\n idx=i,\n quadrature=(samples, weights),\n num_y_samples=num_y_samples,\n )\n end = time.time()\n times[1, i] = end - start\n print(\"gauss-hermite\", end - start)\n\n # start = time.time()\n # two_step_ei[2, i] = rollout_quad(this_x, model,\n # best_f=y_best,\n # bounds=Tensor([-bound, bound]).view(-1, 1),\n # horizon=2,\n # )\n # end = time.time()\n # times[2, i] = end - start\n # print('adap-gauss', end - start)\n\nmean_time = times.mean(axis=1)\nwith torch.no_grad():\n # ax[1].plot(x.squeeze().numpy(), two_step_ei[0], label='two-step EI qmc %.2fs' % mean_time[0])\n ax[1].plot(\n x.squeeze().numpy(),\n two_step_ei[1],\n label=\"two-step EI gauss-hermite %.2fs\" % mean_time[1],\n )\n # ax[1].plot(x.squeeze().numpy(), two_step_ei[2], label='two-step EI adap-gauss %.2fs' % mean_time[2])\nax[1].legend()\n\nprint(times.mean(axis=1))\nwith open(\"rollout_test_results\", \"wb\") as f:\n pickle.dump({\"time\": times, \"ei\": two_step_ei}, f)\nplt.show()\n",
"import torch\nfrom botorch.models.model import Model\nfrom botorch.acquisition import AcquisitionFunction\nfrom botorch.acquisition.monte_carlo import qExpectedImprovement\nfrom botorch.acquisition.analytic import ExpectedImprovement\nfrom torch import Tensor\nfrom botorch.optim import optimize_acqf\nfrom botorch.sampling.samplers import SobolQMCNormalSampler\nfrom typing import Optional\n\n\ndef glasses(\n x: Tensor,\n model: Model,\n bounds: Tensor,\n L: float,\n y0: float,\n horizon: int = 1,\n num_mc_samples: int = 10000,\n sample_seed: Optional[int] = None,\n):\n \"\"\"\n Compute the GLASSES acquisition function proposed in Gonzalez et al.,\n Batch Bayesian Optimization via Local Penalization, AISTATS 2016.\n\n Assume maximization problem.\n\n Input:\n x: Tensor, point to evaluate [currently only support evaluating a single point]\n model: GP model, containing current observations\n bounds: Tensor, optimization domain,\n e.g., [-1, 1]^2 for a 2D function\n L: float, an estimate of the Lipschitz constant of the target function\n y0: float, current best observed value\n horizon: integer, number of steps to lookahead,\n e.g., the remaining budget\n num_mc_samples: integer,\n number of quasi Monte Carlo samples to estimate qEI\n sample_seed: None or int, random seed for sampling y\n\n Output:\n a scalar, the GLASSES utility of x\n \"\"\"\n X = predict_future_locations(x, model, horizon, bounds, L, y0)\n qmc_sampler = SobolQMCNormalSampler(num_samples=num_mc_samples, seed=sample_seed)\n qEI = qExpectedImprovement(model=model, best_f=y0, sampler=qmc_sampler)\n return qEI(X)\n\n\ndef predict_future_locations(\n x, model, horizon, bounds, L, M, num_restarts=2, raw_samples=100, options=None\n):\n X = [x] + [0] * (horizon - 1)\n ei = ExpectedImprovement(model=model, best_f=M)\n\n for i in range(1, horizon):\n\n def penalizer(x):\n res = 1\n for j in range(i):\n res = res * local_penalizer(x, X[j], model, L, M)\n return res\n\n def penalized_ei(x):\n return ei(x) * penalizer(x)\n\n x_i, _ = optimize_acqf(\n acq_function=penalized_ei,\n bounds=bounds,\n q=1,\n num_restarts=num_restarts,\n raw_samples=raw_samples,\n options=options or {},\n )\n X[i] = x_i.squeeze()\n return torch.stack(X)\n\n\ndef local_penalizer(x, x_j, model, L, M):\n threshold = M - L * (x - x_j).norm()\n posterior = model.posterior(x_j.unsqueeze(0))\n normal = torch.distributions.Normal(\n posterior.mean.item(), posterior.variance.sqrt().item()\n )\n return 1 - normal.cdf(threshold)\n\n\nclass MeanGradientL2(AcquisitionFunction):\n @torch.enable_grad()\n def forward(self, x):\n x.requires_grad = True\n posterior_mean = self.model.posterior(x).mean\n x_grad = torch.autograd.grad(\n posterior_mean.squeeze(),\n x,\n grad_outputs=torch.Tensor([1.0] * len(posterior_mean)),\n create_graph=True,\n )[0]\n return x_grad.norm(dim=-1).squeeze()\n\n\ndef estimate_lipschitz_constant(\n model, bounds, num_restarts=20, raw_samples=512, options=None\n):\n # return 1.0 # mock test\n mean_gradient_l2 = MeanGradientL2(model)\n _, L = optimize_acqf(\n acq_function=mean_gradient_l2,\n bounds=bounds,\n q=1,\n num_restarts=num_restarts,\n raw_samples=raw_samples,\n options=options or {},\n )\n\n return L\n"
] |
[
[
"torch.sigmoid",
"torch.is_tensor"
],
[
"torch.linspace",
"torch.max",
"torch.Tensor",
"torch.manual_seed",
"matplotlib.pyplot.subplots",
"numpy.polynomial.hermite.hermgauss",
"numpy.round",
"torch.no_grad",
"matplotlib.pyplot.show",
"numpy.zeros"
],
[
"torch.stack",
"torch.enable_grad"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
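test_rollout.py in the record above integrates over the GP's Gaussian posterior with numpy.polynomial.hermite.hermgauss. A self-contained sketch of that quadrature rule follows; the helper name is invented, and scipy is used only for the closed-form sanity check.

import numpy as np
from scipy.stats import norm

def gauss_hermite_expectation(g, mu, sigma, num_samples=100):
    # E[g(Y)] for Y ~ N(mu, sigma^2) via the substitution y = mu + sqrt(2)*sigma*x:
    # E[g(Y)] ~= sum_i w_i * g(y_i) / sqrt(pi)
    x, w = np.polynomial.hermite.hermgauss(num_samples)
    return np.sum(w * g(mu + np.sqrt(2.0) * sigma * x)) / np.sqrt(np.pi)

# sanity check against the closed-form expected improvement for a N(0, 1)
# posterior and incumbent best_f = 0.5 (maximization convention)
best_f = 0.5
ei_quad = gauss_hermite_expectation(lambda y: np.maximum(y - best_f, 0.0), 0.0, 1.0)
ei_exact = -best_f * norm.cdf(-best_f) + norm.pdf(best_f)
print(round(float(ei_quad), 4), round(float(ei_exact), 4))  # both ~0.198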
jscastanoc/wyrm
|
[
"c3593efe3cb5507ac525be4d650df7ce504aab00"
] |
[
"test/test_blockbuffer.py"
] |
[
"from __future__ import division\n\nimport unittest\n\nimport numpy as np\n\nfrom wyrm.types import Data, BlockBuffer\nfrom wyrm.processing import append_cnt\nfrom functools import reduce\n\n\nclass TestBlockBuffer(unittest.TestCase):\n\n def setUp(self):\n self.empty_dat = Data(np.array([]), [], [], [])\n self.dat_1 = Data(np.array([0, 0])[np.newaxis, :], [np.array([0]), np.array(['ch1', 'ch2'])], ['time', 'channel'], ['ms', '#'])\n self.dat_1.fs = 1000\n self.dat_1.markers = [[0, 'x']]\n self.dat_5 = reduce(append_cnt, [self.dat_1 for i in range(5)])\n\n def test_append_empty(self):\n \"\"\"Appending several emtpy dats must not modify the Block Buffer.\"\"\"\n b = BlockBuffer(5)\n b.append(self.empty_dat)\n b.append(self.empty_dat)\n b.append(self.empty_dat)\n b.append(self.empty_dat)\n b.append(self.empty_dat)\n b.append(self.empty_dat)\n self.assertEqual(self.empty_dat, b.get())\n\n def test_append_until_full(self):\n \"\"\"Appending fractions of block_length, must accumulate in the buffer until block_length is reached.\"\"\"\n b = BlockBuffer(5)\n for i in range(4):\n b.append(self.dat_1)\n ret = b.get()\n self.assertEqual(self.empty_dat, ret)\n b.append(self.dat_1)\n ret = b.get()\n self.assertEqual(self.dat_5, ret)\n\n def test_append_with_markers(self):\n \"\"\"Check if markers are handled correctly.\"\"\"\n markers = [[i, 'x'] for i in range(5)]\n b = BlockBuffer(5)\n for i in range(4):\n b.append(self.dat_1)\n ret = b.get()\n self.assertEqual(self.empty_dat, ret)\n b.append(self.dat_1)\n ret = b.get()\n self.assertEqual(ret.markers, markers)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
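The tests above pin down BlockBuffer's contract: appends accumulate, and get() releases data only once a full block_length is available. A toy reimplementation of that contract (not wyrm's actual class; all names here are illustrative) makes the behavior concrete:

class MiniBlockBuffer:
    def __init__(self, block_length):
        self.block_length = block_length
        self.samples = []

    def append(self, samples):
        self.samples.extend(samples)

    def get(self):
        # release only whole multiples of block_length; buffer the remainder
        n = (len(self.samples) // self.block_length) * self.block_length
        out, self.samples = self.samples[:n], self.samples[n:]
        return out

b = MiniBlockBuffer(5)
for i in range(4):
    b.append([i])
print(b.get())  # [] -- four samples is less than one full block
b.append([4])
print(b.get())  # [0, 1, 2, 3, 4] -- one complete block is released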
suqi/gym-sandbox
|
[
"3e8f4139796953c0e81b5669ac9b396f306c45a5"
] |
[
"test_algos/MADDPG_Morvan/a3c_commnet.py"
] |
[
"import multiprocessing\nimport threading\nimport tensorflow as tf\nimport numpy as np\nimport gym\nimport os\nimport shutil\nimport matplotlib.pyplot as plt\nimport gym_sandbox\nimport time\nimport multiprocessing\nGAME = 'police-commnet-discret-2agent-v0'\n\nOUTPUT_GRAPH = True\nLOG_DIR = './.tf-log'\nN_WORKERS = 4 #multiprocessing.cpu_count()\nMAX_GLOBAL_EP = 30000\nGLOBAL_NET_SCOPE = 'Global_Net'\nUPDATE_GLOBAL_ITER = 20\nGAMMA = 0.9\nENTROPY_BETA = 0.001\nLR_A = 0.001 # learning rate for actor\nLR_C = 0.001 # learning rate for critic\nGLOBAL_RUNNING_R = []\nGLOBAL_EP = 0\n\nRUN_MODE = \"training\" # execution\n\nenv = gym.make(GAME)\n_s = env.reset()\nN_S = _s.shape[0] # env.observation_space.shape[0]\nN_A = env.action_space.n\nAGENT_NUM = 2\n\n\nclass ACNet(object):\n def __init__(self, scope, globalAC=None):\n\n if scope == GLOBAL_NET_SCOPE: # get global network\n with tf.variable_scope(scope):\n self.s = tf.placeholder(tf.float32, [None, N_S], 'S')\n self._build_net()\n self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')\n self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')\n else: # local net, calculate losses\n with tf.variable_scope(scope):\n self.s = tf.placeholder(tf.float32, [None, N_S], 'S')\n self.a_his = tf.placeholder(tf.int32, [None, AGENT_NUM], 'A')\n self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')\n\n self.a_prob, self.v = self._build_net()\n\n td = tf.subtract(self.v_target, self.v, name='TD_error')\n with tf.name_scope('c_loss'):\n self.c_loss = tf.reduce_mean(tf.square(td))\n\n with tf.name_scope('a_loss'):\n self.a_prob = tf.clip_by_value(self.a_prob, 1e-6, 1)\n log_prob = tf.reduce_sum(tf.log(self.a_prob) * tf.one_hot(self.a_his, N_A, dtype=tf.float32),\n axis=1, keep_dims=True)\n exp_v = log_prob * td\n entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob), axis=1,\n keep_dims=True) # encourage exploration\n self.exp_v = ENTROPY_BETA * entropy + exp_v\n self.a_loss = tf.reduce_mean(-self.exp_v)\n\n with tf.name_scope('local_grad'):\n self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')\n self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')\n self.a_grads = tf.gradients(self.a_loss, self.a_params)\n self.c_grads = tf.gradients(self.c_loss, self.c_params)\n\n with tf.name_scope('sync'):\n with tf.name_scope('pull'):\n self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]\n self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]\n with tf.name_scope('push'):\n self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))\n self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))\n\n def _build_net(self):\n w_init = tf.random_normal_initializer(0., .1)\n with tf.variable_scope('actor'):\n l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')\n a_prob = tf.layers.dense(l_a, [N_A, AGENT_NUM], tf.nn.softmax, kernel_initializer=w_init, name='ap')\n with tf.variable_scope('critic'):\n l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')\n v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value\n return a_prob, v\n\n def update_global(self, feed_dict): # run by a local\n SESS.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net\n\n def pull_global(self): 
# run by a local\n SESS.run([self.pull_a_params_op, self.pull_c_params_op])\n\n def choose_action(self, s): # run by a local\n prob_weights = SESS.run(self.a_prob, feed_dict={self.s: s[np.newaxis, :]})\n #print(prob_weights)\n action = np.random.choice(range(prob_weights.shape[1]), # first digit is batch size, drop it\n p=prob_weights.ravel()) # select action w.r.t the actions prob\n return action\n\n\nclass Worker(object):\n def __init__(self, name, globalAC):\n self.env = gym.make(GAME)\n self.env.env.init_params(show_dashboard=name == 'W_0', bokeh_output=\"standalone\")\n self.name = name\n self.AC = ACNet(name, globalAC)\n\n def work(self):\n global GLOBAL_RUNNING_R, GLOBAL_EP\n total_step = 1\n buffer_s, buffer_a, buffer_r = [], [], []\n while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:\n s = self.env.reset()\n ep_r = 0\n\n # save the training parameters every 100 episodes\n if self.name == 'W_0' and (GLOBAL_EP+1) % 10000 == 0 and RUN_MODE == 'training':\n saver.save(SESS, \".tf-models/a3c-1vn-dynamic\", global_step=GLOBAL_EP)\n\n while True:\n a = self.AC.choose_action(s)\n s_, r, done, info = self.env.step(a)\n\n if self.name == 'W_0':\n if True:\n self.env.render()\n\n time.sleep(0.2)\n\n if done:\n time.sleep(0.3) # give it a moment at the end of the episode to observe the result\n\n # print('>>>>', 's:', s, ' s_:', s_, 'action:', a, ' -- reward:', r, ' -- done:', done, )\n\n ep_r += r\n buffer_s.append(s)\n buffer_a.append(a)\n buffer_r.append(r)\n\n # only perform learning in training mode\n if RUN_MODE == \"training\" and (total_step % UPDATE_GLOBAL_ITER == 0 or done): # update global and assign to local net\n if done:\n v_s_ = 0 # terminal\n else:\n v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]\n buffer_v_target = []\n for r in buffer_r[::-1]: # reverse buffer r\n v_s_ = r + GAMMA * v_s_\n buffer_v_target.append(v_s_)\n buffer_v_target.reverse()\n\n buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.array(buffer_a), np.vstack(\n buffer_v_target)\n feed_dict = {\n self.AC.s: buffer_s,\n self.AC.a_his: buffer_a,\n self.AC.v_target: buffer_v_target,\n }\n self.AC.update_global(feed_dict)\n\n buffer_s, buffer_a, buffer_r = [], [], []\n self.AC.pull_global()\n\n s = s_\n total_step += 1\n if done:\n if len(GLOBAL_RUNNING_R) == 0: # record running episode reward\n GLOBAL_RUNNING_R.append(ep_r)\n else:\n GLOBAL_RUNNING_R.append(0.99 * GLOBAL_RUNNING_R[-1] + 0.01 * ep_r)\n print(\n self.name,\n \"Ep:\", GLOBAL_EP,\n \"| Ep_r: %i\" % GLOBAL_RUNNING_R[-1],\n )\n GLOBAL_EP += 1\n break\n\n\nSESS = tf.Session()\n\n\nwith tf.device(\"/cpu:0\"):\n OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')\n OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')\n GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params\n workers = []\n # Create worker\n for i in range(N_WORKERS):\n i_name = 'W_%i' % i # worker name\n workers.append(Worker(i_name, GLOBAL_AC))\n\nCOORD = tf.train.Coordinator()\nsaver = tf.train.Saver()\nSESS.run(tf.global_variables_initializer())\n\n# load previously trained parameters first\n# saver.restore(SESS, 'models-ma-balls/a3c-1thread-1v1-dynamic-4110')\n\nif OUTPUT_GRAPH:\n if os.path.exists(LOG_DIR):\n shutil.rmtree(LOG_DIR)\n tf.summary.FileWriter(LOG_DIR, SESS.graph)\n\nworker_threads = []\nfor worker in workers:\n job = lambda: worker.work()\n t = threading.Thread(target=job)\n t.start()\n worker_threads.append(t)\nCOORD.join(worker_threads)\n"
] |
[
[
"tensorflow.device",
"tensorflow.get_collection",
"tensorflow.gradients",
"tensorflow.layers.dense",
"tensorflow.subtract",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.train.Saver",
"tensorflow.random_normal_initializer",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.train.Coordinator",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.one_hot",
"numpy.array",
"tensorflow.clip_by_value",
"tensorflow.summary.FileWriter",
"tensorflow.reduce_mean",
"tensorflow.log",
"tensorflow.variable_scope",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
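The A3C worker above converts its reward buffer into bootstrapped value targets by walking the rewards backwards from the terminal or bootstrap value v_s_. The same computation, isolated as a runnable function (the function name is illustrative):

GAMMA = 0.9  # same discount as the script above

def n_step_returns(rewards, bootstrap_value):
    # walk rewards backwards, discounting from the bootstrap value v(s_)
    v = bootstrap_value
    targets = []
    for r in reversed(rewards):
        v = r + GAMMA * v
        targets.append(v)
    targets.reverse()
    return targets

print(n_step_returns([1.0, 0.0, 2.0], bootstrap_value=0.5))
# ~[2.9845, 2.205, 2.45]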
dtasev/Savu
|
[
"acb2578c85472e76cb292c4242c1ed2f2332f3e3",
"acb2578c85472e76cb292c4242c1ed2f2332f3e3",
"acb2578c85472e76cb292c4242c1ed2f2332f3e3"
] |
[
"savu/plugins/segmentation/gaussian_mixtures/gmm_segment3D.py",
"savu/data/transport_data/slice_lists.py",
"savu/plugins/filters/pymca.py"
] |
[
"# Copyright 2019 Diamond Light Source Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n.. module:: Gaussian mixture models for classification-segmentation routine\n :platform: Unix\n :synopsis: Wrapper around scikit GMM function\n\n.. moduleauthor:: Daniil Kazantsev <[email protected]>\n\"\"\"\n\nfrom savu.plugins.plugin import Plugin\nfrom savu.plugins.driver.multi_threaded_plugin import MultiThreadedPlugin\nfrom savu.plugins.utils import register_plugin\n\nimport numpy as np\nfrom sklearn.mixture import GaussianMixture\n\n@register_plugin\nclass GmmSegment3d(Plugin, MultiThreadedPlugin):\n \"\"\"\n A Plugin to segment data using Gaussian Mixtures from scikit\n\n :param classes: The number of classes for GMM. Default: 5.\n \"\"\"\n\n def __init__(self):\n super(GmmSegment3d, self).__init__(\"GmmSegment3d\")\n\n def setup(self):\n \n in_dataset, out_dataset = self.get_datasets()\n out_dataset[0].create_dataset(in_dataset[0], dtype=np.uint8)\n in_pData, out_pData = self.get_plugin_datasets()\n in_pData[0].plugin_data_setup('VOLUME_3D', 'single')\n out_pData[0].plugin_data_setup('VOLUME_3D', 'single')\n \n def pre_process(self):\n # extract given parameters\n self.classes = self.parameters['classes']\n\n def process_frames(self, data):\n # Do GMM classification/segmentation first\n dimensdata = data[0].ndim\n if (dimensdata == 2):\n (Nsize1, Nsize2) = np.shape(data[0])\n Nsize3 = 1\n if (dimensdata == 3):\n (Nsize1, Nsize2, Nsize3) = np.shape(data[0])\n \n inputdata = data[0].reshape((Nsize1*Nsize2*Nsize3), 1)/np.max(data[0])\n \n classif = GaussianMixture(n_components=self.classes, covariance_type=\"tied\")\n classif.fit(inputdata)\n cluster = classif.predict(inputdata)\n segm = classif.means_[cluster]\n if (dimensdata == 2):\n segm = segm.reshape(Nsize1, Nsize3, Nsize2)\n else:\n segm = segm.reshape(Nsize1, Nsize2, Nsize3)\n maskGMM = segm.astype(np.float64) / np.max(segm)\n maskGMM = 255 * maskGMM # Now scale by 255\n maskGMM = maskGMM.astype(np.uint8) # obtain the GMM mask\n \n return [maskGMM]\n \n def nInput_datasets(self):\n return 1\n def nOutput_datasets(self):\n return 1\n",
"# Copyright 2014 Diamond Light Source Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n.. module:: slice_lists\n :platform: Unix\n :synopsis: Contains classes for creating global and local slice lists\n\n.. moduleauthor:: Nicola Wadeson <[email protected]>\n\n\"\"\"\n\nimport numpy as np\n\n\nclass SliceLists(object):\n \"\"\"\n The Hdf5TransportData class performs the organising and movement of data.\n \"\"\"\n\n def __init__(self, name='Hdf5TransportData'):\n super(SliceLists, self).__init__()\n self.pad = False\n self.transfer_data = None\n\n def _get_process_data(self):\n return self.process_data\n\n def _single_slice_list(self, nSlices, nDims, core_slice, core_dirs,\n slice_dirs, fix, index):\n\n fix_dirs, value = fix\n slice_list = []\n for i in range(nSlices):\n getitem = np.array([slice(None)]*nDims)\n getitem[core_dirs] = core_slice[np.arange(len(core_dirs))]\n for f in range(len(fix_dirs)):\n getitem[fix_dirs[f]] = slice(value[f], value[f] + 1, 1)\n for sdir in range(len(slice_dirs)):\n getitem[slice_dirs[sdir]] = slice(index[sdir, i],\n index[sdir, i] + 1, 1)\n slice_list.append(tuple(getitem))\n return slice_list\n\n def _get_slice_dirs_index(self, slice_dirs, shape, value, calc=None):\n \"\"\"\n returns a list of arrays for each slice dimension, where each array\n gives the indices for that slice dimension.\n \"\"\"\n # create the indexing array\n chunk, length, repeat = self.__chunk_length_repeat(slice_dirs, shape)\n values = None\n idx_list = []\n for i in range(len(slice_dirs)):\n c = chunk[i]\n r = repeat[i]\n exec('values = ' + value)\n idx = np.ravel(np.kron(values, np.ones((r, c))))\n idx_list.append(idx.astype(int))\n return np.array(idx_list)\n\n def __chunk_length_repeat(self, slice_dirs, shape):\n \"\"\"\n For each slice dimension, determine 3 values relevant to the slicing.\n\n :returns: chunk, length, repeat\n chunk: how many repeats of the same index value before an increment\n length: the slice dimension length (sequence length)\n repeat: how many times does the sequence of chunked numbers repeat\n :rtype: [int, int, int]\n \"\"\"\n sshape = self.__get_shape_of_slice_dirs(slice_dirs, shape)\n if not slice_dirs:\n return [1], [1], [1]\n\n chunk = []\n length = []\n repeat = []\n for dim in range(len(slice_dirs)):\n chunk.append(int(np.prod(sshape[0:dim])))\n length.append(sshape[dim])\n repeat.append(int(np.prod(sshape[dim+1:])))\n return chunk, length, repeat\n\n def __get_shape_of_slice_dirs(self, slice_dirs, shape):\n sshape = [shape[sslice] for sslice in slice_dirs]\n if 'var' in sshape:\n shape = list(shape)\n for index, value in enumerate(shape):\n if isinstance(value, str):\n shape[index] = \\\n len(self.data_info.get('axis_labels')[index])\n shape = tuple(shape)\n sshape = [shape[sslice] for sslice in slice_dirs]\n return sshape\n\n def _get_core_slices(self, core_dirs):\n core_slice = []\n starts, stops, steps, chunks = \\\n self.data.get_preview().get_starts_stops_steps()\n\n for c in core_dirs:\n if (chunks[c]) > 1:\n if (stops[c] - 
starts[c] == 1):\n start = starts[c] - int(chunks[c]/2)\n if start < 0:\n raise Exception('Cannot have a negative value in the '\n 'slice list.')\n stop = starts[c] + (chunks[c] - int(chunks[c]/2))\n core_slice.append(slice(start, stop, 1))\n else:\n raise Exception(\"The core dimension does not support \"\n \"multiple chunks.\")\n else:\n core_slice.append(slice(starts[c], stops[c], steps[c]))\n return np.array(core_slice)\n\n def _banked_list(self, slice_list, max_frames, pad=False):\n shape = self.data.get_shape()\n slice_dirs = self.data.get_slice_dimensions()\n chunk, length, repeat = self.__chunk_length_repeat(slice_dirs, shape)\n sdir_shape = [shape[i] for i in slice_dirs]\n split, split_dim = self.__get_split_length(max_frames, sdir_shape)\n split_list = self._split_list(slice_list, split)\n \n banked = []\n for s in split_list:\n b = self._split_list(s, max_frames)\n if pad:\n diff = max_frames - len(b[-1])\n b[-1][-1] = self._fix_list_length(b[-1][-1], diff, split_dim) if diff \\\n else b[-1][-1]\n banked.extend(b)\n return banked\n\n def __get_split_length(self, max_frames, shape):\n nDims = 1\n while(nDims <= len(shape)):\n prod = np.prod([shape[i] for i in range(nDims)])\n if prod/float(max_frames) <=1:\n nDims += 1\n else:\n break\n return prod, nDims-2\n\n def _group_dimension(self, sl, dim, step):\n start = sl[0][dim].start\n stop = sl[-1][dim].stop\n working_slice = list(sl[0])\n working_slice[dim] = slice(start, stop, step)\n return tuple(working_slice)\n\n def _split_list(self, the_list, size):\n return [the_list[x:x+size] for x in xrange(0, len(the_list), size)]\n\n # This method only works if the split dimensions in the slice list contain\n # slice objects\n def __split_frames(self, slice_list, split_list):\n split = [map(int, a.split('.')) for a in split_list]\n dims = [s[0] for s in split]\n length = [s[1] for s in split]\n replace = self.__get_split_frame_entries(slice_list, dims, length)\n # now replace each slice list entry with multiple entries\n array_list = []\n for sl in slice_list:\n new_list = np.array([sl for i in range(len(replace[0]))])\n for d, i in zip(dims, range(len(dims))):\n new_list[:, d] = replace[i]\n array_list += [tuple(a) for a in new_list]\n\n return tuple(array_list)\n\n def __get_split_frame_entries(self, slice_list, dims, length):\n shape = self.get_shape\n replace = []\n seq_len = []\n\n # get the new entries\n for d, l in zip(dims, length):\n sl = slice_list[0][d]\n start = 0 if sl.start is None else sl.start\n stop = shape[d] if sl.stop is None else sl.stop\n inc = l*sl.step if sl.step else l\n temp_list = [slice(a, a+inc) for a in np.arange(start, stop, inc)]\n if temp_list[-1].stop > stop:\n temp = temp_list[-1]\n temp_list[-1] = slice(temp.start, stop, temp.step)\n replace.append(temp_list)\n seq_len.append(len(temp_list))\n\n # calculate the permutations\n length = np.array(seq_len)\n chunk = [int(np.prod(length[0:dim])) for dim in range(len(dims))]\n repeat = [int(np.prod(length[dim+1:])) for dim in range(len(dims))]\n full_replace = []\n for d in range(len(dims)):\n temp = [[replace[d][i]]*chunk[d] for x in range(repeat[d]) for i\n in range(len(replace[d]))]\n full_replace.append([t for sub in temp for t in sub])\n return full_replace\n\n def _get_frames_per_process(self, slice_list):\n processes = self.data.exp.meta_data.get(\"processes\")\n process = self.data.exp.meta_data.get(\"process\")\n frame_idx = np.arange(len(slice_list))\n try:\n frames = np.array_split(frame_idx, len(processes))[process]\n slice_list = 
slice_list[frames[0]:frames[-1]+1]\n except IndexError:\n slice_list = []\n return slice_list, frames\n\n def _pad_slice_list(self, slice_list, inc_start_str, inc_stop_str):\n \"\"\" Amend the slice lists to include padding. Includes variations for\n transfer and process slice lists. \"\"\"\n pData = self.data._get_plugin_data()\n if not pData.padding:\n return slice_list\n\n pad_dict = pData.padding._get_padding_directions()\n\n shape = self.data.get_shape()\n for ddir, value in pad_dict.iteritems():\n exec('inc_start = ' + inc_start_str)\n exec('inc_stop = ' + inc_stop_str)\n for i in range(len(slice_list)):\n slice_list[i] = list(slice_list[i])\n sl = slice_list[i][ddir]\n if sl.start is None:\n sl = slice(0, shape[ddir], 1)\n slice_list[i][ddir] = \\\n slice(sl.start + inc_start, sl.stop + inc_stop, sl.step)\n slice_list[i] = tuple(slice_list[i])\n return slice_list\n\n def _fix_list_length(self, sl, length, dim):\n sdir = self.data.get_slice_dimensions()\n sl = list(sl)\n s = sl[sdir[dim]]\n sl[sdir[dim]] = slice(s.start, s.stop + length*s.step, s.step)\n return tuple(sl)\n\n\n def _get_local_single_slice_list(self, shape):\n slice_dirs = self.data.get_slice_dimensions()\n core_dirs = np.array(self.data.get_core_dimensions())\n fix = [[]]*2\n core_slice = np.array([slice(None)]*len(core_dirs))\n shape = tuple([shape[i] for i in range(len(shape))])\n values = 'np.arange(shape[slice_dirs[i]])'\n index = self._get_slice_dirs_index(slice_dirs, shape, values)\n # there may be no slice dirs\n index = index if index.size else np.array([[0]])\n nSlices = index.shape[1] if index.size else len(fix[0])\n nDims = len(shape)\n\n ssl = self._single_slice_list(\n nSlices, nDims, core_slice, core_dirs, slice_dirs, fix, index)\n return ssl\n\n def _group_slice_list_in_one_dimension(self, slice_list, max_frames,\n group_dim, pad=False):\n \"\"\" Group the slice list in one dimension, stopping at \\\n boundaries - prepare a slice list for multi-frame plugin processing.\n \"\"\"\n if group_dim is None:\n return slice_list\n\n banked = self._banked_list(slice_list, max_frames, pad=pad)\n grouped = []\n for group in banked:\n sub_groups = self._split_list(group, max_frames)\n for sub in sub_groups:\n grouped.append(self._group_dimension(sub, group_dim, 1))\n return grouped\n\n def _get_global_single_slice_list(self, shape):\n slice_dirs = self.data.get_slice_dimensions()\n core_dirs = np.array(self.data.get_core_dimensions())\n fix = self.data._get_plugin_data()._get_fixed_dimensions()\n core_slice = self._get_core_slices(core_dirs)\n values = 'self._get_slice_dir_index(slice_dirs[i])'\n index = self._get_slice_dirs_index(slice_dirs, shape, values)\n nSlices = index.shape[1] if index.size else len(fix[0])\n nDims = len(shape)\n ssl = self._single_slice_list(\n nSlices, nDims, core_slice, core_dirs, slice_dirs, fix, index)\n return ssl\n\n def _group_slice_list_in_multiple_dimensions(self, slice_list, max_frames,\n group_dim, pad=False):\n \"\"\" Group the slice list in multiple dimensions - prepare a slice list\\\n for file transfer.\n \"\"\"\n if group_dim is None:\n return slice_list\n\n steps = self.data.get_preview().get_starts_stops_steps('steps')\n sub_groups = self._banked_list(slice_list, max_frames, pad=pad)\n\n grouped = []\n for sub in sub_groups:\n temp = list(sub[0])\n for dim in group_dim:\n temp[dim] = self._group_dimension(sub, dim, steps[dim])[dim]\n grouped.append(tuple(temp))\n\n return grouped\n\nclass LocalData(object):\n \"\"\" The LocalData class organises the slicing of transferred 
data to \\\n give the shape requested by a plugin for each run of 'process_frames'.\n \"\"\"\n\n def __init__(self, dtype, transport_data):\n self.dtype = dtype # in or out ProcessData object\n self.td = transport_data\n self.data = transport_data.data\n self.pData = self.data._get_plugin_data()\n self.shape = self.data.get_shape()\n self.sdir = None\n\n def _get_dict(self):\n return self._get_dict_in() if self.dtype == 'in' else \\\n self._get_dict_out()\n\n def _get_dict_in(self):\n sl_dict = {}\n sl = self._get_slice_list()\n sl = self.td._pad_slice_list(sl, '0', 'sum(value.values())')\n sl_dict['process'] = sl\n return sl_dict\n\n def _get_dict_out(self):\n sl_dict = {}\n sl_dict['process'] = self._get_slice_list()\n sl_dict['unpad'] = self.__get_unpad_slice_list(len(sl_dict['process']))\n return sl_dict\n\n def _get_slice_list(self):\n \"\"\" Splits a file transfer slice list into a list of (padded) slices\n required for each loop of process_frames.\n \"\"\"\n slice_dirs = self.data.get_slice_dimensions()\n self.sdir = slice_dirs[0] if len(slice_dirs) > 0 else None\n\n pData = self.pData\n mf_process = pData.meta_data.get('max_frames_process')\n shape = pData.get_shape_transfer()\n\n process_ssl = self.td._get_local_single_slice_list(shape)\n\n process_gsl = self.td._group_slice_list_in_one_dimension(\n process_ssl, mf_process, self.sdir)\n return process_gsl\n\n def __get_unpad_slice_list(self, reps):\n # setting process slice list unpad here - not currently working for 4D data\n sl = [slice(None)]*len(self.pData.get_shape_transfer())\n if not self.pData.padding:\n return tuple([tuple(sl)]*reps)\n pad_dict = self.pData.padding._get_padding_directions()\n for ddir, value in pad_dict.iteritems():\n sl[ddir] = slice(value['before'], -value['after'])\n return tuple([tuple(sl)]*reps)\n\n\nclass GlobalData(object):\n \"\"\"\n The GlobalData class organises the movement and slicing of the data from\n file.\n \"\"\"\n\n def __init__(self, dtype, transport):\n self.dtype = dtype # in or out TransferData object\n self.trans = transport\n self.data = transport.data\n self.pData = self.data._get_plugin_data()\n self.shape = self.data.get_shape()\n\n def _get_dict(self):\n return self._get_dict_in() if self.dtype == 'in' else \\\n self._get_dict_out()\n\n def _get_dict_in(self):\n sl_dict = {}\n sl, current = \\\n self._get_slice_list(self.shape, current_sl=True, pad=True)\n\n sl_dict['current'], _ = self.trans._get_frames_per_process(current)\n sl, sl_dict['frames'] = self.trans._get_frames_per_process(sl)\n\n if self.trans.pad:\n sl = self.trans._pad_slice_list(\n sl, \"-value['before']\", \"value['after']\")\n sl_dict['transfer'] = sl\n return sl_dict\n\n def _get_dict_out(self):\n sl_dict = {}\n sl, _ = self._get_slice_list(self.shape)\n sl_dict['transfer'], _ = self.trans._get_frames_per_process(sl)\n return sl_dict\n\n def _get_slice_list(self, shape, current_sl=None, pad=False):\n mft = self.pData._get_max_frames_transfer()\n transfer_ssl = self.trans._get_global_single_slice_list(shape)\n\n if transfer_ssl is None:\n raise Exception(\"Data type %s does not support slicing in \"\n \"directions %s\" % (self.get_current_pattern_name(),\n self.get_slice_directions()))\n slice_dims = self.data.get_slice_dimensions()\n transfer_gsl = self.trans._group_slice_list_in_multiple_dimensions(\n transfer_ssl, mft, slice_dims, pad=pad)\n\n if current_sl:\n mfp = self.pData._get_max_frames_process()\n current_sl = self.trans._group_slice_list_in_multiple_dimensions(\n transfer_ssl, mfp, slice_dims, 
pad=pad)\n\n split_list = self.pData.split\n transfer_gsl = self.__split_frames(transfer_gsl, split_list) if \\\n split_list else transfer_gsl\n\n return transfer_gsl, current_sl\n\n def _get_padded_data(self, slice_list, end=False):\n slice_list = list(slice_list)\n pData = self.pData\n pad_dims = list(set(self.data.get_core_dimensions() +\n (self.data.get_slice_dimensions())))\n pad_list = []\n for i in range(len(slice_list)):\n pad_list.append([0, 0])\n\n data_dict = self.data.data_info.get_dictionary()\n shape = data_dict['orig_shape'] if 'orig_shape' in data_dict.keys() \\\n else self.data.get_shape()\n\n for dim in range(len(pad_dims)):\n sl = slice_list[dim]\n if sl.start < 0:\n pad_list[dim][0] = -sl.start\n slice_list[dim] = slice(0, sl.stop, sl.step)\n diff = sl.stop - shape[dim]\n if diff > 0:\n pad_list[dim][1] = diff\n slice_list[dim] = \\\n slice(slice_list[dim].start, sl.stop - diff, sl.step)\n\n data = self.data.data[tuple(slice_list)]\n\n if np.sum(pad_list):\n mode = pData.padding.mode if pData.padding else 'edge'\n temp = np.pad(data, tuple(pad_list), mode=mode)\n return temp\n return data\n",
"# Copyright 2014 Diamond Light Source Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n.. module:: pymca\n :platform: Unix\n :synopsis: A plugin to fit spectral data\n\n.. moduleauthor:: Aaron D. Parsons <[email protected]>\n\"\"\"\n\nimport logging\nfrom savu.plugins.filters.base_filter import BaseFilter\nfrom savu.plugins.driver.cpu_plugin import CpuPlugin\nfrom savu.plugins.utils import register_plugin, dawn_compatible, OUTPUT_TYPE_METADATA_ONLY\nimport numpy as np\nimport os\nimport savu.test.test_utils as tu\nfrom PyMca5.PyMcaPhysics.xrf import McaAdvancedFitBatch\n\n\n@dawn_compatible(OUTPUT_TYPE_METADATA_ONLY)\n@register_plugin\nclass Pymca(BaseFilter, CpuPlugin):\n \"\"\"\n uses pymca to fit spectral data\n\n :u*param config: path to the config file. Default: 'Savu/test_data/data/test_config.cfg'.\n\n \"\"\"\n\n def __init__(self):\n logging.debug(\"fitting spectrum\") \n super(Pymca, self).__init__(\"Pymca\")\n \n def pre_process(self):\n in_dataset, _out_datasets = self.get_datasets()\n in_d1 = in_dataset[0]\n self.sh = in_d1.get_shape()\n self.b = self.setup_fit(np.random.random((1,1,self.sh[-1])))\n\n def process_frames(self, data):\n y = np.expand_dims(data,0)\n self.b = self.setup_fit(y)\n self.b._McaAdvancedFitBatch__processStack()\n try:\n stack = self.b.outbuffer['parameters']\n op_stack = np.rollaxis(stack,0,3)\n except (AttributeError, KeyError) as e:\n op_stack = -np.ones((1,1,self.outputshape[-1]))\n logging.warn(\"Error in fit:%s\",e) \n op = op_stack[0,0]\n return op\n\n def setup(self):\n logging.debug('setting up the pymca fitting')\n in_dataset, out_datasets = self.get_datasets()\n inshape = in_dataset[0].get_shape()\n spectra_shape = inshape[-1] # nearly always true.\n rest_shape = inshape[:-1]\n np.random.seed=1\n dummy_spectrum = np.random.random((1, 1, spectra_shape))\n c = self.setup_fit(dummy_spectrum)# seed it with junk, zeros made the matrix singular unsurprisingly and this bungles it.\n \n # temporary measure to stop the next line printing arrays to screen.\n c.processList()#_McaAdvancedFitBatch__processStack()# perform an initial fit to get the shapes\n\n fit_labels = c.outbuffer.labels('parameters') # and then take out the axis labels for the channels\n out_meta_data = out_datasets[0].meta_data\n out_meta_data.set(\"PeakElements\",fit_labels)\n \n self.outputshape = rest_shape+(len(fit_labels),) # and this is the shape the thing will be\n# print \"input shape is\", in_dataset[0].get_shape()\n# print \"the output shape in setup is\"+str(outputshape)\n \n axis_labels = ['-1.PeakElements.label']\n in_patterns = in_dataset[0].get_data_patterns()\n# pattern_list = ['SINOGRAM', 'PROJECTION']\n pattern_list = in_patterns.keys()\n\n fitResult = out_datasets[0]\n\n fitResult.create_dataset(patterns={in_dataset[0]: pattern_list},\n axis_labels={in_dataset[0]: axis_labels},\n shape=self.outputshape)\n fitResult.meta_data.set('FitAreas',np.array(fit_labels))\n slice_directions = tuple(range(len(rest_shape)))\n# print \"slice directions 
are:\"+str(slice_directions)\n fitResult.add_pattern(\"CHANNEL\", core_dims=(-1,),\n slice_dims=slice_directions)\n \n in_pData, out_pData = self.get_plugin_datasets()\n in_pData[0].plugin_data_setup(self.get_plugin_pattern(), self.get_max_frames())\n out_pData[0].plugin_data_setup('CHANNEL', self.get_max_frames())\n \n \n def get_max_frames(self):\n return 'single'\n\n def get_plugin_pattern(self):\n return 'SPECTRUM'\n\n def nOutput_datasets(self):\n return 1\n\n def setup_fit(self,y):\n '''\n takes a data shape and returns a fit-primed object\n ''' \n outputdir=None # nope\n roifit=0# nope\n roiwidth=y.shape[1] #need this to pretend its an image\n b = McaAdvancedFitBatch.McaAdvancedFitBatch(self.get_conf_path(),\n [y],\n outputdir,\n roifit,\n roiwidth,\n fitfiles=0,\n nosave=True,\n quiet=True) # prime the beauty\n b.pleaseBreak = 1\n \n # temporary measure to stop the next line printing arrays to screen.\n b.processList()\n\n b.pleaseBreak = 0\n return b\n \n def get_conf_path(self):\n path = self.parameters['config']\n if path.split(os.sep)[0] == 'Savu':\n path = tu.get_test_data_path(path.split('/test_data/data')[1])\n return path\n \n def get_dummyhdf_path(self):\n return tu.get_test_data_path('i18_test_data.nxs')\n"
] |
[
[
"sklearn.mixture.GaussianMixture",
"numpy.max",
"numpy.shape"
],
[
"numpy.arange",
"numpy.ones",
"numpy.prod",
"numpy.array",
"numpy.sum"
],
[
"numpy.rollaxis",
"numpy.expand_dims",
"numpy.random.random",
"numpy.ones",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
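gmm_segment3D.py in the record above segments a volume by fitting a scikit-learn GaussianMixture to the flattened intensities and replacing each voxel with its component mean. A stand-alone sketch of that step on synthetic data (the two-cluster volume below is fabricated for the demo):

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(0)
vol = np.concatenate([rng.normal(0.2, 0.05, 500), rng.normal(0.8, 0.05, 500)])
vol = vol.reshape(10, 10, 10)  # pretend this is a 3D image volume

flat = vol.reshape(-1, 1) / np.max(vol)  # one column vector of intensities
classif = GaussianMixture(n_components=2, covariance_type="tied").fit(flat)
segm = classif.means_[classif.predict(flat)].reshape(vol.shape)
mask = (255 * segm / np.max(segm)).astype(np.uint8)
print(np.unique(mask))  # two labels, one per mixture component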
uci-uav-forge/2020_path_planning
|
[
"a0d95a2a9a54418c66d5575aa569a92dcbe9865b"
] |
[
"forge_pathplanner/bdc.py"
] |
[
"from polyskel import polyskel\nimport networkx as nx\nimport numpy as np\nimport copy, enum\nimport matplotlib.pyplot as plt\nfrom . import polygon\nfrom shapely import geometry\n\n\nclass Event(enum.Enum):\n CLOSE = 1\n OPEN = 2\n SPLIT = 3\n MERGE = 4\n INFLECTION = 5\n INTERSECT = 6\n\n\ndef rad2degree(rad):\n \"\"\"Convert angle from radians to degrees\n\n Parameters\n ----------\n rad : int or float\n Angle in radians\n\n Returns\n -------\n float\n angle in degrees\n \"\"\"\n return rad * 180 / np.pi\n\n\ndef degree2rad(deg):\n \"\"\"Convert angle from degrees to radians\n\n Parameters\n ----------\n deg : int or float\n angle in degrees\n\n Returns\n -------\n float\n angle in radians\n \"\"\"\n return deg * np.pi / 180\n\n\ndef discretize_entire(J: nx.DiGraph, R: nx.Graph, gridsz: float):\n pts = get_points_array(J)\n xmin, xmax = np.min(pts[:, 0]), np.max(pts[:, 0])\n ymin, ymax = np.min(pts[:, 1]), np.max(pts[:, 1])\n x, y = np.meshgrid(np.arange(xmin, xmax, gridsz), np.arange(ymin, ymax, gridsz))\n P = nx.grid_2d_graph(*x.shape)\n\n polygons = []\n for n in R.nodes:\n polygons.append(geometry.Polygon(R.nodes[n][\"cellpts\"]))\n delnodes = []\n for n in P.nodes:\n point = np.array((x[n], y[n]))\n if any([p.contains(geometry.Point(point)) for p in polygons]):\n P.nodes[n][\"points\"] = point\n P.nodes[n][\"height\"] = 0.0\n else:\n delnodes.append(n)\n P.remove_nodes_from(delnodes)\n P = P.subgraph(max(nx.connected_components(P), key=len))\n return P\n\n\ndef add_discretized_cells(J: nx.DiGraph, R: nx.Graph, theta, gridsz) -> None:\n for n in R.nodes:\n P = discretize_cell(J, R.nodes[n][\"cell\"], theta, gridsz)\n R.nodes[n][\"grid\"] = P\n\n\ndef discretize_cell(J: nx.DiGraph, c, theta, gridsz, diags=True):\n # offset by small amount so that grid begins just inside of polygon edge\n eps = 0.0001\n pts = get_points_array(J, c)\n matr = make_rot_matrix(theta)\n revmatr = make_rot_matrix(-theta)\n # rotate points\n pts = np.dot(pts, matr)\n poly = geometry.Polygon(pts)\n # cover cell in points\n xmin, xmax = np.min(pts[:, 0]) + eps, np.max(pts[:, 0])\n ymin, ymax = np.min(pts[:, 1]) + eps, np.max(pts[:, 1])\n x, y = np.meshgrid(np.arange(xmin, xmax, gridsz), np.arange(ymin, ymax, gridsz))\n # draw 2dgraph\n P = nx.grid_2d_graph(*x.shape)\n rm_node_list = []\n for n in P.nodes:\n point = np.array((x[n], y[n]))\n if poly.contains(geometry.Point(point)):\n P.nodes[n][\"points\"] = np.dot(point, revmatr)\n else:\n rm_node_list.append(n)\n P.remove_nodes_from(rm_node_list)\n ccs = list(nx.connected_components(P))\n if len(ccs) > 1:\n P = P.subgraph(max(ccs, key=len))\n return P\n\n\ndef iscw(points: np.ndarray) -> bool:\n \"\"\"Determine orientation of a set of points\n\n Parameters\n ----------\n points : np.ndarray\n An ordered set of points\n\n Returns\n -------\n bool\n True if clockwise, False if not clockwise\n \"\"\"\n c = 0\n for i, _ in enumerate(points[:-1, :]):\n p1, p2 = points[i + 1, :], points[i, :]\n c += (p2[0] - p1[0]) * (p2[1] + p1[1])\n if c > 0:\n return True\n else:\n return False\n\n\ndef line_sweep(G: nx.DiGraph, theta: float = 0, posattr: str = \"points\") -> tuple:\n \"\"\"Perform a boustrophedon line sweep over a world graph G.\n\n Parameters\n ----------\n G : nx.DiGraph\n World graph. Contains an outer boundary, which is made up of a clockwise ordering of edges\n with weight=1, and optionally contains holes, which are counterclockwise orderings of edges\n with weight=2. 
Worlds must be non-degenerate planar graphs!\n theta : float, optional\n Angle of the line sweep from x-axis, by default 0\n posattr : str, optional\n attribute of points in `G`, by default 'points'\n\n Returns\n -------\n tuple\n H, which is a graph of the new bdc-composed world rotated to `theta`\n J, which is the graph of the new bdc-composed world rotated back to its original orientation\n R, which is the reeb graph containing connectivity information between each cell\n S, which is the skel graph containing connected straight skeletons of each cell in `R`.\n \"\"\"\n # sorted node list becomes our list of events\n H = rotate_graph(G, theta, posattr=posattr)\n # sort left to right and store sorted node list in L.\n def left_to_right(n):\n return H.nodes[n][posattr][0]\n\n L = sorted(list(H.nodes), key=left_to_right)\n # sweep left-to-right\n crits, cells = [], {}\n for v in L:\n etype = check_lu(H, v)\n if etype == Event.SPLIT or etype == Event.MERGE:\n splitmerge_points(H, v)\n # crits\n if etype in (Event.OPEN, Event.SPLIT, Event.MERGE, Event.CLOSE):\n crits.append(v)\n for c in crits:\n cells = append_cell(H, c, cells)\n # rotate the graph back into original orientation\n J = rotate_graph(H, -theta, posattr=posattr)\n # # build the reebgraph of J\n R = build_reebgraph(J, cells)\n # S = create_skelgraph(R, J)\n return J, R\n\n\ndef rg_centroid(H: nx.DiGraph, cell: set) -> np.ndarray:\n \"\"\"Get centroid of a cell `cell` made up of nodes in `H`\n\n Parameters\n ----------\n H : nx.DiGraph\n The world graph\n cell : set\n An ordered or unordered list of nodes in `H`\n\n Returns\n -------\n np.ndarray\n the centroid as an xy point.\n \"\"\"\n p = np.zeros((2,), dtype=np.float64)\n for c in cell:\n p += H.nodes[c][\"points\"]\n p /= len(cell)\n return p\n\n\ndef build_reebgraph(H: nx.DiGraph, cells: dict) -> nx.Graph:\n \"\"\"Build the reebgraph on `H` using the cell information contained in 'cells'\n\n Parameters\n ----------\n H : nx.DiGraph\n The bdc composed world\n cells : dict\n each value in cells is a list of nodes in `H` which form a closed cell in the bdc composition\n\n Returns\n -------\n nx.Graph\n Graph, `R` which represents connectivity of each cell in `H`\n \"\"\"\n rgedges, rgcells = [], {}\n for i, a in enumerate(cells.values()):\n for j, b in enumerate(cells.values()):\n union = tuple(set(a) & set(b))\n if len(union) == 2:\n w = None\n try:\n w = H[union[0]][union[1]][\"weight\"]\n except:\n pass\n try:\n w = H[union[1]][union[0]][\"weight\"]\n except:\n pass\n if w == 3 or w == 4:\n rgedges.append((i, j, {\"shared\": union}))\n rgcells[i] = a\n rgcells[j] = b\n\n R = nx.Graph()\n R.add_edges_from(rgedges)\n for n in R.nodes:\n R.nodes[n][\"cell\"] = rgcells[n]\n centroid = rg_centroid(H, rgcells[n])\n R.nodes[n][\"centroid\"] = centroid\n cellpts = [H.nodes[c][\"points\"] for c in rgcells[n]]\n # close the polygon\n cellpts.append(cellpts[0])\n cellpts = np.array(cellpts)\n R.nodes[n][\"cellpts\"] = cellpts\n\n # plt.plot(cellpts[:,0], cellpts[:,1])\n # triangles can't have straight skeleton\n if len(rgcells[n]) > 3:\n skel = polyskel.skeletonize(cellpts, holes=[])\n skel = fix_polyskel(skel)\n T = traverse_polyskel(R, n, skel)\n else:\n T = nx.Graph()\n T.add_node(\n 0,\n points=centroid,\n interior=True,\n parent=(R.nodes[n][\"cell\"], R.nodes[n][\"cellpts\"]),\n )\n for e1, e2 in T.edges:\n T[e1][e2][\"interior\"] = True\n R.nodes[n][\"skel_graph\"] = T\n return R\n\n\ndef create_skelgraph(R: nx.Graph, H: nx.DiGraph) -> nx.Graph:\n \"\"\"Create a \"skelgraph\" 
from a bdc world `H` and its reeb graph, `R`.\n A skelgraph is a graph of the straight skeletons of each cell of a world `H`,\n joined by the midpoints of each cell wall on `H`.\n\n Traversing the skelgraph of `H` means visiting each cell of the boustrophedon\n decomposition of `H`.\n\n Parameters\n ----------\n R : nx.Graph\n The reeb graph of the world\n H : nx.DiGraph\n The BDC of the world\n\n Returns\n -------\n nx.Graph\n An undirected graph joining the straight skeleton of each cell.\n \"\"\"\n # in preparation to join, make nodes unique\n unique_node = 0\n for n in R.nodes:\n unique_node = remap_nodes_unique(unique_node, R.nodes[n][\"skel_graph\"])\n\n visited = set()\n for this, that in nx.eulerian_circuit(nx.eulerize(R)):\n if frozenset((this, that)) not in visited:\n visited.add(frozenset((this, that)))\n # midpoint is the midpoint of the line between cells of 'this' and 'that' nodes\n # on reeb graph. We walk through each transition between cells, finding the midpoint\n # and adding it to the straight skeletons in R.\n midp = get_midpoint_shared(H, R, this, that)\n this_closest = get_closest_on_skel(R, this, midp)\n that_closest = get_closest_on_skel(R, that, midp)\n # get midpoint node\n midp_node = -unique_node\n unique_node += 1\n R.nodes[this][\"skel_graph\"].add_node(\n midp_node, interior=False, points=midp, parent=None\n )\n R.nodes[this][\"skel_graph\"].add_edge(\n this_closest, midp_node, interior=False\n )\n R.nodes[that][\"skel_graph\"].add_node(\n midp_node, interior=False, points=midp, parent=None\n )\n R.nodes[that][\"skel_graph\"].add_edge(\n that_closest, midp_node, interior=False\n )\n # compose all\n for n in R.nodes:\n S = nx.compose_all([R.nodes[n][\"skel_graph\"] for n in R.nodes])\n for e1, e2 in S.edges:\n S[e1][e2][\"distance\"] = np.linalg.norm(\n S.nodes[e1][\"points\"] - S.nodes[e2][\"points\"]\n )\n return S\n\n\ndef get_closest_on_skel(R: nx.Graph, rnode: int, midp: np.ndarray) -> int:\n \"\"\"Get the closest point of a skel graph stored in the `rnode` of `R` to the midpoint `midp`.\n\n Parameters\n ----------\n R : nx.Graph\n Reeb graph containing straight skeleton in key 'skel_graph'\n rnode : int\n node on that reeb graph\n midp : np.ndarray\n the point to test\n\n Returns\n -------\n int\n a node on skel_graph which is closest to `midp`\n \"\"\"\n this_connectors = []\n for n in R.nodes[rnode][\"skel_graph\"].nodes:\n if R.nodes[rnode][\"skel_graph\"].nodes[n][\"interior\"]:\n thispt = R.nodes[rnode][\"skel_graph\"].nodes[n][\"points\"]\n dist = dotself(midp - thispt)\n this_connectors.append((n, dist))\n # return closest\n return sorted(this_connectors, key=lambda t: t[1])[0][0]\n\n\ndef fix_polyskel(skel: list):\n \"\"\"This is a helper function for casting the results of `polyskel` to\n numpy arrays, rather than Euler3 points\n\n Parameters\n ----------\n skel : list\n results of `polyskel` function call\n\n Returns\n -------\n list\n each element of this list contains origin, height, and leafs.\n see polyskel documentation for more\n \"\"\"\n newskel = []\n\n def strip(leaf):\n return np.array(leaf)\n\n for origin, height, leafs in skel:\n origin = np.array(origin)\n leafs = list(map(strip, leafs))\n newskel.append((origin, height, leafs))\n return newskel\n\n\ndef traverse_polyskel(R: nx.Graph, rn: int, skel: list) -> nx.Graph:\n \"\"\"This is a helper function for casting the list returned by `polyskel` to\n a networkx Graph.\n\n This function also adds the list from `skel` to a reebgraph stored in `R` at node `rn`.\n\n Parameters\n 
----------\n R : nx.Graph\n Reeb Graph\n rn : int\n node on the reeb graph\n skel : list\n list returned by polyskel\n\n Returns\n -------\n nx.Graph\n the straight skel graph\n \"\"\"\n epsilon = 1e-5\n elist = []\n for i, (pt1, _, _) in enumerate(skel):\n for j, (_, _, lfs2) in enumerate(skel):\n for l in lfs2:\n if dotself(pt1 - l) <= epsilon:\n elist.append((i, j))\n T = nx.Graph(elist)\n for n in T.nodes:\n T.nodes[n][\"points\"] = skel[n][0]\n T.nodes[n][\"interior\"] = True\n T.nodes[n][\"parent\"] = R.nodes[rn][\"cell\"], R.nodes[rn][\"cellpts\"]\n return T\n\n\ndef get_points_array(\n H: nx.Graph, nlist: list = None, posattr: str = \"points\"\n) -> np.ndarray:\n \"\"\"Get an array of points from `H` which stores points in `posattr` from indices in `nlist`\n\n Returns all points in H by default.\n\n Parameters\n ----------\n H : nx.Graph\n world graph\n nlist : list, optional\n subset of nodes on H to get points from, by default None\n posattr : str, optional\n attribute that the points are stored under, by default 'points'\n\n Returns\n -------\n np.ndarray\n Mx2 array for M points\n \"\"\"\n if nlist == None:\n nlist = list(H.nodes())\n points = np.empty((len(nlist), 2))\n for i, n in enumerate(nlist):\n points[i] = H.nodes[n][\"points\"]\n return points\n\n\ndef get_midpoint_shared(H: nx.DiGraph, R: nx.Graph, e1: int, e2: int) -> np.ndarray:\n \"\"\"Get the midpoint of the line which joins two cells, e1 and e2.\n\n E.g, returns the point x from two neighboring rectangular cells:\n\n ┌──────────┐\n │ │\n │ e1 │\n │ │\n └───┬──x───┴──┐\n │ │\n │ e2 │\n │ │\n └─────────┘\n\n Parameters\n ----------\n H : nx.DiGraph\n the world graph\n R : nx.Graph\n the reeb graph of the world `H`\n e1 : int\n first shared edge. Node of `R`\n e2 : int\n second shared edge. Node of `R`\n\n Returns\n -------\n np.ndarray\n the midpoint\n \"\"\"\n n1, n2 = R[e1][e2][\"shared\"]\n p1, p2 = H.nodes[n1][\"points\"], H.nodes[n2][\"points\"]\n v = (p2 - p1) / 2\n return p1 + v\n\n\ndef dotself(x: np.ndarray) -> float:\n \"\"\"dot a vector x with itself to produce a scalar\n\n Parameters\n ----------\n x : np.ndarray\n the vector\n\n Returns\n -------\n float\n scalar value |x|^2\n \"\"\"\n return np.dot(x, x)\n\n\ndef remap_nodes_unique(new_node: int, T: nx.Graph) -> int:\n \"\"\"Alters T in place, replacing its non-unique nodes by iterating on `new_node`.\n we can run this function on several graphs, thus guaranteeing that their nodes\n do not collide.\n\n Parameters\n ----------\n new_node : int\n starting value for new nodes\n T : nx.Graph\n A graph with integer nodes\n\n Returns\n -------\n int\n ending value for new nodes\n \"\"\"\n mapping = {}\n for n in T.nodes:\n mapping[n] = -new_node\n new_node += 1\n nx.relabel_nodes(T, mapping, copy=False)\n return new_node\n\n\ndef make_skelgraph(H: nx.DiGraph, R: nx.Graph):\n \"\"\"Make the \"straight skeleton graph\" over the world `H` with\n its reeb-graph `R`.\n\n Parameters\n ----------\n H : nx.DiGraph\n The world. Must already be BDC decomposed.\n R : nx.Graph\n Reeb graph of the world.\n\n Returns\n -------\n nx.Graph\n the \"skeleton\" graph. 
This undirected graph is made up of the straight skeleton\n of each cell, connected by the midpoints of the dividing lines between cells.\n \"\"\"\n S = nx.Graph()\n new_node = 0\n eulerian = nx.eulerian_circuit(nx.eulerize(R))\n for n in R.nodes:\n new_node = remap_nodes_unique(new_node, R.nodes[n][\"tgraph\"])\n nx.set_edge_attributes(R.nodes[n][\"tgraph\"], True, \"original\")\n nx.set_node_attributes(R.nodes[n][\"tgraph\"], True, \"original\")\n\n for e1, e2 in eulerian:\n T_this: nx.Graph = R.nodes[e1][\"tgraph\"]\n T_other: nx.Graph = R.nodes[e2][\"tgraph\"]\n for n in T_this.nodes:\n T_this.nodes[n][\"cell\"] = R.nodes[e1][\"cell\"]\n T_this.nodes[n][\"middle\"] = False\n if T_this.degree(n) > 1:\n T_this.nodes[n][\"end\"] = False\n else:\n T_this.nodes[n][\"end\"] = True\n for n in T_other.nodes:\n T_other.nodes[n][\"cell\"] = R.nodes[e2][\"cell\"]\n T_other.nodes[n][\"middle\"] = False\n if T_other.degree(n) > 1:\n T_other.nodes[n][\"end\"] = False\n else:\n T_other.nodes[n][\"end\"] = True\n # get midpoint and add the node to both graphs\n midp = get_midpoint_shared(H, R, e1, e2)\n # find closest in both skelgraphs to this new node\n close_this = min(\n [n for n in T_this.nodes if T_this.nodes[n][\"end\"]],\n key=lambda n: dotself(T_this.nodes[n][\"points\"] - midp),\n )\n close_other = min(\n [n for n in T_other.nodes if T_other.nodes[n][\"end\"]],\n key=lambda n: dotself(T_other.nodes[n][\"points\"] - midp),\n )\n # add midpoint to both graphs after finding closest\n midp_node = -new_node\n T_this.add_node(\n midp_node,\n points=midp,\n cell=None,\n original=False,\n middle=True,\n entry_cell=R.nodes[e1][\"cell\"],\n )\n T_other.add_node(midp_node, points=midp, cell=None, original=False, middle=True)\n new_node += 1\n # add edge to both subgraphs\n T_this.add_edge(close_this, midp_node, original=False)\n T_other.add_edge(midp_node, close_other, original=False)\n S = nx.compose_all((S, T_this, T_other))\n\n for e1, e2 in S.edges:\n S[e1][e2][\"dist\"] = np.sqrt(\n dotself(S.nodes[e2][\"points\"] - S.nodes[e1][\"points\"])\n )\n return S\n\n\ndef cul_de_sac_check(S: nx.Graph, n):\n deg = S.degree(n) == 1\n orig = S.nodes[n][\"original\"] == 1\n cell = S.nodes[n][\"cell\"]\n if cell is not None:\n cellch = len(cell) > 3\n else:\n cellch = False\n return deg and orig and cellch\n\n\ndef rcross(H: nx.DiGraph, u: int, v: int, w: int) -> float:\n \"\"\"Compute the vector cross product of edges `u`,`v` and `v`,`w` on `H`.\n This one is more expensive than qcross but returns a numerical value.\n\n Parameters\n ----------\n H : nx.DiGraph\n Graph\n u : int\n idx of node\n v : int\n idx of node\n w : int\n idx of node\n\n Returns\n -------\n float\n cross product\n \"\"\"\n a = H.nodes[v][\"points\"] - H.nodes[u][\"points\"]\n b = H.nodes[w][\"points\"] - H.nodes[v][\"points\"]\n a /= np.linalg.norm(a)\n b /= np.linalg.norm(b)\n return a[0] * b[1] - a[1] * b[0]\n\n\ndef append_cell(H: nx.DiGraph, v: int, cells: dict) -> dict:\n \"\"\"Trace the closed cells which touch the critical vertex `v` on `H` and add\n any newly found cell to `cells`.\n\n Parameters\n ----------\n H : nx.DiGraph\n graph on which to append cells\n v : int\n vertex from which to start\n cells : dict\n dict of cells found so far, keyed by the frozenset of their nodes.\n\n Returns\n -------\n dict\n the updated dict of cells\n \"\"\"\n prev_node = v\n for path_start in H.adj[v]:\n path_end = False\n path = []\n node = path_start\n while not path_end:\n cvals = []\n for candidate in H.adj[node]:\n if candidate != prev_node:\n cval = rcross(H, prev_node, node, candidate)\n cvals.append((candidate, cval))\n\n def cvalsort(c):\n return c[1]\n\n cvals.sort(key=cvalsort)\n # get 
most clockwise cval\n best = cvals[0][0]\n prev_node = node\n node = best\n path.append(best)\n if best == path_start:\n path_end = True\n to_add = frozenset(path)\n if to_add not in cells.keys():\n cells[to_add] = path\n return cells\n\n\ndef splitmerge_points(H: nx.DiGraph, v: int) -> None:\n \"\"\"Alters H in place to produce split/merge points\n from an event on node `v`\n\n Parameters\n ----------\n H : nx.DiGraph\n Shape graph\n v : int\n index of the event vertex\n \"\"\"\n a, b = get_intersects(H, v)\n if a is not None:\n ai = max(H.nodes()) + 1\n assert ai not in set(H.nodes)\n H.add_node(ai, points=a[\"pt\"])\n # add new edge to H\n H.add_edge(ai, v, weight=3) # close\n H.add_edge(v, ai, weight=4) # open\n # split the edge with which there was a collision in half\n H.add_edge(a[\"e1\"], ai, weight=a[\"weight\"])\n H.add_edge(ai, a[\"e2\"], weight=a[\"weight\"])\n H.remove_edge(a[\"e1\"], a[\"e2\"])\n if b is not None:\n bi = max(H.nodes()) + 1\n assert bi not in set(H.nodes)\n H.add_node(bi, points=b[\"pt\"])\n # add new edge to H\n H.add_edge(v, bi, weight=3) # open\n H.add_edge(bi, v, weight=4) # close\n # split the edge with which there was a collision in half\n H.add_edge(b[\"e1\"], bi, weight=b[\"weight\"])\n H.add_edge(bi, b[\"e2\"], weight=b[\"weight\"])\n H.remove_edge(b[\"e1\"], b[\"e2\"])\n\n\ndef get_intersects(H: nx.DiGraph, v: int) -> tuple:\n \"\"\"Check for intersects of a vertical line passing through v on the graph H.\n Think of it like this: We basically draw a line upwards from v until we\n reach a line on H; then we register a collision, which contains the origin\n vertex of the collision in 'vx', the point of the collision in 'pt', the endpoints\n of the edge with which the line collided in 'e1' and 'e2', and the weight of that edge in 'weight'.\n\n This function returns a maximum of two collisions, one above and one below,\n which correspond to the closest edge with which the line starting at `v`\n collided. 
If there is no such edge, it returns `None`, otherwise returns a\n dict with information about the collision specified above.\n\n Parameters\n ----------\n H : nx.DiGraph\n graph\n v : int\n event idx\n\n Returns\n -------\n tuple\n collisions above and/or below v on edges of H\n \"\"\"\n # list of all collisions with vertex v on the vertical\n collisions = []\n # check each edge\n for edge in H.edges:\n # check whether there could be a collision\n if check_edge(H, v, edge):\n e1, e2 = edge\n p1, p2, pv = (\n H.nodes[e1][\"points\"],\n H.nodes[e2][\"points\"],\n H.nodes[v][\"points\"],\n )\n ipt = intersect_vertical_line(p1, p2, pv)\n # store attrs of collision\n collisions.append(\n {\"vx\": v, \"pt\": ipt, \"e1\": e1, \"e2\": e2, \"weight\": H[e1][e2][\"weight\"]}\n )\n above, below = None, None\n y = H.nodes[v][\"points\"][1]\n # the difference between the collision point and the vertex point\n def ydiff(c):\n return c[\"pt\"][1] - y\n\n if collisions:\n # the minimum collision point above `v`\n above = min([c for c in collisions if c[\"pt\"][1] > y], key=ydiff, default=None)\n # the maximum collision point below `v`\n below = max([c for c in collisions if c[\"pt\"][1] < y], key=ydiff, default=None)\n return above, below\n\n\ndef intersect_vertical_line(\n p1: np.ndarray, p2: np.ndarray, pv: np.ndarray\n) -> np.ndarray:\n \"\"\"Get line intersect on the line formed by [p1, p2] for the point at `pv`\n\n Parameters\n ----------\n p1 : np.ndarray\n end point of line\n p2 : np.ndarray\n end point of line\n pv : np.ndarray\n point of intersect\n\n Returns\n -------\n np.ndarray\n line from `pv` to the point of intersection\n \"\"\"\n m = abs(pv[0] - p1[0]) / abs(p2[0] - p1[0])\n # y = mx + b\n py = m * (p2[1] - p1[1]) + p1[1]\n return np.array([pv[0], py])\n\n\ndef check_edge(H: nx.DiGraph, v: int, edge: tuple) -> bool:\n \"\"\"Check whether an edge [edge=(e1, e2)] on H intersects with a straight vertical line\n at `v`\n\n Parameters\n ----------\n H : nx.DiGraph\n graph\n v : int\n vertex idx\n edge : tuple of (int, int)\n edge idxs\n\n Returns\n -------\n bool\n Whether the x coordinate of the point `v` is between the edge x-coords\n \"\"\"\n e1, e2 = edge\n p1, p2, pv = H.nodes[e1][\"points\"], H.nodes[e2][\"points\"], H.nodes[v][\"points\"]\n if p1[0] > pv[0] and p2[0] < pv[0]:\n return True\n elif p1[0] < pv[0] and p2[0] > pv[0]:\n return True\n else:\n return False\n\n\ndef check_lu(H: nx.DiGraph, v: int) -> int:\n l, u, above = lower_upper(H, v)\n lpoint = H.nodes[l][\"points\"] # lower point\n upoint = H.nodes[u][\"points\"] # upper point\n vpoint = H.nodes[v][\"points\"] # vertex point\n\n event = None\n # both points on the right of v\n if lpoint[0] > vpoint[0] and upoint[0] > vpoint[0]:\n if above:\n event = Event.OPEN\n else:\n event = Event.SPLIT\n # both points on the left of v\n elif lpoint[0] < vpoint[0] and upoint[0] < vpoint[0]:\n if above:\n event = Event.CLOSE\n else:\n event = Event.MERGE\n # lower right, upper left\n elif lpoint[0] > vpoint[0] and upoint[0] < vpoint[0]:\n event = Event.INFLECTION\n # lower left, upper right\n elif lpoint[0] < vpoint[0] and upoint[0] > vpoint[0]:\n event = Event.INFLECTION\n\n if event is None:\n raise (ValueError(\"Event was not categorized correctly!\"))\n return event\n\n\ndef lower_upper(H: nx.DiGraph, v: int):\n \"\"\"Check the neighbors of `v` to determine which is the lower neighbor\n and which is the upper neighbor. 
If the predecessor is the upper neighbor,\n also returns True\n\n Parameters\n ----------\n H : nx.DiGraph\n must contain 'weight' as a key.\n v : int\n The vertex to check\n\n Returns\n -------\n tuple of (int, int, bool)\n lower vertex, upper vertex, above\n \"\"\"\n # filter only for neighboring nodes which were not constructed by a split,\n # i.e. node-->neighbor or neighbor-->node edge weight is 1 or 2.\n vA, vB = None, None\n for p in H.predecessors(v):\n w = H[p][v][\"weight\"]\n vA = p\n if w == 1 or w == 2:\n vA = p\n for s in H.successors(v):\n w = H[v][s][\"weight\"]\n vB = s\n if w == 1 or w == 2:\n vB = s\n # cannot have trailing nodes\n assert vA is not None\n assert vB is not None\n # if vA is None or vB is None:\n # raise(ValueError('No valid predecessors or successors to node {}'.format(v)))\n # compute cross product. if qcross is true (i.e. positive cross product), then\n # the vertex vA is \"above\" the vertex vB. [\"above\" here means \"above\" in a\n # topological sense; it's not necessarily above in the cartesian sense.]\n #\n # Note that when \"above\" is true, vB and vA are flipped (i.e. the successor\n # is stored into lower, and the predecessor is stored into upper.)\n above = qcross(H, vA, v, vB)\n if above:\n lower, upper = vB, vA\n else:\n lower, upper = vA, vB\n return lower, upper, above\n\n\ndef qcross(H, vA, v, vB):\n \"\"\"Compute cross product on ordered nodes `vA`, `v`, `vB` of graph `H`.\n\n Parameters\n ----------\n H : nx.DiGraph\n The graph for which `vA`, `v`, `vB` are nodes.\n vA : int\n Node of H.\n v : int\n Node of H\n vB : int\n Node of H\n \"\"\"\n # find vectors with tails at `v` and pointing to vA, vB\n p1 = H.nodes[vA][\"points\"]\n p2 = H.nodes[vB][\"points\"]\n pv = H.nodes[v][\"points\"]\n\n a = pv - p1\n b = pv - p2\n # this is roughly a measure of the topological orientation of vA, vB\n if a[0] * b[1] - b[0] * a[1] >= 0:\n return True\n else:\n return False\n\n\ndef rotate_graph(G: nx.DiGraph, theta: float, posattr: str = \"points\") -> nx.DiGraph:\n \"\"\"Rotate a graph `G`. This makes a copy of `G` and returns it, leaving `G` untouched.\n\n Parameters\n ----------\n G : nx.DiGraph\n input Graph\n theta : float\n rotation angle, radians\n posattr : str, optional\n which key the points are stored under, by default 'points'\n\n Returns\n -------\n nx.DiGraph\n the new rotated graph\n \"\"\"\n H = copy.deepcopy(G)\n rotmatr = make_rot_matrix(theta)\n for node in G.nodes:\n original = G.nodes[node][\"points\"]\n rotated = original @ rotmatr\n H.nodes[node][\"points\"] = rotated\n return H\n\n\ndef make_rot_matrix(theta: float) -> np.ndarray:\n \"\"\"Create a rotation matrix\n\n Parameters\n ----------\n theta : float\n Angle\n\n Returns\n -------\n np.ndarray\n 2x2 Rotation Matrix created from Angle.\n \"\"\"\n rotmatr = np.array(\n [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]\n )\n return rotmatr\n"
] |
[
[
"numpy.dot",
"numpy.min",
"numpy.arange",
"numpy.linalg.norm",
"numpy.cos",
"numpy.sin",
"numpy.max",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JuliaSprenger/nixpy
|
[
"46d15954db664ae394ff2e19163958f6ed3c1fad",
"46d15954db664ae394ff2e19163958f6ed3c1fad",
"46d15954db664ae394ff2e19163958f6ed3c1fad",
"46d15954db664ae394ff2e19163958f6ed3c1fad"
] |
[
"nixio/property.py",
"nixio/test/test_multi_tag.py",
"nixio/test/test_file.py",
"docs/source/examples/spikeFeatures.py"
] |
[
"# -*- coding: utf-8 -*-\n# Copyright © 2014, German Neuroinformatics Node (G-Node)\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted under the terms of the BSD License. See\n# LICENSE file in the root of the Project.\n\ntry:\n from collections.abc import Sequence, Iterable\nexcept ImportError:\n from collections import Sequence, Iterable\nfrom enum import Enum\nfrom numbers import Number\nfrom six import string_types, ensure_str, ensure_text\nimport numpy as np\n\nfrom .datatype import DataType\nfrom .entity import Entity\nfrom . import util\n\n\nclass OdmlType(Enum):\n \"\"\"\n OdmlType provides all types currently supported by the odML\n data format. It provides additional information about the\n nature of the values of an odML Property.\n \"\"\"\n Boolean = 'boolean'\n Int = 'int'\n Float = 'float'\n String = 'string'\n Text = 'text'\n URL = 'url'\n Person = 'person'\n Datetime = 'datetime'\n Date = 'date'\n Time = 'time'\n\n def __str__(self):\n return self.value\n\n def compatible(self, value):\n \"\"\"\n compatible returns True or False depending on whether a\n passed value can be mapped to an OdmlType or not.\n\n :param value: Any single value\n :return: Boolean\n \"\"\"\n if (self in (self.String, self.Text, self.URL, self.Person) and\n DataType.get_dtype(value) == DataType.String):\n return True\n elif (self == self.Boolean and\n DataType.get_dtype(value) == DataType.Bool):\n return True\n elif (self == self.Float and\n DataType.get_dtype(value) == DataType.Double):\n return True\n elif self == self.Int and DataType.get_dtype(value) == DataType.Int64:\n return True\n elif (self in (self.Time, self.Date, self.Datetime) and\n DataType.get_dtype(value) == DataType.String):\n # This might need some extra work, treating as String for now, but\n # keeping it separated from other String values.\n return True\n\n return False\n\n @classmethod\n def get_odml_type(cls, dtype):\n \"\"\"\n get_odml_type returns the appropriate OdmlType\n for a handed in nix value DataType.\n\n :param dtype: nix DataType\n :return: OdmlType\n \"\"\"\n if dtype in DataType.FloatTypes:\n return cls.Float\n elif dtype in DataType.IntTypes:\n return cls.Int\n elif dtype == DataType.String:\n return cls.String\n elif dtype == DataType.Bool:\n return cls.Boolean\n\n raise TypeError(\"No available OdmlType for type '%s'\" % dtype)\n\n\nclass Property(Entity):\n \"\"\"An odML Property\"\"\"\n def __init__(self, nixfile, nixparent, h5dataset):\n super(Property, self).__init__(nixfile, nixparent, h5dataset)\n self._h5dataset = self._h5group\n\n @classmethod\n def create_new(cls, nixfile, nixparent, h5parent, name,\n dtype, shape=None, oid=None):\n if shape is None or shape[0] == 0:\n shape = (8, )\n util.check_entity_name(name)\n dtype = cls._make_h5_dtype(dtype)\n\n h5dataset = h5parent.create_dataset(name, shape=shape, dtype=dtype)\n h5dataset.set_attr(\"name\", name)\n\n if not util.is_uuid(oid):\n oid = util.create_id()\n\n h5dataset.set_attr(\"entity_id\", oid)\n\n newentity = cls(nixfile, nixparent, h5dataset)\n newentity.force_created_at()\n newentity.force_updated_at()\n\n return newentity\n\n @property\n def name(self):\n return self._h5dataset.get_attr(\"name\")\n\n @property\n def definition(self):\n return self._h5dataset.get_attr(\"definition\")\n\n @definition.setter\n def definition(self, d):\n util.check_attr_type(d, str)\n self._h5dataset.set_attr(\"definition\", d)\n\n @property\n def unit(self):\n return 
self._h5dataset.get_attr(\"unit\")\n\n @unit.setter\n def unit(self, new):\n if new:\n new = util.units.sanitizer(new)\n\n if new == \"\":\n new = None\n\n util.check_attr_type(new, str)\n self._h5dataset.set_attr(\"unit\", new)\n\n @property\n def uncertainty(self):\n dataset = self._h5dataset\n filever = tuple(dataset._parent.file.attrs[\"version\"])\n if filever < (1, 1, 1):\n val = self._h5dataset.dataset[:]\n uncertainty = val[0][\"uncertainty\"]\n return uncertainty\n return self._h5dataset.get_attr(\"uncertainty\")\n\n @uncertainty.setter\n def uncertainty(self, uncertainty):\n util.check_attr_type(uncertainty, Number)\n uncertainty = float(uncertainty) if uncertainty is not None else None\n self._h5dataset.set_attr(\"uncertainty\", uncertainty)\n\n @property\n def reference(self):\n dataset = self._h5dataset\n filever = tuple(dataset._parent.file.attrs[\"version\"])\n if filever < (1, 1, 1):\n val = self._h5dataset.dataset[:]\n reference = val[0][\"reference\"]\n return reference\n return self._h5dataset.get_attr(\"reference\")\n\n @reference.setter\n def reference(self, ref):\n util.check_attr_type(ref, str)\n self._h5dataset.set_attr(\"reference\", ref)\n\n @property\n def dependency(self):\n return self._h5dataset.get_attr(\"dependency\")\n\n @dependency.setter\n def dependency(self, dep):\n util.check_attr_type(dep, str)\n self._h5dataset.set_attr(\"dependency\", dep)\n\n @property\n def dependency_value(self):\n return self._h5dataset.get_attr(\"dependency_value\")\n\n @dependency_value.setter\n def dependency_value(self, depval):\n util.check_attr_type(depval, str)\n self._h5dataset.set_attr(\"dependency_value\", depval)\n\n @property\n def value_origin(self):\n return self._h5dataset.get_attr(\"value_origin\")\n\n @value_origin.setter\n def value_origin(self, origin):\n util.check_attr_type(origin, str)\n self._h5dataset.set_attr(\"value_origin\", origin)\n\n @property\n def odml_type(self):\n otype = self._h5dataset.get_attr(\"odml_type\")\n if not otype:\n return None\n\n return OdmlType(otype)\n\n @odml_type.setter\n def odml_type(self, new_type):\n \"\"\"\n odml_type can only be set if the handed in new type is a valid\n OdmlType and if it is compatible with the value data type of\n the property.\n\n :param new_type: OdmlType\n \"\"\"\n if not isinstance(new_type, OdmlType):\n raise TypeError(\"'{}' is not a valid odml_type.\".format(new_type))\n\n if not new_type.compatible(self.values[0]):\n raise TypeError(\"Type '{}' is incompatible \"\n \"with property values\".format(new_type))\n\n self._h5dataset.set_attr(\"odml_type\", str(new_type))\n\n def _read_old_values(self):\n val = self._h5dataset.dataset[:]\n return tuple(v[\"value\"] for v in val)\n\n @property\n def values(self):\n dataset = self._h5dataset\n filever = tuple(dataset._parent.file.attrs[\"version\"])\n if filever < (1, 1, 1):\n values = self._read_old_values()\n return values\n if not sum(dataset.shape):\n return tuple()\n\n data = dataset.read_data()\n\n def data_to_value(dat):\n if isinstance(dat, bytes):\n dat = ensure_str(dat) # py2compat\n return dat\n\n values = tuple(map(data_to_value, data))\n\n return values\n\n @values.setter\n def values(self, vals):\n \"\"\"\n Set the value of the property discarding any previous information.\n\n :param vals: a single value or list of values.\n \"\"\"\n # Make sure boolean value 'False' gets through as well...\n if vals is None or (isinstance(vals, (Sequence, Iterable)) and not len(vals)):\n self.delete_values()\n return\n\n if not isinstance(vals, (Sequence, 
Iterable)) or isinstance(vals, string_types):\n vals = [vals]\n\n # Make sure all values are of the same data type\n vtype = self._check_new_value_types(vals)\n if vtype == DataType.String:\n vals = [ensure_text(v) for v in vals] # py2compat\n self._h5dataset.shape = np.shape(vals)\n data = np.array(vals, dtype=vtype)\n self._h5dataset.write_data(data)\n\n def extend_values(self, data):\n \"\"\"\n Extends values to existing data.\n Suitable when new data is nested or original data is long.\n \"\"\"\n vtype = self._check_new_value_types(data)\n\n arr = np.array(data, dtype=vtype).flatten('C')\n dataset = self._h5dataset\n src_len = len(self.values)\n dlen = len(arr)\n dataset.shape = (src_len+dlen,)\n dataset.write_data(arr, slc=np.s_[src_len: src_len+dlen])\n\n def _check_new_value_types(self, data):\n if isinstance(data, (Sequence, Iterable)) and not isinstance(data, string_types):\n single_val = data[0]\n else:\n single_val = data\n data = [data]\n\n def check_prop_consistent(vtype):\n # Check if the new data has the same type as the existing property\n # data\n if vtype != self.data_type:\n raise TypeError(\"New data type '{}' is inconsistent with the \"\n \"Property's data type '{}'\".format(\n vtype, self.data_type))\n\n def check_new_data_consistent(vtype):\n # Check if each value in the new data has the same type\n for val in data:\n if DataType.get_dtype(val) != vtype:\n raise TypeError(\"Array contains inconsistent values. \"\n \"Only values of type '{}' can be \"\n \"assigned\".format(vtype))\n\n if hasattr(data, \"dtype\"):\n # numpy array: no need to scan values, arrays are consistent but\n # check for 1D\n vtype = data.dtype\n check_prop_consistent(vtype)\n else:\n # Will raise an error, if the data type of the first value is not\n # valid\n vtype = DataType.get_dtype(single_val)\n check_prop_consistent(vtype)\n check_new_data_consistent(vtype)\n\n return vtype\n\n @property\n def data_type(self):\n return self._h5dataset.dtype\n\n def delete_values(self):\n self._h5dataset.shape = (0,)\n\n @staticmethod\n def _make_h5_dtype(valued_type):\n str_ = util.vlen_str_dtype\n\n if valued_type == DataType.String:\n valued_type = str_\n\n return valued_type\n\n def __str__(self):\n return \"{}: {{name = {}}}\".format(\n type(self).__name__, self.name\n )\n\n def __repr__(self):\n return self.__str__()\n\n def pprint(self, indent=2, max_length=80, current_depth=-1):\n \"\"\"\n Pretty print method. Method is called in Section.pprint()\n \"\"\"\n property_spaces = \"\"\n prefix = \"\"\n if current_depth >= 0:\n property_spaces = \" \" * ((current_depth + 2) * indent)\n prefix = \"|-\"\n if self.unit is None:\n value_string = str(self.values)\n else:\n value_string = \"{}{}\".format(self.values, self.unit)\n p_len = len(property_spaces) + len(self.name) + len(value_string)\n if p_len >= max_length - 4:\n split_len = int((max_length - len(property_spaces)\n + len(self.name) - len(prefix))/2)\n str1 = value_string[0: split_len]\n str2 = value_string[-split_len:]\n print((\"{}{} {}: {} ... {}\".format(property_spaces, prefix,\n self.name, str1, str2)))\n else:\n print((\"{}{} {}: {}\".format(property_spaces, prefix, self.name,\n value_string)))\n",
"# -*- coding: utf-8 -*-\n# Copyright © 2014, German Neuroinformatics Node (G-Node)\n#\n# All rights reserved.\n#\n# Redistribution and use in section and binary forms, with or without\n# modification, are permitted under the terms of the BSD License. See\n# LICENSE file in the root of the Project.\nfrom nixio.exceptions.exceptions import InvalidSlice\nimport os\nimport time\nimport unittest\nfrom collections import OrderedDict\nimport numpy as np\nimport nixio as nix\nfrom nixio.exceptions import DuplicateName, UnsupportedLinkType\nfrom .tmp import TempDir\n\n\nclass TestMultiTags(unittest.TestCase):\n\n def setUp(self):\n interval = 1.0\n ticks = [1.2, 2.3, 3.4, 4.5, 6.7]\n unit = \"ms\"\n\n self.tmpdir = TempDir(\"mtagtest\")\n self.testfilename = os.path.join(self.tmpdir.path, \"mtagtest.nix\")\n self.file = nix.File.open(self.testfilename, nix.FileMode.Overwrite)\n self.block = self.file.create_block(\"test block\", \"recordingsession\")\n\n self.my_array = self.block.create_data_array(\"my array\", \"test\", nix.DataType.Int16, (0, 0))\n self.my_tag = self.block.create_multi_tag(\"my tag\", \"tag\", self.my_array)\n\n self.your_array = self.block.create_data_array(\"your array\", \"test\", nix.DataType.Int16, (0, 0))\n self.your_tag = self.block.create_multi_tag(\"your tag\", \"tag\", self.your_array)\n\n self.data_array = self.block.create_data_array(\"featureTest\", \"test\", nix.DataType.Double, (2, 10, 5))\n\n data = np.zeros((2, 10, 5))\n value = 0.\n for i in range(2):\n value = 0\n for j in range(10):\n for k in range(5):\n value += 1\n data[i, j, k] = value\n\n self.data_array[:, :, :] = data\n\n set_dim = self.data_array.append_set_dimension()\n set_dim.labels = [\"label_a\", \"label_b\"]\n sampled_dim = self.data_array.append_sampled_dimension(interval)\n sampled_dim.unit = unit\n range_dim = self.data_array.append_range_dimension(ticks)\n range_dim.unit = unit\n\n event_positions = np.zeros((2, 3))\n event_positions[0, 0] = 0.0\n event_positions[0, 1] = 3.0\n event_positions[0, 2] = 3.4\n\n event_positions[1, 0] = 0.0\n event_positions[1, 1] = 8.0\n event_positions[1, 2] = 2.3\n\n event_extents = np.zeros((2, 3))\n event_extents[0, 0] = 1.0\n event_extents[0, 1] = 6.0\n event_extents[0, 2] = 2.3\n\n event_extents[1, 0] = 1.0\n event_extents[1, 1] = 3.0\n event_extents[1, 2] = 2.0\n\n event_labels = [\"event 1\", \"event 2\"]\n dim_labels = [\"dim 0\", \"dim 1\", \"dim 2\"]\n\n self.event_array = self.block.create_data_array(\"positions\", \"test\",\n data=event_positions)\n\n self.extent_array = self.block.create_data_array(\"extents\", \"test\",\n data=event_extents)\n extent_set_dim = self.extent_array.append_set_dimension()\n extent_set_dim.labels = event_labels\n extent_set_dim = self.extent_array.append_set_dimension()\n extent_set_dim.labels = dim_labels\n\n self.feature_tag = self.block.create_multi_tag(\"feature_tag\", \"events\",\n self.event_array)\n self.feature_tag.extents = self.extent_array\n self.feature_tag.references.append(self.data_array)\n\n def tearDown(self):\n del self.file.blocks[self.block.id]\n self.file.close()\n self.tmpdir.cleanup()\n\n def test_multi_tag_new_constructor(self):\n pos = np.random.random_sample((2, 3))\n ext = np.random.random_sample((2, 3))\n mt = self.block.create_multi_tag(\"conv_test\", \"test\", pos, ext)\n np.testing.assert_almost_equal(pos, mt.positions[:])\n np.testing.assert_almost_equal(ext, mt.extents[:])\n # try reset positions and ext\n assert mt.positions.name == \"conv_test-positions\"\n assert mt.positions.type == 
\"test-positions\"\n assert mt.extents.name == \"conv_test-extents\"\n assert mt.extents.type == \"test-extents\"\n # test positions extents deleted if multitag creation failed\n pos = None\n ext = np.random.random_sample((2, 3))\n self.assertRaises(ValueError, self.block.create_multi_tag,\n \"err_test\", \"test\", pos, ext)\n self.block.create_data_array(\"dup_test-\"\n \"positions\", \"test\", data=[0])\n pos = np.random.random_sample((2, 3))\n ext = np.random.random_sample((2, 3))\n self.assertRaises(DuplicateName, self.block.create_multi_tag,\n \"dup_test\", \"test\", pos, ext)\n del self.block.data_arrays[\"dup_test-positions\"]\n self.block.create_data_array(\"dup_test2-\"\n \"extents\", \"test\", data=[0])\n pos = np.random.random_sample((2, 3))\n ext = np.random.random_sample((2, 3))\n self.assertRaises(DuplicateName, self.block.create_multi_tag,\n \"dup_test2\", \"test\", pos, ext)\n pos = np.random.random_sample((2, 3))\n ext = [None, None]\n self.assertRaises(TypeError, self.block.create_multi_tag,\n \"dup_test3\", \"test\", pos, ext)\n\n def test_multi_tag_flex(self):\n pos1d = self.block.create_data_array(\"pos1\", \"pos\", data=[[0], [1]])\n pos1d1d = self.block.create_data_array(\"pos1d1d\", \"pos\", data=[0, 1])\n pos2d = self.block.create_data_array(\"pos2\", \"pos\", data=[[0, 0], [1, 1]])\n pos3d = self.block.create_data_array(\"pos3\", \"pos\", data=[[0, 1, 2], [1, 2, 3]])\n ext1d = self.block.create_data_array('ext1', 'ext', data=[[1], [1]])\n ext1d1d = self.block.create_data_array('ext1d1d', 'ext', data=[1, 1])\n ext2d = self.block.create_data_array('ext2', 'ext', data=[[1, 2], [0, 2]])\n ext3d = self.block.create_data_array('ext3', 'ext', data=[[1, 1, 1], [1, 1, 1]])\n mt1d = self.block.create_multi_tag(\"mt1d\", \"mt\", pos1d)\n mt1d.extents = ext1d\n mt1d1d = self.block.create_multi_tag(\"mt1d1d\", \"mt\", pos1d1d)\n mt1d1d.extents = ext1d1d\n mt2d = self.block.create_multi_tag(\"mt2d\", \"mt\", pos2d)\n mt2d.extents = ext2d\n mt3d = self.block.create_multi_tag(\"mt3d\", \"mt\", pos3d)\n mt3d.extents = ext3d\n # create some references\n da1d = self.block.create_data_array('ref1d', 'ref', data=np.arange(10))\n da1d.append_sampled_dimension(1., label=\"time\", unit=\"s\")\n da2d = self.block.create_data_array('ref2d', 'ref', data=np.arange(100).reshape((10, 10)))\n da2d.append_sampled_dimension(1., label=\"time\", unit=\"s\")\n da2d.append_set_dimension()\n da3d = self.block.create_data_array('ref3d', 'ref', data=np.arange(1000).reshape((10, 10, 10)))\n da3d.append_sampled_dimension(1., label=\"time\", unit=\"s\")\n da3d.append_set_dimension()\n da3d.append_set_dimension()\n mt1d.references.extend([da1d, da2d, da3d])\n mt1d1d.references.extend([da1d, da2d, da3d])\n mt2d.references.extend([da1d, da2d, da3d])\n mt3d.references.extend([da1d, da2d, da3d])\n np.testing.assert_almost_equal(mt1d.tagged_data(0, 0)[:], da1d[0:1])\n np.testing.assert_almost_equal(mt1d.tagged_data(0, 1)[:], da2d[0:1, :])\n np.testing.assert_almost_equal(mt1d.tagged_data(0, 2)[:], da3d[0:1, :, :])\n np.testing.assert_almost_equal(mt1d1d.tagged_data(0, 0)[:], da1d[0:1])\n np.testing.assert_almost_equal(mt1d1d.tagged_data(0, 1)[:], da2d[0:1, :])\n np.testing.assert_almost_equal(mt1d1d.tagged_data(0, 2)[:], da3d[0:1, :, :])\n np.testing.assert_almost_equal(mt2d.tagged_data(0, 0)[:], da1d[0:1])\n np.testing.assert_almost_equal(mt2d.tagged_data(0, 1)[:], da2d[0:1, 0:2])\n np.testing.assert_almost_equal(mt2d.tagged_data(0, 2)[:], da3d[0:1, 0:2, :])\n np.testing.assert_almost_equal(mt3d.tagged_data(1, 
0)[:], da1d[1:2])\n np.testing.assert_almost_equal(mt3d.tagged_data(1, 1)[:], da2d[1:2, 2:3])\n np.testing.assert_almost_equal(mt3d.tagged_data(1, 2)[:], da3d[1:2, 2:3, 3:4])\n\n def test_multi_tag_eq(self):\n assert self.my_tag == self.my_tag\n assert not self.my_tag == self.your_tag\n assert self.my_tag is not None\n\n def test_multi_tag_id(self):\n assert self.my_tag.id is not None\n\n def test_multi_tag_name(self):\n assert self.my_tag.name is not None\n\n def test_multi_tag_type(self):\n def set_none():\n self.my_tag.type = None\n\n assert self.my_tag.type is not None\n self.assertRaises(Exception, set_none)\n\n self.my_tag.type = \"foo type\"\n assert self.my_tag.type == \"foo type\"\n\n def test_multi_tag_definition(self):\n assert self.my_tag.definition is None\n\n self.my_tag.definition = \"definition\"\n assert self.my_tag.definition == \"definition\"\n\n self.my_tag.definition = None\n assert self.my_tag.definition is None\n\n def test_multi_tag_timestamps(self):\n created_at = self.my_tag.created_at\n assert created_at > 0\n\n updated_at = self.my_tag.updated_at\n assert updated_at > 0\n\n self.my_tag.force_created_at(1403530068)\n assert self.my_tag.created_at == 1403530068\n\n def test_multi_tag_units(self):\n assert self.my_tag.units == ()\n\n self.my_tag.units = [\"mV\", \"ms\"]\n assert self.my_tag.units == (\"mV\", \"ms\")\n\n self.my_tag.units = [] # () also works!\n assert self.my_tag.units == ()\n\n def test_multi_tag_positions(self):\n def set_none():\n self.my_tag.positions = None\n\n assert self.my_tag.positions is not None\n old_positions = self.my_tag.positions\n\n new_positions = self.block.create_data_array(\"pos\", \"position\",\n nix.DataType.Int16,\n (0, 0))\n self.my_tag.positions = new_positions\n assert self.my_tag.positions == new_positions\n\n self.assertRaises(TypeError, set_none)\n\n self.my_tag.positions = old_positions\n assert self.my_tag.positions == old_positions\n\n def test_multi_tag_extents(self):\n assert self.my_tag.extents is None\n\n new_extents = self.block.create_data_array(\"ext\", \"extent\",\n nix.DataType.Int16, (0, 0))\n self.my_tag.extents = new_extents\n assert self.my_tag.extents == new_extents\n\n self.my_tag.extents = None\n assert self.my_tag.extents is None\n\n def test_multi_tag_references(self):\n assert len(self.my_tag.references) == 0\n\n self.assertRaises(TypeError, self.my_tag.references.append, 100)\n\n reference1 = self.block.create_data_array(\"reference1\", \"stimuli\",\n nix.DataType.Int16, (0,))\n reference2 = self.block.create_data_array(\"reference2\", \"stimuli\",\n nix.DataType.Int16, (0,))\n\n self.my_tag.references.append(reference1)\n self.my_tag.references.append(reference2)\n\n assert len(self.my_tag.references) == 2\n assert reference1 in self.my_tag.references\n assert reference2 in self.my_tag.references\n\n # id and name access\n assert reference1 == self.my_tag.references[reference1.name]\n assert reference1 == self.my_tag.references[reference1.id]\n assert reference2 == self.my_tag.references[reference2.name]\n assert reference2 == self.my_tag.references[reference2.id]\n\n assert reference1.name in self.my_tag.references\n assert reference2.name in self.my_tag.references\n assert reference1.id in self.my_tag.references\n assert reference2.id in self.my_tag.references\n\n del self.my_tag.references[reference2]\n assert self.my_tag.references[0] == reference1\n\n del self.my_tag.references[reference1]\n assert len(self.my_tag.references) == 0\n\n def test_multi_tag_features(self):\n assert 
len(self.my_tag.features) == 0\n\n data_array = self.block.create_data_array(\"feature\", \"stimuli\",\n nix.DataType.Int16, (0,))\n feature = self.my_tag.create_feature(data_array,\n nix.LinkType.Untagged)\n assert len(self.my_tag.features) == 1\n\n assert feature in self.my_tag.features\n assert feature.id in self.my_tag.features\n assert \"notexist\" not in self.my_tag.features\n\n assert feature.id == self.my_tag.features[0].id\n assert feature.id == self.my_tag.features[-1].id\n\n # id and name access\n assert feature.id == self.my_tag.features[feature.id].id\n assert feature.id == self.my_tag.features[data_array.id].id\n assert feature.id == self.my_tag.features[data_array.name].id\n assert data_array == self.my_tag.features[data_array.id].data\n assert data_array == self.my_tag.features[data_array.name].data\n\n assert data_array.id in self.my_tag.features\n assert data_array.name in self.my_tag.features\n\n data_frame = self.block.create_data_frame(\n \"dataframe feature\", \"test\",\n col_dict=OrderedDict([(\"number\", nix.DataType.Float)]),\n data=[(10.,)]\n )\n df_feature = self.my_tag.create_feature(data_frame, nix.LinkType.Untagged)\n\n assert len(self.my_tag.features) == 2\n\n assert df_feature in self.my_tag.features\n assert df_feature.id in self.my_tag.features\n\n assert df_feature.id == self.my_tag.features[1].id\n assert df_feature.id == self.my_tag.features[-1].id\n\n # id and name access\n assert df_feature.id == self.my_tag.features[df_feature.id].id\n assert df_feature.id == self.my_tag.features[data_frame.id].id\n assert df_feature.id == self.my_tag.features[data_frame.name].id\n assert data_frame == self.my_tag.features[data_frame.id].data\n assert data_frame == self.my_tag.features[data_frame.name].data\n\n assert data_frame.id in self.my_tag.features\n assert data_frame.name in self.my_tag.features\n\n assert isinstance(self.my_tag.features[0].data, nix.DataArray)\n assert isinstance(self.my_tag.features[1].data, nix.DataFrame)\n\n del self.my_tag.features[0]\n assert len(self.my_tag.features) == 1\n del self.my_tag.features[0]\n assert len(self.my_tag.features) == 0\n\n def test_multi_tag_tagged_data(self):\n sample_iv = 0.001\n x_data = np.arange(0, 10, sample_iv)\n y_data = np.sin(2 * np.pi * x_data)\n\n block = self.block\n da = block.create_data_array(\"sin\", \"data\", data=y_data)\n da.unit = 'dB'\n dim = da.append_sampled_dimension(sample_iv)\n dim.unit = 's'\n\n pos = block.create_data_array('pos1', 'positions', data=np.array([0.]).reshape(1, 1))\n pos.append_set_dimension()\n pos.append_set_dimension()\n pos.unit = 'ms'\n ext = block.create_data_array('ext1', 'extents', data=np.array([2000.]).reshape(1, 1))\n ext.append_set_dimension()\n ext.append_set_dimension()\n ext.unit = 'ms'\n\n mtag = block.create_multi_tag(\"sin1\", \"tag\", pos)\n mtag.extents = ext\n mtag.units = ['ms']\n mtag.references.append(da)\n\n assert mtag.tagged_data(0, 0).shape == (2000,)\n assert np.array_equal(y_data[:2000], mtag.tagged_data(0, 0)[:])\n assert mtag.tagged_data(0, 0, stop_rule=nix.SliceMode.Inclusive).shape == (2001,)\n assert np.array_equal(y_data[:2001], mtag.tagged_data(0, 0, stop_rule=nix.SliceMode.Inclusive)[:])\n\n # get by name\n data = mtag.tagged_data(0, da.name)\n assert data.shape == (2000,)\n assert np.array_equal(y_data[:2000], data[:])\n\n # get by id\n data = mtag.tagged_data(0, da.id)\n assert data.shape == (2000,)\n assert np.array_equal(y_data[:2000], data[:])\n\n # multi dimensional data\n # position 1 should fail since the position in the third 
dimension does not point to a valid point\n # positions 2 and 3 should deliver valid DataViews\n # similarly, segment 0 should again return an invalid DataView because of dimension 3\n sample_iv = 1.0\n ticks = [1.2, 2.3, 3.4, 4.5, 6.7]\n unit = \"ms\"\n pos = self.block.create_data_array(\"pos\", \"test\", data=[[1, 1, 1], [1, 1, 1.2], [1, 1, 1.2]])\n pos.append_set_dimension()\n pos.append_set_dimension()\n ext = self.block.create_data_array(\"ext\", \"test\", data=[[1, 5, 2], [1, 5, 2], [0, 4, 1]])\n ext.append_set_dimension()\n ext.append_set_dimension()\n units = [\"none\", \"ms\", \"ms\"]\n data = np.random.random_sample((3, 10, 5))\n da = self.block.create_data_array(\"dimtest\", \"test\", data=data)\n setdim = da.append_set_dimension()\n setdim.labels = [\"Label A\", \"Label B\", \"Label D\"]\n samdim = da.append_sampled_dimension(sample_iv)\n samdim.unit = unit\n randim = da.append_range_dimension(ticks)\n randim.unit = unit\n\n postag = self.block.create_multi_tag(\"postag\", \"event\", pos)\n postag.references.append(da)\n postag.units = units\n\n segtag = self.block.create_multi_tag(\"region\", \"segment\", pos)\n segtag.references.append(da)\n segtag.extents = ext\n segtag.units = units\n\n posdata = postag.tagged_data(0, 0)\n assert not posdata.valid\n assert \"InvalidSlice error\" in posdata.debug_message\n assert posdata.data_extent is None\n assert posdata.shape is None\n with self.assertRaises(InvalidSlice):\n posdata._write_data(np.random.randn(1))\n assert sum(posdata[:].shape) == 0\n\n posdata = postag.tagged_data(1, 0)\n assert posdata.valid\n assert posdata.debug_message == \"\"\n assert len(posdata.shape) == 3\n assert posdata.shape == (1, 1, 1)\n assert np.isclose(posdata[0, 0, 0], data[1, 1, 0])\n\n posdata = postag.tagged_data(2, 0)\n assert len(posdata.shape) == 3\n assert posdata.shape == (1, 1, 1)\n assert np.isclose(posdata[0, 0, 0], data[1, 1, 0])\n\n segdata = segtag.tagged_data(1, 0)\n assert len(segdata.shape) == 3\n assert segdata.shape == (1, 5, 2)\n\n segdata = segtag.tagged_data(2, 0)\n assert len(segdata.shape) == 3\n assert segdata.shape == (1, 4, 1)\n\n # retrieve all positions for all references\n for ridx, _ in enumerate(mtag.references):\n for pidx, _ in enumerate(mtag.positions):\n mtag.tagged_data(pidx, ridx)\n\n wrong_pos = self.block.create_data_array(\"incorpos\", \"test\", data=[[1, 1, 1], [100, 1, 1]])\n wrong_pos.append_set_dimension()\n wrong_pos.append_set_dimension()\n postag.positions = wrong_pos\n self.assertRaises(IndexError, postag.tagged_data, 1, 1)\n wrong_ext = self.block.create_data_array(\"incorext\", \"test\", data=[[1, 500, 2], [0, 4, 1]])\n wrong_ext.append_set_dimension()\n wrong_ext.append_set_dimension()\n segtag.extents = wrong_ext\n self.assertRaises(IndexError, segtag.tagged_data, 0, 1)\n\n def test_multi_tag_data_coefficients(self):\n sample_iv = 0.001\n x_data = np.arange(0, 10, sample_iv)\n y_data = np.sin(2 * np.pi * x_data)\n\n block = self.block\n da = block.create_data_array(\"sin\", \"data\", data=y_data)\n da.unit = 'V'\n da.polynom_coefficients = (10, 0.3)\n dim = da.append_sampled_dimension(sample_iv)\n dim.unit = 's'\n\n pos = block.create_data_array('pos1', 'positions', data=np.array([0.]).reshape(1, 1))\n pos.append_set_dimension()\n pos.append_set_dimension()\n pos.unit = 'ms'\n ext = block.create_data_array('ext1', 'extents', data=np.array([2000.]).reshape(1, 1))\n ext.append_set_dimension()\n ext.append_set_dimension()\n ext.unit = 'ms'\n\n mtag = block.create_multi_tag(\"sin1\", \"tag\", pos)\n 
mtag.extents = ext\n mtag.units = ['ms']\n mtag.references.append(da)\n\n assert np.array_equal(da[:2000], mtag.tagged_data(0, 0)[:])\n\n da.expansion_origin = 0.89\n assert np.array_equal(da[:2000], mtag.tagged_data(0, 0)[:])\n\n def test_multi_tag_tagged_data_1d(self):\n # MultiTags to vectors behave a bit differently\n # Testing separately\n oneddata = self.block.create_data_array(\"1dda\", \"data\",\n data=list(range(100)))\n oneddata.append_sampled_dimension(0.1)\n onedpos = self.block.create_data_array(\"1dpos\", \"positions\",\n data=[1, 9, 9.5])\n onedmtag = self.block.create_multi_tag(\"2dmt\", \"mtag\",\n positions=onedpos)\n onedmtag.references.append(oneddata)\n for pidx, _ in enumerate(onedmtag.positions):\n onedmtag.tagged_data(pidx, 0)\n\n def test_multi_tag_feature_data(self):\n index_data = self.block.create_data_array(\"indexed feature data\", \"test\",\n dtype=nix.DataType.Double, shape=(10, 10))\n dim1 = index_data.append_sampled_dimension(1.0)\n dim1.unit = \"ms\"\n dim2 = index_data.append_sampled_dimension(1.0)\n dim2.unit = \"ms\"\n\n data1 = np.zeros((10, 10))\n value = 0.0\n total = 0.0\n for i in range(10):\n value = 100 * i\n for j in range(10):\n value += 1\n data1[i, j] = value\n total += data1[i, j]\n\n index_data[:, :] = data1\n\n tagged_data = self.block.create_data_array(\"tagged feature data\", \"test\",\n dtype=nix.DataType.Double, shape=(10, 20, 10))\n dim1 = tagged_data.append_sampled_dimension(1.0)\n dim1.unit = \"ms\"\n dim2 = tagged_data.append_sampled_dimension(1.0)\n dim2.unit = \"ms\"\n dim3 = tagged_data.append_sampled_dimension(1.0)\n dim3.unit = \"ms\"\n\n data2 = np.zeros((10, 20, 10))\n for i in range(10):\n value = 100 * i\n for j in range(20):\n for k in range(10):\n value += 1\n data2[i, j, k] = value\n\n tagged_data[:, :, :] = data2\n\n self.feature_tag.create_feature(index_data, nix.LinkType.Indexed)\n self.feature_tag.create_feature(tagged_data, nix.LinkType.Tagged)\n self.feature_tag.create_feature(index_data, nix.LinkType.Untagged)\n\n # preparations done, actually test\n assert len(self.feature_tag.features) == 3\n\n # indexed feature\n feat_data = self.feature_tag.feature_data(0, 0)\n assert len(feat_data.shape) == 2\n assert feat_data.size == 10\n assert np.sum(feat_data) == 55\n\n # disabled, don't understand how it could ever have worked,\n # there are only 3 positions\n data_view = self.feature_tag.feature_data(9, 0)\n assert np.sum(data_view[:, :]) == 9055\n\n # untagged feature\n data_view = self.feature_tag.feature_data(0, 2)\n assert data_view.size == 100\n\n data_view = self.feature_tag.feature_data(0, 2)\n assert data_view.size == 100\n assert np.sum(data_view) == total\n\n # tagged feature\n data_view = self.feature_tag.feature_data(0, 1)\n assert len(data_view.shape) == 3\n\n data_view = self.feature_tag.feature_data(1, 1)\n assert len(data_view.shape) == 3\n\n # === retrieve by name ===\n # indexed feature\n feat_data = self.feature_tag.feature_data(0, index_data.name)\n assert len(feat_data.shape) == 2\n assert feat_data.size == 10\n assert np.sum(feat_data) == 55\n\n # disabled, there are only 3 positions\n data_view = self.feature_tag.feature_data(9, index_data.name)\n assert np.sum(data_view[:, :]) == 9055\n\n # tagged feature\n data_view = self.feature_tag.feature_data(0, tagged_data.name)\n assert len(data_view.shape) == 3\n\n data_view = self.feature_tag.feature_data(1, tagged_data.name)\n assert len(data_view.shape) == 3\n\n def out_of_bounds():\n self.feature_tag.feature_data(2, 1)\n\n 
self.assertRaises(IndexError, out_of_bounds)\n\n def test_timestamp_autoupdate(self):\n pos = self.block.create_data_array(\"positions.time\", \"test.time\",\n nix.DataType.Int16, (0, 0))\n mtag = self.block.create_multi_tag(\"mtag.time\", \"test.time\", pos)\n\n mtagtime = mtag.updated_at\n time.sleep(1) # wait for time to change\n mtag.positions = self.block.create_data_array(\"pos2.time\",\n \"test.time\",\n nix.DataType.Int8, (0,))\n self.assertNotEqual(mtag.updated_at, mtagtime)\n\n mtagtime = mtag.updated_at\n time.sleep(1) # wait for time to change\n mtag.extents = self.block.create_data_array(\"extents.time\",\n \"test.time\",\n nix.DataType.Int8, (0,))\n self.assertNotEqual(mtag.updated_at, mtagtime)\n\n def test_timestamp_noautoupdate(self):\n self.file.auto_update_timestamps = False\n pos = self.block.create_data_array(\"positions.time\", \"test.time\",\n nix.DataType.Int16, (0, 0))\n mtag = self.block.create_multi_tag(\"mtag.time\", \"test.time\", pos)\n\n mtagtime = mtag.updated_at\n time.sleep(1) # wait for time to change\n mtag.positions = self.block.create_data_array(\"pos2.time\",\n \"test.time\",\n nix.DataType.Int8, (0,))\n self.assertEqual(mtag.updated_at, mtagtime)\n\n mtagtime = mtag.updated_at\n time.sleep(1) # wait for time to change\n mtag.extents = self.block.create_data_array(\"extents.time\",\n \"test.time\",\n nix.DataType.Int8, (0,))\n self.assertEqual(mtag.updated_at, mtagtime)\n\n def test_multi_tag_feature_dataframe(self):\n numberdata = np.random.random(20)\n number_feat = self.block.create_data_frame(\n \"number feature\", \"test\",\n col_dict=OrderedDict([(\"number\", nix.DataType.Float)]),\n data=[(n,) for n in numberdata]\n )\n column_descriptions = OrderedDict([(\"name\", nix.DataType.String),\n (\"duration\", nix.DataType.Double)])\n values = [(\"One\", 0.1), (\"Two\", 0.2), (\"Three\", 0.3), (\"Four\", 0.4),\n (\"Five\", 0.5), (\"Six\", 0.6), (\"Seven\", 0.7), (\"Eight\", 0.8),\n (\"Nine\", 0.9), (\"Ten\", 1.0)]\n ramp_feat = self.block.create_data_frame(\"ramp feature\", \"test\",\n col_dict=column_descriptions,\n data=values)\n ramp_feat.label = \"voltage\"\n ramp_feat.units = (None, \"s\")\n\n pos_tag = self.block.create_multi_tag(\"feature test\", \"test\", [4, 7, 8])\n\n with self.assertRaises(UnsupportedLinkType):\n pos_tag.create_feature(number_feat, nix.LinkType.Tagged)\n pos_tag.create_feature(number_feat, nix.LinkType.Untagged)\n pos_tag.create_feature(number_feat, nix.LinkType.Indexed)\n with self.assertRaises(UnsupportedLinkType):\n pos_tag.create_feature(ramp_feat, nix.LinkType.Tagged)\n pos_tag.create_feature(ramp_feat, nix.LinkType.Untagged)\n pos_tag.create_feature(ramp_feat, nix.LinkType.Indexed)\n assert len(pos_tag.features) == 4\n\n for idx, _ in enumerate(pos_tag.positions):\n data1 = pos_tag.feature_data(idx, 0)\n data2 = pos_tag.feature_data(idx, 1)\n data3 = pos_tag.feature_data(idx, 2)\n data4 = pos_tag.feature_data(idx, 3)\n\n # check expected data\n assert np.all(data1[:] == number_feat[:])\n assert np.all(data2[:] == number_feat[idx])\n assert np.all(data3[:] == ramp_feat[:])\n assert np.all(data4[:] == ramp_feat[idx])\n\n # add extents (should have no effect)\n extents = self.block.create_data_array(\"feature test.extents\", \"test\",\n data=[2, 2, 5])\n pos_tag.extents = extents\n for idx, _ in enumerate(pos_tag.positions):\n data1 = pos_tag.feature_data(idx, 0)\n data2 = pos_tag.feature_data(idx, 1)\n data3 = pos_tag.feature_data(idx, 2)\n data4 = pos_tag.feature_data(idx, 3)\n\n # check expected data\n assert 
np.all(data1[:] == number_feat[:])\n assert np.all(data2[:] == number_feat[idx])\n assert np.all(data3[:] == ramp_feat[:])\n assert np.all(data4[:] == ramp_feat[idx])\n\n def test_multi_tag_tagged_data_slice_mode(self):\n data = np.random.random_sample((3, 100, 10))\n da = self.block.create_data_array(\"signals\", \"test.signals\", data=data)\n da.unit = \"mV\"\n da.append_set_dimension(labels=[\"A\", \"B\", \"C\"])\n sample_iv = 0.001\n timedim = da.append_sampled_dimension(sampling_interval=sample_iv)\n timedim.unit = \"s\"\n posdim = da.append_range_dimension([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9])\n posdim.unit = \"mm\"\n\n # exact_tag has a pos+ext that is exactly equal to a dimension tick\n exact_tag = self.block.create_multi_tag(\"tickpoint\", \"test.tag\",\n positions=[(0, 0.03, 0.0011), (1, 0.05, 0.0015)],\n extents=[(1, 0.02, 0.0005), (1, 0.04, 0.0003)])\n exact_tag.units = [\"none\", \"s\", \"m\"]\n\n exact_tag.references.append(da)\n\n # FIRST TAG\n # dim2: [0.001, 0.002, ..., 0.03, 0.031, ..., 0.049, 0.05, 0.051, ...]\n # ^ pos [30] ^ pos+ext [50]\n # Inclusive mode includes index 50, exclusive does not\n #\n # dim3: [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9]\n # ^ pos [1] ^ pos+ext [6]\n # Inclusive mode includes index 6, exclusive does not\n\n slice_default = exact_tag.tagged_data(0, 0)\n assert slice_default.shape == (1, 20, 5)\n np.testing.assert_array_equal(slice_default, da[0:1, 30:50, 1:6]) # default exclusive\n\n slice_inclusive = exact_tag.tagged_data(0, 0, stop_rule=nix.SliceMode.Inclusive)\n assert slice_inclusive.shape == (2, 21, 6)\n np.testing.assert_array_equal(slice_inclusive, da[0:2, 30:51, 1:7])\n\n slice_exclusive = exact_tag.tagged_data(0, 0, stop_rule=nix.SliceMode.Exclusive)\n assert slice_exclusive.shape == (1, 20, 5)\n np.testing.assert_array_equal(slice_exclusive, da[0:1, 30:50, 1:6])\n\n # SECOND TAG\n # dim2: [0.001, 0.002, ..., 0.05, 0.051, ..., 0.089, 0.09, 0.091, ...]\n # ^ pos [50] ^ pos+ext [90]\n # Inclusive mode includes index 90, exclusive does not\n #\n # dim3: [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9]\n # ^ pos [5] ^ pos+ext [8]\n # Inclusive mode includes index 8, exclusive does not\n\n slice_default = exact_tag.tagged_data(1, 0)\n assert slice_default.shape == (1, 40, 3)\n np.testing.assert_array_equal(slice_default, da[1:2, 50:90, 5:8]) # default exclusive\n\n slice_inclusive = exact_tag.tagged_data(1, 0, stop_rule=nix.SliceMode.Inclusive)\n assert slice_inclusive.shape == (2, 41, 4)\n np.testing.assert_array_equal(slice_inclusive, da[1:3, 50:91, 5:9])\n\n slice_exclusive = exact_tag.tagged_data(1, 0, stop_rule=nix.SliceMode.Exclusive)\n assert slice_exclusive.shape == (1, 40, 3)\n np.testing.assert_array_equal(slice_exclusive, da[1:2, 50:90, 5:8])\n\n # midpoint_tag has a pos+ext that falls between dimension ticks\n midpoint_tag = self.block.create_multi_tag(\"midpoint\", \"test.tag\",\n positions=([0, 0.03, 0.0011], [1, 0.05, 0.0015]),\n extents=([1, 0.0301, 0.00051], [1, 0.0401, 0.00031])) # .1 offset\n midpoint_tag.units = [\"none\", \"s\", \"m\"]\n\n # FIRST TAG\n # dim2: [0.001, 0.002, ..., 0.03, 0.031, ..., 0.059, 0.06,| 0.061, ...]\n # ^ pos [30] ^ pos+ext [60] + 0.1\n # Both inclusive and exclusive include index 60\n #\n # dim3: [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,| 1.7, 1.8, 1.9]\n # ^ pos [1] ^ pos+ext [6] + 0.1\n # Both inclusive and exclusive include index 6\n\n midpoint_tag.references.append(da)\n\n # all slicing is inclusive since the pos+ext points are between ticks\n slice_default = 
midpoint_tag.tagged_data(0, 0)\n assert slice_default.shape == (1, 31, 6)\n np.testing.assert_array_equal(slice_default, da[0:1, 30:61, 1:7])\n\n slice_inclusive = midpoint_tag.tagged_data(0, 0, stop_rule=nix.SliceMode.Inclusive)\n assert slice_inclusive.shape == (2, 31, 6)\n np.testing.assert_array_equal(slice_inclusive, da[0:2, 30:61, 1:7])\n\n slice_exclusive = midpoint_tag.tagged_data(0, 0, stop_rule=nix.SliceMode.Exclusive)\n assert slice_exclusive.shape == (1, 31, 6)\n np.testing.assert_array_equal(slice_exclusive, da[0:1, 30:61, 1:7])\n\n # SECOND TAG\n # dim2: [0.001, 0.002, ..., 0.05, 0.051, ..., 0.089, 0.09,| 0.091, ...]\n # ^ pos [50] ^ pos+ext [90] + 0.1\n # Both inclusive and exclusive include index 90\n #\n # dim3: [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,| 1.9]\n # ^ pos [5] ^ pos+ext [8] + 0.1\n # Both inclusive and exclusive include index 8\n\n midpoint_tag.references.append(da)\n\n # all slicing is inclusive since the pos+ext points are between ticks\n slice_default = midpoint_tag.tagged_data(1, 0)\n assert slice_default.shape == (1, 41, 4)\n np.testing.assert_array_equal(slice_default, da[1:2, 50:91, 5:9])\n\n slice_inclusive = midpoint_tag.tagged_data(1, 0, stop_rule=nix.SliceMode.Inclusive)\n assert slice_inclusive.shape == (2, 41, 4)\n np.testing.assert_array_equal(slice_inclusive, da[1:3, 50:91, 5:9])\n\n slice_exclusive = midpoint_tag.tagged_data(1, 0, stop_rule=nix.SliceMode.Exclusive)\n assert slice_exclusive.shape == (1, 41, 4)\n np.testing.assert_array_equal(slice_exclusive, da[1:2, 50:91, 5:9])\n\n def test_tagged_set_dim(self):\n \"\"\"\n Simple test where the slice can be calculated directly from the position and extent and compared to the original\n data.\n Set dimension slicing.\n \"\"\"\n nsignals = 10\n data = np.random.random_sample((nsignals, 100))\n da = self.block.create_data_array(\"data\", \"data\", data=data)\n da.append_set_dimension()\n da.append_sampled_dimension(sampling_interval=1).unit = \"s\"\n\n posarray = self.block.create_data_array(\"mtag.positions\", \"test.positions\", dtype=float, shape=(1,))\n extarray = self.block.create_data_array(\"mtag.extents\", \"test.extents\", dtype=float, shape=(1,))\n mtag = self.block.create_multi_tag(\"mtag\", \"simple\", positions=posarray)\n mtag.extents = extarray\n\n mtag.references.append(da)\n\n for pos in range(nsignals):\n for ext in range(2, nsignals-pos):\n mtag.positions[:] = [pos]\n mtag.extents[:] = [ext]\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0), da[pos:pos+ext])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Exclusive), da[pos:pos+ext])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Inclusive), da[pos:pos+ext+1])\n\n # +0.1 should round up (ceil) the start position\n # +0.1 * 2 should round down (floor) the stop position and works the same for both inclusive and\n # exclusive\n mtag.positions[:] = [pos+0.1]\n mtag.extents[:] = [ext+0.1]\n start = pos+1\n stop = pos+ext+1\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0), da[start:stop])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Exclusive), da[start:stop])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Inclusive), da[start:stop])\n\n if pos+ext+2 < len(da):\n # +0.9 should round up (ceil) the start position\n # +0.9 * 2 should round down (floor) the stop position and works the same for both inclusive and\n # exclusive\n mtag.positions[:] = [pos+0.9]\n mtag.extents[:] = [ext+0.9]\n start 
= pos+1\n stop = pos+ext+2\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0), da[start:stop])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Exclusive),\n da[start:stop])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Inclusive),\n da[start:stop])\n\n def test_tagged_range_dim(self):\n \"\"\"\n Simple test where the slice can be calculated directly from the position and extent and compared to the original\n data.\n Range dimension slicing.\n \"\"\"\n nticks = 10\n data = np.random.random_sample((nticks, 100))\n da = self.block.create_data_array(\"data\", \"data\", data=data)\n da.append_range_dimension(ticks=range(nticks))\n da.append_sampled_dimension(sampling_interval=1).unit = \"s\"\n\n posarray = self.block.create_data_array(\"mtag.positions\", \"test.positions\", dtype=float, shape=(1,))\n extarray = self.block.create_data_array(\"mtag.extents\", \"test.extents\", dtype=float, shape=(1,))\n mtag = self.block.create_multi_tag(\"mtag\", \"simple\", positions=posarray)\n mtag.extents = extarray\n\n mtag.references.append(da)\n\n for pos in range(nticks):\n for ext in range(2, nticks-pos):\n mtag.positions[:] = [pos]\n mtag.extents[:] = [ext]\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0), da[pos:pos+ext])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Exclusive), da[pos:pos+ext])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Inclusive), da[pos:pos+ext+1])\n\n # +0.1 should round up (ceil) the start position\n # +0.1 * 2 should round down (floor) the stop position and works the same for both inclusive and\n # exclusive\n mtag.positions[:] = [pos+0.1]\n mtag.extents[:] = [ext+0.1]\n start = pos+1\n stop = pos+ext+1\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0), da[start:stop])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Exclusive), da[start:stop])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Inclusive), da[start:stop])\n\n if pos+ext+2 < len(da):\n # +0.9 should round up (ceil) the start position\n # +0.9 * 2 should round down (floor) the stop position and works the same for both inclusive and\n # exclusive\n mtag.positions[:] = [pos+0.9]\n mtag.extents[:] = [ext+0.9]\n start = pos+1\n stop = pos+ext+2\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0), da[start:stop])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Exclusive),\n da[start:stop])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Inclusive),\n da[start:stop])\n\n def test_tagged_sampled_dim(self):\n \"\"\"\n Simple test where the slice can be calculated directly from the position and extent and compared to the original\n data.\n Sampled dimension slicing.\n \"\"\"\n nticks = 10\n data = np.random.random_sample((nticks, 100))\n da = self.block.create_data_array(\"data\", \"data\", data=data)\n da.append_sampled_dimension(sampling_interval=1).unit = \"V\"\n da.append_sampled_dimension(sampling_interval=1).unit = \"s\"\n\n posarray = self.block.create_data_array(\"mtag.positions\", \"test.positions\", dtype=float, shape=(1,))\n extarray = self.block.create_data_array(\"mtag.extents\", \"test.extents\", dtype=float, shape=(1,))\n mtag = self.block.create_multi_tag(\"mtag\", \"simple\", positions=posarray)\n mtag.extents = extarray\n mtag.units = [\"V\", \"s\"]\n\n mtag.references.append(da)\n\n for pos in range(nticks):\n for ext in range(2, 
nticks-pos):\n mtag.positions[:] = [pos]\n mtag.extents[:] = [ext]\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0), da[pos:pos+ext])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Exclusive), da[pos:pos+ext])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Inclusive),\n da[pos:pos+ext+1])\n\n # +0.1 should round up (ceil) the start position\n # +0.1 * 2 should round down (floor) the stop position and works the same for both inclusive and\n # exclusive\n mtag.positions[:] = [pos+0.1]\n mtag.extents[:] = [ext+0.1]\n start = pos+1\n stop = pos+ext+1\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0), da[start:stop])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Exclusive), da[start:stop])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Inclusive), da[start:stop])\n\n if pos+ext+2 < len(da):\n # +0.9 should round up (ceil) the start position\n # +0.9 * 2 should round down (floor) the stop position and works the same for both inclusive and\n # exclusive\n mtag.positions[:] = [pos+0.9]\n mtag.extents[:] = [ext+0.9]\n start = pos+1\n stop = pos+ext+2\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0), da[start:stop])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Exclusive),\n da[start:stop])\n np.testing.assert_array_almost_equal(mtag.tagged_data(0, 0, nix.SliceMode.Inclusive),\n da[start:stop])\n",
"# -*- coding: utf-8 -*-\n# Copyright © 2014, German Neuroinformatics Node (G-Node)\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted under the terms of the BSD License. See\n# LICENSE file in the root of the Project.\nimport os\nimport unittest\nimport h5py\nimport numpy as np\nimport time\n\nimport nixio as nix\nimport nixio.file as filepy\nfrom nixio.exceptions import InvalidFile\nfrom .tmp import TempDir\n\n\nclass TestFile(unittest.TestCase):\n\n def setUp(self):\n self.tmpdir = TempDir(\"filetest\")\n self.testfilename = os.path.join(self.tmpdir.path, \"filetest.nix\")\n self.file = nix.File.open(self.testfilename, nix.FileMode.Overwrite)\n\n def tearDown(self):\n self.file.close()\n self.tmpdir.cleanup()\n\n def test_file_format(self):\n assert self.file.format == \"nix\"\n assert self.file.version == filepy.HDF_FF_VERSION\n\n def test_file_timestamps(self):\n created_at = self.file.created_at\n assert created_at > 0\n\n updated_at = self.file.updated_at\n assert updated_at > 0\n\n self.file.force_created_at(1403530068)\n assert self.file.created_at == 1403530068\n\n def test_file_blocks(self):\n assert len(self.file.blocks) == 0\n\n block = self.file.create_block(\"test block\", \"recordingsession\")\n\n assert len(self.file.blocks) == 1\n\n assert block in self.file.blocks\n assert block.id in self.file.blocks\n assert \"notexist\" not in self.file.blocks\n\n assert block.id == self.file.blocks[0].id\n assert block.id == self.file.blocks[-1].id\n\n del self.file.blocks[0]\n\n assert len(self.file.blocks) == 0\n\n def test_file_sections(self):\n assert len(self.file.sections) == 0\n\n section = self.file.create_section(\"test section\", \"recordingsession\")\n\n assert len(self.file.sections) == 1\n\n assert section in self.file.sections\n assert section.id in self.file.sections\n assert \"notexist\" not in self.file.sections\n\n assert section.id == self.file.sections[0].id\n assert section.id == self.file.sections[-1].id\n\n del self.file.sections[0]\n\n assert len(self.file.sections) == 0\n\n def test_file_find_sections(self):\n for i in range(2):\n self.file.create_section(\"level1-p0-s\" + str(i), \"dummy\")\n for i in range(2):\n self.file.sections[0].create_section(\"level2-p1-s\" + str(i),\n \"dummy\")\n for i in range(2):\n self.file.sections[1].create_section(\"level2-p2-s\" + str(i),\n \"dummy\")\n for i in range(2):\n self.file.sections[0].sections[0].create_section(\n \"level3-p1-s\" + str(i), \"dummy\"\n )\n\n assert len(self.file.find_sections()) == 8\n assert len(self.file.find_sections(limit=1)) == 2\n assert(len(self.file.find_sections(filtr=lambda x: \"level2-p1-s\" in\n x.name)) == 2)\n assert(len(self.file.find_sections(filtr=lambda x: \"level2-p1-s\" in\n x.name,\n limit=1)) == 0)\n\n def test_order_tracking(self):\n blknames = []\n for idx in range(10):\n name = \"block_\" + str(idx)\n self.file.create_block(name, \"ordertest\")\n blknames.append(name)\n\n danames = []\n datablockname = blknames[0]\n datablock = self.file.blocks[datablockname]\n for idx in range(7):\n name = \"data_\" + str(idx)\n da = datablock.create_data_array(name, \"thedata\",\n data=np.array([0]))\n da.definition = \"da definition\"\n danames.append(name)\n self.file.close()\n\n self.file = nix.File.open(self.testfilename, nix.FileMode.ReadOnly)\n\n for idx in range(len(self.file.blocks)):\n self.assertEqual(blknames[idx], self.file.blocks[idx].name)\n\n datablock = self.file.blocks[datablockname]\n for idx 
in range(len(datablock.data_arrays)):\n self.assertEqual(danames[idx], datablock.data_arrays[idx].name)\n\n def test_context_open(self):\n fname = os.path.join(self.tmpdir.path, \"contextopen.nix\")\n with nix.File.open(fname, nix.FileMode.Overwrite) as nf:\n nf.create_block(\"blocky\", \"test-block\")\n\n with nix.File.open(fname, nix.FileMode.ReadOnly) as nf:\n self.assertEqual(nf.blocks[0].name, \"blocky\")\n\n def test_copy_on_file(self):\n tar_filename = os.path.join(self.tmpdir.path, \"copytarget.nix\")\n tar_file = nix.File.open(tar_filename, nix.FileMode.Overwrite)\n blk1 = self.file.create_block(\"t111t bk\", \"testcopy\")\n blk2 = tar_file.create_block(\"blk2\", \"blk\")\n blk1.create_data_array(\"da1\", 'grp da1', data=[(1, 2, 3)])\n da2 = blk2.create_data_array(\"da2\", 'grp da2', data=[(4, 5, 6)])\n blk2.create_multi_tag(\"mt2\", \"useless\", da2)\n sec1 = self.file.create_section(\"test sec\", 'test')\n sec1.create_section(\"child sec\", \"child\")\n ori_prop = sec1.create_property(\"prop origin\",\n values_or_dtype=[1, 2, 3])\n tar_file.create_block(copy_from=blk1, keep_copy_id=False)\n copied_sec = tar_file.copy_section(sec1, children=False, keep_id=True)\n assert tar_file.sections[0].name == sec1.name\n assert tar_file.blocks[1].name == blk1.name\n assert tar_file.blocks[1].data_arrays[0].name \\\n == blk1.data_arrays[0].name\n assert tar_file.blocks[1].data_arrays[0].id != blk1.data_arrays[0].id\n assert tar_file.sections[0] == sec1\n assert len(self.file.find_sections()) == 2\n assert len(tar_file.find_sections()) == 1\n assert copied_sec.props[0] == ori_prop # Properties are still there\n assert not copied_sec.sections # children is False\n tar_file.close()\n # test copying on the same file\n self.assertRaises(NameError, self.file.create_block, copy_from=blk1)\n self.file.create_block(name=\"111\", copy_from=blk1)\n assert self.file.blocks[0] == self.file.blocks[1] # ID stays the same\n assert self.file.blocks[0].name != self.file.blocks[1].name\n\n def test_timestamp_autoupdate(self):\n # Using Block to test Entity.definition\n blk = self.file.create_block(\"block\", \"timetest\")\n blktime = blk.updated_at\n time.sleep(1) # wait for time to change\n blk.definition = \"updated\"\n # no update\n self.assertNotEqual(blk.updated_at, blktime)\n\n rblk = self.file.blocks[\"block\"] # read through container\n time.sleep(1) # wait for time to change\n rblk.definition = \"updated again\"\n self.assertNotEqual(rblk.updated_at, blktime)\n\n # Using Block to test Entity.type\n blktime = blk.updated_at\n time.sleep(1) # wait for time to change\n blk.type = \"updated\"\n # no update\n self.assertNotEqual(blk.updated_at, blktime)\n\n rblk = self.file.blocks[\"block\"] # read through container\n time.sleep(1) # wait for time to change\n rblk.type = \"updated again\"\n self.assertNotEqual(rblk.updated_at, blktime)\n\n def test_timestamp_noautoupdate(self):\n # Using Block to test Entity.definition\n blk = self.file.create_block(\"block\", \"timetest\")\n\n # disable timestamp autoupdating\n self.file.auto_update_timestamps = False\n blktime = blk.updated_at\n time.sleep(1) # wait for time to change\n blk.definition = \"update\"\n self.assertEqual(blk.updated_at, blktime)\n\n rblk = self.file.blocks[\"block\"] # read through container\n rblktime = rblk.updated_at\n time.sleep(1) # wait for time to change\n rblk.definition = \"time should change\"\n self.assertEqual(rblk.updated_at, rblktime)\n\n blktime = blk.updated_at\n time.sleep(1) # wait for time to change\n blk.type = 
\"update\"\n self.assertEqual(blk.updated_at, blktime)\n\n rblk = self.file.blocks[\"block\"] # read through container\n rblktime = rblk.updated_at\n time.sleep(1) # wait for time to change\n rblk.type = \"time should change\"\n self.assertEqual(rblk.updated_at, rblktime)\n\n\nclass TestFileVer(unittest.TestCase):\n\n backend = \"h5py\"\n filever = filepy.HDF_FF_VERSION\n fformat = filepy.FILE_FORMAT\n\n def try_open(self, mode):\n nix_file = nix.File.open(self.testfilename, mode)\n nix_file.close()\n\n def set_header(self, fformat=None, version=None, fileid=None):\n if fformat is None:\n fformat = self.fformat\n if version is None:\n version = self.filever\n if fileid is None:\n fileid = nix.util.create_id()\n self.h5root.attrs[\"format\"] = fformat\n self.h5root.attrs[\"version\"] = version\n self.h5root.attrs[\"id\"] = fileid\n self.h5root.attrs[\"created_at\"] = 0\n self.h5root.attrs[\"updated_at\"] = 0\n if \"data\" not in self.h5root:\n self.h5root.create_group(\"data\")\n self.h5root.create_group(\"metadata\")\n\n def setUp(self):\n self.tmpdir = TempDir(\"vertest\")\n self.testfilename = os.path.join(self.tmpdir.path, \"vertest.nix\")\n self.h5file = h5py.File(self.testfilename, mode=\"w\")\n self.h5root = self.h5file[\"/\"]\n\n def tearDown(self):\n self.h5file.close()\n self.tmpdir.cleanup()\n\n def test_read_write(self):\n self.set_header()\n self.try_open(nix.FileMode.ReadWrite)\n\n def test_read_only(self):\n ver_x, ver_y, ver_z = self.filever\n roversion = (ver_x, ver_y, ver_z+2)\n self.set_header(version=roversion)\n self.try_open(nix.FileMode.ReadOnly)\n with self.assertRaises(RuntimeError):\n self.try_open(nix.FileMode.ReadWrite)\n\n def test_no_open(self):\n ver_x, ver_y, ver_z = self.filever\n noversion = (ver_x, ver_y+3, ver_z+2)\n self.set_header(version=noversion)\n with self.assertRaises(RuntimeError):\n self.try_open(nix.FileMode.ReadWrite)\n with self.assertRaises(RuntimeError):\n self.try_open(nix.FileMode.ReadOnly)\n noversion = (ver_x, ver_y+1, ver_z)\n self.set_header(version=noversion)\n with self.assertRaises(RuntimeError):\n self.try_open(nix.FileMode.ReadWrite)\n with self.assertRaises(RuntimeError):\n self.try_open(nix.FileMode.ReadOnly)\n noversion = (ver_x+1, ver_y, ver_z)\n self.set_header(version=noversion)\n with self.assertRaises(RuntimeError):\n self.try_open(nix.FileMode.ReadWrite)\n with self.assertRaises(RuntimeError):\n self.try_open(nix.FileMode.ReadOnly)\n\n def test_bad_tuple(self):\n self.set_header(version=(-1, -1, -1))\n with self.assertRaises(RuntimeError):\n self.try_open(nix.FileMode.ReadOnly)\n self.set_header(version=(1, 2))\n with self.assertRaises(RuntimeError):\n self.try_open(nix.FileMode.ReadOnly)\n\n def test_bad_format(self):\n self.set_header(fformat=\"NOT_A_NIX_FILE\")\n with self.assertRaises(InvalidFile):\n self.try_open(nix.FileMode.ReadOnly)\n\n def test_bad_id(self):\n self.set_header(fileid=\"\")\n with self.assertRaises(RuntimeError):\n self.try_open(nix.FileMode.ReadOnly)\n\n # empty file ID OK for versions older than 1.2.0\n self.set_header(version=(1, 1, 1), fileid=\"\")\n self.try_open(nix.FileMode.ReadOnly)\n\n self.set_header(version=(1, 1, 0), fileid=\"\")\n self.try_open(nix.FileMode.ReadOnly)\n\n self.set_header(version=(1, 0, 0), fileid=\"\")\n self.try_open(nix.FileMode.ReadOnly)\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"Copyright © 2014 - 2021 German Neuroinformatics Node (G-Node)\n\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted under the terms of the BSD License. See\n LICENSE file in the root of the Project.\n\n Author: Jan Grewe <[email protected]>\n\n See https://github.com/G-node/nix/wiki for more information.\n\n\"\"\"\n\nimport nixio\nimport lif\nimport numpy as np\nimport scipy.signal as signal\nimport matplotlib.pylab as plt\n\n\ndef fake_neuron(stepsize=0.001, offset=.8, sta_offset=1000):\n stimulus = np.random.randn(102000) * 3.5\n b, a = signal.butter(1, 12.5, fs=1. / stepsize, btype=\"low\")\n stimulus = signal.filtfilt(b, a, stimulus)\n stimulus = stimulus[1000:-1000]\n lif_model = lif.LIF(stepsize=stepsize, offset=offset)\n time, v, spike_times = lif_model.run_stimulus(stimulus)\n snippets = np.zeros((len(spike_times), 2 * sta_offset))\n\n for i, t in enumerate(spike_times):\n index = int(round(t / stepsize))\n if index < sta_offset:\n snip = stimulus[0:index + sta_offset]\n snippets[i, -len(snip):] = snip\n elif (index + sta_offset) > len(stimulus):\n snip = stimulus[index - sta_offset:]\n snippets[i, 0:len(snip)] = snip\n else:\n snippets[i, :] = stimulus[index - sta_offset:index + sta_offset]\n\n return time, v, spike_times, snippets\n\n\ndef plot_data(tag):\n data_array = tag.references[0]\n voltage = np.zeros(data_array.shape)\n data_array.read_direct(voltage)\n\n x_axis = data_array.dimensions[0]\n time = x_axis.axis(data_array.data_extent[0])\n\n spike_times = tag.positions[:]\n\n feature_data_array = tag.features[0].data\n snippets = tag.features[0].data[:]\n\n single_snippet = tag.feature_data(3, 0)[:]\n\n snippet_time_dim = feature_data_array.dimensions[1]\n snippet_time = snippet_time_dim.axis(feature_data_array.data_extent[1])\n\n response_axis = plt.subplot2grid((2, 2), (0, 0), rowspan=1, colspan=2)\n single_snippet_axis = plt.subplot2grid((2, 2), (1, 0), rowspan=1, colspan=1)\n average_snippet_axis = plt.subplot2grid((2, 2), (1, 1), rowspan=1, colspan=1)\n\n response_axis.plot(time, voltage, color='dodgerblue', label=data_array.name)\n response_axis.scatter(spike_times, np.ones(spike_times.shape)*np.max(voltage), color='red', label=tag.name)\n response_axis.set_xlabel(x_axis.label + ((\" [\" + x_axis.unit + \"]\") if x_axis.unit else \"\"))\n response_axis.set_ylabel(data_array.label + ((\" [\" + data_array.unit + \"]\") if data_array.unit else \"\"))\n response_axis.set_title(data_array.name)\n response_axis.set_xlim(0, np.max(time))\n response_axis.set_ylim((1.2 * np.min(voltage), 1.2 * np.max(voltage)))\n response_axis.legend(ncol=2, loc=\"lower center\", fontsize=8)\n\n single_snippet_axis.plot(snippet_time, single_snippet.T, color=\"red\", label=(\"snippet No 4\"))\n single_snippet_axis.set_xlabel(snippet_time_dim.label + ((\" [\" + snippet_time_dim.unit + \"]\") if snippet_time_dim.unit else \"\"))\n single_snippet_axis.set_ylabel(feature_data_array.label + ((\" [\" + feature_data_array.unit + \"]\") if feature_data_array.unit else \"\"))\n single_snippet_axis.set_title(\"single stimulus snippet\")\n single_snippet_axis.set_xlim(np.min(snippet_time), np.max(snippet_time))\n single_snippet_axis.set_ylim((1.2 * np.min(snippets[3,:]), 1.2 * np.max(snippets[3,:])))\n single_snippet_axis.legend()\n\n mean_snippet = np.mean(snippets, axis=0)\n std_snippet = np.std(snippets, axis=0)\n average_snippet_axis.fill_between(snippet_time, mean_snippet 
+ std_snippet, mean_snippet - std_snippet, color=\"tab:red\", alpha=0.25)\n average_snippet_axis.plot(snippet_time, mean_snippet, color=\"red\", label=(feature_data_array.name + str(4)))\n average_snippet_axis.set_xlabel(snippet_time_dim.label + ((\" [\" + snippet_time_dim.unit + \"]\") if snippet_time_dim.unit else \"\"))\n average_snippet_axis.set_ylabel(feature_data_array.label + ((\" [\" + feature_data_array.unit + \"]\") if feature_data_array.unit else \"\"))\n average_snippet_axis.set_title(\"spike-triggered average\")\n average_snippet_axis.set_xlim(np.min(snippet_time), np.max(snippet_time))\n average_snippet_axis.set_ylim((1.2 * np.min(mean_snippet - std_snippet), 1.2 * np.max(mean_snippet + std_snippet)))\n\n plt.subplots_adjust(left=0.15, top=0.875, bottom=0.1, right=0.98, hspace=0.35, wspace=0.25)\n plt.gcf().set_size_inches((5.5, 4.5))\n # plt.savefig(\"../images/spike_features.png\")\n plt.show()\n\n\ndef main():\n stepsize = 0.0001 # s\n sta_offset = 1000 # samples\n time, voltage, spike_times, sts = fake_neuron(stepsize=0.0001, sta_offset=sta_offset)\n\n # create a new file overwriting any existing content\n file_name = 'spike_features.h5'\n file = nixio.File.open(file_name, nixio.FileMode.Overwrite)\n\n # create a 'Block' that represents a grouping object. Here, the recording session.\n # it gets a name and a type\n block = file.create_block(\"block name\", \"nix.session\")\n\n # create a 'DataArray' to take the membrane voltage\n data = block.create_data_array(\"membrane voltage\", \"nix.regular_sampled.time_series\", data=voltage, label=\"membrane voltage\")\n # add descriptors for the time axis\n data.append_sampled_dimension(stepsize, label=\"time\", unit=\"s\")\n\n # create the positions DataArray\n positions = block.create_data_array(\"times\", \"nix.events.spike_times\", data=spike_times)\n positions.append_range_dimension_using_self()\n\n # create a MultiTag\n multi_tag = block.create_multi_tag(\"spike times\", \"nix.events.spike_times\", positions)\n multi_tag.references.append(data)\n\n # save stimulus snippets in a DataArray\n snippets = block.create_data_array(\"spike triggered stimulus\", \"nix.regular_sampled.multiple_series\", data=sts, label=\"stimulus\", unit=\"nA\")\n snippets.append_set_dimension()\n snippets.append_sampled_dimension(stepsize, offset= -sta_offset * stepsize, label=\"time\", unit=\"s\")\n\n # set snippets as an indexed feature of the multi_tag\n multi_tag.create_feature(snippets, nixio.LinkType.Indexed)\n\n # let's plot the data from the stored information\n plot_data(multi_tag)\n file.close()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.array",
"numpy.shape"
],
[
"numpy.random.random",
"numpy.array_equal",
"numpy.arange",
"numpy.random.random_sample",
"numpy.sin",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_almost_equal",
"numpy.all",
"numpy.random.randn",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.isclose"
],
[
"numpy.array"
],
[
"matplotlib.pylab.show",
"scipy.signal.filtfilt",
"matplotlib.pylab.gcf",
"numpy.min",
"matplotlib.pylab.subplot2grid",
"numpy.ones",
"numpy.max",
"scipy.signal.butter",
"numpy.std",
"numpy.mean",
"matplotlib.pylab.subplots_adjust",
"numpy.random.randn",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
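The apis column above pairs each nixio source string in this row with the fully qualified numpy, scipy and matplotlib calls found in it, and the possible_versions column records inferred version constraints per library. The dump does not say how those call lists were produced; the sketch below shows one plausible extraction approach using Python's ast module. This is an assumption of the sketch, not documented behaviour of the dataset: the helper name extract_api_calls is made up, and it only resolves plain `import x` / `import x as y` aliases, not `from x import y`.

# Hypothetical sketch: one way an "apis" column like the one above could be
# derived from a source string. The dataset does not document its actual
# extraction method; this handles only top-level `import` aliases and dotted
# attribute calls such as np.testing.assert_array_equal.
import ast

def extract_api_calls(source: str) -> set:
    tree = ast.parse(source)
    aliases = {}  # local name -> imported module path, e.g. "np" -> "numpy"
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for a in node.names:
                aliases[a.asname or a.name] = a.name
    calls = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            parts = []
            func = node.func
            while isinstance(func, ast.Attribute):  # walk np.testing.assert_...
                parts.append(func.attr)
                func = func.value
            if isinstance(func, ast.Name) and func.id in aliases:
                parts.append(aliases[func.id])
                calls.add(".".join(reversed(parts)))
    return calls

print(sorted(extract_api_calls("import numpy as np\nx = np.zeros((2, 2))\ns = np.sum(x)")))
# ['numpy.sum', 'numpy.zeros']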
valeriehk/tf-pose-estimation
|
[
"da6e9fd5f43bdcaffd20df4d55d60c5dfbb5a39f"
] |
[
"src/assignment.py"
] |
[
"import argparse\nimport logging\nimport time\n\nimport cv2\nimport numpy as np\n\nimport common\nfrom estimator import TfPoseEstimator\nfrom networks import get_graph_path, model_wh\n\nimport platform\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nlogger = logging.getLogger('TfPoseEstimator-WebCam')\nlogger.setLevel(logging.CRITICAL)\nch = logging.StreamHandler()\nch.setLevel(logging.CRITICAL)\nformatter = logging.Formatter(\n '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\nfps_time = 0\n\n#\nPOSE_COCO_BODY_PARTS = {\n 0: \"Nose\",\n 1: \"Neck\",\n 2: \"RShoulder\",\n 3: \"RElbow\",\n 4: \"RWrist\",\n 5: \"LShoulder\",\n 6: \"LElbow\",\n 7: \"LWrist\",\n 8: \"RHip\",\n 9: \"RKnee\",\n 10: \"RAnkle\",\n 11: \"LHip\",\n 12: \"LKnee\",\n 13: \"LAnkle\",\n 14: \"REye\",\n 15: \"LEye\",\n 16: \"REar\",\n 17: \"LEar\",\n 18: \"Background\",\n}\n\n# call this when a taxi is being hailed!\ndef hail_taxi(img):\n print(\"Someone is hailing a taxi!\")\n cv2.putText(img, \"TAXI!\",\n (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (94, 218, 255), 2)\n cv2.putText(img, platform.uname().node,\n (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)\n\nif __name__ == '__main__':\n # arguements to your program\n parser = argparse.ArgumentParser(\n description='tf-pose-estimation realtime webcam')\n parser.add_argument('--camera', type=int, default=0)\n parser.add_argument('--zoom', type=float, default=1.0)\n parser.add_argument(\n '--resolution',\n type=str,\n default='432x368',\n help='network input resolution. default=432x368')\n parser.add_argument(\n '--model',\n type=str,\n default='mobilenet_thin',\n help='cmu / mobilenet_thin')\n parser.add_argument(\n '--show-process',\n type=bool,\n default=False,\n help='for debug purpose, if enabled, speed for inference is dropped.')\n args = parser.parse_args()\n\n w, h = model_wh(args.resolution)\n e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))\n camera = cv2.VideoCapture(args.camera)\n ret_val, image = camera.read()\n \n print(\"**** CTRL+C to exit ****\")\n while True:\n # get image form the camera\n ret_val, image = camera.read()\n # boilerplate\n canvas = np.zeros_like(image)\n img_scaled = cv2.resize(\n image,\n None,\n fx=args.zoom,\n fy=args.zoom,\n interpolation=cv2.INTER_LINEAR)\n dx = (canvas.shape[1] - img_scaled.shape[1]) // 2\n dy = (canvas.shape[0] - img_scaled.shape[0]) // 2\n canvas[dy:dy + img_scaled.shape[0], dx:\n dx + img_scaled.shape[1]] = img_scaled\n image = canvas\n # feed image into the neural network\n humans = e.inference(image) # list of humans\n for id, human in enumerate(humans):\n\n # TODO ensure it only does this when someone is hailing a taxi.\n # That is, an arm is above their head.\n hail_taxi(image)\n\n # Debugging statement: remove before demonstration.\n # print([(POSE_COCO_BODY_PARTS[k], v.x, v.y) for k,v in human.body_parts.items()])\n\n # drawing lines on an image\n image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)\n\n # FPS counter\n cv2.putText(image, \"FPS: {:.2f}\".format(1.0 / (time.time() - fps_time)),\n (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (128, 0, 0), 2)\n cv2.imshow('tf-pose-estimation result', image)\n fps_time = time.time()\n if cv2.waitKey(1) == 27:\n break\n\n cv2.destroyAllWindows()\n"
] |
[
[
"numpy.zeros_like"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
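The assignment.py row above lists a single numpy call, numpy.zeros_like, which the script uses to build a black canvas and paste a zoom-scaled webcam frame into its centre. Below is a minimal, numpy-only sketch of that centering pattern for zoom factors at or below 1; center_on_canvas is a hypothetical helper name, and the nearest-neighbour index resize stands in for the cv2.resize call the original uses.

# Minimal sketch of the canvas-centering pattern from assignment.py above.
# The array stands in for a cv2 camera frame; cv2 itself is not needed here.
import numpy as np

def center_on_canvas(image: np.ndarray, zoom: float) -> np.ndarray:
    canvas = np.zeros_like(image)  # black canvas, same shape/dtype as frame
    h, w = image.shape[:2]
    new_h, new_w = int(h * zoom), int(w * zoom)
    # nearest-neighbour resize via index sampling, to stay numpy-only
    rows = (np.arange(new_h) * h // new_h).clip(0, h - 1)
    cols = (np.arange(new_w) * w // new_w).clip(0, w - 1)
    scaled = image[rows][:, cols]
    dy = (canvas.shape[0] - scaled.shape[0]) // 2
    dx = (canvas.shape[1] - scaled.shape[1]) // 2
    canvas[dy:dy + scaled.shape[0], dx:dx + scaled.shape[1]] = scaled
    return canvas

frame = np.full((368, 432, 3), 255, dtype=np.uint8)
out = center_on_canvas(frame, zoom=0.5)
print(out.shape)  # (368, 432, 3): original shape, content shrunk and centered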
dyfloveslife/DeblurGANv2
|
[
"d56508704d0b0463f84bca0ad39f700eb91c2aae"
] |
[
"metric_counter.py"
] |
[
"import logging\nfrom collections import defaultdict\n\nimport numpy as np\nfrom tensorboardX import SummaryWriter\n\nWINDOW_SIZE = 100\n\n\nclass MetricCounter:\n def __init__(self, exp_name):\n self.writer = SummaryWriter(exp_name)\n logging.basicConfig(filename='{}.log'.format(exp_name), level=logging.DEBUG)\n self.metrics = defaultdict(list)\n self.images = defaultdict(list)\n self.best_metric = 0\n\n def add_image(self, x: np.ndarray, tag: str):\n self.images[tag].append(x)\n\n def clear(self):\n self.metrics = defaultdict(list)\n self.images = defaultdict(list)\n\n def add_losses(self, l_G, l_content, l_D=0):\n for name, value in zip(('G_loss', 'G_loss_content', 'G_loss_adv', 'D_loss'),\n (l_G, l_content, l_G - l_content, l_D)):\n self.metrics[name].append(value)\n\n def add_metrics(self, psnr, ssim):\n for name, value in zip(('PSNR', 'SSIM'),\n (psnr, ssim)):\n self.metrics[name].append(value)\n\n def loss_message(self):\n metrics = ((k, np.mean(self.metrics[k][-WINDOW_SIZE:])) for k in ('G_loss', 'PSNR', 'SSIM'))\n return '; '.join(map(lambda x: f'{x[0]}={x[1]:.4f}', metrics))\n\n def write_to_tensorboard(self, epoch_num, validation=False):\n scalar_prefix = 'Validation' if validation else 'Train'\n for tag in ('G_loss', 'D_loss', 'G_loss_adv', 'G_loss_content', 'SSIM', 'PSNR'):\n self.writer.add_scalar(f'{scalar_prefix}_{tag}', np.mean(self.metrics[tag]), global_step=epoch_num)\n for tag in self.images:\n imgs = self.images[tag]\n if imgs:\n imgs = np.array(imgs)\n self.writer.add_images(tag, imgs[:, :, :, ::-1].astype('float32') / 255, dataformats='NHWC',\n global_step=epoch_num)\n self.images[tag] = []\n\n def update_best_model(self):\n cur_metric = np.mean(self.metrics['PSNR'])\n if self.best_metric < cur_metric:\n self.best_metric = cur_metric\n return True\n return False\n"
] |
[
[
"numpy.array",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
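MetricCounter.loss_message in the row above averages only the last WINDOW_SIZE entries of each metric list, so the logged loss tracks recent batches rather than the whole run. A minimal standalone sketch of that windowed-mean pattern, with made-up metric values:

# Windowed average as used by MetricCounter.loss_message: smoother than a
# single batch value, more responsive than a global running mean.
from collections import defaultdict
import numpy as np

WINDOW_SIZE = 100
metrics = defaultdict(list)

for step in range(250):
    metrics["G_loss"].append(1.0 / (step + 1))  # dummy, decaying loss

window = metrics["G_loss"][-WINDOW_SIZE:]       # at most the last 100 entries
print(f"G_loss={np.mean(window):.4f}")          # recent average
print(f"global={np.mean(metrics['G_loss']):.4f}")  # whole-run average differs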
linzhiqiu/avalanche
|
[
"947fccb9a1c204b76d26057719e8932801b36132"
] |
[
"avalanche/benchmarks/datasets/lvis_dataset/lvis_dataset.py"
] |
[
"################################################################################\n# Copyright (c) 2022 ContinualAI #\n# Copyrights licensed under the MIT License. #\n# See the accompanying LICENSE file for terms. #\n# #\n# Date: 18-02-2022 #\n# Author: Lorenzo Pellegrini #\n# #\n# E-mail: [email protected] #\n# Website: www.continualai.org #\n################################################################################\n\n\"\"\" LVIS PyTorch Object Detection Dataset \"\"\"\n\nfrom pathlib import Path\nfrom typing import Union, List, Sequence\n\nfrom PIL import Image\nfrom torchvision.datasets.folder import default_loader\nfrom torchvision.transforms import ToTensor\nfrom typing_extensions import TypedDict\n\nfrom avalanche.benchmarks.datasets import (\n DownloadableDataset,\n default_dataset_location,\n)\nfrom avalanche.benchmarks.datasets.lvis_dataset.lvis_data import lvis_archives\nfrom lvis import LVIS\nimport torch\n\n\nclass LvisDataset(DownloadableDataset):\n \"\"\" LVIS PyTorch Object Detection Dataset \"\"\"\n\n def __init__(\n self,\n root: Union[str, Path] = None,\n *,\n train=True,\n transform=None,\n loader=default_loader,\n download=True,\n lvis_api=None,\n img_ids: List[int] = None,\n ):\n \"\"\"\n Creates an instance of the LVIS dataset.\n\n :param root: The directory where the dataset can be found or downloaded.\n Defaults to None, which means that the default location for\n \"lvis\" will be used.\n :param train: If True, the training set will be returned. If False,\n the test set will be returned.\n :param transform: The transformation to apply to (img, annotations)\n values.\n :param loader: The image loader to use.\n :param download: If True, the dataset will be downloaded if needed.\n :param lvis_api: An instance of the LVIS class (from the lvis-api) to\n use. Defaults to None, which means that annotations will be loaded\n from the annotation json found in the root directory.\n :param img_ids: A list representing a subset of images to use. Defaults\n to None, which means that the dataset will contain all images\n in the LVIS dataset.\n \"\"\"\n\n if root is None:\n root = default_dataset_location(\"lvis\")\n\n self.train = train # training set or test set\n self.transform = transform\n self.loader = loader\n self.bbox_crop = True\n self.img_ids = img_ids\n\n self.targets = None\n self.lvis_api = lvis_api\n\n super(LvisDataset, self).__init__(root, download=download, verbose=True)\n\n self._load_dataset()\n\n def _download_dataset(self) -> None:\n data2download = lvis_archives\n\n for name, url, checksum in data2download:\n if self.verbose:\n print(\"Downloading \" + name + \"...\")\n\n result_file = self._download_file(\n url, name, checksum\n )\n if self.verbose:\n print(\"Download completed. 
Extracting...\")\n\n self._extract_archive(result_file)\n if self.verbose:\n print(\"Extraction completed!\")\n\n def _load_metadata(self) -> bool:\n must_load_api = self.lvis_api is None\n must_load_img_ids = self.img_ids is None\n try:\n # Load metadata\n if must_load_api:\n if self.train:\n ann_json_path = str(self.root / \"lvis_v1_train.json\")\n else:\n ann_json_path = str(self.root / \"lvis_v1_val.json\")\n\n self.lvis_api = LVIS(ann_json_path)\n\n if must_load_img_ids:\n self.img_ids = list(sorted(self.lvis_api.get_img_ids()))\n\n self.targets = LVISDetectionTargets(self.lvis_api, self.img_ids)\n\n # Try loading an image\n if len(self.img_ids) > 0:\n img_id = self.img_ids[0]\n img_dict: LVISImgEntry = \\\n self.lvis_api.load_imgs(ids=[img_id])[0]\n assert self._load_img(img_dict) is not None\n except BaseException:\n if must_load_api:\n self.lvis_api = None\n if must_load_img_ids:\n self.img_ids = None\n\n self.targets = None\n raise\n\n return True\n\n def _download_error_message(self) -> str:\n return (\n \"[LVIS] Error downloading the dataset. Consider \"\n \"downloading it manually at: https://www.lvisdataset.org/dataset\"\n \" and placing it in: \" + str(self.root)\n )\n\n def __getitem__(self, index):\n \"\"\"\n Loads an instance given its index.\n\n :param index: The index of the instance to retrieve.\n\n :return: a (sample, target) tuple where the target is a\n torchvision-style annotation for object detection\n https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html\n \"\"\"\n img_id = self.img_ids[index]\n img_dict: LVISImgEntry = self.lvis_api.load_imgs(ids=[img_id])[0]\n annotation_dicts = self.targets[index]\n\n # Transform from LVIS dictionary to torchvision-style target\n num_objs = len(annotation_dicts)\n\n boxes = []\n labels = []\n for i in range(num_objs):\n xmin = annotation_dicts[i]['bbox'][0]\n ymin = annotation_dicts[i]['bbox'][1]\n xmax = xmin + annotation_dicts[i]['bbox'][2]\n ymax = ymin + annotation_dicts[i]['bbox'][3]\n boxes.append([xmin, ymin, xmax, ymax])\n labels.append(annotation_dicts[i]['category_id'])\n\n if len(boxes) > 0:\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\n else:\n boxes = torch.empty((0, 4), dtype=torch.float32)\n labels = torch.as_tensor(labels, dtype=torch.int64)\n\n image_id = torch.tensor([img_id])\n areas = []\n for i in range(num_objs):\n areas.append(annotation_dicts[i]['area'])\n areas = torch.as_tensor(areas, dtype=torch.float32)\n iscrowd = torch.zeros((num_objs,), dtype=torch.int64)\n\n target = dict()\n target[\"boxes\"] = boxes\n target[\"labels\"] = labels\n target[\"image_id\"] = image_id\n target[\"area\"] = areas\n target[\"iscrowd\"] = iscrowd\n\n img = self._load_img(img_dict)\n\n if self.transform is not None:\n img, target = self.transform(img, target)\n\n return img, target\n\n def __len__(self):\n return len(self.img_ids)\n\n def _load_img(self, img_dict: \"LVISImgEntry\"):\n coco_url = img_dict['coco_url']\n splitted_url = coco_url.split('/')\n img_path = splitted_url[-2] + '/' + splitted_url[-1]\n final_path = self.root / img_path # <root>/train2017/<img_id>.jpg\n return self.loader(str(final_path))\n\n\nclass LVISImgEntry(TypedDict):\n id: int\n date_captured: str\n neg_category_ids: List[int]\n license: int\n height: int\n width: int\n flickr_url: str\n coco_url: str\n not_exhaustive_category_ids: List[int]\n\n\nclass LVISAnnotationEntry(TypedDict):\n id: int\n area: float\n segmentation: List[List[float]]\n image_id: int\n bbox: List[int]\n category_id: int\n\n\nclass 
LVISDetectionTargets(Sequence[List[LVISAnnotationEntry]]):\n def __init__(\n self,\n lvis_api: LVIS,\n img_ids: List[int] = None):\n super(LVISDetectionTargets, self).__init__()\n self.lvis_api = lvis_api\n if img_ids is None:\n img_ids = list(sorted(lvis_api.get_img_ids()))\n\n self.img_ids = img_ids\n\n def __len__(self):\n return len(self.img_ids)\n\n def __getitem__(self, index):\n img_id = self.img_ids[index]\n annotation_ids = self.lvis_api.get_ann_ids(img_ids=[img_id])\n annotation_dicts: List[LVISAnnotationEntry] = \\\n self.lvis_api.load_anns(annotation_ids)\n return annotation_dicts\n\n\ndef _test_to_tensor(a, b):\n return ToTensor()(a), b\n\n\ndef _detection_collate_fn(batch):\n return tuple(zip(*batch))\n\n\ndef _plot_detection_sample(img: Image.Image, target):\n from matplotlib import patches\n import matplotlib.pyplot as plt\n plt.gca().imshow(img)\n for box in target['boxes']:\n box = box.tolist()\n\n rect = patches.Rectangle(\n (box[0], box[1]), box[2] - box[0], box[3] - box[1],\n linewidth=1,\n edgecolor='r',\n facecolor='none')\n plt.gca().add_patch(rect)\n\n\nif __name__ == \"__main__\":\n # this little example script can be used to visualize the first image\n # loaded from the dataset.\n from torch.utils.data.dataloader import DataLoader\n import matplotlib.pyplot as plt\n from torchvision import transforms\n import torch\n\n train_data = LvisDataset(transform=_test_to_tensor)\n test_data = LvisDataset(transform=_test_to_tensor, train=False)\n print(\"train size: \", len(train_data))\n print(\"Test size: \", len(test_data))\n dataloader = DataLoader(train_data, batch_size=1,\n collate_fn=_detection_collate_fn)\n\n n_to_show = 5\n for instance_idx, batch_data in enumerate(dataloader):\n x, y = batch_data\n x = x[0]\n y = y[0]\n _plot_detection_sample(transforms.ToPILImage()(x), y)\n plt.show()\n print('X image shape', x.shape)\n print('N annotations:', len(y['boxes']))\n if (instance_idx + 1) >= n_to_show:\n break\n\n__all__ = [\n \"LvisDataset\",\n \"LVISImgEntry\",\n \"LVISAnnotationEntry\",\n \"LVISDetectionTargets\"\n]\n"
] |
[
[
"matplotlib.pyplot.gca",
"torch.empty",
"torch.zeros",
"matplotlib.patches.Rectangle",
"torch.tensor",
"torch.utils.data.dataloader.DataLoader",
"matplotlib.pyplot.show",
"torch.as_tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
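LvisDataset.__getitem__ in the row above converts LVIS annotations, whose bbox field is [x, y, width, height], into the [xmin, ymin, xmax, ymax] tensor layout that torchvision detection models expect, falling back to an empty (0, 4) tensor when an image has no boxes. A minimal sketch of that conversion, with two made-up annotation dicts standing in for lvis_api.load_anns output:

# LVIS [x, y, w, h] boxes -> torchvision-style detection target.
import torch

anns = [
    {"bbox": [10, 20, 30, 40], "category_id": 3, "area": 1200.0},
    {"bbox": [0, 0, 5, 5], "category_id": 7, "area": 25.0},
]

boxes = [[x, y, x + w, y + h] for x, y, w, h in (a["bbox"] for a in anns)]
target = {
    "boxes": torch.as_tensor(boxes, dtype=torch.float32)
    if boxes else torch.empty((0, 4), dtype=torch.float32),
    "labels": torch.as_tensor([a["category_id"] for a in anns], dtype=torch.int64),
    "area": torch.as_tensor([a["area"] for a in anns], dtype=torch.float32),
    "iscrowd": torch.zeros((len(anns),), dtype=torch.int64),
}
print(target["boxes"])  # tensor([[10., 20., 40., 60.], [ 0.,  0.,  5.,  5.]])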
dionresearch/hotelling
|
[
"ac6ef7d3674f1e9af7511d3a1fb205ee40b321b7"
] |
[
"hotelling/plots.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"plot.py.\n\nHotelling's T-Squared multivariate control charts\n\nSee:\n\n - Hotelling, Harold. (1931). The Generalization of Student's Ratio. Ann. Math. Statist. 2,\n no. 3, 360--378. doi:10.1214/aoms/1177732979.\n - Tukey, J. W. (1960). A survey of sampling from contaminated distributions. In: Contributions\n to Probability and Statistics. Stanford Univ. Press. 448-85\n - Gnanadesikan, R. and J.R. Kettenring (1972). Robust Estimates, Residuals, and Outlier Detection\n with Multiresponse Data. Biometrics 28, 81-124\n\n\"\"\"\nfrom warnings import warn\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ntry:\n from plotly.offline import iplot\n from plotly.subplots import make_subplots\n import plotly.tools as tls\n\n plotly_module = True\nexcept ModuleNotFoundError:\n plotly_module = False\nfrom scipy import stats\n\nfrom hotelling.stats import hotelling_t2\n\n\ndef control_interval(m, n, f, phase=1, alpha=0.001):\n \"\"\"control_interval.\n\n For Hotelling control charts, phase 1 is using Qi. This follows a beta distribution, not an F distribution. For\n phase 2 uses future observations. These would follow a known distribution ~ F (Seber, 1984).\n The lower and upper lines are based on the quantiles of the distribution (aka `percent point function`)\n for α and 1 - α, while the center line is the median (50%).\n\n See:\n - Seber, G (1984). Multivariate Observations. John Wiley & Sons.\n - Nola D. Tracy, John C. Young & Robert L. Mason (1992) Multivariate Control Charts for individual Observations,\n Journal or Quality Technology, 24:2, 88-95, DOI:10.1080/00224065.1992.12015232\n\n :param m: sample groups (between 1 and n)\n :param n: number of samples\n :param f: number of features in the multivariate samples\n :param phase: 1 or 2 - phase 1 is within initial sample, phase 2 is measuring implemented control\n :param alpha: significance level - used to calculate control lines at α/2 and 1-α/2\n :return:\n \"\"\"\n if phase == 1:\n lcl = float(\n ((m - 1) * (n - 1) / m)\n * (stats.beta(f / 2, ((m - f - 1) / 2)).ppf(alpha / 2)),\n )\n cl = float(\n ((m - 1) * (n - 1) / m) * (stats.beta(f / 2, ((m - f - 1) / 2)).ppf(0.5)),\n )\n ucl = float(\n ((m - 1) * (n - 1) / m)\n * (stats.beta(f / 2, ((m - f - 1) / 2)).ppf(1 - alpha / 2)),\n )\n else:\n lcl = float(\n (f * (m - 1) * (m + 1)) / (m * (m - f)) * stats.f(f, m - f).ppf(alpha / 2)\n )\n cl = float((f * (m - 1) * (m + 1)) / (m * (m - f)) * stats.f(f, m - f).ppf(0.5))\n ucl = float(\n (f * (m - 1) * (m + 1))\n / (m * (m - f))\n * stats.f(f, m - f).ppf(1 - alpha / 2)\n )\n return lcl, cl, ucl\n\n\ndef control_stats(x):\n \"\"\"control_stats.\n\n Compute the sample mean vector and the covariance matrix\n\n :param x: pandas dataframe, uni or multivariate\n :return: sample mean, sample covariance\n \"\"\"\n try:\n return x.mean(0).compute(), x.cov().compute()\n except AttributeError:\n return x.mean(0), x.cov()\n\n\ndef control_chart(\n x,\n phase=1,\n alpha=0.001,\n x_bar=None,\n s=None,\n legend_right=False,\n interactive=False,\n width=10,\n cusum=False,\n template=\"none\",\n marker=\"o\",\n ooc_marker=\"x\",\n random_state=42,\n limit=1000,\n no_display=False,\n):\n \"\"\"control_chart.\n\n Hotelling Control Chart based on Q / T^2.\n\n See also `control_interval` for more detail\n\n :param x: pandas dataframe, uni or multivariate\n :param phase: 1 or 2 - phase 1 is within initial sample, phase 2 is measuring implemented control\n :param alpha: significance level - used to calculate control lines at α/2 
and 1-α/2\n :param x_bar: sample mean (optional, required with s)\n :param s: sample covariance (optional, required with x_bar)\n :param legend_right: default to 'left', can specify 'right'\n :param interactive: if True and plotly is available, renders as interactive plot in notebook. False, render image.\n :param width: how many units wide. defaults to 10, good for notebooks\n :param cusum: use cumulative sum instead of average\n :param template: plotly template, defaults to 'none', matching default matplotlib\n :param marker: default marker symbol - one valid for matplotlib\n :param ooc_marker: out of control marker symbol (x) - one valid for matplotlib\n :param random_state: seed for sample (n > limit)\n :param limit: max number of points to plot, defaults to 1000\n :return: matplotlib ax / plotly fig\n \"\"\"\n n, subset = limit_display(x, limit, random_state)\n m = n\n\n # computing each individual values to the mean and covariance of the whole dataset\n if x_bar is None and s is None:\n x_bar, s = control_stats(x)\n elif x_bar is None or s is None:\n raise ValueError(\"Error: must specify both x_bar and s, or none at all.\")\n\n # data might be a subset (sample), but control stats above are calculated on the whole dataset\n points, f = subset.shape\n qi = [hotelling_t2(subset[i:i + 1], x_bar, S=s) for i in range(points)]\n\n df = pd.DataFrame({\"qi\": qi})\n\n lcl, cl, ucl = control_interval(m, n, f, phase=phase, alpha=alpha)\n\n cusum_text = \"\"\n if cusum:\n df[\"deviation\"] = (df[\"qi\"] - cl).cumsum() + cl\n cusum_text = f\" w/deviation (ref={cl:.3f})\"\n ax = df.plot(\n title=f\"Hotelling Control Chart (α={alpha}, phase={phase}{cusum_text})\",\n marker=marker,\n figsize=(width, width / 2),\n )\n ax.set_xlabel(\"samples\")\n\n try:\n df[(df[\"qi\"] > ucl) | (df[\"qi\"] < lcl)].plot(\n ax=ax, marker=ooc_marker, linestyle=\"None\", color=\"red\", legend=None\n )\n except TypeError:\n # nothing to plot\n pass\n x_pos = 0\n align = \"left\"\n if legend_right:\n x_pos = len(qi)\n align = \"right\"\n font_dict = {\"family\": \"serif\", \"color\": \"red\", \"size\": 10}\n if not interactive:\n ax.hlines(\n ucl,\n xmin=0,\n xmax=len(qi),\n linestyles=\"dashed\",\n color=\"r\",\n label=f\"UCL={ucl}\",\n )\n plt.text(\n x_pos,\n ucl + 0.1,\n s=f\"UCL={ucl:.3f}\",\n fontdict=font_dict,\n horizontalalignment=align,\n )\n if not interactive:\n ax.hlines(\n cl, xmin=0, xmax=len(qi), linestyles=\"dashed\", color=\"k\", label=f\"CL={cl}\"\n )\n font_dict = {\"family\": \"serif\", \"color\": \"black\", \"size\": 10}\n plt.text(\n x_pos, cl + 0.1, s=f\"CL={cl:.3f}\", fontdict=font_dict, horizontalalignment=align\n )\n if not interactive:\n ax.hlines(\n lcl,\n xmin=0,\n xmax=len(qi),\n linestyles=\"dashed\",\n color=\"r\",\n label=f\"LCL={lcl}\",\n )\n font_dict = {\"family\": \"serif\", \"color\": \"red\", \"size\": 10}\n plt.text(\n x_pos,\n lcl + 0.1,\n s=f\"LCL={lcl:.3f}\",\n fontdict=font_dict,\n horizontalalignment=align,\n )\n if plotly_module and interactive:\n fig = tls.mpl_to_plotly(ax.get_figure())\n for var, col in [(ucl, \"Red\"), (lcl, \"Red\"), (cl, \"Black\")]:\n fig.add_shape(\n type=\"line\",\n x0=0,\n y0=var,\n x1=len(qi),\n y1=var,\n line=dict(color=col, width=4, dash=\"dashdot\",),\n )\n fig.update_layout(template=template)\n if no_display is False:\n iplot(fig)\n return fig\n else:\n return ax\n\n\ndef limit_display(x, limit, random_state):\n \"\"\"limit_displau.\n\n Convenient way to get around the issue of very large datasets. 
We can't show everything, so we display\n a subset. The tests and stats like T2, F and P values are not affected, because we calculate them on all\n the data.\n\n :param x: dask or pandas dataframe, uni or multivariate\n :param random_state: seed for sample (n > limit)\n :param limit: max number of points to plot, defaults to 1000\n :return: returns original number of rows and limited dataframe\n \"\"\"\n n, *f = x.shape\n\n try:\n n = n.compute()\n except AttributeError:\n pass\n if n > limit:\n try:\n frac = 1000 / n\n subset = x.sample(frac=frac, random_state=random_state).compute()\n except AttributeError:\n subset = x.sample(n=1000, random_state=random_state)\n else:\n # The whole thing\n try:\n subset = x.compute()\n except AttributeError:\n subset = x\n\n return n, subset\n\n\ndef univariate_control_chart(\n x,\n var=None,\n sigma=3,\n legend_right=False,\n interactive=False,\n connected=True,\n width=10,\n cusum=False,\n cusum_only=False,\n template=\"none\",\n marker=\"o\",\n ooc_marker=\"x\",\n limit=1000,\n random_state=42,\n no_display=False,\n):\n \"\"\"univariate_control_chart.\n\n :param x: dask or pandas dataframe, uni or multivariate\n :param var: optional, variable to plot (default to all)\n :param sigma: default to 3 sigma from mean for upper and lower control lines\n :param legend_right: default to 'left', can specify 'right'\n :param interactive: if plotly is available, renders as interactive plot in notebook. False to render image.\n :param connected: defaults to True. Appropriate when time related /consecutive batches, else, should be False\n :param width: how many units wide. defaults to 10, good for notebooks\n :param cusum: use cumulative sum instead of average\n :param cusum_only: don't display values, just cusum referenced to 0\n :param template: plotly template, defaults to 'none', matching default matplotlib\n :param marker: default marker symbol (o) - one valid for matplotlib\n :param ooc_marker: out of control marker symbol (x) - one valid for matplotlib\n :param random_state: seed for sample (n > limit)\n :param limit: max number of points to plot, defaults to 1000\n :return: returns matplotlib figure or array of plotly figures\n \"\"\"\n n, *f, df = limit_display(x, limit, random_state)\n num_plots = len(df.columns)\n k = sigma # 3 sigma default\n if interactive:\n fig = make_subplots(rows=num_plots, cols=1)\n else:\n fig = plt.figure(figsize=(width, num_plots * width / 2))\n\n ax = list(range(num_plots))\n\n layout = num_plots * 100 + 11\n features = df.columns if var is None else [var]\n x_pos = 0\n align = \"left\"\n if legend_right:\n x_pos = n\n align = \"right\"\n\n plotly_figs = []\n for i, var in enumerate(features):\n x_bar = df[var].mean()\n cusum_text = \"\"\n columns = var\n if cusum:\n if cusum_only:\n columns = \"deviation\"\n df[\"deviation\"] = (df[var] - x_bar).cumsum()\n cusum_text = f\" cumulative deviation (ref={x_bar:.3f})\"\n else:\n columns = [var, \"deviation\"]\n df[\"deviation\"] = (df[var] - x_bar).cumsum() + (x_bar)\n cusum_text = f\" w/deviation (ref={x_bar:.3f})\"\n ucl = x_bar + k * df[var].std()\n lcl = x_bar - k * df[var].std()\n if interactive:\n mpl_fig, ax[i] = plt.subplots(figsize=(width, width / 2))\n else:\n ax[i] = fig.add_subplot(layout + i)\n if connected:\n df[columns].plot(ax=ax[i], marker=marker)\n else:\n df[columns].plot(ax=ax[i], marker=marker, linestyle=\"None\")\n try:\n if cusum_only is False:\n df[var][(x[var] > ucl) | (x[var] < lcl)].plot(\n ax=ax[i], marker=ooc_marker, linestyle=\"None\", 
color=\"red\"\n )\n except TypeError:\n # no outliers\n pass\n x_min = df.index.min()\n x_max = df.index.max()\n if cusum_only is False:\n y_low = min(df[var].min(), lcl) - 0.1 * abs(df[var].min())\n y_high = max(df[var].max(), ucl) + 0.1 * abs(df[var].max())\n elif cusum:\n y_low = min(df[\"deviation\"].min() - 0.1 * abs(df[\"deviation\"].min()), 0)\n y_high = df[\"deviation\"].max() + 0.1 * abs(df[\"deviation\"].max())\n else:\n warn(\"Error: must specify cusum=True when using cusum_only=True.\")\n\n if plotly_module and interactive and cusum_only is False:\n ucl_text = dict(\n x=x_pos,\n y=ucl + 0.2,\n showarrow=False,\n text=f\"UCL={ucl:.3f}\",\n xref=\"x\",\n yref=\"y\",\n font=dict(family=\"serif\", color=\"red\", size=10),\n )\n mean_text = dict(\n x=x_pos,\n y=x_bar + 0.2,\n showarrow=False,\n text=f\"mean={x_bar:.3f}\",\n xref=\"x\",\n yref=\"y\",\n font=dict(family=\"serif\", color=\"black\", size=10),\n )\n lcl_text = dict(\n x=x_pos,\n y=lcl + 0.2,\n showarrow=False,\n text=f\"LCL={lcl:.3f}\",\n xref=\"x\",\n yref=\"y\",\n font=dict(family=\"serif\", color=\"red\", size=10),\n )\n elif cusum_only is False:\n ax[i].hlines(\n ucl, xmin=x_min, xmax=x_max, linestyles=\"dashed\", color=\"r\", label=\"UCL\"\n )\n font_dict = {\"family\": \"serif\", \"color\": \"red\", \"size\": 10}\n plt.text(\n x_pos,\n ucl + 0.2,\n s=f\"UCL={ucl:.3f}\",\n fontdict=font_dict,\n horizontalalignment=align,\n )\n ax[i].hlines(\n x_bar,\n xmin=x_min,\n xmax=x_max,\n linestyles=\"dashed\",\n color=\"k\",\n label=\"mean\",\n )\n font_dict = {\"family\": \"serif\", \"color\": \"black\", \"size\": 10}\n plt.text(\n x_pos,\n x_bar + 0.2,\n s=f\"mean={x_bar:.3f}\",\n fontdict=font_dict,\n horizontalalignment=align,\n )\n\n ax[i].hlines(\n lcl, xmin=x_min, xmax=x_max, linestyles=\"dashed\", color=\"r\", label=\"LCL\"\n )\n font_dict = {\"family\": \"serif\", \"color\": \"red\", \"size\": 10}\n plt.text(\n x_pos,\n lcl + 0.2,\n s=f\"LCL={ucl:.3f}\",\n fontdict=font_dict,\n horizontalalignment=align,\n )\n\n ax[i].title.set_text(\n f\"Univariate Control Chart for {var}{cusum_text} (σ={sigma})\"\n )\n plt.tight_layout()\n if plotly_module and interactive:\n pfig = tls.mpl_to_plotly(mpl_fig)\n if cusum_only is False:\n for var, col in [(ucl, \"Red\"), (lcl, \"Red\"), (x_bar, \"Black\")]:\n pfig.add_shape(\n type=\"line\",\n x0=x_min,\n y0=var,\n x1=x_max,\n y1=var,\n line=dict(color=col, width=4, dash=\"dashdot\",),\n )\n pfig.update_xaxes(range=(x_min - 1, x_max + 1))\n pfig.update_yaxes(range=(y_low, y_high))\n annotations = None if cusum_only else [ucl_text, mean_text, lcl_text]\n pfig.update_layout(margin=dict(l=1, r=1), # noqa\n yaxis_tickmode=\"auto\",\n annotations=annotations,\n template=template,\n )\n if no_display is False:\n iplot(pfig)\n plotly_figs.append(pfig)\n if interactive:\n return plotly_figs\n else:\n return fig\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"scipy.stats.beta",
"scipy.stats.f",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
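The control-chart code in the entry above derives its limits as mean ± k·σ and its CUSUM as the running sum of deviations from the mean. A minimal standalone sketch of that arithmetic, assuming only pandas (the function names here are illustrative, not part of the library above):

```python
import pandas as pd

def control_limits(s: pd.Series, sigma: float = 3.0):
    """Return (lcl, center, ucl) for a Shewhart-style chart."""
    x_bar, sd = s.mean(), s.std()
    return x_bar - sigma * sd, x_bar, x_bar + sigma * sd

def out_of_control(s: pd.Series, sigma: float = 3.0) -> pd.Series:
    """Boolean mask of points beyond the control limits."""
    lcl, _, ucl = control_limits(s, sigma)
    return (s < lcl) | (s > ucl)

s = pd.Series([10, 11, 9, 10, 30, 10, 9, 11, 10])
print(s[out_of_control(s, sigma=2.0)])   # the 30 is flagged at 2 sigma
cusum = (s - s.mean()).cumsum()          # CUSUM referenced to the series mean
```

Because the outlier inflates the standard deviation, it is not flagged at the default 3 sigma on this short series, which is one reason CUSUM charts are often preferred for detecting small shifts.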
arvind1998/Generative-Adversarial-Networks
|
[
"939a2aa30fbaf5cd6ea419ebb0b93f403015a278"
] |
[
"MNIST/dcgan.py"
] |
[
"\nfrom __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\n\n\nbatchSize = 64 \nimageSize = 64 \n\ntransform = transforms.Compose([transforms.Scale(imageSize), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),]) # We \n\n\ndataset = dset.MNIST(root = './data', download = True, transform = transform) \ndataloader = torch.utils.data.DataLoader(dataset, batch_size = batchSize, shuffle = True, num_workers = 2) \n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n\n\nclass G(nn.Module):\n\n def __init__(self):\n super(G, self).__init__()\n self.main = nn.Sequential(\n nn.ConvTranspose2d(100, 512, 4, 1, 0, bias = False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.2, inplace = True),\n nn.ConvTranspose2d(512, 256, 4, 2, 1, bias = False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.2, inplace = True),\n nn.ConvTranspose2d(256, 128, 4, 2, 1, bias = False),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.2, inplace = True),\n nn.ConvTranspose2d(128, 64, 4, 2, 1, bias = False),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(0.2, inplace = True),\n nn.ConvTranspose2d(64, 1, 4, 2, 1, bias = False),\n nn.Tanh()\n )\n\n def forward(self, input):\n output = self.main(input)\n return output\n\n\nnetG = G()\nnetG.apply(weights_init)\n\n\n\nclass D(nn.Module):\n\n def __init__(self):\n super(D, self).__init__()\n self.main = nn.Sequential(\n nn.Conv2d(1, 64, 4, 2, 1, bias = False),\n nn.LeakyReLU(0.2, inplace = True),\n nn.Conv2d(64, 128, 4, 2, 1, bias = False),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.2, inplace = True),\n nn.Conv2d(128, 256, 4, 2, 1, bias = False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.2, inplace = True),\n nn.Conv2d(256, 512, 4, 2, 1, bias = False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.2, inplace = True),\n nn.Conv2d(512, 1, 4, 1, 0, bias = False),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n output = self.main(input)\n return output.view(-1)\n\n\nnetD = D()\nnetD.apply(weights_init)\n\n\n\ncriterion = nn.BCELoss()\noptimizerD = optim.Adam(netD.parameters(), lr = 0.0002, betas = (0.5, 0.999))\noptimizerG = optim.Adam(netG.parameters(), lr = 0.0002, betas = (0.5, 0.999))\n\nfor epoch in range(25):\n\n for i, data in enumerate(dataloader, 0):\n \n\n\n netD.zero_grad()\n \n\n real, _ = data\n input = Variable(real)\n target = Variable(torch.ones(input.size()[0]))\n output = netD(input)\n errD_real = criterion(output, target)\n \n\n noise = Variable(torch.randn(input.size()[0], 100, 1, 1))\n fake = netG(noise)\n target = Variable(torch.zeros(input.size()[0]))\n output = netD(fake.detach())\n errD_fake = criterion(output, target)\n \n\n errD = errD_real + errD_fake\n errD.backward()\n optimizerD.step()\n\n\n\n netG.zero_grad()\n target = Variable(torch.ones(input.size()[0]))\n output = netD(fake)\n errG = criterion(output, target)\n errG.backward()\n optimizerG.step()\n \n\n\n print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f' % (epoch, 25, i, len(dataloader), errD.data[0], errG.data[0]))\n if i % 100 == 0:\n vutils.save_image(real, '%s/real_samples.png' % \"./results\", normalize = True)\n fake = netG(noise)\n vutils.save_image(fake.data, 
'%s/fake_samples_epoch_%03d.png' % (\"./results\", epoch), normalize = True)\n"
] |
[
[
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"torch.nn.Tanh",
"torch.nn.Sigmoid",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.autograd.Variable"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
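In the DCGAN entry above, the generator maps a (100, 1, 1) noise tensor to a 64x64 single-channel image through five transposed convolutions, each (except the first) doubling the spatial size via out = (in - 1)*stride - 2*padding + kernel. A shape-only sketch to verify that arithmetic (assumes only torch; batch norm and activations are omitted since they do not change shapes):

```python
import torch
import torch.nn as nn

# out = (in - 1) * stride - 2 * padding + kernel
gen = nn.Sequential(
    nn.ConvTranspose2d(100, 512, 4, 1, 0),  # 1  -> 4
    nn.ConvTranspose2d(512, 256, 4, 2, 1),  # 4  -> 8
    nn.ConvTranspose2d(256, 128, 4, 2, 1),  # 8  -> 16
    nn.ConvTranspose2d(128, 64, 4, 2, 1),   # 16 -> 32
    nn.ConvTranspose2d(64, 1, 4, 2, 1),     # 32 -> 64
)
z = torch.randn(2, 100, 1, 1)
print(gen(z).shape)  # torch.Size([2, 1, 64, 64])
```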
tiandiao123/deep_learning
|
[
"e7dbc617eff80d62f66068294b81f76ba48a25a6"
] |
[
"PyTorch_Tutorial/example_simple.py"
] |
[
"# -*- coding: utf-8 -*-\nimport random\nimport torch\n\n\nclass DynamicNet(torch.nn.Module):\n def __init__(self, D_in, H, D_out):\n \"\"\"\n In the constructor we construct three nn.Linear instances that we will use\n in the forward pass.\n \"\"\"\n super(DynamicNet, self).__init__()\n self.input_linear = torch.nn.Linear(D_in, H)\n self.middle_linear = torch.nn.Linear(H, H)\n self.output_linear = torch.nn.Linear(H, D_out)\n\n def forward(self, x):\n \"\"\"\n For the forward pass of the model, we randomly choose either 0, 1, 2, or 3\n and reuse the middle_linear Module that many times to compute hidden layer\n representations.\n\n Since each forward pass builds a dynamic computation graph, we can use normal\n Python control-flow operators like loops or conditional statements when\n defining the forward pass of the model.\n\n Here we also see that it is perfectly safe to reuse the same Module many\n times when defining a computational graph. This is a big improvement from Lua\n Torch, where each Module could be used only once.\n \"\"\"\n h_relu = self.input_linear(x).clamp(min=0)\n for _ in range(random.randint(0, 3)):\n h_relu = self.middle_linear(h_relu).clamp(min=0)\n y_pred = self.output_linear(h_relu)\n return y_pred\n\n\n# N is batch size; D_in is input dimension;\n# H is hidden dimension; D_out is output dimension.\nN, D_in, H, D_out = 64, 1000, 100, 10\n\n# Create random Tensors to hold inputs and outputs\nx = torch.randn(N, D_in)\ny = torch.randn(N, D_out)\n\n# Construct our model by instantiating the class defined above\nmodel = DynamicNet(D_in, H, D_out)\n\n# Construct our loss function and an Optimizer. Training this strange model with\n# vanilla stochastic gradient descent is tough, so we use momentum\ncriterion = torch.nn.MSELoss(reduction='sum')\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)\nfor t in range(500):\n # Forward pass: Compute predicted y by passing x to the model\n y_pred = model(x)\n\n # Compute and print loss\n loss = criterion(y_pred, y)\n print(t, loss.item())\n\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()"
] |
[
[
"torch.nn.Linear",
"torch.randn",
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
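The DynamicNet entry above relies on define-by-run graphs: the loop depth is drawn per forward pass, yet the reused middle layer contributes its weights only once. A compact sketch of that property (torch only, illustrative names):

```python
import random
import torch

lin = torch.nn.Linear(8, 8)

def forward(x):
    depth = random.randint(0, 3)      # graph depth chosen at run time
    for _ in range(depth):
        x = lin(x).clamp(min=0)       # the same weights are reused each pass
    return x, depth

x = torch.randn(4, 8)
for _ in range(3):
    y, depth = forward(x)
    print(depth, y.shape)             # depth varies; shape and parameters don't

print(sum(p.numel() for p in lin.parameters()))  # 8*8 + 8 = 72, regardless of depth
```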
sherjilozair/Rainbow
|
[
"d7b4bd2d0fbe542e223e409229c4d867da1314cd"
] |
[
"memory.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport numpy as np\nimport torch\n\n\nTransition_dtype = np.dtype([('timestep', np.int32), ('state', np.uint8, (1, 512)), ('action', np.int32), ('reward', np.float32), ('nonterminal', np.bool_)])\nblank_trans = (0, np.zeros((1, 512), dtype=np.uint8), 0, 0.0, False)\n\n\n# Segment tree data structure where parent node values are sum/max of children node values\nclass SegmentTree():\n def __init__(self, size):\n self.index = 0\n self.size = size\n self.full = False # Used to track actual capacity\n self.tree_start = 2**(size-1).bit_length()-1 # Put all used node leaves on last tree level\n self.sum_tree = np.zeros((self.tree_start + self.size,), dtype=np.float32)\n self.data = np.array([blank_trans] * size, dtype=Transition_dtype) # Build structured array\n self.max = 1 # Initial max value to return (1 = 1^ω)\n\n # Updates nodes values from current tree\n def _update_nodes(self, indices):\n children_indices = indices * 2 + np.expand_dims([1, 2], axis=1)\n self.sum_tree[indices] = np.sum(self.sum_tree[children_indices], axis=0)\n\n # Propagates changes up tree given tree indices\n def _propagate(self, indices):\n parents = (indices - 1) // 2\n unique_parents = np.unique(parents)\n self._update_nodes(unique_parents)\n if parents[0] != 0:\n self._propagate(parents)\n\n # Propagates single value up tree given a tree index for efficiency\n def _propagate_index(self, index):\n parent = (index - 1) // 2\n left, right = 2 * parent + 1, 2 * parent + 2\n self.sum_tree[parent] = self.sum_tree[left] + self.sum_tree[right]\n if parent != 0:\n self._propagate_index(parent)\n\n # Updates values given tree indices\n def update(self, indices, values):\n self.sum_tree[indices] = values # Set new values\n self._propagate(indices) # Propagate values\n current_max_value = np.max(values)\n self.max = max(current_max_value, self.max)\n\n # Updates single value given a tree index for efficiency\n def _update_index(self, index, value):\n self.sum_tree[index] = value # Set new value\n self._propagate_index(index) # Propagate value\n self.max = max(value, self.max)\n\n def append(self, data, value):\n self.data[self.index] = data # Store data in underlying data structure\n self._update_index(self.index + self.tree_start, value) # Update tree\n self.index = (self.index + 1) % self.size # Update index\n self.full = self.full or self.index == 0 # Save when capacity reached\n self.max = max(value, self.max)\n\n # Searches for the location of values in sum tree\n def _retrieve(self, indices, values):\n children_indices = (indices * 2 + np.expand_dims([1, 2], axis=1)) # Make matrix of children indices\n # If indices correspond to leaf nodes, return them\n if children_indices[0, 0] >= self.sum_tree.shape[0]:\n return indices\n # If children indices correspond to leaf nodes, bound rare outliers in case total slightly overshoots\n elif children_indices[0, 0] >= self.tree_start:\n children_indices = np.minimum(children_indices, self.sum_tree.shape[0] - 1)\n left_children_values = self.sum_tree[children_indices[0]]\n successor_choices = np.greater(values, left_children_values).astype(np.int32) # Classify which values are in left or right branches\n successor_indices = children_indices[successor_choices, np.arange(indices.size)] # Use classification to index into the indices matrix\n successor_values = values - successor_choices * left_children_values # Subtract the left branch values when searching in the right branch\n return self._retrieve(successor_indices, 
successor_values)\n\n # Searches for values in sum tree and returns values, data indices and tree indices\n def find(self, values):\n indices = self._retrieve(np.zeros(values.shape, dtype=np.int32), values)\n data_index = indices - self.tree_start\n return (self.sum_tree[indices], data_index, indices) # Return values, data indices, tree indices\n\n # Returns data given a data index\n def get(self, data_index):\n return self.data[data_index % self.size]\n\n def total(self):\n return self.sum_tree[0]\n\nclass ReplayMemory():\n def __init__(self, args, capacity):\n self.device = args.device\n self.capacity = capacity\n self.history = args.history_length\n self.discount = args.discount\n self.n = args.multi_step\n self.priority_weight = args.priority_weight # Initial importance sampling weight β, annealed to 1 over course of training\n self.priority_exponent = args.priority_exponent\n self.t = 0 # Internal episode timestep counter\n self.n_step_scaling = torch.tensor([self.discount ** i for i in range(self.n)], dtype=torch.float32, device=self.device) # Discount-scaling vector for n-step returns\n self.transitions = SegmentTree(capacity) # Store transitions in a wrap-around cyclic buffer within a sum tree for querying priorities\n\n # Adds state and action at time t, reward and terminal at time t + 1\n def append(self, state, action, reward, terminal):\n state = state[-1].mul(255).to(dtype=torch.uint8, device=torch.device('cpu')) # Only store last frame and discretise to save memory\n self.transitions.append((self.t, state, action, reward, not terminal), self.transitions.max) # Store new transition with maximum priority\n self.t = 0 if terminal else self.t + 1 # Start new episodes with t = 0\n\n # Returns the transitions with blank states where appropriate\n def _get_transitions(self, idxs):\n transition_idxs = np.arange(-self.history + 1, self.n + 1) + np.expand_dims(idxs, axis=1)\n transitions = self.transitions.get(transition_idxs)\n transitions_firsts = transitions['timestep'] == 0\n blank_mask = np.zeros_like(transitions_firsts, dtype=np.bool_)\n for t in range(self.history - 2, -1, -1): # e.g. 2 1 0\n blank_mask[:, t] = np.logical_or(blank_mask[:, t + 1], transitions_firsts[:, t + 1]) # True if future frame has timestep 0\n for t in range(self.history, self.history + self.n): # e.g. 
4 5 6\n blank_mask[:, t] = np.logical_or(blank_mask[:, t - 1], transitions_firsts[:, t]) # True if current or past frame has timestep 0\n transitions[blank_mask] = blank_trans\n return transitions\n\n # Returns a valid sample from each segment\n def _get_samples_from_segments(self, batch_size, p_total):\n segment_length = p_total / batch_size # Batch size number of segments, based on sum over all probabilities\n segment_starts = np.arange(batch_size) * segment_length\n valid = False\n while not valid:\n samples = np.random.uniform(0.0, segment_length, [batch_size]) + segment_starts # Uniformly sample from within all segments\n probs, idxs, tree_idxs = self.transitions.find(samples) # Retrieve samples from tree with un-normalised probability\n if np.all((self.transitions.index - idxs) % self.capacity > self.n) and np.all((idxs - self.transitions.index) % self.capacity >= self.history) and np.all(probs != 0):\n valid = True # Note that conditions are valid but extra conservative around buffer index 0\n # Retrieve all required transition data (from t - h to t + n)\n transitions = self._get_transitions(idxs)\n # Create un-discretised states and nth next states\n all_states = transitions['state']\n states = torch.tensor(all_states[:, :self.history], device=self.device, dtype=torch.float32).div_(255)\n next_states = torch.tensor(all_states[:, self.n:self.n + self.history], device=self.device, dtype=torch.float32).div_(255)\n # Discrete actions to be used as index\n actions = torch.tensor(np.copy(transitions['action'][:, self.history - 1]), dtype=torch.int64, device=self.device)\n # Calculate truncated n-step discounted returns R^n = Σ_k=0->n-1 (γ^k)R_t+k+1 (note that invalid nth next states have reward 0)\n rewards = torch.tensor(np.copy(transitions['reward'][:, self.history - 1:-1]), dtype=torch.float32, device=self.device)\n R = torch.matmul(rewards, self.n_step_scaling)\n # Mask for non-terminal nth next states\n nonterminals = torch.tensor(np.expand_dims(transitions['nonterminal'][:, self.history + self.n - 1], axis=1), dtype=torch.float32, device=self.device)\n return probs, idxs, tree_idxs, states, actions, R, next_states, nonterminals\n\n def sample(self, batch_size):\n p_total = self.transitions.total() # Retrieve sum of all priorities (used to create a normalised probability distribution)\n probs, idxs, tree_idxs, states, actions, returns, next_states, nonterminals = self._get_samples_from_segments(batch_size, p_total) # Get batch of valid samples\n probs = probs / p_total # Calculate normalised probabilities\n capacity = self.capacity if self.transitions.full else self.transitions.index\n weights = (capacity * probs) ** -self.priority_weight # Compute importance-sampling weights w\n weights = torch.tensor(weights / weights.max(), dtype=torch.float32, device=self.device) # Normalise by max importance-sampling weight from batch\n return tree_idxs, states, actions, returns, next_states, nonterminals, weights\n\n def update_priorities(self, idxs, priorities):\n priorities = np.power(priorities, self.priority_exponent)\n self.transitions.update(idxs, priorities)\n\n # Set up internal state for iterator\n def __iter__(self):\n self.current_idx = 0\n return self\n\n # Return valid states for validation\n def __next__(self):\n if self.current_idx == self.capacity:\n raise StopIteration\n transitions = self.transitions.data[np.arange(self.current_idx - self.history + 1, self.current_idx + 1)]\n transitions_firsts = transitions['timestep'] == 0\n blank_mask = np.zeros_like(transitions_firsts, 
dtype=np.bool_)\n for t in reversed(range(self.history - 1)):\n blank_mask[t] = np.logical_or(blank_mask[t + 1], transitions_firsts[t + 1]) # If future frame has timestep 0\n transitions[blank_mask] = blank_trans\n state = torch.tensor(transitions['state'], dtype=torch.float32, device=self.device).div_(255) # Agent will turn into batch\n self.current_idx += 1\n return state\n\n next = __next__ # Alias __next__ for Python 2 compatibility\n"
] |
[
[
"numpy.expand_dims",
"numpy.minimum",
"numpy.dtype",
"numpy.all",
"numpy.max",
"numpy.zeros_like",
"torch.device",
"numpy.greater",
"numpy.unique",
"numpy.arange",
"torch.tensor",
"numpy.copy",
"numpy.zeros",
"numpy.power",
"numpy.logical_or",
"numpy.array",
"numpy.sum",
"torch.matmul",
"numpy.random.uniform"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
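The SegmentTree in the Rainbow entry stores leaf priorities in a flat array behind their internal sum nodes, so prioritized sampling reduces to descending the tree: go left if the sampled value fits under the left child's sum, otherwise subtract that sum and go right. A scalar, loop-based sketch of the same search (numpy only; the class above does this vectorised and recursively):

```python
import numpy as np

priorities = np.array([1.0, 4.0, 2.0, 3.0])      # leaf priorities
size = len(priorities)
tree_start = 2 ** (size - 1).bit_length() - 1    # internal nodes before the leaves
sum_tree = np.zeros(tree_start + size)
sum_tree[tree_start:] = priorities
for i in range(tree_start - 1, -1, -1):          # fill parents bottom-up
    sum_tree[i] = sum_tree[2 * i + 1] + sum_tree[2 * i + 2]

def find(value):
    """Descend from the root; return the sampled data index."""
    idx = 0
    while idx < tree_start:
        left = 2 * idx + 1
        if value <= sum_tree[left]:
            idx = left
        else:
            value -= sum_tree[left]
            idx = left + 1
    return idx - tree_start

total = sum_tree[0]                              # 10.0
hits = [find(u) for u in np.random.uniform(0, total, 10000)]
print(np.bincount(hits) / 10000)                 # approx [0.1, 0.4, 0.2, 0.3]
```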
rcyndie/paraphase
|
[
"3be47bf46b2d9b432e94fc997e59092bb73a7b81"
] |
[
"paraphase/calibration/basis_compute.py"
] |
[
"import numpy as np\n\ndef make_basis_vec(n_par, l_d, m_d):\n \"\"\"\n Generating the basis polynomial to compute the phase equation. Right now, \n it is of the form [1, l_d, m_d, l_d**2, m_d**2, ...] with l_d and m_d being \n scalars. The function returns a vector of length n_par.\n\n \"\"\"\n \n N = (n_par+1) // 2\n lvec = (np.tile(l_d, N-1) ** np.arange(1, N))\n mvec = (np.tile(m_d, N-1) ** np.arange(1, N))\n\n main_vec = np.ones((N-1, 2))\n main_vec[:, 0] = lvec\n main_vec[:, 1] = mvec\n main_vec = (main_vec).flatten()\n \n return np.insert(main_vec, 0, [1])\n\ndef get_basis_poly(sources, n_par):\n \"\"\"\n Get basis matrix of shape (n_params, n_dir). Both l and m, which represent the\n direction cosine coordinates of the sources are each vectors of length n_dir.\n \n \"\"\"\n\n #Get the dimension and the direction cosines.\n l = sources[:, 1]\n m = sources[:, 2]\n\n n_dir = sources.shape[0]\n basis = np.zeros((n_par, n_dir))\n\n for d in range(n_dir):\n basis[:, d] = make_basis_vec(n_par, l[d], m[d])\n\n return basis\n\ndef get_basis_cov(sources, bparams):\n \"\"\"\n Get basis matrix >>> covariance matrix >>> Cholesky decomposition.\n recall bparams = {\"n_par\": n_dir, \"sigmaf\": options.sigmaf, \"lscale\": options.lscale}\n and more.\n\n \"\"\"\n\n #Get covariance matrix.\n #(sources, sources)? (x, xp)? maybe needs tweaking?\n K = squared_exp(sources, sources, bparams[\"sigmaf\"], bparams[\"lscale\"])\n\n #Compute Cholesky decomposition of K_inv.\n L = np.linalg.cholesky(K + bparams[\"jitter\"] * np.eye(K.shape[0]))\n #Remember L and K are of shapes n_sources \\times n_sources, or\n #if I am incorrect, they should at least be of the same shapes.\n\n return L\n\ndef squared_exp(x, xp, sigmaf, l):\n \"\"\"\n The function returns a covariance matrix using the squared \n exponential (SE) kernel.\n \n k(x, xp) = sigma^2 * exp(-(x-xp)^2/(2*l^2))\n \n \"\"\"\n \n # if x.ndim > 1 or xp.ndim > 1:\n # raise ValueError(\"Inputs must be 1D\")\n \n #Get shape of x and xp.\n N = x.shape[0]\n M = xp.shape[0]\n\n #Create covariance matrix.\n C = np.zeros((N, M))\n\n for i in range(N):\n for j in range(M):\n C[i, j] = sigmaf**2*np.exp(-(1/2*l**2)*((x[:, 1][i] - xp[:, 1][j])**2 + (x[:, 2][i] - xp[:, 2][j])**2))\n \n return C\n\n\ndef basis_compute(msrcs, bparams):\n \"\"\"\n The function computes a basis given the specifications.\n params is n_par when using a gtype-ppoly.\n params is n_dir, sigmaf, l and more when using a gtype-pcov.\n\n \"\"\"\n\n if bparams[\"gtype\"] == \"ppoly\":\n return get_basis_poly(msrcs, bparams[\"n_par\"])\n elif bparams[\"gtype\"] == \"pcov\":\n return get_basis_cov(msrcs, bparams)"
] |
[
[
"numpy.arange",
"numpy.eye",
"numpy.tile",
"numpy.ones",
"numpy.insert",
"numpy.exp",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
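The squared_exp double loop in the entry above costs N*M Python iterations; the same kernel, k(x, xp) = sigmaf^2 * exp(-d^2/(2*l^2)) on the (l, m) direction cosines, can be built in one shot with broadcasting. A sketch assuming the same (n, 3) source layout with l and m in columns 1 and 2:

```python
import numpy as np

def squared_exp_vec(x, xp, sigmaf, l):
    """Vectorised SE kernel on the (l, m) direction cosines."""
    dl = x[:, 1][:, None] - xp[:, 1][None, :]    # (N, M) pairwise differences
    dm = x[:, 2][:, None] - xp[:, 2][None, :]
    return sigmaf ** 2 * np.exp(-(dl ** 2 + dm ** 2) / (2 * l ** 2))

rng = np.random.default_rng(0)
srcs = rng.normal(size=(5, 3))
K = squared_exp_vec(srcs, srcs, sigmaf=1.0, l=0.5)
L = np.linalg.cholesky(K + 1e-10 * np.eye(5))    # jitter keeps K numerically PD
print(K.shape, np.allclose(K, K.T))              # (5, 5) True
```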
fardal/galpy
|
[
"93a1b6fc8d138899922127086cc66184919c8cba"
] |
[
"galpy/orbit_src/integrateFullOrbit.py"
] |
[
"import sys\nimport sysconfig\nimport warnings\nimport numpy as nu\nimport ctypes\nimport ctypes.util\nfrom numpy.ctypeslib import ndpointer\nimport os\nfrom galpy import potential\nfrom galpy.util import galpyWarning\nfrom galpy.orbit_src.integratePlanarOrbit import _parse_integrator, _parse_tol\n#Find and load the library\n_lib= None\nouterr= None\nPY3= sys.version > '3'\nif PY3: #pragma: no cover\n _ext_suffix= sysconfig.get_config_var('EXT_SUFFIX')\nelse:\n _ext_suffix= '.so'\nfor path in sys.path:\n try:\n _lib = ctypes.CDLL(os.path.join(path,'galpy_integrate_c%s' % _ext_suffix))\n except OSError as e:\n if os.path.exists(os.path.join(path,'galpy_integrate_c%s' % _ext_suffix)): #pragma: no cover\n outerr= e\n _lib = None\n else:\n break\nif _lib is None: #pragma: no cover\n if not outerr is None:\n warnings.warn(\"integrateFullOrbit_c extension module not loaded, because of error '%s' \" % outerr,\n galpyWarning)\n else:\n warnings.warn(\"integrateFullOrbit_c extension module not loaded, because galpy_integrate_c%s image was not found\" % _ext_suffix,\n galpyWarning)\n _ext_loaded= False\nelse:\n _ext_loaded= True\n\ndef _parse_pot(pot,potforactions=False):\n \"\"\"Parse the potential so it can be fed to C\"\"\"\n #Figure out what's in pot\n if not isinstance(pot,list):\n pot= [pot]\n #Initialize everything\n pot_type= []\n pot_args= []\n npot= len(pot)\n for p in pot:\n if isinstance(p,potential.LogarithmicHaloPotential):\n pot_type.append(0)\n pot_args.extend([p._amp,p._q,p._core2])\n elif isinstance(p,potential.MiyamotoNagaiPotential):\n pot_type.append(5)\n pot_args.extend([p._amp,p._a,p._b])\n elif isinstance(p,potential.PowerSphericalPotential):\n pot_type.append(7)\n pot_args.extend([p._amp,p.alpha])\n elif isinstance(p,potential.HernquistPotential):\n pot_type.append(8)\n pot_args.extend([p._amp,p.a])\n elif isinstance(p,potential.FlattenedNFWPotential):\n pot_type.append(91)\n pot_args.extend([p._amp,p.a,p.q])\n elif isinstance(p,potential.NFWPotential):\n pot_type.append(9)\n pot_args.extend([p._amp,p.a])\n elif isinstance(p,potential.JaffePotential):\n pot_type.append(10)\n pot_args.extend([p._amp,p.a])\n elif isinstance(p,potential.DoubleExponentialDiskPotential):\n pot_type.append(11)\n pot_args.extend([p._amp,p._alpha,p._beta,p._kmaxFac,\n p._nzeros,p._glorder])\n pot_args.extend([p._glx[ii] for ii in range(p._glorder)])\n pot_args.extend([p._glw[ii] for ii in range(p._glorder)])\n pot_args.extend([p._j0zeros[ii] for ii in range(p._nzeros+1)])\n pot_args.extend([p._dj0zeros[ii] for ii in range(p._nzeros+1)])\n pot_args.extend([p._j1zeros[ii] for ii in range(p._nzeros+1)])\n pot_args.extend([p._dj1zeros[ii] for ii in range(p._nzeros+1)])\n pot_args.extend([p._kp._amp,p._kp.alpha])\n elif isinstance(p,potential.FlattenedPowerPotential):\n pot_type.append(12)\n pot_args.extend([p._amp,p.alpha,p.q2,p.core2])\n elif isinstance(p,potential.interpRZPotential):\n pot_type.append(13)\n pot_args.extend([len(p._rgrid),len(p._zgrid)])\n if p._logR:\n pot_args.extend([p._logrgrid[ii] for ii in range(len(p._rgrid))])\n else:\n pot_args.extend([p._rgrid[ii] for ii in range(len(p._rgrid))])\n pot_args.extend([p._zgrid[ii] for ii in range(len(p._zgrid))])\n if potforactions:\n pot_args.extend([x for x in p._potGrid_splinecoeffs.flatten(order='C')])\n else:\n pot_args.extend([x for x in p._rforceGrid_splinecoeffs.flatten(order='C')])\n pot_args.extend([x for x in p._zforceGrid_splinecoeffs.flatten(order='C')])\n pot_args.extend([p._amp,int(p._logR)])\n elif 
isinstance(p,potential.IsochronePotential):\n pot_type.append(14)\n pot_args.extend([p._amp,p.b])\n elif isinstance(p,potential.PowerSphericalPotentialwCutoff):\n pot_type.append(15)\n pot_args.extend([p._amp,p.alpha,p.rc])\n elif isinstance(p,potential.MN3ExponentialDiskPotential):\n # Three Miyamoto-Nagai disks\n npot+= 2\n pot_type.extend([5,5,5])\n pot_args.extend([p._amp*p._mn3[0]._amp,\n p._mn3[0]._a,p._mn3[0]._b,\n p._amp*p._mn3[1]._amp,\n p._mn3[1]._a,p._mn3[1]._b,\n p._amp*p._mn3[2]._amp,\n p._mn3[2]._a,p._mn3[2]._b])\n elif isinstance(p,potential.KuzminKutuzovStaeckelPotential):\n pot_type.append(16)\n pot_args.extend([p._amp,p._ac,p._Delta])\n elif isinstance(p,potential.PlummerPotential):\n pot_type.append(17)\n pot_args.extend([p._amp,p._b])\n elif isinstance(p,potential.PseudoIsothermalPotential):\n pot_type.append(18)\n pot_args.extend([p._amp,p._a])\n pot_type= nu.array(pot_type,dtype=nu.int32,order='C')\n pot_args= nu.array(pot_args,dtype=nu.float64,order='C')\n return (npot,pot_type,pot_args)\n\ndef integrateFullOrbit_c(pot,yo,t,int_method,rtol=None,atol=None,dt=None):\n \"\"\"\n NAME:\n integrateFullOrbit_c\n PURPOSE:\n C integrate an ode for a FullOrbit\n INPUT:\n pot - Potential or list of such instances\n yo - initial condition [q,p]\n t - set of times at which one wants the result\n int_method= 'leapfrog_c', 'rk4_c', 'rk6_c', 'symplec4_c'\n rtol, atol\n dt= (None) force integrator to use this stepsize (default is to automatically determine one))\n OUTPUT:\n (y,err)\n y : array, shape (len(y0), len(t))\n Array containing the value of y for each desired time in t, \\\n with the initial value y0 in the first row.\n err: error message, if not zero: 1 means maximum step reduction happened for adaptive integrators\n HISTORY:\n 2011-11-13 - Written - Bovy (IAS)\n \"\"\"\n rtol, atol= _parse_tol(rtol,atol)\n npot, pot_type, pot_args= _parse_pot(pot)\n int_method_c= _parse_integrator(int_method)\n if dt is None: \n dt= -9999.99\n\n #Set up result array\n result= nu.empty((len(t),6))\n err= ctypes.c_int(0)\n\n #Set up the C code\n ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')\n integrationFunc= _lib.integrateFullOrbit\n integrationFunc.argtypes= [ndpointer(dtype=nu.float64,flags=ndarrayFlags),\n ctypes.c_int, \n ndpointer(dtype=nu.float64,flags=ndarrayFlags),\n ctypes.c_int,\n ndpointer(dtype=nu.int32,flags=ndarrayFlags),\n ndpointer(dtype=nu.float64,flags=ndarrayFlags),\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ndpointer(dtype=nu.float64,flags=ndarrayFlags),\n ctypes.POINTER(ctypes.c_int),\n ctypes.c_int]\n\n #Array requirements, first store old order\n f_cont= [yo.flags['F_CONTIGUOUS'],\n t.flags['F_CONTIGUOUS']]\n yo= nu.require(yo,dtype=nu.float64,requirements=['C','W'])\n t= nu.require(t,dtype=nu.float64,requirements=['C','W'])\n result= nu.require(result,dtype=nu.float64,requirements=['C','W'])\n\n #Run the C code\n integrationFunc(yo,\n ctypes.c_int(len(t)),\n t,\n ctypes.c_int(npot),\n pot_type,\n pot_args,\n ctypes.c_double(dt),\n ctypes.c_double(rtol),ctypes.c_double(atol),\n result,\n ctypes.byref(err),\n ctypes.c_int(int_method_c))\n\n #Reset input arrays\n if f_cont[0]: yo= nu.asfortranarray(yo)\n if f_cont[1]: t= nu.asfortranarray(t)\n\n return (result,err.value)\n\ndef integrateFullOrbit_dxdv_c(pot,yo,dyo,t,int_method,rtol=None,atol=None): #pragma: no cover because not included in v1, uncover when included\n \"\"\"\n NAME:\n integrateFullOrbit_dxdv_c\n PURPOSE:\n C integrate an ode for a planarOrbit+phase space volume dxdv\n INPUT:\n pot - 
Potential or list of such instances\n yo - initial condition [q,p]\n dyo - initial condition [dq,dp]\n t - set of times at which one wants the result\n int_method= 'leapfrog_c', 'rk4_c', 'rk6_c', 'symplec4_c'\n rtol, atol\n OUTPUT:\n (y,err)\n y : array, shape (len(y0), len(t))\n Array containing the value of y for each desired time in t, \\\n with the initial value y0 in the first row.\n err: error message if not zero, 1: maximum step reduction happened for adaptive integrators\n HISTORY:\n 2011-11-13 - Written - Bovy (IAS)\n \"\"\"\n rtol, atol= _parse_tol(rtol,atol)\n npot, pot_type, pot_args= _parse_pot(pot)\n int_method_c= _parse_integrator(int_method)\n yo= nu.concatenate((yo,dyo))\n\n #Set up result array\n result= nu.empty((len(t),12))\n err= ctypes.c_int(0)\n\n #Set up the C code\n ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')\n integrationFunc= _lib.integrateFullOrbit_dxdv\n integrationFunc.argtypes= [ndpointer(dtype=nu.float64,flags=ndarrayFlags),\n ctypes.c_int, \n ndpointer(dtype=nu.float64,flags=ndarrayFlags),\n ctypes.c_int,\n ndpointer(dtype=nu.int32,flags=ndarrayFlags),\n ndpointer(dtype=nu.float64,flags=ndarrayFlags),\n ctypes.c_double,\n ctypes.c_double,\n ndpointer(dtype=nu.float64,flags=ndarrayFlags),\n ctypes.POINTER(ctypes.c_int),\n ctypes.c_int]\n\n #Array requirements, first store old order\n f_cont= [yo.flags['F_CONTIGUOUS'],\n t.flags['F_CONTIGUOUS']]\n yo= nu.require(yo,dtype=nu.float64,requirements=['C','W'])\n t= nu.require(t,dtype=nu.float64,requirements=['C','W'])\n result= nu.require(result,dtype=nu.float64,requirements=['C','W'])\n\n #Run the C code\n integrationFunc(yo,\n ctypes.c_int(len(t)),\n t,\n ctypes.c_int(npot),\n pot_type,\n pot_args,\n ctypes.c_double(rtol),ctypes.c_double(atol),\n result,\n ctypes.byref(err),\n ctypes.c_int(int_method_c))\n\n #Reset input arrays\n if f_cont[0]: yo= nu.asfortranarray(yo)\n if f_cont[1]: t= nu.asfortranarray(t)\n\n return (result,err.value)\n"
] |
[
[
"numpy.asfortranarray",
"numpy.concatenate",
"numpy.require",
"numpy.ctypeslib.ndpointer",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
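_parse_pot in the galpy entry flattens a heterogeneous potential list into two plain arrays (integer type codes plus a packed float argument list) precisely so they can cross the ctypes boundary; the caller then declares ndpointer argtypes and forces C-contiguous, writeable arrays with np.require. A minimal sketch of that bridge against a hypothetical shared library libsum.so exporting double sum_array(double*, int); the library and function names are invented, the calling pattern is the point:

```python
import ctypes
import numpy as np
from numpy.ctypeslib import ndpointer

lib = ctypes.CDLL("./libsum.so")                 # hypothetical library
lib.sum_array.restype = ctypes.c_double
lib.sum_array.argtypes = [
    ndpointer(dtype=np.float64, flags=("C_CONTIGUOUS", "WRITEABLE")),
    ctypes.c_int,
]

x = np.arange(5, dtype=np.float64)
x = np.require(x, dtype=np.float64, requirements=["C", "W"])  # same trick as above
print(lib.sum_array(x, ctypes.c_int(len(x))))    # 10.0, computed in C
```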
tiagovla/fdtd.py
|
[
"c2b98c054f2471f5d92e760768b94d97b590f468"
] |
[
"fdtd/sources.py"
] |
[
"\"\"\"This module implement sources.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom functools import partial\nfrom typing import Callable, Optional, Tuple, Union\n\nimport numpy as np\n\nfrom .bases import FDTDElementBase\nfrom .constants import FREESPACE_PERMITTIVITY as EPS_0\nfrom .constants import SPEED_LIGHT as C\nfrom .utils import BoundingBox, Direction\nfrom .waveforms import Waveform\n\nlogger = logging.getLogger(__name__)\n\n\nclass Source(FDTDElementBase):\n \"\"\"Base class for all sources.\n\n Parameters\n ----------\n x_min : float\n Minimum x coordinate of the bounding box containing the source.\n y_min : float\n Minimum y coordinate of the bounding box containing the source.\n z_min : float\n Minimum z coordinate of the bounding box containing the source.\n x_max : float\n Maximum x coordinate of the bounding box containing the source.\n y_max : float\n Maximum y coordinate of the bounding box containing the source.\n z_max : float\n Maximum z coordinate of the bounding box containing the source.\n waveform : Union[Waveform, Callable]\n Waveform type.\n resistance : float\n Internal resistance of the source.\n name : Optional[str]\n Name of the source.\n direction : Direction\n Direction of the source.\n \"\"\"\n\n def __init__(\n self,\n x_min: float,\n y_min: float,\n z_min: float,\n x_max: float,\n y_max: float,\n z_max: float,\n waveform: Union[Waveform, Callable],\n resistance: float = 50,\n name: Optional[str] = None,\n direction: Direction = Direction.Z,\n ):\n \"\"\"Initialize the object.\"\"\"\n super().__init__()\n self.x_min = x_min\n self.y_min = y_min\n self.z_min = z_min\n self.x_max = x_max\n self.y_max = y_max\n self.z_max = z_max\n self.resistance = resistance\n self.waveform = waveform\n self.bounding_box = BoundingBox(x_min, x_max, y_min, y_max, z_min,\n z_max)\n self.direction = direction\n self.name = name if name else self._create_new_name()\n self.idx_s: Optional[Tuple] = None\n self.idx_e: Optional[Tuple] = None\n\n self.c_v: Optional[np.ndarray] = None\n self.i_s: Optional[slice] = None\n self.j_s: Optional[slice] = None\n self.k_s: Optional[slice] = None\n\n def attach_to_grid(self):\n \"\"\"Attach object to grid.\"\"\"\n self.idx_s = (\n np.argmin(np.abs(self.grid.x - self.x_min)),\n np.argmin(np.abs(self.grid.y - self.y_min)),\n np.argmin(np.abs(self.grid.z - self.z_min)),\n )\n self.idx_e = (\n np.argmin(np.abs(self.grid.x - self.x_max)),\n np.argmin(np.abs(self.grid.y - self.y_max)),\n np.argmin(np.abs(self.grid.z - self.z_max)),\n )\n\n logger.debug(f\"Source attached to {self.idx_s} to {self.idx_e}\")\n\n def update_E(self):\n \"\"\"Update E fields.\"\"\"\n\n def update_H(self):\n \"\"\"Update H fields.\"\"\"\n\n def __repr__(self):\n \"\"\"Dev. 
string representation.\"\"\"\n return (f\"{self.__class__.__name__}\"\n f\"[name={self.name}, waveform={self.waveform}, \"\n f\"x_min={self.x_min}, y_min={self.y_min}, z_min={self.z_min}, \"\n f\"x_max={self.x_max}, y_max={self.y_max}, z_max={self.z_max}]\")\n\n def plot_3d(self, ax, alpha=0.5):\n \"\"\"Plot a source and attach to an axis.\"\"\"\n\n\nclass ImpressedMagneticCurrentSource(Source):\n \"\"\"Model of an impressed magnetic current source.\n\n Parameters\n ----------\n x_min : float\n Minimum x coordinate of the bounding box containing the source.\n y_min : float\n Minimum y coordinate of the bounding box containing the source.\n z_min : float\n Minimum z coordinate of the bounding box containing the source.\n x_max : float\n Maximum x coordinate of the bounding box containing the source.\n y_max : float\n Maximum y coordinate of the bounding box containing the source.\n z_max : float\n Maximum z coordinate of the bounding box containing the source.\n waveform : Union[Waveform, Callable]\n Waveform type.\n resistance : float\n Internal resistance of the source.\n name : Optional[str]\n Name of the source.\n direction : Direction\n Direction of the source.\n \"\"\"\n\n def update_H(self):\n \"\"\"Update field.\"\"\"\n source_value = self.waveform(self.grid.current_time)\n self.grid.H[self.i_s, self.j_s, self.k_s, self.direction.value] += (\n -self.grid.c_he[self.i_s, self.j_s, self.k_s, self.direction.value]\n * source_value)\n\n def attach_to_grid(self):\n \"\"\"Attach object to grid.\"\"\"\n self.idx_s = (\n np.argmin(np.abs(self.grid.x - self.x_min)),\n np.argmin(np.abs(self.grid.y - self.y_min)),\n np.argmin(np.abs(self.grid.z - self.z_min)),\n )\n self.idx_e = (\n np.argmin(np.abs(self.grid.x - self.x_max)),\n np.argmin(np.abs(self.grid.y - self.y_max)),\n np.argmin(np.abs(self.grid.z - self.z_max)),\n )\n\n logger.debug(\n f\"Source {self.name} attached to {self.idx_s} to {self.idx_e}\")\n\n self.i_s = slice(self.idx_s[0], self.idx_e[0] + 1)\n self.j_s = slice(self.idx_s[1], self.idx_e[1] + 1)\n self.k_s = slice(self.idx_s[2], self.idx_e[2] + 1)\n\n def plot_3d(self, ax, alpha: float = 0.5):\n \"\"\"Plot a brick and attach to an axis.\"\"\"\n X, Y, Z = np.meshgrid(\n [self.x_min, self.x_max],\n [self.y_min, self.y_max],\n [self.z_min, self.z_max],\n )\n plot = partial(ax.plot_surface, alpha=alpha, color=\"#00FF00\")\n\n plot(X[:, :, 0], Y[:, :, 0], Z[:, :, 0])\n plot(X[:, :, -1], Y[:, :, -1], Z[:, :, -1])\n plot(X[:, 0, :], Y[:, 0, :], Z[:, 0, :])\n plot(X[:, -1, :], Y[:, -1, :], Z[:, -1, :])\n plot(X[0, :, :], Y[0, :, :], Z[0, :, :])\n plot(X[-1, :, :], Y[-1, :, :], Z[-1, :, :])\n\n\nclass ImpressedElectricCurrentSource(Source):\n \"\"\"Model of an impressed electric current source.\n\n Parameters\n ----------\n x_min : float\n Minimum x coordinate of the bounding box containing the source.\n y_min : float\n Minimum y coordinate of the bounding box containing the source.\n z_min : float\n Minimum z coordinate of the bounding box containing the source.\n x_max : float\n Maximum x coordinate of the bounding box containing the source.\n y_max : float\n Maximum y coordinate of the bounding box containing the source.\n z_max : float\n Maximum z coordinate of the bounding box containing the source.\n waveform : Union[Waveform, Callable]\n Waveform type.\n resistance : float\n Internal resistance of the source.\n name : Optional[str]\n Name of the source.\n direction : Direction\n Direction of the source.\n \"\"\"\n\n def update_E(self):\n \"\"\"Update field.\"\"\"\n source_value 
= self.waveform(self.grid.current_time)\n self.grid.E[self.i_s, self.j_s, self.k_s, self.direction.value] += (\n -self.grid.c_eh[self.i_s, self.j_s, self.k_s, self.direction.value]\n * source_value)\n\n def attach_to_grid(self):\n \"\"\"Attach object to grid.\"\"\"\n self.idx_s = (\n np.argmin(np.abs(self.grid.x - self.x_min)),\n np.argmin(np.abs(self.grid.y - self.y_min)),\n np.argmin(np.abs(self.grid.z - self.z_min)),\n )\n self.idx_e = (\n np.argmin(np.abs(self.grid.x - self.x_max)),\n np.argmin(np.abs(self.grid.y - self.y_max)),\n np.argmin(np.abs(self.grid.z - self.z_max)),\n )\n\n logger.debug(\n f\"Source {self.name} attached to {self.idx_s} to {self.idx_e}\")\n\n self.i_s = slice(self.idx_s[0], self.idx_e[0] + 1)\n self.j_s = slice(self.idx_s[1], self.idx_e[1] + 1)\n self.k_s = slice(self.idx_s[2], self.idx_e[2] + 1)\n\n def plot_3d(self, ax, alpha: float = 0.5):\n \"\"\"Plot a brick and attach to an axis.\"\"\"\n X, Y, Z = np.meshgrid(\n [self.x_min, self.x_max],\n [self.y_min, self.y_max],\n [self.z_min, self.z_max],\n )\n plot = partial(ax.plot_surface, alpha=alpha, color=\"#00FF00\")\n\n plot(X[:, :, 0], Y[:, :, 0], Z[:, :, 0])\n plot(X[:, :, -1], Y[:, :, -1], Z[:, :, -1])\n plot(X[:, 0, :], Y[:, 0, :], Z[:, 0, :])\n plot(X[:, -1, :], Y[:, -1, :], Z[:, -1, :])\n plot(X[0, :, :], Y[0, :, :], Z[0, :, :])\n plot(X[-1, :, :], Y[-1, :, :], Z[-1, :, :])\n\n\nclass EFieldSource(Source):\n \"\"\"Model of an electric field source.\n\n Parameters\n ----------\n x_min : float\n Minimum x coordinate of the bounding box containing the source.\n y_min : float\n Minimum y coordinate of the bounding box containing the source.\n z_min : float\n Minimum z coordinate of the bounding box containing the source.\n x_max : float\n Maximum x coordinate of the bounding box containing the source.\n y_max : float\n Maximum y coordinate of the bounding box containing the source.\n z_max : float\n Maximum z coordinate of the bounding box containing the source.\n waveform : Union[Waveform, Callable]\n Waveform type.\n resistance : float\n Internal resistance of the source.\n name : Optional[str]\n Name of the source.\n direction : Direction\n Direction of the source.\n \"\"\"\n\n def update_E(self):\n \"\"\"Update field.\"\"\"\n E_s = self.waveform(self.grid.current_time)\n self.grid.E[self.i_s, self.j_s, self.k_s, self.direction.value] += E_s\n\n def attach_to_grid(self):\n \"\"\"Attach object to grid.\"\"\"\n self.idx_s = (\n np.argmin(np.abs(self.grid.x - self.x_min)),\n np.argmin(np.abs(self.grid.y - self.y_min)),\n np.argmin(np.abs(self.grid.z - self.z_min)),\n )\n self.idx_e = (\n np.argmin(np.abs(self.grid.x - self.x_max)),\n np.argmin(np.abs(self.grid.y - self.y_max)),\n np.argmin(np.abs(self.grid.z - self.z_max)),\n )\n\n logger.debug(\n f\"Source {self.name} attached to {self.idx_s} to {self.idx_e}\")\n\n self.i_s = slice(self.idx_s[0], self.idx_e[0] + 1)\n self.j_s = slice(self.idx_s[1], self.idx_e[1] + 1)\n self.k_s = slice(self.idx_s[2], self.idx_e[2] + 1)\n\n def plot_3d(self, ax, alpha: float = 0.5):\n \"\"\"Plot a brick and attach to an axis.\"\"\"\n X, Y, Z = np.meshgrid(\n [self.x_min, self.x_max],\n [self.y_min, self.y_max],\n [self.z_min, self.z_max],\n )\n plot = partial(ax.plot_surface, alpha=alpha, color=\"#00FF00\")\n\n plot(X[:, :, 0], Y[:, :, 0], Z[:, :, 0])\n plot(X[:, :, -1], Y[:, :, -1], Z[:, :, -1])\n plot(X[:, 0, :], Y[:, 0, :], Z[:, 0, :])\n plot(X[:, -1, :], Y[:, -1, :], Z[:, -1, :])\n plot(X[0, :, :], Y[0, :, :], Z[0, :, :])\n plot(X[-1, :, :], Y[-1, :, :], Z[-1, :, 
:])\n\n\nclass VoltageSource(Source):\n \"\"\"Model of a voltage source.\n\n Parameters\n ----------\n x_min : float\n Minimum x coordinate of the bounding box containing the source.\n y_min : float\n Minimum y coordinate of the bounding box containing the source.\n z_min : float\n Minimum z coordinate of the bounding box containing the source.\n x_max : float\n Maximum x coordinate of the bounding box containing the source.\n y_max : float\n Maximum y coordinate of the bounding box containing the source.\n z_max : float\n Maximum z coordinate of the bounding box containing the source.\n waveform : Union[Waveform, Callable]\n Waveform type.\n resistance : float\n Internal resistance of the source.\n name : Optional[str]\n Name of the source.\n direction : Direction\n Direction of the source.\n \"\"\"\n\n def update_E(self):\n \"\"\"Update field.\"\"\"\n Vs = self._v_f * self.waveform(self.grid.current_time)\n\n if self.c_v is not None:\n if self.direction == Direction.X:\n self.grid.E[self.i_s, self.j_s, self.k_s, 0] += self.c_v * Vs\n elif self.direction == Direction.Y:\n self.grid.E[self.i_s, self.j_s, self.k_s, 1] += self.c_v * Vs\n elif self.direction == Direction.Z:\n self.grid.E[self.i_s, self.j_s, self.k_s, 2] += self.c_v * Vs\n\n def attach_to_grid(self):\n \"\"\"Attach object to grid.\"\"\"\n self.idx_s = s = (\n np.argmin(np.abs(self.grid.x - self.x_min)),\n np.argmin(np.abs(self.grid.y - self.y_min)),\n np.argmin(np.abs(self.grid.z - self.z_min)),\n )\n self.idx_e = e = (\n np.argmin(np.abs(self.grid.x - self.x_max)),\n np.argmin(np.abs(self.grid.y - self.y_max)),\n np.argmin(np.abs(self.grid.z - self.z_max)),\n )\n\n dx, dy, dz = self.grid.spacing\n dt = self.grid.dt\n r_s = self.resistance\n\n if self.direction == Direction.X:\n self.i_s = i_s = slice(self.idx_s[0], self.idx_e[0])\n self.j_s = j_s = slice(self.idx_s[1], self.idx_e[1] + 1)\n self.k_s = k_s = slice(self.idx_s[2], self.idx_e[2] + 1)\n\n term = (dt*dx) / (r_s*dy*dz)\n eps = self.grid.eps_r[i_s, j_s, k_s, 0] * EPS_0\n sigma_e = self.grid.sigma_e[i_s, j_s, k_s, 0]\n\n self.grid.c_ee[i_s, j_s, k_s, 0] = \\\n (2*eps - dt*sigma_e - term) / (2*eps + dt*sigma_e + term)\n self.grid.c_eh[i_s, j_s, k_s, 0] = \\\n (2*dt) / (2*eps + dt*sigma_e + term)\n self.c_v = (2*dt) / (2*eps + dt*sigma_e + term) / (r_s*dy*dz)\n\n elif self.direction == Direction.Y:\n self.i_s = i_s = slice(self.idx_s[0], self.idx_e[0])\n self.j_s = j_s = slice(self.idx_s[1], self.idx_e[1] + 1)\n self.k_s = k_s = slice(self.idx_s[2], self.idx_e[2] + 1)\n term = (dt*dy) / (r_s*dx*dz)\n\n eps = self.grid.eps_r[i_s, j_s, k_s, 1] * EPS_0\n sigma_e = self.grid.sigma_e[i_s, j_s, k_s, 1]\n\n self.grid.c_ee[i_s, j_s, k_s, 1] = \\\n (2*eps - dt*sigma_e - term) / (2*eps + dt*sigma_e + term)\n self.grid.c_eh[i_s, j_s, k_s, 1] = \\\n (2*dt) / (2*eps + dt*sigma_e + term)\n self.c_v = (2*dt) / (2*eps + dt*sigma_e + term) / (r_s*dx*dz)\n\n else:\n self.i_s = i_s = slice(self.idx_s[0], self.idx_e[0] + 1)\n self.j_s = j_s = slice(self.idx_s[1], self.idx_e[1] + 1)\n self.k_s = k_s = slice(self.idx_s[2], self.idx_e[2])\n term = (dt*dz) / (r_s*dx*dy)\n\n self._v_f = 1 / (e[2] - s[2])\n self._r_f = (e[0] - s[0] + 1) * (e[1] - s[1] + 1) * self._v_f\n\n eps = self.grid.eps_r[i_s, j_s, k_s, 2] * EPS_0\n sigma_e = self.grid.sigma_e[i_s, j_s, k_s, 2]\n r_s *= self._r_f\n term = (dt*dz) / (r_s*dx*dy)\n\n self.grid.c_ee[i_s, j_s, k_s, 2] = \\\n (2*eps - dt*sigma_e - term) / (2*eps + dt*sigma_e + term)\n self.grid.c_eh[i_s, j_s, k_s, 2] = \\\n (2*dt) / (2*eps + dt*sigma_e + term)\n 
self.c_v = -(2 * dt) / (2*eps + dt*sigma_e + term) / (r_s*dx*dy)\n\n def plot_3d(self, ax, alpha: float = 0.5):\n \"\"\"Plot a brick and attach to an axis.\"\"\"\n X, Y, Z = np.meshgrid(\n [self.x_min, self.x_max],\n [self.y_min, self.y_max],\n [self.z_min, self.z_max],\n )\n plot = partial(ax.plot_surface, alpha=alpha, color=\"#00FF00\")\n\n plot(X[:, :, 0], Y[:, :, 0], Z[:, :, 0])\n plot(X[:, :, -1], Y[:, :, -1], Z[:, :, -1])\n plot(X[:, 0, :], Y[:, 0, :], Z[:, 0, :])\n plot(X[:, -1, :], Y[:, -1, :], Z[:, -1, :])\n plot(X[0, :, :], Y[0, :, :], Z[0, :, :])\n plot(X[-1, :, :], Y[-1, :, :], Z[-1, :, :])\n\n\nclass CurrentSource(Source):\n \"\"\"Implement a current source.\"\"\"\n"
] |
[
[
"numpy.meshgrid",
"numpy.abs"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
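Every attach_to_grid in the fdtd entry snaps physical coordinates to grid indices with argmin(|grid - coord|) and builds inclusive slices from them, and update_E adds the waveform into the field (a soft source) rather than overwriting it. A one-dimensional numpy sketch of both steps, with illustrative grid and pulse parameters:

```python
import numpy as np

x = np.linspace(0.0, 1.0, 101)             # 1-D grid coordinates
E = np.zeros(101)                          # one field component on that grid

def snap(coord):
    """Nearest grid index to a physical coordinate."""
    return int(np.argmin(np.abs(x - coord)))

i_s = slice(snap(0.4), snap(0.6) + 1)      # +1 makes the end index inclusive

def waveform(t, t0=3e-10, tau=1e-10):
    return np.exp(-(((t - t0) / tau) ** 2))  # Gaussian pulse

for step in range(5):
    E[i_s] += waveform(step * 1e-10)       # soft source: add, don't overwrite
print(E[snap(0.5)])                        # ~1.75 accumulated over the pulse
```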
artkuli/openvino
|
[
"5939cb1b363ebb56b73c2ad95d8899961a084677"
] |
[
"src/bindings/python/tests/test_onnx/test_ops_nonlinear.py"
] |
[
"# -*- coding: utf-8 -*-\n# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\nimport onnx\nimport pytest\n\nfrom tests.test_onnx.utils import run_node\n\n\ndef import_and_compute(op_type, input_data, **node_attrs):\n data_inputs = [np.array(input_data)]\n node = onnx.helper.make_node(op_type, inputs=[\"x\"], outputs=[\"y\"], **node_attrs)\n return run_node(node, data_inputs).pop()\n\n\ndef assert_onnx_import_equals_callable(onnx_op_type, python_function, data, **kwargs):\n data = np.array(data, dtype=np.float32)\n assert np.allclose(import_and_compute(onnx_op_type, data, **kwargs), python_function(data, **kwargs))\n\n\ndef test_sigmoid():\n def sigmoid(value):\n return 1 / (1 + np.exp(-value))\n\n assert_onnx_import_equals_callable(\"Sigmoid\", sigmoid, [-2, -1.0, 0.0, 1.0, 2.0])\n assert_onnx_import_equals_callable(\"Sigmoid\", sigmoid, [0.0])\n assert_onnx_import_equals_callable(\"Sigmoid\", sigmoid, [-2, -1.0, 0.0, 1.0, 2.0])\n\n\ndef test_tanh():\n assert_onnx_import_equals_callable(\"Tanh\", np.tanh, [-2, -1.0, 0.0, 1.0, 2.0])\n assert_onnx_import_equals_callable(\"Tanh\", np.tanh, [0.0])\n assert_onnx_import_equals_callable(\"Tanh\", np.tanh, [-2, -1.0, 0.0, 1.0, 2.0])\n\n\ndef test_relu():\n def relu(value):\n return np.maximum(value, 0)\n\n assert_onnx_import_equals_callable(\"Relu\", relu, [-2, -1.0, 0.0, 1.0, 2.0])\n assert_onnx_import_equals_callable(\"Relu\", relu, [0.0])\n assert_onnx_import_equals_callable(\"Relu\", relu, [-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1])\n assert_onnx_import_equals_callable(\"Relu\", relu, [[1, 2, 3], [4, 5, 6]])\n assert_onnx_import_equals_callable(\"Relu\", relu, [[-3, -2, -1], [1, 2, 3]])\n\n\ndef test_leaky_relu():\n def leaky_relu(value, alpha=0.01):\n return np.maximum(alpha * value, value)\n\n assert_onnx_import_equals_callable(\"LeakyRelu\", leaky_relu, [-2, -1.0, 0.0, 1.0, 2.0], alpha=0.5)\n assert_onnx_import_equals_callable(\"LeakyRelu\", leaky_relu, [0.0])\n assert_onnx_import_equals_callable(\"LeakyRelu\", leaky_relu, [-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1], alpha=1.0)\n assert_onnx_import_equals_callable(\"LeakyRelu\", leaky_relu, [[1, 2, 3], [4, 5, 6]], alpha=0.2)\n assert_onnx_import_equals_callable(\"LeakyRelu\", leaky_relu, [[-3, -2, -1], [1, 2, 3]])\n\n\[email protected](\n (\"value\", \"slope\"),\n [\n ([-2, -1.0, 0.0, 1.0, 2.0], 0.5),\n ([0.0], 1),\n ([-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1], 1),\n ([[1, 2, 3], [4, 5, 6]], 0.5),\n ([[-3, -2, -1], [1, 2, 3]], 1),\n ],\n)\ndef test_parametric_relu(value, slope):\n def parametic_relu(value, slope):\n return np.where(value < 0, slope * value, value)\n\n value, slope = np.array(value).astype(np.float32), np.array(slope).astype(np.float32)\n expected_output = parametic_relu(value, slope)\n node = onnx.helper.make_node(\"PRelu\", inputs=[\"x\", \"slope\"], outputs=[\"y\"])\n output = run_node(node, [value, slope]).pop()\n assert np.allclose(output, expected_output)\n\n\ndef test_selu():\n # f(x) = gamma * (alpha * exp(x) - alpha) for x <= 0, y = gamma * x for x > 0\n def selu(value, alpha=1.67326319217681884765625, gamma=1.05070102214813232421875):\n return np.where(value <= 0, gamma * (alpha * np.exp(value) - alpha), gamma * value)\n\n assert_onnx_import_equals_callable(\"Selu\", selu, [-2, -1.0, 0.0, 1.0, 2.0])\n assert_onnx_import_equals_callable(\"Selu\", selu, [0.0])\n assert_onnx_import_equals_callable(\"Selu\", selu, [-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1])\n assert_onnx_import_equals_callable(\"Selu\", selu, [[1, 2, 
3], [4, 5, 6]])\n assert_onnx_import_equals_callable(\"Selu\", selu, [-2, -1.0, 0.0, 1.0, 2.0], gamma=0.5, alpha=0.5)\n\n\[email protected](\n (\"data\", \"alpha_value\"),\n [\n pytest.param([-2, -1.0, 0.0, 1.0, 2.0], 1.0),\n pytest.param([0.0], 1.0),\n pytest.param([-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1], 1.0),\n pytest.param([[1, 2, 3], [4, 5, 6]], 1.0),\n pytest.param([-2, -1.0, 0.0, 1.0, 2.0], 0.5),\n ],\n)\ndef test_elu(data, alpha_value):\n # f(x) = alpha * (exp(x) - 1) for x < 0, f(x) = x for x >= 0\n def elu(value, alpha):\n return np.where(value < 0, alpha * (np.exp(value) - 1), value)\n\n assert_onnx_import_equals_callable(\"Elu\", elu, data, alpha=alpha_value)\n"
] |
[
[
"numpy.maximum",
"numpy.allclose",
"numpy.array",
"numpy.exp",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
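Each test in the openvino entry compares an ONNX node against a NumPy reference on the same data. The reference functions alone can be exercised without the ONNX runtime; a sketch reproducing two of them (printed values below are rounded):

```python
import numpy as np

def elu(value, alpha=1.0):
    # Reference from the test file: alpha*(exp(x)-1) for x < 0, x otherwise
    return np.where(value < 0, alpha * (np.exp(value) - 1), value)

def leaky_relu(value, alpha=0.01):
    return np.maximum(alpha * value, value)

data = np.array([-2, -1.0, 0.0, 1.0, 2.0], dtype=np.float32)
print(elu(data, alpha=0.5))         # [-0.432 -0.316  0.  1.  2.]
print(leaky_relu(data, alpha=0.5))  # [-1.  -0.5   0.  1.  2.]
# run_node(...) output is then checked against these arrays with np.allclose.
```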
ikkoham/qiskit-core
|
[
"a7d66f9aa7b5d61fa6ead7ac3ac4348dec9f5c93"
] |
[
"qiskit/circuit/library/arithmetic/weighted_adder.py"
] |
[
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Compute the weighted sum of qubit states.\"\"\"\n\nfrom typing import List, Optional\nimport numpy as np\n\nfrom qiskit.circuit import QuantumRegister, AncillaRegister, QuantumCircuit\n\nfrom ..blueprintcircuit import BlueprintCircuit\n\n\nclass WeightedAdder(BlueprintCircuit):\n r\"\"\"A circuit to compute the weighted sum of qubit registers.\n\n Given :math:`n` qubit basis states :math:`q_0, \\ldots, q_{n-1} \\in \\{0, 1\\}` and non-negative\n integer weights :math:`\\lambda_0, \\ldots, \\lambda_{n-1}`, this circuit performs the operation\n\n .. math::\n\n |q_0 \\ldots q_{n-1}\\rangle |0\\rangle_s\n \\mapsto |q_0 \\ldots q_{n-1}\\rangle |\\sum_{j=0}^{n-1} \\lambda_j q_j\\rangle_s\n\n where :math:`s` is the number of sum qubits required.\n This can be computed as\n\n .. math::\n\n s = 1 + \\left\\lfloor \\log_2\\left( \\sum_{j=0}^{n-1} \\lambda_j \\right) \\right\\rfloor\n\n or :math:`s = 1` if the sum of the weights is 0 (then the expression in the logarithm is\n invalid).\n\n For qubits in a circuit diagram, the first weight applies to the upper-most qubit.\n For an example where the state of 4 qubits is added into a sum register, the circuit can\n be schematically drawn as\n\n .. parsed-literal::\n\n ┌────────┐\n state_0: ┤0 ├ | state_0 * weights[0]\n │ │ |\n state_1: ┤1 ├ | + state_1 * weights[1]\n │ │ |\n state_2: ┤2 ├ | + state_2 * weights[2]\n │ │ |\n state_3: ┤3 ├ | + state_3 * weights[3]\n │ │\n sum_0: ┤4 ├ |\n │ Adder │ |\n sum_1: ┤5 ├ | = sum_0 * 2^0 + sum_1 * 2^1 + sum_2 * 2^2\n │ │ |\n sum_2: ┤6 ├ |\n │ │\n carry_0: ┤7 ├\n │ │\n carry_1: ┤8 ├\n │ │\n control_0: ┤9 ├\n └────────┘\n \"\"\"\n\n def __init__(\n self,\n num_state_qubits: Optional[int] = None,\n weights: Optional[List[int]] = None,\n name: str = \"adder\",\n ) -> None:\n \"\"\"Computes the weighted sum controlled by state qubits.\n\n Args:\n num_state_qubits: The number of state qubits.\n weights: List of weights, one for each state qubit. 
If none are provided they\n default to 1 for every qubit.\n name: The name of the circuit.\n \"\"\"\n super().__init__(name=name)\n\n self._weights = None\n self._num_state_qubits = None\n\n self.weights = weights\n self.num_state_qubits = num_state_qubits\n\n @property\n def num_sum_qubits(self) -> int:\n \"\"\"The number of sum qubits in the circuit.\n\n Returns:\n The number of qubits needed to represent the weighted sum of the qubits.\n \"\"\"\n if sum(self.weights) > 0:\n return int(np.floor(np.log2(sum(self.weights))) + 1)\n return 1\n\n @property\n def weights(self) -> List[int]:\n \"\"\"The weights for the qubit states.\n\n Returns:\n The weight for the qubit states.\n \"\"\"\n if self._weights:\n return self._weights\n if self.num_state_qubits:\n return [1] * self.num_state_qubits\n return None\n\n @weights.setter\n def weights(self, weights: List[int]) -> None:\n \"\"\"Set the weights for summing the qubit states.\n\n Args:\n weights: The new weights.\n\n Raises:\n ValueError: If not all weights are close to an integer.\n \"\"\"\n if weights:\n for i, weight in enumerate(weights):\n if not np.isclose(weight, np.round(weight)):\n raise ValueError(\"Non-integer weights are not supported!\")\n weights[i] = np.round(weight)\n\n self._invalidate()\n self._weights = weights\n self._reset_registers()\n\n @property\n def num_state_qubits(self) -> int:\n \"\"\"The number of qubits to be summed.\n\n Returns:\n The number of state qubits.\n \"\"\"\n return self._num_state_qubits\n\n @num_state_qubits.setter\n def num_state_qubits(self, num_state_qubits: int) -> None:\n \"\"\"Set the number of state qubits.\n\n Args:\n num_state_qubits: The new number of state qubits.\n \"\"\"\n if self._num_state_qubits is None or num_state_qubits != self._num_state_qubits:\n self._invalidate()\n self._num_state_qubits = num_state_qubits\n self._reset_registers()\n\n def _reset_registers(self):\n \"\"\"Reset the registers.\"\"\"\n self.qregs = []\n\n if self.num_state_qubits:\n qr_state = QuantumRegister(self.num_state_qubits, name=\"state\")\n qr_sum = QuantumRegister(self.num_sum_qubits, name=\"sum\")\n self.qregs = [qr_state, qr_sum]\n\n if self.num_carry_qubits > 0:\n qr_carry = AncillaRegister(self.num_carry_qubits, name=\"carry\")\n self.add_register(qr_carry)\n\n if self.num_control_qubits > 0:\n qr_control = AncillaRegister(self.num_control_qubits, name=\"control\")\n self.add_register(qr_control)\n\n @property\n def num_carry_qubits(self) -> int:\n \"\"\"The number of carry qubits required to compute the sum.\n\n Note that this is not necessarily equal to the number of ancilla qubits, these can\n be queried using ``num_ancilla_qubits``.\n\n Returns:\n The number of carry qubits required to compute the sum.\n \"\"\"\n return self.num_sum_qubits - 1\n\n @property\n def num_control_qubits(self) -> int:\n \"\"\"The number of additional control qubits required.\n\n Note that the total number of ancilla qubits can be obtained by calling the\n method ``num_ancilla_qubits``.\n\n Returns:\n The number of additional control qubits required (0 or 1).\n \"\"\"\n return int(self.num_sum_qubits > 2)\n\n def _check_configuration(self, raise_on_failure=True):\n \"\"\"Check if the current configuration is valid.\"\"\"\n valid = True\n if self._num_state_qubits is None:\n valid = False\n if raise_on_failure:\n raise AttributeError(\"The number of state qubits has not been set.\")\n\n if self._num_state_qubits != len(self.weights):\n valid = False\n if raise_on_failure:\n raise ValueError(\"Mismatching number of 
state qubits and weights.\")\n\n return valid\n\n def _build(self):\n \"\"\"If not already built, build the circuit.\"\"\"\n if self._is_built:\n return\n\n super()._build()\n\n num_result_qubits = self.num_state_qubits + self.num_sum_qubits\n\n circuit = QuantumCircuit(*self.qregs)\n qr_state = circuit.qubits[: self.num_state_qubits]\n qr_sum = circuit.qubits[self.num_state_qubits : num_result_qubits]\n qr_carry = circuit.qubits[num_result_qubits : num_result_qubits + self.num_carry_qubits]\n qr_control = circuit.qubits[num_result_qubits + self.num_carry_qubits :]\n\n # loop over state qubits and corresponding weights\n for i, weight in enumerate(self.weights):\n # only act if non-trivial weight\n if np.isclose(weight, 0):\n continue\n\n # get state control qubit\n q_state = qr_state[i]\n\n # get bit representation of current weight\n weight_binary = f\"{int(weight):b}\".rjust(self.num_sum_qubits, \"0\")[::-1]\n\n # loop over bits of current weight and add them to sum and carry registers\n for j, bit in enumerate(weight_binary):\n if bit == \"1\":\n if self.num_sum_qubits == 1:\n circuit.cx(q_state, qr_sum[j])\n elif j == 0:\n # compute (q_sum[0] + 1) into (q_sum[0], q_carry[0])\n # - controlled by q_state[i]\n circuit.ccx(q_state, qr_sum[j], qr_carry[j])\n circuit.cx(q_state, qr_sum[j])\n elif j == self.num_sum_qubits - 1:\n # compute (q_sum[j] + q_carry[j-1] + 1) into (q_sum[j])\n # - controlled by q_state[i] / last qubit,\n # no carry needed by construction\n circuit.cx(q_state, qr_sum[j])\n circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])\n else:\n # compute (q_sum[j] + q_carry[j-1] + 1) into (q_sum[j], q_carry[j])\n # - controlled by q_state[i]\n circuit.x(qr_sum[j])\n circuit.x(qr_carry[j - 1])\n circuit.mct(\n [q_state, qr_sum[j], qr_carry[j - 1]],\n qr_carry[j],\n qr_control,\n mode=\"v-chain\",\n )\n circuit.cx(q_state, qr_carry[j])\n circuit.x(qr_sum[j])\n circuit.x(qr_carry[j - 1])\n circuit.cx(q_state, qr_sum[j])\n circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])\n else:\n if self.num_sum_qubits == 1:\n pass # nothing to do, since nothing to add\n elif j == 0:\n pass # nothing to do, since nothing to add\n elif j == self.num_sum_qubits - 1:\n # compute (q_sum[j] + q_carry[j-1]) into (q_sum[j])\n # - controlled by q_state[i] / last qubit,\n # no carry needed by construction\n circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])\n else:\n # compute (q_sum[j] + q_carry[j-1]) into (q_sum[j], q_carry[j])\n # - controlled by q_state[i]\n circuit.mcx(\n [q_state, qr_sum[j], qr_carry[j - 1]],\n qr_carry[j],\n qr_control,\n mode=\"v-chain\",\n )\n circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])\n\n # uncompute carry qubits\n for j in reversed(range(len(weight_binary))):\n bit = weight_binary[j]\n if bit == \"1\":\n if self.num_sum_qubits == 1:\n pass\n elif j == 0:\n circuit.x(qr_sum[j])\n circuit.ccx(q_state, qr_sum[j], qr_carry[j])\n circuit.x(qr_sum[j])\n elif j == self.num_sum_qubits - 1:\n pass\n else:\n circuit.x(qr_carry[j - 1])\n circuit.mcx(\n [q_state, qr_sum[j], qr_carry[j - 1]],\n qr_carry[j],\n qr_control,\n mode=\"v-chain\",\n )\n circuit.cx(q_state, qr_carry[j])\n circuit.x(qr_carry[j - 1])\n else:\n if self.num_sum_qubits == 1:\n pass\n elif j == 0:\n pass\n elif j == self.num_sum_qubits - 1:\n pass\n else:\n # compute (q_sum[j] + q_carry[j-1]) into (q_sum[j], q_carry[j])\n # - controlled by q_state[i]\n circuit.x(qr_sum[j])\n circuit.mcx(\n [q_state, qr_sum[j], qr_carry[j - 1]],\n qr_carry[j],\n qr_control,\n mode=\"v-chain\",\n )\n circuit.x(qr_sum[j])\n\n 
self.append(circuit.to_gate(), self.qubits)\n"
] |
[
[
"numpy.round",
"numpy.isclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
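For reference, the `num_sum_qubits` property in the row above reduces to a one-line formula: the sum register needs ⌊log2(Σ weights)⌋ + 1 qubits, with a floor of one qubit. A minimal standalone sketch of just that formula (the helper name is a local stand-in, not taken from the row):

```python
import numpy as np

def num_sum_qubits(weights):
    # Width of the sum register: floor(log2(sum(weights))) + 1,
    # with a minimum of one qubit when all weights are zero.
    total = sum(weights)
    return int(np.floor(np.log2(total)) + 1) if total > 0 else 1

assert num_sum_qubits([1, 2, 4]) == 3  # max sum 7 fits in 3 bits
assert num_sum_qubits([1] * 5) == 3    # max sum 5 also needs 3 bits
```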
vkola-lab/ar2021
|
[
"2fd98776c5329fef8ece9be02f0281d366ca47df"
] |
[
"loaders/loader_imorphics.py"
] |
[
"import os, glob, torch, time\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nfrom torchvision import transforms\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, Dataset\nfrom loaders.OAI_imorphics_extras import get_ids_by_subjects, imorphics_info\nfrom loaders.loader_utils import imorphics_masks, append_dict, resize_and_crop, imagesc\n\n\nclass LoaderImorphicsDual(Dataset):\n def __init__(self, args_d, subjects_list, transform):\n self.Imorphics0 = LoaderImorphics(args_d, subjects_list[0], transform)\n # do not apply interval to the second dataset for matching\n args_d_01 = args_d.copy()\n args_d_01['interval'] = 1\n self.Imorphics1 = LoaderImorphics(args_d_01, subjects_list[1], transform)\n self.imorphics_info = imorphics_info()\n\n def __len__(self):\n return len(self.Imorphics0)\n\n def __getitem__(self, idx):\n id0, img0, (mask0,) = self.Imorphics0.__getitem__(idx)\n #id1, img1, (mask1,) = self.Imorphics1.__getitem__(idx)\n # find matching slices between 00 and 01\n i = int(id0.split('_')[0])\n s = int(id0.split('_')[1])\n id1_match = str(i+88) + '_' + str(int(s + self.imorphics_info['s_00_01'][i-1]))\n id1, img1, (mask1,) = self.Imorphics1.__getitem__(self.Imorphics1.ids.index(id1_match))\n\n img = np.concatenate([np.expand_dims(x, 0) for x in [img0, img1]], 0)\n mask = np.concatenate([np.expand_dims(x, 0) for x in [mask0, mask1]], 0)\n\n return (id0, id1), img, (mask, )#((mask0,), (mask1,))\n\n\nclass LoaderImorphics(Dataset):\n def __init__(self, args_d, subjects_list, transform):\n dir_img = os.path.join(args_d['data_path'], args_d['mask_name'], 'original/')\n dir_mask = [[os.path.join(args_d['data_path'], args_d['mask_name'],\n 'train_masks/' + str(y) + '/') for y in x] for x in\n args_d['mask_used']]\n\n self.transform = transform\n self.dir_img = dir_img\n self.fmt_img = glob.glob(self.dir_img+'/*')[0].split('.')[-1]\n self.dir_mask = dir_mask\n\n self.scale = args_d['scale']\n self.copy_channel = args_d['copy_channel']\n\n \"\"\" Masks \"\"\"\n self.masks = imorphics_masks(adapt=None)\n\n \"\"\" Splitting Subjects\"\"\"\n self.subjects, self.slices = get_ids_by_subjects(self.dir_img, args_d['method'])\n self.ids = append_dict([self.slices[s][0:None:args_d['interval']] for s in subjects_list])\n\n def load_imgs(self, id):\n x = Image.open(self.dir_img + id + '.' + self.fmt_img)\n x = resize_and_crop(x, self.scale)\n x = np.expand_dims(np.array(x), 0) # to numpy\n return x\n\n def __len__(self):\n return len(self.ids)\n\n def __getitem__(self, idx):\n id = self.ids[idx]\n\n # load image\n img = self.load_imgs(id)\n\n # load mask\n mask = self.masks.load_masks(id, self.dir_mask, '.png', scale=self.scale)\n\n # transform images\n if self.transform:\n img = self.transform(img)\n\n # normalization\n img = torch.from_numpy(img)\n img = img.type(torch.float32)\n img = img / img.max()\n\n if self.copy_channel:\n img = torch.cat([1*img, 1*img, 1*img], 0)\n\n return id, img, (mask, )\n\n\n\n"
] |
[
[
"numpy.array",
"numpy.expand_dims",
"torch.from_numpy",
"torch.cat"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
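For reference, the normalization path in `LoaderImorphics.__getitem__` above is: cast to float, divide by the slice maximum, then (when `copy_channel` is set) replicate the grayscale plane across three channels. A minimal sketch of those steps on a dummy tensor (the shape is illustrative only, not from the dataset):

```python
import torch

img = torch.arange(16.).reshape(1, 4, 4)  # dummy 1xHxW grayscale slice
img = img.type(torch.float32)
img = img / img.max()                            # per-image scaling to [0, 1]
img = torch.cat([1 * img, 1 * img, 1 * img], 0)  # replicate to 3 channels
print(img.shape)  # torch.Size([3, 4, 4])
```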
uchuhimo/Ptolemy
|
[
"5c8ae188af30ee49d38f27d54c67af2eab9489e7"
] |
[
"src/nninst/plot/plot_layerwise_similarity_alexnet_imagenet_attack.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom nninst.backend.tensorflow.model import AlexNet\nfrom nninst.op import Conv2dOp, DenseOp\nfrom nninst.utils.fs import abspath\n\nthreshold = 0.5\n\npath_template = \"alexnet_imagenet_real_metrics_per_layer_{0:.1f}_{1}_{2}.csv\"\nattack_name = \"FGSM\"\nlabel_name = \"import_rank1\"\npath = \"metrics/\" + path_template.format(threshold, attack_name, label_name,)\ndf = pd.read_csv(abspath(path))\ndf.info()\n\nlayers = AlexNet.graph().load().ops_in_layers(Conv2dOp, DenseOp)\nfor layer in layers:\n df[f\"{layer}.similarity\"] = (\n df[f\"{layer}.overlap_size_in_class\"] / df[f\"{layer}.overlap_size_total\"]\n )\ndf.info()\n\nsimilarity_col = np.concatenate([df[f\"{layer}.similarity\"] for layer in layers])\nlayer_col = np.concatenate(\n [[layer[: layer.index(\"/\")]] * len(df[f\"{layer}.similarity\"]) for layer in layers]\n)\nplot_df = pd.DataFrame({\"Similarity\": similarity_col, \"Layer\": layer_col})\nplot_df.info()\n\nax = sns.boxplot(x=\"Layer\", y=\"Similarity\", data=plot_df)\nax.tick_params(axis=\"x\", labelrotation=45)\nfig = ax.get_figure()\nfig.savefig(\n f\"layerwise_similarity_alexnet_imagenet_{attack_name}.pdf\", bbox_inches=\"tight\"\n)\nfig.savefig(\n f\"layerwise_similarity_alexnet_imagenet_{attack_name}.png\", bbox_inches=\"tight\"\n)\n\nsummary_df = plot_df.groupby(\"Layer\").mean().reset_index()\nsummary_df.to_csv(\n f\"layerwise_similarity_alexnet_imagenet_{attack_name}.csv\", index=False\n)\n"
] |
[
[
"numpy.concatenate",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
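For reference, the per-layer similarity computed in the script above is simply `overlap_size_in_class / overlap_size_total`. A self-contained sketch of that step; the layer name and values here are invented to match the script's column-naming pattern:

```python
import pandas as pd

layer = "conv_1/Conv2D"  # hypothetical layer name following the script's pattern
df = pd.DataFrame({
    f"{layer}.overlap_size_in_class": [3, 4, 6],
    f"{layer}.overlap_size_total": [10, 8, 12],
})
# similarity = fraction of the total overlap that falls within the class
df[f"{layer}.similarity"] = (
    df[f"{layer}.overlap_size_in_class"] / df[f"{layer}.overlap_size_total"]
)
print(df[f"{layer}.similarity"].tolist())  # [0.3, 0.5, 0.5]
```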
kayoyin/DialogueMT
|
[
"aa426ebcdbdfe0366ed06081a842945f2108e85f"
] |
[
"fairseq/tasks/no_context_tag.py"
] |
[
"from argparse import Namespace\n\nimport os\nimport torch\nimport logging\nimport json\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom fairseq import metrics, options, utils\nfrom fairseq.data import Dictionary, TaggedDataset\nfrom fairseq.tasks import FairseqTask, register_task\nimport sentencepiece as spm\nEVAL_BLEU_ORDER = 4\n\n\nlogger = logging.getLogger(__name__)\n\n\n@register_task('no_context_tag')\nclass NoContextTag(FairseqTask):\n @staticmethod\n def add_args(parser):\n # Add some command-line arguments for specifying where the data is\n # located and the maximum supported input length.\n parser.add_argument('data', metavar='FILE',\n help='file prefix for data')\n parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',\n help='pad the source on the left')\n parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',\n help='pad the target on the left')\n parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',\n help='max number of tokens in the source sequence')\n parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',\n help='max number of tokens in the target sequence')\n\n # options for reporting BLEU during validation\n parser.add_argument('--eval-bleu', action='store_true',\n help='evaluation with BLEU scores')\n parser.add_argument('--eval-bleu-detok', type=str, default=\"space\",\n help='detokenize before computing BLEU (e.g., \"moses\"); '\n 'required if using --eval-bleu; use \"space\" to '\n 'disable detokenization; see fairseq.data.encoders '\n 'for other options')\n parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',\n help='args for building the tokenizer, if needed')\n parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,\n help='compute tokenized BLEU instead of sacrebleu')\n parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,\n help='remove BPE before computing BLEU')\n parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',\n help='generation args for BLUE scoring, '\n 'e.g., \\'{\"beam\": 4, \"lenpen\": 0.6}\\'')\n parser.add_argument('--eval-bleu-print-samples', action='store_true',\n help='print sample generations during validation')\n\n @classmethod\n def setup_task(cls, args, **kwargs):\n # Here we can perform any setup required for the task. 
This may include\n # loading Dictionaries, initializing shared Embedding layers, etc.\n # In this case we'll just load the Dictionaries.\n \n\n args.left_pad_source = options.eval_bool(args.left_pad_source)\n args.left_pad_target = options.eval_bool(args.left_pad_target)\n\n\n # load dictionaries\n vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))\n logger.info('[{}] dictionary: {} types'.format('Src + tgt', len(vocab)))\n vocab.model = spm.SentencePieceProcessor(model_file=os.path.join(args.data, 'spm.model'))\n\n return cls(args, vocab)\n\n def __init__(self, args, vocab):\n super().__init__(args)\n self.vocab = vocab\n\n def load_dataset(self, split, **kwargs):\n \"\"\"Load a given dataset split (e.g., train, valid, test).\"\"\"\n\n path = os.path.join(self.args.data, '{}.json'.format(split))\n\n self.datasets[split] = TaggedDataset(path, self.vocab)\n\n def build_model(self, args):\n model = super().build_model(args)\n if getattr(args, 'eval_bleu', False):\n assert getattr(args, 'eval_bleu_detok', None) is not None, (\n '--eval-bleu-detok is required if using --eval-bleu; '\n 'try --eval-bleu-detok=moses (or --eval-bleu-detok=space '\n 'to disable detokenization, e.g., when using sentencepiece)'\n )\n detok_args = json.loads(getattr(args, 'eval_bleu_detok_args', '{}') or '{}')\n self.tokenizer = self.vocab.model\n\n gen_args = json.loads(getattr(args, 'eval_bleu_args', '{}') or '{}')\n self.sequence_generator = self.build_generator([model], Namespace(**gen_args))\n return model\n\n def prepare_gen(self, model, args):\n if getattr(args, 'eval_bleu', False):\n assert getattr(args, 'eval_bleu_detok', None) is not None, (\n '--eval-bleu-detok is required if using --eval-bleu; '\n 'try --eval-bleu-detok=moses (or --eval-bleu-detok=space '\n 'to disable detokenization, e.g., when using sentencepiece)'\n )\n detok_args = json.loads(getattr(args, 'eval_bleu_detok_args', '{}') or '{}')\n self.tokenizer = self.vocab.model\n\n gen_args = json.loads(getattr(args, 'eval_bleu_args', '{}') or '{}')\n self.sequence_generator = self.build_generator([model], Namespace(**gen_args))\n\n def valid_step(self, sample, model, criterion):\n loss, sample_size, logging_output = super().valid_step(sample, model, criterion)\n if self.args.eval_bleu:\n bleu = self._inference_with_bleu(self.sequence_generator, sample, model)\n logging_output['_bleu_sys_len'] = bleu.sys_len\n logging_output['_bleu_ref_len'] = bleu.ref_len\n # we split counts into separate entries so that they can be\n # summed efficiently across workers using fast-stat-sync\n assert len(bleu.counts) == EVAL_BLEU_ORDER\n for i in range(EVAL_BLEU_ORDER):\n logging_output['_bleu_counts_' + str(i)] = bleu.counts[i]\n logging_output['_bleu_totals_' + str(i)] = bleu.totals[i]\n return loss, sample_size, logging_output\n\n def reduce_metrics(self, logging_outputs, criterion):\n super().reduce_metrics(logging_outputs, criterion)\n if self.args.eval_bleu:\n\n def sum_logs(key):\n return sum(log.get(key, 0) for log in logging_outputs)\n\n counts, totals = [], []\n for i in range(EVAL_BLEU_ORDER):\n counts.append(sum_logs('_bleu_counts_' + str(i)))\n totals.append(sum_logs('_bleu_totals_' + str(i)))\n\n if max(totals) > 0:\n # log counts as numpy arrays -- log_scalar will sum them correctly\n metrics.log_scalar('_bleu_counts', np.array(counts))\n metrics.log_scalar('_bleu_totals', np.array(totals))\n metrics.log_scalar('_bleu_sys_len', sum_logs('_bleu_sys_len'))\n metrics.log_scalar('_bleu_ref_len', sum_logs('_bleu_ref_len'))\n\n def 
compute_bleu(meters):\n import inspect\n import sacrebleu\n fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]\n if 'smooth_method' in fn_sig:\n smooth = {'smooth_method': 'exp'}\n else:\n smooth = {'smooth': 'exp'}\n bleu = sacrebleu.compute_bleu(\n correct=meters['_bleu_counts'].sum,\n total=meters['_bleu_totals'].sum,\n sys_len=meters['_bleu_sys_len'].sum,\n ref_len=meters['_bleu_ref_len'].sum,\n **smooth\n )\n return round(bleu.score, 2)\n\n metrics.log_derived('bleu', compute_bleu)\n\n def max_positions(self):\n \"\"\"Return the max input length allowed by the task.\"\"\"\n return (self.args.max_tokens, self.args.max_tokens)\n\n @property\n def source_dictionary(self):\n \"\"\"Return the source and target :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.vocab\n\n @property\n def target_dictionary(self):\n \"\"\"Return the target :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.vocab\n\n def _inference_with_bleu(self, generator, sample, model, return_hyps=False):\n import sacrebleu\n\n def decode(toks, escape_unk=False):\n toks = toks.tolist()\n #bos = task.vocab.encode(\"<s>\")\n #eos = task.vocab.encode(\"</s>\")\n bos = self.tokenizer.bos_id()\n eos = self.tokenizer.eos_id()\n while bos in toks:\n toks.remove(bos)\n while eos in toks:\n toks.remove(eos)\n if len(toks) == 0: \n return \"\"\n s = self.tokenizer.decode(toks)\n unk_string = \"UNKNOWNTOKENINREF\" if escape_unk else \"UNKNOWNTOKENINHYP\"\n while \"<unk>\" in s:\n s.replace(\"<unk>\", unk_string)\n return s.strip()\n\n gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)\n srcs, hyps, refs = [], [], []\n for i in range(len(gen_out)):\n srcs.append(decode(utils.strip_pad(sample['net_input']['src_tokens'][i], self.vocab.pad()),\n escape_unk=True, # don't count <unk> as matches to the hypo \n ))\n hyps.append(decode(gen_out[i][0]['tokens']))\n refs.append(decode(\n utils.strip_pad(sample['target'][i], self.vocab.pad()),\n escape_unk=True, # don't count <unk> as matches to the hypo\n ))\n if self.args.eval_bleu_print_samples:\n logger.info('example hypothesis: ' + hyps[0])\n logger.info('example reference: ' + refs[0])\n if self.args.eval_tokenized_bleu:\n return sacrebleu.corpus_bleu(hyps, [refs], tokenize='none')\n if return_hyps:\n return srcs, hyps, refs\n else:\n return sacrebleu.corpus_bleu(hyps, [refs])\n\n\n def eval_with_bleu(self, model, dataloader):\n import sacrebleu\n \n def decode(task, toks, escape_unk=False):\n toks = toks.tolist()\n #bos = task.vocab.encode(\"<s>\")\n #eos = task.vocab.encode(\"</s>\")\n bos = task.vocab.model.bos_id()\n eos = task.vocab.model.eos_id()\n while bos in toks:\n toks.remove(bos)\n while eos in toks:\n toks.remove(eos)\n s = task.vocab.decode(\n toks)\n return s.strip()\n\n hyps = []\n refs = []\n preds = torch.Tensor([self.vocab.model.bos_id()])\n for batch in tqdm(dataloader):\n mask_batch = batch\n mask_batch['net_input']['prev_output_tokens'] = prev_outputs\n preds = model(**mask_batch['net_input'])\n # print(preds[0][0].shape)\n # print(decode(task,torch.argmax(preds[0][0], dim=1)))\n # print(decode(task,\n # utils.strip_pad(batch['target'][0], task.vocab.pad()),\n # escape_unk=True, # don't count <unk> as matches to the hypo\n # ))\n for i in range(preds[0].shape[0]):\n hyps.append(decode(self,torch.argmax(preds[0][i], dim=1)))\n refs.append(decode(self,\n utils.strip_pad(batch['target'][i], self.vocab.pad()),\n escape_unk=True, # don't count <unk> as matches to the hypo\n ))\n\n return sacrebleu.corpus_bleu(hyps, [refs]), hyps"
] |
[
[
"numpy.array",
"torch.argmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
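For reference, `_inference_with_bleu` above ends in `sacrebleu.corpus_bleu(hyps, [refs])`, where the second argument is a list of reference streams. A minimal usage sketch of that call with dummy strings:

```python
import sacrebleu

hyps = ["the cat sat on the mat"]           # system outputs, one string per sentence
refs = ["the cat sat on the mat"]           # one reference stream
bleu = sacrebleu.corpus_bleu(hyps, [refs])  # note the list-of-streams wrapping
print(round(bleu.score, 2))                 # 100.0 for an exact match
```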
DavianYang/yolo.ai
|
[
"0856d4f1e84428667046ee27270ff1bf742e658a",
"0856d4f1e84428667046ee27270ff1bf742e658a"
] |
[
"yolo/utils/utils.py",
"yolo/models/modules/modules.py"
] |
[
"import torch\n\ndef corner_to_center(xmin, ymin, xmax, ymax):\n cx, cy = (xmin + xmax) / 2, (ymin + ymax) / 2\n w = xmax - xmin\n h = ymax - ymin\n return cx, cy, w, h\n\ndef center_to_corner(cx, cy, w, h):\n xmin, ymin = cx - 0.5 * w, cy - 0.5 * h\n xmax, ymax = cx + 0.5 * w, cy + 0.5 * h\n return xmin, ymin, xmax, ymax\n\ndef cells_to_bboxes(preds, anchors, S, is_pred):\n batch_size = preds.shape[0]\n num_anchors = len(anchors)\n \n x_pred, y_pred = preds[..., 1:2], preds[..., 2:3]\n w_pred, h_pred = preds[..., 3:4], preds[..., 4:5]\n \n if is_pred:\n anchors = anchors.reshape(1, len(anchors), 1, 1, 2)\n x_pred, y_pred = torch.sigmoid(x_pred), torch.sigmoid(y_pred)\n w_pred, h_pred = torch.exp(w_pred) * anchors, torch.exp(h_pred) * anchors\n \n scores = preds[..., 0:1]\n best_class = preds[..., 5:6]\n \n cell_indices = (\n torch.arange(S)\n .repeat(batch_size, num_anchors, S, 1)\n .unsqueeze(-1)\n .to(preds.device)\n )\n \n x = 1 / S * (x_pred + cell_indices)\n y = 1 / S * (y_pred + cell_indices.permute(0, 1, 3, 2, 4))\n w, h = 1 / S * w_pred, 1 / S * h_pred\n \n converted_bboxes = torch.cat((best_class, scores, x, y, w, h), dim=-1).reshape(batch_size, num_anchors * S * S, 6)\n return converted_bboxes.tolist()\n ",
"from typing import Sequence, Union, Callable, AnyStr, Any\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\nfrom yolo.models.modules.activation import get_activation_layer\n\nclass ConvBlock(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: Union[int, Sequence],\n stride: Union[int, Sequence] = 1, \n padding: int = 0,\n dilation: int = 1,\n groups: int = 1,\n bias: bool = False,\n activation: Union[Callable, AnyStr] = (lambda: nn.ReLU(inplace=True))\n ) -> None:\n super().__init__()\n self.activate = (activation is not None)\n self.conv = nn.Conv2d(in_channels, out_channels, \n kernel_size, stride, \n padding, dilation,\n groups, bias\n )\n self.bn = nn.BatchNorm2d(out_channels)\n if self.activate:\n self.act = get_activation_layer(activation)\n \n def forward(self, x: Tensor) -> Tensor:\n x = self.bn(self.conv(x))\n if self.activate:\n x = self.act(x)\n return x\n\n\nclass ResBlock(nn.Module):\n def __init__(\n self,\n channels: int,\n blocks: list\n ) -> None:\n super().__init__()\n conv1 = blocks[0]\n conv2 = blocks[1]\n self.resblock = nn.Sequential(\n ConvBlock(channels, conv1.filters, \n kernel_size=conv1.kernel_size, \n stride=conv1.stride, padding=conv1.padding),\n ConvBlock(conv1.filters, conv2.filters, \n kernel_size=conv2.kernel_size, \n stride=conv2.stride, padding=conv2.padding)\n )\n \n def forward(self, x: Tensor) -> Tensor:\n x = self.resblock(x) + x\n return x\n\nclass Upsample(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: Union[int, Sequence],\n stride: Union[int, Sequence] = 1,\n padding: int = 0,\n ):\n super().__init__()\n self.upsample = nn.Sequential(\n ConvBlock(in_channels, out_channels, kernel_size, stride, padding),\n nn.Upsample(scale_factor=2, mode=\"nearest\")\n )\n \n def forward(self, x: Tensor) -> Tensor:\n return self.upsample(x)\n \nclass ScalePrediction(nn.Module):\n def __init__(\n self,\n in_channels,\n num_classes,\n num_anchors\n ) -> Any:\n super().__init__()\n self.num_classes = num_classes\n self.num_anchors = num_anchors\n self.pred = nn.Sequential(\n ConvBlock(in_channels, 2*in_channels, kernel_size=3, padding=1),\n nn.Conv2d(2*in_channels, (num_classes + 5) * num_anchors, kernel_size=1)\n )\n \n def forward(self, x: Tensor) -> Tensor:\n return (\n self.pred(x)\n .reshape(x.shape[0], self.num_anchors, self.num_classes + 5, x.shape[2], x.shape[3])\n .permute(0, 1, 3, 4, 2) # N x num_anchors x 13 x 13 x 5 + num_classes\n )\n \nclass SEBlock(nn.Module):\n def __init__(\n self, \n in_channels: int, \n squeeze_channels: int, \n activation: Union[Callable, AnyStr] = (lambda: nn.SiLU())\n ) -> None:\n super().__init__()\n self.se = nn.Sequential(\n nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(in_channels, squeeze_channels, kernel_size=1),\n get_activation_layer(activation),\n nn.Conv2d(squeeze_channels, in_channels, kernel_size=1),\n nn.Sigmoid()\n )\n \n def forward(self, x: Tensor) -> Tensor:\n return x * self.se(x)\n \nclass MBConvBlock(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: Union[int, Sequence],\n stride: Union[int, Sequence],\n padding: int,\n expand_ratio: float,\n reduction: int = 4, # squeeze excitation\n survival_prob: float = 0.8 # for stochastic depth\n ) -> None:\n super().__init__()\n self.survival_prob = survival_prob\n self.use_residual = in_channels == out_channels and stride == 1\n \n hidden_dim = in_channels * expand_ratio\n \n self.expand = in_channels 
!= hidden_dim\n squeeze_dim = int(in_channels / reduction)\n \n if self.expand:\n self.expand_conv = ConvBlock(\n in_channels, \n hidden_dim, \n kernel_size=3, \n stride=1, \n padding=1, \n activation='silu'\n )\n self.conv = nn.Sequential(\n ConvBlock(\n hidden_dim, \n hidden_dim, \n kernel_size,\n stride,\n padding,\n groups=hidden_dim,\n activation='silu'\n ),\n SEBlock(hidden_dim, squeeze_dim),\n nn.Conv2d(hidden_dim, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels)\n )\n \n def forward(self, inputs: Tensor) -> Tensor:\n x = self.expand_conv(inputs) if self.expand else inputs\n x = self.stochastic_depth(self.conv(x)) + inputs if self.use_residual else self.conv(x)\n return x\n \n \n def stochastic_depth(self, x: Tensor) -> Tensor:\n if not self.training:\n return x\n \n binary_tensor = torch.rand(x.shape[0], 1, 1, 1, device=x.device) < self.survival_prob\n \n return torch.div(x, self.survival_prob) * binary_tensor"
] |
[
[
"torch.exp",
"torch.sigmoid",
"torch.arange",
"torch.cat"
],
[
"torch.div",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.nn.Upsample",
"torch.nn.AdaptiveAvgPool2d",
"torch.rand",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.SiLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
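For reference, `MBConvBlock.stochastic_depth` above drops the residual branch for whole samples at random and rescales by the survival probability so the expected activation is unchanged. A standalone sketch of the same technique (assumes a 4-D NCHW input, as in the module):

```python
import torch

def stochastic_depth(x: torch.Tensor, survival_prob: float, training: bool = True) -> torch.Tensor:
    # Randomly zero the residual branch per sample; dividing by the
    # survival probability keeps the expectation constant at train time.
    if not training:
        return x
    keep = torch.rand(x.shape[0], 1, 1, 1, device=x.device) < survival_prob
    return torch.div(x, survival_prob) * keep

out = stochastic_depth(torch.ones(4, 2, 3, 3), survival_prob=0.8)
print(out.shape)  # torch.Size([4, 2, 3, 3]); surviving samples are scaled to 1/0.8
```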
enricor2/CS437LAB1
|
[
"71b3533c22bc56083dd0ae785a176dcd5a4b75af"
] |
[
"server/showImg.py"
] |
[
"import matplotlib.pyplot as plt\nx=[[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,1,1,0,1,1,1,1,1,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,1,1,1,0,0,0,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,1,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1\n,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1\n,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0\n,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0\n,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0\n,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0\n,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,1,0,1,1,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0\n,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1\n,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1\n,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,1,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]\nplt.imshow(x,cmap='Greys')\nplt.show()"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
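For reference, `showImg.py` above just renders a hardcoded 100x100 grid of 0s and 1s. The same `plt.imshow` call works on a tiny stand-in grid:

```python
import matplotlib.pyplot as plt

# tiny stand-in for the hardcoded 100x100 occupancy grid above
grid = [
    [0, 0, 0, 0],
    [0, 1, 1, 0],
    [0, 1, 0, 0],
    [0, 0, 0, 0],
]
plt.imshow(grid, cmap='Greys')  # 1s render dark, 0s white
plt.show()
```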
PranavSudersan/Buggee
|
[
"5767d1c259d3570086d7c389440605fa0f681336",
"5767d1c259d3570086d7c389440605fa0f681336",
"5767d1c259d3570086d7c389440605fa0f681336"
] |
[
"analysis/forceanalysis.py",
"process/imagetransform.py",
"analysis/plotting.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 21 19:42:45 2019\r\n\r\n@author: adwait\r\n\"\"\"\r\n# import matplotlib.pyplot as plt\r\nimport numpy as np\r\n# import glob\r\nimport os.path\r\n# from tkinter import filedialog\r\nfrom PyQt5.QtWidgets import QFileDialog\r\nimport tkinter as tk\r\nfrom statistics import mean, stdev\r\nfrom scipy.signal import savgol_filter, medfilt\r\nfrom tkinter import messagebox\r\n# import ast\r\nfrom scipy import integrate\r\nimport logging\r\n\r\nfrom source.analysis.plotting import Plotting\r\n\r\nclass ForceAnal(Plotting):\r\n\r\n def __init__(self, fitWindow = None, configPlotWindow = None,\r\n analyzeDataWindow = None):\r\n super().__init__(fitWindow, configPlotWindow)\r\n \r\n self.analyzeDataWindow = analyzeDataWindow\r\n # if self.analyzeDataWindow != None:\r\n # self.analyzeDataWindow.__init__()\r\n # self.analyzeDataWindow.zeroBtn.clicked.connect(lambda: \r\n # self.setCursorPosition(\r\n # self.analyzeDataWindow.zeroLabel))\r\n # self.analyzeDataWindow.forceBtn.clicked.connect(lambda: \r\n # self.setCursorPosition(\r\n # self.analyzeDataWindow.forceLabel))\r\n # self.analyzeDataWindow.preloadBtn.clicked.connect(lambda: \r\n # self.setCursorPosition(\r\n # self.analyzeDataWindow.preloadLabel))\r\n # self.analyzeDataWindow.deformBtn.clicked.connect(lambda: \r\n # self.setCursorPosition(\r\n # self.analyzeDataWindow.deformLabel))\r\n self.force_filepath = \"\"\r\n # self.force_vert1 = [0,0,0]\r\n # self.time1 = [0,0,0]\r\n self.speed_um = \"\"\r\n self.ptsnumber = 0\r\n # self.fig1_close = True\r\n \r\n # self.flag_ca = True\r\n # self.flag_ra = False\r\n # self.flag_cl = False\r\n # self.flag_rl = False\r\n # self.flag_cn = False\r\n # self.flag_ecc = False\r\n # self.flag_lf = False \r\n # self.flag_zp = False\r\n # self.flag_xp = False\r\n # self.flag_ap = False\r\n # self.flag_fp = False\r\n # self.flag_st = False\r\n # self.flag_zd = False\r\n # self.x_var = 'Time' #x axis default parameter\r\n # self.flag_zshift = False #zero shift\r\n # self.flag_lf_filter = False\r\n # self.window_length = 101\r\n # self.polyorder = 2\r\n self.calib_lat1 = \"29181.73*x\"\r\n # self.invert_latf = False\r\n # self.flag_ct = True\r\n # self.ctv_slope = 0 #vertical cross talk slope\r\n # self.ctl_slope = 0 #lateral cross talk slope\r\n # self.startFull = 0 #plot range\r\n # self.endFull = 100\r\n # self.noiseSteps = \"\"\r\n # self.legendPos = \"upper right\"\r\n # #fitting\r\n # self.flag_fit = False\r\n # self.fit_x = 'Vertical Position (μm)'\r\n # self.fit_y = 'Vertical Force'\r\n # self.startFit = 0\r\n # self.endFit = 100\r\n # self.fit_pos = '0.5,0.5'\r\n # self.fit_show = False\r\n # self.slope = ''\r\n # self.slope_unit = ''\r\n # self.k_beam = '30,1' # Beam spring constant (μN/μm)\r\n # self.deform_tol = 100 #deformation contact start auto detect tolerance\r\n \r\n## self.friction_start = 1\r\n## self.friction_stop = 100\r\n## self.zero_start = 1 #percent of total number of points\r\n## self.zero_stop = 10 #percent of total number of points\r\n## self.adh_start = 1\r\n## self.adh_stop = 100\r\n## self.prl_start = 1\r\n## self.prl_stop = 100\r\n## self.force_friction = 0 #initialize\r\n\r\n # #area data dictionary\r\n # self.areaDict = {\"area2_init\":[], \"area2_max\":[], \"area3_init\":[],\r\n # \"area2_pulloff\":[], \"area2_residue\":[],\r\n # \"area3_pulloff\":[], \"area3_max\":[],\r\n # \"area_friction\":[]}\r\n # #length data dictionary\r\n # self.lengthDict = {\"contLength_init\":[], \"contLength_max\":[],\r\n # \"roilen_init\":[], 
\"roilen_max\":[],\r\n # \"contLength_pulloff\":[], \"roilen_pulloff\":[],\r\n # \"ecc_pulloff\":[], \"contnum_pulloff\": []} \r\n # #bounding ellipse data dictionary\r\n # self.ellipDict = {\"ellipAr_max\":[], \"ellipPer_max\":[],\r\n # \"ellipMajr_max\":[], \"ellipMinr_max\":[]}\r\n # #force data dictionaryrangeDict\r\n # self.forceDict = {\"force_adhesion1\":[], \"force_preload1\":[],\r\n # \"force_friction\":[], \"force_preload2\":[],\r\n # \"force_min1\":[], \"force_max1\":[], \"zero1\":[], \"zero2\":[],\r\n # \"zero1_stdv\":[], \"zero2_stdv\":[], \"force_lat_min\":[],\r\n # \"force_lat_max\":[], \"force_max2\":[]}\r\n # #index/time data dictionary for calculation\r\n # self.indDict = {\"force_lat_max_index\":[], \"force_lat_min_index\":[],\r\n # \"time1_max\":[0], \"time1_lat_avg\":[]}\r\n #range dictionary (zero, adhesion, adh preload, friction, fr preload, fr zero)\r\n # self.rangeDict = {\"Default\" : [[0,1],[0,100],[0,100],[0,100],[0,100], [0,1]]} \r\n \r\n\r\n def importData(self, msrListMode):\r\n if msrListMode == False:\r\n force_filepath, _ = QFileDialog.getOpenFileName(caption = \"Select force data file\")\r\n else:\r\n force_filepath = self.force_filepath\r\n # root = tk.Tk()\r\n # root.withdraw()\r\n # self.force_filepath = filedialog.askopenfilename(title =\r\n # \"Select force data file\")\r\n # root.destroy()\r\n if force_filepath != \"\":\r\n self.force_filepath = force_filepath\r\n with open(self.force_filepath, 'r') as f: #open force data file\r\n x1 = f.read().splitlines()\r\n \r\n self.fileDataDict = {} # for plotting\r\n # self.fileDataDict2 = {} # for summary file data\r\n self.summaryDataDict = {} #summary data values\r\n self.summaryDataDict['measurement params'] = {}\r\n self.summaryDataDict['data params'] = {}\r\n self.summaryDataDict['misc'] = {}\r\n \r\n expt_date = x1[1].split('\\t')[0]\r\n folder_name = os.path.dirname(os.path.dirname(self.force_filepath))\r\n \r\n self.summaryDataDict['measurement params'][\"Date of Experiment\"] = expt_date\r\n self.summaryDataDict['measurement params'][\"Data Folder\"] = folder_name\r\n \r\n self.waveform = x1[5].split('\\t')[1]\r\n logging.debug('%s', self.waveform)\r\n\r\n if self.waveform == 'custom': #index adjust for force data\r\n ir = 2\r\n ic = 1\r\n else:\r\n ir = 0\r\n ic = 0\r\n \r\n #collect force data\r\n header_row = 23 #change!\r\n self.fileDataDict[\"Vertical piezo\"] = [(float(y.split('\\t')[0]))/1000 \\\r\n for y in x1[header_row+ir:]] #nm to μm units\r\n self.ptsnumber = int(float((x1[11+ir].split('\\t')[1])))\r\n if ic == 1:\r\n self.fileDataDict[\"Lateral piezo\"] = [(float(y.split('\\t')[ic])/1000) \\\r\n for y in x1[header_row+ir:]] #nm to μm units\r\n speed1 = [int(float(y)) for y in x1[8].split('\\t')[1:]]\r\n self.steps = [y for y in x1[7].split('\\t')[1:]]\r\n logging.debug('%s', self.steps)\r\n self.step_num = len(x1[7].split('\\t')[1:])\r\n self.pause = [int(float(y)) for y in x1[9].split('\\t')[1:]]\r\n else:\r\n self.fileDataDict[\"Lateral piezo\"] = [0] * self.ptsnumber\r\n speed1 = [int(float(x1[6].split('\\t')[1]))]\r\n self.steps = [\"Up/Down\"] #Check\r\n self.step_num = 1\r\n self.pause = [0]\r\n self.fps = float((x1[20+ir].split('\\t')[1]))\r\n self.frame_num = [int(float(y)) for y in (x1[21+ir].split('\\t')[1:])]\r\n self.fileDataDict[\"Time\"] = [float(y.split('\\t')[1+ic]) for y in x1[header_row+ir:]]\r\n self.fileDataDict[\"Vertical Deflection Raw\"] = [float(y.split('\\t')[2+ic]) \\\r\n for y in x1[header_row+ir:]]\r\n self.fileDataDict[\"Lateral Deflection Raw\"] = 
[float(y.split('\\t')[3+ic])\\\r\n for y in x1[header_row+ir:]]\r\n self.calib_vert1 = x1[19+ir].split('\\t')[1].replace(\"^\", \"**\")\r\n #make 'Back' speeds negative\r\n # self.speed = [-speed1[self.steps.index(a)] if a == 'Back' \\\r\n # else speed1[self.steps.index(a)] for a in self.steps]\r\n self.speed = [-speed1[i] if self.steps[i] == 'Back' \\\r\n else speed1[i] for i in range(len(self.steps))]\r\n logging.debug('%s', self.ptsnumber)\r\n \r\n self.fileDataDict[\"Index\"] = np.linspace(0, self.ptsnumber-1, \r\n self.ptsnumber, \r\n dtype = np.uint)\r\n \r\n # self.fileDataDict2[\"Steps\"] = self.steps\r\n # self.summaryDataDict['data params'] = {}\r\n # self.summaryDataDict['data params'][\"Steps\"] = self.steps\r\n # self.dataClean()\r\n\r\n # self.calcData()\r\n\r\n def dataClean(self): #clean force data\r\n #correct offshoot\r\n self.fileDataDict[\"Vertical Deflection\"] = self.offShootCorrect(\r\n self.fileDataDict[\"Vertical Deflection Raw\"])\r\n self.fileDataDict[\"Lateral Deflection\"] = self.offShootCorrect(\r\n self.fileDataDict[\"Lateral Deflection Raw\"])\r\n \r\n def evaluateForce(self): #calculate force from calibration equations\r\n self.fileDataDict[\"Vertical force\"] = [-eval(self.calib_vert1) for x in \\\r\n self.fileDataDict[\"Vertical Deflection\"]]\r\n self.fileDataDict[\"Lateral force\"] = [-eval(self.calib_lat1) for x in \\\r\n self.fileDataDict[\"Lateral Deflection\"]]\r\n \r\n def transformForce(self): #apply transformations to force\r\n dataAnalDict = self.analyzeDataWindow.dataAnalDict\r\n force_list = ['Vertical force', 'Lateral force']\r\n i = 0 #used to find correct key of force for cross talk correction\r\n for force in force_list:\r\n #zero force subtraction\r\n if dataAnalDict[force][\"transform\"][\"Zero subtract\"] == True:\r\n self.fileDataDict[force] = self.zeroSubtract(self.fileDataDict[force],\r\n self.zeroDataDict[force])\r\n #cross talk correction\r\n if dataAnalDict['misc settings']['apply cross talk'].isChecked() == True:\r\n self.fileDataDict[force] = self.crosstalkCorrect(\r\n self.fileDataDict[force_list[i]], \r\n self.fileDataDict[force_list[i-1]],\r\n dataAnalDict[force][\"transform\"][\"Cross Talk\"], \r\n force.split(' ')[0])\r\n #filter data\r\n if dataAnalDict[force][\"transform\"][\"Filter\"] == True:\r\n window = dataAnalDict[force][\"transform\"][\"Filter window\"]\r\n order = dataAnalDict[force][\"transform\"][\"Filter order\"]\r\n self.fileDataDict[force] = savgol_filter(self.fileDataDict[force], \r\n window, order).tolist()\r\n \r\n i += 1 \r\n \r\n def crosstalkCorrect(self, f_in, f_dep, slope, f_type):\r\n if f_type == 'Vertical':\r\n f_err = [slope*(x - f_dep[1]) for x in f_dep]\r\n elif f_type == 'Lateral':\r\n f_err = [slope*(x - f_dep[1]) for x in f_dep]\r\n f_out = [f_in[i]-f_err[i] for i in range(0,len(f_in))]\r\n return f_out\r\n\r\n def offShootCorrect(self, data): #remove first point of each step\r\n # steps_bad = self.noiseSteps.split(\",\")\r\n steps_bad = self.analyzeDataWindow.dataAnalDict['misc settings']\\\r\n ['noise steps'].text().split(\",\")\r\n data_new = data.copy()\r\n if steps_bad[0] == '':\r\n logging.debug(\"no steps\")\r\n return data\r\n for i in steps_bad:\r\n logging.debug('%s, %s', \"step\", i)\r\n if i == '':\r\n continue\r\n ind = int((int(i)-1) * self.ptsnumber/self.step_num)\r\n data_new[ind] = data_new[ind + 1] #replace to next value\r\n return data_new\r\n\r\n # def zeroShift(self, data, zero): #shift force to zero\r\n # data_shifted = [x-zero for x in data]\r\n # return 
data_shifted\r\n \r\n def zeroSubtract(self, actual_data, zero_data):\r\n zero_shift = zero_data[0] - actual_data[0]\r\n zero_data_shifted = [x-zero_shift for x in zero_data]\r\n## self.defl_vert1_raw = self.forceData.defl_vert1.copy()\r\n data_subtracted = [actual_data[0] + actual_data[i] - zero_data_shifted[i] \\\r\n for i in range(len(actual_data))]\r\n return data_subtracted\r\n\r\n def interpolData(self, t, data): #interpolate data at force time resolution\r\n t_near = sorted([[abs(a - t), a] for a in self.time2],\r\n key=lambda l:l[0])[:2]\r\n wt_sum = t_near[0][0] + t_near[1][0] #take weighted avg\r\n wt = [t_near[1][0]/wt_sum, t_near[0][0]/wt_sum]\r\n data_t = np.average([data[self.time2.index(t_near[0][1])],\r\n data[self.time2.index(t_near[1][1])]],\r\n weights = wt)\r\n return data_t\r\n \r\n def calcData(self): #datafile related calculations\r\n## self.calib_lat1 = \"-10.249*1000*x\"\r\n logging.debug(\"calc\")\r\n dataAnalDict = self.analyzeDataWindow.dataAnalDict\r\n \r\n #update plot slice\r\n plot_range = self.configPlotWindow.plotDict['plot settings']\\\r\n ['plot range'].text().split(',')\r\n self.plot_slice = slice(int(plot_range[0]), int(plot_range[1]) + 1)\r\n \r\n time1 = self.fileDataDict[\"Time\"]\r\n \r\n # self.plot_slice = slice(int(self.startFull * self.ptsnumber/100),\r\n # int(self.endFull * self.ptsnumber/100))\r\n \r\n## #correct offshoot\r\n## self.defl_vert1 = self.offShootCorrect(self.defl_vert)\r\n## self.defl_lat1 = self.offShootCorrect(self.defl_lat)\r\n self.dataClean()\r\n \r\n self.evaluateForce()\r\n \r\n self.transformForce()\r\n \r\n #file data units\r\n self.unitDict = {'Time': ' [s]',\r\n 'Index': '',\r\n 'Vertical force': ' [μN]',\r\n 'Lateral force': ' [μN]',\r\n 'Vertical piezo': ' [μm]',\r\n 'Lateral piezo': ' [μm]',\r\n 'Deformation': ' [μm]',\r\n 'Speed': ' [μm/s]'}\r\n \r\n # self.fileDataDict[\"Vertical force\"] = [-eval(self.calib_vert1) for x in \\\r\n # self.fileDataDict[\"Vertical Deflection\"]]\r\n # self.fileDataDict[\"Lateral force\"] = [-eval(self.calib_lat1) for x in \\\r\n # self.fileDataDict[\"Lateral Deflection\"]]\r\n\r\n # if self.flag_ct == True: #cross talk correction\r\n # force_list = ['Vertical force', 'Lateral force']\r\n # i = 0 #used to find correct key of force for cross talk correction\r\n # for force in force_list:\r\n # #cross talk correction\r\n # if dataAnalDict['misc settings']['apply cross talk'].isChecked() == True:\r\n # self.fileDataDict[force] = self.crosstalkCorrect(\r\n # self.fileDataDict[force_list[i]], \r\n # self.fileDataDict[force_list[i-1]],\r\n # dataAnalDict[force][\"transform\"][\"Cross Talk\"], \r\n # force.split(' ')[0])\r\n # #filter data\r\n # if dataAnalDict[force][\"transform\"][\"Filter\"] == True:\r\n # window = dataAnalDict[force][\"transform\"][\"Filter window\"]\r\n # order = dataAnalDict[force][\"transform\"][\"Filter order\"]\r\n # self.fileDataDict[force] = savgol_filter(self.fileDataDict[force], \r\n # window, order).tolist()\r\n # if dataAnalDict[force][\"transform\"][\"Zero subtract\"] == True:\r\n # self.fileDataDict[force] = self.zeroSubtract(self.fileDataDict[force],\r\n # self.zeroDataDict[force])\r\n # i += 1\r\n # self.force_lat1 = self.crosstalkCorrect(force_lat, force_vert,\r\n # dataAnalDict[\"Lateral force\"]\r\n # [\"transform\"][\"Cross Talk\"],\r\n # 'Lateral')\r\n # self.force_vert1 = self.crosstalkCorrect(force_vert, force_lat,\r\n # self.ctv_slope, 'Vertical')\r\n # self.force_lat1 = self.crosstalkCorrect(force_lat, force_vert,\r\n # self.ctl_slope, 
'Lateral')\r\n # else:\r\n # self.force_vert1 = force_vert\r\n # self.force_lat1 = force_lat\r\n \r\n #recalculate time array of video (considering measurement delay)\r\n tstart = 0\r\n self.time_video = np.empty((0, 100), dtype = np.float64)\r\n for y in range(self.step_num):\r\n time_video_temp = np.linspace(tstart, tstart + (self.frame_num[y]/self.fps),\r\n int(self.frame_num[y]), dtype = np.float64)\r\n self.time_video = np.append(self.time_video, time_video_temp)\r\n if y < self.step_num-1: \r\n tstart = time1[int((y+1)*self.ptsnumber/self.step_num)] #CHANGE TO DIRECT DATA\r\n\r\n #noise filter lateral force\r\n # if self.flag_lf_filter == True:\r\n # self.force_lat1_filtered = savgol_filter(self.force_lat1, self.window_length,\r\n # self.polyorder).tolist()\r\n # else:\r\n # self.force_lat1_filtered = [] \r\n \r\n force_vert1 = self.fileDataDict[\"Vertical force\"]\r\n force_lat1 = self.fileDataDict[\"Lateral force\"]\r\n \r\n# self.forceDict[\"force_adhesion1\"] = []\r\n# self.forceDict[\"force_preload1\"] = []\r\n# self.forceDict[\"force_friction\"] = []\r\n# self.forceDict[\"force_preload2\"] = []\r\n# self.forceDict[\"force_max1\"] = []\r\n# self.forceDict[\"force_max2\"] = []\r\n# self.forceDict[\"force_min1\"] = []\r\n# self.forceDict[\"zero1\"] = []\r\n# self.forceDict[\"force_lat_min\"] = []\r\n# self.forceDict[\"force_lat_max\"] = []\r\n# self.indDict[\"force_lat_max_index\"] = []\r\n# self.indDict[\"force_lat_min_index\"] = []\r\n# ## self.indDict[\"contact_time1\"] = []\r\n# self.indDict[\"time1_max\"] = []\r\n \r\n self.forceDict = {} #force calculation values\r\n self.indDict = {} #index values\r\n # self.summaryDataDict = {} #summary data values\r\n self.summaryDataDict['Vertical force'] = {}\r\n self.summaryDataDict['Lateral force'] = {}\r\n \r\n roi_list = dataAnalDict[\"Vertical force\"][\"ranges\"].keys()\r\n for k in roi_list:\r\n logging.debug('%s', roi_list)\r\n if len(roi_list) > 1 and k == \"Default\":\r\n continue\r\n #initialize dict for the given roi label\r\n self.forceDict[k] = {}\r\n self.indDict[k] = {}\r\n self.summaryDataDict['Vertical force'][k] = {}\r\n self.summaryDataDict['Lateral force'][k] = {}\r\n #calculate friction force\r\n## if self.flag_lf == True or self.flag_lf_filter == True:\r\n limits = eval(dataAnalDict[\"Lateral force\"][\"ranges\"][k][\"Force\"])\r\n friction_slice = slice(limits[0], limits[1]+1)\r\n # friction_slice = slice(int(self.rangeDict[k][3][0] * self.ptsnumber/100),\r\n # int(self.rangeDict[k][3][1] * self.ptsnumber/100))\r\n force_lat_max = max(force_lat1[friction_slice])\r\n force_lat_max_index = friction_slice.start + \\\r\n force_lat1[friction_slice]. \\\r\n index(force_lat_max)\r\n force_lat_min = min(force_lat1[friction_slice])\r\n force_lat_min_index = friction_slice.start + \\\r\n force_lat1[friction_slice]. 
\\\r
                                  index(force_lat_min)\r
            force_friction = abs(force_lat_max - force_lat_min)\r
            logging.debug('%s, %s, %s', friction_slice, force_lat_max_index, force_lat_min_index)\r
##            else:\r
##                force_friction = 0\r
##                force_lat_min = 0\r
##                force_lat_max = 0\r
##                force_lat_max_index = 0\r
##                force_lat_min_index = 0\r
            \r
            \r
            #calculate contact time\r
##            contact_time1 = sum(self.pause)\r
            #Note: Contact time is the time for which the pad is kept stationary\r
            #in contact with the surface\r
##            dist_vert1_maxcount = self.dist_vert1.count(min(self.dist_vert1))\r
##            dist_vert1_max_index = self.dist_vert1.index(min(self.dist_vert1))\r
##            if dist_vert1_maxcount == 1:\r
##                contact_time1 = 0\r
##            else:\r
##                contact_time1 = (dist_vert1_maxcount - 1) * (self.time1[dist_vert1_max_index+1] -\r
##                                                             self.time1[dist_vert1_max_index]) \r
\r
            #ignore first point of force data due to overshoot\r
            limits = eval(dataAnalDict[\"Vertical force\"][\"ranges\"][k][\"Force\"])\r
            adh_slice = slice(limits[0], limits[1]+1)\r
            # adh_slice = slice(int(self.rangeDict[k][1][0] * self.ptsnumber/100),\r
            #                   int(self.rangeDict[k][1][1] * self.ptsnumber/100))\r
            force_min1 = min(force_vert1[adh_slice])\r
            self.force_min_index = adh_slice.start + \\\r
                                   force_vert1[adh_slice].index(force_min1)\r
            limits = eval(dataAnalDict[\"Vertical force\"][\"ranges\"][k][\"Preload\"])\r
            prl1_slice = slice(limits[0], limits[1]+1)\r
            # prl1_slice = slice(int(self.rangeDict[k][2][0] * self.ptsnumber/100),\r
            #                    int(self.rangeDict[k][2][1] * self.ptsnumber/100))\r
            force_max1 = max(force_vert1[prl1_slice])\r
            \r
            limits = eval(dataAnalDict[\"Lateral force\"][\"ranges\"][k][\"Preload\"])\r
            prl2_slice = slice(limits[0], limits[1]+1)\r
            # prl2_slice = slice(int(self.rangeDict[k][4][0] * self.ptsnumber/100),\r
            #                    int(self.rangeDict[k][4][1] * self.ptsnumber/100))\r
            force_max2 = max(force_vert1[prl2_slice])\r
            \r
            limits = eval(dataAnalDict[\"Vertical force\"][\"ranges\"][k][\"Zero\"])\r
            zero_slice = slice(limits[0], limits[1]+1)\r
            # zero_slice = slice(int(self.rangeDict[k][0][0] * self.ptsnumber/100),\r
            #                    int(self.rangeDict[k][0][1] * self.ptsnumber/100))\r
            zero1 = mean(force_vert1[zero_slice]) #average n points as vert zero\r
            zero1_stdv = stdev(force_vert1[zero_slice]) #vertical force error\r
            \r
            limits = eval(dataAnalDict[\"Lateral force\"][\"ranges\"][k][\"Zero\"])\r
            zero2_slice = slice(limits[0], limits[1]+1)\r
            # zero2_slice = slice(int(self.rangeDict[k][5][0] * self.ptsnumber/100),\r
            #                     int(self.rangeDict[k][5][1] * self.ptsnumber/100))\r
            zero2 = mean(force_lat1[zero2_slice]) #average n points as lat zero\r
            zero2_stdv = stdev(force_lat1[zero2_slice]) #lateral force error\r
            time1_max = time1[self.force_min_index]\r
\r
            zero2_filter = mean(force_lat1[zero2_slice]) #average n points as lat zero\r
\r
            force_preload1 = abs(force_max1 - zero1) #adhesion preload\r
            # if self.flag_lf == True or self.flag_lf_filter == True:\r
            force_preload2 = abs(force_max2 - zero1) #friction preload\r
            # else:\r
            #     force_preload2 = 0\r
            force_adhesion1 = abs(force_min1 - zero1)\r
            logging.debug('%s, %s, %s', force_preload1, force_adhesion1, self.speed_um)\r
\r
            self.summaryDataDict['Vertical force'][k][\"Pulloff Force\"] = force_adhesion1\r
            self.summaryDataDict['Vertical force'][k][\"Adhesion Preload\"] = force_preload1\r
            self.summaryDataDict['Vertical force'][k][\"Vertical Force Stdev\"] = zero1_stdv\r
            self.summaryDataDict['Lateral force'][k][\"Friction Force\"] = force_friction\r
            self.summaryDataDict['Lateral force'][k][\"Friction Preload\"] = force_preload2\r
            self.summaryDataDict['Lateral force'][k][\"Lateral Force Stdev\"] = zero2_stdv\r
            \r
            self.forceDict[k][\"force_max2\"] = force_max2\r
            self.forceDict[k][\"force_max1\"] = force_max1\r
            self.forceDict[k][\"force_min1\"] = force_min1\r
            self.forceDict[k][\"zero1\"] = zero1\r
            # self.forceDict[k][\"zero1_stdv\"] = zero1_stdv\r
            self.forceDict[k][\"zero2\"] = zero2\r
            # self.forceDict[k][\"zero2_stdv\"] = zero2_stdv\r
            self.forceDict[k][\"force_lat_min\"] = force_lat_min\r
            self.forceDict[k][\"force_lat_max\"] = force_lat_max\r
            self.indDict[k][\"force_lat_max_index\"] = force_lat_max_index\r
            self.indDict[k][\"force_lat_min_index\"] = force_lat_min_index\r
##            self.indDict[\"contact_time1\"].append(contact_time1)\r
            self.indDict[k][\"time1_max\"] = time1_max\r
            logging.debug(\"end\")\r
\r
        #IMP: CHECK zero1 VARIABLE BELOW. DIFFERENT ZEROS NOT CONSIDERED BELOW!\r
        #shift force data for plotting\r
        # if self.flag_zshift == True:\r
        #     self.force_vert1_shifted = [x-zero1 for x in self.force_vert1]\r
        #     self.force_lat1_shifted = [x-zero2 for x in self.force_lat1]\r
        #     self.force_lat1_filtered_shifted = [x-zero2_filter for x in self.force_lat1_filtered]\r
        # else:\r
        #     self.force_vert1_shifted = self.force_vert1\r
        #     self.force_lat1_shifted = self.force_lat1\r
        #     self.force_lat1_filtered_shifted = self.force_lat1_filtered \r
        self.speed_um = [x/1000 for x in self.speed] #speed in μm/s\r
        # self.fileDataDict2[\"Speed\"] = self.speed_um\r
        # self.summaryDataDict['data params'] = {}\r
        self.summaryDataDict['data params']['Speed'] = {}\r
        # self.summaryDataDict['misc']['Speed'][\"Speed list\"] = self.speed_um\r
        \r
        # self.speedDict = {} #step number corresponding to sliding/attachment detachment\r
        self.ptsperstep = int(self.ptsnumber/self.step_num) #number of points per step\r
        force_lat_index = int(mean([force_lat_min_index, force_lat_max_index]))\r
        logging.debug('%s, %s, %s', force_lat_index, self.ptsperstep, self.ptsnumber)\r
        if self.steps[0] == \"Up/Down\":\r
            self.summaryDataDict['data params']['Speed'][\"Sliding Speed\"] = 0\r
            self.summaryDataDict['data params']['Speed'][\"Detachment Speed\"] = self.speed[0]\r
            self.summaryDataDict['data params']['Speed'][\"Attachment Speed\"] = self.speed[0] #CHECK!\r
            self.slideStep = \"None\"\r
        else:\r
            self.summaryDataDict['data params']['Speed'][\"Sliding Speed\"] = self.speed_um[int(force_lat_index/self.ptsperstep)]\r
            ind_detach = int(self.force_min_index/self.ptsperstep)\r
            self.summaryDataDict['data params']['Speed'][\"Detachment Speed\"] = self.speed_um[ind_detach]\r
            #last down step before detachment\r
            self.summaryDataDict['data params']['Speed'][\"Attachment Speed\"] = self.speed_um[ind_detach - \\\r
                                         self.steps[ind_detach::-1].index('Down')]\r
            #lateral sliding step\r
            self.slideStep = self.steps[int(force_lat_index/self.ptsperstep)]\r
            logging.debug('%s, %s, %s', \"slide step \", force_lat_index, self.ptsperstep)\r
        # print(\"Speed dict\", self.speedDict)\r
        \r
        self.contact_time1 = sum(self.pause) #contact time\r
        \r
        # self.fileDataDict2[\"Sliding Step\"] = self.slideStep\r
        # self.fileDataDict2[\"Contact Time\"] = self.contact_time1\r
        # self.summaryDataDict['data params'][\"Sliding Step\"] = self.slideStep\r
        self.summaryDataDict['data params']['Time'] = {}\r
        self.summaryDataDict['data params']['Time'][\"Contact Time\"] = self.contact_time1\r
        #calculate actual vertical deformation\r
        # for a in self.force_vert1: \r
        #     if abs(a-zero1) > self.deform_tol*zero1_stdv: #point of contact for given tolerance\r
        #         deform_index1 = self.force_vert1.index(a)\r
        #         break\r
        #     else:\r
        #         deform_index1 = 0\r
\r
        kBeam = dataAnalDict['misc settings']['beam spring constant'].text()\r
        deform_limits = eval(dataAnalDict['misc settings']['deformation range'].text())\r
        dist_vert1 = self.fileDataDict[\"Vertical piezo\"]\r
        # deform_index1 = int(dataAnalDict['misc settings']['deformation start'].text())\r
        # deform_index1 = self.deform_tol #index of point of contact\r
        # deform_index2 = max([self.force_vert1.index(a) for a in self.forceDict[\"force_min1\"]]) #point of contact loss\r
        # deform_index2 = len(self.force_vert1)-1 if deform_index2 <= deform_index1 else deform_index2\r
        logging.debug('%s, %s, %s', \"deform index\", deform_limits[0], deform_limits[1])\r
        self.deform_init = dist_vert1[deform_limits[0]] #piezo value at initial contact\r
        self.fileDataDict[\"Deformation\"] = [dist_vert1[i] - self.deform_init - \\\r
                                            ((force_vert1[i] - zero1)/float(kBeam.split(',')[0])) \\\r
                                            if i >= deform_limits[0] and i <= deform_limits[1] else 0 \\\r
                                            for i in range(len(dist_vert1))]\r
        self.deform_pulloff = self.fileDataDict[\"Deformation\"][deform_limits[1]] #deformation at contact loss\r
        \r
        #calculate adhesion energy (area under curve) \r
        #TODO! Last zero1 value of rois taken for energy calculation\r
        pulloff_index = int(self.ptsperstep * int(deform_limits[1]/self.ptsperstep)) #start index of contact loss \"step\"\r
        force_shifted = [x-zero1 for x in force_vert1]\r
        zero_index = (force_shifted[pulloff_index:deform_limits[1]+1].index\\\r
                      (sorted([[abs(a - 0), a] for a in force_shifted[pulloff_index:deform_limits[1]+1]], \r
                              key=lambda l:l[0])[0][1])) + pulloff_index #point where force reaches zero\r
        self.energy_slice = slice(zero_index, deform_limits[1] + 1)\r
        self.energy_adhesion = integrate.simps(force_shifted[self.energy_slice],\r
                                               self.fileDataDict[\"Deformation\"][self.energy_slice])\r
        logging.debug('%s, %s, %s, %s', \"energy\", self.energy_adhesion, pulloff_index, zero_index)\r
        self.zero_array = zero1*np.ones(len(force_vert1))\r
        \r
        # self.fileDataDict2[\"Initial Deformation\"] = self.deform_init\r
        # self.fileDataDict2[\"Pulloff Deformation\"] = self.deform_pulloff\r
        # self.fileDataDict2[\"Adhesion Energy\"] = self.energy_adhesion\r
        # self.summaryDataDict['misc'] = {}\r
        self.summaryDataDict['misc'][\"Adhesion Energy\"] = self.energy_adhesion\r
        self.summaryDataDict['data params']['Deformation'] = {}\r
        self.summaryDataDict['data params']['Deformation'][\"Initial Deformation\"] = self.deform_init\r
        self.summaryDataDict['data params']['Deformation'][\"Pulloff Deformation\"] = self.deform_pulloff\r
        \r
        \r
    def getArea(self, time, dataDict): #get contact areas/lengths at pulloff etc\r
        #area data (2)\r
        logging.debug(\"Get area begin\")\r
        self.dataDict = dataDict #data dictionary from videos\r
        \r
        time1 = self.fileDataDict[\"Time\"]\r
\r
        # self.areaDict[\"area2_init\"] = []\r
        # self.areaDict[\"area2_max\"] = []\r
        # self.areaDict[\"area3_init\"] = []\r
        # self.areaDict[\"area3_max\"] = []\r
        # self.areaDict[\"area2_pulloff\"] = []\r
        # self.areaDict[\"area2_residue\"] = []\r
        # self.areaDict[\"area3_pulloff\"] = []\r
        # self.areaDict[\"area_friction\"] = []\r
\r
        # self.lengthDict[\"contLength_init\"] = []\r
        # self.lengthDict[\"contLength_max\"] = []\r
        # self.lengthDict[\"roilen_init\"] = []\r
        # self.lengthDict[\"roilen_max\"] 
= []\r\n # self.lengthDict[\"contLength_pulloff\"] = []\r\n # self.lengthDict[\"roilen_pulloff\"] = []\r\n # self.lengthDict[\"ecc_pulloff\"] = []\r\n # self.lengthDict[\"contnum_pulloff\"] = []\r\n\r\n # self.ellipDict[\"ellipAr_max\"] = []\r\n # self.ellipDict[\"ellipPer_max\"] = []\r\n # self.ellipDict[\"ellipMajr_max\"] = []\r\n # self.ellipDict[\"ellipMinr_max\"] = []\r\n \r\n # self.indDict[\"time1_lat_avg\"] = []\r\n\r\n self.time2 = time.tolist()\r\n area2_full = [0] * len(time) #initialize\r\n\r\n #area/length whole plot slice\r\n time2_start = sorted([[abs(a - time1[self.plot_slice.start]), a] \\\r\n for a in self.time2], key=lambda l:l[0])[0][1]\r\n time2_end = sorted([[abs(a - time1[self.plot_slice.stop-1]), a] \\\r\n for a in self.time2], key=lambda l:l[0])[0][1] \r\n self.plot_slice2 = slice(self.time2.index(time2_start),\r\n self.time2.index(time2_end) + 1) \r\n i = 0\r\n logging.debug('%s', dataDict.keys())\r\n self.areaDict = {}\r\n self.lengthDict = {}\r\n # self.ellipDict = {}\r\n self.summaryDataDict['Contact area'] = {}\r\n self.summaryDataDict['Contact length'] = {}\r\n self.summaryDataDict['ROI area'] = {}\r\n self.summaryDataDict['ROI length'] = {}\r\n self.summaryDataDict['Contact number'] = {}\r\n self.summaryDataDict['Eccentricity'] = {}\r\n \r\n for k in self.dataDict.keys():\r\n if len(self.dataDict.keys()) > 1 and k == \"Default\":\r\n continue\r\n logging.debug('%s', k)\r\n self.areaDict[k] = {}\r\n self.lengthDict[k] = {}\r\n self.summaryDataDict['Contact area'][k] = {}\r\n self.summaryDataDict['Contact length'][k] = {}\r\n self.summaryDataDict['ROI area'][k] = {}\r\n self.summaryDataDict['ROI length'][k] = {}\r\n self.summaryDataDict['Contact number'][k] = {}\r\n self.summaryDataDict['Eccentricity'][k] = {} \r\n # self.ellipDict[k] = {}\r\n \r\n area2 = dataDict[k][\"Contact area\"].tolist() #contact area\r\n contLength = dataDict[k][\"Contact length\"].tolist() #contact length\r\n contnum = dataDict[k][\"Contact number\"].tolist() #contact number\r\n area3 = dataDict[k][\"ROI area\"].tolist() #roi area\r\n roilen = dataDict[k][\"ROI length\"].tolist() #roi length\r\n ecc = dataDict[k][\"Eccentricity\"].tolist() #median eccentricity\r\n # print(\"ellip\", dataDict[k][6][0])\r\n ellipMajr = [x[3]*max(x[1][0],x[1][1]) for x in dataDict[k][\"Ellipse fit\"]] #ellipse major axis length\r\n ellipMinr = [x[3]*min(x[1][0],x[1][1]) for x in dataDict[k][\"Ellipse fit\"]] #ellipse minor axis length\r\n ellipAr = [np.pi*(x[1][0]/2)*(x[1][1]/2)*(x[3]**2) for x in dataDict[k][\"Ellipse fit\"]] #bounding ellipse area\r\n ellipPer = [x[3]*np.pi*((3*((x[1][0]/2)+(x[1][1]/2)))-\\\r\n (((3*x[1][0]/2)+(x[1][1]/2))*\\\r\n ((x[1][0]/2)+(3*x[1][1]/2)))**0.5)\\\r\n for x in dataDict[k][\"Ellipse fit\"]] #bounding ellipse perimeter (Ramanujan first approximation)\r\n \r\n \r\n area2_init = area2[0] #initial areas/lengths\r\n area3_init = area3[0]\r\n contLength_init = contLength[0] #initial lengths\r\n roilen_init = roilen[0]\r\n## print(self.indDict[\"time1_max\"])\r\n\r\n area2_max = max(area2) #max area/lengths\r\n area3_max = max(area3)\r\n contLength_max = max(contLength)\r\n roilen_max = max(roilen)\r\n time2_max = sorted([[abs(a - self.indDict[k][\"time1_max\"]), a] \\\r\n for a in self.time2], key=lambda l:l[0])[:2]\r\n logging.debug('%s', time2_max)\r\n\r\n wt_sum = time2_max[0][0] + time2_max[1][0] #take weighted avg\r\n wt = [time2_max[1][0]/wt_sum, time2_max[0][0]/wt_sum]\r\n try:\r\n area2_pulloff = np.average([area2[self.time2.index(\r\n 
time2_max[0][1])],area2[self.time2.index(time2_max[1][1])]],\r\n weights = wt)\r\n area3_pulloff = np.average([area3[self.time2.index(\r\n time2_max[0][1])],area3[self.time2.index(time2_max[1][1])]],\r\n weights = wt)\r\n contLength_pulloff = np.average([contLength[self.time2.index(\r\n time2_max[0][1])],contLength[self.time2.index(time2_max[1][1])]],\r\n weights = wt)\r\n roilen_pulloff = np.average([roilen[self.time2.index(\r\n time2_max[0][1])],roilen[self.time2.index(time2_max[1][1])]],\r\n weights = wt)\r\n ecc_pulloff = np.average([ecc[self.time2.index(\r\n time2_max[0][1])],ecc[self.time2.index(time2_max[1][1])]],\r\n weights = wt)\r\n contnum_pulloff = int(np.average([contnum[self.time2.index(\r\n time2_max[0][1])],contnum[self.time2.index(time2_max[1][1])]],\r\n weights = wt))\r\n #last area point of detachment step\r\n area2_residue = area2[sum(self.frame_num[:int(self.force_min_index/\r\n self.ptsperstep)+1])-1]\r\n except Exception as e:\r\n logging.error(str(e))\r\n root = tk.Tk()\r\n root.withdraw()\r\n messagebox.showinfo(\"Analysis Error!\", \"Check force file/video file\\n\" +\r\n \"Exception: \" + str(e))\r\n root.destroy()\r\n area2_pulloff = 0\r\n return\r\n \r\n logging.debug('%s, %s, %s, %s, %s', \"adhesion calc\", area2_max,\r\n self.indDict[k][\"time1_max\"], time2_max, area2_pulloff)\r\n\r\n## if self.flag_lf == True or self.flag_lf_filter == True:\r\n force_lat_avg_index = int(mean([self.indDict[k][\"force_lat_max_index\"],\r\n self.indDict[k][\"force_lat_min_index\"]]))\r\n time1_lat_avg = time1[force_lat_avg_index]\r\n time2_lat_avg = sorted([[abs(a - time1_lat_avg), a] \\\r\n for a in self.time2], key=lambda l:l[0])[:2]\r\n wt_sum2 = time2_lat_avg[0][0] + time2_lat_avg[1][0] #take weighted avg\r\n wt2 = [time2_lat_avg[1][0]/wt_sum2, time2_lat_avg[0][0]/wt_sum2]\r\n try:\r\n area_friction = np.average([area2[self.time2.index(\r\n time2_lat_avg[0][1])],area2[self.time2.index(time2_lat_avg[1][1])]],\r\n weights = wt2)\r\n except Exception as e:\r\n logging.error(str(e))\r\n root = tk.Tk()\r\n root.withdraw()\r\n messagebox.showinfo(\"Analysis Error!\", \"Check force file/video file\\n\" +\r\n \"Exception: \" + str(e))\r\n root.destroy()\r\n area_friction = 0\r\n return\r\n logging.debug('%s, %s, %s', \"friction calc\", area_friction, time1_lat_avg)\r\n## else:\r\n## area_friction = area2_init #zero\r\n## time1_lat_avg = 0\r\n \r\n self.indDict[k][\"time1_lat_avg\"] = time1_lat_avg\r\n\r\n #save contour properties to dictionary\r\n self.areaDict[k][\"area2_init\"] = area2_init\r\n self.areaDict[k][\"area2_max\"] = area2_max\r\n self.areaDict[k][\"area3_init\"] = area3_init\r\n self.areaDict[k][\"area3_max\"] = area3_max\r\n self.areaDict[k][\"area2_pulloff\"] = area2_pulloff\r\n self.areaDict[k][\"area3_pulloff\"] = area3_pulloff\r\n self.areaDict[k][\"area_friction\"] = area_friction\r\n self.areaDict[k][\"area2_residue\"] = area2_residue\r\n\r\n self.summaryDataDict['Contact area'][k][\"Max Area\"] = area2_max - area2_init\r\n self.summaryDataDict['Contact area'][k][\"Pulloff Area\"] = area2_pulloff - area2_init\r\n self.summaryDataDict['ROI area'][k][\"ROI Max Area\"] = area3_max\r\n self.summaryDataDict['ROI area'][k][\"ROI Pulloff Area\"] = area3_pulloff\r\n self.summaryDataDict['Contact area'][k][\"Friction Area\"] = area_friction - area2_init\r\n self.summaryDataDict['Contact area'][k][\"Residue Area\"] = area2_residue - area2_init\r\n\r\n self.lengthDict[k][\"contLength_init\"] = contLength_init\r\n self.lengthDict[k][\"contLength_max\"] = 
contLength_max\r\n self.lengthDict[k][\"roilen_init\"] = roilen_init\r\n self.lengthDict[k][\"roilen_max\"] = roilen_max\r\n self.lengthDict[k][\"contLength_pulloff\"] = contLength_pulloff\r\n self.lengthDict[k][\"roilen_pulloff\"] = roilen_pulloff\r\n # self.lengthDict[k][\"ecc_pulloff\"] = ecc_pulloff\r\n # self.lengthDict[k][\"contnum_pulloff\"] = contnum_pulloff\r\n \r\n self.summaryDataDict[\"Contact length\"][k][\"Max Length\"] = contLength_max - contLength_init\r\n self.summaryDataDict[\"Contact length\"][k][\"Pulloff Length\"] = contLength_pulloff - contLength_init\r\n self.summaryDataDict[\"ROI length\"][k][\"ROI Max Length\"] = roilen_max\r\n self.summaryDataDict[\"ROI length\"][k][\"ROI Pulloff Length\"] = roilen_pulloff\r\n \r\n self.summaryDataDict[\"Eccentricity\"][k][\"Pulloff Median Eccentricity\"] = ecc_pulloff\r\n self.summaryDataDict['Contact number'][k][\"Pulloff Contact Number\"] = contnum_pulloff\r\n \r\n #get bounding ellipse properties at maximum contact\r\n ind_max = area2.index(area2_max) #index of maximum contact\r\n self.summaryDataDict['Contact area'][k][\"Max Bounding Area\"] = ellipAr[ind_max]\r\n self.summaryDataDict[\"Contact length\"][k][\"Max Bounding Perimeter\"] = ellipPer[ind_max]\r\n self.summaryDataDict[\"Contact length\"][k][\"Max Bounding Length\"] = ellipMajr[ind_max]\r\n self.summaryDataDict[\"Contact length\"][k][\"Max Bounding Width\"] = ellipMinr[ind_max]\r\n \r\n \r\n #calculate full area\r\n area2_full = [area2_full[i] + area2[i] for i in range(len(area2))]\r\n i += 1\r\n \r\n #interpolate area data\r\n # area_interpol = [self.interpolData(self.time1[i], area2_full) \\\r\n # for i in range(len(self.time1))]\r\n \r\n # if self.flag_st == True or self.flag_lf_filter == True: #stress \r\n if self.configPlotWindow.plotDict['extras']['Stress'].isChecked() == True: #stress \r\n #local stress dF/dA #CHECK\r\n ## stress_local = np.diff(np.array(self.force_vert1))/np.diff(np.array(area_interpol))\r\n ## self.stress = np.append(stress_local, stress_local[-1]) #make array size same as time1\r\n \r\n \r\n #stress F/A\r\n self.stress = [(self.fileDataDict[\"Vertical force\"][i]-\r\n self.fileDataDict[\"Vertical force\"][0])/\r\n (self.interpolData(time1[i], area2_full)-\r\n self.interpolData(time1[0], area2_full)) for i \\\r\n in range(len(time1))]\r\n #noise filter stress using vertical force filter parameters\r\n window_length = self.analyzeDataWindow.dataAnalDict[\"Vertical force\"]\\\r\n [\"transform\"][\"Filter window\"]\r\n k_size = window_length+1 if window_length % 2 == 0 else window_length\r\n self.stress_filtered = medfilt(self.stress, kernel_size=k_size).tolist()\r\n \r\n logging.debug(\"get area finished\")\r\n\r\n\r\n# def polyfitData(self, xdata, ydata, ax, x_plot, unit,\r\n# eq_pos = [1,0.2], fit_order = 1, fit_show = False): #fit data and plot\r\n# data = zip(xdata, ydata, x_plot)\r\n# data = np.array(sorted(data, key = lambda x: x[0]))\r\n# coeff = np.polyfit(data[:,0],data[:,1], fit_order) #fitting coeffients\r\n# slope = coeff[0]\r\n# p_fit = np.poly1d(coeff)\r\n# y_fit = p_fit(data[:,0])\r\n# y_avg = np.sum(data[:,1])/len(data[:,1])\r\n# r2 = (np.sum((y_fit-y_avg)**2))/(np.sum((data[:,1] - y_avg)**2))\r\n# sign = '' if coeff[1] < 0 else '+'\r\n# eq_id = 'Slope'\r\n# eq_coff = [\"$%.1e\"%(coeff[i]) + \"x^\" + str(len(coeff) - i - 1) + \"$\"\\\r\n# if i < len(coeff) - 2 else \"%.4fx\"%(coeff[i]) for i in range(len(coeff)-1)]\r\n# eq = \"y=\" + '+'.join(eq_coff) + \"+%.4f\"%(coeff[len(coeff)-1]) + \"; $R^2$=\" + \"%.4f\"%(r2) \r\n# 
eq_clean = eq.replace('+-', '-')\r\n# ## x_fit = np.linspace(min(data[:,0]), max(data[:,0]), 100)\r\n# ax.plot(data[:,2], y_fit, color = 'black',\r\n# linewidth=2, linestyle='dashed')\r\n# ## print(eq_pos)\r\n# if fit_show == True:\r\n# ax.text(float(eq_pos[0]), float(eq_pos[1]), eq_id + \": \" + \"%.4f\"%(slope) + ' (' + unit + ')',\r\n# ha = 'right', transform=ax.transAxes, color = 'black',\r\n# bbox=dict(facecolor='white', edgecolor = 'black', alpha=0.5))\r\n# print('data fitted', eq_clean)\r\n# return slope\r\n\r\n# def plotData(self, unit): #prepare plot\r\n\r\n# xDict = {'Vertical Position (μm)':self.dist_vert1,\r\n# 'Lateral Position (μm)':self.dist_lat1,\r\n# 'Deformation (μm)':self.deform_vert,\r\n# 'Time (s)':self.time1}\r\n# xAxisData = xDict.get(self.x_var)\r\n \r\n# markerlist = [\"o\", \"v\", \"^\", \"s\", \"P\", \"*\", \"D\", \"<\", \"X\", \">\"]\r\n# linelist = [\":\", \"-.\", \"--\", \"-\", \":\", \"-.\", \"--\", \"-\", \":\", \"-.\"]\r\n \r\n# self.fig1 = plt.figure(num=\"Force/Area vs Time\", figsize = [11, 5])\r\n# self.fig1.canvas.mpl_connect('close_event', self.handle_close)\r\n# print(\"fig1\")\r\n# self.fig1.clear()\r\n# ax1 = self.fig1.add_subplot(1,1,1)\r\n# lns = []\r\n \r\n# ax1.set_title('Speed = ' + str(self.speed_um) + ' μm/s')\r\n# ax1.set_xlabel(self.x_var)\r\n# ax1.set_ylabel('Vertical Force (μN)', color = 'r')\r\n# p1, = ax1.plot(xAxisData[self.plot_slice], self.force_vert1_shifted[self.plot_slice], 'ro',\r\n# alpha=0.5, linewidth=1, markersize=1, label=\"Vertical Force\")\r\n# lns.append(p1)\r\n\r\n# if self.ptsnumber != 0:\r\n# ## ptsperstep = int(self.ptsnumber/self.step_num)\r\n# i = 0\r\n# lns_reg = [] #region legend handle\r\n# lab_reg = [] #region legend label\r\n# for a in self.steps: #shade step regions\r\n# if i < ((self.plot_slice.start+1)/self.ptsperstep)-1:\r\n# i += 1\r\n# continue\r\n \r\n# if self.ptsperstep*(i+1)-1 > self.plot_slice.stop:\r\n# endpoint = self.plot_slice.stop - 1\r\n# exit_flag = True\r\n# else:\r\n# endpoint = self.ptsperstep*(i+1) - 1\r\n# exit_flag = False\r\n \r\n# if self.ptsperstep*i < self.plot_slice.start:\r\n# startpoint = self.plot_slice.start\r\n# else:\r\n# startpoint = self.ptsperstep*i \r\n \r\n# x_start = min(xAxisData[startpoint:endpoint])\r\n# x_end = max(xAxisData[startpoint:endpoint])\r\n# if a == 'Front':\r\n# v1 = ax1.axvspan(x_start, x_end, alpha=0.9,\r\n# color='aliceblue', label = a)\r\n# lns_reg.append(v1)\r\n# lab_reg.append(a)\r\n# if exit_flag == True:\r\n# break\r\n# elif a == 'Back':\r\n# v2 = ax1.axvspan(x_start, x_end, alpha=0.9,\r\n# color='whitesmoke', label = a)\r\n# lns_reg.append(v2)\r\n# lab_reg.append(a)\r\n# if exit_flag == True:\r\n# break \r\n# elif a == 'Up':\r\n# v3 = ax1.axvspan(x_start, x_end, alpha=0.9,\r\n# color='honeydew', label = a)\r\n# lns_reg.append(v3)\r\n# lab_reg.append(a)\r\n# if exit_flag == True:\r\n# break \r\n# elif a == 'Down':\r\n# v4 = ax1.axvspan(x_start, x_end, alpha=0.9,\r\n# color='linen', label = a)\r\n# lns_reg.append(v4)\r\n# lab_reg.append(a)\r\n# if exit_flag == True:\r\n# break \r\n# elif a == 'Pause':\r\n# v5 = ax1.axvspan(x_start, x_end, alpha=0.9,\r\n# color='lightyellow', label = a)\r\n# lns_reg.append(v5)\r\n# lab_reg.append(a)\r\n# if exit_flag == True:\r\n# break\r\n# i += 1\r\n \r\n \r\n# dict_reg = dict(zip(lab_reg, lns_reg)) #legend dictionary (remove dup)\r\n# self.fig1.legend(dict_reg.values(), dict_reg.keys(), loc='lower right',\r\n# ncol=len(lns_reg))\r\n \r\n# if self.flag_ap == True: #show adhesion calc\r\n# #fill adhesion energy 
region \r\n# ax1.fill_between(xAxisData[self.energy_slice],\r\n# self.forceDict[\"zero1\"][0],\r\n# self.force_vert1_shifted[self.energy_slice],\r\n# color = 'black') \r\n# i = 0\r\n# for k in self.rangeDict.keys():\r\n# if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n# continue\r\n# ax1.axhline(y=self.forceDict[\"zero1\"][i], color='y',\r\n# alpha=1, linestyle=linelist[i], linewidth=1) \r\n# ax1.axhline(y=self.forceDict[\"force_min1\"][i], color='y',\r\n# alpha=1, linestyle=linelist[i], linewidth=1)\r\n# ax1.axhline(y=self.forceDict[\"force_max1\"][i], color='y',\r\n# alpha=1, linestyle=linelist[i], linewidth=1)\r\n# ax1.axvline(x=xAxisData[self.time1.index(self.indDict[\"time1_max\"][i])], \r\n# color='y', alpha=1, linestyle=linelist[i], linewidth=1)\r\n# i += 1\r\n\r\n# if self.flag_ca == True or self.flag_ra == True:\r\n# ax2 = ax1.twinx() #secondary axis\r\n# ## cmap = plt.cm.get_cmap(\"Reds\") # type: matplotlib.colors.ListedColormap\r\n# num = len(self.rangeDict.keys())\r\n# ## colors = plt.cm.Reds(np.linspace(0.3,1,num))\r\n# colors = plt.cm.Greens([0.7, 0.5, 0.9, 0.3, 1])\r\n# ax2.set_prop_cycle(color=colors)\r\n# ax2.set_ylabel('Area ($' + unit + '^2$)', color = 'g')\r\n# if self.flag_ca == True:\r\n# i = 0\r\n# for k in self.rangeDict.keys():\r\n# if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n# continue\r\n# p2, = ax2.plot(self.time2[self.plot_slice2],\r\n# self.dataDict[k][0][self.plot_slice2],\r\n# '-' + markerlist[i], alpha=0.5,\r\n# linewidth=1, markersize=2,\r\n# label=\"Contact Area: \" + k)\r\n# lns.append(p2)\r\n# if self.flag_ap == True:\r\n# ax2.plot(self.indDict[\"time1_max\"][i],\r\n# self.areaDict[\"area2_pulloff\"][i],\r\n# 'y' + markerlist[i], alpha=0.8)\r\n# i += 1\r\n# if self.flag_ra == True: #consider first key since auto roi is same for all keys\r\n# colors = plt.cm.Blues([0.7, 0.5, 0.9, 0.3, 1])\r\n# ax2.set_prop_cycle(color=colors)\r\n# j = 0\r\n# for k in self.rangeDict.keys():\r\n# if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n# continue \r\n# p3, = ax2.plot(self.time2[self.plot_slice2],\r\n# self.dataDict[k][3][self.plot_slice2],\r\n# '-' + markerlist[j], alpha=0.5, linewidth=1, markersize=2,\r\n# label=\"ROI Area: \" + k)\r\n# lns.append(p3)\r\n# j += 1\r\n\r\n \r\n# if self.flag_lf == True:\r\n# ax3 = ax1.twinx() #lateral force\r\n# ax3.set_ylabel('Lateral Force (μN)', color = 'c')\r\n# ax3.spines['left'].set_position(('outward', 60))\r\n# ax3.spines[\"left\"].set_visible(True)\r\n# ax3.yaxis.set_label_position('left')\r\n# ax3.yaxis.set_ticks_position('left')\r\n# if self.invert_latf == True:\r\n# ax3.invert_yaxis()\r\n# if self.flag_lf == True:\r\n# p4, = ax3.plot(xAxisData[self.plot_slice], self.force_lat1_shifted[self.plot_slice], 'co',\r\n# alpha=0.5, linewidth=1, markersize=1, label=\"Lateral Force\")\r\n\r\n# ## if self.flag_lf_filter == True:\r\n# ## p4, = ax3.plot(self.time1[self.plot_slice], self.force_lat1_filtered_shifted[self.plot_slice], '-c',\r\n# ## alpha=0.5, linewidth=1, label=\"Lateral Force\")\r\n\r\n# if self.flag_fp == True: #show friction calc\r\n# i = 0\r\n# for k in self.rangeDict.keys():\r\n# if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n# continue\r\n# ax3.axhline(y=self.forceDict[\"force_lat_max\"][i],\r\n# color='g', alpha=1,\r\n# linestyle=linelist[i], linewidth=1)\r\n# ax3.axhline(y=self.forceDict[\"force_lat_min\"][i],\r\n# color='g', alpha=1,\r\n# linestyle=linelist[i], linewidth=1)\r\n# ax1.axhline(y=self.forceDict[\"force_max2\"][i],\r\n# color='g', alpha=1,\r\n# 
linestyle=linelist[i], linewidth=1)\r\n# ax3.axvline(x=xAxisData[self.time1.index(self.indDict[\"time1_lat_avg\"][i])],\r\n# color='g', alpha=1,\r\n# linestyle=linelist[i], linewidth=1)\r\n# ax2.plot(self.indDict[\"time1_lat_avg\"][i],\r\n# self.areaDict[\"area_friction\"][i],\r\n# 'g' + markerlist[i], alpha=0.8)\r\n# i += 1\r\n# ax3.axhline(y=self.forceDict[\"zero2\"],\r\n# color='g', alpha=0.5,\r\n# linestyle=linelist[0], linewidth=1) \r\n# lns.append(p4)\r\n# else:\r\n# ax3 = None\r\n\r\n# if self.flag_zp == True or self.flag_xp == True or self.flag_zd: #piezo position/deformation\r\n# ax4 = ax1.twinx() #piezo waveform\r\n# ax4.set_ylabel('Displacement (μm)', color = 'violet')\r\n# if self.flag_ca == True or self.flag_ra == True: #shift axis if area plotted\r\n# ax4.spines['right'].set_position(('outward', 70))\r\n# ## ax4.invert_yaxis()\r\n# if self.flag_zp == True:\r\n# p5, = ax4.plot(xAxisData[self.plot_slice], self.dist_vert1[self.plot_slice], '-',\r\n# markersize=1, color = 'violet',\r\n# alpha=0.5, label=\"Vertical Piezo\")\r\n# lns.append(p5)\r\n# if self.flag_xp == True:\r\n# p6, = ax4.plot(xAxisData[self.plot_slice], self.dist_lat1[self.plot_slice], '-.',\r\n# markersize=1, color = 'violet',\r\n# alpha=0.5, label=\"Lateral Piezo\")\r\n# lns.append(p6)\r\n# if self.flag_zd == True: #actual deformation plot\r\n# p12, = ax4.plot(xAxisData[self.plot_slice], self.deform_vert[self.plot_slice], '-o',\r\n# markersize=1, color = 'violet',\r\n# alpha=0.5, label=\"Deformation\")\r\n# if self.flag_ap == True:\r\n# ax1.axvline(x=xAxisData[self.deform_tol], color='violet', \r\n# alpha=1, linestyle=\":\", linewidth=1)\r\n# lns.append(p12)\r\n \r\n# if self.flag_cl == True or self.flag_rl == True:\r\n# ax5 = ax1.twinx()\r\n# num = len(self.rangeDict.keys())\r\n# colors = plt.cm.copper(np.linspace(0.2,0.7,num))\r\n# ax5.set_prop_cycle(color=colors)\r\n# ax5.set_ylabel('Length ($' + unit + '$)', color = 'brown')\r\n# if self.flag_ca == True or self.flag_ra == True: \r\n# ax5.spines['right'].set_position(('outward', 70)) \r\n# if self.flag_cl == True: #contact length\r\n# i = 0\r\n# for k in self.rangeDict.keys():\r\n# if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n# continue \r\n# p7, = ax5.plot(self.time2[self.plot_slice2],\r\n# self.dataDict[k][1][self.plot_slice2],\r\n# '-' + markerlist[i], alpha=0.5, linewidth=1,\r\n# markersize=2, label=\"Contact Length: \" + k)\r\n# lns.append(p7)\r\n# i += 1\r\n# if self.flag_rl == True: #roi length\r\n# ## ax5 = ax1.twinx()\r\n# num = len(self.rangeDict.keys())\r\n# colors = plt.cm.Wistia(np.linspace(0.2,0.7,num))\r\n# ax5.set_prop_cycle(color=colors)\r\n# ## ax5.spines['right'].set_position(('outward', 70))\r\n# j = 0\r\n# for k in self.rangeDict.keys():\r\n# if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n# continue\r\n# ## ax5.set_ylabel('Length ($' + unit + '$)', color = 'brown')\r\n# p8, = ax5.plot(self.time2[self.plot_slice2],\r\n# self.dataDict[k][4][self.plot_slice2],\r\n# '-' + markerlist[j], alpha=0.5, linewidth=1,\r\n# markersize=2, label=\"ROI Length: \" + k)\r\n# lns.append(p8)\r\n# j += 1\r\n# if self.flag_cn == True: #contact number\r\n# ax5 = ax1.twinx()\r\n# num = len(self.rangeDict.keys())\r\n# colors = plt.cm.copper(np.linspace(0.2,0.7,num))\r\n# ax5.set_prop_cycle(color=colors)\r\n# ax5.spines['right'].set_position(('outward', 70))\r\n# i = 0\r\n# for k in self.rangeDict.keys():\r\n# if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n# continue\r\n# ax5.set_ylabel('Number', color = 'brown')\r\n# p9, = 
ax5.plot(self.time2[self.plot_slice2],\r\n# self.dataDict[k][2][self.plot_slice2],\r\n# '-' + markerlist[i], alpha=0.5, linewidth=1,\r\n# markersize=2, label=\"Contact Number: \" + k)\r\n# lns.append(p9)\r\n# i += 1\r\n# if self.flag_ecc == True: #contact eccentricity\r\n# ax5 = ax1.twinx()\r\n# num = len(self.rangeDict.keys())\r\n# colors = plt.cm.copper(np.linspace(0.2,0.7,num))\r\n# ax5.set_prop_cycle(color=colors)\r\n# ax5.spines['right'].set_position(('outward', 70))\r\n# i = 0\r\n# for k in self.rangeDict.keys():\r\n# if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n# continue\r\n# ax5.set_ylabel('Eccentricity' + unit + '$)', color = 'brown')\r\n# p10, = ax5.plot(self.time2[self.plot_slice2],\r\n# self.dataDict[k][5][self.plot_slice2],\r\n# '-' + markerlist[i], alpha=0.5, linewidth=1,\r\n# markersize=2, label=\"Median Eccentricity: \" + k)\r\n# lns.append(p10)\r\n# i += 1\r\n \r\n# if self.flag_st == True or self.flag_lf_filter == True: #stress\r\n# ax6 = ax1.twinx() \r\n# ax6.set_ylabel('Stress (μN/$' + unit + '^2$)', color = 'c')\r\n# ax6.spines['left'].set_position(('outward', 60))\r\n# ax6.spines[\"left\"].set_visible(True)\r\n# ax6.yaxis.set_label_position('left')\r\n# ax6.yaxis.set_ticks_position('left')\r\n# if self.flag_st == True:\r\n# p11, = ax6.plot(xAxisData[self.plot_slice],\r\n# self.stress[self.plot_slice], 'co',\r\n# alpha=0.5, linewidth=1, markersize=1,\r\n# label=\"Stress\") \r\n# if self.flag_lf_filter == True:\r\n# p11, = ax6.plot(xAxisData[self.plot_slice],\r\n# self.stress_filtered[self.plot_slice], '-c',\r\n# alpha=0.5, linewidth=1, markersize=1,\r\n# label=\"Stress\")\r\n\r\n# lns.append(p11)\r\n \r\n# ## lns = [p1, p3, p2, p4, p5]\r\n# ## else:\r\n# ## lns = [p1, p2]\r\n\r\n# ax1.legend(handles=lns, loc = self.legendPos)\r\n\r\n# if self.flag_fit == True:\r\n# axDict = {'Vertical Force (μN)':ax1, 'Lateral Force (μN)':ax3}\r\n# yDict = {'Vertical Force (μN)':self.force_vert1_shifted,\r\n# 'Lateral Force (μN)':self.force_lat1_shifted}\r\n# fit_slice = slice(int(self.startFit * self.ptsnumber/100),\r\n# int(self.endFit * self.ptsnumber/100))\r\n# self.slope_unit = self.fit_y.split('(')[1].split(')')[0] + '/' +\\\r\n# self.fit_x.split('(')[1].split(')')[0]\r\n# text_pos = self.fit_pos.split(\",\")\r\n \r\n# self.slope = self.fitData(xDict.get(self.fit_x)[fit_slice], yDict.get(self.fit_y)[fit_slice],\r\n# axDict.get(self.fit_y), xAxisData[fit_slice], unit = self.slope_unit,\r\n# eq_pos = text_pos, fit_order = 1, fit_show = self.fit_show)\r\n# else:\r\n# self.slope = ''\r\n# self.slope_unit = ''\r\n# self.fig1.tight_layout()\r\n\r\n# ## print(e)\r\n# ## messagebox.showinfo(\"Plot Error!\", \"Check force file/video file\\n\" +\r\n# ## \"Exception: \" + str(e))\r\n \r\n# def showPlot(self): #show plot\r\n# ## self.fig1.show()\r\n# try:\r\n# plt.pause(0.05)\r\n# self.fig1.canvas.draw()\r\n# except Exception as e:\r\n# print(e)\r\n \r\n# ## plt.show(block=False)\r\n# ## plt.draw()\r\n\r\n# def handle_close(self, evt): #figure closed event\r\n# self.fig1_close = True\r\n \r\n# def convertPlot(self): #convert plot to numpy\r\n# self.fig1.canvas.draw()\r\n# data = np.fromstring(self.fig1.canvas.tostring_rgb(),\r\n# dtype=np.uint8, sep='')\r\n# data = data.reshape(self.fig1.canvas.get_width_height()[::-1] + (3,))\r\n# return data\r\n\r\n# def savePlot(self, filepath): #save force plots\r\n# print(\"save plot\")\r\n# self.fig1.savefig(filepath, orientation='landscape',\r\n# transparent = True)\r\n \r\n #find corresponding unit from the different unit 
dictionaries\r\n def findUnit(self, key):\r\n if key in self.unitDict.keys():\r\n return self.unitDict[key].replace('$', '')\r\n elif key in self.imageDataUnitDict.keys():\r\n return self.imageDataUnitDict[key].replace('$', '')\r\n elif key in self.miscUnitDict.keys():\r\n return self.miscUnitDict[key].replace('$', '')\r\n else:\r\n return '' #no unit\r\n \r\n \r\n def saveSummaryData(self, videofile1, videofile2, zeroforcefile, imageDataUnitDict, \r\n msrmnt_num, summary_filepath): #save and append data\r\n logging.debug(\"save summary\")\r\n## self.summary_filepath = os.path.dirname(os.path.dirname(\r\n## self.force_filepath))+ \"/Analysis/Summary/summary data.txt\"\r\n# if os.path.exists(summary_filepath) == False:\r\n# with open(summary_filepath, \"w\", encoding=\"utf-8\") as f:\r\n# ## f.write(\"Lateral Force Calib [μN] \\t\" + self.calib_lat1 + \"\\n\")\r\n# f.write(\"Max Area [\" + unit + \"^2]\\tPulloff Area [\" + unit + \"^2]\\tAdhesion [μN]\\t\" +\r\n# \"Adhesion Preload [μN]\\tContact Time[s]\\tSpeed [μm/s]\\t\" +\r\n# \"Steps\\tFriction [μN]\\tFriction Area [\" + unit + \"^2]\\t\" +\r\n# \"Friction Preload [μN]\\tMeasurement number\\t\" +\r\n# \"Measurement OK?\\tROI Labels\\t\" +\r\n# \"Step Dictionary\\tVertical Force Stdev\\t\" +\r\n# \"Lateral Force Stdev\\tSliding Step\\t\" +\r\n# \"ROI Max Area [\" + unit + \"^2]\\tROI Pulloff Area [\" + unit + \"^2]\\t\" +\r\n# \"Max Length [\" + unit + \"]\\tPulloff Length [\" + unit + \"]\\t\" +\r\n# \"ROI Max Length [\" + unit + \"^2]\\tROI Pulloff Length [\" + unit + \"^2]\\t\" +\r\n# \"Pulloff Median Eccentricity\\tPulloff Contact Number\\t\" +\r\n# \"Residue Area [\" + unit + \"^2]\\tSlope [\" + self.slope_unit + \"]\\t\" + \r\n# \"Beam Spring Constant [μN/μm]\\tBeam Spring Constant Stdev\\t\" + \r\n# \"Initial Deformation[μm]\\tPulloff Deformation[μm]\\tAdhesion Energy [pJ]\\t\" +\r\n# \"Max Bounding Area [\" + unit + \"^2]\\t Max Bounding Perimeter [\" + unit + \"]\\t\" +\r\n# \"Max Bounding Length [\" + unit + \"]\\tMax Bounding Width [\" + unit + \"]\\t\" +\r\n# \"Video file\\tForce data file\\t2nd Video file\\tZero-Force file\\n\")\r\n\r\n## max_area = [self.areaDict[\"area2_max\"][k] - self.areaDict[\"area2_init\"][k] \\\r\n## for k in range(0, len(self.dataDict.keys())-1)]\r\n## pulloff_area = [self.areaDict[\"area_pulloff\"][k] - self.areaDict[\"area2_init\"][k] \\\r\n## for k in range(0, len(self.dataDict.keys())-1)]\r\n## friction_area = [self.areaDict[\"area_friction\"][k] - self.areaDict[\"area2_init\"][k] \\\r\n## for k in range(0, len(self.dataDict.keys())-1)]\r\n \r\n# roi_label = []\r\n# max_area2 = []\r\n# pulloff_area2 = []\r\n# max_area3 = []\r\n# pulloff_area3 = []\r\n# friction_area = []\r\n# max_contLength = []\r\n# pulloff_contLength = []\r\n# max_roilen = []\r\n# pulloff_roilen = []\r\n# pulloff_ecc = []\r\n# pulloff_contnum = []\r\n# residue_area2 = []\r\n# adhesion = []\r\n# preload1 = []\r\n# ## contact_time = []\r\n# friction = []\r\n# preload2 = []\r\n# elip_area = []\r\n# elip_per = []\r\n# elip_maj = []\r\n# elip_min = []\r\n# i = 0\r\n self.imageDataUnitDict = imageDataUnitDict\r\n # self.summaryDataDict['data params'] = {}\r\n \r\n self.summaryDataDict['measurement params']['Measurement number'] = msrmnt_num\r\n self.summaryDataDict['measurement params']['Measurement OK?'] = 'Y' #CHANGE!\r\n # self.summaryDataDict['data params'][\"Steps\"] = self.steps \r\n self.summaryDataDict['misc'][\"Sliding Step\"] = self.slideStep\r\n \r\n kBeam = self.analyzeDataWindow.dataAnalDict['misc settings']['beam 
spring constant'].text()\r\n # self.fileDataDict2[\"Beam Spring Constant\"] = kBeam.split(',')[0]\r\n # self.fileDataDict2[\"Beam Spring Constant Stdev\"] = kBeam.split(',')[1]\r\n self.summaryDataDict['data params'][\"Spring Constant\"] = {}\r\n self.summaryDataDict['data params'][\"Spring Constant\"][\"Beam Spring Constant\"] = kBeam.split(',')[0]\r\n self.summaryDataDict['data params'][\"Spring Constant\"][\"Beam Spring Constant Stdev\"] = kBeam.split(',')[1]\r\n \r\n # self.fileDataDict2[\"Slope\"] = self.slope\r\n self.summaryDataDict['misc'][\"Slope\"] = self.slope\r\n \r\n self.miscUnitDict = {'Adhesion Energy': ' [pJ]',\r\n 'Spring Constant': ' [μN/μm]',\r\n 'Slope': ' [' + self.slope_unit + ']'}\r\n \r\n roi_list = self.analyzeDataWindow.dataAnalDict[\"Vertical force\"][\"ranges\"].keys()\r\n for k in roi_list:\r\n # print(i)\r\n if len(roi_list) > 1 and k == \"Default\":\r\n continue\r\n \r\n self.summaryDataDict['measurement params'][\"ROI Label\"] = k\r\n summary_filepath_new = summary_filepath[:-4] + '(' + k + ')' + summary_filepath[-4:]\r\n \r\n header_list = []\r\n if os.path.exists(summary_filepath_new) == False:\r\n for x in self.summaryDataDict.keys():\r\n if x in ['measurement params', 'misc']:\r\n for y in self.summaryDataDict[x].keys():\r\n unit = self.findUnit(y)\r\n header_list.append(y + unit)\r\n elif x == 'data params':\r\n for y in self.summaryDataDict[x].keys():\r\n unit = self.findUnit(y)\r\n for z in self.summaryDataDict[x][y].keys():\r\n header_list.append(z + unit)\r\n else:\r\n unit = self.findUnit(x)\r\n for y in self.summaryDataDict[x][k].keys():\r\n header_list.append(y + unit)\r\n \r\n header = '\\t'.join(header_list) + '\\t' + \\\r\n \"Force data file\\tZero-Force file\\tVideo file\\t2nd Video file\\n\" \r\n \r\n # header = \"Measurement number\\tMeasurement OK?\\tROI Label\\t\" + \\\r\n # '\\t'.join(self.speedDict.keys()) + '\\t' + \\\r\n # '\\t'.join(self.fileDataDict2.keys()) + '\\t' + \\\r\n # '\\t'.join(self.summaryDataDict[k].keys()) + '\\t' + \\\r\n # \"Force data file\\tZero-Force file\\tVideo file\\t2nd Video file\\n\" \r\n with open(summary_filepath_new, \"w\", encoding=\"utf-8\") as f:\r\n f.write(header)\r\n \r\n data_list = []\r\n for x in self.summaryDataDict.keys():\r\n if x in ['measurement params', 'misc']:\r\n for y in self.summaryDataDict[x].keys():\r\n data_list.append(self.summaryDataDict[x][y])\r\n elif x == 'data params':\r\n for y in self.summaryDataDict[x].keys():\r\n for z in self.summaryDataDict[x][y].keys():\r\n data_list.append(self.summaryDataDict[x][y][z])\r\n else:\r\n for y in self.summaryDataDict[x][k].keys():\r\n data_list.append(self.summaryDataDict[x][k][y]) \r\n # for x in self.summaryDataDict.keys():\r\n # if x in ['measurement params', 'misc']:\r\n # for y in self.summaryDataDict[x].keys():\r\n # data_list.append(self.summaryDataDict[x][y])\r\n # # elif x == 'misc':\r\n # # for y in self.summaryDataDict[x].keys():\r\n # # data_list.append(self.summaryDataDict[x][y])\r\n # else:\r\n # for y in self.summaryDataDict[x][k].keys():\r\n # data_list.append(self.summaryDataDict[x][k][y])\r\n \r\n data_string = '\\t'.join(map(str,data_list)) + '\\t' + \\\r\n self.force_filepath.split('/')[-1][:-4] + '\\t' + \\\r\n zeroforcefile + '\\t' + videofile1 + '\\t' + \\\r\n videofile2 + '\\n'\r\n \r\n # # data_string = str(msrmnt_num) + '\\tY\\t' + k + '\\t' + \\\r\n # # '\\t'.join(map(str, self.speedDict.values())) + '\\t' + \\\r\n # # '\\t'.join(map(str, self.fileDataDict2.values())) + '\\t' + \\\r\n # # '\\t'.join(map(str, 
self.summaryDataDict[k].values())) + '\\t' + \\\r\n # # self.force_filepath.split('/')[-1][:-4] + '\\t' + \\\r\n # # zeroforcefile + '\\t' + videofile1 + '\\t' + \\\r\n # # videofile2 + '\\n'\r\n \r\n with open(summary_filepath_new, \"a\") as f:\r\n f.write(data_string)\r\n \r\n# roi_label.append(k)\r\n# max_area2.append(self.areaDict[\"area2_max\"][i] -\r\n# self.areaDict[\"area2_init\"][i])\r\n# pulloff_area2.append(self.areaDict[\"area2_pulloff\"][i] -\r\n# self.areaDict[\"area2_init\"][i])\r\n# max_area3.append(self.areaDict[\"area3_max\"][i])\r\n# pulloff_area3.append(self.areaDict[\"area3_pulloff\"][i])\r\n# friction_area.append(self.areaDict[\"area_friction\"][i] -\r\n# self.areaDict[\"area2_init\"][i])\r\n# max_contLength.append(self.lengthDict[\"contLength_max\"][i] -\r\n# self.lengthDict[\"contLength_init\"][i])\r\n# pulloff_contLength.append(self.lengthDict[\"contLength_pulloff\"][i] -\r\n# self.lengthDict[\"contLength_init\"][i])\r\n# max_roilen.append(self.lengthDict[\"roilen_max\"][i])\r\n# pulloff_roilen.append(self.lengthDict[\"roilen_pulloff\"][i])\r\n# pulloff_ecc.append(self.lengthDict[\"ecc_pulloff\"][i])\r\n# pulloff_contnum.append(self.lengthDict[\"contnum_pulloff\"][i])\r\n# residue_area2.append(self.areaDict[\"area2_residue\"][i] -\r\n# self.areaDict[\"area2_init\"][i]) \r\n# adhesion.append(self.forceDict[\"force_adhesion1\"][i])\r\n# preload1.append(self.forceDict[\"force_preload1\"][i])\r\n# ## contact_time.append(self.indDict[\"contact_time1\"][i])\r\n# friction.append(self.forceDict[\"force_friction\"][i])\r\n# preload2.append(self.forceDict[\"force_preload2\"][i])\r\n# elip_area.append(self.ellipDict[\"ellipAr_max\"][i])\r\n# elip_per.append(self.ellipDict[\"ellipPer_max\"][i])\r\n# elip_maj.append(self.ellipDict[\"ellipMajr_max\"][i])\r\n# elip_min.append(self.ellipDict[\"ellipMinr_max\"][i])\r\n \r\n# i += 1\r\n \r\n \r\n \r\n # with open(summary_filepath, \"a\") as f:\r\n # f.write(str(max_area2)+'\\t' +\r\n # str(pulloff_area2)+'\\t' +\r\n # str(adhesion) + '\\t' +\r\n # str(preload1) +'\\t' +\r\n # str(self.contact_time1) +'\\t' +\r\n # str(self.speed_um) + '\\t' + str(self.steps) + '\\t' +\r\n # str(friction) + '\\t' +\r\n # str(friction_area) + '\\t' +\r\n # str(preload2) + '\\t' +\r\n # str(msrmnt_num) + '\\tY\\t' +\r\n # str(roi_label) + '\\t' + str(self.speedDict) + '\\t' +\r\n # str(self.forceDict[\"zero1_stdv\"]) + '\\t' +\r\n # str(self.forceDict[\"zero2_stdv\"]) + '\\t' +\r\n # self.slideStep + '\\t' +\r\n # str(max_area3)+'\\t' +\r\n # str(pulloff_area3)+'\\t' +\r\n # str(max_contLength)+'\\t' +\r\n # str(pulloff_contLength)+'\\t' +\r\n # str(max_roilen)+'\\t' +\r\n # str(pulloff_roilen)+'\\t' +\r\n # str(pulloff_ecc)+'\\t' +\r\n # str(pulloff_contnum)+'\\t' +\r\n # str(residue_area2)+'\\t' + str(self.slope) + '\\t' +\r\n # kBeam.split(',')[0] + '\\t' + kBeam.split(',')[1] + '\\t' +\r\n # str(self.deform_init) + '\\t' + str(self.deform_pulloff) + '\\t' +\r\n # str(self.energy_adhesion) + '\\t' +\r\n # str(elip_area)+'\\t' + str(elip_per) + '\\t' +\r\n # str(elip_maj)+'\\t' + str(elip_min) + '\\t' +\r\n # videofile1 + '\\t' +\r\n # self.force_filepath.split('/')[-1][:-4] +\r\n # '\\t' + videofile2 +\r\n # '\\t' + zeroforcefile + '\\n')\r\n \r\n\r\n##a = ForceData()\r\n##a.importData()\r\n##print(\"end\")\r\n####a.getArea([1,2,3], [4,5,6])\r\n####a.plotData()\r\n####a.savePlot(\"C:/Users/sudersanp/Desktop/Work/Codes/Video Analyser/test123\")\r\n####a.saveSummaryData()\r\n##a.plotSummary()\r\n##a.showSummaryPlot()\r\n",
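"# -*- coding: utf-8 -*-\r
# Editor's sketch: a minimal, self-contained version of the two-point weighted\r
# interpolation that ForceData.interpolData (above) uses to resample the\r
# video-rate area data at the force sampling times. The names\r
# interpol_nearest, t_grid and values are illustrative and do not exist in\r
# the original module.\r
\r
def interpol_nearest(t, t_grid, values):\r
    #find the two grid times closest to t, as [distance, time] pairs\r
    t_near = sorted([[abs(a - t), a] for a in t_grid], key=lambda l: l[0])[:2]\r
    wt_sum = t_near[0][0] + t_near[1][0]\r
    if wt_sum == 0: #t coincides with a (duplicated) grid point\r
        return values[t_grid.index(t_near[0][1])]\r
    #each neighbour is weighted by the distance to the *other* neighbour,\r
    #so the closer sample gets the larger weight\r
    wt = [t_near[1][0]/wt_sum, t_near[0][0]/wt_sum]\r
    return (values[t_grid.index(t_near[0][1])]*wt[0] +\r
            values[t_grid.index(t_near[1][1])]*wt[1])\r
\r
#example: interpol_nearest(0.25, [0.0, 0.2, 0.4], [1.0, 2.0, 3.0]) -> 2.25\r
",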
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 27 21:30:37 2020\r\n\r\n@author: adwait\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\nimport logging\r\n\r\n# class ImageTransform:\r\n\r\ndef backgroundSubtract(frame, frame_bg, alpha, inv = False): #subtract background\r\n logging.debug('%s', frame.shape)\r\n if alpha == 1: #direct subtraction if alpha is one\r\n frame_fg = cv2.subtract(255-frame,255-frame_bg)\r\n frame_fg_scaled = 255 - frame_fg\r\n else: #blend\r\n## alpha = 0.5\r\n frame_fg_scaled = cv2.addWeighted(frame, 1 - alpha, 255 - frame_bg,\r\n alpha, 0.0)\r\n frame_subtracted = 255 - frame_fg_scaled if inv == True else frame_fg_scaled\r\n logging.debug(\"bgSubtract\")\r\n return frame_subtracted\r\n\r\ndef applyBrightnessContrast(brightness, contrast, frame):\r\n \r\n if brightness != 0:\r\n if brightness > 0:\r\n shadow = brightness\r\n highlight = 255\r\n else:\r\n shadow = 0\r\n highlight = 255 + brightness\r\n alpha_b = (highlight - shadow)/255\r\n gamma_b = shadow\r\n\r\n buf = cv2.addWeighted(frame, alpha_b, frame, 0, gamma_b)\r\n else:\r\n buf = frame.copy()\r\n\r\n if contrast != 0:\r\n f = 131*(contrast + 127)/(127*(131-contrast))\r\n alpha_c = f\r\n gamma_c = 127*(1-f)\r\n buf = cv2.addWeighted(buf, alpha_c, buf, 0, gamma_c)\r\n \r\n logging.debug(\"brightness\")\r\n return buf\r\n \r\n#histogram correction\r\ndef histogramCorrection(frame, correction_type = 'None', clip_lim = 2, tile_size = 8):\r\n if correction_type == 'None':\r\n return frame\r\n elif correction_type == 'Global':\r\n frame_corrected = cv2.equalizeHist(frame)\r\n elif correction_type == 'Adaptive':\r\n clahe = cv2.createCLAHE(clipLimit=clip_lim, tileGridSize=(tile_size,\r\n tile_size))\r\n frame_corrected = clahe.apply(frame)\r\n\r\n logging.debug(\"histogram correct\")\r\n return frame_corrected\r\n\r\n## def window_show(window_name, frame, posx, posy, resize_fraction):\r\n## cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)\r\n## # sc = 0.5 #window resize scale factor\r\n## h, w = tuple(int(resize_fraction*x) for x in frame.shape[:2]) \r\n## cv2.moveWindow(window_name, posx, posy)\r\n## cv2.resizeWindow(window_name, w, h)\r\n## cv2.imshow(window_name, frame)\r\n \r\n\r\ndef imageFilter(ftype, param1, param2, frame): #image filtering\r\n # roi = self.roiBound\r\n # frame1 = frame[roi[1]:roi[3], roi[0]:roi[2]].copy() #filter inside roi\r\n## frame_gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\r\n## del frame1\r\n if ftype == \"Average Filter\":\r\n frame_filtered = cv2.blur(frame,(param1,param1))\r\n elif ftype == \"Gaussian Filter\":\r\n frame_filtered = cv2.GaussianBlur(frame,(param1,param1),param2)\r\n elif ftype == \"Median Filter\":\r\n frame_filtered = cv2.medianBlur(frame,param1)\r\n elif ftype == \"Bilateral Filter\":\r\n frame_filtered = cv2.bilateralFilter(frame,0,param1,param2)\r\n elif ftype == \"Morph Open\":\r\n rect_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (param1,param2))\r\n frame_filtered = cv2.morphologyEx(frame, cv2.MORPH_OPEN, rect_kernel)\r\n elif ftype == \"Morph Close\":\r\n rect_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (param1,param2))\r\n frame_filtered = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, rect_kernel)\r\n else:\r\n frame_filtered = frame.copy()\r\n\r\n## frame_filtered2 = cv2.cvtColor(frame_filtered.astype(np.uint8),\r\n## cv2.COLOR_GRAY2BGR)\r\n # h, w, s = frame.shape\r\n # l, r, t, d = roi[0], w - roi[2], roi[1], h - roi[3]\r\n # print(h, w, s, t, d, l, r)\r\n # #fill border with zero to equalize frame size\r\n # 
frame_filtered2 = cv2.copyMakeBorder(frame_filtered, t, d, l, r,\r\n # cv2.BORDER_CONSTANT, 0)\r\n \r\n return frame_filtered\r\n \r\n\r\ndef dftFilter(r_lp, r_hp, frame_gray): #DFT Filter (Gaussian Bandpass)\r\n mask_gauss, img_back, img_back_gauss, img_back_scaled, \\\r\n img_filtered, magnitude_spectrum, spectrum_masked = (None,)*7\r\n #DFT\r\n #if frame == None:\r\n # frame = self.frame\r\n # print(\"dft init\", self.roiBound)\r\n # roi = self.roiBound\r\n # print(\"roi\")\r\n # frame1 = frame[roi[1]:roi[3], roi[0]:roi[2]].copy()\r\n logging.debug(\"dft\")\r\n # frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n # del frame\r\n logging.debug('%s', frame_gray.shape)\r\n\r\n dft = cv2.dft(np.float32(frame_gray),flags = cv2.DFT_COMPLEX_OUTPUT)\r\n dft_shift = np.fft.fftshift(dft)\r\n\r\n\r\n magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],\r\n dft_shift[:,:,1]))\r\n rows, cols = frame_gray.shape\r\n # crow,ccol = int(rows/2) , int(cols/2)\r\n logging.debug(\"low pass\")\r\n #Low Pass\r\n kernal = cv2.getGaussianKernel(max(rows, cols), r_lp)\r\n kernal2d = kernal * kernal.transpose()\r\n kernal2d = kernal2d / kernal2d.max()\r\n## kernal2d_inverse = 1 - kernal2d\r\n\r\n mask_lowpass = np.zeros((rows,cols),np.float64)\r\n if r_lp > 0:\r\n mask_lowpass = kernal2d[int((max(rows, cols)-rows)/2):\r\n int((max(rows, cols)+rows)/2),int((max(rows, cols)-cols)/2):\r\n int((max(rows, cols)+cols)/2)] #image sizes must be even integer\r\n else:\r\n mask_lowpass = np.zeros((rows,cols),np.float64)\r\n logging.debug(\"high pass\")\r\n #High Pass\r\n kernal = cv2.getGaussianKernel(max(rows, cols), r_hp)\r\n kernal2d = kernal * kernal.transpose()\r\n kernal2d = kernal2d / kernal2d.max()\r\n kernal2d_inverse = 1 - kernal2d\r\n\r\n mask_highpass = np.ones((rows,cols),np.float64)\r\n if r_hp > 0:\r\n mask_highpass = kernal2d_inverse[int((max(rows, cols)-rows)/2):\r\n int((max(rows, cols)+rows)/2),int((max(rows, cols)-cols)/2):\r\n int((max(rows, cols)+cols)/2)] #image sizes must be even integer\r\n else:\r\n mask_highpass = np.ones((rows,cols),np.float64)\r\n logging.debug(\"band pass\")\r\n #Band Pass\r\n if r_hp <= r_lp and r_lp > 0:\r\n mask_gauss = mask_lowpass * mask_highpass\r\n mask_gauss = mask_gauss/mask_gauss.max()\r\n else:\r\n mask_gauss = np.zeros((rows,cols),np.float64)\r\n\r\n del mask_lowpass, mask_highpass, kernal, kernal2d, kernal2d_inverse\r\n \r\n #Inverse DFT\r\n fshift = dft_shift*np.expand_dims(mask_gauss, axis = 2)\r\n f_ishift = np.fft.ifftshift(fshift)\r\n del fshift\r\n\r\n img_back = cv2.idft(f_ishift)\r\n img_back_gauss = cv2.magnitude(img_back[:,:,0],img_back[:,:,1])\r\n logging.debug('%s, %s', \"img_back_gauss\", img_back_gauss.shape)\r\n spectrum_masked = magnitude_spectrum * mask_gauss\r\n## img_back_scaled = None\r\n img_back_scaled = 255*img_back_gauss/img_back_gauss.max()\r\n img_filtered = img_back_scaled.astype(np.uint8)\r\n # img_filtered = cv2.cvtColor(img_back_scaled.astype(np.uint8),\r\n # cv2.COLOR_GRAY2BGR)\r\n logging.debug(\"dft end\")\r\n # h, w, s = frame.shape\r\n # l, r, t, d = roi[0], w - roi[2], roi[1], h - roi[3]\r\n # print(h, w, s, t, d, l, r)\r\n # #fill border with zero to equalize frame size\r\n # img_filtered1 = cv2.copyMakeBorder(img_filtered, t, d, l, r,\r\n # cv2.BORDER_CONSTANT, 0)\r\n\r\n # print(img_filtered.shape, img_filtered1.shape, frame.shape)\r\n return img_filtered, spectrum_masked\r\n ",
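"# -*- coding: utf-8 -*-\r
# Editor's sketch: a compact, self-contained version of the Gaussian band-pass\r
# mask built inside dftFilter above (a low-pass Gaussian multiplied by an\r
# inverted high-pass Gaussian, cropped to the image size). It assumes a\r
# non-degenerate band, i.e. 0 < r_hp <= r_lp, which dftFilter checks before\r
# applying the mask. gaussian_bandpass_mask and its parameter names are\r
# illustrative and do not exist in the original module.\r
import cv2\r
\r
def gaussian_bandpass_mask(rows, cols, r_lp, r_hp):\r
    n = max(rows, cols)\r
    def gauss2d(sigma):\r
        #2D Gaussian from the outer product of a 1D kernel, peak scaled to 1\r
        k = cv2.getGaussianKernel(n, sigma)\r
        k2 = k * k.transpose()\r
        return k2 / k2.max()\r
    lowpass = gauss2d(r_lp)        #passes frequencies near the spectrum centre\r
    highpass = 1.0 - gauss2d(r_hp) #suppresses frequencies near the centre\r
    mask = lowpass * highpass      #band-pass mask is the product of the two\r
    mask = mask / mask.max()\r
    #crop the square n x n mask down to rows x cols, mirroring dftFilter\r
    return mask[(n - rows)//2:(n + rows)//2, (n - cols)//2:(n + cols)//2]\r
",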
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 29 22:49:51 2020\r\n\r\n@author: adwait\r\n\"\"\"\r\nimport matplotlib\r\nmatplotlib.use('Qt5Agg')\r\nfrom matplotlib.figure import Figure\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport logging\r\nimport pickle\r\n\r\nfrom source.analysis.plot2widget import PlotWidget\r\n# from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\r\n\r\n# from PyQt5.QtWidgets import QWidget, QGroupBox, QGridLayout\r\n\r\nclass Plotting:\r\n \r\n def __init__(self, fitWindow, configPlotWindow):\r\n self.fitWindow = fitWindow\r\n self.configPlotWindow = configPlotWindow\r\n #plot display flags\r\n # self.flag_ca = True\r\n # self.flag_ra = False\r\n # self.flag_cl = False\r\n # self.flag_rl = False\r\n # self.flag_cn = False\r\n # self.flag_ecc = False\r\n # self.flag_vf = True\r\n # self.flag_lf = False \r\n # self.flag_zp = False\r\n # self.flag_xp = False\r\n # self.flag_ap = False\r\n # self.flag_fp = False\r\n # self.flag_st = False\r\n # self.flag_zd = False\r\n # self.invert_latf = False\r\n # self.x_var = 'Time' #x axis default parameter\r\n # self.legendPos = \"upper right\"\r\n # self.fig1_close = True\r\n # self.show_title = True\r\n # self.showLegend2 = True\r\n # self.fontSize = 12\r\n #fitting\r\n # self.flag_fit = False\r\n # self.fit_x = 'Vertical Position (μm)'\r\n # self.fit_y = 'Vertical Force'\r\n # self.startFit = 0\r\n # self.endFit = 100\r\n self.fit_pos = [0.5,0.5]\r\n # self.fit_show = False\r\n self.slope = ''\r\n self.slope_unit = ''\r\n self.fitTextBox = None\r\n \r\n self.markerlist = [\"o\", \"v\", \"^\", \"s\", \"P\", \"*\", \"D\", \"<\", \"X\", \">\"]\r\n self.linelist = [\":\", \"-.\", \"--\", \"-\", \":\", \"-.\", \"--\", \"-\", \":\", \"-.\"]\r\n # num = len(self.rangeDict.keys())\r\n # self.colorDict = {'Green':plt.cm.Greens([0.7, 0.5, 0.9, 0.3, 1]),\r\n # 'Blue':plt.cm.Blues([0.7, 0.5, 0.9, 0.3, 1]),\r\n # 'Copper':plt.cm.copper([0.3, 0.9, 0.5, 0.7, 1]),\r\n # 'Wistia':plt.cm.Wistia([0.7, 0.5, 0.9, 0.3, 1])}\r\n self.sourceLabel = None\r\n #initialize figure with random data\r\n self.fig1 = Figure(figsize=(11, 5), dpi=100)\r\n ax = self.fig1.add_subplot(111)\r\n self.xAxisData = np.linspace(0, 4, 50)\r\n ydata = np.sin(self.xAxisData)\r\n self.fixYLimits = True\r\n ax.plot(self.xAxisData, ydata, 'r-', linewidth=1, markersize=1)\r\n \r\n self.plotWidget = PlotWidget(fig = self.fig1,\r\n cursor1_init=0,\r\n cursor2_init=None,\r\n fixYLimits = self.fixYLimits,\r\n method = self.updatePosition)\r\n\r\n \r\n def plotData(self, imageDataUnitDict): #prepare plot\r\n #plot settings parameters\r\n self.x_var = self.configPlotWindow.plotDict['plot settings']['x axis'].currentText() #x axis default parameter\r\n legendPos = self.configPlotWindow.plotDict['plot settings']['legend position'].text()\r\n show_title = self.configPlotWindow.plotDict['plot settings']['show title'].isChecked()\r\n showLegend2 = self.configPlotWindow.plotDict['plot settings']['show step legend'].isChecked()\r\n # figsize = self.configPlotWindow.plotDict['plot settings']['figure size'].text()\r\n plot_style = self.configPlotWindow.plotDict['plot settings']['style'].currentText()\r\n self.fontSize = self.configPlotWindow.plotDict['plot settings']['font size'].value()\r\n self.lineWidth = self.configPlotWindow.plotDict['plot settings']['line width'].value()\r\n self.markerSize = self.configPlotWindow.plotDict['plot settings']['marker size'].value()\r\n self.opacity = self.configPlotWindow.plotDict['plot 
settings']['opacity'].value()\r\n self.fixYLimits = self.configPlotWindow.plotDict['plot settings']['fix y bounds'].isChecked()\r\n self.zeroShiftY = self.configPlotWindow.plotDict['plot settings']['zero shift Y'].isChecked()\r\n # self.plot_slice = self.configPlotWindow.plotDict['plot settings']['plot range']\r\n stress_show = self.configPlotWindow.plotDict['extras']['Stress'].isChecked()\r\n adhesion_show = self.configPlotWindow.plotDict['extras']['Adhesion'].isChecked()\r\n friction_show = self.configPlotWindow.plotDict['extras']['Friction'].isChecked()\r\n steps_show = self.configPlotWindow.plotDict['extras']['Steps'].isChecked()\r\n self.fit_show = self.configPlotWindow.plotDict['extras']['Fit'].isChecked()\r\n \r\n deform_range = eval(self.analyzeDataWindow.dataAnalDict['misc settings']['deformation range'].text())\r\n \r\n # xDict = {'Vertical Position (μm)':self.dist_vert1,\r\n # 'Lateral Position (μm)':self.dist_lat1,\r\n # 'Deformation (μm)':self.deform_vert,\r\n # 'Time (s)':self.time1}\r\n # self.xAxisData = xDict.get(self.x_var)\r\n \r\n # self.fileDataDict = {'Time': self.time1,\r\n # 'Vertical force': self.force_vert1_shifted,\r\n # 'Lateral force': self.force_lat1_shifted,\r\n # 'Vertical piezo': self.dist_vert1,\r\n # 'Lateral piezo': self.dist_lat1,\r\n # 'Deformation': self.deform_vert}\r\n \r\n #image data units\r\n self.imageDataUnitDict = imageDataUnitDict\r\n \r\n self.xAxisData = self.fileDataDict.get(self.x_var)\r\n \r\n # markerlist = [\"o\", \"v\", \"^\", \"s\", \"P\", \"*\", \"D\", \"<\", \"X\", \">\"]\r\n seaborn_styles = ['seaborn-bright','seaborn-colorblind','seaborn-dark-palette',\r\n 'seaborn-dark','seaborn-darkgrid','seaborn-deep','seaborn-muted',\r\n 'seaborn-notebook','seaborn-paper','seaborn-pastel',\r\n 'seaborn-poster','seaborn-talk','seaborn-ticks',\r\n 'seaborn-white','seaborn-whitegrid']\r\n \r\n matplotlib.style.use(\"default\")\r\n if plot_style in seaborn_styles: #initialize seaborn\r\n matplotlib.style.use(\"seaborn\")\r\n matplotlib.style.use(plot_style)\r\n \r\n self.fig1.set_facecolor(matplotlib.rcParams[\"figure.facecolor\"])\r\n self.fig1.set_edgecolor(matplotlib.rcParams[\"figure.edgecolor\"])\r\n matplotlib.rcParams.update({'font.size': self.fontSize})\r\n self.fig1.set_size_inches(self.fig1.get_size_inches())\r\n\r\n logging.debug(\"fig resize\")\r\n # self.fig1.canvas.draw()\r\n # self.fig1 = plt.figure(num=\"Force/Area vs Time\", figsize = [11, 5])\r\n # self.fig1 = Figure(figsize=(11, 5), dpi=100)\r\n \r\n # self.fig1.canvas.mpl_connect('close_event', self.handle_close)\r\n \r\n logging.debug(\"fig1\")\r\n \r\n #store cursor position values before clearing plot\r\n # if self.plotWidget.wid.cursor1 == None:\r\n # c1_init = None\r\n # else:\r\n c1_init = self.plotWidget.wid.cursor1.get_xdata()[0]\r\n \r\n # if self.plotWidget.wid.cursor2 == None:\r\n c2_init = None\r\n # else:\r\n # c2_init = self.plotWidget.wid.cursor2.get_xdata()[0]\r\n \r\n self.fig1.clear()\r\n # self.fig1.suptitle(\"Hey there!\")\r\n # self.fig1.__init__(figsize=(11, 5), dpi=100)\r\n # self.plotWidget.fig = self.fig1\r\n\r\n # self.plotWidget.__init__(fig = self.fig1,\r\n # cursor1_init=c1_init,\r\n # cursor2_init=c2_init,\r\n # fixYLimits = self.fixYLimits,\r\n # method = self.updatePosition)\r\n \r\n ax1 = self.fig1.add_subplot(1,1,1)\r\n ax1.yaxis.set_visible(False)\r\n ax1.set_xlabel(self.x_var + self.unitDict[self.x_var])\r\n lns = []\r\n \r\n # ax1.set_title('Speed = ' + str(self.speed_um) + ' μm/s')\r\n # if self.flag_vf == True: \r\n # 
ax1.yaxis.set_visible(True)\r\n # # ax1.spines['right'].set_position(('outward', 0))\r\n # # ax1.spines[\"right\"].set_visible(True) \r\n # # ax1.yaxis.set_label_position('right')\r\n # # ax1.yaxis.set_ticks_position('right')\r\n # self.updateAxisPos(ax1, 'left', 0) \r\n # ax1.set_xlabel(self.x_var)\r\n # ax1.set_ylabel('Vertical Force (μN)', color = 'r')\r\n # p1, = ax1.plot(self.xAxisData[self.plot_slice], self.force_vert1_shifted[self.plot_slice], 'ro',\r\n # alpha=0.5, linewidth=1, markersize=1, label=\"Vertical Force\")\r\n # lns.append(p1)\r\n \r\n\r\n # self.plotWidget.mpl_connect('close_event', self.handle_close)\r\n if steps_show == True:\r\n if self.ptsnumber != 0:\r\n ## ptsperstep = int(self.ptsnumber/self.step_num)\r\n i = 0\r\n lns_reg = [] #region legend handle\r\n lab_reg = [] #region legend label\r\n speed_inview = [] #speed list in plot range\r\n for a in self.steps: #shade step regions\r\n if i < ((self.plot_slice.start+1)/self.ptsperstep)-1:\r\n i += 1\r\n continue\r\n \r\n if self.ptsperstep*(i+1)-1 > self.plot_slice.stop:\r\n endpoint = self.plot_slice.stop - 1\r\n exit_flag = True\r\n else:\r\n endpoint = self.ptsperstep*(i+1) - 1\r\n exit_flag = False\r\n \r\n if self.ptsperstep*i < self.plot_slice.start:\r\n startpoint = self.plot_slice.start\r\n else:\r\n startpoint = self.ptsperstep*i \r\n \r\n x_start = min(self.xAxisData[startpoint:endpoint])\r\n x_end = max(self.xAxisData[startpoint:endpoint])\r\n if a == 'Front':\r\n v1 = ax1.axvspan(x_start, x_end, alpha=0.9,\r\n color='aliceblue', label = a)\r\n lns_reg.append(v1)\r\n lab_reg.append(a)\r\n speed_inview.append(self.speed_um[i])\r\n if exit_flag == True:\r\n break\r\n elif a == 'Back':\r\n v2 = ax1.axvspan(x_start, x_end, alpha=0.9,\r\n color='whitesmoke', label = a)\r\n lns_reg.append(v2)\r\n lab_reg.append(a)\r\n speed_inview.append(self.speed_um[i])\r\n if exit_flag == True:\r\n break \r\n elif a == 'Up':\r\n v3 = ax1.axvspan(x_start, x_end, alpha=0.9,\r\n color='honeydew', label = a)\r\n lns_reg.append(v3)\r\n lab_reg.append(a)\r\n speed_inview.append(self.speed_um[i])\r\n if exit_flag == True:\r\n break \r\n elif a == 'Down':\r\n v4 = ax1.axvspan(x_start, x_end, alpha=0.9,\r\n color='linen', label = a)\r\n lns_reg.append(v4)\r\n lab_reg.append(a)\r\n speed_inview.append(self.speed_um[i])\r\n if exit_flag == True:\r\n break \r\n elif a == 'Pause':\r\n v5 = ax1.axvspan(x_start, x_end, alpha=0.9,\r\n color='lightyellow', label = a)\r\n lns_reg.append(v5)\r\n lab_reg.append(a)\r\n speed_inview.append(self.speed_um[i])\r\n if exit_flag == True:\r\n break\r\n i += 1\r\n \r\n if show_title == True:\r\n title = 'Speed = ' + str(speed_inview).replace('[','').replace(']','') + ' μm/s'\r\n ax1.text(0.5, 1.05, title, ha = 'center', transform=ax1.transAxes, \r\n fontsize = 'large', color = 'black', picker = 5)\r\n # self.fig1.suptitle(title)\r\n # ax1.set_title('Speed = ' + str(speed_inview).replace('[','').replace(']','') \r\n # + ' μm/s')\r\n if showLegend2 == True:\r\n dict_reg = dict(zip(lab_reg, lns_reg)) #legend dictionary (remove dup)\r\n self.fig1.legend(dict_reg.values(), dict_reg.keys(), loc='lower right',\r\n ncol=len(lns_reg))\r\n\r\n self.imageAxisDict = {} #active axes of image analysis data\r\n self.plotAxisDict = {} #active axes of all plots\r\n \r\n #plot datafile/image curves\r\n for source in ['datafile', 'image']:\r\n for category in self.configPlotWindow.plotDict[source].keys():\r\n category_dict = self.configPlotWindow.plotDict[source][category]\r\n # ax2 = ax1.twinx()\r\n # 
ax2.yaxis.set_visible(False)\r\n ax2 = None\r\n i = 0 #axis counter for each category\r\n for curve in category_dict['curves'].keys():\r\n if category_dict['combine'].isChecked() == True:\r\n # ax2 = ax1.twinx()\r\n # ax2.yaxis.set_visible(False)\r\n curve_ax = list(category_dict['curves'].keys())[0]\r\n ylabel = category\r\n i = 0 \r\n else:\r\n # ax2 = ax1\r\n curve_ax = curve\r\n ylabel = curve\r\n i += 1\r\n if source == 'datafile':\r\n ax2, lns = self.plotFileData(ax1, ax2, curve, i, ylabel,\r\n category_dict['curves'][curve], \r\n category_dict['curves'][curve_ax], \r\n lns)\r\n elif source == 'image':\r\n ax2, lns = self.plotImageData(ax1, ax2,\r\n curve, i, ylabel,\r\n category_dict['curves'][curve],\r\n category_dict['curves'][curve_ax], \r\n lns)\r\n\r\n\r\n i = 0\r\n self.imageAxisDict['pulloff area point'] = []\r\n self.imageAxisDict['friction area point'] = []\r\n for k in self.dataDict.keys():\r\n if len(self.dataDict.keys()) > 1 and k == \"Default\":\r\n continue\r\n if adhesion_show == True: #show adhesion calc \r\n if 'Vertical force' in self.plotAxisDict.keys():\r\n if self.zeroShiftY == True:\r\n zero_val = self.fileDataDict['Vertical force'][self.plot_slice.start]\r\n else:\r\n zero_val = 0 \r\n ax = self.plotAxisDict['Vertical force']\r\n ax.axhline(y=self.forceDict[k][\"zero1\"]-zero_val, color='y',\r\n alpha=1, linestyle=self.linelist[i], linewidth=1) \r\n ax.axhline(y=self.forceDict[k][\"force_min1\"]-zero_val, color='y',\r\n alpha=1, linestyle=self.linelist[i], linewidth=1)\r\n ax.axhline(y=self.forceDict[k][\"force_max1\"]-zero_val, color='y',\r\n alpha=1, linestyle=self.linelist[i], linewidth=1)\r\n ax.axvline(x=self.xAxisData[self.fileDataDict['Time'].index(self.indDict[k][\"time1_max\"])], \r\n color='y', alpha=1, linestyle=self.linelist[i], linewidth=1)\r\n if 'Contact area' in self.plotAxisDict.keys():\r\n if self.zeroShiftY == True:\r\n zero_val_area = self.dataDict[k][\"Contact area\"][self.plot_slice2.start]\r\n else:\r\n zero_val_area = 0 \r\n ax = self.plotAxisDict['Contact area']\r\n p, = ax.plot(self.indDict[k][\"time1_max\"],\r\n self.areaDict[k][\"area2_pulloff\"]-zero_val_area,\r\n 'y' + self.markerlist[i], \r\n markersize = 2*self.markerSize, alpha=0.8)\r\n self.imageAxisDict['pulloff area point'].append(p)\r\n if self.fixYLimits == True:\r\n self.plotWidget.wid.artistDict['pulloff area point' + \r\n \": \" + k] = p\r\n p.set_animated(True)\r\n\r\n if friction_show == True and 'Lateral force' in self.plotAxisDict.keys(): #show friction calc\r\n if 'Lateral force' in self.plotAxisDict.keys():\r\n if self.zeroShiftY == True:\r\n zero_val = self.fileDataDict['Lateral force'][self.plot_slice.start]\r\n zero_val_vert = self.fileDataDict['Vertical force'][self.plot_slice.start]\r\n else:\r\n zero_val = 0 \r\n zero_val_vert = 0\r\n ax = self.plotAxisDict['Lateral force']\r\n ax.axhline(y=self.forceDict[k][\"zero2\"]-zero_val,\r\n color='g', alpha=0.5,\r\n linestyle=self.linelist[i], linewidth=1) \r\n ax.axhline(y=self.forceDict[k][\"force_lat_max\"]-zero_val,\r\n color='g', alpha=1,\r\n linestyle=self.linelist[i], linewidth=1)\r\n ax.axhline(y=self.forceDict[k][\"force_lat_min\"]-zero_val,\r\n color='g', alpha=1,\r\n linestyle=self.linelist[i], linewidth=1)\r\n ax.axvline(x=self.xAxisData[self.fileDataDict['Time'].index(self.indDict[k][\"time1_lat_avg\"])],\r\n color='g', alpha=1,\r\n linestyle=self.linelist[i], linewidth=1)\r\n if 'Vertical force' in self.plotAxisDict.keys():\r\n ax = self.plotAxisDict['Vertical force'] #friction preload\r\n 
ax.axhline(y=self.forceDict[k][\"force_max2\"]-zero_val_vert,\r\n color='g', alpha=1,\r\n linestyle=self.linelist[i], linewidth=1)\r\n # print(\"legends\",curve, lns)\r\n if 'Contact area' in self.plotAxisDict.keys():\r\n if self.zeroShiftY == True:\r\n zero_val_area = self.dataDict[k][\"Contact area\"][self.plot_slice2.start]\r\n else:\r\n zero_val_area = 0 \r\n ax = self.plotAxisDict['Contact area']\r\n p, = ax.plot(self.indDict[k][\"time1_lat_avg\"],\r\n self.areaDict[k][\"area_friction\"]-zero_val_area,\r\n 'g' + self.markerlist[i], \r\n markersize = 2*self.markerSize, alpha=0.8)\r\n self.imageAxisDict['friction area point'].append(p)\r\n if self.fixYLimits == True:\r\n self.plotWidget.wid.artistDict['friction area point' + \r\n \": \" + k] = p\r\n p.set_animated(True)\r\n\r\n i += 1\r\n \r\n #delete these if not plotted\r\n if len(self.imageAxisDict['pulloff area point']) == 0:\r\n del self.imageAxisDict['pulloff area point']\r\n if len(self.imageAxisDict['friction area point']) == 0:\r\n del self.imageAxisDict['friction area point'] \r\n \r\n\r\n if adhesion_show == True: #show adhesion calc\r\n if 'Vertical force' in self.plotAxisDict.keys():\r\n if self.zeroShiftY == True:\r\n zero_val = self.fileDataDict['Vertical force'][self.plot_slice.start]\r\n else:\r\n zero_val = 0\r\n ydata = [x - zero_val for x in self.fileDataDict['Vertical force'][self.energy_slice]]\r\n ax = self.plotAxisDict['Vertical force'] \r\n #fill adhesion energy region \r\n #CHECK! zero of last roi taken for energy shading\r\n roi_key = list(self.forceDict.keys())[-1] \r\n ax.fill_between(self.xAxisData[self.energy_slice],\r\n self.forceDict[roi_key][\"zero1\"]-zero_val,\r\n ydata,\r\n color = 'black', alpha = 0.3) \r\n ax.axvline(x=self.xAxisData[deform_range[0]], color='violet', \r\n alpha=1, linestyle=\":\", linewidth=1)\r\n ax.axvline(x=self.xAxisData[deform_range[1]], color='violet', \r\n alpha=1, linestyle=\":\", linewidth=1)\r\n # i = 0\r\n # for k in self.rangeDict.keys():\r\n # if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n # continue\r\n # ax1.axhline(y=self.forceDict[\"zero1\"][i], color='y',\r\n # alpha=1, linestyle=self.linelist[i], linewidth=1) \r\n # ax1.axhline(y=self.forceDict[\"force_min1\"][i], color='y',\r\n # alpha=1, linestyle=self.linelist[i], linewidth=1)\r\n # ax1.axhline(y=self.forceDict[\"force_max1\"][i], color='y',\r\n # alpha=1, linestyle=self.linelist[i], linewidth=1)\r\n # ax1.axvline(x=self.xAxisData[self.time1.index(self.indDict[\"time1_max\"][i])], \r\n # color='y', alpha=1, linestyle=self.linelist[i], linewidth=1)\r\n # i += 1\r\n\r\n # if self.flag_ca == True or self.flag_ra == True: \r\n \r\n# ax2 = ax1.twinx() #secondary axis\r\n# ## cmap = plt.cm.get_cmap(\"Reds\") # type: matplotlib.colors.ListedColormap\r\n# num = len(self.rangeDict.keys())\r\n# ## colors = plt.cm.Reds(np.linspace(0.3,1,num))\r\n# colors = plt.cm.Greens([0.7, 0.5, 0.9, 0.3, 1])\r\n# # colors = plt.cm.Greens(np.linspace(0.2,0.7,num))\r\n# ax2.set_prop_cycle(color=colors)\r\n# ax2.set_ylabel('Area ($' + unit + '^2$)', color = 'g')\r\n# if self.flag_ca == True:\r\n# i = 0\r\n# for k in self.rangeDict.keys():\r\n# if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n# continue\r\n# p2, = ax2.plot(self.time2[self.plot_slice2],\r\n# self.dataDict[k][0][self.plot_slice2],\r\n# '-' + markerlist[i], alpha=0.5,\r\n# linewidth=1, markersize=2,\r\n# label=\"Contact Area: \" + k)\r\n# # p2.set_animated(True) #BLIT THIS CHECK!!!\r\n# lns.append(p2)\r\n# if self.flag_ap == True: #adhesion calc\r\n# 
ax2.plot(self.indDict[\"time1_max\"][i],\r\n# self.areaDict[\"area2_pulloff\"][i],\r\n# 'y' + markerlist[i], alpha=0.8)\r\n# if self.flag_fp == True: #friction calc\r\n# ax2.plot(self.indDict[\"time1_lat_avg\"][i],\r\n# self.areaDict[\"area_friction\"][i],\r\n# 'g' + markerlist[i], alpha=0.8)\r\n# i += 1\r\n# if self.flag_ra == True: #consider first key since auto roi is same for all keys\r\n# colors = plt.cm.Blues([0.7, 0.5, 0.9, 0.3, 1])\r\n# ax2.set_prop_cycle(color=colors)\r\n# j = 0\r\n# for k in self.rangeDict.keys():\r\n# if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n# continue \r\n# p3, = ax2.plot(self.time2[self.plot_slice2],\r\n# self.dataDict[k][3][self.plot_slice2],\r\n# '-' + markerlist[j], alpha=0.5, linewidth=1, markersize=2,\r\n# label=\"ROI Area: \" + k)\r\n# lns.append(p3)\r\n# j += 1\r\n\r\n \r\n# if self.flag_lf == True:\r\n# ax3 = ax1.twinx() #lateral force\r\n# ax3.set_ylabel('Lateral Force (μN)', color = 'c')\r\n# # ax3.spines['left'].set_position(('outward', int(6*self.fontSize)))\r\n# # ax3.spines[\"left\"].set_visible(True)\r\n# # ax3.yaxis.set_label_position('left')\r\n# # ax3.yaxis.set_ticks_position('left')\r\n# self.updateAxisPos(ax3, 'left', 6)\r\n# self.updateAxisPos(ax1, 'left', 0)\r\n# if self.invert_latf == True:\r\n# ax3.invert_yaxis()\r\n# if self.flag_lf == True:\r\n# p4, = ax3.plot(self.xAxisData[self.plot_slice], self.force_lat1_shifted[self.plot_slice], 'co',\r\n# alpha=0.5, linewidth=1, markersize=1, label=\"Lateral Force\")\r\n\r\n# ## if self.flag_lf_filter == True:\r\n# ## p4, = ax3.plot(self.time1[self.plot_slice], self.force_lat1_filtered_shifted[self.plot_slice], '-c',\r\n# ## alpha=0.5, linewidth=1, label=\"Lateral Force\")\r\n\r\n # if friction_show == True: #show friction calc\r\n # # i = 0\r\n # # for k in self.rangeDict.keys():\r\n # # if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n # # continue\r\n # # ax3.axhline(y=self.forceDict[\"force_lat_max\"][i],\r\n # # color='g', alpha=1,\r\n # # linestyle=self.linelist[i], linewidth=1)\r\n # # ax3.axhline(y=self.forceDict[\"force_lat_min\"][i],\r\n # # color='g', alpha=1,\r\n # # linestyle=self.linelist[i], linewidth=1)\r\n # # ax1.axhline(y=self.forceDict[\"force_max2\"][i],\r\n # # color='g', alpha=1,\r\n # # linestyle=self.linelist[i], linewidth=1)\r\n # # ax3.axvline(x=self.xAxisData[self.time1.index(self.indDict[\"time1_lat_avg\"][i])],\r\n # # color='g', alpha=1,\r\n # # linestyle=self.linelist[i], linewidth=1)\r\n # # # ax2.plot(self.indDict[\"time1_lat_avg\"][i],\r\n # # # self.areaDict[\"area_friction\"][i],\r\n # # # 'g' + markerlist[i], alpha=0.8)\r\n # # i += 1\r\n # if 'Lateral force' in self.plotAxisDict.keys(): #only one zero line plotted\r\n # ax = self.plotAxisDict['Lateral force']\r\n # ax.axhline(y=self.forceDict[\"zero2\"],\r\n # color='g', alpha=0.5,\r\n # linestyle=self.linelist[0], linewidth=1) \r\n# lns.append(p4)\r\n# else:\r\n# ax3 = None\r\n\r\n# if self.flag_zp == True or self.flag_xp == True or self.flag_zd: #piezo position/deformation\r\n# ax4 = ax1.twinx() #piezo waveform\r\n# ax4.set_ylabel('Displacement (μm)', color = 'violet')\r\n# if self.flag_ca == True or self.flag_ra == True: #shift axis if area plotted\r\n# ax4.spines['right'].set_position(('outward', int(7*self.fontSize)))\r\n# ## ax4.invert_yaxis()\r\n# self.updateAxisPos(ax1, 'left', 0)\r\n# if self.flag_zp == True:\r\n# p5, = ax4.plot(self.xAxisData[self.plot_slice], self.dist_vert1[self.plot_slice], '-',\r\n# markersize=1, color = 'violet',\r\n# alpha=0.5, label=\"Vertical 
Piezo\")\r\n# lns.append(p5)\r\n# if self.flag_xp == True:\r\n# p6, = ax4.plot(self.xAxisData[self.plot_slice], self.dist_lat1[self.plot_slice], '-.',\r\n# markersize=1, color = 'violet',\r\n# alpha=0.5, label=\"Lateral Piezo\")\r\n# lns.append(p6)\r\n# if self.flag_zd == True: #actual deformation plot\r\n# p12, = ax4.plot(self.xAxisData[self.plot_slice], self.deform_vert[self.plot_slice], '-o',\r\n# markersize=1, color = 'violet',\r\n# alpha=0.5, label=\"Deformation\")\r\n # if adhesion_show == True:\r\n # ax1.axvline(x=self.xAxisData[self.deform_tol], color='violet', \r\n # alpha=1, linestyle=\":\", linewidth=1)\r\n# lns.append(p12)\r\n \r\n \r\n # image analysis data plotting\r\n# self.imageAxisDict = {} #active axes of image analysis data\r\n\r\n# if self.flag_ca == True: #contact area\r\n# ax2 ,lns, p = self.plotImageData(ax_primary = ax1, \r\n# ax_secondary = None,\r\n# spine_type = 'right',\r\n# spine_position= 0, \r\n# yaxis_label = 'Area ($' + unit + '^2$)', \r\n# ylabel = 'Contact Area', \r\n# y_index = 0, \r\n# color = 'Green', \r\n# legend = lns)\r\n# self.imageAxisDict[0] = p\r\n # i = 0\r\n # for k in self.rangeDict.keys():\r\n # if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n # continue\r\n # if adhesion_show == True: #adhesion calc\r\n # p, = ax2.plot(self.indDict[\"time1_max\"][i],\r\n # self.areaDict[\"area2_pulloff\"][i],\r\n # 'y' + self.markerlist[i], alpha=0.8)\r\n # self.imageAxisDict['pulloff area point'] = [p]\r\n # if self.fixYLimits == True:\r\n # self.plotWidget.wid.artistDict['pulloff area point'] = p\r\n # p.set_animated(True)\r\n # if friction_show == True: #friction calc\r\n # p, = ax2.plot(self.indDict[\"time1_lat_avg\"][i],\r\n # self.areaDict[\"area_friction\"][i],\r\n # 'g' + self.markerlist[i], alpha=0.8)\r\n # self.imageAxisDict['friction area point'] = [p]\r\n # if self.fixYLimits == True:\r\n # self.plotWidget.wid.artistDict['friction area point'] = p\r\n # p.set_animated(True)\r\n # i += 1\r\n# if self.flag_ra == True: #roi area\r\n# ax2 ,lns, p = self.plotImageData(ax_primary = ax1, \r\n# ax_secondary = ax2,\r\n# spine_type = 'right',\r\n# spine_position= 0, \r\n# yaxis_label = 'Area ($' + unit + '^2$)', \r\n# ylabel = 'ROI Area', \r\n# y_index = 3, \r\n# color = 'Blue', \r\n# legend = lns)\r\n# self.imageAxisDict[3] = p\r\n# elif self.flag_ra == True: #roi area\r\n# ax2 ,lns, p = self.plotImageData(ax_primary = ax1, \r\n# ax_secondary = None,\r\n# spine_type = 'right',\r\n# spine_position= 0, \r\n# yaxis_label = 'Area ($' + unit + '^2$)', \r\n# ylabel = 'ROI Area', \r\n# y_index = 3, \r\n# color = 'Blue', \r\n# legend = lns)\r\n# self.imageAxisDict[3] = p\r\n \r\n# # if self.flag_cl == True or self.flag_rl == True:\r\n# if self.flag_ca == True or self.flag_ra == True:\r\n# spine_pos = 7\r\n# else:\r\n# spine_pos = 0\r\n \r\n# if self.flag_cl == True: #contact length \r\n# ax2 ,lns, p = self.plotImageData(ax_primary = ax1, \r\n# ax_secondary = None,\r\n# spine_type = 'right',\r\n# spine_position= spine_pos, \r\n# yaxis_label = 'Length ($' + unit + '$)', \r\n# ylabel = 'Contact Length', \r\n# y_index = 1, \r\n# color = 'Copper', \r\n# legend = lns)\r\n# self.imageAxisDict[1] = p\r\n# if self.flag_rl == True: #roi length\r\n# ax2 ,lns, p = self.plotImageData(ax_primary = ax1, \r\n# ax_secondary = ax2,\r\n# spine_type = 'right',\r\n# spine_position= spine_pos, \r\n# yaxis_label = 'Length ($' + unit + '$)', \r\n# ylabel = 'ROI Length', \r\n# y_index = 4, \r\n# color = 'Wistia', \r\n# legend = lns)\r\n# self.imageAxisDict[4] = p\r\n# elif 
self.flag_rl == True: #roi length\r\n# ax2 ,lns, p = self.plotImageData(ax_primary = ax1, \r\n# ax_secondary = None,\r\n# spine_type = 'right',\r\n# spine_position= spine_pos, \r\n# yaxis_label = 'Length ($' + unit + '$)', \r\n# ylabel = 'ROI Length', \r\n# y_index = 4, \r\n# color = 'Wistia', \r\n# legend = lns)\r\n# self.imageAxisDict[4] = p\r\n \r\n\r\n\r\n# # ax5 = ax1.twinx()\r\n# # num = len(self.rangeDict.keys())\r\n# # colors = plt.cm.copper(np.linspace(0.2,0.7,num))\r\n# # ax5.set_prop_cycle(color=colors)\r\n# # ax5.set_ylabel('Length ($' + unit + '$)', color = 'brown')\r\n# # if self.flag_ca == True or self.flag_ra == True: \r\n# # ax5.spines['right'].set_position(('outward', int(7*self.fontSize))) \r\n# # if self.flag_cl == True: #contact length\r\n# # i = 0\r\n# # for k in self.rangeDict.keys():\r\n# # if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n# # continue \r\n# # p7, = ax5.plot(self.time2[self.plot_slice2],\r\n# # self.dataDict[k][1][self.plot_slice2],\r\n# # '-' + markerlist[i], alpha=0.5, linewidth=1,\r\n# # markersize=2, label=\"Contact Length: \" + k)\r\n# # lns.append(p7)\r\n# # i += 1\r\n# # if self.flag_rl == True: #roi length\r\n# # ## ax5 = ax1.twinx()\r\n# # num = len(self.rangeDict.keys())\r\n# # colors = plt.cm.Wistia(np.linspace(0.2,0.7,num))\r\n# # ax5.set_prop_cycle(color=colors)\r\n# # ## ax5.spines['right'].set_position(('outward', 70))\r\n# # j = 0\r\n# # for k in self.rangeDict.keys():\r\n# # if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n# # continue\r\n# # ## ax5.set_ylabel('Length ($' + unit + '$)', color = 'brown')\r\n# # p8, = ax5.plot(self.time2[self.plot_slice2],\r\n# # self.dataDict[k][4][self.plot_slice2],\r\n# # '-' + markerlist[j], alpha=0.5, linewidth=1,\r\n# # markersize=2, label=\"ROI Length: \" + k)\r\n# # lns.append(p8)\r\n# # j += 1\r\n# if self.flag_cn == True: #contact number\r\n# # num = len(self.rangeDict.keys())\r\n# # colors = plt.cm.copper(np.linspace(0.2,0.7,num))\r\n# ax2 ,lns, p = self.plotImageData(ax_primary = ax1, \r\n# ax_secondary = None,\r\n# spine_type = 'right',\r\n# spine_position=7, \r\n# yaxis_label = 'Number', \r\n# ylabel = 'Contact Number', \r\n# y_index = 2, \r\n# color = 'Copper', \r\n# legend = lns)\r\n# self.imageAxisDict[2] = p\r\n# # ax5 = ax1.twinx()\r\n# # num = len(self.rangeDict.keys())\r\n# # colors = plt.cm.copper(np.linspace(0.2,0.7,num))\r\n# # ax5.set_prop_cycle(color=colors)\r\n# # ax5.spines['right'].set_position(('outward', int(7*self.fontSize)))\r\n# # i = 0\r\n# # for k in self.rangeDict.keys():\r\n# # if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n# # continue\r\n# # ax5.set_ylabel('Number', color = 'brown')\r\n# # p9, = ax5.plot(self.time2[self.plot_slice2],\r\n# # self.dataDict[k][2][self.plot_slice2],\r\n# # '-' + markerlist[i], alpha=0.5, linewidth=1,\r\n# # markersize=2, label=\"Contact Number: \" + k)\r\n# # lns.append(p9)\r\n# # i += 1\r\n# if self.flag_ecc == True: #contact eccentricity\r\n# ax2 ,lns, p = self.plotImageData(ax_primary = ax1,\r\n# ax_secondary = None,\r\n# spine_type = 'right',\r\n# spine_position = 7, \r\n# yaxis_label = 'Eccentricity', \r\n# ylabel = 'Median Eccentricity', \r\n# y_index = 5, \r\n# color = 'Copper', \r\n# legend = lns)\r\n# self.imageAxisDict[5] = p\r\n # ax5 = ax1.twinx()\r\n # num = len(self.rangeDict.keys())\r\n # colors = plt.cm.copper(np.linspace(0.2,0.7,num))\r\n # ax5.set_prop_cycle(color=colors)\r\n # ax5.spines['right'].set_position(('outward', int(7*self.fontSize)))\r\n # i = 0\r\n # for k in 
self.rangeDict.keys():\r\n # if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n # continue\r\n # ax5.set_ylabel('Eccentricity' + unit + '$)', color = 'brown')\r\n # p10, = ax5.plot(self.time2[self.plot_slice2],\r\n # self.dataDict[k][5][self.plot_slice2],\r\n # '-' + markerlist[i], alpha=0.5, linewidth=1,\r\n # markersize=2, label=\"Median Eccentricity: \" + k)\r\n # lns.append(p10)\r\n # i += 1\r\n \r\n if stress_show == True: #or self.flag_lf_filter == True: #stress CHECK!\r\n ax6 = ax1.twinx()\r\n area_unit = self.imageDataUnitDict['Contact area'].split('[')[1].split(']')[0]\r\n ax6.set_ylabel('Stress (μN/$' + area_unit + '$)', color = 'c')\r\n # ax6.spines['left'].set_position(('outward', int(6*self.fontSize)))\r\n # ax6.spines[\"left\"].set_visible(True)\r\n # ax6.yaxis.set_label_position('left')\r\n # ax6.yaxis.set_ticks_position('left')\r\n self.updateAxisPos(ax6, 'left', 6)\r\n # self.updateAxisPos(ax1, 'left', 0)\r\n # if self.flag_st == True:\r\n if self.zeroShiftY == True:\r\n zero_val = self.stress[self.plot_slice.start]\r\n else:\r\n zero_val = 0 \r\n ydata = [x - zero_val for x in self.stress[self.plot_slice]]\r\n p11, = ax6.plot(self.xAxisData[self.plot_slice],\r\n ydata, 'co',\r\n alpha = self.opacity, \r\n linewidth = self.lineWidth, \r\n markersize = self.markerSize,\r\n label=\"Stress\") \r\n # if self.flag_lf_filter == True:\r\n # p11, = ax6.plot(self.xAxisData[self.plot_slice],\r\n # self.stress_filtered[self.plot_slice], '-c',\r\n # alpha=0.5, linewidth=1, markersize=1,\r\n # label=\"Stress\")\r\n\r\n lns.append(p11)\r\n \r\n## lns = [p1, p3, p2, p4, p5]\r\n## else:\r\n## lns = [p1, p2]\r\n # legendPos = (0.5,0.5)\r\n # pos = eval(legendPos) if legendPos[0] == '(' else legendPos\r\n # self.legend_main = self.fig1.get_axes()[-1].legend(handles=lns, loc = pos)\r\n \r\n # if show_title == True:\r\n # title = 'Speed = ' + str(speed_inview).replace('[','').replace(']','') + ' μm/s'\r\n # self.fig1.suptitle(title)\r\n # leg_drag = self.legend_main.set_draggable(True, use_blit = True, update = 'loc')\r\n # print(self.legend_main.get_window_extent())\r\n\r\n# get_loc_in_canvas\r\n if self.fitWindow.enableFitting.isChecked() == True:\r\n # axDict = {'Vertical Force (μN)':ax1, 'Lateral Force (μN)':ax3}\r\n # yDict = {'Vertical Force (μN)':self.force_vert1_shifted,\r\n # 'Lateral Force (μN)':self.force_lat1_shifted}\r\n # fit_slice = slice(int(self.startFit * self.ptsnumber/100),\r\n # int(self.endFit * self.ptsnumber/100))\r\n # self.slope_unit = self.fitWindow.yFit.currentText().split('(')[1].split(')')[0] + '/' +\\\r\n # self.fitWindow.xFit.currentText().split('(')[1].split(')')[0]\r\n # self.slope_unit = '' #CHECK\r\n if self.unitDict[self.fitWindow.yFit.currentText()] != '':\r\n unit_top = self.unitDict[self.fitWindow.yFit.currentText()].split('[')[1].split(']')[0]\r\n else:\r\n unit_top = ''\r\n if self.unitDict[self.fitWindow.xFit.currentText()] != '':\r\n unit_bottom = self.unitDict[self.fitWindow.xFit.currentText()].split('[')[1].split(']')[0]\r\n else:\r\n unit_bottom = ''\r\n self.slope_unit = unit_top + '/' + unit_bottom\r\n # text_pos = self.fit_pos.split(\",\")\r\n \r\n # self.slope = fitting.polyfitData(xDict.get(self.fit_x)[fit_slice], yDict.get(self.fit_y)[fit_slice],\r\n # axDict.get(self.fit_y), xAxisData[fit_slice], unit = self.slope_unit,\r\n # eq_pos = text_pos, fit_order = 1, fit_show = self.fit_show)\r\n # ax_fit = self.plotAxisDict.get(self.fitWindow.yFit.currentText())\r\n # ax_fit.plot(self.xAxisData[self.fitWindow.fit_slice], \r\n # 
self.fitWindow.fit_ydata, color = 'black',\r\n # linewidth=2*self.lineWidth, linestyle='dashed')\r\n \r\n ## print(eq_pos)\r\n if self.fit_show == True: #show fitted curve\r\n ax_fit = self.plotAxisDict.get(self.fitWindow.yFit.currentText())\r\n if self.zeroShiftY == True:\r\n zero_val = self.fileDataDict[self.fitWindow.yFit.currentText()][self.plot_slice.start]\r\n else:\r\n zero_val = 0\r\n ydata = [x - zero_val for x in self.fitWindow.fit_ydata] \r\n ax_fit.plot(self.xAxisData[slice(*self.fitWindow.fit_range)], \r\n ydata, color = 'black', linewidth=2*self.lineWidth, \r\n linestyle='dashed')\r\n #display slope if linear fit\r\n if self.fitWindow.fittingFunctionType.currentText() == 'Linear': \r\n self.slope = self.fitWindow.fitParams['m']\r\n slope_label = \"Slope: \" + \"%.4f\"%(self.slope) + ' ' + self.slope_unit\r\n ax_top = self.fig1.get_axes()[-1]\r\n self.fitTextBox = ax_top.text(*self.fit_pos,\r\n slope_label, ha = 'right',\r\n transform=ax_top.transAxes, \r\n color = 'black',\r\n bbox=dict(facecolor='white', \r\n edgecolor = 'black', \r\n alpha=0.5),\r\n picker = 5)\r\n else:\r\n self.slope = ''\r\n self.slope_unit = ''\r\n self.fitTextBox = None\r\n \r\n #draw legend\r\n pos = eval(legendPos) if legendPos[0] == '(' else legendPos\r\n self.legend_main = self.fig1.get_axes()[-1].legend(handles=lns, loc = pos)\r\n \r\n \r\n \r\n self.fig1.tight_layout()\r\n self.fig1.canvas.draw()\r\n \r\n logging.debug('%s, %s', \"axes\", self.fig1.get_axes())\r\n # self.plotWidget.__init__(fig = self.fig1,\r\n # cursor1_init=c1_init,\r\n # cursor2_init=c2_init,\r\n # fixYLimits = self.fixYLimits,\r\n # method = self.updatePosition)\r\n self.plotWidget.wid.axes = self.fig1.get_axes()[-1]\r\n \r\n \r\n self.plotWidget.wid.add_cursors(cursor1_init=c1_init,\r\n cursor2_init=c2_init)\r\n \r\n # self.toggleAnimation(True)\r\n self.plotWidget.wid.fixYLimits = self.fixYLimits\r\n self.plotWidget.wid.updateBackground() \r\n # self.toggleAnimation(False)\r\n \r\n self.axesUnique = set([])\r\n for ind in self.imageAxisDict.keys():\r\n self.axesUnique.add(self.imageAxisDict[ind][0].axes)\r\n \r\n # self.plotWidget.resize(self.plotWidget.minimumSizeHint())\r\n # self.fig1.canvas.draw()\r\n self.sourceLabel = None #to prevent range buttons react to cursor drag\r\n logging.debug(\"plot finish\")\r\n \r\n # self.plotWidget.update()\r\n # self.plotWidget.wid.resizeWindow(None)\r\n # self.plotWidget.wid.draw_idle()\r\n\r\n \r\n def plotImageAnimate(self, frame_pos): \r\n self.plotWidget.wid.toggleAnimation(True)\r\n # restore the background region\r\n self.fig1.canvas.restore_region(self.plotWidget.wid.background)\r\n for ind in self.imageAxisDict.keys():\r\n i = 0\r\n for k in self.dataDict.keys():\r\n if len(self.dataDict.keys()) > 1 and k == \"Default\":\r\n continue\r\n if self.zeroShiftY == True:\r\n zero_val_area = self.dataDict[k][\"Contact area\"][self.plot_slice2.start]\r\n else:\r\n zero_val_area = 0\r\n if ind == \"pulloff area point\":\r\n self.imageAxisDict[ind][i].set_ydata(self.areaDict[k][\"area2_pulloff\"]-zero_val_area)\r\n elif ind == \"friction area point\":\r\n self.imageAxisDict[ind][i].set_ydata(self.areaDict[k][\"area_friction\"]-zero_val_area)\r\n else:\r\n logging.debug('%s', ind)\r\n if self.zeroShiftY == True:\r\n zero_val = self.dataDict[k][ind][self.plot_slice2.start]\r\n else:\r\n zero_val = 0\r\n ydata = [x - zero_val for x in self.dataDict[k][ind][self.plot_slice2]]\r\n self.imageAxisDict[ind][i].set_ydata(ydata)\r\n logging.debug('%s, %s, %s, %s', \"out\", ind, i, k)\r\n if 
self.fixYLimits == True:\r\n # redraw just the current rectangle\r\n self.imageAxisDict[ind][i].axes.draw_artist(self.imageAxisDict[ind][i])\r\n\r\n i += 1\r\n \r\n self.plotWidget.wid.updateCursor(self.plotWidget.wid.cursor1, \r\n self.time2[frame_pos-1])\r\n #draw cursor only within plot range\r\n if frame_pos >= self.plot_slice2.start + 1 and frame_pos < self.plot_slice2.stop + 1:\r\n self.plotWidget.wid.updateCursor(self.plotWidget.wid.cursor1, \r\n self.time2[frame_pos-1])\r\n self.plotWidget.wid.axes.draw_artist(self.plotWidget.wid.cursor1) \r\n else: #just set cursor on start and dont draw\r\n self.plotWidget.wid.updateCursor(self.plotWidget.wid.cursor1, \r\n self.time2[self.plot_slice2.start]) \r\n\r\n if self.fixYLimits == True:\r\n for ind in self.imageAxisDict.keys():\r\n self.fig1.canvas.blit(self.imageAxisDict[ind][0].axes.bbox)\r\n else:\r\n for axes in self.axesUnique:\r\n axes.relim()\r\n axes.autoscale_view()\r\n self.fig1.draw_artist(axes) \r\n for ind in self.imageAxisDict.keys():\r\n self.fig1.canvas.blit(self.imageAxisDict[ind][0].axes.get_tightbbox(self.fig1.canvas.get_renderer()))\r\n\r\n self.plotWidget.wid.toggleAnimation(False)\r\n self.sourceLabel = None\r\n \r\n def updateAxisPos(self, ax, spine_type, spine_position):\r\n ax.spines[spine_type].set_position(('outward', \r\n int(spine_position*self.fontSize)))\r\n ax.spines[spine_type].set_visible(True)\r\n ax.yaxis.set_ticks_position(spine_type)\r\n ax.yaxis.set_label_position(spine_type)\r\n\r\n def plotFileData(self, ax_primary, ax_secondary, curve, num, \r\n ylabel, curve_dict, curve_dict_ax, legend):\r\n if curve_dict['show'].isChecked() == True: \r\n if num > 0: #for multiple axis with no combine\r\n ax2 = ax_primary.twinx() #secondary axis\r\n else:\r\n if ax_secondary == None:\r\n ax2 = ax_primary.twinx() #secondary axis\r\n else:\r\n ax2 = ax_secondary\r\n ax2.yaxis.set_visible(True)\r\n # self.fig1.get_axes()[0].yaxis.set_visible(False)\r\n # ax2.yaxis.set_visible(False)\r\n self.updateAxisPos(ax2, curve_dict_ax['position'].currentText(), \r\n curve_dict_ax['shift'].value()) \r\n # ax2.set_xlabel(self.x_var)\r\n ax2.set_ylabel(ylabel + self.unitDict[curve], \r\n color = curve_dict_ax['color'])\r\n if curve_dict_ax['invert'].isChecked() == True:\r\n ax2.invert_yaxis()\r\n line_marker = curve_dict['line style'].currentText() + \\\r\n curve_dict['marker'].currentText()\r\n if self.zeroShiftY == True:\r\n zero_val = self.fileDataDict[curve][self.plot_slice.start]\r\n else:\r\n zero_val = 0\r\n ydata = [x - zero_val for x in self.fileDataDict[curve][self.plot_slice]]\r\n p1, = ax2.plot(self.xAxisData[self.plot_slice], \r\n ydata, \r\n line_marker,\r\n linewidth = self.lineWidth, \r\n markersize = self.markerSize,\r\n alpha = self.opacity, label=curve,\r\n color = curve_dict['color'])\r\n legend.append(p1)\r\n self.plotAxisDict[curve] = ax2\r\n else:\r\n ax2 = ax_secondary\r\n # print(\"legends\",curve, legend)\r\n \r\n return ax2, legend\r\n \r\n def plotImageData(self, ax_primary, ax_secondary, curve, num, \r\n ylabel, curve_dict, curve_dict_ax, legend):\r\n \r\n \r\n if curve_dict['show'].isChecked() == True: \r\n if num > 0: #for multiple axis with no combine\r\n ax2 = ax_primary.twinx() #secondary axis\r\n else:\r\n if ax_secondary == None:\r\n ax2 = ax_primary.twinx() #secondary axis\r\n else:\r\n ax2 = ax_secondary\r\n ax2.yaxis.set_visible(True) \r\n \r\n \r\n self.updateAxisPos(ax2, curve_dict_ax['position'].currentText(), \r\n curve_dict_ax['shift'].value())\r\n # self.updateAxisPos(ax_primary, 
'left', 0)\r\n color_array = [0.7, 0.5, 0.9, 0.3, 1]\r\n color_map = plt.get_cmap(curve_dict['color'].currentText())(color_array)\r\n ax2.set_ylabel(ylabel + self.imageDataUnitDict[curve],\r\n color = color_map[0])\r\n if curve_dict_ax['invert'].isChecked() == True:\r\n ax2.invert_yaxis()\r\n if self.fixYLimits == True:\r\n y_bound = [float(x) for x in curve_dict_ax['y bounds'].text().split(',')]\r\n ax2.set_ylim(y_bound)\r\n else:\r\n self.plotWidget.wid.artistDict[ylabel] = ax2\r\n ax2.set_animated(True)\r\n \r\n \r\n ax2.set_prop_cycle(color=color_map)\r\n \r\n i = 0\r\n lines = []\r\n for k in self.dataDict.keys():\r\n if len(self.dataDict.keys()) > 1 and k == \"Default\":\r\n continue\r\n \r\n if self.zeroShiftY == True:\r\n zero_val = self.dataDict[k][curve][self.plot_slice2.start]\r\n else:\r\n zero_val = 0\r\n ydata = [x - zero_val for x in self.dataDict[k][curve][self.plot_slice2]]\r\n p, = ax2.plot(self.time2[self.plot_slice2],\r\n ydata,\r\n '-' + self.markerlist[i], \r\n alpha = self.opacity, \r\n linewidth = self.lineWidth,\r\n markersize= self.markerSize, \r\n label= curve + \": \" + k)\r\n if self.fixYLimits == True:\r\n self.plotWidget.wid.artistDict[curve + \": \" + k] = p\r\n p.set_animated(True) #BLIT THIS CHECK!!!\r\n \r\n legend.append(p)\r\n lines.append(p)\r\n i += 1\r\n self.imageAxisDict[curve] = lines\r\n self.plotAxisDict[curve] = ax2\r\n else:\r\n ax2 = ax_secondary\r\n\r\n return ax2, legend\r\n \r\n #update y bounds\r\n def updateYBounds(self):\r\n logging.debug('clicked')\r\n source = 'image'\r\n for category in self.configPlotWindow.plotDict[source].keys():\r\n category_dict = self.configPlotWindow.plotDict[source][category]\r\n for curve in category_dict['curves'].keys():\r\n if curve in self.imageAxisDict.keys():\r\n logging.debug('%s', curve)\r\n y_bounds = ','.join(map(str,self.plotAxisDict[curve].get_ybound()))\r\n logging.debug('%s', y_bounds)\r\n category_dict['curves'][curve]['y bounds'].setText(y_bounds)\r\n \r\n \r\n def updatePosition(self):\r\n\r\n # final_pos = tuple(self.plotWidget.wid.axes.transLimits.transform\r\n # ((self.plotWidget.wid.final_pos)))\r\n if self.plotWidget.wid.clicked_artist == self.fitTextBox:\r\n self.fit_pos = list(self.fitTextBox.get_position())\r\n # elif self.plotWidget.wid.clicked_artist == self.legend_main:\r\n # pos = str(tuple(self.legend_main.get_window_extent()))\r\n # self.configPlotWindow.plotDict['plot settings']['legend position'].setText(pos)\r\n elif self.plotWidget.wid.clicked_artist in [self.plotWidget.wid.cursor1,\r\n self.plotWidget.wid.cursor2]:\r\n if self.sourceLabel != None:\r\n xdata = self.xAxisData\r\n x1 = self.plotWidget.wid.cursor1.get_xdata()[0] \r\n x1_ind = np.searchsorted(xdata, [x1])[0]\r\n \r\n if len(self.sourceLabel.text().split(',')) == 2:\r\n x2 = self.plotWidget.wid.cursor2.get_xdata()[0]\r\n x2_ind = np.searchsorted(xdata, [x2])[0]\r\n xstart = min(x1_ind, x2_ind)\r\n xend = max(x1_ind, x2_ind)\r\n xend = xend-1 if xend == len(xdata) else xend \r\n self.sourceLabel.setText(str(xstart) + ',' + str(xend))\r\n else:\r\n xstart = x1_ind-1 if x1_ind == len(xdata) else x1_ind\r\n self.sourceLabel.setText(str(xstart))\r\n # self.sourceLabel = None\r\n \r\n def setCursorPosition(self, label): #change xAxisData to time1 or index\r\n self.sourceLabel = label\r\n cursor_range = label.text().split(',')\r\n if int(cursor_range[0]) < self.plot_slice.start or \\\r\n int(cursor_range[0]) >= self.plot_slice.stop:\r\n cursor_range[0] = self.plot_slice.start\r\n\r\n # cursor_list = 
[self.plotWidget.wid.cursor1, self.plotWidget.wid.cursor2]\r\n \r\n # i = 1\r\n # for cursor in cursor_list:\r\n # if i <= len(cursor_range):\r\n # if cursor == None:\r\n # cursor_list[i-1] = self.plotWidget.wid.cursor_initialize(\r\n # self.xAxisData[int(cursor_range[i-1])],\"cursor\" + str(i))\r\n # else:\r\n # self.plotWidget.wid.updateCursor(cursor, \r\n # self.xAxisData[int(cursor_range[i-1])])\r\n # self.plotWidget.wid.axes.draw_artist(cursor)\r\n # else:\r\n # cursor = None\r\n # i += 1\r\n # print(cursor_list)\r\n \r\n if self.plotWidget.wid.cursor1 == None:\r\n self.plotWidget.wid.cursor1 = self.plotWidget.wid.cursor_initialize(\r\n self.xAxisData[int(cursor_range[0])],\"cursor1\")\r\n else:\r\n self.plotWidget.wid.updateCursor(self.plotWidget.wid.cursor1, \r\n self.xAxisData[int(cursor_range[0])])\r\n self.plotWidget.wid.axes.draw_artist(self.plotWidget.wid.cursor1)\r\n \r\n if len(cursor_range) == 2:\r\n if int(cursor_range[1]) < self.plot_slice.start or \\\r\n int(cursor_range[1]) >= self.plot_slice.stop:\r\n cursor_range[1] = self.plot_slice.stop - 1\r\n \r\n if self.plotWidget.wid.cursor2 == None:\r\n self.plotWidget.wid.cursor2 = self.plotWidget.wid.cursor_initialize(\r\n self.xAxisData[int(cursor_range[1])],\"cursor2\")\r\n else:\r\n self.plotWidget.wid.updateCursor(self.plotWidget.wid.cursor2, \r\n self.xAxisData[int(cursor_range[1])])\r\n self.plotWidget.wid.axes.draw_artist(self.plotWidget.wid.cursor2)\r\n else:\r\n if self.plotWidget.wid.cursor2 != None:\r\n self.plotWidget.wid.cursor2.set_xdata([])\r\n self.plotWidget.wid.cursor2.set_ydata([])\r\n logging.debug('%s', cursor_range)\r\n # self.fig1.canvas.draw()\r\n self.plotWidget.wid.draw_idle()\r\n # self.plotWidget.wid.updateBackground()\r\n \r\n \r\n # def plotImageData(self, ax_primary, ax_secondary, spine_type, spine_position, \r\n # yaxis_label, ylabel, y_index, color, legend, y_bound = [50000, 100000]):\r\n \r\n # if ax_secondary == None:\r\n # ax_secondary = ax_primary.twinx()\r\n # # ax_secondary.spines[spine_type].set_position(('outward', \r\n # # int(spine_position*self.fontSize)))\r\n # # ax_secondary.spines[spine_type].set_visible(True)\r\n # # ax_secondary.yaxis.set_label_position(spine_type)\r\n # # ax_secondary.yaxis.set_ticks_position(spine_type) \r\n # ax_secondary.set_ylabel(yaxis_label, color = self.colorDict[color][0])\r\n # self.updateAxisPos(ax_secondary, spine_type, spine_position)\r\n # self.updateAxisPos(ax_primary, 'left', 0)\r\n\r\n # if self.fixYLimits == True:\r\n # ax_secondary.set_ylim(y_bound)\r\n # else:\r\n # self.plotWidget.wid.artistDict[yaxis_label] = ax_secondary\r\n # ax_secondary.set_animated(True)\r\n \r\n # ax_secondary.set_prop_cycle(color=self.colorDict[color])\r\n \r\n # i = 0\r\n # lines = []\r\n # for k in self.rangeDict.keys():\r\n # if len(self.rangeDict.keys()) > 1 and k == \"Default\":\r\n # continue\r\n \r\n # p, = ax_secondary.plot(self.time2[self.plot_slice2],\r\n # self.dataDict[k][y_index][self.plot_slice2],\r\n # '-' + self.markerlist[i], alpha=0.5, linewidth=1,\r\n # markersize=2, label= ylabel + \": \" + k)\r\n # if self.fixYLimits == True:\r\n # self.plotWidget.wid.artistDict[ylabel + \": \" + k] = p\r\n # p.set_animated(True) #BLIT THIS CHECK!!!\r\n\r\n # legend.append(p)\r\n # lines.append(p)\r\n # i += 1\r\n\r\n # return ax_secondary, legend, lines\r\n\r\n # def setPlotRange(self): #set global plot range\r\n # fig = Figure(figsize=(11, 5), dpi=100)\r\n # ax = fig.add_subplot(111)\r\n # xdata = self.time1\r\n # ydata = self.force_vert1_shifted #CHECK! 
make this more general\r\n # ax.plot(xdata, ydata, 'r-', linewidth=1, markersize=1) \r\n # plotWidget = PlotWidget(fig = fig,\r\n # cursor1_init=min(xdata),\r\n # cursor2_init=max(xdata),\r\n # method = self.updateRange)\r\n # self.fullPlotWidget = plotWidget.wid\r\n # plotWidget.show()\r\n \r\n # def updateRange(self): #update plot range slice\r\n # self.plot_slice = slice(self.fullPlotWidget.cursor1.get_xdata(),\r\n # self.fullPlotWidget.cursor2.get_xdata())\r\n\r\n def toggleCursorVisibility(self, state):\r\n if self.plotWidget.wid.cursor1 != None:\r\n self.plotWidget.wid.cursor1.set_visible(state)\r\n if self.plotWidget.wid.cursor2 != None:\r\n self.plotWidget.wid.cursor2.set_visible(state)\r\n \r\n \r\n def convertPlot(self): #convert plot to numpy\r\n # self.fig1.canvas.draw()\r\n data = np.fromstring(self.fig1.canvas.tostring_rgb(),\r\n dtype=np.uint8, sep='')\r\n data = data.reshape(self.fig1.canvas.get_width_height()[::-1] + (3,))\r\n return data\r\n\r\n def savePlot(self, filepath): #save force plots\r\n logging.debug(\"save plot\")\r\n self.toggleCursorVisibility(False) \r\n self.fig1.savefig(filepath, orientation='landscape',\r\n transparent = True)\r\n #save figure object as pickle file\r\n with open(filepath[:-4] + '.pickle', 'wb') as f:\r\n pickle.dump(self.fig1, f, pickle.HIGHEST_PROTOCOL)\r\n \r\n self.toggleCursorVisibility(True)\r\n \r\n\r\n# class PlotWindow(QWidget):\r\n# def __init__(self, fig, *args, **kwargs):\r\n# ## super(QWidget, self).__init__(*args, **kwargs)\r\n# super().__init__()\r\n# self.setGeometry(100, 100, 1000, 500)\r\n# self.setWindowTitle(\"Plot\")\r\n# self.fig = fig\r\n# self.home()\r\n \r\n# def home(self):\r\n# self.plotWidget = Plot2Widget(self.fig,cursor1_init=2,cursor2_init=6)\r\n# plotToolbar = NavigationToolbar(self.plotWidget, self)\r\n \r\n# plotGroupBox = QGroupBox()\r\n# plotlayout=QGridLayout()\r\n# plotGroupBox.setLayout(plotlayout)\r\n# plotlayout.addWidget(plotToolbar, 0, 0, 1, 1)\r\n# plotlayout.addWidget(self.plotWidget, 1, 0, 1, 1)\r\n \r\n# layout=QGridLayout()\r\n# layout.addWidget(plotGroupBox, 0, 0, 1, 1)\r\n \r\n# self.setLayout(layout)\r\n # self.show()\r\n # startFitLabel = QLabel(\"Start (%):\")"
] |
[
[
"numpy.linspace",
"scipy.signal.medfilt",
"numpy.append",
"scipy.integrate.simps",
"scipy.signal.savgol_filter",
"numpy.empty"
],
[
"numpy.expand_dims",
"numpy.fft.fftshift",
"numpy.ones",
"numpy.fft.ifftshift",
"numpy.float32",
"numpy.zeros"
],
[
"numpy.linspace",
"matplotlib.style.use",
"matplotlib.figure.Figure",
"matplotlib.use",
"numpy.sin",
"matplotlib.rcParams.update",
"numpy.searchsorted"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
erdeq-upenn/stock
|
[
"f62ebd2d87bb853c2ff1c0583e1b5efc1549d6de"
] |
[
"wk3/example.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 13 21:53:10 2020\n\n@author: Dequan\n\"\"\"\n\n# Python ≥3.5 is required\nimport sys\nassert sys.version_info >= (3, 5)\n\n# Scikit-Learn ≥0.20 is required\nimport sklearn\nassert sklearn.__version__ >= \"0.20\"\n\n# Common imports\nimport numpy as np\nimport os\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import SGDRegressor\n# to make this notebook's output stable across runs\nnp.random.seed(42)\n\n# To plot pretty figures\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt # very useful \n\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\n# Where to save the figures\nPROJECT_ROOT_DIR = \".\" # GLOBAL VARIABLE UPPER CASE \nCHAPTER_ID = \"training_linear_models\"\nIMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\nos.makedirs(IMAGES_PATH, exist_ok=True)\n\ndef save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format=fig_extension, dpi=resolution)\n\n\ndef run_linear():\n # Where to save the figures\n PROJECT_ROOT_DIR = \".\"\n CHAPTER_ID = \"training_linear_models\"\n IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\n os.makedirs(IMAGES_PATH, exist_ok=True)\n\n def save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format=fig_extension, dpi=resolution)\n\n # Ignore useless warnings (see SciPy issue #5998)\n import warnings\n warnings.filterwarnings(action=\"ignore\", message=\"^internal gelsd\")\n\n import numpy as np\n # -------------------------------------------------------------------------\n # define\n X = 2 * np.random.rand(100, 1)\n y = 4 + 3 * X + np.random.randn(100, 1)\n\n plt.plot(X, y, \"b.\")\n plt.xlabel(\"$x_1$\", fontsize=18)\n plt.ylabel(\"$y$\", rotation=0, fontsize=18)\n plt.axis([0, 2, 0, 15])\n save_fig(\"generated_data_plot\")\n plt.show()\n\n X_b = np.c_[np.ones((100, 1)), X] # add x0 = 1 to each instance\n theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)\n print(theta_best)\n\n X_new = np.array([[0], [2]])\n X_new_b = np.c_[np.ones((2, 1)), X_new] # add x0 = 1 to each instance\n y_predict = X_new_b.dot(theta_best)\n print('\\nusing normal eqution\\n',y_predict)\n\n plt.plot(X_new, y_predict, \"r-\")\n plt.plot(X, y, \"b.\")\n plt.axis([0, 2, 0, 15])\n plt.show()\n\n # ------------------\n\n\n lin_reg = LinearRegression()\n lin_reg.fit(X, y)\n print(lin_reg.intercept_, lin_reg.coef_)\n\ndef run_poly():\n m = 100\n X = 6 * np.random.rand(m, 1) - 3\n y = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)\n\n plt.plot(X, y, \"b.\")\n plt.xlabel(\"$x_1$\", fontsize=18)\n plt.ylabel(\"$y$\", rotation=0, fontsize=18)\n plt.axis([-3, 3, 0, 10])\n save_fig(\"quadratic_data_plot\")\n plt.show()\n\n\n from sklearn.preprocessing import PolynomialFeatures\n poly_features = PolynomialFeatures(degree=2, include_bias=False)\n X_poly = poly_features.fit_transform(X)\n print(X[0])\n\n\n lin_reg = LinearRegression()\n lin_reg.fit(X_poly, y)\n print(lin_reg.intercept_, lin_reg.coef_)\n\n\n X_new=np.linspace(-3, 3, 
100).reshape(100, 1)\n X_new_poly = poly_features.transform(X_new)\n y_new = lin_reg.predict(X_new_poly)\n plt.plot(X, y, \"b.\")\n plt.plot(X_new, y_new, \"r-\", linewidth=2, label=\"Predictions\")\n plt.xlabel(\"$x_1$\", fontsize=18)\n plt.ylabel(\"$y$\", rotation=0, fontsize=18)\n plt.legend(loc=\"upper left\", fontsize=14)\n plt.axis([-3, 3, 0, 10])\n save_fig(\"quadratic_predictions_plot\")\n plt.show()\n\n\n for style, width, degree in ((\"g-\", 1, 300), (\"b--\", 2, 2), (\"r-+\", 2, 1)):\n polybig_features = PolynomialFeatures(degree=degree, include_bias=False)\n std_scaler = StandardScaler()\n lin_reg = LinearRegression()\n polynomial_regression = Pipeline([\n (\"poly_features\", polybig_features),\n (\"std_scaler\", std_scaler),\n (\"lin_reg\", lin_reg),\n ])\n polynomial_regression.fit(X, y)\n y_newbig = polynomial_regression.predict(X_new)\n plt.plot(X_new, y_newbig, style, label=str(degree), linewidth=width)\n\n plt.plot(X, y, \"b.\", linewidth=3)\n plt.legend(loc=\"upper left\")\n plt.xlabel(\"$x_1$\", fontsize=18)\n plt.ylabel(\"$y$\", rotation=0, fontsize=18)\n plt.axis([-3, 3, 0, 10])\n save_fig(\"high_degree_polynomials_plot\")\n plt.show()\n\n\n from sklearn.metrics import mean_squared_error\n from sklearn.model_selection import train_test_split\n\n def plot_learning_curves(model, X, y):\n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=10)\n train_errors, val_errors = [], []\n for m in range(1, len(X_train)):\n model.fit(X_train[:m], y_train[:m])\n y_train_predict = model.predict(X_train[:m])\n y_val_predict = model.predict(X_val)\n train_errors.append(mean_squared_error(y_train[:m], y_train_predict))\n val_errors.append(mean_squared_error(y_val, y_val_predict))\n\n plt.plot(np.sqrt(train_errors), \"r-+\", linewidth=2, label=\"train\")\n plt.plot(np.sqrt(val_errors), \"b-\", linewidth=3, label=\"val\")\n plt.legend(loc=\"upper right\", fontsize=14) # not shown in the book\n plt.xlabel(\"Training set size\", fontsize=14) # not shown\n plt.ylabel(\"RMSE\", fontsize=14) # not shown\n\n lin_reg = LinearRegression()\n plot_learning_curves(lin_reg, X, y)\n plt.axis([0, 80, 0, 3]) # not shown in the book\n save_fig(\"underfitting_learning_curves_plot\") # not shown\n plt.show() # not shown\n\n\n\n\n polynomial_regression = Pipeline([\n (\"poly_features\", PolynomialFeatures(degree=2, include_bias=False)),\n (\"lin_reg\", LinearRegression()),\n ])\n\n plot_learning_curves(polynomial_regression, X, y)\n plt.axis([0, 80, 0, 3]) # not shown\n save_fig(\"learning_curves_plot\") # not shown\n plt.show() # not shown\n\n\ndef run_sgd():\n X = 2 * np.random.rand(100, 1)\n y = 4 + 3 * X + np.random.randn(100, 1)\n X_b = np.c_[np.ones((100, 1)), X] # add x0 = 1 to each instance\n theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)\n print(theta_best)\n\n X_new = np.array([[0], [2]])\n X_new_b = np.c_[np.ones((2, 1)), X_new] # add x0 = 1 to each instance\n theta_path_sgd = []\n m = len(X_b)\n np.random.seed(42)\n \n eta = 0.1 # learning rate\n n_iterations = 1000\n m = 100\n\n theta = np.random.randn(2,1) # random initialization\n\n for iteration in range(n_iterations):\n gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)\n theta = theta - eta * gradients\n\n theta_path_bgd = []\n\n def plot_gradient_descent(theta, eta, theta_path=None):\n m = len(X_b)\n plt.plot(X, y, \"b.\")\n n_iterations = 1000\n for iteration in range(n_iterations):\n if iteration < 10:\n y_predict = X_new_b.dot(theta)\n style = \"b-\" if iteration > 0 else \"r--\"\n 
plt.plot(X_new, y_predict, style)\n gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)\n theta = theta - eta * gradients\n if theta_path is not None:\n theta_path.append(theta)\n plt.xlabel(\"$x_1$\", fontsize=18)\n plt.axis([0, 2, 0, 15])\n plt.title(r\"$\\eta = {}$\".format(eta), fontsize=16)\n np.random.seed(42)\n theta = np.random.randn(2,1) # random initialization\n\n plt.figure(figsize=(10,4))\n plt.subplot(131); plot_gradient_descent(theta, eta=0.02)\n plt.ylabel(\"$y$\", rotation=0, fontsize=18)\n plt.subplot(132); plot_gradient_descent(theta, eta=0.1, theta_path=theta_path_bgd)\n plt.subplot(133); plot_gradient_descent(theta, eta=0.5)\n\n save_fig(\"gradient_descent_plot\")\n plt.show()\n\n\n\n n_epochs = 50\n t0, t1 = 5, 50 # learning schedule hyperparameters\n\n def learning_schedule(t):\n return t0 / (t + t1)\n\n theta = np.random.randn(2,1) # random initialization\n\n for epoch in range(n_epochs):\n for i in range(m):\n if epoch == 0 and i < 20: # not shown in the book\n y_predict = X_new_b.dot(theta) # not shown\n style = \"b-\" if i > 0 else \"r--\" # not shown\n plt.plot(X_new, y_predict, style) # not shown\n random_index = np.random.randint(m)\n xi = X_b[random_index:random_index+1]\n yi = y[random_index:random_index+1]\n gradients = 2 * xi.T.dot(xi.dot(theta) - yi)\n eta = learning_schedule(epoch * m + i)\n theta = theta - eta * gradients\n theta_path_sgd.append(theta) # not shown\n\n plt.plot(X, y, \"b.\") # not shown\n plt.xlabel(\"$x_1$\", fontsize=18) # not shown\n plt.ylabel(\"$y$\", rotation=0, fontsize=18) # not shown\n plt.axis([0, 2, 0, 15]) # not shown\n save_fig(\"sgd_plot\") # not shown\n plt.show()\n print(theta)\n\n\n\n sgd_reg = SGDRegressor(max_iter=10, tol=1e-3, penalty=None, eta0=0.1,\n random_state=42)\n sgd_reg.fit(X, y.ravel())\n\n print('sklearn:',sgd_reg.intercept_, sgd_reg.coef_)\n\n theta_path_mgd = []\n\n n_iterations = 10\n minibatch_size = 20\n\n np.random.seed(42)\n theta = np.random.randn(2,1) # random initialization\n\n t0, t1 = 200, 1000\n def learning_schedule(t):\n return t0 / (t + t1)\n\n t = 0\n for epoch in range(n_iterations):\n shuffled_indices = np.random.permutation(m)\n X_b_shuffled = X_b[shuffled_indices]\n y_shuffled = y[shuffled_indices]\n for i in range(0, m, minibatch_size):\n t += 1\n xi = X_b_shuffled[i:i+minibatch_size]\n yi = y_shuffled[i:i+minibatch_size]\n gradients = 2/minibatch_size * xi.T.dot(xi.dot(theta) - yi)\n eta = learning_schedule(t)\n theta = theta - eta * gradients\n theta_path_mgd.append(theta)\n print('Mini_batch:',theta)\n \n theta_path_bgd = np.array(theta_path_bgd)\n theta_path_sgd = np.array(theta_path_sgd)\n theta_path_mgd = np.array(theta_path_mgd)\n\n plt.figure(figsize=(7,4))\n plt.plot(theta_path_sgd[:, 0], theta_path_sgd[:, 1], \"r-s\", linewidth=1, label=\"Stochastic\")\n plt.plot(theta_path_mgd[:, 0], theta_path_mgd[:, 1], \"g-+\", linewidth=2, label=\"Mini-batch\")\n plt.plot(theta_path_bgd[:, 0], theta_path_bgd[:, 1], \"b-o\", linewidth=3, label=\"Batch\")\n plt.legend(loc=\"upper left\", fontsize=16)\n plt.xlabel(r\"$\\theta_0$\", fontsize=20)\n plt.ylabel(r\"$\\theta_1$ \", fontsize=20, rotation=0)\n plt.axis([2.5, 4.5, 2.3, 3.9])\n save_fig(\"gradient_descent_paths_plot\")\n plt.show()\n \n \nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.sqrt",
"numpy.linspace",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"numpy.random.randint",
"matplotlib.pyplot.tight_layout",
"sklearn.pipeline.Pipeline",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.savefig",
"numpy.random.rand",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.rc",
"matplotlib.pyplot.ylabel",
"numpy.random.seed",
"sklearn.linear_model.SGDRegressor",
"numpy.ones",
"numpy.random.permutation",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
POFK/tftool
|
[
"ed82edc48fed86e93df3860bd629ebb7089733bf"
] |
[
"model/model_base.py"
] |
[
"#!/usr/bin/env python\n# coding=utf-8\nimport tensorflow as tf\n\n#============================ setting ===========================================\ntf.logging.set_verbosity(tf.logging.INFO)\n#layers = tf.layers\nlayer = tf.contrib.framework.add_arg_scope(tf.layers.conv1d)\nslim = tf.contrib.slim\nprint_fn = tf.logging.info # print\n#================================================================================\n\ndef layer_batch_relu(func, is_training=tf.bool):\n '''wrapper for tf.layers.conv based functions'''\n def wrapper(*args, **kwargs):\n with slim.arg_scope([func], activation=None, use_bias=False):\n net = func(*args, **kwargs)\n\n net = tf.contrib.layers.batch_norm(net, \n center=True, \n scale=False,\n decay=0.99,\n epsilon=0.001,\n is_training=is_training,\n scope=kwargs['name']+'/BN')\n return tf.nn.relu(net, name=kwargs['name']+'/relu')\n return wrapper\n\ndef arg_scope(weight_decay=1e-4):\n kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=weight_decay)\n # l2_loss = tf.losses.get_regularization_loss()\n with slim.arg_scope(\n [tf.layers.conv1d],\n strides=1,\n padding='valid',\n activation=tf.nn.relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=kernel_regularizer,\n trainable=True,\n ) as arg_sc:\n return arg_sc\n\n\n\n"
] |
[
[
"tensorflow.nn.relu",
"tensorflow.logging.set_verbosity",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.contrib.framework.add_arg_scope",
"tensorflow.contrib.layers.batch_norm"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
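model/model_base.py above defines layer_batch_relu and arg_scope but never calls them; here is a minimal usage sketch under TF 1.x, assuming the module is importable as model.model_base (the input shape and layer parameters are illustrative assumptions, not from the repository):

import tensorflow as tf
from model.model_base import layer_batch_relu, arg_scope, layer, slim

# Wrap the arg-scoped conv1d so every call becomes conv -> batch norm -> ReLU.
conv_bn_relu = layer_batch_relu(layer, is_training=True)

inputs = tf.placeholder(tf.float32, [None, 128, 8])  # (batch, length, channels)
with slim.arg_scope(arg_scope()):
    net = conv_bn_relu(inputs, filters=32, kernel_size=3, name='conv1')
    net = conv_bn_relu(net, filters=64, kernel_size=3, name='conv2')
print(net)  # shape (?, 124, 64) after two 'valid' stride-1 convolutions

Note the wrapper deliberately disables the conv layer's own activation and bias (activation=None, use_bias=False) so that batch normalization sits between the convolution and the ReLU.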
Yaozeng/roberta-tf
|
[
"4c1ddca94c87e4c04f51b3cfc90fb642fe81ae7e"
] |
[
"modeling_roberta.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch RoBERTa model. \"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom modeling_bert import BertEmbeddings, BertLayerNorm, BertModel, BertPreTrainedModel, gelu\nfrom configuration_roberta import RobertaConfig\nfrom file_utils import add_start_docstrings\n\nlogger = logging.getLogger(__name__)\n\nROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = {\n 'roberta-base': \"https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-pytorch_model.bin\",\n 'roberta-large': \"https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-pytorch_model.bin\",\n 'roberta-large-mnli': \"https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-pytorch_model.bin\",\n}\n\nclass RobertaEmbeddings(BertEmbeddings):\n \"\"\"\n Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.\n \"\"\"\n def __init__(self, config):\n super(RobertaEmbeddings, self).__init__(config)\n self.padding_idx = 1\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size,\n padding_idx=self.padding_idx)\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n seq_length = input_ids.size(1)\n if position_ids is None:\n # Position numbers begin at padding_idx+1. Padding symbols are ignored.\n # cf. fairseq's `utils.make_positions`\n position_ids = torch.arange(self.padding_idx+1, seq_length+self.padding_idx+1, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n return super(RobertaEmbeddings, self).forward(input_ids,\n token_type_ids=token_type_ids,\n position_ids=position_ids)\n\n\nROBERTA_START_DOCSTRING = r\"\"\" The RoBERTa model was proposed in\n `RoBERTa: A Robustly Optimized BERT Pretraining Approach`_\n by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer,\n Veselin Stoyanov. It is based on Google's BERT model released in 2018.\n \n It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining\n objective and training with much larger mini-batches and learning rates.\n \n This implementation is the same as BertModel with a tiny embeddings tweak as well as a setup for Roberta pretrained \n models.\n\n This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and\n refer to the PyTorch documentation for all matter related to general usage and behavior.\n\n .. _`RoBERTa: A Robustly Optimized BERT Pretraining Approach`:\n https://arxiv.org/abs/1907.11692\n\n .. 
_`torch.nn.Module`:\n https://pytorch.org/docs/stable/nn.html#module\n\n Parameters:\n config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the \n model. Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nROBERTA_INPUTS_DOCSTRING = r\"\"\"\n Inputs:\n **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Indices of input sequence tokens in the vocabulary.\n To match pre-training, RoBERTa input sequence should be formatted with <s> and </s> tokens as follows:\n\n (a) For sequence pairs:\n\n ``tokens: <s> Is this Jacksonville ? </s> </s> No it is not . </s>``\n\n (b) For single sequences:\n\n ``tokens: <s> the dog is hairy . </s>``\n\n Fully encoded sequences or sequence pairs can be obtained using the RobertaTokenizer.encode function with \n the ``add_special_tokens`` parameter set to ``True``.\n\n RoBERTa is a model with absolute position embeddings so it's usually advised to pad the inputs on\n the right rather than the left.\n\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.\n **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n **token_type_ids**: (`optional`, needs to be trained) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Optional segment token indices to indicate first and second portions of the inputs.\n This embedding matrix is not trained (not pretrained during RoBERTa pretraining), you will have to train it\n during finetuning.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).\n **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.\n\"\"\"\n\n@add_start_docstrings(\"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\n ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)\nclass RobertaModel(BertModel):\n r\"\"\"\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``\n Sequence of hidden-states at the output of the last layer of the model.\n **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer and a Tanh activation function. 
The Linear\n layer weights are trained from the next sentence prediction (classification)\n objective during Bert pretraining. This output is usually *not* a good summary\n of the semantic content of the input, you're often better with averaging or pooling\n the sequence of hidden-states for the whole input sequence.\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n model = RobertaModel.from_pretrained('roberta-base')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n config_class = RobertaConfig\n pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP\n base_model_prefix = \"roberta\"\n\n def __init__(self, config):\n super(RobertaModel, self).__init__(config)\n\n self.embeddings = RobertaEmbeddings(config)\n self.init_weights()\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):\n if input_ids[:, 0].sum().item() != 0:\n logger.warning(\"A sequence with no special tokens has been passed to the RoBERTa model. \"\n \"This model requires special tokens in order to work. \"\n \"Please specify add_special_tokens=True in your tokenize.encode()\"\n \"or tokenizer.convert_tokens_to_ids().\")\n return super(RobertaModel, self).forward(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask)\n\n\n@add_start_docstrings(\"\"\"RoBERTa Model with a `language modeling` head on top. 
\"\"\",\n ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)\nclass RobertaForMaskedLM(BertPreTrainedModel):\n r\"\"\"\n **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Masked language modeling loss.\n **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n model = RobertaForMaskedLM.from_pretrained('roberta-base')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, masked_lm_labels=input_ids)\n loss, prediction_scores = outputs[:2]\n\n \"\"\"\n config_class = RobertaConfig\n pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP\n base_model_prefix = \"roberta\"\n\n def __init__(self, config):\n super(RobertaForMaskedLM, self).__init__(config)\n\n self.roberta = RobertaModel(config)\n self.lm_head = RobertaLMHead(config)\n\n self.init_weights()\n self.tie_weights()\n\n def tie_weights(self):\n \"\"\" Make sure we are sharing the input and output embeddings.\n Export to TorchScript can't handle parameter sharing so we are cloning them instead.\n \"\"\"\n self._tie_or_clone_weights(self.lm_head.decoder, self.roberta.embeddings.word_embeddings)\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,\n masked_lm_labels=None):\n outputs = self.roberta(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask)\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n\n outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here\n\n if masked_lm_labels is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n outputs = (masked_lm_loss,) + outputs\n\n return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)\n\n\nclass RobertaLMHead(nn.Module):\n \"\"\"Roberta Head for masked language modeling.\"\"\"\n\n def __init__(self, config):\n super(RobertaLMHead, 
self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n def forward(self, features, **kwargs):\n x = self.dense(features)\n x = gelu(x)\n x = self.layer_norm(x)\n\n # project back to size of vocabulary with bias\n x = self.decoder(x) + self.bias\n\n return x\n\n\n@add_start_docstrings(\"\"\"RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer \n on top of the pooled output) e.g. for GLUE tasks. \"\"\",\n ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)\nclass RobertaForSequenceClassification(BertPreTrainedModel):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification (or regression if config.num_labels==1) loss.\n **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n model = RobertaForSequenceClassification.from_pretrained('roberta-base')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1]).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, logits = outputs[:2]\n\n \"\"\"\n config_class = RobertaConfig\n pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP\n base_model_prefix = \"roberta\"\n\n def __init__(self, config):\n super(RobertaForSequenceClassification, self).__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config)\n self.classifier = RobertaClassificationHead(config)\n \n def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,\n labels=None):\n outputs = self.roberta(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask)\n sequence_output = outputs[0]\n logits = self.classifier(sequence_output)\n\n outputs = (logits,) + outputs[2:]\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = 
loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), logits, (hidden_states), (attentions)\n\n@add_start_docstrings(\"\"\"Roberta Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks. \"\"\",\n ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)\nclass RobertaForMultipleChoice(BertPreTrainedModel):\n r\"\"\"\n Inputs:\n **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:\n Indices of input sequence tokens in the vocabulary.\n The second dimension of the input (`num_choices`) indicates the number of choices to score.\n To match pre-training, RoBerta input sequence should be formatted with [CLS] and [SEP] tokens as follows:\n\n (a) For sequence pairs:\n\n ``tokens: [CLS] is this jack ##son ##ville ? [SEP] [SEP] no it is not . [SEP]``\n\n ``token_type_ids: 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1``\n\n (b) For single sequences:\n\n ``tokens: [CLS] the dog is hairy . [SEP]``\n\n ``token_type_ids: 0 0 0 0 0 0 0``\n\n Indices can be obtained using :class:`transformers.BertTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.\n **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:\n Segment token indices to indicate first and second portions of the inputs.\n The second dimension of the input (`num_choices`) indicates the number of choices to score.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:\n Mask to avoid performing attention on padding token indices.\n The second dimension of the input (`num_choices`) indicates the number of choices to score.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the multiple choice classification loss.\n Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension\n of the input tensors. (see `input_ids` above)\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification loss.\n **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension\n of the input tensors. 
(see `input_ids` above).\n Classification scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n model = RobertaForMultipleChoice.from_pretrained('roberta-base')\n choices = [\"Hello, my dog is cute\", \"Hello, my cat is amazing\"]\n input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices\n labels = torch.tensor(1).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, classification_scores = outputs[:2]\n\n \"\"\"\n config_class = RobertaConfig\n pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP\n base_model_prefix = \"roberta\"\n\n def __init__(self, config):\n super(RobertaForMultipleChoice, self).__init__(config)\n\n self.roberta = RobertaModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,\n position_ids=None, head_mask=None):\n num_choices = input_ids.shape[1]\n\n flat_input_ids = input_ids.view(-1, input_ids.size(-1))\n flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n outputs = self.roberta(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids,\n attention_mask=flat_attention_mask, head_mask=head_mask)\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n outputs = (loss,) + outputs\n\n return outputs # (loss), reshaped_logits, (hidden_states), (attentions)\n\n\n\nclass RobertaClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, config):\n super(RobertaClassificationHead, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, features, **kwargs):\n x = features[:, 0, :] # take <s> token (equiv. to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.zeros",
"torch.nn.Embedding",
"torch.tanh",
"torch.nn.Linear",
"torch.arange",
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
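RobertaEmbeddings in the entry above starts position ids at padding_idx + 1 instead of 0; a short sketch reproducing that indexing (the token ids are made up for illustration):

import torch

padding_idx = 1
input_ids = torch.tensor([[0, 31414, 232, 2]])  # hypothetical <s> ... </s> ids
seq_length = input_ids.size(1)

# cf. fairseq's utils.make_positions: positions begin at padding_idx + 1
position_ids = torch.arange(padding_idx + 1, seq_length + padding_idx + 1,
                            dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
print(position_ids)  # tensor([[2, 3, 4, 5]])

This offset is why RoBERTa checkpoints carry max_position_embeddings = 514 while supporting sequences of at most 512 tokens.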
xiaowuhu/sklearn-onnx
|
[
"e85674a67a0a043e19c2ffe181e5d31eca8ce40b",
"e85674a67a0a043e19c2ffe181e5d31eca8ce40b",
"e85674a67a0a043e19c2ffe181e5d31eca8ce40b",
"e85674a67a0a043e19c2ffe181e5d31eca8ce40b",
"e85674a67a0a043e19c2ffe181e5d31eca8ce40b",
"e85674a67a0a043e19c2ffe181e5d31eca8ce40b",
"e85674a67a0a043e19c2ffe181e5d31eca8ce40b"
] |
[
"tests/test_custom_transformer_ordwoe.py",
"skl2onnx/operator_converters/label_binariser.py",
"docs/tutorial/plot_kcustom_converter_wrapper.py",
"benchmarks/post_graph.py",
"docs/tutorial/plot_wext_pyod_forest.py",
"skl2onnx/operator_converters/local_outlier_factor.py",
"tests/test_algebra_onnx_operator_mixin_syntax.py"
] |
[
"# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\nTests scikit-learn's binarizer converter.\n\"\"\"\nimport unittest\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\nfrom onnxruntime import InferenceSession\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.datasets import load_iris\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import OrdinalEncoder, StandardScaler, MaxAbsScaler\nfrom skl2onnx import update_registered_converter, to_onnx, get_model_alias\nfrom skl2onnx.common.data_types import FloatTensorType, Int64TensorType\nfrom skl2onnx.common.utils import check_input_and_output_numbers\nfrom skl2onnx.algebra.onnx_ops import OnnxCast, OnnxIdentity\nfrom skl2onnx.algebra.onnx_operator import OnnxSubEstimator\nfrom skl2onnx.sklapi import WOETransformer\nimport skl2onnx.sklapi.register # noqa\nfrom test_utils import TARGET_OPSET\n\n\nclass OrdinalWOETransformer(BaseEstimator, TransformerMixin):\n def __init__(self):\n TransformerMixin.__init__(self)\n BaseEstimator.__init__(self)\n\n def fit(self, X, y, sample_weight=None):\n self.encoder_ = OrdinalEncoder().fit(X)\n tr = self.encoder_.transform(X)\n maxi = (tr.max(axis=1) + 1).astype(np.int64)\n intervals = [[(i-1, i, False, True) for i in range(0, m)]\n for m in maxi]\n weights = [[10 * j + i for i in range(len(inter))]\n for j, inter in enumerate(intervals)]\n self.woe_ = WOETransformer(intervals, onehot=False, weights=weights)\n self.woe_.fit(tr)\n return self\n\n def transform(self, X):\n tr = self.encoder_.transform(X)\n return self.woe_.transform(tr)\n\n\ndef ordwoe_encoder_parser(\n scope, model, inputs, custom_parsers=None):\n if len(inputs) != 1:\n raise RuntimeError(\n \"Unexpected number of inputs: %d != 1.\" % len(inputs))\n if inputs[0].type is None:\n raise RuntimeError(\n \"Unexpected type: %r.\" % (inputs[0], ))\n alias = get_model_alias(type(model))\n this_operator = scope.declare_local_operator(alias, model)\n this_operator.inputs.append(inputs[0])\n this_operator.outputs.append(\n scope.declare_local_variable('catwoe', FloatTensorType()))\n return this_operator.outputs\n\n\ndef ordwoe_encoder_shape_calculator(operator):\n check_input_and_output_numbers(\n operator, input_count_range=1, output_count_range=1)\n input_dim = operator.inputs[0].get_first_dimension()\n shape = operator.inputs[0].type.shape\n second_dim = None if len(shape) != 2 else shape[1]\n output_type = FloatTensorType([input_dim, second_dim])\n operator.outputs[0].type = output_type\n\n\ndef ordwoe_encoder_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n X = operator.inputs[0]\n\n sub = OnnxSubEstimator(op.encoder_, X, op_version=opv)\n cast = OnnxCast(sub, op_version=opv, to=np.float32)\n cat = OnnxSubEstimator(op.woe_, cast, op_version=opv,\n input_types=[Int64TensorType()])\n idcat = OnnxIdentity(cat, output_names=operator.outputs[:1],\n op_version=opv)\n idcat.add_to(scope, container)\n\n\nclass TestCustomTransformerOrdWOE(unittest.TestCase):\n\n def test_pipeline(self):\n data = load_iris()\n X = data.data.astype(np.float32)\n pipe = make_pipeline(StandardScaler(), MaxAbsScaler())\n pipe.fit(X)\n expected = pipe.transform(X)\n onx = to_onnx(pipe, X, target_opset=TARGET_OPSET)\n sess = InferenceSession(onx.SerializeToString())\n got = sess.run(None, {'X': X})[0]\n assert_almost_equal(expected, got)\n\n @unittest.skipIf(TARGET_OPSET < 12, reason=\"opset>=12 is required\")\n def test_custom_ordinal_woe(self):\n\n 
update_registered_converter(\n OrdinalWOETransformer, \"OrdinalWOETransformer\",\n ordwoe_encoder_shape_calculator,\n ordwoe_encoder_converter,\n parser=ordwoe_encoder_parser)\n\n data = load_iris()\n X, y = data.data, data.target\n X = X.astype(np.int64)[:, :2]\n y = (y == 2).astype(np.int64)\n\n ordwoe = OrdinalWOETransformer()\n ordwoe.fit(X, y)\n expected = ordwoe.transform(X)\n\n onx = to_onnx(ordwoe, X, target_opset=TARGET_OPSET)\n sess = InferenceSession(onx.SerializeToString())\n got = sess.run(None, {'X': X})[0]\n assert_almost_equal(expected, got)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# SPDX-License-Identifier: Apache-2.0\n\n\nimport numpy as np\nfrom ..proto import onnx_proto\nfrom ..common._apply_operation import apply_cast, apply_reshape\nfrom ..common._registration import register_converter\nfrom ..common._topology import Scope, Operator\nfrom ..common._container import ModelComponentContainer\n\n\ndef convert_sklearn_label_binariser(scope: Scope, operator: Operator,\n container: ModelComponentContainer):\n \"\"\"Converts Scikit Label Binariser model to onnx format.\"\"\"\n binariser_op = operator.raw_operator\n classes = binariser_op.classes_\n if (hasattr(binariser_op, 'sparse_input_') and\n binariser_op.sparse_input_):\n raise RuntimeError(\"sparse is not supported for LabelBinarizer.\")\n if (hasattr(binariser_op, 'y_type_') and\n binariser_op.y_type_ == \"multilabel-indicator\"):\n if binariser_op.pos_label != 1:\n raise RuntimeError(\"pos_label != 1 is not supported \"\n \"for LabelBinarizer.\")\n if list(classes) != list(range(len(classes))):\n raise RuntimeError(\"classes != [0, 1, ..., n_classes] is not \"\n \"supported for LabelBinarizer.\")\n container.add_node('Identity', operator.inputs[0].full_name,\n operator.output_full_names,\n name=scope.get_unique_operator_name('identity'))\n else:\n zeros_tensor = np.full((1, len(classes)),\n binariser_op.neg_label, dtype=np.float)\n unit_tensor = np.full((1, len(classes)),\n binariser_op.pos_label, dtype=np.float)\n\n classes_tensor_name = scope.get_unique_variable_name('classes_tensor')\n equal_condition_tensor_name = scope.get_unique_variable_name(\n 'equal_condition_tensor')\n zeros_tensor_name = scope.get_unique_variable_name('zero_tensor')\n unit_tensor_name = scope.get_unique_variable_name('unit_tensor')\n where_result_name = scope.get_unique_variable_name('where_result')\n\n class_dtype = onnx_proto.TensorProto.STRING\n if np.issubdtype(binariser_op.classes_.dtype, np.signedinteger):\n class_dtype = onnx_proto.TensorProto.INT64\n else:\n classes = np.array([s.encode('utf-8') for s in classes])\n\n container.add_initializer(classes_tensor_name, class_dtype,\n [len(classes)], classes)\n container.add_initializer(\n zeros_tensor_name, onnx_proto.TensorProto.FLOAT,\n zeros_tensor.shape, zeros_tensor.ravel())\n container.add_initializer(\n unit_tensor_name, onnx_proto.TensorProto.FLOAT,\n unit_tensor.shape, unit_tensor.ravel())\n\n reshaped_input_name = scope.get_unique_variable_name('reshaped_input')\n apply_reshape(scope, operator.inputs[0].full_name, reshaped_input_name,\n container, desired_shape=[-1, 1])\n\n # Models with classes_/inputs of string type would fail in the\n # following step as Equal op does not support string comparison.\n container.add_node('Equal', [classes_tensor_name, reshaped_input_name],\n equal_condition_tensor_name,\n name=scope.get_unique_operator_name('equal'))\n container.add_node(\n 'Where',\n [equal_condition_tensor_name, unit_tensor_name, zeros_tensor_name],\n where_result_name,\n name=scope.get_unique_operator_name('where'))\n where_res = where_result_name\n\n if len(binariser_op.classes_) == 2:\n array_f_name = scope.get_unique_variable_name(\n 'array_feature_extractor_result')\n pos_class_index_name = scope.get_unique_variable_name(\n 'pos_class_index')\n\n container.add_initializer(\n pos_class_index_name, onnx_proto.TensorProto.INT64, [], [1])\n\n container.add_node(\n 'ArrayFeatureExtractor',\n [where_result_name, pos_class_index_name],\n array_f_name, op_domain='ai.onnx.ml',\n name=scope.get_unique_operator_name('ArrayFeatureExtractor'))\n where_res = array_f_name\n 
apply_cast(scope, where_res, operator.output_full_names, container,\n to=onnx_proto.TensorProto.INT64)\n\n\nregister_converter('SklearnLabelBinarizer', convert_sklearn_label_binariser)\n",
"# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\n.. _l-plot-custom-converter-wrapper:\n\nImplement a new converter using other converters\n================================================\n\n.. index:: custom converter\n\nIn many cases, a custom models leverages existing models\nwhich already have an associated converter. To convert this\npatchwork, existing converters must be called. This example\nshows how to do that. Example :ref:`l-plot-custom-converter`\ncan be rewritten by using a `PCA <https://scikit-learn.org/\nstable/modules/generated/sklearn.decomposition.PCA.html>`_.\nWe could then reuse the converter associated to this model.\n\n.. contents::\n :local:\n\nCustom model\n++++++++++++\n\nLet's implement a simple custom model using\n:epkg:`scikit-learn` API. The model is preprocessing\nwhich decorrelates correlated random variables.\nIf *X* is a matrix of features, :math:`V=\\\\frac{1}{n}X'X`\nis the covariance matrix. We compute :math:`X V^{1/2}`.\n\"\"\"\nfrom mlprodict.onnxrt import OnnxInference\nfrom pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nimport pickle\nfrom io import BytesIO\nimport numpy\nfrom numpy.testing import assert_almost_equal\nfrom onnxruntime import InferenceSession\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.datasets import load_iris\nfrom sklearn.decomposition import PCA\nfrom skl2onnx import update_registered_converter\nfrom skl2onnx.algebra.onnx_operator import OnnxSubEstimator\nfrom skl2onnx import to_onnx\n\n\nclass DecorrelateTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Decorrelates correlated gaussian features.\n\n :param alpha: avoids non inversible matrices\n by adding *alpha* identity matrix\n\n *Attributes*\n\n * `self.mean_`: average\n * `self.coef_`: square root of the coveriance matrix\n \"\"\"\n\n def __init__(self, alpha=0.):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.alpha = alpha\n\n def fit(self, X, y=None, sample_weights=None):\n self.pca_ = PCA(X.shape[1])\n self.pca_.fit(X)\n return self\n\n def transform(self, X):\n return self.pca_.transform(X)\n\n\ndef test_decorrelate_transformer():\n data = load_iris()\n X = data.data\n\n dec = DecorrelateTransformer()\n dec.fit(X)\n pred = dec.transform(X)\n cov = pred.T @ pred\n for i in range(cov.shape[0]):\n cov[i, i] = 1.\n assert_almost_equal(numpy.identity(4), cov)\n\n st = BytesIO()\n pickle.dump(dec, st)\n dec2 = pickle.load(BytesIO(st.getvalue()))\n assert_almost_equal(dec.transform(X), dec2.transform(X))\n\n\ntest_decorrelate_transformer()\n\ndata = load_iris()\nX = data.data\n\ndec = DecorrelateTransformer()\ndec.fit(X)\npred = dec.transform(X[:5])\nprint(pred)\n\n\n############################################\n# Conversion into ONNX\n# ++++++++++++++++++++\n#\n# Let's try to convert it and see what happens.\n\n\ntry:\n to_onnx(dec, X.astype(numpy.float32))\nexcept Exception as e:\n print(e)\n\n############################\n# This error means there is no converter associated\n# to *DecorrelateTransformer*. Let's do it.\n# It requires to implement the two following\n# functions, a shape calculator and a converter\n# with the same signature as below.\n# First the shape calculator. 
We retrieve the input type\n# and tell that the output has the same type,\n# the same number of rows and a specific number of columns.\n\n\ndef decorrelate_transformer_shape_calculator(operator):\n op = operator.raw_operator\n input_type = operator.inputs[0].type.__class__\n input_dim = operator.inputs[0].type.shape[0]\n output_type = input_type([input_dim, op.pca_.components_.shape[1]])\n operator.outputs[0].type = output_type\n\n\n###################################\n# The converter. One thing we need to pay attention to\n# is the target opset. This information is important\n# to make sure that every node is defined following the\n# specifications of that opset.\n\n\ndef decorrelate_transformer_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n out = operator.outputs\n\n # We retrieve the unique input.\n X = operator.inputs[0]\n\n # We tell in ONNX language how to compute the unique output.\n # op_version=opv tells which opset is requested\n Y = OnnxSubEstimator(op.pca_, X, op_version=opv, output_names=out[:1])\n Y.add_to(scope, container)\n\n\n##########################################\n# We need to let *skl2onnx* know about the new converter.\n\n\nupdate_registered_converter(\n DecorrelateTransformer, \"SklearnDecorrelateTransformer\",\n decorrelate_transformer_shape_calculator,\n decorrelate_transformer_converter)\n\n\nonx = to_onnx(dec, X.astype(numpy.float32))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float32))\ngot = sess.run(None, {'X': X.astype(numpy.float32)})[0]\n\n\ndef diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(exp, got))\n\n#####################################\n# Let's check it works as well with double.\n\nonx = to_onnx(dec, X.astype(numpy.float64))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float64))\ngot = sess.run(None, {'X': X.astype(numpy.float64)})[0]\nprint(diff(exp, got))\n\n#############################################\n# The differences are smaller with double as expected.\n\n\n#############################\n# Final graph\n# +++++++++++\n\noinf = OnnxInference(onx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\n",
"# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport numpy\nfrom pandas import read_csv\nimport matplotlib.pyplot as plt\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef autolabel(ax, rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('%1.1fx' % height,\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom',\n fontsize=8)\n\n\ndef linear_models():\n filename1 = os.path.join(HERE, 'bench_plot_onnxruntime_linreg.csv')\n filename2 = os.path.join(HERE, 'bench_plot_onnxruntime_logreg.csv')\n if not os.path.exists(filename1) or not os.path.exists(filename2):\n return\n dfr = read_csv(filename1)\n dfr[\"speedup\"] = dfr[\"time_skl\"] / dfr[\"time_ort\"]\n dfc = read_csv(filename2)\n dfc = dfc[(dfc.method == \"predict_proba\") & dfc.fit_intercept]\n dfc[\"speedup\"] = dfc[\"time_skl\"] / dfc[\"time_ort\"]\n\n nfeats = [10, 50]\n fig, axs = plt.subplots(1, len(nfeats) * 2, figsize=(14, 4), sharey=True)\n\n names = [\"LinearRegression\", \"LogisticRegression\"]\n pos = 0\n for name, df in zip(names, [dfr, dfc]):\n for nf in nfeats:\n ax = axs[pos]\n sub = df[df.nfeat == nf]\n labels = sub.n_obs\n means = sub.speedup\n\n x = numpy.arange(len(labels))\n width = 0.90\n\n rects1 = ax.bar(x, means, width, label='Speedup')\n\n if pos == 0:\n ax.set_ylabel('Speedup')\n ax.set_title('%s %d features' % (name, nf))\n ax.set_xlabel('batch size')\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n autolabel(ax, rects1)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(8)\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(8)\n pos += 1\n\n fig.tight_layout()\n fig.savefig(\"linear_model.png\", dpi=1000)\n\n\ndef svm_models():\n filename = os.path.join(HERE, 'bench_plot_onnxruntime_svm_reg.csv')\n if not os.path.exists(filename):\n return\n dfr = read_csv(filename)\n dfr[\"speedup\"] = dfr[\"time_skl\"] / dfr[\"time_ort\"]\n print(dfr.tail())\n\n ncols = len(set(dfr['kernel']))\n fig, axs = plt.subplots(1, ncols, figsize=(14, 4), sharey=True)\n\n name = \"SVR\"\n nf = 50\n pos = 0\n for kernel in sorted(set(dfr['kernel'])):\n sub = dfr[(dfr.kernel == kernel) & (dfr.nfeat == nf)]\n ax = axs[pos]\n labels = sub.n_obs\n means = sub.speedup\n\n x = numpy.arange(len(labels))\n width = 0.90\n\n rects1 = ax.bar(x, means, width, label='Speedup')\n\n if pos == 0:\n ax.set_ylabel('Speedup')\n ax.set_title('%s %s - %d features' % (name, kernel, nf))\n ax.set_xlabel('batch size')\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n autolabel(ax, rects1)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(8)\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(8)\n pos += 1\n\n fig.tight_layout()\n fig.savefig(\"svm_model.png\", dpi=1000)\n\n\ndef rf_models():\n filename = os.path.join(\n HERE, 'bench_plot_onnxruntime_random_forest_reg.csv')\n if not os.path.exists(filename):\n return\n dfr = read_csv(filename)\n dfr[\"speedup\"] = dfr[\"time_skl\"] / dfr[\"time_ort\"]\n print(dfr.tail().T)\n\n ncols = 4\n fig, axs = plt.subplots(1, ncols, figsize=(14, 4), sharey=True)\n\n name = \"RandomForestRegressor\"\n pos = 0\n for max_depth in [10]:\n for nf in [30, 100]:\n for est in [100, 200]:\n for n_jobs in [4]:\n sub = dfr[\n (dfr.max_depth == max_depth) & (dfr.nfeat == nf) &\n (dfr.n_estimators == est) & (dfr.n_jobs == n_jobs)]\n ax = axs[pos]\n labels = sub.n_obs\n means = sub.speedup\n\n x = numpy.arange(len(labels))\n width = 
0.90\n\n rects1 = ax.bar(x, means, width, label='Speedup')\n if pos == 0:\n ax.set_yscale('log')\n ax.set_ylim([0.1, max(dfr['speedup'])])\n\n if pos == 0:\n ax.set_ylabel('Speedup')\n ax.set_title(\n '%s\\ndepth %d - %d features\\n %d estimators %d jobs'\n '' % (name, max_depth, nf, est, n_jobs))\n ax.set_xlabel('batch size')\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n autolabel(ax, rects1)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(8)\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(8)\n pos += 1\n\n fig.tight_layout()\n fig.savefig(\"rf_model.png\", dpi=1000)\n\n\nif __name__ == \"__main__\":\n linear_models()\n svm_models()\n rf_models()\n # plt.show()\n",
"# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\n.. _example-pyod-iforest:\n\nConverter for pyod.models.iforest.IForest\n=========================================\n\n.. index:: pyod, iforest\n\nThis example answers issues `685\n<https://github.com/onnx/sklearn-onnx/issues/685>`_.\nIt implements a custom converter for model `pyod.models.iforest.IForest\n<https://pyod.readthedocs.io/en/latest/\npyod.models.html#module-pyod.models.iforest>`_.\nThis example uses :ref:`l-plot-custom-converter` as a start.\n\n.. contents::\n :local:\n\nTrains a model\n++++++++++++++\n\nAll imports. It also registered onnx converters for :epgk:`xgboost`\nand *lightgbm*.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom onnxruntime import InferenceSession\nfrom sklearn.preprocessing import MinMaxScaler\nfrom skl2onnx.proto import onnx_proto\nfrom skl2onnx.common.data_types import (\n FloatTensorType, Int64TensorType, guess_numpy_type)\nfrom skl2onnx import to_onnx, update_registered_converter, get_model_alias\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxIdentity, OnnxMul, OnnxLess, OnnxConcat, OnnxCast, OnnxAdd,\n OnnxClip)\nfrom skl2onnx.algebra.onnx_operator import OnnxSubEstimator\ntry:\n from pyod.models.iforest import IForest\nexcept (ValueError, ImportError) as e:\n print(\"Unable to import pyod:\", e)\n IForest = None\n\nif IForest is not None:\n data1 = {'First': [500, 500, 400, 100, 200, 300, 100],\n 'Second': ['a', 'b', 'a', 'b', 'a', 'b', 'c']}\n\n df1 = pd.DataFrame(data1, columns=['First', 'Second'])\n dumdf1 = pd.get_dummies(df1)\n scaler = MinMaxScaler()\n scaler.partial_fit(dumdf1)\n sc_data = scaler.transform(dumdf1)\n model1 = IForest(n_estimators=10, bootstrap=True, behaviour='new',\n contamination=0.1, random_state=np.random.RandomState(42),\n verbose=1, n_jobs=-1).fit(sc_data)\n feature_names2 = dumdf1.columns\n\n initial_type = [('float_input',\n FloatTensorType([None, len(feature_names2)]))]\n\n\n#############################################\n# We check that the conversion fails as expected.\n\nif IForest is not None:\n try:\n to_onnx(model1, initial_types=initial_type)\n except Exception as e:\n print(e)\n\n\n####################################################\n# Custom converter\n# ++++++++++++++++\n#\n# First the parser and the shape calculator.\n# The parser defines the number of outputs and their type.\n# The shape calculator defines their dimensions.\n\ndef pyod_iforest_parser(scope, model, inputs, custom_parsers=None):\n alias = get_model_alias(type(model))\n this_operator = scope.declare_local_operator(alias, model)\n\n # inputs\n this_operator.inputs.append(inputs[0])\n\n # outputs\n cls_type = inputs[0].type.__class__\n val_y1 = scope.declare_local_variable('label', Int64TensorType())\n val_y2 = scope.declare_local_variable('probability', cls_type())\n this_operator.outputs.append(val_y1)\n this_operator.outputs.append(val_y2)\n\n # end\n return this_operator.outputs\n\n\ndef pyod_iforest_shape_calculator(operator):\n N = operator.inputs[0].get_first_dimension()\n operator.outputs[0].type.shape = [N, 1]\n operator.outputs[1].type.shape = [N, 2]\n\n############################################\n# Then the converter.\n\n\ndef pyod_iforest_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n out = operator.outputs\n\n # We retrieve the unique input.\n X = operator.inputs[0]\n\n # In most case, computation happen in floats.\n # But it might be with double. 
ONNX is very strict\n # about types, every constant should have the same\n # type as the input.\n dtype = guess_numpy_type(X.type)\n\n detector = op.detector_ # Should be IForest from scikit-learn.\n lab_pred = OnnxSubEstimator(detector, X, op_version=opv)\n scores = OnnxIdentity(lab_pred[1], op_version=opv)\n\n # labels\n threshold = op.threshold_\n above = OnnxLess(scores, np.array([threshold], dtype=dtype),\n op_version=opv)\n labels = OnnxCast(above, op_version=opv, to=onnx_proto.TensorProto.INT64,\n output_names=out[:1])\n\n # probabilities\n train_scores = op.decision_scores_\n scaler = MinMaxScaler().fit(train_scores.reshape(-1, 1))\n scores_ = OnnxMul(scores, np.array([-1], dtype=dtype),\n op_version=opv)\n print(scaler.min_)\n print(scaler.scale_)\n\n scaled = OnnxMul(scores_, scaler.scale_.astype(dtype), op_version=opv)\n scaled_centered = OnnxAdd(scaled, scaler.min_.astype(dtype),\n op_version=opv)\n clipped = OnnxClip(scaled_centered, np.array([0], dtype=dtype),\n np.array([1], dtype=dtype),\n op_version=opv)\n clipped_ = OnnxAdd(\n OnnxMul(clipped, np.array([-1], dtype=dtype),\n op_version=opv),\n np.array([1], dtype=dtype),\n op_version=opv)\n\n scores_2d = OnnxConcat(clipped_, clipped, axis=1, op_version=opv,\n output_names=out[1:])\n\n labels.add_to(scope, container)\n scores_2d.add_to(scope, container)\n\n########################################\n# Finally the registration.\n\n\nif IForest is not None:\n update_registered_converter(\n IForest, \"PyodIForest\",\n pyod_iforest_shape_calculator,\n pyod_iforest_converter,\n parser=pyod_iforest_parser)\n\n#############################################\n# And the conversion.\n\nif IForest is not None:\n onx = to_onnx(model1, initial_types=initial_type,\n target_opset={'': 14, 'ai.onnx.ml': 2})\n\n###############################################\n# Checking discrepancies\n# ++++++++++++++++++++++\n\nif IForest is not None:\n data = sc_data.astype(np.float32)\n\n expected_labels = model1.predict(data)\n expected_proba = model1.predict_proba(data)\n\n sess = InferenceSession(onx.SerializeToString())\n res = sess.run(None, {'float_input': data})\n\n onx_labels = res[0]\n onx_proba = res[1]\n\n diff_labels = np.abs(onx_labels.ravel() - expected_labels.ravel()).max()\n diff_proba = np.abs(onx_proba.ravel() - expected_proba.ravel()).max()\n\n print(\"discrepancies:\", diff_labels, diff_proba)\n\n print(\"ONNX labels\", onx_labels)\n print(\"ONNX probabilities\", onx_proba)\n",
"# SPDX-License-Identifier: Apache-2.0\n\nimport warnings\nimport numpy as np\nfrom onnx import TensorProto\nfrom ..common._registration import register_converter\nfrom ..common.data_types import (\n BooleanTensorType, Int64TensorType,\n guess_numpy_type, guess_proto_type)\nfrom ..algebra.onnx_ops import (\n OnnxCast, OnnxLess, OnnxMul, OnnxAdd, OnnxDiv,\n OnnxGather, OnnxReduceMean, OnnxMax, OnnxSqueezeApi11)\nfrom .nearest_neighbours import onnx_nearest_neighbors_indices_k\n\n\ndef convert_sklearn_local_outlier_factor(\n scope, operator, container, op_type='TreeEnsembleRegressor',\n op_domain='ai.onnx.ml', op_version=1):\n op = operator.raw_operator\n if not op.novelty:\n raise RuntimeError(\n \"The converter only converts the model %r is novelty is True.\"\n \"\" % type(op))\n outputs = operator.outputs\n opv = container.target_opset\n options = container.get_options(\n op, dict(score_samples=None, optim=None))\n\n X = operator.inputs[0]\n dtype = guess_numpy_type(operator.inputs[0].type)\n proto_dtype = guess_proto_type(operator.inputs[0].type)\n if type(operator.inputs[0].type) in (BooleanTensorType, Int64TensorType):\n X = OnnxCast(X, to=proto_dtype, op_version=opv)\n\n metric = (op.effective_metric_ if hasattr(op, 'effective_metric_') else\n op.metric)\n neighb = op._fit_X.astype(dtype)\n k = op.n_neighbors_\n kwargs = {}\n if op.p != 2:\n if options['optim'] == 'cdist':\n warnings.warn(\n \"Option p=%r may not be compatible with the runtime. \"\n \"See https://github.com/microsoft/onnxruntime/blob/master/\"\n \"docs/ContribOperators.md#com.microsoft.CDist.\")\n kwargs['p'] = op.p\n\n top_k, dist = onnx_nearest_neighbors_indices_k(\n X, neighb, k, metric, dtype=dtype,\n op_version=opv, keep_distances=True,\n optim=options.get('optim', None),\n **kwargs)\n\n # dist_k = self._distances_fit_X_[neighbors_indices, self.n_neighbors_ - 1]\n # reach_dist_array = np.maximum(distances_X, dist_k)\n dist_k_ = OnnxGather(op._distances_fit_X_.astype(dtype),\n top_k, op_version=opv)\n dist_k = OnnxSqueezeApi11(\n OnnxGather(dist_k_, np.array([op.n_neighbors_ - 1],\n dtype=np.int64),\n axis=2, op_version=opv),\n axes=[2], op_version=opv)\n dist_k.set_onnx_name_prefix('dist_k')\n reach_dist_array = OnnxMax(\n OnnxMul(dist, np.array([-1], dtype=dtype), op_version=opv),\n dist_k, op_version=opv)\n\n # X_lrd= return 1.0 / (np.mean(reach_dist_array, axis=1) + 1e-10)\n X_lrd = OnnxDiv(\n np.array([1], dtype=dtype),\n OnnxAdd(\n OnnxReduceMean(reach_dist_array, axes=[1],\n op_version=opv, keepdims=1),\n np.array([1e-10], dtype=dtype), op_version=opv),\n op_version=opv)\n X_lrd.set_onnx_name_prefix('X_lrd')\n\n # lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]\n lrd_ratios_array = OnnxDiv(\n OnnxGather(op._lrd.astype(dtype), top_k, op_version=opv),\n X_lrd, op_version=opv)\n lrd_ratios_array.set_onnx_name_prefix('lrd_ratios_array')\n\n # -np.mean(lrd_ratios_array, axis=1)\n if options['score_samples']:\n output_names_score_samples = [outputs[2]]\n else:\n output_names_score_samples = None\n score_samples = OnnxReduceMean(\n lrd_ratios_array, axes=[1], op_version=opv)\n score_samples.set_onnx_name_prefix('score_samples')\n score_samples_neg = OnnxMul(\n score_samples, np.array([-1], dtype=dtype), op_version=opv,\n output_names=output_names_score_samples)\n final = OnnxAdd(score_samples_neg, np.array([-op.offset_], dtype=dtype),\n op_version=opv, output_names=[outputs[1]])\n\n # labels\n # is_inlier = np.ones(X.shape[0], dtype=int)\n # is_inlier[self.decision_function(X) < 0] = 
-1\n\n predict = OnnxAdd(\n OnnxMul(\n OnnxCast(\n OnnxLess(final, np.array([0], dtype=dtype), op_version=opv),\n to=TensorProto.INT64, op_version=opv),\n np.array([-2], dtype=np.int64), op_version=opv),\n np.array([1], dtype=np.int64), op_version=opv,\n output_names=outputs[0].full_name)\n predict.set_onnx_name_prefix('predict')\n\n predict.add_to(scope, container)\n final.add_to(scope, container)\n if options['score_samples']:\n score_samples_neg.add_to(scope, container)\n\n\nregister_converter('SklearnLocalOutlierFactor',\n convert_sklearn_local_outlier_factor,\n options={'score_samples': [True, False],\n 'optim': [None, 'cdist']})\n",
"# SPDX-License-Identifier: Apache-2.0\n\nimport unittest\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\nimport onnx\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.cluster import KMeans\nfrom sklearn.pipeline import make_pipeline\nfrom onnxruntime import InferenceSession\nfrom skl2onnx import convert_sklearn, to_onnx, wrap_as_onnx_mixin\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx.algebra.onnx_ops import OnnxSub, OnnxDiv, OnnxClip, OnnxClip_6\nfrom skl2onnx.algebra.onnx_operator_mixin import OnnxOperatorMixin\nfrom test_utils import dump_data_and_model, TARGET_OPSET\n\n\nclass CustomOpTransformer(BaseEstimator, TransformerMixin,\n OnnxOperatorMixin):\n\n def __init__(self, op_version=TARGET_OPSET):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.op_version = op_version\n\n def fit(self, X, y=None):\n self.W_ = np.mean(X, axis=0)\n self.S_ = np.std(X, axis=0)\n return self\n\n def transform(self, X):\n return (X - self.W_) / self.S_\n\n def onnx_shape_calculator(self):\n def shape_calculator(operator):\n operator.outputs[0].type = operator.inputs[0].type\n return shape_calculator\n\n def to_onnx_operator(self, inputs=None, outputs=('Y', ),\n target_opset=None, **kwargs):\n if inputs is None:\n raise RuntimeError(\"inputs should contain one name\")\n i0 = self.get_inputs(inputs, 0)\n W = self.W_.astype(np.float32)\n S = self.S_.astype(np.float32)\n return OnnxDiv(\n OnnxSub(\n i0, W, op_version=self.op_version),\n S, output_names=outputs, op_version=self.op_version)\n\n\nclass TestOnnxOperatorMixinSyntax(unittest.TestCase):\n\n def test_way1_convert_sklearn(self):\n\n X = np.arange(20).reshape(10, 2)\n tr = KMeans(n_clusters=2)\n tr.fit(X)\n\n onx = convert_sklearn(\n tr, initial_types=[('X', FloatTensorType((None, X.shape[1])))],\n target_opset=TARGET_OPSET)\n if TARGET_OPSET == 11:\n sonx = str(onx)\n if \"version: 11\" not in sonx or \"ir_version: 6\" not in sonx:\n raise AssertionError(\"Issue with TARGET_OPSET: {}\\n{}\".format(\n TARGET_OPSET, sonx))\n dump_data_and_model(\n X.astype(np.float32), tr, onx,\n basename=\"MixinWay1ConvertSklearn\")\n\n def test_way2_to_onnx(self):\n\n X = np.arange(20).reshape(10, 2)\n tr = KMeans(n_clusters=2)\n tr.fit(X)\n\n onx = to_onnx(tr, X.astype(np.float32),\n target_opset=TARGET_OPSET)\n if TARGET_OPSET == 11:\n sonx = str(onx)\n if \"version: 11\" not in sonx or \"ir_version: 6\" not in sonx:\n raise AssertionError(\"Issue with TARGET_OPSET: {}\\n{}\".format(\n TARGET_OPSET, sonx))\n\n dump_data_and_model(\n X.astype(np.float32), tr, onx,\n basename=\"MixinWay2ToOnnx\")\n\n def test_way3_mixin(self):\n\n X = np.arange(20).reshape(10, 2)\n tr = KMeans(n_clusters=2)\n tr.fit(X)\n\n try:\n tr_mixin = wrap_as_onnx_mixin(tr, target_opset=TARGET_OPSET)\n except KeyError as e:\n assert (\"SklearnGaussianProcessRegressor\" in str(e) or\n \"SklearnGaussianProcessClassifier\" in str(e))\n return\n\n try:\n onx = tr_mixin.to_onnx()\n except RuntimeError as e:\n assert \"Method enumerate_initial_types\" in str(e)\n onx = tr_mixin.to_onnx(X.astype(np.float32))\n\n dump_data_and_model(\n X.astype(np.float32), tr, onx,\n basename=\"MixinWay3OnnxMixin\")\n\n def test_way4_mixin_fit(self):\n\n X = np.arange(20).reshape(10, 2)\n try:\n tr = wrap_as_onnx_mixin(KMeans(n_clusters=2),\n target_opset=TARGET_OPSET)\n except KeyError as e:\n assert (\"SklearnGaussianProcessRegressor\" in str(e) or\n \"SklearnGaussianProcessClassifier\" in str(e))\n return\n tr.fit(X)\n\n onx = 
tr.to_onnx(X.astype(np.float32))\n if TARGET_OPSET == 11:\n sonx = str(onx)\n if \"version: 11\" not in sonx or \"ir_version: 6\" not in sonx:\n raise AssertionError(\"Issue with TARGET_OPSET: {}\\n{}\".format(\n TARGET_OPSET, sonx))\n\n dump_data_and_model(\n X.astype(np.float32), tr, onx,\n basename=\"MixinWay4OnnxMixin2\")\n\n def test_pipe_way1_convert_sklearn(self):\n\n X = np.arange(20).reshape(10, 2)\n tr = make_pipeline(\n CustomOpTransformer(op_version=TARGET_OPSET),\n KMeans(n_clusters=2))\n tr.fit(X)\n\n onx = convert_sklearn(\n tr, initial_types=[('X', FloatTensorType((None, X.shape[1])))],\n target_opset=TARGET_OPSET)\n if TARGET_OPSET == 11:\n sonx = str(onx)\n if \"version: 11\" not in sonx or \"ir_version: 6\" not in sonx:\n raise AssertionError(\"Issue with TARGET_OPSET: {}\\n{}\".format(\n TARGET_OPSET, sonx))\n dump_data_and_model(\n X.astype(np.float32), tr, onx,\n basename=\"MixinPipeWay1ConvertSklearn\")\n\n def test_pipe_way2_to_onnx(self):\n\n X = np.arange(20).reshape(10, 2)\n tr = make_pipeline(\n CustomOpTransformer(op_version=TARGET_OPSET),\n KMeans(n_clusters=2))\n tr.fit(X)\n\n onx = to_onnx(tr, X.astype(np.float32), target_opset=TARGET_OPSET)\n if TARGET_OPSET == 11:\n sonx = str(onx)\n if \"version: 11\" not in sonx or \"ir_version: 6\" not in sonx:\n raise AssertionError(\"Issue with TARGET_OPSET: {}\\n{}\".format(\n TARGET_OPSET, sonx))\n\n dump_data_and_model(\n X.astype(np.float32), tr, onx,\n basename=\"MixinPipeWay2ToOnnx\")\n\n def test_pipe_way3_mixin(self):\n\n X = np.arange(20).reshape(10, 2)\n tr = make_pipeline(\n CustomOpTransformer(op_version=TARGET_OPSET),\n KMeans(n_clusters=2))\n tr.fit(X)\n\n try:\n tr_mixin = wrap_as_onnx_mixin(tr, target_opset=TARGET_OPSET)\n except KeyError as e:\n assert (\"SklearnGaussianProcessRegressor\" in str(e) or\n \"SklearnGaussianProcessClassifier\" in str(e))\n return\n\n try:\n onx = tr_mixin.to_onnx()\n except RuntimeError as e:\n assert \"Method enumerate_initial_types\" in str(e)\n onx = tr_mixin.to_onnx(X.astype(np.float32))\n if TARGET_OPSET == 11:\n sonx = str(onx)\n if \"version: 11\" not in sonx or \"ir_version: 6\" not in sonx:\n raise AssertionError(\"Issue with TARGET_OPSET: {}\\n{}\".format(\n TARGET_OPSET, sonx))\n\n dump_data_and_model(\n X.astype(np.float32), tr, onx,\n basename=\"MixinPipeWay3OnnxMixin\")\n\n def test_pipe_way4_mixin_fit(self):\n\n X = np.arange(20).reshape(10, 2)\n try:\n tr = wrap_as_onnx_mixin(\n make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2)),\n target_opset=TARGET_OPSET)\n except KeyError as e:\n assert (\"SklearnGaussianProcessRegressor\" in str(e) or\n \"SklearnGaussianProcessClassifier\" in str(e))\n return\n\n tr.fit(X)\n\n onx = tr.to_onnx(X.astype(np.float32))\n if TARGET_OPSET == 11:\n sonx = str(onx)\n if \"version: 11\" not in sonx or \"ir_version: 6\" not in sonx:\n raise AssertionError(\"Issue with TARGET_OPSET: {}\\n{}\".format(\n TARGET_OPSET, sonx))\n\n dump_data_and_model(\n X.astype(np.float32), tr, onx,\n basename=\"MixinPipeWay4OnnxMixin2\")\n\n def common_test_onnxt_runtime_unary(self, onnx_cl, np_fct,\n op_version=None, debug=False):\n onx = onnx_cl('X', output_names=['Y'])\n X = np.array([[1, 2], [3, -4]], dtype=np.float64)\n model_def = onx.to_onnx(\n {'X': X.astype(np.float32)}, target_opset=op_version)\n if debug:\n print(model_def)\n try:\n oinf = InferenceSession(model_def.SerializeToString())\n except RuntimeError as e:\n if (\"Could not find an implementation for the node \"\n \"Cl_Clip:Clip(11)\" in str(e)):\n # Not yet implemented 
in onnxruntime\n return\n raise e\n X = X.astype(np.float32)\n try:\n got = oinf.run(None, {'X': X})[0]\n except Exception as e:\n raise AssertionError(\n \"Cannot run model due to %r\\n%r\\n%s\" % (\n e, onx, str(model_def))) from e\n assert_almost_equal(np_fct(X), got, decimal=6)\n\n @unittest.skipIf(onnx.defs.onnx_opset_version() < 10, \"irrelevant\")\n def test_onnx_clip_10(self):\n with self.subTest(name=\"OnnxClip_6[1e-5, 1e5]\"):\n self.common_test_onnxt_runtime_unary(\n lambda x, output_names=None: OnnxClip_6(\n x, min=1e-5, max=1e5, output_names=output_names),\n lambda x: np.clip(x, 1e-5, 1e5),\n op_version=10)\n with self.subTest(name=\"OnnxClip-10[1e-5, 1e5]\"):\n self.common_test_onnxt_runtime_unary(\n lambda x, output_names=None: OnnxClip(\n x, min=1e-5, max=1e5, output_names=output_names,\n op_version=10),\n lambda x: np.clip(x, 1e-5, 1e5),\n op_version=10)\n with self.subTest(name=\"OnnxClip-10[-1e5, 1e-5]\"):\n self.common_test_onnxt_runtime_unary(\n lambda x, output_names=None: OnnxClip(\n x, max=1e-5, output_names=output_names,\n op_version=10),\n lambda x: np.clip(x, -1e5, 1e-5),\n op_version=10)\n with self.subTest(name=\"OnnxClip-10[0.1, 2.1]\"):\n self.common_test_onnxt_runtime_unary(\n lambda x, output_names=None: OnnxClip(\n x, min=0.1, max=2.1,\n output_names=output_names,\n op_version=10),\n lambda x: np.clip(x, 0.1, 2.1),\n op_version=10)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"sklearn.preprocessing.MaxAbsScaler",
"sklearn.datasets.load_iris",
"sklearn.base.TransformerMixin.__init__",
"numpy.testing.assert_almost_equal",
"sklearn.base.BaseEstimator.__init__",
"sklearn.preprocessing.OrdinalEncoder",
"sklearn.preprocessing.StandardScaler"
],
[
"numpy.issubdtype"
],
[
"numpy.abs",
"sklearn.datasets.load_iris",
"sklearn.base.TransformerMixin.__init__",
"sklearn.base.BaseEstimator.__init__",
"numpy.identity",
"sklearn.decomposition.PCA"
],
[
"pandas.read_csv",
"matplotlib.pyplot.subplots"
],
[
"pandas.DataFrame",
"numpy.array",
"numpy.random.RandomState",
"sklearn.preprocessing.MinMaxScaler",
"pandas.get_dummies"
],
[
"numpy.array"
],
[
"sklearn.cluster.KMeans",
"numpy.clip",
"numpy.arange",
"sklearn.base.TransformerMixin.__init__",
"numpy.std",
"sklearn.base.BaseEstimator.__init__",
"numpy.mean",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yz-cnsdqz/TemporalActionParsing-FineGrained
|
[
"c5bb289b9d51a47d617a49d60f9111eba6460a80"
] |
[
"code/utils_ablation_plot.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport os, os.path\n\nimport argparse\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--dataset', type=str, default='50', help='50 or gtea')\nparser.add_argument('--pooling', type=str, help='gaussian or binary')\nargs = parser.parse_args()\n\n# dataset = ['50', 'gtea'][1]\n# pooling=['RPGaussian', 'RPBinary'][2]\n\ndata_dir = '/is/ps2/yzhang/workspaces/TemporalActionParsing-FineGrained/code/RP_results'\n\npooling = 'RPBinary' if args.pooling == 'binary' else 'RPGaussian'\ndataset = '50' if args.dataset == '50' else 'gtea'\ndataset_fullname = '50Salads' if args.dataset == '50' else 'GTEA'\n\n## the original work\ndata_list = []\nfor dim in [1,2,4,8]:\n for comp in [1,2,4,8,16]:\n filename = os.path.join(data_dir, 'result_{}{}_dim{}_comp{}.txt'.format(dataset, pooling, dim, comp))\n data = np.loadtxt(filename, delimiter=',')\n run_info = np.repeat(np.array([[int(dim), int(comp)]]), 6, axis=0)\n data_list.append(np.concatenate([data, run_info], axis=1))\n\ndata_all = np.concatenate(data_list, axis=0)\ncolumns=['accuracy','edit score', 'f1 score', 'dimension', 'rank']\ndf = pd.DataFrame(data_all, columns=columns)\n\n## the results with stronger regularization (higher droput ratio + more iterations)\ndata_list_drop = []\nfor dim in [2,4,8]:\n for comp in [1,2,4,8,16]:\n \n if dim > 2:\n #filename = 'test_result_rebuttal_{}_{}_dim{}_comp{}_drp0.4_ep500.txt'.format(dataset_fullname, pooling, dim, comp)\n filename = 'test_result_rebuttal_{}_{}_dim{}_comp{}_dr0.5_ep500.txt'.format(dataset_fullname, pooling, dim, comp)\n else:\n filename = os.path.join(data_dir, 'result_{}{}_dim{}_comp{}.txt'.format(dataset, pooling, dim, comp))\n\n\n data = np.loadtxt(filename, delimiter=',')\n # data = np.expand_dims(data, axis=0)\n run_info = np.repeat(np.array([[int(dim), int(comp)]]), 6, axis=0)\n # run_info = np.array([[int(dim), int(comp)]])\n data_list_drop.append(np.concatenate([data, run_info], axis=1))\n\ndata_all_drop = np.concatenate(data_list_drop, axis=0)\ncolumns=['accuracy','edit score', 'f1 score', 'dimension', 'rank']\ndf_d = pd.DataFrame(data_all_drop, columns=columns)\n\n\n\nsns.set(style=\"whitegrid\")\n\n\nplt.figure()\nax = sns.lineplot(x=\"dimension\", y=\"accuracy\", #hue=\"rank\",\n marker=\"o\", \n legend=False, #err_style=None,\n palette=sns.color_palette('deep',n_colors=5),\n data=df)\n# ax.set(ylim=(60, 75))\n\nax = sns.lineplot(x=\"dimension\", y=\"accuracy\", #hue=\"rank\",\n marker=\"X\", linestyle='--', linewidth='5',\n legend=False, #err_style=None,\n palette=sns.color_palette('deep',n_colors=5),\n data=df_d)\n\nfor i in range(1,2):\n ax.lines[i].set_linestyle('--')\n\n# ax.set(ylim=(60.1, 65.5))\nax.grid(axis='x')\n\n\n\n\n\nplt.figure()\n\nax2 = sns.lineplot(x=\"dimension\", y=\"edit score\", #hue=\"rank\",\n marker=\"o\", \n legend=False, #err_style=None,\n palette=sns.color_palette('deep',n_colors=5),\n data=df)\n# ax.set(ylim=(60, 75))\n\nax2 = sns.lineplot(x=\"dimension\", y=\"edit score\", #hue=\"rank\",\n marker=\"X\", linestyle='--', linewidth='5',\n legend=False, #err_style=None,\n palette=sns.color_palette('deep',n_colors=5),\n data=df_d)\n\nfor i in range(1,2):\n ax2.lines[i].set_linestyle('--')\n\n# ax.set(ylim=(60.1, 65.5))\nax2.grid(axis='x')\n\n\nplt.figure()\n\nax3 = sns.lineplot(x=\"dimension\", y=\"f1 score\", #hue=\"rank\",\n marker=\"o\", \n legend=False, #err_style=None,\n palette=sns.color_palette('deep',n_colors=5),\n data=df)\n# ax.set(ylim=(60, 
75))\n\nax3 = sns.lineplot(x=\"dimension\", y=\"f1 score\", #hue=\"rank\",\n                  marker=\"X\", linestyle='--', linewidth='5',\n                  legend=False, #err_style=None,\n                  palette=sns.color_palette('deep',n_colors=5),\n                  data=df_d)\n\nfor i in range(1,2):\n    ax3.lines[i].set_linestyle('--')\n\n# ax.set(ylim=(60.1, 65.5))\nax3.grid(axis='x')\n\n\n\n# ax = sns.relplot(x=\"dimension\", y=\"edit score\", hue=\"rank\", #kind='line',\n#                   markers=True, dashes=True,legend=False,err_style='bars',palette=sns.color_palette('deep',n_colors=5),\n#                   data=df)\n# # ax.set(ylim=(60, 66))\n#\n# ax = sns.relplot(x=\"dimension\", y=\"f1 score\", hue=\"rank\", #kind='line',\n#                   markers=True, dashes=True,legend='full',err_style='bars',palette=sns.color_palette('deep',n_colors=5),\n#                   data=df)\n# # ax.set(ylim=(65, 72))\n\n\n# save before showing: after a blocking plt.show() returns, savefig would write an empty figure\nplt.savefig('result.pdf', format='pdf')\nplt.show()\n\n\n\n\n"
] |
[
[
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"numpy.concatenate",
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
JohannesTheo/SurvivalBox
|
[
"e4d4c754bdc28961dc5e8fa5ce74eb1875f043e3"
] |
[
"survivalbox/game_objects.py"
] |
[
"__author__ = 'Johannes Theodoridis'\n\n# standard imports\nimport os\nimport copy\n\n# third party imports\nimport numpy as np\nimport pygame\nfrom pygame import K_UP, K_DOWN, K_LEFT, K_RIGHT, K_COMMA, K_PERIOD, K_F15\n\n# local imports\nfrom . import map\nfrom . import utils\n\nMANUAL=False\n#RANDOM=False\nRANDOM_NPC=False\n\n# orientation\nUP = 0\nRIGHT = 1\nDOWN = 2\nLEFT = 3\n# for debugging and information\nORIENTATION_STRING_MAP = {UP:'UP', RIGHT:'RIGHT', DOWN:'DOWN', LEFT:'LEFT'}\n\n# basic movement depending on orientation\nMOVE_FORWARD = {UP: [ 0,-1, 0], DOWN: [ 0, 1, 0], LEFT: [-1, 0, 0], RIGHT: [ 1, 0, 0]}\nMOVE_BACKWARD = {UP: [ 0, 1, 0], DOWN: [ 0,-1, 0], LEFT: [ 1, 0, 0], RIGHT: [-1, 0, 0]}\nMOVE_LEFT = {UP: [-1, 0, 0], DOWN: [ 1, 0, 0], LEFT: [ 0, 1, 0], RIGHT: [ 0,-1, 0]}\nMOVE_RIGHT = {UP: [ 1, 0, 0], DOWN: [-1, 0, 0], LEFT: [ 0,-1, 0], RIGHT: [ 0, 1, 0]}\nNOOP = {UP: [ 0, 0, 0], DOWN: [ 0, 0, 0], LEFT: [ 0, 0, 0], RIGHT: [ 0, 0, 0]}\n\n# Define additional Survivor specific movement\nAGENT_TURN_LEFT = {UP: [ 0, 0,-1], DOWN: [ 0, 0,-1], LEFT: [ 0, 0,-1], RIGHT: [ 0, 0,-1]}\nAGENT_TURN_RIGHT = {UP: [ 0, 0, 1], DOWN: [ 0, 0, 1], LEFT: [ 0, 0, 1], RIGHT: [ 0, 0, 1]}\n\n# Define additional Animal specific movement\n#ANIMAL_TURN_LEFT = {UP: [-1, 1,-1], DOWN: [ 0, 0,-1], LEFT: [ 1, 0,-1], RIGHT: [ 0,-1,-1]}\n#ANIMAL_TURN_RIGHT = {UP: [ 0, 1, 1], DOWN: [-1, 0, 1], LEFT: [ 1,-1, 1], RIGHT: [ 0, 0, 1]}\nANIMAL_TURN_LEFT = {UP: [-1, 0,-1], DOWN: [ 0, 1,-1], LEFT: [ 0, 0,-1], RIGHT: [ 1,-1,-1]}\nANIMAL_TURN_RIGHT = {UP: [ 0, 0, 1], DOWN: [-1, 1, 1], LEFT: [ 0,-1, 1], RIGHT: [ 1, 0, 1]}\nANIMAL_TURN_FULL = {UP: [ 0, 0, 2], DOWN: [ 0, 0, 2], LEFT: [ 0, 0, 2], RIGHT: [ 0, 0, 2]}\n\n# Constants for NPCs move mappings // Human Player uses the pygame constants for mapping\nFORWARD = 0\nTURN_L = 1\nTURN_R = 2\nTURN_F = 3\nSTAY = 4\n\n# Preload Images\n_DIR = os.path.dirname(os.path.abspath(__file__))\nFIRE_ON_SMALL = pygame.image.load(os.path.join(_DIR,'assets/fire_on_small.png' )) #.convert()\nFIRE_OFF_SMALL = pygame.image.load(os.path.join(_DIR,'assets/fire_off_small.png')) #.convert()\nFIRE_ON = pygame.image.load(os.path.join(_DIR,'assets/fire_on.png' )) #.convert()\nFIRE_OFF = pygame.image.load(os.path.join(_DIR,'assets/fire_off.png')) #.convert()\nSHEEP = pygame.image.load(os.path.join(_DIR,'assets/sheep.png')) #.convert()\nWOLF = pygame.image.load(os.path.join(_DIR,'assets/wolf.png')) #.convert()\n\nSURVIVOR_STATISTICS = {\n\n \"basics\" : {\n \"steps_alive\" : 0,\n \"steps_water\" : 0,\n \"steps_land\" : 0,\n \"collisions\" : 0\n },\n \"specialisation\" : {\n \"steps_as_fireguard\" : 0,\n \"steps_as_shepherd\" : 0,\n \"blocked_sheep\" : 0,\n \"hits_from_wolf\" : 0,\n \"catched_wolf\" : 0,\n \"energy_from_wolf\" : 0,\n \"collected_food\" : 0\n },\n \"rewards\" : {\n \"reward_from_fire\" : 0,\n \"reward_from_sheep\" : 0,\n \"reward_from_wolf\" : 0,\n \"reward_from_food\" : 0,\n \"reward_total\" : 0\n }\n }\n\nSHEEP_STATISTICS = {\n\n \"basics\" : {\n \"steps_total\" : 0,\n \"steps_slow\" : 0,\n \"steps_fast\" : 0,\n \"collisions\" : 0\n },\n \"specialisation\" : {\n \"collected_food\" : 0,\n \"catched_by_wolf\" : 0,\n \"steps_with_shepherd\" : 0,\n \"steps_without_shepherd\" : 0,\n \"shepherd_switches\" : 0\n }\n }\n\nWOLF_STATISTICS = {\n\n \"basics\" : {\n \"steps_total\" : 0,\n \"steps_slow\" : 0,\n \"steps_fast\" : 0,\n \"collisions\" : 0\n },\n \"specialisation\" : {\n \"steps_hunting\" : 0,\n \"catched_sheep\" : 0,\n \"catched_by_survivor\" : 0,\n \"attacked_survivor\" : 0\n }\n 
\n    }\n\nFIRE_STATISTICS = {\n\n    \"specialisation\" : {\n        \"steps_fire_on\"  : 0,\n        \"steps_fire_off\" : 0,\n        \"fire_switches\"  : 0\n        }\n    }\n\ndef create_marker_rect(Pos, TileSize, Offset, size_x=1, size_y=1):\n    '''\n    This method returns the grid point in front of the given position (depending on the orientation), as a scaled rectangle.\n    The returned rectangle can be used to draw an orientation marker on the map for instance.\n    '''\n    x = Pos[0] * TileSize + Offset\n    y = Pos[1] * TileSize + Offset\n    w = TileSize\n    h = TileSize\n    \n    marker = pygame.Rect(x,y,w,h)\n\n    if Pos[2] == UP:\n        marker.y = (Pos[1] - 1) * TileSize + Offset\n    elif Pos[2] == DOWN:\n        marker.y = (Pos[1] + size_y) * TileSize + Offset\n    elif Pos[2] == LEFT:\n        marker.x = (Pos[0] - 1) * TileSize + Offset\n    elif Pos[2] == RIGHT:\n        marker.x = (Pos[0] + size_y) * TileSize + Offset\n\n    return marker\n\nclass GameObject():\n\n    def __init__(self, ID, start_pos, tile_size, offset, grid_size, actions, base_image=None, view_port=None, statistics_dict={}):\n\n        # deep copies keep the per-instance counters independent of the shared templates\n        self.STATS = copy.deepcopy(statistics_dict)\n        self.Statistics = copy.deepcopy(self.STATS)\n\n        self.ID = ID\n        self.Pos = np.array(start_pos)\n        self.OldPos = self.Pos.copy()\n\n        self.GRID_W = grid_size[0] # grid width of the object in UP/DOWN position\n        self.GRID_H = grid_size[1] # grid height of the object in UP/DOWN position\n        self.GRID_MAX = max(self.GRID_W,self.GRID_H)\n        \n        self.Grid = self.update_collision_grid()\n        self.OldGrid = self.Grid\n        \n        self.TileSize = tile_size\n        self.Offset = offset\n\n        self.ACTIONS = actions\n\n        self.BASE_IMAGE = base_image\n        if self.BASE_IMAGE is not None:\n            self.IMAGE = pygame.transform.scale(self.BASE_IMAGE, (self.TileSize * self.GRID_W, self.TileSize * self.GRID_H))\n            self.image = self.IMAGE\n            self.rect = self.image.get_rect()\n            self.update_render_pos()\n\n        self.ViewPort = view_port\n        if view_port is None:\n            self.ViewPort = ViewPort(0,0,0,0)\n\n    def get_grid_pos(self):\n        return (self.Pos[0], self.Pos[1])\n\n    def get_grid_pos_old(self):\n        return (self.OldPos[0], self.OldPos[1])\n\n    def get_collision_grid(self):\n        return self.Grid\n\n    def get_collision_grid_old(self):\n        return self.OldGrid\n\n    def get_view(self):\n        return self.ViewPort.get_viewport(self.Pos, self.TileSize, self.Offset, self.GRID_W, self.GRID_H)\n\n    def get_view_scaled(self, tile_size, offset):\n        return self.ViewPort.get_viewport(self.Pos, tile_size, offset, self.GRID_W, self.GRID_H)\n\n    def get_view_grid(self):\n        return self.get_view_scaled(1, 0)\n\n    def get_marker(self):\n        return create_marker_rect(self.Pos, self.TileSize, self.Offset, self.GRID_W, self.GRID_H)\n\n    def get_marker_scaled(self, tile_size, offset):\n        return create_marker_rect(self.Pos, tile_size, offset, self.GRID_W, self.GRID_H)\n\n    def update_collision_grid(self):\n        return utils.grid_from_position(self.Pos, self.GRID_W, self.GRID_H)\n\n    def select_random_move(self, actions=[]):\n        return np.random.choice(actions)\n\n    def move(self, action):\n        self.OldPos = self.Pos.copy()\n        self.OldGrid = self.Grid\n\n        orientation = self.Pos[2]\n        self.Pos += self.ACTIONS[action][orientation]\n        self.Pos[2] %= 4 # clip orientation to (0..3)\n        self.Grid = self.update_collision_grid()\n\n    def set_back(self):\n        self.Pos = self.OldPos.copy()\n        self.Grid = self.OldGrid\n\n    def update_render_pos(self, rotate=False, tile_map=None, dead=False):\n        \n        if rotate:\n            self.image = pygame.transform.rotate(self.IMAGE, self.Pos[2] * -90)\n            self.rect = self.image.get_rect()\n\n        self.rect.x = self.Pos[0] * self. 
TileSize + self.Offset\n self.rect.y = self.Pos[1] * self. TileSize + self.Offset\n\n dirty_sprites = []\n if tile_map is not None:\n points = []\n\n if dead:\n for point in self.OldGrid:\n points.append(point)\n dirty_sprites.append(tile_map[point])\n for point in self.Grid:\n points.append(point)\n dirty_sprites.append(tile_map[point])\n else:\n for point in self.OldGrid:\n if point not in self.Grid:\n points.append(point)\n dirty_sprites.append(tile_map[point])\n #print(\"OLD: {}, NEW: {} REDRAW: {}\".format(self.OldGrid, self.Grid, points))\n \n return tuple(dirty_sprites)\n\n def scale_to(self, new_size, new_offset):\n self.TileSize = new_size\n self.Offset = new_offset\n self.IMAGE = pygame.transform.scale(self.BASE_IMAGE, (self.TileSize * self.GRID_W, self.TileSize * self.GRID_H))\n self.image = self.IMAGE\n self.rect = self.image.get_rect()\n self.reset(self.Pos)\n\n def reset(self, new_pos, reset_stats=False):\n \n if reset_stats: self.Statistics = copy.deepcopy(self.STATS) #.copy()\n \n self.Pos = np.array(new_pos)\n self.OldPos = self.Pos.copy()\n self.Grid = self.update_collision_grid()\n self.OldGrid = self.Grid\n self.update_render_pos(rotate=True)\n\n def update(self):\n raise NotImplementedError()\n\n\nclass ViewPort(pygame.Rect):\n\n def __init__(self, grid_points_left, \n grid_points_right, \n grid_points_front, \n grid_points_back):\n\n pygame.Rect.__init__(self, \n 0,\n 0, \n grid_points_left + grid_points_right + 1, \n grid_points_front + grid_points_back + 1)\n \n self.grid_left = grid_points_left\n self.grid_right = grid_points_right\n self.grid_front = grid_points_front\n self.grid_back = grid_points_back\n\n def get_grid_size(self):\n return (self.grid_left, self.grid_right, self.grid_front, self.grid_back)\n\n def get_grid_dimensions(self):\n return( self.grid_left + self.grid_right + 1 , self.grid_front + self.grid_back + 1)\n\n def get_viewport(self, position, tile_size, offset, size_x=1, size_y=1):\n grid_pos_X = position[0]\n grid_pos_Y = position[1]\n orientation = position[2]\n\n # Not smart but clear\n if orientation == 0: # UP\n self.left = (grid_pos_X - self.grid_left ) * tile_size + offset\n self.top = (grid_pos_Y - self.grid_front ) * tile_size + offset\n self.width = (self.grid_left + self.grid_right + size_x) * tile_size\n self.height = (self.grid_front + self.grid_back + size_y) * tile_size\n elif orientation == 2: # DOWN\n self.left = (grid_pos_X - self.grid_left ) * tile_size + offset\n self.top = (grid_pos_Y - self.grid_back ) * tile_size + offset\n self.width = (self.grid_left + self.grid_right + size_x) * tile_size\n self.height = (self.grid_front + self.grid_back + size_y) * tile_size\n elif orientation == 1: # RIGHT\n self.left = (grid_pos_X - self.grid_back ) * tile_size + offset\n self.top = (grid_pos_Y - self.grid_left ) * tile_size + offset\n self.width = (self.grid_front + self.grid_back + size_y) * tile_size\n self.height = (self.grid_left + self.grid_right + size_x) * tile_size\n elif orientation == 3: # LEFT\n self.left = (grid_pos_X - self.grid_front ) * tile_size + offset\n self.top = (grid_pos_Y - self.grid_right ) * tile_size + offset\n self.width = (self.grid_front + self.grid_back + size_y) * tile_size\n self.height = (self.grid_left + self.grid_right + size_x) * tile_size\n\n return self\n\nclass Survivor(pygame.sprite.DirtySprite, GameObject):\n # cost constants (game dynamics)\n #COST_PERMANENT = 1.\n #COST_MOVE = 5.\n #COST_ROTATE = 5.\n COST_MULT_LAND = 1\n COST_MULT_WATER = 3\n \n # Action Mapping for Survivor, defines 
possible actions!\n BASIC_ACTIONS = { K_UP : MOVE_FORWARD,\n K_DOWN : MOVE_BACKWARD,\n K_LEFT : MOVE_LEFT,\n K_RIGHT : MOVE_RIGHT,\n K_COMMA : AGENT_TURN_LEFT, \n K_PERIOD: AGENT_TURN_RIGHT, \n K_F15 : NOOP\n }\n\n def __init__(self, ID, rewards, view_port, agent_start_pos, size, offset, life_points):\n \n pygame.sprite.Sprite.__init__(self)\n\n base_image = pygame.Surface([size, size])\n base_image.fill((255,0,0))\n\n GameObject.__init__(self, ID, agent_start_pos, size, offset, (1,1), Survivor.BASIC_ACTIONS, base_image, None, SURVIVOR_STATISTICS)\n\n self.ViewPort = view_port\n\n # dynamics\n self.Energy = life_points\n self._O_ENERGY = self.Energy\n self.CostMultiplier = 1\n # rewards\n self.rewards = rewards\n self.Score = 0\n self.StepsAlive = 0\n\n def draw_as_ally(self, Surface):\n pygame.draw.rect(Surface, (0,0,255) ,self.rect)\n\n def draw_as_self(self, Surface):\n Surface.blit(self.image, self.rect)\n\n def update(self, action_list, tile_map, living_creatures):\n\n # apply the basic cost\n self.Energy -= 1 * self.CostMultiplier\n\n # If survivor is dead return\n if self.Energy <= 0:\n self.kill()\n return self.update_render_pos(tile_map=tile_map, dead=True)\n\n self.StepsAlive += 1\n #print(\"Agent {}: steps {}\".format(self.ID, self.StepsAlive))\n self.Statistics[\"basics\"][\"steps_alive\"] +=1\n\n # Apply the action and update the position\n action = action_list[self.ID]\n self.move(action)\n\n # Check collisions with map and other game_objects\n dead_wolf_sprites = ()\n for point in self.Grid:\n\n colliding_map_tile = tile_map[point]\n \n if colliding_map_tile.TileType == map.EOW:\n self.set_back()\n self.Statistics[\"basics\"][\"collisions\"] +=1\n break\n\n for creature in living_creatures:\n if point in creature.get_collision_grid():\n\n if isinstance(creature, Survivor):\n if self.ID != creature.ID:\n self.set_back()\n self.Statistics[\"basics\"][\"collisions\"] +=1\n break\n\n elif isinstance(creature, Wolf):\n #print(\"GOT THE WOLF!\")\n \n # Apply the reward\n self.Score += self.rewards[\"wolf\"]\n\n for survivor in living_creatures:\n if isinstance(survivor, Survivor):\n energy_from_wolf = (creature.StepsAlive * 0.25)\n survivor.Energy += energy_from_wolf\n self.Statistics[\"specialisation\"][\"energy_from_wolf\"] += energy_from_wolf\n\n #print(\"ENERGY FROM WOLF: {}\".format(energy_from_wolf))\n # reset wolf\n creature.StepsAlive = 0\n\n #print(\"WOLF // Agent {}: +{} new score: {}\".format(self.ID, self.rewards[\"wolf\"], self.Score))\n \n # Save the wolfs position for redrawing later\n dead_wolf_sprites = creature.get_collision_grid()\n\n # Find a new random position for the wolf and reset\n new_pos = utils.free_random_position( tile_map, living_creatures, forbidden_types=[map.WATER], min_space=creature.GRID_MAX)\n creature.reset(new_pos)\n\n creature.Statistics[\"specialisation\"][\"catched_by_survivor\"] +=1\n self.Statistics[\"specialisation\"][\"catched_wolf\"] +=1\n self.Statistics[\"rewards\"][\"reward_from_wolf\"] += self.rewards[\"wolf\"]\n self.Statistics[\"rewards\"][\"reward_total\"] = self.Score\n break\n else:\n self.set_back()\n self.Statistics[\"basics\"][\"collisions\"] +=1\n break\n\n # Now that we have the final position update the map on this position\n for point in self.Grid:\n colliding_map_tile = tile_map[point]\n\n if colliding_map_tile.TileType == map.WATER:\n self.CostMultiplier = Survivor.COST_MULT_WATER\n self.Statistics[\"basics\"][\"steps_water\"] +=1\n else:\n self.CostMultiplier = Survivor.COST_MULT_LAND\n 
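# the new multiplier is charged by the permanent per-step cost at the top of the next update() call, so standing in water drains energy three times as fast as land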
self.Statistics[\"basics\"][\"steps_land\"] +=1\n\n collected_food = colliding_map_tile.update(self)\n if collected_food:\n for creature in living_creatures:\n if isinstance(creature, Survivor):\n creature.Energy += 0.25\n\n # Add the sprites points from the dead wolf to our self.OldGrid for redrawing!\n self.OldGrid += dead_wolf_sprites\n # Return all sprites that need redrawing\n return self.update_render_pos(tile_map=tile_map)\n\n def reset(self, new_pos, reset_stats=False):\n\n self.Energy = self._O_ENERGY\n self.Score = 0\n self.StepsAlive = 0\n self.CostMultiplier = 1\n super(Survivor, self).reset(new_pos, reset_stats)\n \nclass Fireplace(pygame.sprite.DirtySprite, GameObject):\n\n def __init__(self, ID, pos, tile_size, offset=0, small=False):\n \n pygame.sprite.Sprite.__init__(self)\n\n if small:\n IMAGE_ON = FIRE_ON_SMALL.conert()\n IMAGE_OFF = FIRE_OFF_SMALL.convert()\n NUM_TILES = 3\n else:\n IMAGE_ON = FIRE_ON.convert()\n IMAGE_OFF = FIRE_OFF.convert() \n NUM_TILES = 4\n\n FireArea = ViewPort(3,3,3,3)\n GameObject.__init__(self, ID, pos, tile_size, offset, (NUM_TILES, NUM_TILES), None, IMAGE_OFF, FireArea, FIRE_STATISTICS)\n\n # add a second Surface for the Fire ON image \n self.BASE_IMAGE_2 = IMAGE_ON\n self.IMAGE_2 = pygame.transform.scale(self.BASE_IMAGE_2, (self.TileSize * self.GRID_W, self.TileSize * self.GRID_H))\n\n self.ON = False\n self.FIRE_GUARD = -1 # only one agent can be the fire guard at the same time. First come, first serve!\n # self.test = 0\n\n def update(self, actions, tile_map, living_creatures):\n\n self.ON = False\n for creature in living_creatures:\n if isinstance(creature, Survivor):\n\n # Check if the survivor is inside the activation area!\n if self.get_view_grid().collidepoint(creature.get_grid_pos()):\n #print(\"FIRE!, Survivor in reach: {}\".format(creature.ID))\n \n # If no one or the current survivor is the fire guard, apply the reward and turn the fire on.\n if (self.FIRE_GUARD == -1) or (self.FIRE_GUARD == creature.ID):\n self.FIRE_GUARD = creature.ID\n creature.Score += creature.rewards[\"fire\"]\n creature.Statistics[\"specialisation\"][\"steps_as_fireguard\"] +=1\n creature.Statistics[\"rewards\"][\"reward_from_fire\"] += creature.rewards[\"fire\"]\n creature.Statistics[\"rewards\"][\"reward_total\"] = creature.Score\n\n self.ON = True\n #print(\"FIRE // Agent {}: +{} new score: {}\".format(agent.ID, agent.rewards[\"wolf\"], agent.Score))\n\n\n # Switch the Wolf and Sheep Movement Speed depending on the fire status and add Energy to the agents!\n for creature in living_creatures:\n if isinstance(creature, Survivor):\n if self.ON:\n creature.Energy += 0.25\n if isinstance(creature, Wolf):\n if self.ON:\n creature.MOVE_EVERY_N_STEPS = creature.SLOW\n else:\n creature.MOVE_EVERY_N_STEPS = creature.FAST\n\n if isinstance(creature, Sheep):\n if self.ON:\n creature.MOVE_EVERY_N_STEPS = creature.FAST\n else:\n creature.MOVE_EVERY_N_STEPS = creature.SLOW\n\n # Switch the image based on the fire status\n if self.ON:\n self.Statistics[\"specialisation\"][\"steps_fire_on\"] +=1\n self.image = self.IMAGE_2 # Fire on\n else:\n self.Statistics[\"specialisation\"][\"steps_fire_off\"] +=1\n #self.Statistics[\"specialisation\"][\"fire_switches\"] +=1\n self.FIRE_GUARD = -1\n self.image = self.IMAGE # Fire off\n \n # return no dirty sprites since the fire is not moving anywhere\n return []\n\n def scale_to(self, tile_size, offset):\n self.IMAGE_2 = pygame.transform.scale(self.BASE_IMAGE_2, (tile_size * self.GRID_W, tile_size * self.GRID_H))\n super(Fireplace, 
self).scale_to(tile_size, offset)\n \n def reset(self, new_pos, reset_stats=False):\n #self.test = 0\n self.ON = False\n self.FIRE_GUARD = -1\n super(Fireplace, self).reset(new_pos, reset_stats)\n\nclass Sheep(pygame.sprite.DirtySprite, GameObject):\n\n # Action Mapping for Sheep, defines possible actions!\n BASIC_ACTIONS = { FORWARD : MOVE_FORWARD,\n TURN_L : ANIMAL_TURN_LEFT, \n TURN_R : ANIMAL_TURN_RIGHT,\n TURN_F : ANIMAL_TURN_FULL,\n STAY : NOOP\n }\n\n def __init__(self, ID, start_pos, tile_size=8, offset=0 ):\n \n pygame.sprite.Sprite.__init__(self)\n\n SheepArea = ViewPort(5,5,5,4)\n GameObject.__init__(self, ID, start_pos, tile_size, offset, (1,2), Sheep.BASIC_ACTIONS, SHEEP.convert(), SheepArea, SHEEP_STATISTICS)\n\n self.SLOW = 6\n self.FAST = 2\n self.MOVE_EVERY_N_STEPS = self.SLOW\n self.WorldSteps = 0\n self.SHEPHERD = -1 # only one agent can be the sheeps shepherd at the same time. First come, first serve!\n\n def update(self, manual_actions, tile_map, living_creatures):\n\n # increment the WorldSteps\n self.WorldSteps += 1\n\n # select an action\n action = self.select_move(manual_actions)\n\n # apply the chosen action\n self.move(action)\n \n # Check collisions and set the final position\n for point in self.Grid:\n colliding_map_tile = tile_map[point]\n \n if colliding_map_tile.TileType == map.EOW:\n self.set_back()\n self.Statistics[\"basics\"][\"collisions\"] +=1\n break\n\n if colliding_map_tile.TileType == map.WATER:\n self.set_back()\n self.move(TURN_F)\n self.Statistics[\"basics\"][\"collisions\"] +=1\n break\n\n # The Sheep is blocked by every other creature\n for creature in living_creatures:\n if point in creature.get_collision_grid():\n\n if isinstance(creature, Sheep):\n if self.ID != creature.ID:\n self.set_back()\n self.Statistics[\"basics\"][\"collisions\"] +=1\n break\n elif isinstance(creature, Survivor):\n self.set_back()\n self.Statistics[\"basics\"][\"collisions\"] +=1\n creature.Statistics[\"specialisation\"][\"blocked_sheep\"] +=1\n break\n else: \n self.set_back()\n self.Statistics[\"basics\"][\"collisions\"] +=1\n break\n\n # Now that we have the final position update the map on this position\n for point in self.Grid:\n tile_map[point].update(self)\n\n # With the final position search for shepherds\n has_a_shepherd = False\n for creature in living_creatures:\n if isinstance(creature, Survivor):\n\n # Check if the survivor is inside the activation area!\n if self.get_view_grid().collidepoint(creature.get_grid_pos()):\n #print(\"SHEPARD, Agent in reach: {}\".format(agent.ID))\n\n # If the sheep has no shepherd or the survivor is already its shepherd, apply the reward and set \"new\" shepherd\n if (self.SHEPHERD == -1) or (self.SHEPHERD == creature.ID):\n self.SHEPHERD = creature.ID\n creature.Score += creature.rewards[\"sheep\"]\n has_a_shepherd = True\n\n creature.Statistics[\"specialisation\"][\"steps_as_shepherd\"] +=1\n creature.Statistics[\"rewards\"][\"reward_from_sheep\"] += creature.rewards[\"sheep\"]\n creature.Statistics[\"rewards\"][\"reward_total\"] = creature.Score\n self.Statistics[\"specialisation\"][\"steps_with_shepherd\"] +=1\n \n #print(\"SHEEP // Agent {}: +{} new score: {}\".format(creature.ID, creature.rewards[\"sheep\"], creature.Score))\n\n # If there was no survivor in range, reset the sheeps \"ownership\"\n if has_a_shepherd:\n for creature in living_creatures:\n if isinstance(creature, Survivor):\n creature.Energy += 0.25\n elif not has_a_shepherd:\n self.SHEPHERD = -1\n 
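# releasing the sheep here lets any survivor in range claim it again on a later step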
self.Statistics[\"specialisation\"][\"steps_without_shepherd\"] +=1\n #self.Statistics[\"specialisation\"][\"shepherd_switches\"] +=1\n \n # Return all sprites that need redrawing\n return self.update_render_pos(rotate=True, tile_map=tile_map)\n\n def select_move(self, manual_actions=[]):\n\n if self.MOVE_EVERY_N_STEPS == self.SLOW:\n self.Statistics[\"basics\"][\"steps_slow\"] +=1\n elif self.MOVE_EVERY_N_STEPS == self.FAST:\n self.Statistics[\"basics\"][\"steps_slow\"] +=1\n\n # The basic movement of the Sheep. Every n world steps select a move with some probability.\n if (self.WorldSteps % self.MOVE_EVERY_N_STEPS == 0):\n self.Statistics[\"basics\"][\"steps_total\"] +=1\n\n action_prob = np.random.random()\n if action_prob < 0.1:\n action = TURN_L\n elif action_prob < 0.2:\n action = TURN_R\n elif action_prob < 0.9:\n action = FORWARD\n else:\n action = STAY \n else:\n action = STAY\n \n # If the Game is set to MANUAL or RANDOM mode overwrite the action\n if MANUAL:\n # Select the same action as player x for manuel play/testing\n self.ACTIONS = { K_UP : MOVE_FORWARD, K_DOWN : MOVE_BACKWARD, K_LEFT : MOVE_LEFT, \n K_RIGHT : MOVE_RIGHT, K_COMMA : ANIMAL_TURN_LEFT, K_PERIOD : ANIMAL_TURN_RIGHT,\n FORWARD : MOVE_FORWARD, TURN_L : ANIMAL_TURN_LEFT, TURN_R : ANIMAL_TURN_RIGHT,\n TURN_F : ANIMAL_TURN_FULL, STAY : NOOP, K_F15 : NOOP}\n action = manual_actions[0]\n \n elif RANDOM_NPC:\n action = self.select_random_move([FORWARD, TURN_L, TURN_R, STAY])\n\n return action\n\nclass Wolf(pygame.sprite.DirtySprite, GameObject):\n\n # Action Mapping for Wolf, defines possible actions!\n BASIC_ACTIONS = { FORWARD : MOVE_FORWARD,\n TURN_L : ANIMAL_TURN_LEFT, \n TURN_R : ANIMAL_TURN_RIGHT,\n TURN_F : ANIMAL_TURN_FULL,\n STAY : NOOP\n }\n\n def __init__(self, ID, start_pos, tile_size=8, offset=0 ):\n \n pygame.sprite.Sprite.__init__(self)\n\n WolfArea = ViewPort(8,8,8,8)\n GameObject.__init__(self, ID, start_pos, tile_size, offset, (1,2), Wolf.BASIC_ACTIONS, WOLF.convert(), WolfArea, WOLF_STATISTICS)\n self.DMG = 50\n self.SLOW = 4\n self.FAST = 1\n self.MOVE_EVERY_N_STEPS = self.FAST\n self.WorldSteps = 0\n self.StepsAlive = 0\n\n def update(self, manual_actions, tile_map, living_creatures):\n\n # increment the WorldSteps\n self.WorldSteps += 1\n self.StepsAlive += 1\n #print(\"Wolf is {} Steps alive.\".format(self.StepsAlive))\n\n # Check if a sheep is in the hunting range\n HUNTING, SheepPos = self.snoop(living_creatures)\n\n # select an action\n action = self.select_move(HUNTING, SheepPos, manual_actions)\n\n # apply the chosen action\n self.move(action)\n\n # check collisions with map and game objects\n dead_sheep_sprites = ()\n for point in self.Grid:\n\n colliding_map_tile = tile_map[point]\n \n if colliding_map_tile.TileType == map.EOW:\n self.set_back()\n self.Statistics[\"basics\"][\"collisions\"] +=1\n break\n\n if colliding_map_tile.TileType == map.WATER:\n self.set_back()\n self.move(TURN_F)\n self.Statistics[\"basics\"][\"collisions\"] +=1\n\n if HUNTING:\n # attempt another move to better escape from \"trapped\" situatios\n self.move(self.select_random_move([FORWARD, TURN_L, TURN_R]))\n \n # check collisions with mao again.\n for new_point in self.Grid:\n \n new_map_tile = tile_map[new_point]\n\n if new_map_tile.TileType == map.WATER or new_map_tile.TileType == map.EOW:\n self.set_back()\n self.Statistics[\"basics\"][\"collisions\"] +=1\n break\n \n sheep = self.check_object_collisions(new_point, tile_map, living_creatures)\n if sheep: dead_sheep_sprites = sheep\n break\n\n sheep = 
self.check_object_collisions(point, tile_map, living_creatures)\n            if sheep: dead_sheep_sprites = sheep\n\n        # Add the sprites points from the dead sheep to our self.OldGrid for redrawing!\n        self.OldGrid += dead_sheep_sprites\n        # Return all sprites that need redrawing\n        return self.update_render_pos(rotate=True, tile_map=tile_map)\n\n    def check_object_collisions(self, point, tile_map, living_creatures):\n        \n        dead_sheep_sprites = ()\n        for creature in living_creatures:\n            if point in creature.get_collision_grid():\n\n                if isinstance(creature, Wolf):\n                    if self.ID != creature.ID:\n                        self.set_back()\n                        self.Statistics[\"basics\"][\"collisions\"] +=1\n                        break\n\n                elif isinstance(creature, Sheep):\n                    #print(\"WOLF KILLS THE SHEEP!\")\n\n                    # Save the sheep's position for redrawing later\n                    dead_sheep_sprites = creature.get_collision_grid()\n\n                    # Find a new random position for the sheep and reset\n                    new_pos = utils.free_random_position( tile_map, living_creatures, forbidden_types=[map.WATER], min_space=creature.GRID_MAX)\n                    creature.reset(new_pos)\n\n                    self.Statistics[\"specialisation\"][\"catched_sheep\"] +=1\n                    creature.Statistics[\"specialisation\"][\"catched_by_wolf\"] +=1\n\n                    #creature.kill()\n                    break\n\n                elif isinstance(creature, Survivor):\n\n                    # Apply the attack damage of the wolf to the survivor, reset position\n                    creature.Energy -= self.DMG\n                    self.set_back()\n                    self.Statistics[\"specialisation\"][\"attacked_survivor\"] +=1\n                    creature.Statistics[\"specialisation\"][\"hits_from_wolf\"] +=1\n\n                    print(\"WOLF attacks Agent {} -{} dmg, new energy: {}!\".format(creature.ID, self.DMG, creature.Energy))\n                    break\n                else:\n                    # Every other game object is just unwalkable for the wolf\n                    self.set_back()\n                    break\n\n        return dead_sheep_sprites\n\n    def select_random_move(self, actions=(FORWARD, TURN_R, TURN_L, STAY)):\n        # match the GameObject signature: callers pass the allowed action set,\n        # e.g. [FORWARD, TURN_L, TURN_R] when STAY must not be an option\n        return np.random.choice(actions)\n\n    def snoop(self, living_creatures):\n        '''\n        This method checks if a sheep is in the hunting area\n        '''\n        HUNTING = False\n        SheepPos = ()\n\n        for creature in living_creatures:\n            if isinstance(creature, Sheep):\n                \n                hunting_area = self.get_view_grid()\n                the_sheep = creature.get_collision_grid()\n\n                for point in the_sheep:\n                    if hunting_area.collidepoint(point):\n                        HUNTING = True\n                        SheepPos = creature.get_grid_pos()\n                        #print(\"WOLF spotted a sheep, hmmm.....\")\n                        break\n\n        return HUNTING, SheepPos\n\n    def select_move(self, hunting, victim_pos, manual_actions=[]):\n\n\n        if self.MOVE_EVERY_N_STEPS == self.SLOW:\n            self.Statistics[\"basics\"][\"steps_slow\"] +=1\n        elif self.MOVE_EVERY_N_STEPS == self.FAST:\n            self.Statistics[\"basics\"][\"steps_fast\"] +=1\n\n        action = STAY\n        # The basic movement of the Wolf. 
Every n world steps select a move with some probability.\n        if (self.WorldSteps % self.MOVE_EVERY_N_STEPS == 0):\n            self.Statistics[\"basics\"][\"steps_total\"] +=1\n\n            # If the wolf is in hunting mode, select a special move, else one of the basic moves.\n            if hunting:\n                wolf = self.get_grid_pos()\n                sheep = victim_pos\n                action = self.select_hunt_move(hunter_pos=wolf, victim_pos=sheep)\n                self.Statistics[\"specialisation\"][\"steps_hunting\"] +=1\n            else:\n                \n                action_prob = np.random.random()\n                if action_prob < 0.1:\n                    action = TURN_L\n                elif action_prob < 0.2:\n                    action = TURN_R\n                elif action_prob < 0.9:\n                    action = FORWARD\n                else:\n                    action = STAY\n\n        # If the Game is set to MANUAL or RANDOM mode overwrite the action\n        if MANUAL:\n            # Select the same action as player x for manual play/testing\n            self.ACTIONS = { K_UP : MOVE_FORWARD, K_DOWN : MOVE_BACKWARD, K_LEFT : MOVE_LEFT, \n                             K_RIGHT : MOVE_RIGHT, K_COMMA : ANIMAL_TURN_LEFT, K_PERIOD : ANIMAL_TURN_RIGHT,\n                             FORWARD : MOVE_FORWARD, TURN_L : ANIMAL_TURN_LEFT, TURN_R : ANIMAL_TURN_RIGHT,\n                             TURN_F : ANIMAL_TURN_FULL, STAY : NOOP, K_F15 : NOOP}\n            action = manual_actions[1]\n        \n        elif RANDOM_NPC:\n            action = self.select_random_move([FORWARD, TURN_L, TURN_R, STAY])\n\n        return action\n\n    def select_hunt_move(self, hunter_pos, victim_pos):\n\n        diff_x = hunter_pos[0] - victim_pos[0]\n        diff_y = hunter_pos[1] - victim_pos[1]\n\n        reduce = diff_x if abs(diff_x) >= abs(diff_y) else diff_y\n        #print(\"Wolf: {}, Sheep: {}, Diff: ({},{}, Reduce {})\".format(hunter_pos, victim_pos, diff_x, diff_y, reduce))\n\n        if reduce == diff_x:\n            # again not smart but clear\n            if diff_x > 0:\n                if self.Pos[2] == UP: return TURN_L \n                elif self.Pos[2] == DOWN: return TURN_R \n                elif self.Pos[2] == RIGHT: return TURN_F \n                elif self.Pos[2] == LEFT: return FORWARD\n                else: return STAY  # defensive fallback, orientation is always 0..3\n            else:\n                if self.Pos[2] == UP: return TURN_R \n                elif self.Pos[2] == DOWN: return TURN_L \n                elif self.Pos[2] == RIGHT: return FORWARD \n                elif self.Pos[2] == LEFT: return TURN_F\n                else: return STAY\n        else:\n            if diff_y > 0:\n                if self.Pos[2] == UP: return FORWARD \n                elif self.Pos[2] == DOWN: return TURN_F \n                elif self.Pos[2] == RIGHT: return TURN_L \n                elif self.Pos[2] == LEFT: return TURN_R\n                else: return STAY\n            else:\n                if self.Pos[2] == UP: return TURN_F \n                elif self.Pos[2] == DOWN: return FORWARD \n                elif self.Pos[2] == RIGHT: return TURN_R \n                elif self.Pos[2] == LEFT: return TURN_L\n                else: return STAY\n\n    def reset(self, new_pos, reset_stats=False):\n        self.WorldSteps = 0\n        self.StepsAlive = 0\n        super(Wolf, self).reset(new_pos, reset_stats)\n    "
] |
[
[
"numpy.array",
"numpy.random.random",
"numpy.random.choice"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ucgmsim/slurm_gm_workflow
|
[
"6fd7e11f3c3163dbd219b6783c32fa8085db5d35",
"6fd7e11f3c3163dbd219b6783c32fa8085db5d35",
"6fd7e11f3c3163dbd219b6783c32fa8085db5d35",
"6fd7e11f3c3163dbd219b6783c32fa8085db5d35"
] |
[
"scripts/test_binary.py",
"scripts/emod3d_scripts/check_emod3d_subdomains.py",
"e2e_tests/E2ETests.py",
"scripts/submit_sim_imcalc.py"
] |
[
"#!/usr/bin/env python3\n\nimport sys\nimport argparse\nimport numpy as np\n\nfrom qcore.timeseries import BBSeis\nfrom qcore.timeseries import HFSeis\n\n# the ratio of allowed zero's before being flagged as failed, 0.01 = 1%\nZERO_COUNT_THRESHOLD = 0.01\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"bin\", type=str)\n parser.add_argument(\"fd_ll\", type=str)\n parser.add_argument(\n \"process_type\", type=str, choices=[\"bb\", \"hf\"], help=\"Either bb or hf\"\n )\n parser.add_argument(\"--verbose\", action=\"store_true\", default=False)\n\n args = parser.parse_args()\n\n if args.process_type.lower() not in [\"bb\", \"hf\"]:\n print(\"Invalid process_type, has to be either bb or hf. Quitting!\")\n sys.exit(1)\n\n bin_class = HFSeis if args.process_type == \"hf\" else BBSeis\n\n try:\n bin = bin_class(args.bin)\n except ValueError as ex:\n if args.verbose:\n print(\"Cannot read binary file {} {}\".format(args.bin, ex))\n sys.exit(1)\n\n try:\n f = open(args.fd_ll)\n except Exception as ex:\n if args.verbose:\n print(\"Cannot open {} with exception\\n{}\".format(args.fd_ll, ex))\n sys.exit(1)\n else:\n fd_count = len(f.readlines())\n\n if fd_count != len(bin.stations.name):\n # failed the count check\n if args.verbose:\n print(\"The station count did not match the fd_ll\")\n sys.exit(1)\n\n # check for empty station names\n for station in bin.stations.name:\n if station == \"\":\n # failed\n if args.verbose:\n print(\n \"Empty station name detected, {} failed\".format(args.process_type)\n )\n sys.exit(1)\n\n # check for and vs ==0 (failed)\n vs = bin.stations.vsite if args.process_type == \"bb\" else bin.stations.vs\n if np.min(vs) == 0:\n if args.verbose:\n print(\"Some vs == 0, {} incomplete\".format(args.process_type))\n sys.exit(1)\n\n # binary zero check\n # Checks 10 random stations for any occurances of 0 in the output (aka results have not been written)\n # Removes leading 0s from the test as there may be some time at the start before the waveform starts.\n for stat_name in np.random.choice(\n bin.stations.name, replace=False, size=min(10, bin.stations.shape[0])\n ):\n acc = bin.acc(stat_name)\n for comp in acc.T:\n # trim leading and trailing zeros\n comp_trimmed = np.trim_zeros(comp)\n if comp_trimmed.size == 0:\n if args.verbose:\n print(\n f\" The waveform for station {stat_name} contains all zeros, please investigate.\"\n )\n sys.exit(1)\n ratio_zeros = (\n comp_trimmed.size - np.count_nonzero(comp_trimmed)\n ) / comp_trimmed.size\n if ratio_zeros > ZERO_COUNT_THRESHOLD:\n if args.verbose:\n print(\n f\"The waveform for station {stat_name} contains {ratio_zeros} zeros, more than {ZERO_COUNT_THRESHOLD}, please investigate. This \"\n f\"is most likely due to crashes during HF or BB resulting in no written output.\"\n )\n sys.exit(1)\n\n # pass both check\n if args.verbose:\n print(\"{} passed\".format(args.process_type))\n sys.exit(0)\n",
"\"\"\"\nContains functions related to the calculation of emod3d subdomain boundaries.\nFunctions ported from C contain a number of calls to np.int32 and np.float32 calls to emulate single precision integer and floating point behaviour.\nCode ported from emod3d v3.0.8 misc.c. This is consistent with v3.0.7. \nWhile v3.0.4 uses long doubles in place of floats, this does not seem to practically increase the accuracy of calculation.\nThis check is stricter than necessary as only on rows/columns with stations missing will cause issues when extracting the station waveforms.\n\"\"\"\n\nimport argparse\n\nimport numpy as np\n\n\ndef get_start_boundary(n_grid_points, n_subdomains, index_subdomain):\n \"\"\"\n Calculates the starting boundary of the subdomain for a given subdomain index along a velocity model axis\n Should have an overlap of 4 with the previous subdomains ending boundary\n Does not account for the first subdomain\n :param n_grid_points: The number of grid points along the axis\n :param n_subdomains: The number of subdomains along the axis\n :param index_subdomain: The index of the subdomain being tested. May be an integer or array of integers\n :return: The first grid point(s) covered by the given subdomain index(cies)\n \"\"\"\n fslice = np.float32(\n np.float32(n_grid_points + (n_subdomains - 1.0) * 4.0)\n / np.float32(n_subdomains)\n - 1.0\n )\n fn1 = np.float32(index_subdomain * (fslice - 3.0))\n nx1 = np.int32(fn1 + 0.5)\n return nx1\n\n\ndef get_end_boundary(n_grid_points, n_subdomains, index_subdomain):\n \"\"\"\n Calculates the ending boundary of the subdomain for a given subdomain index along a velocity model axis\n Should have an overlap of 4 with the next subdomains starting boundary\n Does not account for the last subdomain points\n :param n_grid_points: The number of grid points along the axis\n :param n_subdomains: The number of subdomains along the axis\n :param index_subdomain: The index of the subdomain being tested. May be an integer or array of integers\n :return: The last grid point(s) covered by the given subdomain index(cies)\n \"\"\"\n fslice = np.float32(\n np.float32(n_grid_points + (n_subdomains - 1.0) * 4.0)\n / np.float32(n_subdomains)\n - 1.0\n )\n fn1 = np.float32(index_subdomain * (fslice - 3.0))\n fn1 = np.float32(fn1 + fslice)\n nx2 = np.int32(fn1 + 0.5)\n nx2 = np.int32(nx2 + 1)\n return nx2\n\n\ndef get_nproc(\n nproc: np.int32,\n globnx: np.int32,\n globny: np.int32,\n globnz: np.int32,\n min_nproc: np.int32 = np.int32(1),\n nproc_x: np.int32 = np.int32(-1),\n nproc_z: np.int32 = np.int32(-1),\n):\n \"\"\"\n Ported from the source of emod3d. Casting enforces C like behaviour.\n Calculates the number of processes to use along each axis of a velocity model.\n The argument min_nproc sets the unit size, allowing for blocks of grid points to be assigned to subdomains, instead of individual points\n The nproc_x/z argument are available to mimic options available in the C. Not normally used\n :param nproc: The number of processes to be used.\n :param globnx: The number of velocity model grid points along the x axis.\n :param globny: The number of velocity model grid points along the y axis.\n :param globnz: The number of velocity model grid points along the z axis.\n :param min_nproc: Multiplier to perform calculations using cubes of min_nproc, defaults to 1.\n :param nproc_x: The number of processes to use in the x direction. Set value above -1 to specify the number to use. 
Defaults to -1.\n    :param nproc_z: The number of processes to use in the z direction. Set value above -1 to specify the number to use. Defaults to -1.\n    :return: A tuple containing:\n        The number of processes along the x axis\n        The number of processes along the y axis\n        The number of processes along the z axis\n    \"\"\"\n    inv_fmp = np.float32(1.0 / min_nproc)\n    fnp = np.float32(nproc)\n    fnx = np.float32(globnx)\n    fny = np.float32(globny)\n    fnz = np.float32(globnz)\n\n    if nproc_z < 0:\n        nproc_z = np.int32(\n            inv_fmp * fnz * np.exp(np.log(fnp / (fnx * fny * fnz)) / 3.0) + 0.5\n        )\n        if nproc_z < 1:\n            nproc_z = np.int32(1)\n        nproc_z = np.int32(min_nproc * nproc_z)\n\n    if nproc_x < 0:\n        nproc_x = np.int32(\n            inv_fmp * fnx * np.exp(np.log(fnp / (fnx * fny * nproc_z)) / 2.0) + 0.5\n        )\n        if nproc_x < 1:\n            nproc_x = np.int32(1)\n        nproc_x = np.int32(min_nproc * nproc_x)\n\n    nproc_y = np.int32(\n        inv_fmp * fnp / (np.float32(nproc_x) * np.float32(nproc_z)) + 0.5\n    )\n    if nproc_y < 1:\n        nproc_y = np.int32(1)\n    nproc_y = np.int32(min_nproc * nproc_y)\n\n    nproc_c = nproc_x * nproc_y * nproc_z\n\n    if nproc_c != nproc:\n        # Alternate method of calculating the process distribution\n        ip3 = np.int32(np.exp(np.log(fnp) / 3.0) + 0.5)\n\n        ipt = np.int32(1)\n        while 2 * ipt <= ip3 and nproc % ipt == 0 and (nproc / ipt) % 2 == 0:\n            ipt = np.int32(2 * ipt)\n\n        nproc_z = ipt\n\n        np2 = np.int32(nproc / nproc_z)\n        ip2 = np.int32(np.exp(np.log(1.0 * np2) / 2.0) + 0.5)\n\n        ipt = np.int32(1)\n        while 2 * ipt <= ip2 and np2 % ipt == 0 and (np2 / ipt) % 2 == 0:\n            ipt = np.int32(2 * ipt)\n\n        nproc_x = np.int32(ipt)\n        nproc_y = np.int32(np2 / nproc_x)\n\n    return nproc_x, nproc_y, nproc_z\n\n\ndef test_domain(nx, ny, nz, nc):\n    \"\"\"\n    Tests a given domain size and core count to check for grid points that won't be assigned to any sub domain\n    :param nx: The number of grid points in the x direction\n    :param ny: The number of grid points in the y direction\n    :param nz: The number of grid points in the z direction\n    :param nc: The number of cores to be used to perform the simulation\n    :return: Three arrays with the index of any unassigned grid lines. 
If all three are empty then the simulation will work as expected\n    \"\"\"\n    nproc_x, nproc_y, nproc_z = get_nproc(nc, nx, ny, nz)\n\n    x_indices = np.arange(nproc_x - 1)\n    x_n1 = get_start_boundary(nx, nproc_x, x_indices + 1)\n    x_n2 = get_end_boundary(nx, nproc_x, x_indices)\n\n    y_indices = np.arange(nproc_y - 1)\n    y_n1 = get_start_boundary(ny, nproc_y, y_indices + 1)\n    y_n2 = get_end_boundary(ny, nproc_y, y_indices)\n\n    z_indices = np.arange(nproc_z - 1)\n    z_n1 = get_start_boundary(nz, nproc_z, z_indices + 1)\n    z_n2 = get_end_boundary(nz, nproc_z, z_indices)\n\n    x_mask = np.where(x_n1 + 2 != x_n2 - 2)[0]\n    y_mask = np.where(y_n1 + 2 != y_n2 - 2)[0]\n    z_mask = np.where(z_n1 + 2 != z_n2 - 2)[0]\n\n    return x_mask, y_mask, z_mask\n\n\ndef load_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"n_cores\", type=int, help=\"The number of cores used to perform the simulation\"\n    )\n    parser.add_argument(\n        \"nx\", type=int, help=\"The number of grid points along the x axis\"\n    )\n    parser.add_argument(\n        \"ny\", type=int, help=\"The number of grid points along the y axis\"\n    )\n    parser.add_argument(\n        \"nz\", type=int, help=\"The number of grid points along the z axis\"\n    )\n    return parser.parse_args()\n\n\ndef main():\n    \"\"\"\n    Uses the command line arguments provided to determine if the simulation will have any grid lines that are not associated with a subdomain.\n    If any x or y gridlines are not associated they are printed to stdout, and the script exits with an exit code of 1\n    Otherwise the script exits with an exit code of 0\n    z gridlines are presented if any x or y gridlines are not associated, however alone they are not enough to cause failure\n    \"\"\"\n    args = load_args()\n    x, y, z = test_domain(args.nx, args.ny, args.nz, args.n_cores)\n\n    if x.size + y.size > 0:\n        # We only care if there are issues on the surface layer\n        message_parts = []\n        if x.size > 0:\n            message_parts.append(\"Missed x axis indices:\")\n            message_parts.append(\", \".join(x.astype(str)))\n        if y.size > 0:\n            message_parts.append(\"Missed y axis indices:\")\n            message_parts.append(\", \".join(y.astype(str)))\n        if z.size > 0:\n            message_parts.append(\"Missed z axis indices:\")\n            message_parts.append(\", \".join(z.astype(str)))\n        print(\". \".join(message_parts))\n        return_code = 1\n    else:\n        return_code = 0\n    exit(return_code)\n\n\nif __name__ == \"__main__\":\n    main()\n",
"\"\"\"Contains class and helper functions for end to end test\"\"\"\nimport signal\nimport sys\nimport os\nimport json\nimport shutil\nimport time\nimport glob\nimport subprocess\nfrom collections import namedtuple\nfrom typing import List\nfrom threading import Thread\nfrom queue import Queue, Empty\n\nimport numpy.random as nprdm\nimport pandas as pd\nimport sqlite3 as sql\nfrom pandas.testing import assert_frame_equal\n\nfrom scripts.management.db_helper import connect_db_ctx\nfrom scripts.management.MgmtDB import SchedulerTask, MgmtDB\nimport qcore.constants as const\nimport qcore.simulation_structure as sim_struct\nfrom qcore.shared import non_blocking_exe, exe\nfrom scripts.schedulers.scheduler_factory import Scheduler\n\n\ndef get_sim_dirs(runs_dir):\n \"\"\"Gets all simualation dirs under the specified Runs dir.\n Also returns the fault dirs. Full paths.\n \"\"\"\n sim_dirs = []\n fault_dirs = get_faults(runs_dir)\n for fault in fault_dirs:\n fault_name = os.path.basename(fault)\n\n entries = os.listdir(fault)\n for entry in entries:\n entry_path = os.path.join(fault, entry)\n if entry.startswith(fault_name) and os.path.isdir(entry_path):\n sim_dirs.append(entry_path)\n\n return fault_dirs, sim_dirs\n\n\ndef get_faults(runs_dir: str):\n \"\"\"Gets all the fault directories in the specified Runs dir.\n Full path.\n \"\"\"\n return [\n os.path.join(runs_dir, entry)\n for entry in os.listdir(runs_dir)\n if os.path.isdir(os.path.join(runs_dir, entry))\n ]\n\n\nError = namedtuple(\"Error\", [\"location\", \"error\"])\nWarning = namedtuple(\"Warning\", [\"location\", \"warning\"])\n\n\nclass E2ETests(object):\n \"\"\"Class responsible for setting up, running and checking end-to-end tests\n based on the input config file\n \"\"\"\n\n # Config keys\n cf_test_dir_key = \"test_dir\"\n cf_data_dir_key = \"data_dir\"\n cf_cybershake_config_key = \"cybershake_config\"\n cf_fault_list_key = \"fault_list\"\n cf_bench_folder_key = \"bench_dir\"\n cf_version_key = \"version\"\n test_checkpoint_key = \"test_checkpoint\"\n timeout_key = \"timeout\"\n\n # Benchmark folders\n bench_IM_csv_folder = \"IM_csv\"\n\n # Log files\n install_out_file = \"install_out_log.txt\"\n install_err_file = \"install_err_log.txt\"\n\n submit_out_file = \"submit_out_log.txt\"\n submit_err_file = \"submit_err_log.txt\"\n\n warnings_file = \"warnings_log.txt\"\n errors_file = \"errors_log.txt\"\n\n # Error Keywords\n error_keywords = [\"error\", \"traceback\", \"exception\"]\n\n # Templates to check for\n expected_templates = [\n \"run_bb_mpi.sl.template\",\n \"run_emod3d.sl.template\",\n \"run_hf_mpi.sl.template\",\n \"sim_im_calc.sl.template\",\n \"post_emod3d_merge_ts.sl.template\",\n ]\n\n def __init__(self, config_file: str):\n \"\"\"Constructor, reads input config.\"\"\"\n\n try:\n assert_frame_equal(pd.DataFrame([1]), pd.DataFrame([1]), atol=1e-03)\n except TypeError as e:\n print(\n \"Please ensure pandas is at least version 1.1.0. \"\n \"The command 'pip install -U pandas' should help you. 
\"\n \"If this still occurs please contact the software team.\"\n )\n exit(1)\n\n with open(config_file, \"r\") as f:\n self.config_dict = json.load(f)\n\n self.version = self.config_dict[self.cf_version_key]\n\n # Add tmp directory\n self.stage_dir = os.path.join(\n self.config_dict[self.cf_test_dir_key], \"tmp_{}\".format(const.timestamp)\n )\n\n self.im_bench_folder = os.path.join(\n self.config_dict[self.cf_bench_folder_key], self.bench_IM_csv_folder\n )\n self.timeout = self.config_dict[self.timeout_key] * 60\n\n self.warnings, self.errors = [], []\n self.fault_dirs, self.sim_dirs = [], []\n self.runs_dir = None\n\n self._sim_passed, self._sim_failed = set(), set()\n self._stop_on_error, self._test_restart = None, None\n\n self.canceled_running = []\n # Resources that need to be dealt with on close\n self._processes = []\n self._files = []\n\n def run(\n self,\n user: str,\n sleep_time: int = 10,\n stop_on_error: bool = True,\n stop_on_warning: bool = False,\n no_clean_up: bool = False,\n test_restart: bool = False,\n ):\n \"\"\"\n Runs the full automated workflow and checks that everything works as\n expected. Prints out a list of errors, if there are any.\n\n The test directory is deleted if there are no errors, unless no_clean_up\n is set.\n\n Parameters\n ----------\n user: str\n The username under which to run the tasks\n \"\"\"\n self._stop_on_error = stop_on_error\n self._test_restart = test_restart\n\n # Setup folder structure\n self.setup()\n\n # Run install script\n self.install()\n if self.warnings and stop_on_warning:\n print(\"Quitting due to warnings following warnings:\")\n self.print_warnings()\n return False\n\n # Run automated workflow\n if not self._run_auto(user, sleep_time=sleep_time):\n return False\n # Only check that everything is completed, when auto submit does not\n # exit early\n else:\n self.check_mgmt_db()\n\n if self.errors:\n print(\"The following errors occurred during the automated workflow:\")\n self.print_errors()\n else:\n print(\"It appears there were no errors during the automated workflow!\")\n if not no_clean_up:\n self.teardown()\n\n return True\n\n def print_warnings(self):\n with open(os.path.join(self.stage_dir, self.warnings_file), \"a\") as f:\n for warn in self.warnings:\n text = \"WARNING: {}, {}\".format(warn.location, warn.warning)\n print(text)\n f.write(text)\n\n def print_errors(self):\n with open(os.path.join(self.stage_dir, self.errors_file), \"a\") as f:\n for err in self.errors:\n text = \"ERROR: {}, {}\\n\".format(err.location, err.error)\n print(text)\n f.write(text)\n\n def setup(self):\n \"\"\"Setup for automatic workflow\n\n Change this to use the qcore simulation structure functions!!\n \"\"\"\n print(\"Running setup...\")\n print(\"Using directory {}\".format(self.stage_dir))\n\n # Create tmp dir\n os.mkdir(self.stage_dir)\n\n # Data\n data_dir = os.path.join(self.stage_dir, \"Data\")\n shutil.copytree(self.config_dict[self.cf_data_dir_key], data_dir)\n\n # Fault list\n shutil.copy(self.config_dict[self.cf_fault_list_key], self.stage_dir)\n\n # Create runs folder\n os.mkdir(os.path.join(self.stage_dir, \"Runs\"))\n\n # Mgmt queue\n os.mkdir(os.path.join(self.stage_dir, \"mgmt_db_queue\"))\n\n self.runs_dir = sim_struct.get_runs_dir(self.stage_dir)\n\n def install(self):\n \"\"\"Install the automated workflow\n\n Runs install bash script, saves output into log files in the\n staging directory. 
Also checks for error keywords in the output\n and saves warnings accordingly.\n \"\"\"\n script_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"../scripts/cybershake/install_cybershake.py\",\n )\n cmd = \"python {} {} {} {} --seed {} --stat_file_path {}\".format(\n script_path,\n self.stage_dir,\n os.path.join(\n self.stage_dir,\n os.path.basename(self.config_dict[self.cf_fault_list_key]),\n ),\n self.version,\n self.config_dict[const.RootParams.seed.value],\n self.config_dict[\"stat_file\"],\n )\n cmd = (\n cmd + \" --extended_period\"\n if self.config_dict.get(\"extended_period\") is True\n else cmd\n )\n cmd = (\n cmd + \" --keep_dup_stations\"\n if self.config_dict.get(\"keep_dup_stations\") is True\n else cmd\n )\n\n print(\"Running install...\\nCmd: {}\".format(cmd))\n out_file = os.path.join(self.stage_dir, self.install_out_file)\n err_file = os.path.join(self.stage_dir, self.install_err_file)\n with open(out_file, \"w\") as out_f, open(err_file, \"w\") as err_f:\n exe(cmd, debug=False, stdout=out_f, stderr=err_f)\n\n # Check for errors\n # Get these straight from execution?\n output = open(out_file, \"r\").read()\n error = open(err_file, \"r\").read()\n if any(cur_str in output.lower() for cur_str in self.error_keywords):\n msg = \"There appears to be errors in the install. Error keyword found in stdout!\"\n print(msg)\n print(\"##### INSTALL OUTPUT #####\")\n print(output)\n print(\"##########################\")\n self.warnings.append(Warning(\"Install - Stdout\", msg))\n\n if any(cur_str in error.lower() for cur_str in self.error_keywords):\n msg = \"There appears to be errors in the install. Error keyword found in stderr!\"\n print(msg)\n print(\"##### INSTALL OUTPUT #####\")\n print(error)\n print(\"##########################\")\n self.errors.append(Error(\"Install - Stderr\", msg))\n\n self.fault_dirs, self.sim_dirs = get_sim_dirs(self.runs_dir)\n\n def _check_true(self, check: bool, location: str, error_msg: str):\n if not check:\n self.errors.append(Error(location, error_msg))\n\n def check_install(self):\n \"\"\"Checks that all required templates exists, along with the yaml params\"\"\"\n for sim_dir in self.sim_dirs:\n # Check sim_params.yaml are there\n self._check_true(\n \"sim_params.yaml\" in os.listdir(sim_dir),\n \"Install - Sim params\",\n \"Sim params file is missing in {}\".format(sim_dir),\n )\n\n # Check fault params\n for fault in self.fault_dirs:\n self._check_true(\n \"fault_params.yaml\" in os.listdir(fault),\n \"Install - Fault params\",\n \"Fault params are missing in {}\".format(fault),\n )\n\n # Check root params\n self._check_true(\n \"root_params.yaml\" in os.listdir(self.runs_dir),\n \"Install - root params\",\n \"Root params are missing in {}\".format(self.runs_dir),\n )\n\n def _run_auto(self, user: str, sleep_time: int = 10):\n \"\"\"\n Runs auto submit\n\n Parameters\n ----------\n user: str\n The username under which to run the tasks\n sleep_time: int\n Time (in seconds) between progress checks\n \"\"\"\n submit_cmd = \"python {} {} {} {} --sleep_time 2\".format(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"../scripts/cybershake/run_cybershake.py\",\n ),\n self.stage_dir,\n user,\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n self.config_dict[\"wrapper_config\"],\n ),\n )\n\n # Different process types for which canceling/resume is tested\n proc_type_cancel = None\n if self.config_dict[self.test_checkpoint_key]:\n proc_type_cancel = [\n const.ProcessType.EMOD3D,\n 
const.ProcessType.HF,\n const.ProcessType.BB,\n ]\n\n def run_wrapper(command: str):\n p_submit = non_blocking_exe(\n command,\n debug=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n encoding=\"utf-8\",\n )\n self._processes.append(p_submit)\n p_submit_out_nbsr = NonBlockingStreamReader(p_submit.stdout)\n p_submit_err_nbsr = NonBlockingStreamReader(p_submit.stderr)\n\n # Create and open the log files\n out_submit_f = open(os.path.join(self.stage_dir, self.submit_out_file), \"w\")\n err_submit_f = open(os.path.join(self.stage_dir, self.submit_err_file), \"w\")\n self._files.extend((out_submit_f, err_submit_f))\n return (\n p_submit,\n [(out_submit_f, p_submit_out_nbsr), (err_submit_f, p_submit_err_nbsr)],\n )\n\n def restart_command(process: subprocess.Popen, command: str):\n print(\"Restarting command: {}\".format(command))\n process.send_signal(signal.SIGINT)\n process.wait(5)\n if process.poll() is None:\n raise RuntimeError(\"Process {} would not die\".format(process.args))\n return run_wrapper(command)\n\n def get_laps_till_restart():\n return nprdm.poisson(3)\n\n laps_till_restart = 5\n\n # Have to put this in a massive try block, to ensure that\n # the run_queue_and_auto_submit process is terminated on any errors.\n try:\n print(\"Starting cybershake wrapper...\")\n p_submit, outputs_to_check = run_wrapper(submit_cmd)\n\n # Monitor mgmt db\n print(\"Progress: \")\n start_time = time.time()\n while time.time() - start_time < self.timeout:\n if self._test_restart:\n laps_till_restart -= 1\n if laps_till_restart < 1:\n p_submit, outputs_to_check = restart_command(\n p_submit, submit_cmd\n )\n laps_till_restart = get_laps_till_restart()\n\n try:\n (\n total_count,\n comp_count,\n failed_count,\n ) = self.check_mgmt_db_progress()\n if not self.check_completed():\n return False\n except sql.OperationalError as ex:\n print(\n \"Operational error while accessing database. 
\"\n \"Retrying in {} seconds\\n{}\".format(sleep_time, ex)\n )\n time.sleep(sleep_time)\n continue\n\n print(\n \"Completed: {}, Failed: {}, Total: {}\".format(\n comp_count, failed_count, total_count\n )\n )\n\n # Get the log data\n for file, reader in outputs_to_check:\n lines = reader.readlines()\n if lines:\n file.writelines(lines)\n file.flush()\n\n if proc_type_cancel:\n proc_type_cancel = self.cancel_running(proc_type_cancel)\n\n if total_count == (comp_count + failed_count):\n break\n else:\n time.sleep(sleep_time)\n\n if time.time() - start_time >= self.timeout:\n print(\"The auto-submit timeout expired.\")\n self.errors.append(\n Error(\"Auto-submit timeout\", \"The auto-submit timeout expired.\")\n )\n return False\n # Still display the exception\n except Exception as ex:\n raise ex\n # Clean up\n finally:\n self.close()\n\n return True\n\n def close(self):\n \"\"\"Terminates any running processes and closes any open files\"\"\"\n for p in self._processes:\n if p is not None:\n p.terminate()\n for f in self._files:\n if f is not None:\n f.close()\n\n def cancel_running(self, proc_types: List[const.ProcessType]):\n \"\"\"Looks for any running task of the specified process types\n and attempts to cancel one of each.\n \"\"\"\n # Get all running jobs in the mgmt db\n db = MgmtDB(sim_struct.get_mgmt_db(self.stage_dir))\n entries = db.command_builder(\n allowed_tasks=proc_types, allowed_states=[const.Status.running]\n )\n\n # Cancel one for each process type\n for entry in entries:\n if entry.proc_type in proc_types:\n print(\n f\"Checkpoint testing: Cancelling job-id {entry.job_id} \"\n \"for {entry.run_name} and process type {entry.proc_type}\"\n )\n\n out, err = Scheduler.get_scheduler().cancel_job(entry.job_id)\n\n print(\"Scancel out: \", out, err)\n if \"error\" not in out.lower() and \"error\" not in err.lower():\n self.canceled_running.append(str(entry.job_id))\n proc_types.remove(entry.proc_type)\n print(\"Cancelled job-id {}\".format(entry.job_id))\n\n return proc_types\n\n def check_mgmt_db(self):\n \"\"\"Create errors for all entries in management db that did not complete\"\"\"\n base_proc_types = [\n const.ProcessType.EMOD3D,\n const.ProcessType.HF,\n const.ProcessType.BB,\n const.ProcessType.IM_calculation,\n ]\n db = MgmtDB(sim_struct.get_mgmt_db(self.stage_dir))\n\n entries = db.command_builder(\n allowed_tasks=base_proc_types,\n allowed_states=[const.Status.unknown, const.Status.failed],\n blocked_ids=self.canceled_running,\n )\n\n for entry in entries:\n self.errors.append(\n Error(\n \"Slurm task\",\n \"Run {} did not complete task {} \"\n \"(Status {}, JobId {}\".format(\n entry.run_name,\n const.ProcessType(entry.proc_type),\n const.Status(entry.status),\n entry.job_id,\n ),\n )\n )\n\n def check_sim_result(self, sim_dir: str):\n \"\"\"Checks that all the LF, HF and BB binaries are there and that the\n IM values match up with the benchmark IMs\n \"\"\"\n result = True\n\n # Check HF binary\n hf_bin = sim_struct.get_hf_bin_path(sim_dir)\n if not os.path.isfile(hf_bin):\n self.errors.append(\n Error(\"HF - Binary\", \"The HF binary is not at {}\".format(hf_bin))\n )\n result = False\n\n # Check BB binary\n bb_bin = sim_struct.get_bb_bin_path(sim_dir)\n if not os.path.isfile(bb_bin):\n self.errors.append(\n Error(\"BB - Binary\", \"The BB binary is not at {}\".format(hf_bin))\n )\n result = False\n\n # Check IM\n im_csv = sim_struct.get_IM_csv(sim_dir)\n if not os.path.isfile(im_csv):\n self.errors.append(\n Error(\n \"IM_calc - CSV\", \"The IM_calc csv file is not 
at {}\".format(im_csv)\n )\n )\n result = False\n else:\n bench_csv = os.path.join(\n self.im_bench_folder,\n \"{}.csv\".format(os.path.basename(sim_dir).split(\".\")[0]),\n )\n bench_df = pd.read_csv(bench_csv)\n cur_df = pd.read_csv(im_csv)\n\n try:\n assert_frame_equal(cur_df, bench_df, atol=1e-04, rtol=1e-03)\n except AssertionError:\n self.errors.append(\n Error(\n \"IM - Values\",\n \"The IMs for {} are not equal to the benchmark {}\".format(\n im_csv, bench_csv\n ),\n )\n )\n result = False\n\n return result\n\n def check_mgmt_db_progress(self):\n \"\"\"Checks auto submit progress in the management db\"\"\"\n base_proc_types = [\n const.ProcessType.EMOD3D,\n const.ProcessType.HF,\n const.ProcessType.BB,\n const.ProcessType.IM_calculation,\n ]\n db = MgmtDB(sim_struct.get_mgmt_db(self.stage_dir))\n\n total_count = len(db.command_builder(allowed_tasks=base_proc_types))\n\n comp_count = len(\n db.command_builder(\n allowed_tasks=base_proc_types, allowed_states=[const.Status.completed]\n )\n )\n\n failed_count = len(\n db.command_builder(\n allowed_tasks=base_proc_types,\n allowed_states=[const.Status.failed, const.Status.unknown],\n )\n )\n\n return total_count, comp_count, failed_count\n\n def check_completed(self):\n \"\"\"Checks all simulations that have completed\"\"\"\n base_proc_types = [const.ProcessType.IM_calculation]\n db = MgmtDB(sim_struct.get_mgmt_db(self.stage_dir))\n entries = db.command_builder(\n allowed_tasks=base_proc_types, allowed_states=[const.Status.completed]\n )\n\n completed_sims = [sim_t.run_name for sim_t in entries]\n\n # Only check the ones that haven't been checked already\n completed_new = set(completed_sims) - (self._sim_passed | self._sim_failed)\n\n for sim in completed_new:\n result = self.check_sim_result(\n os.path.join(\n self.runs_dir, sim_struct.get_fault_from_realisation(sim), sim\n )\n )\n\n if not result:\n self._sim_failed.add(sim)\n\n if self._stop_on_error:\n print(\"Quitting as the following errors occured: \")\n self.print_errors()\n return False\n else:\n print(\"The following error occured for simulation {}:\".format(sim))\n print(\n \"ERROR: {}, {}\\n\".format(\n self.errors[-1].location, self.errors[-1].error\n )\n )\n\n else:\n self._sim_passed.add(sim)\n\n print(\n \"Passed/Failed/Total simulations: {}/{}/{}, \".format(\n len(self._sim_passed), len(self._sim_failed), len(self.sim_dirs)\n )\n )\n\n return True\n\n def teardown(self):\n \"\"\"Remove all files created during the end-to-end test\"\"\"\n print(\"Deleting everything under {}\".format(self.stage_dir))\n shutil.rmtree(self.stage_dir)\n\n\nclass NonBlockingStreamReader:\n \"\"\"A non-blocking stream reader.\n\n Based on http://eyalarubas.com/python-subproc-nonblock.html\n \"\"\"\n\n def __init__(self, stream):\n \"\"\"\n stream: the stream to read from.\n Usually a process' stdout or stderr.\n \"\"\"\n\n self._s = stream\n self._q = Queue()\n\n def _populate_queue(stream, queue):\n \"\"\"\n Collect lines from 'stream' and put them in 'queue'.\n \"\"\"\n\n while True:\n line = stream.readline()\n if line:\n queue.put(line)\n else:\n print(\"Stream has been closed.\")\n sys.exit()\n\n self._t = Thread(target=_populate_queue, args=(self._s, self._q))\n self._t.daemon = True\n self._t.start() # start collecting lines from the stream\n\n def readlines(self):\n \"\"\"Reads the lines from the queue, returns None if the queue is empty\"\"\"\n lines = []\n cur_line = \"\"\n while cur_line is not None:\n try:\n cur_line = self._q.get(block=False)\n except Empty:\n cur_line = None\n\n 
if cur_line is not None:\n lines.append(cur_line)\n\n if lines:\n return lines\n return None\n",
"import glob\nfrom logging import Logger\nimport numpy as np\nfrom os import path\nimport sys\n\nfrom qcore import constants as const\nfrom qcore import utils\nfrom qcore.formats import load_station_file\nfrom qcore.qclogging import get_basic_logger\nfrom qcore import simulation_structure as sim_struct\nfrom qcore.timeseries import get_observed_stations, BBSeis\nfrom qcore.config import qconfig\n\nfrom estimation.estimate_wct import est_IM_chours_single, get_wct, CH_SAFETY_FACTOR\nfrom shared_workflow.platform_config import (\n platform_config,\n get_platform_node_requirements,\n get_target_machine,\n)\nfrom shared_workflow.shared import set_wct\nfrom shared_workflow.shared_automated_workflow import submit_script_to_scheduler\nfrom shared_workflow.shared_template import write_sl_script\n\n\ndef submit_im_calc_slurm(\n sim_dir: str,\n write_dir: str = None,\n simple_out: bool = True,\n adv_ims: bool = False,\n target_machine: str = get_target_machine(const.ProcessType.IM_calculation).name,\n logger: Logger = get_basic_logger(),\n):\n \"\"\"Creates the IM calc slurm scrip, also submits if specified\n\n The options_dict is populated by the DEFAULT_OPTIONS, values can be changed by\n passing in a dict containing the entries that require changing. Merges the\n two dictionaries, the passed in one has higher priority.\n \"\"\"\n # Load the yaml params\n params = utils.load_sim_params(\n sim_struct.get_sim_params_yaml_path(sim_dir), load_vm=True\n )\n realisation_name = params[const.SimParams.run_name.value]\n fault_name = sim_struct.get_fault_from_realisation(realisation_name)\n station_count = len(load_station_file(params[\"FD_STATLIST\"]).index)\n\n header_options = {\n const.SlHdrOptConsts.description.value: \"Calculates intensity measures.\",\n const.SlHdrOptConsts.memory.value: \"2G\",\n const.SlHdrOptConsts.version.value: \"slurm\",\n \"exe_time\": const.timestamp,\n const.SlHdrOptConsts.additional.value: \"#SBATCH --hint=nomultithread\"\n if platform_config[const.PLATFORM_CONFIG.SCHEDULER.name] == \"slurm\"\n else [\"\"],\n }\n\n body_options = {\n const.SlBodyOptConsts.component.value: \"\",\n \"realisation_name\": realisation_name,\n const.SlBodyOptConsts.fault_name.value: fault_name,\n \"np\": platform_config[const.PLATFORM_CONFIG.IM_CALC_DEFAULT_N_CORES.name],\n \"sim_IM_calc_dir\": sim_struct.get_im_calc_dir(sim_dir),\n \"output_csv\": sim_struct.get_IM_csv(sim_dir),\n \"output_info\": sim_struct.get_IM_info(sim_dir),\n \"models\": \"\",\n const.SlBodyOptConsts.mgmt_db.value: \"\",\n \"n_components\": \"\",\n \"match_obs_stations\": False,\n \"station_file\": \"$(cat $fd_name | awk '{print $1}')\",\n }\n\n command_options = {\n const.SlBodyOptConsts.sim_dir.value: sim_dir,\n const.SlBodyOptConsts.component.value: \"\",\n const.SlBodyOptConsts.sim_name.value: realisation_name,\n const.SlBodyOptConsts.fault_name.value: fault_name,\n const.SlBodyOptConsts.n_procs.value: platform_config[\n const.PLATFORM_CONFIG.IM_CALC_DEFAULT_N_CORES.name\n ],\n const.SlBodyOptConsts.extended.value: \"\",\n const.SlBodyOptConsts.simple_out.value: \"\",\n const.SlBodyOptConsts.advanced_IM.value: \"\",\n \"pSA_periods\": \"\",\n }\n\n # Convert option settings to values\n if write_dir is None:\n write_dir = sim_dir\n\n # Simple vs adv im settings\n if adv_ims:\n # Common values\n proc_type = const.ProcessType.advanced_IM\n sl_template = \"adv_im_calc.sl.template\"\n script_prefix = \"adv_im_calc\"\n\n body_options[\"models\"] = \" \".join(\n params[const.SlBodyOptConsts.advanced_IM.value][\"models\"]\n )\n 
command_options[\n            const.SlBodyOptConsts.advanced_IM.value\n        ] = f\"-a {body_options['models']} --OpenSees {qconfig['OpenSees']} \"\n\n        # create temporary station list if \"match_obs_stations\" is directory\n        if path.isdir(\n            str(params[const.SlBodyOptConsts.advanced_IM.value][\"match_obs_stations\"])\n        ):\n            logger.debug(\n                f\"match_obs_station specified: {params[const.SlBodyOptConsts.advanced_IM.value]['match_obs_stations']}\"\n            )\n            # Retrieve the station list from observed/fault(eventname)/Vol*/data/accBB/station.\n            obs_accBB_dir_glob = path.join(\n                params[const.SlBodyOptConsts.advanced_IM.value][\"match_obs_stations\"],\n                f\"{fault_name}/*/*/accBB\",\n            )\n            obs_accBB_dir = glob.glob(obs_accBB_dir_glob)\n            if len(obs_accBB_dir) > 1:\n                logger.error(\n                    \"Got more than one folder from globbing. Please double check that the match_obs_stations path is correct.\"\n                )\n                sys.exit()\n            station_names_observed = set(get_observed_stations(obs_accBB_dir[0]))\n            station_names_simulated = set(\n                BBSeis(f\"{sim_dir}/BB/Acc/BB.bin\").stations.name\n            )\n            station_names_tmp = list(station_names_observed & station_names_simulated)\n            # write to a tmp file\n            tmp_station_file = path.join(sim_dir, \"tmp_station_file\")\n            with open(tmp_station_file, \"w\") as f:\n                for station in station_names_tmp:\n                    f.write(f\"{station} \")\n            body_options[\"station_file\"] = f\"$(cat {tmp_station_file})\"\n            command_options[const.SlBodyOptConsts.advanced_IM.value] = (\n                command_options[const.SlBodyOptConsts.advanced_IM.value]\n                + f\"--station_names `cat {tmp_station_file}`\"\n            )\n        # header_options[const.SlHdrOptConsts.n_tasks.value] = body_options[\"np\"] = qconfig[\"cores_per_node\"]\n\n        # Time for one station to run in hours\n        # This should be a machine property. Or take the largest across all machines used\n        time_for_one_station = 0.5\n        est_run_time = (\n            np.ceil(station_count / qconfig[\"cores_per_node\"])\n            * 2\n            * time_for_one_station\n        )\n\n    else:\n        proc_type = const.ProcessType.IM_calculation\n        sl_template = \"sim_im_calc.sl.template\"\n        script_prefix = \"sim_im_calc\"\n\n        if simple_out:\n            command_options[const.SlBodyOptConsts.simple_out.value] = \"-s\"\n\n        if params[\"ims\"][const.RootParams.extended_period.name]:\n            command_options[const.SlBodyOptConsts.extended.value] = \"-e\"\n            period_count = len(\n                np.unique(np.append(params[\"ims\"][\"pSA_periods\"], const.EXT_PERIOD))\n            )\n        else:\n            period_count = len(params[\"ims\"][\"pSA_periods\"])\n\n        if \"pSA_periods\" in params[\"ims\"]:\n            command_options[\n                \"pSA_periods\"\n            ] = f\"-p {' '.join(str(p) for p in params['ims']['pSA_periods'])}\"\n\n        comps_to_store = params[\"ims\"][const.SlBodyOptConsts.component.value]\n        command_options[const.SlBodyOptConsts.component.value] = \"-c \" + \" \".join(\n            comps_to_store\n        )\n        body_options[\"n_components\"] = len(comps_to_store)\n\n        # Get wall clock estimation\n        logger.info(\n            \"Running wall clock estimation for IM sim for realisation {}\".format(\n                realisation_name\n            )\n        )\n        _, est_run_time = est_IM_chours_single(\n            station_count,\n            int(float(params[\"sim_duration\"]) / float(params[\"dt\"])),\n            comps_to_store,\n            period_count,\n            body_options[\"np\"],\n        )\n\n    # Header options requiring upstream settings\n    # special treatment for im_calc, as the scaling feature in estimation is not suitable\n    # cap the wct, otherwise cannot submit\n    est_run_time = min(est_run_time * CH_SAFETY_FACTOR, qconfig[\"MAX_JOB_WCT\"])\n    # set ch_safety_factor=1 as we scale it already.\n    header_options[\"wallclock_limit\"] = get_wct(est_run_time, ch_safety_factor=1)\n    
logger.debug(\"Using WCT for IM_calc: {header_options['wallclock_limit']}\")\n header_options[\"job_name\"] = \"{}_{}\".format(proc_type.str_value, fault_name)\n header_options[\"platform_specific_args\"] = get_platform_node_requirements(\n body_options[\"np\"]\n )\n\n script_file_path = write_sl_script(\n write_dir,\n sim_dir,\n proc_type,\n script_prefix,\n header_options,\n (sl_template, body_options),\n command_options,\n )\n\n submit_script_to_scheduler(\n script_file_path,\n proc_type.value,\n sim_struct.get_mgmt_db_queue(params[\"mgmt_db_location\"]),\n sim_dir,\n realisation_name,\n target_machine=target_machine,\n logger=logger,\n )\n"
] |
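The E2ETests code above funnels child-process output through the NonBlockingStreamReader defined at the end of the first file. A minimal sketch of that pattern, assuming only what the class itself shows; the `ping` command and the 0.5 s poll interval are illustrative placeholders, not from the repository:

import subprocess
import time

# Wrap a child process's stdout so a monitoring loop never blocks on readline();
# NonBlockingStreamReader is the class defined in the test helper above.
proc = subprocess.Popen(
    ["ping", "-c", "3", "localhost"],  # placeholder command, any chatty process works
    stdout=subprocess.PIPE,
    encoding="utf-8",
)
reader = NonBlockingStreamReader(proc.stdout)

while proc.poll() is None:
    lines = reader.readlines()  # returns None when the internal queue is empty
    if lines:
        print("".join(lines), end="")
    time.sleep(0.5)  # cheap polling, mirroring the sleep_time pattern in _run_auto

# Drain anything produced between the last poll and process exit.
remaining = reader.readlines()
if remaining:
    print("".join(remaining), end="")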
[
[
"numpy.count_nonzero",
"numpy.trim_zeros",
"numpy.min"
],
[
"numpy.log",
"numpy.arange",
"numpy.int32",
"numpy.float32",
"numpy.where"
],
[
"numpy.random.poisson",
"pandas.read_csv",
"pandas.testing.assert_frame_equal",
"pandas.DataFrame"
],
[
"numpy.ceil",
"numpy.append"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kcyu1993/ML_course_kyu
|
[
"99671281bcf83cbcd75d1c57772bdfdf79d28aff",
"99671281bcf83cbcd75d1c57772bdfdf79d28aff",
"99671281bcf83cbcd75d1c57772bdfdf79d28aff"
] |
[
"labs/ex04/template/plots.py",
"labs/ex04/template/ridge_regression.py",
"projects/project1/scripts/plots.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"a function of ploting figures.\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef cross_validation_visualization(lambds, mse_tr, mse_te):\n \"\"\"visualization the curves of mse_tr and mse_te.\"\"\"\n plt.semilogx(lambds, mse_tr, marker=\".\", color='b', label='train error')\n plt.semilogx(lambds, mse_te, marker=\".\", color='r', label='test error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation\")\n\n\ndef cross_validation_visualization_for_degree(degrees, mse_tr, mse_te):\n \"\"\"visualization the curves of mse_tr and mse_te.\"\"\"\n plt.plot(degrees, mse_tr, marker=\".\", color='b', label='train error')\n plt.plot(degrees, mse_te, marker=\".\", color='r', label='test error')\n plt.xlabel(\"degree\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation\")\n\ndef bias_variance_decomposition_visualization(degrees, rmse_tr, rmse_te):\n \"\"\"visualize the bias variance decomposition.\"\"\"\n rmse_tr_mean = np.expand_dims(np.mean(rmse_tr, axis=0), axis=0)\n rmse_te_mean = np.expand_dims(np.mean(rmse_te, axis=0), axis=0)\n plt.plot(\n degrees,\n rmse_tr.T,\n 'b',\n linestyle=\"-\",\n color=([0.7, 0.7, 1]),\n label='train',\n linewidth=0.3)\n plt.plot(\n degrees,\n rmse_te.T,\n 'r',\n linestyle=\"-\",\n color=[1, 0.7, 0.7],\n label='test',\n linewidth=0.3)\n plt.plot(\n degrees,\n rmse_tr_mean.T,\n 'b',\n linestyle=\"-\",\n label='train',\n linewidth=3)\n plt.plot(\n degrees,\n rmse_te_mean.T,\n 'r',\n linestyle=\"-\",\n label='test',\n linewidth=3)\n plt.ylim(0.2, 0.7)\n plt.xlabel(\"degree\")\n plt.ylabel(\"error\")\n plt.title(\"Bias-Variance Decomposition\")\n plt.savefig(\"bias_variance\")\n",
"# -*- coding: utf-8 -*-\n\"\"\"Exercise 3.\n\nRidge Regression\n\"\"\"\n\nimport numpy as np\nfrom .costs import compute_mse\n\ndef ridge_regression(y, tx, lamb):\n \"\"\"implement ridge regression.\"\"\"\n # ***************************************************\n # INSERT YOUR CODE HERE\n # ridge regression: TODO\n # ***************************************************\n # Hes = tx.T * tx + 2*N*lambda * I_m\n G = np.eye(tx.shape[1])\n G[0, 0] = 0\n hes = np.dot(tx.T, tx) + lamb * G\n weight = np.linalg.solve(hes, np.dot(tx.T, y))\n mse = compute_mse(y, tx, weight)\n return mse, weight",
"# -*- coding: utf-8 -*-\n\"\"\"\nThis python file contains various of functions to generate plots.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom helpers import get_plot_path\n\n\"\"\" Lab 3 \"\"\"\n\n\ndef plot_fitted_curve(y, x, weights, ax):\n \"\"\"plot the fitted curve. x, weights should align dimension \"\"\"\n ax.scatter(x, y, color='b', s=12, facecolors='none', edgecolors='r')\n xvals = np.arange(min(x) - 0.1, max(x) + 0.1, 0.1)\n f = x.dot(weights)\n ax.plot(xvals, f)\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_title(\"Fitted curve for x y\")\n\n\ndef plot_train_test(train_errors, test_errors, names=['', ''], xlabel='', ylabel='',\n lambdas=None, filename=''):\n \"\"\"\n train_errors, test_errors and lambas should be list (of the same size) the respective train error and test error for a given lambda,\n * lambda[0] = 1\n * train_errors[0] = RMSE of a ridge regression on the train set\n * test_errors[0] = RMSE of the parameter found by ridge regression applied on the test set\n\n degree is just used for the title of the plot.\n \"\"\"\n plt.semilogx(lambdas, train_errors, color='b', marker='*', label=names[0])\n plt.semilogx(lambdas, test_errors, color='r', marker='*', label=names[1])\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(filename)\n leg = plt.legend(loc=1, shadow=True)\n leg.draw_frame(False)\n plt.show()\n plt.savefig(get_plot_path(\"train_test \" + filename))\n\n\n\"\"\" Lab 4 \"\"\"\n\n\ndef cross_validation_visualization(params, mse_tr, mse_te, params_name='', title='', error_name=''):\n \"\"\"visualization the curves of mse_tr and mse_te.\"\"\"\n plt.semilogx(params, mse_tr, marker=\".\", color='b', label='train error')\n plt.semilogx(params, mse_te, marker=\".\", color='r', label='test error')\n plt.xlabel(\"Parameters: \" + params_name)\n plt.ylabel(\"Error: \" + error_name)\n plt.title(\"cross validation\" + title)\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(get_plot_path(\"cross_validation_\" + title))\n plt.show()\n\n\ndef cross_validation_visualization_due(params, mse_tr, mse_te, param2, tr2, te2, params_name='', prname2='', title='',\n error_name=''):\n \"\"\"visualization the curves of mse_tr and mse_te.\"\"\"\n plt.semilogx(params, mse_tr, marker=\".\", color='r', label='train error ' + params_name, linestyle='solid')\n plt.semilogx(params, mse_te, marker=\".\", color='r', label='test error ' + params_name, linestyle='dashed')\n plt.semilogx(param2, tr2, marker=\".\", color='b', label='train error ' + prname2, linestyle='solid')\n plt.semilogx(param2, te2, marker=\".\", color='b', label='test error ' + prname2, linestyle='dashed')\n plt.xlabel(\"Parameters: \" + params_name + \" \" + prname2)\n plt.ylabel(\"Error: \" + error_name)\n plt.title(\"cross validation \" + title)\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(get_plot_path(\"cross_validation_\" + title))\n plt.show()\n\n\n\ndef bias_variance_decomposition_visualization(models, rmse_tr, rmse_te, model_names=[]):\n \"\"\"visualize the bias variance decomposition.\"\"\"\n rmse_tr_mean = np.expand_dims(np.mean(rmse_tr, axis=0), axis=0)\n rmse_te_mean = np.expand_dims(np.mean(rmse_te, axis=0), axis=0)\n degrees = np.array(range(len(models)))\n plt.plot(\n degrees,\n rmse_tr.T,\n 'b',\n linestyle=\"-\",\n color=([0.7, 0.7, 1]),\n label='train',\n linewidth=0.3)\n plt.plot(\n degrees,\n rmse_te.T,\n 'r',\n linestyle=\"-\",\n color=[1, 0.7, 0.7],\n label='test',\n linewidth=0.3)\n plt.plot(\n degrees,\n 
rmse_tr_mean.T,\n 'b',\n linestyle=\"-\",\n label='train',\n linewidth=3)\n plt.plot(\n degrees,\n rmse_te_mean.T,\n 'r',\n linestyle=\"-\",\n label='test',\n linewidth=3)\n # plt.ylim(0.2, 0.7)\n plt.xlabel(\"degree\")\n plt.ylabel(\"error\")\n plt.title(\"Bias-Variance Decomposition\")\n plt.savefig(get_plot_path(\"bias_variance\"))\n plt.show()\n\n\ndef pca_plot(headers, pc1, pc2, title=''):\n \"\"\" Plot pca accordingly \"\"\"\n fig, ax = plt.subplots()\n ax.scatter(pc1, pc2)\n for i, name in enumerate(headers[2:]):\n ax.annotate(name, (pc1[i], pc2[i]))\n\n ax.set_xlabel(\"PC 1\")\n ax.set_ylabel(\"PC 2\")\n ax.set_title(\"PCA plot for data\")\n fig.show()\n fig.savefig(get_plot_path(\"pca_plot \" + title))\n\n\ndef pca_plot_general(headers, pcs, pcs2=None, index=(0, 1), title='', color=['b', 'r'], print_name=False):\n pc1 = pcs[index[0]]\n pc2 = pcs[index[1]]\n fig, ax = plt.subplots()\n sct = ax.scatter(pc1, pc2, color=color[0])\n for i, name in enumerate(headers[2:]):\n if not print_name:\n ax.annotate('{}'.format(i - 2), (pc1[i], pc2[i]))\n else:\n ax.annotate('{}-'.format(i - 2) + name, (pc1[i], pc2[i]))\n\n if pcs2 is not None:\n fig.hold()\n pc1 = pcs2[index[0]]\n pc2 = pcs2[index[1]]\n sct2 = ax.scatter(pcs2[index[0]], pcs2[index[1]], color=color[1])\n for i, name in enumerate(headers[2:]):\n if not print_name:\n ax.annotate('{}'.format(i - 2), (pc1[i], pc2[i]))\n else:\n ax.annotate('{}-'.format(i - 2) + name, (pc1[i], pc2[i]))\n fig.legend((sct, sct2), ('train', 'test'))\n\n ax.set_xlabel(\"PC {}\".format(index[0]))\n ax.set_ylabel(\"PC {}\".format(index[1]))\n ax.set_title(\"PCA plot for data\")\n fig.show()\n fig.savefig(get_plot_path(\"pca_plot \" + title))\n\n\ndef histogram(label, data, headers=None, colors=['b', 'r'], print_name=True, transform=None, filename='Default.plt',\n outlier=False):\n \"\"\"\n Build up histogram regarding to labels, via each dimensions.\n Stored in the path: plots/histogram\n :param ids: index\n :param label: y\n :param data: data matrix\n :param headers: headers accordingly\n :param print_name: print name on the histogram\n :return:\n \"\"\"\n hist_path = get_plot_path() + '/histogram/'\n # Generate positive and negative index\n negative_index = np.where(label < 0)[0]\n positive_index = np.where(label > 0)[0]\n nega_data = data[negative_index, :]\n posi_data = data[positive_index, :]\n if transform is None:\n transform = [lambda x: x, lambda x: np.log(x + 0.01 - np.min(x)), lambda x: np.sqrt(np.abs(x)),\n lambda x: np.power(x, 2)]\n trans_labels = ['linear', 'log', 'sqrt|abs|', 'power']\n # Plot according to each dimensions\n headers = headers[2:]\n # Hard coded\n gs = gridspec.GridSpec(2, 2)\n assert len(headers) == len(data[0])\n for index, header in enumerate(headers):\n # fig, axs = plt.subplots(1, len(transform))\n for f_ind, f_trans in enumerate(transform):\n ax = plt.subplot(gs[int(f_ind / 2), f_ind % 2])\n ax.set_aspect('auto')\n if outlier:\n ax.hist(f_trans(nega_data[:, index]).T, bins=100, color=colors[0], alpha=0.5)\n else:\n # nega_ind = np.where(nega_data[:, index] != -999.0)\n ax.hist(f_trans(nega_data[np.where(nega_data[:, index] != -999.0)[0], index]).T,\n bins=100, color=colors[0], alpha=0.5)\n ax.hold(True)\n if outlier:\n ax.hist(f_trans(posi_data[:, index]).T, bins=100, color=colors[1], alpha=0.8)\n else:\n ax.hist(f_trans(posi_data[np.where(posi_data[:, index] != -999.0)[0], index]).T,\n bins=100, color=colors[1], alpha=0.8)\n if print_name:\n ax.set_xlabel(\"{}({})\".format(trans_labels[f_ind], header))\n else:\n 
ax.set_xlabel(trans_labels[f_ind])\n ax.hold(False)\n plt.savefig(hist_path + \"{}-{}_{}.png\".format(filename, index, header))\n plt.close()\n"
] |
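For the ridge_regression snippet above, a self-contained numpy check of the same closed-form solve; the toy data, seed and true weights are assumptions for illustration, not part of the lab code:

import numpy as np

# w = (tx.T @ tx + lamb * G)^{-1} tx.T @ y, with G the identity except G[0, 0] = 0
# so the intercept column is not penalised; the same system solved above.
rng = np.random.default_rng(0)
tx = np.column_stack([np.ones(50), rng.normal(size=(50, 2))])
y = tx @ np.array([1.0, 2.0, -0.5]) + 0.1 * rng.normal(size=50)

lamb = 0.1
G = np.eye(tx.shape[1])
G[0, 0] = 0
w = np.linalg.solve(tx.T @ tx + lamb * G, tx.T @ y)
print(w)  # expected to be close to [1.0, 2.0, -0.5]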
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.semilogx",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.mean",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
],
[
"numpy.dot",
"numpy.eye"
],
[
"matplotlib.pyplot.legend",
"numpy.abs",
"matplotlib.pyplot.title",
"matplotlib.pyplot.semilogx",
"numpy.power",
"numpy.min",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"numpy.mean",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.where",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mfrichtl/pycalphad
|
[
"a06715a27861cf801d69650468bb683956928db4"
] |
[
"pycalphad/tests/test_codegen.py"
] |
[
"\"\"\"\nTests for code generation from SymPy/SymEngine objects.\n\"\"\"\n\nimport pickle\nimport pytest\nimport numpy as np\nfrom symengine.lib.symengine_wrapper import LambdaDouble, LLVMDouble\nfrom symengine import zoo\nfrom pycalphad import Model, variables as v\nfrom pycalphad.codegen.callables import build_phase_records\nfrom pycalphad.codegen.sympydiff_utils import build_functions, build_constraint_functions\nfrom pycalphad.tests.fixtures import select_database, load_database\n\n\n@select_database(\"alnipt.tdb\")\ndef test_build_functions_options(load_database):\n \"\"\"The correct SymEngine backend can be chosen for build_functions\"\"\"\n dbf = load_database()\n mod = Model(dbf, ['AL'], 'LIQUID')\n int_cons = mod.get_internal_constraints()\n\n backend = 'lambda'\n fs_lambda = build_functions(mod.GM, mod.GM.free_symbols,\n include_obj=True, func_options={'backend': backend},\n include_grad=True, grad_options={'backend': backend},\n include_hess=True, hess_options={'backend': backend})\n assert isinstance(fs_lambda.func, LambdaDouble)\n assert isinstance(fs_lambda.grad, LambdaDouble)\n assert isinstance(fs_lambda.hess, LambdaDouble)\n\n cfs_lambda = build_constraint_functions(mod.GM.free_symbols, int_cons,\n func_options={'backend': backend},\n jac_options={'backend': backend},\n hess_options={'backend': backend})\n assert isinstance(cfs_lambda.cons_func, LambdaDouble)\n assert isinstance(cfs_lambda.cons_jac, LambdaDouble)\n assert isinstance(cfs_lambda.cons_hess, LambdaDouble)\n\n backend = 'llvm'\n fs_llvm = build_functions(mod.GM, mod.GM.free_symbols,\n include_obj=True, func_options={'backend': backend},\n include_grad=True, grad_options={'backend': backend},\n include_hess=True, hess_options={'backend': backend})\n print(fs_llvm.func)\n print(fs_lambda.func)\n assert isinstance(fs_llvm.func, LLVMDouble)\n assert isinstance(fs_llvm.grad, LLVMDouble)\n assert isinstance(fs_llvm.hess, LLVMDouble)\n\n cfs_llvm = build_constraint_functions(mod.GM.free_symbols, int_cons,\n func_options={'backend': backend},\n jac_options={'backend': backend},\n hess_options={'backend': backend})\n assert isinstance(cfs_llvm.cons_func, LLVMDouble)\n assert isinstance(cfs_llvm.cons_jac, LLVMDouble)\n assert isinstance(cfs_llvm.cons_hess, LLVMDouble)\n\n\n@select_database(\"alnipt.tdb\")\ndef test_phase_records_are_picklable(load_database):\n dbf = load_database()\n dof = np.array([300, 1.0])\n\n mod = Model(dbf, ['AL'], 'LIQUID')\n prxs = build_phase_records(dbf, [v.Species('AL')], ['LIQUID'], [v.T], {'LIQUID': mod}, build_gradients=True, build_hessians=True)\n prx_liquid = prxs['LIQUID']\n\n out = np.array([0.0])\n prx_liquid.obj(out, dof)\n\n prx_loaded = pickle.loads(pickle.dumps(prx_liquid))\n out_unpickled = np.array([0.0])\n prx_loaded.obj(out_unpickled, dof)\n\n assert np.isclose(out_unpickled[0], -1037.653911)\n assert np.all(out == out_unpickled)\n\n\[email protected]\n@select_database(\"cfe_broshe.tdb\")\ndef test_complex_infinity_can_build_callables_successfully(load_database):\n \"\"\"Test that functions that containing complex infinity can be built with codegen.\"\"\"\n dbf = load_database()\n mod = Model(dbf, ['C'], 'DIAMOND_A4')\n mod_vars = [v.N, v.P, v.T] + mod.site_fractions\n\n # Test builds functions only, since functions takes about 1 second to run.\n # Both lambda and llvm backends take a few seconds to build the derivatives\n # and are probably unnecessary to test.\n # XXX: SymEngine does not produce a zoo for this case\n assert zoo in list(mod.GM.atoms())\n 
build_functions(mod.GM, mod_vars, include_obj=True, include_grad=False, include_hess=False)\n\n int_cons = mod.get_internal_constraints()\n build_constraint_functions(mod_vars, int_cons)\n"
] |
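test_build_functions_options above demonstrates per-derivative backend selection; distilled to a minimal call using only names that appear in the test itself (loading the fixture database directly via Database is an assumption, the test uses its own fixture loader):

from pycalphad import Database, Model
from pycalphad.codegen.sympydiff_utils import build_functions

# "alnipt.tdb" is the fixture database referenced by the test above.
dbf = Database("alnipt.tdb")
mod = Model(dbf, ["AL"], "LIQUID")

# Choose the SymEngine backend per derivative via the *_options dicts,
# exactly as exercised in the test.
fs = build_functions(
    mod.GM,
    mod.GM.free_symbols,
    include_obj=True,
    func_options={"backend": "llvm"},
    include_grad=True,
    grad_options={"backend": "lambda"},
)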
[
[
"numpy.all",
"numpy.array",
"numpy.isclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
simonjayhawkins/pandas
|
[
"9f571c58d7796dac8fd1aa2301cf4aa30ad7143a",
"9f571c58d7796dac8fd1aa2301cf4aa30ad7143a"
] |
[
"bisect/38714.py",
"bisect/44900.py"
] |
[
"import pandas as pd\nimport pandas.testing as tm\n\nprint(pd.__version__)\n\ndf = pd.DataFrame({\"a\": [1] * 2188})\n\np = \"test.csv.zip\" # replace with available path\ndf.to_csv(p)\nresult = pd.read_csv(p)\nprint(result)\n\ntm.assert_frame_equal(result[[\"a\"]], df)\n",
"# BUG: Using pd.concat(axis=\"columns\") on differently sized MultiIndexed\n# DataFrames with a datetime index level containing exclusively NaT values\n# causes the level in the returned DataFrame to be a float instead of a datetime\n# #44900\n\nimport numpy as np\n\nimport pandas as pd\n\nprint(pd.__version__)\ndf_a = pd.DataFrame({\"a\": range(5), \"idx1\": range(5), \"idx2\": [pd.NaT] * 5}).set_index(\n [\"idx1\", \"idx2\"]\n)\ndf_b = pd.DataFrame({\"b\": range(6), \"idx1\": range(6), \"idx2\": [pd.NaT] * 6}).set_index(\n [\"idx1\", \"idx2\"]\n)\nresult = pd.concat([df_a, df_b], axis=\"columns\")\nprint(result)\n\nexpected = pd.DataFrame(\n {\n \"a\": list(range(5)) + [np.nan],\n \"b\": range(6),\n \"idx1\": range(6),\n \"idx2\": [pd.NaT] * 6,\n }\n).set_index([\"idx1\", \"idx2\"])\nexpected\n\npd.testing.assert_frame_equal(result, expected)\n"
] |
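Both bisect scripts above end in pandas.testing.assert_frame_equal. A small illustration of its tolerance arguments (atol/rtol were added in pandas 1.1.0, which is what the E2ETests constructor earlier in this dump probes for); the frames here are made up:

import pandas as pd
from pandas.testing import assert_frame_equal

a = pd.DataFrame({"x": [1.0000, 2.0000]})
b = pd.DataFrame({"x": [1.0002, 2.0001]})

# Passes: the differences fall within the absolute tolerance.
assert_frame_equal(a, b, atol=1e-3)

# Would raise AssertionError: outside the much tighter default rtol/atol.
# assert_frame_equal(a, b)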
[
[
"pandas.read_csv",
"pandas.testing.assert_frame_equal",
"pandas.DataFrame"
],
[
"pandas.concat",
"pandas.testing.assert_frame_equal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ZhengKeli/PositronSpector
|
[
"be0281fe50fe634183b6f239f03b7140c1dc0b7f",
"be0281fe50fe634183b6f239f03b7140c1dc0b7f"
] |
[
"src/dbspy/core/analyze/curve/_curve.py",
"src/dbspy/core/utils/variance.py"
] |
[
"import numpy as np\n\nfrom dbspy.core import base\nfrom dbspy.core.analyze import _analyze as analyze\nfrom dbspy.core.utils.variance import add_var, minus_var, divide_var, sum_var\n\n\n# define\n\nclass Conf(analyze.Conf):\n fold_modes = ('fold', 'none', 'right', 'left')\n compare_modes = ('ratio', 'difference')\n \n def __init__(self, fold_mode: str = 'fold', compare_mode: str = 'ratio', control_index=0,\n base_indices=None, comb_indices=None):\n self.fold_mode = fold_mode\n self.compare_mode = compare_mode\n self.control_index = control_index\n self.base_indices = base_indices\n self.comb_indices = comb_indices\n \n @staticmethod\n def create_process(cluster_block):\n return Process(cluster_block)\n\n\nclass Process(base.ElementProcess):\n def __init__(self, cluster_block):\n super().__init__(process_func, Conf(), cluster_block)\n\n\ndef process_func(sp_result_list, conf: Conf):\n sp_list = tuple(np.array(sp) for sp, _ in sp_result_list)\n x, yv_list, center_i = align_list(sp_list, conf.control_index)\n x, yv_list = fold_list(x, yv_list, center_i, conf.fold_mode)\n yv_list = tuple(divide_var(y, y_var, *sum_var(y, y_var)) for y, y_var in yv_list)\n yv_list = compare_list(yv_list, conf.compare_mode, conf.control_index)\n curve_list = tuple((x, y, y_var) for y, y_var in yv_list)\n component_result = None if conf.base_indices is None \\\n else process_components(yv_list, conf.base_indices, conf.comb_indices, conf.control_index)\n return curve_list, component_result\n\n\n# utils\n\ndef align_list(sp_list, control_index=None):\n # todo recode if the x is not the same\n control_index = 0 if control_index is None else control_index\n \n center_i_list = tuple(np.argmax(sp[1, :]) for sp in sp_list)\n center_i_min = min(center_i_list)\n sp_list = tuple(sp_list[i][:, center_i_list[i] - center_i_min:] for i in range(len(sp_list)))\n \n len_list = tuple(np.shape(sp)[1] for sp in sp_list)\n len_min = min(len_list)\n \n x = sp_list[control_index][0][:len_min]\n yv_list = tuple((y[:len_min], y_var[:len_min]) for _, y, y_var in sp_list)\n \n return x, yv_list, center_i_min\n\n\ndef fold_list(x, yv_list, center_i, mode):\n yv_list = np.array(yv_list)\n if mode == 'fold':\n x_left, yv_left_list = fold_list(x, yv_list, center_i, 'left')\n x_right, yv_right_list = fold_list(x, yv_list, center_i, 'right')\n len_min = min(len(x_left), len(x_right))\n \n x = x_right[:len_min]\n yv_left_list = yv_left_list[:, :, :len_min]\n yv_right_list = yv_right_list[:, :, :len_min]\n yv_list = np.transpose(add_var(\n *yv_left_list.transpose([1, 0, 2]),\n *yv_right_list.transpose([1, 0, 2])\n ), [1, 0, 2])\n return x, yv_list\n elif mode == 'none':\n return x, yv_list\n elif mode == 'right':\n x_right = x[center_i:] - x[center_i]\n yv_right_list = yv_list[:, :, center_i:]\n return x_right, yv_right_list\n elif mode == 'left':\n x_left = x[center_i] - x[:center_i + 1]\n yv_left_list = yv_list[:, :, :center_i + 1]\n return np.flip(x_left), np.flip(yv_left_list, 2)\n else:\n raise TypeError(\"Unsupported fold mode\")\n\n\ndef compare_list(yv_list, mode, control_index=None):\n control_yv = None if control_index is None else yv_list[control_index]\n return list(compare_yv(yv, control_yv, mode, i == control_index) for i, yv in enumerate(yv_list))\n\n\ndef compare_yv(yv, control_yv, mode, is_control=False):\n \"\"\"\n :return: c, c_var\n \"\"\"\n y, y_var = yv\n if mode == 'ratio':\n if is_control:\n return np.ones_like(y), np.zeros_like(y_var)\n control_y, control_y_var = (1, 0) if control_yv is None else control_yv\n return 
divide_var(y, y_var, control_y, control_y_var)\n elif mode == 'difference':\n if is_control:\n return np.zeros_like(y), np.zeros_like(y_var)\n control_y, control_y_var = (0, 0) if control_yv is None else control_yv\n return minus_var(y, y_var, control_y, control_y_var)\n else:\n raise TypeError(f\"Unsupported compare mode {mode}\")\n\n\ndef process_components(yv_list, base_indices, comb_indices, control_index):\n y_list = tuple(y for y, _ in yv_list)\n if control_index in base_indices:\n raise TypeError(\"base_indices should not contains control_index!\")\n if comb_indices is None:\n comb_indices = tuple(filter(lambda i: i != control_index and i not in base_indices, range(len(y_list))))\n elif control_index in comb_indices:\n raise TypeError(\"comb_indices should not contains control_index!\")\n \n base_list = np.take(y_list, base_indices, 0) # [nb,n]\n comb_list = np.take(y_list, comb_indices, 0) # [nc,n]\n return tuple(fit_y(y, base_list) for y in comb_list)\n\n\ndef fit_y(y, base_list):\n # y [n]\n # base_list [nb,n]\n p = np.sum(np.expand_dims(base_list, 0) * np.expand_dims(base_list, 1), -1) # [nb,nb]\n b = np.sum(np.expand_dims(y, 0) * base_list, -1, keepdims=True) # [nb,1]\n theta = np.dot(np.linalg.inv(p), b)\n sigma = np.sqrt(np.mean(np.square(np.sum(base_list * np.expand_dims(theta, -1), 0) - y)))\n return theta, sigma\n",
"import numpy as np\n\n\ndef add_var(x1, x1_var, x2, x2_var):\n y = x1 + x2\n y_var = x1_var + x2_var\n return y, y_var\n\n\ndef minus_var(x1, x1_var, x2, x2_var):\n y = x1 - x2\n y_var = x1_var + x2_var\n return y, y_var\n\n\ndef times_var(x1, x1_var, x2, x2_var):\n y = x1 * x2\n y_var = np.square(x2) * x1_var + np.square(x1) * x2_var\n return y, y_var\n\n\ndef divide_var(x1, x1_var, x2, x2_var):\n y = x1 / x2\n y_var = x1_var / np.square(x2) + np.square(x1 / x2 / x2) * x2_var\n return y, y_var\n\n\ndef sum_var(x, x_var, axis=None):\n return np.sum(x, axis), np.sum(x_var, axis)\n\n\ndef mean_var(x, x_var, axis=None):\n return divide_var(*sum_var(x, x_var, axis), len(x), 0)\n\n\ndef spectrum_var(ys, ys_sum=None):\n if ys_sum is None:\n ys_sum = np.sum(ys)\n return ys * (1 - ys / ys_sum) + 1 / 3\n"
] |
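variance.py above implements first-order propagation of uncorrelated variances. A quick numeric sanity check of its divide_var rule; the rule is copied verbatim from the file, while the sample sizes and values are arbitrary:

import numpy as np

def divide_var(x1, x1_var, x2, x2_var):
    # Same first-order propagation rule as dbspy.core.utils.variance:
    # Var(x1/x2) ~= Var(x1)/x2**2 + x1**2 * Var(x2) / x2**4
    y = x1 / x2
    y_var = x1_var / np.square(x2) + np.square(x1 / x2 / x2) * x2_var
    return y, y_var

# Compare the analytic variance against a Monte-Carlo estimate.
rng = np.random.default_rng(1)
samples = rng.normal(10.0, 0.5, 100_000) / rng.normal(5.0, 0.2, 100_000)
y, y_var = divide_var(10.0, 0.5 ** 2, 5.0, 0.2 ** 2)
print(y_var, samples.var())  # analytic vs empirical, should roughly agree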
[
[
"numpy.expand_dims",
"numpy.ones_like",
"numpy.take",
"numpy.linalg.inv",
"numpy.argmax",
"numpy.shape",
"numpy.zeros_like",
"numpy.array",
"numpy.flip"
],
[
"numpy.square",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kevinyx2/FYPagain
|
[
"1efffcbfdd66be93318276bb183485100ea754cc"
] |
[
"utils/utils.py"
] |
[
"import numpy as np\nimport cv2\n\ndef draw_boxes(output_filename, classes_filename, inputs, original_image, resized_image):\n \"\"\"\n Draws identified boxes along with class probabilities on the original image\n and then saves the image with the output file name.\n\n Parameters\n ----------\n output_filename : string\n The name that the image with detected objects should be saved as.\n classes_filename : string\n A binary file that contains the name of the classes.\n inputs : dictionary\n\n original_image : ndarray\n An array of shape:\n [image height, image width, 3]\n The original image simply loaded into a numpy array with a RGB color profile. \n resized_image : ndarray\n An array of shape:\n [input_height, input_wdith, 3]\n The array is divided by 255.0 in order to turn the pixel values into numbers between zero \n and one. Since cv2 load images in BGR, the array is also converted to a RGB color profile.\n \"\"\"\n \n names = {}\n with open(classes_filename) as f:\n class_names = f.readlines()\n for id, name in enumerate(class_names):\n names[id] = name\n\n height_ratio = original_image.shape[0] / resized_image.shape[0]\n width_ratio = original_image.shape[1] / resized_image.shape[1]\n ratio = (width_ratio, height_ratio)\n\n for object_class, box_coords_and_prob in inputs.items():\n for box_coord, object_prob in box_coords_and_prob:\n\n box_coord = box_coord.reshape(2,2) * ratio\n box_coord = box_coord.reshape(-1)\n x1y1 = (int(box_coord[2]), int(box_coord[3]))\n x0y0 = (int(box_coord[0]),int(box_coord[1]))\n textx0y0 = (x0y0[0],x0y0[1]-4)\n\n cv2.rectangle(original_image, x0y0, x1y1, (255,255,255), 3)\n text_label = str(names[object_class])[:-1] + \", \" + str(round(object_prob*100,2)) + \"%\"\n cv2.putText(original_image, text_label, textx0y0, cv2.FONT_HERSHEY_SIMPLEX, 2, (0,0,0), 3)\n\n cv2.imwrite(output_filename, cv2.cvtColor(original_image, cv2.COLOR_RGB2BGR))\n\ndef non_max_suppression(predictions_with_boxes, confidence_threshold, iou_threshold=0.4):\n \"\"\"\n Applies non-max suppression to predicted boxes.\n\n Parameters\n ----------\n predictions_with_boxes : ndarray\n An array of shape:\n [1, num_large_obj_detectors + num_med_obj_detectors + num_small_obj_detectors, num_classes + 5]\n where num_x_obj_detectors = num_anchors_per_layer * yolo_layer_grid_w * yolo_layer_grid_h.\n confidence_threshold : float\n A number between zero and one which indicates the minimum object confidence prediction necessary\n for a particular box's data to not be thrown out. For example, the confidence threshold might be\n set to 0.7 and a detector might predict a box with confidence of 0.8. 
This detector's box data will\n therefore be put in the 'result' dictionary since it is above the confidence threshold.\n iou_threshold : float\n The threshold for deciding if two boxes overlap.\n\n Returns\n -------\n result : dictionary\n A dictionary of structure: \n {unique_class_index : [(box_1_data, box_1_prob),(box_2_data, box_2_prob)], etc...}\n where unique_class_index is the index that corresponds with the class's name, \n box_x_data is a ndarray of size [4] that contains the box information associated \n with the class index, and box_x_prob is a float that gives the probability of the box\n being in fact the identified class.\n \"\"\"\n\n def iou(box1, box2):\n \"\"\"\n Calculates the intersection over union (IOU) of two bounding boxes, which is the \n ratio of the area where the two boxes overlap to the joint area of the boxes as a whole.\n Two perfectly overlapping boxes will have an IOU of 1, while two boxes that don't \n overlap one another at all will have an IOU of 0.\n\n Parameters\n ----------\n box1 : ndarray\n Array of shape [x_min, y_min, x_max, y_max].\n box2 : ndarray\n Array of shape [x_min, y_min, x_max, y_max].\n \n Returns\n -------\n iou : float\n The IOU result of the two boxes.\n \"\"\"\n\n b1_x0, b1_y0, b1_x1, b1_y1 = box1\n b2_x0, b2_y0, b2_x1, b2_y1 = box2\n\n int_x0 = max(b1_x0, b2_x0)\n int_y0 = max(b1_y0, b2_y0)\n int_x1 = min(b1_x1, b2_x1)\n int_y1 = min(b1_y1, b2_y1)\n\n int_area = (int_x1 - int_x0) * (int_y1 - int_y0)\n\n b1_area = (b1_x1 - b1_x0) * (b1_y1 - b1_y0)\n b2_area = (b2_x1 - b2_x0) * (b2_y1 - b2_y0)\n\n iou = int_area / (b1_area + b2_area - int_area + 1e-05)\n\n return iou\n \n conf_mask = np.expand_dims((predictions_with_boxes[:, :, 4] > confidence_threshold), -1)\n predictions = predictions_with_boxes * conf_mask\n\n result = {}\n for i, image_pred in enumerate(predictions):\n shape = image_pred.shape\n #non_zero_idxs = np.nonzero(image_pred)\n #image_pred = image_pred[non_zero_idxs]\n temp = image_pred\n sum_t= np.sum (temp, axis=1)\n non_zero_idx = sum_t !=0\n image_pred = image_pred[non_zero_idx,:]\n image_pred = image_pred.reshape(-1, shape[-1])\n bbox_attrs = image_pred[:, :5]\n classes = image_pred[:, 5:]\n classes = np.argmax(classes, axis=-1)\n \n unique_classes = list(set(classes.reshape(-1)))\n\n for cls in unique_classes:\n cls_mask = classes == cls\n cls_boxes = bbox_attrs[np.nonzero(cls_mask)]\n cls_boxes = cls_boxes[cls_boxes[:, -1].argsort()[::-1]]\n cls_scores = cls_boxes[:, -1]\n cls_boxes = cls_boxes[:, :-1]\n\n while len(cls_boxes) > 0:\n box = cls_boxes[0]\n score = cls_scores[0]\n if not cls in result:\n result[cls] = []\n result[cls].append((box, score))\n cls_boxes = cls_boxes[1:]\n ious = np.array([iou(box, x) for x in cls_boxes])\n iou_mask = ious < iou_threshold\n cls_boxes = cls_boxes[np.nonzero(iou_mask)]\n cls_scores = cls_scores[np.nonzero(iou_mask)]\n\n return result\n\ndef convert_box_coordinates(detections):\n \"\"\"\n Converts coordinates in the form of center_x, center_y, width, height to \n min_x, min_y, max_x, max_y. The coordinate values are already scaled up to \n the input dimension shapes.\n\n Parameters\n ----------\n detections : ndarray\n An array of shape:\n [1, num_large_obj_detectors + num_med_obj_detectors + num_small_obj_detectors, num_classes + 5]\n where num_x_obj_detectors = num_anchors_per_layer * yolo_layer_grid_w * yolo_layer_grid_h. 
\n\n Returns\n -------\n detections : ndarray\n The original detections array with converted coordinates.\n \"\"\"\n\n split = np.array_split(detections, [1, 2, 3, 4, 85], axis=2)\n center_x = split[0]\n center_y = split[1]\n width = split[2]\n height = split[3]\n attrs = split[4]\n \n w2 = width / 2\n h2 = height / 2\n x0 = center_x - w2\n y0 = center_y - h2\n x1 = center_x + w2\n y1 = center_y + h2\n\n boxes = np.concatenate([x0, y0, x1, y1], axis=-1)\n detections = np.concatenate([boxes, attrs], axis=-1)\n \n return detections\n\ndef process_image(image_path, input_height, input_width):\n \"\"\"\n Takes any image and transforms it into the format needed for object detection with yolov3.\n\n Parameters\n ----------\n image_path : string\n Path that points to where the image on which object detection should be performed is stored.\n input_height : int\n The height of the input that will be fed into the yolov3 model.\n input_width : int\n The width of the input that will be fed into the yolov3 model.\n \n Returns\n -------\n resized_image : ndarray\n An array of shape:\n [input_height, input_wdith, 3]\n The array is divided by 255.0 in order to turn the pixel values into numbers between zero \n and one. Since cv2 load images in BGR, the array is also converted to a RGB color profile.\n image : ndarray\n An array of shape:\n [image height, image width, 3]\n The original image simply loaded into a numpy array with a RGB color profile.\n \"\"\"\n\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n resized_image = cv2.resize(image,(input_width,input_height))\n resized_image = resized_image / 255.0\n\n return resized_image, image\n\ndef rand(a=0, b=1):\n '''Returns a random number between a and b.'''\n return np.random.rand()*(b-a) + a\n\ndef get_classes(classes_path):\n \"\"\"\n Reads the class names from a file and returns them in a list.\n\n Parameters\n ----------\n classes_path : string\n Path that points to where the class name information is stored.\n\n Returns\n -------\n class_names : list\n A list of format:\n ['class_1', 'class_2', 'class_3', ...]\n \"\"\"\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\ndef get_anchors(anchors_path):\n \"\"\"\n Reads the anchors from a file and returns them in a list.\n\n Parameters\n ----------\n anchors_path : string\n Path that points to where the anchor information is stored.\n\n Returns\n -------\n anchors : list\n A list of format:\n [[anchor1_width, anchor1_height], [anchor2_width, anchor2_height], [anchor3_width, anchor3_height], ...]\n \"\"\"\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n anchors = list(zip(anchors[::2], anchors[1::2]))\n\n return anchors\n\ndef prepare_data(annotations_path, training_validation_split=0.9, batch_size=32, overfit_model=False):\n \"\"\"\n Takes the raw data from the text file and splits it up into a training set\n and a validation set based on the train/val split hyperparameter. 
If the model\n is in overfit mode, the only data prepared will be the first image recorded in \n the text file.\n\n Parameters\n ----------\n annotations_path : string\n The path which points to the text file with image and box data.\n The file structures data in the following way:\n\n image_data/images/img1.jpg 50,100,150,200,0 30,50,200,120,3\n image_data/images/img2.jpg 20,100,123,157,70 30,77,200,120,21 44,50,60,60,2\n\n Or to put it in more abstract terms:\n path/to/first/training/image min_x,min_y,max_x,max_y,class_num, etc..\n (up to max_num_boxes_per_image of boxes per image)\n training_validation_split : float\n The percentage of the data that should be kept for training \n versus validation. A value of '1.0' would indicate that all\n of the data will be used to train the model. \n batch_size : int\n The batch size to be used per training step. If the model is training\n to overfit, the batch_size parameter will be overwritten and a batch size\n of 1 will be prescribed. In overfit mode, the model continuously only trains\n on one image over and over again. The model will choose to overfit on the \n first image listed in the text file containing the training data image paths\n and box information.\n overfit_model : bool\n Whether or not to train the model only on one image and to purposefully \n overfit it. This can be useful to see whether or not the loss function and\n hyperparameters have been set up correctly.\n\n Returns\n -------\n training_data : list\n A sublist of the entire training data set to be used for training.\n validation_data : list\n A sublist of the entire training data set to be used for validation.\n batch_size : int\n The size of the batch to be used with every training step.\n \"\"\"\n \n with open(annotations_path) as f:\n lines = f.readlines()\n if overfit_model:\n training_data = lines[0:1]\n validation_data = None\n train_batch_size = 1\n else:\n np.random.shuffle(lines)\n training_data = lines[:int(len(lines) * training_validation_split)]\n validation_data = lines[len(training_data):]\n train_batch_size = min(len(training_data), batch_size)\n val_batch_size = min(len(validation_data), batch_size)\n batch_size = min(train_batch_size, val_batch_size)\n training_data = training_data[:batch_size]\n validation_data = validation_data[:batch_size]\n \n return training_data, validation_data, batch_size\n\ndef augment_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):\n \"\"\"\n Takes the iamge and box data and applies random transformations if the 'random' parameter is set\n to true. Otherwise, the image and box data will only be reshaped to fit the input tensor size. 
\n\n Parameters\n ----------\n annotation_line : list\n a list in the format of ['path/to/img1.jpg', '50,50,150,200,0', '10,70,200,120,3']\n input_shape : tuple\n The height and the width of the yolov3 input shape.\n random : bool\n Whether or not random distortions should be applied to the image and box data versus\n just general resizing to fit the input shape.\n max_boxes : int\n The max number of possible boxes marking objects in the image.\n jitter : float\n Random amount to shift the image.\n hue : float\n Affect the hue of an image.\n sat : float\n Affect the saturation of an image.\n val : float\n Affects the brightness of the image.\n proc_img : bool\n When set to true, the new image will be shifted around a bit and divided\n by 255.0.\n \n Returns\n -------\n image_data : ndarray\n Array of shape [w, h, 3] containing the image data. Inputs are divided by 255.0.\n box_data : ndarray\n Array of shape [max_boxes, 5] \n where the '5' represents the min x coordinate, min y coordinate, max x coordinate, \n max y coordinate, and the box's class number. The box coordinates are fully scaled numbers\n relative to the original image size. If there are are not enough boxes to fit up the\n box_data tensor (for example: the image only contains 5 boxes but the max number of boxes\n per image is 20), then the empty entries are simply filled with zeros.\n \"\"\"\n \n line = annotation_line.split()\n image = cv2.imread(line[0])\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n ih, iw, _ = image.shape\n h, w = input_shape\n box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])\n\n if not random:\n # resize image\n scale = min(w/iw, h/ih)\n nw = int(iw*scale)\n nh = int(ih*scale)\n dx = (w-nw)//2\n dy = (h-nh)//2\n image_data=0\n if proc_img:\n image = cv2.resize(image, (nw,nh), interpolation=cv2.INTER_CUBIC)\n new_image = np.ones((w, h, 3), dtype=np.uint8)*128\n new_image[dy:dy+nh, dx:dx+nw, :] = image\n image_data = np.array(new_image)/255.\n\n # correct boxes\n box_data = np.zeros((max_boxes,5))\n if len(box)>0:\n np.random.shuffle(box)\n if len(box)>max_boxes: box = box[:max_boxes]\n box[:, [0,2]] = box[:, [0,2]]*scale + dx\n box[:, [1,3]] = box[:, [1,3]]*scale + dy\n box_data[:len(box)] = box\n\n return image_data, box_data\n\n # resize image\n new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)\n scale = rand(.25, 2)\n if new_ar < 1:\n nh = int(scale*h)\n nw = int(nh*new_ar)\n else:\n nw = int(scale*w)\n nh = int(nw/new_ar)\n image = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_CUBIC)\n\n # place image\n dx = int(rand(0, w-nw))\n dy = int(rand(0, h-nh))\n new_image = np.ones((w, h, 3), dtype=np.uint8)*128\n \n idx, idy, inw, inh = dx, dy, nw, nh\n if dx < 0:\n image = image[:,abs(dx):,:]\n idx = 0\n if w < nw + dx:\n image = image[:,:w,:] \n inw = w\n if dy < 0:\n image = image[abs(dy):,:,:]\n idy = 0\n if h < nh + dy:\n image = image[:h,:,:]\n inh = h\n\n new_image[idy:idy+inh, idx:idx+inw, :] = image\n image = new_image\n\n # flip image or not\n flip = rand()<.5\n if flip: image = cv2.flip(image, flipCode=1)\n\n # distort image\n hue = rand(-hue, hue)\n sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)\n val = rand(1, val) if rand()<.5 else 1/rand(1, val)\n x = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)/255.0\n x[..., 0] += hue\n x[..., 0][x[..., 0]>1] -= 1\n x[..., 0][x[..., 0]<0] += 1\n x[..., 1] *= sat\n x[..., 2] *= val\n x[x>1] = 1\n x[x<0] = 0\n image_data = cv2.cvtColor(x.astype(np.float32), cv2.COLOR_HSV2RGB) \n\n # correct boxes\n box_data = 
np.zeros((max_boxes,5))\n    if len(box)>0:\n        np.random.shuffle(box)\n        box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx\n        box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy\n        if flip: box[:, [0,2]] = w - box[:, [2,0]]\n        box[:, 0:2][box[:, 0:2]<0] = 0\n        box[:, 2][box[:, 2]>w] = w\n        box[:, 3][box[:, 3]>h] = h\n        box_w = box[:, 2] - box[:, 0]\n        box_h = box[:, 3] - box[:, 1]\n        box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box\n        if len(box)>max_boxes: box = box[:max_boxes]\n        box_data[:len(box)] = box\n\n    return image_data, box_data\n\ndef create_y_true(box_data, anchors, num_classes, h=416, w=416):\n    \"\"\" \n    A wrapper function for creating the full y_true and y_true_box_data numpy arrays used for \n    training the yolov3 model. \n\n    Parameters\n    ----------\n    box_data : ndarray\n        A numpy array of shape:\n            [batch_size, max_num_true_boxes_per_image, 5]\n        where the '5' represents the min x coordinate, min y coordinate, max x coordinate, \n        max y coordinate, and the box's class number. The box coordinates are fully scaled numbers\n        relative to the original image size.\n    anchors : list\n        A list of anchors with format:\n            [[anchor1_width, anchor1_height], [anchor2_width, anchor2_height], [anchor3_width, anchor3_height], ...]\n        The anchors are necessary for calculating an IOU with the box data to determine into which layer\n        a particular box's data should be placed. Large boxes will have high IOUs with large anchors, and\n        therefore they will all be grouped into the same layer that detects large objects. On the other hand,\n        small boxes will have high IOUs with small anchors, and therefore will be grouped into the layer\n        responsible for detecting small objects. \n    num_classes : int\n        The number of classes in the training data.\n    h : int\n        Height of the input.\n    w : int\n        Width of the input.\n    \n    Returns\n    -------\n    y_true : ndarray\n        The complete y_true array of shape:\n            [batch_size, num_large_obj_detectors + num_medium_obj_detectors + num_small_obj_detectors, 5 + num_classes]\n        where the '5' represents the center_x, center_y, width, height coordinates all as percentages of the\n        original image size. \n            num_x_obj_detectors = num_anchors_per_layer * x_grid_height * x_grid_width\n        The y_true numpy array is shaped like this for easy loading into the feed dictionary.\n\n    y_true_box_data : ndarray\n        The complete y_true_box_data array of shape:\n            [batch_size, max_num_true_boxes_per_image * num_layers, 4]\n        The y_true_box_data numpy array is shaped like this for easy loading into the feed dictionary.\n    \"\"\" \n\n    def load_data(box_data, best_anchors, anchors_mask, grid_size, num_classes):\n        \"\"\" \n        Takes the box_data and maps it into the y_true numpy array for a particular\n        grid size. The yolov3 model has three grid sizes for large, medium, and small\n        object detection. The mapping function used is a fully vectorized implementation \n        that does not use any loops whatsoever.\n\n        Parameters\n        ----------\n        box_data : ndarray\n            A numpy array of shape:\n                [batch_size, max_num_true_boxes_per_image, 5]\n            where the '5' represents the center x coordinate, center y coordinate, the width,\n            the height, and the box's class number. The box coordinates are percentages of \n            the original image size. \n        best_anchors : ndarray\n            Index of the best anchor for each box.\n            A numpy array of shape:\n                [batch_size, max_num_true_boxes_per_image]\n            At every column index, each individual box stores the index of the anchor with which\n            it has the highest IOU (intersection over union) value. 
\n        anchors_mask : list\n            Identifies which anchors should be used with this layer. If the best_anchors numpy\n            array contains anchor indices that are not part of this layer (as determined by the\n            anchors mask), they will be ignored.\n        grid_size : tuple\n            The size of this layer's grid. Will coincide with the grid sizes of yolov3's \n            yolo layers.\n        num_classes : int\n            The number of classes in the training data.\n\n        Returns\n        -------\n        y_true : ndarray\n            A numpy array of shape:\n                [batch_size, grid_h * grid_w * num_anchors_per_layer, num_classes + 5]\n            This array is the y_true for a particular grid size.\n        box_data : ndarray\n            A numpy array of shape:\n                [batch_size, max_num_true_boxes_per_image, 4]\n            The data for boxes whose highest IOU values coincide with anchors not belonging to \n            this particular layer has been set to zero. Only box data that belongs to the layer\n            remains in the array.\n        \"\"\"\n        \n        num_anchors = len(anchors_mask)\n        box_data_shape = box_data.shape\n\n        # remove all anchors that aren't part of this layer\n        best_anchors_mask = np.isin(best_anchors, anchors_mask, invert=True)\n        best_anchors = best_anchors*1\n        best_anchors -= min(anchors_mask)\n        best_anchors[best_anchors_mask] = 0\n\n        # set all of the box data that isn't part of this layer to zero\n        box_data_mask = np.ones_like(best_anchors)\n        box_data_mask[best_anchors_mask] = 0\n        box_data_mask = np.expand_dims(box_data_mask, -1)\n        box_data = box_data*box_data_mask\n        \n        i = np.floor(box_data[:,:,1]*grid_size[0]).astype('int32')\n        j = np.floor(box_data[:,:,0]*grid_size[1]).astype('int32')\n        \n        # reshape all of these arrays for vectorized ops\n        box_data = box_data.reshape([-1,box_data.shape[-1]])\n        best_anchors = best_anchors.reshape([-1,1])\n        i = i.reshape([-1,1])\n        j = j.reshape([-1,1])\n\n        # create one-hot class encodings\n        classes = box_data[:,-1].reshape([-1]).astype(np.int)\n        one_hot_array = np.zeros([box_data.shape[0],num_classes])\n        one_hot_array[np.arange(box_data.shape[0]),classes] = 1\n\n        box_data_mask = box_data[:,2]>0\n        box_data[box_data_mask,4] = 1\n        box_data = np.concatenate([box_data,one_hot_array],axis=-1)\n        \n        y_true = np.zeros([box_data_shape[0] * int(grid_size[0]) * int(grid_size[1]) * num_anchors, 5+num_classes])\n        \n        image_offset = np.repeat(np.linspace(0, y_true.shape[0], box_data_shape[0], endpoint=False, dtype=np.int), box_data.shape[0] / box_data_shape[0]).reshape([-1,1])\n        grid_offset = num_anchors * (grid_size[0] * i + j)\n\n        indexing_array = np.array(image_offset + grid_offset + best_anchors,dtype=np.int32)\n        indexing_array = indexing_array[box_data_mask,:]\n        indexing_array = indexing_array.reshape([-1])\n\n        y_true[indexing_array,:] = box_data[box_data_mask]\n        y_true = y_true.reshape([box_data_shape[0], int(grid_size[0]) * int(grid_size[1]) * num_anchors, num_classes+5])\n        box_data = box_data.reshape([box_data_shape[0],box_data_shape[1],-1])\n\n        return y_true, box_data[...,0:4]\n\n    # convert from (min_x, min_y, max_x, max_y) to (center_x, center_y, width, height)\n    anchors = np.array(anchors)\n    boxes_xy = (box_data[:,:,0:2] + box_data[:,:,2:4]) // 2\n    boxes_hw = box_data[:,:,2:4] - box_data[:,:,0:2]\n    # change box coordinates to be percentages of the image size\n    box_data[:, :, 0] = boxes_xy[...,0]/w\n    box_data[:, :, 1] = boxes_xy[...,1]/h\n    box_data[:, :, 2] = boxes_hw[...,0]/w\n    box_data[:, :, 3] = boxes_hw[...,1]/h\n\n    hw = np.expand_dims(boxes_hw, -2)\n    anchors_broad = np.expand_dims(anchors, 0)\n    anchor_maxes = anchors_broad / 2.\n    anchor_mins = -anchor_maxes \n    box_maxes = hw / 2.\n    box_mins = 
-box_maxes\n    intersect_mins = np.maximum(box_mins, anchor_mins)\n    intersect_maxes = np.minimum(box_maxes, anchor_maxes)\n    intersect_hw = np.maximum(intersect_maxes - intersect_mins, 0.)\n    intersect_area = intersect_hw[..., 0] * intersect_hw[..., 1]\n    box_area = hw[..., 0] * hw[..., 1]\n    anchor_area = anchors[..., 0] * anchors[..., 1]\n    iou = intersect_area / (box_area + anchor_area - intersect_area)\n    best_anchors = np.argmax(iou, axis=-1)\n    large_obj_detectors, large_obj_boxes = load_data(box_data, best_anchors=best_anchors, anchors_mask=[6,7,8], grid_size=(h/32,w/32), num_classes=num_classes)\n    medium_obj_detectors, medium_obj_boxes = load_data(box_data, best_anchors=best_anchors, anchors_mask=[3,4,5], grid_size=(h/16,w/16), num_classes=num_classes)\n    small_obj_detectors, small_obj_boxes = load_data(box_data, best_anchors=best_anchors, anchors_mask=[0,1,2], grid_size=(h/8,w/8), num_classes=num_classes)\n    \n    y_true = np.concatenate([large_obj_detectors, medium_obj_detectors, small_obj_detectors], axis=1)\n    y_true_box_data = np.concatenate([large_obj_boxes, medium_obj_boxes, small_obj_boxes], axis=1)\n    \n    return y_true, y_true_box_data\n\ndef get_training_batch(annotation_lines, anchors, num_classes, batch_size=32, h=416, w=416, random=False):\n    \"\"\"\n    Takes the annotation lines, reads them, and from their information constructs the necessary \n    numpy arrays that store the data to train the yolov3 model.\n\n    Parameters\n    ----------\n    annotation_lines : list\n        A list of format\n            ['image_data/images/img1.jpg 22,748,2184,2150,2 1590,2140,1832,2414,32 2414,858,2750,2002,0', ...\n        The box data is of format min_x, min_y, max_x, max_y, class_number and is relative to the \n        original image size.\n    anchors : list\n        A list of format:\n            [[anchor1_width, anchor1_height], [anchor2_width, anchor2_height], [anchor3_width, anchor3_height], ...]\n    batch_size : int\n        The number of images to use in each training batch.\n    h : int\n        Height of the input shape into the yolov3 model.\n    w : int\n        Width of the input shape into the yolov3 model.\n    \n    Returns\n    -------\n    image_data : ndarray\n        An array of shape [batch_size, h, w, 3]\n        The image pixel data has been divided by 255.0 so that all values are between\n        zero and one.\n    y_true : ndarray\n        An array containing the ground truth box coordinate and class information used for training \n        and calculating the loss of the yolo_v3 model. \n        A sample y_true array would be of shape:\n            [batch_size, num_large_obj_detectors + num_med_obj_detectors + num_small_obj_detectors, num_classes + 5]\n        where num_x_obj_detectors = num_anchors_per_layer * yolo_layer_grid_w * yolo_layer_grid_h.\n    y_true_box_data : ndarray\n        An array containing ground truth box data.\n        A sample y_true_boxes array would be of the shape:\n            [batch_size, num_layers * max_num_true_boxes_per_image, 4]\n    \"\"\"\n\n    anchors = np.array(anchors,dtype=np.float32)\n    image_data = []\n    box_data = []\n    \n    for b in range(batch_size):\n        if b==0:\n            np.random.shuffle(annotation_lines)\n        \n        image, box = augment_data(annotation_lines[b], (h, w), random=random, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True)\n        image_data.append(image)\n        box_data.append(box)\n\n    image_data = np.array(image_data)\n    box_data = np.array(box_data)\n\n    y_true, y_true_box_data = create_y_true(box_data, anchors, num_classes, h, w)\n\n    return image_data, y_true, y_true_box_data\n"
] |
[
[
"numpy.expand_dims",
"numpy.minimum",
"numpy.linspace",
"numpy.concatenate",
"numpy.ones_like",
"numpy.arange",
"numpy.argmax",
"numpy.zeros",
"numpy.isin",
"numpy.nonzero",
"numpy.random.rand",
"numpy.floor",
"numpy.logical_and",
"numpy.array",
"numpy.sum",
"numpy.maximum",
"numpy.random.shuffle",
"numpy.ones",
"numpy.array_split"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
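The entry above centers on `create_y_true`'s anchor matching: each ground-truth box is assigned to whichever anchor shape it overlaps most, and that assignment routes the box into the large-, medium-, or small-object grid. Below is a minimal standalone sketch of just that IOU step, assuming only numpy; the function name `best_anchor_per_box` and the toy anchor values are illustrative, not from the repository.

```python
import numpy as np

def best_anchor_per_box(box_hw, anchors):
    """box_hw: [num_boxes, 2] box sizes; anchors: [num_anchors, 2] anchor sizes."""
    box_hw = np.expand_dims(box_hw, -2)    # -> [num_boxes, 1, 2]
    anchors = np.expand_dims(anchors, 0)   # -> [1, num_anchors, 2]
    # Center every box and anchor on the origin, then measure the overlap,
    # exactly as the docstring describes: shape-only IOU, positions ignored.
    intersect_hw = np.maximum(
        np.minimum(box_hw / 2., anchors / 2.) - np.maximum(-box_hw / 2., -anchors / 2.), 0.)
    intersect_area = intersect_hw[..., 0] * intersect_hw[..., 1]
    box_area = box_hw[..., 0] * box_hw[..., 1]
    anchor_area = anchors[..., 0] * anchors[..., 1]
    iou = intersect_area / (box_area + anchor_area - intersect_area)
    return np.argmax(iou, axis=-1)         # -> [num_boxes]

anchors = np.array([[10., 13.], [60., 45.], [250., 300.]])  # toy anchors
print(best_anchor_per_box(np.array([[300., 250.]]), anchors))  # [2]: big box picks the big anchor
```

In the file itself, the resulting index is then compared against each layer's `anchors_mask` (e.g. `[6,7,8]` for the coarsest grid) to decide which grid receives the box.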
zlin888/e2e-coref
|
[
"67e99bee9b497e904d55470c4a0cd30a1d69116b"
] |
[
"extract_xlnet_features.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Extract pre-computed feature vectors from BERT.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nimport os\nimport codecs\nimport collections\nfrom absl.flags import Flag\n\nimport h5py\nimport json\nimport re\nfrom tqdm import tqdm\n\nimport xlnet.xlnet as modeling\nimport xlnet.tokenization as tokenization \nfrom xlnet.model_utils import get_assignment_map_from_checkpoint, configure_tpu\nimport xlnet.run_classifier\nimport tensorflow as tf\nimport numpy as np\n\nfrom data import process_example\n\nSEG_ID_A = 0\nSEG_ID_B = 1\nSEG_ID_CLS = 2\nSEG_ID_SEP = 3\nSEG_ID_PAD = 4\n\n\nBERT_MODEL_PATH = \"bert_cased_L-12_H-768_A-12\"\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"input_file\", \"\", \"\")\n\nflags.DEFINE_string(\"output_file\", \"\", \"\")\n\nflags.DEFINE_string(\"layers\", \"-1,-2,-3,-4\", \"\")\n\nflags.DEFINE_string(\n \"bert_config_file\", os.path.join(BERT_MODEL_PATH, \"bert_config.json\"),\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_integer(\n \"window_size\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_integer(\n \"stride\", 1,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_string(\n \"init_checkpoint\", os.path.join(BERT_MODEL_PATH, \"bert_model.ckpt\"),\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_string(\"vocab_file\", os.path.join(BERT_MODEL_PATH, \"vocab.txt\"),\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\"batch_size\", 32, \"Batch size for predictions.\")\n\nflags.DEFINE_integer(\"num_core_per_host\", 1, \"Batch size for predictions.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\nflags.DEFINE_string(\"master\", None,\n \"If using a TPU, the address of the master.\")\n\nflags.DEFINE_string(\"model_dir\",\"model_dir\",\"\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\nflags.DEFINE_bool(\n \"use_one_hot_embeddings\", False,\n \"If True, tf.one_hot will be used for embedding lookups, otherwise \"\n \"tf.nn.embedding_lookup will be used. 
On TPUs, this should be True \"\n \"since it is much faster.\")\n\nflags.DEFINE_integer(\"mem_len\", default=0,\n help=\"Number of steps to cache\")\nflags.DEFINE_bool(\"same_length\", default=False,\n help=\"Same length attention\")\nflags.DEFINE_bool(\"bi_data\", default=True,\n help=\"Use bidirectional data streams, i.e., forward & backward.\")\nflags.DEFINE_integer(\"reuse_len\", default=0,\n help=\"How many tokens to be reused in the next batch. \"\n \"Could be half of `seq_len`.\")\n\ndef input_fn_builder(examples, window_size, stride, tokenizer):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n d = tf.data.Dataset.from_generator(\n functools.partial(convert_examples_to_features,\n examples=examples,\n window_size=window_size,\n stride=stride,\n tokenizer=tokenizer),\n dict(unique_ids=tf.int32,\n input_ids=tf.int32,\n input_mask=tf.int32,\n input_type_ids=tf.int32,\n extract_indices=tf.int32,\n seg_ids=tf.int32),\n dict(unique_ids=tf.TensorShape([]),\n input_ids=tf.TensorShape([window_size]),\n input_mask=tf.TensorShape([window_size]),\n input_type_ids=tf.TensorShape([window_size]),\n extract_indices=tf.TensorShape([window_size]),\n seg_ids=tf.TensorShape([window_size])))\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn\n\n\ndef model_fn_builder(run_config, bert_config, init_checkpoint, layer_indexes, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n input_type_ids = features[\"input_type_ids\"]\n extract_indices = features[\"extract_indices\"]\n seg_ids = features[\"seg_ids\"]\n tf.logging.info(input_ids.shape)\n\n model = modeling.XLNetModel(\n xlnet_config=bert_config,\n run_config=run_config,\n seg_ids=seg_ids,\n is_training=False,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=input_type_ids,\n use_one_hot_embeddings=use_one_hot_embeddings,\n flags=FLAGS)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n raise ValueError(\"Only PREDICT modes are supported: %s\" % (mode))\n\n tvars = tf.trainable_variables()\n scaffold_fn = None\n (assignment_map,\n initialized_variable_names) = get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n # all_layers = model.get_all_encoder_layers()\n\n predictions = {\n \"unique_ids\": unique_ids,\n \"extract_indices\": extract_indices\n }\n\n # for (i, layer_index) in enumerate(layer_indexes):\n # predictions[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn\n\n\ndef _convert_example_to_features(example, 
window_start, window_end, tokens_ids_to_extract, tokenizer, seq_length):\n window_tokens = example.tokens[window_start:window_end]\n\n tokens = []\n input_type_ids = []\n for token in window_tokens:\n tokens.append(token)\n input_type_ids.append(0)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n \n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n seg_ids = [SEG_ID_A] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < seq_length:\n input_ids.append(0)\n input_mask.append(0)\n input_type_ids.append(0)\n\n extract_indices = [-1] * seq_length\n for i in tokens_ids_to_extract:\n assert i - window_start >= 0\n extract_indices[i - window_start] = i\n\n assert len(input_ids) == seq_length\n assert len(input_mask) == seq_length\n assert len(input_type_ids) == seq_length\n tf.logging.info(len(input_ids))\n\n return dict(unique_ids=example.document_index,\n input_ids=input_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids,\n extract_indices=extract_indices,\n seg_ids=seg_ids)\n\n\ndef convert_examples_to_features(examples, window_size, stride, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n assert window_size % 2 == 1\n assert stride % 2 == 1\n\n for example in examples:\n for i in range(0, len(example.tokens), stride):\n window_center = i + window_size // 2\n token_ids_to_extract = []\n extract_start = int(np.clip(window_center - stride // 2, 0, len(example.tokens)))\n extract_end = int(np.clip(window_center + stride // 2 + 1, extract_start, len(example.tokens)))\n\n if i == 0:\n token_ids_to_extract.extend(range(extract_start))\n\n token_ids_to_extract.extend(range(extract_start, extract_end))\n\n if i + stride >= len(example.tokens):\n token_ids_to_extract.extend(range(extract_end, len(example.tokens)))\n\n token_ids_to_extract = [t for t in token_ids_to_extract if example.bert_to_orig_map[t] >= 0]\n\n yield _convert_example_to_features(example,\n i,\n min(i + window_size, len(example.tokens)),\n token_ids_to_extract,\n tokenizer,\n window_size)\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n layer_indexes = [int(x) for x in FLAGS.layers.split(\",\")]\n\n bert_config = modeling.XLNetConfig(json_path=FLAGS.bert_config_file)\n\n tokenizer = tokenization.FullTokenizer(\n spm_model_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config =configure_tpu(FLAGS) \n \n # tf.contrib.tpu.RunConfig(\n # master=FLAGS.master,\n # tpu_config=tf.contrib.tpu.TPUConfig(\n # num_shards=FLAGS.num_tpu_cores,\n # per_host_input_for_training=is_per_host))\n\n # examples = read_examples(FLAGS.input_file)\n json_examples = []\n for x in ['test', 'train', 'dev']:\n with open(os.path.join(FLAGS.input_file, x + '.english.jsonlines')) as f:\n json_examples.extend((json.loads(jsonline) for jsonline in f.readlines()))\n\n orig_examples = []\n bert_examples = []\n for i, json_e in enumerate(json_examples):\n e = process_example(json_e, i, should_filter_embedded_mentions=True)\n orig_examples.append(e)\n bert_examples.append(e.bertify(tokenizer))\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n run_config=run_config,\n init_checkpoint=FLAGS.init_checkpoint,\n layer_indexes=layer_indexes,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_one_hot_embeddings)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or 
GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n predict_batch_size=FLAGS.batch_size)\n\n input_fn = input_fn_builder(\n examples=bert_examples, window_size=FLAGS.window_size, stride=FLAGS.stride, tokenizer=tokenizer)\n\n writer = h5py.File(FLAGS.output_file, 'w')\n with tqdm(total=sum(len(e.tokens) for e in orig_examples)) as t:\n for result in estimator.predict(input_fn, yield_single_examples=True):\n document_index = int(result[\"unique_ids\"])\n bert_example = bert_examples[document_index]\n orig_example = orig_examples[document_index]\n file_key = bert_example.doc_key.replace('/', ':')\n\n t.update(n=(result['extract_indices'] >= 0).sum())\n\n for output_index, bert_token_index in enumerate(result['extract_indices']):\n if bert_token_index < 0:\n continue\n\n token_index = bert_example.bert_to_orig_map[bert_token_index]\n sentence_index, token_index = orig_example.unravel_token_index(token_index)\n\n dataset_key =\"{}/{}\".format(file_key, sentence_index)\n if dataset_key not in writer:\n writer.create_dataset(dataset_key,\n (len(orig_example.sentence_tokens[sentence_index]), bert_config.hidden_size, len(layer_indexes)),\n dtype=np.float32)\n\n dset = writer[dataset_key]\n for j, layer_index in enumerate(layer_indexes):\n layer_output = result[\"layer_output_%d\" % j]\n dset[token_index, :, j] = layer_output[output_index]\n writer.close()\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"input_file\")\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"init_checkpoint\")\n flags.mark_flag_as_required(\"output_file\")\n tf.app.run()\n"
] |
[
[
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.TensorShape",
"tensorflow.train.Scaffold",
"tensorflow.train.init_from_checkpoint",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.trainable_variables",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.app.run"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
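The entry above extracts contextual embeddings with a sliding window: `convert_examples_to_features` steps a fixed-size window across the document in increments of `stride` and only keeps features for the `stride` tokens nearest each window's center, plus the document edges in the first and last windows. A minimal sketch of just that indexing scheme follows, assuming numpy; `extraction_plan` and the toy sizes are my own names, and the real code additionally drops tokens with no `bert_to_orig_map` entry.

```python
import numpy as np

def extraction_plan(num_tokens, window_size, stride):
    """Yield (window_start, window_end, token_ids_to_extract) per window."""
    assert window_size % 2 == 1 and stride % 2 == 1
    for i in range(0, num_tokens, stride):
        center = i + window_size // 2
        start = int(np.clip(center - stride // 2, 0, num_tokens))
        end = int(np.clip(center + stride // 2 + 1, start, num_tokens))
        extract = list(range(start, end))
        if i == 0:                       # first window also takes the left edge
            extract = list(range(start)) + extract
        if i + stride >= num_tokens:     # last window also takes the right edge
            extract += list(range(end, num_tokens))
        yield i, min(i + window_size, num_tokens), extract

# Every token should be scheduled for extraction exactly once:
covered = [t for _ws, _we, ids in extraction_plan(10, window_size=5, stride=3)
           for t in ids]
assert sorted(covered) == list(range(10))
```

This is why each token's h5py entry is written exactly once in the prediction loop: the `extract_indices` returned per window are disjoint across windows and jointly cover the document.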
kchoudhu/numpy
|
[
"2cf38000274a94c1b118f1a38e2a6e2c7401eb16"
] |
[
"numpy/core/tests/test_umath.py"
] |
[
"import platform\nimport warnings\nimport fnmatch\nimport itertools\nimport pytest\nimport sys\nfrom fractions import Fraction\n\nimport numpy.core.umath as ncu\nfrom numpy.core import _umath_tests as ncu_tests\nimport numpy as np\nfrom numpy.testing import (\n assert_, assert_equal, assert_raises, assert_raises_regex,\n assert_array_equal, assert_almost_equal, assert_array_almost_equal,\n assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings,\n _gen_alignment_data, assert_array_almost_equal_nulp, assert_warns\n )\n\ndef on_powerpc():\n \"\"\" True if we are running on a Power PC platform.\"\"\"\n return platform.processor() == 'powerpc' or \\\n platform.machine().startswith('ppc')\n\n\ndef bad_arcsinh():\n \"\"\"The blocklisted trig functions are not accurate on aarch64 for\n complex256. Rather than dig through the actual problem skip the\n test. This should be fixed when we can move past glibc2.17\n which is the version in manylinux2014\n \"\"\"\n x = 1.78e-10\n v1 = np.arcsinh(np.float128(x))\n v2 = np.arcsinh(np.complex256(x)).real\n # The eps for float128 is 1-e33, so this is way bigger\n return abs((v1 / v2) - 1.0) > 1e-23\n\nif platform.machine() == 'aarch64' and bad_arcsinh():\n skip_longcomplex_msg = ('Trig functions of np.longcomplex values known to be '\n 'inaccurate on aarch64 for some compilation '\n 'configurations, should be fixed by building on a '\n 'platform using glibc>2.17')\nelse:\n skip_longcomplex_msg = ''\n\n\nclass _FilterInvalids:\n def setup(self):\n self.olderr = np.seterr(invalid='ignore')\n\n def teardown(self):\n np.seterr(**self.olderr)\n\n\nclass TestConstants:\n def test_pi(self):\n assert_allclose(ncu.pi, 3.141592653589793, 1e-15)\n\n def test_e(self):\n assert_allclose(ncu.e, 2.718281828459045, 1e-15)\n\n def test_euler_gamma(self):\n assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)\n\n\nclass TestOut:\n def test_out_subok(self):\n for subok in (True, False):\n a = np.array(0.5)\n o = np.empty(())\n\n r = np.add(a, 2, o, subok=subok)\n assert_(r is o)\n r = np.add(a, 2, out=o, subok=subok)\n assert_(r is o)\n r = np.add(a, 2, out=(o,), subok=subok)\n assert_(r is o)\n\n d = np.array(5.7)\n o1 = np.empty(())\n o2 = np.empty((), dtype=np.int32)\n\n r1, r2 = np.frexp(d, o1, None, subok=subok)\n assert_(r1 is o1)\n r1, r2 = np.frexp(d, None, o2, subok=subok)\n assert_(r2 is o2)\n r1, r2 = np.frexp(d, o1, o2, subok=subok)\n assert_(r1 is o1)\n assert_(r2 is o2)\n\n r1, r2 = np.frexp(d, out=(o1, None), subok=subok)\n assert_(r1 is o1)\n r1, r2 = np.frexp(d, out=(None, o2), subok=subok)\n assert_(r2 is o2)\n r1, r2 = np.frexp(d, out=(o1, o2), subok=subok)\n assert_(r1 is o1)\n assert_(r2 is o2)\n\n with assert_raises(TypeError):\n # Out argument must be tuple, since there are multiple outputs.\n r1, r2 = np.frexp(d, out=o1, subok=subok)\n\n assert_raises(TypeError, np.add, a, 2, o, o, subok=subok)\n assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok)\n assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok)\n assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok)\n assert_raises(ValueError, np.add, a, 2, out=(), subok=subok)\n assert_raises(TypeError, np.add, a, 2, [], subok=subok)\n assert_raises(TypeError, np.add, a, 2, out=[], subok=subok)\n assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok)\n o.flags.writeable = False\n assert_raises(ValueError, np.add, a, 2, o, subok=subok)\n assert_raises(ValueError, np.add, a, 2, out=o, subok=subok)\n assert_raises(ValueError, np.add, a, 2, 
out=(o,), subok=subok)\n\n def test_out_wrap_subok(self):\n class ArrayWrap(np.ndarray):\n __array_priority__ = 10\n\n def __new__(cls, arr):\n return np.asarray(arr).view(cls).copy()\n\n def __array_wrap__(self, arr, context):\n return arr.view(type(self))\n\n for subok in (True, False):\n a = ArrayWrap([0.5])\n\n r = np.add(a, 2, subok=subok)\n if subok:\n assert_(isinstance(r, ArrayWrap))\n else:\n assert_(type(r) == np.ndarray)\n\n r = np.add(a, 2, None, subok=subok)\n if subok:\n assert_(isinstance(r, ArrayWrap))\n else:\n assert_(type(r) == np.ndarray)\n\n r = np.add(a, 2, out=None, subok=subok)\n if subok:\n assert_(isinstance(r, ArrayWrap))\n else:\n assert_(type(r) == np.ndarray)\n\n r = np.add(a, 2, out=(None,), subok=subok)\n if subok:\n assert_(isinstance(r, ArrayWrap))\n else:\n assert_(type(r) == np.ndarray)\n\n d = ArrayWrap([5.7])\n o1 = np.empty((1,))\n o2 = np.empty((1,), dtype=np.int32)\n\n r1, r2 = np.frexp(d, o1, subok=subok)\n if subok:\n assert_(isinstance(r2, ArrayWrap))\n else:\n assert_(type(r2) == np.ndarray)\n\n r1, r2 = np.frexp(d, o1, None, subok=subok)\n if subok:\n assert_(isinstance(r2, ArrayWrap))\n else:\n assert_(type(r2) == np.ndarray)\n\n r1, r2 = np.frexp(d, None, o2, subok=subok)\n if subok:\n assert_(isinstance(r1, ArrayWrap))\n else:\n assert_(type(r1) == np.ndarray)\n\n r1, r2 = np.frexp(d, out=(o1, None), subok=subok)\n if subok:\n assert_(isinstance(r2, ArrayWrap))\n else:\n assert_(type(r2) == np.ndarray)\n\n r1, r2 = np.frexp(d, out=(None, o2), subok=subok)\n if subok:\n assert_(isinstance(r1, ArrayWrap))\n else:\n assert_(type(r1) == np.ndarray)\n\n with assert_raises(TypeError):\n # Out argument must be tuple, since there are multiple outputs.\n r1, r2 = np.frexp(d, out=o1, subok=subok)\n\n\nclass TestComparisons:\n def test_ignore_object_identity_in_equal(self):\n # Check comparing identical objects whose comparison\n # is not a simple boolean, e.g., arrays that are compared elementwise.\n a = np.array([np.array([1, 2, 3]), None], dtype=object)\n assert_raises(ValueError, np.equal, a, a)\n\n # Check error raised when comparing identical non-comparable objects.\n class FunkyType:\n def __eq__(self, other):\n raise TypeError(\"I won't compare\")\n\n a = np.array([FunkyType()])\n assert_raises(TypeError, np.equal, a, a)\n\n # Check identity doesn't override comparison mismatch.\n a = np.array([np.nan], dtype=object)\n assert_equal(np.equal(a, a), [False])\n\n def test_ignore_object_identity_in_not_equal(self):\n # Check comparing identical objects whose comparison\n # is not a simple boolean, e.g., arrays that are compared elementwise.\n a = np.array([np.array([1, 2, 3]), None], dtype=object)\n assert_raises(ValueError, np.not_equal, a, a)\n\n # Check error raised when comparing identical non-comparable objects.\n class FunkyType:\n def __ne__(self, other):\n raise TypeError(\"I won't compare\")\n\n a = np.array([FunkyType()])\n assert_raises(TypeError, np.not_equal, a, a)\n\n # Check identity doesn't override comparison mismatch.\n a = np.array([np.nan], dtype=object)\n assert_equal(np.not_equal(a, a), [True])\n\n\nclass TestAdd:\n def test_reduce_alignment(self):\n # gh-9876\n # make sure arrays with weird strides work with the optimizations in\n # pairwise_sum_@TYPE@. 
On x86, the 'b' field will count as aligned at a\n # 4 byte offset, even though its itemsize is 8.\n a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)])\n a['a'] = -1\n assert_equal(a['b'].sum(), 0)\n\n\nclass TestDivision:\n def test_division_int(self):\n # int division should follow Python\n x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])\n if 5 / 10 == 0.5:\n assert_equal(x / 100, [0.05, 0.1, 0.9, 1,\n -0.05, -0.1, -0.9, -1, -1.2])\n else:\n assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])\n assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])\n assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])\n\n @pytest.mark.parametrize(\"input_dtype\",\n [np.int8, np.int16, np.int32, np.int64])\n def test_division_int_boundary(self, input_dtype):\n iinfo = np.iinfo(input_dtype)\n\n # Create list with min, 25th percentile, 0, 75th percentile, max\n lst = [iinfo.min, iinfo.min//2, 0, iinfo.max//2, iinfo.max]\n divisors = [iinfo.min, iinfo.min//2, iinfo.max//2, iinfo.max]\n a = np.array(lst, dtype=input_dtype)\n\n for divisor in divisors:\n div_a = a // divisor\n b = a.copy(); b //= divisor\n div_lst = [i // divisor for i in lst]\n\n msg = \"Integer arrays floor division check (//)\"\n assert all(div_a == div_lst), msg\n\n msg = \"Integer arrays floor division check (//=)\"\n assert all(div_a == b), msg\n\n with np.errstate(divide='raise'):\n with pytest.raises(FloatingPointError):\n a // 0\n with pytest.raises(FloatingPointError):\n a //= 0\n\n np.array([], dtype=input_dtype) // 0\n\n @pytest.mark.parametrize(\n \"dividend,divisor,quotient\",\n [(np.timedelta64(2,'Y'), np.timedelta64(2,'M'), 12),\n (np.timedelta64(2,'Y'), np.timedelta64(-2,'M'), -12),\n (np.timedelta64(-2,'Y'), np.timedelta64(2,'M'), -12),\n (np.timedelta64(-2,'Y'), np.timedelta64(-2,'M'), 12),\n (np.timedelta64(2,'M'), np.timedelta64(-2,'Y'), -1),\n (np.timedelta64(2,'Y'), np.timedelta64(0,'M'), 0),\n (np.timedelta64(2,'Y'), 2, np.timedelta64(1,'Y')),\n (np.timedelta64(2,'Y'), -2, np.timedelta64(-1,'Y')),\n (np.timedelta64(-2,'Y'), 2, np.timedelta64(-1,'Y')),\n (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')),\n (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')),\n (np.timedelta64(-2,'Y'), -3, np.timedelta64(0,'Y')),\n (np.timedelta64(-2,'Y'), 0, np.timedelta64('Nat','Y')),\n ])\n def test_division_int_timedelta(self, dividend, divisor, quotient):\n # If either divisor is 0 or quotient is Nat, check for division by 0\n if divisor and (isinstance(quotient, int) or not np.isnat(quotient)):\n msg = \"Timedelta floor division check\"\n assert dividend // divisor == quotient, msg\n\n # Test for arrays as well\n msg = \"Timedelta arrays floor division check\"\n dividend_array = np.array([dividend]*5)\n quotient_array = np.array([quotient]*5)\n assert all(dividend_array // divisor == quotient_array), msg\n else:\n with np.errstate(divide='raise', invalid='raise'):\n with pytest.raises(FloatingPointError):\n dividend // divisor\n\n def test_division_complex(self):\n # check that implementation is correct\n msg = \"Complex division implementation check\"\n x = np.array([1. + 1.*1j, 1. + .5*1j, 1. 
+ 2.*1j], dtype=np.complex128)\n        assert_almost_equal(x**2/x, x, err_msg=msg)\n        # check overflow, underflow\n        msg = \"Complex division overflow/underflow check\"\n        x = np.array([1.e+110, 1.e-110], dtype=np.complex128)\n        y = x**2/x\n        assert_almost_equal(y/x, [1, 1], err_msg=msg)\n\n    def test_zero_division_complex(self):\n        with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n            x = np.array([0.0], dtype=np.complex128)\n            y = 1.0/x\n            assert_(np.isinf(y)[0])\n            y = complex(np.inf, np.nan)/x\n            assert_(np.isinf(y)[0])\n            y = complex(np.nan, np.inf)/x\n            assert_(np.isinf(y)[0])\n            y = complex(np.inf, np.inf)/x\n            assert_(np.isinf(y)[0])\n            y = 0.0/x\n            assert_(np.isnan(y)[0])\n\n    def test_floor_division_complex(self):\n        # check that implementation is correct\n        msg = \"Complex floor division implementation check\"\n        x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)\n        y = np.array([0., -1., 0., 0.], dtype=np.complex128)\n        assert_equal(np.floor_divide(x**2, x), y, err_msg=msg)\n        # check overflow, underflow\n        msg = \"Complex floor division overflow/underflow check\"\n        x = np.array([1.e+110, 1.e-110], dtype=np.complex128)\n        y = np.floor_divide(x**2, x)\n        assert_equal(y, [1.e+110, 0], err_msg=msg)\n\n    def test_floor_division_signed_zero(self):\n        # Check that the sign bit is correctly set when dividing positive and\n        # negative zero by one.\n        x = np.zeros(10)\n        assert_equal(np.signbit(x//1), 0)\n        assert_equal(np.signbit((-x)//1), 1)\n\n    @pytest.mark.parametrize('dtype', np.typecodes['Float'])\n    def test_floor_division_errors(self, dtype):\n        fnan = np.array(np.nan, dtype=dtype)\n        fone = np.array(1.0, dtype=dtype)\n        fzer = np.array(0.0, dtype=dtype)\n        finf = np.array(np.inf, dtype=dtype)\n        # divide by zero error check\n        with np.errstate(divide='raise', invalid='ignore'):\n            assert_raises(FloatingPointError, np.floor_divide, fone, fzer)\n        with np.errstate(invalid='raise'):\n            assert_raises(FloatingPointError, np.floor_divide, fnan, fone)\n            assert_raises(FloatingPointError, np.floor_divide, fone, fnan)\n            assert_raises(FloatingPointError, np.floor_divide, fnan, fzer)\n\n    @pytest.mark.parametrize('dtype', np.typecodes['Float'])\n    def test_floor_division_corner_cases(self, dtype):\n        # test corner cases like 1.0//0.0 for errors and return vals\n        x = np.zeros(10, dtype=dtype)\n        y = np.ones(10, dtype=dtype)\n        fnan = np.array(np.nan, dtype=dtype)\n        fone = np.array(1.0, dtype=dtype)\n        fzer = np.array(0.0, dtype=dtype)\n        finf = np.array(np.inf, dtype=dtype)\n        with suppress_warnings() as sup:\n            sup.filter(RuntimeWarning, \"invalid value encountered in floor_divide\")\n            div = np.floor_divide(fnan, fone)\n            assert(np.isnan(div)), \"dt: %s, div: %s\" % (dtype, div)\n            div = np.floor_divide(fone, fnan)\n            assert(np.isnan(div)), \"dt: %s, div: %s\" % (dtype, div)\n            div = np.floor_divide(fnan, fzer)\n            assert(np.isnan(div)), \"dt: %s, div: %s\" % (dtype, div)\n        # verify 1.0//0.0 computations return inf\n        with np.errstate(divide='ignore'):\n            z = np.floor_divide(y, x)\n            assert_(np.isinf(z).all())\n\ndef floor_divide_and_remainder(x, y):\n    return (np.floor_divide(x, y), np.remainder(x, y))\n\n\ndef _signs(dt):\n    if dt in np.typecodes['UnsignedInteger']:\n        return (+1,)\n    else:\n        return (+1, -1)\n\n\nclass TestRemainder:\n\n    def test_remainder_basic(self):\n        dt = np.typecodes['AllInteger'] + np.typecodes['Float']\n        for op in [floor_divide_and_remainder, np.divmod]:\n            for dt1, dt2 in itertools.product(dt, dt):\n                for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):\n                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: 
%s'\n msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)\n a = np.array(sg1*71, dtype=dt1)\n b = np.array(sg2*19, dtype=dt2)\n div, rem = op(a, b)\n assert_equal(div*b + rem, a, err_msg=msg)\n if sg2 == -1:\n assert_(b < rem <= 0, msg)\n else:\n assert_(b > rem >= 0, msg)\n\n def test_float_remainder_exact(self):\n # test that float results are exact for small integers. This also\n # holds for the same integers scaled by powers of two.\n nlst = list(range(-127, 0))\n plst = list(range(1, 128))\n dividend = nlst + [0] + plst\n divisor = nlst + plst\n arg = list(itertools.product(dividend, divisor))\n tgt = list(divmod(*t) for t in arg)\n\n a, b = np.array(arg, dtype=int).T\n # convert exact integer results from Python to float so that\n # signed zero can be used, it is checked.\n tgtdiv, tgtrem = np.array(tgt, dtype=float).T\n tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)\n tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)\n\n for op in [floor_divide_and_remainder, np.divmod]:\n for dt in np.typecodes['Float']:\n msg = 'op: %s, dtype: %s' % (op.__name__, dt)\n fa = a.astype(dt)\n fb = b.astype(dt)\n div, rem = op(fa, fb)\n assert_equal(div, tgtdiv, err_msg=msg)\n assert_equal(rem, tgtrem, err_msg=msg)\n\n def test_float_remainder_roundoff(self):\n # gh-6127\n dt = np.typecodes['Float']\n for op in [floor_divide_and_remainder, np.divmod]:\n for dt1, dt2 in itertools.product(dt, dt):\n for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):\n fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'\n msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)\n a = np.array(sg1*78*6e-8, dtype=dt1)\n b = np.array(sg2*6e-8, dtype=dt2)\n div, rem = op(a, b)\n # Equal assertion should hold when fmod is used\n assert_equal(div*b + rem, a, err_msg=msg)\n if sg2 == -1:\n assert_(b < rem <= 0, msg)\n else:\n assert_(b > rem >= 0, msg)\n\n @pytest.mark.parametrize('dtype', np.typecodes['Float'])\n def test_float_divmod_errors(self, dtype):\n # Check valid errors raised for divmod and remainder\n fzero = np.array(0.0, dtype=dtype)\n fone = np.array(1.0, dtype=dtype)\n finf = np.array(np.inf, dtype=dtype)\n fnan = np.array(np.nan, dtype=dtype)\n # since divmod is combination of both remainder and divide\n # ops it will set both dividebyzero and invalid flags\n with np.errstate(divide='raise', invalid='ignore'):\n assert_raises(FloatingPointError, np.divmod, fone, fzero)\n with np.errstate(divide='ignore', invalid='raise'):\n assert_raises(FloatingPointError, np.divmod, fone, fzero)\n with np.errstate(invalid='raise'):\n assert_raises(FloatingPointError, np.divmod, fzero, fzero)\n with np.errstate(invalid='raise'):\n assert_raises(FloatingPointError, np.divmod, finf, finf)\n with np.errstate(divide='ignore', invalid='raise'):\n assert_raises(FloatingPointError, np.divmod, finf, fzero)\n with np.errstate(divide='raise', invalid='ignore'):\n assert_raises(FloatingPointError, np.divmod, finf, fzero)\n\n @pytest.mark.parametrize('dtype', np.typecodes['Float'])\n @pytest.mark.parametrize('fn', [np.fmod, np.remainder])\n def test_float_remainder_errors(self, dtype, fn):\n fzero = np.array(0.0, dtype=dtype)\n fone = np.array(1.0, dtype=dtype)\n finf = np.array(np.inf, dtype=dtype)\n fnan = np.array(np.nan, dtype=dtype)\n with np.errstate(invalid='raise'):\n assert_raises(FloatingPointError, fn, fone, fzero)\n assert_raises(FloatingPointError, fn, fnan, fzero)\n assert_raises(FloatingPointError, fn, fone, fnan)\n assert_raises(FloatingPointError, fn, fnan, fone)\n\n def 
test_float_remainder_overflow(self):\n a = np.finfo(np.float64).tiny\n with np.errstate(over='ignore', invalid='ignore'):\n div, mod = np.divmod(4, a)\n np.isinf(div)\n assert_(mod == 0)\n with np.errstate(over='raise', invalid='ignore'):\n assert_raises(FloatingPointError, np.divmod, 4, a)\n with np.errstate(invalid='raise', over='ignore'):\n assert_raises(FloatingPointError, np.divmod, 4, a)\n\n def test_float_divmod_corner_cases(self):\n # check nan cases\n for dt in np.typecodes['Float']:\n fnan = np.array(np.nan, dtype=dt)\n fone = np.array(1.0, dtype=dt)\n fzer = np.array(0.0, dtype=dt)\n finf = np.array(np.inf, dtype=dt)\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"invalid value encountered in divmod\")\n sup.filter(RuntimeWarning, \"divide by zero encountered in divmod\")\n div, rem = np.divmod(fone, fzer)\n assert(np.isinf(div)), 'dt: %s, div: %s' % (dt, rem)\n assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem)\n div, rem = np.divmod(fzer, fzer)\n assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem)\n assert_(np.isnan(div)), 'dt: %s, rem: %s' % (dt, rem)\n div, rem = np.divmod(finf, finf)\n assert(np.isnan(div)), 'dt: %s, rem: %s' % (dt, rem)\n assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem)\n div, rem = np.divmod(finf, fzer)\n assert(np.isinf(div)), 'dt: %s, rem: %s' % (dt, rem)\n assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem)\n div, rem = np.divmod(fnan, fone)\n assert(np.isnan(rem)), \"dt: %s, rem: %s\" % (dt, rem)\n assert(np.isnan(div)), \"dt: %s, rem: %s\" % (dt, rem)\n div, rem = np.divmod(fone, fnan)\n assert(np.isnan(rem)), \"dt: %s, rem: %s\" % (dt, rem)\n assert(np.isnan(div)), \"dt: %s, rem: %s\" % (dt, rem)\n div, rem = np.divmod(fnan, fzer)\n assert(np.isnan(rem)), \"dt: %s, rem: %s\" % (dt, rem)\n assert(np.isnan(div)), \"dt: %s, rem: %s\" % (dt, rem)\n\n def test_float_remainder_corner_cases(self):\n # Check remainder magnitude.\n for dt in np.typecodes['Float']:\n fone = np.array(1.0, dtype=dt)\n fzer = np.array(0.0, dtype=dt)\n fnan = np.array(np.nan, dtype=dt)\n b = np.array(1.0, dtype=dt)\n a = np.nextafter(np.array(0.0, dtype=dt), -b)\n rem = np.remainder(a, b)\n assert_(rem <= b, 'dt: %s' % dt)\n rem = np.remainder(-a, -b)\n assert_(rem >= -b, 'dt: %s' % dt)\n\n # Check nans, inf\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"invalid value encountered in remainder\")\n sup.filter(RuntimeWarning, \"invalid value encountered in fmod\")\n for dt in np.typecodes['Float']:\n fone = np.array(1.0, dtype=dt)\n fzer = np.array(0.0, dtype=dt)\n finf = np.array(np.inf, dtype=dt)\n fnan = np.array(np.nan, dtype=dt)\n rem = np.remainder(fone, fzer)\n assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))\n # MSVC 2008 returns NaN here, so disable the check.\n #rem = np.remainder(fone, finf)\n #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))\n rem = np.remainder(finf, fone)\n fmod = np.fmod(finf, fone)\n assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))\n assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))\n rem = np.remainder(finf, finf)\n fmod = np.fmod(finf, fone)\n assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))\n assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))\n rem = np.remainder(finf, fzer)\n fmod = np.fmod(finf, fzer)\n assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))\n assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))\n rem = np.remainder(fone, fnan)\n fmod = np.fmod(fone, fnan)\n assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))\n assert_(np.isnan(fmod), 'dt: %s, fmod: 
%s' % (dt, fmod))\n rem = np.remainder(fnan, fzer)\n fmod = np.fmod(fnan, fzer)\n assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))\n assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, rem))\n rem = np.remainder(fnan, fone)\n fmod = np.fmod(fnan, fone)\n assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))\n assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, rem))\n\n\nclass TestCbrt:\n def test_cbrt_scalar(self):\n assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)\n\n def test_cbrt(self):\n x = np.array([1., 2., -3., np.inf, -np.inf])\n assert_almost_equal(np.cbrt(x**3), x)\n\n assert_(np.isnan(np.cbrt(np.nan)))\n assert_equal(np.cbrt(np.inf), np.inf)\n assert_equal(np.cbrt(-np.inf), -np.inf)\n\n\nclass TestPower:\n def test_power_float(self):\n x = np.array([1., 2., 3.])\n assert_equal(x**0, [1., 1., 1.])\n assert_equal(x**1, x)\n assert_equal(x**2, [1., 4., 9.])\n y = x.copy()\n y **= 2\n assert_equal(y, [1., 4., 9.])\n assert_almost_equal(x**(-1), [1., 0.5, 1./3])\n assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])\n\n for out, inp, msg in _gen_alignment_data(dtype=np.float32,\n type='unary',\n max_size=11):\n exp = [ncu.sqrt(i) for i in inp]\n assert_almost_equal(inp**(0.5), exp, err_msg=msg)\n np.sqrt(inp, out=out)\n assert_equal(out, exp, err_msg=msg)\n\n for out, inp, msg in _gen_alignment_data(dtype=np.float64,\n type='unary',\n max_size=7):\n exp = [ncu.sqrt(i) for i in inp]\n assert_almost_equal(inp**(0.5), exp, err_msg=msg)\n np.sqrt(inp, out=out)\n assert_equal(out, exp, err_msg=msg)\n\n def test_power_complex(self):\n x = np.array([1+2j, 2+3j, 3+4j])\n assert_equal(x**0, [1., 1., 1.])\n assert_equal(x**1, x)\n assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])\n assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])\n assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])\n assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])\n assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])\n assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,\n (-117-44j)/15625])\n assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),\n ncu.sqrt(3+4j)])\n norm = 1./((x**14)[0])\n assert_almost_equal(x**14 * norm,\n [i * norm for i in [-76443+16124j, 23161315+58317492j,\n 5583548873 + 2465133864j]])\n\n # Ticket #836\n def assert_complex_equal(x, y):\n assert_array_equal(x.real, y.real)\n assert_array_equal(x.imag, y.imag)\n\n for z in [complex(0, np.inf), complex(1, np.inf)]:\n z = np.array([z], dtype=np.complex_)\n with np.errstate(invalid=\"ignore\"):\n assert_complex_equal(z**1, z)\n assert_complex_equal(z**2, z*z)\n assert_complex_equal(z**3, z*z*z)\n\n def test_power_zero(self):\n # ticket #1271\n zero = np.array([0j])\n one = np.array([1+0j])\n cnan = np.array([complex(np.nan, np.nan)])\n # FIXME cinf not tested.\n #cinf = np.array([complex(np.inf, 0)])\n\n def assert_complex_equal(x, y):\n x, y = np.asarray(x), np.asarray(y)\n assert_array_equal(x.real, y.real)\n assert_array_equal(x.imag, y.imag)\n\n # positive powers\n for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:\n assert_complex_equal(np.power(zero, p), zero)\n\n # zero power\n assert_complex_equal(np.power(zero, 0), one)\n with np.errstate(invalid=\"ignore\"):\n assert_complex_equal(np.power(zero, 0+1j), cnan)\n\n # negative power\n for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:\n assert_complex_equal(np.power(zero, -p), cnan)\n assert_complex_equal(np.power(zero, -1+0.2j), cnan)\n\n def test_fast_power(self):\n x = np.array([1, 2, 3], np.int16)\n res = x**2.0\n 
assert_((x**2.00001).dtype is res.dtype)\n assert_array_equal(res, [1, 4, 9])\n # check the inplace operation on the casted copy doesn't mess with x\n assert_(not np.may_share_memory(res, x))\n assert_array_equal(x, [1, 2, 3])\n\n # Check that the fast path ignores 1-element not 0-d arrays\n res = x ** np.array([[[2]]])\n assert_equal(res.shape, (1, 1, 3))\n\n def test_integer_power(self):\n a = np.array([15, 15], 'i8')\n b = np.power(a, a)\n assert_equal(b, [437893890380859375, 437893890380859375])\n\n def test_integer_power_with_integer_zero_exponent(self):\n dtypes = np.typecodes['Integer']\n for dt in dtypes:\n arr = np.arange(-10, 10, dtype=dt)\n assert_equal(np.power(arr, 0), np.ones_like(arr))\n\n dtypes = np.typecodes['UnsignedInteger']\n for dt in dtypes:\n arr = np.arange(10, dtype=dt)\n assert_equal(np.power(arr, 0), np.ones_like(arr))\n\n def test_integer_power_of_1(self):\n dtypes = np.typecodes['AllInteger']\n for dt in dtypes:\n arr = np.arange(10, dtype=dt)\n assert_equal(np.power(1, arr), np.ones_like(arr))\n\n def test_integer_power_of_zero(self):\n dtypes = np.typecodes['AllInteger']\n for dt in dtypes:\n arr = np.arange(1, 10, dtype=dt)\n assert_equal(np.power(0, arr), np.zeros_like(arr))\n\n def test_integer_to_negative_power(self):\n dtypes = np.typecodes['Integer']\n for dt in dtypes:\n a = np.array([0, 1, 2, 3], dtype=dt)\n b = np.array([0, 1, 2, -3], dtype=dt)\n one = np.array(1, dtype=dt)\n minusone = np.array(-1, dtype=dt)\n assert_raises(ValueError, np.power, a, b)\n assert_raises(ValueError, np.power, a, minusone)\n assert_raises(ValueError, np.power, one, b)\n assert_raises(ValueError, np.power, one, minusone)\n\n\nclass TestFloat_power:\n def test_type_conversion(self):\n arg_type = '?bhilBHILefdgFDG'\n res_type = 'ddddddddddddgDDG'\n for dtin, dtout in zip(arg_type, res_type):\n msg = \"dtin: %s, dtout: %s\" % (dtin, dtout)\n arg = np.ones(1, dtype=dtin)\n res = np.float_power(arg, arg)\n assert_(res.dtype.name == np.dtype(dtout).name, msg)\n\n\nclass TestLog2:\n def test_log2_values(self):\n x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]\n y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n for dt in ['f', 'd', 'g']:\n xf = np.array(x, dtype=dt)\n yf = np.array(y, dtype=dt)\n assert_almost_equal(np.log2(xf), yf)\n\n def test_log2_ints(self):\n # a good log2 implementation should provide this,\n # might fail on OS with bad libm\n for i in range(1, 65):\n v = np.log2(2.**i)\n assert_equal(v, float(i), err_msg='at exponent %d' % i)\n\n def test_log2_special(self):\n assert_equal(np.log2(1.), 0.)\n assert_equal(np.log2(np.inf), np.inf)\n assert_(np.isnan(np.log2(np.nan)))\n\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_(np.isnan(np.log2(-1.)))\n assert_(np.isnan(np.log2(-np.inf)))\n assert_equal(np.log2(0.), -np.inf)\n assert_(w[0].category is RuntimeWarning)\n assert_(w[1].category is RuntimeWarning)\n assert_(w[2].category is RuntimeWarning)\n\n\nclass TestExp2:\n def test_exp2_values(self):\n x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]\n y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n for dt in ['f', 'd', 'g']:\n xf = np.array(x, dtype=dt)\n yf = np.array(y, dtype=dt)\n assert_almost_equal(np.exp2(yf), xf)\n\n\nclass TestLogAddExp2(_FilterInvalids):\n # Need test for intermediate precisions\n def test_logaddexp2_values(self):\n x = [1, 2, 3, 4, 5]\n y = [5, 4, 3, 2, 1]\n z = [6, 6, 6, 6, 6]\n for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):\n xf = np.log2(np.array(x, dtype=dt))\n yf = 
np.log2(np.array(y, dtype=dt))\n zf = np.log2(np.array(z, dtype=dt))\n assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_)\n\n def test_logaddexp2_range(self):\n x = [1000000, -1000000, 1000200, -1000200]\n y = [1000200, -1000200, 1000000, -1000000]\n z = [1000200, -1000000, 1000200, -1000000]\n for dt in ['f', 'd', 'g']:\n logxf = np.array(x, dtype=dt)\n logyf = np.array(y, dtype=dt)\n logzf = np.array(z, dtype=dt)\n assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)\n\n def test_inf(self):\n inf = np.inf\n x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]\n y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]\n z = [inf, inf, inf, -inf, inf, inf, 1, 1]\n with np.errstate(invalid='raise'):\n for dt in ['f', 'd', 'g']:\n logxf = np.array(x, dtype=dt)\n logyf = np.array(y, dtype=dt)\n logzf = np.array(z, dtype=dt)\n assert_equal(np.logaddexp2(logxf, logyf), logzf)\n\n def test_nan(self):\n assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))\n assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))\n assert_(np.isnan(np.logaddexp2(np.nan, 0)))\n assert_(np.isnan(np.logaddexp2(0, np.nan)))\n assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))\n\n def test_reduce(self):\n assert_equal(np.logaddexp2.identity, -np.inf)\n assert_equal(np.logaddexp2.reduce([]), -np.inf)\n assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf)\n assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0)\n\n\nclass TestLog:\n def test_log_values(self):\n x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]\n y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n for dt in ['f', 'd', 'g']:\n log2_ = 0.69314718055994530943\n xf = np.array(x, dtype=dt)\n yf = np.array(y, dtype=dt)*log2_\n assert_almost_equal(np.log(xf), yf)\n\n # test aliasing(issue #17761)\n x = np.array([2, 0.937500, 3, 0.947500, 1.054697])\n xf = np.log(x)\n assert_almost_equal(np.log(x, out=x), xf)\n\n def test_log_strides(self):\n np.random.seed(42)\n strides = np.array([-4,-3,-2,-1,1,2,3,4])\n sizes = np.arange(2,100)\n for ii in sizes:\n x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0,size=ii))\n x_special = x_f64.copy()\n x_special[3:-1:4] = 1.0\n y_true = np.log(x_f64)\n y_special = np.log(x_special)\n for jj in strides:\n assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2)\n assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2)\n\nclass TestExp:\n def test_exp_values(self):\n x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]\n y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n for dt in ['f', 'd', 'g']:\n log2_ = 0.69314718055994530943\n xf = np.array(x, dtype=dt)\n yf = np.array(y, dtype=dt)*log2_\n assert_almost_equal(np.exp(yf), xf)\n\n def test_exp_strides(self):\n np.random.seed(42)\n strides = np.array([-4,-3,-2,-1,1,2,3,4])\n sizes = np.arange(2,100)\n for ii in sizes:\n x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1,size=ii))\n y_true = np.exp(x_f64)\n for jj in strides:\n assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2)\n\nclass TestSpecialFloats:\n def test_exp_values(self):\n x = [np.nan, np.nan, np.inf, 0.]\n y = [np.nan, -np.nan, np.inf, -np.inf]\n for dt in ['f', 'd', 'g']:\n xf = np.array(x, dtype=dt)\n yf = np.array(y, dtype=dt)\n assert_equal(np.exp(yf), xf)\n\n with np.errstate(over='raise'):\n assert_raises(FloatingPointError, np.exp, np.float32(100.))\n assert_raises(FloatingPointError, np.exp, np.float32(1E19))\n assert_raises(FloatingPointError, np.exp, np.float64(800.))\n assert_raises(FloatingPointError, np.exp, np.float64(1E19))\n\n def test_log_values(self):\n with 
np.errstate(all='ignore'):\n x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan]\n y = [np.nan, -np.nan, np.inf, -np.inf, 0., -1.0]\n for dt in ['f', 'd', 'g']:\n xf = np.array(x, dtype=dt)\n yf = np.array(y, dtype=dt)\n assert_equal(np.log(yf), xf)\n\n with np.errstate(divide='raise'):\n assert_raises(FloatingPointError, np.log, np.float32(0.))\n\n with np.errstate(invalid='raise'):\n assert_raises(FloatingPointError, np.log, np.float32(-np.inf))\n assert_raises(FloatingPointError, np.log, np.float32(-1.0))\n\n # See https://github.com/numpy/numpy/issues/18005 \n with assert_no_warnings():\n a = np.array(1e9, dtype='float32')\n np.log(a)\n\n def test_sincos_values(self):\n with np.errstate(all='ignore'):\n x = [np.nan, np.nan, np.nan, np.nan]\n y = [np.nan, -np.nan, np.inf, -np.inf]\n for dt in ['f', 'd', 'g']:\n xf = np.array(x, dtype=dt)\n yf = np.array(y, dtype=dt)\n assert_equal(np.sin(yf), xf)\n assert_equal(np.cos(yf), xf)\n\n with np.errstate(invalid='raise'):\n assert_raises(FloatingPointError, np.sin, np.float32(-np.inf))\n assert_raises(FloatingPointError, np.sin, np.float32(np.inf))\n assert_raises(FloatingPointError, np.cos, np.float32(-np.inf))\n assert_raises(FloatingPointError, np.cos, np.float32(np.inf))\n\n def test_sqrt_values(self):\n with np.errstate(all='ignore'):\n x = [np.nan, np.nan, np.inf, np.nan, 0.]\n y = [np.nan, -np.nan, np.inf, -np.inf, 0.]\n for dt in ['f', 'd', 'g']:\n xf = np.array(x, dtype=dt)\n yf = np.array(y, dtype=dt)\n assert_equal(np.sqrt(yf), xf)\n\n #with np.errstate(invalid='raise'):\n # for dt in ['f', 'd', 'g']:\n # assert_raises(FloatingPointError, np.sqrt, np.array(-100., dtype=dt))\n\n def test_abs_values(self):\n x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0]\n y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0]\n for dt in ['f', 'd', 'g']:\n xf = np.array(x, dtype=dt)\n yf = np.array(y, dtype=dt)\n assert_equal(np.abs(yf), xf)\n\n def test_square_values(self):\n x = [np.nan, np.nan, np.inf, np.inf]\n y = [np.nan, -np.nan, np.inf, -np.inf]\n with np.errstate(all='ignore'):\n for dt in ['f', 'd', 'g']:\n xf = np.array(x, dtype=dt)\n yf = np.array(y, dtype=dt)\n assert_equal(np.square(yf), xf)\n\n with np.errstate(over='raise'):\n assert_raises(FloatingPointError, np.square, np.array(1E32, dtype='f'))\n assert_raises(FloatingPointError, np.square, np.array(1E200, dtype='d'))\n\n def test_reciprocal_values(self):\n with np.errstate(all='ignore'):\n x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf]\n y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.]\n for dt in ['f', 'd', 'g']:\n xf = np.array(x, dtype=dt)\n yf = np.array(y, dtype=dt)\n assert_equal(np.reciprocal(yf), xf)\n\n with np.errstate(divide='raise'):\n for dt in ['f', 'd', 'g']:\n assert_raises(FloatingPointError, np.reciprocal, np.array(-0.0, dtype=dt))\n\nclass TestFPClass:\n @pytest.mark.parametrize(\"stride\", [-4,-2,-1,1,2,4])\n def test_fpclass(self, stride):\n arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d')\n arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f')\n nan = np.array([True, True, False, False, False, False, False, False, False, False])\n inf = np.array([False, False, True, True, False, False, False, False, False, False])\n sign = np.array([False, True, False, True, True, False, True, False, False, True])\n finite = np.array([False, False, False, False, True, True, True, True, True, True])\n 
assert_equal(np.isnan(arr_f32[::stride]), nan[::stride])\n assert_equal(np.isnan(arr_f64[::stride]), nan[::stride])\n assert_equal(np.isinf(arr_f32[::stride]), inf[::stride])\n assert_equal(np.isinf(arr_f64[::stride]), inf[::stride])\n assert_equal(np.signbit(arr_f32[::stride]), sign[::stride])\n assert_equal(np.signbit(arr_f64[::stride]), sign[::stride])\n assert_equal(np.isfinite(arr_f32[::stride]), finite[::stride])\n assert_equal(np.isfinite(arr_f64[::stride]), finite[::stride])\n\nclass TestLDExp:\n @pytest.mark.parametrize(\"stride\", [-4,-2,-1,1,2,4])\n @pytest.mark.parametrize(\"dtype\", ['f', 'd'])\n def test_ldexp(self, dtype, stride):\n mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype)\n exp = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i')\n out = np.zeros(8, dtype=dtype)\n assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride])\n assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride])\n\nclass TestFRExp:\n @pytest.mark.parametrize(\"stride\", [-4,-2,-1,1,2,4])\n @pytest.mark.parametrize(\"dtype\", ['f', 'd'])\n @pytest.mark.skipif(not sys.platform.startswith('linux'),\n reason=\"np.frexp gives different answers for NAN/INF on windows and linux\")\n def test_frexp(self, dtype, stride):\n arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype)\n mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype)\n exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i')\n out_mant = np.ones(8, dtype=dtype)\n out_exp = 2*np.ones(8, dtype='i')\n mant, exp = np.frexp(arr[::stride], out=(out_mant[::stride], out_exp[::stride]))\n assert_equal(mant_true[::stride], mant)\n assert_equal(exp_true[::stride], exp)\n assert_equal(out_mant[::stride], mant_true[::stride])\n assert_equal(out_exp[::stride], exp_true[::stride])\n\n# func : [maxulperror, low, high]\navx_ufuncs = {'sqrt' :[1, 0., 100.],\n 'absolute' :[0, -100., 100.],\n 'reciprocal' :[1, 1., 100.],\n 'square' :[1, -100., 100.],\n 'rint' :[0, -100., 100.],\n 'floor' :[0, -100., 100.],\n 'ceil' :[0, -100., 100.],\n 'trunc' :[0, -100., 100.]}\n\nclass TestAVXUfuncs:\n def test_avx_based_ufunc(self):\n strides = np.array([-4,-3,-2,-1,1,2,3,4])\n np.random.seed(42)\n for func, prop in avx_ufuncs.items():\n maxulperr = prop[0]\n minval = prop[1]\n maxval = prop[2]\n # various array sizes to ensure masking in AVX is tested\n for size in range(1,32):\n myfunc = getattr(np, func)\n x_f32 = np.float32(np.random.uniform(low=minval, high=maxval,\n size=size))\n x_f64 = np.float64(x_f32)\n x_f128 = np.longdouble(x_f32)\n y_true128 = myfunc(x_f128)\n if maxulperr == 0:\n assert_equal(myfunc(x_f32), np.float32(y_true128))\n assert_equal(myfunc(x_f64), np.float64(y_true128))\n else:\n assert_array_max_ulp(myfunc(x_f32), np.float32(y_true128),\n maxulp=maxulperr)\n assert_array_max_ulp(myfunc(x_f64), np.float64(y_true128),\n maxulp=maxulperr)\n # various strides to test gather instruction\n if size > 1:\n y_true32 = myfunc(x_f32)\n y_true64 = myfunc(x_f64)\n for jj in strides:\n assert_equal(myfunc(x_f64[::jj]), y_true64[::jj])\n assert_equal(myfunc(x_f32[::jj]), y_true32[::jj])\n\nclass TestAVXFloat32Transcendental:\n def test_exp_float32(self):\n np.random.seed(42)\n x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000))\n x_f64 = np.float64(x_f32)\n assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3)\n\n def test_log_float32(self):\n np.random.seed(42)\n x_f32 = 
np.float32(np.random.uniform(low=0.0,high=1000,size=1000000))\n x_f64 = np.float64(x_f32)\n assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4)\n\n def test_sincos_float32(self):\n np.random.seed(42)\n N = 1000000\n M = np.int_(N/20)\n index = np.random.randint(low=0, high=N, size=M)\n x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N))\n # test coverage for elements > 117435.992f for which glibc is used\n x_f32[index] = np.float32(10E+10*np.random.rand(M))\n x_f64 = np.float64(x_f32)\n assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2)\n assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2)\n # test aliasing(issue #17761)\n tx_f32 = x_f32.copy()\n assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2)\n assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2)\n\n def test_strided_float32(self):\n np.random.seed(42)\n strides = np.array([-4,-3,-2,-1,1,2,3,4])\n sizes = np.arange(2,100)\n for ii in sizes:\n x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii))\n x_f32_large = x_f32.copy()\n x_f32_large[3:-1:4] = 120000.0\n exp_true = np.exp(x_f32)\n log_true = np.log(x_f32)\n sin_true = np.sin(x_f32_large)\n cos_true = np.cos(x_f32_large)\n for jj in strides:\n assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2)\n assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2)\n assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2)\n assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2)\n\nclass TestLogAddExp(_FilterInvalids):\n def test_logaddexp_values(self):\n x = [1, 2, 3, 4, 5]\n y = [5, 4, 3, 2, 1]\n z = [6, 6, 6, 6, 6]\n for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):\n xf = np.log(np.array(x, dtype=dt))\n yf = np.log(np.array(y, dtype=dt))\n zf = np.log(np.array(z, dtype=dt))\n assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)\n\n def test_logaddexp_range(self):\n x = [1000000, -1000000, 1000200, -1000200]\n y = [1000200, -1000200, 1000000, -1000000]\n z = [1000200, -1000000, 1000200, -1000000]\n for dt in ['f', 'd', 'g']:\n logxf = np.array(x, dtype=dt)\n logyf = np.array(y, dtype=dt)\n logzf = np.array(z, dtype=dt)\n assert_almost_equal(np.logaddexp(logxf, logyf), logzf)\n\n def test_inf(self):\n inf = np.inf\n x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]\n y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]\n z = [inf, inf, inf, -inf, inf, inf, 1, 1]\n with np.errstate(invalid='raise'):\n for dt in ['f', 'd', 'g']:\n logxf = np.array(x, dtype=dt)\n logyf = np.array(y, dtype=dt)\n logzf = np.array(z, dtype=dt)\n assert_equal(np.logaddexp(logxf, logyf), logzf)\n\n def test_nan(self):\n assert_(np.isnan(np.logaddexp(np.nan, np.inf)))\n assert_(np.isnan(np.logaddexp(np.inf, np.nan)))\n assert_(np.isnan(np.logaddexp(np.nan, 0)))\n assert_(np.isnan(np.logaddexp(0, np.nan)))\n assert_(np.isnan(np.logaddexp(np.nan, np.nan)))\n\n def test_reduce(self):\n assert_equal(np.logaddexp.identity, -np.inf)\n assert_equal(np.logaddexp.reduce([]), -np.inf)\n\n\nclass TestLog1p:\n def test_log1p(self):\n assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))\n assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))\n\n def test_special(self):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n assert_equal(ncu.log1p(np.nan), np.nan)\n assert_equal(ncu.log1p(np.inf), np.inf)\n assert_equal(ncu.log1p(-1.), -np.inf)\n assert_equal(ncu.log1p(-2.), np.nan)\n 
assert_equal(ncu.log1p(-np.inf), np.nan)\n\n\nclass TestExpm1:\n    def test_expm1(self):\n        assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)\n        assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)\n\n    def test_special(self):\n        assert_equal(ncu.expm1(np.inf), np.inf)\n        assert_equal(ncu.expm1(0.), 0.)\n        assert_equal(ncu.expm1(-0.), -0.)\n        assert_equal(ncu.expm1(np.inf), np.inf)\n        assert_equal(ncu.expm1(-np.inf), -1.)\n\n    def test_complex(self):\n        x = np.asarray(1e-12)\n        assert_allclose(x, ncu.expm1(x))\n        x = x.astype(np.complex128)\n        assert_allclose(x, ncu.expm1(x))\n\n\nclass TestHypot:\n    def test_simple(self):\n        assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))\n        assert_almost_equal(ncu.hypot(0, 0), 0)\n\n    def test_reduce(self):\n        assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0)\n        assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0)\n        assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0)\n        assert_equal(ncu.hypot.reduce([]), 0.0)\n\n\ndef assert_hypot_isnan(x, y):\n    with np.errstate(invalid='ignore'):\n        assert_(np.isnan(ncu.hypot(x, y)),\n                "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))\n\n\ndef assert_hypot_isinf(x, y):\n    with np.errstate(invalid='ignore'):\n        assert_(np.isinf(ncu.hypot(x, y)),\n                "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))\n\n\nclass TestHypotSpecialValues:\n    def test_nan_outputs(self):\n        assert_hypot_isnan(np.nan, np.nan)\n        assert_hypot_isnan(np.nan, 1)\n\n    def test_nan_outputs2(self):\n        assert_hypot_isinf(np.nan, np.inf)\n        assert_hypot_isinf(np.inf, np.nan)\n        assert_hypot_isinf(np.inf, 0)\n        assert_hypot_isinf(0, np.inf)\n        assert_hypot_isinf(np.inf, np.inf)\n        assert_hypot_isinf(np.inf, 23.0)\n\n    def test_no_fpe(self):\n        assert_no_warnings(ncu.hypot, np.inf, 0)\n\n\ndef assert_arctan2_isnan(x, y):\n    assert_(np.isnan(ncu.arctan2(x, y)), "arctan2(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y)))\n\n\ndef assert_arctan2_ispinf(x, y):\n    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan2(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)))\n\n\ndef assert_arctan2_isninf(x, y):\n    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan2(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)))\n\n\ndef assert_arctan2_ispzero(x, y):\n    assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan2(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)))\n\n\ndef assert_arctan2_isnzero(x, y):\n    assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan2(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))\n\n\nclass TestArctan2SpecialValues:\n    def test_one_one(self):\n        # atan2(1, 1) returns pi/4.\n        assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)\n        assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)\n        assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)\n\n    def test_zero_nzero(self):\n        # atan2(+-0, -0) returns +-pi.\n        assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)\n        assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)\n\n    def test_zero_pzero(self):\n        # atan2(+-0, +0) returns +-0.\n        assert_arctan2_ispzero(np.PZERO, np.PZERO)\n        assert_arctan2_isnzero(np.NZERO, np.PZERO)\n\n    def test_zero_negative(self):\n        # atan2(+-0, x) returns +-pi for x < 0.\n        assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)\n        assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)\n\n    def test_zero_positive(self):\n        # atan2(+-0, x) returns +-0 for x > 0.\n        assert_arctan2_ispzero(np.PZERO, 1)\n        assert_arctan2_isnzero(np.NZERO, 1)\n\n    def 
test_positive_zero(self):\n # atan2(y, +-0) returns +pi/2 for y > 0.\n assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)\n assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)\n\n def test_negative_zero(self):\n # atan2(y, +-0) returns -pi/2 for y < 0.\n assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)\n assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)\n\n def test_any_ninf(self):\n # atan2(+-y, -infinity) returns +-pi for finite y > 0.\n assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi)\n assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)\n\n def test_any_pinf(self):\n # atan2(+-y, +infinity) returns +-0 for finite y > 0.\n assert_arctan2_ispzero(1, np.inf)\n assert_arctan2_isnzero(-1, np.inf)\n\n def test_inf_any(self):\n # atan2(+-infinity, x) returns +-pi/2 for finite x.\n assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)\n assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)\n\n def test_inf_ninf(self):\n # atan2(+-infinity, -infinity) returns +-3*pi/4.\n assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)\n assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)\n\n def test_inf_pinf(self):\n # atan2(+-infinity, +infinity) returns +-pi/4.\n assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)\n assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)\n\n def test_nan_any(self):\n # atan2(nan, x) returns nan for any x, including inf\n assert_arctan2_isnan(np.nan, np.inf)\n assert_arctan2_isnan(np.inf, np.nan)\n assert_arctan2_isnan(np.nan, np.nan)\n\n\nclass TestLdexp:\n def _check_ldexp(self, tp):\n assert_almost_equal(ncu.ldexp(np.array(2., np.float32),\n np.array(3, tp)), 16.)\n assert_almost_equal(ncu.ldexp(np.array(2., np.float64),\n np.array(3, tp)), 16.)\n assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),\n np.array(3, tp)), 16.)\n\n def test_ldexp(self):\n # The default Python int type should work\n assert_almost_equal(ncu.ldexp(2., 3), 16.)\n # The following int types should all be accepted\n self._check_ldexp(np.int8)\n self._check_ldexp(np.int16)\n self._check_ldexp(np.int32)\n self._check_ldexp('i')\n self._check_ldexp('l')\n\n def test_ldexp_overflow(self):\n # silence warning emitted on overflow\n with np.errstate(over=\"ignore\"):\n imax = np.iinfo(np.dtype('l')).max\n imin = np.iinfo(np.dtype('l')).min\n assert_equal(ncu.ldexp(2., imax), np.inf)\n assert_equal(ncu.ldexp(2., imin), 0)\n\n\nclass TestMaximum(_FilterInvalids):\n def test_reduce(self):\n dflt = np.typecodes['AllFloat']\n dint = np.typecodes['AllInteger']\n seq1 = np.arange(11)\n seq2 = seq1[::-1]\n func = np.maximum.reduce\n for dt in dint:\n tmp1 = seq1.astype(dt)\n tmp2 = seq2.astype(dt)\n assert_equal(func(tmp1), 10)\n assert_equal(func(tmp2), 10)\n for dt in dflt:\n tmp1 = seq1.astype(dt)\n tmp2 = seq2.astype(dt)\n assert_equal(func(tmp1), 10)\n assert_equal(func(tmp2), 10)\n tmp1[::2] = np.nan\n tmp2[::2] = np.nan\n assert_equal(func(tmp1), np.nan)\n assert_equal(func(tmp2), np.nan)\n\n def test_reduce_complex(self):\n assert_equal(np.maximum.reduce([1, 2j]), 1)\n assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j)\n\n def test_float_nans(self):\n nan = np.nan\n arg1 = np.array([0, nan, nan])\n arg2 = np.array([nan, 0, nan])\n out = np.array([nan, nan, nan])\n assert_equal(np.maximum(arg1, arg2), out)\n\n def test_object_nans(self):\n # Multiple checks to give this a chance to\n # fail if cmp is used instead of rich compare.\n # Failure cannot be guaranteed.\n for i in range(1):\n x = 
np.array(float('nan'), object)\n y = 1.0\n z = np.array(float('nan'), object)\n assert_(np.maximum(x, y) == 1.0)\n assert_(np.maximum(z, y) == 1.0)\n\n def test_complex_nans(self):\n nan = np.nan\n for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:\n arg1 = np.array([0, cnan, cnan], dtype=complex)\n arg2 = np.array([cnan, 0, cnan], dtype=complex)\n out = np.array([nan, nan, nan], dtype=complex)\n assert_equal(np.maximum(arg1, arg2), out)\n\n def test_object_array(self):\n arg1 = np.arange(5, dtype=object)\n arg2 = arg1 + 1\n assert_equal(np.maximum(arg1, arg2), arg2)\n\n def test_strided_array(self):\n arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf])\n arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0])\n maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0])\n out = np.ones(8)\n out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0])\n assert_equal(np.maximum(arr1,arr2), maxtrue)\n assert_equal(np.maximum(arr1[::2],arr2[::2]), maxtrue[::2])\n assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0]))\n assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan]))\n assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan]))\n assert_equal(out, out_maxtrue)\n\n\nclass TestMinimum(_FilterInvalids):\n def test_reduce(self):\n dflt = np.typecodes['AllFloat']\n dint = np.typecodes['AllInteger']\n seq1 = np.arange(11)\n seq2 = seq1[::-1]\n func = np.minimum.reduce\n for dt in dint:\n tmp1 = seq1.astype(dt)\n tmp2 = seq2.astype(dt)\n assert_equal(func(tmp1), 0)\n assert_equal(func(tmp2), 0)\n for dt in dflt:\n tmp1 = seq1.astype(dt)\n tmp2 = seq2.astype(dt)\n assert_equal(func(tmp1), 0)\n assert_equal(func(tmp2), 0)\n tmp1[::2] = np.nan\n tmp2[::2] = np.nan\n assert_equal(func(tmp1), np.nan)\n assert_equal(func(tmp2), np.nan)\n\n def test_reduce_complex(self):\n assert_equal(np.minimum.reduce([1, 2j]), 2j)\n assert_equal(np.minimum.reduce([1+3j, 2j]), 2j)\n\n def test_float_nans(self):\n nan = np.nan\n arg1 = np.array([0, nan, nan])\n arg2 = np.array([nan, 0, nan])\n out = np.array([nan, nan, nan])\n assert_equal(np.minimum(arg1, arg2), out)\n\n def test_object_nans(self):\n # Multiple checks to give this a chance to\n # fail if cmp is used instead of rich compare.\n # Failure cannot be guaranteed.\n for i in range(1):\n x = np.array(float('nan'), object)\n y = 1.0\n z = np.array(float('nan'), object)\n assert_(np.minimum(x, y) == 1.0)\n assert_(np.minimum(z, y) == 1.0)\n\n def test_complex_nans(self):\n nan = np.nan\n for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:\n arg1 = np.array([0, cnan, cnan], dtype=complex)\n arg2 = np.array([cnan, 0, cnan], dtype=complex)\n out = np.array([nan, nan, nan], dtype=complex)\n assert_equal(np.minimum(arg1, arg2), out)\n\n def test_object_array(self):\n arg1 = np.arange(5, dtype=object)\n arg2 = arg1 + 1\n assert_equal(np.minimum(arg1, arg2), arg1)\n\n def test_strided_array(self):\n arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf])\n arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0])\n mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf])\n out = np.ones(8)\n out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0])\n assert_equal(np.minimum(arr1,arr2), mintrue)\n assert_equal(np.minimum(arr1[::2],arr2[::2]), mintrue[::2])\n assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0]))\n 
assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan]))\n assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-4.0, 1.0, np.nan]))\n assert_equal(out, out_mintrue)\n\nclass TestFmax(_FilterInvalids):\n def test_reduce(self):\n dflt = np.typecodes['AllFloat']\n dint = np.typecodes['AllInteger']\n seq1 = np.arange(11)\n seq2 = seq1[::-1]\n func = np.fmax.reduce\n for dt in dint:\n tmp1 = seq1.astype(dt)\n tmp2 = seq2.astype(dt)\n assert_equal(func(tmp1), 10)\n assert_equal(func(tmp2), 10)\n for dt in dflt:\n tmp1 = seq1.astype(dt)\n tmp2 = seq2.astype(dt)\n assert_equal(func(tmp1), 10)\n assert_equal(func(tmp2), 10)\n tmp1[::2] = np.nan\n tmp2[::2] = np.nan\n assert_equal(func(tmp1), 9)\n assert_equal(func(tmp2), 9)\n\n def test_reduce_complex(self):\n assert_equal(np.fmax.reduce([1, 2j]), 1)\n assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j)\n\n def test_float_nans(self):\n nan = np.nan\n arg1 = np.array([0, nan, nan])\n arg2 = np.array([nan, 0, nan])\n out = np.array([0, 0, nan])\n assert_equal(np.fmax(arg1, arg2), out)\n\n def test_complex_nans(self):\n nan = np.nan\n for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:\n arg1 = np.array([0, cnan, cnan], dtype=complex)\n arg2 = np.array([cnan, 0, cnan], dtype=complex)\n out = np.array([0, 0, nan], dtype=complex)\n assert_equal(np.fmax(arg1, arg2), out)\n\n\nclass TestFmin(_FilterInvalids):\n def test_reduce(self):\n dflt = np.typecodes['AllFloat']\n dint = np.typecodes['AllInteger']\n seq1 = np.arange(11)\n seq2 = seq1[::-1]\n func = np.fmin.reduce\n for dt in dint:\n tmp1 = seq1.astype(dt)\n tmp2 = seq2.astype(dt)\n assert_equal(func(tmp1), 0)\n assert_equal(func(tmp2), 0)\n for dt in dflt:\n tmp1 = seq1.astype(dt)\n tmp2 = seq2.astype(dt)\n assert_equal(func(tmp1), 0)\n assert_equal(func(tmp2), 0)\n tmp1[::2] = np.nan\n tmp2[::2] = np.nan\n assert_equal(func(tmp1), 1)\n assert_equal(func(tmp2), 1)\n\n def test_reduce_complex(self):\n assert_equal(np.fmin.reduce([1, 2j]), 2j)\n assert_equal(np.fmin.reduce([1+3j, 2j]), 2j)\n\n def test_float_nans(self):\n nan = np.nan\n arg1 = np.array([0, nan, nan])\n arg2 = np.array([nan, 0, nan])\n out = np.array([0, 0, nan])\n assert_equal(np.fmin(arg1, arg2), out)\n\n def test_complex_nans(self):\n nan = np.nan\n for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:\n arg1 = np.array([0, cnan, cnan], dtype=complex)\n arg2 = np.array([cnan, 0, cnan], dtype=complex)\n out = np.array([0, 0, nan], dtype=complex)\n assert_equal(np.fmin(arg1, arg2), out)\n\n\nclass TestBool:\n def test_exceptions(self):\n a = np.ones(1, dtype=np.bool_)\n assert_raises(TypeError, np.negative, a)\n assert_raises(TypeError, np.positive, a)\n assert_raises(TypeError, np.subtract, a, a)\n\n def test_truth_table_logical(self):\n # 2, 3 and 4 serves as true values\n input1 = [0, 0, 3, 2]\n input2 = [0, 4, 0, 2]\n\n typecodes = (np.typecodes['AllFloat']\n + np.typecodes['AllInteger']\n + '?') # boolean\n for dtype in map(np.dtype, typecodes):\n arg1 = np.asarray(input1, dtype=dtype)\n arg2 = np.asarray(input2, dtype=dtype)\n\n # OR\n out = [False, True, True, True]\n for func in (np.logical_or, np.maximum):\n assert_equal(func(arg1, arg2).astype(bool), out)\n # AND\n out = [False, False, False, True]\n for func in (np.logical_and, np.minimum):\n assert_equal(func(arg1, arg2).astype(bool), out)\n # XOR\n out = [False, True, True, False]\n for func in (np.logical_xor, np.not_equal):\n assert_equal(func(arg1, arg2).astype(bool), out)\n\n def 
test_truth_table_bitwise(self):\n arg1 = [False, False, True, True]\n arg2 = [False, True, False, True]\n\n out = [False, True, True, True]\n assert_equal(np.bitwise_or(arg1, arg2), out)\n\n out = [False, False, False, True]\n assert_equal(np.bitwise_and(arg1, arg2), out)\n\n out = [False, True, True, False]\n assert_equal(np.bitwise_xor(arg1, arg2), out)\n\n def test_reduce(self):\n none = np.array([0, 0, 0, 0], bool)\n some = np.array([1, 0, 1, 1], bool)\n every = np.array([1, 1, 1, 1], bool)\n empty = np.array([], bool)\n\n arrs = [none, some, every, empty]\n\n for arr in arrs:\n assert_equal(np.logical_and.reduce(arr), all(arr))\n\n for arr in arrs:\n assert_equal(np.logical_or.reduce(arr), any(arr))\n\n for arr in arrs:\n assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1)\n\n\nclass TestBitwiseUFuncs:\n\n bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O']\n\n def test_values(self):\n for dt in self.bitwise_types:\n zeros = np.array([0], dtype=dt)\n ones = np.array([-1], dtype=dt)\n msg = \"dt = '%s'\" % dt.char\n\n assert_equal(np.bitwise_not(zeros), ones, err_msg=msg)\n assert_equal(np.bitwise_not(ones), zeros, err_msg=msg)\n\n assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg)\n assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg)\n assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg)\n assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg)\n\n assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg)\n assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg)\n assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg)\n assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg)\n\n assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg)\n assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg)\n assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg)\n assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg)\n\n def test_types(self):\n for dt in self.bitwise_types:\n zeros = np.array([0], dtype=dt)\n ones = np.array([-1], dtype=dt)\n msg = \"dt = '%s'\" % dt.char\n\n assert_(np.bitwise_not(zeros).dtype == dt, msg)\n assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg)\n assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg)\n assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg)\n\n def test_identity(self):\n assert_(np.bitwise_or.identity == 0, 'bitwise_or')\n assert_(np.bitwise_xor.identity == 0, 'bitwise_xor')\n assert_(np.bitwise_and.identity == -1, 'bitwise_and')\n\n def test_reduction(self):\n binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and)\n\n for dt in self.bitwise_types:\n zeros = np.array([0], dtype=dt)\n ones = np.array([-1], dtype=dt)\n for f in binary_funcs:\n msg = \"dt: '%s', f: '%s'\" % (dt, f)\n assert_equal(f.reduce(zeros), zeros, err_msg=msg)\n assert_equal(f.reduce(ones), ones, err_msg=msg)\n\n # Test empty reduction, no object dtype\n for dt in self.bitwise_types[:-1]:\n # No object array types\n empty = np.array([], dtype=dt)\n for f in binary_funcs:\n msg = \"dt: '%s', f: '%s'\" % (dt, f)\n tgt = np.array(f.identity, dtype=dt)\n res = f.reduce(empty)\n assert_equal(res, tgt, err_msg=msg)\n assert_(res.dtype == tgt.dtype, msg)\n\n # Empty object arrays use the identity. 
Note that the types may\n        # differ, the actual type used is determined by the assign_identity\n        # function and is not the same as the type returned by the identity\n        # method.\n        for f in binary_funcs:\n            msg = "dt: '%s'" % (f,)\n            empty = np.array([], dtype=object)\n            tgt = f.identity\n            res = f.reduce(empty)\n            assert_equal(res, tgt, err_msg=msg)\n\n        # Non-empty object arrays do not use the identity\n        for f in binary_funcs:\n            msg = "dt: '%s'" % (f,)\n            btype = np.array([True], dtype=object)\n            assert_(type(f.reduce(btype)) is bool, msg)\n\n\nclass TestInt:\n    def test_logical_not(self):\n        x = np.ones(10, dtype=np.int16)\n        o = np.ones(10 * 2, dtype=bool)\n        tgt = o.copy()\n        tgt[::2] = False\n        os = o[::2]\n        assert_array_equal(np.logical_not(x, out=os), False)\n        assert_array_equal(o, tgt)\n\n\nclass TestFloatingPoint:\n    def test_floating_point(self):\n        assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)\n\n\nclass TestDegrees:\n    def test_degrees(self):\n        assert_almost_equal(ncu.degrees(np.pi), 180.0)\n        assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)\n\n\nclass TestRadians:\n    def test_radians(self):\n        assert_almost_equal(ncu.radians(180.0), np.pi)\n        assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)\n\n\nclass TestHeaviside:\n    def test_heaviside(self):\n        x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])\n        expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])\n        expected1 = expectedhalf.copy()\n        expected1[0, 2] = 1\n\n        h = ncu.heaviside(x, 0.5)\n        assert_equal(h, expectedhalf)\n\n        h = ncu.heaviside(x, 1.0)\n        assert_equal(h, expected1)\n\n        x = x.astype(np.float32)\n\n        h = ncu.heaviside(x, np.float32(0.5))\n        assert_equal(h, expectedhalf.astype(np.float32))\n\n        h = ncu.heaviside(x, np.float32(1.0))\n        assert_equal(h, expected1.astype(np.float32))\n\n\nclass TestSign:\n    def test_sign(self):\n        a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])\n        out = np.zeros(a.shape)\n        tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])\n\n        with np.errstate(invalid='ignore'):\n            res = ncu.sign(a)\n            assert_equal(res, tgt)\n            res = ncu.sign(a, out)\n            assert_equal(res, tgt)\n            assert_equal(out, tgt)\n\n    def test_sign_dtype_object(self):\n        # In reference to github issue #6229\n\n        foo = np.array([-.1, 0, .1])\n        a = np.sign(foo.astype(object))\n        b = np.sign(foo)\n\n        assert_array_equal(a, b)\n\n    def test_sign_dtype_nan_object(self):\n        # In reference to github issue #6229\n        def test_nan():\n            foo = np.array([np.nan])\n            # the sign of an object nan is undefined; np.sign raises TypeError\n            np.sign(foo.astype(object))\n\n        assert_raises(TypeError, test_nan)\n\nclass TestMinMax:\n    def test_minmax_blocked(self):\n        # simd tests on max/min, test all alignments, slow but important\n        # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)\n        for dt, sz in [(np.float32, 15), (np.float64, 7)]:\n            for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',\n                                                     max_size=sz):\n                for i in range(inp.size):\n                    inp[:] = np.arange(inp.size, dtype=dt)\n                    inp[i] = np.nan\n                    emsg = lambda: '%r\n%s' % (inp, msg)\n                    with suppress_warnings() as sup:\n                        sup.filter(RuntimeWarning,\n                                   "invalid value encountered in reduce")\n                        assert_(np.isnan(inp.max()), msg=emsg)\n                        assert_(np.isnan(inp.min()), msg=emsg)\n\n                    inp[i] = 1e10\n                    assert_equal(inp.max(), 1e10, err_msg=msg)\n                    inp[i] = -1e10\n                    assert_equal(inp.min(), -1e10, err_msg=msg)\n\n    def test_lower_align(self):\n        # check data that is not aligned to element size\n        # i.e. doubles are aligned to 4 bytes on i386\n        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)\n        assert_equal(d.max(), d[0])\n        assert_equal(d.min(), d[0])\n\n    def test_reduce_reorder(self):\n        # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus\n        # and put it before the call to an intrinsic function that causes\n        # invalid status to be set. Also make sure warnings are not emitted\n        for n in (2, 4, 8, 16, 32):\n            for dt in (np.float32, np.float16, np.complex64):\n                for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):\n                    assert_equal(np.min(r), np.nan)\n\n    def test_minimize_no_warns(self):\n        a = np.minimum(np.nan, 1)\n        assert_equal(a, np.nan)\n\n\nclass TestAbsoluteNegative:\n    def test_abs_neg_blocked(self):\n        # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1\n        for dt, sz in [(np.float32, 11), (np.float64, 5)]:\n            for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',\n                                                     max_size=sz):\n                tgt = [ncu.absolute(i) for i in inp]\n                np.absolute(inp, out=out)\n                assert_equal(out, tgt, err_msg=msg)\n                assert_((out >= 0).all())\n\n                tgt = [-1*(i) for i in inp]\n                np.negative(inp, out=out)\n                assert_equal(out, tgt, err_msg=msg)\n\n                for v in [np.nan, -np.inf, np.inf]:\n                    for i in range(inp.size):\n                        d = np.arange(inp.size, dtype=dt)\n                        inp[:] = -d\n                        inp[i] = v\n                        d[i] = -v if v == -np.inf else v\n                        assert_array_equal(np.abs(inp), d, err_msg=msg)\n                        np.abs(inp, out=out)\n                        assert_array_equal(out, d, err_msg=msg)\n\n                        assert_array_equal(-inp, -1*inp, err_msg=msg)\n                        d = -1 * inp\n                        np.negative(inp, out=out)\n                        assert_array_equal(out, d, err_msg=msg)\n\n    def test_lower_align(self):\n        # check data that is not aligned to element size\n        # i.e. doubles are aligned to 4 bytes on i386\n        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)\n        assert_equal(np.abs(d), d)\n        assert_equal(np.negative(d), -d)\n        np.negative(d, out=d)\n        np.negative(np.ones_like(d), out=d)\n        np.abs(d, out=d)\n        np.abs(np.ones_like(d), out=d)\n\n\nclass TestPositive:\n    def test_valid(self):\n        valid_dtypes = [int, float, complex, object]\n        for dtype in valid_dtypes:\n            x = np.arange(5, dtype=dtype)\n            result = np.positive(x)\n            assert_equal(x, result, err_msg=str(dtype))\n\n    def test_invalid(self):\n        with assert_raises(TypeError):\n            np.positive(True)\n        with assert_raises(TypeError):\n            np.positive(np.datetime64('2000-01-01'))\n        with assert_raises(TypeError):\n            np.positive(np.array(['foo'], dtype=str))\n        with assert_raises(TypeError):\n            np.positive(np.array(['bar'], dtype=object))\n\n\nclass TestSpecialMethods:\n    def test_wrap(self):\n\n        class with_wrap:\n            def __array__(self):\n                return np.zeros(1)\n\n            def __array_wrap__(self, arr, context):\n                r = with_wrap()\n                r.arr = arr\n                r.context = context\n                return r\n\n        a = with_wrap()\n        x = ncu.minimum(a, a)\n        assert_equal(x.arr, np.zeros(1))\n        func, args, i = x.context\n        assert_(func is ncu.minimum)\n        assert_equal(len(args), 2)\n        assert_equal(args[0], a)\n        assert_equal(args[1], a)\n        assert_equal(i, 0)\n\n    def test_wrap_and_prepare_out(self):\n        # Calling convention for out should not affect how special methods are\n        # called\n\n        class StoreArrayPrepareWrap(np.ndarray):\n            _wrap_args = None\n            _prepare_args = None\n            def __new__(cls):\n                return np.zeros(()).view(cls)\n            def __array_wrap__(self, obj, context):\n                self._wrap_args = context[1]\n                return obj\n            def __array_prepare__(self, obj, context):\n                self._prepare_args = context[1]\n                return obj\n            @property\n            def args(self):\n                # We need to ensure these are fetched at the same time, before\n                # any other ufuncs are called by the assertions\n                return (self._prepare_args, self._wrap_args)\n            def __repr__(self):\n                return "a" # for short test output\n\n        def do_test(f_call, f_expected):\n            a = StoreArrayPrepareWrap()\n            f_call(a)\n            p, w = a.args\n            expected = f_expected(a)\n            try:\n                assert_equal(p, expected)\n                assert_equal(w, expected)\n            except AssertionError:\n                # assert_equal produces truly useless error messages\n                raise AssertionError("\n".join([\n                    "Bad arguments passed in ufunc call",\n                    " expected: {}".format(expected),\n                    " __array_prepare__ got: {}".format(p),\n                    " __array_wrap__ got: {}".format(w)\n                ]))\n\n        # method not on the out argument\n        do_test(lambda a: np.add(a, 0), lambda a: (a, 0))\n        do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0))\n        do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0))\n        do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0))\n\n        # method on the out argument\n        do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a))\n        do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a))\n        do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a))\n\n    def test_wrap_with_iterable(self):\n        # test fix for bug #1026:\n\n        class with_wrap(np.ndarray):\n            __array_priority__ = 10\n\n            def __new__(cls):\n                return np.asarray(1).view(cls).copy()\n\n            def __array_wrap__(self, arr, context):\n                return arr.view(type(self))\n\n        a = with_wrap()\n        x = ncu.multiply(a, (1, 2, 3))\n        assert_(isinstance(x, with_wrap))\n        assert_array_equal(x, np.array((1, 2, 3)))\n\n    def test_priority_with_scalar(self):\n        # test fix for bug #826:\n\n        class A(np.ndarray):\n            __array_priority__ = 10\n\n            def __new__(cls):\n                return np.asarray(1.0, 'float64').view(cls).copy()\n\n        a = A()\n        x = np.float64(1)*a\n        assert_(isinstance(x, A))\n        assert_array_equal(x, np.array(1))\n\n    def test_old_wrap(self):\n\n        class with_wrap:\n            def __array__(self):\n                return np.zeros(1)\n\n            def __array_wrap__(self, arr):\n                r = with_wrap()\n                r.arr = arr\n                return r\n\n        a = with_wrap()\n        x = ncu.minimum(a, a)\n        assert_equal(x.arr, np.zeros(1))\n\n    def test_priority(self):\n\n        class A:\n            def __array__(self):\n                return np.zeros(1)\n\n            def __array_wrap__(self, arr, context):\n                r = type(self)()\n                r.arr = arr\n                r.context = context\n                return r\n\n        class B(A):\n            __array_priority__ = 20.\n\n        class C(A):\n            __array_priority__ = 40.\n\n        x = np.zeros(1)\n        a = A()\n        b = B()\n        c = C()\n        f = ncu.minimum\n        assert_(type(f(x, x)) is np.ndarray)\n        assert_(type(f(x, a)) is A)\n        assert_(type(f(x, b)) is B)\n        assert_(type(f(x, c)) is C)\n        assert_(type(f(a, x)) is A)\n        assert_(type(f(b, x)) is B)\n        assert_(type(f(c, x)) is C)\n\n        assert_(type(f(a, a)) is A)\n        assert_(type(f(a, b)) is B)\n        assert_(type(f(b, a)) is B)\n        assert_(type(f(b, b)) is B)\n        assert_(type(f(b, c)) is C)\n        assert_(type(f(c, b)) is C)\n        assert_(type(f(c, c)) is C)\n\n        assert_(type(ncu.exp(a)) is A)\n        assert_(type(ncu.exp(b)) is B)\n        assert_(type(ncu.exp(c)) is C)\n\n    def test_failing_wrap(self):\n\n        class A:\n            def __array__(self):\n                return np.zeros(2)\n\n            def __array_wrap__(self, arr, context):\n                raise RuntimeError\n\n        a = A()\n        assert_raises(RuntimeError, ncu.maximum, a, a)\n        assert_raises(RuntimeError, ncu.maximum.reduce, a)\n\n    def test_failing_out_wrap(self):\n\n        singleton = np.array([1.0])\n\n        class Ok(np.ndarray):\n            def __array_wrap__(self, obj):\n                return singleton\n\n        class Bad(np.ndarray):\n            def __array_wrap__(self, obj):\n                raise RuntimeError\n\n        ok = np.empty(1).view(Ok)\n        bad = np.empty(1).view(Bad)\n        # double-free (segfault) of "ok" if "bad" raises an exception\n        for i in range(10):\n            assert_raises(RuntimeError, ncu.frexp, 1, ok, bad)\n\n    def test_none_wrap(self):\n        # Tests that issue #8507 is resolved. 
Previously, this would segfault\n\n class A:\n def __array__(self):\n return np.zeros(1)\n\n def __array_wrap__(self, arr, context=None):\n return None\n\n a = A()\n assert_equal(ncu.maximum(a, a), None)\n\n def test_default_prepare(self):\n\n class with_wrap:\n __array_priority__ = 10\n\n def __array__(self):\n return np.zeros(1)\n\n def __array_wrap__(self, arr, context):\n return arr\n\n a = with_wrap()\n x = ncu.minimum(a, a)\n assert_equal(x, np.zeros(1))\n assert_equal(type(x), np.ndarray)\n\n def test_prepare(self):\n\n class with_prepare(np.ndarray):\n __array_priority__ = 10\n\n def __array_prepare__(self, arr, context):\n # make sure we can return a new\n return np.array(arr).view(type=with_prepare)\n\n a = np.array(1).view(type=with_prepare)\n x = np.add(a, a)\n assert_equal(x, np.array(2))\n assert_equal(type(x), with_prepare)\n\n def test_prepare_out(self):\n\n class with_prepare(np.ndarray):\n __array_priority__ = 10\n\n def __array_prepare__(self, arr, context):\n return np.array(arr).view(type=with_prepare)\n\n a = np.array([1]).view(type=with_prepare)\n x = np.add(a, a, a)\n # Returned array is new, because of the strange\n # __array_prepare__ above\n assert_(not np.shares_memory(x, a))\n assert_equal(x, np.array([2]))\n assert_equal(type(x), with_prepare)\n\n def test_failing_prepare(self):\n\n class A:\n def __array__(self):\n return np.zeros(1)\n\n def __array_prepare__(self, arr, context=None):\n raise RuntimeError\n\n a = A()\n assert_raises(RuntimeError, ncu.maximum, a, a)\n\n def test_array_too_many_args(self):\n\n class A(object):\n def __array__(self, dtype, context):\n return np.zeros(1)\n\n a = A()\n assert_raises_regex(TypeError, '2 required positional', np.sum, a)\n\n def test_ufunc_override(self):\n # check override works even with instance with high priority.\n class A:\n def __array_ufunc__(self, func, method, *inputs, **kwargs):\n return self, func, method, inputs, kwargs\n\n class MyNDArray(np.ndarray):\n __array_priority__ = 100\n\n a = A()\n b = np.array([1]).view(MyNDArray)\n res0 = np.multiply(a, b)\n res1 = np.multiply(b, b, out=a)\n\n # self\n assert_equal(res0[0], a)\n assert_equal(res1[0], a)\n assert_equal(res0[1], np.multiply)\n assert_equal(res1[1], np.multiply)\n assert_equal(res0[2], '__call__')\n assert_equal(res1[2], '__call__')\n assert_equal(res0[3], (a, b))\n assert_equal(res1[3], (b, b))\n assert_equal(res0[4], {})\n assert_equal(res1[4], {'out': (a,)})\n\n def test_ufunc_override_mro(self):\n\n # Some multi arg functions for testing.\n def tres_mul(a, b, c):\n return a * b * c\n\n def quatro_mul(a, b, c, d):\n return a * b * c * d\n\n # Make these into ufuncs.\n three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1)\n four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1)\n\n class A:\n def __array_ufunc__(self, func, method, *inputs, **kwargs):\n return \"A\"\n\n class ASub(A):\n def __array_ufunc__(self, func, method, *inputs, **kwargs):\n return \"ASub\"\n\n class B:\n def __array_ufunc__(self, func, method, *inputs, **kwargs):\n return \"B\"\n\n class C:\n def __init__(self):\n self.count = 0\n\n def __array_ufunc__(self, func, method, *inputs, **kwargs):\n self.count += 1\n return NotImplemented\n\n class CSub(C):\n def __array_ufunc__(self, func, method, *inputs, **kwargs):\n self.count += 1\n return NotImplemented\n\n a = A()\n a_sub = ASub()\n b = B()\n c = C()\n\n # Standard\n res = np.multiply(a, a_sub)\n assert_equal(res, \"ASub\")\n res = np.multiply(a_sub, b)\n assert_equal(res, \"ASub\")\n\n # With 1 NotImplemented\n res = 
np.multiply(c, a)\n assert_equal(res, \"A\")\n assert_equal(c.count, 1)\n # Check our counter works, so we can trust tests below.\n res = np.multiply(c, a)\n assert_equal(c.count, 2)\n\n # Both NotImplemented.\n c = C()\n c_sub = CSub()\n assert_raises(TypeError, np.multiply, c, c_sub)\n assert_equal(c.count, 1)\n assert_equal(c_sub.count, 1)\n c.count = c_sub.count = 0\n assert_raises(TypeError, np.multiply, c_sub, c)\n assert_equal(c.count, 1)\n assert_equal(c_sub.count, 1)\n c.count = 0\n assert_raises(TypeError, np.multiply, c, c)\n assert_equal(c.count, 1)\n c.count = 0\n assert_raises(TypeError, np.multiply, 2, c)\n assert_equal(c.count, 1)\n\n # Ternary testing.\n assert_equal(three_mul_ufunc(a, 1, 2), \"A\")\n assert_equal(three_mul_ufunc(1, a, 2), \"A\")\n assert_equal(three_mul_ufunc(1, 2, a), \"A\")\n\n assert_equal(three_mul_ufunc(a, a, 6), \"A\")\n assert_equal(three_mul_ufunc(a, 2, a), \"A\")\n assert_equal(three_mul_ufunc(a, 2, b), \"A\")\n assert_equal(three_mul_ufunc(a, 2, a_sub), \"ASub\")\n assert_equal(three_mul_ufunc(a, a_sub, 3), \"ASub\")\n c.count = 0\n assert_equal(three_mul_ufunc(c, a_sub, 3), \"ASub\")\n assert_equal(c.count, 1)\n c.count = 0\n assert_equal(three_mul_ufunc(1, a_sub, c), \"ASub\")\n assert_equal(c.count, 0)\n\n c.count = 0\n assert_equal(three_mul_ufunc(a, b, c), \"A\")\n assert_equal(c.count, 0)\n c_sub.count = 0\n assert_equal(three_mul_ufunc(a, b, c_sub), \"A\")\n assert_equal(c_sub.count, 0)\n assert_equal(three_mul_ufunc(1, 2, b), \"B\")\n\n assert_raises(TypeError, three_mul_ufunc, 1, 2, c)\n assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c)\n assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3)\n\n # Quaternary testing.\n assert_equal(four_mul_ufunc(a, 1, 2, 3), \"A\")\n assert_equal(four_mul_ufunc(1, a, 2, 3), \"A\")\n assert_equal(four_mul_ufunc(1, 1, a, 3), \"A\")\n assert_equal(four_mul_ufunc(1, 1, 2, a), \"A\")\n\n assert_equal(four_mul_ufunc(a, b, 2, 3), \"A\")\n assert_equal(four_mul_ufunc(1, a, 2, b), \"A\")\n assert_equal(four_mul_ufunc(b, 1, a, 3), \"B\")\n assert_equal(four_mul_ufunc(a_sub, 1, 2, a), \"ASub\")\n assert_equal(four_mul_ufunc(a, 1, 2, a_sub), \"ASub\")\n\n c = C()\n c_sub = CSub()\n assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c)\n assert_equal(c.count, 1)\n c.count = 0\n assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c)\n assert_equal(c_sub.count, 1)\n assert_equal(c.count, 1)\n c2 = C()\n c.count = c_sub.count = 0\n assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2)\n assert_equal(c_sub.count, 1)\n assert_equal(c.count, 1)\n assert_equal(c2.count, 0)\n c.count = c2.count = c_sub.count = 0\n assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c)\n assert_equal(c_sub.count, 1)\n assert_equal(c.count, 0)\n assert_equal(c2.count, 1)\n\n def test_ufunc_override_methods(self):\n\n class A:\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n return self, ufunc, method, inputs, kwargs\n\n # __call__\n a = A()\n with assert_raises(TypeError):\n np.multiply.__call__(1, a, foo='bar', answer=42)\n res = np.multiply.__call__(1, a, subok='bar', where=42)\n assert_equal(res[0], a)\n assert_equal(res[1], np.multiply)\n assert_equal(res[2], '__call__')\n assert_equal(res[3], (1, a))\n assert_equal(res[4], {'subok': 'bar', 'where': 42})\n\n # __call__, wrong args\n assert_raises(TypeError, np.multiply, a)\n assert_raises(TypeError, np.multiply, a, a, a, a)\n assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a')\n assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, 
axes=[0, 0])\n\n # reduce, positional args\n res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0')\n assert_equal(res[0], a)\n assert_equal(res[1], np.multiply)\n assert_equal(res[2], 'reduce')\n assert_equal(res[3], (a,))\n assert_equal(res[4], {'dtype':'dtype0',\n 'out': ('out0',),\n 'keepdims': 'keep0',\n 'axis': 'axis0'})\n\n # reduce, kwargs\n res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',\n keepdims='keep0', initial='init0',\n where='where0')\n assert_equal(res[0], a)\n assert_equal(res[1], np.multiply)\n assert_equal(res[2], 'reduce')\n assert_equal(res[3], (a,))\n assert_equal(res[4], {'dtype':'dtype0',\n 'out': ('out0',),\n 'keepdims': 'keep0',\n 'axis': 'axis0',\n 'initial': 'init0',\n 'where': 'where0'})\n\n # reduce, output equal to None removed, but not other explicit ones,\n # even if they are at their default value.\n res = np.multiply.reduce(a, 0, None, None, False)\n assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})\n res = np.multiply.reduce(a, out=None, axis=0, keepdims=True)\n assert_equal(res[4], {'axis': 0, 'keepdims': True})\n res = np.multiply.reduce(a, None, out=(None,), dtype=None)\n assert_equal(res[4], {'axis': None, 'dtype': None})\n res = np.multiply.reduce(a, 0, None, None, False, 2, True)\n assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,\n 'initial': 2, 'where': True})\n # np._NoValue ignored for initial\n res = np.multiply.reduce(a, 0, None, None, False,\n np._NoValue, True)\n assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,\n 'where': True})\n # None kept for initial, True for where.\n res = np.multiply.reduce(a, 0, None, None, False, None, True)\n assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,\n 'initial': None, 'where': True})\n\n # reduce, wrong args\n assert_raises(ValueError, np.multiply.reduce, a, out=())\n assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1'))\n assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0')\n\n # accumulate, pos args\n res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0')\n assert_equal(res[0], a)\n assert_equal(res[1], np.multiply)\n assert_equal(res[2], 'accumulate')\n assert_equal(res[3], (a,))\n assert_equal(res[4], {'dtype':'dtype0',\n 'out': ('out0',),\n 'axis': 'axis0'})\n\n # accumulate, kwargs\n res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0',\n out='out0')\n assert_equal(res[0], a)\n assert_equal(res[1], np.multiply)\n assert_equal(res[2], 'accumulate')\n assert_equal(res[3], (a,))\n assert_equal(res[4], {'dtype':'dtype0',\n 'out': ('out0',),\n 'axis': 'axis0'})\n\n # accumulate, output equal to None removed.\n res = np.multiply.accumulate(a, 0, None, None)\n assert_equal(res[4], {'axis': 0, 'dtype': None})\n res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1')\n assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'})\n res = np.multiply.accumulate(a, None, out=(None,), dtype=None)\n assert_equal(res[4], {'axis': None, 'dtype': None})\n\n # accumulate, wrong args\n assert_raises(ValueError, np.multiply.accumulate, a, out=())\n assert_raises(ValueError, np.multiply.accumulate, a,\n out=('out0', 'out1'))\n assert_raises(TypeError, np.multiply.accumulate, a,\n 'axis0', axis='axis0')\n\n # reduceat, pos args\n res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0')\n assert_equal(res[0], a)\n assert_equal(res[1], np.multiply)\n assert_equal(res[2], 'reduceat')\n assert_equal(res[3], (a, [4, 2]))\n assert_equal(res[4], {'dtype':'dtype0',\n 
'out': ('out0',),\n                              'axis': 'axis0'})\n\n        # reduceat, kwargs\n        res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0',\n                                   out='out0')\n        assert_equal(res[0], a)\n        assert_equal(res[1], np.multiply)\n        assert_equal(res[2], 'reduceat')\n        assert_equal(res[3], (a, [4, 2]))\n        assert_equal(res[4], {'dtype':'dtype0',\n                              'out': ('out0',),\n                              'axis': 'axis0'})\n\n        # reduceat, output equal to None removed.\n        res = np.multiply.reduceat(a, [4, 2], 0, None, None)\n        assert_equal(res[4], {'axis': 0, 'dtype': None})\n        res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt')\n        assert_equal(res[4], {'axis': None, 'dtype': 'dt'})\n        res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,))\n        assert_equal(res[4], {'axis': None, 'dtype': None})\n\n        # reduceat, wrong args\n        assert_raises(ValueError, np.multiply.reduceat, a, [4, 2], out=())\n        assert_raises(ValueError, np.multiply.reduceat, a, [4, 2],\n                      out=('out0', 'out1'))\n        assert_raises(TypeError, np.multiply.reduceat, a, [4, 2],\n                      'axis0', axis='axis0')\n\n        # outer\n        res = np.multiply.outer(a, 42)\n        assert_equal(res[0], a)\n        assert_equal(res[1], np.multiply)\n        assert_equal(res[2], 'outer')\n        assert_equal(res[3], (a, 42))\n        assert_equal(res[4], {})\n\n        # outer, wrong args\n        assert_raises(TypeError, np.multiply.outer, a)\n        assert_raises(TypeError, np.multiply.outer, a, a, a, a)\n        assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a')\n\n        # at\n        res = np.multiply.at(a, [4, 2], 'b0')\n        assert_equal(res[0], a)\n        assert_equal(res[1], np.multiply)\n        assert_equal(res[2], 'at')\n        assert_equal(res[3], (a, [4, 2], 'b0'))\n\n        # at, wrong args\n        assert_raises(TypeError, np.multiply.at, a)\n        assert_raises(TypeError, np.multiply.at, a, a, a, a)\n\n    def test_ufunc_override_out(self):\n\n        class A:\n            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n                return kwargs\n\n        class B:\n            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n                return kwargs\n\n        a = A()\n        b = B()\n        res0 = np.multiply(a, b, 'out_arg')\n        res1 = np.multiply(a, b, out='out_arg')\n        res2 = np.multiply(2, b, 'out_arg')\n        res3 = np.multiply(3, b, out='out_arg')\n        res4 = np.multiply(a, 4, 'out_arg')\n        res5 = np.multiply(a, 5, out='out_arg')\n\n        assert_equal(res0['out'][0], 'out_arg')\n        assert_equal(res1['out'][0], 'out_arg')\n        assert_equal(res2['out'][0], 'out_arg')\n        assert_equal(res3['out'][0], 'out_arg')\n        assert_equal(res4['out'][0], 'out_arg')\n        assert_equal(res5['out'][0], 'out_arg')\n\n        # ufuncs with multiple output modf and frexp.\n        res6 = np.modf(a, 'out0', 'out1')\n        res7 = np.frexp(a, 'out0', 'out1')\n        assert_equal(res6['out'][0], 'out0')\n        assert_equal(res6['out'][1], 'out1')\n        assert_equal(res7['out'][0], 'out0')\n        assert_equal(res7['out'][1], 'out1')\n\n        # While we're at it, check that default output is never passed on.\n        assert_(np.sin(a, None) == {})\n        assert_(np.sin(a, out=None) == {})\n        assert_(np.sin(a, out=(None,)) == {})\n        assert_(np.modf(a, None) == {})\n        assert_(np.modf(a, None, None) == {})\n        assert_(np.modf(a, out=(None, None)) == {})\n        with assert_raises(TypeError):\n            # Out argument must be tuple, since there are multiple outputs.\n            np.modf(a, out=None)\n\n        # don't give positional and output argument, or too many arguments.\n        # wrong number of arguments in the tuple is an error too.\n        assert_raises(TypeError, np.multiply, a, b, 'one', out='two')\n        assert_raises(TypeError, np.multiply, a, b, 'one', 'two')\n        assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))\n        assert_raises(TypeError, np.multiply, a, out=())\n        
assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))\n assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')\n assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))\n assert_raises(ValueError, np.modf, a, out=('one',))\n\n def test_ufunc_override_exception(self):\n\n class A:\n def __array_ufunc__(self, *a, **kwargs):\n raise ValueError(\"oops\")\n\n a = A()\n assert_raises(ValueError, np.negative, 1, out=a)\n assert_raises(ValueError, np.negative, a)\n assert_raises(ValueError, np.divide, 1., a)\n\n def test_ufunc_override_not_implemented(self):\n\n class A:\n def __array_ufunc__(self, *args, **kwargs):\n return NotImplemented\n\n msg = (\"operand type(s) all returned NotImplemented from \"\n \"__array_ufunc__(<ufunc 'negative'>, '__call__', <*>): 'A'\")\n with assert_raises_regex(TypeError, fnmatch.translate(msg)):\n np.negative(A())\n\n msg = (\"operand type(s) all returned NotImplemented from \"\n \"__array_ufunc__(<ufunc 'add'>, '__call__', <*>, <object *>, \"\n \"out=(1,)): 'A', 'object', 'int'\")\n with assert_raises_regex(TypeError, fnmatch.translate(msg)):\n np.add(A(), object(), out=1)\n\n def test_ufunc_override_disabled(self):\n\n class OptOut:\n __array_ufunc__ = None\n\n opt_out = OptOut()\n\n # ufuncs always raise\n msg = \"operand 'OptOut' does not support ufuncs\"\n with assert_raises_regex(TypeError, msg):\n np.add(opt_out, 1)\n with assert_raises_regex(TypeError, msg):\n np.add(1, opt_out)\n with assert_raises_regex(TypeError, msg):\n np.negative(opt_out)\n\n # opt-outs still hold even when other arguments have pathological\n # __array_ufunc__ implementations\n\n class GreedyArray:\n def __array_ufunc__(self, *args, **kwargs):\n return self\n\n greedy = GreedyArray()\n assert_(np.negative(greedy) is greedy)\n with assert_raises_regex(TypeError, msg):\n np.add(greedy, opt_out)\n with assert_raises_regex(TypeError, msg):\n np.add(greedy, 1, out=opt_out)\n\n def test_gufunc_override(self):\n # gufunc are just ufunc instances, but follow a different path,\n # so check __array_ufunc__ overrides them properly.\n class A:\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n return self, ufunc, method, inputs, kwargs\n\n inner1d = ncu_tests.inner1d\n a = A()\n res = inner1d(a, a)\n assert_equal(res[0], a)\n assert_equal(res[1], inner1d)\n assert_equal(res[2], '__call__')\n assert_equal(res[3], (a, a))\n assert_equal(res[4], {})\n\n res = inner1d(1, 1, out=a)\n assert_equal(res[0], a)\n assert_equal(res[1], inner1d)\n assert_equal(res[2], '__call__')\n assert_equal(res[3], (1, 1))\n assert_equal(res[4], {'out': (a,)})\n\n # wrong number of arguments in the tuple is an error too.\n assert_raises(TypeError, inner1d, a, out='two')\n assert_raises(TypeError, inner1d, a, a, 'one', out='two')\n assert_raises(TypeError, inner1d, a, a, 'one', 'two')\n assert_raises(ValueError, inner1d, a, a, out=('one', 'two'))\n assert_raises(ValueError, inner1d, a, a, out=())\n\n def test_ufunc_override_with_super(self):\n # NOTE: this class is used in doc/source/user/basics.subclassing.rst\n # if you make any changes here, do update it there too.\n class A(np.ndarray):\n def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):\n args = []\n in_no = []\n for i, input_ in enumerate(inputs):\n if isinstance(input_, A):\n in_no.append(i)\n args.append(input_.view(np.ndarray))\n else:\n args.append(input_)\n\n outputs = out\n out_no = []\n if outputs:\n out_args = []\n for j, output in enumerate(outputs):\n if isinstance(output, A):\n 
out_no.append(j)\n out_args.append(output.view(np.ndarray))\n else:\n out_args.append(output)\n kwargs['out'] = tuple(out_args)\n else:\n outputs = (None,) * ufunc.nout\n\n info = {}\n if in_no:\n info['inputs'] = in_no\n if out_no:\n info['outputs'] = out_no\n\n results = super().__array_ufunc__(ufunc, method,\n *args, **kwargs)\n if results is NotImplemented:\n return NotImplemented\n\n if method == 'at':\n if isinstance(inputs[0], A):\n inputs[0].info = info\n return\n\n if ufunc.nout == 1:\n results = (results,)\n\n results = tuple((np.asarray(result).view(A)\n if output is None else output)\n for result, output in zip(results, outputs))\n if results and isinstance(results[0], A):\n results[0].info = info\n\n return results[0] if len(results) == 1 else results\n\n class B:\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n if any(isinstance(input_, A) for input_ in inputs):\n return \"A!\"\n else:\n return NotImplemented\n\n d = np.arange(5.)\n # 1 input, 1 output\n a = np.arange(5.).view(A)\n b = np.sin(a)\n check = np.sin(d)\n assert_(np.all(check == b))\n assert_equal(b.info, {'inputs': [0]})\n b = np.sin(d, out=(a,))\n assert_(np.all(check == b))\n assert_equal(b.info, {'outputs': [0]})\n assert_(b is a)\n a = np.arange(5.).view(A)\n b = np.sin(a, out=a)\n assert_(np.all(check == b))\n assert_equal(b.info, {'inputs': [0], 'outputs': [0]})\n\n # 1 input, 2 outputs\n a = np.arange(5.).view(A)\n b1, b2 = np.modf(a)\n assert_equal(b1.info, {'inputs': [0]})\n b1, b2 = np.modf(d, out=(None, a))\n assert_(b2 is a)\n assert_equal(b1.info, {'outputs': [1]})\n a = np.arange(5.).view(A)\n b = np.arange(5.).view(A)\n c1, c2 = np.modf(a, out=(a, b))\n assert_(c1 is a)\n assert_(c2 is b)\n assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]})\n\n # 2 input, 1 output\n a = np.arange(5.).view(A)\n b = np.arange(5.).view(A)\n c = np.add(a, b, out=a)\n assert_(c is a)\n assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]})\n # some tests with a non-ndarray subclass\n a = np.arange(5.)\n b = B()\n assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)\n assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)\n assert_raises(TypeError, np.add, a, b)\n a = a.view(A)\n assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)\n assert_(b.__array_ufunc__(np.add, '__call__', a, b) == \"A!\")\n assert_(np.add(a, b) == \"A!\")\n # regression check for gh-9102 -- tests ufunc.reduce implicitly.\n d = np.array([[1, 2, 3], [1, 2, 3]])\n a = d.view(A)\n c = a.any()\n check = d.any()\n assert_equal(c, check)\n assert_(c.info, {'inputs': [0]})\n c = a.max()\n check = d.max()\n assert_equal(c, check)\n assert_(c.info, {'inputs': [0]})\n b = np.array(0).view(A)\n c = a.max(out=b)\n assert_equal(c, check)\n assert_(c is b)\n assert_(c.info, {'inputs': [0], 'outputs': [0]})\n check = a.max(axis=0)\n b = np.zeros_like(check).view(A)\n c = a.max(axis=0, out=b)\n assert_equal(c, check)\n assert_(c is b)\n assert_(c.info, {'inputs': [0], 'outputs': [0]})\n # simple explicit tests of reduce, accumulate, reduceat\n check = np.add.reduce(d, axis=1)\n c = np.add.reduce(a, axis=1)\n assert_equal(c, check)\n assert_(c.info, {'inputs': [0]})\n b = np.zeros_like(c)\n c = np.add.reduce(a, 1, None, b)\n assert_equal(c, check)\n assert_(c is b)\n assert_(c.info, {'inputs': [0], 'outputs': [0]})\n check = np.add.accumulate(d, axis=0)\n c = np.add.accumulate(a, axis=0)\n assert_equal(c, check)\n assert_(c.info, {'inputs': [0]})\n b = np.zeros_like(c)\n c = 
np.add.accumulate(a, 0, None, b)\n        assert_equal(c, check)\n        assert_(c is b)\n        assert_(c.info, {'inputs': [0], 'outputs': [0]})\n        indices = [0, 2, 1]\n        check = np.add.reduceat(d, indices, axis=1)\n        c = np.add.reduceat(a, indices, axis=1)\n        assert_equal(c, check)\n        assert_(c.info, {'inputs': [0]})\n        b = np.zeros_like(c)\n        c = np.add.reduceat(a, indices, 1, None, b)\n        assert_equal(c, check)\n        assert_(c is b)\n        assert_(c.info, {'inputs': [0], 'outputs': [0]})\n        # and a few tests for at\n        d = np.array([[1, 2, 3], [1, 2, 3]])\n        check = d.copy()\n        a = d.copy().view(A)\n        np.add.at(check, ([0, 1], [0, 2]), 1.)\n        np.add.at(a, ([0, 1], [0, 2]), 1.)\n        assert_equal(a, check)\n        assert_(a.info, {'inputs': [0]})\n        b = np.array(1.).view(A)\n        a = d.copy().view(A)\n        np.add.at(a, ([0, 1], [0, 2]), b)\n        assert_equal(a, check)\n        assert_(a.info, {'inputs': [0, 2]})\n\n\nclass TestChoose:\n    def test_mixed(self):\n        c = np.array([True, True])\n        a = np.array([True, True])\n        assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))\n\n\nclass TestRationalFunctions:\n    def test_lcm(self):\n        self._test_lcm_inner(np.int16)\n        self._test_lcm_inner(np.uint16)\n\n    def test_lcm_object(self):\n        self._test_lcm_inner(np.object_)\n\n    def test_gcd(self):\n        self._test_gcd_inner(np.int16)\n        self._test_gcd_inner(np.uint16)\n\n    def test_gcd_object(self):\n        self._test_gcd_inner(np.object_)\n\n    def _test_lcm_inner(self, dtype):\n        # basic use\n        a = np.array([12, 120], dtype=dtype)\n        b = np.array([20, 200], dtype=dtype)\n        assert_equal(np.lcm(a, b), [60, 600])\n\n        if not issubclass(dtype, np.unsignedinteger):\n            # negatives are ignored\n            a = np.array([12, -12, 12, -12], dtype=dtype)\n            b = np.array([20, 20, -20, -20], dtype=dtype)\n            assert_equal(np.lcm(a, b), [60]*4)\n\n        # reduce\n        a = np.array([3, 12, 20], dtype=dtype)\n        assert_equal(np.lcm.reduce(a), 60)\n\n        # broadcasting, and a test including 0\n        a = np.arange(6).astype(dtype)\n        b = 20\n        assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20])\n\n    def _test_gcd_inner(self, dtype):\n        # basic use\n        a = np.array([12, 120], dtype=dtype)\n        b = np.array([20, 200], dtype=dtype)\n        assert_equal(np.gcd(a, b), [4, 40])\n\n        if not issubclass(dtype, np.unsignedinteger):\n            # negatives are ignored\n            a = np.array([12, -12, 12, -12], dtype=dtype)\n            b = np.array([20, 20, -20, -20], dtype=dtype)\n            assert_equal(np.gcd(a, b), [4]*4)\n\n        # reduce\n        a = np.array([15, 25, 35], dtype=dtype)\n        assert_equal(np.gcd.reduce(a), 5)\n\n        # broadcasting, and a test including 0\n        a = np.arange(6).astype(dtype)\n        b = 20\n        assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5])\n\n    def test_lcm_overflow(self):\n        # verify that we don't overflow when a*b does overflow\n        big = np.int32(np.iinfo(np.int32).max // 11)\n        a = 2*big\n        b = 5*big\n        assert_equal(np.lcm(a, b), 10*big)\n\n    def test_gcd_overflow(self):\n        for dtype in (np.int32, np.int64):\n            # verify that we don't overflow when taking abs(x)\n            # not relevant for lcm, where the result is unrepresentable anyway\n            a = dtype(np.iinfo(dtype).min) # negative power of two\n            q = -(a // 4)\n            assert_equal(np.gcd(a, q*3), q)\n            assert_equal(np.gcd(a, -q*3), q)\n\n    def test_decimal(self):\n        from decimal import Decimal\n        a = np.array([1, 1, -1, -1]) * Decimal('0.20')\n        b = np.array([1, -1, 1, -1]) * Decimal('0.12')\n\n        assert_equal(np.gcd(a, b), 4*[Decimal('0.04')])\n        assert_equal(np.lcm(a, b), 4*[Decimal('0.60')])\n\n    def test_float(self):\n        # not well-defined on float due to rounding errors\n        assert_raises(TypeError, np.gcd, 0.3, 0.4)\n        assert_raises(TypeError, np.lcm, 0.3, 0.4)\n\n    def test_builtin_long(self):\n        # sanity check that array coercion is alright for builtin longs\n        assert_equal(np.array(2**200).item(), 2**200)\n\n        # expressed as prime factors\n        a = np.array(2**100 * 3**5)\n        b = np.array([2**100 * 5**7, 2**50 * 3**10])\n        assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5])\n        assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10])\n\n        assert_equal(np.gcd(2**100, 3**100), 1)\n\n\nclass TestRoundingFunctions:\n\n    def test_object_direct(self):\n        """ test direct implementation of these magic methods """\n        class C:\n            def __floor__(self):\n                return 1\n            def __ceil__(self):\n                return 2\n            def __trunc__(self):\n                return 3\n\n        arr = np.array([C(), C()])\n        assert_equal(np.floor(arr), [1, 1])\n        assert_equal(np.ceil(arr), [2, 2])\n        assert_equal(np.trunc(arr), [3, 3])\n\n    def test_object_indirect(self):\n        """ test implementations via __float__ """\n        class C:\n            def __float__(self):\n                return -2.5\n\n        arr = np.array([C(), C()])\n        assert_equal(np.floor(arr), [-3, -3])\n        assert_equal(np.ceil(arr), [-2, -2])\n        with pytest.raises(TypeError):\n            np.trunc(arr) # consistent with math.trunc\n\n    def test_fraction(self):\n        f = Fraction(-4, 3)\n        assert_equal(np.floor(f), -2)\n        assert_equal(np.ceil(f), -1)\n        assert_equal(np.trunc(f), -1)\n\n\nclass TestComplexFunctions:\n    funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,\n             np.arctanh, np.sin, np.cos, np.tan, np.exp,\n             np.exp2, np.log, np.sqrt, np.log10, np.log2,\n             np.log1p]\n\n    def test_it(self):\n        for f in self.funcs:\n            if f is np.arccosh:\n                x = 1.5\n            else:\n                x = .5\n            fr = f(x)\n            fz = f(complex(x))\n            assert_almost_equal(fz.real, fr, err_msg='real part %s' % f)\n            assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f)\n\n    def test_precisions_consistent(self):\n        z = 1 + 1j\n        for f in self.funcs:\n            fcf = f(np.csingle(z))\n            fcd = f(np.cdouble(z))\n            fcl = f(np.clongdouble(z))\n            assert_almost_equal(fcf, fcd, decimal=6, err_msg='fcf-fcd %s' % f)\n            assert_almost_equal(fcl, fcd, decimal=15, err_msg='fcl-fcd %s' % f)\n\n    def test_branch_cuts(self):\n        # check branch cuts and continuity on them\n        _check_branch_cut(np.log, -0.5, 1j, 1, -1, True)\n        _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True)\n        _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True)\n        _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True)\n        _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True)\n\n        _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True)\n        _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True)\n        _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True)\n\n        _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True)\n        _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True)\n        _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True)\n\n        # check against bogus branch cuts: assert continuity between quadrants\n        _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1)\n        _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1)\n        _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1)\n\n        _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1)\n        _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1)\n        _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1)\n\n    def test_branch_cuts_complex64(self):\n        # check branch cuts and continuity on them\n        _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64)\n        _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64)\n        _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64)\n        _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, 
np.complex64)\n _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64)\n\n _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)\n _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)\n _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)\n\n _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)\n _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64)\n _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)\n\n # check against bogus branch cuts: assert continuity between quadrants\n _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)\n _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)\n _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64)\n\n _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64)\n _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64)\n _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64)\n\n def test_against_cmath(self):\n import cmath\n\n points = [-1-1j, -1+1j, +1-1j, +1+1j]\n name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',\n 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}\n atol = 4*np.finfo(complex).eps\n for func in self.funcs:\n fname = func.__name__.split('.')[-1]\n cname = name_map.get(fname, fname)\n try:\n cfunc = getattr(cmath, cname)\n except AttributeError:\n continue\n for p in points:\n a = complex(func(np.complex_(p)))\n b = cfunc(p)\n assert_(abs(a - b) < atol, \"%s %s: %s; cmath: %s\" % (fname, p, a, b))\n\n @pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex])\n def test_loss_of_precision(self, dtype):\n \"\"\"Check loss of precision in complex arc* functions\"\"\"\n\n # Check against known-good functions\n\n info = np.finfo(dtype)\n real_dtype = dtype(0.).real.dtype\n eps = info.eps\n\n def check(x, rtol):\n x = x.astype(real_dtype)\n\n z = x.astype(dtype)\n d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)\n assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),\n 'arcsinh'))\n\n z = (1j*x).astype(dtype)\n d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1)\n assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),\n 'arcsin'))\n\n z = x.astype(dtype)\n d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1)\n assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),\n 'arctanh'))\n\n z = (1j*x).astype(dtype)\n d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1)\n assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),\n 'arctan'))\n\n # The switchover was chosen as 1e-3; hence there can be up to\n # ~eps/1e-3 of relative cancellation error before it\n\n x_series = np.logspace(-20, -3.001, 200)\n x_basic = np.logspace(-2.999, 0, 10, endpoint=False)\n\n if dtype is np.longcomplex:\n # It's not guaranteed that the system-provided arc functions\n # are accurate down to a few epsilons. (Eg. 
on Linux 64-bit)\n # So, give more leeway for long complex tests here:\n # Can use 2.1 for > Ubuntu LTS Trusty (2014), glibc = 2.19.\n if skip_longcomplex_msg:\n pytest.skip(skip_longcomplex_msg)\n check(x_series, 50.0*eps)\n else:\n check(x_series, 2.1*eps)\n check(x_basic, 2.0*eps/1e-3)\n\n # Check a few points\n\n z = np.array([1e-5*(1+1j)], dtype=dtype)\n p = 9.999999999333333333e-6 + 1.000000000066666666e-5j\n d = np.absolute(1-np.arctanh(z)/p)\n assert_(np.all(d < 1e-15))\n\n p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j\n d = np.absolute(1-np.arcsinh(z)/p)\n assert_(np.all(d < 1e-15))\n\n p = 9.999999999333333333e-6j + 1.000000000066666666e-5\n d = np.absolute(1-np.arctan(z)/p)\n assert_(np.all(d < 1e-15))\n\n p = 1.0000000000333333333e-5j + 9.999999999666666667e-6\n d = np.absolute(1-np.arcsin(z)/p)\n assert_(np.all(d < 1e-15))\n\n # Check continuity across switchover points\n\n def check(func, z0, d=1):\n z0 = np.asarray(z0, dtype=dtype)\n zp = z0 + abs(z0) * d * eps * 2\n zm = z0 - abs(z0) * d * eps * 2\n assert_(np.all(zp != zm), (zp, zm))\n\n # NB: the cancellation error at the switchover is at least eps\n good = (abs(func(zp) - func(zm)) < 2*eps)\n assert_(np.all(good), (func, z0[~good]))\n\n for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan):\n pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in(-1e-3, 0, 1e-3)\n if rp != 0 or ip != 0]\n check(func, pts, 1)\n check(func, pts, 1j)\n check(func, pts, 1+1j)\n\n\nclass TestAttributes:\n def test_attributes(self):\n add = ncu.add\n assert_equal(add.__name__, 'add')\n assert_(add.ntypes >= 18) # don't fail if types added\n assert_('ii->i' in add.types)\n assert_equal(add.nin, 2)\n assert_equal(add.nout, 1)\n assert_equal(add.identity, 0)\n\n def test_doc(self):\n # don't bother checking the long list of kwargs, which are likely to\n # change\n assert_(ncu.add.__doc__.startswith(\n \"add(x1, x2, /, out=None, *, where=True\"))\n assert_(ncu.frexp.__doc__.startswith(\n \"frexp(x[, out1, out2], / [, out=(None, None)], *, where=True\"))\n\n\nclass TestSubclass:\n\n def test_subclass_op(self):\n\n class simple(np.ndarray):\n def __new__(subtype, shape):\n self = np.ndarray.__new__(subtype, shape, dtype=object)\n self.fill(0)\n return self\n\n a = simple((3, 4))\n assert_equal(a+a, a)\n\n\nclass TestFrompyfunc(object):\n\n def test_identity(self):\n def mul(a, b):\n return a * b\n\n # with identity=value\n mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=1)\n assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)\n assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)\n assert_equal(mul_ufunc.reduce([]), 1)\n\n # with identity=None (reorderable)\n mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=None)\n assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)\n assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)\n assert_raises(ValueError, lambda: mul_ufunc.reduce([]))\n\n # with no identity (not reorderable)\n mul_ufunc = np.frompyfunc(mul, nin=2, nout=1)\n assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)\n assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)))\n assert_raises(ValueError, lambda: mul_ufunc.reduce([]))\n\n\ndef _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,\n dtype=complex):\n \"\"\"\n Check for a branch cut in a function.\n\n Assert that `x0` lies on a branch cut of function `f` and `f` is\n continuous from the direction `dx`.\n\n Parameters\n ----------\n f : func\n Function to check\n x0 : array-like\n Point on branch cut\n dx 
: array-like\n Direction to check continuity in\n re_sign, im_sign : {1, -1}\n Change of sign of the real or imaginary part expected\n sig_zero_ok : bool\n Whether to check if the branch cut respects signed zero (if applicable)\n dtype : dtype\n Dtype to check (should be complex)\n\n \"\"\"\n x0 = np.atleast_1d(x0).astype(dtype)\n dx = np.atleast_1d(dx).astype(dtype)\n\n if np.dtype(dtype).char == 'F':\n scale = np.finfo(dtype).eps * 1e2\n atol = np.float32(1e-2)\n else:\n scale = np.finfo(dtype).eps * 1e3\n atol = 1e-4\n\n y0 = f(x0)\n yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))\n ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx))\n\n assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp))\n assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp))\n assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))\n assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))\n\n if sig_zero_ok:\n # check that signed zeros also work as a displacement\n jr = (x0.real == 0) & (dx.real != 0)\n ji = (x0.imag == 0) & (dx.imag != 0)\n if np.any(jr):\n x = x0[jr]\n x.real = np.NZERO\n ym = f(x)\n assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym))\n assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym))\n\n if np.any(ji):\n x = x0[ji]\n x.imag = np.NZERO\n ym = f(x)\n assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym))\n assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym))\n\ndef test_copysign():\n assert_(np.copysign(1, -1) == -1)\n with np.errstate(divide=\"ignore\"):\n assert_(1 / np.copysign(0, -1) < 0)\n assert_(1 / np.copysign(0, 1) > 0)\n assert_(np.signbit(np.copysign(np.nan, -1)))\n assert_(not np.signbit(np.copysign(np.nan, 1)))\n\ndef _test_nextafter(t):\n one = t(1)\n two = t(2)\n zero = t(0)\n eps = np.finfo(t).eps\n assert_(np.nextafter(one, two) - one == eps)\n assert_(np.nextafter(one, zero) - one < 0)\n assert_(np.isnan(np.nextafter(np.nan, one)))\n assert_(np.isnan(np.nextafter(one, np.nan)))\n assert_(np.nextafter(one, one) == one)\n\ndef test_nextafter():\n return _test_nextafter(np.float64)\n\n\ndef test_nextafterf():\n return _test_nextafter(np.float32)\n\n\[email protected](np.finfo(np.double) == np.finfo(np.longdouble),\n reason=\"long double is same as double\")\[email protected](condition=platform.machine().startswith(\"ppc64\"),\n reason=\"IBM double double\")\ndef test_nextafterl():\n return _test_nextafter(np.longdouble)\n\n\ndef test_nextafter_0():\n for t, direction in itertools.product(np.sctypes['float'], (1, -1)):\n tiny = np.finfo(t).tiny\n assert_(0. 
< direction * np.nextafter(t(0), t(direction)) < tiny)\n assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0)\n\ndef _test_spacing(t):\n one = t(1)\n eps = np.finfo(t).eps\n nan = t(np.nan)\n inf = t(np.inf)\n with np.errstate(invalid='ignore'):\n assert_(np.spacing(one) == eps)\n assert_(np.isnan(np.spacing(nan)))\n assert_(np.isnan(np.spacing(inf)))\n assert_(np.isnan(np.spacing(-inf)))\n assert_(np.spacing(t(1e30)) != 0)\n\ndef test_spacing():\n return _test_spacing(np.float64)\n\ndef test_spacingf():\n return _test_spacing(np.float32)\n\n\[email protected](np.finfo(np.double) == np.finfo(np.longdouble),\n reason=\"long double is same as double\")\[email protected](condition=platform.machine().startswith(\"ppc64\"),\n reason=\"IBM double double\")\ndef test_spacingl():\n return _test_spacing(np.longdouble)\n\ndef test_spacing_gfortran():\n # Reference from this fortran file, built with gfortran 4.3.3 on linux\n # 32bits:\n # PROGRAM test_spacing\n # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)\n # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)\n #\n # WRITE(*,*) spacing(0.00001_DBL)\n # WRITE(*,*) spacing(1.0_DBL)\n # WRITE(*,*) spacing(1000._DBL)\n # WRITE(*,*) spacing(10500._DBL)\n #\n # WRITE(*,*) spacing(0.00001_SGL)\n # WRITE(*,*) spacing(1.0_SGL)\n # WRITE(*,*) spacing(1000._SGL)\n # WRITE(*,*) spacing(10500._SGL)\n # END PROGRAM\n ref = {np.float64: [1.69406589450860068E-021,\n 2.22044604925031308E-016,\n 1.13686837721616030E-013,\n 1.81898940354585648E-012],\n np.float32: [9.09494702E-13,\n 1.19209290E-07,\n 6.10351563E-05,\n 9.76562500E-04]}\n\n for dt, dec_ in zip([np.float32, np.float64], (10, 20)):\n x = np.array([1e-5, 1, 1000, 10500], dtype=dt)\n assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_)\n\ndef test_nextafter_vs_spacing():\n # XXX: spacing does not handle long double yet\n for t in [np.float32, np.float64]:\n for _f in [1, 1e-5, 1000]:\n f = t(_f)\n f1 = t(_f + 1)\n assert_(np.nextafter(f, f1) - f == np.spacing(f))\n\ndef test_pos_nan():\n \"\"\"Check np.nan is a positive nan.\"\"\"\n assert_(np.signbit(np.nan) == 0)\n\ndef test_reduceat():\n \"\"\"Test bug in reduceat when structured arrays are not copied.\"\"\"\n db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)])\n a = np.empty([100], dtype=db)\n a['name'] = 'Simple'\n a['time'] = 10\n a['value'] = 100\n indx = [0, 7, 15, 25]\n\n h2 = []\n val1 = indx[0]\n for val2 in indx[1:]:\n h2.append(np.add.reduce(a['value'][val1:val2]))\n val1 = val2\n h2.append(np.add.reduce(a['value'][val1:]))\n h2 = np.array(h2)\n\n # test buffered -- this should work\n h1 = np.add.reduceat(a['value'], indx)\n assert_array_almost_equal(h1, h2)\n\n # This is when the error occurs.\n # test no buffer\n np.setbufsize(32)\n h1 = np.add.reduceat(a['value'], indx)\n np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT)\n assert_array_almost_equal(h1, h2)\n\ndef test_reduceat_empty():\n \"\"\"Reduceat should work with empty arrays\"\"\"\n indices = np.array([], 'i4')\n x = np.array([], 'f8')\n result = np.add.reduceat(x, indices)\n assert_equal(result.dtype, x.dtype)\n assert_equal(result.shape, (0,))\n # Another case with a slightly different zero-sized shape\n x = np.ones((5, 2))\n result = np.add.reduceat(x, [], axis=0)\n assert_equal(result.dtype, x.dtype)\n assert_equal(result.shape, (0, 2))\n result = np.add.reduceat(x, [], axis=1)\n assert_equal(result.dtype, x.dtype)\n assert_equal(result.shape, (5, 0))\n\ndef test_complex_nan_comparisons():\n nans = 
[complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)]\n fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1),\n complex(1, 1), complex(-1, -1), complex(0, 0)]\n\n with np.errstate(invalid='ignore'):\n for x in nans + fins:\n x = np.array([x])\n for y in nans + fins:\n y = np.array([y])\n\n if np.isfinite(x) and np.isfinite(y):\n continue\n\n assert_equal(x < y, False, err_msg=\"%r < %r\" % (x, y))\n assert_equal(x > y, False, err_msg=\"%r > %r\" % (x, y))\n assert_equal(x <= y, False, err_msg=\"%r <= %r\" % (x, y))\n assert_equal(x >= y, False, err_msg=\"%r >= %r\" % (x, y))\n assert_equal(x == y, False, err_msg=\"%r == %r\" % (x, y))\n\n\ndef test_rint_big_int():\n # np.rint bug for large integer values on Windows 32-bit and MKL\n # https://github.com/numpy/numpy/issues/6685\n val = 4607998452777363968\n # This is exactly representable in floating point\n assert_equal(val, int(float(val)))\n # Rint should not change the value\n assert_equal(val, np.rint(val))\n\[email protected]('ftype', [np.float32, np.float64])\ndef test_memoverlap_accumulate(ftype):\n # Reproduces bug https://github.com/numpy/numpy/issues/15597\n arr = np.array([0.61, 0.60, 0.77, 0.41, 0.19], dtype=ftype)\n out_max = np.array([0.61, 0.61, 0.77, 0.77, 0.77], dtype=ftype)\n out_min = np.array([0.61, 0.60, 0.60, 0.41, 0.19], dtype=ftype)\n assert_equal(np.maximum.accumulate(arr), out_max)\n assert_equal(np.minimum.accumulate(arr), out_min)\n\ndef test_signaling_nan_exceptions():\n with assert_no_warnings():\n a = np.ndarray(shape=(), dtype='float32', buffer=b'\\x00\\xe0\\xbf\\xff')\n np.isnan(a)\n\[email protected](\"arr\", [\n np.arange(2),\n np.matrix([0, 1]),\n np.matrix([[0, 1], [2, 5]]),\n ])\ndef test_outer_subclass_preserve(arr):\n # for gh-8661\n class foo(np.ndarray): pass\n actual = np.multiply.outer(arr.view(foo), arr.view(foo))\n assert actual.__class__.__name__ == 'foo'\n\ndef test_outer_bad_subclass():\n class BadArr1(np.ndarray):\n def __array_finalize__(self, obj):\n # The outer call reshapes to 3 dims, try to do a bad reshape.\n if self.ndim == 3:\n self.shape = self.shape + (1,)\n\n def __array_prepare__(self, obj, context=None):\n return obj\n\n class BadArr2(np.ndarray):\n def __array_finalize__(self, obj):\n if isinstance(obj, BadArr2):\n # outer inserts 1-sized dims. In that case disturb them.\n if self.shape[-1] == 1:\n self.shape = self.shape[::-1]\n\n def __array_prepare__(self, obj, context=None):\n return obj\n\n for cls in [BadArr1, BadArr2]:\n arr = np.ones((2, 3)).view(cls)\n with assert_raises(TypeError) as a:\n # The first array gets reshaped (not the second one)\n np.add.outer(arr, [1, 2])\n\n # This actually works, since we only see the reshaping error:\n arr = np.ones((2, 3)).view(cls)\n assert type(np.add.outer([1, 2], arr)) is cls\n\ndef test_outer_exceeds_maxdims():\n deep = np.ones((1,) * 17)\n with assert_raises(ValueError):\n np.add.outer(deep, deep)\n\n"
] |
[
[
"numpy.setbufsize",
"numpy.minimum",
"numpy.sqrt",
"numpy.float_power",
"numpy.core.umath.log",
"numpy.fmin.reduce",
"numpy.all",
"numpy.csingle",
"numpy.exp",
"numpy.where",
"numpy.square",
"numpy.core.umath.maximum",
"numpy.core.umath.add.__doc__.startswith",
"numpy.multiply.accumulate",
"numpy.multiply.outer",
"numpy.sin",
"numpy.cbrt",
"numpy.ceil",
"numpy.copysign",
"numpy.zeros",
"numpy.bitwise_or",
"numpy.testing.assert_raises_regex",
"numpy.log",
"numpy.multiply",
"numpy.power",
"numpy.minimum.reduce",
"numpy.testing.assert_raises",
"numpy.equal",
"numpy.fmax",
"numpy.floor",
"numpy.positive",
"numpy.array",
"numpy.core.umath.multiply",
"numpy.multiply.__call__",
"numpy.fmin",
"numpy.absolute",
"numpy.core.umath.ldexp",
"numpy.shares_memory",
"numpy.datetime64",
"numpy.testing.assert_array_equal",
"numpy.add",
"numpy.random.uniform",
"numpy.isinf",
"numpy.arctanh",
"numpy.testing.assert_no_warnings",
"numpy.lcm.reduce",
"numpy.asarray",
"numpy.core.umath.minimum",
"numpy.arctan",
"numpy.core.umath.frexp.__doc__.startswith",
"numpy.ndarray",
"numpy.seterr",
"numpy.complex_",
"numpy.iinfo",
"numpy.nextafter",
"numpy.testing._gen_alignment_data",
"numpy.may_share_memory",
"numpy.testing.suppress_warnings",
"numpy.arcsin",
"numpy.core.umath.heaviside",
"numpy.logical_and.reduce",
"numpy.frompyfunc",
"numpy.atleast_1d",
"numpy.logical_or.reduce",
"numpy.argmax",
"numpy.float32",
"numpy.reciprocal",
"numpy.logical_xor.reduce",
"numpy.core.umath.exp",
"numpy.spacing",
"numpy.min",
"numpy.bitwise_not",
"numpy.float128",
"numpy.modf",
"numpy.timedelta64",
"numpy.cdouble",
"numpy.core.umath.hypot",
"numpy.random.rand",
"numpy.testing.assert_",
"numpy.errstate",
"numpy.not_equal",
"numpy.divmod",
"numpy.logaddexp.reduce",
"numpy.add.accumulate",
"numpy.arcsinh",
"numpy.add.reduceat",
"numpy.multiply.reduce",
"numpy.maximum",
"numpy.core.umath.absolute",
"numpy.ones",
"numpy.ldexp",
"numpy.remainder",
"numpy.logaddexp2",
"numpy.empty",
"numpy.matrix",
"numpy.gcd.reduce",
"numpy.longdouble",
"numpy.fmax.reduce",
"numpy.zeros_like",
"numpy.negative",
"numpy.complex256",
"numpy.random.randint",
"numpy.testing.assert_equal",
"numpy.trunc",
"numpy.add.reduce",
"numpy.choose",
"numpy.testing.assert_array_almost_equal",
"numpy.logical_not",
"numpy.logspace",
"numpy.isnan",
"numpy.rint",
"numpy.logaddexp2.reduce",
"numpy.testing.assert_allclose",
"numpy.logaddexp",
"numpy.core.umath.arctan2",
"numpy.log2",
"numpy.cos",
"numpy.core.umath.degrees",
"numpy.core.umath.log1p",
"numpy.exp2",
"numpy.isnat",
"numpy.ndarray.__new__",
"numpy.minimum.accumulate",
"numpy.dtype",
"numpy.bitwise_xor",
"numpy.core.umath.expm1",
"numpy.multiply.reduceat",
"numpy.any",
"numpy.clongdouble",
"numpy.frexp",
"numpy.fmod",
"numpy.ones_like",
"numpy.arange",
"numpy.finfo",
"numpy.core.umath.radians",
"numpy.testing.assert_almost_equal",
"numpy.signbit",
"numpy.core.umath.sign",
"numpy.floor_divide",
"numpy.core.umath.sqrt",
"numpy.int_",
"numpy.multiply.at",
"numpy.maximum.reduce",
"numpy.add.at",
"numpy.abs",
"numpy.random.seed",
"numpy.isfinite",
"numpy.add.outer",
"numpy.core.umath.hypot.reduce",
"numpy.sign",
"numpy.bitwise_and",
"numpy.float64",
"numpy.gcd",
"numpy.lcm",
"numpy.maximum.accumulate"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
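The record above stores numpy's ufunc test-suite, and the pattern its subclass cases exercise end to end is the `__array_ufunc__` override protocol: intercept every ufunc call, unwrap instances of the subclass, dispatch to the base machinery, and re-wrap the outputs. A minimal self-contained sketch of that protocol follows; the `Logged` class name and the `last_ufunc` attribute are illustrative assumptions, not part of numpy's API.

import numpy as np

class Logged(np.ndarray):
    # Minimal __array_ufunc__ override: record the name of the last
    # ufunc applied to the array, then defer to the base implementation.
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # Unwrap Logged instances so the re-dispatch below cannot recurse.
        args = [np.asarray(x) if isinstance(x, Logged) else x for x in inputs]
        result = getattr(ufunc, method)(*args, **kwargs)
        if isinstance(result, np.ndarray):
            result = result.view(Logged)
            result.last_ufunc = ufunc.__name__
        return result

a = np.arange(5.0).view(Logged)
b = np.sin(a)
assert isinstance(b, Logged) and b.last_ufunc == 'sin'
assert np.allclose(np.asarray(b), np.sin(np.arange(5.0)))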
weiwang2330/conditional-motion-propagation
|
[
"66ca0ca7c20d7724d1dcea74855376a7957da0cf"
] |
[
"dataset.py"
] |
[
"import numpy as np\nimport io\nfrom PIL import Image\n\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset\n\nfrom utils.flowlib import read_flo_file\nfrom utils import image_crop, image_resize, image_flow_crop, image_flow_resize, flow_sampler, image_flow_aug, flow_aug\n\nclass ColorAugmentation(object):\n\n def __init__(self, eig_vec=None, eig_val=None):\n if eig_vec == None:\n eig_vec = torch.Tensor([\n [ 0.4009, 0.7192, -0.5675],\n [-0.8140, -0.0045, -0.5808],\n [ 0.4203, -0.6948, -0.5836],\n ])\n if eig_val == None:\n eig_val = torch.Tensor([[0.2175, 0.0188, 0.0045]])\n self.eig_val = eig_val # 1*3\n self.eig_vec = eig_vec # 3*3\n\n def __call__(self, tensor):\n assert tensor.size(0) == 3\n alpha = torch.normal(means=torch.zeros_like(self.eig_val))*0.1\n quatity = torch.mm(self.eig_val*alpha, self.eig_vec)\n tensor = tensor + quatity.view(3, 1, 1)\n return tensor\n\n\ndef pil_loader(img_str, ch):\n buff = io.BytesIO(img_str)\n if ch == 1:\n return Image.open(buff)\n else:\n with Image.open(buff) as img:\n img = img.convert('RGB')\n return img\n\n\ndef pil_loader_str(img_str, ch):\n if ch == 1:\n return Image.open(img_str)\n else:\n with Image.open(img_str) as img:\n img = img.convert('RGB')\n return img\n\n\nclass ImageFlowDataset(Dataset):\n\n def __init__(self, meta_file, config, phase):\n self.img_transform = transforms.Compose([\n transforms.Normalize(config['data_mean'], config['data_div'])\n ])\n print(\"building dataset from {}\".format(meta_file))\n self.flow_file_type = config['flow_file_type']\n self.metas = []\n self.num = 0\n for mf in meta_file:\n with open(mf, 'r') as f:\n lines = f.readlines()\n self.num += len(lines)\n for line in lines:\n if self.flow_file_type == \"flo\":\n img0_path, img1_path, flow_path = line.rstrip().split()\n self.metas.append((img0_path, img1_path, flow_path))\n elif self.flow_file_type == \"jpg\":\n img0_path, img1_path, flow_path_x, flow_path_y = line.rstrip().split()\n self.metas.append((img0_path, img1_path, flow_path_x, flow_path_y))\n else:\n raise Exception(\"No such flow_file_type: {}\".format(self.flow_file_type))\n print(\"read meta done, total: {}\".format(self.num))\n\n self.phase = phase\n\n self.short_size = config.get('short_size', None)\n self.long_size = config.get('long_size', None)\n self.crop_size = config.get('crop_size', None)\n self.sample_strategy = config['sample_strategy']\n self.sample_bg_ratio = config['sample_bg_ratio']\n self.nms_ks = config['nms_ks']\n self.max_num_guide = config['max_num_guide']\n\n if self.phase == \"train\":\n self.aug_flip = config['image_flow_aug'].get('flip', False)\n self.aug_reverse = config['flow_aug'].get('reverse', False)\n self.aug_scale = config['flow_aug'].get('scale', False)\n self.aug_rotate = config['flow_aug'].get('rotate', False)\n \n def __len__(self):\n return self.num\n\n def __getitem__(self, idx):\n img1_fn = self.metas[idx][0]\n img2_fn = self.metas[idx][1]\n if self.flow_file_type == 'flo':\n flowname = self.metas[idx][2]\n flow = read_flo_file(flowname) # h, w, 2\n else:\n flownamex = self.metas[idx][2]\n flownamey = self.metas[idx][3]\n flowx = np.array(Image.open(flownamex)).astype(np.float32) / 255 * 100 - 50\n flowy = np.array(Image.open(flownamey)).astype(np.float32) / 255 * 100 - 50\n flow = np.concatenate((flowx[:,:,np.newaxis], flowy[:,:,np.newaxis]), axis=2)\n img1 = pil_loader_str(img1_fn, ch=3)\n img2 = pil_loader_str(img2_fn, ch=3)\n\n ## check size\n assert img1.height == flow.shape[0]\n assert img1.width == 
flow.shape[1]\n assert img2.height == flow.shape[0]\n assert img2.width == flow.shape[1]\n\n ## resize\n if self.short_size is not None or self.long_size is not None:\n img1, img2, flow, ratio = image_flow_resize(\n img1, img2, flow, short_size=self.short_size,\n long_size=self.long_size)\n\n ## crop\n if self.crop_size is not None:\n img1, img2, flow, offset = image_flow_crop(\n img1, img2, flow, self.crop_size, self.phase)\n\n ## augmentation\n if self.phase == 'train':\n # image flow aug\n img1, img2, flow = image_flow_aug(img1, img2, flow, flip_horizon=self.aug_flip)\n # flow aug\n flow = flow_aug(flow, reverse=self.aug_reverse,\n scale=self.aug_scale, rotate=self.aug_rotate)\n\n ## transform\n img1 = torch.from_numpy(np.array(img1).astype(np.float32).transpose((2,0,1)))\n img2 = torch.from_numpy(np.array(img2).astype(np.float32).transpose((2,0,1)))\n img1 = self.img_transform(img1)\n img2 = self.img_transform(img2)\n\n ## sparse sampling\n sparse_flow, mask = flow_sampler(\n flow, strategy=self.sample_strategy,\n bg_ratio=self.sample_bg_ratio, nms_ks=self.nms_ks,\n max_num_guide=self.max_num_guide) # (h,w,2), (h,w,2)\n\n flow = torch.from_numpy(flow.transpose((2, 0, 1)))\n sparse_flow = torch.from_numpy(sparse_flow.transpose((2, 0, 1)))\n mask = torch.from_numpy(mask.transpose((2, 0, 1)).astype(np.float32))\n return img1, sparse_flow, mask, flow, img2\n\n\nclass ImageDataset(Dataset):\n\n def __init__(self, meta_file, config):\n self.img_transform = transforms.Compose([\n transforms.Normalize(config['data_mean'], config['data_div'])\n ])\n print(\"building dataset from {}\".format(meta_file))\n with open(meta_file, 'r') as f:\n lines = f.readlines()\n self.num = len(lines)\n self.metas = [l.rstrip() for l in lines]\n print(\"read meta done, total: {}\".format(self.num))\n\n self.short_size = config.get('short_size', None)\n self.long_size = config.get('long_size', None)\n self.crop_size = config.get('crop_size', None)\n\n def __len__(self):\n return self.num\n\n def __getitem__(self, idx):\n img_fn = self.metas[idx]\n img = pil_loader_str(img_fn, ch=3)\n\n ## resize\n if self.short_size is not None or self.long_size is not None:\n img, size = image_resize(img, short_size=self.short_size, long_size=self.long_size)\n\n ## crop\n if self.crop_size is not None:\n img, offset = image_crop(img, self.crop_size)\n\n ## transform\n img = torch.from_numpy(np.array(img).astype(np.float32).transpose((2,0,1)))\n img = self.img_transform(img)\n\n return img, torch.LongTensor([idx]), torch.LongTensor(offset), torch.LongTensor(size)\n"
] |
[
[
"torch.LongTensor",
"torch.mm",
"torch.Tensor",
"torch.zeros_like",
"numpy.concatenate",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
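In the `dataset.py` record above, `ColorAugmentation.__call__` draws its jitter with `torch.normal(means=...)`; that keyword spelling belongs to early PyTorch releases and is rejected by current ones (the parameter is `mean`), and the `== None` guards are better written with `is None`. A sketch of the same AlexNet-style PCA colour jitter against the current API, assuming the intent is zero-mean noise with standard deviation 0.1 per eigenvalue:

import torch

class ColorAugmentation:
    # PCA colour jitter restated for current PyTorch (a sketch, not the
    # repository's code): perturb channels along the RGB eigenvectors.
    def __init__(self, eig_vec=None, eig_val=None):
        if eig_vec is None:
            eig_vec = torch.tensor([
                [ 0.4009,  0.7192, -0.5675],
                [-0.8140, -0.0045, -0.5808],
                [ 0.4203, -0.6948, -0.5836],
            ])
        if eig_val is None:
            eig_val = torch.tensor([[0.2175, 0.0188, 0.0045]])
        self.eig_val = eig_val  # shape (1, 3)
        self.eig_vec = eig_vec  # shape (3, 3)

    def __call__(self, tensor):
        assert tensor.size(0) == 3
        # randn_like * 0.1 stands in for the removed torch.normal(means=...)
        alpha = torch.randn_like(self.eig_val) * 0.1
        quantity = torch.mm(self.eig_val * alpha, self.eig_vec)  # (1, 3)
        return tensor + quantity.view(3, 1, 1)

jittered = ColorAugmentation()(torch.zeros(3, 8, 8))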
snowmanyukari/Keras-GAN-master
|
[
"44d3320e84ca00071de8a5c0fb4566d10486bb1d"
] |
[
"wgan_gp/wgan_gp.py"
] |
[
"\n# Large amount of credit goes to:\n# https://github.com/keras-team/keras-contrib/blob/master/examples/improved_wgan.py\n# which I've used as a reference for this implementation\n\nfrom __future__ import print_function, division\n\nfrom keras.datasets import mnist\nfrom keras.layers.merge import _Merge\nfrom keras.layers import Input, Dense, Reshape, Flatten, Dropout\nfrom keras.layers import BatchNormalization, Activation, ZeroPadding2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.convolutional import UpSampling2D, Conv2D\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import RMSprop\nfrom functools import partial\n\nimport keras.backend as K\n\nimport matplotlib.pyplot as plt\n\nimport sys\n\nimport numpy as np\n\nclass RandomWeightedAverage(_Merge):\n \"\"\"Provides a (random) weighted average between real and generated image samples\"\"\"\n def _merge_function(self, inputs):\n alpha = K.random_uniform((32, 1, 1, 1))\n return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])\n\nclass WGANGP():\n def __init__(self):\n self.img_rows = 28\n self.img_cols = 28\n self.channels = 1\n self.img_shape = (self.img_rows, self.img_cols, self.channels)\n self.latent_dim = 100\n\n # Following parameter and optimizer set as recommended in paper\n self.n_critic = 5\n optimizer = RMSprop(lr=0.00005)\n\n # Build the generator and critic\n self.generator = self.build_generator()\n self.critic = self.build_critic()\n\n #-------------------------------\n # Construct Computational Graph\n # for the Critic\n #-------------------------------\n\n # Freeze generator's layers while training critic\n self.generator.trainable = False\n\n # Image input (real sample)\n real_img = Input(shape=self.img_shape)\n\n # Noise input\n z_disc = Input(shape=(self.latent_dim,))\n # Generate image based of noise (fake sample)\n fake_img = self.generator(z_disc)\n\n # Discriminator determines validity of the real and fake images\n fake = self.critic(fake_img)\n valid = self.critic(real_img)\n\n # Construct weighted average between real and fake images\n interpolated_img = RandomWeightedAverage()([real_img, fake_img])\n # Determine validity of weighted sample\n validity_interpolated = self.critic(interpolated_img)\n\n # Use Python partial to provide loss function with additional\n # 'averaged_samples' argument\n partial_gp_loss = partial(self.gradient_penalty_loss,\n averaged_samples=interpolated_img)\n partial_gp_loss.__name__ = 'gradient_penalty' # Keras requires function names\n\n self.critic_model = Model(inputs=[real_img, z_disc],\n outputs=[valid, fake, validity_interpolated])\n self.critic_model.compile(loss=[self.wasserstein_loss,\n self.wasserstein_loss,\n partial_gp_loss],\n optimizer=optimizer,\n loss_weights=[1, 1, 10])\n #-------------------------------\n # Construct Computational Graph\n # for Generator\n #-------------------------------\n\n # For the generator we freeze the critic's layers\n self.critic.trainable = False\n self.generator.trainable = True\n\n # Sampled noise for input to generator\n z_gen = Input(shape=(100,))\n # Generate images based of noise\n img = self.generator(z_gen)\n # Discriminator determines validity\n valid = self.critic(img)\n # Defines generator model\n self.generator_model = Model(z_gen, valid)\n self.generator_model.compile(loss=self.wasserstein_loss, optimizer=optimizer)\n\n\n def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):\n \"\"\"\n Computes gradient penalty based on prediction and weighted real / fake samples\n 
\"\"\"\n gradients = K.gradients(y_pred, averaged_samples)[0]\n # compute the euclidean norm by squaring ...\n gradients_sqr = K.square(gradients)\n # ... summing over the rows ...\n gradients_sqr_sum = K.sum(gradients_sqr,\n axis=np.arange(1, len(gradients_sqr.shape)))\n # ... and sqrt\n gradient_l2_norm = K.sqrt(gradients_sqr_sum)\n # compute lambda * (1 - ||grad||)^2 still for each single sample\n gradient_penalty = K.square(1 - gradient_l2_norm)\n # return the mean as loss over all the batch samples\n return K.mean(gradient_penalty)\n\n\n def wasserstein_loss(self, y_true, y_pred):\n return K.mean(y_true * y_pred)\n\n def build_generator(self):\n\n model = Sequential()\n\n model.add(Dense(128 * 7 * 7, activation=\"relu\", input_dim=self.latent_dim))\n model.add(Reshape((7, 7, 128)))\n model.add(UpSampling2D())\n model.add(Conv2D(128, kernel_size=4, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Activation(\"relu\"))\n model.add(UpSampling2D())\n model.add(Conv2D(64, kernel_size=4, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Activation(\"relu\"))\n model.add(Conv2D(self.channels, kernel_size=4, padding=\"same\"))\n model.add(Activation(\"tanh\"))\n\n model.summary()\n\n noise = Input(shape=(self.latent_dim,))\n img = model(noise)\n\n return Model(noise, img)\n\n def build_critic(self):\n\n model = Sequential()\n\n model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding=\"same\"))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Conv2D(32, kernel_size=3, strides=2, padding=\"same\"))\n model.add(ZeroPadding2D(padding=((0,1),(0,1))))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Conv2D(64, kernel_size=3, strides=2, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Conv2D(128, kernel_size=3, strides=1, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(1))\n\n model.summary()\n\n img = Input(shape=self.img_shape)\n validity = model(img)\n\n return Model(img, validity)\n\n def train(self, epochs, batch_size, sample_interval=50):\n\n # Load the dataset\n (X_train, _), (_, _) = mnist.load_data()\n\n # Rescale -1 to 1\n X_train = (X_train.astype(np.float32) - 127.5) / 127.5\n X_train = np.expand_dims(X_train, axis=3)\n\n # Adversarial ground truths\n valid = -np.ones((batch_size, 1))\n fake = np.ones((batch_size, 1))\n dummy = np.zeros((batch_size, 1)) # Dummy gt for gradient penalty\n for epoch in range(epochs):\n\n for _ in range(self.n_critic):\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n # Select a random batch of images\n idx = np.random.randint(0, X_train.shape[0], batch_size)\n imgs = X_train[idx]\n # Sample generator input\n noise = np.random.normal(0, 1, (batch_size, self.latent_dim))\n # Train the critic\n d_loss = self.critic_model.train_on_batch([imgs, noise],\n [valid, fake, dummy])\n\n # ---------------------\n # Train Generator\n # ---------------------\n\n g_loss = self.generator_model.train_on_batch(noise, valid)\n\n # Plot the progress\n print (\"%d [D loss: %f] [G loss: %f]\" % (epoch, d_loss[0], g_loss))\n\n # If at save interval => save generated image samples\n if epoch % sample_interval == 0:\n self.sample_images(epoch)\n\n def sample_images(self, 
epoch):\n r, c = 5, 5\n noise = np.random.normal(0, 1, (r * c, self.latent_dim))\n gen_imgs = self.generator.predict(noise)\n\n # Rescale images 0 - 1\n gen_imgs = 0.5 * gen_imgs + 0.5\n\n fig, axs = plt.subplots(r, c)\n cnt = 0\n for i in range(r):\n for j in range(c):\n axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')\n axs[i,j].axis('off')\n cnt += 1\n fig.savefig(\"images/mnist_%d.png\" % epoch)\n plt.close()\n\n\nif __name__ == '__main__':\n wgan = WGANGP()\n wgan.train(epochs=30000, batch_size=32, sample_interval=100)\n"
] |
[
[
"numpy.expand_dims",
"matplotlib.pyplot.subplots",
"numpy.ones",
"numpy.random.normal",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
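The WGAN-GP record above implements its penalty as `K.square(1 - gradient_l2_norm)` averaged over the batch, i.e. the usual lambda * (||grad_x D(x_hat)|| - 1)^2 term evaluated at random interpolates x_hat = alpha * real + (1 - alpha) * fake (the `RandomWeightedAverage` layer). For a linear critic D(x) = w . x the gradient is w everywhere, so the term can be sanity-checked in closed form without an autodiff framework; a small numpy sketch with arbitrary illustrative data:

import numpy as np

rng = np.random.default_rng(0)
w = rng.normal(size=4)      # weights of a linear critic D(x) = w @ x
real = rng.normal(size=4)   # stand-in real sample
fake = rng.normal(size=4)   # stand-in generated sample

alpha = rng.uniform()                       # random mixing coefficient
x_hat = alpha * real + (1 - alpha) * fake   # interpolated sample
grad = w                                    # d/dx (w @ x) is w for every x
penalty = (np.linalg.norm(grad) - 1.0) ** 2
# For a linear critic the penalty is independent of x_hat:
assert np.isclose(penalty, (np.linalg.norm(w) - 1.0) ** 2)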
Konano/arknights-mower
|
[
"dc43270e3232a9e282207f4f0c778f2f91823be7"
] |
[
"arknights_mower/utils/recognize.py"
] |
[
"from __future__ import annotations\n\nimport cv2\nimport time\nimport numpy as np\nfrom typing import Optional, List\n\nfrom .. import __rootdir__\nfrom . import config, detector\nfrom .log import logger, save_screenshot\nfrom .scene import Scene, SceneComment\nfrom .image import bytes2img, loadimg, thres2, cropimg\nfrom .matcher import Matcher\nfrom .device import Device\nfrom . import typealias as tp\n\n\nclass RecognizeError(Exception):\n pass\n\n\nclass Recognizer(object):\n\n def __init__(self, device: Device, screencap: bytes = None) -> None:\n self.device = device\n self.start(screencap)\n\n def start(self, screencap: bytes = None, build: bool = True) -> None:\n \"\"\" init with screencap, build matcher \"\"\"\n retry_times = config.MAX_RETRYTIME\n while retry_times > 0:\n try:\n if screencap is not None:\n self.screencap = screencap\n else:\n self.screencap = self.device.screencap()\n self.img = bytes2img(self.screencap, False)\n self.gray = bytes2img(self.screencap, True)\n self.h, self.w, _ = self.img.shape\n self.matcher = Matcher(self.gray) if build else None\n self.scene = Scene.UNDEFINED\n return\n except cv2.error as e:\n logger.warning(e)\n retry_times -= 1\n time.sleep(1)\n continue\n raise RuntimeError('init Recognizer failed')\n\n def update(self, screencap: bytes = None, rebuild: bool = True) -> None:\n \"\"\" rebuild matcher \"\"\"\n self.start(screencap, rebuild)\n\n def color(self, x: int, y: int) -> tp.Pixel:\n \"\"\" get the color of the pixel \"\"\"\n return self.img[y][x]\n\n def save_screencap(self, folder):\n save_screenshot(self.screencap, subdir=f'{folder}/{self.h}x{self.w}')\n\n def get_scene(self) -> int:\n \"\"\" get the current scene in the game \"\"\"\n if self.scene != Scene.UNDEFINED:\n return self.scene\n if self.find('connecting', scope=((self.w//2, self.h//10*8), (self.w//4*3, self.h))) is not None:\n self.scene = Scene.CONNECTING\n elif self.find('index_nav', thres=250, scope=((0, 0), (100+self.w//4, self.h//10))) is not None:\n self.scene = Scene.INDEX\n elif self.find('nav_index') is not None:\n self.scene = Scene.NAVIGATION_BAR\n elif self.find('materiel_ico') is not None:\n self.scene = Scene.MATERIEL\n elif self.find('read_mail') is not None:\n self.scene = Scene.MAIL\n elif self.find('loading') is not None:\n self.scene = Scene.LOADING\n elif self.find('loading2') is not None:\n self.scene = Scene.LOADING\n elif self.find('loading3') is not None:\n self.scene = Scene.LOADING\n elif self.find('loading4') is not None:\n self.scene = Scene.LOADING\n elif self.is_black():\n self.scene = Scene.LOADING\n elif self.find('ope_plan') is not None:\n self.scene = Scene.OPERATOR_BEFORE\n elif self.find('ope_select_start') is not None:\n self.scene = Scene.OPERATOR_SELECT\n elif self.find('ope_agency_going') is not None:\n self.scene = Scene.OPERATOR_ONGOING\n elif self.find('ope_elimi_finished') is not None:\n self.scene = Scene.OPERATOR_ELIMINATE_FINISH\n elif self.find('ope_finish') is not None:\n self.scene = Scene.OPERATOR_FINISH\n elif self.find('ope_recover_potion_on') is not None:\n self.scene = Scene.OPERATOR_RECOVER_POTION\n elif self.find('ope_recover_originite_on') is not None:\n self.scene = Scene.OPERATOR_RECOVER_ORIGINITE\n elif self.find('double_confirm') is not None:\n if self.find('network_check') is not None:\n self.scene = Scene.NETWORK_CHECK\n else:\n self.scene = Scene.DOUBLE_CONFIRM\n elif self.find('ope_firstdrop') is not None:\n self.scene = Scene.OPERATOR_DROP\n elif self.find('ope_eliminate') is not None:\n self.scene = 
Scene.OPERATOR_ELIMINATE\n elif self.find('ope_elimi_agency_panel') is not None:\n self.scene = Scene.OPERATOR_ELIMINATE_AGENCY\n elif self.find('ope_giveup') is not None:\n self.scene = Scene.OPERATOR_GIVEUP\n elif self.find('ope_failed') is not None:\n self.scene = Scene.OPERATOR_FAILED\n elif self.find('friend_list_on') is not None:\n self.scene = Scene.FRIEND_LIST_ON\n elif self.find('credit_visiting') is not None:\n self.scene = Scene.FRIEND_VISITING\n elif self.find('infra_overview') is not None:\n self.scene = Scene.INFRA_MAIN\n elif self.find('infra_todo') is not None:\n self.scene = Scene.INFRA_TODOLIST\n elif self.find('clue') is not None:\n self.scene = Scene.INFRA_CONFIDENTIAL\n elif self.find('infra_overview_in') is not None:\n self.scene = Scene.INFRA_ARRANGE\n elif self.find('hidden_eye', thres=250, scope=((self.w//4*3, self.h//4*3), (self.w, self.h))) is not None:\n self.scene = Scene.INFRA_DETAILS\n elif self.find('arrange_confirm') is not None:\n self.scene = Scene.INFRA_ARRANGE_CONFIRM\n elif self.find('friend_list') is not None:\n self.scene = Scene.FRIEND_LIST_OFF\n elif self.find(\"mission_trainee_on\") is not None:\n self.scene = Scene.MISSION_TRAINEE\n elif self.find('mission_daily_on') is not None:\n self.scene = Scene.MISSION_DAILY\n elif self.find('mission_weekly_on') is not None:\n self.scene = Scene.MISSION_WEEKLY\n elif self.find('terminal_pre') is not None:\n self.scene = Scene.TERMINAL_MAIN\n elif self.find('open_recruitment') is not None:\n self.scene = Scene.RECRUIT_MAIN\n elif self.find('recruiting_instructions') is not None:\n self.scene = Scene.RECRUIT_TAGS\n elif self.find('agent_token') is not None:\n self.scene = Scene.RECRUIT_AGENT\n elif self.find('agent_token_1080_1440') is not None:\n self.scene = Scene.RECRUIT_AGENT\n elif self.find('agent_token_900_1440') is not None:\n self.scene = Scene.RECRUIT_AGENT\n elif self.find('agent_unlock') is not None:\n self.scene = Scene.SHOP_CREDIT\n elif self.find('shop_credit_2') is not None:\n self.scene = Scene.SHOP_OTHERS\n elif self.find('shop_cart') is not None:\n self.scene = Scene.SHOP_CREDIT_CONFIRM\n elif self.find('shop_assist') is not None:\n self.scene = Scene.SHOP_ASSIST\n elif self.find('login_logo') is not None and self.find('hypergryph') is not None:\n if self.find('login_awake') is not None:\n self.scene = Scene.LOGIN_QUICKLY\n elif self.find('login_account') is not None:\n self.scene = Scene.LOGIN_MAIN\n elif self.find('login_iknow') is not None:\n self.scene = Scene.LOGIN_ANNOUNCE\n else:\n self.scene = Scene.LOGIN_MAIN_NOENTRY\n elif self.find('register') is not None:\n self.scene = Scene.LOGIN_REGISTER\n elif self.find('login_loading') is not None:\n self.scene = Scene.LOGIN_LOADING\n elif self.find('login_iknow') is not None:\n self.scene = Scene.LOGIN_ANNOUNCE\n elif self.find('12cadpa') is not None:\n if self.find('cadpa_detail') is not None:\n self.scene = Scene.LOGIN_CADPA_DETAIL\n else:\n self.scene = Scene.LOGIN_START\n elif detector.announcement_close(self.img) is not None:\n self.scene = Scene.ANNOUNCEMENT\n elif self.find('skip') is not None:\n self.scene = Scene.SKIP\n elif self.find('upgrade') is not None:\n self.scene = Scene.UPGRADE\n elif detector.confirm(self.img) is not None:\n self.scene = Scene.CONFIRM\n elif self.find('login_verify') is not None:\n self.scene = Scene.LOGIN_INPUT\n elif self.find('login_captcha') is not None:\n self.scene = Scene.LOGIN_CAPTCHA\n elif self.find('login_connecting') is not None:\n self.scene = Scene.LOGIN_LOADING\n elif 
self.find('main_theme') is not None:\n self.scene = Scene.TERMINAL_MAIN_THEME\n elif self.find('episode') is not None:\n self.scene = Scene.TERMINAL_EPISODE\n elif self.find('biography') is not None:\n self.scene = Scene.TERMINAL_BIOGRAPHY\n elif self.find('collection') is not None:\n self.scene = Scene.TERMINAL_COLLECTION\n elif self.find('login_bilibili') is not None:\n self.scene = Scene.LOGIN_BILIBILI\n elif self.find('loading6') is not None:\n self.scene = Scene.LOADING\n elif self.find('loading7') is not None:\n self.scene = Scene.LOADING\n elif self.find('arrange_order_options_scene') is not None:\n self.scene = Scene.INFRA_ARRANGE_ORDER\n else:\n self.scene = Scene.UNKNOWN\n self.device.check_current_focus()\n # save screencap to analyse\n if config.SCREENSHOT_PATH is not None:\n self.save_screencap(self.scene)\n logger.info(f'Scene: {self.scene}: {SceneComment[self.scene]}')\n return self.scene\n\n def is_black(self) -> None:\n \"\"\" check if the current scene is all black \"\"\"\n return np.max(self.gray[:, 105:-105]) < 16\n\n def nav_button(self):\n \"\"\" find navigation button \"\"\"\n return self.find('nav_button', thres=128, scope=((0, 0), (100+self.w//4, self.h//10)))\n\n def find(self, res: str, draw: bool = False, scope: tp.Scope = None, thres: int = None, judge: bool = True, strict: bool = False) -> tp.Scope:\n \"\"\"\n 查找元素是否出现在画面中\n\n :param res: 待识别元素资源文件名\n :param draw: 是否将识别结果输出到屏幕\n :param scope: ((x0, y0), (x1, y1)),提前限定元素可能出现的范围\n :param thres: 是否在匹配前对图像进行二值化处理\n :param judge: 是否加入更加精确的判断\n :param strict: 是否启用严格模式,未找到时报错\n\n :return ret: 若匹配成功,则返回元素在游戏界面中出现的位置,否则返回 None\n \"\"\"\n logger.debug(f'find: {res}')\n res = f'{__rootdir__}/resources/{res}.png'\n\n if thres is not None:\n # 对图像二值化处理\n res_img = thres2(loadimg(res, True), thres)\n gray_img = cropimg(self.gray, scope)\n matcher = Matcher(thres2(gray_img, thres))\n ret = matcher.match(res_img, draw=draw, judge=judge)\n else:\n res_img = loadimg(res, True)\n matcher = self.matcher\n ret = matcher.match(res_img, draw=draw, scope=scope, judge=judge)\n if strict and ret is None:\n raise RecognizeError(f\"Can't find '{res}'\") \n return ret\n\n def score(self, res: str, draw: bool = False, scope: tp.Scope = None, thres: int = None) -> Optional[List[float]]:\n \"\"\"\n 查找元素是否出现在画面中,并返回分数\n\n :param res: 待识别元素资源文件名\n :param draw: 是否将识别结果输出到屏幕\n :param scope: ((x0, y0), (x1, y1)),提前限定元素可能出现的范围\n :param thres: 是否在匹配前对图像进行二值化处理\n\n :return ret: 若匹配成功,则返回元素在游戏界面中出现的位置,否则返回 None\n \"\"\"\n logger.debug(f'find: {res}')\n res = f'{__rootdir__}/resources/{res}.png'\n\n if thres is not None:\n # 对图像二值化处理\n res_img = thres2(loadimg(res, True), thres)\n gray_img = cropimg(self.gray, scope)\n matcher = Matcher(thres2(gray_img, thres))\n score = matcher.score(res_img, draw=draw, only_score=True)\n else:\n res_img = loadimg(res, True)\n matcher = self.matcher\n score = matcher.score(res_img, draw=draw, scope=scope, only_score=True)\n return score\n"
] |
[
[
"numpy.max"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
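The `recognize.py` record above resolves the current game scene by matching template images against the (optionally binarized) grayscale screenshot through its `Matcher` wrapper. A minimal sketch of that underlying primitive with plain OpenCV; the `find_template` name, the binarization handling, and the 0.8 score cutoff are illustrative assumptions rather than the project's tuned logic:

import cv2

def find_template(screen_gray, template_gray, thres=None, cutoff=0.8):
    # Optionally binarize both images first, mirroring the record's
    # `thres` code path, then run normalized cross-correlation matching.
    if thres is not None:
        _, screen_gray = cv2.threshold(screen_gray, thres, 255, cv2.THRESH_BINARY)
        _, template_gray = cv2.threshold(template_gray, thres, 255, cv2.THRESH_BINARY)
    result = cv2.matchTemplate(screen_gray, template_gray, cv2.TM_CCOEFF_NORMED)
    _, score, _, top_left = cv2.minMaxLoc(result)
    if score < cutoff:
        return None  # template not found on screen
    h, w = template_gray.shape[:2]
    # Return a ((x0, y0), (x1, y1)) scope like the record's tp.Scope values.
    return (top_left, (top_left[0] + w, top_left[1] + h))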
cyyever/DALI
|
[
"e2b2d5a061da605e3e9e681017a7b2d53fe41a62",
"e2b2d5a061da605e3e9e681017a7b2d53fe41a62",
"e2b2d5a061da605e3e9e681017a7b2d53fe41a62",
"e2b2d5a061da605e3e9e681017a7b2d53fe41a62",
"e2b2d5a061da605e3e9e681017a7b2d53fe41a62",
"e2b2d5a061da605e3e9e681017a7b2d53fe41a62",
"e2b2d5a061da605e3e9e681017a7b2d53fe41a62"
] |
[
"dali/test/python/test_operator_affine_transforms.py",
"dali/test/python/test_operator_reshape.py",
"dali/test/python/test_operator_gridmask.py",
"dali/test/python/test_operator_lookup_table.py",
"docs/examples/use_cases/tensorflow/efficientdet/pipeline/anchors_utils/region_similarity_calculator.py",
"dali/test/python/test_operator_multipaste.py",
"dali/test/python/test_operator_coord_transform.py"
] |
[
"# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.ops.transforms as T # Just here to verify that import works as expected\nimport nvidia.dali.ops as ops\nimport nvidia.dali.types as types\nimport nvidia.dali.fn as fn\nimport numpy as np\nimport os\n\nimport warnings\nfrom nose.tools import raises\n\nfrom scipy.spatial.transform import Rotation as scipy_rotate\n\ndef check_results_sample(T1, mat_ref, T0=None, reverse=False, atol=1e-6):\n ndim = mat_ref.shape[0] - 1\n ref_T1 = None\n if T0 is not None:\n mat_T0 = np.identity(ndim+1)\n mat_T0[:ndim, :] = T0\n if reverse:\n mat_T1 = np.dot(mat_T0, mat_ref)\n else:\n mat_T1 = np.dot(mat_ref, mat_T0)\n ref_T1 = mat_T1[:ndim, :]\n else:\n ref_T1 = mat_ref[:ndim, :]\n assert np.allclose(T1, ref_T1, atol=1e-6)\n\ndef check_results(T1, batch_size, mat_ref, T0=None, reverse=False, atol=1e-6):\n for idx in range(batch_size):\n check_results_sample(T1.at(idx), mat_ref, T0.at(idx) if T0 is not None else None, reverse, atol)\n\ndef translate_affine_mat(offset):\n ndim = len(offset)\n affine_mat = np.identity(ndim + 1)\n affine_mat[:ndim, -1] = offset\n return affine_mat\n\ndef check_transform_translation_op(offset, has_input = False, reverse_order=False, batch_size=1, num_threads=4, device_id=0):\n ndim = len(offset)\n pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed = 1234)\n with pipe:\n if has_input:\n T0 = fn.random.uniform(range=(-1, 1), shape=(ndim, ndim+1))\n T1 = fn.transforms.translation(T0, device='cpu', offset=offset, reverse_order=reverse_order)\n pipe.set_outputs(T1, T0)\n else:\n T1 = fn.transforms.translation(device='cpu', offset=offset)\n pipe.set_outputs(T1)\n pipe.build()\n outs = pipe.run()\n ref_mat = translate_affine_mat(offset=offset)\n T0 = outs[1] if has_input else None\n check_results(outs[0], batch_size, ref_mat, T0, reverse_order)\n\ndef test_transform_translation_op(batch_size=3, num_threads=4, device_id=0):\n for offset in [(0.0, 1.0), (2.0, 1.0, 3.0)]:\n for has_input in [False, True]:\n for reverse_order in [False, True] if has_input else [False]:\n yield check_transform_translation_op, offset, has_input, reverse_order, \\\n batch_size, num_threads, device_id\n\ndef scale_affine_mat(scale, center = None, ndim = None):\n if ndim is None:\n ndim = len(scale)\n else:\n assert ndim == len(scale) or 1 == len(scale)\n assert center is None or len(center) == ndim\n\n s_mat = np.identity(ndim + 1)\n for d in range(ndim):\n s_mat[d, d] = scale[0] if len(scale) == 1 else scale[d]\n\n if center is not None:\n neg_offset = [-x for x in center]\n t1_mat = translate_affine_mat(neg_offset)\n t2_mat = translate_affine_mat(center)\n affine_mat = np.dot(t2_mat, np.dot(s_mat, t1_mat))\n else:\n affine_mat = s_mat\n\n return affine_mat\n\ndef check_transform_scale_op(scale, center=None, has_input = False, reverse_order=False, ndim=None, batch_size=1, 
num_threads=4, device_id=0):\n if ndim is None:\n ndim = len(scale)\n assert center is None or len(center) == ndim\n\n pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed = 1234)\n with pipe:\n if has_input:\n T0 = fn.random.uniform(range=(-1, 1), shape=(ndim, ndim+1))\n T1 = fn.transforms.scale(T0, device='cpu', scale=scale, center=center, ndim=ndim, reverse_order=reverse_order)\n pipe.set_outputs(T1, T0)\n else:\n T1 = fn.transforms.scale(device='cpu', scale=scale, center=center, ndim=ndim)\n pipe.set_outputs(T1)\n pipe.build()\n outs = pipe.run()\n ref_mat = scale_affine_mat(scale=scale, center=center, ndim=ndim)\n T0 = outs[1] if has_input else None\n check_results(outs[0], batch_size, ref_mat, T0, reverse_order)\n\ndef test_transform_scale_op(batch_size=3, num_threads=4, device_id=0):\n for scale, center, ndim in [((0.0, 1.0), None, None),\n ((2.0, 1.0, 3.0), None, None),\n ((2.0, 1.0), (1.0, 0.5), None),\n ((2.0, ), (1.0, 0.5), 2)]:\n for has_input in [False, True]:\n for reverse_order in [False, True] if has_input else [False]:\n yield check_transform_scale_op, scale, center, has_input, reverse_order, \\\n ndim, batch_size, num_threads, device_id,\n\ndef rotate_affine_mat(angle, axis = None, center = None):\n assert axis is None or len(axis) == 3\n ndim = 3 if axis is not None else 2\n assert center is None or len(center) == ndim\n\n angle_rad = angle * np.pi / 180.0\n if ndim == 2:\n c = np.cos(angle_rad)\n s = np.sin(angle_rad)\n r_mat = np.array(\n [[ c, -s, 0.],\n [ s, c, 0.],\n [ 0., 0., 1.]])\n else: # ndim == 3\n norm_axis = axis / np.linalg.norm(axis)\n r_mat = np.identity(ndim + 1)\n r_mat[:ndim, :ndim] = scipy_rotate.from_rotvec(angle_rad * norm_axis).as_matrix()\n if center is not None:\n neg_offset = [-x for x in center]\n t1_mat = translate_affine_mat(neg_offset)\n t2_mat = translate_affine_mat(center)\n affine_mat = np.dot(t2_mat, np.dot(r_mat, t1_mat))\n else:\n affine_mat = r_mat\n\n return affine_mat\n\ndef check_transform_rotation_op(angle=None, axis=None, center=None, has_input = False,\n reverse_order=False, batch_size=1, num_threads=4, device_id=0):\n assert axis is None or len(axis) == 3\n ndim = 3 if axis is not None else 2\n assert center is None or len(center) == ndim\n random_angle = angle is None\n\n pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed=12345)\n with pipe:\n outputs = []\n if random_angle:\n angle = fn.random.uniform(range=(-90, 90))\n\n if has_input:\n T0 = fn.random.uniform(range=(-1, 1), shape=(ndim, ndim+1))\n T1 = fn.transforms.rotation(T0, device='cpu', angle=angle, axis=axis, center=center, reverse_order=reverse_order)\n outputs = [T1, T0]\n else:\n T1 = fn.transforms.rotation(device='cpu', angle=angle, axis=axis, center=center)\n outputs = [T1]\n\n if random_angle:\n outputs.append(angle)\n\n pipe.set_outputs(*outputs)\n pipe.build()\n outs = pipe.run()\n out_idx = 1\n out_T0 = None\n out_angle = None\n if has_input:\n out_T0 = outs[out_idx]\n out_idx = out_idx + 1\n if random_angle:\n out_angle = outs[out_idx]\n out_idx = out_idx + 1\n for idx in range(batch_size):\n T0 = out_T0.at(idx) if has_input else None\n angle = out_angle.at(idx) if random_angle else angle\n ref_mat = rotate_affine_mat(angle=angle, axis=axis, center=center)\n check_results_sample(outs[0].at(idx), ref_mat, T0, reverse_order, atol=1e-6)\n\ndef test_transform_rotation_op(batch_size=3, num_threads=4, device_id=0):\n for angle, axis, center in [(None, None, None),\n (30.0, None, None),\n 
(None, None, (1.0, 0.5)),\n (30.0, None, (1.0, 0.5)),\n (40.0, (0.4, 0.3, 0.1), None),\n (40.0, (0.4, 0.3, 0.1), (1.0, -0.4, 10.0)),\n (None, (0.4, 0.3, 0.1), (1.0, -0.4, 10.0))]:\n for has_input in [False, True]:\n for reverse_order in [False, True] if has_input else [False]:\n yield check_transform_rotation_op, angle, axis, center, has_input, reverse_order, \\\n batch_size, num_threads, device_id\n\ndef shear_affine_mat(shear = None, angles = None, center = None):\n assert shear is not None or angles is not None\n\n if isinstance(shear, (list, tuple)):\n shear = np.float32(shear)\n if isinstance(angles, (list, tuple)):\n angles = np.float32(angles)\n\n if shear is None:\n shear = np.tan(angles * np.pi / 180.0)\n assert shear.size == 2 or shear.size == 6\n ndim = 3 if shear.size == 6 else 2\n assert center is None or len(center) == ndim\n\n if ndim == 2:\n sxy, syx = np.float32(shear).flatten()\n s_mat = np.array(\n [[ 1. , sxy, 0.],\n [ syx, 1., 0.],\n [ 0. , 0., 1.]])\n else: # ndim == 3\n sxy, sxz, syx, syz, szx, szy = np.float32(shear).flatten()\n s_mat = np.array(\n [[ 1 , sxy, sxz, 0 ],\n [ syx, 1, syz, 0 ],\n [ szx, szy, 1, 0 ],\n [ 0, 0, 0, 1 ]])\n\n if center is not None:\n neg_offset = [-x for x in center]\n t1_mat = translate_affine_mat(neg_offset)\n t2_mat = translate_affine_mat(center)\n affine_mat = np.dot(t2_mat, np.dot(s_mat, t1_mat))\n else:\n affine_mat = s_mat\n\n return affine_mat\n\ndef check_transform_shear_op(shear=None, angles=None, center=None, has_input = False, reverse_order=False, batch_size=1, num_threads=4, device_id=0):\n assert shear is not None or angles is not None\n if shear is not None:\n assert len(shear) == 2 or len(shear) == 6\n ndim = 3 if len(shear) == 6 else 2\n else:\n assert len(angles) == 2 or len(angles) == 6\n ndim = 3 if len(angles) == 6 else 2\n assert center is None or len(center) == ndim\n\n pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed = 1234)\n with pipe:\n if has_input:\n T0 = fn.random.uniform(range=(-1, 1), shape=(ndim, ndim+1))\n T1 = fn.transforms.shear(T0, device='cpu', shear=shear, angles=angles, center=center, reverse_order=reverse_order)\n pipe.set_outputs(T1, T0)\n else:\n T1 = fn.transforms.shear(device='cpu', shear=shear, angles=angles, center=center)\n pipe.set_outputs(T1)\n pipe.build()\n outs = pipe.run()\n ref_mat = shear_affine_mat(shear=shear, angles=angles, center=center)\n T0 = outs[1] if has_input else None\n check_results(outs[0], batch_size, ref_mat, T0, reverse_order, atol=1e-6)\n\n\ndef check_transform_shear_op_runtime_args(ndim, use_angles, use_center, has_input=False, reverse_order=False, batch_size=1, num_threads=4, device_id=0):\n pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed = 1234)\n with pipe:\n inputs = [fn.random.uniform(range=(-1, 1), shape=(ndim, ndim+1))] if has_input else []\n params = []\n angles_arg = None\n shear_arg = None\n center_arg = None\n if use_angles:\n angles_arg = fn.random.uniform(range=(-80,80), shape=[ndim, ndim-1])\n params.append(angles_arg)\n else:\n shear_arg = fn.random.uniform(range=(-2,2), shape=[ndim, ndim-1])\n params.append(shear_arg)\n if use_center:\n center_arg = fn.random.uniform(range=(-10,10), shape=[ndim])\n params.append(center_arg)\n\n T1 = fn.transforms.shear(*inputs, device='cpu', shear=shear_arg, angles=angles_arg, center=center_arg, reverse_order=reverse_order)\n pipe.set_outputs(T1, *inputs, *params)\n pipe.build()\n for _ in range(3):\n outs = pipe.run()\n T0 = outs[1] if 
has_input else None\n shear_param = outs[2 if has_input else 1]\n center_param = outs[3 if has_input else 2] if use_center else None\n for idx in range(batch_size):\n angles = None\n shear = None\n center = None\n if use_angles:\n angles = shear_param.at(idx)\n else:\n shear = shear_param.at(idx)\n if use_center:\n center = center_param.at(idx)\n ref_mat = shear_affine_mat(shear=shear, angles=angles, center=center)\n inp = T0.at(idx) if T0 is not None else None\n check_results_sample(outs[0].at(idx), ref_mat, inp, reverse_order, atol=1e-6)\n\ndef test_transform_shear_op(batch_size=3, num_threads=4, device_id=0):\n for shear, angles, center in [((1., 2.), None, None),\n ((1., 2.), None, (0.4, 0.5)),\n ((1., 2., 3., 4., 5., 6.), None, None),\n ((1., 2., 3., 4., 5., 6.), None, (0.4, 0.5, 0.6)),\n (None, (30., 10.), None),\n (None, (30., 10.), (0.4, 0.5)),\n (None, (40., 30., 10., 35., 25., 15.), None),\n (None, (40., 30., 10., 35., 25., 15.), (0.4, 0.5, 0.6))]:\n for has_input in [False, True]:\n for reverse_order in [False, True] if has_input else [False]:\n yield check_transform_shear_op, shear, angles, center, has_input, reverse_order, \\\n batch_size, num_threads, device_id\n\ndef test_transform_shear_op_runtime_args(batch_size=3, num_threads=4, device_id=0):\n for ndim in [2, 3]:\n for use_angles in [False, True]:\n for use_center in [False, True]:\n for has_input in [False, True]:\n for reverse_order in [False, True] if has_input else [False]:\n yield check_transform_shear_op_runtime_args, ndim, use_angles, use_center, has_input, reverse_order, 4, 4\n\n\ndef get_ndim(from_start, from_end, to_start, to_end):\n sizes = [len(a) for a in [from_start, from_end, to_start, to_end] if a is not None]\n ndim = max(sizes) if len(sizes) > 0 else 1\n for sz in sizes:\n assert sz == ndim or sz == 1\n return ndim\n\ndef expand_dims(from_start, from_end, to_start, to_end):\n ndim = get_ndim(from_start, from_end, to_start, to_end)\n def expand(arg, ndim, default_arg):\n if arg is None:\n return [default_arg] * ndim\n elif len(arg) == 1:\n return [arg[0]] * ndim\n else:\n assert len(arg) == ndim\n return arg\n return [expand(from_start, ndim, 0.), expand(from_end, ndim, 1.), expand(to_start, ndim, 0.), expand(to_end, ndim, 1.)]\n\n\ndef crop_affine_mat(from_start, from_end, to_start, to_end, absolute = False):\n from_start, from_end, to_start, to_end = (np.array(x) for x in expand_dims(from_start, from_end, to_start, to_end))\n if absolute:\n from_start, from_end = np.minimum(from_start, from_end), np.maximum(from_start, from_end)\n to_start, to_end = np.minimum(to_start, to_end), np.maximum(to_start, to_end)\n\n scale = (to_end - to_start) / (from_end - from_start)\n T1 = translate_affine_mat(-from_start)\n S = scale_affine_mat(scale)\n T2 = translate_affine_mat(to_start)\n affine_mat = np.dot(T2, np.dot(S, T1))\n return affine_mat\n\ndef check_transform_crop_op(from_start = None, from_end = None, to_start = None, to_end = None,\n absolute = False, has_input = False, reverse_order=False,\n batch_size=1, num_threads=4, device_id=0):\n ndim = get_ndim(from_start, from_end, to_start, to_end)\n pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed = 1234)\n with pipe:\n if has_input:\n T0 = fn.random.uniform(range=(-1, 1), shape=(ndim, ndim+1))\n T1 = fn.transforms.crop(T0, device='cpu',\n from_start=from_start, from_end=from_end,\n to_start=to_start, to_end=to_end,\n absolute=absolute,\n reverse_order=reverse_order)\n pipe.set_outputs(T1, T0)\n else:\n T1 = 
fn.transforms.crop(device='cpu',\n from_start=from_start, from_end=from_end,\n to_start=to_start, to_end=to_end,\n absolute=absolute)\n pipe.set_outputs(T1)\n pipe.build()\n outs = pipe.run()\n\n ref_mat = crop_affine_mat(from_start, from_end, to_start, to_end, absolute=absolute)\n T0 = outs[1] if has_input else None\n T1 = outs[0]\n check_results(T1, batch_size, ref_mat, T0, reverse_order, atol=1e-6)\n if not has_input:\n from_start, from_end, to_start, to_end = expand_dims(from_start, from_end, to_start, to_end)\n if absolute:\n from_start, from_end = np.minimum(from_start, from_end), np.maximum(from_start, from_end)\n to_start, to_end = np.minimum(to_start, to_end), np.maximum(to_start, to_end)\n for idx in range(batch_size):\n MT = T1.at(idx)\n M, T = MT[:ndim, :ndim], MT[:, ndim]\n assert np.allclose(np.dot(M, from_start) + T, to_start, atol=1e-6)\n assert np.allclose(np.dot(M, from_end) + T, to_end, atol=1e-6)\n\ndef test_transform_crop_op(batch_size=3, num_threads=4, device_id=0):\n for from_start, from_end, to_start, to_end in \\\n [(None, None, None, None),\n ((0.1, 0.2), (1., 1.2), (0.3, 0.2), (0.5, 0.6)),\n ((0.1, 0.2), (0.4, 0.9), None, None),\n ((0.2, 0.2), None, None, None),\n (None, (0.4, 0.9), None, None),\n ((0.1, 0.2, 0.3), (1., 1.2, 1.3), (0.3, 0.2, 0.1), (0.5, 0.6, 0.7)),\n ((0.1, 0.2, 0.3), (1., 1.2, 1.3), None, None)]:\n for has_input in [False, True]:\n for reverse_order in [False, True] if has_input else [False]:\n yield check_transform_crop_op, from_start, from_end, to_start, to_end, \\\n False, has_input, reverse_order, \\\n batch_size, num_threads, device_id\n # Reversed start and end\n for absolute in [False, True]:\n yield check_transform_crop_op, from_end, from_start, to_end, to_start, \\\n absolute, has_input, reverse_order, \\\n batch_size, num_threads, device_id\n\ndef check_combine_transforms(num_transforms = 2, ndim = 2, reverse_order = False,\n batch_size=1, num_threads=4, device_id=0):\n pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id)\n with pipe:\n transforms = [fn.random.uniform(range=(-1, 1), shape=(ndim, ndim+1), seed = 1234) for _ in range(num_transforms)]\n T = fn.transforms.combine(*transforms)\n pipe.set_outputs(T, *transforms)\n pipe.build()\n outs = pipe.run()\n for idx in range(batch_size):\n num_mats = len(outs) - 1\n assert num_mats >= 2\n mats = [np.identity(ndim+1) for _ in range(num_mats)]\n for in_idx in range(len(mats)):\n mats[in_idx][:ndim, :] = outs[1 + in_idx].at(idx)\n\n # by default we want to access them in opposite order\n if not reverse_order:\n mats.reverse()\n ref_mat = np.identity(ndim+1)\n for mat in mats:\n ref_mat = np.dot(mat, ref_mat)\n\n assert np.allclose(outs[0].at(idx), ref_mat[:ndim,:], atol=1e-6)\n\ndef test_combine_transforms(batch_size=3, num_threads=4, device_id=0):\n for num_transforms in [2, 3, 10]:\n for ndim in [2, 3, 6]:\n for reverse_order in [False, True]:\n yield check_combine_transforms, num_transforms, ndim, reverse_order, \\\n batch_size, num_threads, device_id\n\ndef test_combine_transforms_correct_order(batch_size=3, num_threads=4, device_id=0):\n ndim = 2\n pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id)\n with pipe:\n import nvidia.dali.fn.transforms as T\n t1 = T.translation(offset=(1, 2))\n t2 = T.rotation(angle=30.0)\n t12 = T.rotation(T.translation(offset=(1, 2)), angle=30.0)\n t21 = T.translation(T.rotation(angle=30.0), offset=(1, 2))\n pipe.set_outputs(T.combine(t1, t2), t12, T.combine(t1, t2, reverse_order=True), 
t21)\n pipe.build()\n outs = pipe.run()\n for idx in range(batch_size):\n assert np.allclose(outs[0].at(idx), outs[1].at(idx), atol=1e-6)\n assert np.allclose(outs[2].at(idx), outs[3].at(idx), atol=1e-6)\n\ndef verify_deprecation(callback):\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n # Trigger a warning.\n callback()\n # Verify DeprecationWarning\n assert len(w) == 1\n assert issubclass(w[-1].category, DeprecationWarning)\n assert \"WARNING: `transform_translation` is now deprecated. Use `transforms.translation` instead.\" \\\n == str(w[-1].message)\n\ndef test_transform_translation_deprecation():\n verify_deprecation(lambda : fn.transform_translation(offset=(0, 0)))\n verify_deprecation(lambda : ops.TransformTranslation(offset=(0, 0))())\n",
"# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom nvidia.dali.pipeline import Pipeline\nfrom nvidia.dali import pipeline_def\nimport nvidia.dali.fn as fn\nimport nvidia.dali.ops as ops\nimport nvidia.dali.types as types\nimport nvidia.dali as dali\nfrom nvidia.dali.backend_impl import TensorListGPU\nimport numpy as np\nimport math\nfrom numpy.testing import assert_array_equal, assert_allclose\nfrom functools import partial\nimport os\nimport cv2\nfrom test_utils import check_batch\nfrom test_utils import compare_pipelines\nfrom test_utils import RandomDataIterator\nfrom nose_utils import assert_raises\n\ntest_data_root = os.environ['DALI_EXTRA_PATH']\ncaffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')\n\n\nclass ReshapePipeline(Pipeline):\n def __init__(self, device, batch_size, relative, use_wildcard, num_threads=3, device_id=0, num_gpus=1):\n super(ReshapePipeline, self).__init__(batch_size, num_threads, device_id, seed=7865, exec_async=True, exec_pipelined=True)\n self.device = device\n self.input = ops.readers.Caffe(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)\n self.decode = ops.decoders.Image(device = \"cpu\", output_type = types.RGB)\n W = 320\n H = 224\n self.resize = ops.Resize(device = \"cpu\", resize_x = W, resize_y = H);\n WC = -1 if use_wildcard else W * 3\n if relative:\n rel_shape = (-1, 3) if use_wildcard else (1, 3)\n self.reshape = ops.Reshape(device = device, rel_shape = rel_shape, layout = \"ab\");\n else:\n self.reshape = ops.Reshape(device = device, shape = (H, WC), layout = \"ab\");\n\n def define_graph(self):\n jpegs, labels = self.input(name = \"Reader\")\n images = self.resize(self.decode(jpegs))\n if self.device == \"gpu\":\n images = images.gpu()\n reshaped = self.reshape(images)\n\n # `images+0` creates a (no-op) arithmetic expression node - this prevents the\n # original `images` node from being marked as pipeline output\n return [images+0, reshaped]\n\ndef CollapseChannels(image):\n new_shape = np.array([ image.shape[0], image.shape[1] * image.shape[2] ]).astype(np.int)\n return new_shape\n\ndef CollapseChannelsWildcard(image):\n new_shape = np.array([ image.shape[0], -1 ]).astype(np.int)\n return new_shape\n\nclass ReshapeWithInput(Pipeline):\n def __init__(self, device, batch_size, use_wildcard, num_threads=3, device_id=0, num_gpus=1):\n super(ReshapeWithInput, self).__init__(batch_size, num_threads, device_id, seed=7865, exec_async=False, exec_pipelined=False)\n self.device = device\n self.input = ops.readers.Caffe(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)\n self.decode = ops.decoders.Image(device = \"cpu\", output_type = types.RGB)\n fn = CollapseChannelsWildcard if use_wildcard else CollapseChannels\n self.gen_shapes = ops.PythonFunction(function=fn)\n self.reshape = ops.Reshape(device = device, layout = \"ab\");\n\n def define_graph(self):\n jpegs, labels = self.input(name = \"Reader\")\n images_cpu = self.decode(jpegs)\n 
shapes = self.gen_shapes(images_cpu)\n images = images_cpu.gpu() if self.device == \"gpu\" else images_cpu\n reshaped = self.reshape(images, shapes)\n\n return [images, reshaped]\n\ndef MakeTallFunc(relative, wildcard):\n def func(image):\n if relative:\n return np.array([ -1 if wildcard else 2, 0.5, 1]).astype(np.float32)\n else:\n h, w, c = image.shape\n return np.array([ -1 if wildcard else 2*h, w/2, c]).astype(np.int)\n return func\n\nclass ReshapeWithArgInput(Pipeline):\n def __init__(self, device, batch_size, relative, use_wildcard, num_threads=3, device_id=0, num_gpus=1):\n super(ReshapeWithArgInput, self).__init__(batch_size, num_threads, device_id, seed=7865, exec_async=False, exec_pipelined=False)\n self.device = device\n self.input = ops.readers.Caffe(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)\n self.resize = ops.Resize(device = \"cpu\");\n self.decode = ops.decoders.Image(device = \"cpu\", output_type = types.RGB)\n self.gen_shapes = ops.PythonFunction(function=MakeTallFunc(relative, use_wildcard))\n self.reshape = ops.Reshape(device = device);\n self.relative = relative\n\n def define_graph(self):\n jpegs, labels = self.input(name = \"Reader\")\n images_cpu = self.decode(jpegs)\n\n rng = ops.random.Uniform(range=[100,128])\n cast = ops.Cast(dtype=types.INT32)\n widths = cast(rng()) * 2.0\n heights = cast(rng()) * 2.0\n images_cpu = self.resize(images_cpu, resize_x = widths, resize_y = heights)\n\n shapes = self.gen_shapes(images_cpu)\n images = images_cpu.gpu() if self.device == \"gpu\" else images_cpu\n if self.relative:\n reshaped = self.reshape(images, rel_shape = shapes)\n else:\n reshaped = self.reshape(images, shape = shapes)\n\n return [images, reshaped]\n\ndef verify_tensor_layouts(imgs, reshaped):\n assert imgs.layout() == \"HWC\"\n assert reshaped.layout() == \"ab\"\n for i in range(len(imgs)):\n assert imgs[i].layout() == \"HWC\"\n assert reshaped[i].layout() == \"ab\"\n\ndef verify_flatten(imgs, reshaped, src_shape = None):\n assert imgs.layout() == \"HWC\"\n assert reshaped.layout() == \"ab\"\n for i in range(len(imgs)):\n if src_shape is not None:\n assert imgs.at(i).shape == src_shape\n img_shape = imgs.at(i).shape\n # collapse width and channels\n ref_shape = (img_shape[0], img_shape[1] * img_shape[2])\n assert reshaped.at(i).shape == ref_shape\n assert_array_equal(imgs.at(i).flatten(), reshaped.at(i).flatten())\n\ndef verify_make_tall(imgs, reshaped, src_shape = None):\n assert imgs.layout() == \"HWC\"\n assert reshaped.layout() == \"HWC\"\n for i in range(len(imgs)):\n if src_shape is not None:\n assert imgs.at(i).shape == src_shape\n img_shape = imgs.at(i).shape\n # collapse width and channels\n ref_shape = (img_shape[0] * 2, img_shape[1] // 2, 3)\n assert reshaped.at(i).shape == ref_shape\n assert_array_equal(imgs.at(i).flatten(), reshaped.at(i).flatten())\n\n\ndef check_reshape(device, batch_size, relative, use_wildcard):\n pipe = ReshapePipeline(device, batch_size, relative, use_wildcard)\n pipe.build()\n for iter in range(10):\n imgs, reshaped = pipe.run()\n if device == \"gpu\":\n verify_tensor_layouts(imgs, reshaped)\n imgs = imgs.as_cpu()\n reshaped = reshaped.as_cpu()\n verify_flatten(imgs, reshaped, (224, 320, 3))\n\ndef check_reshape_with_input(device, batch_size, use_wildcard):\n pipe = ReshapeWithInput(device, batch_size, use_wildcard)\n pipe.build()\n for iter in range(2):\n imgs, reshaped = pipe.run()\n if device == \"gpu\":\n verify_tensor_layouts(imgs, reshaped)\n imgs = imgs.as_cpu()\n reshaped = reshaped.as_cpu()\n 
verify_flatten(imgs, reshaped)\n\ndef check_reshape_with_arg_input(device, batch_size, relative, use_wildcard):\n pipe = ReshapeWithArgInput(device, batch_size, relative, use_wildcard)\n pipe.build()\n for iter in range(2):\n imgs, reshaped = pipe.run()\n if device == \"gpu\":\n imgs = imgs.as_cpu()\n reshaped = reshaped.as_cpu()\n verify_make_tall(imgs, reshaped)\n\ndef test_reshape_arg():\n for device in [\"cpu\", \"gpu\"]:\n for batch_size in [16]:\n for relative in [False, True]:\n for use_wildcard in [False, True]:\n yield check_reshape, device, batch_size, relative, use_wildcard\n\ndef test_reshape_input():\n for device in [\"cpu\", \"gpu\"]:\n for batch_size in [16]:\n for use_wildcard in [False, True]:\n yield check_reshape_with_input, device, batch_size, use_wildcard\n\ndef test_reshape_arg_input():\n for device in [\"cpu\", \"gpu\"]:\n for batch_size in [16]:\n for relative in [False, True]:\n for use_wildcard in [False, True]:\n yield check_reshape_with_arg_input, device, batch_size, relative, use_wildcard\n\nclass ReinterpretPipelineWithDefaultShape(Pipeline):\n def __init__(self, device, batch_size, num_threads=3, device_id=0, num_gpus=1):\n super(ReinterpretPipelineWithDefaultShape, self).__init__(batch_size, num_threads, device_id, seed=7865, exec_async=True, exec_pipelined=True)\n self.device = device\n self.ext_src = ops.ExternalSource()\n self.reinterpret = ops.Reinterpret(device = device, dtype = types.INT32)\n\n def define_graph(self):\n input = self.input = self.ext_src()\n if self.device == \"gpu\":\n input = input.gpu()\n reinterpreted = self.reinterpret(input)\n\n # `input+0` creates a (no-op) arithmetic expression node - this prevents the\n # original `input` node from being marked as pipeline output\n return [input, reinterpreted]\n\n def iter_setup(self):\n data = []\n for i in range(self.batch_size):\n shape = np.random.randint(4, 20, size = [2])\n shape[1] &= -4 # align to 4\n data.append(np.random.randint(0, 255, shape, dtype = np.uint8))\n self.feed_input(self.input, data)\n\n\ndef _test_reinterpret_default_shape(device):\n np.random.seed(31337)\n batch_size = 4\n pipe = ReinterpretPipelineWithDefaultShape(device, batch_size)\n pipe.build()\n pipe_outs = pipe.run()\n in_batch = pipe_outs[0].as_cpu() if device == \"gpu\" else pipe_outs[0]\n out_batch = pipe_outs[1].as_cpu() if device == \"gpu\" else pipe_outs[1]\n for i in range(batch_size):\n ref = in_batch.at(i).view(dtype = np.int32)\n out = out_batch.at(i)\n assert_array_equal(ref, out)\n\ndef test_reinterpret_default_shape():\n for device in [\"cpu\", \"gpu\"]:\n yield _test_reinterpret_default_shape, device\n\n\nclass ReinterpretPipelineWildcardDim(Pipeline):\n def __init__(self, device, batch_size, num_threads=3, device_id=0, num_gpus=1):\n super(ReinterpretPipelineWildcardDim, self).__init__(batch_size, num_threads, device_id, seed=7865, exec_async=True, exec_pipelined=True)\n self.device = device\n self.ext_src = ops.ExternalSource()\n self.reinterpret = ops.Reinterpret(device = device, shape = (20, 2), dtype = types.INT32)\n\n def define_graph(self):\n input = self.input = self.ext_src()\n if self.device == \"gpu\":\n input = input.gpu()\n reinterpreted = self.reinterpret(input)\n\n # `input+0` creates a (no-op) arithmetic expression node - this prevents the\n # original `input` node from being marked as pipeline output\n return [input, reinterpreted]\n\n def iter_setup(self):\n data = [np.random.randint(0, 255, [10, 16], dtype = np.uint8) for i in range(self.batch_size)]\n 
self.feed_input(self.input, data)\n\n\ndef _test_reinterpret_wildcard_shape(device):\n np.random.seed(31337)\n batch_size = 4\n pipe = ReinterpretPipelineWildcardDim(device, batch_size)\n pipe.build()\n pipe_outs = pipe.run()\n in_batch = pipe_outs[0].as_cpu() if device == \"gpu\" else pipe_outs[0]\n out_batch = pipe_outs[1].as_cpu() if device == \"gpu\" else pipe_outs[1]\n for i in range(batch_size):\n ref = in_batch.at(i).view(dtype = np.int32).reshape([20, 2])\n out = out_batch.at(i)\n assert_array_equal(ref, out)\n\ndef test_reinterpret_wildcard_shape():\n for device in [\"cpu\", \"gpu\"]:\n yield _test_reinterpret_wildcard_shape, device\n\ndef get_data(shapes):\n return [np.empty(shape, dtype = np.uint8) for shape in shapes]\n\n@pipeline_def\ndef reshape_pipe(shapes, src_dims=None, rel_shape=None):\n data = fn.external_source(lambda: get_data(shapes), batch=True, device = \"cpu\")\n return fn.reshape(data, src_dims=src_dims, rel_shape=rel_shape)\n\ndef _testimpl_reshape_src_dims_arg(src_dims, rel_shape, shapes, expected_out_shapes):\n batch_size = len(shapes)\n pipe = reshape_pipe(batch_size=batch_size, num_threads=1, device_id=0, shapes=shapes, src_dims=src_dims, rel_shape=rel_shape)\n pipe.build()\n for _ in range(3):\n outs = pipe.run()\n for i in range(batch_size):\n out_arr = np.array(outs[0][i])\n assert out_arr.shape == expected_out_shapes[i]\n\ndef test_reshape_src_dims_arg():\n # src_dims, rel_shape, shapes, expected_out_shapes\n args = [\n ([0, 1], None, [[200, 300, 1], [300, 400, 1]], [(200, 300), (300, 400)]),\n ([1, 2, 0], None, [[10, 20, 30], [30, 20, 10], [2, 1, 3]], [(20, 30, 10), (20, 10, 30), (1, 3, 2)]),\n ([1], None, [[1, 2, 1], [1, 3, 1]], [(2,), (3,)]),\n ([2, -1, 1, 0], None, [[10, 20, 30]], [(30, 1, 20, 10)]),\n ([-1, 2], None, [[1, 1, 30], [1, 1, 70]], [(1, 30), (1, 70)]),\n ([2, 0, 1], [0.5, 0.5, -1], [[200, 300, 100]], [(50, 100, 1200)]),\n ([], None, [[1]], [()]),\n ]\n for src_dims, rel_shape, shapes, expected_out_shapes in args:\n yield _testimpl_reshape_src_dims_arg, src_dims, rel_shape, shapes, expected_out_shapes\n\ndef test_reshape_src_dims_throw_error():\n args = [\n ([2, 0], None, [[20, 10, 20]],\n \"Reshape: The volume of the new shape should match the one of the original shape\\. Requested a shape with \\d* elements but the original shape has \\d* elements\\.\"),\n ([2, 0, 1], [1, -1], [[1, 2, 3]],\n \"Reshape: ``src_dims`` and ``rel_shape`` have different lengths: \\d* vs \\d*\"),\n ([0, 1, 3], None, [1, 2, 3], \"Reshape:.*is out of bounds.*\"),\n ]\n for src_dims, rel_shape, shapes, err_regex in args:\n pipe = reshape_pipe(batch_size=len(shapes), num_threads=1, device_id=0, shapes=shapes,\n src_dims=src_dims, rel_shape=rel_shape)\n pipe.build()\n with assert_raises(RuntimeError, regex=err_regex):\n pipe.run()\n",
"# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.fn as fn\nimport nvidia.dali.types as types\nimport numpy as np\nimport math\nimport os\nimport cv2\nfrom test_utils import get_dali_extra_path\n\ndata_root = get_dali_extra_path()\nimg_dir = os.path.join(data_root, 'db', 'single', 'jpeg')\n\ndef get_pipeline(device, batch_size, tile, ratio, angle):\n pipe = Pipeline(batch_size, 4, 0)\n with pipe:\n input, _ = fn.readers.file(file_root=img_dir)\n decoded = fn.decoders.image(input, device='cpu', output_type=types.RGB)\n decoded = decoded.gpu() if device == 'gpu' else decoded\n grided = fn.grid_mask(decoded, device=device, tile=tile, ratio=ratio, angle=angle)\n pipe.set_outputs(grided, decoded)\n return pipe\n\ndef get_random_pipeline(device, batch_size):\n pipe = Pipeline(batch_size, 4, 0)\n with pipe:\n input, _ = fn.readers.file(file_root=img_dir)\n decoded = fn.decoders.image(input, device='cpu', output_type=types.RGB)\n decoded = decoded.gpu() if device == 'gpu' else decoded\n tile = fn.cast(fn.random.uniform(range=(50, 200)), dtype=types.INT32)\n ratio = fn.random.uniform(range=(0.3, 0.7))\n angle = fn.random.uniform(range=(-math.pi, math.pi))\n grided = fn.grid_mask(decoded, device=device, tile=tile, ratio=ratio, angle=angle)\n pipe.set_outputs(grided, decoded, tile, ratio, angle)\n return pipe\n\ndef get_mask(w, h, tile, ratio, angle, d):\n ca = math.cos(angle)\n sa = math.sin(angle)\n b = tile * ratio\n\n i = np.tile(np.arange(w), (h, 1))\n j = np.transpose(np.tile(np.arange(h), (w, 1)))\n x = i * ca - j * sa\n y = i * sa + j * ca\n m = np.logical_or(((x+d) % tile > b+2*d), ((y+d) % tile > b+2*d))\n return m\n\ndef check(result, input, tile, ratio, angle):\n result = np.uint8(result)\n input = np.uint8(input)\n w = result.shape[1]\n h = result.shape[0]\n eps = 0.1\n\n # inside of squares should be black\n mask = np.uint8(1 - get_mask(w, h, tile, ratio, angle, -eps))\n result2 = cv2.bitwise_and(result, result, mask=mask)\n assert not np.any(result2)\n\n # outside of squares should be same as input\n mask = np.uint8(get_mask(w, h, tile, ratio, angle, eps))\n result2 = cv2.bitwise_and(result, result, mask=mask)\n input2 = cv2.bitwise_and(input, input, mask=mask)\n assert np.all(result2 == input2)\n\ndef test_gridmask_vs_cv():\n batch_size = 4\n for device in ['cpu', 'gpu']:\n for (tile, ratio, angle) in [(40, 0.5, 0),\n (100, 0.1, math.pi / 2),\n (200, 0.7, math.pi / 3),\n (150, 1/3, math.pi / 4),\n (50, 0.532, 1),\n (51, 0.38158387, 2.6810782),\n (123, 0.456, 0.789)]:\n pipe = get_pipeline(device, batch_size, tile, ratio, angle)\n pipe.build()\n results, inputs = pipe.run()\n if device == 'gpu':\n results, inputs = results.as_cpu(), inputs.as_cpu()\n for i in range(batch_size):\n yield check, results[i], inputs[i], tile, ratio, angle\n\ndef test_gridmask_vs_cv_random():\n batch_size = 4\n for device in ['cpu', 'gpu']:\n pipe = 
get_random_pipeline(device, batch_size)\n pipe.build()\n for _ in range(16):\n results, inputs, tiles, ratios, angles = pipe.run()\n if device == 'gpu':\n results, inputs = results.as_cpu(), inputs.as_cpu()\n for i in range(batch_size):\n tile = np.int32(tiles[i])\n ratio = np.float32(ratios[i])\n angle = np.float32(angles[i])\n yield check, results[i], inputs[i], tile, ratio, angle\n",
"# Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.ops as ops\nimport nvidia.dali.types as types\nimport numpy as np\nimport random\nfrom test_utils import compare_pipelines\nfrom test_utils import RandomlyShapedDataIterator\n\nclass LookupTablePipeline(Pipeline):\n def __init__(self, device, batch_size, iterator, data_shape, data_layout, dtype, num_threads=1,\n device_id=0, dictionary={}, default_value=0.0):\n super(LookupTablePipeline, self).__init__(batch_size,\n num_threads,\n device_id)\n self.device = device\n self.iterator = iterator\n self.inputs = ops.ExternalSource()\n self.data_shape = data_shape\n self.data_layout = data_layout\n\n if dictionary:\n keys = [k for k in dictionary.keys()]\n values = [dictionary[k] for k in keys]\n self.lookup = ops.LookupTable(device = self.device,\n dtype = dtype,\n default_value = default_value,\n keys = keys,\n values = values)\n else:\n self.lookup = ops.LookupTable(device = self.device,\n dtype = dtype,\n default_value = default_value)\n\n def define_graph(self):\n self.data = self.inputs()\n input_data = self.data.gpu() if self.device == 'gpu' else self.data\n out = self.lookup(input_data)\n return out\n\n def iter_setup(self):\n data = self.iterator.next()\n self.feed_input(self.data, data, layout=self.data_layout)\n\nclass LookupTablePythonOpPipeline(Pipeline):\n def __init__(self, function, batch_size, iterator, data_shape, data_layout, dtype,\n num_threads=1, device_id=0, dictionary={}, default_value=0.0):\n super(LookupTablePythonOpPipeline, self).__init__(batch_size,\n num_threads,\n device_id,\n exec_async=False,\n exec_pipelined=False)\n self.iterator = iterator\n self.inputs = ops.ExternalSource()\n self.data_shape = data_shape\n self.data_layout = data_layout\n\n def lookup_table_func(input_data):\n return function(input_data,\n dictionary=dictionary,\n default_value=default_value)\n self.lookup = ops.PythonFunction(function=lookup_table_func, output_layouts=data_layout, batch_processing=False)\n self.cast = ops.Cast(dtype=dtype)\n\n def define_graph(self):\n self.data = self.inputs()\n out = self.lookup(self.data)\n out = self.cast(out)\n return out\n\n def iter_setup(self):\n data = self.iterator.next()\n self.feed_input(self.data, data, layout=self.data_layout)\n\ndef lookup_func(image, dictionary, default_value):\n arr = [default_value for k in range(0x1000)]\n for k in dictionary.keys():\n arr[k] = dictionary[k]\n lut = np.array(arr)\n return lut[image]\n\ndef check_lookup_table_vs_python_op(device, batch_size, layout, shape, dtype, dictionary_type, default_value):\n eii1 = RandomlyShapedDataIterator(batch_size, max_shape=shape)\n eii2 = RandomlyShapedDataIterator(batch_size, max_shape=shape)\n if dictionary_type == 'empty':\n dictionary = {}\n elif dictionary_type == 'random':\n dictionary = {k: random.random() for k in range(256)}\n elif dictionary_type == 'small':\n dictionary = {0: 
0.1, 200: 0.99}\n else:\n assert(False)\n compare_pipelines(LookupTablePipeline(device, batch_size, iter(eii1),\n data_shape=shape, data_layout=layout, dtype=dtype,\n dictionary=dictionary,\n default_value=default_value),\n LookupTablePythonOpPipeline(lookup_func, batch_size, iter(eii2),\n data_shape=shape, data_layout=layout, dtype=dtype,\n dictionary=dictionary,\n default_value=default_value),\n batch_size=batch_size, N_iterations=3)\n\ndef test_lookup_table_vs_python_op():\n layout = types.NHWC\n for device in {'cpu', 'gpu'}:\n for dtype in {types.FLOAT, types.FLOAT16, types.INT64}:\n for batch_size, shape, dictionary_type, default_value in \\\n [(1, (300, 300, 3), 'random', 0.0),\n (1, (300, 300, 3), 'empty', 0.33),\n (10, (300, 300, 3), 'random', 0.9),\n (3, (300, 300, 3), 'small', 0.4)]:\n yield check_lookup_table_vs_python_op, device, batch_size, layout, shape, dtype, dictionary_type, default_value\n",
"# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Region Similarity Calculators for BoxLists.\n\nRegion Similarity Calculators compare a pairwise measure of similarity\nbetween the boxes in two BoxLists.\n\"\"\"\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\nimport tensorflow.compat.v1 as tf\n\n\ndef area(boxlist, scope=None):\n \"\"\"Computes area of boxes.\n\n Args:\n boxlist: BoxList holding N boxes\n scope: name scope.\n\n Returns:\n a tensor with shape [N] representing box areas.\n \"\"\"\n with tf.name_scope(scope, \"Area\"):\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1\n )\n return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])\n\n\ndef intersection(boxlist1, boxlist2, scope=None):\n \"\"\"Compute pairwise intersection areas between boxes.\n\n Args:\n boxlist1: BoxList holding N boxes\n boxlist2: BoxList holding M boxes\n scope: name scope.\n\n Returns:\n a tensor with shape [N, M] representing pairwise intersections\n \"\"\"\n with tf.name_scope(scope, \"Intersection\"):\n y_min1, x_min1, y_max1, x_max1 = tf.split(\n value=boxlist1.get(), num_or_size_splits=4, axis=1\n )\n y_min2, x_min2, y_max2, x_max2 = tf.split(\n value=boxlist2.get(), num_or_size_splits=4, axis=1\n )\n all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))\n all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))\n intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)\n all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))\n all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))\n intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)\n return intersect_heights * intersect_widths\n\n\ndef iou(boxlist1, boxlist2, scope=None):\n \"\"\"Computes pairwise intersection-over-union between box collections.\n\n Args:\n boxlist1: BoxList holding N boxes\n boxlist2: BoxList holding M boxes\n scope: name scope.\n\n Returns:\n a tensor with shape [N, M] representing pairwise iou scores.\n \"\"\"\n with tf.name_scope(scope, \"IOU\"):\n intersections = intersection(boxlist1, boxlist2)\n areas1 = area(boxlist1)\n areas2 = area(boxlist2)\n unions = tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections\n return tf.where(\n tf.equal(intersections, 0.0),\n tf.zeros_like(intersections),\n tf.truediv(intersections, unions),\n )\n\n\nclass RegionSimilarityCalculator(object):\n \"\"\"Abstract base class for region similarity calculator.\"\"\"\n\n __metaclass__ = ABCMeta\n\n def compare(self, boxlist1, boxlist2, scope=None):\n \"\"\"Computes matrix of pairwise similarity between BoxLists.\n\n This op (to be overridden) computes a measure of pairwise similarity between\n the boxes in the given BoxLists. 
Higher values indicate more similarity.\n\n Note that this method simply measures similarity and does not explicitly\n perform a matching.\n\n Args:\n boxlist1: BoxList holding N boxes.\n boxlist2: BoxList holding M boxes.\n scope: Op scope name. Defaults to 'Compare' if None.\n\n Returns:\n a (float32) tensor of shape [N, M] with pairwise similarity score.\n \"\"\"\n with tf.name_scope(scope, \"Compare\", [boxlist1, boxlist2]) as scope:\n return self._compare(boxlist1, boxlist2)\n\n @abstractmethod\n def _compare(self, boxlist1, boxlist2):\n pass\n\n\nclass IouSimilarity(RegionSimilarityCalculator):\n \"\"\"Class to compute similarity based on Intersection over Union (IOU) metric.\n\n This class computes pairwise similarity between two BoxLists based on IOU.\n \"\"\"\n\n def _compare(self, boxlist1, boxlist2):\n \"\"\"Compute pairwise IOU similarity between the two BoxLists.\n\n Args:\n boxlist1: BoxList holding N boxes.\n boxlist2: BoxList holding M boxes.\n\n Returns:\n A tensor with shape [N, M] representing pairwise iou scores.\n \"\"\"\n return iou(boxlist1, boxlist2)\n",
"# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\n\nfrom nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.fn as fn\nimport nvidia.dali.types as types\nimport numpy as np\nimport os\nfrom test_utils import get_dali_extra_path\n\n\nDEBUG_LVL = 0\nSHOW_IMAGES = False\n\nnp.random.seed(1234)\n\ndata_root = get_dali_extra_path()\nimg_dir = os.path.join(data_root, 'db', 'single', 'jpeg')\n\n\nnp_type_map = {\n types.UINT8: np.uint8,\n types.UINT16: np.uint16,\n types.UINT32: np.uint32,\n types.UINT64: np.uint64,\n types.FLOAT16: np.float16,\n types.FLOAT: np.float32,\n types.FLOAT64: np.float64,\n types.INT8: np.int8,\n types.INT16: np.int16,\n types.INT32: np.int32,\n types.INT64: np.int64,\n}\n\n\ndef intersects(anchors1, shapes1, anchors2, shapes2):\n for i in range(len(anchors1)):\n if anchors1[i] + shapes1[i] <= anchors2[i] or anchors2[i] + shapes2[i] <= anchors1[i]:\n return False\n return True\n\n\ndef prepare_cuts(\n iters=4,\n batch_size=16,\n input_size=None,\n output_size=None,\n even_paste_count=False,\n no_intersections=False,\n full_input=False,\n in_anchor_top_left=False,\n in_anchor_range=None,\n out_anchor_top_left=False,\n out_anchor_range=None,\n out_of_bounds_count=0,\n):\n # Those two will not work together\n assert(out_of_bounds_count == 0 or not no_intersections)\n\n in_idx_l = [np.zeros(shape=(0,), dtype=np.int32) for _ in range(batch_size)]\n in_anchors_l = [np.zeros(shape=(0, 2), dtype=np.int32) for _ in range(batch_size)]\n shapes_l = [np.zeros(shape=(0, 2), dtype=np.int32) for _ in range(batch_size)]\n out_anchors_l = [np.zeros(shape=(0, 2), dtype=np.int32) for _ in range(batch_size)]\n assert len(input_size) == len(output_size)\n dim = len(input_size)\n for i in range(batch_size):\n for j in range(iters):\n while True:\n in_idx = np.int32(np.random.randint(batch_size))\n out_idx = np.int32(i if even_paste_count else np.random.randint(batch_size))\n shape = [np.int32(\n np.random.randint(\n min(input_size[i], output_size[i]) // (iters if no_intersections else 1)\n ) + 1\n ) for i in range(dim)] if not full_input else input_size\n\n if in_anchor_top_left:\n in_anchor = [0] * dim\n elif in_anchor_range is not None:\n in_anchor = [np.int32(np.random.randint(in_anchor_range[0][i], in_anchor_range[1][i])) for i in range(dim)]\n if full_input:\n shape = [np.int32(input_size[i] - in_anchor[i]) for i in range(dim)]\n else:\n in_anchor = [np.int32(np.random.randint(input_size[i] - shape[i] + 1)) for i in range(dim)]\n\n if out_anchor_top_left:\n out_anchor = [0] * dim\n elif out_anchor_range is not None:\n out_anchor = [np.int32(np.random.randint(out_anchor_range[0][i], out_anchor_range[1][i])) for i in range(dim)]\n else:\n out_anchor = [np.int32(np.random.randint(output_size[i] - shape[i] + 1)) for i in range(dim)]\n\n if no_intersections:\n is_ok = True\n for k in range(len(in_idx_l[out_idx])):\n if intersects(out_anchors_l[out_idx][k], shapes_l[out_idx][k], out_anchor, shape):\n 
is_ok = False\n break\n if not is_ok:\n continue\n break\n break\n\n if DEBUG_LVL >= 1:\n print(f\"\"\"in_idx: {in_idx}, out_idx: {out_idx}, in_anchor: {\n in_anchor}, in_shape: {shape}, out_anchor: {out_anchor}\"\"\")\n\n in_idx_l[out_idx] = np.append(in_idx_l[out_idx], [in_idx], axis=0)\n in_anchors_l[out_idx] = np.append(in_anchors_l[out_idx], [in_anchor], axis=0)\n shapes_l[out_idx] = np.append(shapes_l[out_idx], [shape], axis=0)\n out_anchors_l[out_idx] = np.append(out_anchors_l[out_idx], [out_anchor], axis=0)\n for i in range(out_of_bounds_count):\n clip_out_idx = np.random.randint(batch_size)\n while len(in_idx_l[clip_out_idx]) == 0:\n clip_out_idx = np.random.randint(batch_size)\n clip_in_idx = np.random.randint(len(in_idx_l[clip_out_idx]))\n change_in = np.random.randint(2) == 0\n below_zero = np.random.randint(2) == 0\n change_dim_idx = np.random.randint(dim)\n if below_zero:\n (in_anchors_l if change_in else out_anchors_l)[clip_out_idx][clip_in_idx][change_dim_idx] = \\\n np.int32(np.random.randint(5) - 5)\n else:\n (in_anchors_l if change_in else out_anchors_l)[clip_out_idx][clip_in_idx][change_dim_idx] = \\\n np.int32(\n (input_size if change_in else output_size)[change_dim_idx] -\n shapes_l[clip_out_idx][clip_in_idx][change_dim_idx] +\n np.random.randint(5) + 1\n )\n\n return in_idx_l, in_anchors_l, shapes_l, out_anchors_l\n\n\ndef get_pipeline(\n batch_size=4,\n in_size=None,\n out_size=None,\n even_paste_count=False,\n k=4,\n dtype=types.UINT8,\n no_intersections=True,\n full_input=False,\n in_anchor_top_left=False,\n in_anchor_range=None,\n out_anchor_top_left=False,\n out_anchor_range=None,\n use_gpu=False,\n num_out_of_bounds=0\n):\n pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0, seed=np.random.randint(12345))\n with pipe:\n input, _ = fn.readers.file(file_root=img_dir)\n decoded = fn.decoders.image(input, device='cpu', output_type=types.RGB)\n resized = fn.resize(decoded, resize_x=in_size[1], resize_y=in_size[0])\n in_idx_l, in_anchors_l, shapes_l, out_anchors_l = prepare_cuts(\n k, batch_size, in_size, out_size, even_paste_count,\n no_intersections, full_input, in_anchor_top_left, in_anchor_range,\n out_anchor_top_left, out_anchor_range, num_out_of_bounds)\n in_idx = fn.external_source(lambda: in_idx_l)\n in_anchors = fn.external_source(lambda: in_anchors_l)\n shapes = fn.external_source(lambda: shapes_l)\n out_anchors = fn.external_source(lambda: out_anchors_l)\n kwargs = {\n \"in_ids\": in_idx,\n \"output_size\": out_size,\n \"dtype\": dtype\n }\n\n if not full_input:\n kwargs[\"shapes\"] = shapes\n\n if not in_anchor_top_left:\n kwargs[\"in_anchors\"] = in_anchors\n\n if not out_anchor_top_left:\n kwargs[\"out_anchors\"] = out_anchors\n\n pasted = fn.multi_paste(resized.gpu() if use_gpu else resized, **kwargs)\n pipe.set_outputs(pasted, resized)\n return pipe, in_idx_l, in_anchors_l, shapes_l, out_anchors_l\n\n\ndef verify_out_of_bounds(batch_size, in_idx_l, in_anchors_l, shapes_l, out_anchors_l, in_size, out_size):\n for i in range(batch_size):\n for j, idx in enumerate(in_idx_l[i]):\n dim = len(in_anchors_l[i][j])\n for d in range(dim):\n if in_anchors_l[i][j][d] < 0 or out_anchors_l[i][j][d] < 0 or \\\n in_anchors_l[i][j][d] + shapes_l[i][j][d] > in_size[d] or \\\n out_anchors_l[i][j][d] + shapes_l[i][j][d] > out_size[d]:\n return True\n return False\n\n\n\ndef manual_verify(batch_size, inp, output, in_idx_l, in_anchors_l, shapes_l, out_anchors_l, out_size_l, dtype):\n for i in range(batch_size):\n out = output.at(i)\n out_size = 
out_size_l[i]\n assert out.shape == out_size\n ref = np.zeros(out.shape)\n for j, idx in enumerate(in_idx_l[i]):\n roi_start = in_anchors_l[i][j]\n roi_end = roi_start + shapes_l[i][j]\n out_start = out_anchors_l[i][j]\n out_end = out_start + shapes_l[i][j]\n ref[out_start[0]:out_end[0], out_start[1]:out_end[1]] = inp.at(idx)[roi_start[0]:roi_end[0],\n roi_start[1]:roi_end[1]]\n ref = ref.astype(np_type_map[dtype])\n if DEBUG_LVL > 0 and not np.array_equal(out, ref):\n print(f\"Error on image {i}\")\n import PIL.Image\n PIL.Image.fromarray(out).save(\"multipaste_out.png\")\n PIL.Image.fromarray(ref).save(\"multipaste_ref.png\")\n assert np.array_equal(out, ref)\n\n\ndef show_images(batch_size, image_batch):\n import matplotlib.gridspec as gridspec\n import matplotlib.pyplot as plt\n columns = 4\n rows = (batch_size + 1) // (columns)\n fig = plt.figure(figsize=(32, (32 // columns) * rows))\n gs = gridspec.GridSpec(rows, columns)\n for j in range(rows * columns):\n plt.subplot(gs[j])\n plt.axis(\"off\")\n plt.imshow(image_batch.at(j))\n plt.show()\n\n\ndef check_operator_multipaste(bs, pastes, in_size, out_size, even_paste_count, no_intersections, full_input, in_anchor_top_left,\n in_anchor_range, out_anchor_top_left, out_anchor_range, out_dtype, num_out_of_bounds, device):\n pipe, in_idx_l, in_anchors_l, shapes_l, out_anchors_l = get_pipeline(\n batch_size=bs,\n in_size=in_size,\n out_size=out_size,\n even_paste_count=even_paste_count,\n k=pastes,\n dtype=out_dtype,\n no_intersections=no_intersections,\n full_input=full_input,\n in_anchor_top_left=in_anchor_top_left,\n in_anchor_range=in_anchor_range,\n out_anchor_top_left=out_anchor_top_left,\n out_anchor_range=out_anchor_range,\n num_out_of_bounds=num_out_of_bounds,\n use_gpu=device == 'gpu'\n )\n pipe.build()\n try:\n result, input = pipe.run()\n r = result.as_cpu() if device == 'gpu' else result\n if SHOW_IMAGES:\n show_images(bs, r)\n assert not verify_out_of_bounds(bs, in_idx_l, in_anchors_l, shapes_l, out_anchors_l, in_size, out_size)\n manual_verify(bs, input, r, in_idx_l, in_anchors_l, shapes_l, out_anchors_l, [out_size + (3,)] * bs, out_dtype)\n except RuntimeError as e:\n if \"Paste in/out coords should be within input/output bounds\" in str(e):\n assert verify_out_of_bounds(bs, in_idx_l, in_anchors_l, shapes_l, out_anchors_l, in_size, out_size)\n else:\n assert False\n\ndef test_operator_multipaste():\n tests = [\n # The arguments are:\n # - batch size\n # - average paster per output\n # - input dimensions\n # - output dimensions\n # - should each output have same number of pastes\n # - should generated pastes have no intersections\n # - should \"shapes\" parameter be omitted (shape to cover from input anchor to input end)\n # - should \"in_anchors\" parameter be omitted\n # - (Optional) in_anchor value range ((xmin, y_min), (xmax, ymax))\n # - should \"out_anchors\" parameter be omitted\n # - (Optional) out_anchor value range ((xmin, y_min), (xmax, ymax))\n # - output dtype\n # - number of out-of-bounds anchor changes\n [4, 2, (128, 256), (128, 128), False, False, False, False, None, False, None, types.UINT8, 0],\n [4, 2, (256, 128), (128, 128), False, True, False, False, None, False, None, types.UINT8, 0],\n [4, 2, (128, 128), (256, 128), True, False, False, False, None, False, None, types.UINT8, 0],\n [4, 2, (128, 128), (128, 256), True, True, False, False, None, False, None, types.UINT8, 0],\n\n [4, 2, (64, 64), (128, 128), False, False, True, False, None, False, None, types.UINT8, 0],\n [4, 2, (64, 64), (128, 128), False, 
False, True, False, ((10, 10), (20, 20)), False, None, types.UINT8, 0],\n [4, 2, (64, 64), (128, 128), False, False, False, True, None, False, None, types.UINT8, 0],\n [4, 2, (64, 64), (128, 128), False, False, False, False, None, True, None, types.UINT8, 0],\n\n [4, 2, (128, 128), (128, 128), False, False, False, False, None, False, None, types.UINT8, 0],\n [4, 2, (128, 128), (128, 128), False, False, False, False, None, False, None, types.INT16, 0],\n [4, 2, (128, 128), (128, 128), False, False, False, False, None, False, None, types.INT32, 0],\n [4, 2, (128, 128), (128, 128), False, False, False, False, None, False, None, types.FLOAT, 0],\n\n [4, 2, (128, 256), (128, 128), False, False, False, False, None, False, None, types.UINT8, 4],\n ]\n for t in tests:\n yield (check_operator_multipaste, *t, \"cpu\")\n yield (check_operator_multipaste, *t, \"gpu\")\n",
"# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport nvidia.dali as dali\nimport nvidia.dali.fn as fn\nfrom test_utils import check_batch, dali_type\n\ndef make_param(kind, shape):\n if kind == \"input\":\n return fn.random.uniform(range=(0, 1), shape=shape)\n elif kind == \"scalar input\":\n return fn.reshape(fn.random.uniform(range=(0, 1)), shape=[])\n elif kind == \"vector\":\n return np.random.rand(*shape).astype(np.float32)\n elif kind == \"scalar\":\n return np.random.rand()\n else:\n return None\n\ndef clip(value, type = None):\n try:\n info = np.iinfo(type)\n return np.clip(value, info.min, info.max)\n except:\n return value\n\ndef make_data_batch(batch_size, in_dim, type):\n np.random.seed(1234)\n batch = []\n lo = 0\n hi = 1\n if np.issubdtype(type, np.integer):\n info = np.iinfo(type)\n # clip range to +/- 1000000 to prevent excessively large epsilons\n lo = max(info.min / 2, -1000000)\n hi = min(info.max / 2, 1000000)\n\n for i in range(batch_size):\n batch.append((np.random.rand(np.random.randint(0, 10000), in_dim)*(hi-lo) + lo).astype(type))\n return batch\n\ndef get_data_source(batch_size, in_dim, type):\n return lambda: make_data_batch(batch_size, in_dim, type)\n\ndef _run_test(device, batch_size, out_dim, in_dim, in_dtype, out_dtype, M_kind, T_kind):\n pipe = dali.pipeline.Pipeline(batch_size=batch_size, num_threads=4, device_id=0, seed=1234)\n with pipe:\n X = fn.external_source(source=get_data_source(batch_size, in_dim, in_dtype), device=device, layout = \"NX\")\n M = None\n T = None\n MT = None\n if T_kind == \"fused\":\n MT = make_param(M_kind, [out_dim, in_dim + 1])\n else:\n M = make_param(M_kind, [out_dim, in_dim])\n T = make_param(T_kind, [out_dim])\n\n Y = fn.coord_transform(X,\n MT = MT.flatten().tolist() if isinstance(MT, np.ndarray) else MT,\n M = M.flatten().tolist() if isinstance(M, np.ndarray) else M,\n T = T.flatten().tolist() if isinstance(T, np.ndarray) else T,\n dtype = dali_type(out_dtype)\n )\n if M is None:\n M = 1\n if T is None:\n T = 0\n if MT is None:\n MT = 0\n\n M, T, MT = (x if isinstance(x, dali.data_node.DataNode) else dali.types.Constant(x, dtype=dali.types.FLOAT) for x in (M, T, MT))\n\n pipe.set_outputs(X, Y, M, T, MT)\n\n pipe.build()\n for iter in range(3):\n outputs = pipe.run()\n outputs = [x.as_cpu() if hasattr(x, \"as_cpu\") else x for x in outputs]\n ref = []\n scale = 1\n for idx in range(batch_size):\n X = outputs[0].at(idx)\n if T_kind == \"fused\":\n MT = outputs[4].at(idx)\n if MT.size == 1:\n M = MT\n T = 0\n else:\n M = MT[:,:-1]\n T = MT[:,-1]\n else:\n M = outputs[2].at(idx)\n T = outputs[3].at(idx)\n\n if M.size == 1:\n Y = X.astype(np.float32) * M + T\n else:\n Y = np.matmul(X.astype(np.float32), M.transpose()) + T\n\n if np.issubdtype(out_dtype, np.integer):\n info = np.iinfo(out_dtype)\n Y = Y.clip(info.min, info.max)\n\n ref.append(Y)\n scale = max(scale, np.max(np.abs(Y)) - np.min(np.abs(Y))) if Y.size > 0 else 1\n avg = 1e-6 
* scale\n eps = 1e-6 * scale\n if out_dtype != np.float32: # headroom for rounding\n avg += 0.33\n eps += 0.5\n check_batch(outputs[1], ref, batch_size, eps, eps, expected_layout=\"NX\", compare_layouts=True)\n\n\ndef test_all():\n for device in [\"cpu\", \"gpu\"]:\n for M_kind in [None, \"vector\", \"scalar\", \"input\", \"scalar input\"]:\n for T_kind in [None, \"vector\", \"scalar\", \"input\", \"scalar input\"]:\n for batch_size in [1,3]:\n yield _run_test, device, batch_size, 3, 3, np.float32, np.float32, M_kind, T_kind\n\n for device in [\"cpu\", \"gpu\"]:\n for in_dtype in [np.uint8, np.uint16, np.int16, np.int32, np.float32]:\n for out_dtype in set([in_dtype, np.float32]):\n for batch_size in [1,8]:\n yield _run_test, device, batch_size, 3, 3, in_dtype, out_dtype, \"input\", \"input\"\n\n for device in [\"cpu\", \"gpu\"]:\n for M_kind in [\"input\", \"vector\", None]:\n for in_dim in [1,2,3,4,5,6]:\n out_dims = [1,2,3,4,5,6] if M_kind == \"vector\" or M_kind == \"input\" else [in_dim]\n for out_dim in out_dims:\n yield _run_test, device, 2, out_dim, in_dim, np.float32, np.float32, M_kind, \"vector\"\n\n for device in [\"cpu\", \"gpu\"]:\n for MT_kind in [\"vector\", \"input\", \"scalar\"]:\n for in_dim in [1,2,3,4,5,6]:\n out_dims = [1,2,3,4,5,6] if MT_kind == \"vector\" or MT_kind == \"input\" else [in_dim]\n for out_dim in out_dims:\n yield _run_test, device, 2, out_dim, in_dim, np.float32, np.float32, MT_kind, \"fused\"\n\n\ndef _test_empty_input(device):\n pipe = dali.pipeline.Pipeline(batch_size=2, num_threads=4, device_id=0, seed=1234)\n with pipe:\n X = fn.external_source(source=[[np.zeros([0,3]), np.zeros([0, 3])]], device=\"cpu\", layout=\"AB\")\n Y = fn.coord_transform(X,\n M = (1,2,3,4,5,6),\n T = (1,2)\n )\n pipe.set_outputs(Y)\n pipe.build()\n o = pipe.run()\n assert o[0].layout() == \"AB\"\n assert len(o[0]) == 2\n for i in range(len(o[0])):\n assert o[0].at(0).size == 0\n\ndef test_empty_input():\n for device in [\"cpu\", \"gpu\"]:\n yield _test_empty_input, device\n"
] |
[
[
"numpy.dot",
"scipy.spatial.transform.Rotation.from_rotvec",
"numpy.minimum",
"numpy.allclose",
"numpy.maximum",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.tan",
"numpy.identity",
"numpy.float32",
"numpy.array"
],
[
"numpy.random.seed",
"numpy.testing.assert_array_equal",
"numpy.array",
"numpy.empty",
"numpy.random.randint"
],
[
"numpy.uint8",
"numpy.arange",
"numpy.int32",
"numpy.logical_or",
"numpy.all",
"numpy.any",
"numpy.float32"
],
[
"numpy.array"
],
[
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.truediv",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.zeros_like",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.name_scope"
],
[
"numpy.random.seed",
"numpy.array_equal",
"numpy.int32",
"numpy.append",
"matplotlib.pyplot.subplot",
"numpy.random.randint",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"numpy.abs",
"numpy.random.seed",
"numpy.clip",
"numpy.issubdtype",
"numpy.random.rand",
"numpy.iinfo",
"numpy.zeros",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.5",
"1.2",
"1.3",
"1.4"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MattGreav/test
|
[
"f6bc7dcefd8b498b71fb92808ee70496f2206231",
"f6bc7dcefd8b498b71fb92808ee70496f2206231"
] |
[
"armi/reactor/composites.py",
"armi/utils/__init__.py"
] |
[
"# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module contains the basic composite pattern underlying the reactor package.\n\nThis follows the principles of the `Composite Design Pattern\n<https://en.wikipedia.org/wiki/Composite_pattern>`_ to allow the construction of a\npart/whole hierarchy representing a physical nuclear reactor. The composite objects act\nsomewhat like lists: they can be indexed, iterated over, appended, extended, inserted,\netc. Each member of the hierarchy knows its children and its parent, so full access to\nthe hierarchy is available from everywhere. This design was chosen because of the close\nanalogy of the model to the physical nature of nuclear reactors.\n\n.. warning:: Because each member of the hierarchy is linked to the entire tree,\n it is often unsafe to save references to individual members; it can cause\n large and unexpected memory inefficiencies.\n\nSee Also: :doc:`/developer/index`.\n\"\"\"\nimport collections\nimport itertools\nimport timeit\nfrom typing import Dict, Optional, Type, Tuple, List, Union\n\nimport numpy\nimport tabulate\nimport six\n\nimport armi\nfrom armi.reactor import parameters\nfrom armi.reactor.parameters import resolveCollections\nfrom armi.reactor.flags import Flags, TypeSpec\nfrom armi import runLog\nfrom armi import utils\nfrom armi.utils import units\nfrom armi.utils import densityTools\nfrom armi.nucDirectory import nucDir, nuclideBases\nfrom armi.nucDirectory import elements\nfrom armi.localization import exceptions\nfrom armi.reactor import grids\n\nfrom armi.physics.neutronics.fissionProductModel import fissionProductModel\nfrom armi.nuclearDataIO import xsCollections\nfrom armi.utils.densityTools import calculateNumberDensity\n\n\nclass FlagSerializer(parameters.Serializer):\n \"\"\"\n Serializer implementation for Flags.\n\n This operates by converting each set of Flags (too large to fit in a uint64) into a\n sequence of enough uint8 elements to represent all flags. These constitute a\n dimension of a 2-D numpy array containing all Flags for all objects provided to the\n ``pack()`` function.\n\n \"\"\"\n\n version = \"1\"\n\n @staticmethod\n def pack(data):\n \"\"\"\n Flags are represented as a 2-D numpy array of uint8 (single-byte, unsigned\n integers), where each row contains the bytes representing a single Flags\n instance. 
We also store the list of field names so that we can verify that the\n reader and the writer can agree on the meaning of each bit.\n\n Under the hood, this calls the private implementation providing the\n :py:class:`armi.reactor.flags.Flags` class as the target output class.\n \"\"\"\n return FlagSerializer._packImpl(data, Flags)\n\n @staticmethod\n def _packImpl(data, flagCls: Type[utils.Flag]):\n \"\"\"\n Implement the pack operation given a target output Flag class.\n\n This is kept separate from the public interface to permit testing of the\n functionality without having to do unholy things to ARMI's actual set of\n ``reactor.flags.Flags``.\n \"\"\"\n npa = numpy.array(\n [b for f in data for b in f.to_bytes()], dtype=numpy.uint8\n ).reshape((len(data), flagCls.width()))\n\n return npa, {\"flag_order\": flagCls.sortedFields()}\n\n @staticmethod\n def _remapBits(inp: int, mapping: Dict[int, int]):\n \"\"\"\n Given an input bitfield, map each bit to the appropriate new bit position based\n on the passed mapping.\n\n Parameters\n ==========\n inp : int\n input bitfield\n\n mapping : dict\n dictionary mapping from old bit position -> new bit position\n \"\"\"\n f = 0\n for bit in itertools.count():\n if (1 << bit) > inp:\n break\n if (1 << bit) & inp:\n f = f | (1 << mapping[bit])\n\n return f\n\n @classmethod\n def unpack(cls, data, version, attrs):\n \"\"\"\n Reverse the pack operation\n\n This will allow for some degree of conversion from old flags to a new set of\n flags, as long as all of the source flags still exist in the current set of\n flags.\n\n Under the hood, this calls the private implementation providing the\n :py:class:`armi.reactor.flags.Flags` class as the target output class.\n \"\"\"\n return cls._unpackImpl(data, version, attrs, Flags)\n\n @classmethod\n def _unpackImpl(cls, data, version, attrs, flagCls: Type[utils.Flag]):\n \"\"\"\n Implement the unpack operation given a target output Flag class.\n\n This is kept separate from the public interface to permit testing of the\n functionality without having to do unholy things to ARMI's actual set of\n ``reactor.flags.Flags``.\n\n If the set of flags for the currently-configured App match the input set of\n flags, they are read in directly, which is good and cheap. However, if the set\n of flags differ from the input and the current App, we will try to convert them\n (as long as all of the input flags exist in the current App). Conversion is done\n by forming a map from all input bit positions to the current-App bit positions\n of the same meaning. E.g., if FUEL flag used to be the 3rd bit position, but now\n it is the 6th bit position, the map will contain ``map[3] = 6``. Then for each\n bitfield that is read in, each bit position is queried and if present, mapped to\n the proper corresponding new bit position. The result of this mapping is used to\n construct the Flags object.\n \"\"\"\n flagOrderPassed = attrs[\"flag_order\"]\n flagOrderNow = flagCls.sortedFields()\n\n if version != cls.version:\n raise ValueError(\n \"The FlagSerializer version used to pack the data ({}) does not match \"\n \"the current version ({})! 
flagOrderPassed = attrs[\"flag_order\"]\n        flagOrderNow = flagCls.sortedFields()\n\n        if version != cls.version:\n            raise ValueError(\n                \"The FlagSerializer version used to pack the data ({}) does not match \"\n                \"the current version ({})! This database either needs to be migrated, \"\n                \"or on-the-fly inter-version conversion needs to be implemented.\".format(\n                    version, cls.version\n                )\n            )\n\n        flagSetIn = set(flagOrderPassed)\n        flagSetNow = set(flagOrderNow)\n\n        # Make sure that all of the old flags still exist\n        if not flagSetIn.issubset(flagSetNow):\n            missingFlags = flagSetIn - flagSetNow\n            raise ValueError(\n                \"The set of flags in the database includes unknown flags. \"\n                \"Make sure you are using the correct ARMI app. Missing flags:\\n\"\n                \"{}\".format(missingFlags)\n            )\n\n        if all(i == j for i, j in zip(flagOrderPassed, flagOrderNow)):\n            out = [flagCls.from_bytes(row.tobytes()) for row in data]\n        else:\n            newFlags = {\n                i: flagOrderNow.index(oldFlag)\n                for (i, oldFlag) in enumerate(flagOrderPassed)\n            }\n            out = [\n                flagCls(\n                    cls._remapBits(\n                        int.from_bytes(row.tobytes(), byteorder=\"little\"), newFlags\n                    )\n                )\n                for row in data\n            ]\n\n        return out\n\n\ndef _defineBaseParameters():\n    \"\"\"\n    Return parameter definitions that all ArmiObjects must have to function properly.\n\n    For now, this pretty much just includes ``flags``, since these are used throughout\n    the composite model to filter which objects are considered when traversing the\n    reactor model.\n\n    Note also that the base ParameterCollection class also has a ``serialNum``\n    parameter. These are defined in different locations, since serialNum is a guaranteed\n    feature of a ParameterCollection (for serialization to the database and history\n    tracking), while the ``flags`` parameter is more a feature of the composite model.\n\n    .. important::\n        Notice that the ``flags`` parameter cannot be written to the database as a\n        plain value; it needs the custom ``FlagSerializer`` defined above. This is for\n        a couple of reasons:\n        * Flags are derived from an ArmiObject's name. Since the name is stored on\n          the DB, it is possible to recover the flags from that.\n        * Storing flags to the DB may be complicated, since it is easy to imagine a\n          number of flags that is greater than the width of natively-supported integer\n          types, requiring some extra tricks to store the flags in an HDF5 file.\n        * Allowing flags to be modified by plugins further complicates things, in that\n          it is important to ensure that the meaning of all bits in the flag value are\n          consistent between a database state and the current ARMI environment. This may\n          require encoding these meanings into the database as some form of metadata.\n    \"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n\n    pDefs.add(\n        parameters.Parameter(\n            \"flags\",\n            units=None,\n            description=\"The type specification of this object\",\n            location=parameters.ParamLocation.AVERAGE,\n            saveToDB=True,\n            default=Flags(0),\n            setter=parameters.NoDefault,\n            categories=set(),\n            serializer=FlagSerializer,\n        )\n    )\n\n    return pDefs\n\n\nclass CompositeModelType(resolveCollections.ResolveParametersMeta):\n    \"\"\"\n    Metaclass for tracking subclasses of ArmiObject subclasses.\n\n    It is often useful to have an easily-accessible collection of all classes that\n    participate in the ARMI composite reactor model. 
This metaclass maintains a\n collection of all defined subclasses, called TYPES.\n \"\"\"\n\n # Dictionary mapping class name -> class object for all subclasses\n TYPES: Dict[str, Type] = dict()\n\n def __new__(cls, name, bases, attrs):\n newType = resolveCollections.ResolveParametersMeta.__new__(\n cls, name, bases, attrs\n )\n\n CompositeModelType.TYPES[name] = newType\n\n return newType\n\n\nclass ArmiObject(metaclass=CompositeModelType):\n \"\"\"\n The abstract base class for all composites and leaves.\n\n This:\n * declares the interface for objects in the composition\n * implements default behavior for the interface common to all\n classes\n * Declares an interface for accessing and managing\n child objects\n * Defines an interface for accessing parents.\n\n Called \"component\" in gang of four, this is an ArmiObject here because the word\n component was already taken in ARMI.\n\n The :py:class:`armi.reactor.parameters.ResolveParametersMeta` metaclass is used to\n automatically create ``ParameterCollection`` subclasses for storing parameters\n associated with any particular subclass of ArmiObject. Defining a ``pDefs`` class\n attribute in the definition of a subclass of ArmiObject will lead to the creation of\n a new subclass of py:class:`armi.reactor.parameters.ParameterCollection`, which will\n contain the definitions from that class's ``pDefs`` as well as the definitions for\n all of its parents. A new ``paramCollectionType`` class attribute will be added to\n the ArmiObject subclass to reflect which type of parameter collection should be\n used.\n\n .. warning::\n This class has far too many public methods. We are in the midst of a composite\n tree cleanup that will likely break these out onto a number of separate functional\n classes grouping things like composition, location, shape/dimensions, and\n various physics queries. Methods are being collected here from the various\n specialized subclasses (Block, Assembly) in preparation for this next step.\n As a result, the public API on this method should be considered unstable.\n\n Attributes\n ----------\n name : str\n Object name\n parent : ArmiObject\n The object's parent in a hierarchical tree\n cached : dict\n Some cached values for performance\n p : ParameterCollection\n The state variables\n spatialGrid : grids.Grid\n The spatial grid that this object contains\n spatialLocator : grids.LocationBase\n The location of this object in its parent grid, or global space\n\n See Also\n --------\n armi.reactor.parameters\n \"\"\"\n\n paramCollectionType: Optional[Type[parameters.ParameterCollection]] = None\n pDefs = _defineBaseParameters()\n\n def __init__(self, name):\n self.name = name\n self.parent = None\n self.cached = {}\n self._backupCache = None\n self.p = self.paramCollectionType()\n # TODO: These are not serialized to the database, and will therefore\n # lead to surprising behavior when using databases. We need to devise a\n # way to either represent them in parameters, or otherwise reliably\n # recover them.\n self._lumpedFissionProducts = None\n self.spatialGrid = None\n self.spatialLocator = grids.CoordinateLocation(0.0, 0.0, 0.0, None)\n\n def __lt__(self, other):\n \"\"\"\n Implement the less-than operator.\n\n Implementing this on the ArmiObject allows most objects, under most\n circumstances to be sorted. 
This is useful from the context of the Database\n classes, so that they can produce a stable layout of the serialized composite\n structure.\n\n By default, this sorts using the spatial locator, in K, J, I order, which should\n give a relatively intuitive order. For safety, it makes sure that the objects\n being sorted live in the same grid, since it probably doesn't make\n sense to sort things across containers or scopes. If this ends up being too\n restrictive, it can probably be relaxed or overridden on specific classes.\n \"\"\"\n\n if self.spatialLocator is None or other.spatialLocator is None:\n runLog.error(\"could not compare {} and {}\".format(self, other))\n raise ValueError(\n \"One or more of the compared objects have no spatialLocator\"\n )\n\n if self.spatialLocator.grid is not other.spatialLocator.grid:\n runLog.error(\"could not compare {} and {}\".format(self, other))\n raise ValueError(\n \"Composite grids must be the same to compare:\\n\"\n \"This grid: {}\\n\"\n \"Other grid: {}\".format(self.spatialGrid, other.spatialGrid)\n )\n try:\n t1 = tuple(reversed(self.spatialLocator.getCompleteIndices()))\n t2 = tuple(reversed(other.spatialLocator.getCompleteIndices()))\n return t1 < t2\n except ValueError:\n runLog.error(\n \"failed to compare {} and {}\".format(\n self.spatialLocator, other.spatialLocator\n )\n )\n raise\n\n def __getstate__(self):\n \"\"\"\n Python method for reducing data before pickling.\n\n This removes links to parent objects, which allows one to, for example, pickle\n an assembly without pickling the entire reactor. Likewise, one could\n MPI_COMM.bcast an assembly without broadcasting the entire reactor.\n\n Notes\n -----\n Special treatment of ``parent`` is not enough, since the spatialGrid also\n contains a reference back to the armiObject. Consequently, the ``spatialGrid``\n needs to be reassigned in ``__setstate__``.\n\n \"\"\"\n state = self.__dict__.copy()\n state[\"parent\"] = None\n\n if \"r\" in state:\n # XXX: This should never happen, it might make sense to raise an exception.\n del state[\"r\"]\n\n return state\n\n def __setstate__(self, state):\n \"\"\"\n Sets the state of this ArmiObject.\n\n Notes\n -----\n This ArmiObject may have lost a reference to its parent. If the parent was also\n pickled (serialized), then the parent should update the ``.parent`` attribute\n during its own ``__setstate__``. That means within the context of\n ``__setstate__`` one should not rely upon ``self.parent``.\n \"\"\"\n self.__dict__.update(state)\n\n if self.spatialGrid is not None:\n self.spatialGrid.armiObject = self\n\n # now \"reattach\" children\n for c in self:\n c.parent = self\n\n def __repr__(self):\n return \"<{}: {}>\".format(self.__class__.__name__, self.name)\n\n def __format__(self, spec):\n return format(str(self), spec)\n\n def __bool__(self):\n \"\"\"\n Flag that says this is non-zero in a boolean context\n\n Notes\n -----\n The default behavior for ``not [obj]`` that has a ``__len__`` defined is to see\n if the length is zero. However, for these composites, we'd like Assemblies, etc.\n to be considered non-zero even if they don't have any blocks. This is important\n for parent resolution, etc. 
If one of these objects exists, it is non-zero,\n        regardless of its contents.\n\n        \"\"\"\n        return True\n\n    __nonzero__ = __bool__  # Python 2 compatibility\n\n    def __add__(self, other):\n        \"\"\"Return a list of all children in this and another object.\"\"\"\n        return self.getChildren() + other.getChildren()\n\n    def duplicate(self):\n        \"\"\"\n        Make a clean copy of this object.\n\n        .. warning:: Be careful with inter-object dependencies. If one object contains a\n            reference to another object which contains links to the entire hierarchical\n            tree, memory can fill up rather rapidly. Weak references are designed to help\n            with this problem.\n        \"\"\"\n        raise NotImplementedError\n\n    def clearCache(self):\n        \"\"\"Clear the cache so all new values are recomputed.\"\"\"\n        self.cached = {}\n        for child in self.getChildren():\n            child.clearCache()\n\n    def _getCached(self, name):  # TODO: stop the \"returns None\" nonsense?\n        \"\"\"\n        Obtain a value from the cache.\n\n        Cached values can be used to temporarily store frequently read but\n        long-to-compute values. The practice is generally discouraged because it's\n        challenging to make sure to properly invalidate the cache when the state\n        changes.\n\n        \"\"\"\n        return self.cached.get(name, None)\n\n    def _setCache(self, name, val):  # TODO: remove me\n        \"\"\"\n        Set a value in the cache.\n\n        See Also\n        --------\n        _getCached : returns a previously-cached value\n        \"\"\"\n        self.cached[name] = val\n\n    def copyParamsFrom(self, other):\n        \"\"\"\n        Overwrite this object's params with another object's.\n\n        Parameters\n        ----------\n        other : ArmiObject\n            The object to copy params from\n\n        \"\"\"\n        self.p = other.p.__class__()\n        for p, val in other.p.items():\n            self.p[p] = val\n\n    def updateParamsFrom(self, new):\n        \"\"\"\n        Update this object's params with a new object's.\n\n        Parameters\n        ----------\n        new : ArmiObject\n            The object to copy params from\n        \"\"\"\n        for paramName, val in new.p.items():\n            self.p[paramName] = val\n\n    def getChildren(self, deep=False, generationNum=1, includeMaterials=False):\n        \"\"\"Return the children of this object.\"\"\"\n        raise NotImplementedError\n\n    def getChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=True):\n        \"\"\"Get all children that have given flags.\"\"\"\n        raise NotImplementedError\n\n    def getComponents(self, typeSpec: TypeSpec = None, exact=False):\n        \"\"\"\n        Return all armi.reactor.component.Component within this Composite.\n\n        Parameters\n        ----------\n        typeSpec : TypeSpec\n            Component flags. Will restrict Components to specific ones matching the\n            flags specified.\n\n        exact : bool, optional\n            Only match exact component labels (names). If True, 'coolant' will not match\n            'interCoolant'. This has no impact if typeSpec is None.\n\n        Returns\n        -------\n        list of Component\n            items matching the typeSpec and exact criteria\n
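\n        Examples\n        --------\n        A hypothetical sketch (the object, flags, and component names are purely\n        illustrative, not output from a real run):\n\n        >>> block.getComponents(Flags.FUEL)\n        [<Circle: fuel>]\n        >>> block.getComponents(Flags.CLAD, exact=True)\n        [<Circle: clad>]\n        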
\"\"\"\n        raise NotImplementedError()\n\n    def iterComponents(self, typeSpec: TypeSpec = None, exact=False):\n        \"\"\"Yield components one by one in a generator.\"\"\"\n        raise NotImplementedError()\n\n    def doChildrenHaveFlags(self, typeSpec: TypeSpec, deep=False):\n        \"\"\"\n        Generator that yields True if the next child has given flags.\n\n        Parameters\n        ----------\n        typeSpec : TypeSpec\n            Requested type of the child\n\n        \"\"\"\n        for c in self.getChildren(deep):\n            if c.hasFlags(typeSpec, exact=False):\n                yield True\n            else:\n                yield False\n\n    def containsAtLeastOneChildWithFlags(self, typeSpec: TypeSpec):\n        \"\"\"\n        Return True if any of the children are of a given type.\n\n        Parameters\n        ----------\n        typeSpec : TypeSpec\n            Requested type of the children\n\n        See Also\n        --------\n        self.doChildrenHaveFlags\n        self.containsOnlyChildrenWithFlags\n\n        \"\"\"\n        return any(self.doChildrenHaveFlags(typeSpec))\n\n    def containsOnlyChildrenWithFlags(self, typeSpec: TypeSpec):\n        \"\"\"\n        Return True if all of the children are of a given type.\n\n        Parameters\n        ----------\n        typeSpec : TypeSpec\n            Requested type of the children\n\n        See Also\n        --------\n        self.doChildrenHaveFlags\n        self.containsAtLeastOneChildWithFlags\n\n        \"\"\"\n        return all(self.doChildrenHaveFlags(typeSpec))\n\n    def copyParamsToChildren(self, paramNames):\n        \"\"\"\n        Copy param values in paramNames to all children.\n\n        Parameters\n        ----------\n        paramNames : list\n            List of param names to copy to children\n\n        \"\"\"\n        for paramName in paramNames:\n            myVal = self.p[paramName]\n            for c in self.getChildren():\n                c.p[paramName] = myVal\n\n    @classmethod\n    def getParameterCollection(cls):\n        \"\"\"\n        Return a new instance of the specific ParameterCollection type associated with this object.\n\n        This has the same effect as ``obj.paramCollectionType()``. Getting a new\n        instance through a class method like this is useful in situations where the\n        ``paramCollectionType`` is not a top-level object and therefore cannot be\n        trivially pickled. By the time we want to make any instances of (or unpickle) a\n        given ``ArmiObject``, such a class attribute will have been created and\n        associated, so we use this top-level class method to dig dynamically down to\n        the underlying parameter collection type.\n\n        See Also\n        --------\n        :py:meth:`armi.reactor.parameters.parameterCollections.ParameterCollection.__reduce__`\n        \"\"\"\n        return cls.paramCollectionType()\n\n    def getParamNames(self):\n        \"\"\"\n        Get a list of parameter keys that are available on this object.\n\n        Will not have any corner, edge, or timenode dependence.\n        \"\"\"\n        return sorted(k for k in self.p.keys() if not isinstance(k, tuple))\n\n    def nameContains(self, s):\n        \"\"\"\n        True if ``s`` is in this object's name (e.g., nameContains('fuel') == True for 'testfuel').\n\n        Notes\n        -----\n        Case insensitive (everything is converted to lower case).\n        \"\"\"\n        name = self.name.lower()\n        if isinstance(s, list):\n            for n in s:\n                if n.lower() in name:\n                    return True\n            return False\n        else:\n            return s.lower() in name\n\n    def getName(self):\n        return self.name\n\n    def setName(self, name):\n        self.name = name\n\n    def hasFlags(self, typeID: TypeSpec, exact=False):\n        \"\"\"\n        Determine if this object is of a certain type.\n\n        Parameters\n        ----------\n        typeID : TypeSpec\n            Flags to test the object against, to see if it contains them. 
If a list is\n provided, each element is treated as a \"candidate\" set of flags. Return True\n if any of candidates match. When exact is True, the object must match one of\n the candidates exactly. If exact is False, the object must have at least the\n flags contained in a candidate for that candidate to be a match; extra flags\n on the object are permitted. None matches all objects if exact is False, or\n no objects if exact is True.\n\n exact : bool, optional\n Require the type of the object to fully match the provided typeID(s)\n\n Returns\n -------\n hasFlags : bool\n True if this object is in the typeID list.\n\n Notes\n -----\n Type comparisons use bitwise comparisons using valid flags.\n\n If you have an 'inner control' assembly, then this will evaluate True for the\n INNER | CONTROL flag combination. If you just want all FUEL, simply use FUEL\n with no additional qualifiers. For more complex comparisons, use bitwise\n operations.\n\n Always returns true if typeID is none and exact is False, allowing for default\n parameters to be passed in when the method does not care about the object type.\n If the typeID is none and exact is True, this will always return False.\n\n Examples\n --------\n If you have an object with the ``INNER``, ``DRIVER``, and ``FUEL`` flags, then\n\n >>> obj.getType()\n [some integer]\n\n >>> obj.hasFlags(Flags.FUEL)\n True\n\n >>> obj.hasFlags(Flags.INNER | Flags.DRIVER | Flags.FUEL)\n True\n\n >>> obj.hasFlags(Flags.OUTER | Flags.DRIVER | Flags.FUEL)\n False\n\n >>> obj.hasFlags(Flags.INNER | Flags.FUEL)\n True\n\n >>> obj.hasFlags(Flags.INNER | Flags.FUEL, exact=True)\n False\n\n >>> obj.hasFlags([Flags.INNER | Flags.DRIVER | Flags.FUEL,\n ... Flags.OUTER | Flags.DRIVER | Flags.FUEL], exact=True)\n False\n\n \"\"\"\n if not typeID:\n return False if exact else True\n if isinstance(typeID, six.string_types):\n raise TypeError(\n \"Must pass Flags, or an iterable of Flags; Strings are no longer \"\n \"supported\"\n )\n\n elif not isinstance(typeID, Flags):\n # list behavior gives a spec1 OR spec2 OR ... behavior.\n return any(self.hasFlags(typeIDi, exact=exact) for typeIDi in typeID)\n\n if not self.p.flags:\n # default still set, or null flag. Do down here so we get proper error\n # handling of invalid typeSpecs\n return False\n\n if exact:\n # all bits must be identical for exact match\n return self.p.flags == typeID\n\n # all bits that are 1s in the typeID must be present\n return self.p.flags & typeID == typeID\n\n def getType(self):\n \"\"\"Return the object type.\"\"\"\n return self.p.type\n\n def setType(self, typ, flags: Optional[Flags] = None):\n \"\"\"\n Set the object type.\n\n Parameters\n ----------\n typ : str\n The desired \"type\" for the object. Type describes the general class of the\n object, and typically corresponds to the name of the blueprint that created\n it.\n\n flags : Flags, optional\n The set of Flags to apply to the object. If these are omitted, then Flags\n will be derived from the ``typ``.\n\n Warning\n -------\n We are in the process of developing more robust definitions for things like\n \"name\" and \"type\". \"type\" will generally refer to the name of the blueprint that\n created a particular object. When present, a \"name\" will refer to a specific\n instance of an object of a particular \"type\". Think unique names for each\n assembly in a core, even if they are all created from the same blueprint and\n therefore have the same \"type\". 
When this work is complete, it will be strongly\n        discouraged, or even disallowed, to change the type of an object after it has\n        been created, and ``setType()`` may be removed entirely.\n        \"\"\"\n        self.p.flags = flags or Flags.fromStringIgnoreErrors(typ)\n        self.p.type = typ\n\n    def getVolume(self):\n        return sum(child.getVolume() for child in self)\n\n    def _updateVolume(self):\n        \"\"\"Recompute and store volume.\"\"\"\n        children = self.getChildren()\n        # Derived shapes must come last so we temporarily change the order if we\n        # have one.\n        from armi.reactor.components import DerivedShape\n\n        for child in children[:]:\n            if isinstance(child, DerivedShape):\n                children.remove(child)\n                children.append(child)\n        for child in children:\n            child._updateVolume()\n\n    def getVolumeFractions(self):\n        \"\"\"\n        Return volume fractions of each child.\n\n        Sets volume or area of missing piece (like coolant) if it exists. Caching would\n        be nice here.\n\n        Returns\n        -------\n        fracs : list\n            list of (component, volFrac) tuples\n\n        See Also\n        --------\n        test_block.Block_TestCase.test_consistentAreaWithOverlappingComponents\n\n        Notes\n        -----\n        void areas can be negative in gaps between fuel/clad/liner(s), but these\n        negative areas are intended to account for overlapping positive areas to ensure\n        the total area of components inside the clad is accurate. See\n        test_block.Block_TestCase.test_consistentAreaWithOverlappingComponents\n\n        \"\"\"\n        children = self.getChildren()\n        numerator = [c.getVolume() for c in children]\n        denom = sum(numerator)\n        if denom == 0.0:\n            numerator = [c.getArea() for c in children]\n            denom = sum(numerator)\n\n        fracs = [(ci, nu / denom) for ci, nu in zip(children, numerator)]\n        return fracs\n\n    def getVolumeFraction(self):\n        \"\"\"Return the volume fraction that this object takes up in its parent.\"\"\"\n        if self.parent is None:\n            raise ValueError(\n                f\"No parent is defined for {self}. 
Cannot compute its volume fraction.\"\n            )\n\n        for child, frac in self.parent.getVolumeFractions():\n            if child is self:\n                return frac\n\n    def getMaxArea(self):\n        \"\"\"\n        The maximum area of this object if it were totally full.\n\n        See Also\n        --------\n        armi.reactor.blocks.HexBlock.getMaxArea\n        \"\"\"\n        raise NotImplementedError()\n\n    def getMaxVolume(self):\n        \"\"\"\n        The maximum volume of this object if it were totally full.\n\n        Returns\n        -------\n        vol : float\n            volume in cm^3.\n        \"\"\"\n        raise NotImplementedError()\n\n    def getMass(self, nuclideNames=None):\n        \"\"\"\n        Determine the mass in grams of nuclide(s) and/or elements in this object.\n\n        Parameters\n        ----------\n        nuclideNames : str, optional\n            The nuclide/element specifier to get the mass of in the object.\n            If omitted, total mass is returned.\n\n        Returns\n        -------\n        mass : float\n            The mass in grams.\n        \"\"\"\n        return sum(\n            [c.getMass(nuclideNames=nuclideNames) for c in self.iterComponents()]\n        )\n\n    def getMassFrac(self, nucName):\n        \"\"\"\n        Get the mass fraction of a nuclide.\n\n        Notes\n        -----\n        If you need multiple mass fractions, use ``getMassFracs``.\n\n        \"\"\"\n        nuclideNames = self._getNuclidesFromSpecifier(nucName)\n        return sum(self.getMassFracs().get(nucName, 0.0) for nucName in nuclideNames)\n\n    def getMicroSuffix(self):\n        pass\n\n    def _getNuclidesFromSpecifier(self, nucSpec):\n        \"\"\"\n        Convert a nuclide specification to a list of valid nuclide/element keys.\n\n        nucSpec : nuclide specifier\n            Can be a string name of a nuclide or element, or a list of such strings.\n\n        This might get Zr isotopes when ZR is passed in if they exist, or it will get\n        elemental ZR if that exists. When expanding elements, all known nuclides are\n        returned, not just the natural ones.\n\n        \"\"\"\n        allNuclidesHere = self.getNuclides()\n        if nucSpec is None:\n            return allNuclidesHere\n        elif isinstance(nucSpec, str):\n            nuclideNames = [nucSpec]\n        elif isinstance(nucSpec, list):\n            nuclideNames = []\n            for ns in nucSpec:\n                nuclideNames.extend(self._getNuclidesFromSpecifier(ns))\n        else:\n            raise TypeError(\n                \"nucSpec={0} is an invalid specifier. It is a {1}\"\n                \"\".format(nucSpec, type(nucSpec))\n            )\n\n        # expand elementals if appropriate.\n        convertedNucNames = []\n        for nucName in nuclideNames:\n            if nucName in allNuclidesHere:\n                convertedNucNames.append(nucName)\n                continue\n            try:\n                # Need all nuclide bases, not just natural isotopics because, e.g. PU\n                # has no natural isotopics!\n                nucs = [\n                    nb.name\n                    for nb in elements.bySymbol[nucName].nuclideBases\n                    if not isinstance(nb, nuclideBases.NaturalNuclideBase)\n                ]\n                convertedNucNames.extend(nucs)\n            except KeyError:\n                convertedNucNames.append(nucName)\n\n        return sorted(set(convertedNucNames))\n\n    def getMassFracs(self):\n        \"\"\"\n        Get mass fractions of all nuclides in object.\n\n        Ni [1/cm3] * Ai [g/mole] ~ mass\n        \"\"\"\n        numDensities = self.getNumberDensities()\n        return densityTools.getMassFractions(numDensities)\n\n    def setMassFrac(self, nucName, val):\n        \"\"\"\n        Adjust the composition of this object so the mass fraction of nucName is val.\n\n        See Also\n        --------\n        setMassFracs : efficiently set multiple mass fractions at the same time.\n        \"\"\"\n        self.setMassFracs({nucName: val})\n\n    def setMassFracs(self, massFracs):\n        r\"\"\"\n        Apply one or more adjusted mass fractions.\n\n        This will adjust the total mass of the object, as the mass of everything\n        designated will change, while anything else will not.\n\n        .. 
math::\n\n            m_i = \\frac{M_i}{\\sum_j(M_j)}\n\n            (M_{j \\ne i} + M_i) m_i = M_i\n\n            \\frac{m_i M_{j \\ne i}}{1-m_i} = M_i\n\n            \\frac{m_i M_{j \\ne i}}{V(1-m_i)} = M_i/V = m_i \\rho\n\n            N_i = \\frac{m_i \\rho N_A}{A_i}\n\n            N_i = \\frac{m_i M_{j \\ne i} N_A}{V (1-m_i) {A_i}}\n\n            \\frac{M_{j \\ne i}}{V} = m_{j \\ne i} \\rho\n\n            m_{j \\ne i} = 1 - m_i\n\n        Notes\n        -----\n        You can't just change one mass fraction, though; you have to scale all others to\n        fill the remaining fraction.\n\n        Parameters\n        ----------\n        massFracs : dict\n            nucName : new mass fraction pairs.\n\n        \"\"\"\n        rho = self.density()\n        if not rho:\n            raise ValueError(\n                \"Cannot set mass fractions on {} because the mass density is zero.\".format(\n                    self\n                )\n            )\n        oldMassFracs = self.getMassFracs()\n        totalFracSet = 0.0\n        for nucName, massFrac in massFracs.items():\n            self.setNumberDensity(\n                nucName,\n                (\n                    massFrac\n                    * rho\n                    * units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM\n                    / nucDir.getAtomicWeight(nucName)\n                ),\n            )\n            if nucName in oldMassFracs:\n                del oldMassFracs[nucName]\n            totalFracSet += massFrac\n        totalOther = sum(oldMassFracs.values())\n        if totalOther:\n            # we normalize the remaining mass fractions so their concentrations relative\n            # to each other stay constant.\n            normalizedOtherMassFracs = {\n                nucNameOther: val / totalOther\n                for nucNameOther, val in oldMassFracs.items()\n            }\n            for nucNameOther, massFracOther in normalizedOtherMassFracs.items():\n                self.setNumberDensity(\n                    nucNameOther,\n                    (\n                        (1.0 - totalFracSet)\n                        * massFracOther\n                        * rho\n                        * units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM\n                        / nucDir.getAtomicWeight(nucNameOther)\n                    ),\n                )\n\n    def adjustMassFrac(\n        self,\n        nuclideToAdjust=None,\n        elementToAdjust=None,\n        nuclideToHoldConstant=None,\n        elementToHoldConstant=None,\n        val=0.0,\n    ):\n        r\"\"\"\n        Set the mass fraction of a nuclide or element while scaling the others to\n        compensate (e.g., set the initial Zr mass fraction while maintaining uranium\n        enrichment).\n\n        Parameters\n        ----------\n        nuclideToAdjust : str, optional\n            The nuclide name to adjust\n        elementToAdjust : str, optional\n            The element to adjust. All isotopes in this element will adjust\n        nuclideToHoldConstant : str, optional\n            A nuclide to hold constant\n        elementToHoldConstant : str, optional\n            An element to hold constant\n        val : float\n            The value to set the adjusted mass fraction to.\n\n        Notes\n        -----\n        If you use this for two elements one after the other, you will probably get\n        something wrong. For instance, if you have U-10Zr and add Pu at 10% mass\n        fraction, the Zr fraction will drop below 10% of the total. The U-Zr fractions\n        will remain constant though. So this is mostly useful if you have U-10Zr and\n        want to change it to U-5Zr.\n\n        Theory:\n\n        Mass fraction of each nuclide to be adjusted = Ai where A1+A2+A...+AI = A\n        Mass fraction of nuclides to be held constant = Ci where sum = C\n        Mass fraction of other nuclides is Oi, sum = O\n        new value for A is v\n\n        A+C+O = 1.0\n        A'=v. 
If A>0, then A'=A*f1=v where f1 = v/A\n        If A=0, then Ai' = v/len(A), distributing the value evenly among isotopes\n\n        Now, to adjust the other nuclides, we know\n        A'+C+O' = 1.0 , or v+C+O' = 1.0\n        So, O'= 1.0-v-C\n        We can scale each Oi evenly by multiplying by the factor f2\n        Oi' = Oi * (1-C-v)/O = Oi * f2 where f2 = (1-C-v)/O\n\n        Since massFracs is not necessarily normalized, A+C+O actually =\n        self.p.massFracNorm\n\n        See Also\n        --------\n\n        setMassFrac\n        getMassFrac\n        \"\"\"\n        self.clearCache()  # don't keep densities around or anything.\n        if val > 1.0 or val < 0:\n            raise ValueError(\n                \"Invalid mass fraction {0} for {1}/{2} in {3}\".format(\n                    val, nuclideToAdjust, elementToAdjust, self.getName()\n                )\n            )\n        if not nuclideToAdjust and not elementToAdjust:\n            raise TypeError(\n                \"Must provide a nuclide or element to adjust to adjustMassFrac\"\n            )\n\n        # sum of other nuclide mass fractions before change is Y\n        # need Yx+newZr = 1.0 where x is a scaling factor\n        # so x=(1-newZr)/Y\n\n        # determine nuclides to hold constant\n        nuclides = set(self.getNuclides())\n        if nuclideToHoldConstant or elementToHoldConstant:\n            # Note that if these arguments are false, you'll get ALL nuclides in the\n            # material. Use material.getNuclides to get only non-zero ones; use\n            # nucDir.getNuclides to get all. Intersect with current nuclides to\n            # eliminate double counting of element/isotopes\n            constantNuclides = set(\n                nucDir.getNuclideNames(\n                    nucName=nuclideToHoldConstant, elementSymbol=elementToHoldConstant\n                )\n            ).intersection(nuclides)\n            constantSum = sum(self.getMassFrac(nucName) for nucName in constantNuclides)\n        else:\n            constantNuclides = []\n            constantSum = 0.0\n\n        # determine which nuclides we're adjusting.\n        # Rather than calling this material's getNuclides method, we call the\n        # nucDirectory to do this. This way, even zeroed-out nuclides will get in the\n        # mix\n        adjustNuclides = set(\n            nucDir.getNuclideNames(\n                nucName=nuclideToAdjust, elementSymbol=elementToAdjust\n            )\n        ).intersection(nuclides)\n        # get original mass frac A of those to be adjusted.\n        A = sum(self.getMassFrac(ni) for ni in adjustNuclides)\n\n        factor1 = val / A if A else None\n\n        # set the ones we're adjusting to their given value.\n        numNucs = len(adjustNuclides)\n        newA = 0.0\n        newMassFracs = {}\n        for nuc in adjustNuclides:\n            if factor1 is None:\n                # this is for when adjust nuclides have zero mass fractions. Like Zr.\n                # In this case, if there are multiple nuclides, we will distribute them\n                # evenly because we have no other indication of how to adjust them.\n                newMassFrac = val / numNucs\n            else:\n                # this is for when the nuclides we're adjusting already exist\n                # with non-zero mass fractions; could be a Pu vector.\n                newMassFrac = self.getMassFrac(nuc) * factor1\n            newA += newMassFrac\n            newMassFracs[nuc] = newMassFrac\n            if nuc == \"ZR\":\n                # custom parameter only set here to determine how to behave for UZr\n                # density, linear expansion. Can't let it roam with each mass frac\n                # 'cause then the density roams too and there are \"oscillations\"\n                self.p.zrFrac = newMassFrac\n\n        # error checking.\n        if abs(newA - val) > 1e-10:\n            runLog.error(\n                \"Adjust Mass fraction did not adjust {0} from {1} to {2}. 
It got to {3}\".format(\n                    adjustNuclides, A, val, newA\n                )\n            )\n            raise RuntimeError(\"Failed to adjust mass fraction.\")\n\n        # determine the mass fraction of the nuclides that will be adjusted to\n        # accommodate the requested change\n        othersSum = 1.0 - A - constantSum\n        if not othersSum:\n            # no others to be modified.\n            factor2 = 1.0\n        else:\n            # use newA rather than val\n            factor2 = (1.0 - newA - constantSum) / othersSum\n\n        # change all the other nuclides using f2 factor\n        for nuc in self.getNuclides():\n            if nuc not in adjustNuclides and nuc not in constantNuclides:\n                newMassFracs[nuc] = self.getMassFrac(nuc) * factor2\n\n        self.setMassFracs(newMassFracs)\n\n    def adjustMassEnrichment(self, massFraction):\n        \"\"\"\n        Adjust the enrichment of this object.\n\n        If it's Uranium, enrichment means U-235 fraction.\n        If it's Boron, enrichment means B-10 fraction, etc.\n\n        Parameters\n        ----------\n        massFraction : float\n            The new enrichment as a fraction.\n        \"\"\"\n        raise NotImplementedError\n\n    def getNumberDensity(self, nucName):\n        \"\"\"\n        Return the number density of a nuclide in atoms/barn-cm.\n\n        Notes\n        -----\n        This can get called very frequently and has to do volume computations so should\n        use some kind of caching that is invalidated by any temperature, composition,\n        etc. changes. Even with caching the volume calls are still somewhat expensive so\n        prefer the methods in see also.\n\n        See Also\n        --------\n        ArmiObject.getNuclideNumberDensities: More efficient when >1 specific nuc density is needed.\n        ArmiObject.getNumberDensities: More efficient when all nucs in the object are needed.\n        \"\"\"\n        return self.getNuclideNumberDensities([nucName])[0]\n\n    def getNuclideNumberDensities(self, nucNames):\n        \"\"\"Return a list of number densities in atoms/barn-cm for the nuc names requested.\"\"\"\n        volumes = numpy.array(\n            [\n                c.getVolume() / (c.parent.getSymmetryFactor() if c.parent else 1.0)\n                for c in self.iterComponents()\n            ]\n        )  # c x 1\n        totalVol = volumes.sum()\n        if totalVol == 0.0:\n            # there are no children so no volume or number density\n            return [0.0] * len(nucNames)\n\n        nucDensForEachComp = numpy.array(\n            [\n                [c.getNumberDensity(nuc) for nuc in nucNames]\n                for c in self.iterComponents()\n            ]\n        )  # c x n\n        return volumes.dot(nucDensForEachComp) / totalVol\n\n    def _getNdensHelper(self):\n        \"\"\"\n        Return a number densities dict with unexpanded lfps.\n\n        Notes\n        -----\n        This is implemented more simply on the component level.\n        \"\"\"\n        nucNames = self.getNuclides()\n        return dict(zip(nucNames, self.getNuclideNumberDensities(nucNames)))\n\n    def getNumberDensities(self, expandFissionProducts=False):\n        \"\"\"\n        Retrieve the number densities in atoms/barn-cm of all nuclides in the object.\n\n        Parameters\n        ----------\n        expandFissionProducts : bool (optional)\n            expand the fission product number densities\n\n        Returns\n        -------\n        numberDensities : dict\n            nucName keys, number density values (atoms/bn-cm)\n        \"\"\"\n        numberDensities = self._getNdensHelper()\n        if expandFissionProducts:\n            return self._expandLFPs(numberDensities)\n        return numberDensities\n\n    def getNeutronEnergyDepositionConstants(self):\n        \"\"\"\n        Get the neutron energy deposition group constants for a composite.\n\n        Returns\n        -------\n        energyDepConstants: numpy.array\n            Neutron energy deposition group constants (in Joules/cm)\n\n        Raises\n        ------\n        RuntimeError:\n            Reports if a cross section library is not assigned to a reactor.
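\n\n        Examples\n        --------\n        A hypothetical usage sketch (requires a loaded cross section library; the\n        names and the dot product are illustrative, not a prescribed workflow):\n\n        >>> eDep = block.getNeutronEnergyDepositionConstants()   # doctest: +SKIP\n        >>> heating = eDep.dot(block.getMgFlux())  # ~W/cm^3     # doctest: +SKIP\n        \"\"\"\n        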
if not self.r.core.lib:\n            raise RuntimeError(\n                \"Cannot get neutron energy deposition group constants without \"\n                \"a library. Please ensure a library exists.\"\n            )\n\n        return xsCollections.computeNeutronEnergyDepositionConstants(\n            self.getNumberDensities(), self.r.core.lib, self.getMicroSuffix()\n        )\n\n    def getGammaEnergyDepositionConstants(self):\n        \"\"\"\n        Get the gamma energy deposition group constants for a composite.\n\n        Returns\n        -------\n        energyDepConstants: numpy.array\n            Gamma energy deposition group constants (in Joules/cm)\n\n        Raises\n        ------\n        RuntimeError:\n            Reports if a cross section library is not assigned to a reactor.\n        \"\"\"\n        if not self.r.core.lib:\n            raise RuntimeError(\n                \"Cannot get gamma energy deposition group constants without \"\n                \"a library. Please ensure a library exists.\"\n            )\n\n        return xsCollections.computeGammaEnergyDepositionConstants(\n            self.getNumberDensities(), self.r.core.lib, self.getMicroSuffix()\n        )\n\n    def getTotalEnergyGenerationConstants(self):\n        \"\"\"\n        Get the total energy generation group constants for a composite.\n\n        Gives the total energy generation rates when multiplied by the multigroup flux.\n\n        Returns\n        -------\n        totalEnergyGenConstant: numpy.array\n            Total (fission + capture) energy generation group constants (Joules/cm)\n        \"\"\"\n        return (\n            self.getFissionEnergyGenerationConstants()\n            + self.getCaptureEnergyGenerationConstants()\n        )\n\n    def getFissionEnergyGenerationConstants(self):\n        \"\"\"\n        Get the fission energy generation group constants for a composite.\n\n        Gives the fission energy generation rates when multiplied by the multigroup\n        flux.\n\n        Returns\n        -------\n        fissionEnergyGenConstant: numpy.array\n            Energy generation group constants (Joules/cm)\n\n        Raises\n        ------\n        RuntimeError:\n            Reports if a cross section library is not assigned to a reactor.\n        \"\"\"\n        if not self.r.core.lib:\n            raise RuntimeError(\n                \"Cannot compute energy generation group constants without a library\"\n                \". Please ensure a library exists.\"\n            )\n\n        return xsCollections.computeFissionEnergyGenerationConstants(\n            self.getNumberDensities(), self.r.core.lib, self.getMicroSuffix()\n        )\n\n    def getCaptureEnergyGenerationConstants(self):\n        \"\"\"\n        Get the capture energy generation group constants for a composite.\n\n        Gives the capture energy generation rates when multiplied by the multigroup\n        flux.\n\n        Returns\n        -------\n        captureEnergyGenConstant: numpy.array\n            Energy generation group constants (Joules/cm)\n\n        Raises\n        ------\n        RuntimeError:\n            Reports if a cross section library is not assigned to a reactor.\n        \"\"\"\n        if not self.r.core.lib:\n            raise RuntimeError(\n                \"Cannot compute energy generation group constants without a library\"\n                \". 
Please ensure a library exists.\"\n )\n\n return xsCollections.computeCaptureEnergyGenerationConstants(\n self.getNumberDensities(), self.r.core.lib, self.getMicroSuffix()\n )\n\n def _expandLFPs(self, numberDensities):\n \"\"\"\n Expand the LFPs on the numberDensities dictionary using this composite's\n lumpedFissionProductCollection.\n \"\"\"\n lfpCollection = self.getLumpedFissionProductCollection()\n if lfpCollection: # may not have lfps in non-fuel\n lfpDensities = lfpCollection.getNumberDensities(self)\n numberDensities = {\n nucName: numberDensities.get(nucName, 0.0)\n + lfpDensities.get(nucName, 0.0)\n for nucName in set(numberDensities) | set(lfpDensities)\n }\n # remove LFPs from the result\n for lfpName in lfpCollection:\n numberDensities.pop(lfpName, None)\n else:\n lfpMass = sum(\n dens\n for name, dens in numberDensities.items()\n if isinstance(nuclideBases.byName[name], nuclideBases.LumpNuclideBase)\n )\n if lfpMass:\n raise RuntimeError(\n \"Composite {} is attempting to expand lumped fission products, but does not have \"\n \"an lfpCollection.\".format(self)\n )\n return numberDensities\n\n def getChildrenWithNuclides(self, nucNames):\n \"\"\"Return children that contain any nuclides in nucNames.\"\"\"\n nucNames = set(nucNames) # only convert to set once\n return [child for child in self if nucNames.intersection(child.getNuclides())]\n\n def getAncestor(self, fn):\n \"\"\"\n Return the first ancestor that satisfies the supplied predicate.\n\n Parameters\n ----------\n fn : Function-like object\n The predicate used to test the validity of an ancestor. Should return true\n if the ancestor satisfies the caller's requirements\n \"\"\"\n if fn(self):\n return self\n if self.parent is None:\n return None\n else:\n return self.parent.getAncestor(fn)\n\n def getAncestorAndDistance(\n self, fn, _distance=0\n ) -> Optional[Tuple[\"ArmiObject\", int]]:\n \"\"\"\n Return the first ancestor that satisfies the supplied predicate, along with how\n many levels above self the ancestor lies.\n\n Parameters\n ----------\n fn : Function-like object\n The predicate used to test the validity of an ancestor. Should return true\n if the ancestor satisfies the caller's requirements\n \"\"\"\n if fn(self):\n return self, _distance\n if self.parent is None:\n return None\n else:\n return self.parent.getAncestorAndDistance(fn, _distance + 1)\n\n def getAncestorWithFlags(self, typeSpec: TypeSpec, exactMatch=False):\n \"\"\"\n Return the first ancestor that matches the passed flags.\n\n Parameters\n ----------\n typeSpec : TypeSpec\n A collection of flags to match on candidate parents\n\n exactMatch : bool\n Whether the flags match should be exact\n\n Returns\n -------\n armi.composites.ArmiObject\n the first ancestor up the chain of parents that matches the passed flags\n\n Notes\n -----\n This will throw an error if no ancestor can be found that matches the typeSpec\n\n See Also\n --------\n ArmiObject.hasFlags()\n \"\"\"\n if self.hasFlags(typeSpec, exact=exactMatch):\n return self\n\n if self.parent is None:\n return None\n else:\n return self.parent.getAncestorWithFlags(typeSpec, exactMatch=exactMatch)\n\n def getTotalNDens(self):\n \"\"\"\n Return the total number density of all atoms in this object.\n\n Returns\n -------\n nTot : float\n Total ndens of all nuclides in atoms/bn-cm. Not homogenized.\n \"\"\"\n nFPsPerLFP = (\n fissionProductModel.NUM_FISSION_PRODUCTS_PER_LFP\n ) # LFPs count as two! 
Big deal in non BOL cases.\n        return sum(\n            dens * (nFPsPerLFP if \"LFP\" in name else 1.0)\n            for name, dens in self.getNumberDensities().items()\n        )\n\n    def setNumberDensity(self, nucName, val):\n        \"\"\"\n        Set the number density of this nuclide to this value.\n\n        This distributes atom density evenly across all children that contain nucName.\n        If the nuclide doesn't exist in any of the children, then that's actually an\n        error. This would only happen if some unnatural nuclide like Pu239 built up in\n        fresh UZr. That should be anticipated and dealt with elsewhere.\n\n        \"\"\"\n        activeChildren = self.getChildrenWithNuclides({nucName})\n        if not activeChildren:\n            activeVolumeFrac = 1.0\n            if val:\n                raise ValueError(\n                    \"The nuclide {} does not exist in any children of {}; \"\n                    \"cannot set its number density to {}. The nuclides here are: {}\".format(\n                        nucName, self, val, self.getNuclides()\n                    )\n                )\n        else:\n            activeVolumeFrac = sum(\n                vf for ci, vf in self.getVolumeFractions() if ci in activeChildren\n            )\n        dehomogenizedNdens = (\n            val / activeVolumeFrac\n        )  # scale up to dehomogenize on children.\n        for child in activeChildren:\n            child.setNumberDensity(nucName, dehomogenizedNdens)\n\n    def setNumberDensities(self, numberDensities):\n        \"\"\"\n        Set one or more number densities. Reset any non-listed nuclides to 0.0.\n\n        Parameters\n        ----------\n        numberDensities : dict\n            nucName: ndens pairs.\n\n        Notes\n        -----\n        We'd like to not have to call setNumberDensity for each nuclide because we don't\n        want to call ``getVolumeFractions`` for each nuclide (it's inefficient).\n\n        \"\"\"\n        numberDensities.update(\n            {nuc: 0.0 for nuc in self.getNuclides() if nuc not in numberDensities}\n        )\n        self.updateNumberDensities(numberDensities)\n\n    def updateNumberDensities(self, numberDensities):\n        \"\"\"\n        Set one or more number densities. Leaves unlisted number densities alone.\n\n        This changes a nuclide number density only on children that already have that\n        nuclide, thereby allowing, for example, actinides to stay in the fuel component\n        when setting block-level values.\n\n        The complication is that various number densities are distributed among various\n        components. This sets the number density for each nuclide evenly across all\n        components that contain it.\n\n        Parameters\n        ----------\n        numberDensities : dict\n            nucName: ndens pairs.\n\n        \"\"\"\n        children, volFracs = zip(*self.getVolumeFractions())\n        childNucs = tuple(set(child.getNuclides()) for child in children)\n\n        allDehomogenizedNDens = collections.defaultdict(dict)\n\n        # compute potentially-different homogenization factors for each child; evenly\n        # distribute the entire number density over the subset of active children.\n        
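# Illustrative sketch (hypothetical numbers): if \"U235\" exists only in\n        # children occupying volume fractions 0.2 and 0.3, and the homogenized\n        # density is 0.01 atoms/bn-cm, then each active child is set to\n        # 0.01 / (0.2 + 0.3) = 0.02 atoms/bn-cm, so the volume-weighted sum over\n        # the whole composite recovers 0.01.\n        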
for nuc, dens in numberDensities.items():\n            # get \"active\" indices, i.e., indices of children containing nuc\n            # NOTE: this is one of the rare instances in which (imo) using explicit\n            # indexing clarifies subsequent code since it's not necessary to zip +\n            # filter + extract individual components (just extract by filtered index).\n            indicesToSet = tuple(\n                i for i, nucsInChild in enumerate(childNucs) if nuc in nucsInChild\n            )\n\n            if not indicesToSet:\n                if dens == 0:\n                    # density is zero, skip\n                    continue\n\n                # This nuc doesn't exist in any children but is to be set.\n                # Evenly distribute it everywhere.\n                childrenToSet = children\n                dehomogenizedNDens = dens / sum(volFracs)\n\n            else:\n                childrenToSet = tuple(children[i] for i in indicesToSet)\n                dehomogenizedNDens = dens / sum(volFracs[i] for i in indicesToSet)\n\n            for child in childrenToSet:\n                allDehomogenizedNDens[child][nuc] = dehomogenizedNDens\n\n        # apply the child-dependent ndens vectors to the children\n        for child, ndens in allDehomogenizedNDens.items():\n            child.updateNumberDensities(ndens)\n\n    def changeNDensByFactor(self, factor):\n        \"\"\"Change the number density of all nuclides within the object by a multiplicative factor.\"\"\"\n        densitiesScaled = {\n            nuc: val * factor for nuc, val in self.getNumberDensities().items()\n        }\n        self.setNumberDensities(densitiesScaled)\n\n    def clearNumberDensities(self):\n        \"\"\"\n        Reset all the number densities to nearly zero.\n\n        Set to almost zero, so components remember which nuclides are where.\n        \"\"\"\n        ndens = {nuc: units.TRACE_NUMBER_DENSITY for nuc in self.getNuclides()}\n        self.setNumberDensities(ndens)\n\n    def density(self):\n        \"\"\"Returns the mass density of the object in g/cc.\"\"\"\n        density = 0.0\n        for nuc in self.getNuclides():\n            density += (\n                self.getNumberDensity(nuc)\n                * nucDir.getAtomicWeight(nuc)\n                / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM\n            )\n\n        return density\n\n    def getNumberOfAtoms(self, nucName):\n        \"\"\"Return the number of atoms of nucName in this object.\"\"\"\n        numDens = self.getNumberDensity(nucName)  # atoms/bn-cm\n        return numDens * self.getVolume() / units.CM2_PER_BARN\n\n    def getLumpedFissionProductCollection(self):\n        \"\"\"\n        Get collection of LFP objects. Will work for global or block-level LFP models.\n\n        Returns\n        -------\n        lfps : LumpedFissionProduct\n            lfpName keys, lfp object values\n\n        See Also\n        --------\n        armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct : LFP object\n        \"\"\"\n        return self._lumpedFissionProducts\n\n    def setLumpedFissionProducts(self, lfpCollection):\n        self._lumpedFissionProducts = lfpCollection\n\n    def setChildrenLumpedFissionProducts(self, lfpCollection):\n        for c in self.getChildren():\n            c.setLumpedFissionProducts(lfpCollection)\n\n    def getFissileMassEnrich(self):\n        r\"\"\"Return the fissile mass enrichment.
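\n\n        Examples\n        --------\n        Illustrative only; the object and the numbers are hypothetical:\n\n        >>> block.getFissileMass()          # doctest: +SKIP\n        15.0\n        >>> block.getHMMass()               # doctest: +SKIP\n        100.0\n        >>> block.getFissileMassEnrich()    # doctest: +SKIP\n        0.15\n        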
\"\"\"\n hm = self.getHMMass()\n if hm > 0:\n return self.getFissileMass() / hm\n else:\n return 0.0\n\n def getBoronMassEnrich(self):\n \"\"\"Return B-10 mass fraction.\"\"\"\n b10 = self.getMass(\"B10\")\n b11 = self.getMass(\"B11\")\n total = b11 + b10\n if total == 0.0:\n return 0.0\n return b10 / total\n\n def getUraniumMassEnrich(self):\n \"\"\"returns U-235 mass fraction assuming U-235 and U-238 only.\"\"\"\n u5 = self.getMass(\"U235\")\n if u5 < 1e-10:\n return 0.0\n u8 = self.getMass(\"U238\")\n return u5 / (u8 + u5)\n\n def getUraniumNumEnrich(self):\n \"\"\"Returns U-235 number fraction.\"\"\"\n u8 = self.getNumberDensity(\"U238\")\n if u8 < 1e-10:\n return 0.0\n u5 = self.getNumberDensity(\"U235\")\n return u5 / (u8 + u5)\n\n def getPuN(self):\n \"\"\"Returns total number density of Pu isotopes\"\"\"\n nucNames = [nuc.name for nuc in elements.byZ[94].nuclideBases]\n return sum(self.getNuclideNumberDensities(nucNames))\n\n def calcTotalParam(\n self,\n param,\n objs=None,\n volumeIntegrated=False,\n addSymmetricPositions=False,\n typeSpec: TypeSpec = None,\n generationNum=1,\n calcBasedOnFullObj=False,\n ):\n \"\"\"\n Sums up a parameter throughout the object's children or list of objects.\n\n Parameters\n ----------\n param : str\n Name of the block parameter to sum\n\n objs : iterable, optional\n A list of objects to sum over. If none, all children in object will be used\n\n volumeIntegrated : bool, optional\n Integrate over volume\n\n addSymmetricPositions : bool, optional\n If True, will multiply by the symmetry factor of the core (3 for 1/3 models,\n 1 for full core models)\n\n typeSpec : TypeSpec\n object types to restrict to\n\n generationNum : int, optional\n Which generation to consider. 1 means direct children, 2 means children of\n children. Default: Just return direct children.\n\n calcBasedOnFullObj : bool, optional\n Some assemblies or blocks, such as the center assembly in a third core\n model, are not modeled as full assemblies or blocks. In the third core model\n objects at these postions are modeled as having 1/3 the volume and thus 1/3\n the power. Setting this argument to True will apply the full value of the\n parameter as if it was a full block or assembly.\n \"\"\"\n tot = 0.0\n if objs is None:\n objs = self.getChildren(generationNum=generationNum)\n\n if addSymmetricPositions:\n if calcBasedOnFullObj:\n raise ValueError(\n \"AddSymmetricPositions is Incompatable with \"\n \"calcBasedOnFullObj. Will result in double counting.\"\n )\n try:\n coreMult = self.powerMultiplier\n except AttributeError:\n coreMult = self.parent.powerMultiplier\n if not coreMult:\n raise ValueError(\"powerMultiplier is equal to {}\".format(coreMult))\n else:\n coreMult = 1.0\n\n for a in objs:\n if not a.hasFlags(typeSpec):\n continue\n\n mult = a.getVolume() if volumeIntegrated else 1.0\n if calcBasedOnFullObj:\n mult *= a.getSymmetryFactor()\n\n tot += a.p[param] * mult\n\n return tot * coreMult\n\n def calcAvgParam(\n self,\n param,\n typeSpec: TypeSpec = None,\n weightingParam=None,\n volumeAveraged=True, # pylint: disable=too-many-arguments\n absolute=True,\n generationNum=1,\n ):\n r\"\"\"\n Calculate the child-wide average of a parameter.\n\n Parameters\n ----------\n param : str\n The ARMI block parameter that you want the average from\n\n typeSpec : TypeSpec\n The child types that should be included in the calculation. 
total = 0.0\n        weightSum = 0.0\n        for child in self.getChildren(generationNum=generationNum):\n            if child.hasFlags(typeSpec):\n                if weightingParam:\n                    weight = child.p[weightingParam]\n                    if weight < 0:\n                        # Just for conservatism, do not allow negative weights.\n                        raise ValueError(\n                            \"Weighting value ({0},{1}) cannot be negative.\".format(\n                                weightingParam, weight\n                            )\n                        )\n                else:\n                    weight = 1.0\n\n                if volumeAveraged:\n                    weight *= child.getVolume()\n\n                weightSum += weight\n                if absolute:\n                    total += abs(child.p[param]) * weight\n                else:\n                    total += child.p[param] * weight\n        if not weightSum:\n            raise ValueError(\n                \"Cannot calculate {0}-weighted average of {1} in {2}. \"\n                \"Weights sum to zero. typeSpec is {3}\"\n                \"\".format(weightingParam, param, self, typeSpec)\n            )\n        return total / weightSum\n\n    def getMaxParam(\n        self,\n        param,\n        typeSpec: TypeSpec = None,\n        absolute=True,\n        generationNum=1,\n        returnObj=False,\n    ):\n        \"\"\"\n        Find the maximum value for the parameter in this container.\n\n        Parameters\n        ----------\n        param : str\n            block parameter that will be sought.\n\n        typeSpec : TypeSpec\n            restricts the search to cover a variety of block types.\n\n        absolute : bool\n            looks for the largest magnitude value, regardless of sign, default: true\n\n        returnObj : bool, optional\n            If true, returns the child object as well as the value.\n\n        Returns\n        -------\n        maxVal : float\n            The maximum value of the parameter asked for\n        obj : child object\n            The object that has the max (only returned if ``returnObj==True``)\n        \"\"\"\n        comparator = lambda x, y: x > y\n        return self._minMaxHelper(\n            param,\n            typeSpec,\n            absolute,\n            generationNum,\n            returnObj,\n            -float(\"inf\"),\n            comparator,\n        )\n\n    def getMinParam(\n        self,\n        param,\n        typeSpec: TypeSpec = None,\n        absolute=True,\n        generationNum=1,\n        returnObj=False,\n    ):\n        \"\"\"\n        Find the minimum value for the parameter in this container.\n\n        See Also\n        --------\n        getMaxParam : details\n        \"\"\"\n        comparator = lambda x, y: x < y\n        return self._minMaxHelper(\n            param, typeSpec, absolute, generationNum, returnObj, float(\"inf\"), comparator\n        )\n\n    def _minMaxHelper(\n        self,\n        param,\n        typeSpec: TypeSpec,\n        absolute,\n        generationNum,\n        returnObj,\n        startingNum,\n        comparator,\n    ):\n        \"\"\"Helper for getMinParam and getMaxParam.\"\"\"\n        maxP = (startingNum, None)\n        realVal = 0.0\n        objs = self.getChildren(generationNum=generationNum)\n        for b in objs:\n            if b.hasFlags(typeSpec):\n                try:\n                    val = b.p[param]\n                except parameters.UnknownParameterError:\n                    # No worries; not all Composite types are guaranteed to have the\n                    # relevant parameter. 
It might be a good idea to more strongly\n                    # type-check this, perhaps by passing the paramDef,\n                    # rather than its name?\n                    continue\n                if val is None:\n                    # Neither bigger nor smaller than anything (also illegal in Python3)\n                    continue\n                if absolute:\n                    absVal = abs(val)\n                else:\n                    absVal = val\n                if comparator(absVal, maxP[0]):\n                    maxP = (absVal, b)\n                    realVal = val\n        if returnObj:\n            return realVal, maxP[1]\n        else:\n            return realVal\n\n    def getChildParamValues(self, param):\n        \"\"\"Get the child parameter values in a numpy array.\"\"\"\n        return numpy.array([child.p[param] for child in self])\n\n    def isFuel(self):\n        \"\"\"True if this is a fuel block.\"\"\"\n        return self.hasFlags(Flags.FUEL)\n\n    def containsHeavyMetal(self):\n        \"\"\"True if this has HM.\"\"\"\n        for nucName in self.getNuclides():\n            # these already have non-zero density\n            if nucDir.isHeavyMetal(nucName):\n                return True\n        return False\n\n    def getNuclides(self):\n        \"\"\"\n        Determine which nuclides are present in this armi object.\n\n        Returns\n        -------\n        list\n            List of nuclide names that exist in this\n        \"\"\"\n        nucs = set()\n        for child in self.getChildren():\n            nucs.update(child.getNuclides())\n        return nucs\n\n    def getFissileMass(self):\n        \"\"\"Returns fissile mass in grams.\"\"\"\n        return self.getMass(nuclideBases.NuclideBase.fissile)\n\n    def getHMMass(self):\n        \"\"\"Returns heavy metal mass in grams.\"\"\"\n        nucs = []\n        for nucName in self.getNuclides():\n            if nucDir.isHeavyMetal(nucName):\n                nucs.append(nucName)\n        mass = self.getMass(nucs)\n        return mass\n\n    def getHMMoles(self):\n        \"\"\"\n        Get the number of moles of heavy metal in this object in full symmetry.\n\n        Notes\n        -----\n        If an object is on a symmetry line, the number of moles will be scaled up by the\n        symmetry factor. This is done because this is typically used for tracking\n        burnup, and BOL moles are computed in full objects too so there are no\n        complications as things move on and off of symmetry lines.\n\n        Warning\n        -------\n        getHMMoles is different from every other get-mass call since it multiplies by\n        the symmetry factor, but getVolume() on the block level divides by the symmetry\n        factor, causing them to cancel out.\n\n        This was needed so that HM moles mass did not change based on if the\n        block/assembly was on a symmetry line or not.\n        \"\"\"\n\n        return (\n            self.getHMDens()\n            / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM\n            * self.getVolume()\n            * self.getSymmetryFactor()\n        )\n\n    def getHMDens(self):\n        \"\"\"\n        Compute the total heavy metal density of this object.\n\n        Returns\n        -------\n        hmDens : float\n            The total heavy metal number (atom) density in atoms/bn-cm.\n        \"\"\"\n        hmNuclides = [\n            nuclide for nuclide in self.getNuclides() if nucDir.isHeavyMetal(nuclide)\n        ]\n        hmDens = sum(self.getNuclideNumberDensities(hmNuclides))\n        return hmDens\n\n    def getPuMass(self):\n        \"\"\"Get the mass of Pu in this object in grams.\"\"\"\n        nucs = []\n        for nucName in [nuc.name for nuc in elements.byZ[94].nuclideBases]:\n            nucs.append(nucName)\n        pu = self.getMass(nucs)\n        return pu\n\n    def getPuFrac(self):\n        \"\"\"\n        Compute the Pu/HM mass fraction in this object.\n\n        Returns\n        -------\n        puFrac : float\n            The pu mass fraction in heavy metal in this assembly\n        \"\"\"\n        hm = self.getHMMass()\n        pu = self.getPuMass()\n        if hm == 0.0:\n            return 0.0\n        else:\n            return pu / hm\n\n    def getZrFrac(self):\n        \"\"\"Return the total zr/(hm+zr) fraction in this assembly.\"\"\"\n        hm = self.getHMMass()\n        zrNucs = [nuc.name for nuc in elements.bySymbol[\"ZR\"].nuclideBases]\n        zr = self.getMass(zrNucs)\n        if hm + zr > 0:\n            return zr 
/ (hm + zr)\n else:\n return 0.0\n\n def getMaxUraniumMassEnrich(self):\n maxV = 0\n for child in self:\n v = child.getUraniumMassEnrich()\n if v > maxV:\n maxV = v\n return maxV\n\n def getFPMass(self):\n \"\"\"Returns mass of fission products in this block in grams\"\"\"\n nucs = []\n for nucName in self.getNuclides():\n if \"LFP\" in nucName:\n nucs.append(nucName)\n mass = self.getMass(nucs)\n return mass\n\n def getFuelMass(self):\n \"\"\"returns mass of fuel in grams. \"\"\"\n return sum([fuel.getMass() for fuel in self.iterComponents(Flags.FUEL)], 0.0)\n\n def constituentReport(self):\n \"\"\"A print out of some pertinent constituent information\"\"\"\n from armi.utils import iterables\n\n rows = [[\"Constituent\", \"HMFrac\", \"FuelFrac\"]]\n columns = [-1, self.getHMMass(), self.getFuelMass()]\n\n for base_ele in [\"U\", \"PU\"]:\n total = sum(\n [self.getMass(nuclide.name) for nuclide in elements.bySymbol[base_ele]]\n )\n rows.append([base_ele, total, total])\n\n fp_total = self.getFPMass()\n rows.append([\"FP\", fp_total, fp_total])\n\n ma_nuclides = iterables.flatten(\n [\n ele.nuclideBases\n for ele in [\n elements.byZ[key] for key in elements.byZ.keys() if key > 94\n ]\n ]\n )\n ma_total = sum([self.getMass(nuclide.name) for nuclide in ma_nuclides])\n rows.append([\"MA\", ma_total, ma_total])\n\n for i, row in enumerate(rows):\n for j, entry in enumerate(row):\n try:\n percent = entry / columns[j] * 100.0\n rows[i][j] = percent or \"-\"\n except ZeroDivisionError:\n rows[i][j] = \"NaN\"\n except TypeError:\n pass # trying to divide the string name\n\n return \"\\n\".join([\"{:<14}{:<10}{:<10}\".format(*row) for row in rows])\n\n def getAtomicWeight(self):\n r\"\"\"\n Calculate the atomic weight of this object in g/mole of atoms.\n\n .. warning:: This is not the molecular weight, which is grams per mole of\n molecules (grams/gram-molecule). That requires knowledge of the chemical\n formula. Don't be surprised when you run this on UO2 and find it to be 90;\n there are a lot of Oxygen atoms in UO2.\n\n .. math::\n\n A = \\frac{\\sum_i N_i A_i }{\\sum_i N_i}\n\n \"\"\"\n numerator = 0.0\n denominator = 0.0\n\n numDensities = self.getNumberDensities()\n\n for nucName, nDen in numDensities.items():\n atomicWeight = nuclideBases.byName[nucName].weight\n numerator += atomicWeight * nDen\n denominator += nDen\n return numerator / denominator\n\n def getMasses(self):\n \"\"\"\n Return a dictionary of masses indexed by their nuclide names.\n\n Notes\n -----\n Implemented to get number densities and then convert to mass\n because getMass is too slow on a large tree.\n \"\"\"\n numDensities = self.getNumberDensities()\n vol = self.getVolume()\n return {\n nucName: densityTools.getMassInGrams(nucName, vol, ndens)\n for nucName, ndens in numDensities.items()\n }\n\n def getIntegratedMgFlux(self, adjoint=False, gamma=False):\n raise NotImplementedError\n\n def getMgFlux(self, adjoint=False, average=False, volume=None, gamma=False):\n \"\"\"\n Return the multigroup neutron flux in [n/cm^2/s]\n\n The first entry is the first energy group (fastest neutrons). Each additional\n group is the next energy group, as set in the ISOTXS library.\n\n On blocks, it is stored integrated over volume on <block>.p.mgFlux\n\n Parameters\n ----------\n adjoint : bool, optional\n Return adjoint flux instead of real\n\n average : bool, optional\n If true, will return average flux between latest and previous. 
Doesn't work\n for pin detailed yet\n\n volume: float, optional\n If average=True, the volume-integrated flux is divided by volume before\n being returned. The user may specify a volume here, or the function will\n obtain the block volume directly.\n\n gamma : bool, optional\n Whether to return the neutron flux or the gamma flux.\n\n Returns\n -------\n flux : numpy.array\n multigroup neutron flux in [n/cm^2/s]\n \"\"\"\n if average:\n raise NotImplementedError(\n \"{} class has no method for producing average MG flux -- try\"\n \"using blocks\".format(self.__class__)\n )\n\n volume = volume or self.getVolume()\n return self.getIntegratedMgFlux(adjoint=adjoint, gamma=gamma) / volume\n\n def removeMass(self, nucName, mass):\n self.addMass(nucName, -mass)\n\n def addMass(self, nucName, mass):\n \"\"\"\n Parameters\n ----------\n nucName : str\n nuclide name e.g. 'U235'\n\n mass : float\n mass in grams of nuclide to be added to this armi Object\n \"\"\"\n volume = self.getVolume()\n addedNumberDensity = densityTools.calculateNumberDensity(nucName, mass, volume)\n self.setNumberDensity(\n nucName, self.getNumberDensity(nucName) + addedNumberDensity\n )\n\n def addMasses(self, masses):\n \"\"\"\n Adds a vector of masses.\n\n Parameters\n ----------\n masses : dict\n a dictionary of masses (g) indexed by nucNames (string)\n \"\"\"\n for nucName, mass in masses.items():\n if mass:\n self.addMass(nucName, mass)\n\n def setMass(self, nucName, mass):\n \"\"\"\n Set the mass in an object by adjusting the ndens of the nuclides.\n\n Parameters\n ----------\n nucName : str\n Nuclide name to set mass of\n mass : float\n Mass in grams to set.\n\n \"\"\"\n d = calculateNumberDensity(nucName, mass, self.getVolume())\n self.setNumberDensity(nucName, d)\n\n def setMasses(self, masses):\n \"\"\"\n Set a vector of masses.\n\n Parameters\n ----------\n masses : dict\n a dictionary of masses (g) indexed by nucNames (string)\n \"\"\"\n self.clearNumberDensities()\n for nucName, mass in masses.items():\n self.setMass(nucName, mass)\n\n def getSymmetryFactor(self):\n \"\"\"\n Return a scaling factor due to symmetry on the area of the object or its children.\n\n See Also\n --------\n armi.reactor.blocks.HexBlock.getSymmetryFactor : concrete implementation\n \"\"\"\n return 1.0\n\n def getBoundingIndices(self):\n \"\"\"\n Find the 3-D index bounds (min, max) of all children in the spatial grid of this object.\n\n Returns\n -------\n bounds : tuple\n ((minI, maxI), (minJ, maxJ), (minK, maxK))\n \"\"\"\n minI = minJ = minK = float(\"inf\")\n maxI = maxJ = maxK = -float(\"inf\")\n for obj in self:\n i, j, k = obj.spatialLocator.getCompleteIndices()\n if i >= maxI:\n maxI = i\n if i <= minI:\n minI = i\n\n if j >= maxJ:\n maxJ = j\n if j <= minJ:\n minJ = j\n\n if k >= maxK:\n maxK = k\n if k <= minK:\n minK = k\n\n return ((minI, maxI), (minJ, maxJ), (minK, maxK))\n\n def getComponentNames(self):\n r\"\"\"\n Get all unique component names of this Composite.\n\n Returns\n -------\n set or str\n A set of all unique component names found in this Composite.\n \"\"\"\n return set(c.getName() for c in self.iterComponents())\n\n def getComponentsOfShape(self, shapeClass):\n \"\"\"\n Return list of components in this block of a particular shape.\n\n Parameters\n ----------\n shapeClass : Component\n The class of component, e.g. 
Circle, Helix, Hexagon, etc.\n\n        Returns\n        -------\n        param : list\n            List of components in this block that are of the given shape.\n        \"\"\"\n        return [c for c in self.iterComponents() if isinstance(c, shapeClass)]\n\n    def getComponentsOfMaterial(self, material=None, materialName=None):\n        \"\"\"\n        Return list of components in this block that are made of a particular material.\n\n        Only one of the selectors may be used.\n\n        Parameters\n        ----------\n        material : Material object, optional\n            The material to match\n        materialName : str, optional\n            The material name to match.\n\n        Returns\n        -------\n        componentsWithThisMat : list\n\n        \"\"\"\n\n        if materialName is None:\n            materialName = material.getName()\n        else:\n            assert (\n                material is None\n            ), \"Cannot call with more than one selector. Choose one or the other.\"\n\n        componentsWithThisMat = []\n        for c in self.iterComponents():\n            if c.getProperties().getName() == materialName:\n                componentsWithThisMat.append(c)\n        return componentsWithThisMat\n\n    def hasComponents(self, typeSpec: Union[TypeSpec, List[TypeSpec]], exact=False):\n        \"\"\"\n        Return true if components matching all TypeSpec exist in this object.\n\n        Parameters\n        ----------\n        typeSpec : Flags or iterable of Flags\n            Component flags to check for\n        \"\"\"\n        # Wrap the typeSpec in a tuple if we got a scalar\n        try:\n            typeSpec = iter(typeSpec)\n        except TypeError:\n            typeSpec = (typeSpec,)\n\n        for t in typeSpec:\n            # loop b/c getComponents is an OR operation on the flags, but we need AND\n            if not self.getComponents(t, exact):\n                return False\n        return True\n\n    def getComponentByName(self, name):\n        \"\"\"\n        Gets a particular component from this object, based on its name.\n\n        Parameters\n        ----------\n        name : str\n            The blueprint name of the component to return\n        \"\"\"\n        components = [c for c in self.iterComponents() if c.name == name]\n        nComp = len(components)\n        if nComp == 0:\n            return None\n        elif nComp > 1:\n            raise ValueError(\n                \"More than one component named '{}' in {}\".format(name, self)\n            )\n        else:\n            return components[0]\n\n    def getComponent(\n        self, typeSpec: TypeSpec, exact=False, returnNull=False, quiet=False\n    ):\n        \"\"\"\n        Get a particular component from this object.\n\n        Parameters\n        ----------\n        typeSpec : flags.Flags or list of Flags\n            The type specification of the component to return\n\n        exact : boolean, optional\n            Demand that the component flags be exactly equal to the typespec. Default: False\n\n        quiet : boolean, optional\n            If True, suppress the warning when no matching component is found. Default: False\n\n        Be careful with multiple similar names in one object.\n\n        Returns\n        -------\n        Component : The component that matches the criteria or None\n\n        \"\"\"\n        results = self.getComponents(typeSpec, exact=exact)\n        if len(results) == 1:\n            return results[0]\n        elif not results:\n            if not quiet:\n                runLog.warning(\n                    \"No component matched {0} in {1}. Returning None\".format(\n                        typeSpec, self\n                    ),\n                    single=True,\n                    label=\"None component returned instead of {0}\".format(typeSpec),\n                )\n            return None\n        else:\n            raise ValueError(\n                \"Multiple components in {} match typeSpec {}: {}\".format(\n                    self, typeSpec, results\n                )\n            )
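\n\n    # Illustrative usage (editorial sketch; names like ``b`` are hypothetical and\n    # this comment is not part of the original ARMI file):\n    #\n    #     fuel = b.getComponent(Flags.FUEL)             # exactly one match expected\n    #     clad = b.getComponent(Flags.CLAD, quiet=True) # None if absent, no warning\n    #     both = b.hasComponents([Flags.FUEL, Flags.CLAD])  # True only if both exist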
\n\n    def getNumComponents(self, typeSpec: TypeSpec, exact=False):\n        \"\"\"\n        Get the number of components that have these flags, taking into account multiplicity. Useful\n        for getting nPins even when there are pin detailed cases.\n\n        Parameters\n        ----------\n        typeSpec : Flags\n            Expected flags of the component to get, e.g. Flags.FUEL\n\n        Returns\n        -------\n        total : int\n            the number of components of this type in this object, including multiplicity.\n        \"\"\"\n        total = 0\n        for c in self.iterComponents(typeSpec, exact):\n            total += int(c.getDimension(\"mult\"))\n        return total\n\n    def setComponentDimensionsReport(self):\n        \"\"\"Makes a summary of the dimensions of the components in this object.\"\"\"\n        reportGroups = []\n        for c in self.iterComponents():\n            reportGroups.append(c.setDimensionReport())\n\n        return reportGroups\n\n    def printDensities(self, expandFissionProducts=False):\n        \"\"\"Get lines that have the number densities of an object.\"\"\"\n        numberDensities = self.getNumberDensities(\n            expandFissionProducts=expandFissionProducts\n        )\n        lines = []\n        for nucName, nucDens in numberDensities.items():\n            lines.append(\"{0:6s} {1:.7E}\".format(nucName, nucDens))\n        return lines\n\n    def expandAllElementalsToIsotopics(self):\n        reactorNucs = self.getNuclides()\n        for elemental in nuclideBases.where(\n            lambda nb: isinstance(nb, nuclideBases.NaturalNuclideBase)\n            and nb.name in reactorNucs\n        ):\n            self.expandElementalToIsotopics(elemental)\n\n    def expandElementalToIsotopics(self, elementalNuclide):\n        \"\"\"\n        Expand the density of a specific elemental nuclide to its natural isotopics.\n\n        Parameters\n        ----------\n        elementalNuclide : :class:`armi.nucDirectory.nuclideBases.NaturalNuclide`\n            natural nuclide to replace.\n        \"\"\"\n        natName = elementalNuclide.name\n        for component in self.iterComponents():\n            elementalDensity = component.getNumberDensity(natName)\n            if elementalDensity == 0.0:\n                continue\n            component.setNumberDensity(natName, 0.0)  # clear the elemental\n            del component.p.numberDensities[natName]\n            # add in isotopics\n            for natNuc in elementalNuclide.getNaturalIsotopics():\n                component.setNumberDensity(\n                    natNuc.name, elementalDensity * natNuc.abundance\n                )\n\n    def getAverageTempInC(self, typeSpec: TypeSpec = None, exact=False):\n        \"\"\"\n        Return the average temperature of the ArmiObject in C by averaging all components\n        \"\"\"\n        tempNumerator = 0.0\n        totalVol = 0.0\n        for component in self.iterComponents(typeSpec, exact):\n            vol = component.getVolume()\n            tempNumerator += component.temperatureInC * vol\n            totalVol += vol\n\n        return tempNumerator / totalVol\n\n    def getDominantMaterial(self, typeSpec: TypeSpec = None, exact=False):\n        \"\"\"\n        Return the first sample of the most dominant material (by volume) in this object.\n\n        Parameters\n        ----------\n        typeSpec : Flags or iterable of Flags, optional\n            The types of components to consider (e.g. ``[Flags.FUEL, Flags.CONTROL]``)\n        exact : bool, optional\n            Whether or not the TypeSpec is exact\n\n        Returns\n        -------\n        mat : Material\n            the first instance of the most dominant material (by volume) in this object.\n\n        See Also\n        --------\n        getComponentsOfMaterial\n            Gets components that are made of a particular material\n        gatherMaterialsByVolume\n            Classifies all materials by volume\n\n        \"\"\"\n        return getDominantMaterial([self], typeSpec, exact)\n\n\nclass Composite(ArmiObject):\n    \"\"\"\n    An ArmiObject that has children.\n\n    This is a fundamental ARMI state object that generally represents some piece of the\n    nuclear reactor that is made up of other smaller pieces. This object can cache\n    information about its children to help performance.\n\n    **Details about spatial representation**\n\n    Spatial representation of a ``Composite`` is handled through a combination of the\n    ``spatialLocator`` and ``spatialGrid`` parameters. 
The ``spatialLocator`` is a numpy\n    triple representing either:\n\n    1. Indices in the parent's ``spatialGrid`` (for lattices, etc.), used when the dtype\n       is int.\n\n    2. Coordinates in the parent's universe in cm, used when the dtype is float.\n\n    The top parent of any composite must have a coordinate-based ``spatialLocator``. For\n    example, a Reactor and a Pump should both have coordinates based on how far apart\n    they are.\n\n    The traversal of indices and grids is recursive. The Reactor/Core/Assembly/Block\n    model is handled by putting a 2-D grid (either Theta-R, Hex, or Cartesian) on the\n    Core and individual 1-D Z-meshes on the assemblies. Then, Assemblies have 2-D\n    spatialLocators (i,j,0) and Blocks have 1-D spatialLocators (0,0,k). These get added\n    to form the global indices. This way, if an assembly is moved, all the blocks\n    immediately and naturally move with it. Individual children may have\n    coordinate-based spatialLocators mixed with siblings in a grid. This allows mixing\n    grid-representation with explicit representation, often useful in advanced\n    assemblies and thermal reactors.\n\n    \"\"\"\n\n    def __init__(self, name):\n        ArmiObject.__init__(self, name)\n        self.childrenByLocator = {}\n        self._children = []\n\n    def __getitem__(self, index):\n        return self._children[index]\n\n    def __setitem__(self, index, obj):\n        raise NotImplementedError(\"Unsafe to insert elements directly\")\n\n    def __iter__(self):\n        return iter(self._children)\n\n    def __len__(self):\n        return len(self._children)\n\n    def __contains__(self, item):\n        \"\"\"\n        Membership check.\n\n        This does not use equality checks for membership checking because equality\n        operations can be fairly heavy. Rather, this only checks direct identity\n        matches.\n\n        \"\"\"\n        return id(item) in set(id(c) for c in self._children)\n\n    def index(self, obj):\n        \"\"\"Obtain the list index of a particular child.\"\"\"\n        return self._children.index(obj)\n\n    def append(self, obj):\n        \"\"\"Append a child to this object.\"\"\"\n        self._children.append(obj)\n\n    def extend(self, seq):\n        \"\"\"Add a list of children to this object.\"\"\"\n        self._children.extend(seq)\n\n    def add(self, obj):\n        \"\"\"Add one new child.\"\"\"\n        if obj in self:\n            raise RuntimeError(\n                \"Cannot add {0} because it has already been added to {1}.\".format(\n                    obj, self\n                )\n            )\n        obj.parent = self\n        self._children.append(obj)\n\n    def remove(self, obj):\n        \"\"\"Remove a particular child.\"\"\"\n        obj.parent = None\n        obj.spatialLocator = obj.spatialLocator.detachedCopy()\n        self._children.remove(obj)\n\n    def moveTo(self, locator):\n        \"\"\"Move to specific location in parent. 
Often in a grid.\"\"\"\n if locator.grid.armiObject is not self.parent:\n raise ValueError(\n \"Cannot move {} to a location in {}, which is not its parent ({}).\"\n \"\".format(self, locator.grid.armiObject, self.parent)\n )\n self.spatialLocator = locator\n\n def insert(self, index, obj):\n \"\"\"Insert an object into the list of children at a particular index.\"\"\"\n if obj in self._children:\n raise RuntimeError(\n \"Cannot insert {0} because it has already been added to {1}.\".format(\n obj, self\n )\n )\n obj.parent = self\n self._children.insert(index, obj)\n\n def removeAll(self):\n \"\"\"Remove all children.\"\"\"\n for c in self.getChildren()[:]:\n self.remove(c)\n\n def setChildren(self, items):\n \"\"\"Clear this container and fills it with new children.\"\"\"\n self.removeAll()\n for c in items:\n self.add(c)\n\n def getChildren(\n self, deep=False, generationNum=1, includeMaterials=False, predicate=None\n ):\n \"\"\"\n Return the children objects of this composite.\n\n Parameters\n ----------\n deep : boolean, optional\n Return all children of all levels.\n\n generationNum : int, optional\n Which generation to return. 1 means direct children, 2 means children of\n children. Setting this parameter will only return children of this\n generation, not their parents. Default: Just return direct children.\n\n includeMaterials : bool, optional\n Include the material properties\n\n predicate : callable, optional\n An optional unary predicate to use for filtering results. This can be used\n to request children of specific types, or with desired attributes. Not all\n ArmiObjects have the same methods and members, so care should be taken to\n make sure that the predicate executes gracefully in all cases (e.g., use\n ``getattr(obj, \"attribute\", None)`` to access instance attributes). 
Failure\n to meet the predicate only affects the object in question; children will\n still be considered.\n\n Examples\n --------\n >>> obj.getChildren()\n [child1, child2, child3]\n\n >>> obj.getChildren(generationNum=2)\n [grandchild1, grandchild2, grandchild3]\n\n >>> obj.getChildren(deep=True)\n [child1, child2, child3, grandchild1, grandchild2, grandchild3]\n\n # Assuming that grandchild1 and grandchild3 are Component objects\n >>> obj.getChildren(deep=True, predicate=lambda o: isinstance(o, Component))\n [grandchild1, grandchild3]\n\n \"\"\"\n _pred = predicate or (lambda x: True)\n if deep and generationNum > 1:\n raise RuntimeError(\n \"Cannot get children with a generation number set and the deep flag set\"\n )\n\n children = []\n for child in self._children:\n if generationNum == 1 or deep:\n if _pred(child):\n children.append(child)\n\n if generationNum > 1 or deep:\n children.extend(\n child.getChildren(\n deep=deep,\n generationNum=generationNum - 1,\n includeMaterials=includeMaterials,\n predicate=predicate,\n )\n )\n if includeMaterials:\n material = getattr(self, \"material\", None)\n if material:\n children.append(material)\n\n return children\n\n def getChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=False):\n \"\"\"Get all children of a specific type.\"\"\"\n children = []\n for child in self:\n if child.hasFlags(typeSpec, exact=exactMatch):\n children.append(child)\n return children\n\n def getChildrenOfType(self, typeName):\n \"\"\"Get children that have a specific input type name.\"\"\"\n children = []\n for child in self:\n if child.getType() == typeName:\n children.append(child)\n return children\n\n def getComponents(self, typeSpec: TypeSpec = None, exact=False):\n return list(self.iterComponents(typeSpec, exact))\n\n def iterComponents(self, typeSpec: TypeSpec = None, exact=False):\n \"\"\"\n Return an iterator of armi.reactor.component.Component objects within this Composite.\n\n Parameters\n ----------\n typeSpec : TypeSpec\n Component flags. Will restrict Components to specific ones matching the\n flags specified.\n\n exact : bool, optional\n Only match exact component labels (names). If True, 'coolant' will not match\n 'interCoolant'. 
This has no impact if typeSpec is None.\n\n Returns\n -------\n iterator of Component\n items matching typeSpec and exact criteria\n \"\"\"\n return (c for child in self for c in child.iterComponents(typeSpec, exact))\n\n def syncMpiState(self):\n \"\"\"\n Synchronize all parameters of this object and all children to all worker nodes\n over the network using MPI.\n\n In parallelized runs, if each process has its own copy of the entire reactor\n hierarchy, this method synchronizes the state of all parameters on all objects.\n\n Returns\n -------\n int\n number of parameters synchronized over all components\n \"\"\"\n if armi.MPI_SIZE == 1:\n return\n\n startTime = timeit.default_timer()\n # sync parameters...\n allComps = [self] + self.getChildren(deep=True, includeMaterials=True)\n sendBuf = [c.p.getSyncData() for c in allComps]\n runLog.debug(\"syncMpiState has {} comps\".format(len(allComps)))\n\n try:\n armi.MPI_COMM.barrier() # sync up\n allGatherTime = -timeit.default_timer()\n allSyncData = armi.MPI_COMM.allgather(sendBuf)\n allGatherTime += timeit.default_timer()\n except:\n msg = [\"Failure while trying to allgather.\"]\n for ci, compData in enumerate(sendBuf):\n if compData is not None:\n msg += [\"sendBuf[{}]: {}\".format(ci, compData)]\n runLog.error(\"\\n\".join(msg))\n raise\n\n errors = collections.defaultdict(\n list\n ) # key is (comp, paramName) value is conflicting nodes\n syncCount = 0\n compsPerNode = {len(nodeSyncData) for nodeSyncData in allSyncData}\n\n if len(compsPerNode) != 1:\n raise exceptions.SynchronizationError(\n \"The workers have different reactor sizes! comp lengths: {}\".format(\n compsPerNode\n )\n )\n\n for ci, comp in enumerate(allComps):\n data = (nodeSyncData[ci] for nodeSyncData in allSyncData)\n syncCount += comp._syncParameters( # pylint: disable=protected-access\n data, errors\n )\n\n if errors:\n errorData = sorted(\n (str(comp), comp.__class__.__name__, str(comp.parent), paramName, nodes)\n for (comp, paramName), nodes in errors.items()\n )\n message = (\n \"Synchronization failed due to overlapping data. Only the first \"\n \"duplicates are listed\\n{}\".format(\n tabulate.tabulate(\n errorData,\n headers=[\n \"Composite\",\n \"Composite Type\",\n \"Composite Parent\",\n \"ParameterName\",\n \"NodeRanks\",\n ],\n )\n )\n )\n raise exceptions.SynchronizationError(message)\n\n self._markSynchronized()\n runLog.extra(\n \"Synchronized reactor over MPI in {:.4f} seconds, {:.4f} seconds in MPI \"\n \"allgather. 
count:{}\".format(\n timeit.default_timer() - startTime, allGatherTime, syncCount\n )\n )\n\n return syncCount\n\n def _syncParameters(self, allSyncData, errors):\n # ensure no overlap with syncedKeys, use errors to report overlapping data\n syncedKeys = set()\n for nodeRank, nodeSyncData in enumerate(allSyncData):\n if nodeSyncData is None:\n continue\n # nodeSyncData is a list of tuples\n for key, val in nodeSyncData.items():\n if key in syncedKeys:\n # TODO: this requires further investigation and should be avoidable.\n # this situation results when a composite object is flagged as being\n # out of sync, and this parameter was also globally modified and\n # readjusted to the original value.\n curVal = self.p[key]\n if isinstance(val, numpy.ndarray) or isinstance(\n curVal, numpy.ndarray\n ):\n if (val != curVal).any():\n errors[self, key].append(nodeRank)\n elif curVal != val:\n errors[self, key].append(nodeRank)\n runLog.error(\n \"in {}, {} differ ({} != {})\".format(self, key, curVal, val)\n )\n continue\n syncedKeys.add(key)\n self.p[key] = val\n self.clearCache()\n return len(syncedKeys)\n\n def _markSynchronized(self):\n \"\"\"\n Mark the composite and child parameters as synchronized across MPI.\n\n We clear SINCE_LAST_DISTRIBUTE_STATE so that anything after this point will set\n the SINCE_LAST_DISTRIBUTE_STATE flag, indicating it has been modified\n SINCE_LAST_DISTRIBUTE_STATE.\n \"\"\"\n paramDefs = set()\n for child in [self] + self.getChildren(deep=True, includeMaterials=True):\n # below reads as: assigned & everything_but(SINCE_LAST_DISTRIBUTE_STATE)\n child.p.assigned &= ~parameters.SINCE_LAST_DISTRIBUTE_STATE\n paramDefs.add(child.p.paramDefs)\n for paramDef in paramDefs:\n paramDef.resetAssignmentFlag(parameters.SINCE_LAST_DISTRIBUTE_STATE)\n\n def retainState(self, paramsToApply=None):\n \"\"\"\n Restores a state before and after some operation.\n\n Parameters\n ----------\n paramsToApply : iterable\n Parameters that should be applied to the state after existing the state\n retainer. 
All others will be reverted to their values upon entering.\n\n        Notes\n        -----\n        This should be used in a `with` statement.\n        \"\"\"\n        return StateRetainer(self, paramsToApply)\n\n    def backUp(self):\n        \"\"\"\n        Create and store a backup of the state.\n\n        This needed to be overridden due to linked components which actually have a\n        parameter value of another ARMI component.\n        \"\"\"\n        self._backupCache = (self.cached, self._backupCache)\n        self.cached = {}  # don't .clear(), using reference above!\n        self.p.backUp()\n        if self.spatialGrid:\n            self.spatialGrid.backUp()\n\n    def restoreBackup(self, paramsToApply):\n        \"\"\"\n        Restore the parameters from a previously created backup.\n\n        Parameters\n        ----------\n        paramsToApply : list of ParameterDefinitions\n            restores the state of all parameters not in `paramsToApply`\n        \"\"\"\n        self.p.restoreBackup(paramsToApply)\n        self.cached, self._backupCache = self._backupCache\n        if self.spatialGrid:\n            self.spatialGrid.restoreBackup()\n\n    def getLumpedFissionProductsIfNecessary(self, nuclides=None):\n        \"\"\"Return Lumped Fission Product objects that belong to this object or any of its children.\"\"\"\n        if self.requiresLumpedFissionProducts(nuclides=nuclides):\n            lfps = self.getLumpedFissionProductCollection()\n            if lfps is None:\n                for c in self:\n                    return c.getLumpedFissionProductsIfNecessary(nuclides=nuclides)\n            else:\n                return lfps\n        # There are no lumped fission products in the batch so if you use a\n        # dictionary no one will know the difference\n        return {}\n\n    def getLumpedFissionProductCollection(self):\n        \"\"\"\n        Get collection of LFP objects. Will work for global or block-level LFP models.\n\n        Returns\n        -------\n        lfps : object\n            lfpName keys, lfp object values\n\n        See Also\n        --------\n        armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct : LFP object\n        \"\"\"\n        lfps = ArmiObject.getLumpedFissionProductCollection(self)\n        if lfps is None:\n            for c in self.getChildren():\n                lfps = c.getLumpedFissionProductCollection()\n                if lfps is not None:\n                    break\n\n        return lfps\n\n    def requiresLumpedFissionProducts(self, nuclides=None):\n        \"\"\"True if any of the nuclides in this object are Lumped nuclides.\"\"\"\n        if nuclides is None:\n            nuclides = self.getNuclides()\n\n        for nucName in nuclides:\n            if isinstance(nuclideBases.byName[nucName], nuclideBases.LumpNuclideBase):\n                return True\n\n        return False\n\n    def getIntegratedMgFlux(self, adjoint=False, gamma=False):\n        \"\"\"\n        Returns the multigroup neutron tracklength in [n-cm/s].\n\n        The first entry is the first energy group (fastest neutrons). Each additional\n        group is the next energy group, as set in the ISOTXS library.\n\n        Parameters\n        ----------\n        adjoint : bool, optional\n            Return adjoint flux instead of real\n\n        gamma : bool, optional\n            Whether to return the neutron flux or the gamma flux.\n\n        Returns\n        -------\n        integratedFlux : numpy.array\n            multigroup neutron tracklength in [n-cm/s]\n        \"\"\"\n        integratedMgFlux = numpy.zeros(1)\n\n        for c in self:\n            integratedMgFlux = integratedMgFlux + c.getIntegratedMgFlux(\n                adjoint=adjoint, gamma=gamma\n            )\n        return integratedMgFlux\n\n    def getReactionRates(self, nucName, nDensity=None):\n        \"\"\"\n        Get the reaction rates of a certain nuclide on this object.\n\n        Parameters\n        ----------\n        nucName - str\n            nuclide name -- e.g. 
'U235'\n nDensity - float\n number Density\n\n Returns\n -------\n rxnRates : dict\n reaction rates (1/s) for nG, nF, n2n, nA and nP\n\n Notes\n -----\n This is volume integrated NOT (1/cm3-s)\n\n If you set nDensity to 1 this makes 1-group cross section generation easier\n \"\"\"\n rxnRates = {\"nG\": 0, \"nF\": 0, \"n2n\": 0, \"nA\": 0, \"nP\": 0, \"n3n\": 0}\n\n for armiObject in self:\n for rxName, val in armiObject.getReactionRates(\n nucName, nDensity=nDensity\n ).items():\n rxnRates[rxName] += val\n\n return rxnRates\n\n def printContents(self, includeNuclides=True):\n \"\"\"Display information about all the comprising children in this object.\"\"\"\n runLog.important(self)\n for c in self.getChildren():\n c.printContents(includeNuclides=includeNuclides)\n\n def isOnWhichSymmetryLine(self):\n grid = self.parent.spatialGrid\n return grid.overlapsWhichSymmetryLine(self.spatialLocator.getCompleteIndices())\n\n def _genChildByLocationLookupTable(self):\n \"\"\"Update the childByLocation lookup table.\"\"\"\n runLog.extra(\"Generating location-to-child lookup table.\")\n self.childrenByLocator = {}\n for child in self:\n self.childrenByLocator[child.spatialLocator] = child\n\n\nclass Leaf(Composite):\n \"\"\"Defines behavior for primitive objects in the composition.\"\"\"\n\n def getChildren(\n self, deep=False, generationNum=1, includeMaterials=False, predicate=None\n ):\n \"\"\"Return empty list, representing that this object has no children.\"\"\"\n return []\n\n def getChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=True):\n \"\"\"Return empty list, representing that this object has no children.\"\"\"\n return []\n\n\nclass StateRetainer:\n \"\"\"\n Retains state during some operations.\n\n This can be used to temporarily cache state, perform an operation, extract some info, and\n then revert back to the original state.\n\n * A state retainer is faster than restoring state from a database as it reduces\n the number of IO reads; however, it does use more memory.\n\n * This can be used on any object within the composite pattern via with\n ``[rabc].retainState([list], [of], [parameters], [to], [retain]):``.\n Use on an object up in the hierarchy applies to all objects below as well.\n\n * This is intended to work across MPI, so that if you were to broadcast the\n reactor the state would be correct; however the exact implication on\n ``parameters`` may be unclear.\n\n \"\"\"\n\n def __init__(self, composite, paramsToApply=None):\n \"\"\"\n Create an instance of a StateRetainer\n\n Parameters\n ----------\n composite: Composite\n composite object to retain state (recursively)\n\n paramsToApply: iterable of parameters.Parameter\n Iterable of parameters.Parameter to retain updated values after `__exit__`.\n All other parameters are reverted to the original state, i.e. retained at\n the original value.\n \"\"\"\n self.composite = composite\n self.paramsToApply = set(paramsToApply or [])\n\n def __enter__(self):\n self._enterExitHelper(lambda obj: obj.backUp())\n return self\n\n def __exit__(self, *args):\n self._enterExitHelper(lambda obj: obj.restoreBackup(self.paramsToApply))\n\n def _enterExitHelper(self, func):\n \"\"\"Helper method for __enter__ and __exit__. 
``func`` is a lambda to either backUp() or restoreBackup().\"\"\"\n        paramDefs = set()\n        for child in [self.composite] + self.composite.getChildren(\n            deep=True, includeMaterials=True\n        ):\n            paramDefs.update(child.p.paramDefs)\n            func(child)\n        for paramDef in paramDefs:\n            func(paramDef)\n\n\ndef gatherMaterialsByVolume(\n    objects: List[ArmiObject], typeSpec: TypeSpec = None, exact=False\n):\n    \"\"\"\n    Compute the total volume of each material in a set of objects and give samples.\n\n    Parameters\n    ----------\n    objects : list of ArmiObject\n        Objects to look within. This argument allows clients to search through some subset\n        of the tree (e.g. when you're looking for all CLADDING components within FUEL blocks)\n\n    typeSpec : TypeSpec\n        Flags for the components to look at\n\n    exact : bool\n        Whether or not the TypeSpec is exact\n\n    Notes\n    -----\n    This helper method is outside the main ArmiObject tree for the special clients that need\n    to filter both by container type (e.g. Block type) with one set of flags, and Components\n    with another set of flags.\n\n    .. warning:: This is a **composition** related helper method that will likely be filed into\n        classes/modules that deal specifically with the composition of things in the data model.\n        Thus clients that use it from here should expect to need updates soon.\n    \"\"\"\n    volumes = {}\n    samples = {}\n    for obj in objects:\n        for c in obj.iterComponents(typeSpec, exact):\n            vol = c.getVolume()\n            matName = c.material.getName()\n            volumes[matName] = volumes.get(matName, 0.0) + vol\n            if matName not in samples:\n                samples[matName] = c.material\n    return volumes, samples\n\n\ndef getDominantMaterial(\n    objects: List[ArmiObject], typeSpec: TypeSpec = None, exact=False\n):\n    \"\"\"\n    Return the first sample of the most dominant material (by volume) in a set of objects.\n\n    .. warning:: This is a **composition** related helper method that will likely be filed into\n        classes/modules that deal specifically with the composition of things in the data model.\n        Thus clients that use it from here should expect to need updates soon.\n\n    \"\"\"\n    volumes, samples = gatherMaterialsByVolume(objects, typeSpec, exact)\n\n    if volumes:\n        # find matName with max volume\n        maxMatName = list(sorted(volumes.items(), key=lambda item: item[1])).pop()[0]\n        # return this material. Note that if this material\n        # has properties like Zr-frac, enrichment, etc. then this will\n        # just return one in the batch, not an average.\n        return samples[maxMatName]\n",
"# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Generic ARMI utilities\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport pickle\nimport re\nimport pkgutil\nimport importlib\nimport traceback\nimport getpass\nimport math\nimport datetime\nimport tempfile\nimport shutil\nimport threading\nimport subprocess\nimport collections\n\nimport hashlib\n\nimport numpy\nimport scipy.optimize as sciopt\n\nimport armi\nfrom armi import runLog\nfrom armi.utils import iterables\nfrom armi.localization import strings\nfrom armi.localization import warnings\nfrom armi.localization import exceptions\nfrom armi.utils.flags import Flag\n\n# Read in file 1 MB at a time to reduce memory burden of reading entire file at once\n_HASH_BUFFER_SIZE = 1024 * 1024\n\n# special pattern to deal with FORTRAN-produced scipats without E, like 3.2234-234\nSCIPAT_SPECIAL = re.compile(r\"([+-]?\\d*\\.\\d+)[eEdD]?([+-]\\d+)\")\n\n\ndef coverageReportHelper(config, dataPaths):\n \"\"\"\n Small utility function to generate coverage reports.\n\n This was created to side-step the difficulties in submitting multi-line python\n commands on-the-fly.\n\n This combines data paths and then makes html and xml reports for the\n fully-combined result.\n \"\"\"\n from coverage import Coverage\n import coverage\n\n try:\n cov = Coverage(config_file=config)\n if dataPaths:\n # fun fact: if you combine when there's only one file, it gets deleted.\n cov.combine(data_paths=dataPaths)\n cov.save()\n else:\n cov.load()\n cov.html_report()\n cov.xml_report()\n except PermissionError:\n # Albert has some issues with filename that start with a '.', such as the\n # .coverage files. If a permissions error is raised, it likely has something to\n # do with that. We changed the COVERAGE_RESULTS_FILE in cases.py for this reason.\n #\n # We print here, since this is used to run a one-off command, so runLog isn't\n # really appropriate.\n print(\n \"There was an issue in generating coverage reports. Probably related to \"\n \"Albert hidden file issues.\"\n )\n # disabled until we figure out the problem.\n # raise\n except coverage.misc.CoverageException as e:\n # This is happening when forming the unit test coverage report. This may be\n # caused by the TestFixture coverage report gobbling up all of the coverage\n # files before the UnitTests.cov_report task gets a chance to see them. It may\n # simply be that we dont want a coverage report generated for the TestFixture.\n # Something to think about. 
Either way, we do not want to fail the job just\n        # because of this\n        print(\n            \"There was an issue generating coverage reports \"\n            \"({}):\\n{}\".format(type(e), e.args)\n        )\n\n\ndef getFileSHA1Hash(filePath, digits=40):\n    \"\"\"\n    Generate a SHA-1 hash of the input file.\n\n    Parameters\n    ----------\n    filePath : str\n        Path to file to obtain the SHA-1 hash\n    digits : int, optional\n        Number of digits to include in the hash (40 digit maximum for SHA-1)\n    \"\"\"\n    sha1 = hashlib.sha1()\n    with open(filePath, \"rb\") as f:\n        while True:\n            data = f.read(_HASH_BUFFER_SIZE)\n            if not data:\n                break\n            sha1.update(data)\n    return sha1.hexdigest()[:digits]\n\n\ndef efmt(a):\n    r\"\"\"Converts a string exponential number to another string with just 2 digits in the exponent.\"\"\"\n    # this assumes that none of our numbers will be more than 1e100 or less than 1e-100...\n    if len(a.split(\"E\")) != 2:\n        two = a.split(\"e\")\n    else:\n        two = a.split(\"E\")\n    # print two\n    exp = two[1]  # this is '+002' or '+02' or something\n\n    if len(exp) == 4:  # it has 3 digits of exponent\n        exp = exp[0] + exp[2:]  # gets rid of the hundred's place digit\n\n    return two[0] + \"E\" + exp\n\n\ndef fixThreeDigitExp(strToFloat):\n    \"\"\"\n    Convert FORTRAN numbers that cannot be converted into floats.\n\n    Notes\n    -----\n    Converts a number like \"9.03231714805651-101\" (no e or E) to \"9.03231714805651e-101\".\n    Some external depletion kernels currently need this fix. From contact with developer:\n    The notation like 1.0-101 is a FORTRAN thing, with history going back to the 60's.\n    They will only put E before an exponent 99 and below. Fortran will also read these guys\n    just fine, and they are valid floating point numbers. It would not be a useful effort,\n    in terms of time, trying to get FORTRAN to behave differently.\n    The approach has been to write a routine in the reading code which will interpret these.\n\n    This helps when the scientific number exponent does not fit.\n    \"\"\"\n    match = SCIPAT_SPECIAL.match(strToFloat)\n    return float(\"{}E{}\".format(*match.groups()))\n\n\ndef findClosest(listToSearch, val, indx=False):\n    r\"\"\"\n    Find the closest item in a list.\n\n    Parameters\n    ----------\n    listToSearch : list\n        The list to search through\n\n    val : float\n        The target value that is being searched for in the list\n\n    indx : bool, optional\n        If true, returns minVal and minIndex, otherwise, just the value\n\n    Returns\n    -------\n    minVal : float\n        The item in the listToSearch that is closest to val\n    minI : int\n        The index of the item in listToSearch that is closest to val. Returned if indx=True.\n\n    \"\"\"\n    d = float(\"inf\")\n    minVal = None\n    minI = None\n    for i, item in enumerate(listToSearch):\n        if abs(item - val) < d:\n            d = abs(item - val)\n            minVal = item\n            minI = i\n    if indx:\n        return minVal, minI\n    else:\n        # backwards compatibility\n        return minVal\n\n\n# TODO: move into pathTools\ndef cleanPath(path):\n    r\"\"\"\n    Recursively delete a path.\n\n    !!! careful with this !!! It can delete the entire cluster.\n\n    We add copious os.path.exists checks in case an MPI set of things is trying to delete everything at the same time.\n    Always check filenames for some special flag when calling this, especially\n    with full permissions on the cluster. You could accidentally delete everyone's work\n    with one misplaced line! This doesn't ask questions.\n\n    Safety nets include a whitelist of paths.\n\n    This makes use of shutil.rmtree and os.remove\n\n    Returns\n    -------\n    success : bool\n        True if file was deleted. 
False if it was not.\n\n \"\"\"\n valid = False\n if os.path.exists(path):\n runLog.extra(\"Clearing all files in {}\".format(path))\n else:\n runLog.extra(\"Nothing to clean in {}. Doing nothing. \".format(path))\n return True\n for validPath in [\n \"users\",\n \"shufflebranches\",\n \"snapshot\",\n \"failedruns\",\n \"armiruns\",\n \"mc2run\",\n \"tests\",\n \"mongoose\",\n ]:\n if validPath in path.lower():\n valid = True\n\n if not valid:\n raise Exception(\n \"Thou shalt not try to delete folders other than things in Users. Thou tried to delete {0}\"\n \"\".format(path)\n )\n\n for i in range(3):\n try:\n if os.path.exists(path) and os.path.isdir(path):\n shutil.rmtree(path)\n elif not os.path.isdir(path):\n # it's just a file. Delete it.\n os.remove(path)\n except:\n if i == 2:\n pass\n # in case the OS is behind or something\n time.sleep(0.1)\n time.sleep(0.3)\n if not os.path.exists(path):\n break\n time.sleep(0.3)\n\n if os.path.exists(path):\n return False\n else:\n return True\n\n\ndef copyWithoutBlocking(src, dest):\n \"\"\"\n Copy a file in a separate thread to avoid blocking while IO completes.\n\n Useful for copying large files while ARMI moves along.\n \"\"\"\n files = \"{} to {}\".format(src, dest)\n runLog.extra(\"Copying (without blocking) {}\".format(files))\n t = threading.Thread(target=shutil.copy, args=(src, dest))\n t.start()\n return t\n\n\ndef linearInterpolation(x0, y0, x1, y1, targetX=None, targetY=None):\n r\"\"\"\n does a linear interpolation (or extrapolation) for y=f(x)\n\n Parameters\n ----------\n x0,y0,x1,y1 : float\n Coordinates of two points to interpolate between\n\n targetX : float, optional\n X value to evaluate the line at\n\n targetY : float, optional\n Y value we want to find the x value for (inverse interpolation)\n\n Returns\n -------\n interpY : float\n The value of y(targetX), if targetX is not None\n\n interpX : float\n The value of x where y(x) = targetY (if targetY is not None)\n\n y = m(x-x0) + b\n\n x = (y-b)/m\n\n \"\"\"\n\n if x1 == x0:\n raise ZeroDivisionError(\"The x-values are identical. 
Cannot interpolate.\")\n\n    m = (y1 - y0) / (x1 - x0)\n    b = -m * x0 + y0\n\n    if targetX is not None:\n        return m * targetX + b\n    else:\n        return (targetY - b) / m\n\n\ndef parabolaFromPoints(p1, p2, p3):\n    r\"\"\"\n    Find the parabola that passes through three points.\n\n    We solve a simultaneous equation with three points.\n\n    A = x1**2 x1 1\n        x2**2 x2 1\n        x3**2 x3 1\n\n    b = y1\n        y2\n        y3\n\n    find coefficients by solving Ax=b\n\n    Parameters\n    ----------\n    p1 : tuple\n        first point (x,y) coordinates\n    p2,p3: tuple, second and third points.\n\n    Returns\n    -------\n    a,b,c coefficients of y=ax^2+bx+c\n\n    \"\"\"\n\n    A = numpy.array(\n        [[p1[0] ** 2, p1[0], 1], [p2[0] ** 2, p2[0], 1], [p3[0] ** 2, p3[0], 1]]\n    )\n\n    b = numpy.array([[p1[1]], [p2[1]], [p3[1]]])\n    try:\n        x = numpy.linalg.solve(A, b)\n    except:\n        print(\"Error in parabola {} {}\".format(A, b))\n        raise\n\n    return float(x[0]), float(x[1]), float(x[2])\n\n\ndef parabolicInterpolation(ap, bp, cp, targetY):\n    r\"\"\"\n    Given parabola coefficients, this interpolates the time\n    that would give y=targetY (e.g. k=target keff).\n\n    keff = at^2+bt+c\n    We want to solve a*t^2+bt+c-targetY = 0.0 for time.\n    if there are real roots, we should probably take the smallest one\n    because the larger one might be at very high burnup.\n    If there are no real roots, just take the point where the deriv ==0, or\n    2at+b=0, so t = -b/2a\n    The slope of the curve is the solution to 2at+b at whatever t has been determined\n\n    Parameters\n    ----------\n    ap, bp, cp : floats\n        coefficients of a parabola y = ap*x^2 + bp*x + cp\n\n    targetY : float\n        The target y-value (e.g. the keff to find the cycle length of)\n\n    Returns\n    -------\n    realRoots : list of tuples\n        (root, slope)\n        The best guess of the cycle length that will give y=targetY\n        If no positive root was found, this is the maximum of the curve. In that case,\n        it will be a negative number. If there are two positive roots, there will be two entries.\n\n        slope : float\n            The slope of the y vs. time curve at t=newTime\n\n    \"\"\"\n    roots = numpy.roots([ap, bp, cp - targetY])\n    realRoots = []\n    for r in roots:\n        if r.imag == 0 and r.real > 0:\n            realRoots.append((r.real, 2.0 * ap * r.real + bp))\n\n    if not realRoots:\n        # no positive real roots. Take maximum and give up for this cycle.\n        newTime = -bp / (2 * ap)\n        if newTime < 0:\n            raise RuntimeError(\"No positive roots or maxima.\")\n        slope = 2.0 * ap * newTime + bp\n        newTime = (\n            -newTime\n        )  # return a negative newTime to signal that it is not expected to be critical.\n        realRoots = [(newTime, slope)]\n\n    return realRoots
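\n\n\n# Illustrative check (editorial comment, not part of the original ARMI file):\n# for y = x**2 - 3x + 2, parabolaFromPoints((0, 2), (1, 0), (3, 2)) recovers\n# the coefficients (1.0, -3.0, 2.0), and parabolicInterpolation(1.0, -3.0, 2.0,\n# targetY=0.0) returns both positive roots with their slopes, e.g.\n# [(2.0, 1.0), (1.0, -1.0)] (root order follows numpy.roots).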
\n\n\ndef getFloat(val):\n    r\"\"\"Returns the float version of val, or None if it's impossible. Useful for converting\n    user-input into floats when '' might be possible.\"\"\"\n    try:\n        newVal = float(val)\n        return newVal\n    except:\n        return None\n\n\ndef relErr(v1, v2):\n    if v1:\n        return (v2 - v1) / v1\n    else:\n        return -1e99\n\n\ndef getTimeStepNum(cycleNumber, subcycleNumber, cs):\n    \"\"\"Return the timestep associated with cycle and tn.\n\n    Parameters\n    ----------\n    cycleNumber : int, The cycle number\n    subcycleNumber : int, The intra-cycle time node (0 for BOC, etc.)\n    cs : Settings object\n\n    \"\"\"\n    return cycleNumber * getNodesPerCycle(cs) + subcycleNumber\n\n\ndef getCycleNode(timeStepNum, cs):\n    \"\"\"\n    Return the (cycle, node) corresponding to a cumulative time step number.\n\n    Parameters\n    ----------\n    timeStepNum\n        The cumulative number of time steps since the beginning\n    cs\n        A case Settings object to get the nodes-per-cycle from\n    \"\"\"\n    nodesPerCycle = getNodesPerCycle(cs)\n\n    return (timeStepNum // nodesPerCycle, timeStepNum % nodesPerCycle)\n\n\ndef getNodesPerCycle(cs):\n    \"\"\"Return the number of nodes per cycle for these case settings.\"\"\"\n    return cs[\"burnSteps\"] + 1\n\n\ndef getPreviousTimeStep(cycle, node, burnSteps):\n    \"\"\"Return the time step before the specified time step.\"\"\"\n    if (cycle, node) == (0, 0):\n        raise ValueError(\"There is no time step before (0, 0)\")\n    if node != 0:\n        return (cycle, node - 1)\n    else:\n        # index starts at zero, so the last node in a cycle is equal to the number of\n        # burn steps.\n        return (cycle - 1, burnSteps)\n\n\ndef tryPickleOnAllContents(obj, ignore=None, path=None, verbose=False):\n    r\"\"\"\n    Attempts to pickle all members of this object and identifies those that cannot be pickled.\n\n    Useful for debugging MPI-bcast errors\n\n    Not recursive yet. Would be nice to have it loop through nested objects (blocks in assems in reactors)\n\n    Parameters\n    ----------\n    obj : object\n        Any object to be tested.\n    ignore : iterable\n        list of string variable names to ignore.\n    path : str\n        the path in which to test pickle.\n    verbose : bool, optional\n        Print all objects whether they fail or not\n\n    \"\"\"\n    if ignore is None:\n        ignore = []\n\n    # pickle gives better error messages than cPickle\n    for name, ob in obj.__dict__.items():\n        if name not in ignore:\n            if verbose:\n                print(\"Checking {0}...\".format(name))\n            try:\n                pickle.dumps(ob)  # dump as a string\n            except:\n                print(\n                    \"{0} in {1} cannot be pickled. It is: {2}. \".format(name, obj, ob)\n                )\n                # traceback.print_exc(limit=0,file=sys.stdout)\n\n\ndef tryPickleOnAllContents2(*args, **kwargs):\n    # helper\n    print(doTestPickleOnAllContents2(*args, **kwargs))\n\n\ndef doTestPickleOnAllContents2(obj, ignore=None, path=None, verbose=False):\n    r\"\"\"\n    Attempts to find one unpickleable object in a nested object\n\n    Returns\n    -------\n    pickleChain : list\n        list of names in a chain that are unpickleable. Just one example per object\n        e.g. ['r','assemblies','A101','lib'] means the lib is unpicklable.\n    \"\"\"\n    if ignore is None:\n        ignore = []\n    unpickleable = []\n    if not hasattr(obj, \"__dict__\"):\n        print(\"done\")\n        return unpickleable\n    for name, ob in obj.__dict__.items():\n        print((\"checking \", name))\n        if name not in ignore:\n            try:\n                pickle.dumps(ob)  # dump as a string\n            except:\n                unpickleable.append(name)\n                print(\"Can't pickle {0}\".format(name))\n                # recursive call.\n                unpickleable.extend(\n                    doTestPickleOnAllContents2(ob, ignore=unpickleable + ignore)\n                )\n\n    return unpickleable\n\n\nclass MyPickler(pickle.Pickler):\n    r\"\"\"\n    The big guns. 
This will find your pickle errors if all else fails.\n\n    Use with tryPickleOnAllContents3.\n    \"\"\"\n\n    def save(self, obj):\n        try:\n            pickle.Pickler.save(self, obj)\n        except Exception:\n            _excType, excValue, _excTraceback = sys.exc_info()\n            print(\"Object that failed: {}. Err: {}\".format(obj, excValue))\n            raise\n\n\ndef tryPickleOnAllContents3(obj, ignore=None, path=None, verbose=False):\n    \"\"\"\n    Definitely find pickle errors\n\n    Notes\n    -----\n    In this form, this just finds one pickle error and then crashes. If you want\n    to make it work like the other testPickle functions and handle errors, you could.\n    But usually you just have to find one unpickleable SOB.\n    \"\"\"\n\n    with tempfile.TemporaryFile() as output:\n        try:\n            MyPickler(output).dump(obj)\n        except (pickle.PicklingError, TypeError):\n            pass\n\n\ndef classesInHierarchy(obj, classCounts, visited=None):\n    \"\"\"\n    Count the number of instances of each class contained in an object's hierarchy.\n    \"\"\"\n    if not isinstance(classCounts, collections.defaultdict):\n        raise TypeError(\n            \"Need to pass in a default dict for classCounts (it's an out param)\"\n        )\n    if visited is None:\n        classCounts[type(obj)] += 1\n        visited = set()\n        visited.add(id(obj))\n\n    try:\n        for c in obj.__dict__.values():\n            if id(c) not in visited:\n                classCounts[type(c)] += 1\n                visited.add(id(c))\n                classesInHierarchy(c, classCounts, visited=visited)\n    except AttributeError:\n        pass\n\n\ndef slantSplit(val, ratio, nodes, order=\"low first\"):\n    r\"\"\"\n    Returns a list of values whose sum is equal to the value specified.\n    The ratio between the highest and lowest value is equal to the specified ratio,\n    and the middle values trend linearly between them.\n\n    \"\"\"\n    val = float(val)\n    ratio = float(ratio)\n    nodes = int(nodes)\n    v0 = 2.0 * val / (nodes * (1.0 + ratio))\n    X = []\n    for i in range(nodes):\n        X.append(v0 + i * (v0 * ratio - v0) / (nodes - 1))\n\n    if order == \"high first\":\n        X.reverse()\n\n    return X\n\n\ndef newtonsMethod(\n    func, goal, guess, maxIterations=None, cs=None, positiveGuesses=False\n):\n    r\"\"\"\n    Runs Newton's method with the given function, goal value, and first guess.\n\n    Parameters\n    ----------\n    func : function\n        The function that guess will be changed to try to make it return the goal value.\n\n    goal : float\n        The guess will be changed until the function's return value equals this goal.\n\n    guess : float\n        The first guess value to do Newton's method on the func.\n\n    maxIterations : int\n        The maximum number of iterations that the Newton's method will be allowed to perform.\n\n\n    Returns\n    -------\n    ans : float\n        The guess that when input to the func returns the goal.\n\n    \"\"\"\n\n    def goalFunc(guess, func, positiveGuesses):\n        if positiveGuesses is True:\n            guess = abs(guess)\n        funcVal = func(guess)\n        val = abs(goal - funcVal)\n        return val\n\n    if (maxIterations is None) and (cs is not None):\n        maxIterations = cs[\"maxNewtonsIterations\"]\n\n    # try:\n    ans = float(\n        sciopt.newton(\n            goalFunc,\n            guess,\n            args=(func, positiveGuesses),\n            tol=1.0e-3,\n            maxiter=maxIterations,\n        )\n    )\n\n    if positiveGuesses is True:\n        ans = abs(ans)\n\n    return ans\n\n\ndef minimizeScalarFunc(\n    func,\n    goal,\n    guess,\n    maxIterations=None,\n    cs=None,\n    positiveGuesses=False,\n    method=None,\n    tol=1.0e-3,\n):\n    r\"\"\"\n    Use scipy minimize with the given function, goal value, and first guess.\n\n    Parameters\n    ----------\n    func : function\n        The function that guess will be changed to try to make it return the goal value.\n\n    goal : float\n        The guess will be changed until 
the function's return value equals this goal.\n\n    guess : float\n        The first guess value to start the minimization from.\n\n    maxIterations : int\n        The maximum number of iterations that the minimizer will be allowed to perform.\n\n\n    Returns\n    -------\n    ans : float\n        The guess that when input to the func returns the goal.\n\n    \"\"\"\n\n    def goalFunc(guess, func, positiveGuesses):\n        if positiveGuesses is True:\n            guess = abs(guess)\n        funcVal = func(guess)\n        val = abs(goal - funcVal)\n        return val\n\n    if (maxIterations is None) and (cs is not None):\n        maxIterations = cs[\"maxNewtonsIterations\"]\n\n    X = sciopt.minimize(\n        goalFunc,\n        guess,\n        args=(func, positiveGuesses),\n        method=method,\n        tol=tol,\n        options={\"maxiter\": maxIterations},\n    )\n    ans = float(X[\"x\"])\n    if positiveGuesses is True:\n        ans = abs(ans)\n\n    return ans\n\n\ndef runFunctionFromAllModules(funcName, *args, **kwargs):\n    r\"\"\"\n    Runs funcName on all modules of ARMI, if it exists.\n\n    Parameters\n    ----------\n    funcName : str\n        The function to run if it is found in a module.\n\n    \\*args, \\*\\*kwargs : arguments to pass to func if it is found\n\n    Notes\n    -----\n    This imports all modules in ARMI, and if you have a script that isn't inside a\n    ``if __name__=='__main__'``, you will be in trouble.\n\n    This could also be useful for finding input consistency checkers for the GUI.\n\n    See Also\n    --------\n    armi.settings.addAllDefaultSettings : gets all the settings from all modules\n\n    \"\"\"\n    for _modImporter, name, _ispkg in pkgutil.walk_packages(\n        path=armi.__path__, prefix=armi.__name__ + \".\"\n    ):\n        try:\n            mod = importlib.import_module(name)\n            if funcName in dir(mod):  # there is a module.funcName. so call it.\n                func = getattr(mod, funcName)\n                func(*args, **kwargs)\n        except:\n            # just print traceback but don't throw an error.\n            traceback.print_exc()\n\n\n# TODO: move to pathTools\ndef mkdir(dirname):\n    r\"\"\"\n    Keeps trying to make a directory, outputting whatever errors it encounters,\n    until it is successful.\n\n    Parameters\n    ----------\n    dirname : str\n        Path to the directory to create.\n        What you would normally pass to os.mkdir.\n\n    \"\"\"\n    numTimesTried = 0\n    while numTimesTried < 1000:\n        try:\n            os.mkdir(dirname)\n            break\n\n        except Exception as err:\n            numTimesTried += 1\n            # Only output err every 10 times.\n            if numTimesTried % 10 == 0:\n                print(err)\n            # Wait 0.5 seconds, try again.\n            time.sleep(0.5)\n\n\ndef prependToList(originalList, listToPrepend):\n    \"\"\"\n    Add a new list to the beginning of an original list.\n\n    Parameters\n    ----------\n    originalList : list\n        The list to prepend to.\n\n    listToPrepend : list\n        The list to add to the beginning of (prepend) the originalList.\n\n    Returns\n    -------\n    originalList : list\n        The original list with the listToPrepend at its beginning.\n\n    \"\"\"\n    listToPrepend.reverse()\n    originalList.reverse()\n    originalList.extend(listToPrepend)\n    originalList.reverse()\n    listToPrepend.reverse()\n    return originalList
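\n\n\n# Illustrative (editorial comment, not part of the original ARMI file):\n# prependToList([3, 4], [1, 2]) returns [1, 2, 3, 4], and the temporary\n# in-place reversals are undone, so listToPrepend is left as [1, 2].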
\n\n\ndef capStrLen(string, length):\n    \"\"\"\n    Truncates a string to a certain length.\n\n    Adds '...' if it's too long.\n\n    Parameters\n    ----------\n    string : str\n        The string to cap at the given length.\n    length : int\n        The maximum length of the returned string.\n    \"\"\"\n    if length <= 2:\n        raise Exception(\"length must be at least 3 in utils.capStrLen\")\n\n    if len(string) <= length:\n        return string\n\n    return string[0 : length - 3] + \"...\"\n\n\ndef list2str(strings, width=None, preStrings=None, fmt=None):\n    \"\"\"\n    Turn a list of strings into one string, applying the specified format to each.\n\n    Parameters\n    ----------\n    strings : list\n        The items to create centered strings in the line for.\n        Can be str, float, int, etc.\n\n    width : int, optional\n        The maximum width that the strings are allowed to take up.\n        Only strings are affected by this parameter, because it does\n        not make sense to truncate ints or floats.\n\n    preStrings : list of str, optional\n        Any strings that come before the centered strings.\n\n    fmt : str, optional\n        The format to apply to each string, such as\n        ' >4d', '^12.4E'.\n\n    \"\"\"\n    if preStrings is None:\n        preStrings = []\n\n    if fmt is None:\n        fmt = \"\"\n\n    newStrings = []\n    for string in strings:\n        if isinstance(string, str) and width is not None:\n            string = capStrLen(str(string), width)\n        string = \"{0:{fmt}}\".format(string, fmt=fmt)\n        newStrings.append(string)\n\n    preStrings.extend(newStrings)\n    return \"\".join(preStrings)
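\n\n\n# Illustrative (editorial comment, not part of the original ARMI file):\n# capStrLen(\"reactivity\", 6) -> \"rea...\", and\n# list2str([12, \"longWordHere\"], width=6, fmt=\"<8\") pads each entry to 8\n# characters after capping strings at 6 characters.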
\n\n\ndef createFormattedStrWithDelimiter(\n    dataList, maxNumberOfValuesBeforeDelimiter=9, delimiter=\"\\n\"\n):\n    r\"\"\"\n    Return a formatted string with delimiters from a list of data.\n\n    Parameters\n    ----------\n    dataList : list\n        List of data that will be formatted into a string\n    maxNumberOfValuesBeforeDelimiter : int\n        maximum number of values to have before the delimiter is added\n    delimiter : str\n        A delimiter on the formatted string (default: \"\\n\")\n\n    Notes\n    -----\n    As an example::\n\n        >>> createFormattedStrWithDelimiter(['hello', 'world', '1', '2', '3', '4'],\n        ...     maxNumberOfValuesBeforeDelimiter=3, delimiter='\\n')\n        \"hello, world, 1,\\n2, 3, 4\\n\"\n\n    \"\"\"\n    formattedString = \"\"\n    if not dataList:\n        return formattedString\n\n    if not maxNumberOfValuesBeforeDelimiter:\n        numRows = 1\n    else:\n        numRows = (\n            int(\n                math.ceil(\n                    float(len(dataList)) / float(maxNumberOfValuesBeforeDelimiter)\n                )\n            )\n            or 1\n        )\n\n    # Create a list of string delimiters to use when joining the strings\n    commaList = [\",\" for d in dataList]\n    commaList[-1] = \"\"\n    dataList = [str(d) + commaList[i] for i, d in enumerate(dataList)]\n    for splitList in iterables.split(dataList, n=numRows, padWith=\"\"):\n        formattedString += \" \".join(splitList) + delimiter\n    return formattedString\n\n\ndef rotateXY(x, y, degreesCounterclockwise=None, radiansCounterclockwise=None):\n    \"\"\"\n    Rotates x, y coordinates\n\n    Parameters\n    ----------\n    x, y : array_like\n        coordinates\n\n    degreesCounterclockwise : float\n        Degrees to rotate in the CCW direction\n\n    radiansCounterclockwise : float\n        Radians to rotate in the CCW direction\n\n    Returns\n    -------\n    xr, yr : array_like\n        the rotated coordinates.\n    \"\"\"\n\n    if radiansCounterclockwise is None:\n        radiansCounterclockwise = degreesCounterclockwise * math.pi / 180.0\n\n    sinT = math.sin(radiansCounterclockwise)\n    cosT = math.cos(radiansCounterclockwise)\n    rotationMatrix = numpy.array([[cosT, -sinT], [sinT, cosT]])\n    xr, yr = rotationMatrix.dot(numpy.vstack((x, y)))\n    if len(xr) > 1:\n        ## Convert to lists because everyone prefers lists for some reason\n        return xr.tolist(), yr.tolist()\n    else:\n        ## Convert to scalar for consistency with old implementation\n        return xr[0], yr[0]\n\n\ndef convertToSlice(x, increment=False):\n    \"\"\"\n    Convert an int, float, list of ints or floats, None, or slice\n    to a slice. 
Also optionally increments that slice to make it easy to line\n    up lists that don't start with 0.\n\n    Use this with numpy.array (numpy.ndarray) types to easily get selections of its elements.\n\n    Parameters\n    ----------\n    x : multiple types allowed.\n        int: select one index.\n        list of int: select these index numbers.\n        None: select all indices.\n        slice: select this slice\n\n    Returns\n    -------\n    slice : slice\n        Returns a slice object that can be used in an array\n        like a[x] to select from its members.\n        Also, the slice indices are shifted by ``increment``.\n        It can also return a numpy array, which can be used\n        to slice other numpy arrays in the same way as a slice.\n\n    Examples\n    --------\n    a = numpy.array([10, 11, 12, 13])\n\n    >>> convertToSlice(2)\n    slice(2, 3, None)\n    >>> a[convertToSlice(2)]\n    array([12])\n\n    >>> convertToSlice(2, increment=-1)\n    slice(1, 2, None)\n    >>> a[convertToSlice(2, increment=-1)]\n    array([11])\n\n    >>> a[convertToSlice(None)]\n    array([10, 11, 12, 13])\n\n\n    >>> a[utils.convertToSlice([1, 3])]\n    array([11, 13])\n\n\n    >>> a[utils.convertToSlice([1, 3], increment=-1)]\n    array([10, 12])\n\n    >>> a[utils.convertToSlice(slice(2, 3, None), increment=-1)]\n    array([11])\n\n    \"\"\"\n    if increment is False:\n        increment = 0\n\n    if not isinstance(increment, int):\n        raise Exception(\"increment must be False or an integer in utils.convertToSlice\")\n\n    if x is None:\n        x = numpy.s_[:]\n\n    if isinstance(x, list):\n        x = numpy.array(x)\n\n    if isinstance(x, (int, numpy.integer)) or isinstance(x, (float, numpy.floating)):\n        x = slice(int(x), int(x) + 1, None)\n\n    # Correct the slice indices to be group instead of index based.\n    # The energy groups are 1..x and the indices are 0..x-1.\n    if isinstance(x, slice):\n        if x.start is not None:\n            jstart = x.start + increment\n        else:\n            jstart = None\n\n        if x.stop is not None:\n            if isinstance(x.stop, list):\n                jstop = [x + increment for x in x.stop]\n            else:\n                jstop = x.stop + increment\n        else:\n            jstop = None\n\n        jstep = x.step\n\n        return numpy.s_[jstart:jstop:jstep]\n\n    elif isinstance(x, numpy.ndarray):\n        return numpy.array([i + increment for i in x])\n\n    else:\n        raise Exception(\n            (\n                \"It is not known how to handle x type: \" \"{0} in utils.convertToSlice\"\n            ).format(type(x))\n        )\n\n\ndef plotMatrix(\n    matrix,\n    fName,\n    minV=None,\n    maxV=None,\n    show=False,\n    title=None,\n    xlabel=None,\n    ylabel=None,\n    xticks=None,\n    yticks=None,\n    cmap=None,\n    figsize=None,\n):\n    \"\"\"\n    Plots a matrix\n    \"\"\"\n    import matplotlib\n    import matplotlib.pyplot as plt\n\n    if figsize:\n        plt.figure(figsize=figsize)  # dpi=300)\n    else:\n        plt.figure()\n    if cmap is None:\n        cmap = plt.cm.jet  # @UndefinedVariable  #pylint: disable=no-member\n    cmap.set_bad(\"w\")\n    try:\n        matrix = matrix.todense()\n    except:\n        pass\n\n    if minV:\n        norm = matplotlib.colors.Normalize(minV, maxV)\n    else:\n        norm = None\n\n    if title is None:\n        title = fName\n    plt.imshow(\n        matrix, cmap=cmap, norm=norm, interpolation=\"nearest\"\n    )  # or bicubic or nearest#,vmin=0, vmax=300)\n    plt.colorbar()\n    plt.title(title)\n    plt.xlabel(xlabel)\n    plt.ylabel(ylabel)\n    if xticks:\n        plt.xticks(*xticks, rotation=90)\n    if yticks:\n        plt.yticks(*yticks)\n    plt.grid()\n    plt.savefig(fName)\n    if show:\n        plt.show()\n    plt.close()
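\n\n\n# Illustrative call (editorial comment, not part of the original ARMI file):\n# plotMatrix(numpy.random.rand(5, 5), \"demo.png\", title=\"demo matrix\")\n# writes demo.png and returns without blocking, since show defaults to False.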
hyphens.\n \"\"\"\n return re.sub(\"^[a-zA-Z]-\", \"\", getpass.getuser())\n\n\ndef expandRepeatedFloats(repeatedList):\n \"\"\"\n Return an expanded repeat list.\n\n Notes\n -----\n R char is valid for showing the number of repeats in MCNP. For examples the list:\n [150, 200, '9R']\n indicates a 150 day cycle followed by 10 200 day cycles.\n \"\"\"\n nonRepeatList = []\n for val in repeatedList:\n isRepeat = False\n if isinstance(val, str):\n val = val.upper()\n if val.count(\"R\") > 1:\n raise ValueError(\"List had strings that were not repeats\")\n elif \"R\" in val:\n val = val.replace(\"R\", \"\")\n isRepeat = True\n if isRepeat:\n nonRepeatList += [nonRepeatList[-1]] * int(val)\n else:\n nonRepeatList.append(float(val))\n return nonRepeatList\n\n\ndef getStepsFromValues(values, prevValue=0.0):\n \"\"\"Convert list of floats to list of steps between each float.\"\"\"\n steps = []\n for val in values:\n currentVal = float(val)\n steps.append(currentVal - prevValue)\n prevValue = currentVal\n return steps\n\n\ndef average1DWithinTolerance(vals, tolerance=0.2):\n \"\"\"\n Compute the average of a series of arrays with a tolerance.\n\n Tuned for averaging assembly meshes or block heights.\n\n Parameters\n ----------\n vals : 2D numpy.array\n could be assembly x axial mesh tops or heights\n \"\"\"\n vals = numpy.array(vals)\n\n filterOut = numpy.array([False]) # this gets discarded\n while not filterOut.all(): # 20% difference is the default tolerance\n avg = vals.mean(axis=0) # average over all columns\n diff = abs(vals - avg) / avg # no nans, because all vals are non-zero\n filterOut = (diff > tolerance).sum(\n axis=1\n ) == 0 # True = 1, sum across axis means any height in assem is off\n vals = vals[filterOut] # filter anything that is skewing\n\n if vals.size == 0:\n raise ValueError(\"Nothing was near the mean, there are no acceptable values!\")\n\n if (avg <= 0.0).any():\n raise ValueError(\n \"A non-physical value (<=0) was computed, but this is not possible.\\n\"\n \"Values: {}\\navg: {}\".format(vals, avg)\n )\n\n return avg\n\n\ndef findNearestValue(searchList, searchValue):\n \"\"\"Search a given list for the value that is closest to the given search value.\"\"\"\n return findNearestValueAndIndex(searchList, searchValue)[0]\n\n\ndef findNearestValueAndIndex(searchList, searchValue):\n \"\"\"Search a given list for the value that is closest to the given search value. Return a tuple\n containing the value and its index in the list.\"\"\"\n searchArray = numpy.array(searchList)\n closestValueIndex = (numpy.abs(searchArray - searchValue)).argmin()\n return searchArray[closestValueIndex], closestValueIndex\n\n\nclass MergeableDict(dict):\n \"\"\"\n Overrides python dictionary and implements a merge method.\n\n Notes\n -----\n Allows multiple dictionaries to be combined in a single line\n \"\"\"\n\n def merge(self, *otherDictionaries):\n for dictionary in otherDictionaries:\n self.update(dictionary)\n\n\nshutil_copy = shutil.copy\n\n\ndef safeCopy(src, dst):\n \"\"\"This copy overwrites ``shutil.copy`` and checks that copy operation is truly completed before continuing.\"\"\"\n waitTime = 0.01 # 10 ms\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n srcSize = os.path.getsize(src)\n shutil.copyfile(src, dst)\n shutil.copymode(src, dst)\n while True:\n dstSize = os.path.getsize(dst)\n if srcSize == dstSize:\n break\n time.sleep(waitTime)\n runLog.extra(\"Copied {} -> {}\".format(src, dst))\n\n\nshutil.copy = safeCopy\n"
] |
[
[
"numpy.array",
"numpy.zeros"
],
[
"matplotlib.pyplot.imshow",
"scipy.optimize.newton",
"numpy.roots",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"scipy.optimize.minimize",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.linalg.solve",
"numpy.abs",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
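The code field of the row above includes a 2-D rotation helper (rotateXY). A minimal standalone sketch of the same rotation-matrix approach, assuming only numpy and the standard math module (the function and argument names here are illustrative, not from the repo):

import math

import numpy


def rotate_xy(x, y, degrees_ccw):
    # Build the standard 2-D counterclockwise rotation matrix.
    theta = math.radians(degrees_ccw)
    rotation = numpy.array(
        [[math.cos(theta), -math.sin(theta)],
         [math.sin(theta), math.cos(theta)]]
    )
    # Stack coordinates into a 2 x N array and apply the rotation.
    xr, yr = rotation.dot(numpy.vstack((x, y)))
    return xr, yr


# A 90-degree rotation maps (1, 0) onto (0, 1), up to floating-point error.
xr, yr = rotate_xy([1.0], [0.0], 90.0)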
WinVector/data_algebra
|
[
"3d6002ddf8231d310e03537a0435df0554b62234"
] |
[
"build/lib/data_algebra/cdata.py"
] |
[
"import re\n\nimport numpy\n\nimport data_algebra\nimport data_algebra.util\n\n\nclass RecordSpecification:\n \"\"\"\n Class to represent a multi-row data record.\n \"\"\"\n def __init__(\n self,\n control_table,\n *,\n record_keys=None,\n control_table_keys=None,\n strict=False,\n local_data_model=None\n ):\n \"\"\"\n :param control_table: data.frame describing record layout\n :param record_keys: array of record key column names\n defaults to no columns.\n :param control_table_keys: array of control_table key column names,\n defaults to first column for non-trivial blocks and no columns for rows.\n :param strict: logical, if True more checks on transform\n :param local_data_model: data.frame data model\n \"\"\"\n if local_data_model is None:\n local_data_model = data_algebra.default_data_model\n control_table = control_table.reset_index(inplace=False, drop=True)\n if control_table.shape[0] < 1:\n raise ValueError(\"control table should have at least 1 row\")\n if len(control_table.columns) != len(set(control_table.columns)):\n raise ValueError(\"control table columns should be unique\")\n self.control_table = control_table.reset_index(drop=True, inplace=False)\n assert self.control_table.shape[0] > 0\n if record_keys is None:\n record_keys = []\n if isinstance(record_keys, str):\n record_keys = [record_keys]\n self.record_keys = [k for k in record_keys]\n if control_table_keys is None:\n if self.control_table.shape[0] > 1:\n control_table_keys = [self.control_table.columns[0]]\n else:\n control_table_keys = [] # single row records don't need to be keyed\n if isinstance(control_table_keys, str):\n control_table_keys = [control_table_keys]\n if strict and (self.control_table.shape[0] > 1):\n if len(control_table_keys) <= 0:\n raise ValueError(\"multi-row records must have at least one control table key\")\n self.control_table_keys = [k for k in control_table_keys]\n unknown = set(self.control_table_keys) - set(control_table.columns)\n if len(unknown) > 0:\n raise ValueError(\n \"control table keys that are not in the control table: \" + str(unknown)\n )\n if len(self.control_table_keys) >= control_table.shape[1]:\n raise ValueError(\"control table columns must not all be keys\")\n confused = set(record_keys).intersection(control_table_keys)\n if len(confused) > 0:\n raise ValueError(\n \"columns common to record_keys and control_table_keys: \" + str(confused)\n )\n for ck in self.control_table_keys:\n if any(local_data_model.bad_column_positions(control_table[ck])):\n raise ValueError(\"NA/NaN/inf/None not allowed as control table keys\")\n if strict:\n if not data_algebra.util.table_is_keyed_by_columns(\n self.control_table, self.control_table_keys\n ):\n raise ValueError(\"control table wasn't keyed by control table keys\")\n self.block_columns = self.record_keys + [c for c in self.control_table.columns]\n cvs = []\n for c in self.control_table:\n if c not in self.control_table_keys:\n col = self.control_table[c]\n isnull = col.isnull()\n if all(isnull):\n raise ValueError(\"column \" + c + \" was all null\")\n for i in range(len(col)):\n if not isnull[i]:\n v = col[i]\n if v not in cvs:\n cvs.append(v)\n confused = set(record_keys).intersection(cvs)\n if len(confused) > 0:\n raise ValueError(\n \"control table entries confused with row keys or control table keys\"\n )\n if strict:\n if len(set(cvs)) != len(cvs):\n raise ValueError(\"duplicate content keys\")\n self.content_keys = cvs\n self.row_columns = self.record_keys + cvs\n\n def row_version(self, *, include_record_keys=True):\n 
cols = []\n if include_record_keys:\n cols = cols + self.record_keys\n cols = cols + self.content_keys\n return cols\n\n def __repr__(self):\n s = (\n \"data_algebra.cdata.RecordSpecification(\\n\"\n + \" record_keys=\"\n + self.record_keys.__repr__()\n + \",\\n control_table=\"\n + data_algebra.util.pandas_to_example_str(self.control_table)\n + \",\\n control_table_keys=\"\n + self.control_table_keys.__repr__()\n + \")\"\n )\n return s\n\n def __eq__(self, other):\n if not isinstance(other, RecordSpecification):\n return False\n return self.__repr__() == other.__repr__()\n\n def fmt(self):\n \"\"\"\n Prepare for printing\n\n :return: multi line string representation.\n \"\"\"\n s = (\n \"RecordSpecification\\n\"\n + \" record_keys: \"\n + str(self.record_keys)\n + \"\\n\"\n + \" control_table_keys: \"\n + str(self.control_table_keys)\n + \"\\n\"\n + \" control_table:\\n\"\n + \" \"\n + re.sub(\"\\n\", \"\\n \", str(self.control_table))\n + \"\\n\"\n )\n return s\n\n def __str__(self):\n return self.fmt()\n\n def map_to_rows(self):\n \"\"\"\n Build a RecordMap mapping this RecordSpecification to rowrecs\n\n :return: RecordMap\n \"\"\"\n\n return RecordMap(blocks_in=self)\n\n def map_from_rows(self):\n \"\"\"\n Build a RecordMap mapping this RecordSpecification from rowrecs\n\n :return: RecordMap\n \"\"\"\n\n return RecordMap(blocks_out=self)\n\n\ndef blocks_to_rowrecs(data, *, blocks_in, local_data_model=None):\n assert isinstance(blocks_in, data_algebra.cdata.RecordSpecification)\n ck = [k for k in blocks_in.content_keys if k is not None]\n if len(ck) != len(set(ck)):\n raise ValueError(\"blocks_in can not have duplicate content keys\")\n if local_data_model is None:\n local_data_model = data_algebra.default_data_model\n data = data.reset_index(drop=True)\n missing_cols = set(blocks_in.control_table_keys).union(blocks_in.record_keys) - set(\n data.columns\n )\n if len(missing_cols) > 0:\n raise KeyError(\"missing required columns: \" + str(missing_cols))\n # table must be keyed by record_keys + control_table_keys\n if not data_algebra.util.table_is_keyed_by_columns(\n data, blocks_in.record_keys + blocks_in.control_table_keys\n ):\n raise ValueError(\n \"table is not keyed by blocks_in.record_keys + blocks_in.control_table_keys\"\n )\n # convert to row-records\n # regularize/complete records\n dtemp = data.copy() # TODO: select down columns\n dtemp[\"FALSE_AGG_KEY\"] = 1\n if len(blocks_in.record_keys) > 0:\n ideal = dtemp[blocks_in.record_keys + [\"FALSE_AGG_KEY\"]].copy()\n res = ideal.groupby(blocks_in.record_keys)[\"FALSE_AGG_KEY\"].agg(\"sum\")\n ideal = local_data_model.data_frame(res).reset_index(drop=False)\n ideal[\"FALSE_AGG_KEY\"] = 1\n ctemp = blocks_in.control_table[blocks_in.control_table_keys].copy()\n ctemp[\"FALSE_AGG_KEY\"] = 1\n ideal = ideal.merge(ctemp, how=\"outer\", on=\"FALSE_AGG_KEY\")\n ideal = ideal.reset_index(drop=True)\n dtemp = ideal.merge(\n right=dtemp,\n how=\"left\",\n on=blocks_in.record_keys + blocks_in.control_table_keys + [\"FALSE_AGG_KEY\"],\n )\n dtemp.sort_values(\n by=blocks_in.record_keys + blocks_in.control_table_keys, inplace=True\n )\n dtemp = dtemp.reset_index(drop=True)\n # start building up result frame\n if len(blocks_in.record_keys) > 0:\n res = dtemp.groupby(blocks_in.record_keys)[\"FALSE_AGG_KEY\"].agg(\"sum\")\n else:\n res = dtemp.groupby(\"FALSE_AGG_KEY\")[\"FALSE_AGG_KEY\"].agg(\"sum\")\n res = local_data_model.data_frame(res).reset_index(drop=False)\n res.sort_values(by=blocks_in.record_keys, inplace=True)\n res = 
local_data_model.data_frame(res).reset_index(drop=True)\n del res[\"FALSE_AGG_KEY\"]\n # now fill in columns\n ckeys = blocks_in.control_table_keys\n value_keys = [k for k in blocks_in.control_table.columns if k not in set(ckeys)]\n donor_cols = set(dtemp.columns)\n for i in range(blocks_in.control_table.shape[0]):\n want = numpy.ones((dtemp.shape[0],), dtype=bool)\n for ck in ckeys:\n want = numpy.logical_and(want, dtemp[ck] == blocks_in.control_table[ck][i])\n if numpy.any(want):\n for vk in value_keys:\n if vk in donor_cols:\n dcol = blocks_in.control_table[vk][i]\n res[dcol] = numpy.asarray(dtemp.loc[want, vk])\n # fill in any missed columns\n colset = set(res.columns)\n for c in blocks_in.row_version():\n if c not in colset:\n res[c] = None\n if data.shape[0] <= 0:\n res = res.loc[range(0), :]\n res = res.reset_index(inplace=False, drop=True)\n return res\n\n\ndef rowrecs_to_blocks(\n data, *, blocks_out, check_blocks_out_keying=False, local_data_model=None\n):\n assert isinstance(blocks_out, data_algebra.cdata.RecordSpecification)\n if local_data_model is None:\n local_data_model = data_algebra.default_data_model\n data = data.reset_index(drop=True)\n missing_cols = set(blocks_out.record_keys) - set(data.columns)\n if len(missing_cols) > 0:\n raise KeyError(\"missing required columns: \" + str(missing_cols))\n if check_blocks_out_keying:\n # prefer table be keyed by record_keys\n if not data_algebra.util.table_is_keyed_by_columns(\n data, blocks_out.record_keys\n ):\n raise ValueError(\"table is not keyed by blocks_out.record_keys\")\n # convert to block records, first build up parallel structures\n rv = [k for k in blocks_out.row_version(include_record_keys=True) if k is not None]\n if len(rv) != len(set(rv)):\n raise ValueError(\"duplicate row columns\")\n dtemp_cols = [\n k\n for k in rv\n if k is not None and k in set(blocks_out.record_keys + blocks_out.content_keys)\n ]\n dtemp = data[dtemp_cols].copy()\n dtemp.sort_values(by=blocks_out.record_keys, inplace=True)\n dtemp = dtemp.reset_index(drop=True)\n if len(dtemp.columns) != len(set(dtemp.columns)):\n raise ValueError(\"targeted data columns not unique\")\n ctemp = blocks_out.control_table.copy()\n dtemp[\"FALSE_JOIN_KEY\"] = 1\n ctemp[\"FALSE_JOIN_KEY\"] = 1\n res = dtemp[blocks_out.record_keys + [\"FALSE_JOIN_KEY\"]].merge(\n ctemp, how=\"outer\", on=[\"FALSE_JOIN_KEY\"]\n )\n del res[\"FALSE_JOIN_KEY\"]\n ckeys = blocks_out.control_table_keys\n res.sort_values(by=blocks_out.record_keys + ckeys, inplace=True)\n res = res.reset_index(drop=True)\n del ctemp[\"FALSE_JOIN_KEY\"]\n del dtemp[\"FALSE_JOIN_KEY\"]\n value_keys = [k for k in ctemp.columns if k not in set(ckeys)]\n donor_cols = set(dtemp.columns)\n for vk in value_keys:\n res[vk] = None\n # we now have parallel structures to copy between\n for i in range(ctemp.shape[0]):\n want = numpy.ones((res.shape[0],), dtype=bool)\n for ck in ckeys:\n want = numpy.logical_and(want, res[ck] == ctemp[ck][i])\n if numpy.any(want):\n for vk in value_keys:\n dcol = ctemp[vk][i]\n if dcol in donor_cols:\n nvals = numpy.asarray(dtemp[dcol])\n if len(nvals) < 1:\n nvals = [None] * numpy.sum(want)\n res.loc[want, vk] = nvals\n # see about promoting composite columns to numeric\n for vk in set(value_keys):\n converted = local_data_model.to_numeric(res[vk], errors=\"coerce\")\n if numpy.all(\n local_data_model.isnull(converted) == local_data_model.isnull(res[vk])\n ):\n res[vk] = converted\n if data.shape[0] < 1:\n # empty input produces emtpy output (with different column structure)\n res 
= res.iloc[range(0), :].reset_index(drop=True)\n if data.shape[0] <= 0:\n res = res.loc[range(0), :]\n res = res.reset_index(inplace=False, drop=True)\n return res\n\n\nclass RecordMap:\n def __init__(self, *, blocks_in=None, blocks_out=None):\n if blocks_in is not None:\n assert isinstance(blocks_in, data_algebra.cdata.RecordSpecification)\n ck = [k for k in blocks_in.content_keys if k is not None]\n if len(ck) != len(set(ck)):\n raise ValueError(\"blocks_in can not have duplicate content keys\")\n if blocks_out is not None:\n assert isinstance(blocks_out, data_algebra.cdata.RecordSpecification)\n if (blocks_in is None) and (blocks_out is None):\n raise ValueError(\n \"At least one of blocks_in or blocks_out should not be None\"\n )\n if (blocks_in is not None) and (blocks_out is not None):\n unknown = set(blocks_out.record_keys) - set(blocks_in.record_keys)\n if len(unknown) > 0:\n raise ValueError(\"unknown outgoing record_keys:\" + str(unknown))\n unknown = set(blocks_out.content_keys) - set(blocks_in.content_keys)\n if len(unknown) > 0:\n raise ValueError(\"unknown outgoing content_keys\" + str(unknown))\n self.blocks_in = blocks_in\n self.blocks_out = blocks_out\n if self.blocks_in is not None:\n self.columns_needed = self.blocks_in.block_columns\n else:\n self.columns_needed = self.blocks_out.row_columns\n if self.blocks_out is not None:\n self.columns_produced = self.blocks_out.block_columns\n else:\n self.columns_produced = self.blocks_in.row_columns\n self.fmt_string = self.fmt()\n\n def __eq__(self, other):\n if not isinstance(other, RecordMap):\n return False\n if (self.blocks_in is None) != (other.blocks_in is None):\n return False\n if (self.blocks_out is None) != (other.blocks_out is None):\n return False\n if self.blocks_in is not None:\n if self.blocks_in != other.blocks_in:\n return False\n if self.blocks_in is not None:\n if self.blocks_out != other.blocks_out:\n return False\n return True\n\n def record_keys(self):\n if self.blocks_in is not None:\n return self.blocks_in.record_keys.copy()\n if self.blocks_out is not None:\n return self.blocks_out.record_keys.copy()\n return None\n\n def example_input(self, *, local_data_model=None):\n if local_data_model is None:\n local_data_model = data_algebra.default_data_model\n if self.blocks_in is not None:\n example = self.blocks_in.control_table.copy()\n nrow = example.shape[0]\n for rk in self.blocks_in.record_keys:\n example[rk] = [rk] * nrow\n return example\n if self.blocks_in is not None:\n example = local_data_model.data_frame()\n for k in self.blocks_out.row_columns:\n example[k] = [k]\n return example\n return None\n\n # noinspection PyPep8Naming\n def transform(self, X, *, check_blocks_out_keying=False, local_data_model=None):\n unknown = set(self.columns_needed) - set(X.columns)\n if len(unknown) > 0:\n raise ValueError(\"missing required columns: \" + str(unknown))\n if local_data_model is None:\n local_data_model = data_algebra.default_data_model\n X = X.reset_index(drop=True)\n if self.blocks_in is not None:\n X = blocks_to_rowrecs(\n X, blocks_in=self.blocks_in, local_data_model=local_data_model\n )\n if self.blocks_out is not None:\n X = rowrecs_to_blocks(\n X,\n blocks_out=self.blocks_out,\n check_blocks_out_keying=check_blocks_out_keying,\n local_data_model=local_data_model,\n )\n return X\n\n def compose(self, other):\n \"\"\"\n Experimental method to compose transforms\n (self.compose(other)).transform(data) == self.transform(other.transform(data))\n\n :param other: another data_algebra.cdata.RecordMap\n 
:return:\n \"\"\"\n\n assert isinstance(other, RecordMap)\n # (s2.compose(s1)).transform(data) == s2.transform(s1.transform(data))\n s1 = other\n s2 = self\n rk = s1.record_keys()\n if set(rk) != set(s2.record_keys()):\n raise ValueError(\"can only compose operations with matching record_keys\")\n inp = s1.example_input()\n out = s2.transform(s1.transform(inp))\n rsi = inp.drop(rk, axis=1, inplace=False)\n rso = out.drop(rk, axis=1, inplace=False)\n if inp.shape[0] < 2:\n if out.shape[0] < 2:\n return None\n else:\n return RecordMap(\n blocks_out=data_algebra.cdata.RecordSpecification(\n control_table=rso,\n record_keys=rk,\n control_table_keys=s2.blocks_out.control_table_keys,\n )\n )\n else:\n if out.shape[0] < 2:\n return RecordMap(\n blocks_in=data_algebra.cdata.RecordSpecification(\n control_table=rsi,\n record_keys=rk,\n control_table_keys=s1.blocks_in.control_table_keys,\n )\n )\n else:\n return RecordMap(\n blocks_in=data_algebra.cdata.RecordSpecification(\n control_table=rsi,\n record_keys=rk,\n control_table_keys=s1.blocks_in.control_table_keys,\n ),\n blocks_out=data_algebra.cdata.RecordSpecification(\n control_table=rso,\n record_keys=rk,\n control_table_keys=s2.blocks_out.control_table_keys,\n ),\n )\n\n # noinspection PyTypeChecker\n def __rrshift__(self, other): # override other >> self\n if other is None:\n return self\n if isinstance(other, RecordMap):\n # (data >> other) >> self == data >> (other >> self)\n return self.compose(other)\n return self.transform(other)\n\n def inverse(self):\n return RecordMap(blocks_in=self.blocks_out, blocks_out=self.blocks_in)\n\n def fmt(self):\n if (self.blocks_in is None) and (self.blocks_out is None):\n return \"RecordMap(no-op)\"\n if (self.blocks_in is not None) and (self.blocks_out is not None):\n s = (\n \"Transform block records of structure:\\n\"\n + str(self.blocks_in)\n + \"to block records of structure:\\n\"\n + str(self.blocks_out)\n )\n return s\n if self.blocks_in is not None:\n s = (\n \"Transform block records of structure:\\n\"\n + str(self.blocks_in)\n + \"to row records of the form:\\n\"\n + \" record_keys: \"\n + str(self.blocks_in.record_keys)\n + \"\\n\"\n + \" \"\n + str(self.blocks_in.row_version(include_record_keys=False))\n + \"\\n\"\n )\n return s\n if self.blocks_out is not None:\n s = (\n \"Transform row records of the form:\\n\"\n + \" record_keys: \"\n + str(self.blocks_out.record_keys)\n + \"\\n\"\n + \" \"\n + str(self.blocks_out.row_version(include_record_keys=False))\n + \"\\n\"\n + \"to block records of structure:\\n\"\n + str(self.blocks_out)\n )\n return s\n raise ValueError(\"should not be reached\")\n\n def __repr__(self):\n s = (\n \"data_algebra.cdata.RecordMap(\"\n + \"\\n blocks_in=\"\n + self.blocks_in.__repr__()\n + \",\\n blocks_out=\"\n + self.blocks_out.__repr__()\n + \")\"\n )\n return s\n\n def __str__(self):\n return self.fmt_string\n\n # more of the sklearn step API\n\n # noinspection PyPep8Naming, PyUnusedLocal\n def fit(self, X, y=None):\n pass\n\n # noinspection PyPep8Naming, PyUnusedLocal\n def fit_transform(self, X, y=None):\n return self.transform(X)\n\n # noinspection PyUnusedLocal\n def get_feature_names(self, input_features=None):\n return self.columns_produced.copy()\n\n # noinspection PyUnusedLocal,PyMethodMayBeStatic\n def get_params(self, deep=False):\n return dict()\n\n def set_params(self, **params):\n pass\n\n # noinspection PyPep8Naming\n def inverse_transform(self, X):\n return self.inverse().transform(X)\n\n\ndef pivot_blocks_to_rowrecs(\n *,\n attribute_key_column,\n 
attribute_value_column,\n record_keys,\n record_value_columns,\n local_data_model=None\n):\n \"\"\"\n Build a block records to row records map. This is very similar to a SQL pivot.\n\n :param attribute_key_column: column to identify record attribute keys\n :param attribute_value_column: column for record attribute values\n :param record_keys: names of key columns identifying row record blocks\n :param record_value_columns: names of columns to take row record values from\n :param local_data_model: data.frame data model\n :return: RecordMap\n \"\"\"\n\n if local_data_model is None:\n local_data_model = data_algebra.default_data_model\n control_table = local_data_model.data_frame(\n {\n attribute_key_column: record_value_columns,\n attribute_value_column: record_value_columns,\n }\n )\n ct = RecordSpecification(\n control_table,\n record_keys=record_keys,\n control_table_keys=[attribute_key_column],\n local_data_model=local_data_model,\n )\n return ct.map_to_rows()\n\n\ndef pivot_rowrecs_to_blocks(\n *,\n attribute_key_column,\n attribute_value_column,\n record_keys,\n record_value_columns,\n local_data_model=None\n):\n \"\"\"\n Build a row records to block records map. This is very similar to a SQL unpivot.\n\n :param attribute_key_column: column to identify record attribute keys\n :param attribute_value_column: column for record attribute values\n :param record_keys: names of key columns identifying row record blocks\n :param record_value_columns: names of columns to take row record values from\n :param local_data_model: data.frame data model\n :return: RecordMap\n \"\"\"\n\n if local_data_model is None:\n local_data_model = data_algebra.default_data_model\n control_table = local_data_model.data_frame(\n {\n attribute_key_column: record_value_columns,\n attribute_value_column: record_value_columns,\n }\n )\n ct = RecordSpecification(\n control_table,\n record_keys=record_keys,\n control_table_keys=[attribute_key_column],\n local_data_model=local_data_model,\n )\n return ct.map_from_rows()\n"
] |
[
[
"numpy.asarray",
"numpy.ones",
"numpy.any",
"numpy.logical_and",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
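The cdata.py source in the row above converts between block records (one attribute per row) and row records (one record per row). A hedged plain-pandas sketch of the same pivot idea follows; it is an approximation for illustration, not the data_algebra implementation, and the column names are invented:

import pandas

# Block form: each (record key, attribute key) pair occupies its own row.
blocks = pandas.DataFrame({
    "id": [1, 1, 2, 2],                      # record key
    "measure": ["AUC", "R2", "AUC", "R2"],   # control-table key
    "value": [0.7, 0.4, 0.8, 0.5],           # attribute value
})

# Row form: one row per record, one column per attribute.
rows = blocks.pivot(index="id", columns="measure", values="value").reset_index()
print(rows)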
melaniebeck/video-classification
|
[
"145eb44ac70e7669a706d5f67914a7d28fd931fe",
"145eb44ac70e7669a706d5f67914a7d28fd931fe"
] |
[
"vidbench/predict.py",
"vidbench/data/process.py"
] |
[
"# ###########################################################################\n#\n# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)\n# (C) Cloudera, Inc. 2021\n# All rights reserved.\n#\n# Applicable Open Source License: Apache 2.0\n#\n# NOTE: Cloudera open source products are modular software products\n# made up of hundreds of individual components, each of which was\n# individually copyrighted. Each Cloudera open source product is a\n# collective work under U.S. Copyright Law. Your license to use the\n# collective work is as provided in your written agreement with\n# Cloudera. Used apart from the collective work, this file is\n# licensed for your use pursuant to the open source license\n# identified above.\n#\n# This code is provided to you pursuant a written agreement with\n# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute\n# this code. If you do not have a written agreement with Cloudera nor\n# with an authorized and properly licensed third party, you do not\n# have any rights to access nor to use this code.\n#\n# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the\n# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY\n# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED\n# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO\n# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND\n# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,\n# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS\n# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE\n# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR\n# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES\n# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF\n# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF\n# DATA.\n#\n# ###########################################################################\n\nfrom collections import defaultdict\nimport time\nfrom typing import List\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nLabel = str\n\n\ndef sort_2d_array_rows(arr, mylabs, descending=True):\n \"\"\"Orders rows of arr and applies ordering indices to mylabs.\n\n Useful to find top prediction scores and labels, e.g. in classification\n\n Example\n Input is a 2d array, where each row has the scores for each class of an example\n and different rows correspond to different examples in a batch.\n arr = [[0.3, 0.1, 0.4],[0.6, 0.9, 0.7]]\n mylabs = ['a', 'b', 'c']\n descending=True\n Output\n [[0.4, 0.3, 0.1], [0.9, 0.7, 0.6]]\n [['c', 'a', 'b'], ['b', 'c', 'a']]\n\n Args\n arr: Numpy array of floating points, ndim = 2. shape: [batch_size, num_classes]\n mylabs: List of size equal to the second dimension of arr, namely num_classes\n descending: Sorts array in descending order if True, and in increasing order otherwise.\n Returns\n Tuple of arrays, each of which has same shape as arr\n \"\"\"\n\n assert arr.ndim == 2\n assert arr.shape[1] == len(mylabs)\n\n indices = np.argsort(arr, axis=1)\n if descending:\n indices = indices[..., ::-1]\n\n arr_sorted = np.take_along_axis(arr, indices, axis=1)\n labels_sorted = np.take(mylabs, indices)\n\n return arr_sorted, labels_sorted\n\n\ndef predict(video_np, model, verbose=False):\n \"\"\"Predict the class of a video using TensorFlow model\n\n Args:\n video_np: Batch of videos on which to run prediction. 
Accepted shapes\n ( num_frames, height, width, num_channels)\n (batch_size, num_frames, height, width, num_channels)\n model: I3DLoader model class\n\n \"\"\"\n\n video_tf = tf.constant(video_np, dtype=tf.float32)\n\n # Add batch axis\n if video_tf.ndim != 5:\n video_tf = video_tf[tf.newaxis, ...]\n\n if \"i3d\" in model.name.lower():\n I3D_MIN_ACCEPTABLE_FRAMES = 100 # TODO check actual value, fails with 4 frames\n num_frames = video_np.shape[1]\n if num_frames < I3D_MIN_ACCEPTABLE_FRAMES:\n raise ValueError(\n f\"Too few frames: {num_frames}, required at least {I3D_MIN_ACCEPTABLE_FRAMES}\"\n )\n\n # output is tensor of shape (batch_size, num_classes)\n # TODO process_time gives too high time estimate\n start_time = time.process_time()\n model_output = model.model(video_tf) # logits or soft maxed probabilities?\n run_time = time.process_time() - start_time\n # TODO CHeck that output of i3d has shape batch_size, num_classes\n\n # i3d output is dictionary with key 'default', and value tensor of shape (batch_size, num_classes)\n # print('Model name ', model_name)\n # print('DEBUG ', model_output)\n if \"i3d\" in model.name.lower():\n model_output = model_output[\"default\"]\n\n # Get probabilities of each video class\n probabilities = tf.nn.softmax(model_output)\n\n # Sort probabilities and generate a tensor of class labels sorted according to probability\n probabilities_sorted, labels_sorted = sort_2d_array_rows(\n probabilities.numpy(), model.labels\n )\n\n if verbose:\n # Print top 5 classes for at most the first 3 videos in the batch\n num_videos = min(3, video_tf.shape[0])\n num_top_classes = 5\n for video_count in range(num_videos):\n print()\n print(\"Top 5 predicted classes\")\n for class_count in range(num_top_classes):\n prob = probabilities_sorted[video_count, class_count] * 100\n label = labels_sorted[video_count, class_count]\n print(f\" {label:30}: {prob:5.2f}%\")\n\n print(f\"Execution time (sec): {run_time: 10.4f}\")\n\n args_max = np.argmax(probabilities, axis=1)\n\n return probabilities_sorted, labels_sorted, run_time, args_max\n\n\ndef store_results_in_dataframe(\n results_dict: dict, top_n: int = 5, savefile=None,\n) -> pd.DataFrame:\n \"\"\"Store results of an evaluation in a pandas DataFrame.\"\"\"\n df = pd.DataFrame(results_dict)\n df = (\n df.join(\n [\n pd.DataFrame(\n df[\"preds\"].to_list(), columns=[f\"pred_{i+1}\" for i in range(top_n)]\n ),\n pd.DataFrame(\n df[\"scores\"].to_list(),\n columns=[f\"score_{i+1}\" for i in range(top_n)],\n ),\n ]\n )\n .drop(columns=[\"scores\", \"preds\"])\n .rename_axis(index=\"video_id\")\n )\n if savefile:\n df.to_csv(savefile)\n return df\n\n\ndef compute_accuracy(results_df: pd.DataFrame, *, num_top_classes: int) -> float:\n \"\"\"Compute prediction accuracy using up to num_top_classes classes\"\"\"\n # TODO: make this function more elegant\n # TODO: currently the column names depend on what they are set to in store_results_in_dataframe\n # Remove that dependency or pass the column names\n results_df = results_df.copy()\n\n columns = [f\"pred_{str(id+1)}\" for id in range(num_top_classes)]\n\n for column in columns:\n results_df[column + \"_correct\"] = (\n results_df[\"Ground_Truth\"] == results_df[column]\n )\n\n columns_correct = [col for col in results_df.columns if \"correct\" in col]\n correct_df = results_df[columns_correct]\n\n columns_top = [f\"pred_{str(id+1)}_correct\" for id in range(num_top_classes)]\n correct_any_df = correct_df[columns_top].any(axis=1)\n\n try:\n accuracy = 
correct_any_df.value_counts(normalize=True)[True] * 100\n except KeyError: # no correct labels at all\n accuracy = 0\n print(f\"top-{num_top_classes} accuracy: {accuracy:5.2f}%\")\n\n return accuracy\n\n\ndef evaluate(model, dataset, num_videos, batch_size, top_n_results=5, **kwargs):\n \"\"\" Evaluate a model over a specific dataset.\"\"\"\n batch_kwargs = dict()\n try:\n batch_kwargs[\"num_frames\"] = kwargs.pop(\"num_frames\")\n except:\n pass\n results = defaultdict(list)\n for batch in dataset.get_batches(\n num_videos=num_videos, batch_size=batch_size, **batch_kwargs\n ):\n video_batch, labels, youtube_ids = batch\n scores, predictions, _, _ = predict(video_batch, model)\n\n # collect metadata and model results\n for i in range(batch_size):\n results[\"YouTube_Id\"].append(youtube_ids[i])\n results[\"Ground_Truth\"].append(labels[i])\n\n for s, p in zip(scores, predictions):\n results[\"scores\"].append(list(s[:top_n_results]))\n results[\"preds\"].append(list(p[:top_n_results]))\n\n return store_results_in_dataframe(results, top_n_results, **kwargs)\n",
"# ###########################################################################\n#\n# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)\n# (C) Cloudera, Inc. 2021\n# All rights reserved.\n#\n# Applicable Open Source License: Apache 2.0\n#\n# NOTE: Cloudera open source products are modular software products\n# made up of hundreds of individual components, each of which was\n# individually copyrighted. Each Cloudera open source product is a\n# collective work under U.S. Copyright Law. Your license to use the\n# collective work is as provided in your written agreement with\n# Cloudera. Used apart from the collective work, this file is\n# licensed for your use pursuant to the open source license\n# identified above.\n#\n# This code is provided to you pursuant a written agreement with\n# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute\n# this code. If you do not have a written agreement with Cloudera nor\n# with an authorized and properly licensed third party, you do not\n# have any rights to access nor to use this code.\n#\n# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the\n# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY\n# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED\n# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO\n# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND\n# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,\n# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS\n# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE\n# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR\n# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES\n# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF\n# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF\n# DATA.\n#\n# ###########################################################################\n\nimport cv2\nimport imageio\nimport numpy as np\nimport pathlib\nfrom tensorflow_docs.vis import embed\n\n\n# Adapted from https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub\ndef crop_center_square(frame):\n \"\"\"Crops a square from the center of a rectangular array.\"\"\"\n y, x = frame.shape[0:2]\n min_dim = min(y, x)\n start_x = (x // 2) - (min_dim // 2)\n start_y = (y // 2) - (min_dim // 2)\n return frame[start_y : start_y + min_dim, start_x : start_x + min_dim]\n\n\ndef pad_to_square(frame):\n \"\"\"Pads a rectangular array with zeros, so as to make it squared.\"\"\"\n y, x = frame.shape[0:2]\n if y > x:\n add_x_left = (y - x) // 2\n add_x_right = y - x - add_x_left\n frame = cv2.copyMakeBorder(\n frame, 0, 0, add_x_left, add_x_right, cv2.BORDER_CONSTANT, value=0\n )\n else:\n add_y_up = (x - y) // 2\n add_y_down = x - y - add_y_up\n frame = cv2.copyMakeBorder(\n frame, add_y_down, add_y_up, 0, 0, cv2.BORDER_CONSTANT, value=0\n )\n\n return frame\n\n\n# Adapted from https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub\ndef load_and_resize_video(path, resize=(224, 224), resize_type=\"crop\"):\n \"\"\"Convert video to Numpy array of shape and type expected by i3d model.\n\n The function resizes them to shape\n [max_frames, 224, 224, 3], in RGB format, with floating point values in\n range [0, 1], as expected by i3d.\n \"\"\"\n\n cap = cv2.VideoCapture(path)\n frames = []\n try:\n while True:\n ret, frame = cap.read() # frame is in 
BGR format\n if not ret:\n break\n\n if resize_type == \"crop\":\n frame = crop_center_square(frame)\n elif resize_type == \"pad\":\n frame = pad_to_square(frame)\n else:\n return ValueError(\"Invalid resize_type: \" + resize_type)\n\n frame = cv2.resize(frame, resize)\n frame = frame[:, :, [2, 1, 0]] # Convert from BGR to RGB\n frames.append(frame)\n finally:\n cap.release()\n return np.array(frames).astype(\"float32\") / 255.0\n\n\ndef resample_video(video: np.array, num_frames: int) -> np.array:\n \"\"\" Resample a video to have num_frames number of frames.\n \n Video must have shape (1, current_num_frames, :, :, :)\n \n if num_frames < current_num_frames, video is downsampled by removing frames\n more or less evenly spaced throughout the duration of the video. \n \n if num_frames > current_num_frames, video is upsampled by duplicating frames\n more or less evenly spaced throughout the duration of the video. \n \"\"\"\n current_num_frames = video.shape[1]\n indices = [(current_num_frames * i) // num_frames for i in range(num_frames)]\n return video[:, indices, :, :, :]\n\n\ndef video_acceptable(video_np, min_num_frames_acceptable: int = 128) -> bool:\n \"\"\"Checks if video has minimum acceptable temporal length\"\"\"\n num_frames = video_np.shape[1]\n if num_frames < min_num_frames_acceptable:\n video_path_no_dir = pathlib.Path(video_path).name\n print(f\"Skipping video {video_path_no_dir}, too few frames: {num_frames}\")\n return False\n return True\n\n\n# Adapted from https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub\ndef to_gif(images):\n \"\"\"Converts an array of images to gif.\"\"\"\n converted_images = np.clip(images * 255, 0, 255).astype(np.uint8)\n imageio.mimsave(\"./animation.gif\", converted_images, fps=25)\n return embed.embed_file(\"./animation.gif\")\n"
] |
[
[
"numpy.take_along_axis",
"tensorflow.nn.softmax",
"tensorflow.constant",
"numpy.take",
"pandas.DataFrame",
"numpy.argmax",
"numpy.argsort"
],
[
"numpy.array",
"numpy.clip"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
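In the predict.py source above, sort_2d_array_rows ranks per-example class scores with argsort plus take_along_axis. A self-contained sketch of that pattern, assuming only numpy (variable names are illustrative):

import numpy as np

scores = np.array([[0.3, 0.1, 0.4], [0.6, 0.9, 0.7]])
labels = np.array(["a", "b", "c"])

# Per-row descending order of the class scores.
order = np.argsort(scores, axis=1)[..., ::-1]
scores_sorted = np.take_along_axis(scores, order, axis=1)
labels_sorted = np.take(labels, order)  # index the labels by the same ordering

# scores_sorted -> [[0.4, 0.3, 0.1], [0.9, 0.7, 0.6]]
# labels_sorted -> [["c", "a", "b"], ["b", "c", "a"]]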
tum-pbs/VOLSIM
|
[
"795a31c813bf072eb88289126d7abd9fba8b0e54"
] |
[
"src/volsim/loss.py"
] |
[
"import numpy as np\nimport scipy.stats.stats as sciStats\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport logging\n\n\nfrom volsim.params import *\n\nclass CorrelationLoss(nn.modules.loss._Loss):\n def __init__(self, params:Params, useGPU:bool):\n super(CorrelationLoss, self).__init__()\n self.useGPU = useGPU\n if useGPU:\n self.epsilon = torch.tensor(0.0000001).cuda()\n else:\n self.epsilon = torch.tensor(0.0000001)\n self.params = params\n self.corHistoryMode = params.corHistoryMode\n self.weightMSE = params.lossFacMSE\n self.weightRelMSE = params.lossFacRelMSE\n self.weightPearsonCorr = params.lossFacPearsonCorr\n self.weightSlConvReg = params.lossFacSlConvReg\n self.weightSizeReg = params.lossFacSizeReg\n self.sizeRegExp = params.lossSizeExp\n self.useOnlineMean = params.lossOnlineMean\n self.aggregateCorr = params.lossCorrAggregate\n\n self.resetCorrAcc()\n\n self.stepHist = np.zeros(6)\n self.stepHistCount = 0\n\n self.lastSampleSliceCorr = 0\n self.epochHist = {\"pred\":[], \"targ\":[], \"path\":[], \"enstd\":[], \"tempPred\":[], \"tempTarg\":[], \"tempPath\":[], \"tempEnstd\":[]}\n\n # has to be called after all simulation pairs of one sample are processed\n # to ensure correct loss computation for next sample \n def resetCorrAcc(self):\n self.accX = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n self.accY = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n self.count = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n self.accFinal = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n self.countFinal = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n self.accX.requires_grad = False\n self.accY.requires_grad = False\n self.count.requires_grad = False\n self.accFinal.requires_grad = False\n self.countFinal.requires_grad = False\n\n\n def forward(self, prediction:torch.Tensor, target:torch.Tensor, path:np.ndarray) -> torch.Tensor:\n if self.useGPU:\n prediction = prediction.cuda()\n target = target.cuda()\n\n corr = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n correlation = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n # pearson correlation\n if self.weightPearsonCorr > 0:\n corr = self.pearsonCorrOnline(prediction, target)\n self.lastSampleSliceCorr = torch.mean(corr).item()\n correlation = self.weightPearsonCorr * 0.5 * (1-corr)\n\n # mse\n l2 = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n if self.weightMSE > 0:\n l2 = self.weightMSE * self.distanceL2(prediction, target)\n\n # relative mse\n relL2 = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n if self.weightRelMSE > 0:\n predMean = self.accX.detach() / self.count.detach()\n targMean = self.accY.detach() / self.count.detach()\n relL2 = self.weightRelMSE * self.distanceL2(prediction-predMean, target-targMean)\n\n # size regularization\n sizeReg = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n if self.weightSizeReg > 0:\n temp = torch.where(prediction > 1, torch.pow(prediction-1, self.sizeRegExp), torch.zeros_like(prediction))\n sizeReg = self.weightSizeReg * torch.sum(temp, dim=1)\n\n # step history\n self.stepHist = self.stepHist + np.array([\n torch.mean(l2+relL2+correlation+sizeReg).item(),\n torch.mean(l2).item(),\n torch.mean(correlation).item(),\n torch.mean(corr).item(),\n torch.mean(relL2).item(),\n torch.mean(sizeReg).item(),\n ])\n self.stepHistCount = self.stepHistCount + 1\n\n # 
epoch history\n self.epochHist[\"tempPred\"] += [prediction.cpu().detach().numpy()]\n self.epochHist[\"tempTarg\"] += [target.cpu().detach().numpy()]\n self.epochHist[\"tempPath\"] += [np.repeat(path[:,None], target.shape[1], axis=1)]\n\n result = torch.mean(l2 + relL2 + correlation + sizeReg)\n if torch.isnan(result):\n logging.error(\"NAN in loss!\")\n logging.error(\"L2 \" + str(l2))\n logging.error(\"Rel L2 \" + str(relL2))\n logging.error(\"Corr \" + str(corr))\n logging.error(\"Correlation \" + str(correlation))\n raise ValueError(\"NAN in loss!\")\n return result\n\n\n def updateMeanAccs(self, x:torch.Tensor, y:torch.Tensor):\n if self.useGPU:\n x = x.cuda()\n y = y.cuda()\n\n self.count = self.count + x.shape[1]\n self.accX = self.accX + torch.sum(x, dim=1, keepdim=True)\n self.accY = self.accY + torch.sum(y, dim=1, keepdim=True)\n\n\n def pearsonCorrOnline(self, x:torch.Tensor, y:torch.Tensor) -> torch.Tensor:\n if self.useOnlineMean:\n self.updateMeanAccs(x, y)\n\n if self.count <= 1:\n return torch.zeros_like(self.accFinal)\n\n meanX = self.accX.detach() / self.count.detach()\n meanY = self.accY.detach() / self.count.detach()\n xm = x - meanX\n ym = y - meanY\n rNum = torch.sum(xm*ym, dim=1, keepdim=True) #manual dot product\n rDen = torch.norm(xm, 2, dim=1, keepdim=True) * torch.norm(ym, 2, dim=1, keepdim=True)\n rVal = rNum / torch.max(rDen, self.epsilon) #epsilon for numerical stability\n\n if any(torch.isnan(rVal)):\n logging.error(\"NAN in correlation computation!\")\n logging.error(\"x \" + str(x))\n logging.error(\"y \" + str(y))\n logging.error(\"accX \" + str(self.accX))\n logging.error(\"accY \" + str(self.accY))\n logging.error(\"count \" + str(self.count))\n logging.error(\"meanX \" + str(meanX))\n logging.error(\"meanY \" + str(meanY))\n logging.error(\"rNum \" + str(rNum))\n logging.error(\"rDen \" + str(rDen))\n logging.error(\"rVal \" + str(rVal))\n raise ValueError(\"NAN in correlation computation!\")\n\n if self.aggregateCorr:\n # average over previous pairs from same sample for better stability\n self.accFinal = self.accFinal.detach() + rVal\n self.countFinal = self.countFinal.detach() + 1\n return self.accFinal / self.countFinal\n else:\n return rVal\n\n\n def getStepHistory(self) -> np.ndarray:\n result = self.stepHist / self.stepHistCount\n self.stepHist = np.zeros(6)\n self.stepHistCount = 0\n self.resetCorrAcc()\n\n # normalize all step distances to [0.1, 1.0]\n predStep = np.concatenate(self.epochHist[\"tempPred\"], axis=1) #[3,55]\n dMax = np.max(predStep, axis=1, keepdims=True) #[3,1]\n dMin = np.min(predStep, axis=1, keepdims=True) #[3,1]\n if (dMin == dMax).all():\n predStep = predStep - dMin + 0.1\n elif (dMin == dMax).any():\n for i in range(dMin.shape[0]):\n if dMin[i] == dMax[i]:\n predStep[i] = predStep[i] - dMin[i] + 0.1\n else:\n predStep[i] = 0.9 * ((predStep[i] - dMin[i]) / (dMax[i] - dMin[i])) + 0.1\n else:\n predStep = 0.9 * ((predStep - dMin) / (dMax - dMin)) + 0.1\n\n self.epochHist[\"pred\"] += [predStep]\n self.epochHist[\"targ\"] += [np.concatenate(self.epochHist[\"tempTarg\"], axis=1)]\n self.epochHist[\"path\"] += [np.concatenate(self.epochHist[\"tempPath\"], axis=1)]\n self.epochHist[\"tempPred\"] = []\n self.epochHist[\"tempTarg\"] = []\n self.epochHist[\"tempPath\"] = []\n return result\n\n def getEpochHistory(self, splits:dict=None) -> tuple:\n predEpoch = np.concatenate(self.epochHist[\"pred\"], axis=0)\n targEpoch = np.concatenate(self.epochHist[\"targ\"], axis=0)\n pathEpoch = np.concatenate(self.epochHist[\"path\"], 
axis=0)\n\n corrSplit = {}\n if splits:\n for split in splits:\n idx = np.core.defchararray.find(pathEpoch.astype(str), splits[split]) >= 0\n stacked = np.stack([predEpoch[idx], targEpoch[idx]])\n if self.corHistoryMode == \"pearson\":\n corr = np.corrcoef(stacked)[0,1]\n elif self.corHistoryMode == \"spearman\":\n corr, _ = sciStats.spearmanr(stacked.transpose((1,0)))\n else:\n raise ValueError(\"Invalid ground \")\n corrSplit[split] = corr\n\n stackedAll = np.stack([predEpoch.flatten(), targEpoch.flatten()])\n if self.corHistoryMode == \"pearson\":\n corrAll = np.corrcoef(stackedAll)[0,1]\n elif self.corHistoryMode == \"spearman\":\n corrAll, _ = sciStats.spearmanr(stackedAll.transpose((1,0)))\n else:\n raise ValueError(\"Invalid ground \")\n\n self.epochHist[\"pred\"] = []\n self.epochHist[\"targ\"] = []\n self.epochHist[\"path\"] = []\n return corrAll, corrSplit\n\n def distanceL2(self, x:torch.Tensor, y:torch.Tensor) -> torch.Tensor:\n return F.mse_loss(x, y, reduction='none')\n\n def distanceL1(self, x:torch.Tensor, y:torch.Tensor) -> torch.Tensor:\n return F.l1_loss(x, y, reduction='none')\n"
] |
[
[
"torch.mean",
"torch.norm",
"torch.max",
"torch.isnan",
"numpy.min",
"torch.nn.functional.l1_loss",
"torch.sum",
"torch.zeros_like",
"numpy.stack",
"torch.tensor",
"numpy.concatenate",
"numpy.max",
"torch.nn.functional.mse_loss",
"numpy.corrcoef",
"numpy.repeat",
"numpy.zeros",
"torch.pow"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
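The CorrelationLoss in the loss.py source above centers each batch row, then divides a dot product by a norm product with an epsilon guard against division by zero. A minimal sketch of that row-wise Pearson correlation, assuming only torch and leaving out the repo's online-mean bookkeeping:

import torch


def pearson_corr(x, y, epsilon=1e-7):
    # Center each row, then correlate: dot(xm, ym) / (||xm|| * ||ym||).
    xm = x - x.mean(dim=1, keepdim=True)
    ym = y - y.mean(dim=1, keepdim=True)
    num = torch.sum(xm * ym, dim=1, keepdim=True)
    den = torch.norm(xm, 2, dim=1, keepdim=True) * torch.norm(ym, 2, dim=1, keepdim=True)
    return num / torch.max(den, torch.tensor(epsilon))  # epsilon avoids 0/0


x = torch.tensor([[1.0, 2.0, 3.0]])
y = torch.tensor([[2.0, 4.0, 6.0]])
print(pearson_corr(x, y))  # close to 1.0 for perfectly correlated rows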